diff --git a/config-xenlinux b/config-xenlinux index 19d7cca..1dce5ab 100644 --- a/config-xenlinux +++ b/config-xenlinux @@ -1,15 +1,15 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.34.1 -# Wed Jul 7 12:58:00 2010 +# Linux/x86_64 2.6.38.3 Kernel Configuration +# Sun Apr 17 01:37:01 2011 # CONFIG_64BIT=y # CONFIG_X86_32 is not set CONFIG_X86_64=y CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y CONFIG_OUTPUT_FORMAT="elf64-x86-64" CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CMOS_UPDATE=y CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_LOCKDEP_SUPPORT=y @@ -18,6 +18,7 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y CONFIG_MMU=y CONFIG_ZONE_DMA=y CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_GENERIC_ISA_DMA=y CONFIG_GENERIC_IOMAP=y CONFIG_GENERIC_BUG=y @@ -43,15 +44,10 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_AUDIT_ARCH=y CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_EARLY_RES=y -CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_X86_64_SMP=y CONFIG_X86_NO_TSS=y CONFIG_X86_NO_IDT=y +CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" # CONFIG_KTIME_SCALAR is not set CONFIG_SUSE_KERNEL=y # CONFIG_ENTERPRISE_SUPPORT is not set @@ -59,6 +55,8 @@ CONFIG_SUSE_KERNEL=y # CONFIG_KERNEL_DESKTOP is not set CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y +CONFIG_IRQ_WORK=y # # General setup @@ -66,13 +64,11 @@ CONFIG_CONSTRUCTORS=y CONFIG_EXPERIMENTAL=y CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_LOCALVERSION="-9.xenlinux.qubes.x86_64" +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-12.xenlinux.qubes.x86_64" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_HAVE_KERNEL_GZIP=y CONFIG_KERNEL_GZIP=y -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_LZO is not set CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y @@ -86,14 +82,28 @@ CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y CONFIG_AUDIT_TREE=y +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_PENDING_IRQ=y +# CONFIG_AUTO_IRQ_AFFINITY is not set +CONFIG_IRQ_PER_CPU=y +# CONFIG_HARDIRQS_SW_RESEND is not set +CONFIG_SPARSE_IRQ=y # # RCU Subsystem # CONFIG_TREE_RCU=y -# CONFIG_TREE_PREEMPT_RCU is not set -# CONFIG_TINY_RCU is not set +# CONFIG_PREEMPT_RCU is not set # CONFIG_RCU_TRACE is not set CONFIG_RCU_FANOUT=64 # CONFIG_RCU_FANOUT_EXACT is not set @@ -117,24 +127,29 @@ CONFIG_CGROUP_MEM_RES_CTLR=y CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y -CONFIG_MM_OWNER=y -# CONFIG_SYSFS_DEPRECATED_V2 is not set -CONFIG_RELAY=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_NET_NS=y +# CONFIG_SCHED_AUTOGROUP is not set +CONFIG_MM_OWNER=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y CONFIG_RD_BZIP2=y CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y CONFIG_RD_LZO=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 
CONFIG_SYSCTL=y CONFIG_ANON_INODES=y +# CONFIG_EXPERT is not set # CONFIG_EMBEDDED is not set CONFIG_UID16=y CONFIG_SYSCTL_SYSCALL=y @@ -154,18 +169,19 @@ CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y # # Kernel Performance Events And Counters # CONFIG_PERF_EVENTS=y +# CONFIG_PERF_COUNTERS is not set # CONFIG_DEBUG_PERF_USE_VMALLOC is not set CONFIG_VM_EVENT_COUNTERS=y CONFIG_PCI_QUIRKS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set -# CONFIG_SLOB is not set CONFIG_DEFAULT_VM_DIRTY_RATIO=40 CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y @@ -173,6 +189,7 @@ CONFIG_OPROFILE=m # CONFIG_OPROFILE_EVENT_MULTIPLEX is not set CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y +# CONFIG_JUMP_LABEL is not set CONFIG_OPTPROBES=y CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y CONFIG_KRETPROBES=y @@ -182,17 +199,18 @@ CONFIG_HAVE_KRETPROBES=y CONFIG_HAVE_OPTPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y CONFIG_HAVE_DMA_ATTRS=y +CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y CONFIG_HAVE_DMA_API_DEBUG=y CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y # # GCOV-based kernel profiling # # CONFIG_GCOV_KERNEL is not set -CONFIG_SLOW_WORK=y -CONFIG_SLOW_WORK_DEBUG=y # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y @@ -204,11 +222,11 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_STOP_MACHINE=y +# CONFIG_UTRACE is not set CONFIG_BLOCK=y CONFIG_BLK_DEV_BSG=y CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set +# CONFIG_BLK_DEV_THROTTLING is not set CONFIG_BLOCK_COMPAT=y # @@ -218,7 +236,6 @@ CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEBUG_CFQ_IOSCHED is not set # CONFIG_DEFAULT_DEADLINE is not set CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set @@ -263,33 +280,11 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y -CONFIG_SPARSE_IRQ=y CONFIG_X86_MPPARSE=y CONFIG_X86_64_XEN=y CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y CONFIG_NO_BOOTMEM=y -# CONFIG_M386 is not set -# CONFIG_M486 is not set -# CONFIG_M586 is not set -# CONFIG_M586TSC is not set -# CONFIG_M586MMX is not set -# CONFIG_M686 is not set -# CONFIG_MPENTIUMII is not set -# CONFIG_MPENTIUMIII is not set -# CONFIG_MPENTIUMM is not set -# CONFIG_MPENTIUM4 is not set -# CONFIG_MK6 is not set -# CONFIG_MK7 is not set # CONFIG_MK8 is not set -# CONFIG_MCRUSOE is not set -# CONFIG_MEFFICEON is not set -# CONFIG_MWINCHIPC6 is not set -# CONFIG_MWINCHIP3D is not set -# CONFIG_MGEODEGX1 is not set -# CONFIG_MGEODE_LX is not set -# CONFIG_MCYRIXIII is not set -# CONFIG_MVIAC3_2 is not set -# CONFIG_MVIAC7 is not set # CONFIG_MPSC is not set # CONFIG_MCORE2 is not set # CONFIG_MATOM is not set @@ -297,6 +292,7 @@ CONFIG_GENERIC_CPU=y CONFIG_X86_CPU=y CONFIG_X86_INTERNODE_CACHE_SHIFT=6 CONFIG_X86_CMPXCHG=y +CONFIG_CMPXCHG_LOCAL=y CONFIG_X86_L1_CACHE_SHIFT=6 CONFIG_X86_XADD=y CONFIG_X86_WP_WORKS_OK=y @@ -313,9 +309,9 @@ CONFIG_IOMMU_HELPER=y # CONFIG_IOMMU_API is not set # CONFIG_MAXSMP is not set CONFIG_NR_CPUS=512 +# CONFIG_IRQ_TIME_ACCOUNTING is not set CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y CONFIG_X86_MCE=y @@ -327,16 +323,17 @@ CONFIG_MICROCODE_OLD_INTERFACE=y CONFIG_X86_MSR=y CONFIG_X86_CPUID=m CONFIG_ARCH_PHYS_ADDR_T_64BIT=y 
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y CONFIG_ARCH_PROC_KCORE_TEXT=y CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 CONFIG_SELECT_MEMORY_MODEL=y CONFIG_FLATMEM_MANUAL=y -# CONFIG_DISCONTIGMEM_MANUAL is not set -# CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y @@ -391,7 +388,7 @@ CONFIG_ACPI_SLEEP=y CONFIG_ACPI_PROCFS=y CONFIG_ACPI_PROCFS_POWER=y CONFIG_ACPI_POWER_METER=m -CONFIG_ACPI_SYSFS_POWER=y +# CONFIG_ACPI_EC_DEBUGFS is not set CONFIG_ACPI_PROC_EVENT=y CONFIG_ACPI_AC=m CONFIG_ACPI_BATTERY=m @@ -400,18 +397,21 @@ CONFIG_ACPI_VIDEO=m CONFIG_ACPI_FAN=m CONFIG_ACPI_DOCK=y CONFIG_ACPI_PROCESSOR=m +# CONFIG_ACPI_IPMI is not set CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PROCESSOR_AGGREGATOR=m CONFIG_ACPI_THERMAL=m CONFIG_ACPI_CUSTOM_DSDT_FILE="" # CONFIG_ACPI_CUSTOM_DSDT is not set -CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS=y CONFIG_ACPI_BLACKLIST_YEAR=0 CONFIG_ACPI_DEBUG=y # CONFIG_ACPI_DEBUG_FUNC_TRACE is not set CONFIG_ACPI_PCI_SLOT=m CONFIG_ACPI_CONTAINER=m +CONFIG_ACPI_HOTPLUG_MEMORY=m CONFIG_ACPI_SBS=m +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_APEI is not set CONFIG_ACPI_PV_SLEEP=y CONFIG_PROCESSOR_EXTERNAL_CONTROL=y CONFIG_SFI=y @@ -420,12 +420,6 @@ CONFIG_SFI=y # CPU Frequency scaling # -# -# Memory power savings -# -CONFIG_I7300_IDLE_IOAT_CHANNEL=y -CONFIG_I7300_IDLE=m - # # Bus options (PCI etc.) # @@ -433,8 +427,7 @@ CONFIG_PCI=y CONFIG_PCI_DIRECT=y CONFIG_PCI_MMCONFIG=y CONFIG_PCI_DOMAINS=y -CONFIG_XEN_PCIDEV_FRONTEND=y -# CONFIG_XEN_PCIDEV_FE_DEBUG is not set +# CONFIG_PCI_CNB20LE_QUIRK is not set CONFIG_PCIEPORTBUS=y CONFIG_HOTPLUG_PCI_PCIE=m CONFIG_PCIEAER=y @@ -450,9 +443,11 @@ CONFIG_PCI_GUESTDEV=y CONFIG_PCI_IOMULTI=y CONFIG_PCI_RESERVE=y CONFIG_PCI_STUB=y +CONFIG_XEN_PCIDEV_FRONTEND=y +# CONFIG_XEN_PCIDEV_FE_DEBUG is not set CONFIG_PCI_IOV=y CONFIG_ISA_DMA_API=y -CONFIG_K8_NB=y +CONFIG_AMD_NB=y CONFIG_PCCARD=m CONFIG_PCMCIA=m CONFIG_PCMCIA_LOAD_CIS=y @@ -469,7 +464,7 @@ CONFIG_YENTA_ENE_TUNE=y CONFIG_YENTA_TOSHIBA=y CONFIG_PD6729=m CONFIG_I82092=m -CONFIG_PCCARD_NONSTATIC=m +CONFIG_PCCARD_NONSTATIC=y CONFIG_HOTPLUG_PCI=m CONFIG_HOTPLUG_PCI_FAKE=m CONFIG_HOTPLUG_PCI_ACPI=m @@ -492,6 +487,7 @@ CONFIG_IA32_AOUT=m CONFIG_COMPAT=y CONFIG_COMPAT_FOR_U64_ALIGNMENT=y CONFIG_SYSVIPC_COMPAT=y +CONFIG_HAVE_TEXT_POKE_SMP=y CONFIG_NET=y CONFIG_COMPAT_NETLINK_MESSAGES=y @@ -522,9 +518,9 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y +# CONFIG_NET_IPGRE_DEMUX is not set CONFIG_IP_MROUTE=y +# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y # CONFIG_ARPD is not set @@ -553,11 +549,7 @@ CONFIG_TCP_CONG_LP=m CONFIG_TCP_CONG_VENO=m CONFIG_TCP_CONG_YEAH=m CONFIG_TCP_CONG_ILLINOIS=m -# CONFIG_DEFAULT_BIC is not set CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_HTCP is not set -# CONFIG_DEFAULT_VEGAS is not set -# CONFIG_DEFAULT_WESTWOOD is not set # CONFIG_DEFAULT_RENO is not set CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_TCP_MD5SIG is not set @@ -584,6 +576,7 @@ CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y # CONFIG_IPV6_MROUTE is not set CONFIG_NETWORK_SECMARK=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set CONFIG_NETFILTER_ADVANCED=y @@ -596,7 +589,6 @@ CONFIG_NETFILTER_NETLINK=m CONFIG_NETFILTER_NETLINK_QUEUE=m 
CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ACCT=y CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_ZONES=y @@ -618,29 +610,47 @@ CONFIG_NF_CONNTRACK_SLP=m CONFIG_NF_CT_NETLINK=m CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m CONFIG_NETFILTER_XT_TARGET_CT=m CONFIG_NETFILTER_XT_TARGET_DSCP=m CONFIG_NETFILTER_XT_TARGET_HL=m +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_RATEEST=m +# CONFIG_NETFILTER_XT_TARGET_TEE is not set CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_SECMARK=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +# CONFIG_NETFILTER_XT_MATCH_CPU is not set CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m @@ -648,11 +658,13 @@ CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m CONFIG_NETFILTER_XT_MATCH_HELPER=m CONFIG_NETFILTER_XT_MATCH_HL=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +# CONFIG_NETFILTER_XT_MATCH_IPVS is not set CONFIG_NETFILTER_XT_MATCH_LENGTH=m CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m @@ -661,7 +673,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m -# CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m @@ -670,7 +681,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y # CONFIG_IP_VS_DEBUG is not set @@ -704,6 +714,8 @@ CONFIG_IP_VS_NQ=m # IPVS application helper # CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +# CONFIG_IP_VS_PE_SIP is not set # # IP: Netfilter Configuration @@ -750,6 +762,7 @@ CONFIG_IP_NF_ARP_MANGLE=m # # IPv6: Netfilter Configuration # +CONFIG_NF_DEFRAG_IPV6=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m @@ -803,7 +816,6 @@ CONFIG_INET_DCCP_DIAG=m # CONFIG_IP_DCCP_CCID2_DEBUG is not set CONFIG_IP_DCCP_CCID3=y # CONFIG_IP_DCCP_CCID3_DEBUG is not set -CONFIG_IP_DCCP_CCID3_RTO=100 CONFIG_IP_DCCP_TFRC_LIB=y # @@ -812,6 +824,7 @@ CONFIG_IP_DCCP_TFRC_LIB=y # CONFIG_IP_DCCP_DEBUG is not set # CONFIG_NET_DCCPPROBE is not set CONFIG_IP_SCTP=m +# CONFIG_NET_SCTPPROBE is not set # CONFIG_SCTP_DBG_MSG is not set # CONFIG_SCTP_DBG_OBJCNT is not set # CONFIG_SCTP_HMAC_NONE is not set @@ -829,6 +842,7 @@ CONFIG_ATM_LANE=m CONFIG_ATM_MPOA=m 
CONFIG_ATM_BR2684=m # CONFIG_ATM_BR2684_IPFILTER is not set +# CONFIG_L2TP is not set CONFIG_STP=m CONFIG_GARP=m CONFIG_BRIDGE=m @@ -860,6 +874,7 @@ CONFIG_LAPB=m # CONFIG_ECONET is not set CONFIG_WAN_ROUTER=m CONFIG_PHONET=m +# CONFIG_PHONET_PIPECTRLR is not set CONFIG_IEEE802154=m CONFIG_NET_SCHED=y @@ -915,9 +930,15 @@ CONFIG_NET_ACT_NAT=m CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m +# CONFIG_NET_ACT_CSUM is not set CONFIG_NET_CLS_IND=y CONFIG_NET_SCH_FIFO=y CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_RPS=y +CONFIG_XPS=y # # Network testing @@ -953,9 +974,12 @@ CONFIG_CAN_BCM=m # CAN Device Drivers # CONFIG_CAN_VCAN=m +# CONFIG_CAN_SLCAN is not set CONFIG_CAN_DEV=m CONFIG_CAN_CALC_BITTIMING=y CONFIG_CAN_MCP251X=m +CONFIG_CAN_JANZ_ICAN3=m +# CONFIG_PCH_CAN is not set CONFIG_CAN_SJA1000=m CONFIG_CAN_SJA1000_PLATFORM=m CONFIG_CAN_EMS_PCI=m @@ -966,6 +990,8 @@ CONFIG_CAN_PLX_PCI=m # CAN USB interfaces # CONFIG_CAN_EMS_USB=m +# CONFIG_CAN_ESD_USB2 is not set +# CONFIG_CAN_SOFTING is not set # CONFIG_CAN_DEBUG_DEVICES is not set CONFIG_IRDA=m @@ -1042,6 +1068,7 @@ CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_H4=y CONFIG_BT_HCIUART_BCSP=y +# CONFIG_BT_HCIUART_ATH3K is not set CONFIG_BT_HCIUART_LL=y CONFIG_BT_HCIBCM203X=m CONFIG_BT_HCIBPA10X=m @@ -1081,9 +1108,9 @@ CONFIG_LIB80211_CRYPT_TKIP=m CONFIG_MAC80211=m CONFIG_MAC80211_HAS_RC=y CONFIG_MAC80211_RC_MINSTREL=y -# CONFIG_MAC80211_RC_DEFAULT_PID is not set +CONFIG_MAC80211_RC_MINSTREL_HT=y CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel" +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" CONFIG_MAC80211_MESH=y CONFIG_MAC80211_LEDS=y CONFIG_MAC80211_DEBUGFS=y @@ -1096,6 +1123,9 @@ CONFIG_RFKILL_INPUT=y CONFIG_NET_9P=m CONFIG_NET_9P_RDMA=m # CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set CONFIG_NETVM=y # @@ -1143,6 +1173,7 @@ CONFIG_NFTL_RW=y CONFIG_INFTL=m CONFIG_RFD_FTL=m CONFIG_SSFDC=m +# CONFIG_SM_FTL is not set CONFIG_MTD_OOPS=m # @@ -1196,6 +1227,7 @@ CONFIG_MTD_SCB2_FLASH=m CONFIG_MTD_NETtel=m CONFIG_MTD_L440GX=m CONFIG_MTD_PCI=m +# CONFIG_MTD_PCMCIA is not set CONFIG_MTD_GPIO_ADDR=m CONFIG_MTD_INTEL_VR_NOR=m CONFIG_MTD_PLATRAM=m @@ -1231,11 +1263,15 @@ CONFIG_MTD_DOCPROBE_ADVANCED=y CONFIG_MTD_DOCPROBE_ADDRESS=0x0000 CONFIG_MTD_DOCPROBE_HIGH=y CONFIG_MTD_DOCPROBE_55AA=y +CONFIG_MTD_NAND_ECC=m +CONFIG_MTD_NAND_ECC_SMC=y CONFIG_MTD_NAND=m CONFIG_MTD_NAND_VERIFY_WRITE=y -CONFIG_MTD_NAND_ECC_SMC=y +# CONFIG_MTD_SM_COMMON is not set CONFIG_MTD_NAND_MUSEUM_IDS=y +# CONFIG_MTD_NAND_DENALI is not set CONFIG_MTD_NAND_IDS=m +# CONFIG_MTD_NAND_RICOH is not set CONFIG_MTD_NAND_DISKONCHIP=m CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0 @@ -1257,10 +1293,6 @@ CONFIG_MTD_ONENAND_SIM=m # CONFIG_MTD_LPDDR=m CONFIG_MTD_QINFO_PROBE=m - -# -# UBI - Unsorted block images -# CONFIG_MTD_UBI=m CONFIG_MTD_UBI_WL_THRESHOLD=4096 CONFIG_MTD_UBI_BEB_RESERVE=1 @@ -1340,10 +1372,12 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_BUFFERS=8 CONFIG_CDROM_PKTCDVD_WCACHE=y CONFIG_ATA_OVER_ETH=m -CONFIG_CIPHER_TWOFISH=m # CONFIG_BLK_DEV_HD is not set +# CONFIG_BLK_DEV_RBD is not set CONFIG_MISC_DEVICES=y CONFIG_AD525X_DPOT=m +# CONFIG_AD525X_DPOT_I2C is not set +# CONFIG_AD525X_DPOT_SPI is not set CONFIG_IBM_ASM=m CONFIG_PHANTOM=m CONFIG_SGI_IOC4=m @@ -1355,10 +1389,18 @@ CONFIG_CS5535_MFGPT=m CONFIG_CS5535_MFGPT_DEFAULT_IRQ=7 
CONFIG_CS5535_CLOCK_EVENT_SRC=m CONFIG_HP_ILO=m +CONFIG_APDS9802ALS=m # CONFIG_ISL29003 is not set +CONFIG_ISL29020=m CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1780=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_HMC6352=m CONFIG_DS1682=m CONFIG_TI_DAC7512=m +CONFIG_BMP085=m +CONFIG_PCH_PHUB=m CONFIG_C2PORT=m CONFIG_C2PORT_DURAMAR_2150=m @@ -1376,6 +1418,11 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y CONFIG_IWMC3200TOP=m # CONFIG_IWMC3200TOP_DEBUG is not set # CONFIG_IWMC3200TOP_DEBUGFS is not set + +# +# Texas Instruments shared transport line discipline +# +CONFIG_TI_ST=m CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -1423,7 +1470,9 @@ CONFIG_SCSI_SRP_ATTRS=m CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_SCSI_LOWLEVEL=y CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m CONFIG_BE2ISCSI=m CONFIG_BLK_DEV_3W_XXXX_RAID=m @@ -1503,6 +1552,7 @@ CONFIG_SCSI_PM8001=m CONFIG_SCSI_SRP=m CONFIG_SCSI_BFA_FC=m CONFIG_SCSI_LOWLEVEL_PCMCIA=y +CONFIG_PCMCIA_AHA152X=m CONFIG_PCMCIA_FDOMAIN=m CONFIG_PCMCIA_QLOGIC=m CONFIG_PCMCIA_SYM53C500=m @@ -1520,69 +1570,97 @@ CONFIG_ATA=m CONFIG_ATA_VERBOSE_ERROR=y CONFIG_ATA_ACPI=y CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# CONFIG_SATA_AHCI=m +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_INIC162X=m +CONFIG_SATA_ACARD_AHCI=m CONFIG_SATA_SIL24=m CONFIG_ATA_SFF=y -CONFIG_SATA_SVW=m + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_SX4=m +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# CONFIG_ATA_PIIX=m CONFIG_SATA_MV=m CONFIG_SATA_NV=m -CONFIG_PDC_ADMA=m -CONFIG_SATA_QSTOR=m CONFIG_SATA_PROMISE=m -CONFIG_SATA_SX4=m CONFIG_SATA_SIL=m CONFIG_SATA_SIS=m +CONFIG_SATA_SVW=m CONFIG_SATA_ULI=m CONFIG_SATA_VIA=m CONFIG_SATA_VITESSE=m -CONFIG_SATA_INIC162X=m -CONFIG_PATA_ACPI=m + +# +# PATA SFF controllers with BMDMA +# CONFIG_PATA_ALI=m CONFIG_PATA_AMD=m CONFIG_PATA_ARTOP=m -CONFIG_PATA_ATP867X=m CONFIG_PATA_ATIIXP=m -CONFIG_PATA_CMD640_PCI=m +CONFIG_PATA_ATP867X=m CONFIG_PATA_CMD64X=m CONFIG_PATA_CS5520=m CONFIG_PATA_CS5530=m +CONFIG_PATA_CS5536=m CONFIG_PATA_CYPRESS=m CONFIG_PATA_EFAR=m -CONFIG_ATA_GENERIC=m CONFIG_PATA_HPT366=m CONFIG_PATA_HPT37X=m CONFIG_PATA_HPT3X2N=m CONFIG_PATA_HPT3X3=m # CONFIG_PATA_HPT3X3_DMA is not set -CONFIG_PATA_IT821X=m CONFIG_PATA_IT8213=m +CONFIG_PATA_IT821X=m CONFIG_PATA_JMICRON=m -# CONFIG_PATA_LEGACY is not set -CONFIG_PATA_TRIFLEX=m CONFIG_PATA_MARVELL=m -CONFIG_PATA_MPIIX=m -CONFIG_PATA_OLDPIIX=m CONFIG_PATA_NETCELL=m CONFIG_PATA_NINJA32=m -CONFIG_PATA_NS87410=m CONFIG_PATA_NS87415=m -CONFIG_PATA_OPTI=m +CONFIG_PATA_OLDPIIX=m CONFIG_PATA_OPTIDMA=m -CONFIG_PATA_PCMCIA=m CONFIG_PATA_PDC2027X=m CONFIG_PATA_PDC_OLD=m CONFIG_PATA_RADISYS=m CONFIG_PATA_RDC=m -CONFIG_PATA_RZ1000=m CONFIG_PATA_SC1200=m +CONFIG_PATA_SCH=m CONFIG_PATA_SERVERWORKS=m CONFIG_PATA_SIL680=m CONFIG_PATA_SIS=m CONFIG_PATA_TOSHIBA=m +CONFIG_PATA_TRIFLEX=m CONFIG_PATA_VIA=m CONFIG_PATA_WINBOND=m -CONFIG_PATA_SCH=m + +# +# PIO-only SFF controllers +# +CONFIG_PATA_CMD640_PCI=m +CONFIG_PATA_MPIIX=m +CONFIG_PATA_NS87410=m +CONFIG_PATA_OPTI=m +CONFIG_PATA_PCMCIA=m +CONFIG_PATA_RZ1000=m + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=m +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_AUTODETECT=y @@ -1592,16 +1670,14 @@ CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m # CONFIG_MULTICORE_RAID456 is not set -CONFIG_MD_RAID6_PQ=m 
-CONFIG_ASYNC_RAID6_TEST=m CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=y # CONFIG_DM_DEBUG is not set CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_RAID=m CONFIG_DM_MIRROR=m +CONFIG_DM_RAID=m CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m @@ -1610,6 +1686,7 @@ CONFIG_DM_MULTIPATH_ST=m CONFIG_DM_DELAY=m CONFIG_DM_RAID45=m CONFIG_DM_UEVENT=y +# CONFIG_TARGET_CORE is not set CONFIG_FUSION=y CONFIG_FUSION_SPI=m CONFIG_FUSION_FC=m @@ -1622,30 +1699,12 @@ CONFIG_FUSION_LAN=m # # IEEE 1394 (FireWire) support # - -# -# You can enable one or both FireWire driver stacks. -# - -# -# The newer stack is recommended. -# CONFIG_FIREWIRE=m CONFIG_FIREWIRE_OHCI=m CONFIG_FIREWIRE_OHCI_DEBUG=y CONFIG_FIREWIRE_SBP2=m CONFIG_FIREWIRE_NET=m -CONFIG_IEEE1394=m -CONFIG_IEEE1394_OHCI1394=m -CONFIG_IEEE1394_PCILYNX=m -CONFIG_IEEE1394_SBP2=m -# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set -CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y -CONFIG_IEEE1394_ETH1394=m -CONFIG_IEEE1394_RAWIO=m -CONFIG_IEEE1394_VIDEO1394=m -CONFIG_IEEE1394_DV1394=m -# CONFIG_IEEE1394_VERBOSEDEBUG is not set +# CONFIG_FIREWIRE_NOSY is not set CONFIG_I2O=m CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y CONFIG_I2O_EXT_ADAPTEC=y @@ -1676,6 +1735,7 @@ CONFIG_ARCNET_COM90xx=m CONFIG_ARCNET_COM90xxIO=m CONFIG_ARCNET_RIM_I=m # CONFIG_ARCNET_COM20020 is not set +CONFIG_MII=y CONFIG_PHYLIB=y # @@ -1689,6 +1749,7 @@ CONFIG_CICADA_PHY=m CONFIG_VITESSE_PHY=m CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m +CONFIG_BCM63XX_PHY=m CONFIG_ICPLUS_PHY=m CONFIG_REALTEK_PHY=m CONFIG_NATIONAL_PHY=m @@ -1699,7 +1760,6 @@ CONFIG_FIXED_PHY=y CONFIG_MDIO_BITBANG=m CONFIG_MDIO_GPIO=m CONFIG_NET_ETHERNET=y -CONFIG_MII=y CONFIG_HAPPYMEAL=m CONFIG_SUNGEM=m CONFIG_CASSINI=m @@ -1741,7 +1801,6 @@ CONFIG_B44_PCI_AUTOSELECT=y CONFIG_B44_PCICORE_AUTOSELECT=y CONFIG_B44_PCI=y CONFIG_FORCEDETH=m -CONFIG_FORCEDETH_NAPI=y CONFIG_E100=m CONFIG_FEALNX=m CONFIG_NATSEMI=m @@ -1778,7 +1837,6 @@ CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IP1000=m CONFIG_IGB=m -CONFIG_IGB_DCA=y CONFIG_IGBVF=m CONFIG_NS83820=m CONFIG_HAMACHI=m @@ -1799,6 +1857,10 @@ CONFIG_ATL1=m CONFIG_ATL1E=m CONFIG_ATL1C=m CONFIG_JME=m +CONFIG_STMMAC_ETH=m +# CONFIG_STMMAC_DA is not set +# CONFIG_STMMAC_DUAL_MAC is not set +CONFIG_PCH_GBE=m CONFIG_NETDEV_10000=y CONFIG_MDIO=m CONFIG_CHELSIO_T1=m @@ -1807,9 +1869,10 @@ CONFIG_CHELSIO_T3_DEPENDS=y CONFIG_CHELSIO_T3=m CONFIG_CHELSIO_T4_DEPENDS=y CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF_DEPENDS=y +CONFIG_CHELSIO_T4VF=m CONFIG_ENIC=m CONFIG_IXGBE=m -CONFIG_IXGBE_DCA=y CONFIG_IXGBE_DCB=y CONFIG_IXGBEVF=m CONFIG_IXGB=m @@ -1817,7 +1880,6 @@ CONFIG_S2IO=m CONFIG_VXGE=m # CONFIG_VXGE_DEBUG_TRACE_ALL is not set CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y CONFIG_NETXEN_NIC=m CONFIG_NIU=m CONFIG_MLX4_EN=m @@ -1827,6 +1889,7 @@ CONFIG_TEHUTI=m CONFIG_BNX2X=m CONFIG_QLCNIC=m CONFIG_QLGE=m +CONFIG_BNA=m CONFIG_SFC=m CONFIG_SFC_MTD=y CONFIG_BE2NET=m @@ -1839,6 +1902,7 @@ CONFIG_ABYSS=m CONFIG_WLAN=y CONFIG_PCMCIA_RAYCS=m CONFIG_LIBERTAS_THINFIRM=m +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set CONFIG_LIBERTAS_THINFIRM_USB=m CONFIG_AIRO=m CONFIG_ATMEL=m @@ -1860,18 +1924,27 @@ CONFIG_ATH_COMMON=m # CONFIG_ATH_DEBUG is not set CONFIG_ATH5K=m # CONFIG_ATH5K_DEBUG is not set +CONFIG_ATH5K_PCI=y CONFIG_ATH9K_HW=m CONFIG_ATH9K_COMMON=m CONFIG_ATH9K=m # CONFIG_ATH9K_DEBUGFS is not set +CONFIG_ATH9K_RATE_CONTROL=y +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set CONFIG_AR9170_USB=m CONFIG_AR9170_LEDS=y +CONFIG_CARL9170=m +CONFIG_CARL9170_LEDS=y +# CONFIG_CARL9170_DEBUGFS is not set 
+CONFIG_CARL9170_WPC=y CONFIG_B43=m CONFIG_B43_PCI_AUTOSELECT=y CONFIG_B43_PCICORE_AUTOSELECT=y CONFIG_B43_PCMCIA=y CONFIG_B43_SDIO=y CONFIG_B43_PIO=y +CONFIG_B43_PHY_N=y CONFIG_B43_PHY_LP=y CONFIG_B43_LEDS=y CONFIG_B43_HWRNG=y @@ -1905,8 +1978,13 @@ CONFIG_IPW2200_DEBUG=y CONFIG_LIBIPW=m CONFIG_LIBIPW_DEBUG=y CONFIG_IWLWIFI=m + +# +# Debugging Options +# CONFIG_IWLWIFI_DEBUG=y CONFIG_IWLWIFI_DEBUGFS=y +# CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is not set # CONFIG_IWLWIFI_DEVICE_TRACING is not set CONFIG_IWLAGN=m CONFIG_IWL4965=y @@ -1914,6 +1992,7 @@ CONFIG_IWL5000=y CONFIG_IWL3945=m CONFIG_IWM=m # CONFIG_IWM_DEBUG is not set +# CONFIG_IWM_TRACING is not set CONFIG_LIBERTAS=m CONFIG_LIBERTAS_USB=m CONFIG_LIBERTAS_CS=m @@ -1922,28 +2001,29 @@ CONFIG_LIBERTAS_SPI=m # CONFIG_LIBERTAS_DEBUG is not set CONFIG_LIBERTAS_MESH=y CONFIG_HERMES=m +# CONFIG_HERMES_PRISM is not set CONFIG_HERMES_CACHE_FW_ON_INIT=y CONFIG_PLX_HERMES=m CONFIG_TMD_HERMES=m CONFIG_NORTEL_HERMES=m -CONFIG_PCI_HERMES=m CONFIG_PCMCIA_HERMES=m CONFIG_PCMCIA_SPECTRUM=m +CONFIG_ORINOCO_USB=m CONFIG_P54_COMMON=m CONFIG_P54_USB=m CONFIG_P54_PCI=m CONFIG_P54_SPI=m +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set CONFIG_P54_LEDS=y CONFIG_RT2X00=m CONFIG_RT2400PCI=m CONFIG_RT2500PCI=m CONFIG_RT61PCI=m -CONFIG_RT2800PCI_PCI=y # CONFIG_RT2800PCI is not set CONFIG_RT2500USB=m CONFIG_RT73USB=m CONFIG_RT2800USB=m -# CONFIG_RT2800USB_RT30XX is not set +CONFIG_RT2800USB_RT33XX=y # CONFIG_RT2800USB_RT35XX is not set # CONFIG_RT2800USB_UNKNOWN is not set CONFIG_RT2800_LIB=m @@ -1956,11 +2036,18 @@ CONFIG_RT2X00_LIB_CRYPTO=y CONFIG_RT2X00_LIB_LEDS=y # CONFIG_RT2X00_LIB_DEBUGFS is not set # CONFIG_RT2X00_DEBUG is not set -CONFIG_WL12XX=m +CONFIG_RTL8192CE=m +CONFIG_RTLWIFI=m CONFIG_WL1251=m CONFIG_WL1251_SPI=m CONFIG_WL1251_SDIO=m -CONFIG_WL1271=m +CONFIG_WL12XX_MENU=m +CONFIG_WL12XX=m +# CONFIG_WL12XX_HT is not set +# CONFIG_WL12XX_SPI is not set +# CONFIG_WL12XX_SDIO is not set +# CONFIG_WL12XX_SDIO_TEST is not set +CONFIG_WL12XX_PLATFORM_DATA=y CONFIG_ZD1211RW=m # CONFIG_ZD1211RW_DEBUG is not set @@ -1984,6 +2071,7 @@ CONFIG_USB_USBNET=m CONFIG_USB_NET_AX8817X=m CONFIG_USB_NET_CDCETHER=m CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m CONFIG_USB_NET_DM9601=m CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m @@ -2000,6 +2088,7 @@ CONFIG_USB_ARMLINUX=y CONFIG_USB_EPSON2888=y CONFIG_USB_KC2190=y CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m CONFIG_USB_HSO=m CONFIG_USB_NET_INT51X1=m CONFIG_USB_CDC_PHONET=m @@ -2056,6 +2145,9 @@ CONFIG_ATM_ENI_BURST_RX_2W=y CONFIG_ATM_FIRESTREAM=m CONFIG_ATM_ZATM=m # CONFIG_ATM_ZATM_DEBUG is not set +CONFIG_ATM_NICSTAR=m +# CONFIG_ATM_NICSTAR_USE_SUNI is not set +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set CONFIG_ATM_IDT77252=m # CONFIG_ATM_IDT77252_DEBUG is not set # CONFIG_ATM_IDT77252_RCV_ALL is not set @@ -2075,6 +2167,10 @@ CONFIG_ATM_HE_USE_SUNI=y CONFIG_ATM_SOLOS=m CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKEHARD=m + +# +# CAIF transport drivers +# CONFIG_FDDI=m CONFIG_DEFXX=m CONFIG_DEFXX_MMIO=y @@ -2093,7 +2189,6 @@ CONFIG_PPP_BSDCOMP=m CONFIG_PPP_MPPE=m CONFIG_PPPOE=m CONFIG_PPPOATM=m -CONFIG_PPPOL2TP=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLHC=m @@ -2267,12 +2362,15 @@ CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYBOARD=y CONFIG_KEYBOARD_ADP5588=m CONFIG_KEYBOARD_ATKBD=y -CONFIG_QT2160=m +CONFIG_KEYBOARD_QT2160=m # CONFIG_KEYBOARD_LKKBD is not set CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +CONFIG_KEYBOARD_TCA6416=m CONFIG_KEYBOARD_MATRIX=m CONFIG_KEYBOARD_LM8323=m 
CONFIG_KEYBOARD_MAX7359=m +CONFIG_KEYBOARD_MCS=m CONFIG_KEYBOARD_NEWTON=m CONFIG_KEYBOARD_OPENCORES=m # CONFIG_KEYBOARD_STOWAWAY is not set @@ -2320,6 +2418,7 @@ CONFIG_JOYSTICK_ZHENHUA=m CONFIG_JOYSTICK_DB9=m CONFIG_JOYSTICK_GAMECON=m CONFIG_JOYSTICK_TURBOGRAFX=m +CONFIG_JOYSTICK_AS5011=m CONFIG_JOYSTICK_JOYDUMP=m CONFIG_JOYSTICK_XPAD=m CONFIG_JOYSTICK_XPAD_FF=y @@ -2329,14 +2428,19 @@ CONFIG_INPUT_TABLET=y CONFIG_TABLET_USB_ACECAD=m CONFIG_TABLET_USB_AIPTEK=m CONFIG_TABLET_USB_GTCO=m +CONFIG_TABLET_USB_HANWANG=m CONFIG_TABLET_USB_KBTAB=m CONFIG_TABLET_USB_WACOM=m CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ADS7846=m CONFIG_TOUCHSCREEN_AD7877=m -CONFIG_TOUCHSCREEN_AD7879_I2C=m CONFIG_TOUCHSCREEN_AD7879=m +CONFIG_TOUCHSCREEN_AD7879_I2C=m +CONFIG_TOUCHSCREEN_AD7879_SPI=m +CONFIG_TOUCHSCREEN_BU21013=m +CONFIG_TOUCHSCREEN_CY8CTMG110=m CONFIG_TOUCHSCREEN_DYNAPRO=m +CONFIG_TOUCHSCREEN_HAMPSHIRE=m CONFIG_TOUCHSCREEN_EETI=m CONFIG_TOUCHSCREEN_FUJITSU=m CONFIG_TOUCHSCREEN_GUNZE=m @@ -2348,6 +2452,7 @@ CONFIG_TOUCHSCREEN_MTOUCH=m CONFIG_TOUCHSCREEN_INEXIO=m CONFIG_TOUCHSCREEN_MK712=m CONFIG_TOUCHSCREEN_PENMOUNT=m +CONFIG_TOUCHSCREEN_QT602240=m CONFIG_TOUCHSCREEN_TOUCHRIGHT=m CONFIG_TOUCHSCREEN_TOUCHWIN=m CONFIG_TOUCHSCREEN_UCB1400=m @@ -2371,12 +2476,17 @@ CONFIG_TOUCHSCREEN_USB_GOTOP=y CONFIG_TOUCHSCREEN_USB_JASTEC=y CONFIG_TOUCHSCREEN_USB_E2I=y CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y -CONFIG_TOUCHSCREEN_USB_ETT_TC5UH=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y CONFIG_TOUCHSCREEN_USB_NEXIO=y CONFIG_TOUCHSCREEN_TOUCHIT213=m CONFIG_TOUCHSCREEN_TSC2007=m CONFIG_TOUCHSCREEN_PCAP=m +CONFIG_TOUCHSCREEN_ST1232=m +CONFIG_TOUCHSCREEN_TPS6507X=m CONFIG_INPUT_MISC=y +CONFIG_INPUT_AD714X=m +CONFIG_INPUT_AD714X_I2C=m +CONFIG_INPUT_AD714X_SPI=m CONFIG_INPUT_PCSPKR=m CONFIG_INPUT_APANEL=m CONFIG_INPUT_ATLAS_BTNS=m @@ -2387,9 +2497,14 @@ CONFIG_INPUT_POWERMATE=m CONFIG_INPUT_YEALINK=m CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m -CONFIG_INPUT_WINBOND_CIR=m +CONFIG_INPUT_PCF8574=m CONFIG_INPUT_GPIO_ROTARY_ENCODER=m CONFIG_INPUT_PCAP=m +CONFIG_INPUT_ADXL34X=m +CONFIG_INPUT_ADXL34X_I2C=m +CONFIG_INPUT_ADXL34X_SPI=m +CONFIG_INPUT_CMA3000=m +CONFIG_INPUT_CMA3000_I2C=m # # Hardware I/O ports @@ -2403,6 +2518,7 @@ CONFIG_SERIO_PCIPS2=m CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_RAW=m CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_PS2MULT=m CONFIG_GAMEPORT=m CONFIG_GAMEPORT_NS558=m CONFIG_GAMEPORT_L4=m @@ -2431,6 +2547,7 @@ CONFIG_SYNCLINK=m CONFIG_SYNCLINKMP=m CONFIG_SYNCLINK_GT=m CONFIG_N_HDLC=m +CONFIG_N_GSM=m CONFIG_RISCOM8=m CONFIG_SPECIALIX=m CONFIG_STALDRV=y @@ -2454,10 +2571,19 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=8 # Non-8250 serial port support # # CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +# CONFIG_SERIAL_MRST_MAX3110 is not set +# CONFIG_SERIAL_MFD_HSU is not set CONFIG_SERIAL_UARTLITE=m CONFIG_SERIAL_CORE=m CONFIG_SERIAL_JSM=m CONFIG_SERIAL_TIMBERDALE=m +CONFIG_SERIAL_ALTERA_JTAGUART=m +CONFIG_SERIAL_ALTERA_UART=m +CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4 +CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200 +CONFIG_SERIAL_IFX6X60=m +CONFIG_SERIAL_PCH_UART=m CONFIG_UNIX98_PTYS=y CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_LEGACY_PTYS=y @@ -2489,8 +2615,6 @@ CONFIG_CARDMAN_4000=m CONFIG_CARDMAN_4040=m CONFIG_IPWIRELESS=m CONFIG_MWAVE=m -CONFIG_PC8736x_GPIO=m -CONFIG_NSC_GPIO=m CONFIG_RAW_DRIVER=m CONFIG_MAX_RAW_DEVS=4096 CONFIG_HANGCHECK_TIMER=m @@ -2502,11 +2626,20 @@ CONFIG_TCG_INFINEON=m CONFIG_TCG_XEN=m CONFIG_TELCLOCK=m CONFIG_DEVPORT=y +CONFIG_RAMOOPS=y CONFIG_CRASHER=m CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y 
CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_MUX_GPIO=m +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_SMBUS=m CONFIG_I2C_ALGOBIT=m @@ -2545,9 +2678,12 @@ CONFIG_I2C_SCMI=m # I2C system bus drivers (mostly embedded / system-on-chip) # CONFIG_I2C_GPIO=m +CONFIG_I2C_INTEL_MID=m CONFIG_I2C_OCORES=m +CONFIG_I2C_PCA_PLATFORM=m # CONFIG_I2C_SIMTEC is not set # CONFIG_I2C_XILINX is not set +CONFIG_I2C_EG20T=m # # External I2C/SMBus adapter drivers @@ -2560,7 +2696,6 @@ CONFIG_I2C_TINY_USB=m # # Other I2C/SMBus bus drivers # -CONFIG_I2C_PCA_PLATFORM=m CONFIG_I2C_STUB=m # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -2576,10 +2711,12 @@ CONFIG_SPI_BITBANG=m CONFIG_SPI_BUTTERFLY=m CONFIG_SPI_GPIO=m CONFIG_SPI_LM70_LLP=m +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_TOPCLIFF_PCH=m CONFIG_SPI_XILINX=m -CONFIG_SPI_XILINX_PLTFM=m CONFIG_SPI_DESIGNWARE=y CONFIG_SPI_DW_PCI=m +# CONFIG_SPI_DW_MID_DMA is not set # # SPI Protocol Masters @@ -2598,6 +2735,11 @@ CONFIG_PPS=m # # CONFIG_PPS_CLIENT_KTIMER is not set CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m + +# +# PPS generators support +# CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -2607,8 +2749,10 @@ CONFIG_GPIO_MAX730X=m # # Memory mapped GPIO expanders: # +CONFIG_GPIO_BASIC_MMIO=m CONFIG_GPIO_IT8761E=m CONFIG_GPIO_SCH=m +CONFIG_GPIO_VX855=m # # I2C GPIO expanders: @@ -2617,7 +2761,6 @@ CONFIG_GPIO_MAX7300=m CONFIG_GPIO_MAX732X=m CONFIG_GPIO_PCA953X=m CONFIG_GPIO_PCF857X=m -CONFIG_GPIO_WM8994=m CONFIG_GPIO_ADP5588=m # @@ -2625,7 +2768,10 @@ CONFIG_GPIO_ADP5588=m # CONFIG_GPIO_CS5535=m CONFIG_GPIO_LANGWELL=y +CONFIG_GPIO_PCH=m +CONFIG_GPIO_ML_IOH=m # CONFIG_GPIO_TIMBERDALE is not set +CONFIG_GPIO_RDC321X=m # # SPI GPIO expanders: @@ -2633,11 +2779,17 @@ CONFIG_GPIO_LANGWELL=y CONFIG_GPIO_MAX7301=m CONFIG_GPIO_MCP23S08=m CONFIG_GPIO_MC33880=m +CONFIG_GPIO_74X164=m # # AC97 GPIO expanders: # CONFIG_GPIO_UCB1400=y + +# +# MODULbus GPIO expanders: +# +CONFIG_GPIO_JANZ_TTL=m CONFIG_W1=m CONFIG_W1_CON=y @@ -2654,6 +2806,7 @@ CONFIG_W1_MASTER_GPIO=m # CONFIG_W1_SLAVE_THERM=m CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_DS2423=m CONFIG_W1_SLAVE_DS2431=m CONFIG_W1_SLAVE_DS2433=m CONFIG_W1_SLAVE_DS2433_CRC=y @@ -2662,10 +2815,14 @@ CONFIG_W1_SLAVE_BQ27000=m CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set CONFIG_PDA_POWER=m +# CONFIG_TEST_POWER is not set CONFIG_BATTERY_DS2760=m CONFIG_BATTERY_DS2782=m +CONFIG_BATTERY_BQ20Z75=m CONFIG_BATTERY_BQ27x00=m CONFIG_BATTERY_MAX17040=m +CONFIG_BATTERY_MAX17042=m +CONFIG_CHARGER_GPIO=m CONFIG_HWMON=m CONFIG_HWMON_VID=m # CONFIG_HWMON_DEBUG_CHIP is not set @@ -2693,6 +2850,7 @@ CONFIG_SENSORS_K8TEMP=m CONFIG_SENSORS_K10TEMP=m CONFIG_SENSORS_ASB100=m CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m CONFIG_SENSORS_DS1621=m CONFIG_SENSORS_I5K_AMB=m CONFIG_SENSORS_F71805F=m @@ -2702,10 +2860,13 @@ CONFIG_SENSORS_FSCHMD=m CONFIG_SENSORS_G760A=m CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_GPIO_FAN=m CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_PKGTEMP=m CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m CONFIG_SENSORS_LM63=m CONFIG_SENSORS_LM70=m CONFIG_SENSORS_LM73=m @@ -2721,6 +2882,7 @@ CONFIG_SENSORS_LM92=m CONFIG_SENSORS_LM93=m CONFIG_SENSORS_LTC4215=m CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m CONFIG_SENSORS_LM95241=m CONFIG_SENSORS_MAX1111=m CONFIG_SENSORS_MAX1619=m 
@@ -2729,14 +2891,20 @@ CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m CONFIG_SENSORS_PCF8591=m CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_SMM665=m CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC2103=m CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m CONFIG_SENSORS_TMP401=m CONFIG_SENSORS_TMP421=m CONFIG_SENSORS_VIA_CPUTEMP=m @@ -2747,11 +2915,12 @@ CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83791D=m CONFIG_SENSORS_W83792D=m CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83795_FANCTRL=y CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m -CONFIG_SENSORS_HDAPS=m CONFIG_SENSORS_LIS3_I2C=m CONFIG_SENSORS_APPLESMC=m CONFIG_SENSORS_MC13783_ADC=m @@ -2774,6 +2943,8 @@ CONFIG_ACQUIRE_WDT=m CONFIG_ADVANTECH_WDT=m CONFIG_ALIM1535_WDT=m CONFIG_ALIM7101_WDT=m +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m CONFIG_GEODE_WDT=m CONFIG_SC520_WDT=m CONFIG_SBC_FITPC2_WATCHDOG=m @@ -2787,8 +2958,10 @@ CONFIG_ITCO_VENDOR_SUPPORT=y CONFIG_IT8712F_WDT=m CONFIG_IT87_WDT=m CONFIG_HP_WATCHDOG=m +# CONFIG_HPWDT_NMI_DECODING is not set CONFIG_SC1200_WDT=m CONFIG_PC87413_WDT=m +CONFIG_NV_TCO=m CONFIG_60XX_WDT=m CONFIG_SBC8360_WDT=m CONFIG_CPU5_WDT=m @@ -2801,6 +2974,7 @@ CONFIG_W83877F_WDT=m CONFIG_W83977F_WDT=m CONFIG_MACHZ_WDT=m CONFIG_SBC_EPX_C3_WATCHDOG=m +CONFIG_XEN_WDT=m # # PCI-based Watchdog Cards @@ -2830,25 +3004,29 @@ CONFIG_SSB_SDIOHOST=y # CONFIG_SSB_DEBUG is not set CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y CONFIG_SSB_DRIVER_PCICORE=y - -# -# Multifunction device drivers -# +CONFIG_MFD_SUPPORT=y CONFIG_MFD_CORE=m CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y CONFIG_HTC_PASIC3=m CONFIG_UCB1400_CORE=m CONFIG_TPS65010=m +CONFIG_TPS6507X=m # CONFIG_MFD_TMIO is not set CONFIG_MFD_WM8400=m -CONFIG_MFD_WM8994=m +# CONFIG_MFD_WM831X_SPI is not set # CONFIG_MFD_PCF50633 is not set CONFIG_MFD_MC13783=m +CONFIG_MFD_MC13XXX=m +# CONFIG_ABX500_CORE is not set CONFIG_EZX_PCAP=y -CONFIG_AB4500_CORE=m +CONFIG_MFD_CS5535=m CONFIG_MFD_TIMBERDALE=m CONFIG_LPC_SCH=m +CONFIG_MFD_RDC321X=m +CONFIG_MFD_JANZ_CMODIO=m +CONFIG_MFD_VX855=m +CONFIG_MFD_WL1273_CORE=m CONFIG_REGULATOR=y # CONFIG_REGULATOR_DEBUG is not set CONFIG_REGULATOR_DUMMY=y @@ -2859,13 +3037,19 @@ CONFIG_REGULATOR_BQ24022=m CONFIG_REGULATOR_MAX1586=m CONFIG_REGULATOR_MAX8649=m CONFIG_REGULATOR_MAX8660=m +CONFIG_REGULATOR_MAX8952=m CONFIG_REGULATOR_WM8400=m -CONFIG_REGULATOR_WM8994=m CONFIG_REGULATOR_LP3971=m +CONFIG_REGULATOR_LP3972=m CONFIG_REGULATOR_PCAP=m +CONFIG_REGULATOR_MC13XXX_CORE=m CONFIG_REGULATOR_MC13783=m +CONFIG_REGULATOR_MC13892=m CONFIG_REGULATOR_TPS65023=m CONFIG_REGULATOR_TPS6507X=m +CONFIG_REGULATOR_ISL6271A=m +CONFIG_REGULATOR_AD5398=m +CONFIG_REGULATOR_TPS6524X=m CONFIG_MEDIA_SUPPORT=m # @@ -2873,8 +3057,6 @@ CONFIG_MEDIA_SUPPORT=m # CONFIG_VIDEO_DEV=m CONFIG_VIDEO_V4L2_COMMON=m -CONFIG_VIDEO_ALLOW_V4L1=y -CONFIG_VIDEO_V4L1_COMPAT=y CONFIG_DVB_CORE=m CONFIG_VIDEO_MEDIA=m @@ -2883,8 +3065,23 @@ CONFIG_VIDEO_MEDIA=m # CONFIG_VIDEO_SAA7146=m CONFIG_VIDEO_SAA7146_VV=m -CONFIG_IR_CORE=m -CONFIG_VIDEO_IR=m +CONFIG_RC_CORE=m +CONFIG_LIRC=m +CONFIG_RC_MAP=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_RC5_SZ_DECODER=m +CONFIG_IR_LIRC_CODEC=m +# CONFIG_IR_ENE is not set 
+CONFIG_IR_IMON=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_STREAMZAP=m +CONFIG_IR_WINBOND_CIR=m +CONFIG_RC_LOOPBACK=m CONFIG_MEDIA_ATTACH=y CONFIG_MEDIA_TUNER=m # CONFIG_MEDIA_TUNER_CUSTOMISE is not set @@ -2906,11 +3103,12 @@ CONFIG_MEDIA_TUNER_MXL5005S=m CONFIG_MEDIA_TUNER_MXL5007T=m CONFIG_MEDIA_TUNER_MC44S803=m CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_TDA18218=m CONFIG_VIDEO_V4L2=m -CONFIG_VIDEO_V4L1=m CONFIG_VIDEOBUF_GEN=m CONFIG_VIDEOBUF_DMA_SG=m CONFIG_VIDEOBUF_VMALLOC=m +CONFIG_VIDEOBUF_DMA_CONTIG=m CONFIG_VIDEOBUF_DVB=m CONFIG_VIDEO_BTCX=m CONFIG_VIDEO_TVEEPROM=m @@ -2920,6 +3118,10 @@ CONFIG_VIDEO_CAPTURE_DRIVERS=y # CONFIG_VIDEO_FIXED_MINOR_RANGES is not set CONFIG_VIDEO_HELPER_CHIPS_AUTO=y CONFIG_VIDEO_IR_I2C=m + +# +# Audio decoders +# CONFIG_VIDEO_TVAUDIO=m CONFIG_VIDEO_TDA7432=m CONFIG_VIDEO_TDA9840=m @@ -2932,7 +3134,16 @@ CONFIG_VIDEO_M52790=m CONFIG_VIDEO_WM8775=m CONFIG_VIDEO_WM8739=m CONFIG_VIDEO_VP27SMPX=m + +# +# RDS decoders +# CONFIG_VIDEO_SAA6588=m + +# +# Video decoders +# +CONFIG_VIDEO_ADV7180=m CONFIG_VIDEO_BT819=m CONFIG_VIDEO_BT856=m CONFIG_VIDEO_BT866=m @@ -2944,12 +3155,28 @@ CONFIG_VIDEO_SAA711X=m CONFIG_VIDEO_SAA717X=m CONFIG_VIDEO_TVP5150=m CONFIG_VIDEO_VPX3220=m + +# +# Video and audio decoders +# CONFIG_VIDEO_CX25840=m + +# +# MPEG video encoders +# CONFIG_VIDEO_CX2341X=m + +# +# Video encoders +# CONFIG_VIDEO_SAA7127=m CONFIG_VIDEO_SAA7185=m CONFIG_VIDEO_ADV7170=m CONFIG_VIDEO_ADV7175=m + +# +# Video improvement chips +# CONFIG_VIDEO_UPD64031A=m CONFIG_VIDEO_UPD64083=m CONFIG_VIDEO_VIVI=m @@ -2958,13 +3185,7 @@ CONFIG_VIDEO_BT848_DVB=y CONFIG_VIDEO_BWQCAM=m CONFIG_VIDEO_CQCAM=m CONFIG_VIDEO_W9966=m -CONFIG_VIDEO_CPIA=m -CONFIG_VIDEO_CPIA_PP=m -CONFIG_VIDEO_CPIA_USB=m CONFIG_VIDEO_CPIA2=m -CONFIG_VIDEO_SAA5246A=m -CONFIG_VIDEO_SAA5249=m -CONFIG_VIDEO_STRADIS=m CONFIG_VIDEO_ZORAN=m CONFIG_VIDEO_ZORAN_DC30=m CONFIG_VIDEO_ZORAN_ZR36060=m @@ -2976,10 +3197,12 @@ CONFIG_VIDEO_ZORAN_AVS6EYES=m CONFIG_VIDEO_MEYE=m CONFIG_VIDEO_SAA7134=m CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y CONFIG_VIDEO_SAA7134_DVB=m CONFIG_VIDEO_MXB=m CONFIG_VIDEO_HEXIUM_ORION=m CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_TIMBERDALE=m CONFIG_VIDEO_CX88=m CONFIG_VIDEO_CX88_ALSA=m CONFIG_VIDEO_CX88_BLACKBIRD=m @@ -2994,7 +3217,10 @@ CONFIG_VIDEO_CX18=m CONFIG_VIDEO_CX18_ALSA=m CONFIG_VIDEO_SAA7164=m CONFIG_VIDEO_CAFE_CCIC=m +CONFIG_VIDEO_SR030PC30=m +CONFIG_VIDEO_VIA_CAMERA=m CONFIG_SOC_CAMERA=m +CONFIG_SOC_CAMERA_IMX074=m CONFIG_SOC_CAMERA_MT9M001=m CONFIG_SOC_CAMERA_MT9M111=m CONFIG_SOC_CAMERA_MT9T031=m @@ -3003,6 +3229,8 @@ CONFIG_SOC_CAMERA_MT9V022=m CONFIG_SOC_CAMERA_RJ54N1=m CONFIG_SOC_CAMERA_TW9910=m CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_SOC_CAMERA_OV2640=m +CONFIG_SOC_CAMERA_OV6650=m CONFIG_SOC_CAMERA_OV772X=m CONFIG_SOC_CAMERA_OV9640=m CONFIG_V4L_USB_DRIVERS=y @@ -3018,6 +3246,7 @@ CONFIG_USB_GSPCA_CPIA1=m CONFIG_USB_GSPCA_ETOMS=m CONFIG_USB_GSPCA_FINEPIX=m CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_KONICA=m CONFIG_USB_GSPCA_MARS=m CONFIG_USB_GSPCA_MR97310A=m CONFIG_USB_GSPCA_OV519=m @@ -3028,7 +3257,6 @@ CONFIG_USB_GSPCA_PAC7302=m CONFIG_USB_GSPCA_PAC7311=m CONFIG_USB_GSPCA_SN9C2028=m CONFIG_USB_GSPCA_SN9C20X=m -CONFIG_USB_GSPCA_SN9C20X_EVDEV=y CONFIG_USB_GSPCA_SONIXB=m CONFIG_USB_GSPCA_SONIXJ=m CONFIG_USB_GSPCA_SPCA500=m @@ -3037,14 +3265,17 @@ CONFIG_USB_GSPCA_SPCA505=m CONFIG_USB_GSPCA_SPCA506=m CONFIG_USB_GSPCA_SPCA508=m CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SPCA1528=m CONFIG_USB_GSPCA_SQ905=m CONFIG_USB_GSPCA_SQ905C=m 
+CONFIG_USB_GSPCA_SQ930X=m CONFIG_USB_GSPCA_STK014=m CONFIG_USB_GSPCA_STV0680=m CONFIG_USB_GSPCA_SUNPLUS=m CONFIG_USB_GSPCA_T613=m CONFIG_USB_GSPCA_TV8532=m CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m CONFIG_USB_GSPCA_ZC3XX=m CONFIG_VIDEO_PVRUSB2=m CONFIG_VIDEO_PVRUSB2_SYSFS=y @@ -3056,30 +3287,20 @@ CONFIG_VIDEO_EM28XX_ALSA=m CONFIG_VIDEO_EM28XX_DVB=m CONFIG_VIDEO_TLG2300=m CONFIG_VIDEO_CX231XX=m +CONFIG_VIDEO_CX231XX_RC=y CONFIG_VIDEO_CX231XX_ALSA=m CONFIG_VIDEO_CX231XX_DVB=m CONFIG_VIDEO_USBVISION=m -CONFIG_VIDEO_USBVIDEO=m -CONFIG_USB_VICAM=m -CONFIG_USB_IBMCAM=m -CONFIG_USB_KONICAWC=m -CONFIG_USB_QUICKCAM_MESSENGER=m CONFIG_USB_ET61X251=m -CONFIG_VIDEO_OVCAMCHIP=m -CONFIG_USB_W9968CF=m -CONFIG_USB_OV511=m -CONFIG_USB_SE401=m CONFIG_USB_SN9C102=m -CONFIG_USB_STV680=m -CONFIG_USB_ZC0301=m CONFIG_USB_PWC=m # CONFIG_USB_PWC_DEBUG is not set CONFIG_USB_PWC_INPUT_EVDEV=y CONFIG_USB_ZR364XX=m CONFIG_USB_STKWEBCAM=m CONFIG_USB_S2255=m +# CONFIG_V4L_MEM2MEM_DRIVERS is not set CONFIG_RADIO_ADAPTERS=y -CONFIG_RADIO_GEMTEK_PCI=m CONFIG_RADIO_MAXIRADIO=m CONFIG_RADIO_MAESTRO=m CONFIG_I2C_SI4713=m @@ -3093,6 +3314,7 @@ CONFIG_RADIO_TEA5764=m CONFIG_RADIO_SAA7706H=m CONFIG_RADIO_TEF6862=m CONFIG_RADIO_TIMBERDALE=m +CONFIG_RADIO_WL1273=m CONFIG_DVB_MAX_ADAPTERS=8 CONFIG_DVB_DYNAMIC_MINORS=y CONFIG_DVB_CAPTURE_DRIVERS=y @@ -3143,6 +3365,7 @@ CONFIG_DVB_USB_CE6230=m CONFIG_DVB_USB_FRIIO=m CONFIG_DVB_USB_EC168=m CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_LME2510=m CONFIG_DVB_TTUSB_BUDGET=m CONFIG_DVB_TTUSB_DEC=m CONFIG_SMS_SIANO_MDTV=m @@ -3175,13 +3398,9 @@ CONFIG_DVB_PLUTO2=m # Supported SDMC DM1105 Adapters # CONFIG_DVB_DM1105=m - -# -# Supported FireWire (IEEE 1394) Adapters -# CONFIG_DVB_FIREDTV=m CONFIG_DVB_FIREDTV_FIREWIRE=y -CONFIG_DVB_FIREDTV_IEEE1394=y +# CONFIG_DVB_FIREDTV_IEEE1394 is not set CONFIG_DVB_FIREDTV_INPUT=y # @@ -3205,10 +3424,18 @@ CONFIG_DVB_NGENE=m # Supported DVB Frontends # # CONFIG_DVB_FE_CUSTOMISE is not set + +# +# Multistandard (satellite) frontends +# CONFIG_DVB_STB0899=m CONFIG_DVB_STB6100=m CONFIG_DVB_STV090x=m CONFIG_DVB_STV6110x=m + +# +# DVB-S (satellite) frontends +# CONFIG_DVB_CX24110=m CONFIG_DVB_CX24123=m CONFIG_DVB_MT312=m @@ -3232,6 +3459,10 @@ CONFIG_DVB_CX24116=m CONFIG_DVB_SI21XX=m CONFIG_DVB_DS3000=m CONFIG_DVB_MB86A16=m + +# +# DVB-T (terrestrial) frontends +# CONFIG_DVB_SP8870=m CONFIG_DVB_SP887X=m CONFIG_DVB_CX22700=m @@ -3248,10 +3479,18 @@ CONFIG_DVB_DIB7000P=m CONFIG_DVB_TDA10048=m CONFIG_DVB_AF9013=m CONFIG_DVB_EC100=m + +# +# DVB-C (cable) frontends +# CONFIG_DVB_VES1820=m CONFIG_DVB_TDA10021=m CONFIG_DVB_TDA10023=m CONFIG_DVB_STV0297=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# CONFIG_DVB_NXT200X=m CONFIG_DVB_OR51211=m CONFIG_DVB_OR51132=m @@ -3261,16 +3500,37 @@ CONFIG_DVB_LGDT3305=m CONFIG_DVB_S5H1409=m CONFIG_DVB_AU8522=m CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_S921=m CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m + +# +# Digital terrestrial only tuners/PLL +# CONFIG_DVB_PLL=m CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# CONFIG_DVB_LNBP21=m CONFIG_DVB_ISL6405=m CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m CONFIG_DVB_LGS8GXX=m CONFIG_DVB_ATBM8830=m -CONFIG_DAB=y -CONFIG_USB_DABUSB=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_IX2505V=m + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set # # Graphics support @@ -3298,6 +3558,7 @@ CONFIG_DRM_MGA=m CONFIG_DRM_SIS=m CONFIG_DRM_VIA=m CONFIG_DRM_SAVAGE=m 
+CONFIG_STUB_POULSBO=m CONFIG_VGASTATE=m CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y @@ -3313,6 +3574,7 @@ CONFIG_FB_SYS_COPYAREA=m CONFIG_FB_SYS_IMAGEBLIT=m # CONFIG_FB_FOREIGN_ENDIAN is not set CONFIG_FB_SYS_FOPS=m +# CONFIG_FB_WMT_GE_ROPS is not set CONFIG_FB_DEFERRED_IO=y CONFIG_FB_HECUBA=m CONFIG_FB_SVGALIB=m @@ -3336,7 +3598,6 @@ CONFIG_FB_UVESA=m CONFIG_FB_VESA=y CONFIG_FB_N411=m CONFIG_FB_HGA=m -CONFIG_FB_HGA_ACCEL=y CONFIG_FB_S1D13XXX=m CONFIG_FB_NVIDIA=m CONFIG_FB_NVIDIA_I2C=y @@ -3373,6 +3634,7 @@ CONFIG_FB_SIS=m CONFIG_FB_SIS_300=y CONFIG_FB_SIS_315=y CONFIG_FB_VIA=m +# CONFIG_FB_VIA_DIRECT_PROCFS is not set CONFIG_FB_NEOMAGIC=m CONFIG_FB_KYRO=m CONFIG_FB_3DFX=m @@ -3393,6 +3655,7 @@ CONFIG_FB_GEODE_GX1=m CONFIG_FB_TMIO=m CONFIG_FB_TMIO_ACCELL=y CONFIG_FB_SM501=m +CONFIG_FB_UDL=m CONFIG_FB_VIRTUAL=m CONFIG_FB_METRONOME=m CONFIG_FB_MB862XX=m @@ -3407,12 +3670,14 @@ CONFIG_LCD_ILI9320=m CONFIG_LCD_TDO24M=m CONFIG_LCD_VGG2432A4=m CONFIG_LCD_PLATFORM=m +CONFIG_LCD_S6E63M0=m CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PROGEAR=m CONFIG_BACKLIGHT_CARILLO_RANCH=m CONFIG_BACKLIGHT_MBP_NVIDIA=m CONFIG_BACKLIGHT_SAHARA=m +CONFIG_BACKLIGHT_ADP8860=m # # Display device support @@ -3481,6 +3746,7 @@ CONFIG_SND_AC97_CODEC=m CONFIG_SND_DRIVERS=y # CONFIG_SND_PCSP is not set CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m CONFIG_SND_MTS64=m @@ -3496,6 +3762,7 @@ CONFIG_SND_AD1889=m CONFIG_SND_ALS300=m CONFIG_SND_ALS4000=m CONFIG_SND_ALI5451=m +CONFIG_SND_ASIHPI=m CONFIG_SND_ATIIXP=m CONFIG_SND_ATIIXP_MODEM=m CONFIG_SND_AU8810=m @@ -3535,6 +3802,7 @@ CONFIG_SND_ENS1370=m CONFIG_SND_ENS1371=m CONFIG_SND_ES1938=m CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y CONFIG_SND_FM801=m CONFIG_SND_FM801_TEA575X_BOOL=y CONFIG_SND_FM801_TEA575X=m @@ -3549,10 +3817,7 @@ CONFIG_SND_HDA_CODEC_REALTEK=y CONFIG_SND_HDA_CODEC_ANALOG=y CONFIG_SND_HDA_CODEC_SIGMATEL=y CONFIG_SND_HDA_CODEC_VIA=y -CONFIG_SND_HDA_CODEC_ATIHDMI=y -CONFIG_SND_HDA_CODEC_NVHDMI=y -CONFIG_SND_HDA_CODEC_INTELHDMI=y -CONFIG_SND_HDA_ELD=y +CONFIG_SND_HDA_CODEC_HDMI=y CONFIG_SND_HDA_CODEC_CIRRUS=y CONFIG_SND_HDA_CODEC_CONEXANT=y CONFIG_SND_HDA_CODEC_CA0110=y @@ -3563,7 +3828,6 @@ CONFIG_SND_HDA_POWER_SAVE=y CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 CONFIG_SND_HDSP=m CONFIG_SND_HDSPM=m -CONFIG_SND_HIFIER=m CONFIG_SND_ICE1712=m CONFIG_SND_ICE1724=m CONFIG_SND_INTEL8X0=m @@ -3571,6 +3835,7 @@ CONFIG_SND_INTEL8X0M=m CONFIG_SND_KORG1212=m CONFIG_SND_LX6464ES=m CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y CONFIG_SND_MIXART=m CONFIG_SND_NM256=m CONFIG_SND_PCXHR=m @@ -3634,15 +3899,24 @@ CONFIG_USB_HIDDEV=y # CONFIG_HID_3M_PCT=m CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_ACRUX_FF=m CONFIG_HID_APPLE=m CONFIG_HID_BELKIN=m +CONFIG_HID_CANDO=m CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m +CONFIG_HID_PRODIKEYS=m CONFIG_HID_CYPRESS=m CONFIG_HID_DRAGONRISE=m CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=m +CONFIG_HID_EGALAX=m +CONFIG_HID_ELECOM=m CONFIG_HID_EZKEY=m CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m CONFIG_HID_GYRATION=m CONFIG_HID_TWINHAN=m CONFIG_HID_KENSINGTON=m @@ -3650,16 +3924,27 @@ CONFIG_HID_LOGITECH=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y +CONFIG_LOGIWII_FF=y CONFIG_HID_MAGICMOUSE=m CONFIG_HID_MICROSOFT=m CONFIG_HID_MOSART=m CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m CONFIG_HID_NTRIG=m CONFIG_HID_ORTEK=m CONFIG_HID_PANTHERLORD=m CONFIG_PANTHERLORD_FF=y CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y 
+CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y CONFIG_HID_QUANTA=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_ROCCAT_KONE=m +CONFIG_HID_ROCCAT_KONEPLUS=m +CONFIG_HID_ROCCAT_PYRA=m CONFIG_HID_SAMSUNG=m CONFIG_HID_SONY=m CONFIG_HID_STANTUM=m @@ -3672,8 +3957,10 @@ CONFIG_HID_TOPSEED=m CONFIG_HID_THRUSTMASTER=m CONFIG_THRUSTMASTER_FF=y CONFIG_HID_WACOM=m +CONFIG_HID_WACOM_POWER_SUPPLY=y CONFIG_HID_ZEROPLUS=m CONFIG_ZEROPLUS_FF=y +CONFIG_HID_ZYDACRON=m CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y @@ -3753,6 +4040,7 @@ CONFIG_USB_STORAGE_ALAUDA=m CONFIG_USB_STORAGE_ONETOUCH=m CONFIG_USB_STORAGE_KARMA=m CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_UAS=m # CONFIG_USB_LIBUSUAL is not set # @@ -3793,6 +4081,7 @@ CONFIG_USB_SERIAL_KLSI=m CONFIG_USB_SERIAL_KOBIL_SCT=m CONFIG_USB_SERIAL_MCT_U232=m CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y CONFIG_USB_SERIAL_MOS7840=m CONFIG_USB_SERIAL_MOTOROLA=m CONFIG_USB_SERIAL_NAVMAN=m @@ -3804,16 +4093,20 @@ CONFIG_USB_SERIAL_SPCP8X5=m CONFIG_USB_SERIAL_HP4X=m CONFIG_USB_SERIAL_SAFE=m CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SAMBA=m CONFIG_USB_SERIAL_SIEMENS_MPI=m CONFIG_USB_SERIAL_SIERRAWIRELESS=m CONFIG_USB_SERIAL_SYMBOL=m CONFIG_USB_SERIAL_TI=m CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m CONFIG_USB_SERIAL_OPTION=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_SERIAL_OPTICON=m CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m +CONFIG_USB_SERIAL_ZIO=m +CONFIG_USB_SERIAL_SSU100=m CONFIG_USB_SERIAL_DEBUG=m # @@ -3839,6 +4132,7 @@ CONFIG_USB_TRANCEVIBRATOR=m CONFIG_USB_IOWARRIOR=m # CONFIG_USB_TEST is not set CONFIG_USB_ISIGHTFW=m +CONFIG_USB_YUREX=m CONFIG_USB_ATM=m CONFIG_USB_SPEEDTOUCH=m CONFIG_USB_CXACRU=m @@ -3854,17 +4148,17 @@ CONFIG_USB_XUSBATM=m CONFIG_UWB=m CONFIG_UWB_HWA=m CONFIG_UWB_WHCI=m -CONFIG_UWB_WLP=m CONFIG_UWB_I1480U=m -CONFIG_UWB_I1480U_WLP=m CONFIG_MMC=m # CONFIG_MMC_DEBUG is not set # CONFIG_MMC_UNSAFE_RESUME is not set +# CONFIG_MMC_CLKGATE is not set # # MMC/SD/SDIO Card Drivers # CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 CONFIG_MMC_BLOCK_BOUNCE=y CONFIG_SDIO_UART=m CONFIG_MMC_TEST=m @@ -3882,6 +4176,7 @@ CONFIG_MMC_SPI=m CONFIG_MMC_SDRICOH_CS=m CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_USHC=m CONFIG_MEMSTICK=m # CONFIG_MEMSTICK_DEBUG is not set @@ -3902,11 +4197,14 @@ CONFIG_LEDS_CLASS=y # # LED drivers # +CONFIG_LEDS_NET5501=m CONFIG_LEDS_ALIX2=m CONFIG_LEDS_PCA9532=m CONFIG_LEDS_GPIO=m CONFIG_LEDS_GPIO_PLATFORM=y CONFIG_LEDS_LP3944=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m CONFIG_LEDS_CLEVO_MAIL=m CONFIG_LEDS_PCA955X=m CONFIG_LEDS_DAC124S085=m @@ -3915,6 +4213,7 @@ CONFIG_LEDS_BD2802=m CONFIG_LEDS_INTEL_SS4200=m CONFIG_LEDS_LT3593=m CONFIG_LEDS_DELL_NETBOOKS=m +CONFIG_LEDS_MC13783=m CONFIG_LEDS_TRIGGERS=y # @@ -3929,6 +4228,8 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=m # # iptables trigger is under Netfilter config (LED target) # +CONFIG_NFC_DEVICES=y +CONFIG_PN544_NFC=m # CONFIG_ACCESSIBILITY is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m @@ -3937,11 +4238,12 @@ CONFIG_INFINIBAND_USER_MEM=y CONFIG_INFINIBAND_ADDR_TRANS=y CONFIG_INFINIBAND_MTHCA=m CONFIG_INFINIBAND_MTHCA_DEBUG=y -CONFIG_INFINIBAND_IPATH=m +CONFIG_INFINIBAND_QIB=m CONFIG_INFINIBAND_AMSO1100=m # CONFIG_INFINIBAND_AMSO1100_DEBUG is not set CONFIG_INFINIBAND_CXGB3=m # CONFIG_INFINIBAND_CXGB3_DEBUG is not set +CONFIG_INFINIBAND_CXGB4=m CONFIG_MLX4_INFINIBAND=m CONFIG_INFINIBAND_NES=m # CONFIG_INFINIBAND_NES_DEBUG is not set @@ -3958,15 +4260,19 
@@ CONFIG_EDAC=y # # CONFIG_EDAC_DEBUG is not set CONFIG_EDAC_DECODE_MCE=m +# CONFIG_EDAC_MCE_INJ is not set CONFIG_EDAC_MM_EDAC=m +CONFIG_EDAC_MCE=y CONFIG_EDAC_E752X=m CONFIG_EDAC_I82975X=m CONFIG_EDAC_I3000=m CONFIG_EDAC_I3200=m CONFIG_EDAC_X38=m CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m CONFIG_EDAC_I5000=m CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y @@ -3988,9 +4294,11 @@ CONFIG_RTC_DRV_TEST=m CONFIG_RTC_DRV_DS1307=m CONFIG_RTC_DRV_DS1374=m CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_DS3232=m CONFIG_RTC_DRV_MAX6900=m CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m CONFIG_RTC_DRV_X1205=m CONFIG_RTC_DRV_PCF8563=m CONFIG_RTC_DRV_PCF8583=m @@ -4035,15 +4343,17 @@ CONFIG_RTC_DRV_V3020=m # on-CPU RTC drivers # CONFIG_RTC_DRV_PCAP=m -CONFIG_RTC_DRV_MC13783=m +CONFIG_RTC_DRV_MC13XXX=m CONFIG_DMADEVICES=y # CONFIG_DMADEVICES_DEBUG is not set # # DMA Devices # -CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y +CONFIG_INTEL_MID_DMAC=m CONFIG_INTEL_IOATDMA=m +CONFIG_TIMB_DMA=m +CONFIG_PCH_DMA=m CONFIG_DMA_ENGINE=y # @@ -4052,7 +4362,6 @@ CONFIG_DMA_ENGINE=y CONFIG_NET_DMA=y CONFIG_ASYNC_TX_DMA=y CONFIG_DMATEST=m -CONFIG_DCA=m CONFIG_AUXDISPLAY=y CONFIG_KS0108=m CONFIG_KS0108_PORT=0x378 @@ -4067,10 +4376,6 @@ CONFIG_UIO_AEC=m CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m CONFIG_UIO_NETX=m - -# -# TI VLYNQ -# CONFIG_XEN=y CONFIG_XEN_INTERFACE_VERSION=0x00030207 @@ -4078,9 +4383,8 @@ CONFIG_XEN_INTERFACE_VERSION=0x00030207 # XEN # CONFIG_XEN_PRIVILEGED_GUEST=y -# CONFIG_XEN_UNPRIVILEGED_GUEST is not set CONFIG_XEN_PRIVCMD=y -CONFIG_XEN_DOMCTL=m +CONFIG_XEN_DOMCTL=y CONFIG_XEN_XENBUS_DEV=y CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL=m CONFIG_XEN_BACKEND=m @@ -4096,7 +4400,6 @@ CONFIG_XEN_PCIDEV_BACKEND=m CONFIG_XEN_PCIDEV_BACKEND_VPCI=y # CONFIG_XEN_PCIDEV_BACKEND_PASS is not set # CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set -# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set # CONFIG_XEN_PCIDEV_BE_DEBUG is not set CONFIG_XEN_TPMDEV_BACKEND=m CONFIG_XEN_SCSI_BACKEND=m @@ -4118,13 +4421,14 @@ CONFIG_XEN_NR_GUEST_DEVICES=2048 # CONFIG_XEN_COMPAT_030004_AND_LATER is not set # CONFIG_XEN_COMPAT_030100_AND_LATER is not set # CONFIG_XEN_COMPAT_030200_AND_LATER is not set -CONFIG_XEN_COMPAT_030300_AND_LATER=y -# CONFIG_XEN_COMPAT_030400_AND_LATER is not set +# CONFIG_XEN_COMPAT_030300_AND_LATER is not set +CONFIG_XEN_COMPAT_030400_AND_LATER=y +# CONFIG_XEN_COMPAT_040000_AND_LATER is not set +# CONFIG_XEN_COMPAT_040100_AND_LATER is not set # CONFIG_XEN_COMPAT_LATEST_ONLY is not set -CONFIG_XEN_COMPAT=0x030300 +CONFIG_XEN_COMPAT=0x030400 CONFIG_XEN_VCPU_INFO_PLACEMENT=y CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y -CONFIG_IRQ_PER_CPU=y CONFIG_ARCH_HAS_WALK_MEMORY=y CONFIG_XEN_SMPBOOT=y CONFIG_XEN_DEVMEM=y @@ -4143,6 +4447,13 @@ CONFIG_SLICOSS=m # CONFIG_VIDEO_GO7007 is not set CONFIG_VIDEO_CX25821=m CONFIG_VIDEO_CX25821_ALSA=m +CONFIG_VIDEO_TM6000=m +CONFIG_VIDEO_TM6000_ALSA=m +CONFIG_VIDEO_TM6000_DVB=m +CONFIG_USB_DABUSB=m +CONFIG_USB_SE401=m +CONFIG_VIDEO_USBVIDEO=m +CONFIG_USB_VICAM=m CONFIG_USB_IP_COMMON=m CONFIG_USB_IP_VHCI_HCD=m CONFIG_USB_IP_HOST=m @@ -4150,8 +4461,9 @@ CONFIG_USB_IP_HOST=m CONFIG_W35UND=m CONFIG_PRISM2_USB=m CONFIG_ECHO=m -CONFIG_POCH=m -CONFIG_OTUS=m +CONFIG_BRCM80211=m +CONFIG_BRCM80211_PCI=y +# CONFIG_BRCMFMAC is not set CONFIG_RT2860=m CONFIG_RT2870=m # CONFIG_COMEDI is not set @@ -4161,24 +4473,23 @@ CONFIG_PANEL_PARPORT=0 CONFIG_PANEL_PROFILE=5 # CONFIG_PANEL_CHANGE_MESSAGE is not set CONFIG_R8187SE=m -CONFIG_RTL8192SU=m 
CONFIG_RTL8192U=m CONFIG_RTL8192E=m +CONFIG_R8712U=m +CONFIG_R8712_AP=y CONFIG_TRANZPORT=m - -# -# Qualcomm MSM Camera And Video -# - -# -# Camera Sensor Selection -# -CONFIG_INPUT_GPIO=m CONFIG_POHMELFS=m # CONFIG_POHMELFS_DEBUG is not set CONFIG_POHMELFS_CRYPTO=y +CONFIG_AUTOFS_FS=m CONFIG_IDE_PHISON=m CONFIG_LINE6_USB=m +# CONFIG_LINE6_USB_DEBUG is not set +# CONFIG_LINE6_USB_DUMP_CTRL is not set +# CONFIG_LINE6_USB_DUMP_MIDI is not set +# CONFIG_LINE6_USB_DUMP_PCM is not set +# CONFIG_LINE6_USB_RAW is not set +# CONFIG_LINE6_USB_IMPULSE_RESPONSE is not set CONFIG_DRM_NOUVEAU=m CONFIG_DRM_NOUVEAU_BACKLIGHT=y # CONFIG_DRM_NOUVEAU_DEBUG is not set @@ -4187,11 +4498,11 @@ CONFIG_DRM_NOUVEAU_BACKLIGHT=y # I2C encoder or helper chips # CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m CONFIG_USB_SERIAL_QUATECH2=m CONFIG_USB_SERIAL_QUATECH_USB2=m CONFIG_VT6655=m CONFIG_VT6656=m -CONFIG_FB_UDL=m CONFIG_VME_BUS=m # @@ -4209,11 +4520,6 @@ CONFIG_VME_USER=m # VME Board Drivers # CONFIG_VMIVME_7805=m - -# -# RAR Register Driver -# -CONFIG_RAR_REGISTER=m CONFIG_DX_SEP=m CONFIG_IIO=m CONFIG_IIO_RING_BUFFER=y @@ -4223,6 +4529,12 @@ CONFIG_IIO_TRIGGER=y # # Accelerometers # +CONFIG_ADIS16201=m +CONFIG_ADIS16203=m +CONFIG_ADIS16204=m +CONFIG_ADIS16209=m +CONFIG_ADIS16220=m +CONFIG_ADIS16240=m CONFIG_KXSD9=m CONFIG_LIS3L02DQ=m CONFIG_SCA3000=m @@ -4232,28 +4544,163 @@ CONFIG_SCA3000=m # CONFIG_MAX1363=m CONFIG_MAX1363_RING_BUFFER=y +CONFIG_AD7150=m +CONFIG_AD7152=m +CONFIG_AD7291=m +CONFIG_AD7298=m +CONFIG_AD7314=m +CONFIG_AD799X=m +CONFIG_AD799X_RING_BUFFER=y +CONFIG_AD7476=m +CONFIG_AD7887=m +CONFIG_AD7745=m +CONFIG_AD7816=m +CONFIG_ADT75=m +CONFIG_ADT7310=m +CONFIG_ADT7410=m + +# +# Analog digital bi-direction convertors +# +CONFIG_ADT7316=m +CONFIG_ADT7316_SPI=m +CONFIG_ADT7316_I2C=m + +# +# Digital to analog convertors +# +CONFIG_AD5624R_SPI=m +CONFIG_AD5446=m + +# +# Direct Digital Synthesis +# +CONFIG_AD5930=m +CONFIG_AD9832=m +CONFIG_AD9834=m +CONFIG_AD9850=m +CONFIG_AD9852=m +CONFIG_AD9910=m +CONFIG_AD9951=m + +# +# Digital gyroscope sensors +# +CONFIG_ADIS16060=m +CONFIG_ADIS16080=m +CONFIG_ADIS16130=m +CONFIG_ADIS16260=m +CONFIG_ADIS16251=m + +# +# Inertial measurement units +# +CONFIG_ADIS16300=m +CONFIG_ADIS16350=m +CONFIG_ADIS16400=m # # Light sensors # CONFIG_SENSORS_TSL2563=m +CONFIG_SENSORS_ISL29018=m + +# +# Magnetometer sensors +# +CONFIG_SENSORS_AK8975=m +CONFIG_SENSORS_HMC5843=m + +# +# Active energy metering IC +# +CONFIG_ADE7753=m +CONFIG_ADE7754=m +CONFIG_ADE7758=m +CONFIG_ADE7759=m +CONFIG_ADE7854=m +CONFIG_ADE7854_I2C=m +CONFIG_ADE7854_SPI=m + +# +# Resolver to digital converters +# +CONFIG_AD2S90=m +CONFIG_AD2S120X=m +CONFIG_AD2S1210=m +# CONFIG_AD2S1210_GPIO_INPUT is not set +# CONFIG_AD2S1210_GPIO_OUTPUT is not set +CONFIG_AD2S1210_GPIO_NONE=y # # Triggers - standalone # CONFIG_IIO_PERIODIC_RTC_TRIGGER=m CONFIG_IIO_GPIO_TRIGGER=m -CONFIG_RAMZSWAP=m -CONFIG_RAMZSWAP_STATS=y -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_ZRAM=m +CONFIG_WLAGS49_H2=m +CONFIG_WLAGS49_H25=m CONFIG_SAMSUNG_LAPTOP=m -CONFIG_STRIP=m -CONFIG_PCMCIA_WAVELAN=m -CONFIG_PCMCIA_NETWAVE=m CONFIG_FB_SM7XX=m -CONFIG_DT3155=m +CONFIG_VIDEO_DT3155=m +CONFIG_DT3155_CCIR=y CONFIG_CRYSTALHD=m +CONFIG_CXT1E1=m +# CONFIG_SBE_PMCC4_NCOMM is not set + +# +# Texas Instruments shared transport line discipline +# +CONFIG_ST_BT=m +CONFIG_FB_XGI=m +CONFIG_LIRC_STAGING=y +CONFIG_LIRC_BT829=m +CONFIG_LIRC_IGORPLUGUSB=m +CONFIG_LIRC_IMON=m +CONFIG_LIRC_IT87=m +CONFIG_LIRC_ITE8709=m +CONFIG_LIRC_PARALLEL=m 
+CONFIG_LIRC_SASEM=m +CONFIG_LIRC_SERIAL=m +CONFIG_LIRC_SERIAL_TRANSMITTER=y +CONFIG_LIRC_SIR=m +CONFIG_LIRC_TTUSBIR=m +CONFIG_LIRC_ZILOG=m +# CONFIG_SMB_FS is not set +CONFIG_EASYCAP=m +CONFIG_SOLO6X10=m +CONFIG_ACPI_QUICKSTART=m +CONFIG_MACH_NO_WESTBRIDGE=y +CONFIG_SBE_2T3E3=m +CONFIG_ATH6K_LEGACY=m +CONFIG_AR600x_SD31_XXX=y +# CONFIG_AR600x_WB31_XXX is not set +# CONFIG_AR600x_SD32_XXX is not set +# CONFIG_AR600x_CUSTOM_XXX is not set +CONFIG_ATH6KL_ENABLE_COEXISTENCE=y +CONFIG_AR600x_DUAL_ANTENNA=y +# CONFIG_AR600x_SINGLE_ANTENNA is not set +# CONFIG_AR600x_BT_QCOM is not set +# CONFIG_AR600x_BT_CSR is not set +CONFIG_AR600x_BT_AR3001=y +CONFIG_ATH6KL_HCI_BRIDGE=y +# CONFIG_ATH6KL_CONFIG_GPIO_BT_RESET is not set +CONFIG_ATH6KL_CFG80211=y +# CONFIG_ATH6KL_HTC_RAW_INTERFACE is not set +# CONFIG_ATH6KL_VIRTUAL_SCATTER_GATHER is not set +# CONFIG_ATH6KL_SKIP_ABI_VERSION_CHECK is not set +# CONFIG_ATH6KL_DEBUG is not set +CONFIG_USB_ENESTORAGE=m +CONFIG_BCM_WIMAX=m +CONFIG_FT1000=m +CONFIG_FT1000_USB=m + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +CONFIG_TOUCHSCREEN_CLEARPAD_TM1217=m +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=m CONFIG_X86_PLATFORM_DEVICES=y CONFIG_ACER_WMI=m CONFIG_ACERHDF=m @@ -4268,6 +4715,7 @@ CONFIG_PANASONIC_LAPTOP=m CONFIG_COMPAL_LAPTOP=m CONFIG_SONY_LAPTOP=m CONFIG_SONYPI_COMPAT=y +CONFIG_IDEAPAD_LAPTOP=m CONFIG_THINKPAD_ACPI=m CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y # CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set @@ -4275,6 +4723,7 @@ CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y # CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set CONFIG_THINKPAD_ACPI_VIDEO=y CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_SENSORS_HDAPS=m CONFIG_INTEL_MENLOW=m CONFIG_EEEPC_LAPTOP=m CONFIG_EEEPC_WMI=m @@ -4285,6 +4734,8 @@ CONFIG_TOPSTAR_LAPTOP=m CONFIG_ACPI_TOSHIBA=m CONFIG_TOSHIBA_BT_RFKILL=m CONFIG_ACPI_CMPC=m +CONFIG_INTEL_IPS=m +CONFIG_IBM_RTL=m # # Firmware Drivers @@ -4311,13 +4762,13 @@ CONFIG_EXT3_DEFAULTS_TO_ORDERED=y CONFIG_EXT3_DEFAULTS_TO_BARRIERS_ENABLED=y CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_NFS4ACL=y CONFIG_EXT3_FS_SECURITY=y CONFIG_EXT4_FS=m CONFIG_EXT4_FS_XATTR=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y # CONFIG_EXT4_DEBUG is not set +# CONFIG_EXT4_FS_RICHACL is not set CONFIG_JBD=y # CONFIG_JBD_DEBUG is not set CONFIG_JBD2=m @@ -4335,11 +4786,8 @@ CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y # CONFIG_JFS_DEBUG is not set CONFIG_JFS_STATISTICS=y -CONFIG_FS_POSIX_ACL=y -CONFIG_FS_NFS4ACL=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y -CONFIG_XFS_DMAPI=m CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y # CONFIG_XFS_DEBUG is not set @@ -4354,13 +4802,13 @@ CONFIG_OCFS2_FS_STATS=y CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=m CONFIG_FILE_LOCKING=y CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y -CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y -CONFIG_DMAPI=m -# CONFIG_DMAPI_DEBUG is not set +CONFIG_FANOTIFY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_PRINT_QUOTA_WARNING=y @@ -4370,7 +4818,6 @@ CONFIG_QFMT_V1=m CONFIG_QFMT_V2=m CONFIG_QUOTACTL=y CONFIG_QUOTACTL_COMPAT=y -CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m CONFIG_CUSE=m @@ -4456,6 +4903,9 @@ CONFIG_UBIFS_FS_ZLIB=y CONFIG_LOGFS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y # CONFIG_SQUASHFS_EMBEDDED is not set CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 CONFIG_VXFS_FS=m @@ -4483,14 +4933,17 @@ CONFIG_NFS_V4=y CONFIG_NFS_SWAP=y # CONFIG_NFS_V4_1 is not set CONFIG_NFS_FSCACHE=y +# 
CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFS_USE_NEW_IDMAPPER is not set CONFIG_NFSD=m +CONFIG_NFSD_DEPRECATED=y CONFIG_NFSD_V2_ACL=y CONFIG_NFSD_V3=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_LOCKD=m CONFIG_LOCKD_V4=y -CONFIG_EXPORTFS=m CONFIG_NFS_ACL_SUPPORT=m CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m @@ -4498,10 +4951,7 @@ CONFIG_SUNRPC_GSS=m CONFIG_SUNRPC_XPRT_RDMA=m CONFIG_SUNRPC_SWAP=y CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_SPKM3=m -# CONFIG_SMB_FS is not set CONFIG_CEPH_FS=m -CONFIG_CEPH_FS_PRETTYDEBUG=y CONFIG_CIFS=m CONFIG_CIFS_STATS=y CONFIG_CIFS_STATS2=y @@ -4511,6 +4961,8 @@ CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG2 is not set CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_FSCACHE is not set +CONFIG_CIFS_ACL=y CONFIG_CIFS_EXPERIMENTAL=y CONFIG_NCP_FS=m CONFIG_NCPFS_PACKET_SIGNING=y @@ -4527,6 +4979,7 @@ CONFIG_AFS_FS=m CONFIG_AFS_FSCACHE=y CONFIG_9P_FS=m # CONFIG_9P_FSCACHE is not set +CONFIG_9P_FS_POSIX_ACL=y CONFIG_NOVFS=m # @@ -4609,9 +5062,8 @@ CONFIG_DEBUG_FS=y CONFIG_HEADERS_CHECK=y CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set -CONFIG_DETECT_SOFTLOCKUP=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set CONFIG_DETECT_HUNG_TASK=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=0 # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set @@ -4626,8 +5078,10 @@ CONFIG_TIMER_STATS=y # CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +CONFIG_BKL=y # CONFIG_DEBUG_LOCK_ALLOC is not set # CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set @@ -4640,6 +5094,7 @@ CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_WRITECOUNT is not set CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set # CONFIG_DEBUG_CREDENTIALS is not set @@ -4655,6 +5110,7 @@ CONFIG_STACK_UNWIND=y # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y CONFIG_LKDTM=m +# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set # CONFIG_FAULT_INJECTION is not set CONFIG_LATENCYTOP=y CONFIG_SYSCTL_SYSCALL_CHECK=y @@ -4668,8 +5124,10 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y CONFIG_RING_BUFFER=y CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y CONFIG_CONTEXT_SWITCH_TRACER=y CONFIG_RING_BUFFER_ALLOW_SWAP=y CONFIG_TRACING=y @@ -4678,17 +5136,12 @@ CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set # CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SYSPROF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_FTRACE_SYSCALLS is not set -# CONFIG_BOOT_TRACER is not set CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set # CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_KSYM_TRACER is not set # CONFIG_STACK_TRACER is not set -# CONFIG_KMEMTRACE is not set -# CONFIG_WORKQUEUE_TRACER is not set CONFIG_BLK_DEV_IO_TRACE=y CONFIG_KPROBE_EVENT=y # CONFIG_FTRACE_STARTUP_TEST is not set @@ -4698,6 +5151,8 @@ CONFIG_FIREWIRE_OHCI_REMOTE_DMA=y CONFIG_BUILD_DOCSRC=y CONFIG_DYNAMIC_DEBUG=y # CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_ASYNC_RAID6_TEST=m # CONFIG_SAMPLES 
is not set CONFIG_HAVE_ARCH_KMEMCHECK=y # CONFIG_KMEMCHECK is not set @@ -4710,6 +5165,7 @@ CONFIG_EARLY_PRINTK_DBGP=y # CONFIG_X86_PTDUMP is not set CONFIG_DEBUG_RODATA=y # CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_SET_MODULE_RONX is not set # CONFIG_DEBUG_NX_TEST is not set # CONFIG_IOMMU_STRESS is not set # CONFIG_X86_DECODER_SELFTEST is not set @@ -4730,12 +5186,12 @@ CONFIG_OPTIMIZE_INLINING=y # Security options # CONFIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=m # CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set # CONFIG_SECURITY is not set CONFIG_SECURITYFS=y -# CONFIG_DEFAULT_SECURITY_SELINUX is not set -# CONFIG_DEFAULT_SECURITY_SMACK is not set -# CONFIG_DEFAULT_SECURITY_TOMOYO is not set CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_DEFAULT_SECURITY="" CONFIG_XOR_BLOCKS=m @@ -4751,7 +5207,6 @@ CONFIG_CRYPTO=y # # Crypto core or helper # -CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=m @@ -4762,9 +5217,11 @@ CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_RNG=m CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_PCOMP=m +CONFIG_CRYPTO_PCOMP2=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y CONFIG_CRYPTO_GF128MUL=m CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_PCRYPT=m @@ -4854,6 +5311,8 @@ CONFIG_CRYPTO_LZO=m # Random Number Generation # CONFIG_CRYPTO_ANSI_CPRNG=m +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_DEV_PADLOCK=m CONFIG_CRYPTO_DEV_PADLOCK_AES=m @@ -4865,6 +5324,7 @@ CONFIG_BINARY_PRINTF=y # # Library routines # +CONFIG_RAID6_PQ=m CONFIG_BITREVERSE=y CONFIG_GENERIC_FIND_FIRST_BIT=y CONFIG_GENERIC_FIND_NEXT_BIT=y @@ -4880,9 +5340,19 @@ CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m CONFIG_LZO_COMPRESS=m CONFIG_LZO_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set CONFIG_DECOMPRESS_GZIP=y CONFIG_DECOMPRESS_BZIP2=y CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y CONFIG_DECOMPRESS_LZO=y CONFIG_GENERIC_ALLOCATOR=y CONFIG_REED_SOLOMON=m @@ -4898,3 +5368,4 @@ CONFIG_HAS_DMA=y CONFIG_CHECK_SIGNATURE=y CONFIG_NLATTR=y CONFIG_LRU_CACHE=m +CONFIG_AVERAGE=y diff --git a/patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace b/patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace new file mode 100644 index 0000000..4f33e83 --- /dev/null +++ b/patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace @@ -0,0 +1,64 @@ +From: John Johansen +Date: Tue, 20 Jul 2010 06:57:08 -0700 +Subject: AppArmor: Allow dfa backward compatibility with broken userspace +Patch-mainline: 2.6.37? + +The apparmor_parser when compiling policy could generate invalid dfas +that did not have sufficient padding to avoid invalid references, when +used by the kernel. The kernels check to verify the next/check table +size was broken meaning invalid dfas were being created by userspace +and not caught. + +To remain compatible with old tools that are not fixed, pad the loaded +dfas next/check table. The dfa's themselves are valid except for the +high padding for potentially invalid transitions (high bounds error), +which have a maximimum is 256 entries. So just allocate an extra null filled +256 entries for the next/check tables. 
This will guarentee all bounds +are good and invalid transitions go to the null (0) state. + +Signed-off-by: John Johansen +Acked-by: Jeff Mahoney +--- + security/apparmor/match.c | 17 +++++++++++++++++ + 1 file changed, 17 insertions(+) + +--- a/security/apparmor/match.c ++++ b/security/apparmor/match.c +@@ -57,8 +57,17 @@ static struct table_header *unpack_table + if (bsize < tsize) + goto out; + ++ /* Pad table allocation for next/check by 256 entries to remain ++ * backwards compatible with old (buggy) tools and remain safe without ++ * run time checks ++ */ ++ if (th.td_id == YYTD_ID_NXT || th.td_id == YYTD_ID_CHK) ++ tsize += 256 * th.td_flags; ++ + table = kvmalloc(tsize); + if (table) { ++ /* ensure the pad is clear, else there will be errors */ ++ memset(table, 0, tsize); + *table = th; + if (th.td_flags == YYTD_DATA8) + UNPACK_ARRAY(table->td_data, blob, th.td_lolen, +@@ -134,11 +143,19 @@ static int verify_dfa(struct aa_dfa *dfa + goto out; + + if (flags & DFA_FLAG_VERIFY_STATES) { ++ int warning = 0; + for (i = 0; i < state_count; i++) { + if (DEFAULT_TABLE(dfa)[i] >= state_count) + goto out; + /* TODO: do check that DEF state recursion terminates */ + if (BASE_TABLE(dfa)[i] + 255 >= trans_count) { ++ if (warning) ++ continue; ++ printk(KERN_WARNING "AppArmor DFA next/check " ++ "upper bounds error fixed, upgrade " ++ "user space tools \n"); ++ warning = 1; ++ } else if (BASE_TABLE(dfa)[i] >= trans_count) { + printk(KERN_ERR "AppArmor DFA next/check upper " + "bounds error\n"); + goto out; diff --git a/patches.apparmor/apparmor-compatibility-patch-for-v5-interface b/patches.apparmor/apparmor-compatibility-patch-for-v5-interface new file mode 100644 index 0000000..94ddec3 --- /dev/null +++ b/patches.apparmor/apparmor-compatibility-patch-for-v5-interface @@ -0,0 +1,379 @@ +From: John Johansen +Date: Thu, 22 Jul 2010 02:32:02 -0700 +Subject: AppArmor: compatibility patch for v5 interface +Patch-mainline: 2.6.37? + +Signed-off-by: John Johansen +Acked-by: Jeff Mahoney +--- + security/apparmor/Kconfig | 9 + + security/apparmor/Makefile | 2 + security/apparmor/apparmorfs-24.c | 287 +++++++++++++++++++++++++++++++++ + security/apparmor/apparmorfs.c | 18 +- + security/apparmor/include/apparmorfs.h | 6 + 5 files changed, 320 insertions(+), 2 deletions(-) + create mode 100644 security/apparmor/apparmorfs-24.c + +--- a/security/apparmor/Kconfig ++++ b/security/apparmor/Kconfig +@@ -29,3 +29,12 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE + boot. + + If you are unsure how to answer this question, answer 1. ++ ++config SECURITY_APPARMOR_COMPAT_24 ++ bool "Enable AppArmor 2.4 compatability" ++ depends on SECURITY_APPARMOR ++ default y ++ help ++ This option enables compatability with AppArmor 2.4. It is ++ recommended if compatability with older versions of AppArmor ++ is desired. +--- a/security/apparmor/Makefile ++++ b/security/apparmor/Makefile +@@ -6,6 +6,8 @@ apparmor-y := apparmorfs.o audit.o capab + path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ + resource.o sid.o file.o net.o + ++apparmor-$(CONFIG_SECURITY_APPARMOR_COMPAT_24) += apparmorfs-24.o ++ + clean-files: capability_names.h af_names.h + + quiet_cmd_make-caps = GEN $@ +--- /dev/null ++++ b/security/apparmor/apparmorfs-24.c +@@ -0,0 +1,287 @@ ++/* ++ * AppArmor security module ++ * ++ * This file contains AppArmor /sys/kernel/secrutiy/apparmor interface functions ++ * ++ * Copyright (C) 1998-2008 Novell/SUSE ++ * Copyright 2009-2010 Canonical Ltd. 
++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation, version 2 of the ++ * License. ++ * ++ * ++ * This file contain functions providing an interface for <= AppArmor 2.4 ++ * compatibility. It is dependent on CONFIG_SECURITY_APPARMOR_COMPAT_24 ++ * being set (see Makefile). ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "include/apparmor.h" ++#include "include/audit.h" ++#include "include/context.h" ++#include "include/policy.h" ++ ++ ++/* apparmor/matching */ ++static ssize_t aa_matching_read(struct file *file, char __user *buf, ++ size_t size, loff_t *ppos) ++{ ++ const char matching[] = "pattern=aadfa audit perms=crwxamlk/ " ++ "user::other"; ++ ++ return simple_read_from_buffer(buf, size, ppos, matching, ++ sizeof(matching) - 1); ++} ++ ++const struct file_operations aa_fs_matching_fops = { ++ .read = aa_matching_read, ++}; ++ ++/* apparmor/features */ ++static ssize_t aa_features_read(struct file *file, char __user *buf, ++ size_t size, loff_t *ppos) ++{ ++ const char features[] = "file=3.1 capability=2.0 network=1.0 " ++ "change_hat=1.5 change_profile=1.1 " "aanamespaces=1.1 rlimit=1.1"; ++ ++ return simple_read_from_buffer(buf, size, ppos, features, ++ sizeof(features) - 1); ++} ++ ++const struct file_operations aa_fs_features_fops = { ++ .read = aa_features_read, ++}; ++ ++/** ++ * __next_namespace - find the next namespace to list ++ * @root: root namespace to stop search at (NOT NULL) ++ * @ns: current ns position (NOT NULL) ++ * ++ * Find the next namespace from @ns under @root and handle all locking needed ++ * while switching current namespace. ++ * ++ * Returns: next namespace or NULL if at last namespace under @root ++ * NOTE: will not unlock root->lock ++ */ ++static struct aa_namespace *__next_namespace(struct aa_namespace *root, ++ struct aa_namespace *ns) ++{ ++ struct aa_namespace *parent; ++ ++ /* is next namespace a child */ ++ if (!list_empty(&ns->sub_ns)) { ++ struct aa_namespace *next; ++ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list); ++ read_lock(&next->lock); ++ return next; ++ } ++ ++ /* check if the next ns is a sibling, parent, gp, .. 
*/ ++ parent = ns->parent; ++ while (parent) { ++ read_unlock(&ns->lock); ++ list_for_each_entry_continue(ns, &parent->sub_ns, base.list) { ++ read_lock(&ns->lock); ++ return ns; ++ } ++ if (parent == root) ++ return NULL; ++ ns = parent; ++ parent = parent->parent; ++ } ++ ++ return NULL; ++} ++ ++/** ++ * __first_profile - find the first profile in a namespace ++ * @root: namespace that is root of profiles being displayed (NOT NULL) ++ * @ns: namespace to start in (NOT NULL) ++ * ++ * Returns: unrefcounted profile or NULL if no profile ++ */ ++static struct aa_profile *__first_profile(struct aa_namespace *root, ++ struct aa_namespace *ns) ++{ ++ for ( ; ns; ns = __next_namespace(root, ns)) { ++ if (!list_empty(&ns->base.profiles)) ++ return list_first_entry(&ns->base.profiles, ++ struct aa_profile, base.list); ++ } ++ return NULL; ++} ++ ++/** ++ * __next_profile - step to the next profile in a profile tree ++ * @profile: current profile in tree (NOT NULL) ++ * ++ * Perform a depth first taversal on the profile tree in a namespace ++ * ++ * Returns: next profile or NULL if done ++ * Requires: profile->ns.lock to be held ++ */ ++static struct aa_profile *__next_profile(struct aa_profile *p) ++{ ++ struct aa_profile *parent; ++ struct aa_namespace *ns = p->ns; ++ ++ /* is next profile a child */ ++ if (!list_empty(&p->base.profiles)) ++ return list_first_entry(&p->base.profiles, typeof(*p), ++ base.list); ++ ++ /* is next profile a sibling, parent sibling, gp, subling, .. */ ++ parent = p->parent; ++ while (parent) { ++ list_for_each_entry_continue(p, &parent->base.profiles, ++ base.list) ++ return p; ++ p = parent; ++ parent = parent->parent; ++ } ++ ++ /* is next another profile in the namespace */ ++ list_for_each_entry_continue(p, &ns->base.profiles, base.list) ++ return p; ++ ++ return NULL; ++} ++ ++/** ++ * next_profile - step to the next profile in where ever it may be ++ * @root: root namespace (NOT NULL) ++ * @profile: current profile (NOT NULL) ++ * ++ * Returns: next profile or NULL if there isn't one ++ */ ++static struct aa_profile *next_profile(struct aa_namespace *root, ++ struct aa_profile *profile) ++{ ++ struct aa_profile *next = __next_profile(profile); ++ if (next) ++ return next; ++ ++ /* finished all profiles in namespace move to next namespace */ ++ return __first_profile(root, __next_namespace(root, profile->ns)); ++} ++ ++/** ++ * p_start - start a depth first traversal of profile tree ++ * @f: seq_file to fill ++ * @pos: current position ++ * ++ * Returns: first profile under current namespace or NULL if none found ++ * ++ * acquires first ns->lock ++ */ ++static void *p_start(struct seq_file *f, loff_t *pos) ++ __acquires(root->lock) ++{ ++ struct aa_profile *profile = NULL; ++ struct aa_namespace *root = aa_current_profile()->ns; ++ loff_t l = *pos; ++ f->private = aa_get_namespace(root); ++ ++ ++ /* find the first profile */ ++ read_lock(&root->lock); ++ profile = __first_profile(root, root); ++ ++ /* skip to position */ ++ for (; profile && l > 0; l--) ++ profile = next_profile(root, profile); ++ ++ return profile; ++} ++ ++/** ++ * p_next - read the next profile entry ++ * @f: seq_file to fill ++ * @p: profile previously returned ++ * @pos: current position ++ * ++ * Returns: next profile after @p or NULL if none ++ * ++ * may acquire/release locks in namespace tree as necessary ++ */ ++static void *p_next(struct seq_file *f, void *p, loff_t *pos) ++{ ++ struct aa_profile *profile = p; ++ struct aa_namespace *root = f->private; ++ (*pos)++; ++ ++ return 
next_profile(root, profile); ++} ++ ++/** ++ * p_stop - stop depth first traversal ++ * @f: seq_file we are filling ++ * @p: the last profile writen ++ * ++ * Release all locking done by p_start/p_next on namespace tree ++ */ ++static void p_stop(struct seq_file *f, void *p) ++ __releases(root->lock) ++{ ++ struct aa_profile *profile = p; ++ struct aa_namespace *root = f->private, *ns; ++ ++ if (profile) { ++ for (ns = profile->ns; ns && ns != root; ns = ns->parent) ++ read_unlock(&ns->lock); ++ } ++ read_unlock(&root->lock); ++ aa_put_namespace(root); ++} ++ ++/** ++ * seq_show_profile - show a profile entry ++ * @f: seq_file to file ++ * @p: current position (profile) (NOT NULL) ++ * ++ * Returns: error on failure ++ */ ++static int seq_show_profile(struct seq_file *f, void *p) ++{ ++ struct aa_profile *profile = (struct aa_profile *)p; ++ struct aa_namespace *root = f->private; ++ ++ if (profile->ns != root) ++ seq_printf(f, ":%s://", aa_ns_name(root, profile->ns)); ++ seq_printf(f, "%s (%s)\n", profile->base.hname, ++ COMPLAIN_MODE(profile) ? "complain" : "enforce"); ++ ++ return 0; ++} ++ ++static const struct seq_operations aa_fs_profiles_op = { ++ .start = p_start, ++ .next = p_next, ++ .stop = p_stop, ++ .show = seq_show_profile, ++}; ++ ++static int profiles_open(struct inode *inode, struct file *file) ++{ ++ return seq_open(file, &aa_fs_profiles_op); ++} ++ ++static int profiles_release(struct inode *inode, struct file *file) ++{ ++ return seq_release(inode, file); ++} ++ ++const struct file_operations aa_fs_profiles_fops = { ++ .open = profiles_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = profiles_release, ++}; +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -182,7 +182,11 @@ void __init aa_destroy_aafs(void) + aafs_remove(".remove"); + aafs_remove(".replace"); + aafs_remove(".load"); +- ++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24 ++ aafs_remove("profiles"); ++ aafs_remove("matching"); ++ aafs_remove("features"); ++#endif + securityfs_remove(aa_fs_dentry); + aa_fs_dentry = NULL; + } +@@ -213,7 +217,17 @@ int __init aa_create_aafs(void) + aa_fs_dentry = NULL; + goto error; + } +- ++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24 ++ error = aafs_create("matching", 0444, &aa_fs_matching_fops); ++ if (error) ++ goto error; ++ error = aafs_create("features", 0444, &aa_fs_features_fops); ++ if (error) ++ goto error; ++#endif ++ error = aafs_create("profiles", 0440, &aa_fs_profiles_fops); ++ if (error) ++ goto error; + error = aafs_create(".load", 0640, &aa_fs_profile_load); + if (error) + goto error; +--- a/security/apparmor/include/apparmorfs.h ++++ b/security/apparmor/include/apparmorfs.h +@@ -17,4 +17,10 @@ + + extern void __init aa_destroy_aafs(void); + ++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24 ++extern const struct file_operations aa_fs_matching_fops; ++extern const struct file_operations aa_fs_features_fops; ++extern const struct file_operations aa_fs_profiles_fops; ++#endif ++ + #endif /* __AA_APPARMORFS_H */ diff --git a/patches.apparmor/apparmor-compatibility-patch-for-v5-network-control b/patches.apparmor/apparmor-compatibility-patch-for-v5-network-control new file mode 100644 index 0000000..354a8bc --- /dev/null +++ b/patches.apparmor/apparmor-compatibility-patch-for-v5-network-control @@ -0,0 +1,518 @@ +From: John Johansen +Date: Mon, 4 Oct 2010 15:03:36 -0700 +Subject: AppArmor: compatibility patch for v5 network control +Patch-mainline: 2.6.37? + +Add compatibility for v5 network rules. 
+ +Signed-off-by: John Johansen +Acked-by: Jeff Mahoney +--- + include/linux/lsm_audit.h | 4 + security/apparmor/Makefile | 6 + + security/apparmor/include/net.h | 40 ++++++++ + security/apparmor/include/policy.h | 3 + security/apparmor/lsm.c | 112 ++++++++++++++++++++++ + security/apparmor/net.c | 170 ++++++++++++++++++++++++++++++++++ + security/apparmor/policy.c | 1 + security/apparmor/policy_unpack.c | 48 +++++++++ + 8 files changed, 381 insertions(+), 2 deletions(-) + create mode 100644 security/apparmor/include/net.h + create mode 100644 security/apparmor/net.c + +--- a/include/linux/lsm_audit.h ++++ b/include/linux/lsm_audit.h +@@ -123,6 +123,10 @@ struct common_audit_data { + u32 denied; + uid_t ouid; + } fs; ++ struct { ++ int type, protocol; ++ struct sock *sk; ++ } net; + }; + } apparmor_audit_data; + #endif +--- a/security/apparmor/Makefile ++++ b/security/apparmor/Makefile +@@ -4,17 +4,21 @@ obj-$(CONFIG_SECURITY_APPARMOR) += appar + + apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ + path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ +- resource.o sid.o file.o ++ resource.o sid.o file.o net.o + + clean-files: capability_names.h af_names.h + + quiet_cmd_make-caps = GEN $@ + cmd_make-caps = echo "static const char *capability_names[] = {" > $@ ; sed -n -e "/CAP_FS_MASK/d" -e "s/^\#define[ \\t]\\+CAP_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ + ++quiet_cmd_make-af = GEN $@ ++cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ; sed -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "s/^\#define[ \\t]\\+AF_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ ++ + quiet_cmd_make-rlim = GEN $@ + cmd_make-rlim = echo "static const char *rlim_names[] = {" > $@ ; sed -n --e "/AF_MAX/d" -e "s/^\# \\?define[ \\t]\\+RLIMIT_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ ; echo "static const int rlim_map[] = {" >> $@ ; sed -n -e "/AF_MAX/d" -e "s/^\# \\?define[ \\t]\\+\\(RLIMIT_[A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/\\1,/p" $< >> $@ ; echo "};" >> $@ + + $(obj)/capability.o : $(obj)/capability_names.h ++$(obj)/net.o : $(obj)/af_names.h + $(obj)/resource.o : $(obj)/rlim_names.h + $(obj)/capability_names.h : $(srctree)/include/linux/capability.h + $(call cmd,make-caps) +--- /dev/null ++++ b/security/apparmor/include/net.h +@@ -0,0 +1,40 @@ ++/* ++ * AppArmor security module ++ * ++ * This file contains AppArmor network mediation definitions. ++ * ++ * Copyright (C) 1998-2008 Novell/SUSE ++ * Copyright 2009-2010 Canonical Ltd. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation, version 2 of the ++ * License. 
++ */ ++ ++#ifndef __AA_NET_H ++#define __AA_NET_H ++ ++#include ++ ++/* struct aa_net - network confinement data ++ * @allowed: basic network families permissions ++ * @audit_network: which network permissions to force audit ++ * @quiet_network: which network permissions to quiet rejects ++ */ ++struct aa_net { ++ u16 allow[AF_MAX]; ++ u16 audit[AF_MAX]; ++ u16 quiet[AF_MAX]; ++}; ++ ++extern int aa_net_perm(int op, struct aa_profile *profile, u16 family, ++ int type, int protocol, struct sock *sk); ++extern int aa_revalidate_sk(int op, struct sock *sk); ++ ++static inline void aa_free_net_rules(struct aa_net *new) ++{ ++ /* NOP */ ++} ++ ++#endif /* __AA_NET_H */ +--- a/security/apparmor/include/policy.h ++++ b/security/apparmor/include/policy.h +@@ -27,6 +27,7 @@ + #include "capability.h" + #include "domain.h" + #include "file.h" ++#include "net.h" + #include "resource.h" + + extern const char *profile_mode_names[]; +@@ -145,6 +146,7 @@ struct aa_namespace { + * @size: the memory consumed by this profiles rules + * @file: The set of rules governing basic file access and domain transitions + * @caps: capabilities for the profile ++ * @net: network controls for the profile + * @rlimits: rlimits for the profile + * + * The AppArmor profile contains the basic confinement data. Each profile +@@ -181,6 +183,7 @@ struct aa_profile { + + struct aa_file_rules file; + struct aa_caps caps; ++ struct aa_net net; + struct aa_rlimit rlimits; + }; + +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -31,6 +31,7 @@ + #include "include/context.h" + #include "include/file.h" + #include "include/ipc.h" ++#include "include/net.h" + #include "include/path.h" + #include "include/policy.h" + #include "include/procattr.h" +@@ -619,6 +620,104 @@ static int apparmor_task_setrlimit(struc + return error; + } + ++static int apparmor_socket_create(int family, int type, int protocol, int kern) ++{ ++ struct aa_profile *profile; ++ int error = 0; ++ ++ if (kern) ++ return 0; ++ ++ profile = __aa_current_profile(); ++ if (!unconfined(profile)) ++ error = aa_net_perm(OP_CREATE, profile, family, type, protocol, ++ NULL); ++ return error; ++} ++ ++static int apparmor_socket_bind(struct socket *sock, ++ struct sockaddr *address, int addrlen) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_BIND, sk); ++} ++ ++static int apparmor_socket_connect(struct socket *sock, ++ struct sockaddr *address, int addrlen) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_CONNECT, sk); ++} ++ ++static int apparmor_socket_listen(struct socket *sock, int backlog) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_LISTEN, sk); ++} ++ ++static int apparmor_socket_accept(struct socket *sock, struct socket *newsock) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_ACCEPT, sk); ++} ++ ++static int apparmor_socket_sendmsg(struct socket *sock, ++ struct msghdr *msg, int size) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_SENDMSG, sk); ++} ++ ++static int apparmor_socket_recvmsg(struct socket *sock, ++ struct msghdr *msg, int size, int flags) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_RECVMSG, sk); ++} ++ ++static int apparmor_socket_getsockname(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_GETSOCKNAME, sk); ++} ++ ++static int apparmor_socket_getpeername(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_GETPEERNAME, sk); ++} ++ ++static 
int apparmor_socket_getsockopt(struct socket *sock, int level, ++ int optname) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_GETSOCKOPT, sk); ++} ++ ++static int apparmor_socket_setsockopt(struct socket *sock, int level, ++ int optname) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_SETSOCKOPT, sk); ++} ++ ++static int apparmor_socket_shutdown(struct socket *sock, int how) ++{ ++ struct sock *sk = sock->sk; ++ ++ return aa_revalidate_sk(OP_SOCK_SHUTDOWN, sk); ++} ++ + static struct security_operations apparmor_ops = { + .name = "apparmor", + +@@ -650,6 +749,19 @@ static struct security_operations apparm + .getprocattr = apparmor_getprocattr, + .setprocattr = apparmor_setprocattr, + ++ .socket_create = apparmor_socket_create, ++ .socket_bind = apparmor_socket_bind, ++ .socket_connect = apparmor_socket_connect, ++ .socket_listen = apparmor_socket_listen, ++ .socket_accept = apparmor_socket_accept, ++ .socket_sendmsg = apparmor_socket_sendmsg, ++ .socket_recvmsg = apparmor_socket_recvmsg, ++ .socket_getsockname = apparmor_socket_getsockname, ++ .socket_getpeername = apparmor_socket_getpeername, ++ .socket_getsockopt = apparmor_socket_getsockopt, ++ .socket_setsockopt = apparmor_socket_setsockopt, ++ .socket_shutdown = apparmor_socket_shutdown, ++ + .cred_alloc_blank = apparmor_cred_alloc_blank, + .cred_free = apparmor_cred_free, + .cred_prepare = apparmor_cred_prepare, +--- /dev/null ++++ b/security/apparmor/net.c +@@ -0,0 +1,170 @@ ++/* ++ * AppArmor security module ++ * ++ * This file contains AppArmor network mediation ++ * ++ * Copyright (C) 1998-2008 Novell/SUSE ++ * Copyright 2009-2010 Canonical Ltd. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation, version 2 of the ++ * License. 
++ */ ++ ++#include "include/apparmor.h" ++#include "include/audit.h" ++#include "include/context.h" ++#include "include/net.h" ++#include "include/policy.h" ++ ++#include "af_names.h" ++ ++static const char *sock_type_names[] = { ++ "unknown(0)", ++ "stream", ++ "dgram", ++ "raw", ++ "rdm", ++ "seqpacket", ++ "dccp", ++ "unknown(7)", ++ "unknown(8)", ++ "unknown(9)", ++ "packet", ++}; ++ ++/* audit callback for net specific fields */ ++static void audit_cb(struct audit_buffer *ab, void *va) ++{ ++ struct common_audit_data *sa = va; ++ ++ audit_log_format(ab, " family="); ++ if (address_family_names[sa->u.net.family]) { ++ audit_log_string(ab, address_family_names[sa->u.net.family]); ++ } else { ++ audit_log_format(ab, " \"unknown(%d)\"", sa->u.net.family); ++ } ++ ++ audit_log_format(ab, " sock_type="); ++ if (sock_type_names[sa->aad.net.type]) { ++ audit_log_string(ab, sock_type_names[sa->aad.net.type]); ++ } else { ++ audit_log_format(ab, "\"unknown(%d)\"", sa->aad.net.type); ++ } ++ ++ audit_log_format(ab, " protocol=%d", sa->aad.net.protocol); ++} ++ ++/** ++ * audit_net - audit network access ++ * @profile: profile being enforced (NOT NULL) ++ * @op: operation being checked ++ * @family: network family ++ * @type: network type ++ * @protocol: network protocol ++ * @sk: socket auditing is being applied to ++ * @error: error code for failure else 0 ++ * ++ * Returns: %0 or sa->error else other errorcode on failure ++ */ ++static int audit_net(struct aa_profile *profile, int op, u16 family, int type, ++ int protocol, struct sock *sk, int error) ++{ ++ int audit_type = AUDIT_APPARMOR_AUTO; ++ struct common_audit_data sa; ++ if (sk) { ++ COMMON_AUDIT_DATA_INIT(&sa, NET); ++ } else { ++ COMMON_AUDIT_DATA_INIT(&sa, NONE); ++ } ++ /* todo fill in socket addr info */ ++ ++ sa.aad.op = op, ++ sa.u.net.family = family; ++ sa.u.net.sk = sk; ++ sa.aad.net.type = type; ++ sa.aad.net.protocol = protocol; ++ sa.aad.error = error; ++ ++ if (likely(!sa.aad.error)) { ++ u16 audit_mask = profile->net.audit[sa.u.net.family]; ++ if (likely((AUDIT_MODE(profile) != AUDIT_ALL) && ++ !(1 << sa.aad.net.type & audit_mask))) ++ return 0; ++ audit_type = AUDIT_APPARMOR_AUDIT; ++ } else { ++ u16 quiet_mask = profile->net.quiet[sa.u.net.family]; ++ u16 kill_mask = 0; ++ u16 denied = (1 << sa.aad.net.type) & ~quiet_mask; ++ ++ if (denied & kill_mask) ++ audit_type = AUDIT_APPARMOR_KILL; ++ ++ if ((denied & quiet_mask) && ++ AUDIT_MODE(profile) != AUDIT_NOQUIET && ++ AUDIT_MODE(profile) != AUDIT_ALL) ++ return COMPLAIN_MODE(profile) ? 0 : sa.aad.error; ++ } ++ ++ return aa_audit(audit_type, profile, GFP_KERNEL, &sa, audit_cb); ++} ++ ++/** ++ * aa_net_perm - very course network access check ++ * @op: operation being checked ++ * @profile: profile being enforced (NOT NULL) ++ * @family: network family ++ * @type: network type ++ * @protocol: network protocol ++ * ++ * Returns: %0 else error if permission denied ++ */ ++int aa_net_perm(int op, struct aa_profile *profile, u16 family, int type, ++ int protocol, struct sock *sk) ++{ ++ u16 family_mask; ++ int error; ++ ++ if ((family < 0) || (family >= AF_MAX)) ++ return -EINVAL; ++ ++ if ((type < 0) || (type >= SOCK_MAX)) ++ return -EINVAL; ++ ++ /* unix domain and netlink sockets are handled by ipc */ ++ if (family == AF_UNIX || family == AF_NETLINK) ++ return 0; ++ ++ family_mask = profile->net.allow[family]; ++ ++ error = (family_mask & (1 << type)) ? 
0 : -EACCES; ++ ++ return audit_net(profile, op, family, type, protocol, sk, error); ++} ++ ++/** ++ * aa_revalidate_sk - Revalidate access to a sock ++ * @op: operation being checked ++ * @sk: sock being revalidated (NOT NULL) ++ * ++ * Returns: %0 else error if permission denied ++ */ ++int aa_revalidate_sk(int op, struct sock *sk) ++{ ++ struct aa_profile *profile; ++ int error = 0; ++ ++ /* aa_revalidate_sk should not be called from interrupt context ++ * don't mediate these calls as they are not task related ++ */ ++ if (in_interrupt()) ++ return 0; ++ ++ profile = __aa_current_profile(); ++ if (!unconfined(profile)) ++ error = aa_net_perm(op, profile, sk->sk_family, sk->sk_type, ++ sk->sk_protocol, sk); ++ ++ return error; ++} +--- a/security/apparmor/policy.c ++++ b/security/apparmor/policy.c +@@ -745,6 +745,7 @@ static void free_profile(struct aa_profi + + aa_free_file_rules(&profile->file); + aa_free_cap_rules(&profile->caps); ++ aa_free_net_rules(&profile->net); + aa_free_rlimit_rules(&profile->rlimits); + + aa_free_sid(profile->sid); +--- a/security/apparmor/policy_unpack.c ++++ b/security/apparmor/policy_unpack.c +@@ -190,6 +190,19 @@ fail: + return 0; + } + ++static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name) ++{ ++ if (unpack_nameX(e, AA_U16, name)) { ++ if (!inbounds(e, sizeof(u16))) ++ return 0; ++ if (data) ++ *data = le16_to_cpu(get_unaligned((u16 *) e->pos)); ++ e->pos += sizeof(u16); ++ return 1; ++ } ++ return 0; ++} ++ + static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) + { + if (unpack_nameX(e, AA_U32, name)) { +@@ -468,7 +481,8 @@ static struct aa_profile *unpack_profile + { + struct aa_profile *profile = NULL; + const char *name = NULL; +- int error = -EPROTO; ++ size_t size = 0; ++ int i, error = -EPROTO; + kernel_cap_t tmpcap; + u32 tmp; + +@@ -559,6 +573,38 @@ static struct aa_profile *unpack_profile + if (!unpack_rlimits(e, profile)) + goto fail; + ++ size = unpack_array(e, "net_allowed_af"); ++ if (size) { ++ ++ for (i = 0; i < size; i++) { ++ /* discard extraneous rules that this kernel will ++ * never request ++ */ ++ if (i > AF_MAX) { ++ u16 tmp; ++ if (!unpack_u16(e, &tmp, NULL) || ++ !unpack_u16(e, &tmp, NULL) || ++ !unpack_u16(e, &tmp, NULL)) ++ goto fail; ++ continue; ++ } ++ if (!unpack_u16(e, &profile->net.allow[i], NULL)) ++ goto fail; ++ if (!unpack_u16(e, &profile->net.audit[i], NULL)) ++ goto fail; ++ if (!unpack_u16(e, &profile->net.quiet[i], NULL)) ++ goto fail; ++ } ++ if (!unpack_nameX(e, AA_ARRAYEND, NULL)) ++ goto fail; ++ /* ++ * allow unix domain and netlink sockets they are handled ++ * by IPC ++ */ ++ } ++ profile->net.allow[AF_UNIX] = 0xffff; ++ profile->net.allow[AF_NETLINK] = 0xffff; ++ + /* get file rules */ + profile->file.dfa = unpack_dfa(e); + if (IS_ERR(profile->file.dfa)) { diff --git a/patches.arch/UV-Expose-irq_desc-node-in-proc.patch b/patches.arch/UV-Expose-irq_desc-node-in-proc.patch deleted file mode 100644 index aef2d88..0000000 --- a/patches.arch/UV-Expose-irq_desc-node-in-proc.patch +++ /dev/null @@ -1,69 +0,0 @@ -From: Dimitri Sivanich -Subject: Expose the irq_desc node as /proc/irq/*/node. -References: bnc#566745, fate#306952 -Patch-mainline: not yet - -This file provides device hardware locality information for apps desiring -to include hardware locality in irq mapping decisions. - -Signed-off-by: Dimitri Sivanich -Signed-off-by: Andrew Morton -Signed-off-by: Rafael J. 
Wysocki ---- - - Documentation/filesystems/proc.txt | 4 ++++ - kernel/irq/proc.c | 23 +++++++++++++++++++++++ - 2 files changed, 27 insertions(+) - ---- a/Documentation/filesystems/proc.txt -+++ b/Documentation/filesystems/proc.txt -@@ -566,6 +566,10 @@ The default_smp_affinity mask applies to - IRQs which have not yet been allocated/activated, and hence which lack a - /proc/irq/[0-9]* directory. - -+The node file on an SMP system shows the node to which the device using the IRQ -+reports itself as being attached. This hardware locality information does not -+include information about any possible driver locality preference. -+ - prof_cpu_mask specifies which CPUs are to be profiled by the system wide - profiler. Default value is ffffffff (all cpus). - ---- a/kernel/irq/proc.c -+++ b/kernel/irq/proc.c -@@ -146,6 +146,26 @@ static const struct file_operations defa - .release = single_release, - .write = default_affinity_write, - }; -+ -+static int irq_node_proc_show(struct seq_file *m, void *v) -+{ -+ struct irq_desc *desc = irq_to_desc((long) m->private); -+ -+ seq_printf(m, "%d\n", desc->node); -+ return 0; -+} -+ -+static int irq_node_proc_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, irq_node_proc_show, PDE(inode)->data); -+} -+ -+static const struct file_operations irq_node_proc_fops = { -+ .open = irq_node_proc_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; - #endif - - static int irq_spurious_proc_show(struct seq_file *m, void *v) -@@ -230,6 +250,9 @@ void register_irq_proc(unsigned int irq, - /* create /proc/irq//smp_affinity */ - proc_create_data("smp_affinity", 0600, desc->dir, - &irq_affinity_proc_fops, (void *)(long)irq); -+ -+ proc_create_data("node", 0444, desc->dir, -+ &irq_node_proc_fops, (void *)(long)irq); - #endif - - proc_create_data("spurious", 0444, desc->dir, diff --git a/patches.arch/acpi-export-hotplug_execute b/patches.arch/acpi-export-hotplug_execute index c50a568..bc27ba3 100644 --- a/patches.arch/acpi-export-hotplug_execute +++ b/patches.arch/acpi-export-hotplug_execute @@ -14,7 +14,7 @@ Signed-off-by: Jeff Mahoney --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c -@@ -782,6 +782,7 @@ acpi_status acpi_os_hotplug_execute(acpi +@@ -941,6 +941,7 @@ acpi_status acpi_os_hotplug_execute(acpi { return __acpi_os_execute(0, function, context, 1); } diff --git a/patches.arch/acpi_fix_fadt_32_bit_zero_length.patch b/patches.arch/acpi_fix_fadt_32_bit_zero_length.patch new file mode 100644 index 0000000..c087474 --- /dev/null +++ b/patches.arch/acpi_fix_fadt_32_bit_zero_length.patch @@ -0,0 +1,30 @@ +From: Thomas Renninger +Subject: Only use 32 bit addresses if they have a valid length +References: bug#581644 +Patch-Mainline: not yet + +Also not sure whether it will help, but it's a fix. + +Please remove this patch again after a while also if it's not +mainline. 
+ +--- + drivers/acpi/acpica/tbfadt.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/acpi/acpica/tbfadt.c ++++ b/drivers/acpi/acpica/tbfadt.c +@@ -550,11 +550,12 @@ static void acpi_tb_validate_fadt(void) + (!address64->address && length)) { + ACPI_WARNING((AE_INFO, + "Optional field %s has zero address or length: " +- "0x%8.8X%8.8X/0x%X", ++ "0x%8.8X%8.8X/0x%X - not using it", + name, + ACPI_FORMAT_UINT64(address64-> + address), + length)); ++ address64->address = 0; + } + } + } diff --git a/patches.arch/acpi_srat-pxm-rev-ia64.patch b/patches.arch/acpi_srat-pxm-rev-ia64.patch index c01c731..deaebfd 100644 --- a/patches.arch/acpi_srat-pxm-rev-ia64.patch +++ b/patches.arch/acpi_srat-pxm-rev-ia64.patch @@ -28,7 +28,7 @@ Signed-off-by: Kurt Garloff --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c -@@ -428,22 +428,24 @@ static u32 __devinitdata pxm_flag[PXM_FL +@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FL static struct acpi_table_slit __initdata *slit_table; cpumask_t early_cpu_possible_map = CPU_MASK_NONE; diff --git a/patches.arch/acpi_srat-pxm-rev-store.patch b/patches.arch/acpi_srat-pxm-rev-store.patch index 08b2d79..69f1d70 100644 --- a/patches.arch/acpi_srat-pxm-rev-store.patch +++ b/patches.arch/acpi_srat-pxm-rev-store.patch @@ -35,8 +35,8 @@ Signed-off-by: Kurt Garloff @@ -259,6 +261,7 @@ static int __init acpi_parse_srat(struct return -EINVAL; - srat = (struct acpi_table_srat *)table; -+ acpi_srat_revision = srat->header.revision; + /* Real work done in acpi_table_parse_srat below. */ ++ acpi_srat_revision = table->revision; return 0; } diff --git a/patches.arch/acpi_thermal_passive_blacklist.patch b/patches.arch/acpi_thermal_passive_blacklist.patch index 3f6331e..6084e87 100644 --- a/patches.arch/acpi_thermal_passive_blacklist.patch +++ b/patches.arch/acpi_thermal_passive_blacklist.patch @@ -4,20 +4,20 @@ References: https://bugzilla.novell.com/show_bug.cgi?id=333043 Patch-mainline: not yet --- - drivers/acpi/thermal.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 73 insertions(+) + drivers/acpi/thermal.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 93 insertions(+) --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c -@@ -42,6 +42,7 @@ - #include +@@ -41,6 +41,7 @@ + #include #include #include +#include #include #include #include -@@ -1383,6 +1384,66 @@ static void acpi_thermal_guess_offset(st +@@ -984,6 +985,86 @@ static void acpi_thermal_guess_offset(st tz->kelvin_offset = 2732; } @@ -80,11 +80,31 @@ Patch-mainline: not yet + }, + {}, +}; ++ ++static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds) ++{ ++ if (!tz) ++ return -EINVAL; ++ ++ /* Convert value to deci-seconds */ ++ tz->polling_frequency = seconds * 10; ++ ++ tz->thermal_zone->polling_delay = seconds * 1000; ++ ++ if (tz->tz_enabled) ++ thermal_zone_device_update(tz->thermal_zone); ++ ++ ACPI_DEBUG_PRINT((ACPI_DB_INFO, ++ "Polling frequency set to %lu seconds\n", ++ tz->polling_frequency/10)); ++ ++ return 0; ++} + static int acpi_thermal_add(struct acpi_device *device) { int result = 0; -@@ -1414,6 +1475,18 @@ static int acpi_thermal_add(struct acpi_ +@@ -1015,6 +1096,18 @@ static int acpi_thermal_add(struct acpi_ if (result) goto free_memory; @@ -100,6 +120,6 @@ Patch-mainline: not yet + } + } + - result = acpi_thermal_add_fs(device); - if (result) - goto unregister_thermal_zone; + printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n", + acpi_device_name(device), acpi_device_bid(device), + 
KELVIN_TO_CELSIUS(tz->temperature)); diff --git a/patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch b/patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch index a932f2b..9d2086b 100644 --- a/patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch +++ b/patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch @@ -30,7 +30,7 @@ CC: Yakui Zhao --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c -@@ -1313,6 +1313,21 @@ static int __init dmi_ignore_irq0_timer_ +@@ -1350,6 +1350,21 @@ static int __init dmi_ignore_irq0_timer_ return 0; } @@ -52,7 +52,7 @@ CC: Yakui Zhao /* * If your system is blacklisted here, but you find that acpi=force * works for you, please contact linux-acpi@vger.kernel.org -@@ -1388,6 +1403,32 @@ static struct dmi_system_id __initdata a +@@ -1425,6 +1440,32 @@ static struct dmi_system_id __initdata a DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, @@ -85,7 +85,7 @@ CC: Yakui Zhao {} }; -@@ -1583,6 +1624,18 @@ static int __init parse_acpi(char *arg) +@@ -1612,6 +1653,18 @@ static int __init parse_acpi(char *arg) } early_param("acpi", parse_acpi); diff --git a/patches.arch/i386-unwind-annotations b/patches.arch/i386-unwind-annotations new file mode 100644 index 0000000..c6c168c --- /dev/null +++ b/patches.arch/i386-unwind-annotations @@ -0,0 +1,15 @@ +From: jbeulich@novell.com +Subject: fix unwind annotations +Patch-mainline: queued for 2.6.39 + +--- head-2011-02-17.orig/arch/x86/lib/semaphore_32.S 2011-03-01 15:03:45.000000000 +0100 ++++ head-2011-02-17/arch/x86/lib/semaphore_32.S 2011-03-01 15:04:50.000000000 +0100 +@@ -36,7 +36,7 @@ + */ + #ifdef CONFIG_SMP + ENTRY(__write_lock_failed) +- CFI_STARTPROC simple ++ CFI_STARTPROC + FRAME + 2: LOCK_PREFIX + addl $ RW_LOCK_BIAS,(%eax) diff --git a/patches.arch/ia64-page-migration b/patches.arch/ia64-page-migration index a23ae43..69386fc 100644 --- a/patches.arch/ia64-page-migration +++ b/patches.arch/ia64-page-migration @@ -27,7 +27,7 @@ Signed-off-by: Russ Anderson --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig -@@ -505,6 +505,15 @@ config ARCH_PROC_KCORE_TEXT +@@ -511,6 +511,15 @@ config ARCH_PROC_KCORE_TEXT config IA64_MCA_RECOVERY tristate "MCA recovery from errors other than TLB." @@ -237,7 +237,7 @@ Signed-off-by: Russ Anderson + } + + list_add(&page->lru, &pagelist); -+ ret = migrate_pages(&pagelist, alloc_migrate_page, node, 0); ++ ret = migrate_pages(&pagelist, alloc_migrate_page, node, 0, true); + if (ret == 0) { + total_badpages++; + list_add_tail(&page->lru, &badpagelist); @@ -539,7 +539,7 @@ Signed-off-by: Russ Anderson */ #include #include -@@ -163,7 +166,14 @@ static int cmc_polling_enabled = 1; +@@ -164,7 +167,14 @@ static int cmc_polling_enabled = 1; * but encounters problems retrieving CPE logs. This should only be * necessary for debugging. 
*/ @@ -555,7 +555,7 @@ Signed-off-by: Russ Anderson extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); -@@ -523,6 +533,28 @@ int mca_recover_range(unsigned long addr +@@ -524,6 +534,28 @@ int mca_recover_range(unsigned long addr } EXPORT_SYMBOL_GPL(mca_recover_range); @@ -584,7 +584,7 @@ Signed-off-by: Russ Anderson #ifdef CONFIG_ACPI int cpe_vector = -1; -@@ -534,6 +566,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, v +@@ -535,6 +567,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, v static unsigned long cpe_history[CPE_HISTORY_LENGTH]; static int index; static DEFINE_SPINLOCK(cpe_history_lock); @@ -592,7 +592,7 @@ Signed-off-by: Russ Anderson IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", __func__, cpe_irq, smp_processor_id()); -@@ -580,6 +613,8 @@ ia64_mca_cpe_int_handler (int cpe_irq, v +@@ -581,6 +614,8 @@ ia64_mca_cpe_int_handler (int cpe_irq, v out: /* Get the CPE error record and log it */ ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); diff --git a/patches.arch/kmsg-fix-parameter-limitations b/patches.arch/kmsg-fix-parameter-limitations index 1518817..35bcfe5 100644 --- a/patches.arch/kmsg-fix-parameter-limitations +++ b/patches.arch/kmsg-fix-parameter-limitations @@ -26,13 +26,13 @@ Patch-mainline: Whenever kmsg is upstream Signed-off-by: Jeff Mahoney --- - include/linux/kernel.h | 2 +- + include/linux/printk.h | 2 +- scripts/kmsg-doc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -391,7 +391,7 @@ static inline char *pack_hex_byte(char * +--- a/include/linux/printk.h ++++ b/include/linux/printk.h +@@ -416,7 +416,7 @@ extern int hex_to_bin(char ch); /* generate magic string for scripts/kmsg-doc to parse */ #define pr_printk_hash(level, format, ...) \ diff --git a/patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic b/patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic index 7a6f87d..b63bf11 100644 --- a/patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic +++ b/patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic @@ -22,7 +22,7 @@ Signed-off-by: Alexander Graf --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -544,20 +544,6 @@ config KVM_GUEST +@@ -548,20 +548,6 @@ config KVM_GUEST This option enables various optimizations for running under the KVM hypervisor. @@ -46,8 +46,8 @@ Signed-off-by: Alexander Graf --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -29,15 +29,6 @@ - #include - #include + #include + #include -#ifdef CONFIG_KVM_IODELAY -/* diff --git a/patches.arch/kvm-split-paravirt-ops-by-functionality b/patches.arch/kvm-split-paravirt-ops-by-functionality index 392748e..d1b9b78 100644 --- a/patches.arch/kvm-split-paravirt-ops-by-functionality +++ b/patches.arch/kvm-split-paravirt-ops-by-functionality @@ -28,15 +28,15 @@ was the closest I could get to atomic (for bisect) while staying sane. 
Signed-off-by: Alexander Graf --- - arch/x86/Kconfig | 47 +++++++++++++++++++++++++--- + arch/x86/Kconfig | 41 +++++++++++++++++++++++--- arch/x86/include/asm/apic.h | 2 - arch/x86/include/asm/desc.h | 4 +- arch/x86/include/asm/fixmap.h | 2 - arch/x86/include/asm/io.h | 2 - - arch/x86/include/asm/irqflags.h | 21 +++++++++--- + arch/x86/include/asm/irqflags.h | 21 ++++++++++--- arch/x86/include/asm/mmu_context.h | 4 +- arch/x86/include/asm/msr.h | 4 +- - arch/x86/include/asm/paravirt.h | 44 +++++++++++++++++++++++++- + arch/x86/include/asm/paravirt.h | 44 ++++++++++++++++++++++++++-- arch/x86/include/asm/paravirt_types.h | 12 +++++++ arch/x86/include/asm/pgalloc.h | 2 - arch/x86/include/asm/pgtable-3level_types.h | 2 - @@ -44,7 +44,7 @@ Signed-off-by: Alexander Graf arch/x86/include/asm/processor.h | 2 - arch/x86/include/asm/required-features.h | 2 - arch/x86/include/asm/smp.h | 2 - - arch/x86/include/asm/system.h | 13 +++++-- + arch/x86/include/asm/system.h | 13 +++++--- arch/x86/include/asm/tlbflush.h | 4 +- arch/x86/kernel/head_64.S | 2 - arch/x86/kernel/paravirt.c | 2 + @@ -52,29 +52,20 @@ Signed-off-by: Alexander Graf arch/x86/kernel/vsmp_64.c | 2 - arch/x86/kernel/x8664_ksyms_64.c | 2 - arch/x86/xen/Kconfig | 2 - - 24 files changed, 146 insertions(+), 37 deletions(-) + 24 files changed, 140 insertions(+), 37 deletions(-) --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -362,7 +362,7 @@ endif - +@@ -367,7 +367,7 @@ endif config X86_VSMP bool "ScaleMP vSMP" + select PARAVIRT_GUEST - select PARAVIRT + select PARAVIRT_ALL depends on X86_64 && PCI depends on X86_EXTENDED_PLATFORM ---help--- -@@ -510,7 +510,7 @@ source "arch/x86/xen/Kconfig" - - config VMI - bool "VMI Guest support (DEPRECATED)" -- select PARAVIRT -+ select PARAVIRT_ALL - depends on X86_32 - ---help--- - VMI provides a paravirtualized interface to the VMware ESX server -@@ -529,7 +529,6 @@ config VMI +@@ -533,7 +533,6 @@ config VMI config KVM_CLOCK bool "KVM paravirtualized clock" @@ -82,7 +73,7 @@ Signed-off-by: Alexander Graf select PARAVIRT_CLOCK ---help--- Turning on this option will allow you to run a paravirtualized clock -@@ -540,7 +539,7 @@ config KVM_CLOCK +@@ -544,7 +543,7 @@ config KVM_CLOCK config KVM_GUEST bool "KVM Guest support" @@ -91,34 +82,29 @@ Signed-off-by: Alexander Graf ---help--- This option enables various optimizations for running under the KVM hypervisor. -@@ -568,8 +567,48 @@ config PARAVIRT_SPINLOCKS +@@ -572,8 +571,42 @@ config PARAVIRT_SPINLOCKS If you are unsure how to answer this question, answer N. 
+config PARAVIRT_CPU + bool + select PARAVIRT -+ default n + +config PARAVIRT_TIME + bool + select PARAVIRT -+ default n + +config PARAVIRT_IRQ + bool + select PARAVIRT -+ default n + +config PARAVIRT_APIC + bool + select PARAVIRT -+ default n + +config PARAVIRT_MMU + bool + select PARAVIRT -+ default n + +# +# This is a placeholder to activate the old "include all pv-ops functionality" @@ -132,14 +118,13 @@ Signed-off-by: Alexander Graf + select PARAVIRT_IRQ + select PARAVIRT_APIC + select PARAVIRT_MMU -+ default n + config PARAVIRT_CLOCK bool + select PARAVIRT_TIME - default n endif + --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -81,7 +81,7 @@ static inline bool apic_from_smp_config( @@ -205,11 +190,11 @@ Signed-off-by: Alexander Graf #ifndef __ASSEMBLY__ +#ifndef CONFIG_PARAVIRT_IRQ - static inline unsigned long __raw_local_save_flags(void) + static inline unsigned long arch_local_save_flags(void) { return native_save_fl(); @@ -110,12 +112,17 @@ static inline unsigned long __raw_local_ - + arch_local_irq_disable(); return flags; } -#else @@ -259,7 +244,7 @@ Signed-off-by: Alexander Graf -#endif /* CONFIG_PARAVIRT */ #ifndef __ASSEMBLY__ - #define raw_local_save_flags(flags) \ + static inline int arch_irqs_disabled_flags(unsigned long flags) --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -6,14 +6,14 @@ @@ -345,12 +330,12 @@ Signed-off-by: Alexander Graf #endif +#ifdef CONFIG_PARAVIRT_IRQ - static inline void raw_safe_halt(void) + static inline void arch_safe_halt(void) { PVOP_VCALL0(pv_irq_ops.safe_halt); @@ -114,14 +121,18 @@ static inline void halt(void) { - PVOP_VCALL0(pv_irq_ops.safe_halt); + PVOP_VCALL0(pv_irq_ops.halt); } +#endif /* CONFIG_PARAVIRT_IRQ */ @@ -436,11 +421,11 @@ Signed-off-by: Alexander Graf ((struct paravirt_callee_save) { func }) +#ifdef CONFIG_PARAVIRT_IRQ - static inline unsigned long __raw_local_save_flags(void) + static inline notrace unsigned long arch_local_save_flags(void) { return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); @@ -857,6 +880,7 @@ static inline unsigned long __raw_local_ - raw_local_irq_disable(); + arch_local_irq_disable(); return f; } +#endif /* CONFIG_PARAVIRT_IRQ */ @@ -576,9 +561,9 @@ Signed-off-by: Alexander Graf --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,7 +28,7 @@ extern unsigned long empty_zero_page[PAG - extern spinlock_t pgd_lock; - extern struct list_head pgd_list; + extern struct mm_struct *pgd_page_get_mm(struct page *page); + -#ifdef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT_MMU #include @@ -586,7 +571,7 @@ Signed-off-by: Alexander Graf #define set_pte(ptep, pte) native_set_pte(ptep, pte) --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h -@@ -573,7 +573,7 @@ static inline void native_swapgs(void) +@@ -569,7 +569,7 @@ static inline void native_swapgs(void) #endif } @@ -722,10 +707,10 @@ Signed-off-by: Alexander Graf * ~AC is a shadow of IF. 
If IF is 'on' AC should be 'off' --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c -@@ -55,6 +55,6 @@ EXPORT_SYMBOL(__memcpy); +@@ -54,6 +54,6 @@ EXPORT_SYMBOL(memcpy); + EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(empty_zero_page); - EXPORT_SYMBOL(init_level4_pgt); -#ifndef CONFIG_PARAVIRT +#ifndef CONFIG_PARAVIRT_CPU EXPORT_SYMBOL(native_load_gs_index); diff --git a/patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature b/patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature index 75f1a4a..27ce090 100644 --- a/patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature +++ b/patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature @@ -22,7 +22,7 @@ Signed-off-by: Alexander Graf --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -539,11 +539,38 @@ config KVM_CLOCK +@@ -543,11 +543,38 @@ config KVM_CLOCK config KVM_GUEST bool "KVM Guest support" @@ -65,8 +65,8 @@ Signed-off-by: Alexander Graf --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -29,6 +29,16 @@ - #include - #include + #include + #include +#ifdef CONFIG_KVM_IODELAY +/* @@ -80,7 +80,7 @@ Signed-off-by: Alexander Graf +#ifdef CONFIG_KVM_MMU #define MMU_QUEUE_SIZE 1024 - struct kvm_para_state { + static int kvmapf = 1; @@ -43,13 +53,6 @@ static struct kvm_para_state *kvm_para_s return &per_cpu(para_state, raw_smp_processor_id()); } @@ -92,9 +92,9 @@ Signed-off-by: Alexander Graf -{ -} - - static void kvm_mmu_op(void *buffer, unsigned len) - { - int r; + #define KVM_TASK_SLEEP_HASHBITS 8 + #define KVM_TASK_SLEEP_HASHSIZE (1< PG_head, /* A head page */ PG_tail, /* A tail page */ #else -@@ -168,14 +169,21 @@ static inline int TestClearPage##uname(s +@@ -167,14 +168,21 @@ static inline int TestClearPage##uname(s static inline int __TestClearPage##uname(struct page *page) \ { return __test_and_clear_bit(PG_##lname, &page->flags); } @@ -53,7 +53,7 @@ Reviewed-by: Christoph Lameter { return 0; } #define TESTSCFLAG(uname, lname) \ -@@ -393,6 +401,12 @@ static inline void __ClearPageTail(struc +@@ -391,6 +399,12 @@ static inline void __ClearPageTail(struc #endif /* !PAGEFLAGS_EXTENDED */ @@ -63,12 +63,12 @@ Reviewed-by: Christoph Lameter +PAGEFLAG_FALSE(MemError) +#endif + - #ifdef CONFIG_MMU - #define __PG_MLOCKED (1 << PG_mlocked) - #else + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* + * PageHuge() only returns true for hugetlbfs pages, but not for --- a/mm/migrate.c +++ b/mm/migrate.c -@@ -53,6 +53,7 @@ int migrate_prep(void) +@@ -63,6 +63,7 @@ int migrate_prep_local(void) return 0; } @@ -76,15 +76,15 @@ Reviewed-by: Christoph Lameter /* * Add isolated pages on the list back to the LRU under page lock -@@ -75,6 +76,7 @@ int putback_lru_pages(struct list_head * +@@ -80,6 +81,7 @@ void putback_lru_pages(struct list_head + putback_lru_page(page); } - return count; } +EXPORT_SYMBOL(putback_lru_pages); /* * Restore a potential migration pte to a working pte entry -@@ -658,6 +660,25 @@ unlock: +@@ -701,6 +703,25 @@ unlock: * restored. 
*/ list_del(&page->lru); @@ -110,7 +110,7 @@ Reviewed-by: Christoph Lameter dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); putback_lru_page(page); -@@ -732,6 +753,17 @@ int migrate_pages(struct list_head *from +@@ -775,6 +796,17 @@ int migrate_pages(struct list_head *from } } } @@ -128,7 +128,7 @@ Reviewed-by: Christoph Lameter rc = 0; out: if (!swapwrite) -@@ -744,6 +776,7 @@ out: +@@ -787,6 +819,7 @@ out: return nr_failed + retry; } @@ -138,7 +138,7 @@ Reviewed-by: Christoph Lameter /* --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -1091,6 +1091,7 @@ int isolate_lru_page(struct page *page) +@@ -1127,6 +1127,7 @@ int isolate_lru_page(struct page *page) } return ret; } diff --git a/patches.arch/perf_timechart_fix_zero_timestamps.patch b/patches.arch/perf_timechart_fix_zero_timestamps.patch new file mode 100644 index 0000000..75bb39c --- /dev/null +++ b/patches.arch/perf_timechart_fix_zero_timestamps.patch @@ -0,0 +1,32 @@ +From: Thomas Renninger +Subject: Fix huge and wronge C-state drawings due to uninitialized start/end timestamps +Patch-Mainline: not yet +References: none + +Signed-off-by: Thomas Renninger + +--- + tools/perf/builtin-timechart.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +Index: linux-2.6.37-master/tools/perf/builtin-timechart.c +=================================================================== +--- linux-2.6.37-master.orig/tools/perf/builtin-timechart.c ++++ linux-2.6.37-master/tools/perf/builtin-timechart.c +@@ -654,8 +654,15 @@ static void draw_c_p_states(void) + * two pass drawing so that the P state bars are on top of the C state blocks + */ + while (pwr) { +- if (pwr->type == CSTATE) ++ if (pwr->type == CSTATE) { ++ /* If the first event is an _end event, start timestamp is zero ++ -> ignore these */ ++ if (pwr->start_time == 0 || pwr->end_time == 0) { ++ pwr = pwr->next; ++ continue; ++ } + svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); ++ } + pwr = pwr->next; + } + diff --git a/patches.arch/powernow-k8-add-core-performance-boost-support b/patches.arch/powernow-k8-add-core-performance-boost-support deleted file mode 100644 index 02774e1..0000000 --- a/patches.arch/powernow-k8-add-core-performance-boost-support +++ /dev/null @@ -1,266 +0,0 @@ -From: Borislav Petkov -Date: Wed, 31 Mar 2010 19:56:42 +0000 (+0200) -Subject: powernow-k8: Add core performance boost support -Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip.git -Git-commit: 73860c6b2fd159a35637e233d735e36887c266ad -References: bnc#602209 -Patch-mainline: 2.6.35-rc1 - -powernow-k8: Add core performance boost support - -Starting with F10h, revE, AMD processors add support for a dynamic -core boosting feature called Core Performance Boost. When a specific -condition is present, a subset of the cores on a system are boosted -beyond their P0 operating frequency to speed up the performance of -single-threaded applications. - -In the normal case, the system comes out of reset with core boosting -enabled. This patch adds a sysfs knob with which core boosting can be -switched on or off for benchmarking purposes. - -While at it, make the CPB code hotplug-aware so that taking cores -offline wouldn't interfere with boosting the remaining online cores. -Furthermore, add cpu_online_mask hotplug protection as suggested by -Andrew. - -Finally, cleanup the driver init codepath and update copyrights. 
- -Signed-off-by: Borislav Petkov -LKML-Reference: <1270065406-1814-3-git-send-email-bp@amd64.org> -Reviewed-by: Thomas Renninger -Signed-off-by: H. Peter Anvin -Acked-by: Jeff Mahoney ---- - - arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 161 ++++++++++++++++++++++++++++-- - arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 2 - 2 files changed, 151 insertions(+), 12 deletions(-) - ---- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c -+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c -@@ -1,6 +1,5 @@ -- - /* -- * (c) 2003-2006 Advanced Micro Devices, Inc. -+ * (c) 2003-2010 Advanced Micro Devices, Inc. - * Your use of this code is subject to the terms and conditions of the - * GNU general public license version 2. See "COPYING" or - * http://www.gnu.org/licenses/gpl.html -@@ -54,6 +53,10 @@ static DEFINE_PER_CPU(struct powernow_k8 - - static int cpu_family = CPU_OPTERON; - -+/* core performance boost */ -+static bool cpb_capable, cpb_enabled; -+static struct msr *msrs; -+ - #ifndef CONFIG_SMP - static inline const struct cpumask *cpu_core_mask(int cpu) - { -@@ -1394,8 +1397,77 @@ out: - return khz; - } - -+static void _cpb_toggle_msrs(bool t) -+{ -+ int cpu; -+ -+ get_online_cpus(); -+ -+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); -+ -+ for_each_cpu(cpu, cpu_online_mask) { -+ struct msr *reg = per_cpu_ptr(msrs, cpu); -+ if (t) -+ reg->l &= ~BIT(25); -+ else -+ reg->l |= BIT(25); -+ } -+ wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); -+ -+ put_online_cpus(); -+} -+ -+/* -+ * Switch on/off core performance boosting. -+ * -+ * 0=disable -+ * 1=enable. -+ */ -+static void cpb_toggle(bool t) -+{ -+ if (!cpb_capable) -+ return; -+ -+ if (t && !cpb_enabled) { -+ cpb_enabled = true; -+ _cpb_toggle_msrs(t); -+ printk(KERN_INFO PFX "Core Boosting enabled.\n"); -+ } else if (!t && cpb_enabled) { -+ cpb_enabled = false; -+ _cpb_toggle_msrs(t); -+ printk(KERN_INFO PFX "Core Boosting disabled.\n"); -+ } -+} -+ -+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, -+ size_t count) -+{ -+ int ret = -EINVAL; -+ unsigned long val = 0; -+ -+ ret = strict_strtoul(buf, 10, &val); -+ if (!ret && (val == 0 || val == 1) && cpb_capable) -+ cpb_toggle(val); -+ else -+ return -EINVAL; -+ -+ return count; -+} -+ -+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) -+{ -+ return sprintf(buf, "%u\n", cpb_enabled); -+} -+ -+#define define_one_rw(_name) \ -+static struct freq_attr _name = \ -+__ATTR(_name, 0644, show_##_name, store_##_name) -+ -+define_one_rw(cpb); -+ - static struct freq_attr *powernow_k8_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, -+ &cpb, - NULL, - }; - -@@ -1411,10 +1483,51 @@ static struct cpufreq_driver cpufreq_amd - .attr = powernow_k8_attr, - }; - -+/* -+ * Clear the boost-disable flag on the CPU_DOWN path so that this cpu -+ * cannot block the remaining ones from boosting. On the CPU_UP path we -+ * simply keep the boost-disable flag in sync with the current global -+ * state. 
-+ */ -+static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action, -+ void *hcpu) -+{ -+ unsigned cpu = (long)hcpu; -+ u32 lo, hi; -+ -+ switch (action) { -+ case CPU_UP_PREPARE: -+ case CPU_UP_PREPARE_FROZEN: -+ -+ if (!cpb_enabled) { -+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); -+ lo |= BIT(25); -+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); -+ } -+ break; -+ -+ case CPU_DOWN_PREPARE: -+ case CPU_DOWN_PREPARE_FROZEN: -+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); -+ lo &= ~BIT(25); -+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); -+ break; -+ -+ default: -+ break; -+ } -+ -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block __cpuinitdata cpb_nb = { -+ .notifier_call = cpb_notify, -+}; -+ - /* driver entry point for init */ - static int __cpuinit powernowk8_init(void) - { -- unsigned int i, supported_cpus = 0; -+ unsigned int i, supported_cpus = 0, cpu; - - for_each_online_cpu(i) { - int rc; -@@ -1423,15 +1536,36 @@ static int __cpuinit powernowk8_init(voi - supported_cpus++; - } - -- if (supported_cpus == num_online_cpus()) { -- printk(KERN_INFO PFX "Found %d %s " -- "processors (%d cpu cores) (" VERSION ")\n", -- num_online_nodes(), -- boot_cpu_data.x86_model_id, supported_cpus); -- return cpufreq_register_driver(&cpufreq_amd64_driver); -+ if (supported_cpus != num_online_cpus()) -+ return -ENODEV; -+ -+ printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", -+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); -+ -+ if (boot_cpu_has(X86_FEATURE_CPB)) { -+ -+ cpb_capable = true; -+ -+ register_cpu_notifier(&cpb_nb); -+ -+ msrs = msrs_alloc(); -+ if (!msrs) { -+ printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); -+ return -ENOMEM; -+ } -+ -+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); -+ -+ for_each_cpu(cpu, cpu_online_mask) { -+ struct msr *reg = per_cpu_ptr(msrs, cpu); -+ cpb_enabled |= !(!!(reg->l & BIT(25))); -+ } -+ -+ printk(KERN_INFO PFX "Core Performance Boosting: %s.\n", -+ (cpb_enabled ? 
"on" : "off")); - } - -- return -ENODEV; -+ return cpufreq_register_driver(&cpufreq_amd64_driver); - } - - /* driver entry point for term */ -@@ -1439,6 +1573,13 @@ static void __exit powernowk8_exit(void) - { - dprintk("exit\n"); - -+ if (boot_cpu_has(X86_FEATURE_CPB)) { -+ msrs_free(msrs); -+ msrs = NULL; -+ -+ unregister_cpu_notifier(&cpb_nb); -+ } -+ - cpufreq_unregister_driver(&cpufreq_amd64_driver); - } - ---- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h -+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h -@@ -5,7 +5,6 @@ - * http://www.gnu.org/licenses/gpl.html - */ - -- - enum pstate { - HW_PSTATE_INVALID = 0xff, - HW_PSTATE_0 = 0, -@@ -55,7 +54,6 @@ struct powernow_k8_data { - struct cpumask *available_cores; - }; - -- - /* processor's cpuid instruction support */ - #define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */ - #define CPUID_XFAM 0x0ff00000 /* extended family */ diff --git a/patches.arch/ppc-pegasos-console-autodetection.patch b/patches.arch/ppc-pegasos-console-autodetection.patch index d26df80..82da310 100644 --- a/patches.arch/ppc-pegasos-console-autodetection.patch +++ b/patches.arch/ppc-pegasos-console-autodetection.patch @@ -8,7 +8,7 @@ Patch-mainline: never --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c -@@ -294,7 +294,7 @@ static void chrp_init_early(void) +@@ -293,7 +293,7 @@ static void chrp_init_early(void) if (!property) goto out_put; if (!strcmp(property, "failsafe") || !strcmp(property, "serial")) diff --git a/patches.arch/ppc-prom-nodisplay.patch b/patches.arch/ppc-prom-nodisplay.patch index 34a7379..18916f6 100644 --- a/patches.arch/ppc-prom-nodisplay.patch +++ b/patches.arch/ppc-prom-nodisplay.patch @@ -52,7 +52,7 @@ DEFAULT CATCH!, code=fff00300 at %SRR0: 00c18ccc %SRR1: 00003030 #ifdef CONFIG_PPC64 static int __initdata prom_iommu_force_on; static int __initdata prom_iommu_off; -@@ -570,6 +571,14 @@ static void __init early_cmdline_parse(v +@@ -596,6 +597,14 @@ static void __init early_cmdline_parse(v #endif /* CONFIG_CMDLINE */ prom_printf("command line: %s\n", RELOC(prom_cmd_line)); @@ -67,7 +67,7 @@ DEFAULT CATCH!, code=fff00300 at %SRR0: 00c18ccc %SRR1: 00003030 #ifdef CONFIG_PPC64 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu=")); if (opt) { -@@ -2546,6 +2555,7 @@ unsigned long __init prom_init(unsigned +@@ -2570,6 +2579,7 @@ unsigned long __init prom_init(unsigned /* * Initialize display devices */ diff --git a/patches.arch/ppc-vio-modalias.patch b/patches.arch/ppc-vio-modalias.patch deleted file mode 100644 index ef17b56..0000000 --- a/patches.arch/ppc-vio-modalias.patch +++ /dev/null @@ -1,36 +0,0 @@ -From: Olaf Hering -Subject: [PATCH] poweroc: vio modalias -Patch-mainline: not yet - -Acked-by: Olaf Hering ---- - arch/powerpc/kernel/vio.c | 15 +++++++++++++++ - 1 file changed, 15 insertions(+) - ---- a/arch/powerpc/kernel/vio.c -+++ b/arch/powerpc/kernel/vio.c -@@ -1319,9 +1319,24 @@ static ssize_t devspec_show(struct devic - return sprintf(buf, "%s\n", of_node ? 
of_node->full_name : "none"); - } - -+static ssize_t modalias_show (struct device *dev, struct device_attribute *attr, -+ char *buf) -+{ -+ struct device_node *of_node = dev->archdata.of_node; -+ const char *compat; -+ int i = 0; -+ -+ if (of_node) { -+ compat = of_get_property(of_node, "compatible", &i); -+ i = sprintf (buf, "vio:T%sS%s\n", of_node->type, compat); -+ } -+ return i; -+} -+ - static struct device_attribute vio_dev_attrs[] = { - __ATTR_RO(name), - __ATTR_RO(devspec), -+ __ATTR_RO(modalias), - __ATTR_NULL - }; - diff --git a/patches.arch/ppc-vmcoreinfo.diff b/patches.arch/ppc-vmcoreinfo.diff deleted file mode 100644 index 5709b52..0000000 --- a/patches.arch/ppc-vmcoreinfo.diff +++ /dev/null @@ -1,42 +0,0 @@ -Date: Thu, 9 Oct 2008 11:20:27 -0400 -From: Neil Horman -To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org, - vgoyal@redhat.com, hbabu@us.ibm.com -Subject: [PATCH] add additional symbols to /sys/kernel/vmcoreinfo data for - ppc(64) -Cc: nhorman@tuxdriver.com -Patch-mainline: not yet - -Hey- - The makdumpdile dump filtering program, in some modes of operation needs -the node_data and/or contig_page_data symbols to function properly. These -symbols are missing from the powerpc kernel. This patch adds those symbols in -properly. Tested successfully by myself and the reporter. - -Regards -Neil - -Signed-off-by: Neil Horman -Acked-by: Bernhard Walle - - arch/powerpc/kernel/machine_kexec.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - - ---- a/arch/powerpc/kernel/machine_kexec.c -+++ b/arch/powerpc/kernel/machine_kexec.c -@@ -45,6 +45,14 @@ void machine_kexec_cleanup(struct kimage - ppc_md.machine_kexec_cleanup(image); - } - -+void arch_crash_save_vmcoreinfo(void) -+{ -+#ifdef CONFIG_NEED_MULTIPLE_NODES -+ VMCOREINFO_SYMBOL(node_data); -+ VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); -+#endif -+} -+ - /* - * Do not allocate memory (or fail in any way) in machine_kexec(). - * We are past the point of no return, committed to rebooting now. 
diff --git a/patches.arch/ppc64-xmon-dmesg-printing.patch b/patches.arch/ppc64-xmon-dmesg-printing.patch index edf18cc..7cb2e6d 100644 --- a/patches.arch/ppc64-xmon-dmesg-printing.patch +++ b/patches.arch/ppc64-xmon-dmesg-printing.patch @@ -16,8 +16,8 @@ Signed-off-by: Olaf Hering --- arch/powerpc/xmon/xmon.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++ - kernel/printk.c | 15 ++++++++++++ - 2 files changed, 72 insertions(+) + kernel/printk.c | 2 - + 2 files changed, 58 insertions(+), 1 deletion(-) --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -29,7 +29,7 @@ Signed-off-by: Olaf Hering static int cpu_cmd(void); static void csum(void); static void bootcmds(void); -@@ -194,6 +195,7 @@ Commands:\n\ +@@ -197,6 +198,7 @@ Commands:\n\ #endif "\ C checksum\n\ @@ -37,7 +37,7 @@ Signed-off-by: Olaf Hering d dump bytes\n\ di dump instructions\n\ df dump float values\n\ -@@ -828,6 +830,9 @@ cmds(struct pt_regs *excp) +@@ -831,6 +833,9 @@ cmds(struct pt_regs *excp) case 'd': dump(); break; @@ -47,11 +47,11 @@ Signed-off-by: Olaf Hering case 'l': symbol_lookup(); break; -@@ -2599,6 +2604,58 @@ static void xmon_print_symbol(unsigned l +@@ -2607,6 +2612,58 @@ static void xmon_print_symbol(unsigned l printf("%s", after); } -+extern void debugger_syslog_data(char *syslog_data[4]); ++extern void kdb_syslog_data(char *syslog_data[]); +#define SYSLOG_WRAP(p) if (p < syslog_data[0]) p = syslog_data[1]-1; \ + else if (p >= syslog_data[1]) p = syslog_data[0]; + @@ -63,7 +63,7 @@ Signed-off-by: Olaf Hering + /* syslog_data[0,1] physical start, end+1. + * syslog_data[2,3] logical start, end+1. + */ -+ debugger_syslog_data(syslog_data); ++ kdb_syslog_data(syslog_data); + if (syslog_data[2] == syslog_data[3]) + return; + logsize = syslog_data[1] - syslog_data[0]; @@ -108,25 +108,12 @@ Signed-off-by: Olaf Hering { --- a/kernel/printk.c +++ b/kernel/printk.c -@@ -413,6 +413,21 @@ SYSCALL_DEFINE3(syslog, int, type, char +@@ -416,7 +416,7 @@ SYSCALL_DEFINE3(syslog, int, type, char return do_syslog(type, buf, len, SYSLOG_FROM_CALL); } -+#ifdef CONFIG_DEBUG_KERNEL -+/* Its very handy to be able to view the syslog buffer during debug. -+ * But do_syslog() uses locks so it cannot be used during debugging. -+ * Instead, provide the start and end of the physical and logical logs. -+ * This is equivalent to do_syslog(3). -+ */ -+void debugger_syslog_data(char *syslog_data[4]) -+{ -+ syslog_data[0] = log_buf; -+ syslog_data[1] = log_buf + log_buf_len; -+ syslog_data[2] = log_buf + log_end - (logged_chars < log_buf_len ? logged_chars : log_buf_len); -+ syslog_data[3] = log_buf + log_end; -+} -+#endif /* CONFIG_DEBUG_KERNEL */ -+ - /* - * Call the console drivers on a range of log_buf - */ +-#ifdef CONFIG_KGDB_KDB ++#if defined(CONFIG_KGDB_KDB) || defined(CONFIG_DEBUG_KERNEL) + /* kdb dmesg command needs access to the syslog buffer. do_syslog() + * uses locks so it cannot be used during debugging. Just tell kdb + * where the start and end of the physical and logical logs are. This diff --git a/patches.arch/s390-message-catalog-fix.diff b/patches.arch/s390-message-catalog-fix.diff new file mode 100644 index 0000000..2a62770 --- /dev/null +++ b/patches.arch/s390-message-catalog-fix.diff @@ -0,0 +1,23 @@ +From: Jiri Slaby +Subject: fix build on s390 as of 2.6.36-rc4 +Patch-mainline: never + +This fixes patches.arch/s390-message-catalog.diff build. 
+ +Signed-off-by: Jiri Slaby + +--- + include/linux/device.h | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -660,6 +660,8 @@ int printk_dev_hash(const char *, const + + #endif + ++#define dev_printk(level, dev, format, arg...) \ ++ dev_printk_hash(level , dev, format, ## arg) + #define dev_emerg(dev, format, arg...) \ + dev_printk_hash(KERN_EMERG , dev , format , ## arg) + #define dev_alert(dev, format, arg...) \ diff --git a/patches.arch/s390-message-catalog.diff b/patches.arch/s390-message-catalog.diff index d83df1a..fcfcbd6 100644 --- a/patches.arch/s390-message-catalog.diff +++ b/patches.arch/s390-message-catalog.diff @@ -70,7 +70,7 @@ Acked-by: John Jolly kernel/printk.c | 45 scripts/Makefile.build | 14 scripts/kmsg-doc | 479 +++++++++ - 47 files changed, 8317 insertions(+), 15 deletions(-) + 47 files changed, 8323 insertions(+), 9 deletions(-) --- /dev/null +++ b/Documentation/kmsg/s390/aes_s390 @@ -7916,25 +7916,25 @@ Acked-by: John Jolly # Use make M=dir to specify directory of external module to build # Old syntax make ... SUBDIRS=$PWD is still supported # Setting the environment variable KBUILD_EXTMOD take precedence -@@ -329,6 +343,7 @@ CHECK = sparse +@@ -331,6 +345,7 @@ CHECK = sparse CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void $(CF) +KMSG_CHECK = $(srctree)/scripts/kmsg-doc - MODFLAGS = -DMODULE - CFLAGS_MODULE = $(MODFLAGS) - AFLAGS_MODULE = $(MODFLAGS) -@@ -371,6 +386,7 @@ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODU - export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS - export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV + CFLAGS_MODULE = + AFLAGS_MODULE = + LDFLAGS_MODULE = +@@ -379,6 +394,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAG export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE + export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE + export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL +export KBUILD_KMSG_CHECK KMSG_CHECK # When compiling out-of-tree modules, put MODVERDIR in the module # tree rather than in the kernel tree. The kernel tree might --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig -@@ -597,6 +597,14 @@ bool "s390 guest support for KVM (EXPERI +@@ -581,6 +581,14 @@ bool "s390 guest support for KVM (EXPERI virtio transport. If KVM is detected, the virtio console will be the default console. @@ -7947,14 +7947,15 @@ Acked-by: John Jolly + driver code. See "Documentation/s390/kmsg.txt" for more details. + config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on PROC_FS + def_bool y + prompt "Enable seccomp to safely compute untrusted bytecode" --- a/include/linux/device.h +++ b/include/linux/device.h -@@ -628,20 +628,40 @@ extern const char *dev_driver_string(con - printk(level "%s %s: " format , dev_driver_string(dev) , \ - dev_name(dev) , ## arg) +@@ -643,6 +643,38 @@ extern const char *dev_driver_string(con + #ifdef CONFIG_PRINTK + ++#if defined(KMSG_COMPONENT) && (defined(CONFIG_KMSG_IDS) || defined(__KMSG_CHECKER)) +/* dev_printk_hash for message documentation */ +#if defined(__KMSG_CHECKER) && defined(KMSG_COMPONENT) + @@ -7969,39 +7970,38 @@ Acked-by: John Jolly + printk_dev_hash(level "%s.%06x: ", dev_driver_string(dev), \ + "%s: " format, dev_name(dev), ## arg) + -+#else /* !defined(CONFIG_KMSG_IDS) */ -+ -+#define dev_printk_hash dev_printk -+ +#endif + - #define dev_emerg(dev, format, arg...) 
\ -- dev_printk(KERN_EMERG , dev , format , ## arg) ++#define dev_emerg(dev, format, arg...) \ + dev_printk_hash(KERN_EMERG , dev , format , ## arg) - #define dev_alert(dev, format, arg...) \ -- dev_printk(KERN_ALERT , dev , format , ## arg) ++#define dev_alert(dev, format, arg...) \ + dev_printk_hash(KERN_ALERT , dev , format , ## arg) - #define dev_crit(dev, format, arg...) \ -- dev_printk(KERN_CRIT , dev , format , ## arg) ++#define dev_crit(dev, format, arg...) \ + dev_printk_hash(KERN_CRIT , dev , format , ## arg) - #define dev_err(dev, format, arg...) \ -- dev_printk(KERN_ERR , dev , format , ## arg) ++#define dev_err(dev, format, arg...) \ + dev_printk_hash(KERN_ERR , dev , format , ## arg) - #define dev_warn(dev, format, arg...) \ -- dev_printk(KERN_WARNING , dev , format , ## arg) ++#define dev_warn(dev, format, arg...) \ + dev_printk_hash(KERN_WARNING , dev , format , ## arg) - #define dev_notice(dev, format, arg...) \ -- dev_printk(KERN_NOTICE , dev , format , ## arg) ++#define dev_notice(dev, format, arg...) \ + dev_printk_hash(KERN_NOTICE , dev , format , ## arg) - #define dev_info(dev, format, arg...) \ -- dev_printk(KERN_INFO , dev , format , ## arg) ++#define _dev_info(dev, format, arg...) \ + dev_printk_hash(KERN_INFO , dev , format , ## arg) ++#else + extern int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); +@@ -660,7 +692,7 @@ extern int dev_notice(const struct devic + __attribute__ ((format (printf, 2, 3))); + extern int _dev_info(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +- ++#endif + #else - #if defined(DEBUG) - #define dev_dbg(dev, format, arg...) \ ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -387,22 +387,41 @@ static inline char *pack_hex_byte(char * + static inline int dev_printk(const char *level, const struct device *dev, +--- a/include/linux/printk.h ++++ b/include/linux/printk.h +@@ -412,23 +412,42 @@ extern int hex_to_bin(char ch); #define pr_fmt(fmt) fmt #endif @@ -8025,26 +8025,27 @@ Acked-by: John Jolly +#endif + #define pr_emerg(fmt, ...) \ -- printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_EMERG, fmt, ##__VA_ARGS__) +- printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) ++ pr_printk_hash(KERN_EMERG, fmt, ##__VA_ARGS__) #define pr_alert(fmt, ...) \ -- printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_ALERT, fmt, ##__VA_ARGS__) +- printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) ++ pr_printk_hash(KERN_ALERT, fmt, ##__VA_ARGS__) #define pr_crit(fmt, ...) \ -- printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_CRIT, fmt, ##__VA_ARGS__) +- printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) ++ pr_printk_hash(KERN_CRIT, fmt, ##__VA_ARGS__) #define pr_err(fmt, ...) \ -- printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_ERR, fmt, ##__VA_ARGS__) +- printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) ++ pr_printk_hash(KERN_ERR, fmt, ##__VA_ARGS__) #define pr_warning(fmt, ...) \ -- printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_WARNING, fmt, ##__VA_ARGS__) +- printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) ++ pr_printk_hash(KERN_WARNING, fmt, ##__VA_ARGS__) + #define pr_warn pr_warning #define pr_notice(fmt, ...) \ -- printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_NOTICE, fmt, ##__VA_ARGS__) +- printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) ++ pr_printk_hash(KERN_NOTICE, fmt, ##__VA_ARGS__) #define pr_info(fmt, ...) 
\ -- printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_INFO, fmt, ##__VA_ARGS__) +- printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) ++ pr_printk_hash(KERN_INFO, fmt, ##__VA_ARGS__) #define pr_cont(fmt, ...) \ - printk(KERN_CONT fmt, ##__VA_ARGS__) + pr_printk_hash(KERN_CONT, fmt, ##__VA_ARGS__) @@ -8053,17 +8054,17 @@ Acked-by: John Jolly #ifdef DEBUG --- a/kernel/printk.c +++ b/kernel/printk.c -@@ -36,6 +36,8 @@ - #include - #include - #include +@@ -39,6 +39,8 @@ + #include + #include + #include +#include +#include #include -@@ -1534,3 +1536,46 @@ void kmsg_dump(enum kmsg_dump_reason rea - spin_unlock_irqrestore(&dump_list_lock, flags); +@@ -1573,3 +1575,46 @@ void kmsg_dump(enum kmsg_dump_reason rea + rcu_read_unlock(); } #endif + @@ -8111,7 +8112,7 @@ Acked-by: John Jolly +#endif --- a/scripts/Makefile.build +++ b/scripts/Makefile.build -@@ -229,12 +229,14 @@ endef +@@ -232,12 +232,14 @@ endef # Built-in and composite module parts $(obj)/%.o: $(src)/%.c FORCE $(call cmd,force_checksrc) @@ -8126,7 +8127,7 @@ Acked-by: John Jolly $(call if_changed_rule,cc_o_c) @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod) -@@ -358,6 +360,18 @@ $(multi-used-m) : %.o: $(multi-objs-m) F +@@ -361,6 +363,18 @@ $(multi-used-m) : %.o: $(multi-objs-m) F targets += $(multi-used-y) $(multi-used-m) diff --git a/patches.arch/x86-cpu-add-amd-core-boosting-feature-flag-to-proc-cpuinfo b/patches.arch/x86-cpu-add-amd-core-boosting-feature-flag-to-proc-cpuinfo deleted file mode 100644 index 982f4e0..0000000 --- a/patches.arch/x86-cpu-add-amd-core-boosting-feature-flag-to-proc-cpuinfo +++ /dev/null @@ -1,49 +0,0 @@ -From: Borislav Petkov -Date: Wed, 31 Mar 2010 19:56:41 +0000 (+0200) -Subject: x86, cpu: Add AMD core boosting feature flag to /proc/cpuinfo -Git-commit: 5958f1d5d722df7a9e5d129676614a8e5219bacd -Patch-mainline: 2.6.35-rc1 - -x86, cpu: Add AMD core boosting feature flag to /proc/cpuinfo - -By semi-popular demand, this adds the Core Performance Boost feature -flag to /proc/cpuinfo. Possible use case for this is userspace tools -like cpufreq-aperf, for example, so that they don't have to jump through -hoops of accessing "/dev/cpu/%d/cpuid" in order to check for CPB hw -support, or call cpuid from userspace. - -Signed-off-by: Borislav Petkov -LKML-Reference: <1270065406-1814-2-git-send-email-bp@amd64.org> -Reviewed-by: Thomas Renninger -Signed-off-by: H. 
Peter Anvin -Acked-by: Jeff Mahoney ---- - - arch/x86/include/asm/cpufeature.h | 1 + - arch/x86/kernel/cpu/addon_cpuid_features.c | 5 +++-- - 2 files changed, 4 insertions(+), 2 deletions(-) - ---- a/arch/x86/include/asm/cpufeature.h -+++ b/arch/x86/include/asm/cpufeature.h -@@ -161,6 +161,7 @@ - */ - #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ - #define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ -+#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */ - - /* Virtualization flags: Linux defined */ - #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ ---- a/arch/x86/kernel/cpu/addon_cpuid_features.c -+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c -@@ -30,8 +30,9 @@ void __cpuinit init_scattered_cpuid_feat - const struct cpuid_bit *cb; - - static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { -- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, -- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 }, -+ { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, -+ { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 }, -+ { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007 }, - { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a }, - { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a }, - { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a }, diff --git a/patches.arch/x86-cpufreq-add-aperf-mperf-support-for-amd-processors b/patches.arch/x86-cpufreq-add-aperf-mperf-support-for-amd-processors deleted file mode 100644 index 990f7b1..0000000 --- a/patches.arch/x86-cpufreq-add-aperf-mperf-support-for-amd-processors +++ /dev/null @@ -1,222 +0,0 @@ -From: Mark Langsdorf -Date: Thu, 18 Mar 2010 17:41:46 +0000 (+0100) -Subject: x86, cpufreq: Add APERF/MPERF support for AMD processors -Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip.git -Git-commit: a2fed573f065e526bfd5cbf26e5491973d9e9aaa -References: bnc#602209 -Patch-mainline: 2.6.35-rc1 - -x86, cpufreq: Add APERF/MPERF support for AMD processors - -Starting with model 10 of Family 0x10, AMD processors may have -support for APERF/MPERF. Add support for identifying it and using -it within cpufreq. Move the APERF/MPERF functions out of the -acpi-cpufreq code and into their own file so they can easily be -shared. - -Signed-off-by: Mark Langsdorf -LKML-Reference: <20100401141956.GA1930@aftab> -Signed-off-by: Borislav Petkov -Reviewed-by: Thomas Renninger -Signed-off-by: H. Peter Anvin -Acked-by: Jeff Mahoney ---- - - arch/x86/kernel/cpu/cpufreq/Makefile | 4 +- - arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 44 +------------------------ - arch/x86/kernel/cpu/cpufreq/mperf.c | 51 +++++++++++++++++++++++++++++ - arch/x86/kernel/cpu/cpufreq/mperf.h | 9 +++++ - arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 8 ++++ - 5 files changed, 72 insertions(+), 44 deletions(-) - ---- a/arch/x86/kernel/cpu/cpufreq/Makefile -+++ b/arch/x86/kernel/cpu/cpufreq/Makefile -@@ -2,8 +2,8 @@ - # K8 systems. ACPI is preferred to all other hardware-specific drivers. - # speedstep-* is preferred over p4-clockmod. 
- --obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o --obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o -+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o -+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o - obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o - obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o - obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o ---- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c -+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c -@@ -45,6 +45,7 @@ - #include - #include - #include -+#include "mperf.h" - - #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "acpi-cpufreq", msg) -@@ -70,8 +71,6 @@ struct acpi_cpufreq_data { - - static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); - --static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); -- - /* acpi_perf_data is a pointer to percpu data. */ - static struct acpi_processor_performance *acpi_perf_data; - -@@ -239,45 +238,6 @@ static u32 get_cur_val(const struct cpum - return cmd.val; - } - --/* Called via smp_call_function_single(), on the target CPU */ --static void read_measured_perf_ctrs(void *_cur) --{ -- struct aperfmperf *am = _cur; -- -- get_aperfmperf(am); --} -- --/* -- * Return the measured active (C0) frequency on this CPU since last call -- * to this function. -- * Input: cpu number -- * Return: Average CPU frequency in terms of max frequency (zero on error) -- * -- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance -- * over a period of time, while CPU is in C0 state. -- * IA32_MPERF counts at the rate of max advertised frequency -- * IA32_APERF counts at the rate of actual CPU frequency -- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and -- * no meaning should be associated with absolute values of these MSRs. -- */ --static unsigned int get_measured_perf(struct cpufreq_policy *policy, -- unsigned int cpu) --{ -- struct aperfmperf perf; -- unsigned long ratio; -- unsigned int retval; -- -- if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) -- return 0; -- -- ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); -- per_cpu(acfreq_old_perf, cpu) = perf; -- -- retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; -- -- return retval; --} -- - static unsigned int get_cur_freq_on_cpu(unsigned int cpu) - { - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); -@@ -699,7 +659,7 @@ static int acpi_cpufreq_cpu_init(struct - - /* Check for APERF/MPERF support in hardware */ - if (cpu_has(c, X86_FEATURE_APERFMPERF)) -- acpi_cpufreq_driver.getavg = get_measured_perf; -+ acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; - - dprintk("CPU%u - ACPI performance management activated.\n", cpu); - for (i = 0; i < perf->state_count; i++) ---- /dev/null -+++ b/arch/x86/kernel/cpu/cpufreq/mperf.c -@@ -0,0 +1,51 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "mperf.h" -+ -+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); -+ -+/* Called via smp_call_function_single(), on the target CPU */ -+static void read_measured_perf_ctrs(void *_cur) -+{ -+ struct aperfmperf *am = _cur; -+ -+ get_aperfmperf(am); -+} -+ -+/* -+ * Return the measured active (C0) frequency on this CPU since last call -+ * to this function. -+ * Input: cpu number -+ * Return: Average CPU frequency in terms of max frequency (zero on error) -+ * -+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance -+ * over a period of time, while CPU is in C0 state. 
-+ * IA32_MPERF counts at the rate of max advertised frequency -+ * IA32_APERF counts at the rate of actual CPU frequency -+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and -+ * no meaning should be associated with absolute values of these MSRs. -+ */ -+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, -+ unsigned int cpu) -+{ -+ struct aperfmperf perf; -+ unsigned long ratio; -+ unsigned int retval; -+ -+ if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) -+ return 0; -+ -+ ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); -+ per_cpu(acfreq_old_perf, cpu) = perf; -+ -+ retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; -+ -+ return retval; -+} -+EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf); -+MODULE_LICENSE("GPL"); ---- /dev/null -+++ b/arch/x86/kernel/cpu/cpufreq/mperf.h -@@ -0,0 +1,9 @@ -+/* -+ * (c) 2010 Advanced Micro Devices, Inc. -+ * Your use of this code is subject to the terms and conditions of the -+ * GNU general public license version 2. See "COPYING" or -+ * http://www.gnu.org/licenses/gpl.html -+ */ -+ -+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, -+ unsigned int cpu); ---- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c -+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c -@@ -45,6 +45,7 @@ - #define PFX "powernow-k8: " - #define VERSION "version 2.20.00" - #include "powernow-k8.h" -+#include "mperf.h" - - /* serialize freq changes */ - static DEFINE_MUTEX(fidvid_mutex); -@@ -57,6 +58,8 @@ static int cpu_family = CPU_OPTERON; - static bool cpb_capable, cpb_enabled; - static struct msr *msrs; - -+static struct cpufreq_driver cpufreq_amd64_driver; -+ - #ifndef CONFIG_SMP - static inline const struct cpumask *cpu_core_mask(int cpu) - { -@@ -1252,6 +1255,7 @@ static int __cpuinit powernowk8_cpu_init - struct powernow_k8_data *data; - struct init_on_cpu init_on_cpu; - int rc; -+ struct cpuinfo_x86 *c = &cpu_data(pol->cpu); - - if (!cpu_online(pol->cpu)) - return -ENODEV; -@@ -1326,6 +1330,10 @@ static int __cpuinit powernowk8_cpu_init - return -EINVAL; - } - -+ /* Check for APERF/MPERF support in hardware */ -+ if (cpu_has(c, X86_FEATURE_APERFMPERF)) -+ cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf; -+ - cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); - - if (cpu_family == CPU_HW_PSTATE) diff --git a/patches.arch/x86-hpet-pre-read b/patches.arch/x86-hpet-pre-read index 07a19da..16b70bc 100644 --- a/patches.arch/x86-hpet-pre-read +++ b/patches.arch/x86-hpet-pre-read @@ -16,7 +16,7 @@ Signed-off-by: Takashi Iwai --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c -@@ -385,6 +385,7 @@ static int hpet_next_event(unsigned long +@@ -386,6 +386,7 @@ static int hpet_next_event(unsigned long cnt += (u32) delta; hpet_writel(cnt, HPET_Tn_CMP(timer)); diff --git a/patches.arch/x86-mcp51-no-dac b/patches.arch/x86-mcp51-no-dac index b94ea13..4a85e19 100644 --- a/patches.arch/x86-mcp51-no-dac +++ b/patches.arch/x86-mcp51-no-dac @@ -17,7 +17,7 @@ Signed-off-by: Tejun Heo --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c -@@ -318,4 +318,18 @@ static __devinit void via_no_dac(struct +@@ -322,4 +322,18 @@ static __devinit void via_no_dac(struct } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); diff --git a/patches.arch/x86_64-unwind-annotations b/patches.arch/x86_64-unwind-annotations index 48d45b8..3f4fa68 100644 --- a/patches.arch/x86_64-unwind-annotations +++ b/patches.arch/x86_64-unwind-annotations @@ -4,21 +4,14 @@ 
Patch-mainline: tbd References: bnc#472783, bnc#588458 --- - arch/x86/kernel/entry_64.S | 193 +++++++++++++++++++++++---------------------- - arch/x86/kernel/head_64.S | 13 +++ - 2 files changed, 115 insertions(+), 91 deletions(-) + arch/x86/kernel/entry_64.S | 131 +++++++++++++++++++++++---------------------- + arch/x86/kernel/head_64.S | 13 ++++ + lib/rwsem_64.S | 56 ++++++++++++++--------- + 3 files changed, 114 insertions(+), 84 deletions(-) --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S -@@ -38,6 +38,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -240,21 +241,21 @@ ENDPROC(native_usergs_sysret64) +@@ -234,21 +234,21 @@ ENDPROC(native_usergs_sysret64) /* * initial frame state for interrupts (and exceptions without error code) */ @@ -40,71 +33,69 @@ References: bnc#472783, bnc#588458 .macro INTR_FRAME start=1 offset=0 - EMPTY_FRAME \start, SS+8+\offset-RIP + .if \start -+ EMPTY_FRAME __stringify(SS+8+\offset-RIP) ++ EMPTY_FRAME SS+8+\offset-RIP + .else + CFI_DEF_CFA_OFFSET SS+8+\offset-RIP + .endif /*CFI_REL_OFFSET ss, SS+\offset-RIP*/ CFI_REL_OFFSET rsp, RSP+\offset-RIP /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ -@@ -267,15 +268,16 @@ ENDPROC(native_usergs_sysret64) - * with vector already pushed) +@@ -262,14 +262,15 @@ ENDPROC(native_usergs_sysret64) */ .macro XCPT_FRAME start=1 offset=0 -- INTR_FRAME \start, RIP+\offset-ORIG_RAX + INTR_FRAME \start, RIP+\offset-ORIG_RAX - /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ -+ INTR_FRAME \start, __stringify(RIP+\offset-ORIG_RAX) .endm /* * frame that enables calling into C. */ .macro PARTIAL_FRAME start=1 offset=0 -- XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET + .if \start >= 0 -+ XCPT_FRAME \start, __stringify(ORIG_RAX+\offset-ARGOFFSET) + XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET + .endif CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET -@@ -291,7 +293,9 @@ ENDPROC(native_usergs_sysret64) +@@ -285,7 +286,9 @@ ENDPROC(native_usergs_sysret64) * frame that enables passing a complete pt_regs to a C function. */ .macro DEFAULT_FRAME start=1 offset=0 -- PARTIAL_FRAME \start, R11+\offset-R15 + .if \start >= -1 -+ PARTIAL_FRAME \start, __stringify(R11+\offset-R15) + PARTIAL_FRAME \start, R11+\offset-R15 + .endif CFI_REL_OFFSET rbx, RBX+\offset CFI_REL_OFFSET rbp, RBP+\offset CFI_REL_OFFSET r12, R12+\offset -@@ -302,21 +306,23 @@ ENDPROC(native_usergs_sysret64) - +@@ -297,25 +300,27 @@ ENDPROC(native_usergs_sysret64) /* save partial stack frame */ + .pushsection .kprobes.text, "ax" ENTRY(save_args) - XCPT_FRAME -+ XCPT_FRAME offset=__stringify(ORIG_RAX-ARGOFFSET+16) ++ XCPT_FRAME offset=ORIG_RAX-RBP+8 cld -- movq_cfi rdi, RDI+16-ARGOFFSET -- movq_cfi rsi, RSI+16-ARGOFFSET -- movq_cfi rdx, RDX+16-ARGOFFSET -- movq_cfi rcx, RCX+16-ARGOFFSET -- movq_cfi rax, RAX+16-ARGOFFSET -- movq_cfi r8, R8+16-ARGOFFSET -- movq_cfi r9, R9+16-ARGOFFSET -- movq_cfi r10, R10+16-ARGOFFSET -- movq_cfi r11, R11+16-ARGOFFSET -+ movq %rdi, RDI+16-ARGOFFSET(%rsp) -+ movq %rsi, RSI+16-ARGOFFSET(%rsp) -+ movq %rdx, RDX+16-ARGOFFSET(%rsp) -+ movq %rcx, RCX+16-ARGOFFSET(%rsp) -+ movq_cfi rax, __stringify(RAX+16-ARGOFFSET) -+ movq %r8, R8+16-ARGOFFSET(%rsp) -+ movq %r9, R9+16-ARGOFFSET(%rsp) -+ movq %r10, R10+16-ARGOFFSET(%rsp) -+ movq_cfi r11, __stringify(R11+16-ARGOFFSET) + /* + * start from rbp in pt_regs and jump over + * return address. 
+ */ + movq_cfi rdi, RDI+8-RBP +- movq_cfi rsi, RSI+8-RBP +- movq_cfi rdx, RDX+8-RBP +- movq_cfi rcx, RCX+8-RBP ++ movq %rsi, RSI+8-RBP(%rsp) ++ movq %rdx, RDX+8-RBP(%rsp) ++ movq %rcx, RCX+8-RBP(%rsp) + movq_cfi rax, RAX+8-RBP +- movq_cfi r8, R8+8-RBP +- movq_cfi r9, R9+8-RBP +- movq_cfi r10, R10+8-RBP +- movq_cfi r11, R11+8-RBP ++ movq %r8, R8+8-RBP(%rsp) ++ movq %r9, R9+8-RBP(%rsp) ++ movq %r10, R10+8-RBP(%rsp) ++ movq %r11, R11+8-RBP(%rsp) - leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ + leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ movq_cfi rbp, 8 /* push %rbp */ leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ + CFI_DEF_CFA_REGISTER rbp @@ -112,7 +103,7 @@ References: bnc#472783, bnc#588458 testl $3, CS(%rdi) je 1f SWAPGS -@@ -328,11 +334,10 @@ ENTRY(save_args) +@@ -327,11 +332,10 @@ ENTRY(save_args) */ 1: incl PER_CPU_VAR(irq_count) jne 2f @@ -127,8 +118,8 @@ References: bnc#472783, bnc#588458 /* * We entered an interrupt context - irqs are off: */ -@@ -342,14 +347,14 @@ ENTRY(save_args) - END(save_args) +@@ -342,14 +346,14 @@ END(save_args) + .popsection ENTRY(save_rest) - PARTIAL_FRAME 1 REST_SKIP+8 @@ -149,38 +140,34 @@ References: bnc#472783, bnc#588458 movq %r11, 8(%rsp) /* return address */ FIXUP_TOP_OF_STACK %r11, 16 ret -@@ -359,23 +364,23 @@ END(save_rest) +@@ -359,23 +363,23 @@ END(save_rest) /* save complete stack frame */ .pushsection .kprobes.text, "ax" ENTRY(save_paranoid) - XCPT_FRAME 1 RDI+8 -+ XCPT_FRAME offset=__stringify(ORIG_RAX-R15+8) ++ XCPT_FRAME offset=ORIG_RAX-R15+8 cld - movq_cfi rdi, RDI+8 - movq_cfi rsi, RSI+8 -- movq_cfi rdx, RDX+8 -- movq_cfi rcx, RCX+8 -- movq_cfi rax, RAX+8 ++ movq %rdi, RDI+8(%rsp) ++ movq %rsi, RSI+8(%rsp) + movq_cfi rdx, RDX+8 + movq_cfi rcx, RCX+8 + movq_cfi rax, RAX+8 - movq_cfi r8, R8+8 - movq_cfi r9, R9+8 - movq_cfi r10, R10+8 - movq_cfi r11, R11+8 -- movq_cfi rbx, RBX+8 ++ movq %r8, R8+8(%rsp) ++ movq %r9, R9+8(%rsp) ++ movq %r10, R10+8(%rsp) ++ movq %r11, R11+8(%rsp) + movq_cfi rbx, RBX+8 - movq_cfi rbp, RBP+8 - movq_cfi r12, R12+8 - movq_cfi r13, R13+8 - movq_cfi r14, R14+8 - movq_cfi r15, R15+8 -+ movq %rdi, RDI+8(%rsp) -+ movq %rsi, RSI+8(%rsp) -+ movq_cfi rdx, __stringify(RDX+8) -+ movq_cfi rcx, __stringify(RCX+8) -+ movq_cfi rax, __stringify(RAX+8) -+ movq %r8, R8+8(%rsp) -+ movq %r9, R9+8(%rsp) -+ movq %r10, R10+8(%rsp) -+ movq %r11, R11+8(%rsp) -+ movq_cfi rbx, __stringify(RBX+8) + movq %rbp, RBP+8(%rsp) + movq %r12, R12+8(%rsp) + movq %r13, R13+8(%rsp) @@ -189,7 +176,7 @@ References: bnc#472783, bnc#588458 movl $1,%ebx movl $MSR_GS_BASE,%ecx rdmsr -@@ -685,7 +690,7 @@ ENTRY(\label) +@@ -677,7 +681,7 @@ ENTRY(\label) subq $REST_SKIP, %rsp CFI_ADJUST_CFA_OFFSET REST_SKIP call save_rest @@ -198,144 +185,75 @@ References: bnc#472783, bnc#588458 leaq 8(%rsp), \arg /* pt_regs pointer */ call \func jmp ptregscall_common -@@ -702,12 +707,12 @@ END(\label) - ENTRY(ptregscall_common) - DEFAULT_FRAME 1 8 /* offset 8: return address */ - RESTORE_TOP_OF_STACK %r11, 8 -- movq_cfi_restore R15+8, r15 -- movq_cfi_restore R14+8, r14 -- movq_cfi_restore R13+8, r13 -- movq_cfi_restore R12+8, r12 -- movq_cfi_restore RBP+8, rbp -- movq_cfi_restore RBX+8, rbx -+ movq_cfi_restore __stringify(R15+8), r15 -+ movq_cfi_restore __stringify(R14+8), r14 -+ movq_cfi_restore __stringify(R13+8), r13 -+ movq_cfi_restore __stringify(R12+8), r12 -+ movq_cfi_restore __stringify(RBP+8), rbp -+ movq_cfi_restore __stringify(RBX+8), rbx - ret $REST_SKIP /* pop extended registers */ - CFI_ENDPROC - END(ptregscall_common) -@@ -719,9 +724,8 @@ 
END(ptregscall_common) - - ENTRY(stub_execve) - CFI_STARTPROC -- popq %r11 -- CFI_ADJUST_CFA_OFFSET -8 -- CFI_REGISTER rip, r11 -+ addq $8, %rsp -+ PARTIAL_FRAME 0 - SAVE_REST - FIXUP_TOP_OF_STACK %r11 - movq %rsp, %rcx -@@ -740,7 +744,7 @@ END(stub_execve) - ENTRY(stub_rt_sigreturn) - CFI_STARTPROC - addq $8, %rsp -- CFI_ADJUST_CFA_OFFSET -8 -+ PARTIAL_FRAME 0 - SAVE_REST - movq %rsp,%rdi - FIXUP_TOP_OF_STACK %r11 -@@ -796,10 +805,12 @@ END(interrupt) - - /* 0(%rsp): ~(interrupt number) */ - .macro interrupt func -- subq $10*8, %rsp -- CFI_ADJUST_CFA_OFFSET 10*8 -+ subq $ORIG_RAX-ARGOFFSET+8, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8 +@@ -794,7 +798,9 @@ END(interrupt) + subq $ORIG_RAX-RBP, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP call save_args - PARTIAL_FRAME 0 -+ PARTIAL_FRAME -1 8 ++ PARTIAL_FRAME -1 ARGOFFSET-RBP + CFI_REL_OFFSET rbp, 0 + CFI_DEF_CFA_REGISTER rbp call \func .endm -@@ -1036,10 +1047,10 @@ ENTRY(\sym) - INTR_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ -- subq $15*8,%rsp -- CFI_ADJUST_CFA_OFFSET 15*8 -+ subq $ORIG_RAX-R15, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 +@@ -813,7 +819,6 @@ ret_from_intr: + TRACE_IRQS_OFF + decl PER_CPU_VAR(irq_count) + leaveq +- + CFI_RESTORE rbp + CFI_DEF_CFA_REGISTER rsp + CFI_ADJUST_CFA_OFFSET -8 +@@ -1021,7 +1026,7 @@ ENTRY(\sym) + subq $ORIG_RAX-R15, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call error_entry - DEFAULT_FRAME 0 + DEFAULT_FRAME -1 movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ call \do_sym -@@ -1054,8 +1065,10 @@ ENTRY(\sym) - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $-1 /* ORIG_RAX: no syscall to restart */ - CFI_ADJUST_CFA_OFFSET 8 -- subq $15*8, %rsp -+ subq $ORIG_RAX-R15, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 +@@ -1038,6 +1043,7 @@ ENTRY(\sym) + subq $ORIG_RAX-R15, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid + DEFAULT_FRAME -1 TRACE_IRQS_OFF movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ -@@ -1071,8 +1084,10 @@ ENTRY(\sym) - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $-1 /* ORIG_RAX: no syscall to restart */ - CFI_ADJUST_CFA_OFFSET 8 -- subq $15*8, %rsp -+ subq $ORIG_RAX-R15, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 +@@ -1056,6 +1062,7 @@ ENTRY(\sym) + subq $ORIG_RAX-R15, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid + DEFAULT_FRAME -1 TRACE_IRQS_OFF movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ -@@ -1089,10 +1104,10 @@ END(\sym) - ENTRY(\sym) - XCPT_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME -- subq $15*8,%rsp -- CFI_ADJUST_CFA_OFFSET 15*8 -+ subq $ORIG_RAX-R15, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 +@@ -1074,7 +1081,7 @@ ENTRY(\sym) + subq $ORIG_RAX-R15, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call error_entry - DEFAULT_FRAME 0 + DEFAULT_FRAME -1 movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ -@@ -1107,10 +1122,10 @@ END(\sym) - ENTRY(\sym) - XCPT_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME -- subq $15*8,%rsp -- CFI_ADJUST_CFA_OFFSET 15*8 -+ subq $ORIG_RAX-R15, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 +@@ -1092,7 +1099,7 @@ ENTRY(\sym) + subq $ORIG_RAX-R15, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid - DEFAULT_FRAME 0 + DEFAULT_FRAME -1 TRACE_IRQS_OFF movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ -@@ -1426,7 +1441,7 @@ paranoidzeroentry machine_check *machine - - /* ebx: no swapgs flag */ - 
ENTRY(paranoid_exit) -- INTR_FRAME -+ DEFAULT_FRAME - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - testl %ebx,%ebx /* swapgs needed? */ -@@ -1476,25 +1491,24 @@ END(paranoid_exit) +@@ -1435,25 +1442,24 @@ END(paranoid_exit) * returns in "no swapgs flag" in %ebx. */ ENTRY(error_entry) - XCPT_FRAME - CFI_ADJUST_CFA_OFFSET 15*8 -+ XCPT_FRAME offset=__stringify(ORIG_RAX-R15+8) ++ XCPT_FRAME offset=ORIG_RAX-R15+8 /* oldrax contains error code */ cld - movq_cfi rdi, RDI+8 @@ -347,12 +265,6 @@ References: bnc#472783, bnc#588458 - movq_cfi r9, R9+8 - movq_cfi r10, R10+8 - movq_cfi r11, R11+8 -- movq_cfi rbx, RBX+8 -- movq_cfi rbp, RBP+8 -- movq_cfi r12, R12+8 -- movq_cfi r13, R13+8 -- movq_cfi r14, R14+8 -- movq_cfi r15, R15+8 + movq %rdi, RDI+8(%rsp) + movq %rsi, RSI+8(%rsp) + movq %rdx, RDX+8(%rsp) @@ -362,7 +274,12 @@ References: bnc#472783, bnc#588458 + movq %r9, R9+8(%rsp) + movq %r10, R10+8(%rsp) + movq %r11, R11+8(%rsp) -+ movq_cfi rbx, __stringify(RBX+8) + movq_cfi rbx, RBX+8 +- movq_cfi rbp, RBP+8 +- movq_cfi r12, R12+8 +- movq_cfi r13, R13+8 +- movq_cfi r14, R14+8 +- movq_cfi r15, R15+8 + movq %rbp, RBP+8(%rsp) + movq %r12, R12+8(%rsp) + movq %r13, R13+8(%rsp) @@ -371,15 +288,7 @@ References: bnc#472783, bnc#588458 xorl %ebx,%ebx testl $3,CS+8(%rsp) je error_kernelspace -@@ -1503,7 +1517,6 @@ error_swapgs: - error_sti: - TRACE_IRQS_OFF - ret -- CFI_ENDPROC - - /* - * There are two places in the kernel that can potentially fault with -@@ -1513,6 +1522,7 @@ error_sti: +@@ -1471,6 +1477,7 @@ error_sti: * compat mode. Check for these here too. */ error_kernelspace: @@ -387,22 +296,9 @@ References: bnc#472783, bnc#588458 incl %ebx leaq irq_return(%rip),%rcx cmpq %rcx,RIP+8(%rsp) -@@ -1528,6 +1542,7 @@ bstep_iret: - /* Fix truncated RIP */ - movq %rcx,RIP+8(%rsp) - jmp error_swapgs -+ CFI_ENDPROC - END(error_entry) - - -@@ -1556,10 +1571,10 @@ ENTRY(nmi) - INTR_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq_cfi $-1 -- subq $15*8, %rsp -- CFI_ADJUST_CFA_OFFSET 15*8 -+ subq $ORIG_RAX-R15, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 +@@ -1518,7 +1523,7 @@ ENTRY(nmi) + subq $ORIG_RAX-R15, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid - DEFAULT_FRAME 0 + DEFAULT_FRAME -1 @@ -411,7 +307,7 @@ References: bnc#472783, bnc#588458 movq $-1,%rsi --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S -@@ -285,6 +285,8 @@ early_idt_handlers: +@@ -284,6 +284,8 @@ early_idt_handlers: ENTRY(early_idt_handler) #ifdef CONFIG_EARLY_PRINTK @@ -420,7 +316,7 @@ References: bnc#472783, bnc#588458 cmpl $2,early_recursion_flag(%rip) jz 1f incl early_recursion_flag(%rip) -@@ -300,6 +302,16 @@ ENTRY(early_idt_handler) +@@ -299,6 +301,16 @@ ENTRY(early_idt_handler) testl $0x27d00,%eax je 0f popq %r8 # get error code @@ -437,7 +333,7 @@ References: bnc#472783, bnc#588458 0: movq 0(%rsp),%rcx # get ip movq 8(%rsp),%rdx # get cs xorl %eax,%eax -@@ -313,6 +325,7 @@ ENTRY(early_idt_handler) +@@ -312,6 +324,7 @@ ENTRY(early_idt_handler) movq 0(%rsp),%rsi # get rip again call __print_symbol #endif @@ -445,3 +341,99 @@ References: bnc#472783, bnc#588458 #endif /* EARLY_PRINTK */ 1: hlt jmp 1b +--- a/arch/x86/lib/rwsem_64.S ++++ b/arch/x86/lib/rwsem_64.S +@@ -23,43 +23,50 @@ + #include + + #define save_common_regs \ +- pushq %rdi; \ +- pushq %rsi; \ +- pushq %rcx; \ +- pushq %r8; \ +- pushq %r9; \ +- pushq %r10; \ +- pushq %r11 ++ pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \ ++ pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \ ++ pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \ ++ pushq_cfi %r8; CFI_REL_OFFSET r8, 0; \ ++ 
pushq_cfi %r9; CFI_REL_OFFSET r9, 0; \ ++ pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \ ++ pushq_cfi %r11; CFI_REL_OFFSET r11, 0 + + #define restore_common_regs \ +- popq %r11; \ +- popq %r10; \ +- popq %r9; \ +- popq %r8; \ +- popq %rcx; \ +- popq %rsi; \ +- popq %rdi ++ popq_cfi %r11; CFI_RESTORE r11; \ ++ popq_cfi %r10; CFI_RESTORE r10; \ ++ popq_cfi %r9; CFI_RESTORE r9; \ ++ popq_cfi %r8; CFI_RESTORE r8; \ ++ popq_cfi %rcx; CFI_RESTORE rcx; \ ++ popq_cfi %rsi; CFI_RESTORE rsi; \ ++ popq_cfi %rdi; CFI_RESTORE rdi + + /* Fix up special calling conventions */ + ENTRY(call_rwsem_down_read_failed) ++ CFI_STARTPROC + save_common_regs +- pushq %rdx ++ pushq_cfi %rdx ++ CFI_REL_OFFSET rdx, 0 + movq %rax,%rdi + call rwsem_down_read_failed +- popq %rdx ++ popq_cfi %rdx ++ CFI_RESTORE rdx + restore_common_regs + ret +- ENDPROC(call_rwsem_down_read_failed) ++ CFI_ENDPROC ++ENDPROC(call_rwsem_down_read_failed) + + ENTRY(call_rwsem_down_write_failed) ++ CFI_STARTPROC + save_common_regs + movq %rax,%rdi + call rwsem_down_write_failed + restore_common_regs + ret +- ENDPROC(call_rwsem_down_write_failed) ++ CFI_ENDPROC ++ENDPROC(call_rwsem_down_write_failed) + + ENTRY(call_rwsem_wake) ++ CFI_STARTPROC + decl %edx /* do nothing if still outstanding active readers */ + jnz 1f + save_common_regs +@@ -67,15 +74,20 @@ ENTRY(call_rwsem_wake) + call rwsem_wake + restore_common_regs + 1: ret +- ENDPROC(call_rwsem_wake) ++ CFI_ENDPROC ++ENDPROC(call_rwsem_wake) + + /* Fix up special calling conventions */ + ENTRY(call_rwsem_downgrade_wake) ++ CFI_STARTPROC + save_common_regs +- pushq %rdx ++ pushq_cfi %rdx ++ CFI_REL_OFFSET rdx, 0 + movq %rax,%rdi + call rwsem_downgrade_wake +- popq %rdx ++ popq_cfi %rdx ++ CFI_RESTORE rdx + restore_common_regs + ret +- ENDPROC(call_rwsem_downgrade_wake) ++ CFI_ENDPROC ++ENDPROC(call_rwsem_downgrade_wake) diff --git a/patches.arch/x86_agpgart-g33-stoeln-fix-2.patch b/patches.arch/x86_agpgart-g33-stoeln-fix-2.patch index a2cd417..51ba3ec 100644 --- a/patches.arch/x86_agpgart-g33-stoeln-fix-2.patch +++ b/patches.arch/x86_agpgart-g33-stoeln-fix-2.patch @@ -53,12 +53,12 @@ Signed-off-by: Brandon Philips Acked-by: Thomas Renninger --- - drivers/char/agp/intel-agp.c | 7 +++++++ + drivers/char/agp/intel-gtt.c | 7 +++++++ 1 file changed, 7 insertions(+) ---- a/drivers/char/agp/intel-agp.c -+++ b/drivers/char/agp/intel-agp.c -@@ -801,6 +801,13 @@ static void intel_i830_init_gtt_entries( +--- a/drivers/char/agp/intel-gtt.c ++++ b/drivers/char/agp/intel-gtt.c +@@ -648,6 +648,13 @@ static void intel_i830_init_gtt_entries( } else { switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: diff --git a/patches.arch/x86_mce_intel_decode_physical_address.patch b/patches.arch/x86_mce_intel_decode_physical_address.patch deleted file mode 100644 index 22a635d..0000000 --- a/patches.arch/x86_mce_intel_decode_physical_address.patch +++ /dev/null @@ -1,581 +0,0 @@ -From: Andi Kleen -Subject: x86, mce: Xeon75xx specific interface to get corrected memory error information -Patch-Mainline: submitted to x86-tip, added but reverted due to a minor compile issue - which gets fixed by and incremental patch -References: bnc#573380, fate#307738 - -http://lkml.org/lkml/2010/1/22/98 - -Xeon 75xx doesn't log physical addresses on corrected machine check -events in the standard architectural MSRs. Instead the address has to -be retrieved in a model specific way. This makes it impossible to do -predictive failure analysis. 
- -Implement cpu model specific code to do this in mce-xeon75xx.c using a -new hook that is called from the generic poll code. The code retrieves -the physical address/DIMM of the last corrected error from the -platform and makes the address look like a standard architectural MCA -address for further processing. - -In addition the DIMM information is retrieved and put into two new -aux0/aux1 fields in struct mce. These fields are specific to a given -CPU. These fields can then be decoded by mcelog into specific DIMM -information. The latest mcelog version has support for this. - -Longer term this will be likely in a different output format, but -short term that seemed like the least intrusive solution. Older mcelog -can deal with an extended record. - -There's no code to print this information on a panic because this only -works for corrected errors, and corrected errors do not usually result -in panics. - -The act of retrieving the DIMM/PA information can take some time, so -this code has a rate limit to avoid taking too much CPU time on a -error flood. - -The whole thing can be loaded as a module and has suitable PCI-IDs so -that it can be auto-loaded by a distribution. The code also checks -explicitely for the expected CPU model number to make sure this code -doesn't run anywhere else. - -Signed-off-by: Thomas Renninger - ---- - arch/x86/Kconfig | 8 - arch/x86/include/asm/mce.h | 2 - arch/x86/kernel/cpu/mcheck/Makefile | 1 - arch/x86/kernel/cpu/mcheck/mce-internal.h | 1 - arch/x86/kernel/cpu/mcheck/mce-xeon75xx.c | 427 ++++++++++++++++++++++++++++++ - arch/x86/kernel/cpu/mcheck/mce.c | 11 - arch/x86/kernel/e820.c | 3 - 7 files changed, 452 insertions(+), 1 deletion(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -919,6 +919,14 @@ config X86_MCE_INTEL - Additional support for intel specific MCE features such as - the thermal monitor. - -+config X86_MCE_XEON75XX -+ tristate "Intel Xeon 7500 series corrected memory error driver" -+ depends on X86_MCE_INTEL -+ ---help--- -+ Add support for a Intel Xeon 7500 series specific memory error driver. -+ This allows to report the DIMM and physical address on a corrected -+ memory error machine check event. -+ - config X86_MCE_AMD - def_bool y - prompt "AMD MCE features" ---- a/arch/x86/include/asm/mce.h -+++ b/arch/x86/include/asm/mce.h -@@ -67,6 +67,8 @@ struct mce { - __u32 socketid; /* CPU socket ID */ - __u32 apicid; /* CPU initial apic ID */ - __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ -+ __u64 aux0; /* model specific */ -+ __u64 aux1; /* model specific */ - }; - - /* ---- a/arch/x86/kernel/cpu/mcheck/Makefile -+++ b/arch/x86/kernel/cpu/mcheck/Makefile -@@ -2,6 +2,7 @@ obj-y = mce.o mce-severity.o - - obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o - obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o -+obj-$(CONFIG_X86_MCE_XEON75XX) += mce-xeon75xx.o - obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o - obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o - obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o ---- a/arch/x86/kernel/cpu/mcheck/mce-internal.h -+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h -@@ -28,3 +28,4 @@ extern int mce_ser; - - extern struct mce_bank *mce_banks; - -+extern void (*cpu_specific_poll)(struct mce *); ---- /dev/null -+++ b/arch/x86/kernel/cpu/mcheck/mce-xeon75xx.c -@@ -0,0 +1,427 @@ -+/* -+ * Xeon 7500 series specific machine check support code. 
-+ * Copyright 2009, 2010 Intel Corporation -+ * Author: Andi Kleen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; version 2 -+ * of the License. -+ * -+ * Implement Xeon 7500 series specific code to retrieve the physical address -+ * and DIMM information for corrected memory errors. -+ * -+ * Interface: mce->aux0/aux1 is mapped to a struct pfa_dimm with pad -+ * redefined to DIMM valid bits. Consumers check CPUID and bank and -+ * then interpret aux0/aux1 -+ */ -+ -+/* #define DEBUG 1 */ /* disable for production */ -+#define pr_fmt(x) "MCE: " x -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "mce-internal.h" -+ -+#define PFA_SIG "$PFA" -+#define PFA_SIG_LEN 4 -+ -+/* DIMM description */ -+struct aux_pfa_dimm { -+ u8 fbd_channel_id; -+ u8 ddr_channel_id; -+ u8 ddr_dimm_id; -+ u8 ddr_rank_id; -+ u8 ddr_dimm_bank_id; -+ u8 ddr_dimm_row_id; -+ u8 ddr_dimm_column_id; -+ u8 valid; -+} __attribute__((packed)); -+ -+struct pfa_dimm { -+ u8 fbd_channel_id; -+ u8 ddr_channel_id; -+ u8 ddr_dimm_id; -+ u8 ddr_rank_id; -+ u8 ddr_dimm_bank_id; -+ u32 ddr_dimm_row_id; -+ u32 ddr_dimm_column_id; -+} __attribute__((packed)); -+ -+/* Memory translation table in memory. */ -+struct pfa_table { -+ u8 sig[PFA_SIG_LEN]; /* Signature: '$PFA' */ -+ u16 len; /* total length */ -+ u16 revision; /* 0x11 */ -+ u8 checksum; /* 8bit sum to zero */ -+ u8 db_value; /* mailbox port command value */ -+ u8 db_port; /* mailbox port */ -+ /* end of header; end of checksum */ -+ u8 command; /* input command */ -+ u32 valid; /* valid input/output bits */ -+ u16 status; /* output status */ -+ u8 socket_id; /* input socket id*/ -+ u8 bank_id; /* input MCE bank id */ -+ u32 pad1; -+ u64 mbox_address; -+ u64 physical_addr; /* physical address */ -+ struct pfa_dimm dimm[2]; -+ /* -+ * topology information follows: not used for now. 
-+ */ -+} __attribute__((packed)); -+ -+/* DIMM valid bits in valid: DIMM0: 8..12; DIMM1 16..20 */ -+#define DIMM_VALID_BITS(val, num) (((val) >> (4 + (num) * 8)) & DIMM_VALID_ALL) -+#define DIMM_SET_VALID(val, num) ((val) << (4 + (num) * 8)) -+ -+enum { -+ MCE_BANK_MBOX0 = 8, -+ MCE_BANK_MBOX1 = 9, -+ -+ PFA_REVISION = 0x11, /* v1.1 */ -+ -+ /* Status bits for valid field */ -+ PFA_VALID_MA = (1 << 0), -+ PFA_VALID_SOCKETID = (1 << 1), -+ PFA_VALID_BANKID = (1 << 2), -+ PFA_VALID_PA = (1 << 3), -+ -+ /* DIMM valid bits in valid */ -+ /* use with DIMM_VALID_BITS/DIMM_SET_VALID for pfa->valid */ -+ DIMM_VALID_FBD_CHAN = (1 << 0), -+ DIMM_VALID_DDR_CHAN = (1 << 1), -+ DIMM_VALID_DDR_DIMM = (1 << 2), -+ DIMM_VALID_DDR_RANK = (1 << 3), -+ DIMM_VALID_DIMM_BANK = (1 << 4), -+ DIMM_VALID_DIMM_ROW = (1 << 5), -+ DIMM_VALID_DIMM_COLUMN = (1 << 6), -+ DIMM_VALID_ALL = 0x7f, -+ -+ PFA_DIMM_VALID_MASK = DIMM_SET_VALID(DIMM_VALID_ALL, 0) -+ | DIMM_SET_VALID(DIMM_VALID_ALL, 1), -+ -+ /* Values for status field */ -+ PFA_STATUS_SUCCESS = 0, -+ PFA_STATUS_SOCKET_INVALID = (1 << 1), -+ PFA_STATUS_MBOX_INVALID = (1 << 2), -+ PFA_STATUS_MA_INVALID = (1 << 3), -+ PFA_STATUS_PA_INVALID = (1 << 4), -+ -+ /* Values for command field */ -+ PFA_CMD_GET_MEM_CORR_ERR_PA = 0, -+ PFA_CMD_PA_TO_DIMM_ADDR = 1, -+ PFA_CMD_DIMM_TO_PA = 2, -+ PFA_CMD_GET_TOPOLOGY = 3, -+ -+ /* PCI device IDs and the base register */ -+ ICH_PFA_CFG = 0x8c, /* SCRATCH4 */ -+ PCI_DEVICE_ID_BXB_ICH_LEGACY0 = 0x3422, -+}; -+ -+static struct pfa_table *pfa_table __read_mostly; -+static int memerr_max_conv_rate __read_mostly = 100; -+static int memerr_min_interval __read_mostly = 500; -+static int pfa_lost; /* for diagnosis */ -+ -+enum { -+ RATE_LIMIT_PERIOD = USEC_PER_SEC, /* in us; period of rate limit */ -+}; -+ -+module_param(memerr_max_conv_rate, int, 0644); -+MODULE_PARM_DESC(memerr_max_conv_rate, -+ "Maximum number of memory error conversions each second; 0 to disable"); -+module_param(memerr_min_interval, int, 0644); -+MODULE_PARM_DESC(memerr_min_interval, -+ "Minimum time delta between two memory conversions; in us; default 500"); -+ -+static int notest; -+static int nocsum; -+module_param(notest, int, 0); -+module_param(nocsum, int, 0); -+ -+static u64 encode_dimm(struct pfa_dimm *d, u8 valid) -+{ -+ union { -+ struct aux_pfa_dimm d; -+ u64 v; -+ } p; -+ -+ BUILD_BUG_ON(sizeof(struct aux_pfa_dimm) != sizeof(u64)); -+ p.d.fbd_channel_id = d->fbd_channel_id; -+ p.d.ddr_channel_id = d->ddr_channel_id; -+ p.d.ddr_dimm_id = d->ddr_dimm_id; -+ p.d.ddr_rank_id = d->ddr_rank_id; -+ p.d.ddr_dimm_bank_id = d->ddr_dimm_bank_id; -+ p.d.ddr_dimm_row_id = d->ddr_dimm_row_id; -+ if (p.d.ddr_dimm_row_id != d->ddr_dimm_row_id) /* truncated? */ -+ valid &= ~DIMM_VALID_DIMM_ROW; -+ p.d.ddr_dimm_column_id = d->ddr_dimm_column_id; -+ if (p.d.ddr_dimm_column_id != d->ddr_dimm_column_id) -+ valid &= ~DIMM_VALID_DIMM_COLUMN; -+ p.d.valid = valid; -+ pr_debug("PFA fbd_ch %u ddr_ch %u dimm %u rank %u bank %u valid %x\n", -+ d->fbd_channel_id, -+ d->ddr_channel_id, -+ d->ddr_dimm_id, -+ d->ddr_rank_id, -+ d->ddr_dimm_bank_id, -+ valid); -+ return p.v; -+} -+ -+static u8 csum(u8 *table, u16 len) -+{ -+ u8 sum = 0; -+ int i; -+ for (i = 0; i < len; i++) -+ sum += *table++; -+ return sum; -+} -+ -+/* -+ * Execute a command through the mailbox interface. 
-+ */ -+static int -+pfa_command(unsigned bank, unsigned socketid, unsigned command, unsigned valid) -+{ -+ pfa_table->bank_id = bank; -+ pfa_table->socket_id = socketid; -+ pfa_table->valid = valid | PFA_VALID_SOCKETID; -+ pfa_table->command = command; -+ -+ outb(pfa_table->db_value, pfa_table->db_port); -+ -+ mb(); /* Reread fields after they got changed */ -+ -+ if (pfa_table->status != PFA_STATUS_SUCCESS) { -+ pr_debug("Memory PFA command %d failed: socket:%d bank:%d status:%x\n", -+ command, socketid, bank, pfa_table->status); -+ return -pfa_table->status; -+ } -+ return 0; -+} -+ -+/* -+ * Retrieve physical address and DIMMs. -+ */ -+static int translate_memory_error(struct mce *m) -+{ -+ struct pfa_table *pfa = pfa_table; -+ u64 status; -+ int ret; -+ u32 valid; -+ int cpu = smp_processor_id(); -+ -+ /* Make sure our structures match the specification */ -+ BUILD_BUG_ON(offsetof(struct pfa_table, physical_addr) != 0x20); -+ BUILD_BUG_ON(offsetof(struct pfa_table, status) != 0x10); -+ BUILD_BUG_ON(offsetof(struct pfa_table, physical_addr) != 0x20); -+ BUILD_BUG_ON(offsetof(struct pfa_table, dimm[1].ddr_dimm_column_id) != -+ 0x3e); -+ -+ /* Ask for PA/DIMMs of last error */ -+ if (pfa_command(m->bank, m->socketid, -+ PFA_CMD_GET_MEM_CORR_ERR_PA, PFA_VALID_BANKID) < 0) -+ return -1; -+ -+ /* -+ * Recheck machine check bank. If the overflow bit was set -+ * there was a race. Don't use the information in this case. -+ */ -+ rdmsrl(MSR_IA32_MCx_STATUS(m->bank), status); -+ if (status & MCI_STATUS_OVER) { -+ pr_debug("%d: overflow race on bank %d\n", cpu, m->bank); -+ return -1; -+ } -+ -+ ret = -1; -+ valid = pfa->valid; -+ if (valid & PFA_VALID_PA) { -+ m->status |= MCI_STATUS_ADDRV; -+ m->addr = pfa_table->physical_addr; -+ pr_debug("%d: got physical address %llx valid %x\n", -+ cpu, m->addr, valid); -+ ret = 0; -+ } -+ -+ /* When DIMM information was supplied pass it out */ -+ if (valid & PFA_DIMM_VALID_MASK) { -+ m->aux0 = encode_dimm(&pfa->dimm[0], DIMM_VALID_BITS(valid, 0)); -+ m->aux1 = encode_dimm(&pfa->dimm[1], DIMM_VALID_BITS(valid, 1)); -+ ret = 0; -+ } -+ -+ return ret; -+} -+ -+/* -+ * Xeon 75xx specific mce poll method to retrieve the physical address -+ * and DIMM information. -+ */ -+static void xeon75xx_mce_poll(struct mce *m) -+{ -+ static DEFINE_SPINLOCK(convert_lock); /* Protect table and static */ -+ static unsigned long cperm; -+ static ktime_t last, last_int; -+ unsigned long flags; -+ ktime_t now; -+ s64 delta; -+ -+ /* Memory error? */ -+ if (m->bank != MCE_BANK_MBOX0 && m->bank != MCE_BANK_MBOX1) -+ return; -+ if (m->status & MCI_STATUS_OVER) -+ return; -+ if (memerr_max_conv_rate == 0) -+ return; -+ -+ spin_lock_irqsave(&convert_lock, flags); -+ /* -+ * Rate limit conversions. The conversion takes some time, -+ * but it's not good to use all the CPU time during a error -+ * flood. -+ * Enforce maximum number per second and minimum interval. -+ * The ktime call should use TSC on this machine and be fast. 
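/*
 * Editorial sketch, not part of the patch above: the two-level throttle the
 * preceding comment describes, reduced to self-contained userspace C with
 * invented names.  A conversion is attempted only if at least
 * min_interval_us elapsed since the previous attempt and the per-second
 * budget max_per_second has not yet been used up in the current window.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_USEC_PER_SEC 1000000ULL

static uint64_t window_start_us;  /* start of the current 1 s window      */
static uint64_t last_attempt_us;  /* timestamp of the previous attempt    */
static unsigned window_count;     /* conversions done in this window      */

static bool conversion_allowed(uint64_t now_us, unsigned max_per_second,
                               uint64_t min_interval_us)
{
    bool ok = false;

    if (now_us - window_start_us >= SKETCH_USEC_PER_SEC) {
        window_start_us = now_us; /* new window: reset the budget */
        window_count = 0;
    }
    /* && short-circuits, so the budget is only consumed when the minimum
     * spacing requirement is met -- mirroring the driver logic below. */
    if (now_us - last_attempt_us >= min_interval_us &&
        ++window_count <= max_per_second)
        ok = true;

    last_attempt_us = now_us;     /* updated whether or not we converted */
    return ok;
}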
-+ */ -+ now = ktime_get(); -+ delta = ktime_us_delta(now, last); -+ if (delta >= RATE_LIMIT_PERIOD) { -+ cperm = 0; -+ last = now; -+ } -+ if (ktime_us_delta(now, last_int) >= memerr_min_interval && -+ ++cperm <= memerr_max_conv_rate) { -+ if (translate_memory_error(m) < 0) { -+ /* On error stop converting for the next second */ -+ cperm = memerr_max_conv_rate; -+ pr_debug("PFA translation failed\n"); -+ } -+ } else -+ pfa_lost++; -+ last_int = now; -+ spin_unlock_irqrestore(&convert_lock, flags); -+} -+ -+static struct pci_device_id bxb_mce_pciids[] = { -+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_BXB_ICH_LEGACY0) }, -+ {} -+}; -+ -+static int __init xeon75xx_mce_init(void) -+{ -+ u32 addr = 0; -+ struct pci_dev *dev; -+ -+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || -+ boot_cpu_data.x86 != 6 || -+ boot_cpu_data.x86_model != 0x2e) -+ return -ENODEV; -+ -+ /* -+ * Get table address from register in IOH. -+ * This just looks up the device, because we don't want to "own" it. -+ */ -+ dev = NULL; -+ while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, dev)) -+ != NULL) { -+ if (!pci_match_id(bxb_mce_pciids, dev)) -+ continue; -+ pci_read_config_dword(dev, ICH_PFA_CFG, &addr); -+ if (addr) -+ break; -+ } -+ pci_dev_put(dev); -+ if (!addr) -+ return -ENODEV; -+ -+ if (!e820_all_mapped(addr, addr + PAGE_SIZE, E820_RESERVED)) { -+ pr_info("PFA table at %x not e820 reserved\n", addr); -+ return -ENODEV; -+ } -+ -+ pfa_table = (__force struct pfa_table *)ioremap_cache(addr, PAGE_SIZE); -+ if (!pfa_table) { -+ pr_err("Cannot map PFA table at %x\n", addr); -+ return -EIO; -+ } -+ -+ if (memcmp(&pfa_table->sig, PFA_SIG, PFA_SIG_LEN) || -+ pfa_table->len < sizeof(struct pfa_table) || -+ /* assume newer versions are compatible */ -+ pfa_table->revision < PFA_REVISION) { -+ pr_info("PFA table at %x invalid\n", addr); -+ goto error_unmap; -+ } -+ -+ if (!nocsum && csum((u8 *)pfa_table, -+ offsetof(struct pfa_table, command))) { -+ pr_info("PFA table at %x length %u has invalid checksum\n", -+ addr, pfa_table->len); -+ goto error_unmap; -+ } -+ -+ /* Not strictly needed today */ -+ if (pfa_table->len > PAGE_SIZE) { -+ unsigned len = roundup(pfa_table->len, PAGE_SIZE); -+ iounmap(pfa_table); -+ pfa_table = (__force void *)ioremap_cache(addr, len); -+ if (!pfa_table) { -+ pr_err("Cannot remap %u bytes PFA table at %x\n", -+ len, addr); -+ return -EIO; -+ } -+ } -+ -+ if (!notest) { -+ int status = pfa_command(0, 0, PFA_CMD_GET_TOPOLOGY, 0); -+ if (status < 0) { -+ pr_err("Test of PFA table failed: %x\n", -status); -+ goto error_unmap; -+ } -+ } -+ -+ pr_info("Found Xeon75xx PFA memory error translation table at %x\n", -+ addr); -+ mb(); -+ cpu_specific_poll = xeon75xx_mce_poll; -+ return 0; -+ -+error_unmap: -+ iounmap(pfa_table); -+ return -ENODEV; -+} -+ -+MODULE_DEVICE_TABLE(pci, bxb_mce_pciids); -+MODULE_LICENSE("GPL v2"); -+MODULE_AUTHOR("Andi Kleen"); -+MODULE_DESCRIPTION("Intel Xeon 75xx specific DIMM error reporting"); -+ -+#ifdef CONFIG_MODULE -+static void __exit xeon75xx_mce_exit(void) -+{ -+ cpu_specific_poll = NULL; -+ wmb(); -+ /* Wait for all machine checks to finish before really unloading */ -+ synchronize_rcu(); -+ iounmap(pfa_table); -+} -+ -+module_init(xeon75xx_mce_init); -+module_exit(xeon75xx_mce_exit); -+#else -+/* When built-in run as soon as the PCI subsystem is up */ -+fs_initcall(xeon75xx_mce_init); -+#endif ---- a/arch/x86/kernel/cpu/mcheck/mce.c -+++ b/arch/x86/kernel/cpu/mcheck/mce.c -@@ -94,6 +94,8 @@ static char *mce_helper_argv[2] = { mc - static 
DECLARE_WAIT_QUEUE_HEAD(mce_wait); - static DEFINE_PER_CPU(struct mce, mces_seen); - static int cpu_missing; -+void (*cpu_specific_poll)(struct mce *); -+EXPORT_SYMBOL_GPL(cpu_specific_poll); - - /* - * CPU/chipset specific EDAC code can register a notifier call here to print -@@ -371,6 +373,11 @@ static void mce_wrmsrl(u32 msr, u64 v) - wrmsrl(msr, v); - } - -+static int under_injection(void) -+{ -+ return __get_cpu_var(injectm).finished; -+} -+ - /* - * Simple lockless ring to communicate PFNs from the exception handler with the - * process context work function. This is vastly simplified because there's -@@ -574,6 +581,10 @@ void machine_check_poll(enum mcp_flags f - - if (!(flags & MCP_TIMESTAMP)) - m.tsc = 0; -+ -+ if (cpu_specific_poll && !under_injection() && !mce_dont_log_ce) -+ cpu_specific_poll(&m); -+ - /* - * Don't get the IP here because it's unlikely to - * have anything to do with the actual error location. ---- a/arch/x86/kernel/e820.c -+++ b/arch/x86/kernel/e820.c -@@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(e820_any_mapped); - * Note: this function only works correct if the e820 table is sorted and - * not-overlapping, which is the case - */ --int __init e820_all_mapped(u64 start, u64 end, unsigned type) -+int e820_all_mapped(u64 start, u64 end, unsigned type) - { - int i; - -@@ -98,6 +98,7 @@ int __init e820_all_mapped(u64 start, u6 - } - return 0; - } -+EXPORT_SYMBOL_GPL(e820_all_mapped); - - /* - * Add a memory region to the kernel e820 map. diff --git a/patches.arch/x86_mce_intel_decode_physical_address_compile_fix.patch b/patches.arch/x86_mce_intel_decode_physical_address_compile_fix.patch deleted file mode 100644 index aa0c677..0000000 --- a/patches.arch/x86_mce_intel_decode_physical_address_compile_fix.patch +++ /dev/null @@ -1,25 +0,0 @@ -From: Andi Kleen -Subject: x86, mce: Xeon75xx specific interface to get corrected memory error information -Patch-Mainline: submitted to x86-tip, added but reverted due to a minor compile issue - which gets fixed by this patch -References: bnc#573380, fate#307738 - -http://lkml.org/lkml/2010/1/23/50 - -Signed-off-by: Thomas Renninger - ---- - arch/x86/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -921,7 +921,7 @@ config X86_MCE_INTEL - - config X86_MCE_XEON75XX - tristate "Intel Xeon 7500 series corrected memory error driver" -- depends on X86_MCE_INTEL -+ depends on X86_MCE_INTEL && PCI - ---help--- - Add support for a Intel Xeon 7500 series specific memory error driver. - This allows to report the DIMM and physical address on a corrected diff --git a/patches.arch/x86_mce_intel_decode_physical_address_rename_fix.patch b/patches.arch/x86_mce_intel_decode_physical_address_rename_fix.patch deleted file mode 100644 index 0bd7300..0000000 --- a/patches.arch/x86_mce_intel_decode_physical_address_rename_fix.patch +++ /dev/null @@ -1,72 +0,0 @@ -From: H. Peter Anvin -Subject: x86, mce: Rename cpu_specific_poll to mce_cpu_specific_poll -Patch-Mainline: submitted to x86-tip, added but reverted due to a minor compile issue - which gets fixed by and incremental patch -References: bnc#573380, fate#307738 - -http://lkml.org/lkml/2010/1/22/99 - -cpu_specific_poll is a global variable, and it should have a global -namespace name. Since it is MCE-specific (it takes a struct mce *), -rename it mce_cpu_specific_poll. 
- -Signed-off-by: Thomas Renninger - ---- - arch/x86/kernel/cpu/mcheck/mce-internal.h | 2 +- - arch/x86/kernel/cpu/mcheck/mce-xeon75xx.c | 4 ++-- - arch/x86/kernel/cpu/mcheck/mce.c | 8 ++++---- - 3 files changed, 7 insertions(+), 7 deletions(-) - ---- a/arch/x86/kernel/cpu/mcheck/mce-internal.h -+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h -@@ -28,4 +28,4 @@ extern int mce_ser; - - extern struct mce_bank *mce_banks; - --extern void (*cpu_specific_poll)(struct mce *); -+extern void (*mce_cpu_specific_poll)(struct mce *); ---- a/arch/x86/kernel/cpu/mcheck/mce-xeon75xx.c -+++ b/arch/x86/kernel/cpu/mcheck/mce-xeon75xx.c -@@ -396,7 +396,7 @@ static int __init xeon75xx_mce_init(void - pr_info("Found Xeon75xx PFA memory error translation table at %x\n", - addr); - mb(); -- cpu_specific_poll = xeon75xx_mce_poll; -+ mce_cpu_specific_poll = xeon75xx_mce_poll; - return 0; - - error_unmap: -@@ -412,7 +412,7 @@ MODULE_DESCRIPTION("Intel Xeon 75xx spec - #ifdef CONFIG_MODULE - static void __exit xeon75xx_mce_exit(void) - { -- cpu_specific_poll = NULL; -+ mce_cpu_specific_poll = NULL; - wmb(); - /* Wait for all machine checks to finish before really unloading */ - synchronize_rcu(); ---- a/arch/x86/kernel/cpu/mcheck/mce.c -+++ b/arch/x86/kernel/cpu/mcheck/mce.c -@@ -94,8 +94,8 @@ static char *mce_helper_argv[2] = { mc - static DECLARE_WAIT_QUEUE_HEAD(mce_wait); - static DEFINE_PER_CPU(struct mce, mces_seen); - static int cpu_missing; --void (*cpu_specific_poll)(struct mce *); --EXPORT_SYMBOL_GPL(cpu_specific_poll); -+void (*mce_cpu_specific_poll)(struct mce *); -+EXPORT_SYMBOL_GPL(mce_cpu_specific_poll); - - /* - * CPU/chipset specific EDAC code can register a notifier call here to print -@@ -582,8 +582,8 @@ void machine_check_poll(enum mcp_flags f - if (!(flags & MCP_TIMESTAMP)) - m.tsc = 0; - -- if (cpu_specific_poll && !under_injection() && !mce_dont_log_ce) -- cpu_specific_poll(&m); -+ if (mce_cpu_specific_poll && !under_injection() && !mce_dont_log_ce) -+ mce_cpu_specific_poll(&m); - - /* - * Don't get the IP here because it's unlikely to diff --git a/patches.drivers/0001-drm-i915-Use-spatio-temporal-dithering-on-PCH.patch b/patches.drivers/0001-drm-i915-Use-spatio-temporal-dithering-on-PCH.patch deleted file mode 100644 index 4c87cdd..0000000 --- a/patches.drivers/0001-drm-i915-Use-spatio-temporal-dithering-on-PCH.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 9286a0bc63de32c66d894b45dcf048a072a84cd7 Mon Sep 17 00:00:00 2001 -From: Adam Jackson -Date: Mon, 19 Apr 2010 15:57:25 -0400 -Subject: [PATCH 1/4] drm/i915: Use spatio-temporal dithering on PCH - -Spatial dither is better than nothing, but ST is even better. - -(from ajax's followup message:) - I noticed this with: - - http://ajax.fedorapeople.org/YellowFlower.jpg - - set as my desktop background in Gnome on a 1280x800 machine (in - particular, a Sony Vaio VPCB1 with 6-bit panel and a rather bright black - level). Easiest way to test this is by poking at PIPEACONF with - intel_reg_write directly: - - % sudo intel_reg_write 0x70008 0xc0000040 # no dither - % sudo intel_reg_write 0x70008 0xc0000050 # spatial - % sudo intel_reg_write 0x70008 0xc0000054 # ST - - I notice it especially strongly in the relatively flat dark area in the - top left. Closer than about 18" I can see a noticeable checkerboard - pattern with plain spatial dithering. ST smooths that out; I can still - tell that it's lacking color precision, but it's not offensive. 
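As a quick decode of those three PIPEACONF values: they differ only in the dithering bits this patch goes on to name in i915_reg.h below; treating the 0xc0000040 base as the pipe's existing dither-off configuration is an assumption made purely for illustration.

/* illustration only -- bit names taken from the i915_reg.h hunk below */
#define PIPE_ENABLE_DITHER       (1 << 4)  /* bit 4: dithering on/off        */
#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)  /* bits 3:2 = 00: spatial         */
#define PIPE_DITHER_TYPE_ST01    (1 << 2)  /* bits 3:2 = 01: spatio-temporal */

/* 0xc0000040                                                 -> dither off  */
/* 0xc0000040 | PIPE_ENABLE_DITHER | PIPE_DITHER_TYPE_SPATIAL == 0xc0000050  */
/* 0xc0000040 | PIPE_ENABLE_DITHER | PIPE_DITHER_TYPE_ST01    == 0xc0000054  */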
- -Signed-off-by: Adam Jackson -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/i915/i915_reg.h | 5 ++++- - drivers/gpu/drm/i915/intel_display.c | 10 ++++++---- - 2 files changed, 10 insertions(+), 5 deletions(-) - -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h -index 4cbc521..89b6efc 100644 ---- a/drivers/gpu/drm/i915/i915_reg.h -+++ b/drivers/gpu/drm/i915/i915_reg.h -@@ -1924,7 +1924,10 @@ - /* Display & cursor control */ - - /* dithering flag on Ironlake */ --#define PIPE_ENABLE_DITHER (1 << 4) -+#define PIPE_ENABLE_DITHER (1 << 4) -+#define PIPE_DITHER_TYPE_MASK (3 << 2) -+#define PIPE_DITHER_TYPE_SPATIAL (0 << 2) -+#define PIPE_DITHER_TYPE_ST01 (1 << 2) - /* Pipe A */ - #define PIPEADSL 0x70000 - #define PIPEACONF 0x70008 -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index c7502b6..f1a37d9 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -3321,14 +3321,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - /* set the dithering flag */ - if (IS_I965G(dev)) { - if (dev_priv->lvds_dither) { -- if (HAS_PCH_SPLIT(dev)) -+ if (HAS_PCH_SPLIT(dev)) { - pipeconf |= PIPE_ENABLE_DITHER; -- else -+ pipeconf |= PIPE_DITHER_TYPE_ST01; -+ } else - lvds |= LVDS_ENABLE_DITHER; - } else { -- if (HAS_PCH_SPLIT(dev)) -+ if (HAS_PCH_SPLIT(dev)) { - pipeconf &= ~PIPE_ENABLE_DITHER; -- else -+ pipeconf &= ~PIPE_DITHER_TYPE_MASK; -+ } else - lvds &= ~LVDS_ENABLE_DITHER; - } - } --- -1.7.0.1 - diff --git a/patches.drivers/0002-drm-i915-Honor-sync-polarity-from-VBT-panel-timing-d.patch b/patches.drivers/0002-drm-i915-Honor-sync-polarity-from-VBT-panel-timing-d.patch deleted file mode 100644 index e616e04..0000000 --- a/patches.drivers/0002-drm-i915-Honor-sync-polarity-from-VBT-panel-timing-d.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 7f588d4ca94f4efd146b47cdcb6483edda4886f4 Mon Sep 17 00:00:00 2001 -From: Adam Jackson -Date: Fri, 28 May 2010 17:17:37 -0400 -Subject: [PATCH 2/4] drm/i915: Honor sync polarity from VBT panel timing descriptors - -I'm actually kind of shocked that it works at all otherwise. 
- -Signed-off-by: Adam Jackson -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/i915/intel_bios.c | 10 ++++++++++ - 1 files changed, 10 insertions(+), 0 deletions(-) - -diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c -index f9ba452..8905070 100644 ---- a/drivers/gpu/drm/i915/intel_bios.c -+++ b/drivers/gpu/drm/i915/intel_bios.c -@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, - panel_fixed_mode->clock = dvo_timing->clock * 10; - panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; - -+ if (dvo_timing->hsync_positive) -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; -+ else -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; -+ -+ if (dvo_timing->vsync_positive) -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; -+ else -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; -+ - /* Some VBTs have bogus h/vtotal values */ - if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) - panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; --- -1.7.0.1 - diff --git a/patches.drivers/0003-drm-i915-Add-the-support-of-eDP-on-DP-D-for-Ibex-CPT.patch b/patches.drivers/0003-drm-i915-Add-the-support-of-eDP-on-DP-D-for-Ibex-CPT.patch deleted file mode 100644 index 92f40f3..0000000 --- a/patches.drivers/0003-drm-i915-Add-the-support-of-eDP-on-DP-D-for-Ibex-CPT.patch +++ /dev/null @@ -1,266 +0,0 @@ -From 2fb8b53110fdf163eae9e8a506bf769449e2ee4b Mon Sep 17 00:00:00 2001 -From: Joanna Rutkowska -Date: Tue, 29 Jun 2010 08:34:37 +0200 -Subject: [PATCH 3/4] drm/i915: Add the support of eDP on DP-D for Ibex/CPT - -On some machines the eDP is connected on the PCH DP-D instead of DP-A. - -Signed-off-by: Zhao Yakui - -Conflicts: - - drivers/gpu/drm/i915/intel_dp.c ---- - drivers/gpu/drm/i915/intel_display.c | 2 +- - drivers/gpu/drm/i915/intel_dp.c | 99 ++++++++++++++++++++++++++++++--- - drivers/gpu/drm/i915/intel_drv.h | 1 + - 3 files changed, 92 insertions(+), 10 deletions(-) - -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index f1a37d9..32ae849 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -3073,7 +3073,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - temp |= PIPE_8BPC; - else - temp |= PIPE_6BPC; -- } else if (is_edp) { -+ } else if (is_edp || intel_edp_is_pch(crtc)) { - switch (dev_priv->edp_bpp/3) { - case 8: - temp |= PIPE_8BPC; -diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c -index 77e40cf..c13c3bf 100644 ---- a/drivers/gpu/drm/i915/intel_dp.c -+++ b/drivers/gpu/drm/i915/intel_dp.c -@@ -43,6 +43,7 @@ - #define DP_LINK_CONFIGURATION_SIZE 9 - - #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) -+#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_edpd) - - struct intel_dp_priv { - uint32_t output_reg; -@@ -58,6 +59,7 @@ struct intel_dp_priv { - struct intel_encoder *intel_encoder; - struct i2c_adapter adapter; - struct i2c_algo_dp_aux_data algo; -+ bool is_edpd; - }; - - static void -@@ -130,8 +132,9 @@ intel_dp_link_required(struct drm_device *dev, - struct intel_encoder *intel_encoder, int pixel_clock) - { - struct drm_i915_private *dev_priv = dev->dev_private; -+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - -- if (IS_eDP(intel_encoder)) -+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) - return (pixel_clock * dev_priv->edp_bpp) / 8; - else - return pixel_clock * 3; -@@ -534,14 +537,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den) - } - - static void 
--intel_dp_compute_m_n(int bytes_per_pixel, -+intel_dp_compute_m_n(int bpp, - int nlanes, - int pixel_clock, - int link_clock, - struct intel_dp_m_n *m_n) - { - m_n->tu = 64; -- m_n->gmch_m = pixel_clock * bytes_per_pixel; -+ m_n->gmch_m = (pixel_clock * bpp) >> 3; - m_n->gmch_n = link_clock * nlanes; - intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); - m_n->link_m = pixel_clock; -@@ -549,6 +552,31 @@ intel_dp_compute_m_n(int bytes_per_pixel, - intel_reduce_ratio(&m_n->link_m, &m_n->link_n); - } - -+bool intel_edp_is_pch(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct drm_mode_config *mode_config = &dev->mode_config; -+ struct drm_encoder *encoder; -+ bool ret = false; -+ -+ list_for_each_entry(encoder, &mode_config->encoder_list, head) { -+ struct intel_encoder *intel_encoder; -+ struct intel_dp_priv *dp_priv; -+ -+ if (!encoder || encoder->crtc != crtc) -+ continue; -+ -+ intel_encoder = enc_to_intel_encoder(encoder); -+ dp_priv = intel_encoder->dev_priv; -+ -+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { -+ ret = IS_PCH_eDP(dp_priv); -+ break; -+ } -+ } -+ return ret; -+} -+ - void - intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -@@ -558,7 +586,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_connector *connector; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); -- int lane_count = 4; -+ int lane_count = 4, bpp = 24; - struct intel_dp_m_n m_n; - - /* -@@ -573,6 +601,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { - lane_count = dp_priv->lane_count; -+ if (IS_PCH_eDP(dp_priv)) -+ bpp = dev_priv->edp_bpp; - break; - } - } -@@ -582,7 +612,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - * the number of bytes_per_pixel post-LUT, which we always - * set up for 8-bits of R/G/B, or 3 bytes total. 
- */ -- intel_dp_compute_m_n(3, lane_count, -+ intel_dp_compute_m_n(bpp, lane_count, - mode->clock, adjusted_mode->clock, &m_n); - - if (HAS_PCH_SPLIT(dev)) { -@@ -711,13 +741,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) - if (mode != DRM_MODE_DPMS_ON) { - if (dp_reg & DP_PORT_EN) { - intel_dp_link_down(intel_encoder, dp_priv->DP); -- if (IS_eDP(intel_encoder)) -+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) - ironlake_edp_backlight_off(dev); - } - } else { - if (!(dp_reg & DP_PORT_EN)) { - intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); -- if (IS_eDP(intel_encoder)) -+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) - ironlake_edp_backlight_on(dev); - } - } -@@ -1225,6 +1255,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) - struct intel_encoder *intel_encoder = to_intel_encoder(connector); - struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; -+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - int ret; - - /* We should parse the EDID data and find out if it has an audio sink -@@ -1235,7 +1266,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) - return ret; - - /* if eDP has no EDID, try to use fixed panel mode from VBT */ -- if (IS_eDP(intel_encoder)) { -+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { - if (dev_priv->panel_fixed_mode != NULL) { - struct drm_display_mode *mode; - mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); -@@ -1299,6 +1330,50 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder) - intel_dp_check_link_status(intel_encoder); - } - -+/* Return which DP Port should be selected for Transcoder DP control */ -+int -+intel_trans_dp_port_sel (struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct drm_mode_config *mode_config = &dev->mode_config; -+ struct drm_encoder *encoder; -+ struct intel_encoder *intel_encoder = NULL; -+ -+ list_for_each_entry(encoder, &mode_config->encoder_list, head) { -+ if (encoder->crtc != crtc) -+ continue; -+ -+ intel_encoder = enc_to_intel_encoder(encoder); -+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { -+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; -+ return dp_priv->output_reg; -+ } -+ } -+ return -1; -+} -+ -+static bool intel_dpd_is_edp(struct drm_device *dev) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ struct child_device_config *p_child; -+ int i, ret = false; -+ -+ if (!dev_priv->child_dev_num) -+ return false; -+ -+ for (i = 0; i < dev_priv->child_dev_num; i++) { -+ p_child = dev_priv->child_dev + i; -+ if (p_child->device_type != DEVICE_TYPE_eDP) -+ continue; -+ -+ if (p_child->dvo_port == PORT_IDPD) { -+ ret = true; -+ break; -+ } -+ } -+ return ret; -+} -+ - void - intel_dp_init(struct drm_device *dev, int output_reg) - { -@@ -1320,6 +1395,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) - DRM_MODE_CONNECTOR_DisplayPort); - drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); - -+ dp_priv->is_edpd = false; - if (output_reg == DP_A) - intel_encoder->type = INTEL_OUTPUT_EDP; - else -@@ -1335,6 +1411,11 @@ intel_dp_init(struct drm_device *dev, int output_reg) - if (IS_eDP(intel_encoder)) - intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); - -+ if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D)) { -+ if (intel_dpd_is_edp(dev)) -+ dp_priv->is_edpd = true; -+ } -+ - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); - connector->interlace_allowed = true; - 
connector->doublescan_allowed = 0; -@@ -1383,7 +1464,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) - intel_encoder->ddc_bus = &dp_priv->adapter; - intel_encoder->hot_plug = intel_dp_hot_plug; - -- if (output_reg == DP_A) { -+ if ((output_reg == DP_A) || IS_PCH_eDP(dp_priv)) { - /* initialize panel mode from VBT if available for eDP */ - if (dev_priv->lfp_lvds_vbt_mode) { - dev_priv->panel_fixed_mode = -diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h -index e302537..0858a17 100644 ---- a/drivers/gpu/drm/i915/intel_drv.h -+++ b/drivers/gpu/drm/i915/intel_drv.h -@@ -175,6 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); - void - intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -+extern bool intel_edp_is_pch(struct drm_crtc *crtc); - extern void intel_edp_link_config (struct intel_encoder *, int *, int *); - - --- -1.7.0.1 - diff --git a/patches.drivers/0004-drm-i915-Configure-the-PIPECONF-dither-correctly-for.patch b/patches.drivers/0004-drm-i915-Configure-the-PIPECONF-dither-correctly-for.patch deleted file mode 100644 index fe5116c..0000000 --- a/patches.drivers/0004-drm-i915-Configure-the-PIPECONF-dither-correctly-for.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 46e3e699294d3fe4fecb08d697bb29addab29576 Mon Sep 17 00:00:00 2001 -From: Zhao Yakui -Date: Fri, 28 May 2010 20:28:41 +0800 -Subject: [PATCH 4/4] drm/i915: Configure the PIPECONF dither correctly for eDP - -The non-8 BPC can be used for the eDP output device that is connected through -DP-A or DP-D on PCH. In such case we should set the PIPECONF dither correctly. - -Signed-off-by: Zhao Yakui ---- - drivers/gpu/drm/i915/intel_display.c | 11 +++++++++++ - 1 files changed, 11 insertions(+), 0 deletions(-) - -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index 32ae849..49c9663 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -3239,6 +3239,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - /* setup pipeconf */ - pipeconf = I915_READ(pipeconf_reg); - -+ if (HAS_PCH_SPLIT(dev) && (is_edp || intel_edp_is_pch(crtc))) { -+ /* configure the dither correctly for eDP */ -+ pipeconf &= ~PIPE_DITHER_TYPE_MASK; -+ if ((pipeconf & PIPE_BPC_MASK) != PIPE_8BPC) { -+ pipeconf |= PIPE_ENABLE_DITHER; -+ pipeconf |= PIPE_DITHER_TYPE_ST01; -+ } else { -+ pipeconf &= ~PIPE_ENABLE_DITHER; -+ } -+ } -+ - /* Set up the display plane register */ - dspcntr = DISPPLANE_GAMMA_ENABLE; - --- -1.7.0.1 - diff --git a/patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl b/patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl new file mode 100644 index 0000000..4a4c134 --- /dev/null +++ b/patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl @@ -0,0 +1,35 @@ +From 4a122c10fbfe9020df469f0f669da129c5757671 Mon Sep 17 00:00:00 2001 +From: Dan Rosenberg +Date: Thu, 17 Mar 2011 18:32:24 -0400 +Subject: [PATCH] ALSA: sound/pci/asihpi: check adapter index in hpi_ioctl +Git-commit: 4a122c10fbfe9020df469f0f669da129c5757671 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git +Patch-mainline: 2.6.39-rc1 +References: bnc#680816 + +The user-supplied index into the adapters array needs to be checked, or +an out-of-bounds kernel pointer could be accessed and used, leading to +potentially exploitable memory corruption. 
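The one-hunk fix that follows is small, but the pattern it applies — validate a user-supplied index before using it to address a fixed-size kernel array — generalizes. A self-contained userspace C sketch of that pattern, with invented names and an arbitrary bound (an illustration, not the asihpi code itself):

#include <stdio.h>

#define MAX_ADAPTERS 20                  /* arbitrary bound for the sketch */

static int adapters[MAX_ADAPTERS];

/* Reject any index outside [0, MAX_ADAPTERS) before forming a pointer. */
static int lookup_adapter(unsigned int index, int **out)
{
	if (index >= MAX_ADAPTERS)
		return -1;               /* the driver returns -EINVAL here */
	*out = &adapters[index];
	return 0;
}

int main(void)
{
	int *a;
	printf("index 3  -> %d\n", lookup_adapter(3, &a));   /* accepted */
	printf("index 99 -> %d\n", lookup_adapter(99, &a));  /* rejected */
	return 0;
}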
+ +Signed-off-by: Dan Rosenberg +Cc: +Signed-off-by: Takashi Iwai + +--- + sound/pci/asihpi/hpioctl.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/sound/pci/asihpi/hpioctl.c ++++ b/sound/pci/asihpi/hpioctl.c +@@ -155,6 +155,11 @@ + goto out; + } + ++ if (hm->h.adapter_index >= HPI_MAX_ADAPTERS) { ++ err = -EINVAL; ++ goto out; ++ } ++ + pa = &adapters[hm->h.adapter_index]; + hr->h.size = 0; + if (hm->h.object == HPI_OBJ_SUBSYSTEM) { diff --git a/patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo b/patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo new file mode 100644 index 0000000..dbc74f9 --- /dev/null +++ b/patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo @@ -0,0 +1,96 @@ +From c6b358748e19ce7e230b0926ac42696bc485a562 Mon Sep 17 00:00:00 2001 +From: Takashi Iwai +Date: Mon, 28 Mar 2011 12:05:31 +0200 +Subject: [PATCH] ALSA: hda - Fix pin-config of Gigabyte mobo +Git-commit: c6b358748e19ce7e230b0926ac42696bc485a562 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git +Patch-mainline: (to be) 2.6.39-rc2 +References: bnc#677256 + +Use pin-fix instead of the static quirk for Gigabyte mobos 1458:a002. + +Bugzilla: https://bugzilla.novell.com/show_bug.cgi?id=677256 +Signed-off-by: Takashi Iwai + +--- + sound/pci/hda/patch_realtek.c | 21 ++++++++++++++++++--- + 1 file changed, 18 insertions(+), 3 deletions(-) + +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -9932,7 +9932,6 @@ static struct snd_pci_quirk alc882_cfg_t + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), + SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), + SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch), +- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG), + + SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG), + SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG), +@@ -10769,6 +10768,7 @@ enum { + PINFIX_LENOVO_Y530, + PINFIX_PB_M5210, + PINFIX_ACER_ASPIRE_7736, ++ PINFIX_GIGABYTE_880GM, + }; + + static const struct alc_fixup alc882_fixups[] = { +@@ -10800,6 +10800,13 @@ static const struct alc_fixup alc882_fix + .type = ALC_FIXUP_SKU, + .v.sku = ALC_FIXUP_SKU_IGNORE, + }, ++ [PINFIX_GIGABYTE_880GM] = { ++ .type = ALC_FIXUP_PINS, ++ .v.pins = (const struct alc_pincfg[]) { ++ { 0x14, 0x1114410 }, /* set as speaker */ ++ { } ++ } ++ }, + }; + + static struct snd_pci_quirk alc882_fixup_tbl[] = { +@@ -10807,6 +10814,7 @@ static struct snd_pci_quirk alc882_fixup + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), + SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), + SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), ++ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", PINFIX_GIGABYTE_880GM), + {} + }; + +@@ -18851,8 +18859,6 @@ static struct snd_pci_quirk alc662_cfg_t + ALC662_3ST_6ch_DIG), + SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB20x", ALC662_AUTO), + SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10), +- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L", +- ALC662_3ST_6ch_DIG), + SND_PCI_QUIRK(0x152d, 0x2304, "Quanta WH1", ALC663_ASUS_H13), + SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG), + SND_PCI_QUIRK(0x1631, 0xc10c, "PB RS65", ALC663_ASUS_M51VA), +@@ -19526,6 +19532,7 @@ enum { + ALC662_FIXUP_IDEAPAD, + ALC272_FIXUP_MARIO, + ALC662_FIXUP_CZC_P10T, ++ ALC662_FIXUP_GIGABYTE, + }; + + static const struct alc_fixup alc662_fixups[] = { +@@ -19554,12 +19561,20 @@ static 
const struct alc_fixup alc662_fix + {} + } + }, ++ [ALC662_FIXUP_GIGABYTE] = { ++ .type = ALC_FIXUP_PINS, ++ .v.pins = (const struct alc_pincfg[]) { ++ { 0x14, 0x1114410 }, /* set as speaker */ ++ { } ++ } ++ }, + }; + + static struct snd_pci_quirk alc662_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), + SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), + SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), ++ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", ALC662_FIXUP_GIGABYTE), + SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), + SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), diff --git a/patches.drivers/alsa-hda-0019-Increase-default-buffer-size b/patches.drivers/alsa-hda-0019-Increase-default-buffer-size new file mode 100644 index 0000000..4dd093c --- /dev/null +++ b/patches.drivers/alsa-hda-0019-Increase-default-buffer-size @@ -0,0 +1,22 @@ +From: Takashi Iwai +Subject: ALSA: hda - Increase the default buffer size +Patch-mainline: Never +References: 682725 + +Signed-off-by: Takashi Iwai + +--- + sound/pci/hda/hda_intel.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2057,7 +2057,7 @@ + /* buffer pre-allocation */ + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, + snd_dma_pci_data(chip->pci), +- 1024 * 64, 32 * 1024 * 1024); ++ 1024 * 1024, 32 * 1024 * 1024); + return 0; + } + diff --git a/patches.drivers/bnx2-entropy-source.patch b/patches.drivers/bnx2-entropy-source.patch index dc5dc9d..02a86de 100644 --- a/patches.drivers/bnx2-entropy-source.patch +++ b/patches.drivers/bnx2-entropy-source.patch @@ -28,7 +28,7 @@ Signed-off-by: Brandon Philips static int disable_msi = 0; module_param(disable_msi, int, 0); -@@ -6081,6 +6085,9 @@ bnx2_request_irq(struct bnx2 *bp) +@@ -6116,6 +6120,9 @@ bnx2_request_irq(struct bnx2 *bp) else flags = IRQF_SHARED; diff --git a/patches.drivers/driver-core-add-devname-module-aliases-to-allow-module-on-demand-auto-loading.patch b/patches.drivers/driver-core-add-devname-module-aliases-to-allow-module-on-demand-auto-loading.patch deleted file mode 100644 index 346fb42..0000000 --- a/patches.drivers/driver-core-add-devname-module-aliases-to-allow-module-on-demand-auto-loading.patch +++ /dev/null @@ -1,178 +0,0 @@ -From 578454ff7eab61d13a26b568f99a89a2c9edc881 Mon Sep 17 00:00:00 2001 -From: Kay Sievers -Date: Thu, 20 May 2010 18:07:20 +0200 -Subject: driver core: add devname module aliases to allow module on-demand auto-loading -Patch-mainline: 2.6.35 - -From: Kay Sievers - -commit 578454ff7eab61d13a26b568f99a89a2c9edc881 upstream. - -This adds: - alias: devname: -to some common kernel modules, which will allow the on-demand loading -of the kernel module when the device node is accessed. - -Ideally all these modules would be compiled-in, but distros seems too -much in love with their modularization that we need to cover the common -cases with this new facility. It will allow us to remove a bunch of pretty -useless init scripts and modprobes from init scripts. - -The static device node aliases will be carried in the module itself. The -program depmod will extract this information to a file in the module directory: - $ cat /lib/modules/2.6.34-00650-g537b60d-dirty/modules.devname - # Device nodes to trigger on-demand module loading. 
- microcode cpu/microcode c10:184 - fuse fuse c10:229 - ppp_generic ppp c108:0 - tun net/tun c10:200 - dm_mod mapper/control c10:235 - -Udev will pick up the depmod created file on startup and create all the -static device nodes which the kernel modules specify, so that these modules -get automatically loaded when the device node is accessed: - $ /sbin/udevd --debug - ... - static_dev_create_from_modules: mknod '/dev/cpu/microcode' c10:184 - static_dev_create_from_modules: mknod '/dev/fuse' c10:229 - static_dev_create_from_modules: mknod '/dev/ppp' c108:0 - static_dev_create_from_modules: mknod '/dev/net/tun' c10:200 - static_dev_create_from_modules: mknod '/dev/mapper/control' c10:235 - udev_rules_apply_static_dev_perms: chmod '/dev/net/tun' 0666 - udev_rules_apply_static_dev_perms: chmod '/dev/fuse' 0666 - -A few device nodes are switched to statically allocated numbers, to allow -the static nodes to work. This might also useful for systems which still run -a plain static /dev, which is completely unsafe to use with any dynamic minor -numbers. - -Note: -The devname aliases must be limited to the *common* and *single*instance* -device nodes, like the misc devices, and never be used for conceptually limited -systems like the loop devices, which should rather get fixed properly and get a -control node for losetup to talk to, instead of creating a random number of -device nodes in advance, regardless if they are ever used. - -This facility is to hide the mess distros are creating with too modualized -kernels, and just to hide that these modules are not compiled-in, and not to -paper-over broken concepts. Thanks! :) - -Cc: Greg Kroah-Hartman -Cc: David S. Miller -Cc: Miklos Szeredi -Cc: Chris Mason -Cc: Alasdair G Kergon -Cc: Tigran Aivazian -Cc: Ian Kent -Signed-Off-By: Kay Sievers -Signed-off-by: Greg Kroah-Hartman - ---- a/Documentation/devices.txt -+++ b/Documentation/devices.txt -@@ -443,6 +443,8 @@ Your cooperation is appreciated. 
- 231 = /dev/snapshot System memory snapshot device - 232 = /dev/kvm Kernel-based virtual machine (hardware virtualization extensions) - 233 = /dev/kmview View-OS A process with a view -+ 234 = /dev/btrfs-control Btrfs control device -+ 235 = /dev/autofs Autofs control device - 240-254 Reserved for local use - 255 Reserved for MISC_DYNAMIC_MINOR - -diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c -index 2cd8c54..fa6551d 100644 ---- a/arch/x86/kernel/microcode_core.c -+++ b/arch/x86/kernel/microcode_core.c -@@ -260,6 +260,7 @@ static void microcode_dev_exit(void) - } - - MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); -+MODULE_ALIAS("devname:cpu/microcode"); - #else - #define microcode_dev_init() 0 - #define microcode_dev_exit() do { } while (0) -diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c -index 5441688..c5f8eb1 100644 ---- a/drivers/net/ppp_generic.c -+++ b/drivers/net/ppp_generic.c -@@ -2926,5 +2926,5 @@ EXPORT_SYMBOL(ppp_output_wakeup); - EXPORT_SYMBOL(ppp_register_compressor); - EXPORT_SYMBOL(ppp_unregister_compressor); - MODULE_LICENSE("GPL"); --MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR); --MODULE_ALIAS("/dev/ppp"); -+MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); -+MODULE_ALIAS("devname:ppp"); -diff --git a/drivers/net/tun.c b/drivers/net/tun.c -index 97b2553..005cad6 100644 ---- a/drivers/net/tun.c -+++ b/drivers/net/tun.c -@@ -1649,3 +1649,4 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); - MODULE_AUTHOR(DRV_COPYRIGHT); - MODULE_LICENSE("GPL"); - MODULE_ALIAS_MISCDEV(TUN_MINOR); -+MODULE_ALIAS("devname:net/tun"); -diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c -index d29b7f6..d832062 100644 ---- a/fs/autofs4/dev-ioctl.c -+++ b/fs/autofs4/dev-ioctl.c -@@ -736,11 +736,14 @@ static const struct file_operations _dev_ioctl_fops = { - }; - - static struct miscdevice _autofs_dev_ioctl_misc = { -- .minor = MISC_DYNAMIC_MINOR, -+ .minor = AUTOFS_MINOR, - .name = AUTOFS_DEVICE_NAME, - .fops = &_dev_ioctl_fops - }; - -+MODULE_ALIAS_MISCDEV(AUTOFS_MINOR); -+MODULE_ALIAS("devname:autofs"); -+ - /* Register/deregister misc character device */ - int autofs_dev_ioctl_init(void) - { -diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c -index 1866dff..2909a03 100644 ---- a/fs/btrfs/super.c -+++ b/fs/btrfs/super.c -@@ -832,11 +832,14 @@ static const struct file_operations btrfs_ctl_fops = { - }; - - static struct miscdevice btrfs_misc = { -- .minor = MISC_DYNAMIC_MINOR, -+ .minor = BTRFS_MINOR, - .name = "btrfs-control", - .fops = &btrfs_ctl_fops - }; - -+MODULE_ALIAS_MISCDEV(BTRFS_MINOR); -+MODULE_ALIAS("devname:btrfs-control"); -+ - static int btrfs_interface_init(void) - { - return misc_register(&btrfs_misc); -diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c -index eb7e942..e53df5e 100644 ---- a/fs/fuse/dev.c -+++ b/fs/fuse/dev.c -@@ -18,6 +18,7 @@ - #include - - MODULE_ALIAS_MISCDEV(FUSE_MINOR); -+MODULE_ALIAS("devname:fuse"); - - static struct kmem_cache *fuse_req_cachep; - -diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h -index 8b5f7cc..b631c46 100644 ---- a/include/linux/miscdevice.h -+++ b/include/linux/miscdevice.h -@@ -31,6 +31,8 @@ - #define FUSE_MINOR 229 - #define KVM_MINOR 232 - #define VHOST_NET_MINOR 233 -+#define BTRFS_MINOR 234 -+#define AUTOFS_MINOR 235 - #define MISC_DYNAMIC_MINOR 255 - - struct device; diff --git a/patches.drivers/drm-nouveau-Don-t-clear-AGPCMD-completely-on-INIT_RE.patch b/patches.drivers/drm-nouveau-Don-t-clear-AGPCMD-completely-on-INIT_RE.patch deleted file mode 100644 index c343430..0000000 --- 
a/patches.drivers/drm-nouveau-Don-t-clear-AGPCMD-completely-on-INIT_RE.patch +++ /dev/null @@ -1,35 +0,0 @@ -From feacc14de65224ccda1d8fae5140cdf043a151b0 Mon Sep 17 00:00:00 2001 -From: Francisco Jerez -Date: Thu, 17 Jun 2010 12:42:14 +0200 -Subject: [PATCH] drm/nouveau: Don't clear AGPCMD completely on INIT_RESET. - -We just need to clear the SBA and ENABLE bits to reset the AGP -controller: If the AGP bridge was configured to use "fast writes", -clearing the FW bit would break the subsequent MMIO writes and -eventually end with a lockup. - -Note that all the BIOSes I've seen do the same as we did (it works for -them because they don't use MMIO), OTOH the blob leaves FW untouched. - -Signed-off-by: Francisco Jerez ---- - drivers/gpu/drm/nouveau/nouveau_bios.c | 3 ++- - 1 files changed, 2 insertions(+), 1 deletions(-) - -diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c -index abc382a..7c983d8 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bios.c -+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c -@@ -1910,7 +1910,8 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - /* no iexec->execute check by design */ - - pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19); -- bios_wr32(bios, NV_PBUS_PCI_NV_19, 0); -+ bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00); -+ - bios_wr32(bios, reg, value1); - - udelay(10); --- -1.7.0.1 - diff --git a/patches.drivers/drm-nouveau-allow-cursor-image-and-position-to-survi.patch b/patches.drivers/drm-nouveau-allow-cursor-image-and-position-to-survi.patch deleted file mode 100644 index b3151f3..0000000 --- a/patches.drivers/drm-nouveau-allow-cursor-image-and-position-to-survi.patch +++ /dev/null @@ -1,110 +0,0 @@ -From d83809c6fdb908ba708382c9a506f6647d1fa86d Mon Sep 17 00:00:00 2001 -From: Maarten Maathuis -Date: Sun, 9 May 2010 14:49:52 +0200 -Subject: [PATCH] drm/nouveau: allow cursor image and position to survive suspend - -- This isn't triggered yet on a normal kernel, because it still does a VT -switch, but it seemed like a good idea to fix this now. 
- -Tested-by: Maxim Levitsky -Signed-off-by: Maarten Maathuis ---- - drivers/gpu/drm/nouveau/nouveau_crtc.h | 2 ++ - drivers/gpu/drm/nouveau/nouveau_drv.c | 29 +++++++++++++++++++++++++++++ - drivers/gpu/drm/nouveau/nv04_cursor.c | 1 + - drivers/gpu/drm/nouveau/nv50_cursor.c | 1 + - 4 files changed, 33 insertions(+), 0 deletions(-) - -diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h -index 49fa7b2..cb1ce2a 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_crtc.h -+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h -@@ -40,6 +40,8 @@ struct nouveau_crtc { - int sharpness; - int last_dpms; - -+ int cursor_saved_x, cursor_saved_y; -+ - struct { - int cpp; - bool blanked; -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c -index 1de974a..4bccba3 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.c -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c -@@ -177,6 +177,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) - nouveau_bo_unpin(nouveau_fb->nvbo); - } - -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -+ -+ nouveau_bo_unmap(nv_crtc->cursor.nvbo); -+ nouveau_bo_unpin(nv_crtc->cursor.nvbo); -+ } -+ - NV_INFO(dev, "Evicting buffers...\n"); - ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); - -@@ -318,12 +325,34 @@ nouveau_pci_resume(struct pci_dev *pdev) - nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); - } - -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -+ int ret; -+ -+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); -+ if (!ret) -+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo); -+ if (ret) -+ NV_ERROR(dev, "Could not pin/map cursor.\n"); -+ } -+ - if (dev_priv->card_type < NV_50) { - nv04_display_restore(dev); - NVLockVgaCrtcs(dev, false); - } else - nv50_display_init(dev); - -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -+ -+ nv_crtc->cursor.set_offset(nv_crtc, -+ nv_crtc->cursor.nvbo->bo.offset - -+ dev_priv->vm_vram_base); -+ -+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, -+ nv_crtc->cursor_saved_y); -+ } -+ - /* Force CLUT to get re-loaded during modeset */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c -index 89a91b9..aaf3de3 100644 ---- a/drivers/gpu/drm/nouveau/nv04_cursor.c -+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c -@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) - static void - nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) - { -+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; - NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, - NV_PRAMDAC_CU_START_POS, - XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | -diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c -index 753e723..03ad7ab 100644 ---- a/drivers/gpu/drm/nouveau/nv50_cursor.c -+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c -@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) - { - struct drm_device *dev = nv_crtc->base.dev; - -+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; - nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), - ((y & 0xFFFF) << 16) | (x & 0xFFFF)); - /* Needed to make the cursor 
move. */ --- -1.7.0.1 - diff --git a/patches.drivers/e1000-enhance-frame-fragment-detection.patch b/patches.drivers/e1000-enhance-frame-fragment-detection.patch deleted file mode 100644 index 31f47e9..0000000 --- a/patches.drivers/e1000-enhance-frame-fragment-detection.patch +++ /dev/null @@ -1,68 +0,0 @@ -From: Brandeburg, Jesse -Subject: [PATCH] e1000: enhance frame fragment detection -References: bnc#567376, CVE-2009-4536 -Patch-Mainline: Yes - -A security discussion was recently given: -http://events.ccc.de/congress/2009/Fahrplan//events/3596.en.html And a patch -that I submitted awhile back was brought up. Apparently some of their testing -revealed that they were able to force a buffer fragment in e1000 in which the -trailing fragment was greater than 4 bytes. As a result the fragment check I -introduced failed to detect the fragement and a partial invalid frame was -passed up into the network stack. I've written this patch to correct it. I'm -in the process of testing it now, but it makes good logical sense to me. -Effectively it maintains a per-adapter state variable which detects a non-EOP -frame, and discards it and subsequent non-EOP frames leading up to _and_ -_including_ the next positive-EOP frame (as it is by definition the last -fragment). This should prevent any and all partial frames from entering the -network stack from e1000. - -Signed-off-by: Jesse Brandeburg -Signed-off-by: Neil Horman -Signed-off-by: Brandon Philips - ---- - - drivers/net/e1000/e1000.h | 2 ++ - drivers/net/e1000/e1000_main.c | 13 +++++++++++-- - 2 files changed, 13 insertions(+), 2 deletions(-) - - ---- a/drivers/net/e1000/e1000.h -+++ b/drivers/net/e1000/e1000.h -@@ -326,6 +326,8 @@ struct e1000_adapter { - /* for ioport free */ - int bars; - int need_ioport; -+ -+ bool discarding; - }; - - enum e1000_state_t { ---- a/drivers/net/e1000/e1000_main.c -+++ b/drivers/net/e1000/e1000_main.c -@@ -3834,13 +3834,22 @@ static bool e1000_clean_rx_irq(struct e1 - - length = le16_to_cpu(rx_desc->length); - /* !EOP means multiple descriptors were used to store a single -- * packet, also make sure the frame isn't just CRC only */ -- if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { -+ * packet, if thats the case we need to toss it. 
In fact, we -+ * to toss every packet with the EOP bit clear and the next -+ * frame that _does_ have the EOP bit set, as it is by -+ * definition only a frame fragment -+ */ -+ if (unlikely(!(status & E1000_RXD_STAT_EOP))) -+ adapter->discarding = true; -+ -+ if (adapter->discarding) { - /* All receives must fit into a single buffer */ - E1000_DBG("%s: Receive packet consumed multiple" - " buffers\n", netdev->name); - /* recycle */ - buffer_info->skb = skb; -+ if (status & E1000_RXD_STAT_EOP) -+ adapter->discarding = false; - goto next_desc; - } - diff --git a/patches.drivers/e1000-entropy-source.patch b/patches.drivers/e1000-entropy-source.patch index d0ec715..d1e6c9b 100644 --- a/patches.drivers/e1000-entropy-source.patch +++ b/patches.drivers/e1000-entropy-source.patch @@ -33,9 +33,9 @@ Signed-off-by: Brandon Philips +MODULE_PARM_DESC(entropy, "Allow e1000 to populate the /dev/random entropy pool"); + /** - * e1000_init_module - Driver Registration Routine - * -@@ -262,6 +266,9 @@ static int e1000_request_irq(struct e100 + * e1000_get_hw_dev - return device + * used by hardware layer to print debugging information +@@ -272,6 +276,9 @@ static int e1000_request_irq(struct e100 int irq_flags = IRQF_SHARED; int err; diff --git a/patches.drivers/e1000e-enhance-frame-fragment-detection.patch b/patches.drivers/e1000e-enhance-frame-fragment-detection.patch deleted file mode 100644 index a68ecee..0000000 --- a/patches.drivers/e1000e-enhance-frame-fragment-detection.patch +++ /dev/null @@ -1,142 +0,0 @@ -From: Neil Horman -Subject: [PATCH] e1000e: enhance frame fragment detection -References: bnc#567376, CVE-2009-4538 - -A security discussion was recently given: -http://events.ccc.de/congress/2009/Fahrplan//events/3596.en.html And a patch -that I submitted awhile back was brought up. Apparently some of their testing -revealed that they were able to force a buffer fragment in e1000e in which the -trailing fragment was greater than 4 bytes. As a result the fragment check I -introduced failed to detect the fragement and a partial invalid frame was -passed up into the network stack. I've written this patch to correct it. I'm -in the process of testing it now, but it makes good logical sense to me. -Effectively it maintains a per-adapter state variable which detects a non-EOP -frame, and discards it and subsequent non-EOP frames leading up to _and_ -_including_ the next positive-EOP frame (as it is by definition the last -fragment). 
This should prevent any and all partial frames from entering the -network stack from e1000e - -Signed-off-by: Neil Horman -Signed-off-by: Brandon Philips ---- - drivers/net/e1000e/e1000.h | 3 ++- - drivers/net/e1000e/netdev.c | 13 +++++++++++-- - 2 files changed, 13 insertions(+), 3 deletions(-) - -Index: linux-2.6.31-openSUSE-11.2/drivers/net/e1000e/e1000.h -=================================================================== ---- linux-2.6.31-openSUSE-11.2.orig/drivers/net/e1000e/e1000.h -+++ linux-2.6.31-openSUSE-11.2/drivers/net/e1000e/e1000.h -@@ -412,7 +412,8 @@ struct e1000_info { - enum e1000_state_t { - __E1000_TESTING, - __E1000_RESETTING, -- __E1000_DOWN -+ __E1000_DOWN, -+ __E1000_DISCARDING - }; - - enum latency_range { -Index: linux-2.6.31-openSUSE-11.2/drivers/net/e1000e/netdev.c -=================================================================== ---- linux-2.6.31-openSUSE-11.2.orig/drivers/net/e1000e/netdev.c -+++ linux-2.6.31-openSUSE-11.2/drivers/net/e1000e/netdev.c -@@ -483,12 +483,21 @@ static bool e1000_clean_rx_irq(struct e1 - length = le16_to_cpu(rx_desc->length); - - /* !EOP means multiple descriptors were used to store a single -- * packet, also make sure the frame isn't just CRC only */ -- if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { -+ * packet, if thats the case we need to toss it. In fact, we -+ * to toss every packet with the EOP bit clear and the next -+ * frame that _does_ have the EOP bit set, as it is by -+ * definition only a frame fragment -+ */ -+ if (unlikely(!(status & E1000_RXD_STAT_EOP))) -+ set_bit(__E1000_DISCARDING, &adapter->state); -+ -+ if (test_bit(__E1000_DISCARDING, &adapter->state)) { - /* All receives must fit into a single buffer */ - e_dbg("Receive packet consumed multiple buffers\n"); - /* recycle */ - buffer_info->skb = skb; -+ if (status & E1000_RXD_STAT_EOP) -+ clear_bit(__E1000_DISCARDING, &adapter->state); - goto next_desc; - } - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/patches.drivers/e1000e-entropy-source.patch b/patches.drivers/e1000e-entropy-source.patch index 738e9b2..07f7542 100644 --- a/patches.drivers/e1000e-entropy-source.patch +++ b/patches.drivers/e1000e-entropy-source.patch @@ -17,11 +17,9 @@ Signed-off-by: Brandon Philips drivers/net/e1000e/param.c | 4 ++++ 3 files changed, 14 insertions(+), 5 deletions(-) -Index: linux-2.6.34-master/drivers/net/e1000e/e1000.h -=================================================================== ---- linux-2.6.34-master.orig/drivers/net/e1000e/e1000.h -+++ linux-2.6.34-master/drivers/net/e1000e/e1000.h -@@ -466,6 +466,7 @@ extern void e1000e_reset_interrupt_capab +--- a/drivers/net/e1000e/e1000.h ++++ b/drivers/net/e1000e/e1000.h +@@ -467,6 +467,7 @@ extern void e1000e_reset_interrupt_capab extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); extern unsigned int copybreak; @@ -29,11 +27,9 @@ Index: linux-2.6.34-master/drivers/net/e1000e/e1000.h extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); -Index: linux-2.6.34-master/drivers/net/e1000e/netdev.c -=================================================================== ---- linux-2.6.34-master.orig/drivers/net/e1000e/netdev.c -+++ linux-2.6.34-master/drivers/net/e1000e/netdev.c -@@ -1496,8 +1496,8 @@ static int e1000_request_msix(struct e10 +--- a/drivers/net/e1000e/netdev.c ++++ b/drivers/net/e1000e/netdev.c +@@ -1847,8 +1847,8 @@ static int e1000_request_msix(struct e10 else 
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, @@ -44,7 +40,7 @@ Index: linux-2.6.34-master/drivers/net/e1000e/netdev.c if (err) goto out; adapter->rx_ring->itr_register = E1000_EITR_82574(vector); -@@ -1538,6 +1538,7 @@ static int e1000_request_irq(struct e100 +@@ -1889,6 +1889,7 @@ static int e1000_request_irq(struct e100 { struct net_device *netdev = adapter->netdev; int err; @@ -52,7 +48,7 @@ Index: linux-2.6.34-master/drivers/net/e1000e/netdev.c if (adapter->msix_entries) { err = e1000_request_msix(adapter); -@@ -1549,7 +1550,8 @@ static int e1000_request_irq(struct e100 +@@ -1900,7 +1901,8 @@ static int e1000_request_irq(struct e100 e1000e_set_interrupt_capability(adapter); } if (adapter->flags & FLAG_MSI_ENABLED) { @@ -62,7 +58,7 @@ Index: linux-2.6.34-master/drivers/net/e1000e/netdev.c netdev->name, netdev); if (!err) return err; -@@ -1559,8 +1561,10 @@ static int e1000_request_irq(struct e100 +@@ -1910,8 +1912,10 @@ static int e1000_request_irq(struct e100 adapter->int_mode = E1000E_INT_MODE_LEGACY; } @@ -75,10 +71,8 @@ Index: linux-2.6.34-master/drivers/net/e1000e/netdev.c if (err) e_err("Unable to allocate interrupt, Error: %d\n", err); -Index: linux-2.6.34-master/drivers/net/e1000e/param.c -=================================================================== ---- linux-2.6.34-master.orig/drivers/net/e1000e/param.c -+++ linux-2.6.34-master/drivers/net/e1000e/param.c +--- a/drivers/net/e1000e/param.c ++++ b/drivers/net/e1000e/param.c @@ -31,6 +31,10 @@ #include "e1000.h" diff --git a/patches.drivers/ehea-modinfo.patch b/patches.drivers/ehea-modinfo.patch index f2f9049..560f473 100644 --- a/patches.drivers/ehea-modinfo.patch +++ b/patches.drivers/ehea-modinfo.patch @@ -13,9 +13,9 @@ instead of the lhea-"root" entry of the device-tree --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c -@@ -111,6 +111,19 @@ static int __devinit ehea_probe_adapter( +@@ -112,6 +112,19 @@ static int __devinit ehea_probe_adapter( - static int __devexit ehea_remove(struct of_device *dev); + static int __devexit ehea_remove(struct platform_device *dev); +static struct of_device_id ehea_module_device_table[] = { + { @@ -33,11 +33,11 @@ instead of the lhea-"root" entry of the device-tree static struct of_device_id ehea_device_table[] = { { .name = "lhea", -@@ -118,7 +131,6 @@ static struct of_device_id ehea_device_t +@@ -119,7 +132,6 @@ static struct of_device_id ehea_device_t }, {}, }; -MODULE_DEVICE_TABLE(of, ehea_device_table); static struct of_platform_driver ehea_driver = { - .name = "ehea", + .driver = { diff --git a/patches.drivers/elousb.patch b/patches.drivers/elousb.patch index efc7e17..6d25c8b 100644 --- a/patches.drivers/elousb.patch +++ b/patches.drivers/elousb.patch @@ -18,30 +18,30 @@ Acked-by: Jiri Kosina --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c -@@ -1567,6 +1567,8 @@ static const struct hid_device_id hid_ig - { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, +@@ -1643,6 +1643,8 @@ static const struct hid_device_id hid_ig { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4000U) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4500U) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h -@@ -171,7 +171,9 @@ - #define USB_VENDOR_ID_DRAGONRISE 0x0079 +@@ -216,7 +216,9 @@ + #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 #define USB_VENDOR_ID_ELO 0x04E7 +#define USB_DEVICE_ID_ELO_4000U 0x0009 #define USB_DEVICE_ID_ELO_TS2700 0x0020 +#define USB_DEVICE_ID_ELO_4500U 0x0030 - #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f - #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 + #define USB_VENDOR_ID_EMS 0x2006 + #define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig -@@ -193,6 +193,18 @@ config TOUCHSCREEN_ELO +@@ -214,6 +214,18 @@ config TOUCHSCREEN_ELO To compile this driver as a module, choose M here: the module will be called elo. @@ -62,14 +62,14 @@ Acked-by: Jiri Kosina select SERIO --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile -@@ -17,6 +17,7 @@ obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dyn +@@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += h obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o +obj-$(CONFIG_TOUCHSCREEN_ELOUSB) += elousb.o obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o - obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o + obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o --- /dev/null +++ b/drivers/input/touchscreen/elousb.c @@ -0,0 +1,305 @@ diff --git a/patches.drivers/igb-entropy-source.patch b/patches.drivers/igb-entropy-source.patch index 4b58900..4758516 100644 --- a/patches.drivers/igb-entropy-source.patch +++ b/patches.drivers/igb-entropy-source.patch @@ -17,7 +17,7 @@ Signed-off-by: Brandon Philips --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c -@@ -60,6 +60,10 @@ static const struct e1000_info *igb_info +@@ -61,6 +61,10 @@ static const struct e1000_info *igb_info [board_82575] = &e1000_82575_info, }; @@ -26,9 +26,9 @@ Signed-off-by: Brandon Philips +MODULE_PARM_DESC(entropy, "Allow igb to populate the /dev/random entropy pool"); + static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, -@@ -587,7 +591,8 @@ static int igb_request_msix(struct igb_a + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, +@@ -897,7 +901,8 @@ static int igb_request_msix(struct igb_a int i, err = 0, vector = 0; err = request_irq(adapter->msix_entries[vector].vector, @@ -38,7 +38,7 @@ Signed-off-by: Brandon Philips if (err) goto out; vector++; -@@ -882,6 +887,10 @@ static int igb_request_irq(struct igb_ad +@@ -1194,6 +1199,10 @@ static int igb_request_irq(struct igb_ad struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err = 0; @@ -49,7 +49,7 @@ Signed-off-by: Brandon Philips if (adapter->msix_entries) { err = igb_request_msix(adapter); -@@ -916,7 +925,7 @@ static int igb_request_irq(struct igb_ad +@@ -1228,7 +1237,7 @@ static int igb_request_irq(struct igb_ad } if (adapter->flags & IGB_FLAG_HAS_MSI) { @@ -58,7 +58,7 @@ Signed-off-by: Brandon Philips netdev->name, adapter); if (!err) goto request_done; -@@ -926,7 +935,8 @@ static int igb_request_irq(struct 
igb_ad +@@ -1238,7 +1247,8 @@ static int igb_request_irq(struct igb_ad adapter->flags &= ~IGB_FLAG_HAS_MSI; } diff --git a/patches.drivers/input-Add-LED-support-to-Synaptics-device b/patches.drivers/input-Add-LED-support-to-Synaptics-device index f6ec12f..3f624ec 100644 --- a/patches.drivers/input-Add-LED-support-to-Synaptics-device +++ b/patches.drivers/input-Add-LED-support-to-Synaptics-device @@ -26,7 +26,7 @@ Signed-off-by: Takashi Iwai --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig -@@ -19,6 +19,7 @@ +@@ -19,6 +19,7 @@ config MOUSE_PS2 select SERIO_LIBPS2 select SERIO_I8042 if X86 select SERIO_GSCPS2 if GSC @@ -34,7 +34,7 @@ Signed-off-by: Takashi Iwai help Say Y here if you have a PS/2 mouse connected to your system. This includes the standard 2 or 3-button PS/2 mouse, as well as PS/2 -@@ -67,6 +68,14 @@ +@@ -67,6 +68,14 @@ config MOUSE_PS2_SYNAPTICS If unsure, say Y. @@ -47,19 +47,19 @@ Signed-off-by: Takashi Iwai + This will enable LED class driver to control the LED device. + config MOUSE_PS2_LIFEBOOK - bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED + bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT default y --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -28,6 +28,7 @@ - #include + #include #include #include +#include #include #include "psmouse.h" #include "synaptics.h" -@@ -335,6 +336,110 @@ +@@ -353,6 +354,110 @@ static void synaptics_pt_create(struct p serio_register_port(serio); } @@ -170,7 +170,7 @@ Signed-off-by: Takashi Iwai /***************************************************************************** * Functions to interpret the absolute mode packets ****************************************************************************/ -@@ -622,6 +727,7 @@ +@@ -647,6 +752,7 @@ static void set_input_params(struct inpu static void synaptics_disconnect(struct psmouse *psmouse) { @@ -178,7 +178,7 @@ Signed-off-by: Takashi Iwai synaptics_reset(psmouse); kfree(psmouse->private); psmouse->private = NULL; -@@ -653,6 +759,8 @@ +@@ -678,6 +784,8 @@ static int synaptics_reconnect(struct ps return -1; } @@ -187,7 +187,7 @@ Signed-off-by: Takashi Iwai return 0; } -@@ -727,6 +835,9 @@ +@@ -752,6 +860,9 @@ int synaptics_init(struct psmouse *psmou SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c); @@ -199,7 +199,7 @@ Signed-off-by: Takashi Iwai /* --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h -@@ -94,6 +94,8 @@ +@@ -97,6 +97,8 @@ struct synaptics_hw_state { signed char scroll; }; @@ -208,10 +208,10 @@ Signed-off-by: Takashi Iwai struct synaptics_data { /* Data read from the touchpad */ unsigned long int model_id; /* Model-ID */ -@@ -107,6 +109,7 @@ - unsigned char pkt_type; /* packet type - old, new, etc */ - unsigned char mode; /* current mode byte */ - int scroll; +@@ -110,6 +112,7 @@ struct synaptics_data { + struct serio *pt_port; /* Pass-through serio port */ + + struct synaptics_hw_state mt; /* current gesture packet */ + struct synaptics_led *led; }; diff --git a/patches.drivers/ixgbe-entropy-source.patch b/patches.drivers/ixgbe-entropy-source.patch index 015d60b..f674b35 100644 --- a/patches.drivers/ixgbe-entropy-source.patch +++ b/patches.drivers/ixgbe-entropy-source.patch @@ -12,12 +12,12 @@ be activated by the administrator. 
Signed-off-by: Brandon Philips --- - drivers/net/ixgbe/ixgbe_main.c | 25 +++++++++++++++++++++---- - 1 file changed, 21 insertions(+), 4 deletions(-) + drivers/net/ixgbe/ixgbe_main.c | 22 +++++++++++++++++++--- + 1 file changed, 19 insertions(+), 3 deletions(-) --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c -@@ -55,6 +55,11 @@ static const char ixgbe_driver_string[] +@@ -56,6 +56,11 @@ static const char ixgbe_driver_string[] const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; @@ -29,44 +29,44 @@ Signed-off-by: Brandon Philips static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, [board_82599] = &ixgbe_82599_info, -@@ -1717,6 +1722,7 @@ static int ixgbe_request_msix_irqs(struc +@@ -2317,6 +2322,7 @@ static int ixgbe_request_msix_irqs(struc irqreturn_t (*handler)(int, void *); int i, vector, q_vectors, err; - int ri=0, ti=0; + int ri = 0, ti = 0; + int irq_flags; /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -@@ -1732,20 +1738,26 @@ static int ixgbe_request_msix_irqs(struc - for (vector = 0; vector < q_vectors; vector++) { - handler = SET_HANDLER(adapter->q_vector[vector]); +@@ -2334,22 +2340,27 @@ static int ixgbe_request_msix_irqs(struc + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + handler = SET_HANDLER(q_vector); + irq_flags = 0; - if(handler == &ixgbe_msix_clean_rx) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "rx", ri++); + if (handler == &ixgbe_msix_clean_rx) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + if (entropy) + irq_flags = IRQF_SAMPLE_RANDOM; - } - else if(handler == &ixgbe_msix_clean_tx) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "tx", ti++); - } -- else -+ else { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "TxRx", vector); + } else if (handler == &ixgbe_msix_clean_tx) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else if (handler == &ixgbe_msix_clean_many) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + if (entropy) + irq_flags = IRQF_SAMPLE_RANDOM; -+ } - + ti++; + } else { + /* skip this unused q_vector */ + continue; + } err = request_irq(adapter->msix_entries[vector].vector, -- handler, 0, adapter->name[vector], -+ handler, irq_flags, adapter->name[vector], - adapter->q_vector[vector]); +- handler, 0, q_vector->name, ++ handler, irq_flags, q_vector->name, + q_vector); if (err) { - DPRINTK(PROBE, ERR, -@@ -1931,14 +1943,19 @@ static int ixgbe_request_irq(struct ixgb + e_err(probe, "request_irq failed for MSIX interrupt " +@@ -2563,14 +2574,19 @@ static int ixgbe_request_irq(struct ixgb { struct net_device *netdev = adapter->netdev; int err; @@ -80,11 +80,11 @@ Signed-off-by: Brandon Philips } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, + err = request_irq(adapter->pdev->irq, ixgbe_intr, irq_flags, - netdev->name, netdev); + netdev->name, netdev); } else { - err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, + irq_flags |= IRQF_SHARED; + err = request_irq(adapter->pdev->irq, ixgbe_intr, irq_flags, - netdev->name, netdev); + netdev->name, netdev); } diff --git a/patches.drivers/libata-add-waits-for-govault b/patches.drivers/libata-add-waits-for-govault deleted file mode 100644 
index 13bd3e5..0000000 --- a/patches.drivers/libata-add-waits-for-govault +++ /dev/null @@ -1,35 +0,0 @@ -From: Tejun Heo -Date: Wed, 7 Feb 2007 12:37:41 -0800 -Subject: [PATCH] libata: add waits for GoVault -References: 246451 -Patch-mainline: not yet - -Iomega GoVault drives need specific waits here and there. Upstream -approach hasn't been determined yet. This is temp solution from Gary -Hade. Read the following thread for details. - -http://thread.gmane.org/gmane.linux.ide/14545/focus=14663 - -With recent changes in the reset sequence (ATA_TMOUT_FF_WAIT and -prefer-hardreset), the only thing which needs adjustment is -ATA_TMOUT_FF_WAIT (the prereset wait part is unnecessary as the wait -is necessary only for softreset when SCR registers are accessible and -in those cases libata now always uses hardreset which doesn't require -such wait). - -Signed-off-by: Tejun Heo ---- - include/linux/libata.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/include/linux/libata.h -+++ b/include/linux/libata.h -@@ -260,7 +260,7 @@ enum { - * HHD424020F7SV00. Increase to 2secs when parallel probing - * is in place. - */ -- ATA_TMOUT_FF_WAIT = 800, -+ ATA_TMOUT_FF_WAIT = 2000, - - /* Spec mandates to wait for ">= 2ms" before checking status - * after reset. We wait 150ms, because that was the magic diff --git a/patches.drivers/libata-unlock-hpa-by-default b/patches.drivers/libata-unlock-hpa-by-default index 2cae4ed..8fc78e7 100644 --- a/patches.drivers/libata-unlock-hpa-by-default +++ b/patches.drivers/libata-unlock-hpa-by-default @@ -13,7 +13,7 @@ Signed-off-by: Tejun Heo --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c -@@ -139,7 +139,7 @@ int libata_fua = 0; +@@ -138,7 +138,7 @@ int libata_fua = 0; module_param_named(fua, libata_fua, int, 0444); MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)"); diff --git a/patches.drivers/megaraid-mbox-fix-SG_IO b/patches.drivers/megaraid-mbox-fix-SG_IO index 41058ac..16e188e 100644 --- a/patches.drivers/megaraid-mbox-fix-SG_IO +++ b/patches.drivers/megaraid-mbox-fix-SG_IO @@ -21,7 +21,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c -@@ -1585,13 +1585,20 @@ megaraid_mbox_build_cmd(adapter_t *adapt +@@ -1586,13 +1586,20 @@ megaraid_mbox_build_cmd(adapter_t *adapt case MODE_SENSE: { struct scatterlist *sgl; @@ -45,7 +45,7 @@ Signed-off-by: Hannes Reinecke } else { con_log(CL_ANN, (KERN_WARNING -@@ -2329,9 +2336,20 @@ megaraid_mbox_dpc(unsigned long devp) +@@ -2330,9 +2337,20 @@ megaraid_mbox_dpc(unsigned long devp) if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0 && IS_RAID_CH(raid_dev, scb->dev_channel)) { diff --git a/patches.drivers/pci-disable-msi-on-K8M800 b/patches.drivers/pci-disable-msi-on-K8M800 deleted file mode 100644 index 325b2ef..0000000 --- a/patches.drivers/pci-disable-msi-on-K8M800 +++ /dev/null @@ -1,30 +0,0 @@ -From: Tejun Heo -Subject: [PATCH] pci: disable MSI on VIA K8M800 -References: bnc#599508 -Patch-Mainline: Pending for 2.6.35 and -stable - -MSI delivery from on-board ahci controller doesn't work on K8M800. At -this point, it's unclear whether the culprit is with the ahci -controller or the host bridge. Given the track record and considering -the rather minimal impact of MSI, disabling it seems reasonable. 
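The K8M800 patch being dropped here uses the kernel's PCI fixup tables: a quirk routine keyed on vendor/device ID that runs at the "final" enumeration stage, as the DECLARE_PCI_FIXUP_FINAL line in the hunk below shows. As a rough sketch of the same pattern, here is a per-device variant; the real quirk_disable_all_msi disables MSI system-wide for the VIA bridge, and the IDs below are placeholders, not the actual K8M800 entry.

#include <linux/pci.h>

/* Illustrative fixup only: flag one device whose MSI delivery is known bad.
 * Vendor/device IDs are placeholders; the actual patch hooks
 * quirk_disable_all_msi to the VIA host bridge instead.
 */
static void quirk_note_broken_msi(struct pci_dev *dev)
{
	dev_info(&dev->dev, "MSI unreliable on this device, disabling it\n");
	dev->no_msi = 1;        /* per-device flag honoured by the MSI core */
}
DECLARE_PCI_FIXUP_FINAL(0xdead /* placeholder vendor */,
			0xbeef /* placeholder device */,
			quirk_note_broken_msi);
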
- -Signed-off-by: Tejun Heo -Reported-by: Rainer Hurtado Navarro -Cc: stable@kernel.org -Signed-off-by: Tejun Heo ---- - drivers/pci/quirks.c | 1 + - 1 file changed, 1 insertion(+) - -Index: linux-2.6.34-master/drivers/pci/quirks.c -=================================================================== ---- linux-2.6.34-master.orig/drivers/pci/quirks.c -+++ linux-2.6.34-master/drivers/pci/quirks.c -@@ -2112,6 +2112,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AT - DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); - DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); - DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi); - - /* Disable MSI on chipsets that are known to not support it */ - static void __devinit quirk_disable_msi(struct pci_dev *dev) diff --git a/patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch b/patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch new file mode 100644 index 0000000..dd63298 --- /dev/null +++ b/patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch @@ -0,0 +1,707 @@ +From foo@baz Wed Feb 9 13:35:10 PST 2011 +Date: Wed, 09 Feb 2011 13:35:10 -0800 +To: Greg KH +From: Greg Kroah-Hartman +Subject: Staging: samsung-laptop: add support for lots of laptops +References: bnc#661682 +Patch-mainline: 2.6.39 + +This is a backport of the upstream version of the driver that added support for +all samsung laptop devices. + +Signed-off-by: Greg Kroah-Hartman + +diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c +index 701e8d5..51ec621 100644 +--- a/drivers/staging/samsung-laptop/samsung-laptop.c ++++ b/drivers/staging/samsung-laptop/samsung-laptop.c +@@ -1,5 +1,5 @@ + /* +- * Samsung N130 Laptop driver ++ * Samsung Laptop driver + * + * Copyright (C) 2009 Greg Kroah-Hartman (gregkh@suse.de) + * Copyright (C) 2009 Novell Inc. +@@ -33,51 +33,6 @@ + */ + #define MAX_BRIGHT 0x07 + +-/* Brightness is 0 - 8, as described above. Value 0 is for the BIOS to use */ +-#define GET_BRIGHTNESS 0x00 +-#define SET_BRIGHTNESS 0x01 +- +-/* first byte: +- * 0x00 - wireless is off +- * 0x01 - wireless is on +- * second byte: +- * 0x02 - 3G is off +- * 0x03 - 3G is on +- * TODO, verify 3G is correct, that doesn't seem right... +- */ +-#define GET_WIRELESS_BUTTON 0x02 +-#define SET_WIRELESS_BUTTON 0x03 +- +-/* 0 is off, 1 is on */ +-#define GET_BACKLIGHT 0x04 +-#define SET_BACKLIGHT 0x05 +- +-/* +- * 0x80 or 0x00 - no action +- * 0x81 - recovery key pressed +- */ +-#define GET_RECOVERY_METHOD 0x06 +-#define SET_RECOVERY_METHOD 0x07 +- +-/* 0 is low, 1 is high */ +-#define GET_PERFORMANCE_LEVEL 0x08 +-#define SET_PERFORMANCE_LEVEL 0x09 +- +-/* +- * Tell the BIOS that Linux is running on this machine. 
+- * 81 is on, 80 is off +- */ +-#define SET_LINUX 0x0a +- +- +-#define MAIN_FUNCTION 0x4c49 +- +-#define SABI_HEADER_PORT 0x00 +-#define SABI_HEADER_RE_MEM 0x02 +-#define SABI_HEADER_IFACEFUNC 0x03 +-#define SABI_HEADER_EN_MEM 0x04 +-#define SABI_HEADER_DATA_OFFSET 0x05 +-#define SABI_HEADER_DATA_SEGMENT 0x07 + + #define SABI_IFACE_MAIN 0x00 + #define SABI_IFACE_SUB 0x02 +@@ -89,6 +44,173 @@ struct sabi_retval { + u8 retval[20]; + }; + ++struct sabi_header_offsets { ++ u8 port; ++ u8 re_mem; ++ u8 iface_func; ++ u8 en_mem; ++ u8 data_offset; ++ u8 data_segment; ++}; ++ ++struct sabi_commands { ++ /* ++ * Brightness is 0 - 8, as described above. ++ * Value 0 is for the BIOS to use ++ */ ++ u8 get_brightness; ++ u8 set_brightness; ++ ++ /* ++ * first byte: ++ * 0x00 - wireless is off ++ * 0x01 - wireless is on ++ * second byte: ++ * 0x02 - 3G is off ++ * 0x03 - 3G is on ++ * TODO, verify 3G is correct, that doesn't seem right... ++ */ ++ u8 get_wireless_button; ++ u8 set_wireless_button; ++ ++ /* 0 is off, 1 is on */ ++ u8 get_backlight; ++ u8 set_backlight; ++ ++ /* ++ * 0x80 or 0x00 - no action ++ * 0x81 - recovery key pressed ++ */ ++ u8 get_recovery_mode; ++ u8 set_recovery_mode; ++ ++ /* ++ * on seclinux: 0 is low, 1 is high, ++ * on swsmi: 0 is normal, 1 is silent, 2 is turbo ++ */ ++ u8 get_performance_level; ++ u8 set_performance_level; ++ ++ /* ++ * Tell the BIOS that Linux is running on this machine. ++ * 81 is on, 80 is off ++ */ ++ u8 set_linux; ++}; ++ ++struct sabi_performance_level { ++ const char *name; ++ u8 value; ++}; ++ ++struct sabi_config { ++ const char *test_string; ++ u16 main_function; ++ struct sabi_header_offsets header_offsets; ++ struct sabi_commands commands; ++ struct sabi_performance_level performance_levels[4]; ++}; ++ ++static struct sabi_config sabi_configs[] = { ++ { ++ .test_string = "SECLINUX", ++ ++ .main_function = 0x4c59, ++ ++ .header_offsets = { ++ .port = 0x00, ++ .re_mem = 0x02, ++ .iface_func = 0x03, ++ .en_mem = 0x04, ++ .data_offset = 0x05, ++ .data_segment = 0x07, ++ }, ++ ++ .commands = { ++ .get_brightness = 0x00, ++ .set_brightness = 0x01, ++ ++ .get_wireless_button = 0x02, ++ .set_wireless_button = 0x03, ++ ++ .get_backlight = 0x04, ++ .set_backlight = 0x05, ++ ++ .get_recovery_mode = 0x06, ++ .set_recovery_mode = 0x07, ++ ++ .get_performance_level = 0x08, ++ .set_performance_level = 0x09, ++ ++ .set_linux = 0x0a, ++ }, ++ ++ .performance_levels = { ++ { ++ .name = "silent", ++ .value = 0, ++ }, ++ { ++ .name = "normal", ++ .value = 1, ++ }, ++ { }, ++ }, ++ }, ++ { ++ .test_string = "SwSmi@", ++ ++ .main_function = 0x5843, ++ ++ .header_offsets = { ++ .port = 0x00, ++ .re_mem = 0x04, ++ .iface_func = 0x02, ++ .en_mem = 0x03, ++ .data_offset = 0x05, ++ .data_segment = 0x07, ++ }, ++ ++ .commands = { ++ .get_brightness = 0x10, ++ .set_brightness = 0x11, ++ ++ .get_wireless_button = 0x12, ++ .set_wireless_button = 0x13, ++ ++ .get_backlight = 0x2d, ++ .set_backlight = 0x2e, ++ ++ .get_recovery_mode = 0xff, ++ .set_recovery_mode = 0xff, ++ ++ .get_performance_level = 0x31, ++ .set_performance_level = 0x32, ++ ++ .set_linux = 0xff, ++ }, ++ ++ .performance_levels = { ++ { ++ .name = "normal", ++ .value = 0, ++ }, ++ { ++ .name = "silent", ++ .value = 1, ++ }, ++ { ++ .name = "overclock", ++ .value = 2, ++ }, ++ { }, ++ }, ++ }, ++ { }, ++}; ++ ++static struct sabi_config *sabi_config; ++ + static void __iomem *sabi; + static void __iomem *sabi_iface; + static void __iomem *f0000_segment; +@@ -109,21 +231,21 @@ MODULE_PARM_DESC(debug, "Debug 
enabled or not"); + static int sabi_get_command(u8 command, struct sabi_retval *sretval) + { + int retval = 0; +- u16 port = readw(sabi + SABI_HEADER_PORT); ++ u16 port = readw(sabi + sabi_config->header_offsets.port); + + mutex_lock(&sabi_mutex); + + /* enable memory to be able to write to it */ +- outb(readb(sabi + SABI_HEADER_EN_MEM), port); ++ outb(readb(sabi + sabi_config->header_offsets.en_mem), port); + + /* write out the command */ +- writew(MAIN_FUNCTION, sabi_iface + SABI_IFACE_MAIN); ++ writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); + writew(command, sabi_iface + SABI_IFACE_SUB); + writeb(0, sabi_iface + SABI_IFACE_COMPLETE); +- outb(readb(sabi + SABI_HEADER_IFACEFUNC), port); ++ outb(readb(sabi + sabi_config->header_offsets.iface_func), port); + + /* write protect memory to make it safe */ +- outb(readb(sabi + SABI_HEADER_RE_MEM), port); ++ outb(readb(sabi + sabi_config->header_offsets.re_mem), port); + + /* see if the command actually succeeded */ + if (readb(sabi_iface + SABI_IFACE_COMPLETE) == 0xaa && +@@ -156,22 +278,22 @@ exit: + static int sabi_set_command(u8 command, u8 data) + { + int retval = 0; +- u16 port = readw(sabi + SABI_HEADER_PORT); ++ u16 port = readw(sabi + sabi_config->header_offsets.port); + + mutex_lock(&sabi_mutex); + + /* enable memory to be able to write to it */ +- outb(readb(sabi + SABI_HEADER_EN_MEM), port); ++ outb(readb(sabi + sabi_config->header_offsets.en_mem), port); + + /* write out the command */ +- writew(MAIN_FUNCTION, sabi_iface + SABI_IFACE_MAIN); ++ writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); + writew(command, sabi_iface + SABI_IFACE_SUB); + writeb(0, sabi_iface + SABI_IFACE_COMPLETE); + writeb(data, sabi_iface + SABI_IFACE_DATA); +- outb(readb(sabi + SABI_HEADER_IFACEFUNC), port); ++ outb(readb(sabi + sabi_config->header_offsets.iface_func), port); + + /* write protect memory to make it safe */ +- outb(readb(sabi + SABI_HEADER_RE_MEM), port); ++ outb(readb(sabi + sabi_config->header_offsets.re_mem), port); + + /* see if the command actually succeeded */ + if (readb(sabi_iface + SABI_IFACE_COMPLETE) == 0xaa && +@@ -194,21 +316,21 @@ static void test_backlight(void) + { + struct sabi_retval sretval; + +- sabi_get_command(GET_BACKLIGHT, &sretval); ++ sabi_get_command(sabi_config->commands.get_backlight, &sretval); + printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); + +- sabi_set_command(SET_BACKLIGHT, 0); ++ sabi_set_command(sabi_config->commands.set_backlight, 0); + printk(KERN_DEBUG "backlight should be off\n"); + +- sabi_get_command(GET_BACKLIGHT, &sretval); ++ sabi_get_command(sabi_config->commands.get_backlight, &sretval); + printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); + + msleep(1000); + +- sabi_set_command(SET_BACKLIGHT, 1); ++ sabi_set_command(sabi_config->commands.set_backlight, 1); + printk(KERN_DEBUG "backlight should be on\n"); + +- sabi_get_command(GET_BACKLIGHT, &sretval); ++ sabi_get_command(sabi_config->commands.get_backlight, &sretval); + printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); + } + +@@ -216,21 +338,21 @@ static void test_wireless(void) + { + struct sabi_retval sretval; + +- sabi_get_command(GET_WIRELESS_BUTTON, &sretval); ++ sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); + printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); + +- sabi_set_command(SET_WIRELESS_BUTTON, 0); ++ sabi_set_command(sabi_config->commands.set_wireless_button, 0); + printk(KERN_DEBUG "wireless led should be off\n"); + +- 
sabi_get_command(GET_WIRELESS_BUTTON, &sretval); ++ sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); + printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); + + msleep(1000); + +- sabi_set_command(SET_WIRELESS_BUTTON, 1); ++ sabi_set_command(sabi_config->commands.set_wireless_button, 1); + printk(KERN_DEBUG "wireless led should be on\n"); + +- sabi_get_command(GET_WIRELESS_BUTTON, &sretval); ++ sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); + printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); + } + +@@ -240,7 +362,8 @@ static u8 read_brightness(void) + int user_brightness = 0; + int retval; + +- retval = sabi_get_command(GET_BRIGHTNESS, &sretval); ++ retval = sabi_get_command(sabi_config->commands.get_brightness, ++ &sretval); + if (!retval) + user_brightness = sretval.retval[0]; + if (user_brightness != 0) +@@ -250,7 +373,8 @@ static u8 read_brightness(void) + + static void set_brightness(u8 user_brightness) + { +- sabi_set_command(SET_BRIGHTNESS, user_brightness + 1); ++ sabi_set_command(sabi_config->commands.set_brightness, ++ user_brightness + 1); + } + + static int get_brightness(struct backlight_device *bd) +@@ -263,9 +387,9 @@ static int update_status(struct backlight_device *bd) + set_brightness(bd->props.brightness); + + if (bd->props.power == FB_BLANK_UNBLANK) +- sabi_set_command(SET_BACKLIGHT, 1); ++ sabi_set_command(sabi_config->commands.set_backlight, 1); + else +- sabi_set_command(SET_BACKLIGHT, 0); ++ sabi_set_command(sabi_config->commands.set_backlight, 0); + return 0; + } + +@@ -282,9 +406,9 @@ static int rfkill_set(void *data, bool blocked) + * blocked == true is off + */ + if (blocked) +- sabi_set_command(SET_WIRELESS_BUTTON, 0); ++ sabi_set_command(sabi_config->commands.set_wireless_button, 0); + else +- sabi_set_command(SET_WIRELESS_BUTTON, 1); ++ sabi_set_command(sabi_config->commands.set_wireless_button, 1); + + return 0; + } +@@ -317,47 +441,49 @@ static void destroy_wireless(void) + rfkill_destroy(rfk); + } + +-static ssize_t get_silent_state(struct device *dev, +- struct device_attribute *attr, char *buf) ++static ssize_t get_performance_level(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct sabi_retval sretval; + int retval; ++ int i; + + /* Read the state */ +- retval = sabi_get_command(GET_PERFORMANCE_LEVEL, &sretval); ++ retval = sabi_get_command(sabi_config->commands.get_performance_level, ++ &sretval); + if (retval) + return retval; + + /* The logic is backwards, yeah, lots of fun... 
*/ +- if (sretval.retval[0] == 0) +- retval = 1; +- else +- retval = 0; +- return sprintf(buf, "%d\n", retval); ++ for (i = 0; sabi_config->performance_levels[i].name; ++i) { ++ if (sretval.retval[0] == sabi_config->performance_levels[i].value) ++ return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); ++ } ++ return sprintf(buf, "%s\n", "unknown"); + } + +-static ssize_t set_silent_state(struct device *dev, ++static ssize_t set_performance_level(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) + { +- char value; +- + if (count >= 1) { +- value = buf[0]; +- if ((value == '0') || (value == 'n') || (value == 'N')) { +- /* Turn speed up */ +- sabi_set_command(SET_PERFORMANCE_LEVEL, 0x01); +- } else if ((value == '1') || (value == 'y') || (value == 'Y')) { +- /* Turn speed down */ +- sabi_set_command(SET_PERFORMANCE_LEVEL, 0x00); +- } else { +- return -EINVAL; ++ int i; ++ for (i = 0; sabi_config->performance_levels[i].name; ++i) { ++ struct sabi_performance_level *level = ++ &sabi_config->performance_levels[i]; ++ if (!strncasecmp(level->name, buf, strlen(level->name))) { ++ sabi_set_command(sabi_config->commands.set_performance_level, ++ level->value); ++ break; ++ } + } ++ if (!sabi_config->performance_levels[i].name) ++ return -EINVAL; + } + return count; + } +-static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO, +- get_silent_state, set_silent_state); ++static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO, ++ get_performance_level, set_performance_level); + + + static int __init dmi_check_cb(const struct dmi_system_id *id) +@@ -388,18 +514,113 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { + }, + .callback = dmi_check_cb, + }, ++ { ++ .ident = "X125", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, ++ "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "X125"), ++ DMI_MATCH(DMI_BOARD_NAME, "X125"), ++ }, ++ .callback = dmi_check_cb, ++ }, ++ { ++ .ident = "NC10", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, ++ "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), ++ DMI_MATCH(DMI_BOARD_NAME, "NC10"), ++ }, ++ .callback = dmi_check_cb, ++ }, ++ { ++ .ident = "NP-Q45", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, ++ "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), ++ DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"), ++ }, ++ .callback = dmi_check_cb, ++ }, ++ { ++ .ident = "X360", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, ++ "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "X360"), ++ DMI_MATCH(DMI_BOARD_NAME, "X360"), ++ }, ++ .callback = dmi_check_cb, ++ }, ++ { ++ .ident = "R518", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, ++ "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "R518"), ++ DMI_MATCH(DMI_BOARD_NAME, "R518"), ++ }, ++ .callback = dmi_check_cb, ++ }, ++ { ++ .ident = "N150/N210/N220", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, ++ "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), ++ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), ++ }, ++ .callback = dmi_check_cb, ++ }, ++ { ++ .ident = "R530/R730", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"), ++ DMI_MATCH(DMI_BOARD_NAME, "R530/R730"), ++ }, ++ .callback = dmi_check_cb, ++ }, ++ { ++ .ident = "NF110/NF210/NF310", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), ++ DMI_MATCH(DMI_BOARD_NAME, 
"NF110/NF210/NF310"), ++ }, ++ .callback = dmi_check_cb, ++ }, + { }, + }; + MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); + ++static int find_signature(void __iomem *memcheck, const char *testStr) ++{ ++ int i = 0; ++ int loca; ++ ++ for (loca = 0; loca < 0xffff; loca++) { ++ char temp = readb(memcheck + loca); ++ ++ if (temp == testStr[i]) { ++ if (i == strlen(testStr)-1) ++ break; ++ ++i; ++ } else { ++ i = 0; ++ } ++ } ++ return loca; ++} ++ + static int __init samsung_init(void) + { + struct backlight_properties props; + struct sabi_retval sretval; +- const char *testStr = "SECLINUX"; +- void __iomem *memcheck; + unsigned int ifaceP; +- int pStr; ++ int i; + int loca; + int retval; + +@@ -414,50 +635,44 @@ static int __init samsung_init(void) + return -EINVAL; + } + +- /* Try to find the signature "SECLINUX" in memory to find the header */ +- pStr = 0; +- memcheck = f0000_segment; +- for (loca = 0; loca < 0xffff; loca++) { +- char temp = readb(memcheck + loca); +- +- if (temp == testStr[pStr]) { +- if (pStr == strlen(testStr)-1) +- break; +- ++pStr; +- } else { +- pStr = 0; +- } ++ /* Try to find one of the signatures in memory to find the header */ ++ for (i = 0; sabi_configs[i].test_string != 0; ++i) { ++ sabi_config = &sabi_configs[i]; ++ loca = find_signature(f0000_segment, sabi_config->test_string); ++ if (loca != 0xffff) ++ break; + } ++ + if (loca == 0xffff) { + printk(KERN_ERR "This computer does not support SABI\n"); + goto error_no_signature; +- } ++ } + + /* point to the SMI port Number */ + loca += 1; +- sabi = (memcheck + loca); ++ sabi = (f0000_segment + loca); + + if (debug) { + printk(KERN_DEBUG "This computer supports SABI==%x\n", + loca + 0xf0000 - 6); + printk(KERN_DEBUG "SABI header:\n"); + printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", +- readw(sabi + SABI_HEADER_PORT)); ++ readw(sabi + sabi_config->header_offsets.port)); + printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", +- readb(sabi + SABI_HEADER_IFACEFUNC)); ++ readb(sabi + sabi_config->header_offsets.iface_func)); + printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", +- readb(sabi + SABI_HEADER_EN_MEM)); ++ readb(sabi + sabi_config->header_offsets.en_mem)); + printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", +- readb(sabi + SABI_HEADER_RE_MEM)); ++ readb(sabi + sabi_config->header_offsets.re_mem)); + printk(KERN_DEBUG " SABI data offset = 0x%04x\n", +- readw(sabi + SABI_HEADER_DATA_OFFSET)); ++ readw(sabi + sabi_config->header_offsets.data_offset)); + printk(KERN_DEBUG " SABI data segment = 0x%04x\n", +- readw(sabi + SABI_HEADER_DATA_SEGMENT)); ++ readw(sabi + sabi_config->header_offsets.data_segment)); + } + + /* Get a pointer to the SABI Interface */ +- ifaceP = (readw(sabi + SABI_HEADER_DATA_SEGMENT) & 0x0ffff) << 4; +- ifaceP += readw(sabi + SABI_HEADER_DATA_OFFSET) & 0x0ffff; ++ ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; ++ ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; + sabi_iface = ioremap(ifaceP, 16); + if (!sabi_iface) { + printk(KERN_ERR "Can't remap %x\n", ifaceP); +@@ -470,15 +685,19 @@ static int __init samsung_init(void) + test_backlight(); + test_wireless(); + +- retval = sabi_get_command(GET_BRIGHTNESS, &sretval); ++ retval = sabi_get_command(sabi_config->commands.get_brightness, ++ &sretval); + printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); + } + + /* Turn on "Linux" mode in the BIOS */ +- retval = sabi_set_command(SET_LINUX, 0x81); +- if (retval) { +- printk(KERN_ERR KBUILD_MODNAME ": 
Linux mode was not set!\n"); +- goto error_no_platform; ++ if (sabi_config->commands.set_linux != 0xff) { ++ retval = sabi_set_command(sabi_config->commands.set_linux, ++ 0x81); ++ if (retval) { ++ printk(KERN_ERR KBUILD_MODNAME ": Linux mode was not set!\n"); ++ goto error_no_platform; ++ } + } + + /* knock up a platform device to hang stuff off of */ +@@ -503,7 +722,7 @@ static int __init samsung_init(void) + if (retval) + goto error_no_rfk; + +- retval = device_create_file(&sdev->dev, &dev_attr_silent); ++ retval = device_create_file(&sdev->dev, &dev_attr_performance_level); + if (retval) + goto error_file_create; + +@@ -530,9 +749,10 @@ error_no_signature: + static void __exit samsung_exit(void) + { + /* Turn off "Linux" mode in the BIOS */ +- sabi_set_command(SET_LINUX, 0x80); ++ if (sabi_config->commands.set_linux != 0xff) ++ sabi_set_command(sabi_config->commands.set_linux, 0x80); + +- device_remove_file(&sdev->dev, &dev_attr_silent); ++ device_remove_file(&sdev->dev, &dev_attr_performance_level); + backlight_device_unregister(backlight_device); + destroy_wireless(); + iounmap(sabi_iface); diff --git a/patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch b/patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch index 29f6f58..e356768 100644 --- a/patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch +++ b/patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch @@ -29,11 +29,9 @@ Signed-off-by: Brandon Philips drivers/net/tg3.h | 9 +++++ 2 files changed, 92 insertions(+) -Index: linux-2.6.34-master/drivers/net/tg3.c -=================================================================== ---- linux-2.6.34-master.orig/drivers/net/tg3.c -+++ linux-2.6.34-master/drivers/net/tg3.c -@@ -1956,6 +1956,58 @@ static int tg3_phy_reset(struct tg3 *tp) +--- a/drivers/net/tg3.c ++++ b/drivers/net/tg3.c +@@ -1998,6 +1998,58 @@ static int tg3_phy_reset(struct tg3 *tp) tg3_phy_toggle_apd(tp, false); out: @@ -89,14 +87,14 @@ Index: linux-2.6.34-master/drivers/net/tg3.c + MII_TG3_AUXCTL_ACTL_TX_6DB; + tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); + } - if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { + if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) { tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); -@@ -2018,6 +2070,22 @@ out: + tg3_phydsp_write(tp, 0x201f, 0x2aaa); +@@ -2054,6 +2106,22 @@ out: /* adjust output voltage */ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); } -+ else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { ++ else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 brcmtest; + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &brcmtest) && + !tg3_writephy(tp, MII_TG3_FET_TEST, @@ -115,9 +113,9 @@ Index: linux-2.6.34-master/drivers/net/tg3.c tg3_phy_toggle_automdix(tp, 1); tg3_phy_set_wirespeed(tp); -@@ -3260,6 +3328,15 @@ relink: - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); +@@ -3288,6 +3356,15 @@ relink: + + tg3_phy_eee_adjust(tp, current_link_up); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + if (tp->link_config.active_speed == SPEED_10) @@ -131,7 +129,7 @@ Index: linux-2.6.34-master/drivers/net/tg3.c if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { /* Polled via timer. 
*/ tw32_f(MAC_EVENT, 0); -@@ -13505,9 +13582,11 @@ static int __devinit tg3_get_invariants( +@@ -13411,9 +13488,11 @@ static int __devinit tg3_get_invariants( GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) tp->coalesce_mode |= HOSTCC_MODE_32BYTE; @@ -143,10 +141,10 @@ Index: linux-2.6.34-master/drivers/net/tg3.c err = tg3_mdio_init(tp); if (err) -@@ -14293,6 +14372,10 @@ static char * __devinit tg3_phy_string(s - case TG3_PHY_ID_BCM5718C: return "5718C"; +@@ -14203,6 +14282,10 @@ static char * __devinit tg3_phy_string(s case TG3_PHY_ID_BCM5718S: return "5718S"; case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM50610: return "50610"; + case TG3_PHY_ID_BCM50610M: return "50610M"; + case TG3_PHY_ID_BCMAC131: return "AC131"; @@ -154,11 +152,9 @@ Index: linux-2.6.34-master/drivers/net/tg3.c case TG3_PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; -Index: linux-2.6.34-master/drivers/net/tg3.h -=================================================================== ---- linux-2.6.34-master.orig/drivers/net/tg3.h -+++ linux-2.6.34-master/drivers/net/tg3.h -@@ -2086,6 +2086,7 @@ +--- a/drivers/net/tg3.h ++++ b/drivers/net/tg3.h +@@ -2072,6 +2072,7 @@ #define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 #define MII_TG3_DSP_EXP8_AEDW 0x0200 #define MII_TG3_DSP_EXP75 0x0f75 @@ -166,7 +162,7 @@ Index: linux-2.6.34-master/drivers/net/tg3.h #define MII_TG3_DSP_EXP96 0x0f96 #define MII_TG3_DSP_EXP97 0x0f97 -@@ -2141,6 +2142,8 @@ +@@ -2127,6 +2128,8 @@ #define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010 #define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400 @@ -175,7 +171,7 @@ Index: linux-2.6.34-master/drivers/net/tg3.h #define MII_TG3_TEST1 0x1e #define MII_TG3_TEST1_TRIM_EN 0x0010 #define MII_TG3_TEST1_CRC_EN 0x8000 -@@ -2158,6 +2161,8 @@ +@@ -2144,6 +2147,8 @@ #define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 #define MII_TG3_FET_SHDW_AUXMODE4 0x1a @@ -184,8 +180,8 @@ Index: linux-2.6.34-master/drivers/net/tg3.h #define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008 #define MII_TG3_FET_SHDW_AUXSTAT2 0x1b -@@ -2943,6 +2948,10 @@ struct tg3 { - #define TG3_PHY_ID_BCM57765 0x5c0d8a40 +@@ -2922,6 +2927,10 @@ struct tg3 { + #define TG3_PHY_ID_BCM5719C 0x5c0d8a20 #define TG3_PHY_ID_BCM5906 0xdc00ac40 #define TG3_PHY_ID_BCM8002 0x60010140 +#define TG3_PHY_ID_BCM50610 0xbc050d60 diff --git a/patches.drivers/tg3-entropy-source.patch b/patches.drivers/tg3-entropy-source.patch index b7e6926..9295f2b 100644 --- a/patches.drivers/tg3-entropy-source.patch +++ b/patches.drivers/tg3-entropy-source.patch @@ -18,8 +18,8 @@ Signed-off-by: Brandon Philips - #include #include - #include -@@ -66,6 +65,10 @@ + #include +@@ -67,6 +66,10 @@ #include "tg3.h" @@ -28,9 +28,9 @@ Signed-off-by: Brandon Philips +MODULE_PARM_DESC(entropy, "Allow tg3 to populate the /dev/random entropy pool"); + #define DRV_MODULE_NAME "tg3" - #define DRV_MODULE_VERSION "3.108" - #define DRV_MODULE_RELDATE "February 17, 2010" -@@ -8494,10 +8497,13 @@ restart_timer: + #define TG3_MAJ_NUM 3 + #define TG3_MIN_NUM 116 +@@ -8590,10 +8593,13 @@ restart_timer: static int tg3_request_irq(struct tg3 *tp, int irq_num) { irq_handler_t fn; @@ -45,7 +45,7 @@ Signed-off-by: Brandon Philips if (tp->irq_cnt == 1) name = tp->dev->name; else { -@@ -8510,12 +8516,11 @@ static int tg3_request_irq(struct tg3 *t +@@ -8606,12 +8612,11 @@ static int tg3_request_irq(struct tg3 *t fn = tg3_msi; if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) fn = tg3_msi_1shot; diff --git a/patches.fixes/acpi-cpufreq_fix_cpu_any_notification.patch 
b/patches.fixes/acpi-cpufreq_fix_cpu_any_notification.patch deleted file mode 100644 index 4b04f75..0000000 --- a/patches.fixes/acpi-cpufreq_fix_cpu_any_notification.patch +++ /dev/null @@ -1,40 +0,0 @@ -From: Thomas Renninger -Subject: acpi-cpufreq: Fix CPU_ANY CPUFREQ_{PRE,POST}CHANGE notification -Patch-Mainline: submitted - please revert after 2.6.35 -References: none - -Signed-off-by: Thomas Renninger -CC: venki@google.com -CC: davej@redhat.com -CC: arjan@infradead.org -CC: davej@redhat.com -CC: linux-kernel@vger.kernel.org ---- - arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 4 ++-- - 1 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c -index 4591680..c6de3a9 100644 ---- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c -+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c -@@ -391,7 +391,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, - - freqs.old = perf->states[perf->state].core_frequency * 1000; - freqs.new = data->freq_table[next_state].frequency; -- for_each_cpu(i, cmd.mask) { -+ for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } -@@ -407,7 +407,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, - } - } - -- for_each_cpu(i, cmd.mask) { -+ for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } --- -1.6.3 - diff --git a/patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch b/patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch new file mode 100644 index 0000000..8c6b04d --- /dev/null +++ b/patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch @@ -0,0 +1,78 @@ +From: Vasiliy Kulikov +Subject: acpi: ec_sys: access user space with get_user()/put_user() +Patch-Mainline: hopefully still 2.6.36 +References: none + +User space pointer may not be dereferenced. Use get_user()/put_user() +instead and check their return codes. + +Signed-off-by: Vasiliy Kulikov +Signed-off-by: Thomas Renninger +--- + Compile tested. 
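The rule this ec_sys patch enforces is the standard one for read()/write() file operations: a char __user * buffer must never be dereferenced in the kernel; each byte goes through get_user()/put_user(), and a fault after some progress returns the partial count rather than -EFAULT. A condensed sketch of the read-side pattern, with a hypothetical device_read_byte() standing in for ec_read() from the hunk below:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical backend, standing in for ec_read() in the patch below. */
static int device_read_byte(loff_t off, u8 *val);

static ssize_t copy_device_to_user(char __user *buf, size_t count, loff_t *off)
{
	loff_t init_off = *off;

	while (count--) {
		u8 byte;
		int err = device_read_byte(*off, &byte);

		if (err)
			return err;
		/* put_user() returns nonzero (-EFAULT) on a bad user pointer;
		 * report a partial read if some bytes already went out. */
		if (put_user(byte, buf + *off - init_off))
			return (*off - init_off) ? (*off - init_off) : -EFAULT;
		(*off)++;
	}
	return *off - init_off;
}
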
+ + drivers/acpi/ec_sys.c | 18 ++++++++++++++---- + 1 files changed, 14 insertions(+), 4 deletions(-) + +diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c +index 0e869b3..cc007d8 100644 +--- a/drivers/acpi/ec_sys.c ++++ b/drivers/acpi/ec_sys.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include "internal.h" + + MODULE_AUTHOR("Thomas Renninger "); +@@ -43,7 +44,6 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, + * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private; + */ + unsigned int size = EC_SPACE_SIZE; +- u8 *data = (u8 *) buf; + loff_t init_off = *off; + int err = 0; + +@@ -56,9 +56,15 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, + size = count; + + while (size) { +- err = ec_read(*off, &data[*off - init_off]); ++ u8 byte_read; ++ err = ec_read(*off, &byte_read); + if (err) + return err; ++ if (put_user(byte_read, buf + *off - init_off)) { ++ if (*off - init_off) ++ return *off - init_off; /* partial read */ ++ return -EFAULT; ++ } + *off += 1; + size--; + } +@@ -74,7 +80,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, + + unsigned int size = count; + loff_t init_off = *off; +- u8 *data = (u8 *) buf; + int err = 0; + + if (*off >= EC_SPACE_SIZE) +@@ -85,7 +90,12 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, + } + + while (size) { +- u8 byte_write = data[*off - init_off]; ++ u8 byte_write; ++ if (get_user(byte_write, buf + *off - init_off)) { ++ if (*off - init_off) ++ return *off - init_off; /* partial write */ ++ return -EFAULT; ++ } + err = ec_write(*off, byte_write); + if (err) + return err; +-- +1.7.0.4 + diff --git a/patches.fixes/acpi_processor_check_maxcpus.patch b/patches.fixes/acpi_processor_check_maxcpus.patch deleted file mode 100644 index cdc479f..0000000 --- a/patches.fixes/acpi_processor_check_maxcpus.patch +++ /dev/null @@ -1,42 +0,0 @@ -From: Thomas Renninger -Subject: Do not try to set up acpi processor stuff on cores exceeding maxcpus= -References: bnc#601520 -Patch-Mainline: Not yet - -Signed-off-by: Thomas Renninger - ---- - drivers/acpi/processor_driver.c | 5 +++++ - init/main.c | 3 ++- - 2 files changed, 7 insertions(+), 1 deletion(-) - -Index: linux-2.6.34-master/init/main.c -=================================================================== ---- linux-2.6.34-master.orig/init/main.c -+++ linux-2.6.34-master/init/main.c -@@ -124,7 +124,8 @@ static char *ramdisk_execute_command; - - #ifdef CONFIG_SMP - /* Setup configured maximum number of CPUs to activate */ --unsigned int __initdata setup_max_cpus = NR_CPUS; -+unsigned int setup_max_cpus = NR_CPUS; -+EXPORT_SYMBOL(setup_max_cpus); - - /* - * Setup routine for controlling SMP activation -Index: linux-2.6.34-master/drivers/acpi/processor_driver.c -=================================================================== ---- linux-2.6.34-master.orig/drivers/acpi/processor_driver.c -+++ linux-2.6.34-master/drivers/acpi/processor_driver.c -@@ -581,6 +581,11 @@ static int __cpuinit acpi_processor_add( - return 0; - } - -+#ifdef CONFIG_SMP -+ if (pr->id >= setup_max_cpus && pr->id != 0) -+ return 0; -+#endif -+ - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); - - /* diff --git a/patches.fixes/aggressive-zone-reclaim.patch b/patches.fixes/aggressive-zone-reclaim.patch index 1a210f0..c0d1c8c 100644 --- a/patches.fixes/aggressive-zone-reclaim.patch +++ b/patches.fixes/aggressive-zone-reclaim.patch @@ -23,7 +23,7 @@ Signed-off-by: Petr Tesarik --- a/mm/vmscan.c +++ b/mm/vmscan.c 
-@@ -2501,7 +2501,7 @@ int zone_reclaim_mode __read_mostly; +@@ -2515,7 +2515,7 @@ int zone_reclaim_mode __read_mostly; * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. */ @@ -32,7 +32,7 @@ Signed-off-by: Petr Tesarik /* * Percentage of pages in a zone that must be unmapped for zone_reclaim to -@@ -2607,6 +2607,8 @@ static int __zone_reclaim(struct zone *z +@@ -2620,6 +2620,8 @@ static int __zone_reclaim(struct zone *z slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (slab_reclaimable > zone->min_slab_pages) { @@ -41,7 +41,7 @@ Signed-off-by: Petr Tesarik /* * shrink_slab() does not currently allow us to determine how * many pages were freed in this zone. So we take the current -@@ -2617,10 +2619,7 @@ static int __zone_reclaim(struct zone *z +@@ -2630,10 +2632,7 @@ static int __zone_reclaim(struct zone *z * Note that shrink_slab will free memory on all zones and may * take a long time. */ @@ -53,7 +53,7 @@ Signed-off-by: Petr Tesarik /* * Update nr_reclaimed by the number of slab pages we -@@ -2674,11 +2673,7 @@ int zone_reclaim(struct zone *zone, gfp_ +@@ -2687,11 +2686,7 @@ int zone_reclaim(struct zone *zone, gfp_ if (node_state(node_id, N_CPU) && node_id != numa_node_id()) return ZONE_RECLAIM_NOSCAN; diff --git a/patches.fixes/bonding-Incorrect-TX-queue-offset.patch b/patches.fixes/bonding-Incorrect-TX-queue-offset.patch new file mode 100644 index 0000000..b0c0980 --- /dev/null +++ b/patches.fixes/bonding-Incorrect-TX-queue-offset.patch @@ -0,0 +1,61 @@ +From fd0e435b0fe85622f167b84432552885a4856ac8 Mon Sep 17 00:00:00 2001 +From: Phil Oester +Date: Mon, 14 Mar 2011 06:22:04 +0000 +Subject: [PATCH] bonding: Incorrect TX queue offset +Git-commit: fd0e435b0fe85622f167b84432552885a4856ac8 +Patch-mainline: v2.6.39-rc1~468^2~15 +Reference: bnc#687116, CVE-2011-1581 + +When packets come in from a device with >= 16 receive queues +headed out a bonding interface, syslog gets filled with this: + + kernel: bond0 selects TX queue 16, but real number of TX queues is 16 + +because queue_mapping is offset by 1. Adjust return value +to account for the offset. + +This is a revision of my earlier patch (which did not use the +skb_rx_queue_* helpers - thanks to Ben for the suggestion). +Andy submitted a similar patch which emits a pr_warning on +invalid queue selection, but I believe the log spew is +not useful. We can revisit that question in the future, +but in the interim I believe fixing the core problem is +worthwhile. + +Signed-off-by: Phil Oester +Signed-off-by: Andy Gospodarek +Signed-off-by: David S. Miller +Signed-off-by: Brandon Philips + +--- + drivers/net/bonding/bond_main.c | 11 +++++++++-- + 1 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 3ad4f50..a93d941 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -4341,11 +4341,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) + { + /* + * This helper function exists to help dev_pick_tx get the correct +- * destination queue. Using a helper function skips the a call to ++ * destination queue. Using a helper function skips a call to + * skb_tx_hash and will put the skbs in the queue we expect on their + * way down to the bonding driver. + */ +- return skb->queue_mapping; ++ u16 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; ++ ++ if (unlikely(txq >= dev->real_num_tx_queues)) { ++ do ++ txq -= dev->real_num_tx_queues; ++ while (txq >= dev->real_num_tx_queues); ++ } ++ return txq; + } + + static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) +-- +1.7.3.4 + diff --git a/patches.fixes/bridge-module-get-put.patch b/patches.fixes/bridge-module-get-put.patch index 457444b..53d42a8 100644 --- a/patches.fixes/bridge-module-get-put.patch +++ b/patches.fixes/bridge-module-get-put.patch @@ -13,7 +13,7 @@ bridges. --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c -@@ -279,6 +279,11 @@ int br_add_bridge(struct net *net, const +@@ -291,6 +291,11 @@ int br_add_bridge(struct net *net, const if (!dev) return -ENOMEM; @@ -25,7 +25,7 @@ bridges. rtnl_lock(); if (strchr(dev->name, '%')) { ret = dev_alloc_name(dev, dev->name); -@@ -297,6 +302,8 @@ int br_add_bridge(struct net *net, const +@@ -309,6 +314,8 @@ int br_add_bridge(struct net *net, const unregister_netdevice(dev); out: rtnl_unlock(); @@ -34,7 +34,7 @@ bridges. return ret; out_free: -@@ -328,6 +335,8 @@ int br_del_bridge(struct net *net, const +@@ -340,6 +347,8 @@ int br_del_bridge(struct net *net, const del_br(netdev_priv(dev), NULL); rtnl_unlock(); diff --git a/patches.fixes/cdc-phonet-handle-empty-phonet-header.patch b/patches.fixes/cdc-phonet-handle-empty-phonet-header.patch new file mode 100644 index 0000000..cebf493 --- /dev/null +++ b/patches.fixes/cdc-phonet-handle-empty-phonet-header.patch @@ -0,0 +1,74 @@ +From 468c3f924f043cad7a04f4f4d5224a2c9bc886c1 Mon Sep 17 00:00:00 2001 +From: Jiri Slaby +Date: Sun, 13 Mar 2011 06:54:31 +0000 +Subject: NET: cdc-phonet, handle empty phonet header +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Git-commit: 468c3f924f043cad7a04f4f4d5224a2c9bc886c1 +Patch-mainline: yes +References: bnc#673992 + +Currently, for N 5800 XM I get: +cdc_phonet: probe of 1-6:1.10 failed with error -22 + +It's because phonet_header is empty. Extra altsetting looks like +there: +E 05 24 00 01 10 03 24 ab 05 24 06 0a 0b 04 24 fd .$....$..$....$. +E 00 . + +I don't see the header used anywhere so just check if the phonet +descriptor is there, not the structure itself. + +Signed-off-by: Jiri Slaby +Cc: Rémi Denis-Courmont +Cc: David S. Miller +Acked-by: Rémi Denis-Courmont +Signed-off-by: David S. 
Miller +--- + drivers/net/usb/cdc-phonet.c | 9 +++------ + 1 files changed, 3 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c +index 4cf4e36..f967913 100644 +--- a/drivers/net/usb/cdc-phonet.c ++++ b/drivers/net/usb/cdc-phonet.c +@@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) + { + static const char ifname[] = "usbpn%d"; + const struct usb_cdc_union_desc *union_header = NULL; +- const struct usb_cdc_header_desc *phonet_header = NULL; + const struct usb_host_interface *data_desc; + struct usb_interface *data_intf; + struct usb_device *usbdev = interface_to_usbdev(intf); + struct net_device *dev; + struct usbpn_dev *pnd; + u8 *data; ++ int phonet = 0; + int len, err; + + data = intf->altsetting->extra; +@@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) + (struct usb_cdc_union_desc *)data; + break; + case 0xAB: +- if (phonet_header || dlen < 5) +- break; +- phonet_header = +- (struct usb_cdc_header_desc *)data; ++ phonet = 1; + break; + } + } +@@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) + len -= dlen; + } + +- if (!union_header || !phonet_header) ++ if (!union_header || !phonet) + return -EINVAL; + + data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); +-- +1.7.4.1 + diff --git a/patches.fixes/cifs-fix-oops-due-to-null-nameidata b/patches.fixes/cifs-fix-oops-due-to-null-nameidata deleted file mode 100644 index 0dab951..0000000 --- a/patches.fixes/cifs-fix-oops-due-to-null-nameidata +++ /dev/null @@ -1,136 +0,0 @@ -From: Steve French -Subject: [CIFS] Allow null nd (as nfs server uses) on create -References: bnc#593940 -Patch-mainline: queued (in cifs devel git) - -commit fa588e0c57048b3d4bfcd772d80dc0615f83fd35 in cifs-2.6.git - - While creating a file on a server which supports unix extensions - such as Samba, if a file is being created which does not supply - nameidata (i.e. nd is null), cifs client can oops when calling - cifs_posix_open. 
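The oops described here comes from dereferencing nd (and nd->path.mnt) unconditionally, while in-kernel callers such as nfsd create files with a NULL nameidata. The shape of the fix, per the hunks that follow, is to pass the superblock explicitly and treat the mount as optional; a condensed illustration using the patched cifs_posix_open() prototype (posix_create_sketch and its locals are illustrative, not driver code, and assume the cifs-internal declarations shown below):

static int posix_create_sketch(struct inode *dir, struct nameidata *nd,
			       char *full_path, int mode, int oflags, int xid)
{
	/* nd may legitimately be NULL (e.g. creates issued by nfsd). */
	struct vfsmount *mnt = nd ? nd->path.mnt : NULL;
	struct inode *newinode = NULL;
	__u32 oplock = 0;
	__u16 fid;

	/* The superblock now travels separately, so nothing is read through
	 * a NULL nd; cifs_posix_open() only registers a file handle when a
	 * mount was actually supplied. */
	return cifs_posix_open(full_path, &newinode, mnt, dir->i_sb,
			       mode, oflags, &oplock, &fid, xid);
}
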
- -Signed-off-by: Shirish Pargaonkar -Signed-off-by: Steve French -Acked-by: Suresh Jayaraman ---- - fs/cifs/cifsproto.h | 6 ++++-- - fs/cifs/dir.c | 20 ++++++++++++-------- - fs/cifs/file.c | 11 +++++++---- - 3 files changed, 23 insertions(+), 14 deletions(-) - -Index: linux-2.6.33-master/fs/cifs/cifsproto.h -=================================================================== ---- linux-2.6.33-master.orig/fs/cifs/cifsproto.h -+++ linux-2.6.33-master/fs/cifs/cifsproto.h -@@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fil - __u16 fileHandle, struct file *file, - struct vfsmount *mnt, unsigned int oflags); - extern int cifs_posix_open(char *full_path, struct inode **pinode, -- struct vfsmount *mnt, int mode, int oflags, -- __u32 *poplock, __u16 *pnetfid, int xid); -+ struct vfsmount *mnt, -+ struct super_block *sb, -+ int mode, int oflags, -+ __u32 *poplock, __u16 *pnetfid, int xid); - extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, - FILE_UNIX_BASIC_INFO *info, - struct cifs_sb_info *cifs_sb); -Index: linux-2.6.33-master/fs/cifs/dir.c -=================================================================== ---- linux-2.6.33-master.orig/fs/cifs/dir.c -+++ linux-2.6.33-master/fs/cifs/dir.c -@@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode - } - - int cifs_posix_open(char *full_path, struct inode **pinode, -- struct vfsmount *mnt, int mode, int oflags, -- __u32 *poplock, __u16 *pnetfid, int xid) -+ struct vfsmount *mnt, struct super_block *sb, -+ int mode, int oflags, -+ __u32 *poplock, __u16 *pnetfid, int xid) - { - int rc; - FILE_UNIX_BASIC_INFO *presp_data; - __u32 posix_flags = 0; -- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb); -+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - struct cifs_fattr fattr; - - cFYI(1, ("posix open %s", full_path)); -@@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, str - - /* get new inode and set it up */ - if (*pinode == NULL) { -- *pinode = cifs_iget(mnt->mnt_sb, &fattr); -+ *pinode = cifs_iget(sb, &fattr); - if (!*pinode) { - rc = -ENOMEM; - goto posix_open_ret; -@@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, str - cifs_fattr_to_inode(*pinode, &fattr); - } - -- cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags); -+ if (mnt) -+ cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags); - - posix_open_ret: - kfree(presp_data); -@@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct - if (nd && (nd->flags & LOOKUP_OPEN)) - oflags = nd->intent.open.flags; - else -- oflags = FMODE_READ; -+ oflags = FMODE_READ | SMB_O_CREAT; - - if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && - (CIFS_UNIX_POSIX_PATH_OPS_CAP & - le64_to_cpu(tcon->fsUnixInfo.Capability))) { -- rc = cifs_posix_open(full_path, &newinode, nd->path.mnt, -- mode, oflags, &oplock, &fileHandle, xid); -+ rc = cifs_posix_open(full_path, &newinode, -+ nd ? nd->path.mnt : NULL, -+ inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); - /* EIO could indicate that (posix open) operation is not - supported, despite what server claimed in capability - negotation. 
EREMOTE indicates DFS junction, which is not -@@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_ino - (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && - (nd->intent.open.flags & O_CREAT)) { - rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, -+ parent_dir_inode->i_sb, - nd->intent.open.create_mode, - nd->intent.open.flags, &oplock, - &fileHandle, xid); -Index: linux-2.6.33-master/fs/cifs/file.c -=================================================================== ---- linux-2.6.33-master.orig/fs/cifs/file.c -+++ linux-2.6.33-master/fs/cifs/file.c -@@ -298,10 +298,12 @@ int cifs_open(struct inode *inode, struc - (CIFS_UNIX_POSIX_PATH_OPS_CAP & - le64_to_cpu(tcon->fsUnixInfo.Capability))) { - int oflags = (int) cifs_posix_convert_flags(file->f_flags); -+ oflags |= SMB_O_CREAT; - /* can not refresh inode info since size could be stale */ - rc = cifs_posix_open(full_path, &inode, file->f_path.mnt, -- cifs_sb->mnt_file_mode /* ignored */, -- oflags, &oplock, &netfid, xid); -+ inode->i_sb, -+ cifs_sb->mnt_file_mode /* ignored */, -+ oflags, &oplock, &netfid, xid); - if (rc == 0) { - cFYI(1, ("posix open succeeded")); - /* no need for special case handling of setting mode -@@ -513,8 +515,9 @@ reopen_error_exit: - int oflags = (int) cifs_posix_convert_flags(file->f_flags); - /* can not refresh inode info since size could be stale */ - rc = cifs_posix_open(full_path, NULL, file->f_path.mnt, -- cifs_sb->mnt_file_mode /* ignored */, -- oflags, &oplock, &netfid, xid); -+ inode->i_sb, -+ cifs_sb->mnt_file_mode /* ignored */, -+ oflags, &oplock, &netfid, xid); - if (rc == 0) { - cFYI(1, ("posix reopen succeeded")); - goto reopen_success; diff --git a/patches.fixes/compat-make-compat_alloc_user_space-incorporate-the-access_ok b/patches.fixes/compat-make-compat_alloc_user_space-incorporate-the-access_ok deleted file mode 100644 index 5b311ac..0000000 --- a/patches.fixes/compat-make-compat_alloc_user_space-incorporate-the-access_ok +++ /dev/null @@ -1,175 +0,0 @@ -From: H. Peter Anvin -Date: Tue, 7 Sep 2010 23:16:18 +0000 (-0700) -Subject: compat: Make compat_alloc_user_space() incorporate the access_ok() -Git-commit: c41d68a513c71e35a14f66d71782d27a79a81ea6 -References: CVE-2010-3081 bnc#639709 -Patch-mainline: 2.6.36 -Introduced-by: Prior to 2.6.5 - -compat: Make compat_alloc_user_space() incorporate the access_ok() - -compat_alloc_user_space() expects the caller to independently call -access_ok() to verify the returned area. A missing call could -introduce problems on some architectures. - -This patch incorporates the access_ok() check into -compat_alloc_user_space() and also adds a sanity check on the length. -The existing compat_alloc_user_space() implementations are renamed -arch_compat_alloc_user_space() and are used as part of the -implementation of the new global function. - -This patch assumes NULL will cause __get_user()/__put_user() to either -fail or access userspace on all architectures. This should be -followed by checking the return value of compat_access_user_space() -for NULL in the callers, at which time the access_ok() in the callers -can also be removed. - -Reported-by: Ben Hawkes -Signed-off-by: H. Peter Anvin -Acked-by: Benjamin Herrenschmidt -Acked-by: Chris Metcalf -Acked-by: David S. Miller -Acked-by: Ingo Molnar -Acked-by: Thomas Gleixner -Acked-by: Tony Luck -Cc: Andrew Morton -Cc: Arnd Bergmann -Cc: Fenghua Yu -Cc: H. 
Peter Anvin -Cc: Heiko Carstens -Cc: Helge Deller -Cc: James Bottomley -Cc: Kyle McMartin -Cc: Martin Schwidefsky -Cc: Paul Mackerras -Cc: Ralf Baechle -Cc: -Acked-by: Jeff Mahoney ---- - - arch/ia64/include/asm/compat.h | 2 +- - arch/mips/include/asm/compat.h | 2 +- - arch/parisc/include/asm/compat.h | 2 +- - arch/powerpc/include/asm/compat.h | 2 +- - arch/s390/include/asm/compat.h | 2 +- - arch/sparc/include/asm/compat.h | 2 +- - arch/x86/include/asm/compat.h | 2 +- - include/linux/compat.h | 3 +++ - kernel/compat.c | 21 +++++++++++++++++++++ - 9 files changed, 31 insertions(+), 7 deletions(-) - ---- a/arch/ia64/include/asm/compat.h -+++ b/arch/ia64/include/asm/compat.h -@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr) - } - - static __inline__ void __user * --compat_alloc_user_space (long len) -+arch_compat_alloc_user_space (long len) - { - struct pt_regs *regs = task_pt_regs(current); - return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); ---- a/arch/mips/include/asm/compat.h -+++ b/arch/mips/include/asm/compat.h -@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compa - return (u32)(unsigned long)uptr; - } - --static inline void __user *compat_alloc_user_space(long len) -+static inline void __user *arch_compat_alloc_user_space(long len) - { - struct pt_regs *regs = (struct pt_regs *) - ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; ---- a/arch/parisc/include/asm/compat.h -+++ b/arch/parisc/include/asm/compat.h -@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compa - return (u32)(unsigned long)uptr; - } - --static __inline__ void __user *compat_alloc_user_space(long len) -+static __inline__ void __user *arch_compat_alloc_user_space(long len) - { - struct pt_regs *regs = ¤t->thread.regs; - return (void __user *)regs->gr[30]; ---- a/arch/powerpc/include/asm/compat.h -+++ b/arch/powerpc/include/asm/compat.h -@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compa - return (u32)(unsigned long)uptr; - } - --static inline void __user *compat_alloc_user_space(long len) -+static inline void __user *arch_compat_alloc_user_space(long len) - { - struct pt_regs *regs = current->thread.regs; - unsigned long usp = regs->gpr[1]; ---- a/arch/s390/include/asm/compat.h -+++ b/arch/s390/include/asm/compat.h -@@ -181,7 +181,7 @@ static inline int is_compat_task(void) - - #endif - --static inline void __user *compat_alloc_user_space(long len) -+static inline void __user *arch_compat_alloc_user_space(long len) - { - unsigned long stack; - ---- a/arch/sparc/include/asm/compat.h -+++ b/arch/sparc/include/asm/compat.h -@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compa - return (u32)(unsigned long)uptr; - } - --static inline void __user *compat_alloc_user_space(long len) -+static inline void __user *arch_compat_alloc_user_space(long len) - { - struct pt_regs *regs = current_thread_info()->kregs; - unsigned long usp = regs->u_regs[UREG_I6]; ---- a/arch/x86/include/asm/compat.h -+++ b/arch/x86/include/asm/compat.h -@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compa - return (u32)(unsigned long)uptr; - } - --static inline void __user *compat_alloc_user_space(long len) -+static inline void __user *arch_compat_alloc_user_space(long len) - { - struct pt_regs *regs = task_pt_regs(current); - return (void __user *)regs->sp - len; ---- a/include/linux/compat.h -+++ b/include/linux/compat.h -@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvec - const struct compat_iovec __user *uvector, unsigned long nr_segs, - unsigned long fast_segs, struct 
iovec *fast_pointer, - struct iovec **ret_pointer); -+ -+extern void __user *compat_alloc_user_space(unsigned long len); -+ - #endif /* CONFIG_COMPAT */ - #endif /* _LINUX_COMPAT_H */ ---- a/kernel/compat.c -+++ b/kernel/compat.c -@@ -1137,3 +1137,24 @@ compat_sys_sysinfo(struct compat_sysinfo - - return 0; - } -+ -+/* -+ * Allocate user-space memory for the duration of a single system call, -+ * in order to marshall parameters inside a compat thunk. -+ */ -+void __user *compat_alloc_user_space(unsigned long len) -+{ -+ void __user *ptr; -+ -+ /* If len would occupy more than half of the entire compat space... */ -+ if (unlikely(len > (((compat_uptr_t)~0) >> 1))) -+ return NULL; -+ -+ ptr = arch_compat_alloc_user_space(len); -+ -+ if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) -+ return NULL; -+ -+ return ptr; -+} -+EXPORT_SYMBOL_GPL(compat_alloc_user_space); diff --git a/patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch b/patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch index a100c96..d30e884 100644 --- a/patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch +++ b/patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch @@ -33,7 +33,7 @@ processed on one core resulting in higher utilization of the one core. /* * The polling frequency of this governor depends on the capability of -@@ -679,6 +680,29 @@ static int cpufreq_governor_dbs(struct c +@@ -736,6 +737,29 @@ static int cpufreq_governor_dbs(struct c dbs_tuners_ins.sampling_rate = max(min_sampling_rate, latency * LATENCY_MULTIPLIER); @@ -60,6 +60,6 @@ processed on one core resulting in higher utilization of the one core. + if (num_online_cpus() > 1) + dbs_tuners_ins.up_threshold = + DEF_FREQUENCY_UP_THRESHOLD / 2; + dbs_tuners_ins.io_is_busy = should_io_be_busy(); } mutex_unlock(&dbs_mutex); - diff --git a/patches.fixes/dm-release-map_lock-before-set_disk_ro b/patches.fixes/dm-release-map_lock-before-set_disk_ro index 9063acc..9dca021 100644 --- a/patches.fixes/dm-release-map_lock-before-set_disk_ro +++ b/patches.fixes/dm-release-map_lock-before-set_disk_ro @@ -16,11 +16,9 @@ with dm_get_table() and dm_table_put() drivers/md/dm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) -Index: linux-2.6.33-master/drivers/md/dm.c -=================================================================== ---- linux-2.6.33-master.orig/drivers/md/dm.c -+++ linux-2.6.33-master/drivers/md/dm.c -@@ -2102,12 +2102,15 @@ static struct dm_table *__bind(struct ma +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2174,12 +2174,15 @@ static struct dm_table *__bind(struct ma old_map = md->map; md->map = t; dm_table_set_restrictions(t, q, limits); diff --git a/patches.fixes/dm-table-switch-to-readonly b/patches.fixes/dm-table-switch-to-readonly index 96fe175..4aa14bd 100644 --- a/patches.fixes/dm-table-switch-to-readonly +++ b/patches.fixes/dm-table-switch-to-readonly @@ -17,7 +17,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c -@@ -462,11 +462,19 @@ static int __table_get_device(struct dm_ +@@ -465,11 +465,19 @@ static int __table_get_device(struct dm_ dd->dm_dev.mode = mode; dd->dm_dev.bdev = NULL; @@ -40,7 +40,7 @@ Signed-off-by: Hannes Reinecke atomic_set(&dd->count, 0); --- a/drivers/md/dm.c +++ b/drivers/md/dm.c -@@ -337,16 +337,25 @@ int dm_deleting_md(struct mapped_device +@@ -343,16 +343,25 @@ int dm_deleting_md(struct mapped_device static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct 
mapped_device *md; @@ -67,7 +67,7 @@ Signed-off-by: Hannes Reinecke goto out; } -@@ -356,7 +365,7 @@ static int dm_blk_open(struct block_devi +@@ -364,7 +373,7 @@ out: out: spin_unlock(&_minor_lock); @@ -76,7 +76,7 @@ Signed-off-by: Hannes Reinecke } static int dm_blk_close(struct gendisk *disk, fmode_t mode) -@@ -2093,6 +2102,11 @@ static struct dm_table *__bind(struct ma +@@ -2165,6 +2174,11 @@ static struct dm_table *__bind(struct ma old_map = md->map; md->map = t; dm_table_set_restrictions(t, q, limits); diff --git a/patches.fixes/dmar-fix-oops-with-no-dmar-table b/patches.fixes/dmar-fix-oops-with-no-dmar-table deleted file mode 100644 index 1cc623c..0000000 --- a/patches.fixes/dmar-fix-oops-with-no-dmar-table +++ /dev/null @@ -1,25 +0,0 @@ -From: Jeff Mahoney -Subject: dmar: Fix oops with no DMAR table -References: bnc#548108 -Patch-mainline: submitted 17 Mar 2010 - - On systems without a DMAR table and with DMAR enabled, we will oops - in dmar_ir_supported. This patch makes sure we actually have a DMAR - table before checking it. - - -Signed-off-by: Jeff Mahoney ---- - - drivers/pci/dmar.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/pci/dmar.c -+++ b/drivers/pci/dmar.c -@@ -1460,5 +1460,5 @@ int __init dmar_ir_support(void) - { - struct acpi_table_dmar *dmar; - dmar = (struct acpi_table_dmar *)dmar_tbl; -- return dmar->flags & 0x1; -+ return dmar && dmar->flags & 0x1; - } diff --git a/patches.fixes/execve-improve-interactivity-with-large-arguments b/patches.fixes/execve-improve-interactivity-with-large-arguments deleted file mode 100644 index b44754c..0000000 --- a/patches.fixes/execve-improve-interactivity-with-large-arguments +++ /dev/null @@ -1,42 +0,0 @@ -From: Roland McGrath -Date: Wed, 8 Sep 2010 02:36:28 +0000 (-0700) -Subject: execve: improve interactivity with large arguments -Git-commit: 7993bc1f4663c0db67bb8f0d98e6678145b387cd -Patch-mainline: 2.6.36-rc4 -References: bnc#635425 -Introduced-by: 2.6.23 - -execve: improve interactivity with large arguments - -This adds a preemption point during the copying of the argument and -environment strings for execve, in copy_strings(). There is already -a preemption point in the count() loop, so this doesn't add any new -points in the abstract sense. - -When the total argument+environment strings are very large, the time -spent copying them can be much more than a normal user time slice. -So this change improves the interactivity of the rest of the system -when one process is doing an execve with very large arguments. 
- -Signed-off-by: Roland McGrath -Reviewed-by: KOSAKI Motohiro -Signed-off-by: Linus Torvalds -Acked-by: Jeff Mahoney ---- - - fs/exec.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/fs/exec.c b/fs/exec.c -index 1b63237..6f2d777 100644 ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -419,6 +419,8 @@ static int copy_strings(int argc, const char __user *const __user *argv, - while (len > 0) { - int offset, bytes_to_copy; - -+ cond_resched(); -+ - offset = pos % PAGE_SIZE; - if (offset == 0) - offset = PAGE_SIZE; diff --git a/patches.fixes/execve-make-responsive-to-sigkill-with-large-arguments b/patches.fixes/execve-make-responsive-to-sigkill-with-large-arguments deleted file mode 100644 index ee00a70..0000000 --- a/patches.fixes/execve-make-responsive-to-sigkill-with-large-arguments +++ /dev/null @@ -1,57 +0,0 @@ -From: Roland McGrath -Date: Wed, 8 Sep 2010 02:37:06 +0000 (-0700) -Subject: execve: make responsive to SIGKILL with large arguments -Git-commit: 9aea5a65aa7a1af9a4236dfaeb0088f1624f9919 -Patch-mainline: 2.6.36-rc4 -References: bnc#635425 -Introduced-by: 2.6.23 - -execve: make responsive to SIGKILL with large arguments - -An execve with a very large total of argument/environment strings -can take a really long time in the execve system call. It runs -uninterruptibly to count and copy all the strings. This change -makes it abort the exec quickly if sent a SIGKILL. - -Note that this is the conservative change, to interrupt only for -SIGKILL, by using fatal_signal_pending(). It would be perfectly -correct semantics to let any signal interrupt the string-copying in -execve, i.e. use signal_pending() instead of fatal_signal_pending(). -We'll save that change for later, since it could have user-visible -consequences, such as having a timer set too quickly make it so that -an execve can never complete, though it always happened to work before. - -Signed-off-by: Roland McGrath -Reviewed-by: KOSAKI Motohiro -Signed-off-by: Linus Torvalds -Acked-by: Jeff Mahoney ---- - - fs/exec.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/fs/exec.c b/fs/exec.c -index 6f2d777..828dd24 100644 ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max) - argv++; - if (i++ >= max) - return -E2BIG; -+ -+ if (fatal_signal_pending(current)) -+ return -ERESTARTNOHAND; - cond_resched(); - } - } -@@ -419,6 +422,10 @@ static int copy_strings(int argc, const char __user *const __user *argv, - while (len > 0) { - int offset, bytes_to_copy; - -+ if (fatal_signal_pending(current)) { -+ ret = -ERESTARTNOHAND; -+ goto out; -+ } - cond_resched(); - - offset = pos % PAGE_SIZE; diff --git a/patches.fixes/ext3-mark-super-uptodate b/patches.fixes/ext3-mark-super-uptodate deleted file mode 100644 index 5be120d..0000000 --- a/patches.fixes/ext3-mark-super-uptodate +++ /dev/null @@ -1,41 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] ext3: always mark super uptodate before dirty -References: bnc#457043 -Patch-mainline: not yet - - The superblock's bh is something of an exception. It is only read - during mount and is only released during unmount. The in-memory - copy is invariably the most recent one. - - If a write error occurs while syncing the superblock, it will be marked - !uptodate. When another error occurs, ext3_error will invoke - ext3_commit_super, which will mark the superblock dirty and try to - sync it out again. If the buffer is !uptodate, then mark_buffer_dirty - will issue a warning, but continue anyway. 
- - This patch marks it uptodate before writing it out. This doesn't really - change anything other than silencing the warning in mark_buffer_dirty. - If the write succeeds, good. Otherwise, it will just have uptodate - cleared again. - -Signed-off-by: Jeff Mahoney ---- - fs/ext3/super.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/fs/ext3/super.c -+++ b/fs/ext3/super.c -@@ -2382,6 +2382,13 @@ static int ext3_commit_super(struct supe - es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb)); - es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb)); - BUFFER_TRACE(sbh, "marking dirty"); -+ -+ /* We only read the superblock once. The in-memory version is -+ * always the most recent. If ext3_error is called after a -+ * superblock write failure, it will be !uptodate. This write -+ * will likely fail also, but it avoids the WARN_ON in -+ * mark_buffer_dirty. */ -+ set_buffer_uptodate(sbh); - mark_buffer_dirty(sbh); - if (sync) - error = sync_dirty_buffer(sbh); diff --git a/patches.fixes/fix-nf_conntrack_slp b/patches.fixes/fix-nf_conntrack_slp index 09e8d82..a548ac8 100644 --- a/patches.fixes/fix-nf_conntrack_slp +++ b/patches.fixes/fix-nf_conntrack_slp @@ -30,7 +30,7 @@ Acked-by: Jeff Mahoney if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) goto out; @@ -64,15 +64,18 @@ static int help(struct sk_buff *skb, uns - in_dev = __in_dev_get_rcu(rt->u.dst.dev); + in_dev = __in_dev_get_rcu(rt->dst.dev); if (in_dev != NULL) { for_primary_ifa(in_dev) { - if (ifa->ifa_broadcast == iph->daddr) { diff --git a/patches.fixes/fixes-for-make-3.82.patch b/patches.fixes/fixes-for-make-3.82.patch deleted file mode 100644 index e00e514..0000000 --- a/patches.fixes/fixes-for-make-3.82.patch +++ /dev/null @@ -1,36 +0,0 @@ -It doesn't like pattern and explicit rules to be on the same line, -and it seems to be more picky when matching file (or really directory) -names with different numbers of trailing slashes. - -Signed-off-by: Jan Beulich -Acked-by: Sam Ravnborg - ---- -firmware/Makefile | 2 +- -scripts/mkmakefile | 4 +++- -2 files changed, 4 insertions(+), 2 deletions(-) - ---- linux-2.6.36-rc1/firmware/Makefile -+++ 2.6.36-rc1-make-3.82-fixes/firmware/Makefile -@@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin - fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) - - # Directories which we _might_ need to create, so we have a rule for them. --firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all)))) -+firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all)))) - - quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) - cmd_mkdir = mkdir -p $@ ---- linux-2.6.36-rc1/scripts/mkmakefile -+++ 2.6.36-rc1-make-3.82-fixes/scripts/mkmakefile -@@ -44,7 +44,9 @@ all: - - Makefile:; - --\$(all) %/: all -+\$(all): all - @: - -+%/: all -+ @: - EOF diff --git a/patches.fixes/flexcop-fix-registering-braindead-stupid-names b/patches.fixes/flexcop-fix-registering-braindead-stupid-names new file mode 100644 index 0000000..b46e826 --- /dev/null +++ b/patches.fixes/flexcop-fix-registering-braindead-stupid-names @@ -0,0 +1,43 @@ +From: Kyle McMartin +Subject: flexcop: fix registering braindead stupid names +References: brc#575873 bnc#661429 +Patch-mainline: Unsubmitted by author + + This patch fixes an issue where the flexcop driver passes DRIVER_NAME to + request_irq, which ultimately sets up proc files. The invalid name + contains slashes so the proc file creation fails and we get a WARN_ON. 
+ +Acked-by: Jeff Mahoney +--- + + drivers/media/dvb/b2c2/flexcop-pci.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/drivers/media/dvb/b2c2/flexcop-pci.c ++++ b/drivers/media/dvb/b2c2/flexcop-pci.c +@@ -39,6 +39,7 @@ MODULE_PARM_DESC(debug, + + #define DRIVER_VERSION "0.1" + #define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" ++#define FLEXCOP_MODULE_NAME "b2c2-flexcop" + #define DRIVER_AUTHOR "Patrick Boettcher " + + struct flexcop_pci { +@@ -299,7 +300,7 @@ static int flexcop_pci_init(struct flexc + return ret; + pci_set_master(fc_pci->pdev); + +- if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0) ++ if ((ret = pci_request_regions(fc_pci->pdev, FLEXCOP_MODULE_NAME)) != 0) + goto err_pci_disable_device; + + fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800); +@@ -313,7 +314,7 @@ static int flexcop_pci_init(struct flexc + pci_set_drvdata(fc_pci->pdev, fc_pci); + spin_lock_init(&fc_pci->irq_lock); + if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr, +- IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0) ++ IRQF_SHARED, FLEXCOP_MODULE_NAME, fc_pci)) != 0) + goto err_pci_iounmap; + + fc_pci->init_state |= FC_PCI_INIT; diff --git a/patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops b/patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops new file mode 100644 index 0000000..b692fe3 --- /dev/null +++ b/patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops @@ -0,0 +1,54 @@ +From: Timo Warns +Subject: fs/partitions/efi.c: corrupted GUID partition tables can cause kernel oops +References: bnc#687113 CVE-2011-1577 +Patch-mainline: Probably 2.6.39; In -mm already + +The kernel automatically evaluates partition tables of storage devices. +The code for evaluating GUID partitions (in fs/partitions/efi.c) contains +a bug that causes a kernel oops on certain corrupted GUID partition +tables. + +This bug has security impacts, because it allows, for example, to +prepare a storage device that crashes a kernel subsystem upon connecting +the device (e.g., a "USB Stick of (Partial) Death"). + + crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size)); + +computes a CRC32 checksum over gpt covering (*gpt)->header_size bytes. +There is no validation of (*gpt)->header_size before the efi_crc32 call. + +A corrupted partition table may have large values for (*gpt)->header_size. + In this case, the CRC32 computation access memory beyond the memory +allocated for gpt, which may cause a kernel heap overflow. + +Validate value of GUID partition table header size. 
+ +Signed-off-by: Timo Warns +Cc: Matt Domsch +Cc: Eugene Teo +Cc: Dave Jones +Signed-off-by: Andrew Morton +Acked-by: Jeff Mahoney +--- + + fs/partitions/efi.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +--- a/fs/partitions/efi.c ++++ b/fs/partitions/efi.c +@@ -310,6 +310,15 @@ static int is_gpt_valid(struct parsed_pa + goto fail; + } + ++ /* Check the GUID Partition Table header size */ ++ if (le32_to_cpu((*gpt)->header_size) > ++ bdev_logical_block_size(state->bdev)) { ++ pr_debug("GUID Partition Table Header size is wrong: %u > %u\n", ++ le32_to_cpu((*gpt)->header_size), ++ bdev_logical_block_size(state->bdev)); ++ goto fail; ++ } ++ + /* Check the GUID Partition Table CRC */ + origcrc = le32_to_cpu((*gpt)->header_crc32); + (*gpt)->header_crc32 = 0; diff --git a/patches.fixes/hfs-avoid-crash-in-hfs_bnode_create b/patches.fixes/hfs-avoid-crash-in-hfs_bnode_create new file mode 100644 index 0000000..46da711 --- /dev/null +++ b/patches.fixes/hfs-avoid-crash-in-hfs_bnode_create @@ -0,0 +1,30 @@ +From: Jeff Mahoney +Subject: hfs: avoid crash in hfs_bnode_create +Patch-mainline: not yet +References: bnc#552250 + + Commit 634725a92938b0f282b17cec0b007dca77adebd2 removed the BUG_ON + in hfs_bnode_create in hfsplus. This patch removes it from the hfs + version and avoids an fsfuzzer crash. + +Signed-off-by: Jeff Mahoney +Acked-by: Jeff Mahoney +--- + fs/hfs/bnode.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/fs/hfs/bnode.c ++++ b/fs/hfs/bnode.c +@@ -413,7 +413,11 @@ struct hfs_bnode *hfs_bnode_create(struc + spin_lock(&tree->hash_lock); + node = hfs_bnode_findhash(tree, num); + spin_unlock(&tree->hash_lock); +- BUG_ON(node); ++ if (node) { ++ printk(KERN_CRIT "new node %u already hashed?\n", num); ++ WARN_ON(1); ++ return node; ++ } + node = __hfs_bnode_create(tree, num); + if (!node) + return ERR_PTR(-ENOMEM); diff --git a/patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch b/patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch new file mode 100644 index 0000000..c376108 --- /dev/null +++ b/patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch @@ -0,0 +1,102 @@ +From: Jiri Kosina +Subject: [PATCH] HID: add support for Skycable 0x3f07 wireless presenter +References: bnc#681297 +Patch-mainline: not yet, queued in subsystem tree + +This device contains the very same bug in report descriptor as the +Ortek ones do (i.e. LogicalMinimum == 1, which is wrong for the key +array). + +As we have more reports for the Ortek devices, we are keeping the driver +name for now. Apparently there is a chip producer which sells chip with +this buggy descriptor to multiple vendors. Thus if such reports start +to come at highger frequency, we'll either have to rename the driver +accordingly, or come up with more generic workaround. + +Signed-off-by: Jiri Kosina +--- + drivers/hid/Kconfig | 9 +++++++-- + drivers/hid/hid-core.c | 1 + + drivers/hid/hid-ids.h | 3 +++ + drivers/hid/hid-ortek.c | 15 +++++++++++---- + 4 files changed, 22 insertions(+), 6 deletions(-) + +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -325,10 +325,15 @@ config HID_NTRIG + Support for N-Trig touch screen. + + config HID_ORTEK +- tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad" ++ tristate "Ortek WKB-2000/Skycable wireless keyboard and mouse trackpad" + depends on USB_HID + ---help--- +- Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad. 
++ There are certain devices which have LogicalMaximum wrong in the keyboard ++ usage page of their report descriptor. The most prevailing ones so far ++ are manufactured by Ortek, thus the name of the driver. Currently ++ supported devices by this driver are ++ - Ortek WKB-2000 ++ - Skycable wireless presenter + + config HID_PANTHERLORD + tristate "Pantherlord/GreenAsia game controller" +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1411,6 +1411,7 @@ static const struct hid_device_id hid_ha + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -514,6 +514,9 @@ + #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 + #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 + ++#define USB_VENDOR_ID_SKYCABLE 0x1223 ++#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 ++ + #define USB_VENDOR_ID_SONY 0x054c + #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b + #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 +--- a/drivers/hid/hid-ortek.c ++++ b/drivers/hid/hid-ortek.c +@@ -1,9 +1,12 @@ +-/* +- * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad). +- * Fixes LogicalMaximum error in USB report description, see +- * http://bugzilla.kernel.org/show_bug.cgi?id=14787 ++/* HID driver for various devices which are apparently based on the same chipset ++ * from certain vendor which produces chips that contain wrong LogicalMaximum ++ * value in their HID report descriptor. Currently supported devices are: ++ * ++ * Ortek WKB-2000 ++ * Skycable wireless presenter + * + * Copyright (c) 2010 Johnathon Harris ++ * Copyright (c) 2011 Jiri Kosina + */ + + /* +@@ -25,6 +28,9 @@ static __u8 *ortek_report_fixup(struct h + if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { + hid_info(hdev, "Fixing up Ortek WKB-2000 report descriptor\n"); + rdesc[55] = 0x92; ++ } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { ++ hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n"); ++ rdesc[53] = 0x65; + } + return rdesc; + } +@@ -32,6 +38,7 @@ static __u8 *ortek_report_fixup(struct h + static const struct hid_device_id ortek_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, + { } + }; + MODULE_DEVICE_TABLE(hid, ortek_devices); diff --git a/patches.fixes/hp-wmi_detect_keys.patch b/patches.fixes/hp-wmi_detect_keys.patch deleted file mode 100644 index 0202e14..0000000 --- a/patches.fixes/hp-wmi_detect_keys.patch +++ /dev/null @@ -1,110 +0,0 @@ -From: Thomas Renninger -Subject: x86 platform drivers: hp-wmi Reorder event id processing -References: bnc#598059 -Patch-Mainline: submitted - -Event id 0x4 defines the hotkey event. -No need (or even wrong) to query HPWMI_HOTKEY_QUERY if event id is != 0x4. - -Reorder the eventcode conditionals and use switch case instead of if/else. 
-Use an enum for the event ids cases. - - -Signed-off-by: Thomas Renninger -CC: mjg@redhat.com -CC: linux-acpi@vger.kernel.org - ---- - drivers/platform/x86/hp-wmi.c | 51 ++++++++++++++++++++++++++---------------- - 1 file changed, 32 insertions(+), 19 deletions(-) - -Index: linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -=================================================================== ---- linux-2.6.33-master.orig/drivers/platform/x86/hp-wmi.c -+++ linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -@@ -58,6 +58,12 @@ enum hp_wmi_radio { - HPWMI_WWAN = 2, - }; - -+enum hp_wmi_event_ids { -+ HPWMI_DOCK_EVENT = 1, -+ HPWMI_BEZEL_BUTTON = 4, -+ HPWMI_WIRELESS = 5, -+}; -+ - static int __devinit hp_wmi_bios_setup(struct platform_device *device); - static int __exit hp_wmi_bios_remove(struct platform_device *device); - static int hp_wmi_resume_handler(struct device *device); -@@ -338,7 +344,7 @@ static void hp_wmi_notify(u32 value, voi - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - static struct key_entry *key; - union acpi_object *obj; -- int eventcode; -+ int eventcode, key_code; - acpi_status status; - - status = wmi_get_event_data(value, &response); -@@ -357,28 +363,32 @@ static void hp_wmi_notify(u32 value, voi - - eventcode = *((u8 *) obj->buffer.pointer); - kfree(obj); -- if (eventcode == 0x4) -- eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, -- 0); -- key = hp_wmi_get_entry_by_scancode(eventcode); -- if (key) { -- switch (key->type) { -- case KE_KEY: -- input_report_key(hp_wmi_input_dev, -- key->keycode, 1); -- input_sync(hp_wmi_input_dev); -- input_report_key(hp_wmi_input_dev, -- key->keycode, 0); -- input_sync(hp_wmi_input_dev); -- break; -- } -- } else if (eventcode == 0x1) { -+ switch (eventcode) { -+ case HPWMI_DOCK_EVENT: - input_report_switch(hp_wmi_input_dev, SW_DOCK, - hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); - input_sync(hp_wmi_input_dev); -- } else if (eventcode == 0x5) { -+ break; -+ case HPWMI_BEZEL_BUTTON: -+ key_code = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, -+ 0); -+ key = hp_wmi_get_entry_by_scancode(key_code); -+ if (key) { -+ switch (key->type) { -+ case KE_KEY: -+ input_report_key(hp_wmi_input_dev, -+ key->keycode, 1); -+ input_sync(hp_wmi_input_dev); -+ input_report_key(hp_wmi_input_dev, -+ key->keycode, 0); -+ input_sync(hp_wmi_input_dev); -+ break; -+ } -+ } -+ break; -+ case HPWMI_WIRELESS: - if (wifi_rfkill) - rfkill_set_states(wifi_rfkill, - hp_wmi_get_sw_state(HPWMI_WIFI), -@@ -391,9 +401,12 @@ static void hp_wmi_notify(u32 value, voi - rfkill_set_states(wwan_rfkill, - hp_wmi_get_sw_state(HPWMI_WWAN), - hp_wmi_get_hw_state(HPWMI_WWAN)); -- } else -+ break; -+ default: - printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", - eventcode); -+ break; -+ } - } - - static int __init hp_wmi_input_setup(void) diff --git a/patches.fixes/hp_wmi_add_media_key.patch b/patches.fixes/hp_wmi_add_media_key.patch deleted file mode 100644 index f828bca..0000000 --- a/patches.fixes/hp_wmi_add_media_key.patch +++ /dev/null @@ -1,25 +0,0 @@ -From: Thomas Renninger -Subject: x86 platform drivers: hp-wmi Add media key 0x20e8 -References: bnc#598059 -Patch-Mainline: submitted - -Signed-off-by: Thomas Renninger -CC: mjg@redhat.com -CC: linux-acpi@vger.kernel.org - ---- - drivers/platform/x86/hp-wmi.c | 1 + - 1 file changed, 1 insertion(+) - -Index: linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -=================================================================== ---- 
linux-2.6.33-master.orig/drivers/platform/x86/hp-wmi.c -+++ linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -@@ -96,6 +96,7 @@ static struct key_entry hp_wmi_keymap[] - {KE_KEY, 0x02, KEY_BRIGHTNESSUP}, - {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN}, - {KE_KEY, 0x20e6, KEY_PROG1}, -+ {KE_KEY, 0x20e8, KEY_MEDIA}, - {KE_KEY, 0x2142, KEY_MEDIA}, - {KE_KEY, 0x213b, KEY_INFO}, - {KE_KEY, 0x2169, KEY_DIRECTION}, diff --git a/patches.fixes/hp_wmi_catch_unkown_event_key_codes.patch b/patches.fixes/hp_wmi_catch_unkown_event_key_codes.patch deleted file mode 100644 index 235fbcf..0000000 --- a/patches.fixes/hp_wmi_catch_unkown_event_key_codes.patch +++ /dev/null @@ -1,39 +0,0 @@ -From: Thomas Renninger -Subject: x86 platform drivers: hp-wmi Catch and log unkown event and key codes correctly -References: bnc#598059 -Patch-Mainline: submitted - -Signed-off-by: Thomas Renninger -CC: mjg@redhat.com -CC: linux-acpi@vger.kernel.org - ---- - drivers/platform/x86/hp-wmi.c | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - -Index: linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -=================================================================== ---- linux-2.6.33-master.orig/drivers/platform/x86/hp-wmi.c -+++ linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -@@ -386,7 +386,9 @@ static void hp_wmi_notify(u32 value, voi - input_sync(hp_wmi_input_dev); - break; - } -- } -+ } else -+ printk(KERN_INFO "HP WMI: Unknown key code - 0x%x\n", -+ key_code); - break; - case HPWMI_WIRELESS: - if (wifi_rfkill) -@@ -403,8 +405,8 @@ static void hp_wmi_notify(u32 value, voi - hp_wmi_get_hw_state(HPWMI_WWAN)); - break; - default: -- printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", -- eventcode); -+ printk(KERN_INFO "HP WMI: Unknown eventcode - %d\n", -+ eventcode); - break; - } - } diff --git a/patches.fixes/hp_wmi_use_prefix_string.patch b/patches.fixes/hp_wmi_use_prefix_string.patch deleted file mode 100644 index c9de151..0000000 --- a/patches.fixes/hp_wmi_use_prefix_string.patch +++ /dev/null @@ -1,61 +0,0 @@ -From: Thomas Renninger -Subject: x86 platform drivers: hp-wmi Use consistent prefix string for messages. 
-References: bnc#598059 -Patch-Mainline: submitted - -Signed-off-by: Thomas Renninger -CC: mjg@redhat.com -CC: linux-acpi@vger.kernel.org - ---- - drivers/platform/x86/hp-wmi.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -Index: linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -=================================================================== ---- linux-2.6.33-master.orig/drivers/platform/x86/hp-wmi.c -+++ linux-2.6.33-master/drivers/platform/x86/hp-wmi.c -@@ -52,6 +52,8 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE9 - #define HPWMI_WIRELESS_QUERY 0x5 - #define HPWMI_HOTKEY_QUERY 0xc - -+#define PREFIX "HP WMI: " -+ - enum hp_wmi_radio { - HPWMI_WIFI = 0, - HPWMI_BLUETOOTH = 1, -@@ -349,14 +351,14 @@ static void hp_wmi_notify(u32 value, voi - - status = wmi_get_event_data(value, &response); - if (status != AE_OK) { -- printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status); -+ printk(KERN_INFO PREFIX "bad event status 0x%x\n", status); - return; - } - - obj = (union acpi_object *)response.pointer; - - if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) { -- printk(KERN_INFO "HP WMI: Unknown response received\n"); -+ printk(KERN_INFO PREFIX "Unknown response received\n"); - kfree(obj); - return; - } -@@ -387,7 +389,7 @@ static void hp_wmi_notify(u32 value, voi - break; - } - } else -- printk(KERN_INFO "HP WMI: Unknown key code - 0x%x\n", -+ printk(KERN_INFO PREFIX "Unknown key code - 0x%x\n", - key_code); - break; - case HPWMI_WIRELESS: -@@ -405,7 +407,7 @@ static void hp_wmi_notify(u32 value, voi - hp_wmi_get_hw_state(HPWMI_WWAN)); - break; - default: -- printk(KERN_INFO "HP WMI: Unknown eventcode - %d\n", -+ printk(KERN_INFO PREFIX "Unknown eventcode - %d\n", - eventcode); - break; - } diff --git a/patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch b/patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch index 85321a5..e53df70 100644 --- a/patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch +++ b/patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch @@ -34,7 +34,7 @@ Signed-off-by: Gregory Haskins --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig -@@ -545,6 +545,7 @@ config IA64_MC_ERR_INJECT +@@ -548,6 +548,7 @@ config IA64_MC_ERR_INJECT config SGI_SN def_bool y if (IA64_SGI_SN2 || IA64_GENERIC) diff --git a/patches.fixes/ieee1394-sbp2_long_sysfs_ieee1394_id.patch b/patches.fixes/ieee1394-sbp2_long_sysfs_ieee1394_id.patch deleted file mode 100644 index cb779e4..0000000 --- a/patches.fixes/ieee1394-sbp2_long_sysfs_ieee1394_id.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: unknown@suse.de -Subject: some unknown ieee1394 patch -Patch-mainline: not yet - -make the long format the default because its also the default in the -new firewire stack. -Maybe it simplifies migration for new 10.3 installs to 11.0 or later. -Maybe it is bad for existing 10.3 and earlier installs. - -modprobe -v sbp2 sbp2_long_sysfs_ieee1394_id=0 to get the old short name -modprobe -v sbp2 sbp2_long_sysfs_ieee1394_id=1 to get the new long name - ---- - drivers/ieee1394/sbp2.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/ieee1394/sbp2.c -+++ b/drivers/ieee1394/sbp2.c -@@ -225,7 +225,7 @@ MODULE_PARM_DESC(workarounds, "Work arou - * independent of the implementation of the ieee1394 nodemgr, the longer format - * is recommended for future use. 
- */ --static int sbp2_long_sysfs_ieee1394_id; -+static int sbp2_long_sysfs_ieee1394_id = 1; - module_param_named(long_ieee1394_id, sbp2_long_sysfs_ieee1394_id, bool, 0644); - MODULE_PARM_DESC(long_ieee1394_id, "8+3+2 bytes format of ieee1394_id in sysfs " - "(default = backwards-compatible = N, SAM-conforming = Y)"); diff --git a/patches.fixes/input-add-acer-aspire-5710-to-nomux.patch b/patches.fixes/input-add-acer-aspire-5710-to-nomux.patch index 4cd6b40..4baaa65 100644 --- a/patches.fixes/input-add-acer-aspire-5710-to-nomux.patch +++ b/patches.fixes/input-add-acer-aspire-5710-to-nomux.patch @@ -14,7 +14,7 @@ Signed-off-by: Jiri Kosina --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h -@@ -360,6 +360,13 @@ static const struct dmi_system_id __init +@@ -371,6 +371,13 @@ static const struct dmi_system_id __init }, }, { diff --git a/patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices b/patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices new file mode 100644 index 0000000..36ad6e2 --- /dev/null +++ b/patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices @@ -0,0 +1,60 @@ +From: Reinette Chatre +Date: Tue, 29 Jun 2010 14:24:51 -0700 +Subject: [PATCH] iwlwifi: fix TX power configuration on 3945 and 4965 devices +Patch-mainline: Probably 2.6.35 +References: bnc#619440 bnc#610421 + +When receiving a TX power change request from mac80211 the functions +servicing these requests for 3945 and 4965 uses information in the active +RXON. In iwl_mac_config the staging RXON is prepared based on various +directions from mac80211 and only at the end is the staging RXON committed +to the device and the active RXON updated. + +By servicing the TX power change request while servicing the other requests +that modify the staging RXON we loose the new information provided by mac80211. + +Fix this by servicing the TX power change request after the RXON has been committed +to the device and active RXON thus contains accurate information. + + +21 Jan 2011 jeffm: +This patch may have been superced by 2295c66b68ae160dde2e6e2dc4f3061105153bfc +but needs review. 
+ +Signed-off-by: Reinette Chatre +Acked-by: Jeff Mahoney +--- + drivers/net/wireless/iwlwifi/iwl-core.c | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +--- a/drivers/net/wireless/iwlwifi/iwl-core.c ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c +@@ -2160,13 +2160,6 @@ int iwl_mac_config(struct ieee80211_hw * + IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); + } + +- if (changed & IEEE80211_CONF_CHANGE_POWER) { +- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", +- priv->tx_power_user_lmt, conf->power_level); +- +- iwl_set_tx_power(priv, conf->power_level, false); +- } +- + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + goto out; +@@ -2181,6 +2174,14 @@ int iwl_mac_config(struct ieee80211_hw * + "Not re-sending same RXON configuration.\n"); + } + ++ if (changed & IEEE80211_CONF_CHANGE_POWER) { ++ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", ++ priv->tx_power_user_lmt, conf->power_level); ++ ++ iwl_set_tx_power(priv, conf->power_level, false); ++ } ++ ++ + out: + IWL_DEBUG_MAC80211(priv, "leave\n"); + mutex_unlock(&priv->mutex); diff --git a/patches.fixes/kbuild-fix-generating-of-.symtypes-files b/patches.fixes/kbuild-fix-generating-of-.symtypes-files deleted file mode 100644 index 87813a6..0000000 --- a/patches.fixes/kbuild-fix-generating-of-.symtypes-files +++ /dev/null @@ -1,28 +0,0 @@ -Subject: kbuild: fix generating of *.symtypes files -From: Michal Marek -Patch-mainline: submitted 2009-06-29 - -Commit 37a8d9f ("kbuild: simplify use of genksyms") broke generating of -*.symtypes files during build (with KBUILD_SYMTYPES set). This patch -fixes it. - -Signed-off-by: Michal Marek - ---- - scripts/Makefile.build | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/scripts/Makefile.build -+++ b/scripts/Makefile.build -@@ -156,9 +156,9 @@ $(obj)/%.i: $(src)/%.c FORCE - - cmd_gensymtypes = \ - $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ -- $(GENKSYMS) -T $@ -a $(ARCH) \ -+ $(GENKSYMS) $(if $(strip $(1)), -T $(@:.o=.symtypes)) -a $(ARCH) \ - $(if $(KBUILD_PRESERVE),-p) \ -- $(if $(1),-r $(firstword $(wildcard $(@:.symtypes=.symref) /dev/null))) -+ -r $(firstword $(wildcard $(basename $@).symref /dev/null)) - - quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ - cmd_cc_symtypes_c = \ diff --git a/patches.fixes/keys-fix-bug-in-keyctl_session_to_parent-if-parent-has-no-session-keyring b/patches.fixes/keys-fix-bug-in-keyctl_session_to_parent-if-parent-has-no-session-keyring deleted file mode 100644 index 76104c7..0000000 --- a/patches.fixes/keys-fix-bug-in-keyctl_session_to_parent-if-parent-has-no-session-keyring +++ /dev/null @@ -1,56 +0,0 @@ -From: David Howells -Date: Fri, 10 Sep 2010 08:59:51 +0000 (+0100) -Subject: KEYS: Fix bug in keyctl_session_to_parent() if parent has no session - keyring -Git-commit: 3d96406c7da1ed5811ea52a3b0905f4f0e295376 -Patch-mainline: 2.6.36-rc4 -References: CVE-2010-2960 bnc#634637 -Introduced-by: 2.6.32 - -KEYS: Fix bug in keyctl_session_to_parent() if parent has no session keyring - -Fix a bug in keyctl_session_to_parent() whereby it tries to check the ownership -of the parent process's session keyring whether or not the parent has a session -keyring [CVE-2010-2960]. - -This results in the following oops: - - BUG: unable to handle kernel NULL pointer dereference at 00000000000000a0 - IP: [] keyctl_session_to_parent+0x251/0x443 - ... - Call Trace: - [] ? keyctl_session_to_parent+0x67/0x443 - [] ? 
__do_fault+0x24b/0x3d0 - [] sys_keyctl+0xb4/0xb8 - [] system_call_fastpath+0x16/0x1b - -if the parent process has no session keyring. - -If the system is using pam_keyinit then it mostly protected against this as all -processes derived from a login will have inherited the session keyring created -by pam_keyinit during the log in procedure. - -To test this, pam_keyinit calls need to be commented out in /etc/pam.d/. - -Reported-by: Tavis Ormandy -Signed-off-by: David Howells -Acked-by: Tavis Ormandy -Signed-off-by: Linus Torvalds -Acked-by: Jeff Mahoney ---- - - security/keys/keyctl.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/security/keys/keyctl.c -+++ b/security/keys/keyctl.c -@@ -1292,7 +1292,8 @@ long keyctl_session_to_parent(void) - goto not_permitted; - - /* the keyrings must have the same UID */ -- if (pcred ->tgcred->session_keyring->uid != mycred->euid || -+ if ((pcred->tgcred->session_keyring && -+ pcred->tgcred->session_keyring->uid != mycred->euid) || - mycred->tgcred->session_keyring->uid != mycred->euid) - goto not_permitted; - diff --git a/patches.fixes/keys-fix-rcu-no-lock-warning-in-keyctl_session_to_parent b/patches.fixes/keys-fix-rcu-no-lock-warning-in-keyctl_session_to_parent deleted file mode 100644 index f354f8f..0000000 --- a/patches.fixes/keys-fix-rcu-no-lock-warning-in-keyctl_session_to_parent +++ /dev/null @@ -1,69 +0,0 @@ -From: David Howells -Date: Fri, 10 Sep 2010 08:59:46 +0000 (+0100) -Subject: KEYS: Fix RCU no-lock warning in keyctl_session_to_parent() -Git-commit: 9d1ac65a9698513d00e5608d93fca0c53f536c14 -Patch-mainline: 2.6.36-rc4 -References: CVE-2010-2960 bnc#634637 -Introduced-by: 2.6.32 - -KEYS: Fix RCU no-lock warning in keyctl_session_to_parent() - -There's an protected access to the parent process's credentials in the middle -of keyctl_session_to_parent(). This results in the following RCU warning: - - =================================================== - [ INFO: suspicious rcu_dereference_check() usage. ] - --------------------------------------------------- - security/keys/keyctl.c:1291 invoked rcu_dereference_check() without protection! - - other info that might help us debug this: - - rcu_scheduler_active = 1, debug_locks = 0 - 1 lock held by keyctl-session-/2137: - #0: (tasklist_lock){.+.+..}, at: [] keyctl_session_to_parent+0x60/0x236 - - stack backtrace: - Pid: 2137, comm: keyctl-session- Not tainted 2.6.36-rc2-cachefs+ #1 - Call Trace: - [] lockdep_rcu_dereference+0xaa/0xb3 - [] keyctl_session_to_parent+0xed/0x236 - [] sys_keyctl+0xb4/0xb6 - [] system_call_fastpath+0x16/0x1b - -The code should take the RCU read lock to make sure the parents credentials -don't go away, even though it's holding a spinlock and has IRQ disabled. 
- -Signed-off-by: David Howells -Signed-off-by: Linus Torvalds -Acked-by: Jeff Mahoney ---- - - security/keys/keyctl.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/security/keys/keyctl.c -+++ b/security/keys/keyctl.c -@@ -1259,6 +1259,7 @@ long keyctl_session_to_parent(void) - keyring_r = NULL; - - me = current; -+ rcu_read_lock(); - write_lock_irq(&tasklist_lock); - - parent = me->real_parent; -@@ -1313,6 +1314,7 @@ long keyctl_session_to_parent(void) - set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); - - write_unlock_irq(&tasklist_lock); -+ rcu_read_unlock(); - if (oldcred) - put_cred(oldcred); - return 0; -@@ -1321,6 +1323,7 @@ already_same: - ret = 0; - not_permitted: - write_unlock_irq(&tasklist_lock); -+ rcu_read_unlock(); - put_cred(cred); - return ret; - diff --git a/patches.fixes/kvm-ioapic.patch b/patches.fixes/kvm-ioapic.patch index bc02206..6993ddb 100644 --- a/patches.fixes/kvm-ioapic.patch +++ b/patches.fixes/kvm-ioapic.patch @@ -9,8 +9,8 @@ References: bnc#556564 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c -@@ -200,7 +200,8 @@ int kvm_ioapic_set_irq(struct kvm_ioapic - spin_lock(&ioapic->lock); +@@ -202,7 +202,8 @@ int kvm_ioapic_set_irq(struct kvm_ioapic + old_irr = ioapic->irr; if (irq >= 0 && irq < IOAPIC_NUM_PINS) { entry = ioapic->redirtbl[irq]; - level ^= entry.fields.polarity; diff --git a/patches.fixes/kvm-macos.patch b/patches.fixes/kvm-macos.patch index cc7504b..d6534af 100644 --- a/patches.fixes/kvm-macos.patch +++ b/patches.fixes/kvm-macos.patch @@ -10,7 +10,7 @@ References: bnc#556564 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c -@@ -1995,6 +1995,22 @@ static int skinit_interception(struct vc +@@ -2303,6 +2303,22 @@ static int skinit_interception(struct vc return 1; } @@ -33,7 +33,7 @@ References: bnc#556564 static int invalid_op_interception(struct vcpu_svm *svm) { kvm_queue_exception(&svm->vcpu, UD_VECTOR); -@@ -2376,8 +2392,8 @@ static int (*svm_exit_handlers[])(struct +@@ -2722,8 +2738,8 @@ static int (*svm_exit_handlers[])(struct [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = skinit_interception, [SVM_EXIT_WBINVD] = emulate_on_interception, @@ -41,12 +41,12 @@ References: bnc#556564 - [SVM_EXIT_MWAIT] = invalid_op_interception, + [SVM_EXIT_MONITOR] = monitor_interception, + [SVM_EXIT_MWAIT] = mwait_interception, + [SVM_EXIT_XSETBV] = xsetbv_interception, [SVM_EXIT_NPF] = pf_interception, }; - --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -1144,6 +1144,7 @@ int kvm_set_msr_common(struct kvm_vcpu * +@@ -1254,6 +1254,7 @@ int kvm_set_msr_common(struct kvm_vcpu * case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: break; @@ -54,7 +54,7 @@ References: bnc#556564 case 0x200 ... 
0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: -@@ -1400,6 +1401,7 @@ int kvm_get_msr_common(struct kvm_vcpu * +@@ -1512,6 +1513,7 @@ int kvm_get_msr_common(struct kvm_vcpu * case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: @@ -62,12 +62,12 @@ References: bnc#556564 data = 0; break; case MSR_MTRRcap: -@@ -1848,7 +1850,7 @@ static void do_cpuid_ent(struct kvm_cpui +@@ -1985,7 +1987,7 @@ static void do_cpuid_ent(struct kvm_cpui 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); /* cpuid 1.ecx */ const u32 kvm_supported_word4_x86_features = -- F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ | -+ F(XMM3) | bit((4*32+ 3)) /* MONITOR */ | 0 /* Reserved, DTES64 */ | +- F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | ++ F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64 */ | F(MWAIT) | 0 /* DS-CPL, VMX, SMX, EST */ | 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | diff --git a/patches.fixes/make-note_interrupt-fast.diff b/patches.fixes/make-note_interrupt-fast.diff deleted file mode 100644 index 228f43a..0000000 --- a/patches.fixes/make-note_interrupt-fast.diff +++ /dev/null @@ -1,193 +0,0 @@ -From: Bernhard Walle -Subject: Fix performance regression on large IA64 systems -References: bnc #469589 -Patch-mainline: no (and never will) - -This patch tries to address a performance regression discovered by SGI. - -Patch b60c1f6ffd88850079ae419aa933ab0eddbd5535 removes the call -to note_interrupt() in __do_IRQ(). Patch d85a60d85ea5b7c597508c1510c88e657773d378 -adds it again. Because it's needed for irqpoll. - -That patch now introduces a new parameter 'only_fixup' for note_interrupt(). -This parameter determines two cases: - - TRUE => The function should be only executed when irqfixup is set. - Either 'irqpoll' or 'irqfixup' directly set that. - - FALSE => Just the behaviour as note_interrupt() always had. - -Now the patch converts all calls of note_interrupt() to only_fixup=FALSE, -except the call that has been removed by b60c1f6ffd88850079ae419aa933ab0eddbd5535. -So that call is always done, but the body is only executed when either -'irqpoll' or 'irqfixup' are specified. - -This patch is not meant for mainline inclusion in the first run! - - -Signed-off-by: Bernhard Walle - ---- - arch/arm/mach-ns9xxx/irq.c | 2 +- - arch/powerpc/platforms/cell/interrupt.c | 2 +- - drivers/mfd/ezx-pcap.c | 3 ++- - drivers/mfd/twl4030-irq.c | 2 +- - include/linux/irq.h | 2 +- - kernel/irq/chip.c | 12 ++++++------ - kernel/irq/handle.c | 4 ++-- - kernel/irq/spurious.c | 10 +++++++++- - 8 files changed, 23 insertions(+), 14 deletions(-) - ---- a/arch/arm/mach-ns9xxx/irq.c -+++ b/arch/arm/mach-ns9xxx/irq.c -@@ -85,7 +85,7 @@ static void handle_prio_irq(unsigned int - /* XXX: There is no direct way to access noirqdebug, so check - * unconditionally for spurious irqs... - * Maybe this function should go to kernel/irq/chip.c? 
*/ -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - - raw_spin_lock(&desc->lock); - desc->status &= ~IRQ_INPROGRESS; ---- a/arch/powerpc/platforms/cell/interrupt.c -+++ b/arch/powerpc/platforms/cell/interrupt.c -@@ -268,7 +268,7 @@ static void handle_iic_irq(unsigned int - raw_spin_unlock(&desc->lock); - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - raw_spin_lock(&desc->lock); - - } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); ---- a/drivers/mfd/ezx-pcap.c -+++ b/drivers/mfd/ezx-pcap.c -@@ -203,7 +203,8 @@ static void pcap_isr_work(struct work_st - break; - - if (desc->status & IRQ_DISABLED) -- note_interrupt(irq, desc, IRQ_NONE); -+ note_interrupt(irq, desc, IRQ_NONE, -+ false); - else - desc->handle_irq(irq, desc); - } ---- a/drivers/mfd/twl4030-irq.c -+++ b/drivers/mfd/twl4030-irq.c -@@ -330,7 +330,7 @@ static int twl4030_irq_thread(void *data - */ - if (d->status & IRQ_DISABLED) - note_interrupt(module_irq, d, -- IRQ_NONE); -+ IRQ_NONE, false); - else - d->handle_irq(module_irq, d); - } ---- a/include/linux/irq.h -+++ b/include/linux/irq.h -@@ -324,7 +324,7 @@ static inline void generic_handle_irq(un - - /* Handling of unhandled and spurious interrupts: */ - extern void note_interrupt(unsigned int irq, struct irq_desc *desc, -- irqreturn_t action_ret); -+ irqreturn_t action_ret, bool only_fixup); - - /* Resending of interrupts :*/ - void check_irq_resend(struct irq_desc *desc, unsigned int irq); ---- a/kernel/irq/chip.c -+++ b/kernel/irq/chip.c -@@ -390,7 +390,7 @@ void handle_nested_irq(unsigned int irq) - - action_ret = action->thread_fn(action->irq, action->dev_id); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - - raw_spin_lock_irq(&desc->lock); - desc->status &= ~IRQ_INPROGRESS; -@@ -434,7 +434,7 @@ handle_simple_irq(unsigned int irq, stru - - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - - raw_spin_lock(&desc->lock); - desc->status &= ~IRQ_INPROGRESS; -@@ -479,7 +479,7 @@ handle_level_irq(unsigned int irq, struc - - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - - raw_spin_lock(&desc->lock); - desc->status &= ~IRQ_INPROGRESS; -@@ -535,7 +535,7 @@ handle_fasteoi_irq(unsigned int irq, str - - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - - raw_spin_lock(&desc->lock); - desc->status &= ~IRQ_INPROGRESS; -@@ -613,7 +613,7 @@ handle_edge_irq(unsigned int irq, struct - raw_spin_unlock(&desc->lock); - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - raw_spin_lock(&desc->lock); - - } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); -@@ -642,7 +642,7 @@ handle_percpu_irq(unsigned int irq, stru - - action_ret = handle_IRQ_event(irq, desc->action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - - if (desc->chip->eoi) - desc->chip->eoi(irq); ---- a/kernel/irq/handle.c -+++ b/kernel/irq/handle.c -@@ -465,7 +465,7 @@ unsigned int 
__do_IRQ(unsigned int irq) - if (likely(!(desc->status & IRQ_DISABLED))) { - action_ret = handle_IRQ_event(irq, desc->action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, true); - } - desc->chip->end(irq); - return 1; -@@ -519,7 +519,7 @@ unsigned int __do_IRQ(unsigned int irq) - - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) -- note_interrupt(irq, desc, action_ret); -+ note_interrupt(irq, desc, action_ret, false); - - raw_spin_lock(&desc->lock); - if (likely(!(desc->status & IRQ_PENDING))) ---- a/kernel/irq/spurious.c -+++ b/kernel/irq/spurious.c -@@ -213,9 +213,17 @@ try_misrouted_irq(unsigned int irq, stru - return action && (action->flags & IRQF_IRQPOLL); - } - -+/* -+ * The parameter "only_fixup" means that the function should be only executed -+ * if this parameter is set either to false or to true simultaneously with -+ * irqfixup enabled. -+ */ - void note_interrupt(unsigned int irq, struct irq_desc *desc, -- irqreturn_t action_ret) -+ irqreturn_t action_ret, bool only_fixup) - { -+ if (only_fixup && irqfixup == 0) -+ return; -+ - if (unlikely(action_ret != IRQ_HANDLED)) { - /* - * If we are seeing only the odd spurious IRQ caused by diff --git a/patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files b/patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files new file mode 100644 index 0000000..45c1a75 --- /dev/null +++ b/patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files @@ -0,0 +1,37 @@ +From: Vasiliy Kulikov +Date: Fri, 4 Feb 2011 09:23:33 -0300 +Subject: [media] video: sn9c102: world-wirtable sysfs files +Patch-mainline: v2.6.39-rc2 +Git-commit: 14ddc3188d50855ae2a419a6aced995e2834e5d4 +Introduced-by: v2.6.8-rc2 +References: bnc#673934 + +Don't allow everybody to change video settings. + +Signed-off-by: Vasiliy Kulikov +Acked-by: Mauro Carvalho Chehab +Acked-by: Luca Risolia +Signed-off-by: Mauro Carvalho Chehab +Acked-by: Jeff Mahoney +--- + drivers/media/video/sn9c102/sn9c102_core.c | 6 +++--- + 1 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c +index 84984f6..ce56a1c 100644 +--- a/drivers/media/video/sn9c102/sn9c102_core.c ++++ b/drivers/media/video/sn9c102/sn9c102_core.c +@@ -1430,9 +1430,9 @@ static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR, + sn9c102_show_i2c_reg, sn9c102_store_i2c_reg); + static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR, + sn9c102_show_i2c_val, sn9c102_store_i2c_val); +-static DEVICE_ATTR(green, S_IWUGO, NULL, sn9c102_store_green); +-static DEVICE_ATTR(blue, S_IWUGO, NULL, sn9c102_store_blue); +-static DEVICE_ATTR(red, S_IWUGO, NULL, sn9c102_store_red); ++static DEVICE_ATTR(green, S_IWUSR, NULL, sn9c102_store_green); ++static DEVICE_ATTR(blue, S_IWUSR, NULL, sn9c102_store_blue); ++static DEVICE_ATTR(red, S_IWUSR, NULL, sn9c102_store_red); + static DEVICE_ATTR(frame_header, S_IRUGO, sn9c102_show_frame_header, NULL); + + + diff --git a/patches.fixes/misdn-add-support-for-group-membership-check b/patches.fixes/misdn-add-support-for-group-membership-check new file mode 100644 index 0000000..29022c1 --- /dev/null +++ b/patches.fixes/misdn-add-support-for-group-membership-check @@ -0,0 +1,69 @@ +From: Jeff Mahoney +Subject: [PATCH] mISDN: Add support for group membership check +References: bnc#564423 +Patch-mainline: Unsubmitted, needs upstream consensus + + This patch adds a module parameter to allow a group access to the + mISDN devices. 
Otherwise, unpriviledged users on systems with ISDN hardware + have the ability to dial out, potentially causing expensive bills. + +Based on a different implementation by Patrick Koppen + +Acked-by: Jeff Mahoney +--- + + drivers/isdn/mISDN/core.c | 3 +++ + drivers/isdn/mISDN/core.h | 1 + + drivers/isdn/mISDN/socket.c | 8 ++++++++ + 3 files changed, 12 insertions(+) + +--- a/drivers/isdn/mISDN/core.c ++++ b/drivers/isdn/mISDN/core.c +@@ -21,10 +21,13 @@ + #include "core.h" + + static u_int debug; ++u_int misdn_permitted_gid; + + MODULE_AUTHOR("Karsten Keil"); + MODULE_LICENSE("GPL"); + module_param(debug, uint, S_IRUGO | S_IWUSR); ++module_param_named(gid, misdn_permitted_gid, uint, 0); ++MODULE_PARM_DESC(gid, "Unix group for accessing misdn socket (default 0)"); + + static u64 device_ids; + #define MAX_DEVICE_ID 63 +--- a/drivers/isdn/mISDN/core.h ++++ b/drivers/isdn/mISDN/core.h +@@ -17,6 +17,7 @@ + + extern struct mISDNdevice *get_mdevice(u_int); + extern int get_mdevice_count(void); ++extern u_int misdn_permitted_gid; + + /* stack status flag */ + #define mISDN_STACK_ACTION_MASK 0x0000ffff +--- a/drivers/isdn/mISDN/socket.c ++++ b/drivers/isdn/mISDN/socket.c +@@ -608,6 +608,10 @@ data_sock_create(struct net *net, struct + { + struct sock *sk; + ++ if(!capable(CAP_SYS_ADMIN) && (misdn_permitted_gid != current_gid()) ++ && (!in_group_p(misdn_permitted_gid))) ++ return -EPERM; ++ + if (sock->type != SOCK_DGRAM) + return -ESOCKTNOSUPPORT; + +@@ -690,6 +694,10 @@ base_sock_ioctl(struct socket *sock, uns + case IMSETDEVNAME: + { + struct mISDN_devrename dn; ++ if(!capable(CAP_SYS_ADMIN) ++ && (misdn_permitted_gid != current_gid()) ++ && (!in_group_p(misdn_permitted_gid))) ++ return -EPERM; + if (copy_from_user(&dn, (void __user *)arg, + sizeof(dn))) { + err = -EFAULT; diff --git a/patches.fixes/net-sched-fix-some-kernel-memory-leaks b/patches.fixes/net-sched-fix-some-kernel-memory-leaks deleted file mode 100644 index 919cecb..0000000 --- a/patches.fixes/net-sched-fix-some-kernel-memory-leaks +++ /dev/null @@ -1,165 +0,0 @@ -From: Eric Dumazet -Date: Mon, 16 Aug 2010 20:04:22 +0000 (+0000) -Subject: net sched: fix some kernel memory leaks -Git-commit: 1c40be12f7d8ca1d387510d39787b12e512a7ce8 -Patch-mainline: 2.6.36-rc3 -References: CVE-2010-2942 bnc#632309 - -net sched: fix some kernel memory leaks - -We leak at least 32bits of kernel memory to user land in tc dump, -because we dont init all fields (capab ?) of the dumped structure. - -Use C99 initializers so that holes and non explicit fields are zeroed. - -Signed-off-by: Eric Dumazet -Signed-off-by: David S. 
Miller -Acked-by: Jeff Mahoney ---- - - net/sched/act_gact.c | 21 ++++++++++++--------- - net/sched/act_mirred.c | 15 ++++++++------- - net/sched/act_police.c | 19 ++++++++----------- - net/sched/act_simple.c | 11 ++++++----- - net/sched/act_skbedit.c | 11 ++++++----- - 5 files changed, 40 insertions(+), 37 deletions(-) - ---- a/net/sched/act_gact.c -+++ b/net/sched/act_gact.c -@@ -152,21 +152,24 @@ static int tcf_gact(struct sk_buff *skb, - static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) - { - unsigned char *b = skb_tail_pointer(skb); -- struct tc_gact opt; - struct tcf_gact *gact = a->priv; -+ struct tc_gact opt = { -+ .index = gact->tcf_index, -+ .refcnt = gact->tcf_refcnt - ref, -+ .bindcnt = gact->tcf_bindcnt - bind, -+ .action = gact->tcf_action, -+ }; - struct tcf_t t; - -- opt.index = gact->tcf_index; -- opt.refcnt = gact->tcf_refcnt - ref; -- opt.bindcnt = gact->tcf_bindcnt - bind; -- opt.action = gact->tcf_action; - NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); - #ifdef CONFIG_GACT_PROB - if (gact->tcfg_ptype) { -- struct tc_gact_p p_opt; -- p_opt.paction = gact->tcfg_paction; -- p_opt.pval = gact->tcfg_pval; -- p_opt.ptype = gact->tcfg_ptype; -+ struct tc_gact_p p_opt = { -+ .paction = gact->tcfg_paction, -+ .pval = gact->tcfg_pval, -+ .ptype = gact->tcfg_ptype, -+ }; -+ - NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); - } - #endif ---- a/net/sched/act_mirred.c -+++ b/net/sched/act_mirred.c -@@ -211,15 +211,16 @@ static int tcf_mirred_dump(struct sk_buf - { - unsigned char *b = skb_tail_pointer(skb); - struct tcf_mirred *m = a->priv; -- struct tc_mirred opt; -+ struct tc_mirred opt = { -+ .index = m->tcf_index, -+ .action = m->tcf_action, -+ .refcnt = m->tcf_refcnt - ref, -+ .bindcnt = m->tcf_bindcnt - bind, -+ .eaction = m->tcfm_eaction, -+ .ifindex = m->tcfm_ifindex, -+ }; - struct tcf_t t; - -- opt.index = m->tcf_index; -- opt.action = m->tcf_action; -- opt.refcnt = m->tcf_refcnt - ref; -- opt.bindcnt = m->tcf_bindcnt - bind; -- opt.eaction = m->tcfm_eaction; -- opt.ifindex = m->tcfm_ifindex; - NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); - t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); - t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); ---- a/net/sched/act_police.c -+++ b/net/sched/act_police.c -@@ -341,22 +341,19 @@ tcf_act_police_dump(struct sk_buff *skb, - { - unsigned char *b = skb_tail_pointer(skb); - struct tcf_police *police = a->priv; -- struct tc_police opt; -+ struct tc_police opt = { -+ .index = police->tcf_index, -+ .action = police->tcf_action, -+ .mtu = police->tcfp_mtu, -+ .burst = police->tcfp_burst, -+ .refcnt = police->tcf_refcnt - ref, -+ .bindcnt = police->tcf_bindcnt - bind, -+ }; - -- opt.index = police->tcf_index; -- opt.action = police->tcf_action; -- opt.mtu = police->tcfp_mtu; -- opt.burst = police->tcfp_burst; -- opt.refcnt = police->tcf_refcnt - ref; -- opt.bindcnt = police->tcf_bindcnt - bind; - if (police->tcfp_R_tab) - opt.rate = police->tcfp_R_tab->rate; -- else -- memset(&opt.rate, 0, sizeof(opt.rate)); - if (police->tcfp_P_tab) - opt.peakrate = police->tcfp_P_tab->rate; -- else -- memset(&opt.peakrate, 0, sizeof(opt.peakrate)); - NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); - if (police->tcfp_result) - NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); ---- a/net/sched/act_simple.c -+++ b/net/sched/act_simple.c -@@ -164,13 +164,14 @@ static inline int tcf_simp_dump(struct s - { - unsigned char *b = skb_tail_pointer(skb); - struct tcf_defact *d = a->priv; 
-- struct tc_defact opt; -+ struct tc_defact opt = { -+ .index = d->tcf_index, -+ .refcnt = d->tcf_refcnt - ref, -+ .bindcnt = d->tcf_bindcnt - bind, -+ .action = d->tcf_action, -+ }; - struct tcf_t t; - -- opt.index = d->tcf_index; -- opt.refcnt = d->tcf_refcnt - ref; -- opt.bindcnt = d->tcf_bindcnt - bind; -- opt.action = d->tcf_action; - NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); - NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); - t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); ---- a/net/sched/act_skbedit.c -+++ b/net/sched/act_skbedit.c -@@ -159,13 +159,14 @@ static inline int tcf_skbedit_dump(struc - { - unsigned char *b = skb_tail_pointer(skb); - struct tcf_skbedit *d = a->priv; -- struct tc_skbedit opt; -+ struct tc_skbedit opt = { -+ .index = d->tcf_index, -+ .refcnt = d->tcf_refcnt - ref, -+ .bindcnt = d->tcf_bindcnt - bind, -+ .action = d->tcf_action, -+ }; - struct tcf_t t; - -- opt.index = d->tcf_index; -- opt.refcnt = d->tcf_refcnt - ref; -- opt.bindcnt = d->tcf_bindcnt - bind; -- opt.action = d->tcf_action; - NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); - if (d->flags & SKBEDIT_F_PRIORITY) - NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), diff --git a/patches.fixes/netbk-for-new-udev.patch b/patches.fixes/netbk-for-new-udev.patch deleted file mode 100644 index 6618354..0000000 --- a/patches.fixes/netbk-for-new-udev.patch +++ /dev/null @@ -1,96 +0,0 @@ ---- linux-2.6.34.1/drivers/xen/netback/xenbus.c.orig 2011-03-25 20:09:13.580685804 +0100 -+++ linux-2.6.34.1/drivers/xen/netback/xenbus.c 2011-03-25 20:08:00.215437569 +0100 -@@ -19,6 +19,7 @@ - - #include - #include -+#include - #include - #include "common.h" - -@@ -28,6 +29,7 @@ - printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) - #endif - -+static DECLARE_RWSEM(teardown_sem); - - static int connect_rings(struct backend_info *); - static void connect(struct backend_info *); -@@ -39,13 +41,18 @@ - - netback_remove_accelerators(be, dev); - -- if (be->netif) { -+ if (be->netif) - kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); -+ -+ down_write(&teardown_sem); -+ if (be->netif) { - netif_disconnect(be->netif); - be->netif = NULL; - } -- kfree(be); - dev_set_drvdata(&dev->dev, NULL); -+ up_write(&teardown_sem); -+ kfree(be); -+ - return 0; - } - -@@ -151,8 +158,7 @@ - */ - static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env) - { -- struct backend_info *be = dev_get_drvdata(&xdev->dev); -- netif_t *netif = be->netif; -+ struct backend_info *be; - char *val; - - DPRINTK("netback_uevent"); -@@ -163,12 +169,15 @@ - xenbus_dev_fatal(xdev, err, "reading script"); - return err; - } -- else { -- add_uevent_var(env, "script=%s", val); -- kfree(val); -- } - -- add_uevent_var(env, "vif=%s", netif->dev->name); -+ add_uevent_var(env, "script=%s", val); -+ kfree(val); -+ -+ down_read(&teardown_sem); -+ be = dev_get_drvdata(&xdev->dev); -+ if (be && be->netif) -+ add_uevent_var(env, "vif=%s", be->netif->dev->name); -+ up_read(&teardown_sem); - - return 0; - } -@@ -179,6 +188,7 @@ - int err; - long handle; - struct xenbus_device *dev = be->dev; -+ netif_t *netif; - - if (be->netif != NULL) - return; -@@ -189,13 +199,13 @@ - return; - } - -- be->netif = netif_alloc(&dev->dev, dev->otherend_id, handle); -- if (IS_ERR(be->netif)) { -- err = PTR_ERR(be->netif); -- be->netif = NULL; -+ netif = netif_alloc(&dev->dev, dev->otherend_id, handle); -+ if (IS_ERR(netif)) { -+ err = PTR_ERR(netif); - xenbus_dev_fatal(dev, err, "creating interface"); - return; - } -+ 
be->netif = netif; - - kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); - } diff --git a/patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack b/patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack new file mode 100644 index 0000000..c3a3776 --- /dev/null +++ b/patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack @@ -0,0 +1,104 @@ +From: Jeff Mahoney +Subject: netfilter: Implement RFC 1123 for FTP conntrack +References: bnc#466279 +Patch-mainline: Submitted via http://bugzilla.netfilter.org/show_bug.cgi?id=574 23 Jan 2011 + + The FTP conntrack code currently only accepts the following format for + the 227 response for PASV: + 227 Entering Passive Mode (148,100,81,40,31,161). + + It doesn't accept the following format from an obscure server: + 227 Data transfer will passively listen to 67,218,99,134,50,144 + + From RFC 1123: + The format of the 227 reply to a PASV command is not + well standardized. In particular, an FTP client cannot + assume that the parentheses shown on page 40 of RFC-959 + will be present (and in fact, Figure 3 on page 43 omits + them). Therefore, a User-FTP program that interprets + the PASV reply must scan the reply for the first digit + of the host and port numbers. + + This patch adds support for the RFC 1123 clarification by: + - Allowing a search filter to specify NUL as the terminator so that + try_number will return successfully if the array of numbers has been + filled when an unexpected character is encountered. + - Using space as the separator for the 227 reply and then scanning for + the first digit of the number sequence. The number sequence is parsed + out using the existing try_rfc959 but with a NUL terminator. + + Tracked in: https://bugzilla.novell.com/show_bug.cgi?id=466279 + +Reported-by: Mark Post +Signed-off-by: Jeff Mahoney +--- + net/netfilter/nf_conntrack_ftp.c | 36 +++++++++++++++++++++++++++++++----- + 1 file changed, 31 insertions(+), 5 deletions(-) + +--- a/net/netfilter/nf_conntrack_ftp.c ++++ b/net/netfilter/nf_conntrack_ftp.c +@@ -53,6 +53,7 @@ unsigned int (*nf_nat_ftp_hook)(struct s + EXPORT_SYMBOL_GPL(nf_nat_ftp_hook); + + static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char); ++static int try_rfc1123(const char *, size_t, struct nf_conntrack_man *, char); + static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char); + static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *, + char); +@@ -87,10 +88,10 @@ static struct ftp_search { + { + .pattern = "227 ", + .plen = sizeof("227 ") - 1, +- .skip = '(', +- .term = ')', ++ .skip = ' ', ++ .term = '\0', + .ftptype = NF_CT_FTP_PASV, +- .getnum = try_rfc959, ++ .getnum = try_rfc1123, + }, + { + .pattern = "229 ", +@@ -129,8 +130,9 @@ static int try_number(const char *data, + i++; + else { + /* Unexpected character; true if it's the +- terminator and we're finished. */ +- if (*data == term && i == array_size - 1) ++ terminator (or we don't care about one) ++ and we're finished. */ ++ if ((*data == term || !term) && i == array_size - 1) + return len; + + pr_debug("Char %u (got %u nums) `%u' unexpected\n", +@@ -160,6 +162,30 @@ static int try_rfc959(const char *data, + return length; + } + ++/* ++ * From RFC 1123: ++ * The format of the 227 reply to a PASV command is not ++ * well standardized. In particular, an FTP client cannot ++ * assume that the parentheses shown on page 40 of RFC-959 ++ * will be present (and in fact, Figure 3 on page 43 omits ++ * them). 
Therefore, a User-FTP program that interprets ++ * the PASV reply must scan the reply for the first digit ++ * of the host and port numbers. ++ */ ++static int try_rfc1123(const char *data, size_t dlen, ++ struct nf_conntrack_man *cmd, char term) ++{ ++ int i; ++ for (i = 0; i < dlen; i++) ++ if (isdigit(data[i])) ++ break; ++ ++ if (i == dlen) ++ return 0; ++ ++ return try_rfc959(data + i, dlen - i, cmd, 0); ++} ++ + /* Grab port: number up to delimiter */ + static int get_port(const char *data, int start, size_t dlen, char delim, + __be16 *port) diff --git a/patches.fixes/netfilter-remove-pointless-config_nf_ct_acct-warning b/patches.fixes/netfilter-remove-pointless-config_nf_ct_acct-warning deleted file mode 100644 index 1a1e754..0000000 --- a/patches.fixes/netfilter-remove-pointless-config_nf_ct_acct-warning +++ /dev/null @@ -1,120 +0,0 @@ -From: Jeff Mahoney -Subject: netfilter: Remove pointless CONFIG_NF_CT_ACCT warning -References: bnc#552033 (and others) -Patch-mainline: not yet - - CONFIG_NF_CT_ACCT was scheduled at 2.6.27 release-time to be removed - in 2.6.29. That hasn't happened, and it's sort of pointless to remove the - option as it sets the default value for whether it's nf_conntrack_acct is - enabled at boot-time. - - It still issues a really annoying warning though. This patch properly - documents the option as controlling the default and undeprecates it. It - also renames the option to a more subsystem-consistent NF_CONNTRACK_ACCT. - -Signed-off-by: Jeff Mahoney ---- - Documentation/feature-removal-schedule.txt | 9 --------- - Documentation/kernel-parameters.txt | 3 +-- - net/netfilter/Kconfig | 11 +++++------ - net/netfilter/nf_conntrack_acct.c | 8 +------- - net/netfilter/nf_conntrack_netlink.c | 2 -- - 5 files changed, 7 insertions(+), 26 deletions(-) - ---- a/Documentation/feature-removal-schedule.txt -+++ b/Documentation/feature-removal-schedule.txt -@@ -313,15 +313,6 @@ Who: Johannes Berg -- ----------------------------- -- - What: sysfs ui for changing p4-clockmod parameters - When: September 2009 - Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -1567,8 +1567,7 @@ and is between 256 and 4096 characters. - [NETFILTER] Enable connection tracking flow accounting - 0 to disable accounting - 1 to enable accounting -- Default value depends on CONFIG_NF_CT_ACCT that is -- going to be removed in 2.6.29. -+ Default value depends on CONFIG_NF_CT_ACCT. - - nfsaddrs= [NFS] - See Documentation/filesystems/nfs/nfsroot.txt. ---- a/net/netfilter/Kconfig -+++ b/net/netfilter/Kconfig -@@ -40,12 +40,13 @@ config NF_CONNTRACK - - if NF_CONNTRACK - --config NF_CT_ACCT -- bool "Connection tracking flow accounting" -+config NF_CONNTRACK_ACCT -+ bool "Enable connection tracking flow accounting by default" - depends on NETFILTER_ADVANCED - help -- If this option is enabled, the connection tracking code will -- keep per-flow packet and byte counters. -+ -+ This option controls whether per-flow packet and byte counters -+ are enabled by default. - - Those counters can be used for flow-based accounting or the - `connbytes' match. -@@ -57,8 +58,6 @@ config NF_CT_ACCT - You may also disable/enable it on a running system with: - sysctl net.netfilter.nf_conntrack_acct=0/1 - -- This option will be removed in 2.6.29. -- - If unsure, say `N'. 
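For readers who want to experiment with the PASV handling added by the FTP conntrack patch above, here is a small standalone approximation of what try_rfc1123() plus try_rfc959() end up doing: skip forward to the first digit of the reply body, then read six comma-separated numbers and ignore whatever terminator follows. This is a userspace sketch only, not the kernel helper; parse_pasv() and its calling convention are invented for illustration, and the sample string is the non-parenthesised 227 reply quoted in the patch description.

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace illustration only; not the kernel's try_rfc1123().
     * Parse the six comma-separated numbers of a 227 PASV reply the
     * RFC 1123 way: scan forward to the first digit instead of
     * expecting an opening '('. */
    static int parse_pasv(const char *reply, unsigned int num[6])
    {
        const char *p = reply;
        int i;

        while (*p && !isdigit((unsigned char)*p))
            p++;                            /* find the first digit */
        if (!*p)
            return -1;

        for (i = 0; i < 6; i++) {
            char *end;

            num[i] = (unsigned int)strtoul(p, &end, 10);
            p = end;
            if (i < 5 && *p++ != ',')       /* numbers separated by ',' */
                return -1;
        }
        return 0;                           /* trailing text is ignored */
    }

    int main(void)
    {
        const char *odd =
            "227 Data transfer will passively listen to 67,218,99,134,50,144";
        unsigned int n[6];

        /* The conntrack code only sees the text after the matched
         * "227 " pattern, so skip the status code here as well. */
        if (parse_pasv(odd + 4, n) == 0)
            printf("%u.%u.%u.%u port %u\n",
                   n[0], n[1], n[2], n[3], n[4] * 256 + n[5]);
        return 0;
    }

Compiled and run, this prints 67.218.99.134 port 12944 for the reply format that the stricter parenthesis-only parser would have rejected.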
- - config NF_CONNTRACK_MARK ---- a/net/netfilter/nf_conntrack_acct.c -+++ b/net/netfilter/nf_conntrack_acct.c -@@ -16,7 +16,7 @@ - #include - #include - --#ifdef CONFIG_NF_CT_ACCT -+#ifdef CONFIG_NF_CONNTRACK_ACCT - #define NF_CT_ACCT_DEFAULT 1 - #else - #define NF_CT_ACCT_DEFAULT 0 -@@ -113,12 +113,6 @@ int nf_conntrack_acct_init(struct net *n - net->ct.sysctl_acct = nf_ct_acct; - - if (net_eq(net, &init_net)) { --#ifdef CONFIG_NF_CT_ACCT -- printk(KERN_WARNING "CONFIG_NF_CT_ACCT is deprecated and will be removed soon. Please use\n"); -- printk(KERN_WARNING "nf_conntrack.acct=1 kernel parameter, acct=1 nf_conntrack module option or\n"); -- printk(KERN_WARNING "sysctl net.netfilter.nf_conntrack_acct=1 to enable it.\n"); --#endif -- - ret = nf_ct_extend_register(&acct_extend); - if (ret < 0) { - printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n"); ---- a/net/netfilter/nf_conntrack_netlink.c -+++ b/net/netfilter/nf_conntrack_netlink.c -@@ -435,11 +435,9 @@ ctnetlink_nlmsg_size(const struct nf_con - + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ - + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ - + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ --#ifdef CONFIG_NF_CT_ACCT - + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ - + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */ - + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */ --#endif - + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ - + nla_total_size(0) /* CTA_PROTOINFO */ - + nla_total_size(0) /* CTA_HELP */ diff --git a/patches.fixes/nfs-adaptive-readdir-plus b/patches.fixes/nfs-adaptive-readdir-plus new file mode 100644 index 0000000..cc8f410 --- /dev/null +++ b/patches.fixes/nfs-adaptive-readdir-plus @@ -0,0 +1,80 @@ +From: NeilBrown +Subject: Make selection of 'readdir-plus' adapt to usage patterns. +Patch-mainline: not yet +References: bnc#678123 + +While the use of READDIRPLUS is significantly more efficient than +READDIR followed by many GETATTR calls, it is still less efficient +than just READDIR if the attributes are not required. + +We can get a hint as to whether the application requires attr information +by looking at whether any ->getattr calls are made between +->readdir calls. +If there are any, then getting the attributes seems to be worth while. + +This patch tracks whether there have been recent getattr calls on +children of a directory and uses that information to selectively +disable READDIRPLUS on that directory. + +The first 'readdir' call is always served using READDIRPLUS. +Subsequent calls only use READDIRPLUS if there was a getattr on a child +in the mean time. + +The locking of ->d_parent access needs to be reviewed. +As the bit is simply a hint, it isn't critical that it is set +on the "correct" parent if a rename is happening, but it is +critical that the 'set' doesn't set a bit in something that +isn't even an inode any more. 
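The decision logic described above reduces to a single hint bit on the directory: a getattr on any child sets it, and each readdir pass consumes it to decide whether READDIRPLUS is worth the extra cost. The sketch below is deliberately free of NFS internals; struct dir_hint, HINT_SEEN_GETATTR and the two helpers are invented names standing in for the NFS_INO_SEEN_GETATTR inode bit and the set_bit/test_bit/clear_bit calls used in the patch.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the per-directory inode flags word (illustration only). */
    struct dir_hint {
        unsigned long flags;
    };
    #define HINT_SEEN_GETATTR (1UL << 0)

    /* getattr path: remember that a child's attributes were wanted. */
    static void note_child_getattr(struct dir_hint *parent)
    {
        parent->flags |= HINT_SEEN_GETATTR;
    }

    /* readdir path: the first pass always uses readdirplus; later passes
     * only if attributes were requested since the previous pass.  The
     * hint is cleared so each pass makes a fresh decision. */
    static bool use_readdirplus(struct dir_hint *dir, long pos)
    {
        bool plus = (pos == 0) || (dir->flags & HINT_SEEN_GETATTR);

        dir->flags &= ~HINT_SEEN_GETATTR;
        return plus;
    }

    int main(void)
    {
        struct dir_hint dir = { 0 };

        printf("first pass: %d\n", use_readdirplus(&dir, 0));            /* 1 */
        printf("second pass, no getattr: %d\n", use_readdirplus(&dir, 100)); /* 0 */
        note_child_getattr(&dir);
        printf("third pass, after getattr: %d\n", use_readdirplus(&dir, 200)); /* 1 */
        return 0;
    }

In the patch itself the same decision is made in nfs_readdir() with test_bit()/clear_bit() on NFS_I(inode)->flags, and the bit is set from nfs_getattr() on the parent directory under rcu_read_lock().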
+ +Acked-by: NeilBrown +Signed-off-by: Neil Brown + +--- + fs/nfs/dir.c | 3 +++ + fs/nfs/inode.c | 9 +++++++++ + include/linux/nfs_fs.h | 4 ++++ + 3 files changed, 16 insertions(+) + +--- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/dir.c ++++ linux-2.6.37-openSUSE-11.4/fs/nfs/dir.c +@@ -802,6 +802,9 @@ static int nfs_readdir(struct file *filp + desc->dir_cookie = &nfs_file_open_context(filp)->dir_cookie; + desc->decode = NFS_PROTO(inode)->decode_dirent; + desc->plus = NFS_USE_READDIRPLUS(inode); ++ if (filp->f_pos > 0 && !test_bit(NFS_INO_SEEN_GETATTR, &NFS_I(inode)->flags)) ++ desc->plus = 0; ++ clear_bit(NFS_INO_SEEN_GETATTR, &NFS_I(inode)->flags); + + nfs_block_sillyrename(dentry); + res = nfs_revalidate_mapping(inode, filp->f_mapping); +--- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/inode.c ++++ linux-2.6.37-openSUSE-11.4/fs/nfs/inode.c +@@ -500,6 +500,15 @@ int nfs_getattr(struct vfsmount *mnt, st + struct inode *inode = dentry->d_inode; + int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; + int err; ++ struct dentry *p; ++ struct inode *pi; ++ ++ rcu_read_lock(); ++ p = dentry->d_parent; ++ pi = rcu_dereference(p)->d_inode; ++ if (pi && !test_bit(NFS_INO_SEEN_GETATTR, &NFS_I(pi)->flags)) ++ set_bit(NFS_INO_SEEN_GETATTR, &NFS_I(pi)->flags); ++ rcu_read_unlock(); + + /* Flush out writes to the server in order to update c/mtime. */ + if (S_ISREG(inode->i_mode)) { +--- linux-2.6.37-openSUSE-11.4.orig/include/linux/nfs_fs.h ++++ linux-2.6.37-openSUSE-11.4/include/linux/nfs_fs.h +@@ -220,6 +220,10 @@ struct nfs_inode { + #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ + #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ + #define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ ++#define NFS_INO_SEEN_GETATTR (8) /* flag to track if app is calling ++ * getattr in a directory during ++ * readdir ++ */ + + static inline struct nfs_inode *NFS_I(const struct inode *inode) + { diff --git a/patches.fixes/nfs-slot-table-alloc b/patches.fixes/nfs-slot-table-alloc index 0946ad9..14d3004 100644 --- a/patches.fixes/nfs-slot-table-alloc +++ b/patches.fixes/nfs-slot-table-alloc @@ -18,14 +18,14 @@ Signed-off-by: Neil Brown net/sunrpc/xprtsock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) ---- a/net/sunrpc/xprtsock.c -+++ b/net/sunrpc/xprtsock.c -@@ -2295,7 +2295,7 @@ static struct rpc_xprt *xs_setup_xprt(st - xprt = &new->xprt; +--- a/net/sunrpc/xprt.c ++++ b/net/sunrpc/xprt.c +@@ -2266,7 +2266,7 @@ static struct rpc_xprt *xs_setup_xprt(st + kref_init(&xprt->kref); + + xprt->max_reqs = max_req; +- xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); ++ xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL | __GFP_REPEAT); + if (xprt->slot == NULL) + goto out_free; - xprt->max_reqs = slot_table_size; -- xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); -+ xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL | __GFP_REPEAT); - if (xprt->slot == NULL) { - kfree(xprt); - dprintk("RPC: xs_setup_xprt: couldn't allocate slot " diff --git a/patches.fixes/nfs-write.c-bug-removal.patch b/patches.fixes/nfs-write.c-bug-removal.patch deleted file mode 100644 index d501cfb..0000000 --- a/patches.fixes/nfs-write.c-bug-removal.patch +++ /dev/null @@ -1,170 +0,0 @@ -From: ffilz@us.ibm.com -Subject: Revert "NFS: Allow redirtying of a completed unstable write." 
-Patch-mainline: REVERT patch from 2.6.27 -References: 442267 - -mainline commit e468bae97d243fe0e1515abaa1f7d0edf1476ad0 -introduces a BUG() that is apprently fairly easy to trigger. -As it is just making a minor performance enhancement, it is best to -revert the patch until the issue is better understood. - -Acked-by: NeilBrown -Signed-off-by: Neil Brown - ---- - fs/nfs/write.c | 65 ++++++++++++++++++++++++++++----------------------------- - 1 file changed, 33 insertions(+), 32 deletions(-) - ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -250,9 +250,12 @@ static int nfs_page_async_flush(struct n - return ret; - spin_lock(&inode->i_lock); - } -- if (test_bit(PG_CLEAN, &req->wb_flags)) { -+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { -+ /* This request is marked for commit */ - spin_unlock(&inode->i_lock); -- BUG(); -+ nfs_clear_page_tag_locked(req); -+ nfs_pageio_complete(pgio); -+ return 0; - } - if (nfs_set_page_writeback(page) != 0) { - spin_unlock(&inode->i_lock); -@@ -411,6 +414,19 @@ nfs_mark_request_dirty(struct nfs_page * - __set_page_dirty_nobuffers(req->wb_page); - } - -+/* -+ * Check if a request is dirty -+ */ -+static inline int -+nfs_dirty_request(struct nfs_page *req) -+{ -+ struct page *page = req->wb_page; -+ -+ if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags)) -+ return 0; -+ return !PageWriteback(page); -+} -+ - #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) - /* - * Add a request to the inode's commit list. -@@ -422,7 +438,7 @@ nfs_mark_request_commit(struct nfs_page - struct nfs_inode *nfsi = NFS_I(inode); - - spin_lock(&inode->i_lock); -- set_bit(PG_CLEAN, &(req)->wb_flags); -+ set_bit(PG_NEED_COMMIT, &(req)->wb_flags); - radix_tree_tag_set(&nfsi->nfs_page_tree, - req->wb_index, - NFS_PAGE_TAG_COMMIT); -@@ -432,19 +448,6 @@ nfs_mark_request_commit(struct nfs_page - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); - } - --static int --nfs_clear_request_commit(struct nfs_page *req) --{ -- struct page *page = req->wb_page; -- -- if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) { -- dec_zone_page_state(page, NR_UNSTABLE_NFS); -- dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE); -- return 1; -- } -- return 0; --} -- - static inline - int nfs_write_need_commit(struct nfs_write_data *data) - { -@@ -454,7 +457,7 @@ int nfs_write_need_commit(struct nfs_wri - static inline - int nfs_reschedule_unstable_write(struct nfs_page *req) - { -- if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) { -+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { - nfs_mark_request_commit(req); - return 1; - } -@@ -470,12 +473,6 @@ nfs_mark_request_commit(struct nfs_page - { - } - --static inline int --nfs_clear_request_commit(struct nfs_page *req) --{ -- return 0; --} -- - static inline - int nfs_write_need_commit(struct nfs_write_data *data) - { -@@ -533,8 +530,11 @@ static void nfs_cancel_commit_list(struc - - while(!list_empty(head)) { - req = nfs_list_entry(head->next); -+ dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); -+ dec_bdi_stat(req->wb_page->mapping->backing_dev_info, -+ BDI_RECLAIMABLE); - nfs_list_remove_request(req); -- nfs_clear_request_commit(req); -+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags); - nfs_inode_remove_request(req); - nfs_unlock_request(req); - } -@@ -614,7 +614,8 @@ static struct nfs_page *nfs_try_to_updat - * Note: nfs_flush_incompatible() will already - * have flushed out requests having wrong owners. 
- */ -- if (offset > rqend -+ if (!nfs_dirty_request(req) -+ || offset > rqend - || end < req->wb_offset) - goto out_flushme; - -@@ -630,10 +631,6 @@ static struct nfs_page *nfs_try_to_updat - spin_lock(&inode->i_lock); - } - -- if (nfs_clear_request_commit(req)) -- radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, -- req->wb_index, NFS_PAGE_TAG_COMMIT); -- - /* Okay, the request matches. Update the region */ - if (offset < req->wb_offset) { - req->wb_offset = offset; -@@ -715,7 +712,8 @@ int nfs_flush_incompatible(struct file * - req = nfs_page_find_request(page); - if (req == NULL) - return 0; -- do_flush = req->wb_page != page || req->wb_context != ctx; -+ do_flush = req->wb_page != page || req->wb_context != ctx -+ || !nfs_dirty_request(req); - nfs_release_request(req); - if (!do_flush) - return 0; -@@ -1341,7 +1339,10 @@ static void nfs_commit_release(void *cal - while (!list_empty(&data->pages)) { - req = nfs_list_entry(data->pages.next); - nfs_list_remove_request(req); -- nfs_clear_request_commit(req); -+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags); -+ dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); -+ dec_bdi_stat(req->wb_page->mapping->backing_dev_info, -+ BDI_RECLAIMABLE); - - dprintk("NFS: commit (%s/%lld %d@%lld)", - req->wb_context->path.dentry->d_inode->i_sb->s_id, -@@ -1516,7 +1517,7 @@ int nfs_wb_page_cancel(struct inode *ino - req = nfs_page_find_request(page); - if (req == NULL) - goto out; -- if (test_bit(PG_CLEAN, &req->wb_flags)) { -+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { - nfs_release_request(req); - break; - } diff --git a/patches.fixes/nfsd-05-sunrpc-cache-allow-thread-to-block-while-waiting-for.patch b/patches.fixes/nfsd-05-sunrpc-cache-allow-thread-to-block-while-waiting-for.patch deleted file mode 100644 index 9e052da..0000000 --- a/patches.fixes/nfsd-05-sunrpc-cache-allow-thread-to-block-while-waiting-for.patch +++ /dev/null @@ -1,155 +0,0 @@ -Patch-mainline: submitted 04aug2009 -References: bnc#498708 -From: NeilBrown -Date: Tue, 4 Aug 2009 15:06:38 +1000 -Subject: [PATCH 07/12] sunrpc/cache: allow thread to block while waiting for cache update. - -The current practice of waiting for cache updates by queueing the -whole request to be retried has (at least) two problems. - -1/ We NFSv4, requests can be quite complex and re-trying a whole - request when a later part fails should only be a list-resort, not a - normal practice. - -2/ Large requests, and in particular any 'write' request, will not be - queued by the current code and doing so would be undesirable. - -In many cases only a very sort wait is needed before the cache gets -valid data. - -So, providing the underlying transport permits it by setting - ->thread_wait, -arrange to wait briefly for an upcall to be completed (as reflected in -the clearing of CACHE_PENDING). -If the short wait was not long enough and CACHE_PENDING is still set, -fall back on the old approach. - -The 'thread_wait' value is set to 5 seconds when there are spare -threads, and 1 second when there are no spare threads. - -These values are probably much higher than needed, but will ensure -some forward progress. - -[Fixed 18Jan2010 to return -ve from cache_refer_req waits for the - upcall to complete instead of deferring the request. 
- Thanks to Dong Yang Li -] - -Signed-off-by: NeilBrown - ---- - include/linux/sunrpc/cache.h | 3 ++ - net/sunrpc/cache.c | 44 ++++++++++++++++++++++++++++++++++++++++++- - net/sunrpc/svc_xprt.c | 11 ++++++++++ - 3 files changed, 57 insertions(+), 1 deletion(-) - ---- a/include/linux/sunrpc/cache.h -+++ b/include/linux/sunrpc/cache.h -@@ -125,6 +125,9 @@ struct cache_detail { - */ - struct cache_req { - struct cache_deferred_req *(*defer)(struct cache_req *req); -+ int thread_wait; /* How long (jiffies) we can block the -+ * current thread to wait for updates. -+ */ - }; - /* this must be embedded in a deferred_request that is being - * delayed awaiting cache-fill ---- a/net/sunrpc/cache.c -+++ b/net/sunrpc/cache.c -@@ -497,10 +497,22 @@ static LIST_HEAD(cache_defer_list); - static struct list_head cache_defer_hash[DFR_HASHSIZE]; - static int cache_defer_cnt; - -+struct thread_deferred_req { -+ struct cache_deferred_req handle; -+ wait_queue_head_t wait; -+}; -+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many) -+{ -+ struct thread_deferred_req *dr = -+ container_of(dreq, struct thread_deferred_req, handle); -+ wake_up(&dr->wait); -+} -+ - static int cache_defer_req(struct cache_req *req, struct cache_head *item) - { - struct cache_deferred_req *dreq, *discard; - int hash = DFR_HASH(item); -+ struct thread_deferred_req sleeper; - - if (cache_defer_cnt >= DFR_MAX) { - /* too much in the cache, randomly drop this one, -@@ -509,7 +521,14 @@ static int cache_defer_req(struct cache_ - if (net_random()&1) - return -ENOMEM; - } -- dreq = req->defer(req); -+ if (req->thread_wait) { -+ dreq = &sleeper.handle; -+ init_waitqueue_head(&sleeper.wait); -+ dreq->revisit = cache_restart_thread; -+ } else -+ dreq = req->defer(req); -+ -+ retry: - if (dreq == NULL) - return -ENOMEM; - -@@ -543,6 +562,29 @@ static int cache_defer_req(struct cache_ - cache_revisit_request(item); - return -EAGAIN; - } -+ -+ if (dreq == &sleeper.handle) { -+ wait_event_interruptible_timeout( -+ sleeper.wait, -+ !test_bit(CACHE_PENDING, &item->flags) -+ || list_empty(&sleeper.handle.hash), -+ req->thread_wait); -+ spin_lock(&cache_defer_lock); -+ if (!list_empty(&sleeper.handle.hash)) { -+ list_del_init(&sleeper.handle.recent); -+ list_del_init(&sleeper.handle.hash); -+ cache_defer_cnt--; -+ } -+ spin_unlock(&cache_defer_lock); -+ if (test_bit(CACHE_PENDING, &item->flags)) { -+ /* item is still pending, try request -+ * deferral -+ */ -+ dreq = req->defer(req); -+ goto retry; -+ } -+ return -EAGAIN; -+ } - return 0; - } - ---- a/net/sunrpc/svc_xprt.c -+++ b/net/sunrpc/svc_xprt.c -@@ -650,6 +650,11 @@ int svc_recv(struct svc_rqst *rqstp, lon - if (signalled() || kthread_should_stop()) - return -EINTR; - -+ /* Normally we will wait up to 5 seconds for any required -+ * cache information to be provided. -+ */ -+ rqstp->rq_chandle.thread_wait = 5*HZ; -+ - spin_lock_bh(&pool->sp_lock); - xprt = svc_xprt_dequeue(pool); - if (xprt) { -@@ -657,6 +662,12 @@ int svc_recv(struct svc_rqst *rqstp, lon - svc_xprt_get(xprt); - rqstp->rq_reserved = serv->sv_max_mesg; - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); -+ -+ /* As there is a shortage of threads and this request -+ * had to be queue, don't allow the thread to wait so -+ * long for cache updates. -+ */ -+ rqstp->rq_chandle.thread_wait = 1*HZ; - } else { - /* No data pending. 
Go to sleep */ - svc_thread_enqueue(pool, rqstp); diff --git a/patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch b/patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch index 907a3a2..a1442a4 100644 --- a/patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch +++ b/patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch @@ -21,7 +21,7 @@ Signed-off-by: NeilBrown --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c -@@ -787,9 +787,18 @@ exp_find_key(svc_client *clp, int fsid_t +@@ -794,9 +794,18 @@ exp_find_key(svc_client *clp, int fsid_t memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); ek = svc_expkey_lookup(&key); @@ -40,7 +40,7 @@ Signed-off-by: NeilBrown if (err) return ERR_PTR(err); return ek; -@@ -859,9 +868,18 @@ static svc_export *exp_get_by_name(svc_c +@@ -866,9 +875,18 @@ static svc_export *exp_get_by_name(svc_c key.ex_path = *path; exp = svc_export_lookup(&key); @@ -61,7 +61,7 @@ Signed-off-by: NeilBrown return exp; --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c -@@ -662,13 +662,14 @@ static struct unix_gid *unix_gid_lookup( +@@ -663,13 +663,14 @@ static struct unix_gid *unix_gid_lookup( static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp) { @@ -77,7 +77,7 @@ Signed-off-by: NeilBrown ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle); switch (ret) { case -ENOENT: -@@ -677,6 +678,13 @@ static struct group_info *unix_gid_find( +@@ -678,6 +679,13 @@ static struct group_info *unix_gid_find( gi = get_group_info(ug->gi); cache_put(&ug->h, &unix_gid_cache); return gi; @@ -91,7 +91,7 @@ Signed-off-by: NeilBrown default: return ERR_PTR(-EAGAIN); } -@@ -687,7 +695,7 @@ svcauth_unix_set_client(struct svc_rqst +@@ -688,7 +696,7 @@ svcauth_unix_set_client(struct svc_rqst { struct sockaddr_in *sin; struct sockaddr_in6 *sin6, sin6_storage; @@ -100,7 +100,7 @@ Signed-off-by: NeilBrown struct group_info *gi; struct svc_cred *cred = &rqstp->rq_cred; -@@ -713,14 +721,23 @@ svcauth_unix_set_client(struct svc_rqst +@@ -714,14 +722,23 @@ svcauth_unix_set_client(struct svc_rqst ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, &sin6->sin6_addr); diff --git a/patches.fixes/nfsd-07-nfsd-idmap-drop-special-request-deferal-in-favour-of.patch b/patches.fixes/nfsd-07-nfsd-idmap-drop-special-request-deferal-in-favour-of.patch deleted file mode 100644 index d3d1d88..0000000 --- a/patches.fixes/nfsd-07-nfsd-idmap-drop-special-request-deferal-in-favour-of.patch +++ /dev/null @@ -1,141 +0,0 @@ -Patch-mainline: submitted 04aug2009 -References: bnc#498708 -From: NeilBrown -Date: Tue, 4 Aug 2009 15:06:39 +1000 -Subject: [PATCH 09/12] nfsd/idmap: drop special request deferal in favour of improved default. - -The idmap code manages request deferal by waiting for a reply from -userspace rather than putting the NFS request on a queue to be retried -from the start. -Now that the comment deferal code does this there is no need for the -special code in idmap. 
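The reason the idmap-specific machinery can be dropped is the behaviour introduced by the sunrpc/cache patch earlier in this series: wait briefly for CACHE_PENDING to clear, and only fall back to full request deferral if the upcall still has not been answered. A rough userspace analogue of that bounded-wait-then-defer shape, using POSIX condition variables, is sketched below; the struct, function name and -EAGAIN return convention are illustrative choices rather than the kernel interfaces.

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    /* Analogue of a cache entry whose CACHE_PENDING state is cleared
     * (and waiters woken) once the userspace daemon answers the upcall.
     * Illustration only; not the sunrpc cache structures. */
    struct cache_item {
        pthread_mutex_t lock;
        pthread_cond_t  filled;
        bool            pending;
    };

    /* Wait up to wait_secs for the item to be filled.  Returns 0 when
     * the upcall completed in time, and -EAGAIN when the caller should
     * fall back to queueing (deferring) the whole request as before. */
    static int wait_briefly_or_defer(struct cache_item *it, int wait_secs)
    {
        struct timespec deadline;
        bool still_pending;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += wait_secs;

        pthread_mutex_lock(&it->lock);
        while (it->pending && rc == 0)
            rc = pthread_cond_timedwait(&it->filled, &it->lock, &deadline);
        still_pending = it->pending;
        pthread_mutex_unlock(&it->lock);

        return still_pending ? -EAGAIN : 0;
    }

In the kernel patch the waiting side is the nfsd/svc thread and the wake-up side is cache_restart_thread(), run when the daemon writes its reply; the 5 second and 1 second values set in svc_recv() play the role of wait_secs here.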
- -Signed-off-by: NeilBrown - ---- - fs/nfsd/nfs4idmap.c | 105 +++++----------------------------------------------- - 1 file changed, 11 insertions(+), 94 deletions(-) - ---- a/fs/nfsd/nfs4idmap.c -+++ b/fs/nfsd/nfs4idmap.c -@@ -481,109 +481,26 @@ nfsd_idmap_shutdown(void) - cache_unregister(&nametoid_cache); - } - --/* -- * Deferred request handling -- */ -- --struct idmap_defer_req { -- struct cache_req req; -- struct cache_deferred_req deferred_req; -- wait_queue_head_t waitq; -- atomic_t count; --}; -- --static inline void --put_mdr(struct idmap_defer_req *mdr) --{ -- if (atomic_dec_and_test(&mdr->count)) -- kfree(mdr); --} -- --static inline void --get_mdr(struct idmap_defer_req *mdr) --{ -- atomic_inc(&mdr->count); --} -- --static void --idmap_revisit(struct cache_deferred_req *dreq, int toomany) --{ -- struct idmap_defer_req *mdr = -- container_of(dreq, struct idmap_defer_req, deferred_req); -- -- wake_up(&mdr->waitq); -- put_mdr(mdr); --} -- --static struct cache_deferred_req * --idmap_defer(struct cache_req *req) --{ -- struct idmap_defer_req *mdr = -- container_of(req, struct idmap_defer_req, req); -- -- mdr->deferred_req.revisit = idmap_revisit; -- get_mdr(mdr); -- return (&mdr->deferred_req); --} -- --static inline int --do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key, -- struct cache_detail *detail, struct ent **item, -- struct idmap_defer_req *mdr) --{ -- *item = lookup_fn(key); -- if (!*item) -- return -ENOMEM; -- return cache_check(detail, &(*item)->h, &mdr->req); --} -- --static inline int --do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *), -- struct ent *key, struct cache_detail *detail, -- struct ent **item) --{ -- int ret = -ENOMEM; -- -- *item = lookup_fn(key); -- if (!*item) -- goto out_err; -- ret = -ETIMEDOUT; -- if (!test_bit(CACHE_VALID, &(*item)->h.flags) -- || (*item)->h.expiry_time < get_seconds() -- || detail->flush_time > (*item)->h.last_refresh) -- goto out_put; -- ret = -ENOENT; -- if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags)) -- goto out_put; -- return 0; --out_put: -- cache_put(&(*item)->h, detail); --out_err: -- *item = NULL; -- return ret; --} -- - static int - idmap_lookup(struct svc_rqst *rqstp, - struct ent *(*lookup_fn)(struct ent *), struct ent *key, - struct cache_detail *detail, struct ent **item) - { -- struct idmap_defer_req *mdr; - int ret; - -- mdr = kzalloc(sizeof(*mdr), GFP_KERNEL); -- if (!mdr) -+ *item = lookup_fn(key); -+ if (!*item) - return -ENOMEM; -- atomic_set(&mdr->count, 1); -- init_waitqueue_head(&mdr->waitq); -- mdr->req.defer = idmap_defer; -- ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr); -- if (ret == -EAGAIN) { -- wait_event_interruptible_timeout(mdr->waitq, -- test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ); -- ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item); -+ retry: -+ ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle); -+ -+ if (ret == -ETIMEDOUT) { -+ struct ent *prev_item = *item; -+ *item = lookup_fn(key); -+ if (*item != prev_item) -+ goto retry; -+ cache_put(&(*item)->h, detail); - } -- put_mdr(mdr); - return ret; - } - diff --git a/patches.fixes/novfs-LFS-initialization b/patches.fixes/novfs-LFS-initialization index 6e108b7..2945305 100644 --- a/patches.fixes/novfs-LFS-initialization +++ b/patches.fixes/novfs-LFS-initialization @@ -8,11 +8,13 @@ Signed-off-by: Sankar P Signed-off-by: Samrat Kannikar Acked-by: Jan Kara -diff --git a/fs/novfs/inode.c b/fs/novfs/inode.c -index e33a5f8..1c17f7f 100644 +--- + fs/novfs/inode.c | 2 +- + 1 file 
changed, 1 insertion(+), 1 deletion(-) + --- a/fs/novfs/inode.c +++ b/fs/novfs/inode.c -@@ -3826,7 +3826,7 @@ int novfs_fill_super(struct super_block *SB, void *Data, int Silent) +@@ -3825,7 +3825,7 @@ int novfs_fill_super(struct super_block SB->s_blocksize = PAGE_CACHE_SIZE; SB->s_blocksize_bits = PAGE_CACHE_SHIFT; @@ -21,6 +23,3 @@ index e33a5f8..1c17f7f 100644 SB->s_op = &novfs_ops; SB->s_flags |= (MS_NODIRATIME | MS_NODEV | MS_POSIXACL); SB->s_magic = NOVFS_MAGIC; --- -1.6.4.2 - diff --git a/patches.fixes/novfs-bdi-init.diff b/patches.fixes/novfs-bdi-init.diff new file mode 100644 index 0000000..2c3e274 --- /dev/null +++ b/patches.fixes/novfs-bdi-init.diff @@ -0,0 +1,54 @@ +From: Sankar P +Subject: novfs: backing device info initialization +References: bnc#623472 +Patch-mainline: no + +The patch initializes and destroys the backing device info struct +for the novfs properly. Fixes an OOPS as well. + +Acked-by: Jan Kara +Acked-by: Sankar P +Signed-off-by: Anders Johansson + +--- + fs/novfs/inode.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -3977,6 +3977,17 @@ int __init init_novfs(void) + inHAX = 0; + inHAXTime = get_nanosecond_time(); + ++ retCode = bdi_init(&novfs_backing_dev_info); ++ ++ if(!retCode) ++ retCode = bdi_register(&novfs_backing_dev_info, NULL, "novfs-map"); ++ if (retCode) { ++ bdi_destroy(&novfs_backing_dev_info); ++ goto bdi_fail; ++ } ++ ++ ++ + retCode = novfs_proc_init(); + + novfs_profile_init(); +@@ -3992,6 +4003,8 @@ int __init init_novfs(void) + novfs_scope_exit(); + } + } ++ ++bdi_fail: + return (retCode); + } + +@@ -4007,6 +4020,8 @@ void __exit exit_novfs(void) + kfree(novfs_current_mnt); + novfs_current_mnt = NULL; + } ++ ++ bdi_destroy(&novfs_backing_dev_info); + } + + int novfs_lock_inode_cache(struct inode *i) diff --git a/patches.fixes/novfs-dentry-cache-limit.patch b/patches.fixes/novfs-dentry-cache-limit.patch index 26d49d8..d398dfa 100644 --- a/patches.fixes/novfs-dentry-cache-limit.patch +++ b/patches.fixes/novfs-dentry-cache-limit.patch @@ -9,10 +9,13 @@ number of dir_cache entries, that are maintained by novfs Signed-off-by: Samrat Kannikar Acked-by: Jan Kara -diff --git linux-2.6.32-sle11-sp1/fs/novfs/inode.c linux-2.6.32-sle11-sp1/fs/novfs/inode.c ---- linux-2.6.32-sle11-sp1/fs/novfs/inode.c -+++ linux-2.6.32-sle11-sp1/fs/novfs/inode.c -@@ -4346,8 +4346,6 @@ int novfs_add_inode_entry(struct inode *i, +--- + fs/novfs/inode.c | 15 --------------- + 1 file changed, 15 deletions(-) + +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -4345,8 +4345,6 @@ int novfs_add_inode_entry(struct inode * struct inode_data *id; struct novfs_dir_cache *new; int retVal = -ENOMEM; @@ -21,7 +24,7 @@ diff --git linux-2.6.32-sle11-sp1/fs/novfs/inode.c linux-2.6.32-sle11-sp1/fs/nov //SClark DbgPrint("i: %p", i); -@@ -4383,19 +4381,6 @@ int novfs_add_inode_entry(struct inode *i, +@@ -4382,19 +4380,6 @@ int novfs_add_inode_entry(struct inode * memcpy(new->name, name->name, name->len); new->name[new->nameLen] = '\0'; list_add(&new->list, &id->DirCache); @@ -41,5 +44,3 @@ diff --git linux-2.6.32-sle11-sp1/fs/novfs/inode.c linux-2.6.32-sle11-sp1/fs/nov } } return (retVal); --- -1.6.4.2 diff --git a/patches.fixes/novfs-fix-oops-in-scope-finding b/patches.fixes/novfs-fix-oops-in-scope-finding index a925ba8..2f69bbc 100644 --- a/patches.fixes/novfs-fix-oops-in-scope-finding +++ b/patches.fixes/novfs-fix-oops-in-scope-finding @@ -11,13 +11,11 @@ Signed-off-by: Sankar P Acked-by: Jan Kara --- fs/novfs/scope.c | 7 
+++---- - 1 files changed, 3 insertions(+), 4 deletions(-) + 1 file changed, 3 insertions(+), 4 deletions(-) -diff --git a/tmp/linux-2.6.32-sle11sp1/fs/novfs/scope.c b/tmp/linux-2.6.32-sle11sp1/fs/novfs/scope.c -index 6de40a8..5b408f6 100644 --- a/fs/novfs/scope.c +++ b/fs/novfs/scope.c -@@ -170,10 +170,9 @@ static struct novfs_scope_list *Scope_Find_Scope(int Create) +@@ -170,10 +170,9 @@ static struct novfs_scope_list *Scope_Fi kfree(scope); scope = NULL; } @@ -31,6 +29,3 @@ index 6de40a8..5b408f6 100644 } } --- -1.6.4.2 - diff --git a/patches.fixes/novfs-fragment-size-fix.patch b/patches.fixes/novfs-fragment-size-fix.patch new file mode 100644 index 0000000..e17abcb --- /dev/null +++ b/patches.fixes/novfs-fragment-size-fix.patch @@ -0,0 +1,26 @@ +From: Sankar P +Subject: novfs: NCP Fragments can be upto 64k in size. +References: bnc#625965 +Patch-mainline: No + +Increase the Maximum fragment size declaration, so as to +avoid wrong boundary checks. + +Signed-off-by: Sankar P + +diff --git a/fs/novfs/nwcapi.h b/fs/novfs/nwcapi.h +index 4b6fb99..8cd842a 100644 +--- a/fs/novfs/nwcapi.h ++++ b/fs/novfs/nwcapi.h +@@ -301,7 +301,7 @@ N_EXTERN_LIBRARY(NWRCODE) + #define MIN_NUM_REPLIES 1 + #define MAX_NUM_REQUESTS 4096 + #define MIN_NUM_REQUESTS 1 +-#define MAX_FRAG_SIZE 4096 ++#define MAX_FRAG_SIZE 65536 + #define MIN_FRAG_SIZE 1 + #define MAX_INFO_LEN 4096 + #define MAX_DOMAIN_LEN MAX_NETWORK_NAME_LENGTH +-- +1.7.3.2 + diff --git a/patches.fixes/novfs-lindent b/patches.fixes/novfs-lindent new file mode 100644 index 0000000..d72aa4d --- /dev/null +++ b/patches.fixes/novfs-lindent @@ -0,0 +1,8008 @@ +From: Sankar P +Subject: [PATCH] novfs: Lindent novfs sources. +Patch-mainline: never +References: none + +The sources of novfs had numerous coding-style issues. So I ran Lindent on the +sources with just one modification. + +I set the character limit to 132 instead of 80, as it is more readable and +suggested by Linus in a mailing-list. 
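Going back to the novfs fragment-size fix a couple of entries above: the practical effect of raising MAX_FRAG_SIZE is that range checks written against the old 4096-byte ceiling stop rejecting legal 64 KiB NCP fragments. The snippet below only demonstrates that arithmetic; frag_len_ok() is a hypothetical check, since the one-line patch changes the limit but does not show the code that consumes it.

    #include <stdio.h>

    #define OLD_MAX_FRAG_SIZE 4096
    #define NEW_MAX_FRAG_SIZE 65536
    #define MIN_FRAG_SIZE     1

    /* Hypothetical bounds check of the kind the larger limit is meant
     * to satisfy: a 64 KiB NCP fragment must not be rejected as out of
     * range. */
    static int frag_len_ok(unsigned int len, unsigned int max)
    {
        return len >= MIN_FRAG_SIZE && len <= max;
    }

    int main(void)
    {
        unsigned int ncp_frag = 64 * 1024;  /* fragment size NCP can produce */

        printf("old limit accepts 64 KiB fragment: %d\n",
               frag_len_ok(ncp_frag, OLD_MAX_FRAG_SIZE));  /* 0: wrongly rejected */
        printf("new limit accepts 64 KiB fragment: %d\n",
               frag_len_ok(ncp_frag, NEW_MAX_FRAG_SIZE));  /* 1 */
        return 0;
    }

With the old 4096-byte limit the first check fails, which is the "wrong boundary checks" behaviour the patch description refers to.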
+ +Signed-off-by: Sankar P +Acked-by: Jan Kara + +--- + fs/novfs/commands.h | 7 + fs/novfs/daemon.c | 559 +++++++-------------- + fs/novfs/file.c | 478 ++++++------------ + fs/novfs/inode.c | 1355 ++++++++++++++++------------------------------------ + fs/novfs/nwcapi.c | 601 +++++++---------------- + fs/novfs/nwcapi.h | 16 + fs/novfs/nwerror.h | 3 + fs/novfs/proc.c | 18 + fs/novfs/profile.c | 158 +----- + fs/novfs/scope.c | 89 +-- + fs/novfs/vfs.h | 258 +++------ + 11 files changed, 1148 insertions(+), 2394 deletions(-) + +--- a/fs/novfs/commands.h ++++ b/fs/novfs/commands.h +@@ -159,7 +159,6 @@ struct novfs_command_reply_header { + + }; + +- + struct novfs_delete_file_request { + struct novfs_command_request_header Command; + unsigned int isDirectory; +@@ -681,7 +680,6 @@ struct nwd_server_version { + unsigned short int uRevision; + }; + +- + #define MAX_ADDRESS_LENGTH 32 + + struct tagNwdTranAddrEx { +@@ -933,8 +931,7 @@ struct novfs_set_file_lock_reply { + + }; + +- +-struct novfs_scope_list{ ++struct novfs_scope_list { + struct list_head ScopeList; + struct novfs_schandle ScopeId; + struct novfs_schandle SessionId; +@@ -952,4 +949,4 @@ struct novfs_scope_list{ + + #pragma pack(pop) + +-#endif /* __NOVFS_COMMANDS_H */ ++#endif /* __NOVFS_COMMANDS_H */ +--- a/fs/novfs/daemon.c ++++ b/fs/novfs/daemon.c +@@ -89,17 +89,16 @@ struct drive_map { + char name[1]; + }; + +-static void Queue_get(struct daemon_cmd * Que); +-static void Queue_put(struct daemon_cmd * Que); ++static void Queue_get(struct daemon_cmd *Que); ++static void Queue_put(struct daemon_cmd *Que); + static void RemoveDriveMaps(void); +-static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle); +-static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle); ++static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle); ++static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle); + static int set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); + static int unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); + static int NwdGetMountPath(struct novfs_xplat *pdata); + static long local_unlink(const char *pathname); + +- + /*===[ Global variables ]=================================================*/ + static struct daemon_queue Daemon_Queue; + +@@ -130,7 +129,7 @@ void novfs_daemon_queue_exit(void) + /*++======================================================================*/ + static void novfs_daemon_timer(unsigned long data) + { +- struct daemon_cmd *que = (struct daemon_cmd *) data; ++ struct daemon_cmd *que = (struct daemon_cmd *)data; + + if (QUEUE_ACKED != que->status) { + que->status = QUEUE_TIMEOUT; +@@ -140,10 +139,7 @@ static void novfs_daemon_timer(unsigned + + /*++======================================================================*/ + int Queue_Daemon_Command(void *request, +- unsigned long reqlen, +- void *data, +- int dlen, +- void **reply, unsigned long * replen, int interruptible) ++ unsigned long reqlen, void *data, int dlen, void **reply, unsigned long *replen, int interruptible) + { + struct daemon_cmd *que; + int retCode = 0; +@@ -167,15 +163,14 @@ int Queue_Daemon_Command(void *request, + + que->sequence = atomic_inc_return(&Sequence); + +- ((struct novfs_command_request_header *) request)->SequenceNumber = +- que->sequence; ++ ((struct novfs_command_request_header *)request)->SequenceNumber = que->sequence; + + /* + * Setup and start que 
timer + */ + init_timer(&que->timer); + que->timer.expires = jiffies + (HZ * Daemon_Command_Timeout); +- que->timer.data = (unsigned long) que; ++ que->timer.data = (unsigned long)que; + que->timer.function = novfs_daemon_timer; + add_timer(&que->timer); + +@@ -222,12 +217,9 @@ int Queue_Daemon_Command(void *request, + + if (QUEUE_ACKED == que->status) { + que->status = QUEUE_WAITING; +- mod_timer(&que->timer, +- jiffies + +- (HZ * 2 * Daemon_Command_Timeout)); ++ mod_timer(&que->timer, jiffies + (HZ * 2 * Daemon_Command_Timeout)); + if (interruptible) { +- retCode = +- down_interruptible(&que->semaphore); ++ retCode = down_interruptible(&que->semaphore); + } else { + down(&que->semaphore); + } +@@ -273,13 +265,13 @@ int Queue_Daemon_Command(void *request, + return (retCode); + } + +-static void Queue_get(struct daemon_cmd * Que) ++static void Queue_get(struct daemon_cmd *Que) + { + DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference)); + atomic_inc(&Que->reference); + } + +-static void Queue_put(struct daemon_cmd * Que) ++static void Queue_put(struct daemon_cmd *Que) + { + + DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference)); +@@ -308,14 +300,14 @@ struct daemon_cmd *get_next_queue(int Se + DbgPrint("que=0x%p", Daemon_Queue.list.next); + + spin_lock(&Daemon_Queue.lock); +- que = (struct daemon_cmd *) Daemon_Queue.list.next; ++ que = (struct daemon_cmd *)Daemon_Queue.list.next; + +- while (que && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) ++ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next) + && (que->status != QUEUE_SENDING)) { +- que = (struct daemon_cmd *) que->list.next; ++ que = (struct daemon_cmd *)que->list.next; + } + +- if ((NULL == que) || (que == (struct daemon_cmd *) & Daemon_Queue.list) ++ if ((NULL == que) || (que == (struct daemon_cmd *)&Daemon_Queue.list) + || (que->status != QUEUE_SENDING)) { + que = NULL; + } else if (Set_Queue_Waiting) { +@@ -339,15 +331,15 @@ static struct daemon_cmd *find_queue(uns + DbgPrint("0x%x", sequence); + + spin_lock(&Daemon_Queue.lock); +- que = (struct daemon_cmd *) Daemon_Queue.list.next; ++ que = (struct daemon_cmd *)Daemon_Queue.list.next; + +- while (que && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) ++ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next) + && (que->sequence != sequence)) { +- que = (struct daemon_cmd *) que->list.next; ++ que = (struct daemon_cmd *)que->list.next; + } + + if ((NULL == que) +- || (que == (struct daemon_cmd *) & Daemon_Queue.list.next) ++ || (que == (struct daemon_cmd *)&Daemon_Queue.list.next) + || (que->sequence != sequence)) { + que = NULL; + } +@@ -364,8 +356,7 @@ static struct daemon_cmd *find_queue(uns + + int novfs_daemon_open_control(struct inode *Inode, struct file *File) + { +- DbgPrint("pid=%d Count=%d", current->pid, +- atomic_read(&Daemon_Open_Count)); ++ DbgPrint("pid=%d Count=%d", current->pid, atomic_read(&Daemon_Open_Count)); + atomic_inc(&Daemon_Open_Count); + + return (0); +@@ -375,8 +366,7 @@ int novfs_daemon_close_control(struct in + { + struct daemon_cmd *que; + +- DbgPrint("pid=%d Count=%d", current->pid, +- atomic_read(&Daemon_Open_Count)); ++ DbgPrint("pid=%d Count=%d", current->pid, atomic_read(&Daemon_Open_Count)); + + if (atomic_dec_and_test(&Daemon_Open_Count)) { + /* +@@ -384,15 +374,14 @@ int novfs_daemon_close_control(struct in + */ + + spin_lock(&Daemon_Queue.lock); +- que = (struct daemon_cmd *) Daemon_Queue.list.next; ++ que = (struct daemon_cmd *)Daemon_Queue.list.next; + +- while (que +- && (que != 
(struct daemon_cmd *) & Daemon_Queue.list.next) ++ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next) + && (que->status != QUEUE_DONE)) { + que->status = QUEUE_TIMEOUT; + up(&que->semaphore); + +- que = (struct daemon_cmd *) que->list.next; ++ que = (struct daemon_cmd *)que->list.next; + } + spin_unlock(&Daemon_Queue.lock); + +@@ -441,44 +430,29 @@ ssize_t novfs_daemon_cmd_send(struct fil + if (DLREAD == dlist->rwflag) { + bcnt = dlist->len; + DbgPrint("page=0x%p " +- "offset=0x%p len=%d", +- i, dlist->page, +- dlist->offset, dlist->len); ++ "offset=0x%p len=%d", i, dlist->page, dlist->offset, dlist->len); + if ((bcnt + retValue) <= len) { + void *km_adr = NULL; + + if (dlist->page) { +- km_adr = +- kmap(dlist-> +- page); ++ km_adr = kmap(dlist->page); + vadr = km_adr; +- vadr += +- (unsigned long) +- dlist-> +- offset; ++ vadr += (unsigned long) ++ dlist->offset; + } else { +- vadr = +- dlist-> +- offset; ++ vadr = dlist->offset; + } + +- ccnt = +- copy_to_user(buf, +- vadr, +- bcnt); ++ ccnt = copy_to_user(buf, vadr, bcnt); + +- DbgPrint("Copy %d from 0x%p to 0x%p.", +- bcnt, vadr, buf); ++ DbgPrint("Copy %d from 0x%p to 0x%p.", bcnt, vadr, buf); + if (bcnt > 0x80) +- novfs_dump(0x80, +- vadr); ++ novfs_dump(0x80, vadr); + else +- novfs_dump(bcnt, +- vadr); ++ novfs_dump(bcnt, vadr); + + if (km_adr) { +- kunmap(dlist-> +- page); ++ kunmap(dlist->page); + } + + retValue += bcnt; +@@ -497,10 +471,8 @@ ssize_t novfs_daemon_cmd_send(struct fil + retValue = -EAGAIN; + break; + } else { +- if ((error = +- down_interruptible(&Daemon_Queue.semaphore))) { +- DbgPrint("after down_interruptible error...%d", +- error); ++ if ((error = down_interruptible(&Daemon_Queue.semaphore))) { ++ DbgPrint("after down_interruptible error...%d", error); + retValue = -EINTR; + break; + } +@@ -515,7 +487,7 @@ ssize_t novfs_daemon_cmd_send(struct fil + return (retValue); + } + +-ssize_t novfs_daemon_recv_reply(struct file *file, const char *buf, size_t nbytes, loff_t * ppos) ++ssize_t novfs_daemon_recv_reply(struct file * file, const char *buf, size_t nbytes, loff_t * ppos) + { + struct daemon_cmd *que; + size_t retValue = 0; +@@ -526,8 +498,7 @@ ssize_t novfs_daemon_recv_reply(struct f + char *vadr; + int i; + +- DbgPrint("buf=0x%p nbytes=%d ppos=%llx", buf, +- nbytes, *ppos); ++ DbgPrint("buf=0x%p nbytes=%d ppos=%llx", buf, nbytes, *ppos); + + /* + * Get sequence number from reply buffer +@@ -557,30 +528,22 @@ ssize_t novfs_daemon_recv_reply(struct f + int thiscopy, left = nbytes; + retValue = 0; + +- DbgPrint("dlist=0x%p count=%d", +- dlist, que->datalen); +- for (i = 0; +- (i < que->datalen) && (retValue < nbytes); +- i++, dlist++) { ++ DbgPrint("dlist=0x%p count=%d", dlist, que->datalen); ++ for (i = 0; (i < que->datalen) && (retValue < nbytes); i++, dlist++) { + __DbgPrint("\n" +- " dlist[%d].page: 0x%p\n" +- " dlist[%d].offset: 0x%p\n" +- " dlist[%d].len: 0x%x\n" +- " dlist[%d].rwflag: 0x%x\n", +- i, dlist->page, i, +- dlist->offset, i, dlist->len, +- i, dlist->rwflag); ++ " dlist[%d].page: 0x%p\n" ++ " dlist[%d].offset: 0x%p\n" ++ " dlist[%d].len: 0x%x\n" ++ " dlist[%d].rwflag: 0x%x\n", ++ i, dlist->page, i, dlist->offset, i, dlist->len, i, dlist->rwflag); + + if (DLWRITE == dlist->rwflag) { + void *km_adr = NULL; + + if (dlist->page) { +- km_adr = +- kmap(dlist->page); ++ km_adr = kmap(dlist->page); + vadr = km_adr; +- vadr += +- (unsigned long) dlist-> +- offset; ++ vadr += (unsigned long)dlist->offset; + } else { + vadr = dlist->offset; + } +@@ -590,9 +553,7 @@ ssize_t 
novfs_daemon_recv_reply(struct f + thiscopy = left; + dlist->len = left; + } +- cpylen = +- copy_from_user(vadr, buf, +- thiscopy); ++ cpylen = copy_from_user(vadr, buf, thiscopy); + + if (thiscopy > 0x80) + novfs_dump(0x80, vadr); +@@ -617,9 +578,7 @@ ssize_t novfs_daemon_recv_reply(struct f + que->reply = reply; + que->replen = nbytes; + +- retValue -= +- copy_from_user(reply, buf, +- retValue); ++ retValue -= copy_from_user(reply, buf, retValue); + if (retValue > 0x80) + novfs_dump(0x80, reply); + else +@@ -646,7 +605,7 @@ ssize_t novfs_daemon_recv_reply(struct f + } + + int novfs_do_login(struct ncl_string *Server, struct ncl_string *Username, +-struct ncl_string *Password, void **lgnId, struct novfs_schandle *Session) ++ struct ncl_string *Password, void **lgnId, struct novfs_schandle *Session) + { + struct novfs_login_user_request *cmd; + struct novfs_login_user_reply *reply; +@@ -660,31 +619,30 @@ struct ncl_string *Password, void **lgnI + if (!cmd) + return -ENOMEM; + +- data = (unsigned char *) cmd + sizeof(*cmd); ++ data = (unsigned char *)cmd + sizeof(*cmd); + cmd->Command.CommandType = VFS_COMMAND_LOGIN_USER; + cmd->Command.SequenceNumber = 0; + memcpy(&cmd->Command.SessionId, Session, sizeof(*Session)); + + cmd->srvNameType = Server->type; + cmd->serverLength = Server->len; +- cmd->serverOffset = (unsigned long) (data - (unsigned char *) cmd); ++ cmd->serverOffset = (unsigned long)(data - (unsigned char *)cmd); + memcpy(data, Server->buffer, Server->len); + data += Server->len; + + cmd->usrNameType = Username->type; + cmd->userNameLength = Username->len; +- cmd->userNameOffset = (unsigned long) (data - (unsigned char *) cmd); ++ cmd->userNameOffset = (unsigned long)(data - (unsigned char *)cmd); + memcpy(data, Username->buffer, Username->len); + data += Username->len; + + cmd->pwdNameType = Password->type; + cmd->passwordLength = Password->len; +- cmd->passwordOffset = (unsigned long) (data - (unsigned char *) cmd); ++ cmd->passwordOffset = (unsigned long)(data - (unsigned char *)cmd); + memcpy(data, Password->buffer, Password->len); + data += Password->len; + +- retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (reply->Reply.ErrorCode) { + retCode = reply->Reply.ErrorCode; +@@ -720,8 +678,7 @@ int novfs_daemon_logout(struct qstr *Ser + cmd->length = Server->len; + memcpy(cmd->Name, Server->name, Server->len); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (reply->Reply.ErrorCode) { + retCode = -EIO; +@@ -745,18 +702,15 @@ int novfs_daemon_getpwuid(uid_t uid, int + SC_INITIALIZE(cmd.Command.SessionId); + cmd.uid = uid; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (reply->Reply.ErrorCode) { + retCode = -EIO; + } else { + retCode = 0; + memset(uname, 0, unamelen); +- replylen = +- replylen - offsetof(struct +- novfs_getpwuid_reply, UserName); ++ replylen = replylen - offsetof(struct ++ novfs_getpwuid_reply, UserName); + if (replylen) { + if (replylen > unamelen) { + retCode = -EINVAL; +@@ -782,15 +736,13 @@ int novfs_daemon_getversion(char *Buf, i + 
cmd.Command.SequenceNumber = 0; + SC_INITIALIZE(cmd.Command.SessionId); + +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (reply->Reply.ErrorCode) { + retVal = -EIO; + } else { +- retVal = +- replylen - offsetof(struct +- novfs_get_version_reply, Version); ++ retVal = replylen - offsetof(struct ++ novfs_get_version_reply, Version); + if (retVal < length) { + memcpy(Buf, reply->Version, retVal); + Buf[retVal] = '\0'; +@@ -819,18 +771,19 @@ static int daemon_login(struct novfs_log + server.len = lLogin.Server.length; + server.type = NWC_STRING_TYPE_ASCII; + if (!copy_from_user((void *)server.buffer, lLogin.Server.data, server.len)) { +- username.buffer = kmalloc(lLogin.UserName.length, GFP_KERNEL); ++ username.buffer = kmalloc(lLogin.UserName.length, GFP_KERNEL); + if (username.buffer) { + username.len = lLogin.UserName.length; + username.type = NWC_STRING_TYPE_ASCII; + if (!copy_from_user((void *)username.buffer, lLogin.UserName.data, username.len)) { +- password.buffer = kmalloc(lLogin.Password.length, GFP_KERNEL); +- if (password.buffer) +- { ++ password.buffer = kmalloc(lLogin.Password.length, GFP_KERNEL); ++ if (password.buffer) { + password.len = lLogin.Password.length; + password.type = NWC_STRING_TYPE_ASCII; +- if (!copy_from_user((void *)password.buffer, lLogin.Password.data, password.len)) { +- retCode = novfs_do_login (&server, &username, &password, NULL, Session); ++ if (!copy_from_user ++ ((void *)password.buffer, lLogin.Password.data, password.len)) { ++ retCode = ++ novfs_do_login(&server, &username, &password, NULL, Session); + if (!retCode) { + char *username; + username = novfs_scope_get_username(); +@@ -874,7 +827,7 @@ exit: + return (retCode); + } + +-int novfs_daemon_create_sessionId(struct novfs_schandle * SessionId) ++int novfs_daemon_create_sessionId(struct novfs_schandle *SessionId) + { + struct novfs_create_context_request cmd; + struct novfs_create_context_reply *reply; +@@ -887,12 +840,9 @@ int novfs_daemon_create_sessionId(struct + cmd.Command.SequenceNumber = 0; + SC_INITIALIZE(cmd.Command.SessionId); + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { +- if (!reply->Reply.ErrorCode +- && replylen > sizeof(struct novfs_command_reply_header)) { ++ if (!reply->Reply.ErrorCode && replylen > sizeof(struct novfs_command_reply_header)) { + *SessionId = reply->SessionId; + retCode = 0; + } else { +@@ -913,16 +863,13 @@ int novfs_daemon_destroy_sessionId(struc + unsigned long replylen = 0; + int retCode = 0; + +- DbgPrint("0x%p:%p", SessionId.hTypeId, +- SessionId.hId); ++ DbgPrint("0x%p:%p", SessionId.hTypeId, SessionId.hId); + + cmd.Command.CommandType = VFS_COMMAND_DESTROY_CONTEXT; + cmd.Command.SequenceNumber = 0; + cmd.Command.SessionId = SessionId; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (!reply->Reply.ErrorCode) { + struct drive_map *dm; +@@ -956,24 +903,20 @@ int novfs_daemon_destroy_sessionId(struc + } + + int novfs_daemon_get_userspace(struct novfs_schandle SessionId, uint64_t * TotalSize, +- uint64_t * Free, uint64_t * 
TotalEnties, +- uint64_t * FreeEnties) ++ uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties) + { + struct novfs_get_user_space cmd; + struct novfs_get_user_space_reply *reply; + unsigned long replylen = 0; + int retCode = 0; + +- DbgPrint("0x%p:%p", SessionId.hTypeId, +- SessionId.hId); ++ DbgPrint("0x%p:%p", SessionId.hTypeId, SessionId.hId); + + cmd.Command.CommandType = VFS_COMMAND_GET_USER_SPACE; + cmd.Command.SequenceNumber = 0; + cmd.Command.SessionId = SessionId; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (!reply->Reply.ErrorCode) { + +@@ -1024,9 +967,7 @@ int novfs_daemon_set_mnt_point(char *Pat + + replylen = 0; + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (!reply->Reply.ErrorCode) { + retCode = 0; +@@ -1070,9 +1011,7 @@ int novfs_daemon_debug_cmd_send(char *Co + + replylen = 0; + +- retCode = +- Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + kfree(reply); + } +@@ -1092,7 +1031,7 @@ int novfs_daemon_ioctl(struct inode *ino + + switch (cmd) { + case IOC_LOGIN: +- retCode = daemon_login((struct novfs_login *) arg, &session_id); ++ retCode = daemon_login((struct novfs_login *)arg, &session_id); + break; + + case IOC_LOGOUT: +@@ -1113,9 +1052,7 @@ int novfs_daemon_ioctl(struct inode *ino + buf = kmalloc(io.length + 1, GFP_KERNEL); + if (buf) { + buf[0] = 0; +- cpylen = +- copy_from_user(buf, io.data, +- io.length); ++ cpylen = copy_from_user(buf, io.data, io.length); + buf[io.length] = '\0'; + DbgPrint("%s", buf); + kfree(buf); +@@ -1129,8 +1066,7 @@ int novfs_daemon_ioctl(struct inode *ino + { + struct novfs_xplat data; + +- cpylen = +- copy_from_user(&data, (void *)arg, sizeof(data)); ++ cpylen = copy_from_user(&data, (void *)arg, sizeof(data)); + retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000); + + switch (data.xfunction) { +@@ -1148,18 +1084,16 @@ int novfs_daemon_ioctl(struct inode *ino + return (retCode); + } + +-static int daemon_added_resource(struct daemon_handle * DHandle, int Type, void *CHandle, +- unsigned char * FHandle, unsigned long Mode, u_long Size) ++static int daemon_added_resource(struct daemon_handle *DHandle, int Type, void *CHandle, ++ unsigned char *FHandle, unsigned long Mode, u_long Size) + { + struct daemon_resource *resource; + + if (FHandle) + DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x " +- "Mode=0x%x Size=%d", DHandle, Type, CHandle, +- *(u32 *) & FHandle[2], Mode, Size); ++ "Mode=0x%x Size=%d", DHandle, Type, CHandle, *(u32 *) & FHandle[2], Mode, Size); + else +- DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p\n", +- DHandle, Type, CHandle); ++ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p\n", DHandle, Type, CHandle); + + resource = kmalloc(sizeof(struct daemon_resource), GFP_KERNEL); + if (!resource) +@@ -1168,8 +1102,7 @@ static int daemon_added_resource(struct + resource->type = Type; + resource->connection = CHandle; + if (FHandle) +- memcpy(resource->handle, FHandle, +- sizeof(resource->handle)); ++ memcpy(resource->handle, FHandle, sizeof(resource->handle)); + else + memset(resource->handle, 
0, sizeof(resource->handle)); + resource->mode = Mode; +@@ -1181,23 +1114,20 @@ static int daemon_added_resource(struct + return 0; + } + +-static int daemon_remove_resource(struct daemon_handle * DHandle, int Type, void *CHandle, +- unsigned long FHandle) ++static int daemon_remove_resource(struct daemon_handle *DHandle, int Type, void *CHandle, unsigned long FHandle) + { + struct daemon_resource *resource; + struct list_head *l; + int retVal = -ENOMEM; + +- DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x", +- DHandle, Type, CHandle, FHandle); ++ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x", DHandle, Type, CHandle, FHandle); + + write_lock(&DHandle->lock); + + list_for_each(l, &DHandle->list) { + resource = list_entry(l, struct daemon_resource, list); + +- if ((Type == resource->type) && +- (resource->connection == CHandle)) { ++ if ((Type == resource->type) && (resource->connection == CHandle)) { + DbgPrint("Found resource=0x%p", resource); + l = l->prev; + list_del(&resource->list); +@@ -1240,34 +1170,26 @@ int novfs_daemon_lib_close(struct inode + + DbgPrint("inode=0x%p file=0x%p", inode, file); + if (file->private_data) { +- dh = (struct daemon_handle *) file->private_data; ++ dh = (struct daemon_handle *)file->private_data; + + list_for_each(l, &dh->list) { + resource = list_entry(l, struct daemon_resource, list); + + if (DH_TYPE_STREAM == resource->type) { +- novfs_close_stream(resource->connection, +- resource->handle, +- dh->session); ++ novfs_close_stream(resource->connection, resource->handle, dh->session); + } else if (DH_TYPE_CONNECTION == resource->type) { +- cmd = (struct novfs_xplat_call_request *) commanddata; +- cmdlen = +- offsetof(struct novfs_xplat_call_request, +- data) + sizeof(struct nwd_close_conn); +- cmd->Command.CommandType = +- VFS_COMMAND_XPLAT_CALL; ++ cmd = (struct novfs_xplat_call_request *)commanddata; ++ cmdlen = offsetof(struct novfs_xplat_call_request, data) + sizeof(struct nwd_close_conn); ++ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; + cmd->Command.SessionId = dh->session; + cmd->NwcCommand = NWC_CLOSE_CONN; + + cmd->dataLen = sizeof(struct nwd_close_conn); +- nwdClose = (struct nwd_close_conn *) cmd->data; +- nwdClose->ConnHandle = +- (void *) resource->connection; +- +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, +- 0, (void **)&reply, +- &replylen, 0); ++ nwdClose = (struct nwd_close_conn *)cmd->data; ++ nwdClose->ConnHandle = (void *)resource->connection; ++ ++ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); + if (reply) + kfree(reply); + } +@@ -1282,8 +1204,7 @@ int novfs_daemon_lib_close(struct inode + return (0); + } + +-ssize_t novfs_daemon_lib_read(struct file * file, char *buf, size_t len, +- loff_t * off) ++ssize_t novfs_daemon_lib_read(struct file * file, char *buf, size_t len, loff_t * off) + { + struct daemon_handle *dh; + struct daemon_resource *resource; +@@ -1297,16 +1218,13 @@ ssize_t novfs_daemon_lib_read(struct fil + dh = file->private_data; + read_lock(&dh->lock); + if (&dh->list != dh->list.next) { +- resource = +- list_entry(dh->list.next, struct daemon_resource, list); ++ resource = list_entry(dh->list.next, struct daemon_resource, list); + + if (DH_TYPE_STREAM == resource->type) { + while (len > 0 && (offset < resource->size)) { + thisread = len; + if (novfs_read_stream +- (resource->connection, +- resource->handle, buf, &thisread, +- &offset, 1, dh->session) ++ (resource->connection, resource->handle, buf, &thisread, 
&offset, 1, dh->session) + || !thisread) { + break; + } +@@ -1324,8 +1242,7 @@ ssize_t novfs_daemon_lib_read(struct fil + return (totalread); + } + +-ssize_t novfs_daemon_lib_write(struct file * file, const char *buf, size_t len, +- loff_t * off) ++ssize_t novfs_daemon_lib_write(struct file * file, const char *buf, size_t len, loff_t * off) + { + struct daemon_handle *dh; + struct daemon_resource *resource; +@@ -1340,21 +1257,15 @@ ssize_t novfs_daemon_lib_write(struct fi + dh = file->private_data; + write_lock(&dh->lock); + if (&dh->list != dh->list.next) { +- resource = +- list_entry(dh->list.next, struct daemon_resource, list); ++ resource = list_entry(dh->list.next, struct daemon_resource, list); + + if ((DH_TYPE_STREAM == resource->type) && (len >= 0)) { + totalwrite = 0; + do { + thiswrite = len; + status = +- novfs_write_stream(resource-> +- connection, +- resource->handle, +- (void *)buf, +- &thiswrite, +- &offset, +- dh->session); ++ novfs_write_stream(resource->connection, ++ resource->handle, (void *)buf, &thiswrite, &offset, dh->session); + if (status || !thiswrite) { + /* + * If len is zero then the file will have just been +@@ -1397,8 +1308,7 @@ loff_t novfs_daemon_lib_llseek(struct fi + dh = file->private_data; + read_lock(&dh->lock); + if (&dh->list != dh->list.next) { +- resource = +- list_entry(dh->list.next, struct daemon_resource, list); ++ resource = list_entry(dh->list.next, struct daemon_resource, list); + + if (DH_TYPE_STREAM == resource->type) { + switch (origin) { +@@ -1457,20 +1367,14 @@ int novfs_daemon_lib_ioctl(struct inode + } io; + char *buf; + io.length = 0; +- cpylen = +- copy_from_user(&io, (void *)arg, +- sizeof(io)); ++ cpylen = copy_from_user(&io, (void *)arg, sizeof(io)); + if (io.length <= 0 || io.length > 1024) + return -EINVAL; + if (io.length) { +- buf = +- kmalloc(io.length + 1, +- GFP_KERNEL); ++ buf = kmalloc(io.length + 1, GFP_KERNEL); + if (buf) { + buf[0] = 0; +- cpylen = +- copy_from_user(buf, io.data, +- io.length); ++ cpylen = copy_from_user(buf, io.data, io.length); + buf[io.length] = '\0'; + __DbgPrint("%s", buf); + kfree(buf); +@@ -1484,243 +1388,177 @@ int novfs_daemon_lib_ioctl(struct inode + { + struct novfs_xplat data; + +- cpylen = +- copy_from_user(&data, (void *)arg, +- sizeof(data)); ++ cpylen = copy_from_user(&data, (void *)arg, sizeof(data)); + retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000); + + switch (data.xfunction) { + case NWC_OPEN_CONN_BY_NAME: + DbgIocCall("NwOpenConnByName"); +- retCode = +- novfs_open_conn_by_name(&data, +- &handle, dh->session); ++ retCode = novfs_open_conn_by_name(&data, &handle, dh->session); + if (!retCode) +- daemon_added_resource(dh, +- DH_TYPE_CONNECTION,handle, 0, 0, 0); ++ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0); + break; + + case NWC_OPEN_CONN_BY_ADDRESS: + DbgIocCall("NwOpenConnByAddress"); +- retCode = +- novfs_open_conn_by_addr(&data, &handle, +- dh->session); ++ retCode = novfs_open_conn_by_addr(&data, &handle, dh->session); + if (!retCode) +- daemon_added_resource(dh, +- DH_TYPE_CONNECTION, +- handle, 0, +- 0, 0); ++ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0); + break; + + case NWC_OPEN_CONN_BY_REFERENCE: + + DbgIocCall("NwOpenConnByReference"); +- retCode = +- novfs_open_conn_by_ref(&data, &handle, +- dh->session); ++ retCode = novfs_open_conn_by_ref(&data, &handle, dh->session); + if (!retCode) +- daemon_added_resource(dh, +- DH_TYPE_CONNECTION, +- handle, 0, +- 0, 0); ++ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 
0); + break; + + case NWC_SYS_CLOSE_CONN: + DbgIocCall("NwSysCloseConn"); +- retCode = +- novfs_sys_conn_close(&data, (unsigned long *)&handle, dh->session); ++ retCode = novfs_sys_conn_close(&data, (unsigned long *)&handle, dh->session); + daemon_remove_resource(dh, DH_TYPE_CONNECTION, handle, 0); + break; + + case NWC_CLOSE_CONN: + DbgIocCall("NwCloseConn"); +- retCode = +- novfs_conn_close(&data, &handle, +- dh->session); +- daemon_remove_resource(dh, +- DH_TYPE_CONNECTION, +- handle, 0); ++ retCode = novfs_conn_close(&data, &handle, dh->session); ++ daemon_remove_resource(dh, DH_TYPE_CONNECTION, handle, 0); + break; + + case NWC_LOGIN_IDENTITY: +- DbgIocCall("" +- "NwLoginIdentity"); +- retCode = +- novfs_login_id(&data, dh->session); ++ DbgIocCall("" "NwLoginIdentity"); ++ retCode = novfs_login_id(&data, dh->session); + break; + + case NWC_RAW_NCP_REQUEST: +- DbgIocCall("[VFS XPLAT] Send Raw " +- "NCP Request"); ++ DbgIocCall("[VFS XPLAT] Send Raw " "NCP Request"); + retCode = novfs_raw_send(&data, dh->session); + break; + + case NWC_AUTHENTICATE_CONN_WITH_ID: +- DbgIocCall("[VFS XPLAT] Authenticate " +- "Conn With ID"); +- retCode = +- novfs_auth_conn(&data, +- dh->session); ++ DbgIocCall("[VFS XPLAT] Authenticate " "Conn With ID"); ++ retCode = novfs_auth_conn(&data, dh->session); + break; + + case NWC_UNAUTHENTICATE_CONN: +- DbgIocCall("[VFS XPLAT] UnAuthenticate " +- "Conn With ID"); +- retCode = +- novfs_unauthenticate(&data, +- dh->session); ++ DbgIocCall("[VFS XPLAT] UnAuthenticate " "Conn With ID"); ++ retCode = novfs_unauthenticate(&data, dh->session); + break; + + case NWC_LICENSE_CONN: + DbgIocCall("Call NwLicenseConn"); +- retCode = +- novfs_license_conn(&data, dh->session); ++ retCode = novfs_license_conn(&data, dh->session); + break; + + case NWC_LOGOUT_IDENTITY: + DbgIocCall("NwLogoutIdentity"); +- retCode = +- novfs_logout_id(&data, +- dh->session); ++ retCode = novfs_logout_id(&data, dh->session); + break; + + case NWC_UNLICENSE_CONN: + DbgIocCall("NwUnlicense"); +- retCode = +- novfs_unlicense_conn(&data, dh->session); ++ retCode = novfs_unlicense_conn(&data, dh->session); + break; + + case NWC_GET_CONN_INFO: + DbgIocCall("NwGetConnInfo"); +- retCode = +- novfs_get_conn_info(&data, dh->session); ++ retCode = novfs_get_conn_info(&data, dh->session); + break; + + case NWC_SET_CONN_INFO: + DbgIocCall("NwSetConnInfo"); +- retCode = +- novfs_set_conn_info(&data, dh->session); ++ retCode = novfs_set_conn_info(&data, dh->session); + break; + + case NWC_SCAN_CONN_INFO: + DbgIocCall("NwScanConnInfo"); +- retCode = +- novfs_scan_conn_info(&data, dh->session); ++ retCode = novfs_scan_conn_info(&data, dh->session); + break; + + case NWC_GET_IDENTITY_INFO: + DbgIocCall("NwGetIdentityInfo"); +- retCode = +- novfs_get_id_info(&data, +- dh->session); ++ retCode = novfs_get_id_info(&data, dh->session); + break; + + case NWC_GET_REQUESTER_VERSION: + DbgIocCall("NwGetDaemonVersion"); +- retCode = +- novfs_get_daemon_ver(&data, +- dh->session); ++ retCode = novfs_get_daemon_ver(&data, dh->session); + break; + + case NWC_GET_PREFERRED_DS_TREE: + DbgIocCall("NwcGetPreferredDsTree"); +- retCode = +- novfs_get_preferred_DS_tree(&data, +- dh->session); ++ retCode = novfs_get_preferred_DS_tree(&data, dh->session); + break; + + case NWC_SET_PREFERRED_DS_TREE: + DbgIocCall("NwcSetPreferredDsTree"); +- retCode = +- novfs_set_preferred_DS_tree(&data, +- dh->session); ++ retCode = novfs_set_preferred_DS_tree(&data, dh->session); + break; + + case NWC_GET_DEFAULT_NAME_CONTEXT: + 
DbgIocCall("NwcGetDefaultNameContext"); +- retCode = +- novfs_get_default_ctx(&data, +- dh->session); ++ retCode = novfs_get_default_ctx(&data, dh->session); + break; + + case NWC_SET_DEFAULT_NAME_CONTEXT: + DbgIocCall("NwcSetDefaultNameContext"); +- retCode = +- novfs_set_default_ctx(&data, +- dh->session); ++ retCode = novfs_set_default_ctx(&data, dh->session); + break; + + case NWC_QUERY_FEATURE: + DbgIocCall("NwQueryFeature"); +- retCode = +- novfs_query_feature(&data, dh->session); ++ retCode = novfs_query_feature(&data, dh->session); + break; + + case NWC_GET_TREE_MONITORED_CONN_REF: + DbgIocCall("NwcGetTreeMonitoredConn"); +- retCode = +- novfs_get_tree_monitored_conn(&data, +- dh-> +- session); ++ retCode = novfs_get_tree_monitored_conn(&data, dh->session); + break; + + case NWC_ENUMERATE_IDENTITIES: + DbgIocCall("NwcEnumerateIdentities"); +- retCode = +- novfs_enum_ids(&data, +- dh->session); ++ retCode = novfs_enum_ids(&data, dh->session); + break; + + case NWC_CHANGE_KEY: + DbgIocCall("NwcChangeAuthKey"); +- retCode = +- novfs_change_auth_key(&data, +- dh->session); ++ retCode = novfs_change_auth_key(&data, dh->session); + break; + + case NWC_CONVERT_LOCAL_HANDLE: + DbgIocCall("NwdConvertLocalHandle"); +- retCode = +- NwdConvertLocalHandle(&data, dh); ++ retCode = NwdConvertLocalHandle(&data, dh); + break; + + case NWC_CONVERT_NETWARE_HANDLE: + DbgIocCall("NwdConvertNetwareHandle"); +- retCode = +- NwdConvertNetwareHandle(&data, dh); ++ retCode = NwdConvertNetwareHandle(&data, dh); + break; + + case NWC_SET_PRIMARY_CONN: + DbgIocCall("NwcSetPrimaryConn"); +- retCode = +- novfs_set_pri_conn(&data, +- dh->session); ++ retCode = novfs_set_pri_conn(&data, dh->session); + break; + + case NWC_GET_PRIMARY_CONN: + DbgIocCall("NwcGetPrimaryConn"); +- retCode = +- novfs_get_pri_conn(&data, +- dh->session); ++ retCode = novfs_get_pri_conn(&data, dh->session); + break; + + case NWC_MAP_DRIVE: + DbgIocCall("NwcMapDrive"); +- retCode = +- set_map_drive(&data, dh->session); ++ retCode = set_map_drive(&data, dh->session); + break; + + case NWC_UNMAP_DRIVE: + DbgIocCall("NwcUnMapDrive"); +- retCode = +- unmap_drive(&data, dh->session); ++ retCode = unmap_drive(&data, dh->session); + break; + + case NWC_ENUMERATE_DRIVES: + DbgIocCall("NwcEnumerateDrives"); +- retCode = +- novfs_enum_drives(&data, +- dh->session); ++ retCode = novfs_enum_drives(&data, dh->session); + break; + + case NWC_GET_MOUNT_PATH: +@@ -1730,22 +1568,17 @@ int novfs_daemon_lib_ioctl(struct inode + + case NWC_GET_BROADCAST_MESSAGE: + DbgIocCall("NwdGetBroadcastMessage"); +- retCode = +- novfs_get_bcast_msg(&data, +- dh->session); ++ retCode = novfs_get_bcast_msg(&data, dh->session); + break; + + case NWC_SET_KEY: + DbgIocCall("NwdSetKey"); +- retCode = +- novfs_set_key_value(&data, dh->session); ++ retCode = novfs_set_key_value(&data, dh->session); + break; + + case NWC_VERIFY_KEY: + DbgIocCall("NwdVerifyKey"); +- retCode = +- novfs_verify_key_value(&data, +- dh->session); ++ retCode = novfs_verify_key_value(&data, dh->session); + break; + + case NWC_RAW_NCP_REQUEST_ALL: +@@ -1757,8 +1590,7 @@ int novfs_daemon_lib_ioctl(struct inode + + } + +- DbgPrint("[NOVFS XPLAT] status Code = %X\n", +- retCode); ++ DbgPrint("[NOVFS XPLAT] status Code = %X\n", retCode); + break; + } + } +@@ -1767,8 +1599,7 @@ int novfs_daemon_lib_ioctl(struct inode + return (retCode); + } + +-unsigned int novfs_daemon_poll(struct file *file, +- struct poll_table_struct *poll_table) ++unsigned int novfs_daemon_poll(struct file *file, struct poll_table_struct 
*poll_table) + { + struct daemon_cmd *que; + unsigned int mask = POLLOUT | POLLWRNORM; +@@ -1779,7 +1610,7 @@ unsigned int novfs_daemon_poll(struct fi + return mask; + } + +-static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle) ++static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle) + { + int retVal; + struct nwc_convert_netware_handle nh; +@@ -1787,20 +1618,16 @@ static int NwdConvertNetwareHandle(struc + + DbgPrint("DHandle=0x%p", DHandle); + +- cpylen = +- copy_from_user(&nh, pdata->reqData, +- sizeof(struct nwc_convert_netware_handle)); ++ cpylen = copy_from_user(&nh, pdata->reqData, sizeof(struct nwc_convert_netware_handle)); + + retVal = + daemon_added_resource(DHandle, DH_TYPE_STREAM, +- Uint32toHandle(nh.ConnHandle), +- nh.NetWareHandle, nh.uAccessMode, +- nh.uFileSize); ++ Uint32toHandle(nh.ConnHandle), nh.NetWareHandle, nh.uAccessMode, nh.uFileSize); + + return (retVal); + } + +-static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle) ++static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle) + { + int retVal = NWE_REQUESTER_FAILURE; + struct daemon_resource *resource; +@@ -1816,14 +1643,12 @@ static int NwdConvertLocalHandle(struct + resource = list_entry(l, struct daemon_resource, list); + + if (DH_TYPE_STREAM == resource->type) { +- lh.uConnReference = +- HandletoUint32(resource->connection); ++ lh.uConnReference = HandletoUint32(resource->connection); + + //sgled memcpy(lh.NwWareHandle, resource->handle, sizeof(resource->handle)); + memcpy(lh.NetWareHandle, resource->handle, sizeof(resource->handle)); //sgled + if (pdata->repLen >= sizeof(struct nwc_convert_local_handle)) { +- cpylen = copy_to_user(pdata->repData, &lh, +- sizeof(struct nwc_convert_local_handle)); ++ cpylen = copy_to_user(pdata->repData, &lh, sizeof(struct nwc_convert_local_handle)); + retVal = 0; + } else { + retVal = NWE_BUFFER_OVERFLOW; +@@ -1855,9 +1680,7 @@ static int NwdGetMountPath(struct novfs_ + retVal = NWE_BUFFER_OVERFLOW; + } else { + if (mp.pMountPath) { +- cpylen = +- copy_to_user(mp.pMountPath, +- novfs_current_mnt, len); ++ cpylen = copy_to_user(mp.pMountPath, novfs_current_mnt, len); + } + retVal = 0; + } +@@ -1888,8 +1711,7 @@ static int set_map_drive(struct novfs_xp + return -EFAULT; + if (symInfo.linkOffsetLength > MAX_NAME_LEN) + return -EINVAL; +- drivemap = kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, +- GFP_KERNEL); ++ drivemap = kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, GFP_KERNEL); + if (!drivemap) + return -ENOMEM; + +@@ -1898,22 +1720,19 @@ static int set_map_drive(struct novfs_xp + cpylen = copy_from_user(drivemap->name, path, symInfo.linkOffsetLength); + + drivemap->session = Session; +- drivemap->hash = full_name_hash(drivemap->name, +- symInfo.linkOffsetLength - 1); ++ drivemap->hash = full_name_hash(drivemap->name, symInfo.linkOffsetLength - 1); + drivemap->namelen = symInfo.linkOffsetLength - 1; + DbgPrint("hash=0x%lx path=%s", drivemap->hash, drivemap->name); + +- dm = (struct drive_map *) & DriveMapList.next; ++ dm = (struct drive_map *)&DriveMapList.next; + + down(&DriveMapLock); + + list_for_each(list, &DriveMapList) { + dm = list_entry(list, struct drive_map, list); + __DbgPrint("%s: dm=0x%p\n" +- " hash: 0x%lx\n" +- " namelen: %d\n" +- " name: %s\n", __func__, +- dm, dm->hash, dm->namelen, dm->name); ++ " hash: 0x%lx\n" ++ " namelen: %d\n" " name: %s\n", __func__, dm, dm->hash, dm->namelen, 
dm->name); + + if (drivemap->hash == dm->hash) { + if (0 == strcmp(dm->name, drivemap->name)) { +@@ -1926,15 +1745,12 @@ static int set_map_drive(struct novfs_xp + } + + if (dm) { +- if ((dm == (struct drive_map *) & DriveMapList) || +- (dm->hash < drivemap->hash)) { ++ if ((dm == (struct drive_map *)&DriveMapList) || (dm->hash < drivemap->hash)) { + list_add(&drivemap->list, &dm->list); + } else { +- list_add_tail(&drivemap->list, +- &dm->list); ++ list_add_tail(&drivemap->list, &dm->list); + } +- } +- else ++ } else + kfree(drivemap); + up(&DriveMapLock); + return (retVal); +@@ -1949,7 +1765,6 @@ static int unmap_drive(struct novfs_xpla + struct list_head *list; + unsigned long hash; + +- + retVal = novfs_unmap_drive(pdata, Session); + if (retVal) + return retVal; +@@ -1960,7 +1775,7 @@ static int unmap_drive(struct novfs_xpla + path = kmalloc(symInfo.linkLen, GFP_KERNEL); + if (!path) + return -ENOMEM; +- if (copy_from_user(path,((struct nwc_unmap_drive_ex *) pdata->reqData)->linkData, symInfo.linkLen)) { ++ if (copy_from_user(path, ((struct nwc_unmap_drive_ex *)pdata->reqData)->linkData, symInfo.linkLen)) { + kfree(path); + return -EFAULT; + } +@@ -1975,9 +1790,7 @@ static int unmap_drive(struct novfs_xpla + list_for_each(list, &DriveMapList) { + dm = list_entry(list, struct drive_map, list); + __DbgPrint("%s: dm=0x%p %s\n" +- " hash: 0x%x\n" +- " namelen: %d\n", __func__, +- dm, dm->name, dm->hash, dm->namelen); ++ " hash: 0x%x\n" " namelen: %d\n", __func__, dm, dm->name, dm->hash, dm->namelen); + + if (hash == dm->hash) { + if (0 == strcmp(dm->name, path)) { +@@ -1991,9 +1804,7 @@ static int unmap_drive(struct novfs_xpla + + if (dm) { + __DbgPrint("%s: Remove dm=0x%p %s\n" +- " hash: 0x%x\n" +- " namelen: %d\n", __func__, +- dm, dm->name, dm->hash, dm->namelen); ++ " hash: 0x%x\n" " namelen: %d\n", __func__, dm, dm->name, dm->hash, dm->namelen); + list_del(&dm->list); + kfree(dm); + } +@@ -2012,10 +1823,8 @@ static void RemoveDriveMaps(void) + dm = list_entry(list, struct drive_map, list); + + __DbgPrint("%s: dm=0x%p\n" +- " hash: 0x%x\n" +- " namelen: %d\n" +- " name: %s\n", __func__, +- dm, dm->hash, dm->namelen, dm->name); ++ " hash: 0x%x\n" ++ " namelen: %d\n" " name: %s\n", __func__, dm, dm->hash, dm->namelen, dm->name); + local_unlink(dm->name); + list = list->prev; + list_del(&dm->list); +@@ -2044,10 +1853,10 @@ static long local_unlink(const char *pat + goto exit1; + mutex_lock(&nd.path.dentry->d_inode->i_mutex); + /* Get the filename of pathname */ +- name=c=(char *)pathname; +- while (*c!='\0') { +- if (*c=='/') +- name=++c; ++ name = c = (char *)pathname; ++ while (*c != '\0') { ++ if (*c == '/') ++ name = ++c; + c++; + } + dentry = lookup_one_len(name, nd.path.dentry, strlen(name)); +@@ -2057,7 +1866,7 @@ static long local_unlink(const char *pat + DbgPrint("dentry %p", dentry); + if (!(dentry->d_inode->i_mode & S_IFLNK)) { + DbgPrint("%s not a link", name); +- error=-ENOENT; ++ error = -ENOENT; + goto exit1; + } + /* Why not before? Because we want correct error value */ +@@ -2072,7 +1881,7 @@ static long local_unlink(const char *pat + goto exit2; + error = vfs_unlink(nd.path.dentry->d_inode, dentry); + mnt_drop_write(nd.path.mnt); +- exit2: ++exit2: + dput(dentry); + } + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); +@@ -2084,8 +1893,6 @@ exit1: + return error; + + slashes: +- error = !dentry->d_inode ? -ENOENT : +- S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR; ++ error = !dentry->d_inode ? -ENOENT : S_ISDIR(dentry->d_inode->i_mode) ? 
-EISDIR : -ENOTDIR; + goto exit2; + } +- +--- a/fs/novfs/file.c ++++ b/fs/novfs/file.c +@@ -27,11 +27,11 @@ + #include "commands.h" + #include "nwerror.h" + +-static ssize_t novfs_tree_read(struct file * file, char *buf, size_t len, loff_t * off); ++static ssize_t novfs_tree_read(struct file *file, char *buf, size_t len, loff_t * off); + extern struct dentry_operations novfs_dentry_operations; + + static struct file_operations novfs_tree_operations = { +- read:novfs_tree_read, ++read: novfs_tree_read, + }; + + /* +@@ -44,7 +44,7 @@ static int StripTrailingDots = 1; + int novfs_get_alltrees(struct dentry *parent) + { + unsigned char *p; +- struct novfs_command_reply_header * reply = NULL; ++ struct novfs_command_reply_header *reply = NULL; + unsigned long replylen = 0; + struct novfs_command_request_header cmd; + int retCode; +@@ -63,8 +63,7 @@ int novfs_get_alltrees(struct dentry *pa + DbgPrint("reply=0x%p replylen=%d", reply, replylen); + if (reply) { + novfs_dump(replylen, reply); +- if (!reply->ErrorCode +- && (replylen > sizeof(struct novfs_command_reply_header))) { ++ if (!reply->ErrorCode && (replylen > sizeof(struct novfs_command_reply_header))) { + p = (char *)reply + 8; + while (*p) { + DbgPrint("%s", p); +@@ -92,7 +91,7 @@ int novfs_get_alltrees(struct dentry *pa + return (retCode); + } + +-static ssize_t novfs_tree_read(struct file * file, char *buf, size_t len, loff_t * off) ++static ssize_t novfs_tree_read(struct file *file, char *buf, size_t len, loff_t * off) + { + if (file->f_pos != 0) { + return (0); +@@ -103,7 +102,7 @@ static ssize_t novfs_tree_read(struct fi + return (5); + } + +-int novfs_get_servers(unsigned char ** ServerList, struct novfs_schandle SessionId) ++int novfs_get_servers(unsigned char **ServerList, struct novfs_schandle SessionId) + { + struct novfs_get_connected_server_list req; + struct novfs_get_connected_server_list_reply *reply = NULL; +@@ -115,15 +114,13 @@ int novfs_get_servers(unsigned char ** S + req.Command.CommandType = VFS_COMMAND_GET_CONNECTED_SERVER_LIST; + req.Command.SessionId = SessionId; + +- retCode = +- Queue_Daemon_Command(&req, sizeof(req), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&req, sizeof(req), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + DbgPrint("reply"); + replylen -= sizeof(struct novfs_command_reply_header); + if (!reply->Reply.ErrorCode && replylen) { + memcpy(reply, reply->List, replylen); +- *ServerList = (unsigned char *) reply; ++ *ServerList = (unsigned char *)reply; + retCode = 0; + } else { + kfree(reply); +@@ -133,8 +130,7 @@ int novfs_get_servers(unsigned char ** S + return (retCode); + } + +-int novfs_get_vols(struct qstr *Server, unsigned char ** VolumeList, +- struct novfs_schandle SessionId) ++int novfs_get_vols(struct qstr *Server, unsigned char **VolumeList, struct novfs_schandle SessionId) + { + struct novfs_get_server_volume_list *req; + struct novfs_get_server_volume_list_reply *reply = NULL; +@@ -151,9 +147,7 @@ int novfs_get_vols(struct qstr *Server, + memcpy(req->Name, Server->name, Server->len); + req->Command.SessionId = SessionId; + +- retCode = +- Queue_Daemon_Command(req, reqlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(req, reqlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + DbgPrint("reply"); + novfs_dump(replylen, reply); +@@ -161,7 +155,7 @@ int novfs_get_vols(struct qstr *Server, + + if (!reply->Reply.ErrorCode && replylen) { + memcpy(reply, 
reply->List, replylen); +- *VolumeList = (unsigned char *) reply; ++ *VolumeList = (unsigned char *)reply; + retCode = 0; + } else { + kfree(reply); +@@ -172,11 +166,11 @@ int novfs_get_vols(struct qstr *Server, + return (retCode); + } + +-int novfs_get_file_info(unsigned char * Path, struct novfs_entry_info * Info, struct novfs_schandle SessionId) ++int novfs_get_file_info(unsigned char *Path, struct novfs_entry_info *Info, struct novfs_schandle SessionId) + { + struct novfs_verify_file_reply *reply = NULL; + unsigned long replylen = 0; +- struct novfs_verify_file_request * cmd; ++ struct novfs_verify_file_request *cmd; + int cmdlen; + int retCode = -ENOENT; + int pathlen; +@@ -195,7 +189,7 @@ int novfs_get_file_info(unsigned char * + if ('.' == Path[pathlen - 1]) + pathlen--; + } +- cmdlen = offsetof(struct novfs_verify_file_request,path) + pathlen; ++ cmdlen = offsetof(struct novfs_verify_file_request, path) + pathlen; + cmd = kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_VERIFY_FILE; +@@ -204,10 +198,7 @@ int novfs_get_file_info(unsigned char * + cmd->pathLen = pathlen; + memcpy(cmd->path, Path, cmd->pathLen); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, +- (void *)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + +@@ -217,38 +208,31 @@ int novfs_get_file_info(unsigned char * + Info->type = 3; + Info->mode = S_IRWXU; + +- if (reply-> +- fileMode & NW_ATTRIBUTE_DIRECTORY) { ++ if (reply->fileMode & NW_ATTRIBUTE_DIRECTORY) { + Info->mode |= S_IFDIR; + } else { + Info->mode |= S_IFREG; + } + +- if (reply-> +- fileMode & NW_ATTRIBUTE_READ_ONLY) { ++ if (reply->fileMode & NW_ATTRIBUTE_READ_ONLY) { + Info->mode &= ~(S_IWUSR); + } + + Info->uid = current_euid(); + Info->gid = current_egid(); + Info->size = reply->fileSize; +- Info->atime.tv_sec = +- reply->lastAccessTime; ++ Info->atime.tv_sec = reply->lastAccessTime; + Info->atime.tv_nsec = 0; + Info->mtime.tv_sec = reply->modifyTime; + Info->mtime.tv_nsec = 0; + Info->ctime.tv_sec = reply->createTime; + Info->ctime.tv_nsec = 0; + DbgPrint("replylen=%d sizeof(VERIFY_FILE_REPLY)=%d", +- replylen, +- sizeof(struct novfs_verify_file_reply)); +- if (replylen > +- sizeof(struct novfs_verify_file_reply)) { +- unsigned int *lp = +- &reply->fileMode; ++ replylen, sizeof(struct novfs_verify_file_reply)); ++ if (replylen > sizeof(struct novfs_verify_file_reply)) { ++ unsigned int *lp = &reply->fileMode; + lp++; +- DbgPrint("extra data 0x%x", +- *lp); ++ DbgPrint("extra data 0x%x", *lp); + Info->mtime.tv_nsec = *lp; + } + retCode = 0; +@@ -265,8 +249,7 @@ int novfs_get_file_info(unsigned char * + } + + int novfs_getx_file_info(char *Path, const char *Name, char *buffer, +- ssize_t buffer_size, ssize_t * dataLen, +- struct novfs_schandle SessionId) ++ ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId) + { + struct novfs_xa_get_reply *reply = NULL; + unsigned long replylen = 0; +@@ -277,14 +260,13 @@ int novfs_getx_file_info(char *Path, con + int namelen = strlen(Name); + int pathlen = strlen(Path); + +- DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i", +- Path, pathlen, Name, namelen); ++ DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i", Path, pathlen, Name, namelen); + + if (namelen > MAX_XATTR_NAME_LEN) + return -ENOATTR; + +- cmdlen = offsetof(struct novfs_xa_get_request, data) + pathlen + 1 + namelen + 1; // two '\0' +- cmd = (struct 
novfs_xa_get_request *) kmalloc(cmdlen, GFP_KERNEL); ++ cmdlen = offsetof(struct novfs_xa_get_request, data)+pathlen + 1 + namelen + 1; // two '\0' ++ cmd = (struct novfs_xa_get_request *)kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_GET_EXTENDED_ATTRIBUTE; + cmd->Command.SequenceNumber = 0; +@@ -297,35 +279,27 @@ int novfs_getx_file_info(char *Path, con + memcpy(cmd->data + cmd->pathLen + 1, Name, cmd->nameLen + 1); + + DbgPrint("xattr: PXA_GET_REQUEST BEGIN"); +- DbgPrint("xattr: Queue_Daemon_Command %d", +- cmd->Command.CommandType); +- DbgPrint("xattr: Command.SessionId = %d", +- cmd->Command.SessionId); ++ DbgPrint("xattr: Queue_Daemon_Command %d", cmd->Command.CommandType); ++ DbgPrint("xattr: Command.SessionId = %d", cmd->Command.SessionId); + DbgPrint("xattr: pathLen = %d", cmd->pathLen); + DbgPrint("xattr: Path = %s", cmd->data); + DbgPrint("xattr: nameLen = %d", cmd->nameLen); + DbgPrint("xattr: name = %s", (cmd->data + cmd->pathLen + 1)); + DbgPrint("xattr: PXA_GET_REQUEST END"); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + + if (reply->Reply.ErrorCode) { +- DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", +- reply->Reply.ErrorCode, +- reply->Reply.ErrorCode); ++ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", reply->Reply.ErrorCode, reply->Reply.ErrorCode); + DbgPrint("xattr: replylen=%d", replylen); + + retCode = -ENOATTR; + } else { + +- *dataLen = +- replylen - sizeof(struct novfs_command_reply_header); +- DbgPrint("xattr: replylen=%u, dataLen=%u", +- replylen, *dataLen); ++ *dataLen = replylen - sizeof(struct novfs_command_reply_header); ++ DbgPrint("xattr: replylen=%u, dataLen=%u", replylen, *dataLen); + + if (buffer_size >= *dataLen) { + DbgPrint("xattr: copying to buffer from &reply->pData"); +@@ -353,8 +327,7 @@ int novfs_getx_file_info(char *Path, con + } + + int novfs_setx_file_info(char *Path, const char *Name, const void *Value, +- unsigned long valueLen, unsigned long *bytesWritten, +- int flags, struct novfs_schandle SessionId) ++ unsigned long valueLen, unsigned long *bytesWritten, int flags, struct novfs_schandle SessionId) + { + struct novfs_xa_set_reply *reply = NULL; + unsigned long replylen = 0; +@@ -371,8 +344,8 @@ int novfs_setx_file_info(char *Path, con + if (namelen > MAX_XATTR_NAME_LEN) + return -ENOATTR; + +- cmdlen = offsetof(struct novfs_xa_set_request, data) + pathlen + 1 + namelen + 1 + valueLen; +- cmd = (struct novfs_xa_set_request *) kmalloc(cmdlen, GFP_KERNEL); ++ cmdlen = offsetof(struct novfs_xa_set_request, data)+pathlen + 1 + namelen + 1 + valueLen; ++ cmd = (struct novfs_xa_set_request *)kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_SET_EXTENDED_ATTRIBUTE; + cmd->Command.SequenceNumber = 0; +@@ -386,14 +359,11 @@ int novfs_setx_file_info(char *Path, con + memcpy(cmd->data + cmd->pathLen + 1, Name, cmd->nameLen + 1); + + cmd->valueLen = valueLen; +- memcpy(cmd->data + cmd->pathLen + 1 + cmd->nameLen + 1, Value, +- valueLen); ++ memcpy(cmd->data + cmd->pathLen + 1 + cmd->nameLen + 1, Value, valueLen); + + DbgPrint("xattr: PXA_SET_REQUEST BEGIN"); +- DbgPrint("attr: Queue_Daemon_Command %d", +- cmd->Command.CommandType); +- DbgPrint("xattr: Command.SessionId = %d", +- cmd->Command.SessionId); ++ DbgPrint("attr: Queue_Daemon_Command %d", cmd->Command.CommandType); ++ DbgPrint("xattr: 
Command.SessionId = %d", cmd->Command.SessionId); + DbgPrint("xattr: pathLen = %d", cmd->pathLen); + DbgPrint("xattr: Path = %s", cmd->data); + DbgPrint("xattr: nameLen = %d", cmd->nameLen); +@@ -402,26 +372,20 @@ int novfs_setx_file_info(char *Path, con + + DbgPrint("xattr: PXA_SET_REQUEST END"); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + + if (reply->Reply.ErrorCode) { +- DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", +- reply->Reply.ErrorCode, +- reply->Reply.ErrorCode); ++ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", reply->Reply.ErrorCode, reply->Reply.ErrorCode); + DbgPrint("xattr: replylen=%d", replylen); + + retCode = -reply->Reply.ErrorCode; //-ENOENT; + } else { + + DbgPrint("xattr: replylen=%u, real len = %u", +- replylen, +- replylen - sizeof(struct novfs_command_reply_header)); +- memcpy(bytesWritten, &reply->pData, +- replylen - sizeof(struct novfs_command_reply_header)); ++ replylen, replylen - sizeof(struct novfs_command_reply_header)); ++ memcpy(bytesWritten, &reply->pData, replylen - sizeof(struct novfs_command_reply_header)); + + retCode = 0; + } +@@ -437,8 +401,7 @@ int novfs_setx_file_info(char *Path, con + return retCode; + } + +-int novfs_listx_file_info(char *Path, char *buffer, ssize_t buffer_size, +- ssize_t * dataLen, struct novfs_schandle SessionId) ++int novfs_listx_file_info(char *Path, char *buffer, ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId) + { + struct novfs_xa_list_reply *reply = NULL; + unsigned long replylen = 0; +@@ -451,7 +414,7 @@ int novfs_listx_file_info(char *Path, ch + + *dataLen = 0; + cmdlen = offsetof(struct novfs_verify_file_request, path) + pathlen; +- cmd = (struct novfs_verify_file_request *) kmalloc(cmdlen, GFP_KERNEL); ++ cmd = (struct novfs_verify_file_request *)kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_LIST_EXTENDED_ATTRIBUTES; + cmd->Command.SequenceNumber = 0; +@@ -459,40 +422,30 @@ int novfs_listx_file_info(char *Path, ch + cmd->pathLen = pathlen; + memcpy(cmd->path, Path, cmd->pathLen + 1); //+ '\0' + DbgPrint("xattr: PVERIFY_FILE_REQUEST BEGIN"); +- DbgPrint("xattr: Queue_Daemon_Command %d", +- cmd->Command.CommandType); +- DbgPrint("xattr: Command.SessionId = %d", +- cmd->Command.SessionId); ++ DbgPrint("xattr: Queue_Daemon_Command %d", cmd->Command.CommandType); ++ DbgPrint("xattr: Command.SessionId = %d", cmd->Command.SessionId); + DbgPrint("xattr: pathLen = %d", cmd->pathLen); + DbgPrint("xattr: Path = %s", cmd->path); + DbgPrint("xattr: PVERIFY_FILE_REQUEST END"); + +- retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, +- (void *)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + + if (reply->Reply.ErrorCode) { +- DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", +- reply->Reply.ErrorCode, +- reply->Reply.ErrorCode); ++ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", reply->Reply.ErrorCode, reply->Reply.ErrorCode); + DbgPrint("xattr: replylen=%d", replylen); + + retCode = -ENOENT; + } else { +- *dataLen = +- replylen - sizeof(struct novfs_command_reply_header); +- DbgPrint("xattr: replylen=%u, dataLen=%u", +- replylen, *dataLen); ++ *dataLen = replylen - sizeof(struct novfs_command_reply_header); ++ DbgPrint("xattr: replylen=%u, dataLen=%u", replylen, *dataLen); + + 
if (buffer_size >= *dataLen) { +- DbgPrint("xattr: copying to buffer " +- "from &reply->pData"); ++ DbgPrint("xattr: copying to buffer " "from &reply->pData"); + memcpy(buffer, &reply->pData, *dataLen); + } else { +- DbgPrint("xattr: (!!!) buffer is " +- "smaller then reply\n"); ++ DbgPrint("xattr: (!!!) buffer is " "smaller then reply\n"); + retCode = -ERANGE; + } + DbgPrint("xattr: /dumping buffer"); +@@ -513,8 +466,7 @@ int novfs_listx_file_info(char *Path, ch + return retCode; + } + +-static int begin_directory_enumerate(unsigned char * Path, int PathLen, void ** EnumHandle, +- struct novfs_schandle SessionId) ++static int begin_directory_enumerate(unsigned char *Path, int PathLen, void **EnumHandle, struct novfs_schandle SessionId) + { + struct novfs_begin_enumerate_directory_request *cmd; + struct novfs_begin_enumerate_directory_reply *reply = NULL; +@@ -524,7 +476,7 @@ static int begin_directory_enumerate(uns + *EnumHandle = 0; + + cmdlen = offsetof(struct +- novfs_begin_enumerate_directory_request, path) + PathLen; ++ novfs_begin_enumerate_directory_request, path) + PathLen; + cmd = kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_START_ENUMERATE; +@@ -534,9 +486,7 @@ static int begin_directory_enumerate(uns + cmd->pathLen = PathLen; + memcpy(cmd->path, Path, PathLen); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + /* + * retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, 0); + */ +@@ -569,9 +519,7 @@ int novfs_end_directory_enumerate(void * + + cmd.enumerateHandle = EnumHandle; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, 0); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, 0); + if (reply) { + retCode = 0; + if (reply->Reply.ErrorCode) { +@@ -583,14 +531,14 @@ int novfs_end_directory_enumerate(void * + return (retCode); + } + +-static int directory_enumerate_ex(void ** EnumHandle, struct novfs_schandle SessionId, int *Count, +- struct novfs_entry_info **PInfo, int Interrupt) ++static int directory_enumerate_ex(void **EnumHandle, struct novfs_schandle SessionId, int *Count, ++ struct novfs_entry_info **PInfo, int Interrupt) + { + struct novfs_enumerate_directory_ex_request cmd; + struct novfs_enumerate_directory_ex_reply *reply = NULL; + unsigned long replylen = 0; + int retCode = 0; +- struct novfs_entry_info * info; ++ struct novfs_entry_info *info; + struct novfs_enumerate_directory_ex_data *data; + int isize; + +@@ -606,9 +554,7 @@ static int directory_enumerate_ex(void * + cmd.pathLen = 0; + cmd.path[0] = '\0'; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, Interrupt); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, Interrupt); + + if (reply) { + retCode = 0; +@@ -617,88 +563,60 @@ static int directory_enumerate_ex(void * + * error but there could still be valid data. 
+ */ + +- if (!reply->Reply.ErrorCode || +- ((replylen > sizeof(struct novfs_command_reply_header)) && +- (reply->enumCount > 0))) { ++ if (!reply->Reply.ErrorCode || ((replylen > sizeof(struct novfs_command_reply_header)) && (reply->enumCount > 0))) { + DbgPrint("isize=%d", replylen); + data = +- (struct novfs_enumerate_directory_ex_data *) ((char *)reply + +- sizeof +- (struct novfs_enumerate_directory_ex_reply)); +- isize = +- replylen - sizeof(struct novfs_enumerate_directory_ex_reply *) - +- reply->enumCount * +- offsetof(struct +- novfs_enumerate_directory_ex_data, name); +- isize += +- (reply->enumCount * +- offsetof(struct novfs_entry_info, name)); ++ (struct novfs_enumerate_directory_ex_data *)((char *)reply + ++ sizeof(struct novfs_enumerate_directory_ex_reply)); ++ isize = replylen - sizeof(struct novfs_enumerate_directory_ex_reply *) - reply->enumCount * offsetof(struct ++ novfs_enumerate_directory_ex_data, ++ name); ++ isize += (reply->enumCount * offsetof(struct novfs_entry_info, name)); + + if (PInfo) { + *PInfo = info = kmalloc(isize, GFP_KERNEL); + if (*PInfo) { +- DbgPrint("data=0x%p info=0x%p", +- data, info); ++ DbgPrint("data=0x%p info=0x%p", data, info); + *Count = reply->enumCount; + do { +- DbgPrint("data=0x%p length=%d", +- data); ++ DbgPrint("data=0x%p length=%d", data); + + info->type = 3; + info->mode = S_IRWXU; + +- if (data-> +- mode & +- NW_ATTRIBUTE_DIRECTORY) { ++ if (data->mode & NW_ATTRIBUTE_DIRECTORY) { + info->mode |= S_IFDIR; + info->mode |= S_IXUSR; + } else { + info->mode |= S_IFREG; + } + +- if (data-> +- mode & +- NW_ATTRIBUTE_READ_ONLY) { +- info->mode &= +- ~(S_IWUSR); ++ if (data->mode & NW_ATTRIBUTE_READ_ONLY) { ++ info->mode &= ~(S_IWUSR); + } + +- if (data-> +- mode & NW_ATTRIBUTE_EXECUTE) +- { ++ if (data->mode & NW_ATTRIBUTE_EXECUTE) { + info->mode |= S_IXUSR; + } + + info->uid = current_euid(); + info->gid = current_egid(); + info->size = data->size; +- info->atime.tv_sec = +- data->lastAccessTime; ++ info->atime.tv_sec = data->lastAccessTime; + info->atime.tv_nsec = 0; +- info->mtime.tv_sec = +- data->modifyTime; ++ info->mtime.tv_sec = data->modifyTime; + info->mtime.tv_nsec = 0; +- info->ctime.tv_sec = +- data->createTime; ++ info->ctime.tv_sec = data->createTime; + info->ctime.tv_nsec = 0; +- info->namelength = +- data->nameLen; +- memcpy(info->name, data->name, +- data->nameLen); +- data = +- (struct novfs_enumerate_directory_ex_data *) +- & data->name[data->nameLen]; +- replylen = +- (int)((char *)&info-> +- name[info-> +- namelength] - +- (char *)info); ++ info->namelength = data->nameLen; ++ memcpy(info->name, data->name, data->nameLen); ++ data = (struct novfs_enumerate_directory_ex_data *) ++ &data->name[data->nameLen]; ++ replylen = (int)((char *)&info->name[info->namelength] - (char *)info); + DbgPrint("info=0x%p", info); + novfs_dump(replylen, info); + +- info = +- (struct novfs_entry_info *) & info-> +- name[info->namelength]; ++ info = (struct novfs_entry_info *)&info->name[info->namelength]; + + } while (--reply->enumCount); + } +@@ -717,9 +635,8 @@ static int directory_enumerate_ex(void * + return (retCode); + } + +-int novfs_get_dir_listex(unsigned char * Path, void ** EnumHandle, int *Count, +- struct novfs_entry_info **Info, +- struct novfs_schandle SessionId) ++int novfs_get_dir_listex(unsigned char *Path, void **EnumHandle, int *Count, ++ struct novfs_entry_info **Info, struct novfs_schandle SessionId) + { + int retCode = -ENOENT; + +@@ -728,20 +645,16 @@ int novfs_get_dir_listex(unsigned char * + if (Info) + *Info = 
NULL; + +- if ((void *) - 1 == *EnumHandle) { ++ if ((void *)-1 == *EnumHandle) { + return (-ENODATA); + } + + if (0 == *EnumHandle) { +- retCode = +- begin_directory_enumerate(Path, strlen(Path), EnumHandle, +- SessionId); ++ retCode = begin_directory_enumerate(Path, strlen(Path), EnumHandle, SessionId); + } + + if (*EnumHandle) { +- retCode = +- directory_enumerate_ex(EnumHandle, SessionId, Count, Info, +- INTERRUPTIBLE); ++ retCode = directory_enumerate_ex(EnumHandle, SessionId, Count, Info, INTERRUPTIBLE); + if (retCode) { + novfs_end_directory_enumerate(*EnumHandle, SessionId); + retCode = 0; +@@ -751,9 +664,7 @@ int novfs_get_dir_listex(unsigned char * + return (retCode); + } + +-int novfs_open_file(unsigned char * Path, int Flags, struct novfs_entry_info * Info, +- void ** Handle, +- struct novfs_schandle SessionId) ++int novfs_open_file(unsigned char *Path, int Flags, struct novfs_entry_info *Info, void **Handle, struct novfs_schandle SessionId) + { + struct novfs_open_file_request *cmd; + struct novfs_open_file_reply *reply; +@@ -817,19 +728,15 @@ int novfs_open_file(unsigned char * Path + cmd->pathLen = pathlen; + memcpy(cmd->path, Path, pathlen); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + if (reply->Reply.ErrorCode) { + if (NWE_OBJECT_EXISTS == reply->Reply.ErrorCode) { + retCode = -EEXIST; +- } else if (NWE_ACCESS_DENIED == +- reply->Reply.ErrorCode) { ++ } else if (NWE_ACCESS_DENIED == reply->Reply.ErrorCode) { + retCode = -EACCES; +- } else if (NWE_FILE_IN_USE == +- reply->Reply.ErrorCode) { ++ } else if (NWE_FILE_IN_USE == reply->Reply.ErrorCode) { + retCode = -EBUSY; + } else { + retCode = -ENOENT; +@@ -847,7 +754,7 @@ int novfs_open_file(unsigned char * Path + return (retCode); + } + +-int novfs_create(unsigned char * Path, int DirectoryFlag, struct novfs_schandle SessionId) ++int novfs_create(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId) + { + struct novfs_create_file_request *cmd; + struct novfs_create_file_reply *reply; +@@ -875,9 +782,7 @@ int novfs_create(unsigned char * Path, i + cmd->pathlength = pathlen; + memcpy(cmd->path, Path, pathlen); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + retCode = 0; +@@ -906,9 +811,7 @@ int novfs_close_file(void *Handle, struc + + cmd.handle = Handle; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, 0); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, 0); + if (reply) { + retCode = 0; + if (reply->Reply.ErrorCode) { +@@ -919,11 +822,10 @@ int novfs_close_file(void *Handle, struc + return (retCode); + } + +-int novfs_read_file(void *Handle, unsigned char * Buffer, size_t * Bytes, +- loff_t * Offset, struct novfs_schandle SessionId) ++int novfs_read_file(void *Handle, unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) + { + struct novfs_read_file_request cmd; +- struct novfs_read_file_reply * reply = NULL; ++ struct novfs_read_file_reply *reply = NULL; + unsigned long replylen = 0; + int retCode = 0; + size_t len; +@@ -931,10 +833,9 @@ int novfs_read_file(void *Handle, unsign + len = *Bytes; + *Bytes = 0; + +- if (offsetof(struct 
novfs_read_file_reply, data) + len +- > novfs_max_iosize) { ++ if (offsetof(struct novfs_read_file_reply, data) + len > novfs_max_iosize) { + len = novfs_max_iosize - offsetof(struct +- novfs_read_file_reply, data); ++ novfs_read_file_reply, data); + len = (len / PAGE_SIZE) * PAGE_SIZE; + } + +@@ -946,9 +847,7 @@ int novfs_read_file(void *Handle, unsign + cmd.len = len; + cmd.offset = *Offset; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + DbgPrint("Queue_Daemon_Command 0x%x replylen=%d", retCode, replylen); + +@@ -961,11 +860,10 @@ int novfs_read_file(void *Handle, unsign + } + } else { + replylen -= offsetof(struct +- novfs_read_file_reply, data); ++ novfs_read_file_reply, data); + + if (replylen > 0) { +- replylen -= +- copy_to_user(Buffer, reply->data, replylen); ++ replylen -= copy_to_user(Buffer, reply->data, replylen); + *Bytes = replylen; + } + } +@@ -981,11 +879,10 @@ int novfs_read_file(void *Handle, unsign + } + + int novfs_read_pages(void *Handle, struct novfs_data_list *DList, +- int DList_Cnt, size_t * Bytes, loff_t * Offset, +- struct novfs_schandle SessionId) ++ int DList_Cnt, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) + { + struct novfs_read_file_request cmd; +- struct novfs_read_file_reply * reply = NULL; ++ struct novfs_read_file_reply *reply = NULL; + struct novfs_read_file_reply lreply; + unsigned long replylen = 0; + int retCode = 0; +@@ -995,8 +892,7 @@ int novfs_read_pages(void *Handle, struc + *Bytes = 0; + + DbgPrint("Handle=0x%p Dlst=0x%p Dlcnt=%d Bytes=%d Offset=%lld " +- "SessionId=0x%p:%p", Handle, DList, DList_Cnt, len, *Offset, +- SessionId.hTypeId, SessionId.hId); ++ "SessionId=0x%p:%p", Handle, DList, DList_Cnt, len, *Offset, SessionId.hTypeId, SessionId.hId); + + cmd.Command.CommandType = VFS_COMMAND_READ_FILE; + cmd.Command.SequenceNumber = 0; +@@ -1014,9 +910,7 @@ int novfs_read_pages(void *Handle, struc + DList[0].len = offsetof(struct novfs_read_file_reply, data); + DList[0].rwflag = DLWRITE; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), DList, DList_Cnt, +- (void *)&reply, &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), DList, DList_Cnt, (void *)&reply, &replylen, INTERRUPTIBLE); + + DbgPrint("Queue_Daemon_Command 0x%x", retCode); + +@@ -1033,7 +927,7 @@ int novfs_read_pages(void *Handle, struc + } + } + *Bytes = replylen - offsetof(struct +- novfs_read_file_reply, data); ++ novfs_read_file_reply, data); + } + + if (reply) { +@@ -1045,8 +939,7 @@ int novfs_read_pages(void *Handle, struc + return (retCode); + } + +-int novfs_write_file(void *Handle, unsigned char * Buffer, size_t * Bytes, +- loff_t * Offset, struct novfs_schandle SessionId) ++int novfs_write_file(void *Handle, unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) + { + struct novfs_write_file_request cmd; + struct novfs_write_file_reply *reply = NULL; +@@ -1082,9 +975,7 @@ int novfs_write_file(void *Handle, unsig + + DbgPrint("cmdlen=%ld len=%ld", cmdlen, len); + +- npage = +- (((unsigned long)Buffer & ~PAGE_MASK) + len + +- (PAGE_SIZE - 1)) >> PAGE_SHIFT; ++ npage = (((unsigned long)Buffer & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + + dlist = kmalloc(sizeof(struct novfs_data_list) * (npage + 1), GFP_KERNEL); + if (NULL == dlist) { +@@ -1121,8 +1012,7 @@ int novfs_write_file(void *Handle, unsig + 
dlist[0].len = len; + } + +- DbgPrint("page=0x%p offset=0x%p len=%d", +- dlist[0].page, dlist[0].offset, dlist[0].len); ++ DbgPrint("page=0x%p offset=0x%p len=%d", dlist[0].page, dlist[0].offset, dlist[0].len); + + boff = dlist[0].len; + +@@ -1140,8 +1030,7 @@ int novfs_write_file(void *Handle, unsig + dlist[i].rwflag = DLREAD; + + boff += dlist[i].len; +- DbgPrint("%d: page=0x%p offset=0x%p len=%d", i, +- dlist[i].page, dlist[i].offset, dlist[i].len); ++ DbgPrint("%d: page=0x%p offset=0x%p len=%d", i, dlist[i].page, dlist[i].offset, dlist[i].len); + } + + dlist[i].page = NULL; +@@ -1152,10 +1041,7 @@ int novfs_write_file(void *Handle, unsig + + DbgPrint("Buffer=0x%p boff=0x%x len=%d", Buffer, boff, len); + +- retCode = +- Queue_Daemon_Command(&cmd, cmdlen, dlist, res, +- (void *)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, cmdlen, dlist, res, (void *)&reply, &replylen, INTERRUPTIBLE); + + } else { + char *kdata; +@@ -1175,10 +1061,7 @@ int novfs_write_file(void *Handle, unsig + dlist[1].len = sizeof(lreply); + dlist[1].rwflag = DLWRITE; + +- retCode = +- Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, +- (void *)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, &replylen, INTERRUPTIBLE); + + kfree(kdata); + } +@@ -1218,8 +1101,7 @@ int novfs_write_file(void *Handle, unsig + kfree(pages); + kfree(dlist); + +- DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, +- retCode); ++ DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, retCode); + + return (retCode); + } +@@ -1245,8 +1127,7 @@ int novfs_write_page(void *Handle, struc + int retCode = 0, cmdlen; + struct novfs_data_list dlst[2]; + +- DbgPrint("Handle=0x%p Page=0x%p Index=%lu SessionId=0x%llx", +- Handle, Page, Page->index, SessionId); ++ DbgPrint("Handle=0x%p Page=0x%p Index=%lu SessionId=0x%llx", Handle, Page, Page->index, SessionId); + + dlst[0].page = NULL; + dlst[0].offset = &lreply; +@@ -1268,9 +1149,7 @@ int novfs_write_page(void *Handle, struc + cmd.len = PAGE_CACHE_SIZE; + cmd.offset = (loff_t) Page->index << PAGE_CACHE_SHIFT;; + +- retCode = +- Queue_Daemon_Command(&cmd, cmdlen, &dlst, 2, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, cmdlen, &dlst, 2, (void *)&reply, &replylen, INTERRUPTIBLE); + if (!retCode) { + if (reply) { + memcpy(&lreply, reply, sizeof(lreply)); +@@ -1314,8 +1193,7 @@ int novfs_write_pages(void *Handle, stru + size_t len; + + DbgPrint("Handle=0x%p Dlst=0x%p Dlcnt=%d Bytes=%d Offset=%lld " +- "SessionId=0x%llx\n", Handle, DList, DList_Cnt, Bytes, +- Offset, SessionId); ++ "SessionId=0x%llx\n", Handle, DList, DList_Cnt, Bytes, Offset, SessionId); + + DList[0].page = NULL; + DList[0].offset = &lreply; +@@ -1334,10 +1212,7 @@ int novfs_write_pages(void *Handle, stru + cmd.len = len; + cmd.offset = Offset; + +- retCode = +- Queue_Daemon_Command(&cmd, cmdlen, DList, DList_Cnt, +- (void *)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, cmdlen, DList, DList_Cnt, (void *)&reply, &replylen, INTERRUPTIBLE); + if (!retCode) { + if (reply) { + memcpy(&lreply, reply, sizeof(lreply)); +@@ -1369,9 +1244,8 @@ int novfs_write_pages(void *Handle, stru + return (retCode); + } + +-int novfs_read_stream(void *ConnHandle, unsigned char * Handle, u_char * Buffer, +- size_t * Bytes, loff_t * Offset, int User, +- struct novfs_schandle SessionId) ++int novfs_read_stream(void *ConnHandle, unsigned char *Handle, u_char * Buffer, ++ size_t * Bytes, loff_t * Offset, int User, struct 
novfs_schandle SessionId) + { + struct novfs_read_stream_request cmd; + struct novfs_read_stream_reply *reply = NULL; +@@ -1382,10 +1256,9 @@ int novfs_read_stream(void *ConnHandle, + len = *Bytes; + *Bytes = 0; + +- if (offsetof(struct novfs_read_file_reply, data) + len +- > novfs_max_iosize) { ++ if (offsetof(struct novfs_read_file_reply, data) + len > novfs_max_iosize) { + len = novfs_max_iosize - offsetof(struct +- novfs_read_file_reply, data); ++ novfs_read_file_reply, data); + len = (len / PAGE_SIZE) * PAGE_SIZE; + } + +@@ -1398,9 +1271,7 @@ int novfs_read_stream(void *ConnHandle, + cmd.len = len; + cmd.offset = *Offset; + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + DbgPrint("Queue_Daemon_Command 0x%x replylen=%d", retCode, replylen); + +@@ -1410,12 +1281,10 @@ int novfs_read_stream(void *ConnHandle, + retCode = -EIO; + } else { + replylen -= offsetof(struct +- novfs_read_stream_reply, data); ++ novfs_read_stream_reply, data); + if (replylen > 0) { + if (User) { +- replylen -= +- copy_to_user(Buffer, reply->data, +- replylen); ++ replylen -= copy_to_user(Buffer, reply->data, replylen); + } else { + memcpy(Buffer, reply->data, replylen); + } +@@ -1431,11 +1300,11 @@ int novfs_read_stream(void *ConnHandle, + return (retCode); + } + +-int novfs_write_stream(void *ConnHandle, unsigned char * Handle, u_char * Buffer, ++int novfs_write_stream(void *ConnHandle, unsigned char *Handle, u_char * Buffer, + size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) + { +- struct novfs_write_stream_request * cmd; +- struct novfs_write_stream_reply * reply = NULL; ++ struct novfs_write_stream_request *cmd; ++ struct novfs_write_stream_reply *reply = NULL; + unsigned long replylen = 0; + int retCode = 0, cmdlen; + size_t len; +@@ -1449,7 +1318,7 @@ int novfs_write_stream(void *ConnHandle, + if (cmdlen > novfs_max_iosize) { + cmdlen = novfs_max_iosize; + len = cmdlen - offsetof(struct +- novfs_write_stream_request, data); ++ novfs_write_stream_request, data); + } + + DbgPrint("cmdlen=%d len=%d", cmdlen, len); +@@ -1472,9 +1341,7 @@ int novfs_write_stream(void *ConnHandle, + cmd->len = len; + cmd->offset = *Offset; + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + switch (reply->Reply.ErrorCode) { + case 0: +@@ -1493,8 +1360,7 @@ int novfs_write_stream(void *ConnHandle, + retCode = -EIO; + break; + } +- DbgPrint("reply->bytesWritten=0x%lx", +- reply->bytesWritten); ++ DbgPrint("reply->bytesWritten=0x%lx", reply->bytesWritten); + *Bytes = reply->bytesWritten; + kfree(reply); + } +@@ -1505,7 +1371,7 @@ int novfs_write_stream(void *ConnHandle, + return (retCode); + } + +-int novfs_close_stream(void *ConnHandle, unsigned char * Handle, struct novfs_schandle SessionId) ++int novfs_close_stream(void *ConnHandle, unsigned char *Handle, struct novfs_schandle SessionId) + { + struct novfs_close_stream_request cmd; + struct novfs_close_stream_reply *reply; +@@ -1519,9 +1385,7 @@ int novfs_close_stream(void *ConnHandle, + cmd.connection = ConnHandle; + memcpy(cmd.handle, Handle, sizeof(cmd.handle)); + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, 0); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, 
(void *)&reply, &replylen, 0); + if (reply) { + retCode = 0; + if (reply->Reply.ErrorCode) { +@@ -1532,7 +1396,7 @@ int novfs_close_stream(void *ConnHandle, + return (retCode); + } + +-int novfs_delete(unsigned char * Path, int DirectoryFlag, struct novfs_schandle SessionId) ++int novfs_delete(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId) + { + struct novfs_delete_file_request *cmd; + struct novfs_delete_file_reply *reply; +@@ -1557,9 +1421,7 @@ int novfs_delete(unsigned char * Path, i + cmd->pathlength = pathlen; + memcpy(cmd->path, Path, pathlen); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = 0; + if (reply->Reply.ErrorCode) { +@@ -1571,7 +1433,7 @@ int novfs_delete(unsigned char * Path, i + retCode = -EACCES; + else if ((reply->Reply.ErrorCode & 0xFFFF) == 0x0513) + retCode = -ENOTEMPTY; +- else ++ else + retCode = -EIO; + } + kfree(reply); +@@ -1583,8 +1445,7 @@ int novfs_delete(unsigned char * Path, i + return (retCode); + } + +-int novfs_trunc(unsigned char * Path, int PathLen, +- struct novfs_schandle SessionId) ++int novfs_trunc(unsigned char *Path, int PathLen, struct novfs_schandle SessionId) + { + struct novfs_truncate_file_request *cmd; + struct novfs_truncate_file_reply *reply = NULL; +@@ -1596,7 +1457,7 @@ int novfs_trunc(unsigned char * Path, in + PathLen--; + } + cmdlen = offsetof(struct novfs_truncate_file_request, path) +- + PathLen; ++ + PathLen; + cmd = kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_TRUNCATE_FILE; +@@ -1606,9 +1467,7 @@ int novfs_trunc(unsigned char * Path, in + cmd->pathLen = PathLen; + memcpy(cmd->path, Path, PathLen); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + if (reply->Reply.ErrorCode) { + retCode = -EIO; +@@ -1622,8 +1481,7 @@ int novfs_trunc(unsigned char * Path, in + return (retCode); + } + +-int novfs_trunc_ex(void *Handle, loff_t Offset, +- struct novfs_schandle SessionId) ++int novfs_trunc_ex(void *Handle, loff_t Offset, struct novfs_schandle SessionId) + { + struct novfs_write_file_request cmd; + struct novfs_write_file_reply *reply = NULL; +@@ -1641,9 +1499,7 @@ int novfs_trunc_ex(void *Handle, loff_t + cmd.len = 0; + cmd.offset = Offset; + +- retCode = +- Queue_Daemon_Command(&cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + DbgPrint("retCode=0x%x reply=0x%p", retCode, reply); + +@@ -1680,9 +1536,8 @@ int novfs_trunc_ex(void *Handle, loff_t + return (retCode); + } + +-int novfs_rename_file(int DirectoryFlag, unsigned char * OldName, int OldLen, +- unsigned char * NewName, int NewLen, +- struct novfs_schandle SessionId) ++int novfs_rename_file(int DirectoryFlag, unsigned char *OldName, int OldLen, ++ unsigned char *NewName, int NewLen, struct novfs_schandle SessionId) + { + struct novfs_rename_file_request cmd; + struct novfs_rename_file_reply *reply; +@@ -1690,11 +1545,10 @@ int novfs_rename_file(int DirectoryFlag, + int retCode; + + __DbgPrint("%s:\n" +- " DirectoryFlag: %d\n" +- " OldName: %.*s\n" +- " NewName: %.*s\n" +- " SessionId: 0x%llx\n", __func__, +- DirectoryFlag, OldLen, OldName, 
NewLen, NewName, SessionId); ++ " DirectoryFlag: %d\n" ++ " OldName: %.*s\n" ++ " NewName: %.*s\n" ++ " SessionId: 0x%llx\n", __func__, DirectoryFlag, OldLen, OldName, NewLen, NewName, SessionId); + + cmd.Command.CommandType = VFS_COMMAND_RENAME_FILE; + cmd.Command.SequenceNumber = 0; +@@ -1715,9 +1569,7 @@ int novfs_rename_file(int DirectoryFlag, + cmd.oldnameLen = OldLen; + memcpy(cmd.oldname, OldName, OldLen); + +- retCode = +- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = 0; + if (reply->Reply.ErrorCode) { +@@ -1728,8 +1580,7 @@ int novfs_rename_file(int DirectoryFlag, + return (retCode); + } + +-int novfs_set_attr(unsigned char * Path, struct iattr *Attr, +- struct novfs_schandle SessionId) ++int novfs_set_attr(unsigned char *Path, struct iattr *Attr, struct novfs_schandle SessionId) + { + struct novfs_set_file_info_request *cmd; + struct novfs_set_file_info_reply *reply; +@@ -1743,7 +1594,7 @@ int novfs_set_attr(unsigned char * Path, + pathlen--; + } + +- cmdlen = offsetof(struct novfs_set_file_info_request,path) + pathlen; ++ cmdlen = offsetof(struct novfs_set_file_info_request, path) + pathlen; + cmd = kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_SET_FILE_INFO; +@@ -1765,9 +1616,7 @@ int novfs_set_attr(unsigned char * Path, + cmd->pathlength = pathlen; + memcpy(cmd->path, Path, pathlen); + +- retCode = +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, +- &replylen, INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + switch (reply->Reply.ErrorCode) { + case 0: +@@ -1795,8 +1644,7 @@ int novfs_set_attr(unsigned char * Path, + return (retCode); + } + +-int novfs_get_file_cache_flag(unsigned char * Path, +- struct novfs_schandle SessionId) ++int novfs_get_file_cache_flag(unsigned char *Path, struct novfs_schandle SessionId) + { + struct novfs_get_cache_flag *cmd; + struct novfs_get_cache_flag_reply *reply = NULL; +@@ -1813,10 +1661,9 @@ int novfs_get_file_cache_flag(unsigned c + if ('.' 
== Path[pathlen - 1]) + pathlen--; + } +- cmdlen = offsetof(struct novfs_get_cache_flag, path) + +- pathlen; ++ cmdlen = offsetof(struct novfs_get_cache_flag, path) + pathlen; + cmd = (struct novfs_get_cache_flag *) +- kmalloc(cmdlen, GFP_KERNEL); ++ kmalloc(cmdlen, GFP_KERNEL); + if (cmd) { + cmd->Command.CommandType = VFS_COMMAND_GET_CACHE_FLAG; + cmd->Command.SequenceNumber = 0; +@@ -1824,9 +1671,7 @@ int novfs_get_file_cache_flag(unsigned c + cmd->pathLen = pathlen; + memcpy(cmd->path, Path, cmd->pathLen); + +- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, +- (void *)&reply, &replylen, +- INTERRUPTIBLE); ++ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + +@@ -1851,8 +1696,7 @@ int novfs_get_file_cache_flag(unsigned c + * + * Notes: lock type - fcntl + */ +-int novfs_set_file_lock(struct novfs_schandle SessionId, void *Handle, +- unsigned char fl_type, loff_t fl_start, loff_t fl_len) ++int novfs_set_file_lock(struct novfs_schandle SessionId, void *Handle, unsigned char fl_type, loff_t fl_start, loff_t fl_len) + { + struct novfs_set_file_lock_request *cmd; + struct novfs_set_file_lock_reply *reply = NULL; +@@ -1863,8 +1707,7 @@ int novfs_set_file_lock(struct novfs_sch + + DbgPrint("SessionId: 0x%llx\n", SessionId); + +- cmd = +- (struct novfs_set_file_lock_request *) kmalloc(sizeof(struct novfs_set_file_lock_request), GFP_KERNEL); ++ cmd = (struct novfs_set_file_lock_request *)kmalloc(sizeof(struct novfs_set_file_lock_request), GFP_KERNEL); + + if (cmd) { + DbgPrint("2"); +@@ -1887,20 +1730,17 @@ int novfs_set_file_lock(struct novfs_sch + DbgPrint("3"); + + DbgPrint("BEGIN dump arguments"); +- DbgPrint("Queue_Daemon_Command %d", +- cmd->Command.CommandType); ++ DbgPrint("Queue_Daemon_Command %d", cmd->Command.CommandType); + DbgPrint("cmd->handle = 0x%p", cmd->handle); + DbgPrint("cmd->fl_type = %u", cmd->fl_type); + DbgPrint("cmd->fl_start = 0x%X", cmd->fl_start); + DbgPrint("cmd->fl_len = 0x%X", cmd->fl_len); +- DbgPrint("sizeof(SET_FILE_LOCK_REQUEST) = %u", +- sizeof(struct novfs_set_file_lock_request)); ++ DbgPrint("sizeof(SET_FILE_LOCK_REQUEST) = %u", sizeof(struct novfs_set_file_lock_request)); + DbgPrint("END dump arguments"); + + retCode = + Queue_Daemon_Command(cmd, sizeof(struct novfs_set_file_lock_request), +- NULL, 0, (void *)&reply, &replylen, +- INTERRUPTIBLE); ++ NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); + DbgPrint("4"); + + if (reply) { +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -36,7 +36,6 @@ + /*===[ Include files specific to this module ]============================*/ + #include "vfs.h" + +- + struct inode_data { + void *Scope; + unsigned long Flags; +@@ -45,7 +44,7 @@ struct inode_data { + unsigned long cntDC; + struct list_head DirCache; + struct semaphore DirCacheLock; +- void * FileHandle; ++ void *FileHandle; + int CacheFlag; + char Name[1]; /* Needs to be last entry */ + }; +@@ -57,12 +56,10 @@ struct inode_data { + static unsigned long novfs_internal_hash(struct qstr *name); + static int novfs_d_add(struct dentry *p, struct dentry *d, struct inode *i, int add); + +-static int novfs_get_sb(struct file_system_type *Fstype, int Flags, +- const char *Dev_name, void *Data, struct vfsmount *Mnt); ++static int novfs_get_sb(struct file_system_type *Fstype, int Flags, const char *Dev_name, void *Data, struct vfsmount *Mnt); + + static void novfs_kill_sb(struct super_block *SB); + +- + /* + * Declared dentry_operations + */ +@@ -81,8 +78,7 @@ int novfs_dir_release(struct inode *inod + loff_t 
novfs_dir_lseek(struct file *file, loff_t offset, int origin); + ssize_t novfs_dir_read(struct file *file, char *buf, size_t len, loff_t * off); + void addtodentry(struct dentry *Parent, unsigned char *List, int Level); +-int novfs_filldir(void *data, const char *name, int namelen, loff_t off, +- ino_t ino, unsigned ftype); ++int novfs_filldir(void *data, const char *name, int namelen, loff_t off, ino_t ino, unsigned ftype); + int novfs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir); + int novfs_dir_fsync(struct file *file, int datasync); + +@@ -90,19 +86,14 @@ int novfs_dir_fsync(struct file *file, i + * Declared address space operations + */ + int novfs_a_writepage(struct page *page, struct writeback_control *wbc); +-int novfs_a_writepages(struct address_space *mapping, +- struct writeback_control *wbc); ++int novfs_a_writepages(struct address_space *mapping, struct writeback_control *wbc); + int novfs_a_write_begin(struct file *file, struct address_space *mapping, +- loff_t pos, unsigned len, unsigned flags, +- struct page **pagep, void **fsdata); ++ loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); + int novfs_a_write_end(struct file *file, struct address_space *mapping, +- loff_t pos, unsigned len, unsigned copied, +- struct page *pagep, void *fsdata); ++ loff_t pos, unsigned len, unsigned copied, struct page *pagep, void *fsdata); + int novfs_a_readpage(struct file *file, struct page *page); +-int novfs_a_readpages(struct file *file, struct address_space *mapping, +- struct list_head *page_lst, unsigned nr_pages); +-ssize_t novfs_a_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov, +- loff_t offset, unsigned long nr_segs); ++int novfs_a_readpages(struct file *file, struct address_space *mapping, struct list_head *page_lst, unsigned nr_pages); ++ssize_t novfs_a_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs); + + /* + * Declared file_operations +@@ -122,14 +113,12 @@ int novfs_f_lock(struct file *, int, str + * Declared inode_operations + */ + int novfs_i_create(struct inode *, struct dentry *, int, struct nameidata *); +-struct dentry *novfs_i_lookup(struct inode *, struct dentry *, +- struct nameidata *); ++struct dentry *novfs_i_lookup(struct inode *, struct dentry *, struct nameidata *); + int novfs_i_mkdir(struct inode *, struct dentry *, int); + int novfs_i_unlink(struct inode *dir, struct dentry *dentry); + int novfs_i_rmdir(struct inode *, struct dentry *); + int novfs_i_mknod(struct inode *, struct dentry *, int, dev_t); +-int novfs_i_rename(struct inode *, struct dentry *, struct inode *, +- struct dentry *); ++int novfs_i_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); + int novfs_i_setattr(struct dentry *, struct iattr *); + int novfs_i_getattr(struct vfsmount *mnt, struct dentry *, struct kstat *); + int novfs_i_revalidate(struct dentry *dentry); +@@ -138,10 +127,8 @@ int novfs_i_revalidate(struct dentry *de + * Extended attributes operations + */ + +-ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, +- size_t size); +-int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, +- size_t value_size, int flags); ++ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); ++int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, size_t value_size, int flags); + ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, 
size_t buffer_size); + + void update_inode(struct inode *Inode, struct novfs_entry_info *Info); +@@ -160,38 +147,27 @@ int novfs_statfs(struct dentry *de, stru + /* + * Declared control interface functions + */ +-ssize_t +-novfs_control_Read(struct file *file, char *buf, size_t nbytes, loff_t * ppos); ++ssize_t novfs_control_Read(struct file *file, char *buf, size_t nbytes, loff_t * ppos); + +-ssize_t +-novfs_control_write(struct file *file, const char *buf, size_t nbytes, +- loff_t * ppos); ++ssize_t novfs_control_write(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); + +-int novfs_control_ioctl(struct inode *inode, struct file *file, +- unsigned int cmd, unsigned long arg); ++int novfs_control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); + + int __init init_novfs(void); + void __exit exit_novfs(void); + + int novfs_lock_inode_cache(struct inode *i); + void novfs_unlock_inode_cache(struct inode *i); +-int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, +- ino_t * ino, struct novfs_entry_info *info); +-int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, +- struct novfs_entry_info *info); +-int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, +- struct novfs_entry_info *info); +-int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, +- struct novfs_entry_info *info, u64 * EntryTime); ++int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, ino_t * ino, struct novfs_entry_info *info); ++int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info); ++int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, struct novfs_entry_info *info); ++int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info, u64 * EntryTime); + int novfs_get_remove_entry(struct inode *i, ino_t * ino, struct novfs_entry_info *info); + void novfs_invalidate_inode_cache(struct inode *i); +-struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, +- ino_t ino); ++struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, ino_t ino); + int novfs_lookup_validate(struct inode *i, struct qstr *name, ino_t ino); +-int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, +- struct novfs_entry_info *info); +-int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, +- struct novfs_entry_info *info); ++int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info); ++int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info); + void novfs_remove_inode_entry(struct inode *i, struct qstr *name, ino_t ino); + void novfs_free_invalid_entries(struct inode *i); + void novfs_free_inode_cache(struct inode *i); +@@ -294,7 +270,6 @@ static struct file_operations novfs_Cont + + static atomic_t novfs_Inode_Number = ATOMIC_INIT(0); + +- + struct dentry *novfs_root = NULL; + char *novfs_current_mnt = NULL; + +@@ -325,18 +300,13 @@ static void PRINT_DENTRY(const char *s, + __DbgPrint(" d_lock: 0x%x\n", d->d_lock); + __DbgPrint(" d_inode: 0x%x\n", d->d_inode); + __DbgPrint(" d_lru: 0x%p\n" +- " next: 0x%p\n" +- " prev: 0x%p\n", &d->d_lru, d->d_lru.next, +- d->d_lru.prev); ++ " next: 0x%p\n" " prev: 0x%p\n", &d->d_lru, d->d_lru.next, d->d_lru.prev); + __DbgPrint(" d_child: 0x%p\n" " next: 0x%p\n" +- " prev: 0x%p\n", &d->d_u.d_child, 
+- d->d_u.d_child.next, d->d_u.d_child.prev); ++ " prev: 0x%p\n", &d->d_u.d_child, d->d_u.d_child.next, d->d_u.d_child.prev); + __DbgPrint(" d_subdirs: 0x%p\n" " next: 0x%p\n" +- " prev: 0x%p\n", &d->d_subdirs, d->d_subdirs.next, +- d->d_subdirs.prev); ++ " prev: 0x%p\n", &d->d_subdirs, d->d_subdirs.next, d->d_subdirs.prev); + __DbgPrint(" d_alias: 0x%p\n" " next: 0x%p\n" +- " prev: 0x%p\n", &d->d_alias, d->d_alias.next, +- d->d_alias.prev); ++ " prev: 0x%p\n", &d->d_alias, d->d_alias.next, d->d_alias.prev); + __DbgPrint(" d_time: 0x%x\n", d->d_time); + __DbgPrint(" d_op: 0x%p\n", d->d_op); + __DbgPrint(" d_sb: 0x%p\n", d->d_sb); +@@ -345,14 +315,11 @@ static void PRINT_DENTRY(const char *s, + __DbgPrint(" d_fsdata: 0x%p\n", d->d_fsdata); + /* DbgPrint(" d_cookie: 0x%x\n", d->d_cookie); */ + __DbgPrint(" d_parent: 0x%p\n", d->d_parent); +- __DbgPrint(" d_name: 0x%p %.*s\n", &d->d_name, d->d_name.len, +- d->d_name.name); ++ __DbgPrint(" d_name: 0x%p %.*s\n", &d->d_name, d->d_name.len, d->d_name.name); + __DbgPrint(" name: 0x%p\n" " len: %d\n" +- " hash: 0x%x\n", d->d_name.name, d->d_name.len, +- d->d_name.hash); ++ " hash: 0x%x\n", d->d_name.name, d->d_name.len, d->d_name.hash); + __DbgPrint(" d_hash: 0x%x\n" " next: 0x%x\n" +- " pprev: 0x%x\n", d->d_hash, d->d_hash.next, +- d->d_hash.pprev); ++ " pprev: 0x%x\n", d->d_hash, d->d_hash.next, d->d_hash.pprev); + } + + /*++======================================================================*/ +@@ -370,8 +337,7 @@ int novfs_remove_from_root(char *RemoveN + dentry = d_lookup(novfs_root, &name); + if (dentry) { + if (dentry->d_inode && dentry->d_inode->i_private) { +- struct inode_data *n_inode = +- dentry->d_inode->i_private; ++ struct inode_data *n_inode = dentry->d_inode->i_private; + n_inode->Scope = NULL; + } + dput(dentry); +@@ -410,7 +376,7 @@ int novfs_add_to_root(char *AddName) + info.size = 0; + info.atime = info.ctime = info.mtime = CURRENT_TIME; + +- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); ++ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number); + novfs_add_inode_entry(dir, &name, ino, &info); + } + +@@ -446,20 +412,16 @@ int novfs_Add_to_Root2(char *AddName) + /* + * done in novfs_d_add now... 
entry->d_fsdata = (void *)novfs_internal_hash( &name ); + */ +- inode = +- novfs_get_inode(novfs_root->d_sb, S_IFDIR | 0700, 0, novfs_scope_get_uid(scope), 0, &name); ++ inode = novfs_get_inode(novfs_root->d_sb, S_IFDIR | 0700, 0, novfs_scope_get_uid(scope), 0, &name); + DbgPrint("Inode=0x%p", inode); + if (inode) { +- inode->i_atime = +- inode->i_ctime = +- inode->i_mtime = CURRENT_TIME; ++ inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME; + if (!novfs_d_add(novfs_root, entry, inode, 1)) { + if (inode->i_private) { + struct inode_data *n_inode = inode->i_private; + n_inode->Flags = USER_INODE; + } +- PRINT_DENTRY("After novfs_d_add", +- entry); ++ PRINT_DENTRY("After novfs_d_add", entry); + } else { + dput(entry); + iput(inode); +@@ -524,8 +486,7 @@ int verify_dentry(struct dentry *dentry, + return (0); + } + +- if (dentry && dentry->d_parent && +- (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) { ++ if (dentry && dentry->d_parent && (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) { + parent = dget_parent(dentry); + + info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); +@@ -538,11 +499,8 @@ int verify_dentry(struct dentry *dentry, + if (!novfs_get_entry_time(dir, &name, &ino, info, &ctime)) { + inode = dentry->d_inode; + if (inode && inode->i_private && +- ((inode->i_size != info->size) || +- (inode->i_mtime.tv_sec != +- info->mtime.tv_sec) +- || (inode->i_mtime.tv_nsec != +- info->mtime.tv_nsec))) { ++ ((inode->i_size != info->size) || (inode->i_mtime.tv_sec != info->mtime.tv_sec) ++ || (inode->i_mtime.tv_nsec != info->mtime.tv_nsec))) { + /* + * Values don't match so update. + */ +@@ -563,8 +521,7 @@ int verify_dentry(struct dentry *dentry, + } + + if (IS_ROOT(dentry->d_parent)) { +- session = novfs_scope_get_sessionId( +- novfs_get_scope_from_name(&dentry->d_name)); ++ session = novfs_scope_get_sessionId(novfs_get_scope_from_name(&dentry->d_name)); + } else + session = novfs_scope_get_sessionId(id->Scope); + +@@ -595,7 +552,7 @@ int verify_dentry(struct dentry *dentry, + info->mode = S_IFDIR | 0700; + info->size = 0; + info->atime = info->ctime = info->mtime = CURRENT_TIME; +- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); ++ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number); + novfs_add_inode_entry(dir, &name, ino, info); + } + } +@@ -603,75 +560,48 @@ int verify_dentry(struct dentry *dentry, + novfs_free_invalid_entries(dir); + } else { + +- path = +- novfs_dget_path(dentry, info->name, +- PATH_LENGTH_BUFFER); ++ path = novfs_dget_path(dentry, info->name, PATH_LENGTH_BUFFER); + if (path) { +- if (dentry->d_name.len <= +- NW_MAX_PATH_LENGTH) { +- name.hash = +- novfs_internal_hash +- (&dentry->d_name); ++ if (dentry->d_name.len <= NW_MAX_PATH_LENGTH) { ++ name.hash = novfs_internal_hash(&dentry->d_name); + name.len = dentry->d_name.len; + name.name = dentry->d_name.name; + +- retVal = +- novfs_get_file_info(path, +- info, +- session); ++ retVal = novfs_get_file_info(path, info, session); + if (0 == retVal) { +- dentry->d_time = +- jiffies + +- (novfs_update_timeout +- * HZ); +- iLock = +- novfs_lock_inode_cache +- (dir); +- if (novfs_update_entry +- (dir, &name, 0, +- info)) { +- if (dentry-> +- d_inode) { ++ dentry->d_time = jiffies + (novfs_update_timeout * HZ); ++ iLock = novfs_lock_inode_cache(dir); ++ if (novfs_update_entry(dir, &name, 0, info)) { ++ if (dentry->d_inode) { + ino = dentry->d_inode->i_ino; + } else { +- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); ++ ino = (ino_t) 
atomic_inc_return(&novfs_Inode_Number); + } +- novfs_add_inode_entry +- (dir, &name, +- ino, info); ++ novfs_add_inode_entry(dir, &name, ino, info); + } + if (dentry->d_inode) { +- update_inode +- (dentry-> +- d_inode, +- info); +- id->Flags &= +- ~UPDATE_INODE; +- +- dentry-> +- d_inode-> +- i_flags &= +- ~S_DEAD; +- if (dentry-> +- d_inode-> +- i_private) { +- ((struct inode_data *) dentry->d_inode->i_private)->Scope = id->Scope; ++ update_inode(dentry->d_inode, info); ++ id->Flags &= ~UPDATE_INODE; ++ ++ dentry->d_inode->i_flags &= ~S_DEAD; ++ if (dentry->d_inode->i_private) { ++ ((struct inode_data *)dentry->d_inode->i_private)->Scope = ++ id->Scope; + } + } + } else if (-EINTR != retVal) { + retVal = 0; + iLock = novfs_lock_inode_cache(dir); + novfs_remove_inode_entry(dir, &name, 0); +- if (dentry->d_inode +- && !(dentry->d_inode->i_flags & S_DEAD)) { ++ if (dentry->d_inode && !(dentry->d_inode->i_flags & S_DEAD)) { + dentry->d_inode->i_flags |= S_DEAD; +- dentry->d_inode-> i_size = 0; ++ dentry->d_inode->i_size = 0; + dentry->d_inode->i_atime.tv_sec = +- dentry->d_inode->i_atime.tv_nsec = +- dentry->d_inode->i_ctime.tv_sec = +- dentry->d_inode->i_ctime.tv_nsec = +- dentry->d_inode->i_mtime.tv_sec = +- dentry->d_inode->i_mtime.tv_nsec = 0; ++ dentry->d_inode->i_atime.tv_nsec = ++ dentry->d_inode->i_ctime.tv_sec = ++ dentry->d_inode->i_ctime.tv_nsec = ++ dentry->d_inode->i_mtime.tv_sec = ++ dentry->d_inode->i_mtime.tv_nsec = 0; + dentry->d_inode->i_blocks = 0; + d_delete(dentry); /* Remove from cache */ + } +@@ -700,7 +630,6 @@ int verify_dentry(struct dentry *dentry, + return (retVal); + } + +- + static int novfs_d_add(struct dentry *Parent, struct dentry *d, struct inode *i, int a) + { + void *scope; +@@ -712,14 +641,13 @@ static int novfs_d_add(struct dentry *Pa + if (buf) { + path = novfs_dget_path(d, buf, PATH_LENGTH_BUFFER); + if (path) { +- DbgPrint("inode=0x%p ino=%d path %s", i, +- i->i_ino, path); ++ DbgPrint("inode=0x%p ino=%d path %s", i, i->i_ino, path); + } + kfree(buf); + } + + if (Parent && Parent->d_inode && Parent->d_inode->i_private) { +- id = (struct inode_data *) Parent->d_inode->i_private; ++ id = (struct inode_data *)Parent->d_inode->i_private; + } + + if (id && id->Scope) { +@@ -728,7 +656,7 @@ static int novfs_d_add(struct dentry *Pa + scope = novfs_get_scope(d); + } + +- ((struct inode_data *) i->i_private)->Scope = scope; ++ ((struct inode_data *)i->i_private)->Scope = scope; + + d->d_time = jiffies + (novfs_update_timeout * HZ); + if (a) { +@@ -750,16 +678,12 @@ int novfs_d_revalidate(struct dentry *de + __DbgPrint("%s: 0x%p %.*s\n" + " d_count: %d\n" + " d_inode: 0x%p\n", __func__, +- dentry, dentry->d_name.len, dentry->d_name.name, +- dentry->d_count, dentry->d_inode); ++ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_count, dentry->d_inode); + + if (IS_ROOT(dentry)) { + retCode = 1; + } else { +- if (dentry->d_inode && +- dentry->d_parent && +- (dir = dentry->d_parent->d_inode) && +- (id = dir->i_private)) { ++ if (dentry->d_inode && dentry->d_parent && (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) { + /* + * Check timer to see if in valid time limit + */ +@@ -769,18 +693,13 @@ int novfs_d_revalidate(struct dentry *de + */ + name.len = dentry->d_name.len; + name.name = dentry->d_name.name; +- name.hash = +- novfs_internal_hash(&dentry->d_name); ++ name.hash = novfs_internal_hash(&dentry->d_name); + dentry->d_time = 0; + + if (0 == verify_dentry(dentry, 0)) { + if (novfs_lock_inode_cache(dir)) { +- if (novfs_lookup_inode_cache +- 
(dir, &name, 0)) { +- dentry->d_time = +- jiffies + +- (novfs_update_timeout +- * HZ); ++ if (novfs_lookup_inode_cache(dir, &name, 0)) { ++ dentry->d_time = jiffies + (novfs_update_timeout * HZ); + retCode = 1; + } + novfs_unlock_inode_cache(dir); +@@ -800,8 +719,7 @@ int novfs_d_revalidate(struct dentry *de + */ + } + +- DbgPrint("return 0x%x %.*s", retCode, +- dentry->d_name.len, dentry->d_name.name); ++ DbgPrint("return 0x%x %.*s", retCode, dentry->d_name.len, dentry->d_name.name); + + return (retCode); + } +@@ -837,8 +755,7 @@ int novfs_d_strcmp(struct qstr *s1, stru + unsigned char *str1, *str2; + unsigned int len; + +- DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, +- s2->len, s2->name); ++ DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, s2->len, s2->name); + + if (s1->len && (s1->len == s2->len) && (s1->hash == s2->hash)) { + len = s1->len; +@@ -873,8 +790,7 @@ int novfs_d_delete(struct dentry *dentry + int retVal = 0; + + DbgPrint("0x%p %.*s; d_count: %d; d_inode: 0x%p", +- dentry, dentry->d_name.len, dentry->d_name.name, +- dentry->d_count, dentry->d_inode); ++ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_count, dentry->d_inode); + + if (dentry->d_inode && (dentry->d_inode->i_flags & S_DEAD)) { + retVal = 1; +@@ -887,15 +803,13 @@ int novfs_d_delete(struct dentry *dentry + + void novfs_d_release(struct dentry *dentry) + { +- DbgPrint("0x%p %.*s", dentry, dentry->d_name.len, +- dentry->d_name.name); ++ DbgPrint("0x%p %.*s", dentry, dentry->d_name.len, dentry->d_name.name); + } + + void novfs_d_iput(struct dentry *dentry, struct inode *inode) + { + DbgPrint("Inode=0x%p Ino=%d Dentry=0x%p i_state=%d Name=%.*s", +- inode, inode->i_ino, dentry, inode->i_state, dentry->d_name.len, +- dentry->d_name.name); ++ inode, inode->i_ino, dentry, inode->i_state, dentry->d_name.len, dentry->d_name.name); + + iput(inode); + +@@ -906,8 +820,7 @@ int novfs_dir_open(struct inode *dir, st + char *path, *buf; + struct file_private *file_private = NULL; + +- DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, +- file->f_dentry->d_name.len, file->f_dentry->d_name.name); ++ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + + buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); + if (buf) { +@@ -933,8 +846,7 @@ int novfs_dir_release(struct inode *dir, + struct inode *inode = file->f_dentry->d_inode; + struct novfs_schandle sessionId; + +- DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, +- file->f_dentry->d_name.len, file->f_dentry->d_name.name); ++ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + + if (file_private) { + if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) { +@@ -956,8 +868,7 @@ loff_t novfs_dir_lseek(struct file * fil + { + struct file_private *file_private = NULL; + +- DbgPrint("offset %lld %d Name %.*s", offset, origin, +- file->f_dentry->d_name.len, file->f_dentry->d_name.name); ++ DbgPrint("offset %lld %d Name %.*s", offset, origin, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + //printk("<1> seekdir file = %.*s offset = %i\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name, (int)offset); + + if (0 != offset) { +@@ -966,7 +877,7 @@ loff_t novfs_dir_lseek(struct file * fil + + file->f_pos = 0; + +- file_private = (struct file_private *) file->private_data; ++ file_private = (struct file_private *)file->private_data; + file_private->listedall = 0; + if (file_private->enumHandle && (file_private->enumHandle 
!= ((void *)-1))) { + struct novfs_schandle sessionId; +@@ -999,8 +910,7 @@ ssize_t novfs_dir_read(struct file * fil + } + return(rlen); + */ +- DbgPrint("%lld %d Name %.*s", *off, len, +- file->f_dentry->d_name.len, file->f_dentry->d_name.name); ++ DbgPrint("%lld %d Name %.*s", *off, len, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + return (generic_read_dir(file, buf, len, off)); + } + +@@ -1039,8 +949,7 @@ static void novfs_Dump_Info(struct novfs + DbgPrint("name = %s", namebuf); + } + +-void processList(struct file *file, void *dirent, filldir_t filldir, char *list, +- int type, struct novfs_schandle SessionId) ++void processList(struct file *file, void *dirent, filldir_t filldir, char *list, int type, struct novfs_schandle SessionId) + { + unsigned char *path, *buf = NULL, *cp; + struct qstr name; +@@ -1066,20 +975,16 @@ void processList(struct file *file, void + name.hash = novfs_internal_hash(&name); + cp += (name.len + 1); + +- pinfo = +- kmalloc(sizeof(struct novfs_entry_info) + +- PATH_LENGTH_BUFFER, GFP_KERNEL); ++ pinfo = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); + pinfo->mode = S_IFDIR | 0700; + pinfo->size = 0; +- pinfo->atime = pinfo->ctime = pinfo->mtime = +- CURRENT_TIME; ++ pinfo->atime = pinfo->ctime = pinfo->mtime = CURRENT_TIME; + strcpy(pinfo->name, name.name); + pinfo->namelength = name.len; + + novfs_Dump_Info(pinfo); + +- filldir(dirent, pinfo->name, pinfo->namelength, +- file->f_pos, file->f_pos, pinfo->mode >> 12); ++ filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, file->f_pos, pinfo->mode >> 12); + file->f_pos += 1; + + kfree(pinfo); +@@ -1091,8 +996,7 @@ void processList(struct file *file, void + } + } + +-int processEntries(struct file *file, void *dirent, filldir_t filldir, +- void ** enumHandle, struct novfs_schandle sessionId) ++int processEntries(struct file *file, void *dirent, filldir_t filldir, void **enumHandle, struct novfs_schandle sessionId) + { + unsigned char *path = NULL, *buf = NULL; + int count = 0, status = 0; +@@ -1111,9 +1015,7 @@ int processEntries(struct file *file, vo + } + //NWSearchfiles + count = 0; +- status = +- novfs_get_dir_listex(path, enumHandle, &count, &pinfo, +- sessionId); ++ status = novfs_get_dir_listex(path, enumHandle, &count, &pinfo, sessionId); + pInfoMem = pinfo; + + if ((count == -1) || (count == 0) || (status != 0)) { +@@ -1123,11 +1025,10 @@ int processEntries(struct file *file, vo + } + // parse resultset + while (pinfo && count--) { +- filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, +- file->f_pos, pinfo->mode >> 12); ++ filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, file->f_pos, pinfo->mode >> 12); + file->f_pos += 1; + +- pinfo = (struct novfs_entry_info *) (pinfo->name + pinfo->namelength); ++ pinfo = (struct novfs_entry_info *)(pinfo->name + pinfo->namelength); + } + + kfree(pInfoMem); +@@ -1146,9 +1047,8 @@ int novfs_dir_readdir(struct file *file, + struct file_private *file_private = NULL; + int lComm; + +- file_private = (struct file_private *) file->private_data; +- DbgPrint("Name %.*s", file->f_dentry->d_name.len, +- file->f_dentry->d_name.name); ++ file_private = (struct file_private *)file->private_data; ++ DbgPrint("Name %.*s", file->f_dentry->d_name.len, file->f_dentry->d_name.name); + + //printk("<1> file = %.*s\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name); + +@@ -1168,8 +1068,7 @@ int novfs_dir_readdir(struct file *file, + file_private->listedall = 1; + } else { + if (inHAX) { +- if 
(get_nanosecond_time() - inHAXTime > +- 100 * 1000 * 1000) { ++ if (get_nanosecond_time() - inHAXTime > 100 * 1000 * 1000) { + //printk("<1> xoverhack: it was long, long, long ago...\n"); + inHAX = 0; + } else { +@@ -1187,17 +1086,14 @@ int novfs_dir_readdir(struct file *file, + #endif + + if (file->f_pos == 0) { +- if (filldir(dirent, ".", 1, file->f_pos, inode->i_ino, DT_DIR) < +- 0) ++ if (filldir(dirent, ".", 1, file->f_pos, inode->i_ino, DT_DIR) < 0) + return 1; + file->f_pos++; + return 1; + } + + if (file->f_pos == 1) { +- if (filldir +- (dirent, "..", 2, file->f_pos, +- file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) ++ if (filldir(dirent, "..", 2, file->f_pos, file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) + return 1; + file->f_pos++; + return 1; +@@ -1209,17 +1105,12 @@ int novfs_dir_readdir(struct file *file, + + inode = file->f_dentry->d_inode; + if (inode && inode->i_private) { +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> +- Scope); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + if (0 == SC_PRESENT(sessionId)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(file->f_dentry); +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + } +- uid = novfs_scope_get_uid(((struct inode_data *) inode->i_private)->Scope); ++ uid = novfs_scope_get_uid(((struct inode_data *)inode->i_private)->Scope); + } else { + SC_INITIALIZE(sessionId); + uid = current_euid(); +@@ -1239,17 +1130,14 @@ int novfs_dir_readdir(struct file *file, + type = SERVER_LIST; + } else { + DbgPrint("Parent-Parent is Root directory"); +- novfs_get_vols(&file->f_dentry->d_name, +- &list, sessionId); ++ novfs_get_vols(&file->f_dentry->d_name, &list, sessionId); + type = VOLUME_LIST; + } + + processList(file, dirent, filldir, list, type, sessionId); + file_private->listedall = 1; + } else { +- status = +- processEntries(file, dirent, filldir, +- &file_private->enumHandle, sessionId); ++ status = processEntries(file, dirent, filldir, &file_private->enumHandle, sessionId); + + if (status != 0) { + file_private->listedall = 1; +@@ -1257,15 +1145,10 @@ int novfs_dir_readdir(struct file *file, + // Hack for crossover part 2 - begin + lComm = strlen(current->comm); + if ((lComm > 4) +- && (0 == +- strcmp(current->comm + lComm - 4, ".EXE"))) { +- if (filldir +- (dirent, " !xover", 7, file->f_pos, +- inode->i_ino, DT_DIR) < 0) ++ && (0 == strcmp(current->comm + lComm - 4, ".EXE"))) { ++ if (filldir(dirent, " !xover", 7, file->f_pos, inode->i_ino, DT_DIR) < 0) + return 1; +- if (filldir +- (dirent, "z!xover", 7, file->f_pos, +- inode->i_ino, DT_DIR) < 0) ++ if (filldir(dirent, "z!xover", 7, file->f_pos, inode->i_ino, DT_DIR) < 0) + return 1; + file->f_pos += 2; + } +@@ -1293,33 +1176,24 @@ ssize_t novfs_f_read(struct file * file, + struct novfs_schandle session; + struct inode_data *id; + +- if (file->f_dentry && +- (inode = file->f_dentry->d_inode) && +- (id = (struct inode_data *) inode->i_private)) { ++ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = (struct inode_data *)inode->i_private)) { + + DbgPrint("(0x%p 0x%p %d %lld %.*s)", +- file->private_data, +- buf, len, offset, +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name); ++ file->private_data, buf, len, 
offset, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + + if (novfs_page_cache && !(file->f_flags & O_DIRECT) && id->CacheFlag) { + totalread = do_sync_read(file, buf, len, off); + } else { + session = novfs_scope_get_sessionId(id->Scope); + if (0 == SC_PRESENT(session)) { +- id->Scope = +- novfs_get_scope(file->f_dentry); ++ id->Scope = novfs_get_scope(file->f_dentry); + session = novfs_scope_get_sessionId(id->Scope); + } + + while (len > 0 && (offset < i_size_read(inode))) { + int retval; + thisread = len; +- retval = +- novfs_read_file(file->private_data, buf, +- &thisread, &offset, +- session); ++ retval = novfs_read_file(file->private_data, buf, &thisread, &offset, session); + if (retval || !thisread) { + if (retval) { + totalread = retval; +@@ -1340,8 +1214,7 @@ ssize_t novfs_f_read(struct file * file, + return (totalread); + } + +-ssize_t novfs_f_write(struct file * file, const char *buf, size_t len, +- loff_t * off) ++ssize_t novfs_f_write(struct file * file, const char *buf, size_t len, loff_t * off) + { + ssize_t thiswrite, totalwrite = 0; + loff_t offset = *off; +@@ -1350,30 +1223,23 @@ ssize_t novfs_f_write(struct file * file + int status; + struct inode_data *id; + +- if (file->f_dentry && +- (inode = file->f_dentry->d_inode) && +- (id = file->f_dentry->d_inode->i_private)) { ++ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = file->f_dentry->d_inode->i_private)) { + DbgPrint("(0x%p 0x%p 0x%p %d %lld %.*s)", + file->private_data, inode, id->FileHandle, len, offset, +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name); ++ file->f_dentry->d_name.len, file->f_dentry->d_name.name); + +- if (novfs_page_cache && +- !(file->f_flags & O_DIRECT) && +- id->CacheFlag && !(file->f_flags & O_WRONLY)) { ++ if (novfs_page_cache && !(file->f_flags & O_DIRECT) && id->CacheFlag && !(file->f_flags & O_WRONLY)) { + totalwrite = do_sync_write(file, buf, len, off); + } else { + if (file->f_flags & O_APPEND) { + offset = i_size_read(inode); + DbgPrint("appending to end %lld %.*s", +- offset, file->f_dentry->d_name.len, +- file->f_dentry->d_name.name); ++ offset, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + } + + session = novfs_scope_get_sessionId(id->Scope); + if (0 == SC_PRESENT(session)) { +- id->Scope = +- novfs_get_scope(file->f_dentry); ++ id->Scope = novfs_get_scope(file->f_dentry); + session = novfs_scope_get_sessionId(id->Scope); + } + +@@ -1381,23 +1247,18 @@ ssize_t novfs_f_write(struct file * file + thiswrite = len; + if ((status = + novfs_write_file(file->private_data, +- (unsigned char *)buf, +- &thiswrite, &offset, +- session)) || !thiswrite) { ++ (unsigned char *)buf, &thiswrite, &offset, session)) || !thiswrite) { + totalwrite = status; + break; + } +- DbgPrint("thiswrite = 0x%x", +- thiswrite); ++ DbgPrint("thiswrite = 0x%x", thiswrite); + len -= thiswrite; + buf += thiswrite; + offset += thiswrite; + totalwrite += thiswrite; + if (offset > i_size_read(inode)) { + i_size_write(inode, offset); +- inode->i_blocks = +- (offset + inode->i_sb->s_blocksize - +- 1) >> inode->i_blkbits; ++ inode->i_blocks = (offset + inode->i_sb->s_blocksize - 1) >> inode->i_blkbits; + } + inode->i_mtime = inode->i_atime = CURRENT_TIME; + id->Flags |= UPDATE_INODE; +@@ -1416,8 +1277,7 @@ int novfs_f_readdir(struct file *file, v + return -EISDIR; + } + +-int novfs_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, +- unsigned long arg) ++int novfs_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) + { + 
DbgPrint("file=0x%p cmd=0x%x arg=0x%p", file, cmd, arg); + +@@ -1428,8 +1288,7 @@ int novfs_f_mmap(struct file *file, stru + { + int retCode = -EINVAL; + +- DbgPrint("file=0x%p %.*s", file, file->f_dentry->d_name.len, +- file->f_dentry->d_name.name); ++ DbgPrint("file=0x%p %.*s", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + + retCode = generic_file_mmap(file, vma); + +@@ -1449,80 +1308,56 @@ int novfs_f_open(struct inode *inode, st + int errInfo; + + DbgPrint("inode=0x%p file=0x%p dentry=0x%p dentry->d_inode=0x%p %.*s", +- inode, file, file->f_dentry, file->f_dentry->d_inode, +- file->f_dentry->d_name.len, file->f_dentry->d_name.name); ++ inode, file, file->f_dentry, file->f_dentry->d_inode, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + if (file->f_dentry) { + DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name, +- file->f_flags, file->f_mode, inode->i_mode); ++ file->f_dentry->d_name.len, file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode); + } + + if (inode && inode->i_private) { +- id = (struct inode_data *) file->f_dentry->d_inode->i_private; ++ id = (struct inode_data *)file->f_dentry->d_inode->i_private; + session = novfs_scope_get_sessionId(id->Scope); + if (0 == SC_PRESENT(session)) { + id->Scope = novfs_get_scope(file->f_dentry); + session = novfs_scope_get_sessionId(id->Scope); + } + +- info = kmalloc(sizeof(struct novfs_entry_info) + +- PATH_LENGTH_BUFFER, GFP_KERNEL); ++ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); + if (info) { +- path = +- novfs_dget_path(file->f_dentry, info->name, +- PATH_LENGTH_BUFFER); ++ path = novfs_dget_path(file->f_dentry, info->name, PATH_LENGTH_BUFFER); + if (path) { + if (file->f_flags & O_TRUNC) { +- errInfo = +- novfs_get_file_info(path, info, +- session); ++ errInfo = novfs_get_file_info(path, info, session); + + if (errInfo || info->size == 0) { + // clear O_TRUNC flag, bug #275366 +- file->f_flags = +- file->f_flags & (~O_TRUNC); ++ file->f_flags = file->f_flags & (~O_TRUNC); + } + } + + DbgPrint("%s", path); +- retCode = novfs_open_file(path, +- file-> +- f_flags & ~O_EXCL, +- info, +- &file->private_data, +- session); ++ retCode = novfs_open_file(path, file->f_flags & ~O_EXCL, info, &file->private_data, session); + +- DbgPrint("0x%x 0x%p", retCode, +- file->private_data); ++ DbgPrint("0x%x 0x%p", retCode, file->private_data); + if (!retCode) { + /* + *update_inode(inode, &info); + */ + //id->FileHandle = file->private_data; +- id->CacheFlag = +- novfs_get_file_cache_flag(path, +- session); ++ id->CacheFlag = novfs_get_file_cache_flag(path, session); + +- if (!novfs_get_file_info +- (path, info, session)) { ++ if (!novfs_get_file_info(path, info, session)) { + update_inode(inode, info); + } + + parent = dget_parent(file->f_dentry); + + if (parent && parent->d_inode) { +- struct inode *dir = +- parent->d_inode; ++ struct inode *dir = parent->d_inode; + novfs_lock_inode_cache(dir); + ino = 0; +- if (novfs_get_entry +- (dir, +- &file->f_dentry->d_name, +- &ino, info)) { +- ((struct inode_data *) inode-> +- i_private)->Flags |= +- UPDATE_INODE; ++ if (novfs_get_entry(dir, &file->f_dentry->d_name, &ino, info)) { ++ ((struct inode_data *)inode->i_private)->Flags |= UPDATE_INODE; + } + + novfs_unlock_inode_cache(dir); +@@ -1537,8 +1372,7 @@ int novfs_f_open(struct inode *inode, st + return (retCode); + } + +-int novfs_flush_mapping(void *Handle, struct address_space *mapping, +- struct 
novfs_schandle Session) ++int novfs_flush_mapping(void *Handle, struct address_space *mapping, struct novfs_schandle Session) + { + struct pagevec pagevec; + unsigned nrpages; +@@ -1549,10 +1383,7 @@ int novfs_flush_mapping(void *Handle, st + + do { + done = 1; +- nrpages = pagevec_lookup_tag(&pagevec, +- mapping, +- &index, +- PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE); ++ nrpages = pagevec_lookup_tag(&pagevec, mapping, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE); + + if (nrpages) { + struct page *page; +@@ -1570,16 +1401,11 @@ int novfs_flush_mapping(void *Handle, st + page_cache_get(page); + if (page->mapping == mapping) { + if (clear_page_dirty_for_io(page)) { +- rc = novfs_write_page(Handle, +- page, +- Session); ++ rc = novfs_write_page(Handle, page, Session); + if (!rc) { + //ClearPageDirty(page); + radix_tree_tag_clear +- (&mapping-> +- page_tree, +- page_index(page), +- PAGECACHE_TAG_DIRTY); ++ (&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); + } + } + } +@@ -1613,41 +1439,27 @@ int novfs_f_flush(struct file *file, fl_ + inode = file->f_dentry->d_inode; + DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", + file->f_dentry->d_name.len, +- file->f_dentry->d_name.name, file->f_flags, +- file->f_mode, inode->i_mode); ++ file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode); + + session = novfs_scope_get_sessionId(id->Scope); + if (0 == SC_PRESENT(session)) { +- id->Scope = +- novfs_get_scope(file->f_dentry); ++ id->Scope = novfs_get_scope(file->f_dentry); + session = novfs_scope_get_sessionId(id->Scope); + } + +- if (inode && +- inode->i_mapping && inode->i_mapping->nrpages) { ++ if (inode && inode->i_mapping && inode->i_mapping->nrpages) { + + DbgPrint("%.*s pages=%lu", +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name, +- inode->i_mapping->nrpages); ++ file->f_dentry->d_name.len, file->f_dentry->d_name.name, inode->i_mapping->nrpages); + + if (file->f_dentry && + file->f_dentry->d_inode && + file->f_dentry->d_inode->i_mapping && + file->f_dentry->d_inode->i_mapping->a_ops && +- file->f_dentry->d_inode->i_mapping->a_ops-> +- writepage) { +- rc = filemap_fdatawrite(file->f_dentry-> +- d_inode-> +- i_mapping); ++ file->f_dentry->d_inode->i_mapping->a_ops->writepage) { ++ rc = filemap_fdatawrite(file->f_dentry->d_inode->i_mapping); + } else { +- rc = novfs_flush_mapping(file-> +- private_data, +- file-> +- f_dentry-> +- d_inode-> +- i_mapping, +- session); ++ rc = novfs_flush_mapping(file->private_data, file->f_dentry->d_inode->i_mapping, session); + } + } + } +@@ -1662,9 +1474,7 @@ int novfs_f_release(struct inode *inode, + struct novfs_schandle session; + struct inode_data *id; + +- DbgPrint("path=%.*s handle=%p", +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name, file->private_data); ++ DbgPrint("path=%.*s handle=%p", file->f_dentry->d_name.len, file->f_dentry->d_name.name, file->private_data); + + if (inode && (id = inode->i_private)) { + session = novfs_scope_get_sessionId(id->Scope); +@@ -1675,26 +1485,18 @@ int novfs_f_release(struct inode *inode, + + if ((file->f_flags & O_ACCMODE) != O_RDONLY) { + DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name, file->f_flags, +- file->f_mode, inode->i_mode); ++ file->f_dentry->d_name.len, ++ file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode); + + if (inode->i_mapping && inode->i_mapping->nrpages) { + + DbgPrint("%.*s pages=%lu", +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name, +- 
inode->i_mapping->nrpages); +- +- if (inode->i_mapping->a_ops && +- inode->i_mapping->a_ops->writepage) { +- filemap_fdatawrite(file->f_dentry-> +- d_inode->i_mapping); ++ file->f_dentry->d_name.len, file->f_dentry->d_name.name, inode->i_mapping->nrpages); ++ ++ if (inode->i_mapping->a_ops && inode->i_mapping->a_ops->writepage) { ++ filemap_fdatawrite(file->f_dentry->d_inode->i_mapping); + } else { +- novfs_flush_mapping(file->private_data, +- file->f_dentry-> +- d_inode->i_mapping, +- session); ++ novfs_flush_mapping(file->private_data, file->f_dentry->d_inode->i_mapping, session); + } + } + } +@@ -1717,8 +1519,7 @@ int novfs_f_fsync(struct file *file, int + int novfs_f_llseek(struct file *file, loff_t offset, int origin) + { + DbgPrint("File=0x%p Name=%.*s offset=%lld origin=%d", +- file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, +- offset, origin); ++ file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, offset, origin); + return (generic_file_llseek(file, offset, origin)); + } + +@@ -1756,12 +1557,9 @@ int novfs_f_lock(struct file *file, int + struct inode_data *id; + loff_t len; + +- DbgPrint("(0x%p): begin in novfs_f_lock 0x%p", +- __builtin_return_address(0), file->private_data); +- DbgPrint("cmd = %d, F_GETLK = %d, F_SETLK = %d, F_SETLKW = %d", +- cmd, F_GETLK, F_SETLK, F_SETLKW); +- DbgPrint("lock->fl_start = 0x%llX, lock->fl_end = 0x%llX", +- lock->fl_start, lock->fl_end); ++ DbgPrint("(0x%p): begin in novfs_f_lock 0x%p", __builtin_return_address(0), file->private_data); ++ DbgPrint("cmd = %d, F_GETLK = %d, F_SETLK = %d, F_SETLKW = %d", cmd, F_GETLK, F_SETLK, F_SETLKW); ++ DbgPrint("lock->fl_start = 0x%llX, lock->fl_end = 0x%llX", lock->fl_start, lock->fl_end); + + err_code = -1; + if (lock->fl_start <= lock->fl_end) { +@@ -1771,18 +1569,13 @@ int novfs_f_lock(struct file *file, int + len = 0; + } + +- if (file->f_dentry && +- (inode = file->f_dentry->d_inode) && +- (id = (struct inode_data *) inode->i_private)) { ++ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = (struct inode_data *)inode->i_private)) { + DbgPrint("(0x%p 0x%p %.*s)", +- file->private_data, inode, +- file->f_dentry->d_name.len, +- file->f_dentry->d_name.name); ++ file->private_data, inode, file->f_dentry->d_name.len, file->f_dentry->d_name.name); + + session = novfs_scope_get_sessionId(id->Scope); + if (0 == SC_PRESENT(session)) { +- id->Scope = +- novfs_get_scope(file->f_dentry); ++ id->Scope = novfs_get_scope(file->f_dentry); + session = novfs_scope_get_sessionId(id->Scope); + } + +@@ -1793,22 +1586,14 @@ int novfs_f_lock(struct file *file, int + case F_SETLK64: + #endif + +- err_code = +- novfs_set_file_lock(session, +- file->private_data, +- lock->fl_type, +- lock->fl_start, len); ++ err_code = novfs_set_file_lock(session, file->private_data, lock->fl_type, lock->fl_start, len); + break; + + case F_SETLKW: + #ifdef F_GETLK64 + case F_SETLKW64: + #endif +- err_code = +- novfs_set_file_lock(session, +- file->private_data, +- lock->fl_type, +- lock->fl_start, len); ++ err_code = novfs_set_file_lock(session, file->private_data, lock->fl_type, lock->fl_start, len); + break; + + case F_GETLK: +@@ -1822,17 +1607,13 @@ int novfs_f_lock(struct file *file, int + break; + + default: +- printk +- ("<1> novfs in novfs_f_lock, not implemented cmd = %d\n", +- cmd); +- DbgPrint("novfs in novfs_f_lock, not implemented cmd = %d", +- cmd); ++ printk("<1> novfs in novfs_f_lock, not implemented cmd = %d\n", cmd); ++ DbgPrint("novfs in novfs_f_lock, not implemented cmd = %d", cmd); + 
break; + } + } + +- DbgPrint("lock->fl_type = %u, err_code 0x%X", +- lock->fl_type, err_code); ++ DbgPrint("lock->fl_type = %u, err_code 0x%X", lock->fl_type, err_code); + + if ((err_code != 0) && (err_code != -1) + && (err_code != -ENOSYS)) { +@@ -1847,8 +1628,7 @@ int novfs_f_lock(struct file *file, int + + /*++======================================================================*/ + static void novfs_copy_cache_pages(struct address_space *mapping, +- struct list_head *pages, int bytes_read, +- char *data, struct pagevec *plru_pvec) ++ struct list_head *pages, int bytes_read, char *data, struct pagevec *plru_pvec) + { + struct page *page; + char *target; +@@ -1872,8 +1652,7 @@ static void novfs_copy_cache_pages(struc + if (PAGE_CACHE_SIZE > bytes_read) { + memcpy(target, data, bytes_read); + /* zero the tail end of this partial page */ +- memset(target + bytes_read, 0, +- PAGE_CACHE_SIZE - bytes_read); ++ memset(target + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); + bytes_read = 0; + } else { + memcpy(target, data, PAGE_CACHE_SIZE); +@@ -1901,7 +1680,7 @@ int novfs_a_writepage(struct page *page, + struct novfs_data_list dlst[2]; + size_t len = PAGE_CACHE_SIZE; + +- session = novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)->Scope); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + + page_cache_get(page); + +@@ -1933,8 +1712,7 @@ int novfs_a_writepage(struct page *page, + return (retCode); + } + +-int novfs_a_writepages(struct address_space *mapping, +- struct writeback_control *wbc) ++int novfs_a_writepages(struct address_space *mapping, struct writeback_control *wbc) + { + int retCode = 0; + struct inode *inode = mapping->host; +@@ -1953,31 +1731,24 @@ int novfs_a_writepages(struct address_sp + size_t tsize; + + SC_INITIALIZE(session); +- DbgPrint("inode=0x%p mapping=0x%p wbc=0x%p nr_to_write=%d", +- inode, mapping, wbc, wbc->nr_to_write); ++ DbgPrint("inode=0x%p mapping=0x%p wbc=0x%p nr_to_write=%d", inode, mapping, wbc, wbc->nr_to_write); + + if (inode) { +- DbgPrint("Inode=0x%p Ino=%d Id=0x%p", inode, inode->i_ino, +- inode->i_private); ++ DbgPrint("Inode=0x%p Ino=%d Id=0x%p", inode, inode->i_ino, inode->i_private); + + if (NULL != (id = inode->i_private)) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); +- fh = ((struct inode_data *) inode->i_private)->FileHandle; ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); ++ fh = ((struct inode_data *)inode->i_private)->FileHandle; + } + } + + dlist = kmalloc(sizeof(struct novfs_data_list) * max_page_lookup, GFP_KERNEL); +- pages = +- kmalloc(sizeof(struct page *) * max_page_lookup, GFP_KERNEL); ++ pages = kmalloc(sizeof(struct page *) * max_page_lookup, GFP_KERNEL); + + if (id) +- DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p %s", +- inode, fh, dlist, pages, id->Name); ++ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p %s", inode, fh, dlist, pages, id->Name); + else +- DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p", +- inode, fh, dlist, pages); ++ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p", inode, fh, dlist, pages); + + if (dlist && pages) { + struct backing_dev_info *bdi = mapping->backing_dev_info; +@@ -2005,8 +1776,7 @@ int novfs_a_writepages(struct address_sp + + DbgPrint("nr_pages=%d", nr_pages); + if (!nr_pages) { +- memset(pages, 0, +- sizeof(struct page *) * max_page_lookup); ++ memset(pages, 0, sizeof(struct page *) * max_page_lookup); + + 
spin_lock_irq(&mapping->tree_lock); + +@@ -2016,13 +1786,8 @@ int novfs_a_writepages(struct address_sp + * the first entry for the reply buffer. + */ + nr_pages = +- radix_tree_gang_lookup_tag(&mapping-> +- page_tree, +- (void **)pages, +- index, +- max_page_lookup - +- 1, +- PAGECACHE_TAG_DIRTY); ++ radix_tree_gang_lookup_tag(&mapping->page_tree, ++ (void **)pages, index, max_page_lookup - 1, PAGECACHE_TAG_DIRTY); + + DbgPrint("2; nr_pages=%d\n", nr_pages); + /* +@@ -2044,9 +1809,7 @@ int novfs_a_writepages(struct address_sp + + if (nr_pages) { + index = pages[nr_pages - 1]->index + 1; +- pos = +- (loff_t) pages[0]-> +- index << PAGE_CACHE_SHIFT; ++ pos = (loff_t) pages[0]->index << PAGE_CACHE_SHIFT; + } + + if (!nr_pages) { +@@ -2062,9 +1825,7 @@ int novfs_a_writepages(struct address_sp + } + } else { + if (pages[i]) { +- pos = +- (loff_t) pages[i]-> +- index << PAGE_CACHE_SHIFT; ++ pos = (loff_t) pages[i]->index << PAGE_CACHE_SHIFT; + } + } + +@@ -2094,8 +1855,7 @@ int novfs_a_writepages(struct address_sp + if (wbc->sync_mode != WB_SYNC_NONE) + wait_on_page_writeback(page); + +- if (page->mapping != mapping +- || PageWriteback(page) ++ if (page->mapping != mapping || PageWriteback(page) + || !clear_page_dirty_for_io(page)) { + unlock_page(page); + continue; +@@ -2106,8 +1866,7 @@ int novfs_a_writepages(struct address_sp + dlptr[dlist_idx].len = PAGE_CACHE_SIZE; + dlptr[dlist_idx].rwflag = DLREAD; + dlist_idx++; +- DbgPrint("Add page=0x%p index=0x%lx", +- page, page->index); ++ DbgPrint("Add page=0x%p index=0x%lx", page, page->index); + } + + DbgPrint("dlist_idx=%d", dlist_idx); +@@ -2117,13 +1876,10 @@ int novfs_a_writepages(struct address_sp + * Check size so we don't write pass end of file. + */ + if ((pos + tsize) > i_size_read(inode)) { +- tsize = +- (size_t) (i_size_read(inode) - pos); ++ tsize = (size_t) (i_size_read(inode) - pos); + } + +- retCode = +- novfs_write_pages(fh, dlist, dlist_idx + 1, +- tsize, pos, session); ++ retCode = novfs_write_pages(fh, dlist, dlist_idx + 1, tsize, pos, session); + switch (retCode) { + case 0: + wbc->nr_to_write -= dlist_idx; +@@ -2144,13 +1900,11 @@ int novfs_a_writepages(struct address_sp + unlock_page((struct page *) + dlptr[dlist_idx - 1].page); + page_cache_release((struct page *) +- dlptr[dlist_idx - +- 1].page); +- DbgPrint("release page=0x%p index=0x%lx", +- dlptr[dlist_idx - 1].page, +- ((struct page *) +- dlptr[dlist_idx - +- 1].page)->index); ++ dlptr[dlist_idx - 1].page); ++ DbgPrint("release page=0x%p index=0x%lx", dlptr[dlist_idx - 1].page, ((struct page *) ++ dlptr[dlist_idx - ++ 1].page)-> ++ index); + if (!retCode) { + wbc->nr_to_write--; + } +@@ -2189,14 +1943,12 @@ int novfs_a_readpage(struct file *file, + struct novfs_schandle session; + + SC_INITIALIZE(session); +- DbgPrint("File=0x%p Name=%.*s Page=0x%p", file, +- file->f_dentry->d_name.len, file->f_dentry->d_name.name, page); ++ DbgPrint("File=0x%p Name=%.*s Page=0x%p", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, page); + + dentry = file->f_dentry; + + if (dentry) { +- DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, +- dentry->d_name.name); ++ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, dentry->d_name.name); + if (dentry->d_inode) { + inode = dentry->d_inode; + } +@@ -2206,15 +1958,10 @@ int novfs_a_readpage(struct file *file, + DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino); + + if (inode->i_private) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ session = 
novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + if (0 == SC_PRESENT(session)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(file->f_dentry); +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + } + } + } +@@ -2233,11 +1980,8 @@ int novfs_a_readpage(struct file *file, + dlst[1].len = PAGE_CACHE_SIZE; + dlst[1].rwflag = DLWRITE; + +- DbgPrint("calling= novfs_Read_Pages %lld", +- offset); +- retCode = +- novfs_read_pages(file->private_data, dlst, 2, &len, &offset, +- session); ++ DbgPrint("calling= novfs_Read_Pages %lld", offset); ++ retCode = novfs_read_pages(file->private_data, dlst, 2, &len, &offset, session); + if (len && (len < PAGE_CACHE_SIZE)) { + pbuf = kmap_atomic(page, KM_USER0); + memset(&((char *)pbuf)[len], 0, PAGE_CACHE_SIZE - len); +@@ -2254,8 +1998,7 @@ int novfs_a_readpage(struct file *file, + + } + +-int novfs_a_readpages(struct file *file, struct address_space *mapping, +- struct list_head *page_lst, unsigned nr_pages) ++int novfs_a_readpages(struct file *file, struct address_space *mapping, struct list_head *page_lst, unsigned nr_pages) + { + int retCode = 0; + struct inode *inode = NULL; +@@ -2271,15 +2014,12 @@ int novfs_a_readpages(struct file *file, + char *rbuf, done = 0; + SC_INITIALIZE(session); + +- DbgPrint("File=0x%p Name=%.*s Pages=%d", file, +- file->f_dentry->d_name.len, file->f_dentry->d_name.name, +- nr_pages); ++ DbgPrint("File=0x%p Name=%.*s Pages=%d", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, nr_pages); + + dentry = file->f_dentry; + + if (dentry) { +- DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, +- dentry->d_name.name); ++ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, dentry->d_name.name); + if (dentry->d_inode) { + inode = dentry->d_inode; + } +@@ -2289,15 +2029,10 @@ int novfs_a_readpages(struct file *file, + DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino); + + if (inode->i_private) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + if (0 == SC_PRESENT(session)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(file->f_dentry); +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + } + } + } +@@ -2321,8 +2056,7 @@ int novfs_a_readpages(struct file *file, + * Count number of contiguous pages. 
+ */ + list_for_each_entry_reverse(tpage, page_lst, lru) { +- if ((next_index != tpage->index) || +- (len >= novfs_max_iosize - PAGE_SIZE)) { ++ if ((next_index != tpage->index) || (len >= novfs_max_iosize - PAGE_SIZE)) { + break; + } + len += PAGE_SIZE; +@@ -2337,14 +2071,9 @@ int novfs_a_readpages(struct file *file, + dllst[1].len = len; + dllst[1].rwflag = DLWRITE; + +- DbgPrint("calling novfs_Read_Pages %lld", +- offset); +- if (!novfs_read_pages +- (file->private_data, dllst, 2, &len, +- &offset, session)) { +- novfs_copy_cache_pages(mapping, +- page_lst, len, +- rbuf, &lru_pvec); ++ DbgPrint("calling novfs_Read_Pages %lld", offset); ++ if (!novfs_read_pages(file->private_data, dllst, 2, &len, &offset, session)) { ++ novfs_copy_cache_pages(mapping, page_lst, len, rbuf, &lru_pvec); + page_idx += len >> PAGE_CACHE_SHIFT; + if ((int)(len & PAGE_CACHE_MASK) != len) { + page_idx++; +@@ -2362,8 +2091,7 @@ int novfs_a_readpages(struct file *file, + * Free any remaining pages. + */ + while (!list_empty(page_lst)) { +- struct page *page = +- list_entry(page_lst->prev, struct page, lru); ++ struct page *page = list_entry(page_lst->prev, struct page, lru); + + list_del(&page->lru); + page_cache_release(page); +@@ -2381,8 +2109,7 @@ int novfs_a_readpages(struct file *file, + } + + int novfs_a_write_begin(struct file *file, struct address_space *mapping, +- loff_t pos, unsigned len, unsigned flags, +- struct page **pagep, void **fsdata) ++ loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) + { + int retVal = 0; + loff_t offset = pos; +@@ -2405,8 +2132,7 @@ int novfs_a_write_begin(struct file *fil + *pagep = page; + + DbgPrint("File=0x%p Page=0x%p offset=0x%llx From=%u To=%u " +- "filesize=%lld\n", file, page, offset, from, to, +- i_size_read(file->f_dentry->d_inode)); ++ "filesize=%lld\n", file, page, offset, from, to, i_size_read(file->f_dentry->d_inode)); + if (!PageUptodate(page)) { + /* + * Check to see if whole page +@@ -2424,17 +2150,11 @@ int novfs_a_write_begin(struct file *fil + */ + if (file->f_dentry && file->f_dentry->d_inode) { + if (file->f_dentry->d_inode->i_private) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) +- inode-> +- i_private)-> +- Scope); ++ session = novfs_scope_get_sessionId(((struct inode_data *) ++ inode->i_private)->Scope); + if (0 == SC_PRESENT(session)) { +- ((struct inode_data *) inode-> +- i_private)->Scope = +- novfs_get_scope(file->f_dentry); +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + } + } + } +@@ -2456,10 +2176,8 @@ int novfs_a_write_begin(struct file *fil + dllst[1].len = len; + dllst[1].rwflag = DLWRITE; + +- DbgPrint("calling novfs_Read_Pages %lld", +- offset); +- novfs_read_pages(file->private_data, dllst, 2, +- &len, &offset, session); ++ DbgPrint("calling novfs_Read_Pages %lld", offset); ++ novfs_read_pages(file->private_data, dllst, 2, &len, &offset, session); + + /* + * Zero unnsed page. 
+@@ -2490,8 +2208,7 @@ int novfs_a_write_begin(struct file *fil + } + + int novfs_a_write_end(struct file *file, struct address_space *mapping, +- loff_t pos, unsigned len, unsigned copied, +- struct page *page, void *fsdata) ++ loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) + { + int retCode = 0; + struct inode *inode = page->mapping->host; +@@ -2507,11 +2224,9 @@ int novfs_a_write_end(struct file *file, + from = pos & (PAGE_CACHE_SIZE - 1); + to = from + len; + +- + DbgPrint("File=0x%p Page=0x%p offset=0x%x To=%u filesize=%lld", + file, page, offset, to, i_size_read(file->f_dentry->d_inode)); +- if (file->f_dentry->d_inode +- && (id = file->f_dentry->d_inode->i_private)) { ++ if (file->f_dentry->d_inode && (id = file->f_dentry->d_inode->i_private)) { + session = novfs_scope_get_sessionId(id->Scope); + if (0 == SC_PRESENT(session)) { + id->Scope = novfs_get_scope(file->f_dentry); +@@ -2528,20 +2243,17 @@ int novfs_a_write_end(struct file *file, + } + + if (!PageUptodate(page)) { +- pos = +- ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; ++ pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; + + if (to < offset) { + return (retCode); + } + dlst[0].page = page; +- dlst[0].offset = (void *)(unsigned long) offset; ++ dlst[0].offset = (void *)(unsigned long)offset; + dlst[0].len = len; + dlst[0].rwflag = DLREAD; + +- retCode = +- novfs_write_pages(id->FileHandle, dlst, 1, len, pos, +- session); ++ retCode = novfs_write_pages(id->FileHandle, dlst, 1, len, pos, session); + + } else { + set_page_dirty(page); +@@ -2552,9 +2264,7 @@ int novfs_a_write_end(struct file *file, + } + + /*++======================================================================*/ +-ssize_t novfs_a_direct_IO(int rw, struct kiocb * kiocb, +- const struct iovec * iov, +- loff_t offset, unsigned long nr_segs) ++ssize_t novfs_a_direct_IO(int rw, struct kiocb * kiocb, const struct iovec * iov, loff_t offset, unsigned long nr_segs) + /* + * + * Notes: This is a dummy function so that we can allow a file +@@ -2568,8 +2278,7 @@ ssize_t novfs_a_direct_IO(int rw, struct + } + + /*++======================================================================*/ +-int novfs_i_create(struct inode *dir, struct dentry *dentry, int mode, +- struct nameidata *nd) ++int novfs_i_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) + { + char *path, *buf; + struct novfs_entry_info info; +@@ -2577,8 +2286,7 @@ int novfs_i_create(struct inode *dir, st + struct novfs_schandle session; + int retCode = -EACCES; + +- DbgPrint("mode=0%o flags=0%o %.*s", mode, +- nd->NDOPENFLAGS, dentry->d_name.len, dentry->d_name.name); ++ DbgPrint("mode=0%o flags=0%o %.*s", mode, nd->NDOPENFLAGS, dentry->d_name.len, dentry->d_name.name); + + if (IS_ROOT(dentry) || /* Root */ + IS_ROOT(dentry->d_parent) || /* User */ +@@ -2589,41 +2297,23 @@ int novfs_i_create(struct inode *dir, st + + if (mode | S_IFREG) { + if (dir->i_private) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)-> +- Scope); ++ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); + if (0 == SC_PRESENT(session)) { +- ((struct inode_data *) dir->i_private)->Scope = +- novfs_get_scope(dentry); +- session = +- novfs_scope_get_sessionId(((struct inode_data *) dir-> +- i_private)->Scope); ++ ((struct inode_data *)dir->i_private)->Scope = novfs_get_scope(dentry); ++ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); + } + + buf = 
kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); + if (buf) { +- path = +- novfs_dget_path(dentry, buf, +- PATH_LENGTH_BUFFER); ++ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); + if (path) { +- retCode = +- novfs_open_file(path, +- nd-> +- NDOPENFLAGS | +- O_RDWR, &info, +- &handle, session); ++ retCode = novfs_open_file(path, nd->NDOPENFLAGS | O_RDWR, &info, &handle, session); + if (!retCode && handle) { +- novfs_close_file(handle, +- session); +- if (!novfs_i_mknod +- (dir, dentry, +- mode | S_IFREG, 0)) { ++ novfs_close_file(handle, session); ++ if (!novfs_i_mknod(dir, dentry, mode | S_IFREG, 0)) { + if (dentry->d_inode) { + ((struct inode_data *) +- dentry-> +- d_inode-> +- i_private)-> +- Flags |= UPDATE_INODE; ++ dentry->d_inode->i_private)->Flags |= UPDATE_INODE; + } + } + } +@@ -2643,21 +2333,16 @@ void update_inode(struct inode *Inode, s + + DbgPrint("atime=%s", ctime_r(&Info->atime.tv_sec, dbuf)); + DbgPrint("ctime=%s", ctime_r(&Info->ctime.tv_sec, dbuf)); +- DbgPrint("mtime=%s %d", ctime_r(&Info->mtime.tv_sec, dbuf), +- Info->mtime.tv_nsec); ++ DbgPrint("mtime=%s %d", ctime_r(&Info->mtime.tv_sec, dbuf), Info->mtime.tv_nsec); + DbgPrint("size=%lld", Info->size); + DbgPrint("mode=0%o", Info->mode); + + if (Inode && + ((Inode->i_size != Info->size) || +- (Inode->i_mtime.tv_sec != Info->mtime.tv_sec) || +- (Inode->i_mtime.tv_nsec != Info->mtime.tv_nsec))) { +- DbgPrint ("calling invalidate_remote_inode sz %d %d", +- Inode->i_size, Info->size); +- DbgPrint ("calling invalidate_remote_inode sec %d %d", +- Inode->i_mtime.tv_sec, Info->mtime.tv_sec); +- DbgPrint ("calling invalidate_remote_inode ns %d %d", +- Inode->i_mtime.tv_nsec, Info->mtime.tv_nsec); ++ (Inode->i_mtime.tv_sec != Info->mtime.tv_sec) || (Inode->i_mtime.tv_nsec != Info->mtime.tv_nsec))) { ++ DbgPrint("calling invalidate_remote_inode sz %d %d", Inode->i_size, Info->size); ++ DbgPrint("calling invalidate_remote_inode sec %d %d", Inode->i_mtime.tv_sec, Info->mtime.tv_sec); ++ DbgPrint("calling invalidate_remote_inode ns %d %d", Inode->i_mtime.tv_nsec, Info->mtime.tv_nsec); + + if (Inode && Inode->i_mapping) { + invalidate_remote_inode(Inode); +@@ -2679,8 +2364,8 @@ void update_inode(struct inode *Inode, s + * + * Update i_blocks to have the number of 512 blocks + */ +- Inode->i_blocks = (((loff_t)Info->size) + Inode->i_sb->s_blocksize - 1) +- >> (loff_t)Inode->i_blkbits; ++ Inode->i_blocks = (((loff_t) Info->size) + Inode->i_sb->s_blocksize - 1) ++ >> (loff_t) Inode->i_blkbits; + Inode->i_blocks = Inode->i_blocks << (PAGE_CACHE_SHIFT - 9); + Inode->i_bytes = Info->size & (Inode->i_sb->s_blocksize - 1); + +@@ -2691,8 +2376,7 @@ void update_inode(struct inode *Inode, s + } + } + +-struct dentry *novfs_i_lookup(struct inode *dir, struct dentry *dentry, +- struct nameidata *nd) ++struct dentry *novfs_i_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) + { + struct dentry *retVal = ERR_PTR(-ENOENT); + struct dentry *parent; +@@ -2709,15 +2393,12 @@ struct dentry *novfs_i_lookup(struct ino + char *path; + path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); + if (path) { +- DbgPrint("dir 0x%p %d hash %d inode 0x%0p %s", +- dir, dir->i_ino, dentry->d_name.hash, +- dentry->d_inode, path); ++ DbgPrint("dir 0x%p %d hash %d inode 0x%0p %s", dir, dir->i_ino, dentry->d_name.hash, dentry->d_inode, path); + } + kfree(buf); + } else { + DbgPrint("dir 0x%p %d name %.*s hash %d inode 0x%0p", +- dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name, +- dentry->d_name.hash, dentry->d_inode); ++ dir, dir->i_ino, 
dentry->d_name.len, dentry->d_name.name, dentry->d_name.hash, dentry->d_inode); + } + + if ((dentry->d_name.len == 7) +@@ -2742,27 +2423,18 @@ struct dentry *novfs_i_lookup(struct ino + inode = novfs_root->d_inode; + return (0); + } else { +- info = +- kmalloc(sizeof(struct novfs_entry_info) + +- PATH_LENGTH_BUFFER, GFP_KERNEL); ++ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); + if (info) { +- if (NULL == +- (retVal = +- ERR_PTR(verify_dentry(dentry, 1)))) { ++ if (NULL == (retVal = ERR_PTR(verify_dentry(dentry, 1)))) { + name.name = dentry->d_name.name; + name.len = dentry->d_name.len; + name.hash = novfs_internal_hash(&name); + + if (novfs_lock_inode_cache(dir)) { +- if (!novfs_get_entry +- (dir, &name, &ino, info)) { +- inode = +- ilookup(dentry-> +- d_sb, ino); ++ if (!novfs_get_entry(dir, &name, &ino, info)) { ++ inode = ilookup(dentry->d_sb, ino); + if (inode) { +- update_inode +- (inode, +- info); ++ update_inode(inode, info); + } + } + novfs_unlock_inode_cache(dir); +@@ -2775,16 +2447,13 @@ struct dentry *novfs_i_lookup(struct ino + uid = novfs_scope_get_uid(novfs_get_scope(dentry)); + } + if (novfs_lock_inode_cache(dir)) { +- inode = novfs_get_inode (dentry->d_sb, info->mode, 0, uid, ino, &name); ++ inode = novfs_get_inode(dentry->d_sb, info->mode, 0, uid, ino, &name); + if (inode) { + if (!novfs_get_entry(dir, &dentry->d_name, &ino, info)) { +- update_inode +- (inode, +- info); ++ update_inode(inode, info); + } + } +- novfs_unlock_inode_cache +- (dir); ++ novfs_unlock_inode_cache(dir); + } + } + } +@@ -2806,8 +2475,7 @@ struct dentry *novfs_i_lookup(struct ino + if (info) + kfree(info); + +- DbgPrint("inode=0x%p dentry->d_inode=0x%p return=0x%p", +- dir, dentry->d_inode, retVal); ++ DbgPrint("inode=0x%p dentry->d_inode=0x%p return=0x%p", dir, dentry->d_inode, retVal); + + return (retVal); + } +@@ -2820,15 +2488,11 @@ int novfs_i_unlink(struct inode *dir, st + char *path, *buf; + uint64_t t64; + +- DbgPrint("dir=0x%p dir->i_ino=%d %.*s", dir, +- dir->i_ino, dentry->d_name.len, dentry->d_name.name); ++ DbgPrint("dir=0x%p dir->i_ino=%d %.*s", dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name); + DbgPrint("IS_ROOT(dentry)=%d", IS_ROOT(dentry)); +- DbgPrint("IS_ROOT(dentry->d_parent)=%d", +- IS_ROOT(dentry->d_parent)); +- DbgPrint("IS_ROOT(dentry->d_parent->d_parent)=%d", +- IS_ROOT(dentry->d_parent->d_parent)); +- DbgPrint("IS_ROOT(dentry->d_parent->d_parent->d_parent)=%d", +- IS_ROOT(dentry->d_parent->d_parent->d_parent)); ++ DbgPrint("IS_ROOT(dentry->d_parent)=%d", IS_ROOT(dentry->d_parent)); ++ DbgPrint("IS_ROOT(dentry->d_parent->d_parent)=%d", IS_ROOT(dentry->d_parent->d_parent)); ++ DbgPrint("IS_ROOT(dentry->d_parent->d_parent->d_parent)=%d", IS_ROOT(dentry->d_parent->d_parent->d_parent)); + + if (IS_ROOT(dentry) || /* Root */ + IS_ROOT(dentry->d_parent) || /* User */ +@@ -2839,34 +2503,23 @@ int novfs_i_unlink(struct inode *dir, st + + inode = dentry->d_inode; + if (inode) { +- DbgPrint("dir=0x%p dir->i_ino=%d inode=0x%p ino=%d", +- dir, dir->i_ino, inode, inode->i_ino); ++ DbgPrint("dir=0x%p dir->i_ino=%d inode=0x%p ino=%d", dir, dir->i_ino, inode, inode->i_ino); + if (inode->i_private) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + if (0 == SC_PRESENT(session)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(dentry); +- session = +- novfs_scope_get_sessionId(((struct 
inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + } + + buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); + if (buf) { +- path = +- novfs_dget_path(dentry, buf, +- PATH_LENGTH_BUFFER); ++ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); + if (path) { +- DbgPrint("path %s mode 0%o", +- path, inode->i_mode); ++ DbgPrint("path %s mode 0%o", path, inode->i_mode); + if (IS_ROOT(dentry->d_parent->d_parent)) { + retCode = novfs_daemon_logout(&dentry->d_name, &session); + } else { +- retCode = +- novfs_delete(path, +- S_ISDIR(inode->i_mode), session); ++ retCode = novfs_delete(path, S_ISDIR(inode->i_mode), session); + if (retCode) { + struct iattr ia; + memset(&ia, 0, sizeof(ia)); +@@ -2877,14 +2530,10 @@ int novfs_i_unlink(struct inode *dir, st + } + } + if (!retCode || IS_DEADDIR(inode)) { +- novfs_remove_inode_entry(dir, +- &dentry-> +- d_name, +- 0); ++ novfs_remove_inode_entry(dir, &dentry->d_name, 0); + dentry->d_time = 0; + t64 = 0; +- novfs_scope_set_userspace(&t64, &t64, +- &t64, &t64); ++ novfs_scope_set_userspace(&t64, &t64, &t64, &t64); + retCode = 0; + } + } +@@ -2907,8 +2556,7 @@ int novfs_i_mkdir(struct inode *dir, str + uid_t uid; + + DbgPrint("dir=0x%p ino=%d dentry=0x%p %.*s mode=0%lo", +- dir, dir->i_ino, dentry, dentry->d_name.len, +- dentry->d_name.name, mode); ++ dir, dir->i_ino, dentry, dentry->d_name.len, dentry->d_name.name, mode); + + if (IS_ROOT(dentry) || /* Root */ + IS_ROOT(dentry->d_parent) || /* User */ +@@ -2920,61 +2568,35 @@ int novfs_i_mkdir(struct inode *dir, str + mode |= S_IFDIR; + mode &= (S_IFMT | S_IRWXU); + if (dir->i_private) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)->Scope); ++ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); + if (0 == SC_PRESENT(session)) { +- ((struct inode_data *) dir->i_private)->Scope = +- novfs_get_scope(dentry); +- session = +- novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)-> +- Scope); ++ ((struct inode_data *)dir->i_private)->Scope = novfs_get_scope(dentry); ++ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); + } + +- uid = novfs_scope_get_uid(((struct inode_data *) dir->i_private)->Scope); ++ uid = novfs_scope_get_uid(((struct inode_data *)dir->i_private)->Scope); + buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); + if (buf) { + path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); + if (path) { + DbgPrint("path %s", path); +- retCode = +- novfs_create(path, S_ISDIR(mode), session); ++ retCode = novfs_create(path, S_ISDIR(mode), session); + if (!retCode) { +- retCode = +- novfs_get_file_info(path, &info, +- session); ++ retCode = novfs_get_file_info(path, &info, session); + if (!retCode) { +- retCode = +- novfs_i_mknod(dir, dentry, +- mode, 0); ++ retCode = novfs_i_mknod(dir, dentry, mode, 0); + inode = dentry->d_inode; + if (inode) { +- update_inode(inode, +- &info); +- ((struct inode_data *) inode-> +- i_private)->Flags &= +- ~UPDATE_INODE; +- +- dentry->d_time = +- jiffies + +- (novfs_update_timeout +- * HZ); +- +- novfs_lock_inode_cache +- (dir); +- if (novfs_update_entry +- (dir, +- &dentry->d_name, 0, +- &info)) { +- novfs_add_inode_entry +- (dir, +- &dentry-> +- d_name, +- inode-> +- i_ino, +- &info); ++ update_inode(inode, &info); ++ ((struct inode_data *)inode->i_private)->Flags &= ~UPDATE_INODE; ++ ++ dentry->d_time = jiffies 
+ (novfs_update_timeout * HZ); ++ ++ novfs_lock_inode_cache(dir); ++ if (novfs_update_entry(dir, &dentry->d_name, 0, &info)) { ++ novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, &info); + } +- novfs_unlock_inode_cache +- (dir); ++ novfs_unlock_inode_cache(dir); + } + + } +@@ -3006,11 +2628,10 @@ int novfs_i_mknod(struct inode *dir, str + return (-EACCES); + } + +- if (((struct inode_data *) dir->i_private)) { +- uid = novfs_scope_get_uid(((struct inode_data *) dir->i_private)->Scope); ++ if (((struct inode_data *)dir->i_private)) { ++ uid = novfs_scope_get_uid(((struct inode_data *)dir->i_private)->Scope); + if (mode & (S_IFREG | S_IFDIR)) { +- inode = +- novfs_get_inode(dir->i_sb, mode, dev, uid, 0, &dentry->d_name); ++ inode = novfs_get_inode(dir->i_sb, mode, dev, uid, 0, &dentry->d_name); + } + } + if (inode) { +@@ -3022,8 +2643,7 @@ int novfs_i_mknod(struct inode *dir, str + memset(&info, 0, sizeof(info)); + info.mode = inode->i_mode; + novfs_lock_inode_cache(dir); +- novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, +- &info); ++ novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, &info); + novfs_unlock_inode_cache(dir); + + dput(parent); +@@ -3034,8 +2654,7 @@ int novfs_i_mknod(struct inode *dir, str + return retCode; + } + +-int novfs_i_rename(struct inode *odir, struct dentry *od, struct inode *ndir, +- struct dentry *nd) ++int novfs_i_rename(struct inode *odir, struct dentry *od, struct inode *ndir, struct dentry *nd) + { + int retCode = -ENOTEMPTY; + char *newpath, *newbuf, *newcon; +@@ -3053,8 +2672,7 @@ int novfs_i_rename(struct inode *odir, s + return (-EACCES); + } + +- DbgPrint("odir=0x%p ino=%d ndir=0x%p ino=%d", odir, +- odir->i_ino, ndir, ndir->i_ino); ++ DbgPrint("odir=0x%p ino=%d ndir=0x%p ino=%d", odir, odir->i_ino, ndir, ndir->i_ino); + + oldbuf = kmalloc(PATH_LENGTH_BUFFER * 2, GFP_KERNEL); + newbuf = oldbuf + PATH_LENGTH_BUFFER; +@@ -3065,15 +2683,12 @@ int novfs_i_rename(struct inode *odir, s + oldlen = PATH_LENGTH_BUFFER - (int)(oldpath - oldbuf); + newlen = PATH_LENGTH_BUFFER - (int)(newpath - newbuf); + +- DbgPrint("od=0x%p od->inode=0x%p od->inode->i_ino=%d %s", +- od, od->d_inode, od->d_inode->i_ino, oldpath); ++ DbgPrint("od=0x%p od->inode=0x%p od->inode->i_ino=%d %s", od, od->d_inode, od->d_inode->i_ino, oldpath); + if (nd->d_inode) { + DbgPrint("nd=0x%p nd->inode=0x%p nd->inode->i_ino=%d %s", +- nd, nd->d_inode, nd->d_inode->i_ino, +- newpath); ++ nd, nd->d_inode, nd->d_inode->i_ino, newpath); + } else { +- DbgPrint("nd=0x%p nd->inode=0x%p %s", +- nd, nd->d_inode, newpath); ++ DbgPrint("nd=0x%p nd->inode=0x%p %s", nd, nd->d_inode, newpath); + } + + /* +@@ -3084,17 +2699,12 @@ int novfs_i_rename(struct inode *odir, s + DbgPrint("newcon=0x%p newpath=0x%p", newcon, newpath); + DbgPrint("oldcon=0x%p oldpath=0x%p", oldcon, oldpath); + retCode = -EXDEV; +- if (newcon && oldcon +- && ((int)(newcon - newpath) == +- (int)(oldcon - oldpath))) { ++ if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { + newcon = strchr(newcon + 1, '\\'); + oldcon = strchr(oldcon + 1, '\\'); +- DbgPrint("2; newcon=0x%p newpath=0x%p", +- newcon, newpath); +- DbgPrint("2; oldcon=0x%p oldpath=0x%p", +- oldcon, oldpath); +- if (newcon && oldcon && +- ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { ++ DbgPrint("2; newcon=0x%p newpath=0x%p", newcon, newpath); ++ DbgPrint("2; oldcon=0x%p oldpath=0x%p", oldcon, oldpath); ++ if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { + newname.name = newpath; + 
newname.len = (int)(newcon - newpath); + newname.hash = 0; +@@ -3104,23 +2714,18 @@ int novfs_i_rename(struct inode *odir, s + oldname.hash = 0; + if (!novfs_d_strcmp(&newname, &oldname)) { + +- if (od->d_inode +- && od->d_inode->i_private) { ++ if (od->d_inode && od->d_inode->i_private) { + +- if (nd->d_inode +- && nd->d_inode-> +- i_private) { ++ if (nd->d_inode && nd->d_inode->i_private) { + session = + novfs_scope_get_sessionId +- (((struct inode_data *) ndir->i_private)->Scope); +- if (0 == +- SC_PRESENT +- (session)) { +- ((struct inode_data *) ndir->i_private)->Scope = novfs_get_scope(nd); +- session +- = +- novfs_scope_get_sessionId +- (((struct inode_data *) ndir->i_private)->Scope); ++ (((struct inode_data *)ndir->i_private)->Scope); ++ if (0 == SC_PRESENT(session)) { ++ ((struct inode_data *)ndir->i_private)->Scope = ++ novfs_get_scope(nd); ++ session = ++ novfs_scope_get_sessionId(((struct inode_data *)ndir-> ++ i_private)->Scope); + } + + retCode = +@@ -3131,20 +2736,28 @@ int novfs_i_rename(struct inode *odir, s + ia.ia_valid = ATTR_MODE; + ia.ia_mode = S_IRWXU; + novfs_set_attr(newpath, &ia, session); +- retCode = novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), session); ++ retCode = ++ novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), ++ session); + } + + } + +- session = novfs_scope_get_sessionId(((struct inode_data *) ndir->i_private)->Scope); ++ session = ++ novfs_scope_get_sessionId(((struct inode_data *)ndir->i_private)-> ++ Scope); + if (0 == SC_PRESENT(session)) { + ((struct inode_data *)ndir->i_private)->Scope = novfs_get_scope(nd); +- session = novfs_scope_get_sessionId(((struct inode_data *) ndir->i_private)->Scope); ++ session = ++ novfs_scope_get_sessionId(((struct inode_data *)ndir-> ++ i_private)->Scope); + } +- retCode = novfs_rename_file(S_ISDIR(od->d_inode->i_mode), oldpath, oldlen - 1, newpath, newlen - 1, session); ++ retCode = ++ novfs_rename_file(S_ISDIR(od->d_inode->i_mode), oldpath, oldlen - 1, ++ newpath, newlen - 1, session); + + if (!retCode) { +- info = (struct novfs_entry_info *) oldbuf; ++ info = (struct novfs_entry_info *)oldbuf; + od->d_time = 0; + novfs_remove_inode_entry(odir, &od->d_name, 0); + novfs_remove_inode_entry(ndir, &nd->d_name, 0); +@@ -3152,9 +2765,9 @@ int novfs_i_rename(struct inode *odir, s + nd->d_time = jiffies + (novfs_update_timeout * HZ); + + if (od->d_inode && od->d_inode->i_ino) { +- ino = od->d_inode-> i_ino; ++ ino = od->d_inode->i_ino; + } else { +- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); ++ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number); + } + novfs_add_inode_entry(ndir, &nd->d_name, ino, info); + } +@@ -3172,7 +2785,6 @@ int novfs_i_rename(struct inode *odir, s + return (retCode); + } + +- + int novfs_i_setattr(struct dentry *dentry, struct iattr *attr) + { + char *path, *buf; +@@ -3192,15 +2804,10 @@ int novfs_i_setattr(struct dentry *dentr + } + + if (inode && inode->i_private) { +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> +- Scope); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + if (0 == SC_PRESENT(session)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(dentry); +- session = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); ++ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + } + + buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); +@@ 
-3211,40 +2818,31 @@ int novfs_i_setattr(struct dentry *dentr + strcpy(mtime_buf, "Unspecified"); + strcpy(ctime_buf, "Unspecified"); + if (attr->ia_valid & ATTR_ATIME) { +- ctime_r(&attr->ia_atime.tv_sec, +- atime_buf); ++ ctime_r(&attr->ia_atime.tv_sec, atime_buf); + } + if (attr->ia_valid & ATTR_MTIME) { +- ctime_r(&attr->ia_mtime.tv_sec, +- mtime_buf); ++ ctime_r(&attr->ia_mtime.tv_sec, mtime_buf); + } + if (attr->ia_valid & ATTR_CTIME) { +- ctime_r(&attr->ia_ctime.tv_sec, +- ctime_buf); ++ ctime_r(&attr->ia_ctime.tv_sec, ctime_buf); + } + /* Removed for Bug 132374. jlt */ + __DbgPrint("%s: %s\n" +- " ia_valid: 0x%x\n" +- " ia_mode: 0%o\n" +- " ia_uid: %d\n" +- " ia_gid: %d\n" +- " ia_size: %lld\n" +- " ia_atime: %s\n" +- " ia_mtime: %s\n" +- " ia_ctime: %s\n", __func__, +- path, +- attr->ia_valid, +- attr->ia_mode, +- attr->ia_uid, +- attr->ia_gid, +- attr->ia_size, +- atime_buf, mtime_buf, ctime_buf); +- +- if (ia_valid +- && !(retVal = +- novfs_set_attr(path, attr, session))) { +- ((struct inode_data *) inode->i_private)-> +- Flags |= UPDATE_INODE; ++ " ia_valid: 0x%x\n" ++ " ia_mode: 0%o\n" ++ " ia_uid: %d\n" ++ " ia_gid: %d\n" ++ " ia_size: %lld\n" ++ " ia_atime: %s\n" ++ " ia_mtime: %s\n" ++ " ia_ctime: %s\n", __func__, ++ path, ++ attr->ia_valid, ++ attr->ia_mode, ++ attr->ia_uid, attr->ia_gid, attr->ia_size, atime_buf, mtime_buf, ctime_buf); ++ ++ if (ia_valid && !(retVal = novfs_set_attr(path, attr, session))) { ++ ((struct inode_data *)inode->i_private)->Flags |= UPDATE_INODE; + + if (ia_valid & ATTR_ATIME) + inode->i_atime = attr->ia_atime; +@@ -3253,10 +2851,7 @@ int novfs_i_setattr(struct dentry *dentr + if (ia_valid & ATTR_CTIME) + inode->i_ctime = attr->ia_ctime; + if (ia_valid & ATTR_MODE) { +- inode->i_mode = +- attr-> +- ia_mode & (S_IFMT | +- S_IRWXU); ++ inode->i_mode = attr->ia_mode & (S_IFMT | S_IRWXU); + } + } + } +@@ -3268,8 +2863,7 @@ int novfs_i_setattr(struct dentry *dentr + return (retVal); + } + +-int novfs_i_getattr(struct vfsmount *mnt, struct dentry *dentry, +- struct kstat *kstat) ++int novfs_i_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *kstat) + { + int retCode = 0; + char atime_buf[32]; +@@ -3296,13 +2890,9 @@ int novfs_i_getattr(struct vfsmount *mnt + + buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); + if (buf) { +- path = +- novfs_dget_path(dentry, buf, +- PATH_LENGTH_BUFFER); ++ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); + if (path) { +- retCode = +- novfs_get_file_info(path, &info, +- session); ++ retCode = novfs_get_file_info(path, &info, session); + if (!retCode) { + update_inode(inode, &info); + id->Flags &= ~UPDATE_INODE; +@@ -3334,35 +2924,30 @@ int novfs_i_getattr(struct vfsmount *mnt + ctime_r(&kstat->ctime.tv_sec, ctime_buf); + + __DbgPrint("%s: 0x%x 0x%p <%.*s>\n" +- " ino: %d\n" +- " dev: 0x%x\n" +- " mode: 0%o\n" +- " nlink: 0x%x\n" +- " uid: 0x%x\n" +- " gid: 0x%x\n" +- " rdev: 0x%x\n" +- " size: 0x%llx\n" +- " atime: %s\n" +- " mtime: %s\n" +- " ctime: %s\n" +- " blksize: 0x%x\n" +- " blocks: 0x%x\n", __func__, +- retCode, dentry, dentry->d_name.len, dentry->d_name.name, +- kstat->ino, +- kstat->dev, +- kstat->mode, +- kstat->nlink, +- kstat->uid, +- kstat->gid, +- kstat->rdev, +- kstat->size, +- atime_buf, +- mtime_buf, ctime_buf, kstat->blksize, kstat->blocks); ++ " ino: %d\n" ++ " dev: 0x%x\n" ++ " mode: 0%o\n" ++ " nlink: 0x%x\n" ++ " uid: 0x%x\n" ++ " gid: 0x%x\n" ++ " rdev: 0x%x\n" ++ " size: 0x%llx\n" ++ " atime: %s\n" ++ " mtime: %s\n" ++ " ctime: %s\n" ++ " blksize: 0x%x\n" ++ " blocks: 
0x%x\n", __func__, ++ retCode, dentry, dentry->d_name.len, dentry->d_name.name, ++ kstat->ino, ++ kstat->dev, ++ kstat->mode, ++ kstat->nlink, ++ kstat->uid, ++ kstat->gid, kstat->rdev, kstat->size, atime_buf, mtime_buf, ctime_buf, kstat->blksize, kstat->blocks); + return (retCode); + } + +-ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, +- size_t buffer_size) ++ssize_t novfs_i_getxattr(struct dentry * dentry, const char *name, void *buffer, size_t buffer_size) + { + struct inode *inode = dentry->d_inode; + struct novfs_schandle sessionId; +@@ -3374,23 +2959,17 @@ ssize_t novfs_i_getxattr(struct dentry * + SC_INITIALIZE(sessionId); + + DbgPrint("Ian"); /*%.*s\n", dentry->d_name.len, dentry->d_name.name); */ +- DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", +- dentry->d_name.len, dentry->d_name.name); ++ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name); + DbgPrint("name %s", name); + DbgPrint("size %u", buffer_size); + + if (inode && inode->i_private) { +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> +- Scope); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + DbgPrint("SessionId = %u", sessionId); + //if (0 == sessionId) + if (0 == SC_PRESENT(sessionId)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(dentry); +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + DbgPrint("SessionId = %u", sessionId); + } + } +@@ -3402,22 +2981,16 @@ ssize_t novfs_i_getxattr(struct dentry * + if (path) { + bufRead = kmalloc(XA_BUFFER, GFP_KERNEL); + if (bufRead) { +- retxcode = +- novfs_getx_file_info(path, name, bufRead, +- XA_BUFFER, &dataLen, +- sessionId); +- DbgPrint("after novfs_GetX_File_Info retxcode = %d", +- retxcode); ++ retxcode = novfs_getx_file_info(path, name, bufRead, XA_BUFFER, &dataLen, sessionId); ++ DbgPrint("after novfs_GetX_File_Info retxcode = %d", retxcode); + if (!retxcode) { + novfs_dump(64, bufRead); + if (buffer_size != 0) { + if (buffer_size >= dataLen) { +- memcpy(buffer, bufRead, +- dataLen); ++ memcpy(buffer, bufRead, dataLen); + } else { + DbgPrint("(!!!) not enough buffer_size. 
buffer_size = %d, dataLen = %d", +- buffer_size, +- dataLen); ++ buffer_size, dataLen); + retxcode = -ERANGE; + } + } +@@ -3439,8 +3012,7 @@ ssize_t novfs_i_getxattr(struct dentry * + return (dataLen); + } + +-int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, +- size_t value_size, int flags) ++int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, size_t value_size, int flags) + { + + struct inode *inode = dentry->d_inode; +@@ -3453,24 +3025,18 @@ int novfs_i_setxattr(struct dentry *dent + SC_INITIALIZE(sessionId); + + DbgPrint("Ian"); /*%.*s\n", dentry->d_name.len, dentry->d_name.name); */ +- DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", +- dentry->d_name.len, dentry->d_name.name); ++ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name); + DbgPrint("name %s", name); + DbgPrint("value_size %u", value_size); + DbgPrint("flags %d", flags); + + if (inode && inode->i_private) { +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> +- Scope); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + DbgPrint("SessionId = %u", sessionId); + //if (0 == sessionId) + if (0 == SC_PRESENT(sessionId)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(dentry); +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + DbgPrint("SessionId = %u", sessionId); + } + } +@@ -3479,10 +3045,7 @@ int novfs_i_setxattr(struct dentry *dent + if (buf) { + path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); + if (path) { +- retxcode = +- novfs_setx_file_info(path, name, value, value_size, +- &bytesWritten, flags, +- sessionId); ++ retxcode = novfs_setx_file_info(path, name, value, value_size, &bytesWritten, flags, sessionId); + if (!retxcode) { + DbgPrint("bytesWritten = %u", bytesWritten); + } +@@ -3500,7 +3063,7 @@ int novfs_i_setxattr(struct dentry *dent + return (retError); + } + +-ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) ++ssize_t novfs_i_listxattr(struct dentry * dentry, char *buffer, size_t buffer_size) + { + struct inode *inode = dentry->d_inode; + struct novfs_schandle sessionId; +@@ -3511,22 +3074,16 @@ ssize_t novfs_i_listxattr(struct dentry + SC_INITIALIZE(sessionId); + + DbgPrint("Ian"); //%.*s\n", dentry->d_name.len, dentry->d_name.name); +- DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", +- dentry->d_name.len, dentry->d_name.name); ++ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name); + DbgPrint("size %u", buffer_size); + + if (inode && inode->i_private) { +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> +- Scope); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + DbgPrint("SessionId = %u", sessionId); + //if (0 == sessionId) + if (0 == SC_PRESENT(sessionId)) { +- ((struct inode_data *) inode->i_private)->Scope = +- novfs_get_scope(dentry); +- sessionId = +- novfs_scope_get_sessionId(((struct inode_data *) inode-> +- i_private)->Scope); ++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); ++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); + 
DbgPrint("SessionId = %u", sessionId); + } + } +@@ -3538,19 +3095,15 @@ ssize_t novfs_i_listxattr(struct dentry + if (path) { + bufList = kmalloc(XA_BUFFER, GFP_KERNEL); + if (bufList) { +- retxcode = +- novfs_listx_file_info(path, bufList, +- XA_BUFFER, &dataLen, +- sessionId); ++ retxcode = novfs_listx_file_info(path, bufList, XA_BUFFER, &dataLen, sessionId); + + novfs_dump(64, bufList); + if (buffer_size != 0) { + if (buffer_size >= dataLen) { +- memcpy(buffer, bufList, +- dataLen); ++ memcpy(buffer, bufList, dataLen); + } else { + DbgPrint("(!!!) not enough buffer_size. buffer_size = %d, dataLen = %d", +- buffer_size, dataLen); ++ buffer_size, dataLen); + retxcode = -1; + } + } +@@ -3598,8 +3151,7 @@ int novfs_notify_change(struct dentry *d + struct inode *inode = dentry->d_inode; + + DbgPrint("Dentry=0x%p Name=%.*s Inode=0x%p Ino=%d ia_valid=0x%x", +- dentry, dentry->d_name.len, dentry->d_name.name, inode, +- inode->i_ino, attr->ia_valid); ++ dentry, dentry->d_name.len, dentry->d_name.name, inode, inode->i_ino, attr->ia_valid); + return (0); + } + +@@ -3610,8 +3162,7 @@ void novfs_clear_inode(struct inode *ino + if (inode->i_private) { + struct inode_data *id = inode->i_private; + +- DbgPrint("inode=0x%p ino=%d Scope=0x%p Name=%s", +- inode, inode->i_ino, id->Scope, id->Name); ++ DbgPrint("inode=0x%p ino=%d Scope=0x%p Name=%s", inode, inode->i_ino, id->Scope, id->Name); + + novfs_free_inode_cache(inode); + +@@ -3641,17 +3192,12 @@ int novfs_show_options(struct seq_file * + my_path.dentry = m->mnt_root; + path = d_path(&my_path, buf, PATH_LENGTH_BUFFER); + if (path) { +- if (!novfs_current_mnt +- || (novfs_current_mnt +- && strcmp(novfs_current_mnt, path))) { ++ if (!novfs_current_mnt || (novfs_current_mnt && strcmp(novfs_current_mnt, path))) { + DbgPrint("%.*s %.*s %s", + m->mnt_root->d_name.len, + m->mnt_root->d_name.name, +- m->mnt_mountpoint->d_name.len, +- m->mnt_mountpoint->d_name.name, path); +- tmp = kmalloc(PATH_LENGTH_BUFFER - +- (int)(path - buf), +- GFP_KERNEL); ++ m->mnt_mountpoint->d_name.len, m->mnt_mountpoint->d_name.name, path); ++ tmp = kmalloc(PATH_LENGTH_BUFFER - (int)(path - buf), GFP_KERNEL); + if (tmp) { + strcpy(tmp, path); + path = novfs_current_mnt; +@@ -3692,10 +3238,7 @@ int novfs_statfs(struct dentry *de, stru + buf->f_type = sb->s_magic; + buf->f_bsize = sb->s_blocksize; + buf->f_namelen = NW_MAX_PATH_LENGTH; +- buf->f_blocks = +- (sector_t) (td + +- (uint64_t) (sb->s_blocksize - +- 1)) >> (uint64_t) sb->s_blocksize_bits; ++ buf->f_blocks = (sector_t) (td + (uint64_t) (sb->s_blocksize - 1)) >> (uint64_t) sb->s_blocksize_bits; + buf->f_bfree = (sector_t) fd >> (uint64_t) sb->s_blocksize_bits; + buf->f_bavail = (sector_t) buf->f_bfree; + buf->f_files = (sector_t) te; +@@ -3720,8 +3263,7 @@ int novfs_statfs(struct dentry *de, stru + return 0; + } + +-struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, +- uid_t Uid, ino_t ino, struct qstr *name) ++struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, uid_t Uid, ino_t ino, struct qstr *name) + { + struct inode *inode = new_inode(sb); + +@@ -3733,7 +3275,7 @@ struct inode *novfs_get_inode(struct sup + inode->i_blkbits = sb->s_blocksize_bits; + inode->i_blocks = 0; + inode->i_rdev = 0; +- inode->i_ino = (ino) ? ino : (ino_t)atomic_inc_return(&novfs_Inode_Number); ++ inode->i_ino = (ino) ? 
ino : (ino_t) atomic_inc_return(&novfs_Inode_Number); + if (novfs_page_cache) { + inode->i_mapping->a_ops = &novfs_aops; + } else { +@@ -3744,13 +3286,9 @@ struct inode *novfs_get_inode(struct sup + inode->i_atime.tv_nsec = 0; + inode->i_mtime = inode->i_ctime = inode->i_atime; + +- DbgPrint("Inode=0x%p I_ino=%d len=%d", +- inode, inode->i_ino, name->len); ++ DbgPrint("Inode=0x%p I_ino=%d len=%d", inode, inode->i_ino, name->len); + +- if (NULL != +- (inode->i_private = +- kmalloc(sizeof(struct inode_data) + name->len, +- GFP_KERNEL))) { ++ if (NULL != (inode->i_private = kmalloc(sizeof(struct inode_data) + name->len, GFP_KERNEL))) { + struct inode_data *id; + id = inode->i_private; + +@@ -3863,20 +3401,15 @@ int novfs_fill_super(struct super_block + inode->i_size = info.size = 0; + inode->i_uid = info.uid = 0; + inode->i_gid = info.gid = 0; +- inode->i_atime = info.atime = +- inode->i_ctime = info.ctime = +- inode->i_mtime = info.mtime = CURRENT_TIME; ++ inode->i_atime = info.atime = inode->i_ctime = info.ctime = inode->i_mtime = info.mtime = CURRENT_TIME; + + server = d_alloc(novfs_root, &name); + if (server) { + server->d_op = &novfs_dentry_operations; + server->d_time = 0xffffffff; + d_add(server, inode); +- DbgPrint("d_add %s 0x%p", +- SERVER_DIRECTORY_NAME, server); +- novfs_add_inode_entry(novfs_root->d_inode, +- &name, inode->i_ino, +- &info); ++ DbgPrint("d_add %s 0x%p", SERVER_DIRECTORY_NAME, server); ++ novfs_add_inode_entry(novfs_root->d_inode, &name, inode->i_ino, &info); + } + } + +@@ -3891,20 +3424,15 @@ int novfs_fill_super(struct super_block + inode->i_size = info.size = 0; + inode->i_uid = info.uid = 0; + inode->i_gid = info.gid = 0; +- inode->i_atime = info.atime = +- inode->i_ctime = info.ctime = +- inode->i_mtime = info.mtime = CURRENT_TIME; ++ inode->i_atime = info.atime = inode->i_ctime = info.ctime = inode->i_mtime = info.mtime = CURRENT_TIME; + tree = d_alloc(novfs_root, &name); + if (tree) { + tree->d_op = &novfs_dentry_operations; + tree->d_time = 0xffffffff; + + d_add(tree, inode); +- DbgPrint("d_add %s 0x%p", +- TREE_DIRECTORY_NAME, tree); +- novfs_add_inode_entry(novfs_root->d_inode, +- &name, inode->i_ino, +- &info); ++ DbgPrint("d_add %s 0x%p", TREE_DIRECTORY_NAME, tree); ++ novfs_add_inode_entry(novfs_root->d_inode, &name, inode->i_ino, &info); + } + } + } +@@ -3912,8 +3440,7 @@ int novfs_fill_super(struct super_block + return (0); + } + +-static int novfs_get_sb(struct file_system_type *Fstype, int Flags, +- const char *Dev_name, void *Data, struct vfsmount *Mnt) ++static int novfs_get_sb(struct file_system_type *Fstype, int Flags, const char *Dev_name, void *Data, struct vfsmount *Mnt) + { + DbgPrint("Fstype=0x%x Dev_name=%s", Fstype, Dev_name); + return get_sb_nodev(Fstype, Flags, Data, novfs_fill_super, Mnt); +@@ -3925,8 +3452,7 @@ static void novfs_kill_sb(struct super_b + kill_litter_super(super); + } + +-ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, +- loff_t * ppos) ++ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, loff_t * ppos) + { + ssize_t retval = 0; + +@@ -3935,8 +3461,7 @@ ssize_t novfs_Control_read(struct file * + return retval; + } + +-ssize_t novfs_Control_write(struct file * file, const char *buf, size_t nbytes, +- loff_t * ppos) ++ssize_t novfs_Control_write(struct file * file, const char *buf, size_t nbytes, loff_t * ppos) + { + ssize_t retval = 0; + +@@ -3947,8 +3472,7 @@ ssize_t novfs_Control_write(struct file + return (retval); + } + +-int novfs_Control_ioctl(struct inode *inode, 
struct file *file, +- unsigned int cmd, unsigned long arg) ++int novfs_Control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) + { + int retval = 0; + +@@ -3976,15 +3500,13 @@ int __init init_novfs(void) + + retCode = bdi_init(&novfs_backing_dev_info); + +- if(!retCode) ++ if (!retCode) + retCode = bdi_register(&novfs_backing_dev_info, NULL, "novfs-map"); + if (retCode) { + bdi_destroy(&novfs_backing_dev_info); + goto bdi_fail; + } + +- +- + retCode = novfs_proc_init(); + + novfs_profile_init(); +@@ -4044,8 +3566,7 @@ void novfs_unlock_inode_cache(struct ino + } + } + +-int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, +- ino_t * ino, struct novfs_entry_info *info) ++int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, ino_t * ino, struct novfs_entry_info *info) + /* + * Arguments: struct inode *i - pointer to directory inode + * +@@ -4094,8 +3615,7 @@ int novfs_enumerate_inode_cache(struct i + } + + /* DirCacheLock should be held before calling this routine. */ +-int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, +- struct novfs_entry_info *info) ++int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info) + { + struct inode_data *id; + struct novfs_dir_cache *dc; +@@ -4133,8 +3653,7 @@ int novfs_get_entry(struct inode *i, str + } + + /*DirCacheLock should be held before calling this routine. */ +-int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, +- struct novfs_entry_info *info) ++int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, struct novfs_entry_info *info) + { + int retVal = -1; + loff_t count = 0; +@@ -4153,8 +3672,7 @@ int novfs_get_entry_by_pos(struct inode + } + + /* DirCacheLock should be held before calling this routine. */ +-int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, +- struct novfs_entry_info *info, u64 * EntryTime) ++int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info, u64 * EntryTime) + { + struct inode_data *id; + struct novfs_dir_cache *dc; +@@ -4255,8 +3773,7 @@ void novfs_invalidate_inode_cache(struct + } + + /*++======================================================================*/ +-struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, +- ino_t ino) ++struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, ino_t ino) + /* + * Returns: struct novfs_dir_cache entry if match + * NULL - if there is no match. +@@ -4282,8 +3799,7 @@ struct novfs_dir_cache *novfs_lookup_ino + n = (char *)name->name; + hash = name->hash; + } +- DbgPrint("inode: 0x%p; name: %.*s; hash: 0x%x;\n" +- " len: %d; ino: %d", i, nl, n, hash, nl, ino); ++ DbgPrint("inode: 0x%p; name: %.*s; hash: 0x%x;\n" " len: %d; ino: %d", i, nl, n, hash, nl, ino); + + list_for_each(l, &id->DirCache) { + dc = list_entry(l, struct novfs_dir_cache, list); +@@ -4297,9 +3813,7 @@ struct novfs_dir_cache *novfs_lookup_ino + dc, dc->ino, dc->hash, dc->nameLen, dc->nameLen, dc->name); + */ + if ((name->hash == dc->hash) && +- (name->len == dc->nameLen) && +- (0 == +- memcmp(name->name, dc->name, name->len))) { ++ (name->len == dc->nameLen) && (0 == memcmp(name->name, dc->name, name->len))) { + retVal = dc; + break; + } +@@ -4351,8 +3865,7 @@ int novfs_lookup_validate(struct inode * + * + * DirCacheLock should be held before calling this routine. 
+ */ +-int novfs_add_inode_entry(struct inode *i, +- struct qstr *name, ino_t ino, struct novfs_entry_info *info) ++int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info) + { + struct inode_data *id; + struct novfs_dir_cache *new; +@@ -4375,8 +3888,7 @@ int novfs_add_inode_entry(struct inode * + DCCount++; + DbgPrint("inode: 0x%p; id: 0x%p; DC: 0x%p; new: 0x%p; " + "name: %.*s; ino: %d; size: %lld; mode: 0x%x", +- i, id, &id->DirCache, new, name->len, +- name->name, ino, info->size, info->mode); ++ i, id, &id->DirCache, new, name->len, name->name, ino, info->size, info->mode); + + retVal = 0; + new->flags = ENTRY_VALID; +@@ -4400,8 +3912,7 @@ int novfs_add_inode_entry(struct inode * + /* + * DirCacheLock should be held before calling this routine. + */ +-int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, +- struct novfs_entry_info *info) ++int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info) + { + struct inode_data *id; + struct novfs_dir_cache *dc; +@@ -4422,9 +3933,7 @@ int novfs_update_entry(struct inode *i, + ctime_r(&info->mtime.tv_sec, mtime_buf); + ctime_r(&info->ctime.tv_sec, ctime_buf); + DbgPrint("inode: 0x%p; name: %.*s; ino: %d; size: %lld; " +- "atime: %s; mtime: %s; ctime: %s", +- i, nl, n, ino, info->size, atime_buf, mtime_buf, +- ctime_buf); ++ "atime: %s; mtime: %s; ctime: %s", i, nl, n, ino, info->size, atime_buf, mtime_buf, ctime_buf); + + dc = novfs_lookup_inode_cache(i, name, ino); + if (dc) { +@@ -4445,9 +3954,7 @@ int novfs_update_entry(struct inode *i, + "mtime: %s %d; ctime: %s; hash: 0x%x; " + " nameLen: %d; name: %s", + dc, dc->flags, dc->jiffies, dc->ino, dc->size, +- dc->mode, atime_buf, mtime_buf, +- dc->mtime.tv_nsec, ctime_buf, dc->hash, +- dc->nameLen, dc->name); ++ dc->mode, atime_buf, mtime_buf, dc->mtime.tv_nsec, ctime_buf, dc->hash, dc->nameLen, dc->name); + } + } + DbgPrint("return %d", retVal); +@@ -4479,8 +3986,7 @@ void novfs_remove_inode_entry(struct ino + "[name: %.*s; ino: %d; next: 0x%p; " + "prev: 0x%p]", + i, id, &id->DirCache, nl, n, ino, dc, +- dc->nameLen, dc->name, dc->ino, dc->list.next, +- dc->list.prev); ++ dc->nameLen, dc->name, dc->ino, dc->list.next, dc->list.prev); + list_del(&dc->list); + kfree(dc); + DCCount--; +@@ -4506,9 +4012,7 @@ void novfs_free_invalid_entries(struct i + dc = list_entry(l, struct novfs_dir_cache, list); + if (0 == (dc->flags & ENTRY_VALID)) { + DbgPrint("inode: 0x%p; id: 0x%p; entry: 0x%p; " +- "name: %.*s; ino: %d", +- i, id, dc, dc->nameLen, dc->name, +- dc->ino); ++ "name: %.*s; ino: %d", i, id, dc, dc->nameLen, dc->name, dc->ino); + l = l->prev; + list_del(&dc->list); + kfree(dc); +@@ -4565,25 +4069,20 @@ void novfs_dump_inode(void *pf) + + pfunc("Inode=0x%p I_ino=%d\n", inode, inode->i_ino); + +- pfunc(" atime=%s\n", +- ctime_r(&inode->i_atime.tv_sec, atime_buf)); +- pfunc(" ctime=%s\n", +- ctime_r(&inode->i_mtime.tv_sec, atime_buf)); +- pfunc(" mtime=%s\n", +- ctime_r(&inode->i_ctime.tv_sec, atime_buf)); ++ pfunc(" atime=%s\n", ctime_r(&inode->i_atime.tv_sec, atime_buf)); ++ pfunc(" ctime=%s\n", ctime_r(&inode->i_mtime.tv_sec, atime_buf)); ++ pfunc(" mtime=%s\n", ctime_r(&inode->i_ctime.tv_sec, atime_buf)); + pfunc(" size=%lld\n", inode->i_size); + pfunc(" mode=0%o\n", inode->i_mode); + pfunc(" count=0%o\n", atomic_read(&inode->i_count)); + } + +- pfunc(" nofs_inode_data: 0x%p Name=%s Scope=0x%p\n", id, id->Name, +- id->Scope); ++ pfunc(" nofs_inode_data: 0x%p Name=%s Scope=0x%p\n", id, 
id->Name, id->Scope); + + if (id->DirCache.next) { + list_for_each(l, &id->DirCache) { + dccnt++; +- dc = list_entry(l, struct novfs_dir_cache, +- list); ++ dc = list_entry(l, struct novfs_dir_cache, list); + ctime_r(&dc->atime.tv_sec, atime_buf); + ctime_r(&dc->mtime.tv_sec, mtime_buf); + ctime_r(&dc->ctime.tv_sec, ctime_buf); +@@ -4602,15 +4101,13 @@ void novfs_dump_inode(void *pf) + " name: %s\n", + dc, dc->flags, dc->jiffies, + dc->ino, dc->size, dc->mode, +- atime_buf, mtime_buf, ctime_buf, +- dc->hash, dc->nameLen, dc->name); ++ atime_buf, mtime_buf, ctime_buf, dc->hash, dc->nameLen, dc->name); + } + } + } + up(&InodeList_lock); + +- pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, +- dccnt); ++ pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, dccnt); + + } + +--- a/fs/novfs/nwcapi.c ++++ b/fs/novfs/nwcapi.c +@@ -31,11 +31,13 @@ + #define strlen_user(str) strnlen_user(str, ~0UL >> 1) + #endif + +-static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply); +-static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply); ++static void GetUserData(struct nwc_scan_conn_info *connInfo, struct novfs_xplat_call_request *cmd, ++ struct novfs_xplat_call_reply *reply); ++static void GetConnData(struct nwc_get_conn_info *connInfo, struct novfs_xplat_call_request *cmd, ++ struct novfs_xplat_call_reply *reply); + + /*++======================================================================*/ +-int novfs_open_conn_by_name(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) ++int novfs_open_conn_by_name(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) + { + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; +@@ -63,7 +65,7 @@ int novfs_open_conn_by_name(struct novfs + cmd->NwcCommand = NWC_OPEN_CONN_BY_NAME; + + cmd->dataLen = datalen; +- openConn = (struct nwd_open_conn_by_name *) cmd->data; ++ openConn = (struct nwd_open_conn_by_name *)cmd->data; + + openConn->nameLen = pnamelen; + openConn->serviceLen = stypelen; +@@ -78,21 +80,18 @@ int novfs_open_conn_by_name(struct novfs + data += openConn->nameLen; + cpylen = copy_from_user(data, ocbn.pServiceType, openConn->serviceLen); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + /* + * we got reply data from the daemon + */ +- connReply = (struct nwd_open_conn_by_name *) reply->data; ++ connReply = (struct nwd_open_conn_by_name *)reply->data; + retCode = reply->Reply.ErrorCode; + if (!retCode) { + /* + * we got valid data. 
+ */ +- connReply = (struct nwd_open_conn_by_name *) reply->data; ++ connReply = (struct nwd_open_conn_by_name *)reply->data; + ocbn.RetConnHandle = HandletoUint32(connReply->newConnHandle); + *Handle = connReply->newConnHandle; + +@@ -107,7 +106,7 @@ int novfs_open_conn_by_name(struct novfs + + } + +-int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) ++int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) + { + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; +@@ -131,7 +130,7 @@ int novfs_open_conn_by_addr(struct novfs + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_OPEN_CONN_BY_ADDRESS; + cmd->dataLen = datalen; +- openConn = (struct nwd_open_conn_by_addr *) cmd->data; ++ openConn = (struct nwd_open_conn_by_addr *)cmd->data; + + cpylen = copy_from_user(&tranAddr, ocba.pTranAddr, sizeof(tranAddr)); + if (tranAddr.uAddressLength > sizeof(addr)) { +@@ -151,28 +150,23 @@ int novfs_open_conn_by_addr(struct novfs + DbgPrint("addr"); + novfs_dump(sizeof(addr), addr); + +- openConn->TranAddr.oAddress = *(unsigned int *) (&addr[2]); ++ openConn->TranAddr.oAddress = *(unsigned int *)(&addr[2]); + +- retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + /* + * we got reply data from the daemon + */ +- connReply = (struct nwd_open_conn_by_addr *) reply->data; ++ connReply = (struct nwd_open_conn_by_addr *)reply->data; + retCode = reply->Reply.ErrorCode; + if (!retCode) { + /* + * we got valid data. + */ +- connReply = (struct nwd_open_conn_by_addr *) reply->data; +- ocba.ConnHandle = +- HandletoUint32(connReply->ConnHandle); ++ connReply = (struct nwd_open_conn_by_addr *)reply->data; ++ ocba.ConnHandle = HandletoUint32(connReply->ConnHandle); + *Handle = connReply->ConnHandle; +- cpylen = +- copy_to_user(pdata->reqData, &ocba, +- sizeof(ocba)); ++ cpylen = copy_to_user(pdata->reqData, &ocba, sizeof(ocba)); + DbgPrint("New Conn Handle = %X", connReply->ConnHandle); + } + kfree(reply); +@@ -184,7 +178,7 @@ out: + + } + +-int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) ++int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) + { + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; +@@ -205,20 +199,17 @@ int novfs_open_conn_by_ref(struct novfs_ + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_OPEN_CONN_BY_REFERENCE; + cmd->dataLen = datalen; +- openConn = (struct nwd_open_conn_by_ref *) cmd->data; ++ openConn = (struct nwd_open_conn_by_ref *)cmd->data; + +- openConn->uConnReference = (void *) (unsigned long) ocbr.uConnReference; ++ openConn->uConnReference = (void *)(unsigned long)ocbr.uConnReference; + openConn->uConnFlags = ocbr.uConnFlags; + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + /* + * we got reply data from the daemon + */ +- openConn = (struct nwd_open_conn_by_ref *) reply->data; ++ openConn = (struct nwd_open_conn_by_ref *)reply->data; + retCode = reply->Reply.ErrorCode; + if (!retCode) { + /* +@@ -306,8 +297,7 @@ int 
novfs_raw_send(struct novfs_xplat *p + * Allocate the cmd Request + */ + cmdlen = datalen + sizeof(*cmd) + sizeof(*ncpData); +- DbgPrint("[XPLAT RawNCP] - Frag Count 0x%X", +- xRequest.uNumRequestFrags); ++ DbgPrint("[XPLAT RawNCP] - Frag Count 0x%X", xRequest.uNumRequestFrags); + DbgPrint("[XPLAT RawNCP] - Total Command Data Len = %x", cmdlen); + + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -325,10 +315,10 @@ int novfs_raw_send(struct novfs_xplat *p + * build the NCP Request + */ + cmd->dataLen = cmdlen - sizeof(*cmd); +- ncpData = (struct nwd_ncp_req *) cmd->data; ++ ncpData = (struct nwd_ncp_req *)cmd->data; + ncpData->replyLen = totalLen; + ncpData->requestLen = datalen; +- ncpData->ConnHandle = (void *) (unsigned long) xRequest.ConnHandle; ++ ncpData->ConnHandle = (void *)(unsigned long)xRequest.ConnHandle; + ncpData->function = xRequest.uFunction; + + reqData = ncpData->data; +@@ -340,10 +330,7 @@ int novfs_raw_send(struct novfs_xplat *p + cFrag++; + } + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + DbgPrint("RawNCP - reply = %x", reply); + DbgPrint("RawNCP - retCode = %x", retCode); + +@@ -351,11 +338,10 @@ int novfs_raw_send(struct novfs_xplat *p + /* + * we got reply data from the daemon + */ +- ncpReply = (struct nwd_ncp_rep *) reply->data; ++ ncpReply = (struct nwd_ncp_rep *)reply->data; + retCode = reply->Reply.ErrorCode; + +- DbgPrint("RawNCP - Reply Frag Count 0x%X", +- xRequest.uNumReplyFrags); ++ DbgPrint("RawNCP - Reply Frag Count 0x%X", xRequest.uNumReplyFrags); + + /* + * We need to copy the reply frags to the packet. +@@ -366,10 +352,9 @@ int novfs_raw_send(struct novfs_xplat *p + totalLen = ncpReply->replyLen; + for (x = 0; x < xRequest.uNumReplyFrags; x++) { + +- DbgPrint("RawNCP - Copy Frag %d: 0x%X", x, +- cFrag->uLength); ++ DbgPrint("RawNCP - Copy Frag %d: 0x%X", x, cFrag->uLength); + +- datalen = min((unsigned long) cFrag->uLength, totalLen); ++ datalen = min((unsigned long)cFrag->uLength, totalLen); + + cpylen = copy_to_user(cFrag->pData, reqData, datalen); + totalLen -= datalen; +@@ -384,7 +369,6 @@ int novfs_raw_send(struct novfs_xplat *p + retCode = -EIO; + } + +- + xRequest.uActualReplyLength = actualReplyLength; + cpylen = copy_to_user(pdata->reqData, &xRequest, sizeof(xRequest)); + +@@ -396,7 +380,7 @@ out: + return (retCode); + } + +-int novfs_conn_close(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) ++int novfs_conn_close(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) + { + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; +@@ -417,16 +401,14 @@ int novfs_conn_close(struct novfs_xplat + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_CLOSE_CONN; + +- nwdClose = (struct nwd_close_conn *) cmd->data; ++ nwdClose = (struct nwd_close_conn *)cmd->data; + cmd->dataLen = sizeof(*nwdClose); + *Handle = nwdClose->ConnHandle = Uint32toHandle(cc.ConnHandle); + + /* + * send the request + */ +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, 0); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -457,16 +439,15 @@ int novfs_sys_conn_close(struct novfs_xp + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_SYS_CLOSE_CONN; + 
+- nwdClose = (struct nwd_close_conn *) cmd->data; ++ nwdClose = (struct nwd_close_conn *)cmd->data; + cmd->dataLen = sizeof(*nwdClose); +- nwdClose->ConnHandle = (void *) (unsigned long) cc.ConnHandle; +- *Handle = (unsigned long) cc.ConnHandle; ++ nwdClose->ConnHandle = (void *)(unsigned long)cc.ConnHandle; ++ *Handle = (unsigned long)cc.ConnHandle; + + /* + * send the request + */ +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -486,7 +467,6 @@ int novfs_login_id(struct novfs_xplat *p + unsigned long cpylen; + struct nwc_string nwcStr; + +- + memset(&server, 0, sizeof(server)); + memset(&username, 0, sizeof(username)); + memset(&password, 0, sizeof(password)); +@@ -536,13 +516,17 @@ int novfs_login_id(struct novfs_xplat *p + password.type = nwcStr.DataType; + password.len = nwcStr.DataLen; + if (!copy_from_user((void *)password.buffer, nwcStr.pBuffer, password.len)) { +- retCode = novfs_do_login(&server, &username, &password, (void **)&lgn.AuthenticationId, &Session); ++ retCode = ++ novfs_do_login(&server, &username, &password, ++ (void **)&lgn.AuthenticationId, &Session); + if (retCode) { + lgn.AuthenticationId = 0; + } + + plgn = (struct nwc_login_id *)pdata->reqData; +- cpylen = copy_to_user(&plgn->AuthenticationId, &lgn.AuthenticationId, sizeof(plgn->AuthenticationId)); ++ cpylen = ++ copy_to_user(&plgn->AuthenticationId, &lgn.AuthenticationId, ++ sizeof(plgn->AuthenticationId)); + } + memset(password.buffer, 0, password.len); + +@@ -581,15 +565,12 @@ int novfs_auth_conn(struct novfs_xplat * + + cpylen = copy_from_user(&pauth, pdata->reqData, sizeof(pauth)); + +- pDauth = (struct nwc_auth_wid *) cmd->data; ++ pDauth = (struct nwc_auth_wid *)cmd->data; + cmd->dataLen = datalen; + pDauth->AuthenticationId = pauth.AuthenticationId; +- pDauth->ConnHandle = (void *) (unsigned long) pauth.ConnHandle; ++ pDauth->ConnHandle = (void *)(unsigned long)pauth.ConnHandle; + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -602,7 +583,7 @@ int novfs_license_conn(struct novfs_xpla + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; + struct nwc_license_conn lisc; +- struct nwc_lisc_id * pDLisc = NULL; ++ struct nwc_lisc_id *pDLisc = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -619,14 +600,11 @@ int novfs_license_conn(struct novfs_xpla + + cpylen = copy_from_user(&lisc, pdata->reqData, sizeof(lisc)); + +- pDLisc = (struct nwc_lisc_id *) cmd->data; ++ pDLisc = (struct nwc_lisc_id *)cmd->data; + cmd->dataLen = datalen; +- pDLisc->ConnHandle = (void *) (unsigned long) lisc.ConnHandle; ++ pDLisc->ConnHandle = (void *)(unsigned long)lisc.ConnHandle; + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -654,17 +632,13 @@ int novfs_logout_id(struct novfs_xplat * + cmd->Command.SessionId = Session; + cmd->NwcCommand = 
NWC_LOGOUT_IDENTITY; + +- cpylen = +- copy_from_user(&logout, pdata->reqData, sizeof(logout)); ++ cpylen = copy_from_user(&logout, pdata->reqData, sizeof(logout)); + +- pDLogout = (struct nwc_lo_id *) cmd->data; ++ pDLogout = (struct nwc_lo_id *)cmd->data; + cmd->dataLen = datalen; + pDLogout->AuthenticationId = logout.AuthenticationId; + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -693,13 +667,10 @@ int novfs_unlicense_conn(struct novfs_xp + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_UNLICENSE_CONN; + cmd->dataLen = datalen; +- pUconn = (struct nwc_unlic_conn *) cmd->data; ++ pUconn = (struct nwc_unlic_conn *)cmd->data; + +- pUconn->ConnHandle = (void *) (unsigned long) ulc.ConnHandle; +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ pUconn->ConnHandle = (void *)(unsigned long)ulc.ConnHandle; ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + /* + * we got reply data from the daemon +@@ -732,15 +703,12 @@ int novfs_unauthenticate(struct novfs_xp + + cpylen = copy_from_user(&auth, pdata->reqData, sizeof(auth)); + +- pDAuth = (struct nwc_unauthenticate *) cmd->data; ++ pDAuth = (struct nwc_unauthenticate *)cmd->data; + cmd->dataLen = datalen; + pDAuth->AuthenticationId = auth.AuthenticationId; +- pDAuth->ConnHandle = (void *) (unsigned long) auth.ConnHandle; ++ pDAuth->ConnHandle = (void *)(unsigned long)auth.ConnHandle; + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -761,8 +729,7 @@ int novfs_get_conn_info(struct novfs_xpl + + cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo); + cmd = kmalloc(cmdlen, GFP_KERNEL); +- cpylen = +- copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_get_conn_info)); ++ cpylen = copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_get_conn_info)); + + if (!cmd) + return -ENOMEM; +@@ -777,17 +744,14 @@ int novfs_get_conn_info(struct novfs_xpl + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_GET_CONN_INFO; + +- pDConnInfo = (struct nwd_conn_info *) cmd->data; ++ pDConnInfo = (struct nwd_conn_info *)cmd->data; + +- pDConnInfo->ConnHandle = (void *) (unsigned long) connInfo.ConnHandle; ++ pDConnInfo->ConnHandle = (void *)(unsigned long)connInfo.ConnHandle; + pDConnInfo->uInfoLevel = connInfo.uInfoLevel; + pDConnInfo->uInfoLength = connInfo.uInfoLength; + cmd->dataLen = sizeof(*pDConnInfo); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + if (!retCode) { +@@ -827,17 +791,14 @@ int novfs_set_conn_info(struct novfs_xpl + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_SET_CONN_INFO; + +- pDConnInfo = (struct nwd_set_conn_info *) cmd->data; ++ pDConnInfo = (struct nwd_set_conn_info *)cmd->data; + +- pDConnInfo->ConnHandle = (void *) (unsigned long) 
connInfo.ConnHandle; ++ pDConnInfo->ConnHandle = (void *)(unsigned long)connInfo.ConnHandle; + pDConnInfo->uInfoLevel = connInfo.uInfoLevel; + pDConnInfo->uInfoLength = connInfo.uInfoLength; + cmd->dataLen = sizeof(*pDConnInfo); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -872,13 +833,11 @@ int novfs_get_id_info(struct novfs_xplat + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_GET_IDENTITY_INFO; + +- idInfo = (struct nwd_get_id_info *) cmd->data; ++ idInfo = (struct nwd_get_id_info *)cmd->data; + idInfo->AuthenticationId = qidInfo.AuthenticationId; + cmd->dataLen = sizeof(*idInfo); + +- retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + +@@ -887,32 +846,20 @@ int novfs_get_id_info(struct novfs_xplat + * Save the return info to the user structure. + */ + gId = pdata->reqData; +- idInfo = (struct nwd_get_id_info *) reply->data; +- cpylen = copy_to_user(&gId->AuthenticationId, +- &idInfo->AuthenticationId, +- sizeof(idInfo-> +- AuthenticationId)); +- cpylen = copy_to_user(&gId->AuthType, +- &idInfo->AuthType, +- sizeof(idInfo->AuthType)); +- cpylen = copy_to_user(&gId->IdentityFlags, +- &idInfo->IdentityFlags, +- sizeof(idInfo->IdentityFlags)); +- cpylen = copy_to_user(&gId->NameType, +- &idInfo->NameType, +- sizeof(idInfo->NameType)); +- cpylen = copy_to_user(&gId->ObjectType, +- &idInfo->ObjectType, +- sizeof(idInfo->ObjectType)); ++ idInfo = (struct nwd_get_id_info *)reply->data; ++ cpylen = copy_to_user(&gId->AuthenticationId, &idInfo->AuthenticationId, sizeof(idInfo->AuthenticationId)); ++ cpylen = copy_to_user(&gId->AuthType, &idInfo->AuthType, sizeof(idInfo->AuthType)); ++ cpylen = copy_to_user(&gId->IdentityFlags, &idInfo->IdentityFlags, sizeof(idInfo->IdentityFlags)); ++ cpylen = copy_to_user(&gId->NameType, &idInfo->NameType, sizeof(idInfo->NameType)); ++ cpylen = copy_to_user(&gId->ObjectType, &idInfo->ObjectType, sizeof(idInfo->ObjectType)); + +- cpylen = copy_from_user(&xferStr, gId->pDomainName, +- sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&xferStr, gId->pDomainName, sizeof(struct nwc_string)); + if (idInfo->pDomainNameOffset >= reply->dataLen) { + retCode = -EINVAL; + goto out; + } + str = (char *)((char *)reply->data + idInfo->pDomainNameOffset); +- if (idInfo->domainLen > reply->dataLen - idInfo->pDomainNameOffset ) { ++ if (idInfo->domainLen > reply->dataLen - idInfo->pDomainNameOffset) { + retCode = -EINVAL; + goto out; + } +@@ -937,10 +884,10 @@ int novfs_get_id_info(struct novfs_xplat + xferStr.DataType = NWC_STRING_TYPE_ASCII; + cpylen = copy_to_user(gId->pObjectName, &xferStr, sizeof(struct nwc_string)); + } +- } ++ } + + out: +- kfree(reply); ++ kfree(reply); + kfree(cmd); + return (retCode); + } +@@ -970,20 +917,15 @@ int novfs_scan_conn_info(struct novfs_xp + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_SCAN_CONN_INFO; + +- pDConnInfo = (struct nwd_scan_conn_info *) cmd->data; ++ pDConnInfo = (struct nwd_scan_conn_info *)cmd->data; + + DbgPrint("Input Data"); + __DbgPrint(" connInfo.uScanIndex = 0x%X\n", connInfo.uScanIndex); +- __DbgPrint(" 
connInfo.uConnectionReference = 0x%X\n", +- connInfo.uConnectionReference); +- __DbgPrint(" connInfo.uScanInfoLevel = 0x%X\n", +- connInfo.uScanInfoLevel); +- __DbgPrint(" connInfo.uScanInfoLen = 0x%X\n", +- connInfo.uScanInfoLen); +- __DbgPrint(" connInfo.uReturnInfoLength = 0x%X\n", +- connInfo.uReturnInfoLength); +- __DbgPrint(" connInfo.uReturnInfoLevel = 0x%X\n", +- connInfo.uReturnInfoLevel); ++ __DbgPrint(" connInfo.uConnectionReference = 0x%X\n", connInfo.uConnectionReference); ++ __DbgPrint(" connInfo.uScanInfoLevel = 0x%X\n", connInfo.uScanInfoLevel); ++ __DbgPrint(" connInfo.uScanInfoLen = 0x%X\n", connInfo.uScanInfoLen); ++ __DbgPrint(" connInfo.uReturnInfoLength = 0x%X\n", connInfo.uReturnInfoLength); ++ __DbgPrint(" connInfo.uReturnInfoLevel = 0x%X\n", connInfo.uReturnInfoLevel); + __DbgPrint(" connInfo.uScanFlags = 0x%X\n", connInfo.uScanFlags); + + pDConnInfo->uScanIndex = connInfo.uScanIndex; +@@ -995,53 +937,38 @@ int novfs_scan_conn_info(struct novfs_xp + pDConnInfo->uScanFlags = connInfo.uScanFlags; + + if (pDConnInfo->uScanInfoLen) { +- localData = (unsigned char *) pDConnInfo; ++ localData = (unsigned char *)pDConnInfo; + pDConnInfo->uScanConnInfoOffset = sizeof(*pDConnInfo); + localData += pDConnInfo->uScanConnInfoOffset; +- cpylen = copy_from_user(localData, connInfo.pScanConnInfo, +- connInfo.uScanInfoLen); ++ cpylen = copy_from_user(localData, connInfo.pScanConnInfo, connInfo.uScanInfoLen); + } else { + pDConnInfo->uScanConnInfoOffset = 0; + } + + cmd->dataLen = sizeof(*pDConnInfo); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + DbgPrint("Reply recieved"); + __DbgPrint(" NextIndex = %x\n", connInfo.uScanIndex); + __DbgPrint(" ErrorCode = %x\n", reply->Reply.ErrorCode); + __DbgPrint(" data = %p\n", reply->data); + +- pDConnInfo = (struct nwd_scan_conn_info *) reply->data; +- retCode = (unsigned long) reply->Reply.ErrorCode; ++ pDConnInfo = (struct nwd_scan_conn_info *)reply->data; ++ retCode = (unsigned long)reply->Reply.ErrorCode; + if (!retCode) { + GetUserData(&connInfo, cmd, reply); +- rInfo = (struct nwc_scan_conn_info *) pdata->repData; +- cpylen = +- copy_to_user(pdata->repData, +- &pDConnInfo->uScanIndex, +- sizeof(pDConnInfo-> +- uScanIndex)); ++ rInfo = (struct nwc_scan_conn_info *)pdata->repData; ++ cpylen = copy_to_user(pdata->repData, &pDConnInfo->uScanIndex, sizeof(pDConnInfo->uScanIndex)); + cpylen = +- copy_to_user(&rInfo->uConnectionReference, +- &pDConnInfo-> +- uConnectionReference, +- sizeof(pDConnInfo-> +- uConnectionReference)); ++ copy_to_user(&rInfo->uConnectionReference, ++ &pDConnInfo->uConnectionReference, sizeof(pDConnInfo->uConnectionReference)); + } else { + unsigned long x; + + x = 0; +- rInfo = (struct nwc_scan_conn_info *) pdata->reqData; +- cpylen = +- copy_to_user(&rInfo->uConnectionReference, +- &x, +- sizeof(rInfo-> +- uConnectionReference)); ++ rInfo = (struct nwc_scan_conn_info *)pdata->reqData; ++ cpylen = copy_to_user(&rInfo->uConnectionReference, &x, sizeof(rInfo->uConnectionReference)); + } + + kfree(reply); +@@ -1055,17 +982,17 @@ int novfs_scan_conn_info(struct novfs_xp + /* + * Copies the user data out of the scan conn info call. 
+ */ +-static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) ++static void GetUserData(struct nwc_scan_conn_info *connInfo, struct novfs_xplat_call_request *cmd, ++ struct novfs_xplat_call_reply *reply) + { + unsigned long uLevel; + struct nwd_scan_conn_info *pDConnInfo = NULL; + unsigned char *srcData = NULL; + unsigned long dataLen = 0, cpylen; + +- pDConnInfo = (struct nwd_scan_conn_info *) reply->data; ++ pDConnInfo = (struct nwd_scan_conn_info *)reply->data; + uLevel = pDConnInfo->uReturnInfoLevel; +- DbgPrint("uLevel = %d, reply = 0x%p, reply->data = 0x%X", +- uLevel, reply, reply->data); ++ DbgPrint("uLevel = %d, reply = 0x%p, reply->data = 0x%X", uLevel, reply, reply->data); + + switch (uLevel) { + case NWC_CONN_INFO_RETURN_ALL: +@@ -1088,7 +1015,7 @@ static void GetUserData(struct nwc_scan_ + case NWC_CONN_INFO_TREE_NAME: + case NWC_CONN_INFO_SERVER_NAME: + case NWC_CONN_INFO_VERSION: +- srcData = (unsigned char *) pDConnInfo; ++ srcData = (unsigned char *)pDConnInfo; + srcData += pDConnInfo->uReturnConnInfoOffset; + dataLen = pDConnInfo->uReturnInfoLength; + break; +@@ -1098,14 +1025,13 @@ static void GetUserData(struct nwc_scan_ + unsigned char *dstData = connInfo->pReturnConnInfo; + struct nwc_tran_addr tranAddr; + +- srcData = (unsigned char *) reply->data; ++ srcData = (unsigned char *)reply->data; + dataLen = reply->dataLen; + +- DbgPrint("NWC_CONN_INFO_TRAN_ADDR 0x%p -> 0x%p :: 0x%X", +- srcData, connInfo->pReturnConnInfo, dataLen); ++ DbgPrint("NWC_CONN_INFO_TRAN_ADDR 0x%p -> 0x%p :: 0x%X", srcData, connInfo->pReturnConnInfo, dataLen); + + cpylen = copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); +- if (((struct nwd_scan_conn_info *) srcData)->uReturnConnInfoOffset >= reply->dataLen) ++ if (((struct nwd_scan_conn_info *)srcData)->uReturnConnInfoOffset >= reply->dataLen) + goto out; + srcData += ((struct nwd_scan_conn_info *)srcData)->uReturnConnInfoOffset; + tranAddr.uTransportType = ((struct nwd_tran_addr *)srcData)->uTransportType; +@@ -1114,7 +1040,7 @@ static void GetUserData(struct nwc_scan_ + goto out; + cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); + cpylen = copy_to_user(tranAddr.puAddress, +- ((struct tagNwdTranAddrEx *) srcData)->Buffer, tranAddr.uAddressLength); ++ ((struct tagNwdTranAddrEx *)srcData)->Buffer, tranAddr.uAddressLength); + dataLen = 0; + break; + } +@@ -1129,8 +1055,7 @@ static void GetUserData(struct nwc_scan_ + } + + if (srcData && dataLen && dataLen <= reply->dataLen) { +- DbgPrint("Copy Data 0x%p -> 0x%p :: 0x%X", +- srcData, connInfo->pReturnConnInfo, dataLen); ++ DbgPrint("Copy Data 0x%p -> 0x%p :: 0x%X", srcData, connInfo->pReturnConnInfo, dataLen); + cpylen = copy_to_user(connInfo->pReturnConnInfo, srcData, dataLen); + } + +@@ -1141,7 +1066,8 @@ out: + /* + * Copies the user data out of the scan conn info call. 
+ */ +-static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) ++static void GetConnData(struct nwc_get_conn_info *connInfo, struct novfs_xplat_call_request *cmd, ++ struct novfs_xplat_call_reply *reply) + { + unsigned long uLevel; + struct nwd_conn_info *pDConnInfo = NULL; +@@ -1149,12 +1075,12 @@ static void GetConnData(struct nwc_get_c + unsigned char *srcData = NULL; + unsigned long dataLen = 0, cpylen; + +- pDConnInfo = (struct nwd_conn_info *) cmd->data; ++ pDConnInfo = (struct nwd_conn_info *)cmd->data; + uLevel = pDConnInfo->uInfoLevel; + + switch (uLevel) { + case NWC_CONN_INFO_RETURN_ALL: +- srcData = (unsigned char *) reply->data; ++ srcData = (unsigned char *)reply->data; + dataLen = reply->dataLen; + break; + +@@ -1167,19 +1093,16 @@ static void GetConnData(struct nwc_get_c + unsigned char *dstData = connInfo->pConnInfo; + struct nwc_tran_addr tranAddr; + +- srcData = (unsigned char *) reply->data; ++ srcData = (unsigned char *)reply->data; + + cpylen = copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); +- tranAddr.uTransportType = +- ((struct tagNwdTranAddrEx *) srcData)->uTransportType; +- tranAddr.uAddressLength = +- ((struct tagNwdTranAddrEx *) srcData)->uAddressLength; ++ tranAddr.uTransportType = ((struct tagNwdTranAddrEx *)srcData)->uTransportType; ++ tranAddr.uAddressLength = ((struct tagNwdTranAddrEx *)srcData)->uAddressLength; + if (tranAddr.uAddressLength > MAX_ADDRESS_LENGTH) + goto out; + cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); + cpylen = copy_to_user(tranAddr.puAddress, +- ((struct tagNwdTranAddrEx *) srcData)->Buffer, +- tranAddr.uAddressLength); ++ ((struct tagNwdTranAddrEx *)srcData)->Buffer, tranAddr.uAddressLength); + dataLen = 0; + break; + } +@@ -1202,7 +1125,7 @@ static void GetConnData(struct nwc_get_c + case NWC_CONN_INFO_VERSION: + case NWC_CONN_INFO_SERVER_NAME: + case NWC_CONN_INFO_TREE_NAME: +- srcData = (unsigned char *) reply->data; ++ srcData = (unsigned char *)reply->data; + dataLen = reply->dataLen; + break; + +@@ -1250,16 +1173,11 @@ int novfs_get_daemon_ver(struct novfs_xp + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_GET_REQUESTER_VERSION; + cmdlen = sizeof(*cmd); +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; +- pDVersion = (struct nwd_get_reqversion *) reply->data; +- cpylen = +- copy_to_user(pDVersion, pdata->reqData, +- sizeof(*pDVersion)); ++ pDVersion = (struct nwd_get_reqversion *)reply->data; ++ cpylen = copy_to_user(pDVersion, pdata->reqData, sizeof(*pDVersion)); + kfree(reply); + } + kfree(cmd); +@@ -1277,8 +1195,7 @@ int novfs_get_preferred_DS_tree(struct n + unsigned long cmdlen, datalen, replylen, cpylen; + unsigned char *dPtr = NULL; + +- cpylen = copy_from_user(&xplatCall, pdata->reqData, +- sizeof(struct nwc_get_pref_ds_tree)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_get_pref_ds_tree)); + if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) + return -EINVAL; + datalen = sizeof(*pDGetTree) + xplatCall.uTreeLength; +@@ -1294,24 +1211,20 @@ int novfs_get_preferred_DS_tree(struct n + cmd->NwcCommand = NWC_GET_PREFERRED_DS_TREE; + cmdlen = sizeof(*cmd); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- 
INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + if (!retCode) { +- pDGetTree = (struct nwd_get_pref_ds_tree *) reply->data; ++ pDGetTree = (struct nwd_get_pref_ds_tree *)reply->data; + if (pDGetTree->DsTreeNameOffset >= reply->dataLen) { + retCode = -EINVAL; + goto out; + } + dPtr = reply->data + pDGetTree->DsTreeNameOffset; +- p = (struct nwc_get_pref_ds_tree *) pdata->reqData; ++ p = (struct nwc_get_pref_ds_tree *)pdata->reqData; + + DbgPrint("Reply recieved"); +- __DbgPrint(" TreeLen = %x\n", +- pDGetTree->uTreeLength); ++ __DbgPrint(" TreeLen = %x\n", pDGetTree->uTreeLength); + __DbgPrint(" TreeName = %s\n", dPtr); + + if (pDGetTree->uTreeLength > reply->dataLen - pDGetTree->DsTreeNameOffset) { +@@ -1355,18 +1268,14 @@ int novfs_set_preferred_DS_tree(struct n + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_SET_PREFERRED_DS_TREE; + +- pDSetTree = (struct nwd_set_pref_ds_tree *) cmd->data; ++ pDSetTree = (struct nwd_set_pref_ds_tree *)cmd->data; + pDSetTree->DsTreeNameOffset = sizeof(*pDSetTree); + pDSetTree->uTreeLength = xplatCall.uTreeLength; + + dPtr = cmd->data + sizeof(*pDSetTree); +- cpylen = copy_from_user(dPtr, xplatCall.pDsTreeName, +- xplatCall.uTreeLength); ++ cpylen = copy_from_user(dPtr, xplatCall.pDsTreeName, xplatCall.uTreeLength); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -1376,19 +1285,17 @@ int novfs_set_preferred_DS_tree(struct n + + } + +-int novfs_set_default_ctx(struct novfs_xplat *pdata, +- struct novfs_schandle Session) ++int novfs_set_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session) + { + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; + struct nwc_set_def_name_ctx xplatCall; +- struct nwd_set_def_name_ctx * pDSet = NULL; ++ struct nwd_set_def_name_ctx *pDSet = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + unsigned char *dPtr = NULL; + +- cpylen = copy_from_user(&xplatCall, pdata->reqData, +- sizeof(struct nwc_set_def_name_ctx)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_def_name_ctx)); + if (xplatCall.uNameLength > MAX_NAME_LEN || xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) + return -EINVAL; + datalen = sizeof(*pDSet) + xplatCall.uTreeLength + xplatCall.uNameLength; +@@ -1403,7 +1310,7 @@ int novfs_set_default_ctx(struct novfs_x + cmd->NwcCommand = NWC_SET_DEFAULT_NAME_CONTEXT; + cmd->dataLen = sizeof(struct nwd_set_def_name_ctx) + xplatCall.uTreeLength + xplatCall.uNameLength; + +- pDSet = (struct nwd_set_def_name_ctx *) cmd->data; ++ pDSet = (struct nwd_set_def_name_ctx *)cmd->data; + dPtr = cmd->data; + + pDSet->TreeOffset = sizeof(struct nwd_set_def_name_ctx); +@@ -1413,14 +1320,9 @@ int novfs_set_default_ctx(struct novfs_x + + //sgled cpylen = copy_from_user(dPtr+pDSet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); + cpylen = copy_from_user(dPtr + pDSet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled +- cpylen = copy_from_user(dPtr + pDSet->NameContextOffset, +- xplatCall.pNameContext, +- xplatCall.uNameLength); +- +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void 
**)&reply, &replylen, +- INTERRUPTIBLE); ++ cpylen = copy_from_user(dPtr + pDSet->NameContextOffset, xplatCall.pNameContext, xplatCall.uNameLength); ++ ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + kfree(reply); +@@ -1430,23 +1332,21 @@ int novfs_set_default_ctx(struct novfs_x + + } + +-int novfs_get_default_ctx(struct novfs_xplat *pdata, +- struct novfs_schandle Session) ++int novfs_get_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session) + { + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; + struct nwc_get_def_name_ctx xplatCall; +- struct nwd_get_def_name_ctx * pGet = NULL; ++ struct nwd_get_def_name_ctx *pGet = NULL; + char *dPtr = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, replylen, cpylen; + +- cpylen = copy_from_user(&xplatCall, pdata->reqData, +- sizeof(struct nwc_get_def_name_ctx)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_get_def_name_ctx)); + if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) + return -EINVAL; + +- cmdlen = sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx ) + xplatCall.uTreeLength; ++ cmdlen = sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx) + xplatCall.uTreeLength; + cmd = kmalloc(cmdlen, GFP_KERNEL); + + if (!cmd) +@@ -1455,40 +1355,31 @@ int novfs_get_default_ctx(struct novfs_x + cmd->Command.SequenceNumber = 0; + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_GET_DEFAULT_NAME_CONTEXT; +- cmd->dataLen = +- sizeof(struct nwd_get_def_name_ctx) + xplatCall.uTreeLength; ++ cmd->dataLen = sizeof(struct nwd_get_def_name_ctx) + xplatCall.uTreeLength; + +- pGet = (struct nwd_get_def_name_ctx *) cmd->data; ++ pGet = (struct nwd_get_def_name_ctx *)cmd->data; + dPtr = cmd->data; + +- pGet->TreeOffset = sizeof(struct nwd_get_def_name_ctx ); ++ pGet->TreeOffset = sizeof(struct nwd_get_def_name_ctx); + pGet->uTreeLength = xplatCall.uTreeLength; + + //sgled cpylen = copy_from_user( dPtr + pGet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); + cpylen = copy_from_user(dPtr + pGet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled + dPtr[pGet->TreeOffset + pGet->uTreeLength] = 0; + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + retCode = reply->Reply.ErrorCode; + if (!retCode) { +- pGet = (struct nwd_get_def_name_ctx *) reply->data; ++ pGet = (struct nwd_get_def_name_ctx *)reply->data; + +- DbgPrint("retCode=0x%x uNameLength1=%d uNameLength2=%d", +- retCode, pGet->uNameLength, +- xplatCall.uNameLength); ++ DbgPrint("retCode=0x%x uNameLength1=%d uNameLength2=%d", retCode, pGet->uNameLength, xplatCall.uNameLength); + if (xplatCall.uNameLength < pGet->uNameLength) { +- pGet->uNameLength = +- xplatCall.uNameLength; ++ pGet->uNameLength = xplatCall.uNameLength; + retCode = NWE_BUFFER_OVERFLOW; + } + dPtr = (char *)pGet + pGet->NameContextOffset; +- cpylen = +- copy_to_user(xplatCall.pNameContext, dPtr, +- pGet->uNameLength); ++ cpylen = copy_to_user(xplatCall.pNameContext, dPtr, pGet->uNameLength); + } + + kfree(reply); +@@ -1504,8 +1395,7 @@ int novfs_query_feature(struct novfs_xpl + int status = 0; + unsigned long cpylen; + +- cpylen = +- copy_from_user(&xpCall, pdata->reqData, sizeof(struct nwc_query_feature)); ++ cpylen = 
copy_from_user(&xpCall, pdata->reqData, sizeof(struct nwc_query_feature)); + switch (xpCall.Feature) { + case NWC_FEAT_NDS: + case NWC_FEAT_NDS_MTREE: +@@ -1518,8 +1408,7 @@ int novfs_query_feature(struct novfs_xpl + return (status); + } + +-int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, +- struct novfs_schandle Session) ++int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) + { + struct novfs_xplat_call_request *cmd = NULL; + struct novfs_xplat_call_reply *reply = NULL; +@@ -1528,9 +1417,7 @@ int novfs_get_tree_monitored_conn(struct + char *dPtr = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, +- sizeof(struct nwc_get_tree_monitored_conn_ref)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_get_tree_monitored_conn_ref)); + if (!access_ok(VERIFY_READ, xplatCall.pTreeName, sizeof(struct nwc_string))) + return -EINVAL; + if (xplatCall.pTreeName->DataLen > NW_MAX_TREE_NAME_LEN) +@@ -1547,26 +1434,19 @@ int novfs_get_tree_monitored_conn(struct + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_GET_TREE_MONITORED_CONN_REF; + +- pDConnRef = (struct nwd_get_tree_monitored_conn_ref *) cmd->data; ++ pDConnRef = (struct nwd_get_tree_monitored_conn_ref *)cmd->data; + pDConnRef->TreeName.boffset = sizeof(*pDConnRef); + pDConnRef->TreeName.len = xplatCall.pTreeName->DataLen; + pDConnRef->TreeName.type = xplatCall.pTreeName->DataType; + + dPtr = cmd->data + sizeof(*pDConnRef); +- cpylen = +- copy_from_user(dPtr, xplatCall.pTreeName->pBuffer, +- pDConnRef->TreeName.len); +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ cpylen = copy_from_user(dPtr, xplatCall.pTreeName->pBuffer, pDConnRef->TreeName.len); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { +- pDConnRef = (struct nwd_get_tree_monitored_conn_ref *) reply->data; ++ pDConnRef = (struct nwd_get_tree_monitored_conn_ref *)reply->data; + dPtr = reply->data + pDConnRef->TreeName.boffset; +- p = (struct nwc_get_tree_monitored_conn_ref *) pdata->reqData; +- cpylen = +- copy_to_user(&p->uConnReference, +- &pDConnRef->uConnReference, 4); ++ p = (struct nwc_get_tree_monitored_conn_ref *)pdata->reqData; ++ cpylen = copy_to_user(&p->uConnReference, &pDConnRef->uConnReference, 4); + + status = reply->Reply.ErrorCode; + kfree(reply); +@@ -1585,9 +1465,7 @@ int novfs_enum_ids(struct novfs_xplat *p + char *str = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, +- sizeof(struct nwc_enum_ids)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_enum_ids)); + datalen = sizeof(*pEnum); + cmdlen = datalen + sizeof(*cmd); + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -1604,66 +1482,40 @@ int novfs_enum_ids(struct novfs_xplat *p + __DbgPrint(" iterator = %x\n", xplatCall.Iterator); + __DbgPrint(" cmdlen = %d\n", cmdlen); + +- pEnum = (struct nwd_enum_ids *) cmd->data; ++ pEnum = (struct nwd_enum_ids *)cmd->data; + pEnum->Iterator = xplatCall.Iterator; +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + status = reply->Reply.ErrorCode; + + eId = pdata->repData; +- pEnum = 
(struct nwd_enum_ids *) reply->data; +- cpylen = +- copy_to_user(&eId->Iterator, &pEnum->Iterator, +- sizeof(pEnum->Iterator)); +- DbgPrint("[XPLAT NWCAPI] Found AuthId 0x%X", +- pEnum->AuthenticationId); +- cpylen = +- copy_to_user(&eId->AuthenticationId, +- &pEnum->AuthenticationId, +- sizeof(pEnum->AuthenticationId)); +- cpylen = +- copy_to_user(&eId->AuthType, &pEnum->AuthType, +- sizeof(pEnum->AuthType)); +- cpylen = +- copy_to_user(&eId->IdentityFlags, +- &pEnum->IdentityFlags, +- sizeof(pEnum->IdentityFlags)); +- cpylen = +- copy_to_user(&eId->NameType, &pEnum->NameType, +- sizeof(pEnum->NameType)); +- cpylen = +- copy_to_user(&eId->ObjectType, &pEnum->ObjectType, +- sizeof(pEnum->ObjectType)); ++ pEnum = (struct nwd_enum_ids *)reply->data; ++ cpylen = copy_to_user(&eId->Iterator, &pEnum->Iterator, sizeof(pEnum->Iterator)); ++ DbgPrint("[XPLAT NWCAPI] Found AuthId 0x%X", pEnum->AuthenticationId); ++ cpylen = copy_to_user(&eId->AuthenticationId, &pEnum->AuthenticationId, sizeof(pEnum->AuthenticationId)); ++ cpylen = copy_to_user(&eId->AuthType, &pEnum->AuthType, sizeof(pEnum->AuthType)); ++ cpylen = copy_to_user(&eId->IdentityFlags, &pEnum->IdentityFlags, sizeof(pEnum->IdentityFlags)); ++ cpylen = copy_to_user(&eId->NameType, &pEnum->NameType, sizeof(pEnum->NameType)); ++ cpylen = copy_to_user(&eId->ObjectType, &pEnum->ObjectType, sizeof(pEnum->ObjectType)); + + if (!status) { +- cpylen = +- copy_from_user(&xferStr, eId->pDomainName, +- sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&xferStr, eId->pDomainName, sizeof(struct nwc_string)); + if (pEnum->domainNameOffset >= reply->dataLen) { + status = -EINVAL; + goto out; + } +- str = +- (char *)((char *)reply->data + +- pEnum->domainNameOffset); ++ str = (char *)((char *)reply->data + pEnum->domainNameOffset); + DbgPrint("[XPLAT NWCAPI] Found Domain %s", str); + if (pEnum->domainNameLen > reply->dataLen - pEnum->domainNameOffset) { + status = -EINVAL; + goto out; + } +- cpylen = +- copy_to_user(xferStr.pBuffer, str, +- pEnum->domainNameLen); ++ cpylen = copy_to_user(xferStr.pBuffer, str, pEnum->domainNameLen); + xferStr.DataType = NWC_STRING_TYPE_ASCII; + xferStr.DataLen = pEnum->domainNameLen - 1; +- cpylen = copy_to_user(eId->pDomainName, &xferStr, +- sizeof(struct nwc_string)); ++ cpylen = copy_to_user(eId->pDomainName, &xferStr, sizeof(struct nwc_string)); + +- cpylen = copy_from_user(&xferStr, eId->pObjectName, +- sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&xferStr, eId->pObjectName, sizeof(struct nwc_string)); + if (pEnum->objectNameOffset >= reply->dataLen) { + status = -EINVAL; + goto out; +@@ -1704,14 +1556,12 @@ int novfs_change_auth_key(struct novfs_x + return -EINVAL; + if (xplatCall.pDomainName->DataLen > MAX_DOMAIN_LEN || + xplatCall.pObjectName->DataLen > MAX_OBJECT_NAME_LENGTH || +- xplatCall.pNewPassword->DataLen > MAX_PASSWORD_LENGTH || +- xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) ++ xplatCall.pNewPassword->DataLen > MAX_PASSWORD_LENGTH || xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) + return -EINVAL; + + datalen = + sizeof(struct nwd_change_key) + xplatCall.pDomainName->DataLen + +- xplatCall.pObjectName->DataLen + xplatCall.pNewPassword->DataLen + +- xplatCall.pVerifyPassword->DataLen; ++ xplatCall.pObjectName->DataLen + xplatCall.pNewPassword->DataLen + xplatCall.pVerifyPassword->DataLen; + + cmdlen = sizeof(*cmd) + datalen; + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -1719,7 +1569,7 @@ int novfs_change_auth_key(struct novfs_x + if (!cmd) + return -ENOMEM; + +- pNewKey 
= (struct nwd_change_key *) cmd->data; ++ pNewKey = (struct nwd_change_key *)cmd->data; + cmd->dataLen = datalen; + cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; +@@ -1783,9 +1633,7 @@ int novfs_change_auth_key(struct novfs_x + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->verifyPasswordLen = xferStr.DataLen; + +- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + status = reply->Reply.ErrorCode; + +@@ -1805,9 +1653,7 @@ int novfs_set_pri_conn(struct novfs_xpla + struct nwd_set_primary_conn *pConn = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, +- sizeof(struct nwc_set_primary_conn)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_primary_conn)); + + datalen = sizeof(struct nwd_set_primary_conn); + cmdlen = sizeof(*cmd) + datalen; +@@ -1815,17 +1661,14 @@ int novfs_set_pri_conn(struct novfs_xpla + if (!cmd) + return -ENOMEM; + +- pConn = (struct nwd_set_primary_conn *) cmd->data; ++ pConn = (struct nwd_set_primary_conn *)cmd->data; + cmd->dataLen = datalen; + cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_SET_PRIMARY_CONN; +- pConn->ConnHandle = (void *) (unsigned long) xplatCall.ConnHandle; +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ pConn->ConnHandle = (void *)(unsigned long)xplatCall.ConnHandle; ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + status = reply->Reply.ErrorCode; +@@ -1841,7 +1684,7 @@ int novfs_get_pri_conn(struct novfs_xpla + struct novfs_xplat_call_reply *reply = NULL; + unsigned long status = -ENOMEM, cmdlen, replylen, cpylen; + +- cmdlen = (unsigned long) (&((struct novfs_xplat_call_request *) 0)->data); ++ cmdlen = (unsigned long)(&((struct novfs_xplat_call_request *)0)->data); + + cmd.dataLen = 0; + cmd.Command.CommandType = VFS_COMMAND_XPLAT_CALL; +@@ -1849,16 +1692,12 @@ int novfs_get_pri_conn(struct novfs_xpla + cmd.Command.SessionId = Session; + cmd.NwcCommand = NWC_GET_PRIMARY_CONN; + +- status = +- Queue_Daemon_Command((void *)&cmd, cmdlen, NULL, 0, (void **)&reply, +- &replylen, INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)&cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + status = reply->Reply.ErrorCode; + if (!status) { +- cpylen = +- copy_to_user(pdata->repData, reply->data, +- sizeof(unsigned long)); ++ cpylen = copy_to_user(pdata->repData, reply->data, sizeof(unsigned long)); + } + + kfree(reply); +@@ -1881,13 +1720,11 @@ int novfs_set_map_drive(struct novfs_xpl + return -EFAULT; + if (symInfo.dirPathOffsetLength > MAX_OFFSET_LEN || symInfo.linkOffsetLength > MAX_OFFSET_LEN) + return -EINVAL; +- datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength + +- symInfo.linkOffsetLength; ++ datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength + symInfo.linkOffsetLength; + + __DbgPrint(" cmdlen = %d\n", cmdlen); + __DbgPrint(" dataLen = %d\n", datalen); +- __DbgPrint(" symInfo.dirPathOffsetLength = %d\n", +- symInfo.dirPathOffsetLength); ++ __DbgPrint(" symInfo.dirPathOffsetLength = %d\n", 
symInfo.dirPathOffsetLength); + __DbgPrint(" symInfo.linkOffsetLength = %d\n", symInfo.linkOffsetLength); + __DbgPrint(" pdata->datalen = %d\n", pdata->reqLen); + +@@ -1909,10 +1746,7 @@ int novfs_set_map_drive(struct novfs_xpl + kfree(cmd); + return -EFAULT; + } +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + status = reply->Reply.ErrorCode; +@@ -1950,9 +1784,7 @@ int novfs_unmap_drive(struct novfs_xplat + cmd->NwcCommand = NWC_UNMAP_DRIVE; + + cpylen = copy_from_user(cmd->data, pdata->reqData, datalen); +- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + status = reply->Reply.ErrorCode; +@@ -1982,29 +1814,23 @@ int novfs_enum_drives(struct novfs_xplat + cmd->Command.SequenceNumber = 0; + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_ENUMERATE_DRIVES; +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + status = reply->Reply.ErrorCode; + DbgPrint("Status Code = 0x%X", status); + if (!status) { +- offset = sizeof(((struct nwc_get_mapped_drives *) pdata-> +- repData)->MapBuffLen); ++ offset = sizeof(((struct nwc_get_mapped_drives *) pdata->repData)->MapBuffLen); + cp = reply->data; +- replylen = ((struct nwc_get_mapped_drives *) pdata->repData)->MapBuffLen; ++ replylen = ((struct nwc_get_mapped_drives *)pdata->repData)->MapBuffLen; + if (offset > reply->dataLen) { + status = -EINVAL; + goto out; + } + cpylen = copy_to_user(pdata->repData, cp, offset); + cp += offset; +- cpylen = copy_to_user(((struct nwc_get_mapped_drives *) pdata-> +- repData)->MapBuffer, cp, +- min(replylen - offset, +- reply->dataLen - offset)); ++ cpylen = copy_to_user(((struct nwc_get_mapped_drives *)pdata->repData)->MapBuffer, cp, ++ min(replylen - offset, reply->dataLen - offset)); + } + } + out: +@@ -2034,13 +1860,10 @@ int novfs_get_bcast_msg(struct novfs_xpl + cmd->Command.SessionId = Session; + + cmd->NwcCommand = NWC_GET_BROADCAST_MESSAGE; +- dmsg = (struct nwd_get_bcast_notification *) cmd->data; +- dmsg->uConnReference = (void *) (unsigned long) msg.uConnReference; ++ dmsg = (struct nwd_get_bcast_notification *)cmd->data; ++ dmsg->uConnReference = (void *)(unsigned long)msg.uConnReference; + +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + + if (reply) { + status = reply->Reply.ErrorCode; +@@ -2048,7 +1871,7 @@ int novfs_get_bcast_msg(struct novfs_xpl + if (!status) { + char *cp = pdata->repData; + +- dmsg = (struct nwd_get_bcast_notification *) reply->data; ++ dmsg = (struct nwd_get_bcast_notification *)reply->data; + if (pdata->repLen < dmsg->messageLen) { + dmsg->messageLen = pdata->repLen; + } +@@ -2085,7 +1908,7 @@ int novfs_set_key_value(struct novfs_xpl + + if (cstrObjectName.DataLen > MAX_OBJECT_NAME_LENGTH || cstrPassword.DataLen > MAX_PASSWORD_LENGTH) + return -EINVAL; +- datalen = sizeof(struct nwd_set_key ) + cstrObjectName.DataLen + cstrPassword.DataLen; 
++ datalen = sizeof(struct nwd_set_key) + cstrObjectName.DataLen + cstrPassword.DataLen; + + cmdlen = sizeof(*cmd) + datalen; + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -2093,7 +1916,7 @@ int novfs_set_key_value(struct novfs_xpl + if (!cmd) + return -ENOMEM; + +- pNewKey = (struct nwd_set_key *) cmd->data; ++ pNewKey = (struct nwd_set_key *)cmd->data; + cmd->dataLen = datalen; + cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; +@@ -2102,19 +1925,17 @@ int novfs_set_key_value(struct novfs_xpl + + pNewKey->ObjectType = xplatCall.ObjectType; + pNewKey->AuthenticationId = xplatCall.AuthenticationId; +- pNewKey->ConnHandle = (void *) (unsigned long) xplatCall.ConnHandle; ++ pNewKey->ConnHandle = (void *)(unsigned long)xplatCall.ConnHandle; + str = (char *)pNewKey; + + /* + * Get the User Name + */ +- str += sizeof(struct nwd_set_key ); +- cpylen = +- copy_from_user(str, cstrObjectName.pBuffer, +- cstrObjectName.DataLen); ++ str += sizeof(struct nwd_set_key); ++ cpylen = copy_from_user(str, cstrObjectName.pBuffer, cstrObjectName.DataLen); + + str += pNewKey->objectNameLen = cstrObjectName.DataLen; +- pNewKey->objectNameOffset = sizeof(struct nwd_set_key ); ++ pNewKey->objectNameOffset = sizeof(struct nwd_set_key); + + /* + * Get the Verify Password +@@ -2124,9 +1945,7 @@ int novfs_set_key_value(struct novfs_xpl + pNewKey->newPasswordLen = cstrPassword.DataLen; + pNewKey->newPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; + +- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + status = reply->Reply.ErrorCode; + kfree(reply); +@@ -2164,7 +1983,7 @@ int novfs_verify_key_value(struct novfs_ + if (!cmd) + return -ENOMEM; + +- pNewKey = (struct nwd_verify_key *) cmd->data; ++ pNewKey = (struct nwd_verify_key *)cmd->data; + cmd->dataLen = datalen; + cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; +@@ -2180,9 +1999,7 @@ int novfs_verify_key_value(struct novfs_ + * Get the tree name + */ + str += sizeof(*pNewKey); +- cpylen = +- copy_from_user(&xferStr, xplatCall.pDomainName, +- sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&xferStr, xplatCall.pDomainName, sizeof(struct nwc_string)); + pNewKey->domainNameOffset = sizeof(*pNewKey); + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->domainNameLen = xferStr.DataLen; +@@ -2191,8 +2008,7 @@ int novfs_verify_key_value(struct novfs_ + * Get the User Name + */ + str += pNewKey->domainNameLen; +- cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, +- sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, sizeof(struct nwc_string)); + pNewKey->objectNameOffset = pNewKey->domainNameOffset + pNewKey->domainNameLen; + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->objectNameLen = xferStr.DataLen; +@@ -2201,16 +2017,12 @@ int novfs_verify_key_value(struct novfs_ + * Get the Verify Password + */ + str += pNewKey->objectNameLen; +- cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, +- sizeof(struct nwc_string)); +- pNewKey->verifyPasswordOffset = +- pNewKey->objectNameOffset + pNewKey->objectNameLen; ++ cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, sizeof(struct nwc_string)); ++ pNewKey->verifyPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; + cpylen = 
copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->verifyPasswordLen = xferStr.DataLen; + +- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +- (void **)&reply, &replylen, +- INTERRUPTIBLE); ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); + if (reply) { + status = reply->Reply.ErrorCode; + kfree(reply); +@@ -2218,4 +2030,3 @@ int novfs_verify_key_value(struct novfs_ + kfree(cmd); + return (status); + } +- +--- a/fs/novfs/nwcapi.h ++++ b/fs/novfs/nwcapi.h +@@ -14,7 +14,7 @@ + #ifndef __NWCLNX_H__ + #define __NWCLNX_H__ + +-#if 0 //sgled hack ++#if 0 //sgled hack + #else //sgled hack (up to endif) + + #define NW_MAX_TREE_NAME_LEN 33 +@@ -120,8 +120,7 @@ struct novfs_xplat { + + #if 0 + N_EXTERN_LIBRARY(NWRCODE) +- NWCLnxReq +- (nuint32 request, nptr pInBuf, nuint32 inLen, nptr pOutBuf, nuint32 outLen); ++ NWCLnxReq(nuint32 request, nptr pInBuf, nuint32 inLen, nptr pOutBuf, nuint32 outLen); + #endif + // + // Network Name Format Type +@@ -302,13 +301,12 @@ N_EXTERN_LIBRARY(NWRCODE) + #define MIN_NUM_REPLIES 1 + #define MAX_NUM_REQUESTS 4096 + #define MIN_NUM_REQUESTS 1 +-#define MAX_FRAG_SIZE 4096 ++#define MAX_FRAG_SIZE 4096 + #define MIN_FRAG_SIZE 1 + #define MAX_INFO_LEN 4096 + #define MAX_DOMAIN_LEN MAX_NETWORK_NAME_LENGTH + #define MAX_OFFSET_LEN 4096 + +- + // + // Flags for the GetBroadcastMessage API + // +@@ -338,7 +336,6 @@ N_EXTERN_LIBRARY(NWRCODE) + + //===[ Type definitions ]================================================== + +- + // + // Structure for defining what a transport + // address looks like +@@ -350,7 +347,6 @@ struct nwc_tran_addr { + unsigned char *puAddress; + }; + +- + struct nwc_conn_string { + char *pString; + u32 uStringType; +@@ -501,7 +497,6 @@ struct nwc_convert_netware_handle { + u32 uFileSize; + }; + +- + //++======================================================================= + // API Name: NwcGetConnInfo + // +@@ -617,7 +612,6 @@ struct nwc_get_tree_monitored_conn_ref { + + }; + +- + //++======================================================================= + // API Name: NwcGetPreferredDsTree + // +@@ -678,7 +672,6 @@ struct nwc_license_conn { + u32 ConnHandle; + }; + +- + //++======================================================================= + // API Name: NWCGetMappedDrives + // +@@ -1171,7 +1164,6 @@ struct nwc_set_primary_conn { + + }; + +- + //++======================================================================= + // API Name: NwcQueryFeature + // +@@ -1316,7 +1308,6 @@ struct nwc_login_id { + + }; + +- + //++======================================================================= + // API Name: NWCSetPassword + // +@@ -1401,7 +1392,6 @@ struct nwc_auth_with_id { + + }; + +- + struct nwc_unmap_drive_ex { + // unsigned long connHdl; + unsigned int linkLen; +--- a/fs/novfs/nwerror.h ++++ b/fs/novfs/nwerror.h +@@ -14,7 +14,6 @@ + #ifndef __NOVFS_ERROR_H + #define __NOVFS_ERROR_H + +- + /* + * Network errors + * Decimal values at end of line are 32768 lower than actual +@@ -655,4 +654,4 @@ + #define NWE_LOCK_ERROR 0x89FF // 255 + #define NWE_FAILURE 0x89FF // 255 Generic Failure + +-#endif /* __NOVFS_ERROR_H */ ++#endif /* __NOVFS_ERROR_H */ +--- a/fs/novfs/proc.c ++++ b/fs/novfs/proc.c +@@ -47,9 +47,7 @@ static int Novfs_Get_Version(char *page, + if (novfs_current_mnt) { + i = strlen(novfs_current_mnt); + if ((i > 0) && i < (count - len)) { +- len += +- sprintf(buf + len, "Novfs mount=%s\n", +- novfs_current_mnt); ++ len += sprintf(buf + len, "Novfs 
mount=%s\n", novfs_current_mnt); + } + } + DbgPrint("%s", buf); +@@ -69,9 +67,7 @@ int novfs_proc_init(void) + + if (Novfs_Control) { + Novfs_Control->size = 0; +- memcpy(&novfs_daemon_proc_fops, +- Novfs_Control->proc_fops, +- sizeof(struct file_operations)); ++ memcpy(&novfs_daemon_proc_fops, Novfs_Control->proc_fops, sizeof(struct file_operations)); + + /* + * Setup our functions +@@ -96,8 +92,7 @@ int novfs_proc_init(void) + /* + * Setup our file functions + */ +- memcpy(&novfs_lib_proc_fops, Novfs_Library->proc_fops, +- sizeof(struct file_operations)); ++ memcpy(&novfs_lib_proc_fops, Novfs_Library->proc_fops, sizeof(struct file_operations)); + novfs_lib_proc_fops.owner = THIS_MODULE; + novfs_lib_proc_fops.open = novfs_daemon_lib_open; + novfs_lib_proc_fops.release = novfs_daemon_lib_close; +@@ -112,9 +107,7 @@ int novfs_proc_init(void) + return (-ENOENT); + } + +- Novfs_Version = +- create_proc_read_entry("Version", 0444, novfs_procfs_dir, +- Novfs_Get_Version, NULL); ++ Novfs_Version = create_proc_read_entry("Version", 0444, novfs_procfs_dir, Novfs_Get_Version, NULL); + if (Novfs_Version) { + Novfs_Version->size = 0; + } else { +@@ -141,8 +134,7 @@ void novfs_proc_exit(void) + DbgPrint("remove_proc_entry(Library, NULL)\n"); + remove_proc_entry("Library", novfs_procfs_dir); + +- DbgPrint("remove_proc_entry(%s, NULL)\n", +- MODULE_NAME); ++ DbgPrint("remove_proc_entry(%s, NULL)\n", MODULE_NAME); + remove_proc_entry(MODULE_NAME, NULL); + + DbgPrint("done\n"); +--- a/fs/novfs/profile.c ++++ b/fs/novfs/profile.c +@@ -62,7 +62,7 @@ static struct proc_dir_entry *inode_file + + static DECLARE_MUTEX(LocalPrint_lock); + +-static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) ++static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user * buf, size_t nbytes, loff_t * ppos) + { + ssize_t retval = nbytes; + u_char *lbuf, *p; +@@ -101,8 +101,7 @@ static ssize_t User_proc_write_DbgBuffer + } else if (!strcmp("novfsd", lbuf)) { + novfs_daemon_debug_cmd_send(p); + } else if (!strcmp("file_update_timeout", lbuf)) { +- novfs_update_timeout = +- simple_strtoul(p, NULL, 0); ++ novfs_update_timeout = simple_strtoul(p, NULL, 0); + } else if (!strcmp("cache", lbuf)) { + if (!strcmp("on", p)) { + novfs_page_cache = 1; +@@ -134,9 +133,7 @@ static ssize_t User_proc_read_DbgBuffer( + count = nbytes; + } + +- count -= +- copy_to_user(buf, &DbgPrintBuffer[DbgPrintBufferReadOffset], +- count); ++ count -= copy_to_user(buf, &DbgPrintBuffer[DbgPrintBufferReadOffset], count); + + if (count == 0) { + if (retval == 0) +@@ -144,8 +141,7 @@ static ssize_t User_proc_read_DbgBuffer( + } else { + DbgPrintBufferReadOffset += count; + if (DbgPrintBufferReadOffset >= DbgPrintBufferOffset) { +- DbgPrintBufferOffset = +- DbgPrintBufferReadOffset = 0; ++ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; + } + retval = count; + } +@@ -158,7 +154,8 @@ static int proc_read_DbgBuffer(char *pag + { + int len; + +- printk(KERN_ALERT "proc_read_DbgBuffer: off=%ld count=%d DbgPrintBufferOffset=%lu DbgPrintBufferReadOffset=%lu\n", off, count, DbgPrintBufferOffset, DbgPrintBufferReadOffset); ++ printk(KERN_ALERT "proc_read_DbgBuffer: off=%ld count=%d DbgPrintBufferOffset=%lu DbgPrintBufferReadOffset=%lu\n", off, ++ count, DbgPrintBufferOffset, DbgPrintBufferReadOffset); + + len = DbgPrintBufferOffset - DbgPrintBufferReadOffset; + +@@ -187,9 +184,7 @@ static int LocalPrint(char *Fmt, ...) 
+ + if (DbgPrintBuffer) { + va_start(args, Fmt); +- len += vsnprintf(DbgPrintBuffer + DbgPrintBufferOffset, +- DbgPrintBufferSize - DbgPrintBufferOffset, +- Fmt, args); ++ len += vsnprintf(DbgPrintBuffer + DbgPrintBufferOffset, DbgPrintBufferSize - DbgPrintBufferOffset, Fmt, args); + DbgPrintBufferOffset += len; + } + +@@ -209,8 +204,7 @@ int ___DbgPrint(const char *site, const + if (buf) { + va_start(args, Fmt); + len = snprintf(buf, DBG_BUFFER_SIZE, "[%d] %s ", current->pid, site); +- len += vsnprintf(buf + len, DBG_BUFFER_SIZE - len, Fmt, +- args); ++ len += vsnprintf(buf + len, DBG_BUFFER_SIZE - len, Fmt, args); + if (-1 == len) { + len = DBG_BUFFER_SIZE - 1; + buf[len] = '\0'; +@@ -226,25 +220,18 @@ int ___DbgPrint(const char *site, const + } + + if (DbgPrintBuffer && DbgPrintOn) { +- if ((DbgPrintBufferOffset + len) > +- DbgPrintBufferSize) { ++ if ((DbgPrintBufferOffset + len) > DbgPrintBufferSize) { + offset = DbgPrintBufferOffset; + DbgPrintBufferOffset = 0; +- memset(&DbgPrintBuffer[offset], +- 0, +- DbgPrintBufferSize - +- offset); ++ memset(&DbgPrintBuffer[offset], 0, DbgPrintBufferSize - offset); + } + + mb(); + +- if ((DbgPrintBufferOffset + len) < +- DbgPrintBufferSize) { ++ if ((DbgPrintBufferOffset + len) < DbgPrintBufferSize) { + DbgPrintBufferOffset += len; +- offset = +- DbgPrintBufferOffset - len; +- memcpy(&DbgPrintBuffer[offset], +- buf, len + 1); ++ offset = DbgPrintBufferOffset - len; ++ memcpy(&DbgPrintBuffer[offset], buf, len + 1); + } + } + } +@@ -317,8 +304,7 @@ static void NovfsGregorianDay(struct loc + int leapsToDate; + int lastYear; + int day; +- int MonthOffset[] = +- { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; ++ int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; + + lastYear = tm->tm_year - 1; + +@@ -333,9 +319,7 @@ static void NovfsGregorianDay(struct loc + * + * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be + */ +- if ((tm->tm_year % 4 == 0) && +- ((tm->tm_year % 100 != 0) || (tm->tm_year % 400 == 0)) && +- (tm->tm_mon > 2)) { ++ if ((tm->tm_year % 4 == 0) && ((tm->tm_year % 100 != 0) || (tm->tm_year % 400 == 0)) && (tm->tm_mon > 2)) { + /* + * We are past Feb. 
29 in a leap year + */ +@@ -344,8 +328,7 @@ static void NovfsGregorianDay(struct loc + day = 0; + } + +- day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] + +- tm->tm_mday; ++ day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] + tm->tm_mday; + + tm->tm_wday = day % 7; + } +@@ -388,17 +371,15 @@ static void private_to_tm(int tim, struc + char *ctime_r(time_t * clock, char *buf) + { + struct local_rtc_time tm; +- static char *DAYOFWEEK[] = +- { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; +- static char *MONTHOFYEAR[] = +- { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", +-"Oct", "Nov", "Dec" }; ++ static char *DAYOFWEEK[] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; ++ static char *MONTHOFYEAR[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", ++ "Oct", "Nov", "Dec" ++ }; + + private_to_tm(*clock, &tm); + + sprintf(buf, "%s %s %d %d:%02d:%02d %d", DAYOFWEEK[tm.tm_wday], +- MONTHOFYEAR[tm.tm_mon - 1], tm.tm_mday, tm.tm_hour, tm.tm_min, +- tm.tm_sec, tm.tm_year); ++ MONTHOFYEAR[tm.tm_mon - 1], tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year); + return (buf); + } + +@@ -421,8 +402,7 @@ static void dump(struct dentry *parent, + } + + if (parent) { +- pfunc("starting 0x%p %.*s\n", parent, parent->d_name.len, +- parent->d_name.name); ++ pfunc("starting 0x%p %.*s\n", parent, parent->d_name.len, parent->d_name.name); + if (parent->d_subdirs.next == &parent->d_subdirs) { + pfunc("No children...\n"); + } else { +@@ -434,18 +414,13 @@ static void dump(struct dentry *parent, + while (l) { + p = l->dentry->d_subdirs.next; + while (p != &l->dentry->d_subdirs) { +- d = list_entry(p, struct dentry, +- d_u.d_child); ++ d = list_entry(p, struct dentry, d_u.d_child); + p = p->next; + +- if (d->d_subdirs.next != +- &d->d_subdirs) { +- n = kmalloc(sizeof +- (*n), +- GFP_KERNEL); ++ if (d->d_subdirs.next != &d->d_subdirs) { ++ n = kmalloc(sizeof(*n), GFP_KERNEL); + if (n) { +- n->next = +- l->next; ++ n->next = l->next; + l->next = n; + n->dentry = d; + } +@@ -461,21 +436,11 @@ static void dump(struct dentry *parent, + " d_subdirs: 0x%p\n" + " d_inode: 0x%p\n", + d, path, +- d->d_name. +- len, +- d->d_name. +- name, +- d-> +- d_parent, ++ d->d_name.len, ++ d->d_name.name, ++ d->d_parent, + atomic_read +- (&d-> +- d_count), +- d->d_flags, +- d-> +- d_subdirs. 
+- next, +- d-> +- d_inode); ++ (&d->d_count), d->d_flags, d->d_subdirs.next, d->d_inode); + } + } + } +@@ -484,22 +449,15 @@ static void dump(struct dentry *parent, + l = start; + while (l) { + d = l->dentry; +- path = +- novfs_scope_dget_path(d, buf, +- PATH_LENGTH_BUFFER, +- 1); ++ path = novfs_scope_dget_path(d, buf, PATH_LENGTH_BUFFER, 1); + if (path) { + sd = " (None)"; +- if (&d->d_subdirs != +- d->d_subdirs.next) { ++ if (&d->d_subdirs != d->d_subdirs.next) { + sd = ""; + } + inode_number[0] = '\0'; + if (d->d_inode) { +- sprintf(inode_number, +- " (%lu)", +- d->d_inode-> +- i_ino); ++ sprintf(inode_number, " (%lu)", d->d_inode->i_ino); + } + pfunc("0x%p %s\n" + " d_parent: 0x%p\n" +@@ -509,9 +467,7 @@ static void dump(struct dentry *parent, + " d_inode: 0x%p%s\n", + d, path, d->d_parent, + atomic_read(&d->d_count), +- d->d_flags, +- d->d_subdirs.next, sd, +- d->d_inode, inode_number); ++ d->d_flags, d->d_subdirs.next, sd, d->d_inode, inode_number); + } + + n = l; +@@ -550,8 +506,7 @@ static ssize_t common_read(char *buf, si + + } + +-static ssize_t novfs_profile_read_inode(struct file * file, char *buf, size_t len, +- loff_t * off) ++static ssize_t novfs_profile_read_inode(struct file *file, char *buf, size_t len, loff_t * off) + { + ssize_t retval = 0; + unsigned long offset = *off; +@@ -566,7 +521,6 @@ static ssize_t novfs_profile_read_inode( + novfs_dump_inode(LocalPrint); + } + +- + retval = common_read(buf, len, off); + + if (0 == retval) { +@@ -580,8 +534,7 @@ static ssize_t novfs_profile_read_inode( + + } + +-static ssize_t novfs_profile_dentry_read(struct file * file, char *buf, size_t len, +- loff_t * off) ++static ssize_t novfs_profile_dentry_read(struct file *file, char *buf, size_t len, loff_t * off) + { + ssize_t retval = 0; + unsigned long offset = *off; +@@ -630,18 +583,12 @@ void novfs_profile_init() + dbg_dir = proc_mkdir(MODULE_NAME, NULL); + + if (dbg_dir) { +- dbg_file = create_proc_read_entry("Debug", +- 0600, +- dbg_dir, +- proc_read_DbgBuffer, NULL); ++ dbg_file = create_proc_read_entry("Debug", 0600, dbg_dir, proc_read_DbgBuffer, NULL); + if (dbg_file) { + dbg_file->size = DBGBUFFERSIZE; +- memcpy(&Dbg_proc_file_operations, dbg_file->proc_fops, +- sizeof(struct file_operations)); +- Dbg_proc_file_operations.read = +- User_proc_read_DbgBuffer; +- Dbg_proc_file_operations.write = +- User_proc_write_DbgBuffer; ++ memcpy(&Dbg_proc_file_operations, dbg_file->proc_fops, sizeof(struct file_operations)); ++ Dbg_proc_file_operations.read = User_proc_read_DbgBuffer; ++ Dbg_proc_file_operations.write = User_proc_write_DbgBuffer; + dbg_file->proc_fops = &Dbg_proc_file_operations; + } else { + remove_proc_entry(MODULE_NAME, NULL); +@@ -655,22 +602,16 @@ void novfs_profile_init() + inode_file = create_proc_entry("inode", 0600, dbg_dir); + if (inode_file) { + inode_file->size = 0; +- memcpy(&inode_proc_file_ops, +- inode_file->proc_fops, +- sizeof(struct file_operations)); ++ memcpy(&inode_proc_file_ops, inode_file->proc_fops, sizeof(struct file_operations)); + inode_proc_file_ops.owner = THIS_MODULE; +- inode_proc_file_ops.read = +- novfs_profile_read_inode; ++ inode_proc_file_ops.read = novfs_profile_read_inode; + inode_file->proc_fops = &inode_proc_file_ops; + } + +- dentry_file = create_proc_entry("dentry", +- 0600, dbg_dir); ++ dentry_file = create_proc_entry("dentry", 0600, dbg_dir); + if (dentry_file) { + dentry_file->size = 0; +- memcpy(&dentry_proc_file_ops, +- dentry_file->proc_fops, +- sizeof(struct file_operations)); ++ memcpy(&dentry_proc_file_ops, 
dentry_file->proc_fops, sizeof(struct file_operations)); + dentry_proc_file_ops.owner = THIS_MODULE; + dentry_proc_file_ops.read = novfs_profile_dentry_read; + dentry_file->proc_fops = &dentry_proc_file_ops; +@@ -686,19 +627,14 @@ void novfs_profile_init() + void novfs_profile_exit(void) + { + if (dbg_file) +- DbgPrint("Calling remove_proc_entry(Debug, NULL)\n"), +- remove_proc_entry("Debug", dbg_dir); ++ DbgPrint("Calling remove_proc_entry(Debug, NULL)\n"), remove_proc_entry("Debug", dbg_dir); + if (inode_file) +- DbgPrint("Calling remove_proc_entry(inode, NULL)\n"), +- remove_proc_entry("inode", dbg_dir); ++ DbgPrint("Calling remove_proc_entry(inode, NULL)\n"), remove_proc_entry("inode", dbg_dir); + if (dentry_file) +- DbgPrint("Calling remove_proc_entry(dentry, NULL)\n"), +- remove_proc_entry("dentry", dbg_dir); ++ DbgPrint("Calling remove_proc_entry(dentry, NULL)\n"), remove_proc_entry("dentry", dbg_dir); + + if (dbg_dir && (dbg_dir != novfs_procfs_dir)) { + DbgPrint("Calling remove_proc_entry(%s, NULL)\n", MODULE_NAME); + remove_proc_entry(MODULE_NAME, NULL); + } + } +- +- +--- a/fs/novfs/scope.c ++++ b/fs/novfs/scope.c +@@ -33,7 +33,6 @@ + #define CLEANUP_INTERVAL 10 + #define MAX_USERNAME_LENGTH 32 + +- + static struct list_head Scope_List; + static struct semaphore Scope_Lock; + static struct semaphore Scope_Thread_Delay; +@@ -41,16 +40,14 @@ static int Scope_Thread_Terminate = 0; + static struct timer_list Scope_Timer; + static unsigned int Scope_Hash_Val = 1; + +-static struct novfs_scope_list *Scope_Search4Scope(struct novfs_schandle Id, +- int Session, int Locked) ++static struct novfs_scope_list *Scope_Search4Scope(struct novfs_schandle Id, int Session, int Locked) + { + struct novfs_scope_list *scope, *rscope = NULL; + struct novfs_schandle cur_scope; + struct list_head *sl; + int offset; + +- DbgPrint("Scope_Search4Scope: 0x%p:%p 0x%x 0x%x\n", Id.hTypeId, Id.hId, +- Session, Locked); ++ DbgPrint("Scope_Search4Scope: 0x%p:%p 0x%x 0x%x\n", Id.hTypeId, Id.hId, Session, Locked); + + if (Session) + offset = offsetof(struct novfs_scope_list, SessionId); +@@ -66,7 +63,7 @@ static struct novfs_scope_list *Scope_Se + while (sl != &Scope_List) { + scope = list_entry(sl, struct novfs_scope_list, ScopeList); + +- cur_scope = *(struct novfs_schandle *) ((char *)scope + offset); ++ cur_scope = *(struct novfs_schandle *)((char *)scope + offset); + if (SC_EQUAL(Id, cur_scope)) { + rscope = scope; + break; +@@ -92,8 +89,7 @@ static struct novfs_scope_list *Scope_Fi + + task = current; + +- DbgPrint("Scope_Find_Scope: %d %d %d %d\n", current_uid(), +- current_euid(), current_suid(), current_fsuid()); ++ DbgPrint("Scope_Find_Scope: %d %d %d %d\n", current_uid(), current_euid(), current_suid(), current_fsuid()); + + //scopeId = task->euid; + UID_TO_SCHANDLE(scopeId, current_euid()); +@@ -113,16 +109,11 @@ static struct novfs_scope_list *Scope_Fi + + if (!novfs_daemon_create_sessionId(&scope->SessionId)) { + DbgPrint("Scope_Find_Scope2: %d %d %d %d\n", +- current_uid(), current_euid(), +- current_suid(), current_fsuid()); +- memset(scope->ScopeUserName, 0, +- sizeof(scope->ScopeUserName)); ++ current_uid(), current_euid(), current_suid(), current_fsuid()); ++ memset(scope->ScopeUserName, 0, sizeof(scope->ScopeUserName)); + scope->ScopeUserNameLength = 0; +- novfs_daemon_getpwuid(current_euid(), +- sizeof(scope->ScopeUserName), +- scope->ScopeUserName); +- scope->ScopeUserNameLength = +- strlen(scope->ScopeUserName); ++ novfs_daemon_getpwuid(current_euid(), sizeof(scope->ScopeUserName), 
scope->ScopeUserName); ++ scope->ScopeUserNameLength = strlen(scope->ScopeUserName); + addscope = 1; + } + +@@ -141,27 +132,20 @@ static struct novfs_scope_list *Scope_Fi + scope->SessionId.hTypeId, scope->SessionId.hId, + scope->ScopePid, + scope->ScopeTask, +- scope->ScopeHash, +- scope->ScopeUid, +- scope->ScopeUserNameLength, +- scope->ScopeUserName); ++ scope->ScopeHash, scope->ScopeUid, scope->ScopeUserNameLength, scope->ScopeUserName); + + if (SC_PRESENT(scope->SessionId)) { + down(&Scope_Lock); +- pscope = +- Scope_Search4Scope(scopeId, 0, 1); ++ pscope = Scope_Search4Scope(scopeId, 0, 1); + + if (!pscope) { +- list_add(&scope->ScopeList, +- &Scope_List); ++ list_add(&scope->ScopeList, &Scope_List); + } + up(&Scope_Lock); + + if (pscope) { +- printk +- ("<6>Scope_Find_Scope scope not added because it was already there...\n"); +- novfs_daemon_destroy_sessionId(scope-> +- SessionId); ++ printk("<6>Scope_Find_Scope scope not added because it was already there...\n"); ++ novfs_daemon_destroy_sessionId(scope->SessionId); + kfree(scope); + scope = pscope; + addscope = 0; +@@ -170,7 +154,7 @@ static struct novfs_scope_list *Scope_Fi + kfree(scope); + scope = NULL; + } +- ++ + if (scope && addscope) + novfs_add_to_root(scope->ScopeUserName); + } +@@ -206,7 +190,7 @@ static int Scope_Validate_Scope(struct n + return (retVal); + } + +-uid_t novfs_scope_get_uid(struct novfs_scope_list *scope) ++uid_t novfs_scope_get_uid(struct novfs_scope_list * scope) + { + uid_t uid = 0; + if (!scope) +@@ -231,7 +215,7 @@ char *novfs_scope_get_username(void) + } + + struct novfs_schandle novfs_scope_get_sessionId(struct novfs_scope_list +- *Scope) ++ *Scope) + { + struct novfs_schandle sessionId; + DbgPrint("Scope_Get_SessionId: 0x%p\n", Scope); +@@ -241,12 +225,11 @@ struct novfs_schandle novfs_scope_get_se + + if (Scope && Scope_Validate_Scope(Scope)) + sessionId = Scope->SessionId; +- DbgPrint("Scope_Get_SessionId: return 0x%p:%p\n", sessionId.hTypeId, +- sessionId.hId); ++ DbgPrint("Scope_Get_SessionId: return 0x%p:%p\n", sessionId.hTypeId, sessionId.hId); + return (sessionId); + } + +-struct novfs_scope_list *novfs_get_scope_from_name(struct qstr * Name) ++struct novfs_scope_list *novfs_get_scope_from_name(struct qstr *Name) + { + struct novfs_scope_list *scope, *rscope = NULL; + struct list_head *sl; +@@ -259,9 +242,7 @@ struct novfs_scope_list *novfs_get_scope + while (sl != &Scope_List) { + scope = list_entry(sl, struct novfs_scope_list, ScopeList); + +- if ((Name->len == scope->ScopeUserNameLength) && +- (0 == strncmp(scope->ScopeUserName, Name->name, Name->len))) +- { ++ if ((Name->len == scope->ScopeUserNameLength) && (0 == strncmp(scope->ScopeUserName, Name->name, Name->len))) { + rscope = scope; + break; + } +@@ -274,8 +255,7 @@ struct novfs_scope_list *novfs_get_scope + return (rscope); + } + +-int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, +- uint64_t * TotalEnties, uint64_t * FreeEnties) ++int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties) + { + struct novfs_scope_list *scope; + int retVal = 0; +@@ -296,8 +276,7 @@ int novfs_scope_set_userspace(uint64_t * + return (retVal); + } + +-int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, +- uint64_t * TotalEnties, uint64_t * FreeEnties) ++int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties) + { + struct novfs_scope_list *scope; + int retVal = 0; +@@ -309,8 +288,7 @@ int 
novfs_scope_get_userspace(uint64_t * + td = fd = te = fe = 0; + if (scope) { + +- retVal = +- novfs_daemon_get_userspace(scope->SessionId, &td, &fd, &te, &fe); ++ retVal = novfs_daemon_get_userspace(scope->SessionId, &td, &fd, &te, &fe); + + scope->ScopeUSize = td; + scope->ScopeUFree = fd; +@@ -330,7 +308,7 @@ int novfs_scope_get_userspace(uint64_t * + return (retVal); + } + +-struct novfs_scope_list *novfs_get_scope(struct dentry * Dentry) ++struct novfs_scope_list *novfs_get_scope(struct dentry *Dentry) + { + struct novfs_scope_list *scope = NULL; + char *buf, *path, *cp; +@@ -404,8 +382,7 @@ char *novfs_get_scopeusers(void) + while ((sl != &Scope_List) && (cp < ep)) { + scope = list_entry(sl, struct novfs_scope_list, ScopeList); + +- DbgPrint("Scope_Get_ScopeUsers found 0x%p %s\n", +- scope, scope->ScopeUserName); ++ DbgPrint("Scope_Get_ScopeUsers found 0x%p %s\n", scope, scope->ScopeUserName); + + cp = add_to_list(scope->ScopeUserName, cp, ep); + +@@ -486,8 +463,7 @@ static int Scope_Cleanup_Thread(void *Ar + + if (!rscope) { + list_move(&scope->ScopeList, &cleanup); +- DbgPrint("Scope_Cleanup_Thread: Scope=0x%p\n", +- rscope); ++ DbgPrint("Scope_Cleanup_Thread: Scope=0x%p\n", rscope); + } + } + +@@ -509,10 +485,7 @@ static int Scope_Cleanup_Thread(void *Ar + scope, + scope->ScopeId, + scope->SessionId, +- scope->ScopePid, +- scope->ScopeTask, +- scope->ScopeHash, +- scope->ScopeUid, scope->ScopeUserName); ++ scope->ScopePid, scope->ScopeTask, scope->ScopeHash, scope->ScopeUid, scope->ScopeUserName); + if (!Scope_Search4Scope(scope->SessionId, 1, 0)) { + novfs_remove_from_root(scope->ScopeUserName); + novfs_daemon_destroy_sessionId(scope->SessionId); +@@ -569,10 +542,7 @@ void novfs_scope_cleanup(void) + scope, + scope->ScopeId, + scope->SessionId, +- scope->ScopePid, +- scope->ScopeTask, +- scope->ScopeHash, +- scope->ScopeUid, scope->ScopeUserName); ++ scope->ScopePid, scope->ScopeTask, scope->ScopeHash, scope->ScopeUid, scope->ScopeUserName); + if (!Scope_Search4Scope(scope->SessionId, 1, 1)) { + novfs_remove_from_root(scope->ScopeUserName); + novfs_daemon_destroy_sessionId(scope->SessionId); +@@ -587,8 +557,7 @@ void novfs_scope_cleanup(void) + /* + * Walks the dentry chain building a path. + */ +-char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, +- int Flags) ++char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, int Flags) + { + char *retval = &Buf[Buflen]; + struct dentry *p = Dentry; +@@ -654,5 +623,3 @@ void novfs_scope_exit(void) + printk(KERN_INFO "Scope_Uninit: Exit\n"); + + } +- +- +--- a/fs/novfs/vfs.h ++++ b/fs/novfs/vfs.h +@@ -23,11 +23,10 @@ + + #include "nwcapi.h" + +- + #ifndef XTIER_SCHANDLE + struct novfs_schandle { +- void * hTypeId; +- void * hId; ++ void *hTypeId; ++ void *hId; + + }; + +@@ -46,7 +45,6 @@ struct novfs_schandle { + #define XTIER_SCHANDLE + #endif + +- + /*===[ Manifest constants ]===============================================*/ + #define NOVFS_MAGIC 0x4e574653 + #define MODULE_NAME "novfs" +@@ -191,15 +189,14 @@ struct novfs_data_list { + int rwflag; + }; + +- + extern char *ctime_r(time_t * clock, char *buf); + + /* + * Converts a HANDLE to a u32 type. 
+ */ +-static inline u32 HandletoUint32(void * h) ++static inline u32 HandletoUint32(void *h) + { +- return (u32) ((unsigned long) h); ++ return (u32) ((unsigned long)h); + } + + /* +@@ -207,7 +204,7 @@ static inline u32 HandletoUint32(void * + */ + static inline void *Uint32toHandle(u32 ui32) + { +- return ((void *) (unsigned long) ui32); ++ return ((void *)(unsigned long)ui32); + } + + /* Global variables */ +@@ -219,7 +216,6 @@ extern int novfs_page_cache; + extern char *novfs_current_mnt; + extern int novfs_max_iosize; + +- + /* Global functions */ + extern int novfs_remove_from_root(char *); + extern void novfs_dump_inode(void *pf); +@@ -227,9 +223,9 @@ extern void novfs_dump_inode(void *pf); + extern void novfs_dump(int size, void *dumpptr); + + extern int Queue_Daemon_Command(void *request, unsigned long reqlen, void *data, +- int dlen, void **reply, unsigned long * replen, +- int interruptible); +-extern int novfs_do_login(struct ncl_string * Server, struct ncl_string* Username, struct ncl_string * Password, void **lgnId, struct novfs_schandle *Session); ++ int dlen, void **reply, unsigned long *replen, int interruptible); ++extern int novfs_do_login(struct ncl_string *Server, struct ncl_string *Username, struct ncl_string *Password, void **lgnId, ++ struct novfs_schandle *Session); + + extern int novfs_proc_init(void); + extern void novfs_proc_exit(void); +@@ -241,111 +237,71 @@ extern void novfs_daemon_queue_init(void + extern void novfs_daemon_queue_exit(void); + extern int novfs_daemon_logout(struct qstr *Server, struct novfs_schandle *Session); + extern int novfs_daemon_set_mnt_point(char *Path); +-extern int novfs_daemon_create_sessionId(struct novfs_schandle * SessionId); ++extern int novfs_daemon_create_sessionId(struct novfs_schandle *SessionId); + extern int novfs_daemon_destroy_sessionId(struct novfs_schandle SessionId); + extern int novfs_daemon_getpwuid(uid_t uid, int unamelen, char *uname); + extern int novfs_daemon_get_userspace(struct novfs_schandle SessionId, +- uint64_t * TotalSize, uint64_t * TotalFree, +- uint64_t * TotalDirectoryEnties, +- uint64_t * FreeDirectoryEnties); ++ uint64_t * TotalSize, uint64_t * TotalFree, ++ uint64_t * TotalDirectoryEnties, uint64_t * FreeDirectoryEnties); + extern int novfs_daemon_debug_cmd_send(char *Command); +-extern ssize_t novfs_daemon_recv_reply(struct file *file, +- const char *buf, size_t nbytes, loff_t * ppos); +-extern ssize_t novfs_daemon_cmd_send(struct file *file, char *buf, +- size_t len, loff_t * off); +-extern int novfs_daemon_ioctl(struct inode *inode, struct file *file, +- unsigned int cmd, unsigned long arg); ++extern ssize_t novfs_daemon_recv_reply(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); ++extern ssize_t novfs_daemon_cmd_send(struct file *file, char *buf, size_t len, loff_t * off); ++extern int novfs_daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); + extern int novfs_daemon_lib_close(struct inode *inode, struct file *file); +-extern int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, +- unsigned int cmd, unsigned long arg); ++extern int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); + extern int novfs_daemon_lib_open(struct inode *inode, struct file *file); +-extern ssize_t novfs_daemon_lib_read(struct file *file, char *buf, +- size_t len, loff_t * off); +-extern ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, +- size_t len, loff_t * off); +-extern loff_t 
novfs_daemon_lib_llseek(struct file *file, loff_t offset, +- int origin); ++extern ssize_t novfs_daemon_lib_read(struct file *file, char *buf, size_t len, loff_t * off); ++extern ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, size_t len, loff_t * off); ++extern loff_t novfs_daemon_lib_llseek(struct file *file, loff_t offset, int origin); + extern int novfs_daemon_open_control(struct inode *Inode, struct file *File); + extern int novfs_daemon_close_control(struct inode *Inode, struct file *File); + extern int novfs_daemon_getversion(char *Buf, int Length); + +- + /* + * file.c functions + */ + extern int novfs_verify_file(struct qstr *Path, struct novfs_schandle SessionId); + extern int novfs_get_alltrees(struct dentry *parent); +-extern int novfs_get_servers(unsigned char **ServerList, +- struct novfs_schandle SessionId); +-extern int novfs_get_vols(struct qstr *Server, +- unsigned char **VolumeList, struct novfs_schandle SessionId); +-extern int novfs_get_file_info(unsigned char *Path, +- struct novfs_entry_info *Info, struct novfs_schandle SessionId); ++extern int novfs_get_servers(unsigned char **ServerList, struct novfs_schandle SessionId); ++extern int novfs_get_vols(struct qstr *Server, unsigned char **VolumeList, struct novfs_schandle SessionId); ++extern int novfs_get_file_info(unsigned char *Path, struct novfs_entry_info *Info, struct novfs_schandle SessionId); + extern int novfs_getx_file_info(char *Path, const char *Name, +- char *buffer, ssize_t buffer_size, ssize_t *dataLen, +- struct novfs_schandle SessionId); +-extern int novfs_listx_file_info(char *Path, char *buffer, +- ssize_t buffer_size, ssize_t *dataLen, +- struct novfs_schandle SessionId); ++ char *buffer, ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId); ++extern int novfs_listx_file_info(char *Path, char *buffer, ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId); + extern int novfs_setx_file_info(char *Path, const char *Name, const void *Value, +- unsigned long valueLen, +- unsigned long *bytesWritten, int flags, +- struct novfs_schandle SessionId); ++ unsigned long valueLen, unsigned long *bytesWritten, int flags, struct novfs_schandle SessionId); + + extern int novfs_get_dir_listex(unsigned char *Path, void **EnumHandle, +- int *Count, struct novfs_entry_info **Info, +- struct novfs_schandle SessionId); ++ int *Count, struct novfs_entry_info **Info, struct novfs_schandle SessionId); + extern int novfs_open_file(unsigned char *Path, int Flags, +- struct novfs_entry_info * Info, void **Handle, +- struct novfs_schandle SessionId); +-extern int novfs_create(unsigned char *Path, int DirectoryFlag, +- struct novfs_schandle SessionId); +-extern int novfs_close_file(void * Handle, struct novfs_schandle SessionId); +-extern int novfs_read_file(void * Handle, unsigned char *Buffer, +- size_t * Bytes, loff_t * Offset, +- struct novfs_schandle SessionId); +-extern int novfs_read_pages(void * Handle, struct novfs_data_list *DList, +- int DList_Cnt, size_t * Bytes, loff_t * Offset, +- struct novfs_schandle SessionId); +-extern int novfs_write_file(void * Handle, unsigned char *Buffer, +- size_t * Bytes, loff_t * Offset, +- struct novfs_schandle SessionId); +-extern int novfs_write_page(void * Handle, struct page *Page, +- struct novfs_schandle SessionId); +-extern int novfs_write_pages(void * Handle, struct novfs_data_list *DList, +- int DList_Cnt, size_t Bytes, loff_t Offset, +- struct novfs_schandle SessionId); +-extern int novfs_delete(unsigned char *Path, 
int DirectoryFlag, +- struct novfs_schandle SessionId); +-extern int novfs_trunc(unsigned char *Path, int PathLen, +- struct novfs_schandle SessionId); +-extern int novfs_trunc_ex(void * Handle, loff_t Offset, +- struct novfs_schandle SessionId); ++ struct novfs_entry_info *Info, void **Handle, struct novfs_schandle SessionId); ++extern int novfs_create(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId); ++extern int novfs_close_file(void *Handle, struct novfs_schandle SessionId); ++extern int novfs_read_file(void *Handle, unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId); ++extern int novfs_read_pages(void *Handle, struct novfs_data_list *DList, ++ int DList_Cnt, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId); ++extern int novfs_write_file(void *Handle, unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId); ++extern int novfs_write_page(void *Handle, struct page *Page, struct novfs_schandle SessionId); ++extern int novfs_write_pages(void *Handle, struct novfs_data_list *DList, ++ int DList_Cnt, size_t Bytes, loff_t Offset, struct novfs_schandle SessionId); ++extern int novfs_delete(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId); ++extern int novfs_trunc(unsigned char *Path, int PathLen, struct novfs_schandle SessionId); ++extern int novfs_trunc_ex(void *Handle, loff_t Offset, struct novfs_schandle SessionId); + extern int novfs_rename_file(int DirectoryFlag, unsigned char *OldName, +- int OldLen, unsigned char *NewName, int NewLen, +- struct novfs_schandle SessionId); +-extern int novfs_set_attr(unsigned char *Path, struct iattr *Attr, +- struct novfs_schandle SessionId); +-extern int novfs_get_file_cache_flag(unsigned char * Path, +- struct novfs_schandle SessionId); +-extern int novfs_set_file_lock(struct novfs_schandle SessionId, void * fhandle, +- unsigned char fl_type, loff_t fl_start, +- loff_t len); +- +-extern struct inode *novfs_get_inode(struct super_block *sb, int mode, +- int dev, uid_t uid, ino_t ino, struct qstr *name); +-extern int novfs_read_stream(void * ConnHandle, unsigned char * Handle, +- unsigned char * Buffer, size_t * Bytes, loff_t * Offset, +- int User, struct novfs_schandle SessionId); +-extern int novfs_write_stream(void * ConnHandle, unsigned char * Handle, +- unsigned char * Buffer, size_t * Bytes, loff_t * Offset, +- struct novfs_schandle SessionId); +-extern int novfs_close_stream(void * ConnHandle, unsigned char * Handle, +- struct novfs_schandle SessionId); ++ int OldLen, unsigned char *NewName, int NewLen, struct novfs_schandle SessionId); ++extern int novfs_set_attr(unsigned char *Path, struct iattr *Attr, struct novfs_schandle SessionId); ++extern int novfs_get_file_cache_flag(unsigned char *Path, struct novfs_schandle SessionId); ++extern int novfs_set_file_lock(struct novfs_schandle SessionId, void *fhandle, unsigned char fl_type, loff_t fl_start, loff_t len); ++ ++extern struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, uid_t uid, ino_t ino, struct qstr *name); ++extern int novfs_read_stream(void *ConnHandle, unsigned char *Handle, ++ unsigned char *Buffer, size_t * Bytes, loff_t * Offset, int User, struct novfs_schandle SessionId); ++extern int novfs_write_stream(void *ConnHandle, unsigned char *Handle, ++ unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId); ++extern int novfs_close_stream(void *ConnHandle, unsigned char *Handle, struct novfs_schandle 
SessionId); + + extern int novfs_add_to_root(char *); +-extern int novfs_end_directory_enumerate(void *EnumHandle, +- struct novfs_schandle SessionId); ++extern int novfs_end_directory_enumerate(void *EnumHandle, struct novfs_schandle SessionId); + + /* + * scope.c functions +@@ -355,14 +311,11 @@ extern void novfs_scope_exit(void); + extern void *novfs_scope_lookup(void); + extern uid_t novfs_scope_get_uid(struct novfs_scope_list *); + extern struct novfs_schandle novfs_scope_get_sessionId(struct +- novfs_scope_list *); ++ novfs_scope_list *); + extern char *novfs_get_scopeusers(void); +-extern int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, +- uint64_t * TotalEnties, uint64_t * FreeEnties); +-extern int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, +- uint64_t * TotalEnties, uint64_t * FreeEnties); +-extern char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, +- unsigned int Buflen, int Flags); ++extern int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties); ++extern int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties); ++extern char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, int Flags); + extern void novfs_scope_cleanup(void); + extern struct novfs_scope_list *novfs_get_scope_from_name(struct qstr *); + extern struct novfs_scope_list *novfs_get_scope(struct dentry *); +@@ -382,73 +335,38 @@ extern void novfs_profile_exit(void); + /* + * nwcapi.c functions + */ +-extern int novfs_auth_conn(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_conn_close(struct novfs_xplat *pdata, +- void **Handle, struct novfs_schandle Session); +-extern int novfs_get_conn_info(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_set_conn_info(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_get_daemon_ver(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_get_id_info(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_license_conn(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_login_id(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_logout_id(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_open_conn_by_addr(struct novfs_xplat *pdata, +- void **Handle, struct novfs_schandle Session); +-extern int novfs_open_conn_by_name(struct novfs_xplat *pdata, +- void **Handle, struct novfs_schandle Session); +-extern int novfs_open_conn_by_ref(struct novfs_xplat *pdata, +- void **Handle, struct novfs_schandle Session); +-extern int novfs_query_feature(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_raw_send(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_scan_conn_info(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_sys_conn_close(struct novfs_xplat *pdata, +- unsigned long *Handle, struct novfs_schandle Session); +-extern int novfs_unauthenticate(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_unlicense_conn(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_change_auth_key(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_enum_ids(struct novfs_xplat *pdata, +- struct 
novfs_schandle Session); +-extern int novfs_get_default_ctx(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_set_default_ctx(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_set_pri_conn(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_get_pri_conn(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_set_map_drive(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_unmap_drive(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_enum_drives(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_get_bcast_msg(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_set_key_value(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +-extern int novfs_verify_key_value(struct novfs_xplat *pdata, +- struct novfs_schandle Session); +- +- +-#endif /* __NOVFS_H */ ++extern int novfs_auth_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_conn_close(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); ++extern int novfs_get_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_set_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_get_daemon_ver(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_get_id_info(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_license_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_login_id(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_logout_id(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); ++extern int novfs_open_conn_by_name(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); ++extern int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); ++extern int novfs_query_feature(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_raw_send(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_scan_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_sys_conn_close(struct novfs_xplat *pdata, unsigned long *Handle, struct novfs_schandle Session); ++extern int novfs_unauthenticate(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_unlicense_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_change_auth_key(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_enum_ids(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_get_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int 
novfs_set_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_set_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_get_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_enum_drives(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_get_bcast_msg(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_set_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session); ++extern int novfs_verify_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session); + ++#endif /* __NOVFS_H */ diff --git a/patches.fixes/novfs-overflow-fixes b/patches.fixes/novfs-overflow-fixes new file mode 100644 index 0000000..ff696c1 --- /dev/null +++ b/patches.fixes/novfs-overflow-fixes @@ -0,0 +1,1720 @@ +From: Sankar P +Subject: novfs: security: Add buffer overflow, integer wraparound fixes +Patch-mainline: no +References: bnc#594362 + +Security fixes that help in addressing buffer overflows, limiting +the amount of data that can be copied from user-space, etc. + +Signed-off-by: Sankar P +Signed-off-by: Marcus Meissner +Signed-off-by: Sebastian Krahmer +--- + fs/novfs/daemon.c | 36 +- + fs/novfs/file.c | 4 + fs/novfs/nwcapi.c | 787 +++++++++++++++++++++++++++--------------------------- + fs/novfs/nwcapi.h | 12 + 4 files changed, 439 insertions(+), 400 deletions(-) + +--- a/fs/novfs/daemon.c ++++ b/fs/novfs/daemon.c +@@ -811,6 +811,9 @@ static int daemon_login(struct novfs_log + struct ncl_string password; + + if (!copy_from_user(&lLogin, Login, sizeof(lLogin))) { ++ if (lLogin.Server.length > MAX_SERVER_NAME_LENGTH || lLogin.UserName.length > MAX_NAME_LEN || ++ lLogin.Password.length > MAX_PASSWORD_LENGTH) ++ return -EINVAL; + server.buffer = kmalloc(lLogin.Server.length, GFP_KERNEL); + if (server.buffer) { + server.len = lLogin.Server.length; +@@ -857,6 +860,8 @@ static int daemon_logout(struct novfs_lo + + if (copy_from_user(&lLogout, Logout, sizeof(lLogout))) + return -EFAULT; ++ if (lLogout.Server.length > MAX_SERVER_NAME_LENGTH) ++ return -EINVAL; + server.name = kmalloc(lLogout.Server.length, GFP_KERNEL); + if (!server.name) + return -ENOMEM; +@@ -1102,6 +1107,8 @@ int novfs_daemon_ioctl(struct inode *ino + char *buf; + io.length = 0; + cpylen = copy_from_user(&io, (char *)arg, sizeof(io)); ++ if (io.length <= 0 || io.length > 1024) ++ return -EINVAL; + if (io.length) { + buf = kmalloc(io.length + 1, GFP_KERNEL); + if (buf) { +@@ -1453,6 +1460,8 @@ int novfs_daemon_lib_ioctl(struct inode + cpylen = + copy_from_user(&io, (void *)arg, + sizeof(io)); ++ if (io.length <= 0 || io.length > 1024) ++ return -EINVAL; + if (io.length) { + buf = + kmalloc(io.length + 1, +@@ -1478,9 +1487,7 @@ int novfs_daemon_lib_ioctl(struct inode + cpylen = + copy_from_user(&data, (void *)arg, + sizeof(data)); +- retCode = +- ((data. 
+- xfunction & 0x0000FFFF) | 0xCC000000); ++ retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000); + + switch (data.xfunction) { + case NWC_OPEN_CONN_BY_NAME: +@@ -1815,8 +1822,7 @@ static int NwdConvertLocalHandle(struct + //sgled memcpy(lh.NwWareHandle, resource->handle, sizeof(resource->handle)); + memcpy(lh.NetWareHandle, resource->handle, sizeof(resource->handle)); //sgled + if (pdata->repLen >= sizeof(struct nwc_convert_local_handle)) { +- cpylen = +- copy_to_user(pdata->repData, &lh, ++ cpylen = copy_to_user(pdata->repData, &lh, + sizeof(struct nwc_convert_local_handle)); + retVal = 0; + } else { +@@ -1838,6 +1844,8 @@ static int NwdGetMountPath(struct novfs_ + unsigned long cpylen; + struct nwc_get_mount_path mp; + ++ if (pdata->reqLen != sizeof(mp)) ++ return -EINVAL; + cpylen = copy_from_user(&mp, pdata->reqData, pdata->reqLen); + + if (novfs_current_mnt) { +@@ -1878,21 +1886,19 @@ static int set_map_drive(struct novfs_xp + return retVal; + if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) + return -EFAULT; +- drivemap = +- kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, ++ if (symInfo.linkOffsetLength > MAX_NAME_LEN) ++ return -EINVAL; ++ drivemap = kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, + GFP_KERNEL); + if (!drivemap) + return -ENOMEM; + + path = (char *)pdata->reqData; + path += symInfo.linkOffset; +- cpylen = +- copy_from_user(drivemap->name, path, +- symInfo.linkOffsetLength); ++ cpylen = copy_from_user(drivemap->name, path, symInfo.linkOffsetLength); + + drivemap->session = Session; +- drivemap->hash = +- full_name_hash(drivemap->name, ++ drivemap->hash = full_name_hash(drivemap->name, + symInfo.linkOffsetLength - 1); + drivemap->namelen = symInfo.linkOffsetLength - 1; + DbgPrint("hash=0x%lx path=%s", drivemap->hash, drivemap->name); +@@ -1910,8 +1916,7 @@ static int set_map_drive(struct novfs_xp + dm, dm->hash, dm->namelen, dm->name); + + if (drivemap->hash == dm->hash) { +- if (0 == +- strcmp(dm->name, drivemap->name)) { ++ if (0 == strcmp(dm->name, drivemap->name)) { + dm = NULL; + break; + } +@@ -1950,7 +1955,8 @@ static int unmap_drive(struct novfs_xpla + return retVal; + if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) + return -EFAULT; +- ++ if (symInfo.linkLen > MAX_NAME_LEN || symInfo.linkLen == 0) ++ return -EINVAL; + path = kmalloc(symInfo.linkLen, GFP_KERNEL); + if (!path) + return -ENOMEM; +--- a/fs/novfs/file.c ++++ b/fs/novfs/file.c +@@ -1077,7 +1077,7 @@ int novfs_write_file(void *Handle, unsig + + DbgPrint("cmdlen=%ld len=%ld", cmdlen, len); + +- if ((cmdlen + len) > novfs_max_iosize) { ++ if (len > novfs_max_iosize - cmdlen) { + len = novfs_max_iosize - cmdlen; + len = (len / PAGE_SIZE) * PAGE_SIZE; + } +@@ -1449,6 +1449,8 @@ int novfs_write_stream(void *ConnHandle, + size_t len; + + len = *Bytes; ++ if (len > novfs_max_iosize) ++ len = novfs_max_iosize; + cmdlen = len + offsetof(struct novfs_write_stream_request, data); + *Bytes = 0; + +--- a/fs/novfs/nwcapi.c ++++ b/fs/novfs/nwcapi.c +@@ -37,16 +37,20 @@ static void GetConnData(struct nwc_get_c + /*++======================================================================*/ + int novfs_open_conn_by_name(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwd_open_conn_by_name *openConn, *connReply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwd_open_conn_by_name 
*openConn = NULL, *connReply = NULL; + struct nwc_open_conn_by_name ocbn; + int retCode = 0; +- unsigned long cmdlen, datalen, replylen, cpylen; +- char *data; ++ unsigned long cmdlen, datalen, replylen, cpylen, pnamelen, stypelen; ++ char *data = NULL; + + cpylen = copy_from_user(&ocbn, pdata->reqData, sizeof(ocbn)); +- datalen = sizeof(*openConn) + strlen_user(ocbn.pName->pString) + strlen_user(ocbn.pServiceType); ++ pnamelen = strlen_user(ocbn.pName->pString); ++ stypelen = strlen_user(ocbn.pServiceType); ++ if (pnamelen > MAX_NAME_LEN || stypelen > NW_MAX_SERVICE_TYPE_LEN) ++ return -EINVAL; ++ datalen = sizeof(*openConn) + pnamelen + stypelen; + cmdlen = datalen + sizeof(*cmd); + cmd = kmalloc(cmdlen, GFP_KERNEL); + +@@ -61,8 +65,8 @@ int novfs_open_conn_by_name(struct novfs + cmd->dataLen = datalen; + openConn = (struct nwd_open_conn_by_name *) cmd->data; + +- openConn->nameLen = strlen_user(ocbn.pName->pString); +- openConn->serviceLen = strlen_user(ocbn.pServiceType); ++ openConn->nameLen = pnamelen; ++ openConn->serviceLen = stypelen; + openConn->uConnFlags = ocbn.uConnFlags; + openConn->ConnHandle = Uint32toHandle(ocbn.ConnHandle); + data = (char *)openConn; +@@ -70,13 +74,9 @@ int novfs_open_conn_by_name(struct novfs + openConn->oName = sizeof(*openConn); + + openConn->oServiceType = openConn->oName + openConn->nameLen; +- cpylen = +- copy_from_user(data, ocbn.pName->pString, +- openConn->nameLen); ++ cpylen = copy_from_user(data, ocbn.pName->pString, openConn->nameLen); + data += openConn->nameLen; +- cpylen = +- copy_from_user(data, ocbn.pServiceType, +- openConn->serviceLen); ++ cpylen = copy_from_user(data, ocbn.pServiceType, openConn->serviceLen); + + retCode = + Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, +@@ -109,9 +109,9 @@ int novfs_open_conn_by_name(struct novfs + + int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwd_open_conn_by_addr *openConn, *connReply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwd_open_conn_by_addr *openConn = NULL, *connReply = NULL; + struct nwc_open_conn_by_addr ocba; + struct nwc_tran_addr tranAddr; + int retCode = 0; +@@ -133,8 +133,11 @@ int novfs_open_conn_by_addr(struct novfs + cmd->dataLen = datalen; + openConn = (struct nwd_open_conn_by_addr *) cmd->data; + +- cpylen = +- copy_from_user(&tranAddr, ocba.pTranAddr, sizeof(tranAddr)); ++ cpylen = copy_from_user(&tranAddr, ocba.pTranAddr, sizeof(tranAddr)); ++ if (tranAddr.uAddressLength > sizeof(addr)) { ++ retCode = -EINVAL; ++ goto out; ++ } + + DbgPrint("tranAddr"); + novfs_dump(sizeof(tranAddr), &tranAddr); +@@ -143,17 +146,14 @@ int novfs_open_conn_by_addr(struct novfs + openConn->TranAddr.uAddressLength = tranAddr.uAddressLength; + memset(addr, 0xcc, sizeof(addr) - 1); + +- cpylen = +- copy_from_user(addr, tranAddr.puAddress, +- tranAddr.uAddressLength); ++ cpylen = copy_from_user(addr, tranAddr.puAddress, tranAddr.uAddressLength); + + DbgPrint("addr"); + novfs_dump(sizeof(addr), addr); + + openConn->TranAddr.oAddress = *(unsigned int *) (&addr[2]); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, + (void **)&reply, &replylen, + INTERRUPTIBLE); + if (reply) { +@@ -178,17 +178,17 @@ int novfs_open_conn_by_addr(struct novfs + kfree(reply); + } + ++out: + kfree(cmd); +- + return 
(retCode); + + } + + int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwd_open_conn_by_ref *openConn; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwd_open_conn_by_ref *openConn = NULL; + struct nwc_open_conn_by_ref ocbr; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; +@@ -207,8 +207,7 @@ int novfs_open_conn_by_ref(struct novfs_ + cmd->dataLen = datalen; + openConn = (struct nwd_open_conn_by_ref *) cmd->data; + +- openConn->uConnReference = +- (void *) (unsigned long) ocbr.uConnReference; ++ openConn->uConnReference = (void *) (unsigned long) ocbr.uConnReference; + openConn->uConnFlags = ocbr.uConnFlags; + + retCode = +@@ -225,13 +224,10 @@ int novfs_open_conn_by_ref(struct novfs_ + /* + * we got valid data. + */ +- ocbr.ConnHandle = +- HandletoUint32(openConn->ConnHandle); ++ ocbr.ConnHandle = HandletoUint32(openConn->ConnHandle); + *Handle = openConn->ConnHandle; + +- cpylen = +- copy_to_user(pdata->reqData, &ocbr, +- sizeof(ocbr)); ++ cpylen = copy_to_user(pdata->reqData, &ocbr, sizeof(ocbr)); + DbgPrint("New Conn Handle = %X", openConn->ConnHandle); + } + kfree(reply); +@@ -245,59 +241,63 @@ int novfs_open_conn_by_ref(struct novfs_ + int novfs_raw_send(struct novfs_xplat *pdata, struct novfs_schandle Session) + { + struct nwc_request xRequest; +- struct nwc_frag *frag, *cFrag, *reqFrag; +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- int retCode = -ENOMEM; ++ struct nwc_frag *frag = NULL, *cFrag = NULL, *reqFrag = NULL; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ int retCode = 0; + unsigned long cmdlen, datalen, replylen, cpylen, totalLen; + unsigned int x; +- struct nwd_ncp_req *ncpData; +- struct nwd_ncp_rep *ncpReply; +- unsigned char *reqData; ++ struct nwd_ncp_req *ncpData = NULL; ++ struct nwd_ncp_rep *ncpReply = NULL; ++ unsigned char *reqData = NULL; + unsigned long actualReplyLength = 0; + + DbgPrint("[XPLAT] Process Raw NCP Send"); + cpylen = copy_from_user(&xRequest, pdata->reqData, sizeof(xRequest)); + ++ if (xRequest.uNumReplyFrags > MAX_NUM_REPLIES || xRequest.uNumReplyFrags < MIN_NUM_REPLIES || ++ xRequest.uNumRequestFrags > MAX_NUM_REQUESTS || xRequest.uNumRequestFrags < MIN_NUM_REQUESTS) ++ return -EINVAL; ++ + /* + * Figure out the length of the request + */ +- frag = +- kmalloc(xRequest.uNumReplyFrags * sizeof(struct nwc_frag), GFP_KERNEL); ++ frag = kmalloc(xRequest.uNumReplyFrags * sizeof(struct nwc_frag), GFP_KERNEL); + +- DbgPrint("[XPLAT RawNCP] - Reply Frag Count 0x%X", +- xRequest.uNumReplyFrags); ++ DbgPrint("[XPLAT RawNCP] - Reply Frag Count 0x%X", xRequest.uNumReplyFrags); + + if (!frag) +- return (retCode); ++ return -ENOMEM; + +- cpylen = +- copy_from_user(frag, xRequest.pReplyFrags, +- xRequest.uNumReplyFrags * sizeof(struct nwc_frag)); ++ cpylen = copy_from_user(frag, xRequest.pReplyFrags, xRequest.uNumReplyFrags * sizeof(struct nwc_frag)); + totalLen = 0; + + cFrag = frag; + for (x = 0; x < xRequest.uNumReplyFrags; x++) { + DbgPrint("[XPLAT - RawNCP] - Frag Len = %d", cFrag->uLength); ++ if (cFrag->uLength > MAX_FRAG_SIZE || cFrag->uLength < MIN_FRAG_SIZE) { ++ retCode = -EINVAL; ++ goto out; ++ } + totalLen += cFrag->uLength; + cFrag++; + } + + DbgPrint("[XPLAT - RawNCP] - totalLen = %d", totalLen); + datalen = 0; +- reqFrag = 
+- kmalloc(xRequest.uNumRequestFrags * sizeof(struct nwc_frag), +- GFP_KERNEL); ++ reqFrag = kmalloc(xRequest.uNumRequestFrags * sizeof(struct nwc_frag), GFP_KERNEL); + if (!reqFrag) { +- kfree(frag); +- return (retCode); ++ retCode = -ENOMEM; ++ goto out; + } + +- cpylen = +- copy_from_user(reqFrag, xRequest.pRequestFrags, +- xRequest.uNumRequestFrags * sizeof(struct nwc_frag)); ++ cpylen = copy_from_user(reqFrag, xRequest.pRequestFrags, xRequest.uNumRequestFrags * sizeof(struct nwc_frag)); + cFrag = reqFrag; + for (x = 0; x < xRequest.uNumRequestFrags; x++) { ++ if (cFrag->uLength > MAX_FRAG_SIZE || cFrag->uLength < MIN_FRAG_SIZE) { ++ retCode = -EINVAL; ++ goto out; ++ } + datalen += cFrag->uLength; + cFrag++; + } +@@ -311,8 +311,10 @@ int novfs_raw_send(struct novfs_xplat *p + DbgPrint("[XPLAT RawNCP] - Total Command Data Len = %x", cmdlen); + + cmd = kmalloc(cmdlen, GFP_KERNEL); +- if (!cmd) +- return -ENOMEM; ++ if (!cmd) { ++ retCode = -ENOMEM; ++ goto out; ++ } + + cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; +@@ -333,9 +335,7 @@ int novfs_raw_send(struct novfs_xplat *p + cFrag = reqFrag; + + for (x = 0; x < xRequest.uNumRequestFrags; x++) { +- cpylen = +- copy_from_user(reqData, cFrag->pData, +- cFrag->uLength); ++ cpylen = copy_from_user(reqData, cFrag->pData, cFrag->uLength); + reqData += cFrag->uLength; + cFrag++; + } +@@ -369,12 +369,9 @@ int novfs_raw_send(struct novfs_xplat *p + DbgPrint("RawNCP - Copy Frag %d: 0x%X", x, + cFrag->uLength); + +- datalen = +- min((unsigned long) cFrag->uLength, totalLen); ++ datalen = min((unsigned long) cFrag->uLength, totalLen); + +- cpylen = +- copy_to_user(cFrag->pData, reqData, +- datalen); ++ cpylen = copy_to_user(cFrag->pData, reqData, datalen); + totalLen -= datalen; + reqData += datalen; + actualReplyLength += datalen; +@@ -387,10 +384,12 @@ int novfs_raw_send(struct novfs_xplat *p + retCode = -EIO; + } + +- kfree(cmd); ++ + xRequest.uActualReplyLength = actualReplyLength; + cpylen = copy_to_user(pdata->reqData, &xRequest, sizeof(xRequest)); + ++out: ++ kfree(cmd); + kfree(reqFrag); + kfree(frag); + +@@ -399,10 +398,10 @@ int novfs_raw_send(struct novfs_xplat *p + + int novfs_conn_close(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_close_conn cc; +- struct nwd_close_conn *nwdClose; ++ struct nwd_close_conn *nwdClose = NULL; + int retCode = 0; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -439,10 +438,10 @@ int novfs_conn_close(struct novfs_xplat + + int novfs_sys_conn_close(struct novfs_xplat *pdata, unsigned long *Handle, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_close_conn cc; +- struct nwd_close_conn *nwdClose; ++ struct nwd_close_conn *nwdClose = NULL; + unsigned int retCode = 0; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -479,7 +478,7 @@ int novfs_sys_conn_close(struct novfs_xp + + int novfs_login_id(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct nwc_login_id lgn, *plgn; ++ struct nwc_login_id lgn, *plgn = NULL; + int retCode = -ENOMEM; + struct ncl_string server; + struct ncl_string username; +@@ -487,6 +486,11 @@ int novfs_login_id(struct 
novfs_xplat *p + unsigned long cpylen; + struct nwc_string nwcStr; + ++ ++ memset(&server, 0, sizeof(server)); ++ memset(&username, 0, sizeof(username)); ++ memset(&password, 0, sizeof(password)); ++ + cpylen = copy_from_user(&lgn, pdata->reqData, sizeof(lgn)); + + DbgPrint(""); +@@ -496,6 +500,9 @@ int novfs_login_id(struct novfs_xplat *p + DbgPrint("DomainName\n"); + novfs_dump(sizeof(nwcStr), &nwcStr); + ++ if (nwcStr.DataLen > MAX_NAME_LEN) ++ return -EINVAL; ++ + if ((server.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { + server.type = nwcStr.DataType; + server.len = nwcStr.DataLen; +@@ -505,8 +512,11 @@ int novfs_login_id(struct novfs_xplat *p + + cpylen = copy_from_user(&nwcStr, lgn.pObjectName, sizeof(nwcStr)); + DbgPrint("ObjectName"); ++ if (nwcStr.DataLen > MAX_OBJECT_NAME_LENGTH) { ++ retCode = -EINVAL; ++ goto out; ++ } + novfs_dump(sizeof(nwcStr), &nwcStr); +- + if ((username.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { + username.type = nwcStr.DataType; + username.len = nwcStr.DataLen; +@@ -516,6 +526,10 @@ int novfs_login_id(struct novfs_xplat *p + + cpylen = copy_from_user(&nwcStr, lgn.pPassword, sizeof(nwcStr)); + DbgPrint("Password"); ++ if (nwcStr.DataLen > MAX_PASSWORD_LENGTH) { ++ retCode = -EINVAL; ++ goto out; ++ } + novfs_dump(sizeof(nwcStr), &nwcStr); + + if ((password.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { +@@ -531,24 +545,26 @@ int novfs_login_id(struct novfs_xplat *p + cpylen = copy_to_user(&plgn->AuthenticationId, &lgn.AuthenticationId, sizeof(plgn->AuthenticationId)); + } + memset(password.buffer, 0, password.len); +- kfree(password.buffer); ++ + } + } + memset(username.buffer, 0, username.len); +- kfree(username.buffer); + } + } +- kfree(server.buffer); + } ++out: ++ kfree(password.buffer); ++ kfree(username.buffer); ++ kfree(server.buffer); + return (retCode); + } + + int novfs_auth_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) + { + struct nwc_auth_with_id pauth; +- struct nwc_auth_wid *pDauth; +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct nwc_auth_wid *pDauth = NULL; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -583,10 +599,10 @@ int novfs_auth_conn(struct novfs_xplat * + + int novfs_license_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_license_conn lisc; +- struct nwc_lisc_id * pDLisc; ++ struct nwc_lisc_id * pDLisc = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -621,9 +637,9 @@ int novfs_license_conn(struct novfs_xpla + + int novfs_logout_id(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwc_lo_id logout, *pDLogout; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwc_lo_id logout, *pDLogout = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -659,9 +675,9 @@ int novfs_logout_id(struct novfs_xplat * + + int novfs_unlicense_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwc_unlic_conn 
*pUconn, ulc; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwc_unlic_conn *pUconn = NULL, ulc; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -697,9 +713,9 @@ int novfs_unlicense_conn(struct novfs_xp + + int novfs_unauthenticate(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwc_unauthenticate auth, *pDAuth; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwc_unauthenticate auth, *pDAuth = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -736,10 +752,10 @@ int novfs_unauthenticate(struct novfs_xp + + int novfs_get_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_get_conn_info connInfo; +- struct nwd_conn_info *pDConnInfo; ++ struct nwd_conn_info *pDConnInfo = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, replylen, cpylen; + +@@ -751,6 +767,11 @@ int novfs_get_conn_info(struct novfs_xpl + if (!cmd) + return -ENOMEM; + ++ if (connInfo.uInfoLength > MAX_INFO_LEN) { ++ retCode = -EINVAL; ++ goto out; ++ } ++ + cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; + cmd->Command.SessionId = Session; +@@ -775,6 +796,7 @@ int novfs_get_conn_info(struct novfs_xpl + + kfree(reply); + } ++out: + kfree(cmd); + return (retCode); + +@@ -782,20 +804,23 @@ int novfs_get_conn_info(struct novfs_xpl + + int novfs_set_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_set_conn_info connInfo; +- struct nwd_set_conn_info *pDConnInfo; ++ struct nwd_set_conn_info *pDConnInfo = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, replylen, cpylen; + + cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo); + cmd = kmalloc(cmdlen, GFP_KERNEL); +- cpylen = +- copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_set_conn_info)); ++ cpylen = copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_set_conn_info)); + + if (!cmd) + return -ENOMEM; ++ if (connInfo.uInfoLength > MAX_INFO_LEN) { ++ retCode = -EINVAL; ++ goto out; ++ } + + cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; + cmd->Command.SequenceNumber = 0; +@@ -817,6 +842,8 @@ int novfs_set_conn_info(struct novfs_xpl + retCode = reply->Reply.ErrorCode; + kfree(reply); + } ++ ++out: + kfree(cmd); + return (retCode); + +@@ -824,12 +851,12 @@ int novfs_set_conn_info(struct novfs_xpl + + int novfs_get_id_info(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwc_get_id_info qidInfo, *gId; +- struct nwd_get_id_info *idInfo; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwc_get_id_info qidInfo, *gId = NULL; ++ struct nwd_get_id_info *idInfo = NULL; + struct nwc_string xferStr; +- char *str; ++ char *str = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, replylen, cpylen; + +@@ -846,12 +873,10 @@ int novfs_get_id_info(struct novfs_xplat + 
cmd->NwcCommand = NWC_GET_IDENTITY_INFO; + + idInfo = (struct nwd_get_id_info *) cmd->data; +- + idInfo->AuthenticationId = qidInfo.AuthenticationId; + cmd->dataLen = sizeof(*idInfo); + +- retCode = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, ++ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, + (void **)&reply, &replylen, + INTERRUPTIBLE); + if (reply) { +@@ -863,77 +888,77 @@ int novfs_get_id_info(struct novfs_xplat + */ + gId = pdata->reqData; + idInfo = (struct nwd_get_id_info *) reply->data; +- cpylen = +- copy_to_user(&gId->AuthenticationId, ++ cpylen = copy_to_user(&gId->AuthenticationId, + &idInfo->AuthenticationId, + sizeof(idInfo-> + AuthenticationId)); +- cpylen = +- copy_to_user(&gId->AuthType, ++ cpylen = copy_to_user(&gId->AuthType, + &idInfo->AuthType, + sizeof(idInfo->AuthType)); +- cpylen = +- copy_to_user(&gId->IdentityFlags, ++ cpylen = copy_to_user(&gId->IdentityFlags, + &idInfo->IdentityFlags, + sizeof(idInfo->IdentityFlags)); +- cpylen = +- copy_to_user(&gId->NameType, ++ cpylen = copy_to_user(&gId->NameType, + &idInfo->NameType, + sizeof(idInfo->NameType)); +- cpylen = +- copy_to_user(&gId->ObjectType, ++ cpylen = copy_to_user(&gId->ObjectType, + &idInfo->ObjectType, + sizeof(idInfo->ObjectType)); + +- cpylen = +- copy_from_user(&xferStr, gId->pDomainName, ++ cpylen = copy_from_user(&xferStr, gId->pDomainName, + sizeof(struct nwc_string)); +- str = +- (char *)((char *)reply->data + +- idInfo->pDomainNameOffset); +- cpylen = +- copy_to_user(xferStr.pBuffer, str, +- idInfo->domainLen); ++ if (idInfo->pDomainNameOffset >= reply->dataLen) { ++ retCode = -EINVAL; ++ goto out; ++ } ++ str = (char *)((char *)reply->data + idInfo->pDomainNameOffset); ++ if (idInfo->domainLen > reply->dataLen - idInfo->pDomainNameOffset ) { ++ retCode = -EINVAL; ++ goto out; ++ } ++ ++ cpylen = copy_to_user(xferStr.pBuffer, str, idInfo->domainLen); + xferStr.DataType = NWC_STRING_TYPE_ASCII; + xferStr.DataLen = idInfo->domainLen; +- cpylen = +- copy_to_user(gId->pDomainName, &xferStr, +- sizeof(struct nwc_string)); ++ cpylen = copy_to_user(gId->pDomainName, &xferStr, sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&xferStr, gId->pObjectName, sizeof(struct nwc_string)); + +- cpylen = +- copy_from_user(&xferStr, gId->pObjectName, +- sizeof(struct nwc_string)); +- str = +- (char *)((char *)reply->data + +- idInfo->pObjectNameOffset); +- cpylen = +- copy_to_user(xferStr.pBuffer, str, +- idInfo->objectLen); ++ if (idInfo->pObjectNameOffset >= reply->dataLen) { ++ retCode = -EINVAL; ++ goto out; ++ } ++ str = (char *)((char *)reply->data + idInfo->pObjectNameOffset); ++ if (idInfo->objectLen > reply->dataLen - idInfo->pObjectNameOffset) { ++ retCode = -EINVAL; ++ goto out; ++ } ++ cpylen = copy_to_user(xferStr.pBuffer, str, idInfo->objectLen); + xferStr.DataLen = idInfo->objectLen - 1; + xferStr.DataType = NWC_STRING_TYPE_ASCII; +- cpylen = +- copy_to_user(gId->pObjectName, &xferStr, +- sizeof(struct nwc_string)); ++ cpylen = copy_to_user(gId->pObjectName, &xferStr, sizeof(struct nwc_string)); ++ } + } + ++out: + kfree(reply); +- } + kfree(cmd); + return (retCode); + } + + int novfs_scan_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwc_scan_conn_info connInfo, *rInfo; +- struct nwd_scan_conn_info *pDConnInfo; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwc_scan_conn_info connInfo, *rInfo = 
NULL; ++ struct nwd_scan_conn_info *pDConnInfo = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, replylen, cpylen; +- unsigned char *localData; ++ unsigned char *localData = NULL; + +- cpylen = +- copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_scan_conn_info)); ++ cpylen = copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_scan_conn_info)); ++ ++ if (connInfo.uReturnInfoLength > MAX_INFO_LEN || connInfo.uScanInfoLen > MAX_INFO_LEN) ++ return -EINVAL; + + cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo) + connInfo.uScanInfoLen; + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -962,8 +987,7 @@ int novfs_scan_conn_info(struct novfs_xp + __DbgPrint(" connInfo.uScanFlags = 0x%X\n", connInfo.uScanFlags); + + pDConnInfo->uScanIndex = connInfo.uScanIndex; +- pDConnInfo->uConnectionReference = +- connInfo.uConnectionReference; ++ pDConnInfo->uConnectionReference = connInfo.uConnectionReference; + pDConnInfo->uScanInfoLevel = connInfo.uScanInfoLevel; + pDConnInfo->uScanInfoLen = connInfo.uScanInfoLen; + pDConnInfo->uReturnInfoLength = connInfo.uReturnInfoLength; +@@ -974,8 +998,7 @@ int novfs_scan_conn_info(struct novfs_xp + localData = (unsigned char *) pDConnInfo; + pDConnInfo->uScanConnInfoOffset = sizeof(*pDConnInfo); + localData += pDConnInfo->uScanConnInfoOffset; +- cpylen = +- copy_from_user(localData, connInfo.pScanConnInfo, ++ cpylen = copy_from_user(localData, connInfo.pScanConnInfo, + connInfo.uScanInfoLen); + } else { + pDConnInfo->uScanConnInfoOffset = 0; +@@ -1035,8 +1058,7 @@ int novfs_scan_conn_info(struct novfs_xp + static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) + { + unsigned long uLevel; +- struct nwd_scan_conn_info *pDConnInfo; +- ++ struct nwd_scan_conn_info *pDConnInfo = NULL; + unsigned char *srcData = NULL; + unsigned long dataLen = 0, cpylen; + +@@ -1082,26 +1104,17 @@ static void GetUserData(struct nwc_scan_ + DbgPrint("NWC_CONN_INFO_TRAN_ADDR 0x%p -> 0x%p :: 0x%X", + srcData, connInfo->pReturnConnInfo, dataLen); + +- cpylen = +- copy_from_user(&tranAddr, dstData, +- sizeof(tranAddr)); +- +- srcData += +- ((struct nwd_scan_conn_info *) srcData)-> +- uReturnConnInfoOffset; +- +- tranAddr.uTransportType = +- ((struct nwd_tran_addr *) srcData)->uTransportType; +- tranAddr.uAddressLength = +- ((struct tagNwdTranAddrEx *) srcData)->uAddressLength; +- +- cpylen = +- copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); +- cpylen = +- copy_to_user(tranAddr.puAddress, +- ((struct tagNwdTranAddrEx *) srcData)->Buffer, +- ((struct tagNwdTranAddrEx *) srcData)-> +- uAddressLength); ++ cpylen = copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); ++ if (((struct nwd_scan_conn_info *) srcData)->uReturnConnInfoOffset >= reply->dataLen) ++ goto out; ++ srcData += ((struct nwd_scan_conn_info *)srcData)->uReturnConnInfoOffset; ++ tranAddr.uTransportType = ((struct nwd_tran_addr *)srcData)->uTransportType; ++ tranAddr.uAddressLength = ((struct tagNwdTranAddrEx *)srcData)->uAddressLength; ++ if (tranAddr.uAddressLength > MAX_ADDRESS_LENGTH) ++ goto out; ++ cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); ++ cpylen = copy_to_user(tranAddr.puAddress, ++ ((struct tagNwdTranAddrEx *) srcData)->Buffer, tranAddr.uAddressLength); + dataLen = 0; + break; + } +@@ -1115,13 +1128,13 @@ static void GetUserData(struct nwc_scan_ + break; + } + +- if (srcData && dataLen) { ++ if (srcData && dataLen && dataLen <= reply->dataLen) { + DbgPrint("Copy Data 0x%p -> 0x%p :: 0x%X", + srcData, 
connInfo->pReturnConnInfo, dataLen); +- cpylen = +- copy_to_user(connInfo->pReturnConnInfo, srcData, dataLen); ++ cpylen = copy_to_user(connInfo->pReturnConnInfo, srcData, dataLen); + } + ++out: + return; + } + +@@ -1131,7 +1144,7 @@ static void GetUserData(struct nwc_scan_ + static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) + { + unsigned long uLevel; +- struct nwd_conn_info * pDConnInfo; ++ struct nwd_conn_info *pDConnInfo = NULL; + + unsigned char *srcData = NULL; + unsigned long dataLen = 0, cpylen; +@@ -1156,21 +1169,17 @@ static void GetConnData(struct nwc_get_c + + srcData = (unsigned char *) reply->data; + +- cpylen = +- copy_from_user(&tranAddr, dstData, +- sizeof(tranAddr)); ++ cpylen = copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); + tranAddr.uTransportType = + ((struct tagNwdTranAddrEx *) srcData)->uTransportType; + tranAddr.uAddressLength = + ((struct tagNwdTranAddrEx *) srcData)->uAddressLength; +- +- cpylen = +- copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); +- cpylen = +- copy_to_user(tranAddr.puAddress, ++ if (tranAddr.uAddressLength > MAX_ADDRESS_LENGTH) ++ goto out; ++ cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); ++ cpylen = copy_to_user(tranAddr.puAddress, + ((struct tagNwdTranAddrEx *) srcData)->Buffer, +- ((struct tagNwdTranAddrEx *) srcData)-> +- uAddressLength); ++ tranAddr.uAddressLength); + dataLen = 0; + break; + } +@@ -1214,20 +1223,19 @@ static void GetConnData(struct nwc_get_c + break; + } + +- if (srcData && dataLen) { +- cpylen = +- copy_to_user(connInfo->pConnInfo, srcData, +- connInfo->uInfoLength); ++ if (srcData && dataLen && dataLen <= reply->dataLen) { ++ cpylen = copy_to_user(connInfo->pConnInfo, srcData, connInfo->uInfoLength); + } + ++out: + return; + } + + int novfs_get_daemon_ver(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwd_get_reqversion *pDVersion; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwd_get_reqversion *pDVersion = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; + +@@ -1261,17 +1269,18 @@ int novfs_get_daemon_ver(struct novfs_xp + + int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwd_get_pref_ds_tree *pDGetTree; +- struct nwc_get_pref_ds_tree xplatCall, *p; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwd_get_pref_ds_tree *pDGetTree = NULL; ++ struct nwc_get_pref_ds_tree xplatCall, *p = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; +- unsigned char *dPtr; ++ unsigned char *dPtr = NULL; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, + sizeof(struct nwc_get_pref_ds_tree)); ++ if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) ++ return -EINVAL; + datalen = sizeof(*pDGetTree) + xplatCall.uTreeLength; + cmdlen = datalen + sizeof(*cmd); + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -1292,10 +1301,12 @@ int novfs_get_preferred_DS_tree(struct n + if (reply) { + retCode = reply->Reply.ErrorCode; + if (!retCode) { +- pDGetTree = +- (struct nwd_get_pref_ds_tree *) reply->data; +- dPtr = +- reply->data + pDGetTree->DsTreeNameOffset; ++ 
pDGetTree = (struct nwd_get_pref_ds_tree *) reply->data; ++ if (pDGetTree->DsTreeNameOffset >= reply->dataLen) { ++ retCode = -EINVAL; ++ goto out; ++ } ++ dPtr = reply->data + pDGetTree->DsTreeNameOffset; + p = (struct nwc_get_pref_ds_tree *) pdata->reqData; + + DbgPrint("Reply recieved"); +@@ -1303,14 +1314,17 @@ int novfs_get_preferred_DS_tree(struct n + pDGetTree->uTreeLength); + __DbgPrint(" TreeName = %s\n", dPtr); + +- cpylen = +- copy_to_user(p, &pDGetTree->uTreeLength, 4); +- cpylen = +- copy_to_user(xplatCall.pDsTreeName, dPtr, +- pDGetTree->uTreeLength); ++ if (pDGetTree->uTreeLength > reply->dataLen - pDGetTree->DsTreeNameOffset) { ++ retCode = -EINVAL; ++ goto out; ++ } ++ cpylen = copy_to_user(p, &pDGetTree->uTreeLength, 4); ++ cpylen = copy_to_user(xplatCall.pDsTreeName, dPtr, pDGetTree->uTreeLength); + } +- kfree(reply); + } ++ ++out: ++ kfree(reply); + kfree(cmd); + return (retCode); + +@@ -1318,17 +1332,17 @@ int novfs_get_preferred_DS_tree(struct n + + int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwd_set_pref_ds_tree *pDSetTree; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwd_set_pref_ds_tree *pDSetTree = NULL; + struct nwc_set_pref_ds_tree xplatCall; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; +- unsigned char *dPtr; ++ unsigned char *dPtr = NULL; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, +- sizeof(struct nwc_set_pref_ds_tree)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_pref_ds_tree)); ++ if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) ++ return -EINVAL; + datalen = sizeof(*pDSetTree) + xplatCall.uTreeLength; + cmdlen = datalen + sizeof(*cmd); + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -1346,8 +1360,7 @@ int novfs_set_preferred_DS_tree(struct n + pDSetTree->uTreeLength = xplatCall.uTreeLength; + + dPtr = cmd->data + sizeof(*pDSetTree); +- cpylen = +- copy_from_user(dPtr, xplatCall.pDsTreeName, ++ cpylen = copy_from_user(dPtr, xplatCall.pDsTreeName, + xplatCall.uTreeLength); + + retCode = +@@ -1366,19 +1379,19 @@ int novfs_set_preferred_DS_tree(struct n + int novfs_set_default_ctx(struct novfs_xplat *pdata, + struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_set_def_name_ctx xplatCall; +- struct nwd_set_def_name_ctx * pDSet; ++ struct nwd_set_def_name_ctx * pDSet = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, datalen, replylen, cpylen; +- unsigned char *dPtr; ++ unsigned char *dPtr = NULL; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, + sizeof(struct nwc_set_def_name_ctx)); +- datalen = +- sizeof(*pDSet) + xplatCall.uTreeLength + xplatCall.uNameLength; ++ if (xplatCall.uNameLength > MAX_NAME_LEN || xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) ++ return -EINVAL; ++ datalen = sizeof(*pDSet) + xplatCall.uTreeLength + xplatCall.uNameLength; + cmdlen = datalen + sizeof(*cmd); + cmd = kmalloc(cmdlen, GFP_KERNEL); + +@@ -1388,23 +1401,19 @@ int novfs_set_default_ctx(struct novfs_x + cmd->Command.SequenceNumber = 0; + cmd->Command.SessionId = Session; + cmd->NwcCommand = NWC_SET_DEFAULT_NAME_CONTEXT; +- cmd->dataLen = +- sizeof(struct 
nwd_set_def_name_ctx) + +- xplatCall.uTreeLength + xplatCall.uNameLength; ++ cmd->dataLen = sizeof(struct nwd_set_def_name_ctx) + xplatCall.uTreeLength + xplatCall.uNameLength; + + pDSet = (struct nwd_set_def_name_ctx *) cmd->data; + dPtr = cmd->data; + + pDSet->TreeOffset = sizeof(struct nwd_set_def_name_ctx); + pDSet->uTreeLength = xplatCall.uTreeLength; +- pDSet->NameContextOffset = +- pDSet->TreeOffset + xplatCall.uTreeLength; ++ pDSet->NameContextOffset = pDSet->TreeOffset + xplatCall.uTreeLength; + pDSet->uNameLength = xplatCall.uNameLength; + + //sgled cpylen = copy_from_user(dPtr+pDSet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); + cpylen = copy_from_user(dPtr + pDSet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled +- cpylen = +- copy_from_user(dPtr + pDSet->NameContextOffset, ++ cpylen = copy_from_user(dPtr + pDSet->NameContextOffset, + xplatCall.pNameContext, + xplatCall.uNameLength); + +@@ -1424,20 +1433,20 @@ int novfs_set_default_ctx(struct novfs_x + int novfs_get_default_ctx(struct novfs_xplat *pdata, + struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_get_def_name_ctx xplatCall; +- struct nwd_get_def_name_ctx * pGet; +- char *dPtr; ++ struct nwd_get_def_name_ctx * pGet = NULL; ++ char *dPtr = NULL; + int retCode = -ENOMEM; + unsigned long cmdlen, replylen, cpylen; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, + sizeof(struct nwc_get_def_name_ctx)); +- cmdlen = +- sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx ) + +- xplatCall.uTreeLength; ++ if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) ++ return -EINVAL; ++ ++ cmdlen = sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx ) + xplatCall.uTreeLength; + cmd = kmalloc(cmdlen, GFP_KERNEL); + + if (!cmd) +@@ -1512,16 +1521,20 @@ int novfs_query_feature(struct novfs_xpl + int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, + struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwc_get_tree_monitored_conn_ref xplatCall, *p; +- struct nwd_get_tree_monitored_conn_ref *pDConnRef; +- char *dPtr; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwc_get_tree_monitored_conn_ref xplatCall, *p = NULL; ++ struct nwd_get_tree_monitored_conn_ref *pDConnRef = NULL; ++ char *dPtr = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + + cpylen = + copy_from_user(&xplatCall, pdata->reqData, + sizeof(struct nwc_get_tree_monitored_conn_ref)); ++ if (!access_ok(VERIFY_READ, xplatCall.pTreeName, sizeof(struct nwc_string))) ++ return -EINVAL; ++ if (xplatCall.pTreeName->DataLen > NW_MAX_TREE_NAME_LEN) ++ return -EINVAL; + datalen = sizeof(*pDConnRef) + xplatCall.pTreeName->DataLen; + cmdlen = datalen + sizeof(*cmd); + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -1564,12 +1577,12 @@ int novfs_get_tree_monitored_conn(struct + + int novfs_enum_ids(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; +- struct nwc_enum_ids xplatCall, *eId; +- struct nwd_enum_ids *pEnum; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; ++ struct nwc_enum_ids xplatCall, *eId = NULL; ++ struct nwd_enum_ids 
*pEnum = NULL; + struct nwc_string xferStr; +- char *str; ++ char *str = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + + cpylen = +@@ -1629,56 +1642,71 @@ int novfs_enum_ids(struct novfs_xplat *p + cpylen = + copy_from_user(&xferStr, eId->pDomainName, + sizeof(struct nwc_string)); ++ if (pEnum->domainNameOffset >= reply->dataLen) { ++ status = -EINVAL; ++ goto out; ++ } + str = + (char *)((char *)reply->data + + pEnum->domainNameOffset); +- DbgPrint("[XPLAT NWCAPI] Found Domain %s", +- str); ++ DbgPrint("[XPLAT NWCAPI] Found Domain %s", str); ++ if (pEnum->domainNameLen > reply->dataLen - pEnum->domainNameOffset) { ++ status = -EINVAL; ++ goto out; ++ } + cpylen = + copy_to_user(xferStr.pBuffer, str, + pEnum->domainNameLen); + xferStr.DataType = NWC_STRING_TYPE_ASCII; + xferStr.DataLen = pEnum->domainNameLen - 1; +- cpylen = +- copy_to_user(eId->pDomainName, &xferStr, ++ cpylen = copy_to_user(eId->pDomainName, &xferStr, + sizeof(struct nwc_string)); + +- cpylen = +- copy_from_user(&xferStr, eId->pObjectName, ++ cpylen = copy_from_user(&xferStr, eId->pObjectName, + sizeof(struct nwc_string)); +- str = +- (char *)((char *)reply->data + +- pEnum->objectNameOffset); ++ if (pEnum->objectNameOffset >= reply->dataLen) { ++ status = -EINVAL; ++ goto out; ++ } ++ str = (char *)((char *)reply->data + pEnum->objectNameOffset); + DbgPrint("[XPLAT NWCAPI] Found User %s", str); +- cpylen = +- copy_to_user(xferStr.pBuffer, str, +- pEnum->objectNameLen); ++ if (pEnum->objectNameLen > reply->dataLen - pEnum->objectNameOffset) { ++ status = -EINVAL; ++ goto out; ++ } ++ cpylen = copy_to_user(xferStr.pBuffer, str, pEnum->objectNameLen); + xferStr.DataType = NWC_STRING_TYPE_ASCII; + xferStr.DataLen = pEnum->objectNameLen - 1; +- cpylen = +- copy_to_user(eId->pObjectName, &xferStr, +- sizeof(struct nwc_string)); ++ cpylen = copy_to_user(eId->pObjectName, &xferStr, sizeof(struct nwc_string)); + } +- +- kfree(reply); +- + } ++out: ++ kfree(reply); + kfree(cmd); + return (status); + } + + int novfs_change_auth_key(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_change_key xplatCall; +- struct nwd_change_key *pNewKey; ++ struct nwd_change_key *pNewKey = NULL; + struct nwc_string xferStr; +- char *str; +- unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; ++ char *str = NULL; ++ unsigned long status = -ENOMEM, cmdlen = 0, datalen, replylen, cpylen; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_change_key)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_change_key)); ++ if (!access_ok(VERIFY_READ, xplatCall.pDomainName, sizeof(struct nwc_string)) || ++ !access_ok(VERIFY_READ, xplatCall.pObjectName, sizeof(struct nwc_string)) || ++ !access_ok(VERIFY_READ, xplatCall.pNewPassword, sizeof(struct nwc_string)) || ++ !access_ok(VERIFY_READ, xplatCall.pVerifyPassword, sizeof(struct nwc_string))) ++ return -EINVAL; ++ if (xplatCall.pDomainName->DataLen > MAX_DOMAIN_LEN || ++ xplatCall.pObjectName->DataLen > MAX_OBJECT_NAME_LENGTH || ++ xplatCall.pNewPassword->DataLen > MAX_PASSWORD_LENGTH || ++ xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) ++ return -EINVAL; + + datalen = + sizeof(struct nwd_change_key) + xplatCall.pDomainName->DataLen + +@@ -1707,10 +1735,12 @@ int novfs_change_auth_key(struct novfs_x + * 
Get the tree name + */ + str += sizeof(*pNewKey); +- cpylen = +- copy_from_user(&xferStr, xplatCall.pDomainName, +- sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&xferStr, xplatCall.pDomainName, sizeof(struct nwc_string)); + pNewKey->domainNameOffset = sizeof(*pNewKey); ++ if (xferStr.DataLen > MAX_DOMAIN_LEN) { ++ status = -EINVAL; ++ goto out; ++ } + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->domainNameLen = xferStr.DataLen; + +@@ -1718,11 +1748,12 @@ int novfs_change_auth_key(struct novfs_x + * Get the User Name + */ + str += pNewKey->domainNameLen; +- cpylen = +- copy_from_user(&xferStr, xplatCall.pObjectName, +- sizeof(struct nwc_string)); +- pNewKey->objectNameOffset = +- pNewKey->domainNameOffset + pNewKey->domainNameLen; ++ cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, sizeof(struct nwc_string)); ++ pNewKey->objectNameOffset = pNewKey->domainNameOffset + pNewKey->domainNameLen; ++ if (xferStr.DataLen > MAX_OBJECT_NAME_LENGTH) { ++ status = -EINVAL; ++ goto out; ++ } + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->objectNameLen = xferStr.DataLen; + +@@ -1730,11 +1761,12 @@ int novfs_change_auth_key(struct novfs_x + * Get the New Password + */ + str += pNewKey->objectNameLen; +- cpylen = +- copy_from_user(&xferStr, xplatCall.pNewPassword, +- sizeof(struct nwc_string)); +- pNewKey->newPasswordOffset = +- pNewKey->objectNameOffset + pNewKey->objectNameLen; ++ cpylen = copy_from_user(&xferStr, xplatCall.pNewPassword, sizeof(struct nwc_string)); ++ pNewKey->newPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; ++ if (xferStr.DataLen > MAX_PASSWORD_LENGTH) { ++ status = -EINVAL; ++ goto out; ++ } + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->newPasswordLen = xferStr.DataLen; + +@@ -1742,34 +1774,35 @@ int novfs_change_auth_key(struct novfs_x + * Get the Verify Password + */ + str += pNewKey->newPasswordLen; +- cpylen = +- copy_from_user(&xferStr, xplatCall.pVerifyPassword, +- sizeof(struct nwc_string)); +- pNewKey->verifyPasswordOffset = +- pNewKey->newPasswordOffset + pNewKey->newPasswordLen; ++ cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, sizeof(struct nwc_string)); ++ pNewKey->verifyPasswordOffset = pNewKey->newPasswordOffset + pNewKey->newPasswordLen; ++ if (xferStr.DataLen > MAX_PASSWORD_LENGTH) { ++ status = -EINVAL; ++ goto out; ++ } + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->verifyPasswordLen = xferStr.DataLen; + +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, + (void **)&reply, &replylen, + INTERRUPTIBLE); + if (reply) { + status = reply->Reply.ErrorCode; +- kfree(reply); ++ + } ++out: + memset(cmd, 0, cmdlen); +- ++ kfree(reply); + kfree(cmd); + return (status); + } + + int novfs_set_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_set_primary_conn xplatCall; +- struct nwd_set_primary_conn *pConn; ++ struct nwd_set_primary_conn *pConn = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + + cpylen = +@@ -1805,7 +1838,7 @@ int novfs_set_pri_conn(struct novfs_xpla + int novfs_get_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) + { + struct novfs_xplat_call_request cmd; +- 
struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_reply *reply = NULL; + unsigned long status = -ENOMEM, cmdlen, replylen, cpylen; + + cmdlen = (unsigned long) (&((struct novfs_xplat_call_request *) 0)->data); +@@ -1837,8 +1870,8 @@ int novfs_get_pri_conn(struct novfs_xpla + int novfs_set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) + { + +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + unsigned long status = 0, datalen, cmdlen, replylen; + struct nwc_map_drive_ex symInfo; + +@@ -1846,6 +1879,8 @@ int novfs_set_map_drive(struct novfs_xpl + cmdlen = sizeof(*cmd); + if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) + return -EFAULT; ++ if (symInfo.dirPathOffsetLength > MAX_OFFSET_LEN || symInfo.linkOffsetLength > MAX_OFFSET_LEN) ++ return -EINVAL; + datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength + + symInfo.linkOffsetLength; + +@@ -1890,14 +1925,16 @@ int novfs_set_map_drive(struct novfs_xpl + + int novfs_unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + unsigned long status = 0, datalen, cmdlen, replylen, cpylen; + struct nwc_unmap_drive_ex symInfo; + + DbgPrint(""); + + cpylen = copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo)); ++ if (symInfo.linkLen > MAX_NAME_LEN) ++ return -EINVAL; + cmdlen = sizeof(*cmd); + datalen = sizeof(symInfo) + symInfo.linkLen; + +@@ -1913,8 +1950,7 @@ int novfs_unmap_drive(struct novfs_xplat + cmd->NwcCommand = NWC_UNMAP_DRIVE; + + cpylen = copy_from_user(cmd->data, pdata->reqData, datalen); +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, + (void **)&reply, &replylen, + INTERRUPTIBLE); + +@@ -1928,11 +1964,11 @@ int novfs_unmap_drive(struct novfs_xplat + + int novfs_enum_drives(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + unsigned long status = 0, cmdlen, replylen, cpylen; + unsigned long offset; +- char *cp; ++ char *cp = NULL; + + DbgPrint(""); + +@@ -1955,37 +1991,36 @@ int novfs_enum_drives(struct novfs_xplat + status = reply->Reply.ErrorCode; + DbgPrint("Status Code = 0x%X", status); + if (!status) { +- offset = +- sizeof(((struct nwc_get_mapped_drives *) pdata-> ++ offset = sizeof(((struct nwc_get_mapped_drives *) pdata-> + repData)->MapBuffLen); + cp = reply->data; +- replylen = +- ((struct nwc_get_mapped_drives *) pdata->repData)-> +- MapBuffLen; +- cpylen = +- copy_to_user(pdata->repData, cp, offset); ++ replylen = ((struct nwc_get_mapped_drives *) pdata->repData)->MapBuffLen; ++ if (offset > reply->dataLen) { ++ status = -EINVAL; ++ goto out; ++ } ++ cpylen = copy_to_user(pdata->repData, cp, offset); + cp += offset; +- cpylen = +- copy_to_user(((struct nwc_get_mapped_drives *) pdata-> ++ cpylen = copy_to_user(((struct nwc_get_mapped_drives *) pdata-> + repData)->MapBuffer, cp, + min(replylen - offset, + reply->dataLen - offset)); + } +- +- kfree(reply); + } ++out: ++ kfree(reply); + kfree(cmd); + return (status); + } + + int novfs_get_bcast_msg(struct novfs_xplat *pdata, struct 
novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + unsigned long cmdlen, replylen; + int status = 0x8866, cpylen; + struct nwc_get_bcast_notification msg; +- struct nwd_get_bcast_notification *dmsg; ++ struct nwd_get_bcast_notification *dmsg = NULL; + + cmdlen = sizeof(*cmd) + sizeof(*dmsg); + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -2013,33 +2048,21 @@ int novfs_get_bcast_msg(struct novfs_xpl + if (!status) { + char *cp = pdata->repData; + +- dmsg = +- (struct nwd_get_bcast_notification *) reply->data; ++ dmsg = (struct nwd_get_bcast_notification *) reply->data; + if (pdata->repLen < dmsg->messageLen) { + dmsg->messageLen = pdata->repLen; + } + msg.messageLen = dmsg->messageLen; +- cpylen = +- offsetof(struct +- nwc_get_bcast_notification, +- message); ++ cpylen = offsetof(struct nwc_get_bcast_notification, message); + cp += cpylen; +- cpylen = +- copy_to_user(pdata->repData, &msg, cpylen); +- cpylen = +- copy_to_user(cp, dmsg->message, +- msg.messageLen); ++ cpylen = copy_to_user(pdata->repData, &msg, cpylen); ++ cpylen = copy_to_user(cp, dmsg->message, msg.messageLen); + } else { + msg.messageLen = 0; + msg.message[0] = 0; +- cpylen = offsetof(struct +- nwc_get_bcast_notification, +- message); +- cpylen = +- copy_to_user(pdata->repData, &msg, +- sizeof(msg)); ++ cpylen = offsetof(struct nwc_get_bcast_notification, message); ++ cpylen = copy_to_user(pdata->repData, &msg, sizeof(msg)); + } +- + kfree(reply); + } + kfree(cmd); +@@ -2048,24 +2071,21 @@ int novfs_get_bcast_msg(struct novfs_xpl + + int novfs_set_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_set_key xplatCall; +- struct nwd_set_key *pNewKey; ++ struct nwd_set_key *pNewKey = NULL; + struct nwc_string cstrObjectName, cstrPassword; +- char *str; ++ char *str = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + + cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_key)); +- cpylen = +- copy_from_user(&cstrObjectName, xplatCall.pObjectName, +- sizeof(struct nwc_string)); +- cpylen = +- copy_from_user(&cstrPassword, xplatCall.pNewPassword, +- sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&cstrObjectName, xplatCall.pObjectName, sizeof(struct nwc_string)); ++ cpylen = copy_from_user(&cstrPassword, xplatCall.pNewPassword, sizeof(struct nwc_string)); + +- datalen = +- sizeof(struct nwd_set_key ) + cstrObjectName.DataLen + cstrPassword.DataLen; ++ if (cstrObjectName.DataLen > MAX_OBJECT_NAME_LENGTH || cstrPassword.DataLen > MAX_PASSWORD_LENGTH) ++ return -EINVAL; ++ datalen = sizeof(struct nwd_set_key ) + cstrObjectName.DataLen + cstrPassword.DataLen; + + cmdlen = sizeof(*cmd) + datalen; + cmd = kmalloc(cmdlen, GFP_KERNEL); +@@ -2099,16 +2119,12 @@ int novfs_set_key_value(struct novfs_xpl + /* + * Get the Verify Password + */ +- cpylen = +- copy_from_user(str, cstrPassword.pBuffer, +- cstrPassword.DataLen); ++ cpylen = copy_from_user(str, cstrPassword.pBuffer, cstrPassword.DataLen); + + pNewKey->newPasswordLen = cstrPassword.DataLen; +- pNewKey->newPasswordOffset = +- pNewKey->objectNameOffset + pNewKey->objectNameLen; ++ pNewKey->newPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; + +- status = 
+- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, + (void **)&reply, &replylen, + INTERRUPTIBLE); + if (reply) { +@@ -2121,16 +2137,22 @@ int novfs_set_key_value(struct novfs_xpl + + int novfs_verify_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session) + { +- struct novfs_xplat_call_request *cmd; +- struct novfs_xplat_call_reply *reply; ++ struct novfs_xplat_call_request *cmd = NULL; ++ struct novfs_xplat_call_reply *reply = NULL; + struct nwc_verify_key xplatCall; +- struct nwd_verify_key *pNewKey; ++ struct nwd_verify_key *pNewKey = NULL; + struct nwc_string xferStr; +- char *str; ++ char *str = NULL; + unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; + +- cpylen = +- copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_verify_key)); ++ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_verify_key)); ++ ++ if (!access_ok(VERIFY_READ, xplatCall.pDomainName, sizeof(struct nwc_string)) || ++ !access_ok(VERIFY_READ, xplatCall.pVerifyPassword, sizeof(struct nwc_string))) ++ return -EINVAL; ++ if (xplatCall.pDomainName->DataLen > MAX_NAME_LEN || xplatCall.pObjectName->DataLen > MAX_OBJECT_NAME_LENGTH || ++ xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) ++ return -EINVAL; + + datalen = + sizeof(struct nwd_verify_key) + xplatCall.pDomainName->DataLen + +@@ -2169,11 +2191,9 @@ int novfs_verify_key_value(struct novfs_ + * Get the User Name + */ + str += pNewKey->domainNameLen; +- cpylen = +- copy_from_user(&xferStr, xplatCall.pObjectName, ++ cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, + sizeof(struct nwc_string)); +- pNewKey->objectNameOffset = +- pNewKey->domainNameOffset + pNewKey->domainNameLen; ++ pNewKey->objectNameOffset = pNewKey->domainNameOffset + pNewKey->domainNameLen; + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->objectNameLen = xferStr.DataLen; + +@@ -2181,16 +2201,14 @@ int novfs_verify_key_value(struct novfs_ + * Get the Verify Password + */ + str += pNewKey->objectNameLen; +- cpylen = +- copy_from_user(&xferStr, xplatCall.pVerifyPassword, ++ cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, + sizeof(struct nwc_string)); + pNewKey->verifyPasswordOffset = + pNewKey->objectNameOffset + pNewKey->objectNameLen; + cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); + pNewKey->verifyPasswordLen = xferStr.DataLen; + +- status = +- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, ++ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, + (void **)&reply, &replylen, + INTERRUPTIBLE); + if (reply) { +@@ -2200,3 +2218,4 @@ int novfs_verify_key_value(struct novfs_ + kfree(cmd); + return (status); + } ++ +--- a/fs/novfs/nwcapi.h ++++ b/fs/novfs/nwcapi.h +@@ -297,6 +297,18 @@ N_EXTERN_LIBRARY(NWRCODE) + #define MAX_ADDRESS_LENGTH 32 + #define MAX_NAME_SERVICE_PROVIDERS 10 + ++#define MAX_NAME_LEN 1024 ++#define MAX_NUM_REPLIES 4096 ++#define MIN_NUM_REPLIES 1 ++#define MAX_NUM_REQUESTS 4096 ++#define MIN_NUM_REQUESTS 1 ++#define MAX_FRAG_SIZE 4096 ++#define MIN_FRAG_SIZE 1 ++#define MAX_INFO_LEN 4096 ++#define MAX_DOMAIN_LEN MAX_NETWORK_NAME_LENGTH ++#define MAX_OFFSET_LEN 4096 ++ ++ + // + // Flags for the GetBroadcastMessage API + // diff --git a/patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir b/patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir index 08fe8f8..799f5ec 100644 --- a/patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir +++ 
b/patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir
@@ -10,14 +10,12 @@ behave correctly in novfs, just like other file-systems.
 Signed-off-by: Sankar P
 Acked-by: Jan Kara
 ---
- tmp/linux-2.6.32-sle11-sp1/fs/novfs/file.c | 11 ++++++++---
- 1 files changed, 8 insertions(+), 3 deletions(-)
+ fs/novfs/file.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
 
-diff --git a/fs/novfs/file.c b/fs/novfs/file.c
-index b7033ff..5da32ca 100644
 --- a/fs/novfs/file.c
 +++ b/fs/novfs/file.c
-@@ -1569,11 +1569,16 @@ int novfs_delete(unsigned char * Path, int DirectoryFlag, struct novfs_schandle
+@@ -1569,11 +1569,16 @@ int novfs_delete(unsigned char * Path, i
  if (reply) {
  retCode = 0;
  if (reply->Reply.ErrorCode) {
@@ -37,6 +35,3 @@ index b7033ff..5da32ca 100644
  }
  kfree(reply);
  }
---
-1.6.4.2
-
diff --git a/patches.fixes/novfs-unlink-oops b/patches.fixes/novfs-unlink-oops
new file mode 100644
index 0000000..453d215
--- /dev/null
+++ b/patches.fixes/novfs-unlink-oops
@@ -0,0 +1,36 @@
+From: Sankar P
+Subject: novfs: Fix for the issue of kernel dumps core on restart
+References: bnc#641811
+Patch-mainline: No
+
+This patch fixes a bug that causes the kernel to dump core on restart,
+by rectifying the counter and dentry manipulation code.
+
+Signed-off-by: Sankar P
+Acked-by: Jan Kara
+
+diff --git a/fs/novfs/daemon.c b/fs/novfs/daemon.c
+index f0fd5d6..6e7fb5d 100644
+--- a/fs/novfs/daemon.c
++++ b/fs/novfs/daemon.c
+@@ -1857,14 +1857,15 @@ static long local_unlink(const char *pathname)
+ while (*c != '\0') {
+ if (*c == '/')
+ name = ++c;
+- c++;
++ else
++ c++;
+ }
+ dentry = lookup_one_len(name, nd.path.dentry, strlen(name));
+ error = PTR_ERR(dentry);
+
+ if (!IS_ERR(dentry)) {
+ DbgPrint("dentry %p", dentry);
+- if (!(dentry->d_inode->i_mode & S_IFLNK)) {
++ if (!(dentry->d_inode) || !(dentry->d_inode->i_mode & S_IFLNK)) {
+ DbgPrint("%s not a link", name);
+ error = -ENOENT;
+ goto exit1;
+--
+1.7.1
+
diff --git a/patches.fixes/novfs-xattr-errcode-cleanup b/patches.fixes/novfs-xattr-errcode-cleanup
new file mode 100644
index 0000000..9b0a890
--- /dev/null
+++ b/patches.fixes/novfs-xattr-errcode-cleanup
@@ -0,0 +1,40 @@
+From: Sankar P
+Subject: [PATCH] novfs: code cleanup for one case of novfs return value
+References: bnc#624606
+Patch-mainline: No
+
+Code cleanup. One error case returns a positive error value.
+Changed it to return a negative value.
+
+Signed-off-by: Sankar P
+Acked-by: Jan Kara
+---
+ fs/novfs/file.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/fs/novfs/file.c
++++ b/fs/novfs/file.c
+@@ -280,9 +280,8 @@ int novfs_getx_file_info(char *Path, con
+ DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i",
+ Path, pathlen, Name, namelen);
+
+- if (namelen > MAX_XATTR_NAME_LEN) {
+- return ENOATTR;
+- }
++ if (namelen > MAX_XATTR_NAME_LEN)
++ return -ENOATTR;
+
+ cmdlen = offsetof(struct novfs_xa_get_request, data) + pathlen + 1 + namelen + 1; // two '\0'
+ cmd = (struct novfs_xa_get_request *) kmalloc(cmdlen, GFP_KERNEL);
+@@ -375,9 +374,8 @@ int novfs_setx_file_info(char *Path, con
+ DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i, "
+ "value len = %u", Path, pathlen, Name, namelen, valueLen);
+
+- if (namelen > MAX_XATTR_NAME_LEN) {
+- return ENOATTR;
+- }
++ if (namelen > MAX_XATTR_NAME_LEN)
++ return -ENOATTR;
+
+ cmdlen = offsetof(struct novfs_xa_set_request, data) + pathlen + 1 + namelen + 1 + valueLen;
+ cmd = (struct novfs_xa_set_request *) kmalloc(cmdlen, GFP_KERNEL);
diff --git a/patches.fixes/novfs-xattr-errcode-cleanup2 b/patches.fixes/novfs-xattr-errcode-cleanup2
new file mode 100644
index 0000000..3f2031f
--- /dev/null
+++ b/patches.fixes/novfs-xattr-errcode-cleanup2
@@ -0,0 +1,32 @@
+From: Sankar P
+Subject: [PATCH] novfs: Fix error codes for getxattr for novfs
+Patch-mainline: No
+References: bnc#529535
+
+The getxattr function should not use harsh error codes like ENOENT
+if the named attribute cannot be obtained. This fix makes
+novfs return ENOATTR as suggested by the man page.
+
+Signed-off-by: Sankar P
+Acked-by: Jan Kara
+---
+ fs/novfs/file.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/fs/novfs/file.c
++++ b/fs/novfs/file.c
+@@ -319,13 +319,7 @@ int novfs_getx_file_info(char *Path, con
+ reply->Reply.ErrorCode);
+ DbgPrint("xattr: replylen=%d", replylen);
+
+- //0xC9 = EA not found (C9), 0xD1 = EA access denied
+- if ((reply->Reply.ErrorCode == 0xC9)
+- || (reply->Reply.ErrorCode == 0xD1)) {
+- retCode = -ENOATTR;
+- } else {
+- retCode = -ENOENT;
+- }
++ retCode = -ENOATTR;
+ } else {
+
+ *dataLen =
diff --git a/patches.fixes/novfs-xattr-memleak b/patches.fixes/novfs-xattr-memleak
new file mode 100644
index 0000000..49affbe
--- /dev/null
+++ b/patches.fixes/novfs-xattr-memleak
@@ -0,0 +1,30 @@
+From: Sankar P
+Subject: [PATCH] novfs: Fix a memory leak that causes an OOM condition
+References: bnc#610828
+Patch-mainline: No
+
+The patch fixes a serious memory leak that causes the
+machine to go into an OOM condition, making it unusable.
+
+Signed-off-by: Sankar P
+Acked-by: Jan Kara
+
+---
+ fs/novfs/inode.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/fs/novfs/inode.c
++++ b/fs/novfs/inode.c
+@@ -3421,11 +3421,8 @@ ssize_t novfs_i_getxattr(struct dentry *
+ retxcode = -ERANGE;
+ }
+ }
+-
+- if (bufRead) {
+- kfree(bufRead);
+- }
+ }
++ kfree(bufRead);
+ }
+ }
+ kfree(buf);
diff --git a/patches.fixes/oom-warning b/patches.fixes/oom-warning
index cd14bcd..b2f7134 100644
--- a/patches.fixes/oom-warning
+++ b/patches.fixes/oom-warning
@@ -13,7 +13,7 @@ Signed-off-by: Andrea Arcangeli
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -1927,7 +1927,13 @@ rebalance:
+@@ -2087,7 +2087,13 @@ rebalance:
  nopage:
  if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
@@ -26,5 +26,5 @@ Signed-off-by: Andrea Arcangeli
 + }
 + printk(KERN_INFO "%s: page allocation failure."
" order:%d, mode:0x%x\n", - p->comm, order, gfp_mask); + current->comm, order, gfp_mask); dump_stack(); diff --git a/patches.fixes/oprofile_bios_ctr.patch b/patches.fixes/oprofile_bios_ctr.patch index cea76e0..4debe02 100644 --- a/patches.fixes/oprofile_bios_ctr.patch +++ b/patches.fixes/oprofile_bios_ctr.patch @@ -42,8 +42,8 @@ Tested-by: Shashi Belur static struct op_x86_model_spec *model; static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); static DEFINE_PER_CPU(unsigned long, saved_lvtpc); -@@ -336,6 +349,50 @@ static struct notifier_block profile_exc - .priority = 2 +@@ -466,6 +479,50 @@ static struct notifier_block oprofile_cp + .notifier_call = oprofile_cpu_notifier }; +#define P4_CCCR_ENABLE (1 << 12) @@ -93,10 +93,10 @@ Tested-by: Shashi Belur static int nmi_setup(void) { int err = 0; -@@ -360,6 +417,7 @@ static int nmi_setup(void) +@@ -483,6 +540,7 @@ static int nmi_setup(void) + if (err) + goto fail; - /* Assume saved/restored counters are the same on all CPUs */ - model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); + nmi_is_counter_enabled(&per_cpu(cpu_msrs, 0)); for_each_possible_cpu(cpu) { if (!cpu) diff --git a/patches.fixes/powerpc-fix-handling-of-strnlen-with-zero-len b/patches.fixes/powerpc-fix-handling-of-strnlen-with-zero-len deleted file mode 100644 index e045dbe..0000000 --- a/patches.fixes/powerpc-fix-handling-of-strnlen-with-zero-len +++ /dev/null @@ -1,44 +0,0 @@ -From: Jeff Mahoney -To: linuxppc-dev@ozlabs.org -CC: Steven Rostedt , Christian_Sellars@symantec.com -Subject: [PATCH] powerpc: fix handling of strnlen with zero len -Patch-mainline: submitted 17 Mar 2010 -References: bnc#582681 - - Commit 0119536c, which added the assembly version of strncmp to - powerpc, mentions that it adds two instructions to the version from - boot/string.S to allow it to handle len=0. Unfortunately, it doesn't - always return 0 when that is the case. The length is passed in r5, but - the return value is passed back in r3. In certain cases, this will - happen to work. Otherwise it will pass back the address of the first - string as the return value. - - This patch lifts the len <= 0 handling code from memcpy to handle that - case. - -Reported by: Christian_Sellars@symantec.com -Signed-off-by: Jeff Mahoney ---- - arch/powerpc/lib/string.S | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/arch/powerpc/lib/string.S -+++ b/arch/powerpc/lib/string.S -@@ -71,7 +71,7 @@ _GLOBAL(strcmp) - - _GLOBAL(strncmp) - PPC_LCMPI r5,0 -- beqlr -+ ble- 2f - mtctr r5 - addi r5,r3,-1 - addi r4,r4,-1 -@@ -82,6 +82,8 @@ _GLOBAL(strncmp) - beqlr 1 - bdnzt eq,1b - blr -+2: li r3,0 -+ blr - - _GLOBAL(strlen) - addi r4,r3,-1 diff --git a/patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash b/patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash new file mode 100644 index 0000000..f12f9bf --- /dev/null +++ b/patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash @@ -0,0 +1,60 @@ +From: Jeff Mahoney +Subject: reiserfs: Force inode evictions before umount to avoid crash +References: bnc#610598 bnc#680073 bnc#684112 +Patch-mainline: Submitted to reiserfs-devel Apr 6 2011 + + This patch fixes a crash in reiserfs_delete_xattrs during umount. + + When shrink_dcache_for_umount clears the dcache from + generic_shutdown_super, delayed evictions are forced to disk. If an + evicted inode has extended attributes associated with it, it will + need to walk the xattr tree to locate and remove them. 
+ + But since shrink_dcache_for_umount will BUG if it encounters active + dentries, the xattr tree must be released before it's called or it will + crash during every umount. + + This patch forces the evictions to occur before generic_shutdown_super + by calling shrink_dcache_sb first. The additional evictions caused + by the removal of each associated xattr file and dir will be automatically + handled as they're added to the LRU list. + +Signed-off-by: Jeff Mahoney +Acked-by: Jeff Mahoney +--- + fs/reiserfs/super.c | 24 ++++++++++++++---------- + 1 file changed, 14 insertions(+), 10 deletions(-) + +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -453,16 +453,20 @@ int remove_save_link(struct inode *inode + static void reiserfs_kill_sb(struct super_block *s) + { + if (REISERFS_SB(s)) { +- if (REISERFS_SB(s)->xattr_root) { +- d_invalidate(REISERFS_SB(s)->xattr_root); +- dput(REISERFS_SB(s)->xattr_root); +- REISERFS_SB(s)->xattr_root = NULL; +- } +- if (REISERFS_SB(s)->priv_root) { +- d_invalidate(REISERFS_SB(s)->priv_root); +- dput(REISERFS_SB(s)->priv_root); +- REISERFS_SB(s)->priv_root = NULL; +- } ++ /* ++ * Force any pending inode evictions to occur now. Any ++ * inodes to be removed that have extended attributes ++ * associated with them need to clean them up before ++ * we can release the extended attribute root dentries. ++ * shrink_dcache_for_umount will BUG if we don't release ++ * those before it's called so ->put_super is too late. ++ */ ++ shrink_dcache_sb(s); ++ ++ dput(REISERFS_SB(s)->xattr_root); ++ REISERFS_SB(s)->xattr_root = NULL; ++ dput(REISERFS_SB(s)->priv_root); ++ REISERFS_SB(s)->priv_root = NULL; + } + + kill_block_super(s); diff --git a/patches.fixes/reiserfs-remove-2-tb-file-size-limit b/patches.fixes/reiserfs-remove-2-tb-file-size-limit index 84b2232..5f5bc6f 100644 --- a/patches.fixes/reiserfs-remove-2-tb-file-size-limit +++ b/patches.fixes/reiserfs-remove-2-tb-file-size-limit @@ -33,7 +33,7 @@ Acked-by: Jeff Mahoney --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c -@@ -1309,6 +1309,18 @@ out_err: +@@ -1322,6 +1322,18 @@ out_err: return err; } @@ -52,7 +52,7 @@ Acked-by: Jeff Mahoney static int read_super_block(struct super_block *s, int offset) { struct buffer_head *bh; -@@ -1398,10 +1410,7 @@ static int read_super_block(struct super +@@ -1411,10 +1423,7 @@ static int read_super_block(struct super s->dq_op = &reiserfs_quota_operations; #endif diff --git a/patches.fixes/remount-no-shrink-dcache b/patches.fixes/remount-no-shrink-dcache index 0b7b65f..87d3925 100644 --- a/patches.fixes/remount-no-shrink-dcache +++ b/patches.fixes/remount-no-shrink-dcache @@ -16,8 +16,8 @@ Signed-off-by: Olaf Kirch --- a/fs/super.c +++ b/fs/super.c -@@ -556,16 +556,10 @@ out: - return err; +@@ -521,16 +521,10 @@ rescan: + return NULL; } -/** @@ -36,8 +36,8 @@ Signed-off-by: Olaf Kirch +static int __do_remount_sb(struct super_block *sb, int flags, void *data, int rflags) { int retval; - int remount_rw, remount_ro; -@@ -580,7 +574,8 @@ int do_remount_sb(struct super_block *sb + int remount_ro; +@@ -545,7 +539,8 @@ int do_remount_sb(struct super_block *sb if (flags & MS_RDONLY) acct_auto_close(sb); @@ -47,7 +47,7 @@ Signed-off-by: Olaf Kirch sync_filesystem(sb); remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); -@@ -589,7 +584,7 @@ int do_remount_sb(struct super_block *sb +@@ -553,7 +548,7 @@ int do_remount_sb(struct super_block *sb /* If we are remounting RDONLY and current sb is read/write, make sure there are no rw files opened */ if (remount_ro) { @@ -56,7 
+56,7 @@ Signed-off-by: Olaf Kirch mark_files_ro(sb); else if (!fs_may_remount_ro(sb)) return -EBUSY; -@@ -619,6 +614,21 @@ int do_remount_sb(struct super_block *sb +@@ -579,6 +574,21 @@ int do_remount_sb(struct super_block *sb return 0; } @@ -77,13 +77,13 @@ Signed-off-by: Olaf Kirch + static void do_emergency_remount(struct work_struct *work) { - struct super_block *sb; -@@ -914,7 +924,7 @@ int get_sb_single(struct file_system_typ + struct super_block *sb, *p = NULL; +@@ -888,7 +898,7 @@ int get_sb_single(struct file_system_typ } s->s_flags |= MS_ACTIVE; } else { - do_remount_sb(s, flags, data, 0); + __do_remount_sb(s, flags, data, 0); } - simple_set_mnt(mnt, s); - return 0; + return dget(s->s_root); + } diff --git a/patches.fixes/scsi-add-tgps-setting b/patches.fixes/scsi-add-tgps-setting index dbea9eb..5e16c4a 100644 --- a/patches.fixes/scsi-add-tgps-setting +++ b/patches.fixes/scsi-add-tgps-setting @@ -16,15 +16,15 @@ Signed-off-by: Hannes Reinecke drivers/scsi/device_handler/scsi_dh_alua.c | 70 +++------------------------- drivers/scsi/device_handler/scsi_dh_emc.c | 8 +-- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 10 ++-- - drivers/scsi/device_handler/scsi_dh_rdac.c | 56 +++++++++++----------- + drivers/scsi/device_handler/scsi_dh_rdac.c | 58 +++++++++++------------ drivers/scsi/scsi_scan.c | 1 drivers/scsi/scsi_sysfs.c | 2 include/scsi/scsi_device.h | 4 + - 8 files changed, 59 insertions(+), 101 deletions(-) + 8 files changed, 60 insertions(+), 102 deletions(-) --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c -@@ -28,6 +28,7 @@ struct scsi_dh_devinfo_list { +@@ -29,6 +29,7 @@ struct scsi_dh_devinfo_list { struct list_head node; char vendor[9]; char model[17]; @@ -32,7 +32,7 @@ Signed-off-by: Hannes Reinecke struct scsi_device_handler *handler; }; -@@ -60,7 +61,8 @@ scsi_dh_cache_lookup(struct scsi_device +@@ -61,7 +62,8 @@ scsi_dh_cache_lookup(struct scsi_device spin_lock(&list_lock); list_for_each_entry(tmp, &scsi_dh_dev_list, node) { if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) && @@ -42,7 +42,7 @@ Signed-off-by: Hannes Reinecke found_dh = tmp->handler; break; } -@@ -79,7 +81,9 @@ static int scsi_dh_handler_lookup(struct +@@ -80,7 +82,9 @@ static int scsi_dh_handler_lookup(struct if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor, strlen(scsi_dh->devlist[i].vendor)) && !strncmp(sdev->model, scsi_dh->devlist[i].model, @@ -53,7 +53,7 @@ Signed-off-by: Hannes Reinecke found = 1; break; } -@@ -128,6 +132,7 @@ device_handler_match(struct scsi_device_ +@@ -129,6 +133,7 @@ device_handler_match(struct scsi_device_ strncpy(tmp->model, sdev->model, 16); tmp->vendor[8] = '\0'; tmp->model[16] = '\0'; @@ -63,7 +63,7 @@ Signed-off-by: Hannes Reinecke list_add(&tmp->node, &scsi_dh_dev_list); --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c -@@ -124,43 +124,6 @@ static struct request *get_alua_req(stru +@@ -125,43 +125,6 @@ static struct request *get_alua_req(stru } /* @@ -107,7 +107,7 @@ Signed-off-by: Hannes Reinecke * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command * @sdev: sdev the command should be sent to */ -@@ -332,23 +295,19 @@ static unsigned submit_stpg(struct alua_ +@@ -333,23 +296,19 @@ static unsigned submit_stpg(struct alua_ } /* @@ -137,7 +137,7 @@ Signed-off-by: Hannes Reinecke switch (h->tpgs) { case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, -@@ -610,7 +569,7 @@ static int alua_initialize(struct scsi_d +@@ -611,7 +570,7 @@ static int 
alua_initialize(struct scsi_d { int err; @@ -146,7 +146,7 @@ Signed-off-by: Hannes Reinecke if (err != SCSI_DH_OK) goto out; -@@ -684,19 +643,8 @@ static int alua_prep_fn(struct scsi_devi +@@ -685,19 +644,8 @@ static int alua_prep_fn(struct scsi_devi } static const struct scsi_dh_devlist alua_dev_list[] = { @@ -170,7 +170,7 @@ Signed-off-by: Hannes Reinecke static int alua_bus_attach(struct scsi_device *sdev); --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c -@@ -623,10 +623,10 @@ done: +@@ -622,10 +622,10 @@ done: } static const struct scsi_dh_devlist clariion_dev_list[] = { @@ -187,7 +187,7 @@ Signed-off-by: Hannes Reinecke static int clariion_bus_attach(struct scsi_device *sdev); --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c -@@ -310,11 +310,11 @@ static int hp_sw_activate(struct scsi_de +@@ -311,11 +311,11 @@ static int hp_sw_activate(struct scsi_de } static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { @@ -206,7 +206,7 @@ Signed-off-by: Hannes Reinecke static int hp_sw_bus_attach(struct scsi_device *sdev); --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c -@@ -744,34 +744,34 @@ static int rdac_check_sense(struct scsi_ +@@ -745,37 +745,37 @@ static int rdac_check_sense(struct scsi_ } static const struct scsi_dh_devlist rdac_dev_list[] = { @@ -233,10 +233,13 @@ Signed-off-by: Hannes Reinecke - {"DELL", "MD3000i"}, - {"DELL", "MD32xx"}, - {"DELL", "MD32xxi"}, +- {"DELL", "MD36xxi"}, - {"LSI", "INF-01-00"}, - {"ENGENIO", "INF-01-00"}, - {"STK", "FLEXLINE 380"}, - {"SUN", "CSM100_R_FC"}, +- {"SUN", "STK6580_6780"}, +- {"SUN", "SUN_6180"}, - {NULL, NULL}, + {"IBM", "1722", 0}, + {"IBM", "1724", 0}, @@ -261,17 +264,20 @@ Signed-off-by: Hannes Reinecke + {"DELL", "MD3000i", 0}, + {"DELL", "MD32xx", 0}, + {"DELL", "MD32xxi", 0}, ++ {"DELL", "MD36xxi", 0}, + {"LSI", "INF-01-00", 0}, + {"ENGENIO", "INF-01-00", 0}, + {"STK", "FLEXLINE 380", 0}, + {"SUN", "CSM100_R_FC", 0}, ++ {"SUN", "STK6580_6780", 0}, ++ {"SUN", "SUN_6180", 0}, + {NULL, NULL, 0}, }; static int rdac_bus_attach(struct scsi_device *sdev); --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c -@@ -837,6 +837,7 @@ static int scsi_add_lun(struct scsi_devi +@@ -838,6 +838,7 @@ static int scsi_add_lun(struct scsi_devi sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; sdev->lockable = sdev->removable; sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); @@ -281,7 +287,7 @@ Signed-off-by: Hannes Reinecke (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c -@@ -543,6 +543,7 @@ sdev_rd_attr (scsi_level, "%d\n"); +@@ -501,6 +501,7 @@ sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (vendor, "%.8s\n"); sdev_rd_attr (model, "%.16s\n"); sdev_rd_attr (rev, "%.4s\n"); @@ -289,7 +295,7 @@ Signed-off-by: Hannes Reinecke /* * TODO: can we make these symlinks to the block layer ones? 
-@@ -728,6 +729,7 @@ static struct attribute *scsi_sdev_attrs +@@ -686,6 +687,7 @@ static struct attribute *scsi_sdev_attrs &dev_attr_vendor.attr, &dev_attr_model.attr, &dev_attr_rev.attr, diff --git a/patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file b/patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file new file mode 100644 index 0000000..a9033f3 --- /dev/null +++ b/patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file @@ -0,0 +1,26 @@ +From: Vasiliy Kulikov +Subject: scsi: aic94xx: world-writable sysfs update_bios file +Introduced-by: v2.6.25-rc1 +Patch-mainline: Submitted 4 Feb 2011 + +Don't allow everybody to load firmware. + +Signed-off-by: Vasiliy Kulikov +Acked-by: Jeff Mahoney +--- + Compile tested only. + drivers/scsi/aic94xx/aic94xx_init.c | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) +diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c +index 3b7e83d..d5ff142 100644 +--- a/drivers/scsi/aic94xx/aic94xx_init.c ++++ b/drivers/scsi/aic94xx/aic94xx_init.c +@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev, + flash_error_table[i].reason); + } + +-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO, ++static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR, + asd_show_update_bios, asd_store_update_bios); + + static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) diff --git a/patches.fixes/scsi-check-host-lookup-failure b/patches.fixes/scsi-check-host-lookup-failure index 9909f43..f4da1cd 100644 --- a/patches.fixes/scsi-check-host-lookup-failure +++ b/patches.fixes/scsi-check-host-lookup-failure @@ -18,7 +18,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c -@@ -258,7 +258,7 @@ scsi_generic_msg_handler(struct sk_buff +@@ -259,7 +259,7 @@ scsi_generic_msg_handler(struct sk_buff /* if successful, scsi_host_lookup takes a shost reference */ shost = scsi_host_lookup(msg->host_no); diff --git a/patches.fixes/scsi-dh-alua-retry-UA b/patches.fixes/scsi-dh-alua-retry-UA index 99b2d14..4d18eb4 100644 --- a/patches.fixes/scsi-dh-alua-retry-UA +++ b/patches.fixes/scsi-dh-alua-retry-UA @@ -13,7 +13,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c -@@ -495,33 +495,10 @@ static int alua_check_sense(struct scsi_ +@@ -496,33 +496,10 @@ static int alua_check_sense(struct scsi_ return SUCCESS; break; case UNIT_ATTENTION: diff --git a/patches.fixes/scsi-dh-alua-send-stpg b/patches.fixes/scsi-dh-alua-send-stpg index 2a316e3..6b06d08 100644 --- a/patches.fixes/scsi-dh-alua-send-stpg +++ b/patches.fixes/scsi-dh-alua-send-stpg @@ -13,7 +13,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c -@@ -601,13 +601,12 @@ static int alua_activate(struct scsi_dev +@@ -602,13 +602,11 @@ static int alua_activate(struct scsi_dev struct alua_dh_data *h = get_alua_data(sdev); int err = SCSI_DH_OK; @@ -26,9 +26,8 @@ Signed-off-by: Hannes Reinecke + if (err != SCSI_DH_OK) + goto out; -- if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) { +- if (h->tpgs & TPGS_MODE_EXPLICIT && + if ((h->tpgs & TPGS_MODE_EXPLICIT) && -+ h->state != TPGS_STATE_OPTIMIZED) { + h->state != TPGS_STATE_OPTIMIZED && + h->state != TPGS_STATE_LBA_DEPENDENT) { h->callback_fn = fn; - h->callback_data = data; - err = submit_stpg(h); diff --git a/patches.fixes/scsi-dh-queuedata-accessors 
b/patches.fixes/scsi-dh-queuedata-accessors index 72324e0..86769a1 100644 --- a/patches.fixes/scsi-dh-queuedata-accessors +++ b/patches.fixes/scsi-dh-queuedata-accessors @@ -21,7 +21,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c -@@ -438,7 +438,7 @@ int scsi_dh_activate(struct request_queu +@@ -439,7 +439,7 @@ int scsi_dh_activate(struct request_queu struct scsi_device_handler *scsi_dh = NULL; spin_lock_irqsave(q->queue_lock, flags); @@ -29,8 +29,8 @@ Signed-off-by: Hannes Reinecke + sdev = scsi_device_from_queue(q); if (sdev && sdev->scsi_dh_data) scsi_dh = sdev->scsi_dh_data->scsi_dh; - if (!scsi_dh || !get_device(&sdev->sdev_gendev)) -@@ -500,7 +500,7 @@ int scsi_dh_handler_exist(const char *na + if (!scsi_dh || !get_device(&sdev->sdev_gendev) || +@@ -501,7 +501,7 @@ int scsi_dh_handler_exist(const char *na EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); /* @@ -39,7 +39,7 @@ Signed-off-by: Hannes Reinecke * @sdev - sdev the handler should be attached to * @name - name of the handler to attach */ -@@ -516,7 +516,7 @@ int scsi_dh_attach(struct request_queue +@@ -517,7 +517,7 @@ int scsi_dh_attach(struct request_queue return -EINVAL; spin_lock_irqsave(q->queue_lock, flags); @@ -48,7 +48,7 @@ Signed-off-by: Hannes Reinecke if (!sdev || !get_device(&sdev->sdev_gendev)) err = -ENODEV; spin_unlock_irqrestore(q->queue_lock, flags); -@@ -530,7 +530,7 @@ int scsi_dh_attach(struct request_queue +@@ -531,7 +531,7 @@ int scsi_dh_attach(struct request_queue EXPORT_SYMBOL_GPL(scsi_dh_attach); /* @@ -57,7 +57,7 @@ Signed-off-by: Hannes Reinecke * @sdev - sdev the handler should be detached from * * This function will detach the device handler only -@@ -544,7 +544,7 @@ void scsi_dh_detach(struct request_queue +@@ -545,7 +545,7 @@ void scsi_dh_detach(struct request_queue struct scsi_device_handler *scsi_dh = NULL; spin_lock_irqsave(q->queue_lock, flags); @@ -68,7 +68,7 @@ Signed-off-by: Hannes Reinecke spin_unlock_irqrestore(q->queue_lock, flags); --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c -@@ -1595,6 +1595,17 @@ static void scsi_request_fn(struct reque +@@ -1587,6 +1587,17 @@ static void scsi_request_fn(struct reque spin_lock_irq(q->queue_lock); } diff --git a/patches.fixes/scsi-dh-rdac-add-stk b/patches.fixes/scsi-dh-rdac-add-stk index 304545a..59b7685 100644 --- a/patches.fixes/scsi-dh-rdac-add-stk +++ b/patches.fixes/scsi-dh-rdac-add-stk @@ -14,7 +14,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c -@@ -758,6 +758,8 @@ static const struct scsi_dh_devlist rdac +@@ -759,6 +759,8 @@ static const struct scsi_dh_devlist rdac {"SGI", "TP9500", 0}, {"SGI", "IS", 0}, {"STK", "OPENstorage D280", 0}, diff --git a/patches.fixes/scsi-ibmvscsi-module_alias.patch b/patches.fixes/scsi-ibmvscsi-module_alias.patch index be04f21..741c391 100644 --- a/patches.fixes/scsi-ibmvscsi-module_alias.patch +++ b/patches.fixes/scsi-ibmvscsi-module_alias.patch @@ -9,7 +9,7 @@ Patch-mainline: not yet --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c -@@ -106,6 +106,9 @@ static struct scsi_transport_template *i +@@ -108,6 +108,9 @@ static struct scsi_transport_template *i static struct ibmvscsi_ops *ibmvscsi_ops; @@ -19,7 +19,7 @@ Patch-mainline: not yet MODULE_DESCRIPTION("IBM Virtual SCSI"); MODULE_AUTHOR("Dave Boutcher"); MODULE_LICENSE("GPL"); -@@ -1841,7 +1844,7 @@ static struct device_attribute *ibmvscsi +@@ -1802,7 +1805,7 @@ 
static struct device_attribute *ibmvscsi static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION, @@ -28,7 +28,7 @@ Patch-mainline: not yet .queuecommand = ibmvscsi_queuecommand, .eh_abort_handler = ibmvscsi_eh_abort_handler, .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, -@@ -2026,7 +2029,7 @@ static struct vio_driver ibmvscsi_driver +@@ -2069,7 +2072,7 @@ static struct vio_driver ibmvscsi_driver .remove = ibmvscsi_remove, .get_desired_dma = ibmvscsi_get_desired_dma, .driver = { diff --git a/patches.fixes/scsi-ibmvscsi-show-config.patch b/patches.fixes/scsi-ibmvscsi-show-config.patch index 3e047da..ecf8574 100644 --- a/patches.fixes/scsi-ibmvscsi-show-config.patch +++ b/patches.fixes/scsi-ibmvscsi-show-config.patch @@ -10,12 +10,12 @@ Signed-off-by: lxie@us.ibm.com Signed-off-by: Olaf Hering --- - drivers/scsi/ibmvscsi/ibmvscsi.c | 16 +++++++++------- - 1 file changed, 9 insertions(+), 7 deletions(-) + drivers/scsi/ibmvscsi/ibmvscsi.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c -@@ -97,10 +97,12 @@ static int max_requests = IBMVSCSI_MAX_R +@@ -99,6 +99,8 @@ static int max_requests = IBMVSCSI_MAX_R static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; static int fast_fail = 1; static int client_reserve = 1; @@ -24,12 +24,7 @@ Signed-off-by: Olaf Hering static struct scsi_transport_template *ibmvscsi_transport_template; --#define IBMVSCSI_VERSION "1.5.8" -+#define IBMVSCSI_VERSION "1.5.9" - - static struct ibmvscsi_ops *ibmvscsi_ops; - -@@ -1706,7 +1708,7 @@ static ssize_t show_host_srp_version(str +@@ -1667,7 +1669,7 @@ static ssize_t show_host_srp_version(str struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; @@ -38,7 +33,7 @@ Signed-off-by: Olaf Hering hostdata->madapter_info.srp_version); return len; } -@@ -1727,7 +1729,7 @@ static ssize_t show_host_partition_name( +@@ -1688,7 +1690,7 @@ static ssize_t show_host_partition_name( struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; @@ -47,7 +42,7 @@ Signed-off-by: Olaf Hering hostdata->madapter_info.partition_name); return len; } -@@ -1748,7 +1750,7 @@ static ssize_t show_host_partition_numbe +@@ -1709,7 +1711,7 @@ static ssize_t show_host_partition_numbe struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; @@ -56,7 +51,7 @@ Signed-off-by: Olaf Hering hostdata->madapter_info.partition_number); return len; } -@@ -1768,7 +1770,7 @@ static ssize_t show_host_mad_version(str +@@ -1729,7 +1731,7 @@ static ssize_t show_host_mad_version(str struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; @@ -65,7 +60,7 @@ Signed-off-by: Olaf Hering hostdata->madapter_info.mad_version); return len; } -@@ -1788,7 +1790,7 @@ static ssize_t show_host_os_type(struct +@@ -1749,7 +1751,7 @@ static ssize_t show_host_os_type(struct struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; @@ -74,7 +69,7 @@ Signed-off-by: Olaf Hering return len; } -@@ -1807,7 +1809,7 @@ static ssize_t show_host_config(struct d +@@ -1768,7 +1770,7 @@ static ssize_t show_host_config(struct d struct ibmvscsi_host_data *hostdata = shost_priv(shost); /* returns null-terminated host config data */ diff --git a/patches.fixes/scsi-inquiry-too-short-ratelimit b/patches.fixes/scsi-inquiry-too-short-ratelimit index 17697d8..4ca497a 100644 --- a/patches.fixes/scsi-inquiry-too-short-ratelimit +++ 
b/patches.fixes/scsi-inquiry-too-short-ratelimit @@ -15,7 +15,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c -@@ -695,7 +695,7 @@ static int scsi_probe_lun(struct scsi_de +@@ -696,7 +696,7 @@ static int scsi_probe_lun(struct scsi_de * and displaying garbage for the Vendor, Product, or Revision * strings. */ diff --git a/patches.fixes/scsi-retry-alua-transition-in-progress b/patches.fixes/scsi-retry-alua-transition-in-progress index 4f44091..f2c7a2f 100644 --- a/patches.fixes/scsi-retry-alua-transition-in-progress +++ b/patches.fixes/scsi-retry-alua-transition-in-progress @@ -21,7 +21,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c -@@ -371,7 +371,8 @@ static int scsi_check_sense(struct scsi_ +@@ -376,7 +376,8 @@ static int scsi_check_sense(struct scsi_ * if the device is in the process of becoming ready, we * should retry. */ diff --git a/patches.fixes/scsi-scan-blist-update b/patches.fixes/scsi-scan-blist-update index ea83ac2..ef81c38 100644 --- a/patches.fixes/scsi-scan-blist-update +++ b/patches.fixes/scsi-scan-blist-update @@ -15,7 +15,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c -@@ -159,7 +159,7 @@ static struct { +@@ -160,7 +160,7 @@ static struct { {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, diff --git a/patches.fixes/sd_liberal_28_sense_invalid.diff b/patches.fixes/sd_liberal_28_sense_invalid.diff index 0a1e1f0..17dfba4 100644 --- a/patches.fixes/sd_liberal_28_sense_invalid.diff +++ b/patches.fixes/sd_liberal_28_sense_invalid.diff @@ -16,7 +16,7 @@ Signed-off-by: Brandon Philips --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c -@@ -1334,8 +1334,7 @@ sd_spinup_disk(struct scsi_disk *sdkp) +@@ -1390,8 +1390,7 @@ sd_spinup_disk(struct scsi_disk *sdkp) * Yes, this sense key/ASC combination shouldn't * occur here. It's characteristic of these devices. */ diff --git a/patches.fixes/seccomp-disable-tsc-option b/patches.fixes/seccomp-disable-tsc-option index 520729a..3e4e6e1 100644 --- a/patches.fixes/seccomp-disable-tsc-option +++ b/patches.fixes/seccomp-disable-tsc-option @@ -12,20 +12,19 @@ Addition of x86-64 by Jan Beulich. Signed-off-by: Andrea Arcangeli Acked-by: Jeff Mahoney --- - arch/x86/Kconfig | 12 ++++++++++++ + arch/x86/Kconfig | 11 +++++++++++ arch/x86/kernel/process.c | 2 ++ - 2 files changed, 14 insertions(+) + 2 files changed, 13 insertions(+) --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -1485,6 +1485,18 @@ config SECCOMP +@@ -1482,6 +1482,17 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. +config SECCOMP_DISABLE_TSC + bool "Disable the TSC for seccomp tasks" + depends on SECCOMP -+ default n + help + This feature mathematically prevents covert channels + for tasks running under SECCOMP. 
This can generate @@ -39,7 +38,7 @@ Acked-by: Jeff Mahoney ---help--- --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c -@@ -139,6 +139,7 @@ static void hard_disable_TSC(void) +@@ -135,6 +135,7 @@ static void hard_disable_TSC(void) void disable_TSC(void) { @@ -47,7 +46,7 @@ Acked-by: Jeff Mahoney preempt_disable(); if (!test_and_set_thread_flag(TIF_NOTSC)) /* -@@ -147,6 +148,7 @@ void disable_TSC(void) +@@ -143,6 +144,7 @@ void disable_TSC(void) */ hard_disable_TSC(); preempt_enable(); diff --git a/patches.fixes/setup_arg_pages-diagnose-excessive-argument-size b/patches.fixes/setup_arg_pages-diagnose-excessive-argument-size deleted file mode 100644 index 1bc1308..0000000 --- a/patches.fixes/setup_arg_pages-diagnose-excessive-argument-size +++ /dev/null @@ -1,46 +0,0 @@ -From: Roland McGrath -Date: Wed, 8 Sep 2010 02:35:49 +0000 (-0700) -Subject: setup_arg_pages: diagnose excessive argument size -Git-commit: 1b528181b2ffa14721fb28ad1bd539fe1732c583 -Patch-mainline: 2.6.36-rc4 -References: bnc#635425 -Introduced-by: 2.6.23 - -setup_arg_pages: diagnose excessive argument size - -The CONFIG_STACK_GROWSDOWN variant of setup_arg_pages() does not -check the size of the argument/environment area on the stack. -When it is unworkably large, shift_arg_pages() hits its BUG_ON. -This is exploitable with a very large RLIMIT_STACK limit, to -create a crash pretty easily. - -Check that the initial stack is not too large to make it possible -to map in any executable. We're not checking that the actual -executable (or intepreter, for binfmt_elf) will fit. So those -mappings might clobber part of the initial stack mapping. But -that is just userland lossage that userland made happen, not a -kernel problem. - -Signed-off-by: Roland McGrath -Reviewed-by: KOSAKI Motohiro -Signed-off-by: Linus Torvalds -Acked-by: Jeff Mahoney ---- - - fs/exec.c | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -593,6 +593,11 @@ int setup_arg_pages(struct linux_binprm - #else - stack_top = arch_align_stack(stack_top); - stack_top = PAGE_ALIGN(stack_top); -+ -+ if (unlikely(stack_top < mmap_min_addr) || -+ unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) -+ return -ENOMEM; -+ - stack_shift = vma->vm_end - stack_top; - - bprm->p -= stack_shift; diff --git a/patches.fixes/sunrpc-monotonic-expiry b/patches.fixes/sunrpc-monotonic-expiry deleted file mode 100644 index 6a19ca6..0000000 --- a/patches.fixes/sunrpc-monotonic-expiry +++ /dev/null @@ -1,250 +0,0 @@ -From: NeilBrown -Date: Wed, 17 Feb 2010 16:53:34 +1100 -Subject: [PATCH] sunrpc: use monotonic time in expiry cache -Patch-mainline: Submitted for 2.6.34 -References: bnc#578668 - -this protects us from confusion when the wallclock time changes. - -We convert to and from wallclock when setting or reading expiry -times. 
- -Signed-off-by: NeilBrown -Acked-by: NeilBrown - ---- - fs/nfs/dns_resolve.c | 6 +++--- - fs/nfsd/export.c | 9 +++------ - include/linux/sunrpc/cache.h | 23 ++++++++++++++++++++++- - net/sunrpc/cache.c | 35 +++++++++++++++++++---------------- - 4 files changed, 47 insertions(+), 26 deletions(-) - ---- a/fs/nfs/dns_resolve.c -+++ b/fs/nfs/dns_resolve.c -@@ -143,7 +143,7 @@ static int nfs_dns_show(struct seq_file - return 0; - } - item = container_of(h, struct nfs_dns_ent, h); -- ttl = (long)item->h.expiry_time - (long)get_seconds(); -+ ttl = (long)item->h.expiry_time - (long)monotonic_seconds(); - if (ttl < 0) - ttl = 0; - -@@ -215,7 +215,7 @@ static int nfs_dns_parse(struct cache_de - ttl = get_expiry(&buf); - if (ttl == 0) - goto out; -- key.h.expiry_time = ttl + get_seconds(); -+ key.h.expiry_time = ttl + monotonic_seconds(); - - ret = -ENOMEM; - item = nfs_dns_lookup(cd, &key); -@@ -277,7 +277,7 @@ static int do_cache_lookup_nowait(struct - goto out_err; - ret = -ETIMEDOUT; - if (!test_bit(CACHE_VALID, &(*item)->h.flags) -- || (*item)->h.expiry_time < get_seconds() -+ || (*item)->h.expiry_time < monotonic_seconds() - || cd->flush_time > (*item)->h.last_refresh) - goto out_put; - ret = -ENOENT; ---- a/fs/nfsd/export.c -+++ b/fs/nfsd/export.c -@@ -946,10 +946,9 @@ static void exp_fsid_unhash(struct svc_e - - ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid); - if (!IS_ERR(ek)) { -- ek->h.expiry_time = get_seconds()-1; -+ sunrpc_invalidate(&ek->h, &svc_expkey_cache); - cache_put(&ek->h, &svc_expkey_cache); - } -- svc_expkey_cache.nextcheck = get_seconds(); - } - - static int exp_fsid_hash(svc_client *clp, struct svc_export *exp) -@@ -984,10 +983,9 @@ static void exp_unhash(struct svc_export - - ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino); - if (!IS_ERR(ek)) { -- ek->h.expiry_time = get_seconds()-1; -+ sunrpc_invalidate(&ek->h, &svc_expkey_cache); - cache_put(&ek->h, &svc_expkey_cache); - } -- svc_expkey_cache.nextcheck = get_seconds(); - } - - /* -@@ -1108,8 +1106,7 @@ out: - static void - exp_do_unexport(svc_export *unexp) - { -- unexp->h.expiry_time = get_seconds()-1; -- svc_export_cache.nextcheck = get_seconds(); -+ sunrpc_invalidate(&unexp->h, &svc_export_cache); - exp_unhash(unexp); - exp_fsid_unhash(unexp); - } ---- a/include/linux/sunrpc/cache.h -+++ b/include/linux/sunrpc/cache.h -@@ -220,14 +220,35 @@ static inline int get_int(char **bpp, in - return 0; - } - -+/* -+ * timestamps kept in the cache are expressed in seconds -+ * since boot. This is the best for measuring differences in -+ * real time. 
-+ */ -+static inline unsigned long monotonic_seconds(void) -+{ -+ struct timespec boot; -+ getboottime(&boot); -+ return get_seconds() - boot.tv_sec; -+} -+ - static inline time_t get_expiry(char **bpp) - { - int rv; -+ struct timespec boot; -+ - if (get_int(bpp, &rv)) - return 0; - if (rv < 0) - return 0; -- return rv; -+ getboottime(&boot); -+ return rv - boot.tv_sec; - } - -+static inline void sunrpc_invalidate(struct cache_head *h, -+ struct cache_detail *detail) -+{ -+ h->expiry_time = monotonic_seconds() - 1; -+ detail->nextcheck = monotonic_seconds(); -+} - #endif /* _LINUX_SUNRPC_CACHE_H_ */ ---- a/net/sunrpc/cache.c -+++ b/net/sunrpc/cache.c -@@ -41,7 +41,7 @@ static void cache_revisit_request(struct - - static void cache_init(struct cache_head *h) - { -- time_t now = get_seconds(); -+ time_t now = monotonic_seconds(); - h->next = NULL; - h->flags = 0; - kref_init(&h->ref); -@@ -108,7 +108,7 @@ static void cache_dequeue(struct cache_d - static void cache_fresh_locked(struct cache_head *head, time_t expiry) - { - head->expiry_time = expiry; -- head->last_refresh = get_seconds(); -+ head->last_refresh = monotonic_seconds(); - set_bit(CACHE_VALID, &head->flags); - } - -@@ -184,7 +184,7 @@ static int cache_make_upcall(struct cach - static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) - { - if (!test_bit(CACHE_VALID, &h->flags) || -- h->expiry_time < get_seconds()) -+ h->expiry_time < monotonic_seconds()) - return -EAGAIN; - else if (detail->flush_time > h->last_refresh) - return -EAGAIN; -@@ -222,7 +222,7 @@ int cache_check(struct cache_detail *det - - /* now see if we want to start an upcall */ - refresh_age = (h->expiry_time - h->last_refresh); -- age = get_seconds() - h->last_refresh; -+ age = monotonic_seconds() - h->last_refresh; - - if (rqstp == NULL) { - if (rv == -EAGAIN) -@@ -237,7 +237,7 @@ int cache_check(struct cache_detail *det - cache_revisit_request(h); - if (rv == -EAGAIN) { - set_bit(CACHE_NEGATIVE, &h->flags); -- cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY); -+ cache_fresh_locked(h, monotonic_seconds()+CACHE_NEW_EXPIRY); - cache_fresh_unlocked(h, detail); - rv = -ENOENT; - } -@@ -372,11 +372,11 @@ static int cache_clean(void) - return -1; - } - current_detail = list_entry(next, struct cache_detail, others); -- if (current_detail->nextcheck > get_seconds()) -+ if (current_detail->nextcheck > monotonic_seconds()) - current_index = current_detail->hash_size; - else { - current_index = 0; -- current_detail->nextcheck = get_seconds()+30*60; -+ current_detail->nextcheck = monotonic_seconds()+30*60; - } - } - -@@ -401,7 +401,7 @@ static int cache_clean(void) - for (; ch; cp= & ch->next, ch= *cp) { - if (current_detail->nextcheck > ch->expiry_time) - current_detail->nextcheck = ch->expiry_time+1; -- if (ch->expiry_time >= get_seconds() && -+ if (ch->expiry_time >= monotonic_seconds() && - ch->last_refresh >= current_detail->flush_time) - continue; - if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) -@@ -465,7 +465,7 @@ EXPORT_SYMBOL_GPL(cache_flush); - void cache_purge(struct cache_detail *detail) - { - detail->flush_time = LONG_MAX; -- detail->nextcheck = get_seconds(); -+ detail->nextcheck = monotonic_seconds(); - cache_flush(); - detail->flush_time = 1; - } -@@ -1249,7 +1249,8 @@ static int c_show(struct seq_file *m, vo - - ifdebug(CACHE) - seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", -- cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags); -+ cp->expiry_time - monotonic_seconds() + get_seconds(), -+ 
atomic_read(&cp->ref.refcount), cp->flags); - cache_get(cp); - if (cache_check(cd, cp, NULL)) - /* cache_check does a cache_put on failure */ -@@ -1313,7 +1314,8 @@ static ssize_t read_flush(struct file *f - unsigned long p = *ppos; - size_t len; - -- sprintf(tbuf, "%lu\n", cd->flush_time); -+ sprintf(tbuf, "%lu\n", (cd->flush_time - monotonic_seconds() -+ + get_seconds())); - len = strlen(tbuf); - if (p >= len) - return 0; -@@ -1331,19 +1333,20 @@ static ssize_t write_flush(struct file * - struct cache_detail *cd) - { - char tbuf[20]; -- char *ep; -- long flushtime; -+ char *bp, *ep; -+ - if (*ppos || count > sizeof(tbuf)-1) - return -EINVAL; - if (copy_from_user(tbuf, buf, count)) - return -EFAULT; - tbuf[count] = 0; -- flushtime = simple_strtoul(tbuf, &ep, 0); -+ simple_strtoul(tbuf, &ep, 0); - if (*ep && *ep != '\n') - return -EINVAL; - -- cd->flush_time = flushtime; -- cd->nextcheck = get_seconds(); -+ bp = tbuf; -+ cd->flush_time = get_expiry(&bp); -+ cd->nextcheck = monotonic_seconds(); - cache_flush(); - - *ppos += count; diff --git a/patches.fixes/taskstats-alignment b/patches.fixes/taskstats-alignment deleted file mode 100644 index 8abb7a9..0000000 --- a/patches.fixes/taskstats-alignment +++ /dev/null @@ -1,62 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] delayacct: align to 8 byte boundary on 64-bit systems -References: bnc#578065 -Patch-mainline: Hopefully 2.6.34, submitting when the window opens - - prepare_reply sets up an skb for the response. If I understand it correctly, - the payload contains: - - +--------------------------------+ - | genlmsghdr - 4 bytes | - +--------------------------------+ - | NLA header - 4 bytes | /* Aggregate header */ - +-+------------------------------+ - | | NLA header - 4 bytes | /* PID header */ - | +------------------------------+ - | | pid/tgid - 4 bytes | - | +------------------------------+ - | | NLA header - 4 bytes | /* stats header */ - | + -----------------------------+ <- oops. aligned on 4 byte boundary - | | struct taskstats - 328 bytes | - +-+------------------------------+ - - The start of the taskstats struct must be 8 byte aligned on IA64 (and other - systems with 8 byte alignment rules for 64-bit types) or runtime alignment - warnings will be issued. - - This patch pads the pid/tgid field out to sizeof(long), which forces - the alignment of taskstats. The getdelays userspace code is ok with this - since it assumes 32-bit pid/tgid and then honors that header's length field. - - An array is used to avoid exposing kernel memory contents to userspace in the - response. - -Signed-off-by: Jeff Mahoney ---- - kernel/taskstats.c | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - ---- a/kernel/taskstats.c -+++ b/kernel/taskstats.c -@@ -359,6 +359,12 @@ static struct taskstats *mk_reply(struct - struct nlattr *na, *ret; - int aggr; - -+ /* If we don't pad, we end up with alignment on a 4 byte boundary. -+ * This causes lots of runtime warnings on systems requiring 8 byte -+ * alignment */ -+ u32 pids[2] = { pid, 0 }; -+ int pid_size = ALIGN(sizeof(pid), sizeof(long)); -+ - aggr = (type == TASKSTATS_TYPE_PID) - ? 
TASKSTATS_TYPE_AGGR_PID - : TASKSTATS_TYPE_AGGR_TGID; -@@ -366,7 +372,7 @@ static struct taskstats *mk_reply(struct - na = nla_nest_start(skb, aggr); - if (!na) - goto err; -- if (nla_put(skb, type, sizeof(pid), &pid) < 0) -+ if (nla_put(skb, type, pid_size, pids) < 0) - goto err; - ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats)); - if (!ret) diff --git a/patches.fixes/tehuti-firmware-name b/patches.fixes/tehuti-firmware-name deleted file mode 100644 index d601bd3..0000000 --- a/patches.fixes/tehuti-firmware-name +++ /dev/null @@ -1,23 +0,0 @@ -From: Hannes Reinecke -Subject: Tehuti network driver references wrong firmware -References: bnc#562092 -Patch-mainline: not yet - -The tehuti network driver references the firmware -'tehuti/firmware.bin', which is actually named -'tehuti/bdx.bin'. - -Signed-off-by: Hannes Reinecke - ---- - drivers/net/tehuti.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/tehuti.c -+++ b/drivers/net/tehuti.c -@@ -2518,4 +2518,4 @@ module_exit(bdx_module_exit); - MODULE_LICENSE("GPL"); - MODULE_AUTHOR(DRIVER_AUTHOR); - MODULE_DESCRIPTION(BDX_DRV_DESC); --MODULE_FIRMWARE("tehuti/firmware.bin"); -+MODULE_FIRMWARE("tehuti/bdx.bin"); diff --git a/patches.fixes/tg3-fix-default-wol.patch b/patches.fixes/tg3-fix-default-wol.patch index 38b5b3d..b706c0c 100644 --- a/patches.fixes/tg3-fix-default-wol.patch +++ b/patches.fixes/tg3-fix-default-wol.patch @@ -17,7 +17,7 @@ Signed-off-by: Rafael J. Wysocki --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c -@@ -12194,8 +12194,10 @@ static void __devinit tg3_get_eeprom_hw_ +@@ -12048,8 +12048,10 @@ static void __devinit tg3_get_eeprom_hw_ if (val & VCPU_CFGSHDW_ASPM_DBNC) tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; if ((val & VCPU_CFGSHDW_WOL_ENABLE) && @@ -29,7 +29,7 @@ Signed-off-by: Rafael J. Wysocki goto done; } -@@ -12329,8 +12331,10 @@ static void __devinit tg3_get_eeprom_hw_ +@@ -12182,8 +12184,10 @@ static void __devinit tg3_get_eeprom_hw_ tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && @@ -40,4 +40,4 @@ Signed-off-by: Rafael J. Wysocki + } if (cfg2 & (1 << 17)) - tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; + tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; diff --git a/patches.fixes/tiocgdev b/patches.fixes/tiocgdev deleted file mode 100644 index 4ac0d7e..0000000 --- a/patches.fixes/tiocgdev +++ /dev/null @@ -1,154 +0,0 @@ -Subject: tiocgdev ioctl -Patch-mainline: never, lkml guys don't like it -From: kraxel@suse.de - -add tty ioctl to figure physical device of the console. 
- - arch/alpha/include/asm/ioctls.h | 1 + - arch/arm/include/asm/ioctls.h | 1 + - arch/ia64/include/asm/ioctls.h | 1 + - arch/m68k/include/asm/ioctls.h | 1 + - arch/mips/include/asm/ioctls.h | 1 + - arch/powerpc/include/asm/ioctls.h | 1 + - arch/s390/include/asm/ioctls.h | 1 + - arch/sh/include/asm/ioctls.h | 1 + - arch/sparc/include/asm/ioctls.h | 1 + - drivers/char/tty_io.c | 15 +++++++++++++++ - fs/compat_ioctl.c | 1 + - include/asm-generic/ioctls.h | 1 + - 12 files changed, 26 insertions(+) - ---- a/arch/alpha/include/asm/ioctls.h -+++ b/arch/alpha/include/asm/ioctls.h -@@ -91,6 +91,7 @@ - #define TIOCGSID 0x5429 /* Return the session ID of FD */ - #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - #define TIOCSERCONFIG 0x5453 - #define TIOCSERGWILD 0x5454 ---- a/arch/arm/include/asm/ioctls.h -+++ b/arch/arm/include/asm/ioctls.h -@@ -52,6 +52,7 @@ - #define TCSETSF2 _IOW('T',0x2D, struct termios2) - #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ - #define FIOCLEX 0x5451 ---- a/arch/ia64/include/asm/ioctls.h -+++ b/arch/ia64/include/asm/ioctls.h -@@ -59,6 +59,7 @@ - #define TCSETSF2 _IOW('T',0x2D, struct termios2) - #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ - #define FIOCLEX 0x5451 ---- a/arch/m68k/include/asm/ioctls.h -+++ b/arch/m68k/include/asm/ioctls.h -@@ -52,6 +52,7 @@ - #define TCSETSF2 _IOW('T',0x2D, struct termios2) - #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ - #define FIOCLEX 0x5451 ---- a/arch/mips/include/asm/ioctls.h -+++ b/arch/mips/include/asm/ioctls.h -@@ -83,6 +83,7 @@ - #define TCSETSF2 _IOW('T', 0x2D, struct termios2) - #define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get real dev no below /dev/console */ - - /* I hope the range from 0x5480 on is free ... 
*/ - #define TIOCSCTTY 0x5480 /* become controlling tty */ ---- a/arch/powerpc/include/asm/ioctls.h -+++ b/arch/powerpc/include/asm/ioctls.h -@@ -93,6 +93,7 @@ - #define TIOCSRS485 0x542f - #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - #define TIOCSERCONFIG 0x5453 - #define TIOCSERGWILD 0x5454 ---- a/arch/s390/include/asm/ioctls.h -+++ b/arch/s390/include/asm/ioctls.h -@@ -60,6 +60,7 @@ - #define TCSETSF2 _IOW('T',0x2D, struct termios2) - #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ - #define FIOCLEX 0x5451 ---- a/arch/sh/include/asm/ioctls.h -+++ b/arch/sh/include/asm/ioctls.h -@@ -84,6 +84,7 @@ - #define TCSETSF2 _IOW('T', 45, struct termios2) - #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - #define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */ - #define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */ ---- a/arch/sparc/include/asm/ioctls.h -+++ b/arch/sparc/include/asm/ioctls.h -@@ -19,6 +19,7 @@ - #define TCSETS2 _IOW('T', 13, struct termios2) - #define TCSETSW2 _IOW('T', 14, struct termios2) - #define TCSETSF2 _IOW('T', 15, struct termios2) -+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ - - /* Note that all the ioctls that are not available in Linux have a - * double underscore on the front to: a) avoid some programs to ---- a/drivers/char/tty_io.c -+++ b/drivers/char/tty_io.c -@@ -2546,6 +2546,21 @@ long tty_ioctl(struct file *file, unsign - case TIOCSETD: - return tiocsetd(tty, p); - /* -+ * Without the real device to which /dev/console is connected, -+ * blogd can not work. -+ * blogd spawns a pty/tty pair, -+ * set /dev/console to the tty of that pair (ioctl TIOCCONS), -+ * then reads in all input from the current /dev/console, -+ * buffer or write the readed data to /var/log/boot.msg -+ * _and_ to the original real device. 
-+ */ -+ case TIOCGDEV: -+ { -+ unsigned int ret = new_encode_dev(tty_devnum(real_tty)); -+ return put_user(ret, (unsigned int __user *)p); -+ } -+ -+ /* - * Break handling - */ - case TIOCSBRK: /* Turn break on, unconditionally */ ---- a/fs/compat_ioctl.c -+++ b/fs/compat_ioctl.c -@@ -941,6 +941,7 @@ COMPATIBLE_IOCTL(TCSETSW) - COMPATIBLE_IOCTL(TCSETSF) - COMPATIBLE_IOCTL(TIOCLINUX) - COMPATIBLE_IOCTL(TIOCSBRK) -+COMPATIBLE_IOCTL(TIOCGDEV) - COMPATIBLE_IOCTL(TIOCCBRK) - COMPATIBLE_IOCTL(TIOCGSID) - COMPATIBLE_IOCTL(TIOCGICOUNT) ---- a/include/asm-generic/ioctls.h -+++ b/include/asm-generic/ioctls.h -@@ -65,6 +65,7 @@ - #define TIOCSRS485 0x542F - #define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ - #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ -+#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get real dev no below /dev/console */ - #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ - #define TCSETX 0x5433 - #define TCSETXF 0x5434 diff --git a/patches.fixes/tulip-quad-NIC-ifdown b/patches.fixes/tulip-quad-NIC-ifdown index 951031c..a310bee 100644 --- a/patches.fixes/tulip-quad-NIC-ifdown +++ b/patches.fixes/tulip-quad-NIC-ifdown @@ -14,7 +14,7 @@ Acked-by: Olaf Kirch --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c -@@ -1827,6 +1827,10 @@ static void __devexit tulip_remove_one ( +@@ -1943,6 +1943,10 @@ static void __devexit tulip_remove_one ( return; tp = netdev_priv(dev); diff --git a/patches.fixes/twl6030-fix-note_interrupt-call b/patches.fixes/twl6030-fix-note_interrupt-call deleted file mode 100644 index 7f3e071..0000000 --- a/patches.fixes/twl6030-fix-note_interrupt-call +++ /dev/null @@ -1,22 +0,0 @@ -From: Jeff Mahoney -Subject: twl6030: Fix note_interrupt call -Patch-mainline: not yet - - note_interrupt takes 4 arguments, not 3. - -Signed-off-by: Jeff Mahoney ---- - drivers/mfd/twl6030-irq.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/mfd/twl6030-irq.c -+++ b/drivers/mfd/twl6030-irq.c -@@ -143,7 +143,7 @@ static int twl6030_irq_thread(void *data - */ - if (d->status & IRQ_DISABLED) - note_interrupt(module_irq, d, -- IRQ_NONE); -+ IRQ_NONE, false); - else - d->handle_irq(module_irq, d); - diff --git a/patches.fixes/ubifs-restrict-world-writable-debugfs-files b/patches.fixes/ubifs-restrict-world-writable-debugfs-files new file mode 100644 index 0000000..e9c5ba1 --- /dev/null +++ b/patches.fixes/ubifs-restrict-world-writable-debugfs-files @@ -0,0 +1,45 @@ +From: Vasiliy Kulikov +Date: Fri, 4 Feb 2011 15:24:19 +0300 +Subject: UBIFS: restrict world-writable debugfs files +Patch-mainline: v2.6.39-rc1 +Git-commit: 8c559d30b4e59cf6994215ada1fe744928f494bf +Introduced-by: v2.6.29-rc1 +References: bnc#673934 + +Don't allow everybody to dump sensitive information about filesystems. 
+ +Signed-off-by: Vasiliy Kulikov +Signed-off-by: Artem Bityutskiy +Acked-by: Jeff Mahoney +--- + fs/ubifs/debug.c | 6 +++--- + 1 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c +index 0bee4db..bcb1acb 100644 +--- a/fs/ubifs/debug.c ++++ b/fs/ubifs/debug.c +@@ -2813,19 +2813,19 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) + } + + fname = "dump_lprops"; +- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); ++ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dfs_dump_lprops = dent; + + fname = "dump_budg"; +- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); ++ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dfs_dump_budg = dent; + + fname = "dump_tnc"; +- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); ++ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dfs_dump_tnc = dent; + diff --git a/patches.fixes/use-rcu-lock-in-setpgid.patch b/patches.fixes/use-rcu-lock-in-setpgid.patch deleted file mode 100644 index 8883e8c..0000000 --- a/patches.fixes/use-rcu-lock-in-setpgid.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 950eaaca681c44aab87a46225c9e44f902c080aa Mon Sep 17 00:00:00 2001 -From: Paul E. McKenney -Date: Tue, 31 Aug 2010 17:00:18 -0700 -Subject: pid: make setpgid() system call use RCU read-side critical section -Git-commit: 950eaaca681c44aab87a46225c9e44f902c080aa -Patch-mainline: yes -References: bnc#639728 - -[ 23.584719] -[ 23.584720] =================================================== -[ 23.585059] [ INFO: suspicious rcu_dereference_check() usage. ] -[ 23.585176] --------------------------------------------------- -[ 23.585176] kernel/pid.c:419 invoked rcu_dereference_check() without protection! -[ 23.585176] -[ 23.585176] other info that might help us debug this: -[ 23.585176] -[ 23.585176] -[ 23.585176] rcu_scheduler_active = 1, debug_locks = 1 -[ 23.585176] 1 lock held by rc.sysinit/728: -[ 23.585176] #0: (tasklist_lock){.+.+..}, at: [] sys_setpgid+0x5f/0x193 -[ 23.585176] -[ 23.585176] stack backtrace: -[ 23.585176] Pid: 728, comm: rc.sysinit Not tainted 2.6.36-rc2 #2 -[ 23.585176] Call Trace: -[ 23.585176] [] lockdep_rcu_dereference+0x99/0xa2 -[ 23.585176] [] find_task_by_pid_ns+0x50/0x6a -[ 23.585176] [] find_task_by_vpid+0x1d/0x1f -[ 23.585176] [] sys_setpgid+0x67/0x193 -[ 23.585176] [] system_call_fastpath+0x16/0x1b -[ 24.959669] type=1400 audit(1282938522.956:4): avc: denied { module_request } for pid=766 comm="hwclock" kmod="char-major-10-135" scontext=system_u:system_r:hwclock_t:s0 tcontext=system_u:system_r:kernel_t:s0 tclas - -It turns out that the setpgid() system call fails to enter an RCU -read-side critical section before doing a PID-to-task_struct translation. -This commit therefore does rcu_read_lock() before the translation, and -also does rcu_read_unlock() after the last use of the returned pointer. - -Reported-by: Andrew Morton -Signed-off-by: Paul E. 
McKenney -Acked-by: David Howells -Signed-off-by: Jiri Slaby ---- - kernel/sys.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -962,6 +962,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid - pgid = pid; - if (pgid < 0) - return -EINVAL; -+ rcu_read_lock(); - - /* From this point forward we keep holding onto the tasklist lock - * so that our parent does not change from under us. -DaveM -@@ -1015,6 +1016,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid - out: - /* All paths lead to here, thus we are safe. -DaveM */ - write_unlock_irq(&tasklist_lock); -+ rcu_read_unlock(); - return err; - } - diff --git a/patches.fixes/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing b/patches.fixes/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing deleted file mode 100644 index 6dd270f..0000000 --- a/patches.fixes/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing +++ /dev/null @@ -1,53 +0,0 @@ -From: Roland McGrath -Date: Tue, 14 Sep 2010 19:22:58 +0000 (-0700) -Subject: x86-64, compat: Retruncate rax after ia32 syscall entry tracing -Git-commit: eefdca043e8391dcd719711716492063030b55ac -References: CVE-2010-3301 bnc#639708 -Patch-mainline: 2.6.36 -Introduced-by: 2.6.27 - -x86-64, compat: Retruncate rax after ia32 syscall entry tracing - -In commit d4d6715, we reopened an old hole for a 64-bit ptracer touching a -32-bit tracee in system call entry. A %rax value set via ptrace at the -entry tracing stop gets used whole as a 32-bit syscall number, while we -only check the low 32 bits for validity. - -Fix it by truncating %rax back to 32 bits after syscall_trace_enter, -in addition to testing the full 64 bits as has already been added. - -Reported-by: Ben Hawkes -Signed-off-by: Roland McGrath -Signed-off-by: H. Peter Anvin -Acked-by: Jeff Mahoney ---- - - arch/x86/ia32/ia32entry.S | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S -index 84e3a4e..518bb99 100644 ---- a/arch/x86/ia32/ia32entry.S -+++ b/arch/x86/ia32/ia32entry.S -@@ -50,7 +50,12 @@ - /* - * Reload arg registers from stack in case ptrace changed them. - * We don't reload %eax because syscall_trace_enter() returned -- * the value it wants us to use in the table lookup. -+ * the %rax value we should see. Instead, we just truncate that -+ * value to 32 bits again as we did on entry from user mode. -+ * If it's a new value set by user_regset during entry tracing, -+ * this matches the normal truncation of the user-mode value. -+ * If it's -1 to make us punt the syscall, then (u32)-1 is still -+ * an appropriately invalid value. - */ - .macro LOAD_ARGS32 offset, _r9=0 - .if \_r9 -@@ -60,6 +65,7 @@ - movl \offset+48(%rsp),%edx - movl \offset+56(%rsp),%esi - movl \offset+64(%rsp),%edi -+ movl %eax,%eax /* zero extension */ - .endm - - .macro CFI_STARTPROC32 simple diff --git a/patches.fixes/x86-64-compat-test-rax-for-the-syscall-number-not-eax b/patches.fixes/x86-64-compat-test-rax-for-the-syscall-number-not-eax deleted file mode 100644 index b89a16e..0000000 --- a/patches.fixes/x86-64-compat-test-rax-for-the-syscall-number-not-eax +++ /dev/null @@ -1,99 +0,0 @@ -From: H. 
Peter Anvin -Date: Tue, 14 Sep 2010 19:42:41 +0000 (-0700) -Subject: x86-64, compat: Test %rax for the syscall number, not %eax -Git-commit: 36d001c70d8a0144ac1d038f6876c484849a74de -References: CVE-2010-3301 bnc#639708 -Patch-mainline: 2.6.36 -Introduced-by: 2.6.27 - -x86-64, compat: Test %rax for the syscall number, not %eax - -On 64 bits, we always, by necessity, jump through the system call -table via %rax. For 32-bit system calls, in theory the system call -number is stored in %eax, and the code was testing %eax for a valid -system call number. At one point we loaded the stored value back from -the stack to enforce zero-extension, but that was removed in checkin -d4d67150165df8bf1cc05e532f6efca96f907cab. An actual 32-bit process -will not be able to introduce a non-zero-extended number, but it can -happen via ptrace. - -Instead of re-introducing the zero-extension, test what we are -actually going to use, i.e. %rax. This only adds a handful of REX -prefixes to the code. - -Reported-by: Ben Hawkes -Signed-off-by: H. Peter Anvin -Cc: -Cc: Roland McGrath -Cc: Andrew Morton -Acked-by: Jeff Mahoney ---- - - arch/x86/ia32/ia32entry.S | 14 +++++++------- - 1 file changed, 7 insertions(+), 7 deletions(-) - ---- a/arch/x86/ia32/ia32entry.S -+++ b/arch/x86/ia32/ia32entry.S -@@ -159,7 +159,7 @@ ENTRY(ia32_sysenter_target) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) - CFI_REMEMBER_STATE - jnz sysenter_tracesys -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys - sysenter_do_call: - IA32_ARG_FIXUP -@@ -201,7 +201,7 @@ sysexit_from_sys_call: - movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ - call audit_syscall_entry - movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys - movl %ebx,%edi /* reload 1st syscall arg */ - movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ -@@ -254,7 +254,7 @@ sysenter_tracesys: - call syscall_trace_enter - LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ - RESTORE_REST -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ - jmp sysenter_do_call - CFI_ENDPROC -@@ -320,7 +320,7 @@ ENTRY(ia32_cstar_target) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) - CFI_REMEMBER_STATE - jnz cstar_tracesys -- cmpl $IA32_NR_syscalls-1,%eax -+ cmpq $IA32_NR_syscalls-1,%rax - ja ia32_badsys - cstar_do_call: - IA32_ARG_FIXUP 1 -@@ -373,7 +373,7 @@ cstar_tracesys: - LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ - RESTORE_REST - xchgl %ebp,%r9d -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ - jmp cstar_do_call - END(ia32_cstar_target) -@@ -431,7 +431,7 @@ ENTRY(ia32_syscall) - orl $TS_COMPAT,TI_status(%r10) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) - jnz ia32_tracesys -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys - ia32_do_call: - IA32_ARG_FIXUP -@@ -450,7 +450,7 @@ ia32_tracesys: - call syscall_trace_enter - LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ - RESTORE_REST -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ - jmp ia32_do_call - END(ia32_syscall) diff --git a/patches.fixes/xfs-dmapi-fixes b/patches.fixes/xfs-dmapi-fixes deleted file mode 100644 
index f225757..0000000 --- a/patches.fixes/xfs-dmapi-fixes +++ /dev/null @@ -1,38 +0,0 @@ -From: Bill O'Donnel -Subject: xfs/dmapi: fix crash on mount -References: bnc#458027 -Patch-mainline: not yet, depends on dmapi - - This patch resolves a crash on mount problem with dmapi, relating to an - errant search and replace. - -Acked-by: Jeff Mahoney ---- - - fs/dmapi/dmapi_private.h | 4 ++-- - fs/xfs/xfs_dmops.c | 1 - - 2 files changed, 2 insertions(+), 3 deletions(-) - ---- a/fs/dmapi/dmapi_private.h -+++ b/fs/dmapi/dmapi_private.h -@@ -37,8 +37,8 @@ - #include "sv.h" - - #ifdef CONFIG_PROC_FS --#define DMAPI_PROCFS "orig/fs/dmapi_v2" /* DMAPI device in /proc. */ --#define DMAPI_DBG_PROCFS "orig/fs/dmapi_d" /* DMAPI debugging dir */ -+#define DMAPI_PROCFS "fs/dmapi_v2" /* DMAPI device in /proc. */ -+#define DMAPI_DBG_PROCFS "fs/dmapi_d" /* DMAPI debugging dir */ - #endif - - extern struct kmem_cache *dm_fsreg_cachep; ---- a/fs/xfs/xfs_dmops.c -+++ b/fs/xfs/xfs_dmops.c -@@ -57,7 +57,6 @@ xfs_dmops_get(struct xfs_mount *mp) - mp->m_dm_ops = &xfs_dmcore_stub; - } - -- mp->m_dm_ops = &xfs_dmcore_stub; - return 0; - } - diff --git a/patches.fixes/xfs-export-debug b/patches.fixes/xfs-export-debug deleted file mode 100644 index 239562c..0000000 --- a/patches.fixes/xfs-export-debug +++ /dev/null @@ -1,23 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] xfs: export assertion handler -Patch-mainline: not yet, depends on dmapi - - xfs dmapi support uses the xfs assertion infrastructure if debugging is - enabled. This patch exports the assfail function. - -Signed-off-by: Jeff Mahoney - ---- - fs/xfs/support/debug.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/fs/xfs/support/debug.c -+++ b/fs/xfs/support/debug.c -@@ -108,6 +108,7 @@ assfail(char *expr, char *file, int line - printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line); - BUG(); - } -+EXPORT_SYMBOL_GPL(assfail); - - void - xfs_hex_dump(void *p, int length) diff --git a/patches.kernel.org/patch-2.6.38.1 b/patches.kernel.org/patch-2.6.38.1 new file mode 100644 index 0000000..8f8bae3 --- /dev/null +++ b/patches.kernel.org/patch-2.6.38.1 @@ -0,0 +1,2788 @@ +From: Jiri Slaby +Subject: Linux 2.6.38.1 +Patch-mainline: Linux 2.6.38.1 +References: bnc#558740 + +Signed-off-by: Jiri Slaby +--- +diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices +index 87da405..9edb75d 100644 +--- a/Documentation/i2c/instantiating-devices ++++ b/Documentation/i2c/instantiating-devices +@@ -100,7 +100,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) + (...) 
+ i2c_adap = i2c_get_adapter(2); + memset(&i2c_info, 0, sizeof(struct i2c_board_info)); +- strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); ++ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); + isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, + normal_i2c, NULL); + i2c_put_adapter(i2c_adap); +diff --git a/Makefile b/Makefile +index d6592b6..167ef45 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + VERSION = 2 + PATCHLEVEL = 6 + SUBLEVEL = 38 +-EXTRAVERSION = ++EXTRAVERSION = .1 + NAME = Flesh-Eating Bats with Fangs + + # *DOCUMENTATION* +diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c +index 0ca90b8..556bbd4 100644 +--- a/arch/arm/mach-davinci/board-dm644x-evm.c ++++ b/arch/arm/mach-davinci/board-dm644x-evm.c +@@ -440,11 +440,6 @@ evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c) + gpio_request(gpio + 7, "nCF_SEL"); + gpio_direction_output(gpio + 7, 1); + +- /* irlml6401 switches over 1A, in under 8 msec; +- * now it can be managed by nDRV_VBUS ... +- */ +- davinci_setup_usb(1000, 8); +- + return 0; + } + +@@ -705,6 +700,9 @@ static __init void davinci_evm_init(void) + davinci_serial_init(&uart_config); + dm644x_init_asp(&dm644x_evm_snd_data); + ++ /* irlml6401 switches over 1A, in under 8 msec */ ++ davinci_setup_usb(1000, 8); ++ + soc_info->emac_pdata->phy_id = DM644X_EVM_PHY_ID; + /* Register the fixup for PHY on DaVinci */ + phy_register_fixup_for_uid(LXT971_PHY_ID, LXT971_PHY_MASK, +diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h +index d840f4a..5bb95a1 100644 +--- a/arch/microblaze/include/asm/uaccess.h ++++ b/arch/microblaze/include/asm/uaccess.h +@@ -120,16 +120,16 @@ static inline unsigned long __must_check __clear_user(void __user *to, + { + /* normal memset with two words to __ex_table */ + __asm__ __volatile__ ( \ +- "1: sb r0, %2, r0;" \ ++ "1: sb r0, %1, r0;" \ + " addik %0, %0, -1;" \ + " bneid %0, 1b;" \ +- " addik %2, %2, 1;" \ ++ " addik %1, %1, 1;" \ + "2: " \ + __EX_TABLE_SECTION \ + ".word 1b,2b;" \ + ".previous;" \ +- : "=r"(n) \ +- : "0"(n), "r"(to) ++ : "=r"(n), "=r"(to) \ ++ : "0"(n), "1"(to) + ); + return n; + } +diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c +index d7d94b8..3948f1d 100644 +--- a/arch/parisc/kernel/irq.c ++++ b/arch/parisc/kernel/irq.c +@@ -108,7 +108,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) + int cpu_dest; + + /* timer and ipi have to always be received on all CPUs */ +- if (CHECK_IRQ_PER_CPU(irq)) { ++ if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) { + /* Bad linux design decision. 
The mask has already + * been set; we must reset it */ + cpumask_setall(irq_desc[irq].affinity); +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index 125fc1a..7626fa7 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -880,6 +880,7 @@ + #define PV_970 0x0039 + #define PV_POWER5 0x003A + #define PV_POWER5p 0x003B ++#define PV_POWER7 0x003F + #define PV_970FX 0x003C + #define PV_630 0x0040 + #define PV_630p 0x0041 +diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c +index ab6f6be..97e0ae4 100644 +--- a/arch/powerpc/kernel/perf_event.c ++++ b/arch/powerpc/kernel/perf_event.c +@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) + return ip; + } + ++static bool pmc_overflow(unsigned long val) ++{ ++ if ((int)val < 0) ++ return true; ++ ++ /* ++ * Events on POWER7 can roll back if a speculative event doesn't ++ * eventually complete. Unfortunately in some rare cases they will ++ * raise a performance monitor exception. We need to catch this to ++ * ensure we reset the PMC. In all cases the PMC will be 256 or less ++ * cycles from overflow. ++ * ++ * We only do this if the first pass fails to find any overflowing ++ * PMCs because a user might set a period of less than 256 and we ++ * don't want to mistakenly reset them. ++ */ ++ if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) ++ return true; ++ ++ return false; ++} ++ + /* + * Performance monitor interrupt stuff + */ +@@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) + if (is_limited_pmc(i + 1)) + continue; + val = read_pmc(i + 1); +- if ((int)val < 0) ++ if (pmc_overflow(val)) + write_pmc(i + 1, 0); + } + } +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h +index 94b979d..effff47 100644 +--- a/arch/x86/include/asm/pgtable-3level.h ++++ b/arch/x86/include/asm/pgtable-3level.h +@@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd) + + static inline void pud_clear(pud_t *pudp) + { +- unsigned long pgd; +- + set_pud(pudp, __pud(0)); + + /* +@@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp) + * section 8.1: in PAE mode we explicitly have to flush the + * TLB via cr3 if the top-level pgd is changed... + * +- * Make sure the pud entry we're updating is within the +- * current pgd to avoid unnecessary TLB flushes. ++ * Currently all places where pud_clear() is called either have ++ * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or ++ * pud_clear_bad()), so we don't need TLB flush here. + */ +- pgd = read_cr3(); +- if (__pa(pudp) >= pgd && __pa(pudp) < +- (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) +- write_cr3(pgd); + } + + #ifdef CONFIG_SMP +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index 7038b95..4db3554 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -620,7 +620,12 @@ static int __kprobes stop_machine_text_poke(void *data) + flush_icache_range((unsigned long)p->addr, + (unsigned long)p->addr + p->len); + } +- ++ /* ++ * Intel Archiecture Software Developer's Manual section 7.1.3 specifies ++ * that a core serializing instruction such as "cpuid" should be ++ * executed on _each_ core before the new instruction is made visible. 
++ */ ++ sync_core(); + return 0; + } + +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c +index 294f26d..0b5e2b5 100644 +--- a/arch/x86/kernel/e820.c ++++ b/arch/x86/kernel/e820.c +@@ -847,15 +847,21 @@ static int __init parse_memopt(char *p) + if (!p) + return -EINVAL; + +-#ifdef CONFIG_X86_32 + if (!strcmp(p, "nopentium")) { ++#ifdef CONFIG_X86_32 + setup_clear_cpu_cap(X86_FEATURE_PSE); + return 0; +- } ++#else ++ printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); ++ return -EINVAL; + #endif ++ } + + userdef = 1; + mem_size = memparse(p, &p); ++ /* don't remove all of memory when handling "mem={invalid}" param */ ++ if (mem_size == 0) ++ return -EINVAL; + e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); + + return 0; +diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c +index 9efbdcc..3755ef4 100644 +--- a/arch/x86/kernel/early-quirks.c ++++ b/arch/x86/kernel/early-quirks.c +@@ -159,7 +159,12 @@ static void __init ati_bugs_contd(int num, int slot, int func) + if (rev >= 0x40) + acpi_fix_pin2_polarity = 1; + +- if (rev > 0x13) ++ /* ++ * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... ++ * SB700: revisions 0x39, 0x3a, ... ++ * SB800: revisions 0x40, 0x41, ... ++ */ ++ if (rev >= 0x39) + return; + + if (acpi_use_timer_override) +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index aed1ffb..bbd5c80 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -1248,7 +1248,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) + decl PER_CPU_VAR(irq_count) + jmp error_exit + CFI_ENDPROC +-END(do_hypervisor_callback) ++END(xen_do_hypervisor_callback) + + /* + * Hypervisor uses this for application faults while it executes. +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 0113d19..8573b83 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -168,8 +168,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) + * section 8.1: in PAE mode we explicitly have to flush the + * TLB via cr3 if the top-level pgd is changed... + */ +- if (mm == current->active_mm) +- write_cr3(read_cr3()); ++ flush_tlb_mm(mm); + } + #else /* !CONFIG_X86_PAE */ + +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index b8d96ce..34e08f6 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -260,6 +260,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ + { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ ++ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ + + /* JMicron 360/1/3/5/6, match class to avoid IDE function */ +@@ -383,6 +384,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { + .class = PCI_CLASS_STORAGE_SATA_AHCI, + .class_mask = 0xffffff, + .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ ++ { PCI_DEVICE(0x1b4b, 0x9125), ++ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ + + /* Promise */ + { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c +index 17a6378..e16850e 100644 +--- a/drivers/ata/libata-eh.c ++++ b/drivers/ata/libata-eh.c +@@ -1618,7 +1618,7 @@ static void ata_eh_analyze_serror(struct ata_link *link) + * host links. 
For disabled PMP links, only N bit is + * considered as X bit is left at 1 for link plugging. + */ +- if (link->lpm_policy != ATA_LPM_MAX_POWER) ++ if (link->lpm_policy > ATA_LPM_MAX_POWER) + hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ + else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) + hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; +diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c +index 85da4c4..2eee8e0 100644 +--- a/drivers/gpu/drm/drm_sysfs.c ++++ b/drivers/gpu/drm/drm_sysfs.c +@@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device, + { + struct drm_connector *connector = to_drm_connector(device); + enum drm_connector_status status; ++ int ret; ++ ++ ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex); ++ if (ret) ++ return ret; + + status = connector->funcs->detect(connector, true); ++ mutex_unlock(&connector->dev->mode_config.mutex); ++ + return snprintf(buf, PAGE_SIZE, "%s\n", + drm_get_connector_status_name(status)); + } +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 8a9e08b..2347bc1 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -1377,7 +1377,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) + else + i915_enable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE); ++ ++ /* maintain vblank delivery even in deep C-states */ ++ if (dev_priv->info->gen == 3) ++ I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); ++ + return 0; + } + +@@ -1390,6 +1395,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ++ if (dev_priv->info->gen == 3) ++ I915_WRITE(INSTPM, ++ INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); ++ + if (HAS_PCH_SPLIT(dev)) + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 2abe240..12c547a 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -405,9 +405,12 @@ + #define I915_ERROR_INSTRUCTION (1<<0) + #define INSTPM 0x020c0 + #define INSTPM_SELF_EN (1<<12) /* 915GM only */ ++#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts ++ will not assert AGPBUSY# and will only ++ be delivered when out of C3. 
*/ + #define ACTHD 0x020c8 + #define FW_BLC 0x020d8 +-#define FW_BLC2 0x020dc ++#define FW_BLC2 0x020dc + #define FW_BLC_SELF 0x020e0 /* 915+ only */ + #define FW_BLC_SELF_EN_MASK (1<<31) + #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index a4e5e53..4a5a73b 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -61,8 +61,8 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, + args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); + args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); + } else if (a2 > a1) { +- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); +- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); ++ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); ++ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); + } + break; + case RMX_FULL: +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig +index 2560f01..ae445b1 100644 +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -68,9 +68,15 @@ config HID_A4TECH + ---help--- + Support for A4 tech X5 and WOP-35 / Trust 450L mice. + +-config HID_ACRUX_FF +- tristate "ACRUX force feedback" ++config HID_ACRUX ++ tristate "ACRUX game controller support" + depends on USB_HID ++ ---help--- ++ Say Y here if you want to enable support for ACRUX game controllers. ++ ++config HID_ACRUX_FF ++ tristate "ACRUX force feedback support" ++ depends on HID_ACRUX + select INPUT_FF_MEMLESS + ---help--- + Say Y here if you want to enable force feedback support for ACRUX +@@ -319,10 +325,10 @@ config HID_NTRIG + Support for N-Trig touch screen. + + config HID_ORTEK +- tristate "Ortek WKB-2000 wireless keyboard and mouse trackpad" ++ tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad" + depends on USB_HID + ---help--- +- Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. ++ Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad. 
+ + config HID_PANTHERLORD + tristate "Pantherlord/GreenAsia game controller" +diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile +index 6efc2a0..13e6248 100644 +--- a/drivers/hid/Makefile ++++ b/drivers/hid/Makefile +@@ -27,7 +27,7 @@ endif + + obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o + obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o +-obj-$(CONFIG_HID_ACRUX_FF) += hid-axff.o ++obj-$(CONFIG_HID_ACRUX) += hid-axff.o + obj-$(CONFIG_HID_APPLE) += hid-apple.o + obj-$(CONFIG_HID_BELKIN) += hid-belkin.o + obj-$(CONFIG_HID_CANDO) += hid-cando.o +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 61aa712..b85744f 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -481,6 +481,12 @@ static const struct hid_device_id apple_devices[] = { + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), ++ .driver_data = APPLE_HAS_FN }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), ++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), +diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c +index e5b961d..b455428 100644 +--- a/drivers/hid/hid-axff.c ++++ b/drivers/hid/hid-axff.c +@@ -33,6 +33,8 @@ + #include + + #include "hid-ids.h" ++ ++#ifdef CONFIG_HID_ACRUX_FF + #include "usbhid/usbhid.h" + + struct axff_device { +@@ -109,6 +111,12 @@ err_free_mem: + kfree(axff); + return error; + } ++#else ++static inline int axff_init(struct hid_device *hid) ++{ ++ return 0; ++} ++#endif + + static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) + { +@@ -139,9 +147,25 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) + error); + } + ++ /* ++ * We need to start polling device right away, otherwise ++ * it will go into a coma. 
++ */ ++ error = hid_hw_open(hdev); ++ if (error) { ++ dev_err(&hdev->dev, "hw open failed\n"); ++ return error; ++ } ++ + return 0; + } + ++static void ax_remove(struct hid_device *hdev) ++{ ++ hid_hw_close(hdev); ++ hid_hw_stop(hdev); ++} ++ + static const struct hid_device_id ax_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), }, + { } +@@ -149,9 +173,10 @@ static const struct hid_device_id ax_devices[] = { + MODULE_DEVICE_TABLE(hid, ax_devices); + + static struct hid_driver ax_driver = { +- .name = "acrux", +- .id_table = ax_devices, +- .probe = ax_probe, ++ .name = "acrux", ++ .id_table = ax_devices, ++ .probe = ax_probe, ++ .remove = ax_remove, + }; + + static int __init ax_init(void) +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index d678cf3..9477b2a 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1256,9 +1256,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, +-#if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE) + { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, +-#endif + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, +@@ -1302,6 +1300,9 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, +@@ -1400,6 +1401,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, + { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, + { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, +@@ -1801,6 +1803,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, ++ { 
HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, + { } +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 92a0d61..090bf48 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -103,6 +103,9 @@ + #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 + #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 + #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 ++#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 ++#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 ++#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b +@@ -466,6 +469,7 @@ + #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 + + #define USB_VENDOR_ID_ORTEK 0x05a4 ++#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 + #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 + + #define USB_VENDOR_ID_PANJIT 0x134c +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 7f552bf..ebcc02a 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -290,14 +290,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + goto ignore; + } + +- if (field->report_type == HID_FEATURE_REPORT) { +- if (device->driver->feature_mapping) { +- device->driver->feature_mapping(device, hidinput, field, +- usage); +- } +- goto ignore; +- } +- + if (device->driver->input_mapping) { + int ret = device->driver->input_mapping(device, hidinput, field, + usage, &bit, &max); +@@ -835,6 +827,24 @@ static void hidinput_close(struct input_dev *dev) + hid_hw_close(hid); + } + ++static void report_features(struct hid_device *hid) ++{ ++ struct hid_driver *drv = hid->driver; ++ struct hid_report_enum *rep_enum; ++ struct hid_report *rep; ++ int i, j; ++ ++ if (!drv->feature_mapping) ++ return; ++ ++ rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; ++ list_for_each_entry(rep, &rep_enum->report_list, list) ++ for (i = 0; i < rep->maxfield; i++) ++ for (j = 0; j < rep->field[i]->maxusage; j++) ++ drv->feature_mapping(hid, rep->field[i], ++ rep->field[i]->usage + j); ++} ++ + /* + * Register the input device; print a message. 
+ * Configure the input layer interface +@@ -863,7 +873,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) + return -1; + } + +- for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) { ++ report_features(hid); ++ ++ for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { + if (k == HID_OUTPUT_REPORT && + hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) + continue; +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 698e645..318cc40 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -258,7 +258,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda + input_report_abs(input, ABS_MT_TRACKING_ID, id); + input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2); + input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2); +- input_report_abs(input, ABS_MT_ORIENTATION, orientation); ++ input_report_abs(input, ABS_MT_ORIENTATION, -orientation); + input_report_abs(input, ABS_MT_POSITION_X, x); + input_report_abs(input, ABS_MT_POSITION_Y, y); + +@@ -397,7 +397,7 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h + input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0); + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); + input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); +- input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); ++ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0); + + /* Note: Touch Y position from the device is inverted relative + * to how pointer motion is reported (and relative to how USB +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 07d3183..2bbc954 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -122,7 +122,7 @@ struct mt_class mt_classes[] = { + { } + }; + +-static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi, ++static void mt_feature_mapping(struct hid_device *hdev, + struct hid_field *field, struct hid_usage *usage) + { + if (usage->hid == HID_DG_INPUTMODE) { +diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c +index e90edfc..ad6faa6 100644 +--- a/drivers/hid/hid-ortek.c ++++ b/drivers/hid/hid-ortek.c +@@ -1,5 +1,5 @@ + /* +- * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). ++ * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad). 
+ * Fixes LogicalMaximum error in USB report description, see + * http://bugzilla.kernel.org/show_bug.cgi?id=14787 + * +@@ -30,6 +30,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, + } + + static const struct hid_device_id ortek_devices[] = { ++ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, + { } + }; +diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c +index a610e78..38a41d2 100644 +--- a/drivers/hwmon/sht15.c ++++ b/drivers/hwmon/sht15.c +@@ -333,11 +333,11 @@ static inline int sht15_calc_humid(struct sht15_data *data) + + const int c1 = -4; + const int c2 = 40500; /* x 10 ^ -6 */ +- const int c3 = -2800; /* x10 ^ -9 */ ++ const int c3 = -28; /* x 10 ^ -7 */ + + RHlinear = c1*1000 + + c2 * data->val_humid/1000 +- + (data->val_humid * data->val_humid * c3)/1000000; ++ + (data->val_humid * data->val_humid * c3) / 10000; + return (temp - 25000) * (10000 + 80 * data->val_humid) + / 1000000 + RHlinear; + } +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index 64e0903..1d9616b 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -2989,6 +2989,7 @@ static int cm_sidr_req_handler(struct cm_work *work) + goto out; /* No match. */ + } + atomic_inc(&cur_cm_id_priv->refcount); ++ atomic_inc(&cm_id_priv->refcount); + spin_unlock_irq(&cm.lock); + + cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 6884da2..e450c5a 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -1210,6 +1210,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) + cm_id->context = conn_id; + cm_id->cm_handler = cma_ib_handler; + ++ /* ++ * Protect against the user destroying conn_id from another thread ++ * until we're done accessing it. ++ */ ++ atomic_inc(&conn_id->refcount); + ret = conn_id->id.event_handler(&conn_id->id, &event); + if (!ret) { + /* +@@ -1222,8 +1227,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) + ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); + mutex_unlock(&lock); + mutex_unlock(&conn_id->handler_mutex); ++ cma_deref_id(conn_id); + goto out; + } ++ cma_deref_id(conn_id); + + /* Destroy the CM ID by returning a non-zero value. */ + conn_id->cm_id.ib = NULL; +@@ -1425,17 +1432,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, + event.param.conn.private_data_len = iw_event->private_data_len; + event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; + event.param.conn.responder_resources = attr.max_qp_rd_atom; ++ ++ /* ++ * Protect against the user destroying conn_id from another thread ++ * until we're done accessing it. 
++ */ ++ atomic_inc(&conn_id->refcount); + ret = conn_id->id.event_handler(&conn_id->id, &event); + if (ret) { + /* User wants to destroy the CM ID */ + conn_id->cm_id.iw = NULL; + cma_exch(conn_id, CMA_DESTROYING); + mutex_unlock(&conn_id->handler_mutex); ++ cma_deref_id(conn_id); + rdma_destroy_id(&conn_id->id); + goto out; + } + + mutex_unlock(&conn_id->handler_mutex); ++ cma_deref_id(conn_id); + + out: + if (dev) +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c +index ee82851..3185314 100644 +--- a/drivers/input/mouse/bcm5974.c ++++ b/drivers/input/mouse/bcm5974.c +@@ -63,6 +63,10 @@ + #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 + #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 + #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 ++/* Macbook8 (unibody, March 2011) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 ++#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 ++#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 + + #define BCM5974_DEVICE(prod) { \ + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ +@@ -96,6 +100,10 @@ static const struct usb_device_id bcm5974_table[] = { + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), ++ /* MacbookPro8 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), + /* Terminating entry */ + {} + }; +@@ -274,6 +282,18 @@ static const struct bcm5974_config bcm5974_config_table[] = { + { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, + { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } + }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI, ++ USB_DEVICE_ID_APPLE_WELLSPRING5_ISO, ++ USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, ++ HAS_INTEGRATED_BUTTON, ++ 0x84, sizeof(struct bt_data), ++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, ++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, ++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, ++ { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, ++ { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } ++ }, + {} + }; + +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c +index ebc62ad..fbe1ea4 100644 +--- a/drivers/mmc/core/sdio.c ++++ b/drivers/mmc/core/sdio.c +@@ -395,6 +395,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, + if (err) + goto remove; + ++ /* ++ * Update oldcard with the new RCA received from the SDIO ++ * device -- we're doing this so that it's updated in the ++ * "card" struct when oldcard overwrites that later. 
++ */ ++ if (oldcard) ++ oldcard->rca = card->rca; ++ + mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); + } + +diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c +index 0dc905b..f7e622c 100644 +--- a/drivers/mmc/host/sdhci-pci.c ++++ b/drivers/mmc/host/sdhci-pci.c +@@ -547,6 +547,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { + }, + + { ++ .vendor = PCI_VENDOR_ID_RICOH, ++ .device = 0xe823, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, ++ }, ++ ++ { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB712_SD, + .subvendor = PCI_ANY_ID, +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c +index 9f01e50..7c0a7c4 100644 +--- a/drivers/net/wireless/ath/ath9k/hw.c ++++ b/drivers/net/wireless/ath/ath9k/hw.c +@@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah) + if (ah->hw_version.devid == AR5416_AR9100_DEVID) + ah->hw_version.macVersion = AR_SREV_VERSION_9100; + ++ /* ++ * Read back AR_WA into a permanent copy and set bits 14 and 17. ++ * We need to do this to avoid RMW of this register. We cannot ++ * read the reg when chip is asleep. ++ */ ++ ah->WARegVal = REG_READ(ah, AR_WA); ++ ah->WARegVal |= (AR_WA_D3_L1_DISABLE | ++ AR_WA_ASPM_TIMER_BASED_DISABLE); ++ ++ ath9k_hw_read_revisions(ah); ++ + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { + ath_err(common, "Couldn't reset chip\n"); + return -EIO; +@@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) + + ath9k_hw_init_mode_regs(ah); + +- /* +- * Read back AR_WA into a permanent copy and set bits 14 and 17. +- * We need to do this to avoid RMW of this register. We cannot +- * read the reg when chip is asleep. +- */ +- ah->WARegVal = REG_READ(ah, AR_WA); +- ah->WARegVal |= (AR_WA_D3_L1_DISABLE | +- AR_WA_ASPM_TIMER_BASED_DISABLE); + + if (ah->is_pciexpress) + ath9k_hw_configpcipowersave(ah, 0, 0); +@@ -1082,8 +1085,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) + return false; + } + +- ath9k_hw_read_revisions(ah); +- + return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); + } + +diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c +index b2497b8..3867a2e 100644 +--- a/drivers/net/wireless/ath/ath9k/recv.c ++++ b/drivers/net/wireless/ath/ath9k/recv.c +@@ -439,9 +439,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) + * mode interface or when in monitor mode. AP mode does not need this + * since it receives all in-BSS frames anyway. + */ +- if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && +- (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || +- (sc->sc_ah->is_monitoring)) ++ if (sc->sc_ah->is_monitoring) + rfilt |= ATH9K_RX_FILTER_PROM; + + if (sc->rx.rxfilter & FIF_CONTROL) +diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c +index 6b82cac..2bb5297 100644 +--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c ++++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c +@@ -871,23 +871,35 @@ static void rtl8187_work(struct work_struct *work) + /* The RTL8187 returns the retry count through register 0xFFFA. In + * addition, it appears to be a cumulative retry count, not the + * value for the current TX packet. When multiple TX entries are +- * queued, the retry count will be valid for the last one in the queue. +- * The "error" should not matter for purposes of rate setting. */ ++ * waiting in the queue, the retry count will be the total for all. 
++ * The "error" may matter for purposes of rate setting, but there is ++ * no other choice with this hardware. ++ */ + struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, + work.work); + struct ieee80211_tx_info *info; + struct ieee80211_hw *dev = priv->dev; + static u16 retry; + u16 tmp; ++ u16 avg_retry; ++ int length; + + mutex_lock(&priv->conf_mutex); + tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA); ++ length = skb_queue_len(&priv->b_tx_status.queue); ++ if (unlikely(!length)) ++ length = 1; ++ if (unlikely(tmp < retry)) ++ tmp = retry; ++ avg_retry = (tmp - retry) / length; + while (skb_queue_len(&priv->b_tx_status.queue) > 0) { + struct sk_buff *old_skb; + + old_skb = skb_dequeue(&priv->b_tx_status.queue); + info = IEEE80211_SKB_CB(old_skb); +- info->status.rates[0].count = tmp - retry + 1; ++ info->status.rates[0].count = avg_retry + 1; ++ if (info->status.rates[0].count > RETRY_COUNT) ++ info->flags &= ~IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(dev, old_skb); + } + retry = tmp; +@@ -933,8 +945,8 @@ static int rtl8187_start(struct ieee80211_hw *dev) + rtl818x_iowrite32(priv, &priv->map->TX_CONF, + RTL818X_TX_CONF_HW_SEQNUM | + RTL818X_TX_CONF_DISREQQSIZE | +- (7 << 8 /* short retry limit */) | +- (7 << 0 /* long retry limit */) | ++ (RETRY_COUNT << 8 /* short retry limit */) | ++ (RETRY_COUNT << 0 /* long retry limit */) | + (7 << 21 /* MAX TX DMA */)); + rtl8187_init_urbs(dev); + rtl8187b_init_status_urb(dev); +@@ -1378,6 +1390,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, + dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_RX_INCLUDES_FCS; ++ /* Initialize rate-control variables */ ++ dev->max_rates = 1; ++ dev->max_rate_tries = RETRY_COUNT; + + eeprom.data = dev; + eeprom.register_read = rtl8187_eeprom_register_read; +diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +index 0d7b142..f1cc907 100644 +--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h ++++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +@@ -35,6 +35,8 @@ + #define RFKILL_MASK_8187_89_97 0x2 + #define RFKILL_MASK_8198 0x4 + ++#define RETRY_COUNT 7 ++ + struct rtl8187_rx_info { + struct urb *urb; + struct ieee80211_hw *dev; +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c +index ea25e5b..c85438a 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -1088,7 +1088,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) + attr->write = write_vpd_attr; + retval = sysfs_create_bin_file(&dev->dev.kobj, attr); + if (retval) { +- kfree(dev->vpd->attr); ++ kfree(attr); + return retval; + } + dev->vpd->attr = attr; +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 53a786f..bd80f63 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -533,6 +533,17 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); + ++#define ICH_PMBASE 0x40 ++#define ICH_ACPI_CNTL 0x44 ++#define ICH4_ACPI_EN 0x10 ++#define ICH6_ACPI_EN 0x80 ++#define ICH4_GPIOBASE 0x58 ++#define ICH4_GPIO_CNTL 0x5c ++#define ICH4_GPIO_EN 0x10 ++#define ICH6_GPIOBASE 0x48 ++#define ICH6_GPIO_CNTL 0x4c ++#define ICH6_GPIO_EN 0x10 ++ + /* + * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at + * 0x40 (128 bytes of 
ACPI, GPIO & TCO registers) +@@ -541,12 +552,33 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui + static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) + { + u32 region; ++ u8 enable; + +- pci_read_config_dword(dev, 0x40, ®ion); +- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); ++ /* ++ * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict ++ * with low legacy (and fixed) ports. We don't know the decoding ++ * priority and can't tell whether the legacy device or the one created ++ * here is really at that address. This happens on boards with broken ++ * BIOSes. ++ */ ++ ++ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); ++ if (enable & ICH4_ACPI_EN) { ++ pci_read_config_dword(dev, ICH_PMBASE, ®ion); ++ region &= PCI_BASE_ADDRESS_IO_MASK; ++ if (region >= PCIBIOS_MIN_IO) ++ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, ++ "ICH4 ACPI/GPIO/TCO"); ++ } + +- pci_read_config_dword(dev, 0x58, ®ion); +- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); ++ pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); ++ if (enable & ICH4_GPIO_EN) { ++ pci_read_config_dword(dev, ICH4_GPIOBASE, ®ion); ++ region &= PCI_BASE_ADDRESS_IO_MASK; ++ if (region >= PCIBIOS_MIN_IO) ++ quirk_io_region(dev, region, 64, ++ PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO"); ++ } + } + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); +@@ -562,12 +594,25 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui + static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) + { + u32 region; ++ u8 enable; + +- pci_read_config_dword(dev, 0x40, ®ion); +- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); ++ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); ++ if (enable & ICH6_ACPI_EN) { ++ pci_read_config_dword(dev, ICH_PMBASE, ®ion); ++ region &= PCI_BASE_ADDRESS_IO_MASK; ++ if (region >= PCIBIOS_MIN_IO) ++ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, ++ "ICH6 ACPI/GPIO/TCO"); ++ } + +- pci_read_config_dword(dev, 0x48, ®ion); +- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); ++ pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); ++ if (enable & ICH4_GPIO_EN) { ++ pci_read_config_dword(dev, ICH6_GPIOBASE, ®ion); ++ region &= PCI_BASE_ADDRESS_IO_MASK; ++ if (region >= PCIBIOS_MIN_IO) ++ quirk_io_region(dev, region, 64, ++ PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO"); ++ } + } + + static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) +@@ -2618,58 +2663,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, + + #endif /* CONFIG_PCI_MSI */ + +-#ifdef CONFIG_PCI_IOV +- +-/* +- * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the +- * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the +- * old Flash Memory Space. 
+- */ +-static void __devinit quirk_i82576_sriov(struct pci_dev *dev) +-{ +- int pos, flags; +- u32 bar, start, size; +- +- if (PAGE_SIZE > 0x10000) +- return; +- +- flags = pci_resource_flags(dev, 0); +- if ((flags & PCI_BASE_ADDRESS_SPACE) != +- PCI_BASE_ADDRESS_SPACE_MEMORY || +- (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) != +- PCI_BASE_ADDRESS_MEM_TYPE_32) +- return; +- +- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); +- if (!pos) +- return; +- +- pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar); +- if (bar & PCI_BASE_ADDRESS_MEM_MASK) +- return; +- +- start = pci_resource_start(dev, 1); +- size = pci_resource_len(dev, 1); +- if (!start || size != 0x400000 || start & (size - 1)) +- return; +- +- pci_resource_flags(dev, 1) = 0; +- pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0); +- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start); +- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2); +- +- dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n"); +-} +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov); +- +-#endif /* CONFIG_PCI_IOV */ +- + /* Allow manual resource allocation for PCI hotplug bridges + * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For + * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), +diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c +index 6b72932..30f2b33 100644 +--- a/drivers/scsi/device_handler/scsi_dh_alua.c ++++ b/drivers/scsi/device_handler/scsi_dh_alua.c +@@ -285,7 +285,8 @@ static void stpg_endio(struct request *req, int error) + print_alua_state(h->state)); + } + done: +- blk_put_request(req); ++ req->end_io_data = NULL; ++ __blk_put_request(req->q, req); + if (h->callback_fn) { + h->callback_fn(h->callback_data, err); + h->callback_fn = h->callback_data = NULL; +diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c +index b47d7aa..e2fe165 100644 +--- a/drivers/staging/tidspbridge/rmgr/proc.c ++++ b/drivers/staging/tidspbridge/rmgr/proc.c +@@ -781,12 +781,14 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, + (u32)pmpu_addr, + ul_size, dir); + ++ mutex_lock(&proc_lock); ++ + /* find requested memory are in cached mapping information */ + map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); + if (!map_obj) { + pr_err("%s: find_containing_mapping failed\n", __func__); + status = -EFAULT; +- goto err_out; ++ goto no_map; + } + + if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { +@@ -795,6 +797,8 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, + status = -EFAULT; + } + ++no_map: ++ mutex_unlock(&proc_lock); + err_out: + + return status; +@@ -819,21 +823,24 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, + (u32)pmpu_addr, + ul_size, dir); + ++ mutex_lock(&proc_lock); ++ + /* find requested memory are in cached mapping information */ + map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); + if (!map_obj) { + 
pr_err("%s: find_containing_mapping failed\n", __func__); + status = -EFAULT; +- goto err_out; ++ goto no_map; + } + + if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { + pr_err("%s: InValid address parameters %p %x\n", + __func__, pmpu_addr, ul_size); + status = -EFAULT; +- goto err_out; + } + ++no_map: ++ mutex_unlock(&proc_lock); + err_out: + return status; + } +@@ -1726,9 +1733,8 @@ int proc_un_map(void *hprocessor, void *map_addr, + (p_proc_object->hbridge_context, va_align, size_align); + } + +- mutex_unlock(&proc_lock); + if (status) +- goto func_end; ++ goto unmap_failed; + + /* + * A successful unmap should be followed by removal of map_obj +@@ -1737,6 +1743,9 @@ int proc_un_map(void *hprocessor, void *map_addr, + */ + remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); + ++unmap_failed: ++ mutex_unlock(&proc_lock); ++ + func_end: + dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", + __func__, hprocessor, map_addr, status); +diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h +index d7b3aca..6160b2f 100644 +--- a/drivers/staging/winbond/core.h ++++ b/drivers/staging/winbond/core.h +@@ -3,6 +3,7 @@ + + #include + #include ++#include + + #include "wbhal.h" + #include "mto.h" +diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c +index 366080b..7f19c8b 100644 +--- a/drivers/target/target_core_cdb.c ++++ b/drivers/target/target_core_cdb.c +@@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd) + { + struct se_device *dev = SE_DEV(cmd); + unsigned char *buf = cmd->t_task->t_task_buf; +- u32 blocks = dev->transport->get_blocks(dev); ++ unsigned long long blocks_long = dev->transport->get_blocks(dev); ++ u32 blocks; ++ ++ if (blocks_long >= 0x00000000ffffffff) ++ blocks = 0xffffffff; ++ else ++ blocks = (u32)blocks_long; + + buf[0] = (blocks >> 24) & 0xff; + buf[1] = (blocks >> 16) & 0xff; +diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c +index 3975df6..b3b881b 100644 +--- a/drivers/tty/serial/8250.c ++++ b/drivers/tty/serial/8250.c +@@ -954,6 +954,23 @@ static int broken_efr(struct uart_8250_port *up) + return 0; + } + ++static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) ++{ ++ unsigned char status; ++ ++ status = serial_in(up, 0x04); /* EXCR2 */ ++#define PRESL(x) ((x) & 0x30) ++ if (PRESL(status) == 0x10) { ++ /* already in high speed mode */ ++ return 0; ++ } else { ++ status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ ++ status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ ++ serial_outp(up, 0x04, status); ++ } ++ return 1; ++} ++ + /* + * We know that the chip has FIFOs. Does it have an EFR? 
The + * EFR is located in the same register position as the IIR and +@@ -1025,12 +1042,8 @@ static void autoconfig_16550a(struct uart_8250_port *up) + quot = serial_dl_read(up); + quot <<= 3; + +- status1 = serial_in(up, 0x04); /* EXCR2 */ +- status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ +- status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ +- serial_outp(up, 0x04, status1); +- +- serial_dl_write(up, quot); ++ if (ns16550a_goto_highspeed(up)) ++ serial_dl_write(up, quot); + + serial_outp(up, UART_LCR, 0); + +@@ -3025,17 +3038,13 @@ void serial8250_resume_port(int line) + struct uart_8250_port *up = &serial8250_ports[line]; + + if (up->capabilities & UART_NATSEMI) { +- unsigned char tmp; +- + /* Ensure it's still in high speed mode */ + serial_outp(up, UART_LCR, 0xE0); + +- tmp = serial_in(up, 0x04); /* EXCR2 */ +- tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ +- tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ +- serial_outp(up, 0x04, tmp); ++ ns16550a_goto_highspeed(up); + + serial_outp(up, UART_LCR, 0); ++ up->port.uartclk = 921600*16; + } + uart_resume_port(&serial8250_reg, &up->port); + } +diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c +index b62857b..37e13c3 100644 +--- a/drivers/tty/serial/mrst_max3110.c ++++ b/drivers/tty/serial/mrst_max3110.c +@@ -51,7 +51,7 @@ + struct uart_max3110 { + struct uart_port port; + struct spi_device *spi; +- char name[24]; ++ char name[SPI_NAME_SIZE]; + + wait_queue_head_t wq; + struct task_struct *main_thread; +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c +index f71e8e3..d370885 100644 +--- a/drivers/usb/core/hcd-pci.c ++++ b/drivers/usb/core/hcd-pci.c +@@ -363,8 +363,7 @@ static int check_root_hub_suspended(struct device *dev) + struct pci_dev *pci_dev = to_pci_dev(dev); + struct usb_hcd *hcd = pci_get_drvdata(pci_dev); + +- if (!(hcd->state == HC_STATE_SUSPENDED || +- hcd->state == HC_STATE_HALT)) { ++ if (HCD_RH_RUNNING(hcd)) { + dev_warn(dev, "Root hub is not suspended\n"); + return -EBUSY; + } +@@ -386,7 +385,7 @@ static int suspend_common(struct device *dev, bool do_wakeup) + if (retval) + return retval; + +- if (hcd->driver->pci_suspend) { ++ if (hcd->driver->pci_suspend && !HCD_DEAD(hcd)) { + /* Optimization: Don't suspend if a root-hub wakeup is + * pending and it would cause the HCD to wake up anyway. + */ +@@ -427,7 +426,7 @@ static int resume_common(struct device *dev, int event) + struct usb_hcd *hcd = pci_get_drvdata(pci_dev); + int retval; + +- if (hcd->state != HC_STATE_SUSPENDED) { ++ if (HCD_RH_RUNNING(hcd)) { + dev_dbg(dev, "can't resume, not suspended!\n"); + return 0; + } +@@ -442,7 +441,7 @@ static int resume_common(struct device *dev, int event) + + clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); + +- if (hcd->driver->pci_resume) { ++ if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) { + if (event != PM_EVENT_AUTO_RESUME) + wait_for_companions(pci_dev, hcd); + +@@ -475,10 +474,10 @@ static int hcd_pci_suspend_noirq(struct device *dev) + + pci_save_state(pci_dev); + +- /* If the root hub is HALTed rather than SUSPENDed, ++ /* If the root hub is dead rather than suspended, + * disallow remote wakeup. 
+ */ +- if (hcd->state == HC_STATE_HALT) ++ if (HCD_DEAD(hcd)) + device_set_wakeup_enable(dev, 0); + dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev)); + +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index e935f71..c34a935 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -983,7 +983,7 @@ static int register_root_hub(struct usb_hcd *hcd) + spin_unlock_irq (&hcd_root_hub_lock); + + /* Did the HC die before the root hub was registered? */ +- if (hcd->state == HC_STATE_HALT) ++ if (HCD_DEAD(hcd) || hcd->state == HC_STATE_HALT) + usb_hc_died (hcd); /* This time clean up */ + } + +@@ -1089,13 +1089,10 @@ int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb) + * Check the host controller's state and add the URB to the + * endpoint's queue. + */ +- switch (hcd->state) { +- case HC_STATE_RUNNING: +- case HC_STATE_RESUMING: ++ if (HCD_RH_RUNNING(hcd)) { + urb->unlinked = 0; + list_add_tail(&urb->urb_list, &urb->ep->urb_list); +- break; +- default: ++ } else { + rc = -ESHUTDOWN; + goto done; + } +@@ -1913,7 +1910,7 @@ int usb_hcd_get_frame_number (struct usb_device *udev) + { + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + +- if (!HC_IS_RUNNING (hcd->state)) ++ if (!HCD_RH_RUNNING(hcd)) + return -ESHUTDOWN; + return hcd->driver->get_frame_number (hcd); + } +@@ -1930,9 +1927,15 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) + + dev_dbg(&rhdev->dev, "bus %s%s\n", + (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend"); ++ if (HCD_DEAD(hcd)) { ++ dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend"); ++ return 0; ++ } ++ + if (!hcd->driver->bus_suspend) { + status = -ENOENT; + } else { ++ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + hcd->state = HC_STATE_QUIESCING; + status = hcd->driver->bus_suspend(hcd); + } +@@ -1940,7 +1943,12 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) + usb_set_device_state(rhdev, USB_STATE_SUSPENDED); + hcd->state = HC_STATE_SUSPENDED; + } else { +- hcd->state = old_state; ++ spin_lock_irq(&hcd_root_hub_lock); ++ if (!HCD_DEAD(hcd)) { ++ set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); ++ hcd->state = old_state; ++ } ++ spin_unlock_irq(&hcd_root_hub_lock); + dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", + "suspend", status); + } +@@ -1955,9 +1963,13 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) + + dev_dbg(&rhdev->dev, "usb %s%s\n", + (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); ++ if (HCD_DEAD(hcd)) { ++ dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume"); ++ return 0; ++ } + if (!hcd->driver->bus_resume) + return -ENOENT; +- if (hcd->state == HC_STATE_RUNNING) ++ if (HCD_RH_RUNNING(hcd)) + return 0; + + hcd->state = HC_STATE_RESUMING; +@@ -1966,10 +1978,15 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) + if (status == 0) { + /* TRSMRCY = 10 msec */ + msleep(10); +- usb_set_device_state(rhdev, rhdev->actconfig +- ? USB_STATE_CONFIGURED +- : USB_STATE_ADDRESS); +- hcd->state = HC_STATE_RUNNING; ++ spin_lock_irq(&hcd_root_hub_lock); ++ if (!HCD_DEAD(hcd)) { ++ usb_set_device_state(rhdev, rhdev->actconfig ++ ? 
USB_STATE_CONFIGURED ++ : USB_STATE_ADDRESS); ++ set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); ++ hcd->state = HC_STATE_RUNNING; ++ } ++ spin_unlock_irq(&hcd_root_hub_lock); + } else { + hcd->state = old_state; + dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", +@@ -2080,7 +2097,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) + */ + local_irq_save(flags); + +- if (unlikely(hcd->state == HC_STATE_HALT || !HCD_HW_ACCESSIBLE(hcd))) { ++ if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) { + rc = IRQ_NONE; + } else if (hcd->driver->irq(hcd) == IRQ_NONE) { + rc = IRQ_NONE; +@@ -2114,6 +2131,8 @@ void usb_hc_died (struct usb_hcd *hcd) + dev_err (hcd->self.controller, "HC died; cleaning up\n"); + + spin_lock_irqsave (&hcd_root_hub_lock, flags); ++ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); ++ set_bit(HCD_FLAG_DEAD, &hcd->flags); + if (hcd->rh_registered) { + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + +@@ -2256,6 +2275,12 @@ int usb_add_hcd(struct usb_hcd *hcd, + */ + device_init_wakeup(&rhdev->dev, 1); + ++ /* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is ++ * registered. But since the controller can die at any time, ++ * let's initialize the flag before touching the hardware. ++ */ ++ set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); ++ + /* "reset" is misnamed; its role is now one-time init. the controller + * should already have been reset (and boot firmware kicked off etc). + */ +@@ -2323,6 +2348,7 @@ int usb_add_hcd(struct usb_hcd *hcd, + return retval; + + error_create_attr_group: ++ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + if (HC_IS_RUNNING(hcd->state)) + hcd->state = HC_STATE_QUIESCING; + spin_lock_irq(&hcd_root_hub_lock); +@@ -2375,6 +2401,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) + usb_get_dev(rhdev); + sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group); + ++ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + if (HC_IS_RUNNING (hcd->state)) + hcd->state = HC_STATE_QUIESCING; + +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c +index c14fc08..ae334b0 100644 +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -366,7 +366,16 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) + if (xfertype == USB_ENDPOINT_XFER_ISOC) { + int n, len; + +- /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */ ++ /* SuperSpeed isoc endpoints have up to 16 bursts of up to ++ * 3 packets each ++ */ ++ if (dev->speed == USB_SPEED_SUPER) { ++ int burst = 1 + ep->ss_ep_comp.bMaxBurst; ++ int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); ++ max *= burst; ++ max *= mult; ++ } ++ + /* "high bandwidth" mode, 1-3 packets/uframe? 
*/ + if (dev->speed == USB_SPEED_HIGH) { + int mult = 1 + ((max >> 11) & 0x03); +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c +index 8a515f0..72ae77c 100644 +--- a/drivers/usb/host/ehci-hub.c ++++ b/drivers/usb/host/ehci-hub.c +@@ -106,6 +106,27 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) + ehci->owned_ports = 0; + } + ++static int ehci_port_change(struct ehci_hcd *ehci) ++{ ++ int i = HCS_N_PORTS(ehci->hcs_params); ++ ++ /* First check if the controller indicates a change event */ ++ ++ if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD) ++ return 1; ++ ++ /* ++ * Not all controllers appear to update this while going from D3 to D0, ++ * so check the individual port status registers as well ++ */ ++ ++ while (i--) ++ if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC) ++ return 1; ++ ++ return 0; ++} ++ + static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, + bool suspending, bool do_wakeup) + { +@@ -173,7 +194,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, + } + + /* Does the root hub have a port wakeup pending? */ +- if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) ++ if (!suspending && ehci_port_change(ehci)) + usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); + + spin_unlock_irqrestore(&ehci->lock, flags); +diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c +index bdba8c5..c470cc8 100644 +--- a/drivers/usb/host/isp1760-hcd.c ++++ b/drivers/usb/host/isp1760-hcd.c +@@ -33,6 +33,7 @@ struct isp1760_hcd { + struct inter_packet_info atl_ints[32]; + struct inter_packet_info int_ints[32]; + struct memory_chunk memory_pool[BLOCKS]; ++ u32 atl_queued; + + /* periodic schedule support */ + #define DEFAULT_I_TDPS 1024 +@@ -850,6 +851,11 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, + skip_map &= ~queue_entry; + isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG); + ++ priv->atl_queued++; ++ if (priv->atl_queued == 2) ++ isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, ++ hcd->regs + HC_INTERRUPT_ENABLE); ++ + buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG); + buffstatus |= ATL_BUFFER; + isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG); +@@ -992,6 +998,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) + u32 dw3; + + status = 0; ++ priv->atl_queued--; + + queue_entry = __ffs(done_map); + done_map &= ~(1 << queue_entry); +@@ -1054,11 +1061,6 @@ static void do_atl_int(struct usb_hcd *usb_hcd) + * device is not able to send data fast enough. + * This happens mostly on slower hardware. + */ +- printk(KERN_NOTICE "Reloading ptd %p/%p... 
qh %p read: " +- "%d of %zu done: %08x cur: %08x\n", qtd, +- urb, qh, PTD_XFERRED_LENGTH(dw3), +- qtd->length, done_map, +- (1 << queue_entry)); + + /* RL counter = ERR counter */ + dw3 &= ~(0xf << 19); +@@ -1086,6 +1088,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) + priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs + + atl_regs, sizeof(ptd)); + ++ priv->atl_queued++; ++ if (priv->atl_queued == 2) ++ isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, ++ usb_hcd->regs + HC_INTERRUPT_ENABLE); ++ + buffstatus = isp1760_readl(usb_hcd->regs + + HC_BUFFER_STATUS_REG); + buffstatus |= ATL_BUFFER; +@@ -1191,6 +1198,9 @@ static void do_atl_int(struct usb_hcd *usb_hcd) + skip_map = isp1760_readl(usb_hcd->regs + + HC_ATL_PTD_SKIPMAP_REG); + } ++ if (priv->atl_queued <= 1) ++ isp1760_writel(INTERRUPT_ENABLE_MASK, ++ usb_hcd->regs + HC_INTERRUPT_ENABLE); + } + + static void do_intl_int(struct usb_hcd *usb_hcd) +@@ -1770,7 +1780,7 @@ static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd) + goto leave; + + isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG); +- if (imask & HC_ATL_INT) ++ if (imask & (HC_ATL_INT | HC_SOT_INT)) + do_atl_int(usb_hcd); + + if (imask & HC_INTL_INT) +diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h +index 6931ef5..612bce5 100644 +--- a/drivers/usb/host/isp1760-hcd.h ++++ b/drivers/usb/host/isp1760-hcd.h +@@ -69,6 +69,7 @@ void deinit_kmem_cache(void); + + #define HC_INTERRUPT_ENABLE 0x314 + #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT) ++#define INTERRUPT_ENABLE_SOT_MASK (HC_INTL_INT | HC_SOT_INT | HC_EOT_INT) + + #define HC_ISO_INT (1 << 9) + #define HC_ATL_INT (1 << 8) +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 3289bf4..d3f0406 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -500,15 +500,26 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, + state->new_cycle_state = ~(state->new_cycle_state) & 0x1; + next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); + ++ /* ++ * If there is only one segment in a ring, find_trb_seg()'s while loop ++ * will not run, and it will return before it has a chance to see if it ++ * needs to toggle the cycle bit. It can't tell if the stalled transfer ++ * ended just before the link TRB on a one-segment ring, or if the TD ++ * wrapped around the top of the ring, because it doesn't have the TD in ++ * question. Look for the one-segment case where stalled TRB's address ++ * is greater than the new dequeue pointer address. ++ */ ++ if (ep_ring->first_seg == ep_ring->first_seg->next && ++ state->new_deq_ptr < dev->eps[ep_index].stopped_trb) ++ state->new_cycle_state ^= 0x1; ++ xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); ++ + /* Don't update the ring cycle state for the producer (us). 
*/ + xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", + state->new_deq_seg); + addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); + xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", + (unsigned long long) addr); +- xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); +- ep_ring->dequeue = state->new_deq_ptr; +- ep_ring->deq_seg = state->new_deq_seg; + } + + static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, +@@ -951,9 +962,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, + } else { + xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", + ep_ctx->deq); ++ if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, ++ dev->eps[ep_index].queued_deq_ptr) == ++ (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { ++ /* Update the ring's dequeue segment and dequeue pointer ++ * to reflect the new position. ++ */ ++ ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg; ++ ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr; ++ } else { ++ xhci_warn(xhci, "Mismatch between completed Set TR Deq " ++ "Ptr command & xHCI internal state.\n"); ++ xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", ++ dev->eps[ep_index].queued_deq_seg, ++ dev->eps[ep_index].queued_deq_ptr); ++ } + } + + dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; ++ dev->eps[ep_index].queued_deq_seg = NULL; ++ dev->eps[ep_index].queued_deq_ptr = NULL; + /* Restart any rings with pending URBs */ + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + } +@@ -3229,6 +3257,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, + u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); + u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); + u32 type = TRB_TYPE(TRB_SET_DEQ); ++ struct xhci_virt_ep *ep; + + addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); + if (addr == 0) { +@@ -3237,6 +3266,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, + deq_seg, deq_ptr); + return 0; + } ++ ep = &xhci->devs[slot_id]->eps[ep_index]; ++ if ((ep->ep_state & SET_DEQ_PENDING)) { ++ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); ++ xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); ++ return 0; ++ } ++ ep->queued_deq_seg = deq_seg; ++ ep->queued_deq_ptr = deq_ptr; + return queue_command(xhci, lower_32_bits(addr) | cycle_state, + upper_32_bits(addr), trb_stream_id, + trb_slot_id | trb_ep_index | type, false); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 7f127df..62bc1bc 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -644,6 +644,9 @@ struct xhci_ep_ctx { + #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) + #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) + ++/* deq bitmasks */ ++#define EP_CTX_CYCLE_MASK (1 << 0) ++ + + /** + * struct xhci_input_control_context +@@ -746,6 +749,12 @@ struct xhci_virt_ep { + struct timer_list stop_cmd_timer; + int stop_cmds_pending; + struct xhci_hcd *xhci; ++ /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue ++ * command. We'll need to update the ring's dequeue segment and dequeue ++ * pointer after the command completes. 
++ */ ++ struct xhci_segment *queued_deq_seg; ++ union xhci_trb *queued_deq_ptr; + /* + * Sometimes the xHC can not process isochronous endpoint ring quickly + * enough, and it will miss some isoc tds on the ring and generate +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index 7b8815d..14ac87e 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -75,6 +75,7 @@ static int debug; + static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x1a86, 0x7523) }, ++ { USB_DEVICE(0x1a86, 0x5523) }, + { }, + }; + MODULE_DEVICE_TABLE(usb, id_table); +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c +index bd5bd85..b382d9a 100644 +--- a/drivers/usb/serial/kobil_sct.c ++++ b/drivers/usb/serial/kobil_sct.c +@@ -372,7 +372,7 @@ static void kobil_read_int_callback(struct urb *urb) + } + + tty = tty_port_tty_get(&port->port); +- if (urb->actual_length) { ++ if (tty && urb->actual_length) { + + /* BEGIN DEBUG */ + /* +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 5f46838..75c7f45 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -652,7 +652,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, ++ 0xff, 0xff), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c +index 546a521..2ff90a9 100644 +--- a/drivers/usb/serial/usb-serial.c ++++ b/drivers/usb/serial/usb-serial.c +@@ -911,9 +911,8 @@ int usb_serial_probe(struct usb_interface *interface, + dev_err(&interface->dev, "No free urbs available\n"); + goto probe_error; + } +- buffer_size = serial->type->bulk_in_size; +- if (!buffer_size) +- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ++ buffer_size = max_t(int, serial->type->bulk_in_size, ++ le16_to_cpu(endpoint->wMaxPacketSize)); + port->bulk_in_size = buffer_size; + port->bulk_in_endpointAddress = endpoint->bEndpointAddress; + port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); +diff --git a/fs/dcache.c b/fs/dcache.c +index 611ffe9..a39fe47 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -296,8 +296,12 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) + __releases(parent->d_lock) + __releases(dentry->d_inode->i_lock) + { +- dentry->d_parent = NULL; + list_del(&dentry->d_u.d_child); ++ /* ++ * Inform try_to_ascend() that we are no longer attached to the ++ * dentry tree ++ */ ++ dentry->d_flags |= DCACHE_DISCONNECTED; + if (parent) + spin_unlock(&parent->d_lock); + dentry_iput(dentry); +@@ -1012,6 +1016,35 @@ void shrink_dcache_for_umount(struct super_block *sb) + } + + /* ++ * This tries to ascend one level of parenthood, but ++ * we can race with renaming, so we need to re-check ++ * the parenthood after dropping the lock and check ++ * that the sequence number still matches. 
++ */ ++static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq) ++{ ++ struct dentry *new = old->d_parent; ++ ++ rcu_read_lock(); ++ spin_unlock(&old->d_lock); ++ spin_lock(&new->d_lock); ++ ++ /* ++ * might go back up the wrong parent if we have had a rename ++ * or deletion ++ */ ++ if (new != old->d_parent || ++ (old->d_flags & DCACHE_DISCONNECTED) || ++ (!locked && read_seqretry(&rename_lock, seq))) { ++ spin_unlock(&new->d_lock); ++ new = NULL; ++ } ++ rcu_read_unlock(); ++ return new; ++} ++ ++ ++/* + * Search for at least 1 mount point in the dentry's subdirs. + * We descend to the next level whenever the d_subdirs + * list is non-empty and continue searching. +@@ -1066,24 +1099,10 @@ resume: + * All done at this level ... ascend and resume the search. + */ + if (this_parent != parent) { +- struct dentry *tmp; +- struct dentry *child; +- +- tmp = this_parent->d_parent; +- rcu_read_lock(); +- spin_unlock(&this_parent->d_lock); +- child = this_parent; +- this_parent = tmp; +- spin_lock(&this_parent->d_lock); +- /* might go back up the wrong parent if we have had a rename +- * or deletion */ +- if (this_parent != child->d_parent || +- (!locked && read_seqretry(&rename_lock, seq))) { +- spin_unlock(&this_parent->d_lock); +- rcu_read_unlock(); ++ struct dentry *child = this_parent; ++ this_parent = try_to_ascend(this_parent, locked, seq); ++ if (!this_parent) + goto rename_retry; +- } +- rcu_read_unlock(); + next = child->d_u.d_child.next; + goto resume; + } +@@ -1181,24 +1200,10 @@ resume: + * All done at this level ... ascend and resume the search. + */ + if (this_parent != parent) { +- struct dentry *tmp; +- struct dentry *child; +- +- tmp = this_parent->d_parent; +- rcu_read_lock(); +- spin_unlock(&this_parent->d_lock); +- child = this_parent; +- this_parent = tmp; +- spin_lock(&this_parent->d_lock); +- /* might go back up the wrong parent if we have had a rename +- * or deletion */ +- if (this_parent != child->d_parent || +- (!locked && read_seqretry(&rename_lock, seq))) { +- spin_unlock(&this_parent->d_lock); +- rcu_read_unlock(); ++ struct dentry *child = this_parent; ++ this_parent = try_to_ascend(this_parent, locked, seq); ++ if (!this_parent) + goto rename_retry; +- } +- rcu_read_unlock(); + next = child->d_u.d_child.next; + goto resume; + } +@@ -2942,28 +2947,14 @@ resume: + spin_unlock(&dentry->d_lock); + } + if (this_parent != root) { +- struct dentry *tmp; +- struct dentry *child; +- +- tmp = this_parent->d_parent; ++ struct dentry *child = this_parent; + if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { + this_parent->d_flags |= DCACHE_GENOCIDE; + this_parent->d_count--; + } +- rcu_read_lock(); +- spin_unlock(&this_parent->d_lock); +- child = this_parent; +- this_parent = tmp; +- spin_lock(&this_parent->d_lock); +- /* might go back up the wrong parent if we have had a rename +- * or deletion */ +- if (this_parent != child->d_parent || +- (!locked && read_seqretry(&rename_lock, seq))) { +- spin_unlock(&this_parent->d_lock); +- rcu_read_unlock(); ++ this_parent = try_to_ascend(this_parent, locked, seq); ++ if (!this_parent) + goto rename_retry; +- } +- rcu_read_unlock(); + next = child->d_u.d_child.next; + goto resume; + } +diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c +index b27ba71..75c968e 100644 +--- a/fs/ext3/namei.c ++++ b/fs/ext3/namei.c +@@ -1540,8 +1540,8 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, + goto cleanup; + node2 = (struct dx_node *)(bh2->b_data); + entries2 = node2->entries; ++ memset(&node2->fake, 0, 
sizeof(struct fake_dirent)); + node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize); +- node2->fake.inode = 0; + BUFFER_TRACE(frame->bh, "get_write_access"); + err = ext3_journal_get_write_access(handle, frame->bh); + if (err) +diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c +index be03a0b..764b86a 100644 +--- a/fs/partitions/osf.c ++++ b/fs/partitions/osf.c +@@ -10,7 +10,7 @@ + #include "check.h" + #include "osf.h" + +-#define MAX_OSF_PARTITIONS 8 ++#define MAX_OSF_PARTITIONS 18 + + int osf_partition(struct parsed_partitions *state) + { +diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h +index dcd6a7c..ca29e03 100644 +--- a/include/linux/ftrace.h ++++ b/include/linux/ftrace.h +@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void); + + extern void ftrace_graph_init_task(struct task_struct *t); + extern void ftrace_graph_exit_task(struct task_struct *t); ++extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); + + static inline int task_curr_ret_stack(struct task_struct *t) + { +@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void) + + static inline void ftrace_graph_init_task(struct task_struct *t) { } + static inline void ftrace_graph_exit_task(struct task_struct *t) { } ++static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } + + static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, + trace_func_graph_ent_t entryfunc) +diff --git a/include/linux/hid.h b/include/linux/hid.h +index d91c25e..fc5faf6 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -638,7 +638,7 @@ struct hid_driver { + struct hid_input *hidinput, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max); + void (*feature_mapping)(struct hid_device *hdev, +- struct hid_input *hidinput, struct hid_field *field, ++ struct hid_field *field, + struct hid_usage *usage); + #ifdef CONFIG_PM + int (*suspend)(struct hid_device *hdev, pm_message_t message); +diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h +index ab46194..76d896c 100644 +--- a/include/linux/usb/ch9.h ++++ b/include/linux/usb/ch9.h +@@ -585,6 +585,8 @@ struct usb_ss_ep_comp_descriptor { + #define USB_DT_SS_EP_COMP_SIZE 6 + /* Bits 4:0 of bmAttributes if this is a bulk endpoint */ + #define USB_SS_MAX_STREAMS(p) (1 << (p & 0x1f)) ++/* Bits 1:0 of bmAttributes if this is an isoc endpoint */ ++#define USB_SS_MULT(p) (1 + ((p) & 0x3)) + + /*-------------------------------------------------------------------------*/ + +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h +index a854fe8..f21f599 100644 +--- a/include/linux/usb/hcd.h ++++ b/include/linux/usb/hcd.h +@@ -99,6 +99,8 @@ struct usb_hcd { + #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ + #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ + #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ ++#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ ++#define HCD_FLAG_DEAD 6 /* controller has died? */ + + /* The flags can be tested using these macros; they are likely to + * be slightly faster than test_bit(). 
+@@ -108,6 +110,8 @@ struct usb_hcd { + #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) + #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) + #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) ++#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) ++#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) + + /* Flags that get set only during HCD registration or removal. */ + unsigned rh_registered:1;/* is root hub registered? */ +diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h +index c904913..45f3b9d 100644 +--- a/include/linux/usb/serial.h ++++ b/include/linux/usb/serial.h +@@ -191,7 +191,8 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) + * @id_table: pointer to a list of usb_device_id structures that define all + * of the devices this structure can support. + * @num_ports: the number of different ports this device will have. +- * @bulk_in_size: bytes to allocate for bulk-in buffer (0 = end-point size) ++ * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer ++ * (0 = end-point size) + * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) + * @calc_num_ports: pointer to a function to determine how many ports this + * device has dynamically. It will be called after the probe() +diff --git a/kernel/perf_event.c b/kernel/perf_event.c +index 656222f..b22a2ef 100644 +--- a/kernel/perf_event.c ++++ b/kernel/perf_event.c +@@ -4567,7 +4567,7 @@ static int perf_exclude_event(struct perf_event *event, + struct pt_regs *regs) + { + if (event->hw.state & PERF_HES_STOPPED) +- return 0; ++ return 1; + + if (regs) { + if (event->attr.exclude_user && user_mode(regs)) +@@ -4923,6 +4923,8 @@ static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) + { ++ if (event->hw.state & PERF_HES_STOPPED) ++ return 0; + /* + * All tracepoints are from kernel-space. + */ +diff --git a/kernel/sched.c b/kernel/sched.c +index 42eab5a..c164920c 100644 +--- a/kernel/sched.c ++++ b/kernel/sched.c +@@ -5572,7 +5572,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) + * The idle tasks have their own, simple scheduling class: + */ + idle->sched_class = &idle_sched_class; +- ftrace_graph_init_task(idle); ++ ftrace_graph_init_idle_task(idle, cpu); + } + + /* +diff --git a/kernel/smp.c b/kernel/smp.c +index 9910744..9545489 100644 +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -450,7 +450,7 @@ void smp_call_function_many(const struct cpumask *mask, + { + struct call_function_data *data; + unsigned long flags; +- int cpu, next_cpu, this_cpu = smp_processor_id(); ++ int refs, cpu, next_cpu, this_cpu = smp_processor_id(); + + /* + * Can deadlock when called with interrupts disabled. +@@ -461,7 +461,7 @@ void smp_call_function_many(const struct cpumask *mask, + WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() + && !oops_in_progress && !early_boot_irqs_disabled); + +- /* So, what's a CPU they want? Ignoring this one. */ ++ /* Try to fastpath. So, what's a CPU they want? Ignoring this one. 
*/ + cpu = cpumask_first_and(mask, cpu_online_mask); + if (cpu == this_cpu) + cpu = cpumask_next_and(cpu, mask, cpu_online_mask); +@@ -483,22 +483,49 @@ void smp_call_function_many(const struct cpumask *mask, + + data = &__get_cpu_var(cfd_data); + csd_lock(&data->csd); ++ ++ /* This BUG_ON verifies our reuse assertions and can be removed */ + BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); + ++ /* ++ * The global call function queue list add and delete are protected ++ * by a lock, but the list is traversed without any lock, relying ++ * on the rcu list add and delete to allow safe concurrent traversal. ++ * We reuse the call function data without waiting for any grace ++ * period after some other cpu removes it from the global queue. ++ * This means a cpu might find our data block as it is being ++ * filled out. ++ * ++ * We hold off the interrupt handler on the other cpu by ++ * ordering our writes to the cpu mask vs our setting of the ++ * refs counter. We assert only the cpu owning the data block ++ * will set a bit in cpumask, and each bit will only be cleared ++ * by the subject cpu. Each cpu must first find its bit is ++ * set and then check that refs is set indicating the element is ++ * ready to be processed, otherwise it must skip the entry. ++ * ++ * On the previous iteration refs was set to 0 by another cpu. ++ * To avoid the use of transitivity, set the counter to 0 here ++ * so the wmb will pair with the rmb in the interrupt handler. ++ */ ++ atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */ ++ + data->csd.func = func; + data->csd.info = info; +- cpumask_and(data->cpumask, mask, cpu_online_mask); +- cpumask_clear_cpu(this_cpu, data->cpumask); + +- /* +- * To ensure the interrupt handler gets an complete view +- * we order the cpumask and refs writes and order the read +- * of them in the interrupt handler. In addition we may +- * only clear our own cpu bit from the mask. +- */ ++ /* Ensure 0 refs is visible before mask. Also orders func and info */ + smp_wmb(); + +- atomic_set(&data->refs, cpumask_weight(data->cpumask)); ++ /* We rely on the "and" being processed before the store */ ++ cpumask_and(data->cpumask, mask, cpu_online_mask); ++ cpumask_clear_cpu(this_cpu, data->cpumask); ++ refs = cpumask_weight(data->cpumask); ++ ++ /* Some callers race with other cpus changing the passed mask */ ++ if (unlikely(!refs)) { ++ csd_unlock(&data->csd); ++ return; ++ } + + raw_spin_lock_irqsave(&call_function.lock, flags); + /* +@@ -507,6 +534,12 @@ void smp_call_function_many(const struct cpumask *mask, + * will not miss any other list entries: + */ + list_add_rcu(&data->csd.list, &call_function.queue); ++ /* ++ * We rely on the wmb() in list_add_rcu to complete our writes ++ * to the cpumask before this write to refs, which indicates ++ * data is on the list and is ready to be processed. 
++ */ ++ atomic_set(&data->refs, refs); + raw_spin_unlock_irqrestore(&call_function.lock, flags); + + /* +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index f3dadae..888b611 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void) + /* The cpu_boot init_task->ret_stack will never be freed */ + for_each_online_cpu(cpu) { + if (!idle_task(cpu)->ret_stack) +- ftrace_graph_init_task(idle_task(cpu)); ++ ftrace_graph_init_idle_task(idle_task(cpu), cpu); + } + + do { +@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void) + mutex_unlock(&ftrace_lock); + } + ++static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); ++ ++static void ++graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) ++{ ++ atomic_set(&t->tracing_graph_pause, 0); ++ atomic_set(&t->trace_overrun, 0); ++ t->ftrace_timestamp = 0; ++ /* make curr_ret_stack visable before we add the ret_stack */ ++ smp_wmb(); ++ t->ret_stack = ret_stack; ++} ++ ++/* ++ * Allocate a return stack for the idle task. May be the first ++ * time through, or it may be done by CPU hotplug online. ++ */ ++void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) ++{ ++ t->curr_ret_stack = -1; ++ /* ++ * The idle task has no parent, it either has its own ++ * stack or no stack at all. ++ */ ++ if (t->ret_stack) ++ WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); ++ ++ if (ftrace_graph_active) { ++ struct ftrace_ret_stack *ret_stack; ++ ++ ret_stack = per_cpu(idle_ret_stack, cpu); ++ if (!ret_stack) { ++ ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH ++ * sizeof(struct ftrace_ret_stack), ++ GFP_KERNEL); ++ if (!ret_stack) ++ return; ++ per_cpu(idle_ret_stack, cpu) = ret_stack; ++ } ++ graph_init_task(t, ret_stack); ++ } ++} ++ + /* Allocate a return stack for newly created task */ + void ftrace_graph_init_task(struct task_struct *t) + { +@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t) + GFP_KERNEL); + if (!ret_stack) + return; +- atomic_set(&t->tracing_graph_pause, 0); +- atomic_set(&t->trace_overrun, 0); +- t->ftrace_timestamp = 0; +- /* make curr_ret_stack visable before we add the ret_stack */ +- smp_wmb(); +- t->ret_stack = ret_stack; ++ graph_init_task(t, ret_stack); + } + } + +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index 57d344c..35d046b 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -436,7 +436,9 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) + if (!(rovr->tk_flags & RPC_TASK_KILLED)) { + rovr->tk_flags |= RPC_TASK_KILLED; + rpc_exit(rovr, -EIO); +- rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); ++ if (RPC_IS_QUEUED(rovr)) ++ rpc_wake_up_queued_task(rovr->tk_waitqueue, ++ rovr); + } + } + spin_unlock(&clnt->cl_lock); +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index 59e5994..17c3e3a 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -637,14 +637,12 @@ static void __rpc_execute(struct rpc_task *task) + save_callback = task->tk_callback; + task->tk_callback = NULL; + save_callback(task); +- } +- +- /* +- * Perform the next FSM step. +- * tk_action may be NULL when the task has been killed +- * by someone else. +- */ +- if (!RPC_IS_QUEUED(task)) { ++ } else { ++ /* ++ * Perform the next FSM step. ++ * tk_action may be NULL when the task has been killed ++ * by someone else. 
++ */ + if (task->tk_action == NULL) + break; + task->tk_action(task); +diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c +index 9d32f18..cb09f1f 100644 +--- a/security/tomoyo/file.c ++++ b/security/tomoyo/file.c +@@ -927,7 +927,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, + struct path *path, const int flag) + { + const u8 acc_mode = ACC_MODE(flag); +- int error = -ENOMEM; ++ int error = 0; + struct tomoyo_path_info buf; + struct tomoyo_request_info r; + int idx; +@@ -938,9 +938,6 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, + buf.name = NULL; + r.mode = TOMOYO_CONFIG_DISABLED; + idx = tomoyo_read_lock(); +- if (!tomoyo_get_realpath(&buf, path)) +- goto out; +- error = 0; + /* + * If the filename is specified by "deny_rewrite" keyword, + * we need to check "allow_rewrite" permission when the filename is not +diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c +index 12b44b0..a0da775 100644 +--- a/sound/drivers/aloop.c ++++ b/sound/drivers/aloop.c +@@ -482,8 +482,9 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) + cable->streams[SNDRV_PCM_STREAM_CAPTURE]; + unsigned long delta_play = 0, delta_capt = 0; + unsigned int running; ++ unsigned long flags; + +- spin_lock(&cable->lock); ++ spin_lock_irqsave(&cable->lock, flags); + running = cable->running ^ cable->pause; + if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { + delta_play = jiffies - dpcm_play->last_jiffies; +@@ -495,10 +496,8 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) + dpcm_capt->last_jiffies += delta_capt; + } + +- if (delta_play == 0 && delta_capt == 0) { +- spin_unlock(&cable->lock); +- return running; +- } ++ if (delta_play == 0 && delta_capt == 0) ++ goto unlock; + + if (delta_play > delta_capt) { + loopback_bytepos_update(dpcm_play, delta_play - delta_capt, +@@ -510,14 +509,14 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) + delta_capt = delta_play; + } + +- if (delta_play == 0 && delta_capt == 0) { +- spin_unlock(&cable->lock); +- return running; +- } ++ if (delta_play == 0 && delta_capt == 0) ++ goto unlock; ++ + /* note delta_capt == delta_play at this moment */ + loopback_bytepos_update(dpcm_capt, delta_capt, BYTEPOS_UPDATE_COPY); + loopback_bytepos_update(dpcm_play, delta_play, BYTEPOS_UPDATE_POSONLY); +- spin_unlock(&cable->lock); ++ unlock: ++ spin_unlock_irqrestore(&cable->lock, flags); + return running; + } + +diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c +index 22dbd91..448dd01 100644 +--- a/sound/pci/asihpi/hpioctl.c ++++ b/sound/pci/asihpi/hpioctl.c +@@ -155,6 +155,11 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + goto out; + } + ++ if (hm->h.adapter_index >= HPI_MAX_ADAPTERS) { ++ err = -EINVAL; ++ goto out; ++ } ++ + pa = &adapters[hm->h.adapter_index]; + hr->h.size = 0; + if (hm->h.object == HPI_OBJ_SUBSYSTEM) { +diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c +index 1bff80c..b932154 100644 +--- a/sound/pci/ctxfi/ctatc.c ++++ b/sound/pci/ctxfi/ctatc.c +@@ -869,7 +869,7 @@ spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm) + mutex_lock(&atc->atc_mutex); + dao->ops->get_spos(dao, &status); + if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) { +- status &= ((~IEC958_AES3_CON_FS) << 24); ++ status &= ~(IEC958_AES3_CON_FS << 24); + status |= (iec958_con_fs << 24); + dao->ops->set_spos(dao, status); + dao->ops->commit_write(dao); +diff --git 
a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c +index af56eb9..47d9ea9 100644 +--- a/sound/pci/ctxfi/ctdaio.c ++++ b/sound/pci/ctxfi/ctdaio.c +@@ -176,6 +176,7 @@ static int dao_set_left_input(struct dao *dao, struct rsc *input) + if (!entry) + return -ENOMEM; + ++ dao->ops->clear_left_input(dao); + /* Program master and conjugate resources */ + input->ops->master(input); + daio->rscl.ops->master(&daio->rscl); +@@ -204,6 +205,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input) + if (!entry) + return -ENOMEM; + ++ dao->ops->clear_right_input(dao); + /* Program master and conjugate resources */ + input->ops->master(input); + daio->rscr.ops->master(&daio->rscr); +diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c +index 15c1e72..c3519ff 100644 +--- a/sound/pci/ctxfi/ctmixer.c ++++ b/sound/pci/ctxfi/ctmixer.c +@@ -566,19 +566,6 @@ static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol, + return 0; + } + +-static int ct_spdif_default_get(struct snd_kcontrol *kcontrol, +- struct snd_ctl_elem_value *ucontrol) +-{ +- unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF; +- +- ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; +- ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; +- ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; +- ucontrol->value.iec958.status[3] = (status >> 24) & 0xff; +- +- return 0; +-} +- + static int ct_spdif_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) + { +@@ -586,6 +573,10 @@ static int ct_spdif_get(struct snd_kcontrol *kcontrol, + unsigned int status; + + atc->spdif_out_get_status(atc, &status); ++ ++ if (status == 0) ++ status = SNDRV_PCM_DEFAULT_CON_SPDIF; ++ + ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; + ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; + ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; +@@ -629,7 +620,7 @@ static struct snd_kcontrol_new iec958_default_ctl = { + .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), + .count = 1, + .info = ct_spdif_info, +- .get = ct_spdif_default_get, ++ .get = ct_spdif_get, + .put = ct_spdif_put, + .private_value = MIXER_IEC958_DEFAULT + }; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 4261bb8..acd2099 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -394,6 +394,7 @@ struct alc_spec { + /* other flags */ + unsigned int no_analog :1; /* digital I/O only */ + unsigned int dual_adc_switch:1; /* switch ADCs (for ALC275) */ ++ unsigned int single_input_src:1; + int init_amp; + int codec_variant; /* flag for other variants */ + +@@ -3919,6 +3920,8 @@ static struct hda_amp_list alc880_lg_loopbacks[] = { + * Common callbacks + */ + ++static void alc_init_special_input_src(struct hda_codec *codec); ++ + static int alc_init(struct hda_codec *codec) + { + struct alc_spec *spec = codec->spec; +@@ -3929,6 +3932,7 @@ static int alc_init(struct hda_codec *codec) + + for (i = 0; i < spec->num_init_verbs; i++) + snd_hda_sequence_write(codec, spec->init_verbs[i]); ++ alc_init_special_input_src(codec); + + if (spec->init_hook) + spec->init_hook(codec); +@@ -5151,7 +5155,9 @@ static const char *alc_get_line_out_pfx(const struct auto_pin_cfg *cfg, + + switch (cfg->line_out_type) { + case AUTO_PIN_SPEAKER_OUT: +- return "Speaker"; ++ if (cfg->line_outs == 1) ++ return "Speaker"; ++ break; + case AUTO_PIN_HP_OUT: + return "Headphone"; + default: +@@ -5205,16 +5211,19 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec, + 
return err; + } else { + const char *name = pfx; +- if (!name) ++ int index = i; ++ if (!name) { + name = chname[i]; ++ index = 0; ++ } + err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, +- name, i, ++ name, index, + HDA_COMPOSE_AMP_VAL(nid, 3, 0, + HDA_OUTPUT)); + if (err < 0) + return err; + err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, +- name, i, ++ name, index, + HDA_COMPOSE_AMP_VAL(nid, 3, 2, + HDA_INPUT)); + if (err < 0) +@@ -5585,6 +5594,7 @@ static void fixup_single_adc(struct hda_codec *codec) + spec->capsrc_nids += i; + spec->adc_nids += i; + spec->num_adc_nids = 1; ++ spec->single_input_src = 1; + } + } + +@@ -5596,6 +5606,16 @@ static void fixup_dual_adc_switch(struct hda_codec *codec) + init_capsrc_for_pin(codec, spec->int_mic.pin); + } + ++/* initialize some special cases for input sources */ ++static void alc_init_special_input_src(struct hda_codec *codec) ++{ ++ struct alc_spec *spec = codec->spec; ++ if (spec->dual_adc_switch) ++ fixup_dual_adc_switch(codec); ++ else if (spec->single_input_src) ++ init_capsrc_for_pin(codec, spec->autocfg.inputs[0].pin); ++} ++ + static void set_capture_mixer(struct hda_codec *codec) + { + struct alc_spec *spec = codec->spec; +@@ -5611,7 +5631,7 @@ static void set_capture_mixer(struct hda_codec *codec) + int mux = 0; + int num_adcs = spec->num_adc_nids; + if (spec->dual_adc_switch) +- fixup_dual_adc_switch(codec); ++ num_adcs = 1; + else if (spec->auto_mic) + fixup_automic_adc(codec); + else if (spec->input_mux) { +@@ -5620,8 +5640,6 @@ static void set_capture_mixer(struct hda_codec *codec) + else if (spec->input_mux->num_items == 1) + fixup_single_adc(codec); + } +- if (spec->dual_adc_switch) +- num_adcs = 1; + spec->cap_mixer = caps[mux][num_adcs - 1]; + } + } +@@ -10748,6 +10766,7 @@ static struct alc_config_preset alc882_presets[] = { + */ + enum { + PINFIX_ABIT_AW9D_MAX, ++ PINFIX_LENOVO_Y530, + PINFIX_PB_M5210, + PINFIX_ACER_ASPIRE_7736, + }; +@@ -10762,6 +10781,14 @@ static const struct alc_fixup alc882_fixups[] = { + { } + } + }, ++ [PINFIX_LENOVO_Y530] = { ++ .type = ALC_FIXUP_PINS, ++ .v.pins = (const struct alc_pincfg[]) { ++ { 0x15, 0x99130112 }, /* rear int speakers */ ++ { 0x16, 0x99130111 }, /* subwoofer */ ++ { } ++ } ++ }, + [PINFIX_PB_M5210] = { + .type = ALC_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { +@@ -10777,6 +10804,7 @@ static const struct alc_fixup alc882_fixups[] = { + + static struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", PINFIX_PB_M5210), ++ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), + SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), + SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), + {} +@@ -10829,23 +10857,28 @@ static void alc882_auto_init_hp_out(struct hda_codec *codec) + hda_nid_t pin, dac; + int i; + +- for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { +- pin = spec->autocfg.hp_pins[i]; +- if (!pin) +- break; +- dac = spec->multiout.hp_nid; +- if (!dac) +- dac = spec->multiout.dac_nids[0]; /* to front */ +- alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); ++ if (spec->autocfg.line_out_type != AUTO_PIN_HP_OUT) { ++ for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { ++ pin = spec->autocfg.hp_pins[i]; ++ if (!pin) ++ break; ++ dac = spec->multiout.hp_nid; ++ if (!dac) ++ dac = spec->multiout.dac_nids[0]; /* to front */ ++ alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); ++ } + } +- for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); 
i++) { +- pin = spec->autocfg.speaker_pins[i]; +- if (!pin) +- break; +- dac = spec->multiout.extra_out_nid[0]; +- if (!dac) +- dac = spec->multiout.dac_nids[0]; /* to front */ +- alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); ++ ++ if (spec->autocfg.line_out_type != AUTO_PIN_SPEAKER_OUT) { ++ for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) { ++ pin = spec->autocfg.speaker_pins[i]; ++ if (!pin) ++ break; ++ dac = spec->multiout.extra_out_nid[0]; ++ if (!dac) ++ dac = spec->multiout.dac_nids[0]; /* to front */ ++ alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); ++ } + } + } + +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index bd7b123..052062d 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -757,7 +757,7 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e + struct sigmatel_spec *spec = codec->spec; + unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); + const struct hda_input_mux *imux = spec->input_mux; +- unsigned int idx, prev_idx; ++ unsigned int idx, prev_idx, didx; + + idx = ucontrol->value.enumerated.item[0]; + if (idx >= imux->num_items) +@@ -769,7 +769,8 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e + snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0, + AC_VERB_SET_CONNECT_SEL, + imux->items[idx].index); +- if (prev_idx >= spec->num_analog_muxes) { ++ if (prev_idx >= spec->num_analog_muxes && ++ spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) { + imux = spec->dinput_mux; + /* 0 = analog */ + snd_hda_codec_write_cache(codec, +@@ -779,9 +780,13 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e + } + } else { + imux = spec->dinput_mux; ++ /* first dimux item is hardcoded to select analog imux, ++ * so lets skip it ++ */ ++ didx = idx - spec->num_analog_muxes + 1; + snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, + AC_VERB_SET_CONNECT_SEL, +- imux->items[idx - 1].index); ++ imux->items[didx].index); + } + spec->cur_mux[adc_idx] = idx; + return 1; +diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c +index 3ceaef6..838a0d5 100644 +--- a/sound/soc/pxa/z2.c ++++ b/sound/soc/pxa/z2.c +@@ -147,7 +147,7 @@ static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd) + snd_soc_dapm_disable_pin(dapm, "LINPUT3"); + snd_soc_dapm_disable_pin(dapm, "RINPUT3"); + snd_soc_dapm_disable_pin(dapm, "OUT3"); +- snd_soc_dapm_disable_pin(dapm, "MONO"); ++ snd_soc_dapm_disable_pin(dapm, "MONO1"); + + /* Add z2 specific widgets */ + snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets, +diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN +index 97d7656..26d4d3f 100755 +--- a/tools/perf/util/PERF-VERSION-GEN ++++ b/tools/perf/util/PERF-VERSION-GEN +@@ -23,10 +23,10 @@ if test -d ../../.git -o -f ../../.git && + then + VN=$(echo "$VN" | sed -e 's/-/./g'); + else +- eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '` +- eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '` +- eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '` +- eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '` ++ eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ') ++ eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ') ++ eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ') ++ eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ') + + VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}" + fi diff --git 
a/patches.kernel.org/patch-2.6.38.1-2 b/patches.kernel.org/patch-2.6.38.1-2 new file mode 100644 index 0000000..4f91a4c --- /dev/null +++ b/patches.kernel.org/patch-2.6.38.1-2 @@ -0,0 +1,2543 @@ +From: Jiri Slaby +Subject: Linux 2.6.38.2 +Patch-mainline: Linux 2.6.38.2 + +Signed-off-by: Jiri Slaby +--- +diff --git a/Makefile b/Makefile +index 167ef45..6c15525 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + VERSION = 2 + PATCHLEVEL = 6 + SUBLEVEL = 38 +-EXTRAVERSION = .1 ++EXTRAVERSION = .2 + NAME = Flesh-Eating Bats with Fangs + + # *DOCUMENTATION* +diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c +index f62bb4c..7c3fb07 100644 +--- a/arch/arm/mach-s3c2440/mach-mini2440.c ++++ b/arch/arm/mach-s3c2440/mach-mini2440.c +@@ -506,6 +506,11 @@ static struct i2c_board_info mini2440_i2c_devs[] __initdata = { + }, + }; + ++static struct platform_device uda1340_codec = { ++ .name = "uda134x-codec", ++ .id = -1, ++}; ++ + static struct platform_device *mini2440_devices[] __initdata = { + &s3c_device_ohci, + &s3c_device_wdt, +@@ -521,7 +526,9 @@ static struct platform_device *mini2440_devices[] __initdata = { + &s3c_device_nand, + &s3c_device_sdi, + &s3c_device_iis, ++ &uda1340_codec, + &mini2440_audio, ++ &samsung_asoc_dma, + }; + + static void __init mini2440_map_io(void) +diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c +index 90a15d2..2130ca6 100644 +--- a/arch/sh/kernel/ptrace_32.c ++++ b/arch/sh/kernel/ptrace_32.c +@@ -101,6 +101,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr) + + attr = bp->attr; + attr.bp_addr = addr; ++ /* reenable breakpoint */ ++ attr.disabled = false; + err = modify_user_hw_breakpoint(bp, &attr); + if (unlikely(err)) + return err; +@@ -392,6 +394,9 @@ long arch_ptrace(struct task_struct *child, long request, + tmp = 0; + } else { + unsigned long index; ++ ret = init_fpu(child); ++ if (ret) ++ break; + index = addr - offsetof(struct user, fpu); + tmp = ((unsigned long *)child->thread.xstate) + [index >> 2]; +@@ -423,6 +428,9 @@ long arch_ptrace(struct task_struct *child, long request, + else if (addr >= offsetof(struct user, fpu) && + addr < offsetof(struct user, u_fpvalid)) { + unsigned long index; ++ ret = init_fpu(child); ++ if (ret) ++ break; + index = addr - offsetof(struct user, fpu); + set_stopped_child_used_math(child); + ((unsigned long *)child->thread.xstate) +diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c +index 4436eac..c8f9764 100644 +--- a/arch/sh/kernel/ptrace_64.c ++++ b/arch/sh/kernel/ptrace_64.c +@@ -403,6 +403,9 @@ long arch_ptrace(struct task_struct *child, long request, + else if ((addr >= offsetof(struct user, fpu)) && + (addr < offsetof(struct user, u_fpvalid))) { + unsigned long index; ++ ret = init_fpu(child); ++ if (ret) ++ break; + index = addr - offsetof(struct user, fpu); + tmp = get_fpu_long(child, index); + } else if (addr == offsetof(struct user, u_fpvalid)) { +@@ -442,6 +445,9 @@ long arch_ptrace(struct task_struct *child, long request, + else if ((addr >= offsetof(struct user, fpu)) && + (addr < offsetof(struct user, u_fpvalid))) { + unsigned long index; ++ ret = init_fpu(child); ++ if (ret) ++ break; + index = addr - offsetof(struct user, fpu); + ret = put_fpu_long(child, index, data); + } +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S +index c8b4efa..9ca3b0e 100644 +--- a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -1413,7 +1413,7 @@ ENTRY(async_page_fault) + 
CFI_ADJUST_CFA_OFFSET 4 + jmp error_code + CFI_ENDPROC +-END(apf_page_fault) ++END(async_page_fault) + #endif + + /* +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c +index 2d2673c..5655c22 100644 +--- a/arch/x86/kernel/head64.c ++++ b/arch/x86/kernel/head64.c +@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data) + /* Make NULL pointers segfault */ + zap_identity_mappings(); + +- /* Cleanup the over mapped high alias */ +- cleanup_highmap(); +- + max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; + + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index d3cfe26..e543fe9 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -297,6 +297,9 @@ static void __init init_gbpages(void) + static inline void init_gbpages(void) + { + } ++static void __init cleanup_highmap(void) ++{ ++} + #endif + + static void __init reserve_brk(void) +@@ -922,6 +925,8 @@ void __init setup_arch(char **cmdline_p) + */ + reserve_brk(); + ++ cleanup_highmap(); ++ + memblock.current_limit = get_max_mapped(); + memblock_x86_fill(); + +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 947f42a..f13ff3a 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -279,25 +279,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, + load_cr3(swapper_pg_dir); + #endif + +-#ifdef CONFIG_X86_64 +- if (!after_bootmem && !start) { +- pud_t *pud; +- pmd_t *pmd; +- +- mmu_cr4_features = read_cr4(); +- +- /* +- * _brk_end cannot change anymore, but it and _end may be +- * located on different 2M pages. cleanup_highmap(), however, +- * can only consider _end when it runs, so destroy any +- * mappings beyond _brk_end here. +- */ +- pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); +- pmd = pmd_offset(pud, _brk_end - 1); +- while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) +- pmd_clear(pmd); +- } +-#endif + __flush_tlb_all(); + + if (!after_bootmem && e820_table_end > e820_table_start) +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index c14a542..68f9921 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -51,6 +51,7 @@ + #include + #include + #include ++#include + + static int __init parse_direct_gbpages_off(char *arg) + { +@@ -293,18 +294,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) + * to the compile time generated pmds. This results in invalid pmds up + * to the point where we hit the physaddr 0 mapping. + * +- * We limit the mappings to the region from _text to _end. _end is +- * rounded up to the 2MB boundary. This catches the invalid pmds as ++ * We limit the mappings to the region from _text to _brk_end. _brk_end ++ * is rounded up to the 2MB boundary. 
This catches the invalid pmds as + * well, as they are located before _text: + */ + void __init cleanup_highmap(void) + { + unsigned long vaddr = __START_KERNEL_map; +- unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; ++ unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); ++ unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; + pmd_t *pmd = level2_kernel_pgt; +- pmd_t *last_pmd = pmd + PTRS_PER_PMD; + +- for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) { ++ for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { + if (pmd_none(*pmd)) + continue; + if (vaddr < (unsigned long) _text || vaddr > end) +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c +index f608942..6020562 100644 +--- a/arch/x86/xen/mmu.c ++++ b/arch/x86/xen/mmu.c +@@ -1651,9 +1651,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) + for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { + pte_t pte; + +- if (pfn > max_pfn_mapped) +- max_pfn_mapped = pfn; +- + if (!pte_none(pte_page[pteidx])) + continue; + +@@ -1711,6 +1708,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, + pud_t *l3; + pmd_t *l2; + ++ /* max_pfn_mapped is the last pfn mapped in the initial memory ++ * mappings. Considering that on Xen after the kernel mappings we ++ * have the mappings of some pages that don't exist in pfn space, we ++ * set max_pfn_mapped to the last real pfn mapped. */ ++ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); ++ + /* Zap identity mapping */ + init_level4_pgt[0] = __pgd(0); + +@@ -1815,9 +1818,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, + initial_kernel_pmd = + extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); + +- max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + +- xen_start_info->nr_pt_frames * PAGE_SIZE + +- 512*1024); ++ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + + kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); + memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); +diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c +index 69ad529..ea5ac2d 100644 +--- a/drivers/firmware/dcdbas.c ++++ b/drivers/firmware/dcdbas.c +@@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) + } + + /* generate SMI */ ++ /* inb to force posted write through and make SMI happen now */ + asm volatile ( +- "outb %b0,%w1" ++ "outb %b0,%w1\n" ++ "inb %w1" + : /* no output args */ + : "a" (smi_cmd->command_code), + "d" (smi_cmd->command_address), +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index 654faa8..6a5371b 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, + uint32_t __user *encoder_id; + struct drm_mode_group *mode_group; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + + /* +@@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev, + struct drm_mode_object *obj; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + + obj = drm_mode_object_find(dev, crtc_resp->crtc_id, +@@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, + uint64_t __user *prop_values; + uint32_t __user *encoder_ptr; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); 
+ + DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); +@@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, + struct drm_encoder *encoder; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, enc_resp->encoder_id, + DRM_MODE_OBJECT_ENCODER); +@@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, + int ret = 0; + int i; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, crtc_req->crtc_id, + DRM_MODE_OBJECT_CRTC); +@@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, + struct drm_crtc *crtc; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + if (!req->flags) { + DRM_ERROR("no operation set\n"); + return -EINVAL; +@@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev, + struct drm_framebuffer *fb; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + if ((config->min_width > r->width) || (r->width > config->max_width)) { + DRM_ERROR("mode new framebuffer width not within limits\n"); + return -EINVAL; +@@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev, + int ret = 0; + int found = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); + /* TODO check that we realy get a framebuffer back. */ +@@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev, + struct drm_framebuffer *fb; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); + if (!obj) { +@@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + int num_clips; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); + if (!obj) { +@@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, + struct drm_mode_modeinfo *umode = &mode_cmd->mode; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + + obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); +@@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, + struct drm_mode_modeinfo *umode = &mode_cmd->mode; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + + obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); +@@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, + uint64_t __user *values_ptr; + uint32_t __user *blob_length_ptr; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); + if (!obj) { +@@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, + int ret = 0; + void *blob_ptr; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, 
out_resp->blob_id, DRM_MODE_OBJECT_BLOB); + if (!obj) { +@@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, + int ret = -EINVAL; + int i; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + + obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); +@@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, + int size; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) { +@@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, + int size; + int ret = 0; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) { +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index ea1c4b0..c3c78ee 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -498,11 +498,12 @@ EXPORT_SYMBOL(drm_gem_vm_open); + void drm_gem_vm_close(struct vm_area_struct *vma) + { + struct drm_gem_object *obj = vma->vm_private_data; ++ struct drm_device *dev = obj->dev; + +- mutex_lock(&obj->dev->struct_mutex); ++ mutex_lock(&dev->struct_mutex); + drm_vm_close_locked(vma); + drm_gem_object_unreference(obj); +- mutex_unlock(&obj->dev->struct_mutex); ++ mutex_unlock(&dev->struct_mutex); + } + EXPORT_SYMBOL(drm_gem_vm_close); + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 36e66cc..729c95a 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1749,8 +1749,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) + return; + + spin_lock(&file_priv->mm.lock); +- list_del(&request->client_list); +- request->file_priv = NULL; ++ if (request->file_priv) { ++ list_del(&request->client_list); ++ request->file_priv = NULL; ++ } + spin_unlock(&file_priv->mm.lock); + } + +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +index 50ab161..ded73a6 100644 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -388,6 +388,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + ++ /* We can't wait for rendering with pagefaults disabled */ ++ if (obj->active && in_atomic()) ++ return -EFAULT; ++ + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + return ret; +@@ -461,15 +465,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, + struct list_head *objects) + { + struct drm_i915_gem_object *obj; +- int ret; +- ++ int ret = 0; ++ ++ /* This is the fast path and we cannot handle a pagefault whilst ++ * holding the struct mutex lest the user pass in the relocations ++ * contained within a mmaped bo. For in such a case we, the page ++ * fault handler would call i915_gem_fault() and we would try to ++ * acquire the struct mutex again. Obviously this is bad and so ++ * lockdep complains vehemently. 
++ */ ++ pagefault_disable(); + list_for_each_entry(obj, objects, exec_list) { + ret = i915_gem_execbuffer_relocate_object(obj, eb); + if (ret) +- return ret; ++ break; + } ++ pagefault_enable(); + +- return 0; ++ return ret; + } + + static int +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index 4a5a73b..e967cc8 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode + /* adjust pixel clock as needed */ + adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); + +- if (ASIC_IS_AVIVO(rdev)) ++ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) ++ /* TV seems to prefer the legacy algo on some boards */ ++ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, ++ &ref_div, &post_div); ++ else if (ASIC_IS_AVIVO(rdev)) + radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); + else +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c +index cf7c8d5..cf602e2 100644 +--- a/drivers/gpu/drm/radeon/radeon_combios.c ++++ b/drivers/gpu/drm/radeon/radeon_combios.c +@@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, + + bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) + { +- int edid_info; ++ int edid_info, size; + struct edid *edid; + unsigned char *raw; + edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); +@@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) + return false; + + raw = rdev->bios + edid_info; +- edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); ++ size = EDID_LENGTH * (raw[0x7e] + 1); ++ edid = kmalloc(size, GFP_KERNEL); + if (edid == NULL) + return false; + +- memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); ++ memcpy((unsigned char *)edid, raw, size); + + if (!drm_edid_is_valid(edid)) { + kfree(edid); +@@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) + } + + rdev->mode_info.bios_hardcoded_edid = edid; ++ rdev->mode_info.bios_hardcoded_edid_size = size; + return true; + } + +@@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) + struct edid * + radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) + { +- if (rdev->mode_info.bios_hardcoded_edid) +- return rdev->mode_info.bios_hardcoded_edid; ++ struct edid *edid; ++ ++ if (rdev->mode_info.bios_hardcoded_edid) { ++ edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); ++ if (edid) { ++ memcpy((unsigned char *)edid, ++ (unsigned char *)rdev->mode_info.bios_hardcoded_edid, ++ rdev->mode_info.bios_hardcoded_edid_size); ++ return edid; ++ } ++ } + return NULL; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 22b7e3d..d83338b 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -629,6 +629,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, + static enum drm_connector_status + radeon_vga_detect(struct drm_connector *connector, bool force) + { ++ struct drm_device *dev = connector->dev; ++ struct radeon_device *rdev = dev->dev_private; + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct 
drm_encoder *encoder; + struct drm_encoder_helper_funcs *encoder_funcs; +@@ -679,6 +681,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force) + + if (ret == connector_status_connected) + ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); ++ ++ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the ++ * vbios to deal with KVMs. If we have one and are not able to detect a monitor ++ * by other means, assume the CRT is connected and use that EDID. ++ */ ++ if ((!rdev->is_atom_bios) && ++ (ret == connector_status_disconnected) && ++ rdev->mode_info.bios_hardcoded_edid_size) { ++ ret = connector_status_connected; ++ } ++ + radeon_connector_update_scratch_regs(connector, ret); + return ret; + } +@@ -790,6 +803,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) + static enum drm_connector_status + radeon_dvi_detect(struct drm_connector *connector, bool force) + { ++ struct drm_device *dev = connector->dev; ++ struct radeon_device *rdev = dev->dev_private; + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct drm_encoder *encoder = NULL; + struct drm_encoder_helper_funcs *encoder_funcs; +@@ -829,8 +844,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) + * you don't really know what's connected to which port as both are digital. + */ + if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { +- struct drm_device *dev = connector->dev; +- struct radeon_device *rdev = dev->dev_private; + struct drm_connector *list_connector; + struct radeon_connector *list_radeon_connector; + list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { +@@ -895,6 +908,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) + ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); + } + ++ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the ++ * vbios to deal with KVMs. If we have one and are not able to detect a monitor ++ * by other means, assume the DFP is connected and use that EDID. In most ++ * cases the DVI port is actually a virtual KVM port connected to the service ++ * processor. 
++ */ ++ if ((!rdev->is_atom_bios) && ++ (ret == connector_status_disconnected) && ++ rdev->mode_info.bios_hardcoded_edid_size) { ++ radeon_connector->use_digital = true; ++ ret = connector_status_connected; ++ } ++ + out: + /* updated in get modes as well since we need to know if it's analog or digital */ + radeon_connector_update_scratch_regs(connector, ret); +diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h +index a670caa..8c134db 100644 +--- a/drivers/gpu/drm/radeon/radeon_mode.h ++++ b/drivers/gpu/drm/radeon/radeon_mode.h +@@ -239,6 +239,7 @@ struct radeon_mode_info { + struct drm_property *underscan_vborder_property; + /* hardcoded DFP edid from BIOS */ + struct edid *bios_hardcoded_edid; ++ int bios_hardcoded_edid_size; + + /* pointer to fbdev info structure */ + struct radeon_fbdev *rfbdev; +diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c +index 7f85a86..53e6273 100644 +--- a/drivers/input/xen-kbdfront.c ++++ b/drivers/input/xen-kbdfront.c +@@ -110,7 +110,7 @@ static irqreturn_t input_handler(int rq, void *dev_id) + static int __devinit xenkbd_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) + { +- int ret, i; ++ int ret, i, abs; + struct xenkbd_info *info; + struct input_dev *kbd, *ptr; + +@@ -128,6 +128,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, + if (!info->page) + goto error_nomem; + ++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) ++ abs = 0; ++ if (abs) ++ xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1"); ++ + /* keyboard */ + kbd = input_allocate_device(); + if (!kbd) +@@ -137,11 +142,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, + kbd->id.bustype = BUS_PCI; + kbd->id.vendor = 0x5853; + kbd->id.product = 0xffff; +- kbd->evbit[0] = BIT(EV_KEY); ++ ++ __set_bit(EV_KEY, kbd->evbit); + for (i = KEY_ESC; i < KEY_UNKNOWN; i++) +- set_bit(i, kbd->keybit); ++ __set_bit(i, kbd->keybit); + for (i = KEY_OK; i < KEY_MAX; i++) +- set_bit(i, kbd->keybit); ++ __set_bit(i, kbd->keybit); + + ret = input_register_device(kbd); + if (ret) { +@@ -160,12 +166,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, + ptr->id.bustype = BUS_PCI; + ptr->id.vendor = 0x5853; + ptr->id.product = 0xfffe; +- ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS); ++ ++ if (abs) { ++ __set_bit(EV_ABS, ptr->evbit); ++ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); ++ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); ++ } else { ++ input_set_capability(ptr, EV_REL, REL_X); ++ input_set_capability(ptr, EV_REL, REL_Y); ++ } ++ input_set_capability(ptr, EV_REL, REL_WHEEL); ++ ++ __set_bit(EV_KEY, ptr->evbit); + for (i = BTN_LEFT; i <= BTN_TASK; i++) +- set_bit(i, ptr->keybit); +- ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL); +- input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); +- input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); ++ __set_bit(i, ptr->keybit); + + ret = input_register_device(ptr); + if (ret) { +@@ -272,7 +286,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, + enum xenbus_state backend_state) + { + struct xenkbd_info *info = dev_get_drvdata(&dev->dev); +- int ret, val; ++ int val; + + switch (backend_state) { + case XenbusStateInitialising: +@@ -285,16 +299,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, + + case XenbusStateInitWait: + InitWait: +- ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, +- "feature-abs-pointer", 
"%d", &val); +- if (ret < 0) +- val = 0; +- if (val) { +- ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, +- "request-abs-pointer", "1"); +- if (ret) +- pr_warning("can't request abs-pointer\n"); +- } + xenbus_switch_state(dev, XenbusStateConnected); + break; + +diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c +index a1e9dfb..6459b8c 100644 +--- a/drivers/media/video/uvc/uvc_driver.c ++++ b/drivers/media/video/uvc/uvc_driver.c +@@ -1264,6 +1264,14 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain, + + break; + ++ case UVC_OTT_VENDOR_SPECIFIC: ++ case UVC_OTT_DISPLAY: ++ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: ++ if (uvc_trace_param & UVC_TRACE_PROBE) ++ printk(" OT %d", entity->id); ++ ++ break; ++ + case UVC_TT_STREAMING: + if (UVC_ENTITY_IS_ITERM(entity)) { + if (uvc_trace_param & UVC_TRACE_PROBE) +diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c +index 5673d67..545c029 100644 +--- a/drivers/media/video/uvc/uvc_video.c ++++ b/drivers/media/video/uvc/uvc_video.c +@@ -89,15 +89,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, + static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, + struct uvc_streaming_control *ctrl) + { +- struct uvc_format *format; ++ struct uvc_format *format = NULL; + struct uvc_frame *frame = NULL; + unsigned int i; + +- if (ctrl->bFormatIndex <= 0 || +- ctrl->bFormatIndex > stream->nformats) +- return; ++ for (i = 0; i < stream->nformats; ++i) { ++ if (stream->format[i].index == ctrl->bFormatIndex) { ++ format = &stream->format[i]; ++ break; ++ } ++ } + +- format = &stream->format[ctrl->bFormatIndex - 1]; ++ if (format == NULL) ++ return; + + for (i = 0; i < format->nframes; ++i) { + if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) { +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c +index cb23aa2..e610cfe 100644 +--- a/drivers/pci/hotplug/acpiphp_glue.c ++++ b/drivers/pci/hotplug/acpiphp_glue.c +@@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) + + pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); + if (pdev) { ++ pdev->current_state = PCI_D0; + slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); + pci_dev_put(pdev); + } +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 4ab49d4..30bb8d0 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb) + if (!ACM_READY(acm)) + goto exit; + ++ usb_mark_last_busy(acm->dev); ++ + data = (unsigned char *)(dr + 1); + switch (dr->bNotificationType) { + case USB_CDC_NOTIFY_NETWORK_CONNECTION: +@@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb) + break; + } + exit: +- usb_mark_last_busy(acm->dev); + retval = usb_submit_urb(urb, GFP_ATOMIC); + if (retval) + dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " +@@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work) + if (!ACM_READY(acm)) + return; + tty = tty_port_tty_get(&acm->port); ++ if (!tty) ++ return; + tty_wakeup(tty); + tty_kref_put(tty); + } +@@ -646,8 +649,10 @@ static void acm_port_down(struct acm *acm) + usb_kill_urb(acm->ctrlurb); + for (i = 0; i < ACM_NW; i++) + usb_kill_urb(acm->wb[i].urb); ++ tasklet_disable(&acm->urb_task); + for (i = 0; i < nr; i++) + usb_kill_urb(acm->ru[i].urb); ++ tasklet_enable(&acm->urb_task); + acm->control->needs_remote_wakeup = 0; + usb_autopm_put_interface(acm->control); + } +diff --git 
a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c +index 47085e5..a97c018 100644 +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -281,7 +281,7 @@ static void cleanup(struct wdm_device *desc) + desc->sbuf, + desc->validity->transfer_dma); + usb_free_coherent(interface_to_usbdev(desc->intf), +- desc->wMaxCommand, ++ desc->bMaxPacketSize0, + desc->inbuf, + desc->response->transfer_dma); + kfree(desc->orq); +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index a7131ad..37518df 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -802,7 +802,7 @@ static int proc_control(struct dev_state *ps, void __user *arg) + tbuf, ctrl.wLength, tmo); + usb_lock_device(dev); + snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, +- tbuf, i); ++ tbuf, max(i, 0)); + if ((i > 0) && ctrl.wLength) { + if (copy_to_user(ctrl.data, tbuf, i)) { + free_page((unsigned long)tbuf); +diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c +index 233c288..5add8b5 100644 +--- a/drivers/usb/host/ehci-q.c ++++ b/drivers/usb/host/ehci-q.c +@@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) + int stopped; + unsigned count = 0; + u8 state; +- const __le32 halt = HALT_BIT(ehci); + struct ehci_qh_hw *hw = qh->hw; + + if (unlikely (list_empty (&qh->qtd_list))) +@@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) + && !(qtd->hw_alt_next + & EHCI_LIST_END(ehci))) { + stopped = 1; +- goto halt; + } + + /* stop scanning when we reach qtds the hc is using */ +@@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) + */ + ehci_clear_tt_buffer(ehci, qh, urb, token); + } +- +- /* force halt for unlinked or blocked qh, so we'll +- * patch the qh later and so that completions can't +- * activate it while we "know" it's stopped. 
+- */ +- if ((halt & hw->hw_token) == 0) { +-halt: +- hw->hw_token |= halt; +- wmb (); +- } + } + + /* unless we already know the urb's status, collect qtd status +diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c +index f7a2057..8b1d94a 100644 +--- a/drivers/usb/misc/uss720.c ++++ b/drivers/usb/misc/uss720.c +@@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p + spin_lock_irqsave(&priv->asynclock, flags); + list_add_tail(&rq->asynclist, &priv->asynclist); + spin_unlock_irqrestore(&priv->asynclock, flags); ++ kref_get(&rq->ref_count); + ret = usb_submit_urb(rq->urb, mem_flags); +- if (!ret) { +- kref_get(&rq->ref_count); ++ if (!ret) + return rq; +- } +- kref_put(&rq->ref_count, destroy_async); ++ destroy_async(&rq->ref_count); + err("submit_async_request submit_urb failed with %d", ret); + return NULL; + } +diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c +index 9d49d1c..52312e8 100644 +--- a/drivers/usb/musb/blackfin.c ++++ b/drivers/usb/musb/blackfin.c +@@ -322,7 +322,7 @@ static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout) + mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); + } + +-static int bfin_musb_get_vbus_status(struct musb *musb) ++static int bfin_musb_vbus_status(struct musb *musb) + { + return 0; + } +@@ -540,7 +540,7 @@ static struct dev_pm_ops bfin_pm_ops = { + .resume = bfin_resume, + }; + +-#define DEV_PM_OPS &bfin_pm_op, ++#define DEV_PM_OPS &bfin_pm_ops + #else + #define DEV_PM_OPS NULL + #endif +@@ -548,7 +548,7 @@ static struct dev_pm_ops bfin_pm_ops = { + static struct platform_driver bfin_driver = { + .remove = __exit_p(bfin_remove), + .driver = { +- .name = "musb-bfin", ++ .name = "musb-blackfin", + .pm = DEV_PM_OPS, + }, + }; +diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c +index 0056a41..15e8e1a 100644 +--- a/drivers/video/console/tileblit.c ++++ b/drivers/video/console/tileblit.c +@@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, + int softback_lines, int fg, int bg) + { + struct fb_tilecursor cursor; +- int use_sw = (vc->vc_cursor_type & 0x01); ++ int use_sw = (vc->vc_cursor_type & 0x10); + + cursor.sx = vc->vc_x; + cursor.sy = vc->vc_y; +diff --git a/fs/aio.c b/fs/aio.c +index 26869cd..88f0ed5 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -520,7 +520,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) + ctx->reqs_active--; + + if (unlikely(!ctx->reqs_active && ctx->dead)) +- wake_up(&ctx->wait); ++ wake_up_all(&ctx->wait); + } + + static void aio_fput_routine(struct work_struct *data) +@@ -1229,7 +1229,7 @@ static void io_destroy(struct kioctx *ioctx) + * by other CPUs at this point. Right now, we rely on the + * locking done by the above calls to ensure this consistency. 
+ */ +- wake_up(&ioctx->wait); ++ wake_up_all(&ioctx->wait); + put_ioctx(ioctx); /* once for the lookup */ + } + +diff --git a/fs/dcache.c b/fs/dcache.c +index a39fe47..1baddc1 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1612,10 +1612,13 @@ struct dentry *d_obtain_alias(struct inode *inode) + __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first); + spin_unlock(&tmp->d_lock); + spin_unlock(&inode->i_lock); ++ security_d_instantiate(tmp, inode); + + return tmp; + + out_iput: ++ if (res && !IS_ERR(res)) ++ security_d_instantiate(res, inode); + iput(inode); + return res; + } +diff --git a/fs/ext3/super.c b/fs/ext3/super.c +index 85c8cc8..0d62f29 100644 +--- a/fs/ext3/super.c ++++ b/fs/ext3/super.c +@@ -1464,6 +1464,13 @@ static void ext3_orphan_cleanup (struct super_block * sb, + return; + } + ++ /* Check if feature set allows readwrite operations */ ++ if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { ++ ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " ++ "unknown ROCOMPAT features"); ++ return; ++ } ++ + if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { + if (es->s_last_orphan) + jbd_debug(1, "Errors on filesystem, " +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index f6a318f..4381efe 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb); + static int ext4_freeze(struct super_block *sb); + static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data); ++static int ext4_feature_set_ok(struct super_block *sb, int readonly); + static void ext4_destroy_lazyinit_thread(void); + static void ext4_unregister_li_request(struct super_block *sb); + static void ext4_clear_request_list(void); +@@ -2120,6 +2121,13 @@ static void ext4_orphan_cleanup(struct super_block *sb, + return; + } + ++ /* Check if feature set would not allow a r/w mount */ ++ if (!ext4_feature_set_ok(sb, 0)) { ++ ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " ++ "unknown ROCOMPAT features"); ++ return; ++ } ++ + if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { + if (es->s_last_orphan) + jbd_debug(1, "Errors on filesystem, " +diff --git a/fs/namespace.c b/fs/namespace.c +index d1edf26..445534b 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -2469,9 +2469,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, + error = user_path_dir(new_root, &new); + if (error) + goto out0; +- error = -EINVAL; +- if (!check_mnt(new.mnt)) +- goto out1; + + error = user_path_dir(put_old, &old); + if (error) +@@ -2491,7 +2488,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, + IS_MNT_SHARED(new.mnt->mnt_parent) || + IS_MNT_SHARED(root.mnt->mnt_parent)) + goto out2; +- if (!check_mnt(root.mnt)) ++ if (!check_mnt(root.mnt) || !check_mnt(new.mnt)) + goto out2; + error = -ENOENT; + if (cant_mount(old.dentry)) +@@ -2515,19 +2512,19 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, + goto out2; /* not attached */ + /* make sure we can reach put_old from new_root */ + tmp = old.mnt; +- br_write_lock(vfsmount_lock); + if (tmp != new.mnt) { + for (;;) { + if (tmp->mnt_parent == tmp) +- goto out3; /* already mounted on put_old */ ++ goto out2; /* already mounted on put_old */ + if (tmp->mnt_parent == new.mnt) + break; + tmp = tmp->mnt_parent; + } + if (!is_subdir(tmp->mnt_mountpoint, new.dentry)) +- goto out3; ++ goto out2; + } else if (!is_subdir(old.dentry, new.dentry)) +- goto out3; ++ goto out2; ++ br_write_lock(vfsmount_lock); + 
detach_mnt(new.mnt, &parent_path); + detach_mnt(root.mnt, &root_parent); + /* mount old root on put_old */ +@@ -2550,9 +2547,6 @@ out1: + path_put(&new); + out0: + return error; +-out3: +- br_write_unlock(vfsmount_lock); +- goto out2; + } + + static void __init init_mount_tree(void) +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 42b92d7..b5fcbf7 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -1214,13 +1214,17 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) + #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) + static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait) + { ++ int ret; ++ + if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags)) + return 1; +- if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags, +- NFS_INO_COMMIT, nfs_wait_bit_killable, +- TASK_KILLABLE)) +- return 1; +- return 0; ++ if (!may_wait) ++ return 0; ++ ret = out_of_line_wait_on_bit_lock(&nfsi->flags, ++ NFS_INO_COMMIT, ++ nfs_wait_bit_killable, ++ TASK_KILLABLE); ++ return (ret < 0) ? ret : 1; + } + + static void nfs_commit_clear_lock(struct nfs_inode *nfsi) +@@ -1396,9 +1400,10 @@ int nfs_commit_inode(struct inode *inode, int how) + { + LIST_HEAD(head); + int may_wait = how & FLUSH_SYNC; +- int res = 0; ++ int res; + +- if (!nfs_commit_set_lock(NFS_I(inode), may_wait)) ++ res = nfs_commit_set_lock(NFS_I(inode), may_wait); ++ if (res <= 0) + goto out_mark_dirty; + spin_lock(&inode->i_lock); + res = nfs_scan_commit(inode, &head, 0, 0); +@@ -1407,12 +1412,14 @@ int nfs_commit_inode(struct inode *inode, int how) + int error = nfs_commit_list(inode, &head, how); + if (error < 0) + return error; +- if (may_wait) +- wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, +- nfs_wait_bit_killable, +- TASK_KILLABLE); +- else ++ if (!may_wait) + goto out_mark_dirty; ++ error = wait_on_bit(&NFS_I(inode)->flags, ++ NFS_INO_COMMIT, ++ nfs_wait_bit_killable, ++ TASK_KILLABLE); ++ if (error < 0) ++ return error; + } else + nfs_commit_clear_lock(NFS_I(inode)); + return res; +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index db52546..5fcb139 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -984,8 +984,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *, + void *); + enum nfsd4_op_flags { + ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */ +- ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */ +- ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */ ++ ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */ ++ ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */ + }; + + struct nfsd4_operation { +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 7b566ec..f0e448a 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -316,64 +316,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE]; + static struct list_head client_lru; + static struct list_head close_lru; + +-static void unhash_generic_stateid(struct nfs4_stateid *stp) +-{ +- list_del(&stp->st_hash); +- list_del(&stp->st_perfile); +- list_del(&stp->st_perstateowner); +-} +- +-static void free_generic_stateid(struct nfs4_stateid *stp) +-{ +- put_nfs4_file(stp->st_file); +- kmem_cache_free(stateid_slab, stp); +-} +- +-static void release_lock_stateid(struct nfs4_stateid *stp) +-{ +- struct file *file; +- +- unhash_generic_stateid(stp); +- file = find_any_file(stp->st_file); +- if (file) +- locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); +- free_generic_stateid(stp); +-} 
+- +-static void unhash_lockowner(struct nfs4_stateowner *sop) +-{ +- struct nfs4_stateid *stp; +- +- list_del(&sop->so_idhash); +- list_del(&sop->so_strhash); +- list_del(&sop->so_perstateid); +- while (!list_empty(&sop->so_stateids)) { +- stp = list_first_entry(&sop->so_stateids, +- struct nfs4_stateid, st_perstateowner); +- release_lock_stateid(stp); +- } +-} +- +-static void release_lockowner(struct nfs4_stateowner *sop) +-{ +- unhash_lockowner(sop); +- nfs4_put_stateowner(sop); +-} +- +-static void +-release_stateid_lockowners(struct nfs4_stateid *open_stp) +-{ +- struct nfs4_stateowner *lock_sop; +- +- while (!list_empty(&open_stp->st_lockowners)) { +- lock_sop = list_entry(open_stp->st_lockowners.next, +- struct nfs4_stateowner, so_perstateid); +- /* list_del(&open_stp->st_lockowners); */ +- BUG_ON(lock_sop->so_is_open_owner); +- release_lockowner(lock_sop); +- } +-} +- + /* + * We store the NONE, READ, WRITE, and BOTH bits separately in the + * st_{access,deny}_bmap field of the stateid, in order to track not +@@ -446,13 +388,71 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp) + return nfs4_access_to_omode(access); + } + +-static void release_open_stateid(struct nfs4_stateid *stp) ++static void unhash_generic_stateid(struct nfs4_stateid *stp) ++{ ++ list_del(&stp->st_hash); ++ list_del(&stp->st_perfile); ++ list_del(&stp->st_perstateowner); ++} ++ ++static void free_generic_stateid(struct nfs4_stateid *stp) + { + int oflag = nfs4_access_bmap_to_omode(stp); + ++ nfs4_file_put_access(stp->st_file, oflag); ++ put_nfs4_file(stp->st_file); ++ kmem_cache_free(stateid_slab, stp); ++} ++ ++static void release_lock_stateid(struct nfs4_stateid *stp) ++{ ++ struct file *file; ++ ++ unhash_generic_stateid(stp); ++ file = find_any_file(stp->st_file); ++ if (file) ++ locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); ++ free_generic_stateid(stp); ++} ++ ++static void unhash_lockowner(struct nfs4_stateowner *sop) ++{ ++ struct nfs4_stateid *stp; ++ ++ list_del(&sop->so_idhash); ++ list_del(&sop->so_strhash); ++ list_del(&sop->so_perstateid); ++ while (!list_empty(&sop->so_stateids)) { ++ stp = list_first_entry(&sop->so_stateids, ++ struct nfs4_stateid, st_perstateowner); ++ release_lock_stateid(stp); ++ } ++} ++ ++static void release_lockowner(struct nfs4_stateowner *sop) ++{ ++ unhash_lockowner(sop); ++ nfs4_put_stateowner(sop); ++} ++ ++static void ++release_stateid_lockowners(struct nfs4_stateid *open_stp) ++{ ++ struct nfs4_stateowner *lock_sop; ++ ++ while (!list_empty(&open_stp->st_lockowners)) { ++ lock_sop = list_entry(open_stp->st_lockowners.next, ++ struct nfs4_stateowner, so_perstateid); ++ /* list_del(&open_stp->st_lockowners); */ ++ BUG_ON(lock_sop->so_is_open_owner); ++ release_lockowner(lock_sop); ++ } ++} ++ ++static void release_open_stateid(struct nfs4_stateid *stp) ++{ + unhash_generic_stateid(stp); + release_stateid_lockowners(stp); +- nfs4_file_put_access(stp->st_file, oflag); + free_generic_stateid(stp); + } + +@@ -3735,6 +3735,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc + stp->st_stateid.si_stateownerid = sop->so_id; + stp->st_stateid.si_fileid = fp->fi_id; + stp->st_stateid.si_generation = 0; ++ stp->st_access_bmap = 0; + stp->st_deny_bmap = open_stp->st_deny_bmap; + stp->st_openstp = open_stp; + +@@ -3749,6 +3750,17 @@ check_lock_length(u64 offset, u64 length) + LOFF_OVERFLOW(offset, length))); + } + ++static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access) ++{ ++ struct nfs4_file *fp = 
lock_stp->st_file; ++ int oflag = nfs4_access_to_omode(access); ++ ++ if (test_bit(access, &lock_stp->st_access_bmap)) ++ return; ++ nfs4_file_get_access(fp, oflag); ++ __set_bit(access, &lock_stp->st_access_bmap); ++} ++ + /* + * LOCK operation + */ +@@ -3765,7 +3777,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + struct file_lock conflock; + __be32 status = 0; + unsigned int strhashval; +- unsigned int cmd; + int err; + + dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", +@@ -3847,22 +3858,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + switch (lock->lk_type) { + case NFS4_READ_LT: + case NFS4_READW_LT: +- if (find_readable_file(lock_stp->st_file)) { +- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ); +- filp = find_readable_file(lock_stp->st_file); +- } ++ filp = find_readable_file(lock_stp->st_file); ++ if (filp) ++ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); + file_lock.fl_type = F_RDLCK; +- cmd = F_SETLK; +- break; ++ break; + case NFS4_WRITE_LT: + case NFS4_WRITEW_LT: +- if (find_writeable_file(lock_stp->st_file)) { +- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE); +- filp = find_writeable_file(lock_stp->st_file); +- } ++ filp = find_writeable_file(lock_stp->st_file); ++ if (filp) ++ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); + file_lock.fl_type = F_WRLCK; +- cmd = F_SETLK; +- break; ++ break; + default: + status = nfserr_inval; + goto out; +@@ -3886,7 +3893,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + * Note: locks.c uses the BKL to protect the inode's lock list. + */ + +- err = vfs_lock_file(filp, cmd, &file_lock, &conflock); ++ err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock); + switch (-err) { + case 0: /* success! */ + update_stateid(&lock_stp->st_stateid); +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 615f0a9..c6766af 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, + + u32 dummy; + char *machine_name; +- int i, j; ++ int i; + int nr_secflavs; + + READ_BUF(16); +@@ -1215,8 +1215,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, + READ_BUF(4); + READ32(dummy); + READ_BUF(dummy * 4); +- for (j = 0; j < dummy; ++j) +- READ32(dummy); + break; + case RPC_AUTH_GSS: + dprintk("RPC_AUTH_GSS callback secflavor " +@@ -1232,7 +1230,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, + READ_BUF(4); + READ32(dummy); + READ_BUF(dummy); +- p += XDR_QUADLEN(dummy); + break; + default: + dprintk("Illegal callback secflavor\n"); +diff --git a/fs/proc/array.c b/fs/proc/array.c +index 7c99c1c..5e4f776 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -489,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + vsize, + mm ? get_mm_rss(mm) : 0, + rsslim, +- mm ? mm->start_code : 0, +- mm ? mm->end_code : 0, ++ mm ? (permitted ? mm->start_code : 1) : 0, ++ mm ? (permitted ? mm->end_code : 1) : 0, + (permitted && mm) ? 
mm->start_stack : 0, + esp, + eip, +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 60b9148..f269ee6 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -249,8 +249,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + const char *name = arch_vma_name(vma); + if (!name) { + if (mm) { +- if (vma->vm_start <= mm->start_brk && +- vma->vm_end >= mm->brk) { ++ if (vma->vm_start <= mm->brk && ++ vma->vm_end >= mm->start_brk) { + name = "[heap]"; + } else if (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack) { +diff --git a/fs/super.c b/fs/super.c +index 7e9dd4c..0d89e93 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type) + #else + INIT_LIST_HEAD(&s->s_files); + #endif ++ s->s_bdi = &default_backing_dev_info; + INIT_LIST_HEAD(&s->s_instances); + INIT_HLIST_BL_HEAD(&s->s_anon); + INIT_LIST_HEAD(&s->s_inodes); +@@ -1003,6 +1004,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void + } + BUG_ON(!mnt->mnt_sb); + WARN_ON(!mnt->mnt_sb->s_bdi); ++ WARN_ON(mnt->mnt_sb->s_bdi == &default_backing_dev_info); + mnt->mnt_sb->s_flags |= MS_BORN; + + error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); +diff --git a/fs/sync.c b/fs/sync.c +index ba76b96..412dc89 100644 +--- a/fs/sync.c ++++ b/fs/sync.c +@@ -33,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) + * This should be safe, as we require bdi backing to actually + * write out data in the first place + */ +- if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info) ++ if (sb->s_bdi == &noop_backing_dev_info) + return 0; + + if (sb->s_qcop && sb->s_qcop->quota_sync) +@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem); + + static void sync_one_sb(struct super_block *sb, void *arg) + { +- if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi) ++ if (!(sb->s_flags & MS_RDONLY)) + __sync_filesystem(sb, *(int *)arg); + } + /* +diff --git a/include/linux/compaction.h b/include/linux/compaction.h +index dfa2ed4..cc9f7a4 100644 +--- a/include/linux/compaction.h ++++ b/include/linux/compaction.h +@@ -11,9 +11,6 @@ + /* The full zone was compacted */ + #define COMPACT_COMPLETE 3 + +-#define COMPACT_MODE_DIRECT_RECLAIM 0 +-#define COMPACT_MODE_KSWAPD 1 +- + #ifdef CONFIG_COMPACTION + extern int sysctl_compact_memory; + extern int sysctl_compaction_handler(struct ctl_table *table, int write, +@@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist, + bool sync); + extern unsigned long compaction_suitable(struct zone *zone, int order); + extern unsigned long compact_zone_order(struct zone *zone, int order, +- gfp_t gfp_mask, bool sync, +- int compact_mode); ++ gfp_t gfp_mask, bool sync); + + /* Do not skip compaction more than 64 times */ + #define COMPACT_MAX_DEFER_SHIFT 6 +@@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order) + } + + static inline unsigned long compact_zone_order(struct zone *zone, int order, +- gfp_t gfp_mask, bool sync, +- int compact_mode) ++ gfp_t gfp_mask, bool sync) + { + return COMPACT_CONTINUE; + } +diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h +index 1908929..a3c1874 100644 +--- a/include/linux/ethtool.h ++++ b/include/linux/ethtool.h +@@ -13,6 +13,9 @@ + #ifndef _LINUX_ETHTOOL_H + #define _LINUX_ETHTOOL_H + ++#ifdef __KERNEL__ ++#include ++#endif + #include + #include + +@@ -449,6 +452,37 @@ struct ethtool_rxnfc { + __u32 rule_locs[0]; + }; + ++#ifdef 
__KERNEL__ ++#ifdef CONFIG_COMPAT ++ ++struct compat_ethtool_rx_flow_spec { ++ u32 flow_type; ++ union { ++ struct ethtool_tcpip4_spec tcp_ip4_spec; ++ struct ethtool_tcpip4_spec udp_ip4_spec; ++ struct ethtool_tcpip4_spec sctp_ip4_spec; ++ struct ethtool_ah_espip4_spec ah_ip4_spec; ++ struct ethtool_ah_espip4_spec esp_ip4_spec; ++ struct ethtool_usrip4_spec usr_ip4_spec; ++ struct ethhdr ether_spec; ++ u8 hdata[72]; ++ } h_u, m_u; ++ compat_u64 ring_cookie; ++ u32 location; ++}; ++ ++struct compat_ethtool_rxnfc { ++ u32 cmd; ++ u32 flow_type; ++ compat_u64 data; ++ struct compat_ethtool_rx_flow_spec fs; ++ u32 rule_cnt; ++ u32 rule_locs[0]; ++}; ++ ++#endif /* CONFIG_COMPAT */ ++#endif /* __KERNEL__ */ ++ + /** + * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection + * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR +diff --git a/include/linux/mm.h b/include/linux/mm.h +index f6385fc..c67adb4 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page) + /* + * PageBuddy() indicate that the page is free and in the buddy system + * (see mm/page_alloc.c). ++ * ++ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to ++ * -2 so that an underflow of the page_mapcount() won't be mistaken ++ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very ++ * efficiently by most CPU architectures. + */ ++#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) ++ + static inline int PageBuddy(struct page *page) + { +- return atomic_read(&page->_mapcount) == -2; ++ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; + } + + static inline void __SetPageBuddy(struct page *page) + { + VM_BUG_ON(atomic_read(&page->_mapcount) != -1); +- atomic_set(&page->_mapcount, -2); ++ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); + } + + static inline void __ClearPageBuddy(struct page *page) +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index b24d702..bcc7336 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -1813,10 +1813,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) + + /* Update the css_set linked lists if we're using them */ + write_lock(&css_set_lock); +- if (!list_empty(&tsk->cg_list)) { +- list_del(&tsk->cg_list); +- list_add(&tsk->cg_list, &newcg->tasks); +- } ++ if (!list_empty(&tsk->cg_list)) ++ list_move(&tsk->cg_list, &newcg->tasks); + write_unlock(&css_set_lock); + + for_each_subsys(root, ss) { +@@ -3655,12 +3653,12 @@ again: + spin_lock(&release_list_lock); + set_bit(CGRP_REMOVED, &cgrp->flags); + if (!list_empty(&cgrp->release_list)) +- list_del(&cgrp->release_list); ++ list_del_init(&cgrp->release_list); + spin_unlock(&release_list_lock); + + cgroup_lock_hierarchy(cgrp->root); + /* delete this cgroup from parent->children */ +- list_del(&cgrp->sibling); ++ list_del_init(&cgrp->sibling); + cgroup_unlock_hierarchy(cgrp->root); + + d = dget(cgrp->dentry); +@@ -3879,7 +3877,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) + subsys[ss->subsys_id] = NULL; + + /* remove subsystem from rootnode's list of subsystems */ +- list_del(&ss->sibling); ++ list_del_init(&ss->sibling); + + /* + * disentangle the css from all css_sets attached to the dummytop. 
as +@@ -4253,7 +4251,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) + if (!list_empty(&tsk->cg_list)) { + write_lock(&css_set_lock); + if (!list_empty(&tsk->cg_list)) +- list_del(&tsk->cg_list); ++ list_del_init(&tsk->cg_list); + write_unlock(&css_set_lock); + } + +diff --git a/kernel/perf_event.c b/kernel/perf_event.c +index b22a2ef..ad02fea 100644 +--- a/kernel/perf_event.c ++++ b/kernel/perf_event.c +@@ -6115,17 +6115,20 @@ __perf_event_exit_task(struct perf_event *child_event, + struct perf_event_context *child_ctx, + struct task_struct *child) + { +- struct perf_event *parent_event; ++ if (child_event->parent) { ++ raw_spin_lock_irq(&child_ctx->lock); ++ perf_group_detach(child_event); ++ raw_spin_unlock_irq(&child_ctx->lock); ++ } + + perf_event_remove_from_context(child_event); + +- parent_event = child_event->parent; + /* +- * It can happen that parent exits first, and has events ++ * It can happen that the parent exits first, and has events + * that are still around due to the child reference. These +- * events need to be zapped - but otherwise linger. ++ * events need to be zapped. + */ +- if (parent_event) { ++ if (child_event->parent) { + sync_child_event(child_event, child); + free_event(child_event); + } +diff --git a/kernel/signal.c b/kernel/signal.c +index 4e3cff1..3175186 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -2421,9 +2421,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, + return -EFAULT; + + /* Not even root can pretend to send signals from the kernel. +- Nor can they impersonate a kill(), which adds source info. */ +- if (info.si_code >= 0) ++ * Nor can they impersonate a kill()/tgkill(), which adds source info. ++ */ ++ if (info.si_code != SI_QUEUE) { ++ /* We used to allow any < 0 si_code */ ++ WARN_ON_ONCE(info.si_code < 0); + return -EPERM; ++ } + info.si_signo = sig; + + /* POSIX.1b doesn't mention process groups. */ +@@ -2437,9 +2441,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) + return -EINVAL; + + /* Not even root can pretend to send signals from the kernel. +- Nor can they impersonate a kill(), which adds source info. */ +- if (info->si_code >= 0) ++ * Nor can they impersonate a kill()/tgkill(), which adds source info. 
++ */ ++ if (info->si_code != SI_QUEUE) { ++ /* We used to allow any < 0 si_code */ ++ WARN_ON_ONCE(info->si_code < 0); + return -EPERM; ++ } + info->si_signo = sig; + + return do_send_specific(tgid, pid, sig, info); +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 4eed0af..443fd20 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -169,6 +169,11 @@ static int proc_taint(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + #endif + ++#ifdef CONFIG_PRINTK ++static int proc_dmesg_restrict(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos); ++#endif ++ + #ifdef CONFIG_MAGIC_SYSRQ + /* Note: sysrq code uses it's own private copy */ + static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE; +@@ -713,7 +718,7 @@ static struct ctl_table kern_table[] = { + .data = &kptr_restrict, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec_minmax, ++ .proc_handler = proc_dmesg_restrict, + .extra1 = &zero, + .extra2 = &two, + }, +@@ -2397,6 +2402,17 @@ static int proc_taint(struct ctl_table *table, int write, + return err; + } + ++#ifdef CONFIG_PRINTK ++static int proc_dmesg_restrict(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ if (write && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ return proc_dointvec_minmax(table, write, buffer, lenp, ppos); ++} ++#endif ++ + struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index 027100d..8e4ed88 100644 +--- a/mm/backing-dev.c ++++ b/mm/backing-dev.c +@@ -604,7 +604,7 @@ static void bdi_prune_sb(struct backing_dev_info *bdi) + spin_lock(&sb_lock); + list_for_each_entry(sb, &super_blocks, s_list) { + if (sb->s_bdi == bdi) +- sb->s_bdi = NULL; ++ sb->s_bdi = &default_backing_dev_info; + } + spin_unlock(&sb_lock); + } +diff --git a/mm/compaction.c b/mm/compaction.c +index 8be430b8..dcb058b 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -42,8 +42,6 @@ struct compact_control { + unsigned int order; /* order a direct compactor needs */ + int migratetype; /* MOVABLE, RECLAIMABLE etc */ + struct zone *zone; +- +- int compact_mode; + }; + + static unsigned long release_freepages(struct list_head *freelist) +@@ -397,10 +395,7 @@ static int compact_finished(struct zone *zone, + return COMPACT_COMPLETE; + + /* Compaction run is not finished if the watermark is not met */ +- if (cc->compact_mode != COMPACT_MODE_KSWAPD) +- watermark = low_wmark_pages(zone); +- else +- watermark = high_wmark_pages(zone); ++ watermark = low_wmark_pages(zone); + watermark += (1 << cc->order); + + if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) +@@ -413,15 +408,6 @@ static int compact_finished(struct zone *zone, + if (cc->order == -1) + return COMPACT_CONTINUE; + +- /* +- * Generating only one page of the right order is not enough +- * for kswapd, we must continue until we're above the high +- * watermark as a pool for high order GFP_ATOMIC allocations +- * too. +- */ +- if (cc->compact_mode == COMPACT_MODE_KSWAPD) +- return COMPACT_CONTINUE; +- + /* Direct compactor: Is a suitable page free? 
*/ + for (order = cc->order; order < MAX_ORDER; order++) { + /* Job done if page is free of the right migratetype */ +@@ -543,8 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) + + unsigned long compact_zone_order(struct zone *zone, + int order, gfp_t gfp_mask, +- bool sync, +- int compact_mode) ++ bool sync) + { + struct compact_control cc = { + .nr_freepages = 0, +@@ -553,7 +538,6 @@ unsigned long compact_zone_order(struct zone *zone, + .migratetype = allocflags_to_migratetype(gfp_mask), + .zone = zone, + .sync = sync, +- .compact_mode = compact_mode, + }; + INIT_LIST_HEAD(&cc.freepages); + INIT_LIST_HEAD(&cc.migratepages); +@@ -599,8 +583,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, + nodemask) { + int status; + +- status = compact_zone_order(zone, order, gfp_mask, sync, +- COMPACT_MODE_DIRECT_RECLAIM); ++ status = compact_zone_order(zone, order, gfp_mask, sync); + rc = max(status, rc); + + /* If a normal allocation would succeed, stop compacting */ +@@ -631,7 +614,6 @@ static int compact_node(int nid) + .nr_freepages = 0, + .nr_migratepages = 0, + .order = -1, +- .compact_mode = COMPACT_MODE_DIRECT_RECLAIM, + }; + + zone = &pgdat->node_zones[zoneid]; +diff --git a/mm/oom_kill.c b/mm/oom_kill.c +index 7dcca55..33b5861 100644 +--- a/mm/oom_kill.c ++++ b/mm/oom_kill.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + int sysctl_panic_on_oom; + int sysctl_oom_kill_allocating_task; +@@ -292,13 +293,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, + unsigned long totalpages, struct mem_cgroup *mem, + const nodemask_t *nodemask) + { +- struct task_struct *p; ++ struct task_struct *g, *p; + struct task_struct *chosen = NULL; + *ppoints = 0; + +- for_each_process(p) { ++ do_each_thread(g, p) { + unsigned int points; + ++ if (!p->mm) ++ continue; + if (oom_unkillable_task(p, mem, nodemask)) + continue; + +@@ -314,22 +317,29 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, + if (test_tsk_thread_flag(p, TIF_MEMDIE)) + return ERR_PTR(-1UL); + +- /* +- * This is in the process of releasing memory so wait for it +- * to finish before killing some other task by mistake. +- * +- * However, if p is the current task, we allow the 'kill' to +- * go ahead if it is exiting: this will simply set TIF_MEMDIE, +- * which will allow it to gain access to memory reserves in +- * the process of exiting and releasing its resources. +- * Otherwise we could get an easy OOM deadlock. +- */ +- if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) { +- if (p != current) +- return ERR_PTR(-1UL); +- +- chosen = p; +- *ppoints = 1000; ++ if (p->flags & PF_EXITING) { ++ /* ++ * If p is the current task and is in the process of ++ * releasing memory, we allow the "kill" to set ++ * TIF_MEMDIE, which will allow it to gain access to ++ * memory reserves. Otherwise, it may stall forever. ++ * ++ * The loop isn't broken here, however, in case other ++ * threads are found to have already been oom killed. ++ */ ++ if (p == current) { ++ chosen = p; ++ *ppoints = 1000; ++ } else { ++ /* ++ * If this task is not being ptraced on exit, ++ * then wait for it to finish before killing ++ * some other task unnecessarily. 
++ */ ++ if (!(task_ptrace(p->group_leader) & ++ PT_TRACE_EXIT)) ++ return ERR_PTR(-1UL); ++ } + } + + points = oom_badness(p, mem, nodemask, totalpages); +@@ -337,7 +347,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, + chosen = p; + *ppoints = points; + } +- } ++ } while_each_thread(g, p); + + return chosen; + } +@@ -491,6 +501,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, + list_for_each_entry(child, &t->children, sibling) { + unsigned int child_points; + ++ if (child->mm == p->mm) ++ continue; + /* + * oom_badness() returns 0 if the thread is unkillable + */ +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index cdef1d4..2828037 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -286,7 +286,7 @@ static void bad_page(struct page *page) + + /* Don't complain about poisoned pages */ + if (PageHWPoison(page)) { +- __ClearPageBuddy(page); ++ reset_page_mapcount(page); /* remove PageBuddy */ + return; + } + +@@ -317,7 +317,7 @@ static void bad_page(struct page *page) + dump_stack(); + out: + /* Leave bad fields for debug, except PageBuddy could make trouble */ +- __ClearPageBuddy(page); ++ reset_page_mapcount(page); /* remove PageBuddy */ + add_taint(TAINT_BAD_PAGE); + } + +diff --git a/mm/shmem.c b/mm/shmem.c +index 5ee67c99..5ac23d5 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2791,5 +2791,6 @@ int shmem_zero_setup(struct vm_area_struct *vma) + fput(vma->vm_file); + vma->vm_file = file; + vma->vm_ops = &shmem_vm_ops; ++ vma->vm_flags |= VM_CAN_NONLINEAR; + return 0; + } +diff --git a/mm/slab.c b/mm/slab.c +index 37961d1f..4c6e2e3 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -2288,8 +2288,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, + if (ralign < align) { + ralign = align; + } +- /* disable debug if not aligning with REDZONE_ALIGN */ +- if (ralign & (__alignof__(unsigned long long) - 1)) ++ /* disable debug if necessary */ ++ if (ralign > __alignof__(unsigned long long)) + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); + /* + * 4) Store it. +@@ -2315,8 +2315,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, + */ + if (flags & SLAB_RED_ZONE) { + /* add space for red zone words */ +- cachep->obj_offset += align; +- size += align + sizeof(unsigned long long); ++ cachep->obj_offset += sizeof(unsigned long long); ++ size += 2 * sizeof(unsigned long long); + } + if (flags & SLAB_STORE_USER) { + /* user store requires one word storage behind the end of +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 0341c57..6d6d28c 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -2149,8 +2149,13 @@ bad_swap_2: + p->flags = 0; + spin_unlock(&swap_lock); + vfree(swap_map); +- if (swap_file) ++ if (swap_file) { ++ if (did_down) { ++ mutex_unlock(&inode->i_mutex); ++ did_down = 0; ++ } + filp_close(swap_file, NULL); ++ } + out: + if (page && !IS_ERR(page)) { + kunmap(page); +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 6771ea7..3b4a41d 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -2397,7 +2397,6 @@ loop_again: + * cause too much scanning of the lower zones. 
+ */ + for (i = 0; i <= end_zone; i++) { +- int compaction; + struct zone *zone = pgdat->node_zones + i; + int nr_slab; + +@@ -2428,24 +2427,9 @@ loop_again: + sc.nr_reclaimed += reclaim_state->reclaimed_slab; + total_scanned += sc.nr_scanned; + +- compaction = 0; +- if (order && +- zone_watermark_ok(zone, 0, +- high_wmark_pages(zone), +- end_zone, 0) && +- !zone_watermark_ok(zone, order, +- high_wmark_pages(zone), +- end_zone, 0)) { +- compact_zone_order(zone, +- order, +- sc.gfp_mask, false, +- COMPACT_MODE_KSWAPD); +- compaction = 1; +- } +- + if (zone->all_unreclaimable) + continue; +- if (!compaction && nr_slab == 0 && ++ if (nr_slab == 0 && + !zone_reclaimable(zone)) + zone->all_unreclaimable = 1; + /* +diff --git a/net/socket.c b/net/socket.c +index ac2219f..29c7df0 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -2583,23 +2583,123 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) + + static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) + { ++ struct compat_ethtool_rxnfc __user *compat_rxnfc; ++ bool convert_in = false, convert_out = false; ++ size_t buf_size = ALIGN(sizeof(struct ifreq), 8); ++ struct ethtool_rxnfc __user *rxnfc; + struct ifreq __user *ifr; ++ u32 rule_cnt = 0, actual_rule_cnt; ++ u32 ethcmd; + u32 data; +- void __user *datap; ++ int ret; ++ ++ if (get_user(data, &ifr32->ifr_ifru.ifru_data)) ++ return -EFAULT; + +- ifr = compat_alloc_user_space(sizeof(*ifr)); ++ compat_rxnfc = compat_ptr(data); + +- if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) ++ if (get_user(ethcmd, &compat_rxnfc->cmd)) + return -EFAULT; + +- if (get_user(data, &ifr32->ifr_ifru.ifru_data)) ++ /* Most ethtool structures are defined without padding. ++ * Unfortunately struct ethtool_rxnfc is an exception. ++ */ ++ switch (ethcmd) { ++ default: ++ break; ++ case ETHTOOL_GRXCLSRLALL: ++ /* Buffer size is variable */ ++ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) ++ return -EFAULT; ++ if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) ++ return -ENOMEM; ++ buf_size += rule_cnt * sizeof(u32); ++ /* fall through */ ++ case ETHTOOL_GRXRINGS: ++ case ETHTOOL_GRXCLSRLCNT: ++ case ETHTOOL_GRXCLSRULE: ++ convert_out = true; ++ /* fall through */ ++ case ETHTOOL_SRXCLSRLDEL: ++ case ETHTOOL_SRXCLSRLINS: ++ buf_size += sizeof(struct ethtool_rxnfc); ++ convert_in = true; ++ break; ++ } ++ ++ ifr = compat_alloc_user_space(buf_size); ++ rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); ++ ++ if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) + return -EFAULT; + +- datap = compat_ptr(data); +- if (put_user(datap, &ifr->ifr_ifru.ifru_data)) ++ if (put_user(convert_in ? rxnfc : compat_ptr(data), ++ &ifr->ifr_ifru.ifru_data)) + return -EFAULT; + +- return dev_ioctl(net, SIOCETHTOOL, ifr); ++ if (convert_in) { ++ /* We expect there to be holes between fs.m_u and ++ * fs.ring_cookie and at the end of fs, but nowhere else. 
++ */ ++ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) + ++ sizeof(compat_rxnfc->fs.m_u) != ++ offsetof(struct ethtool_rxnfc, fs.m_u) + ++ sizeof(rxnfc->fs.m_u)); ++ BUILD_BUG_ON( ++ offsetof(struct compat_ethtool_rxnfc, fs.location) - ++ offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != ++ offsetof(struct ethtool_rxnfc, fs.location) - ++ offsetof(struct ethtool_rxnfc, fs.ring_cookie)); ++ ++ if (copy_in_user(rxnfc, compat_rxnfc, ++ (void *)(&rxnfc->fs.m_u + 1) - ++ (void *)rxnfc) || ++ copy_in_user(&rxnfc->fs.ring_cookie, ++ &compat_rxnfc->fs.ring_cookie, ++ (void *)(&rxnfc->fs.location + 1) - ++ (void *)&rxnfc->fs.ring_cookie) || ++ copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, ++ sizeof(rxnfc->rule_cnt))) ++ return -EFAULT; ++ } ++ ++ ret = dev_ioctl(net, SIOCETHTOOL, ifr); ++ if (ret) ++ return ret; ++ ++ if (convert_out) { ++ if (copy_in_user(compat_rxnfc, rxnfc, ++ (const void *)(&rxnfc->fs.m_u + 1) - ++ (const void *)rxnfc) || ++ copy_in_user(&compat_rxnfc->fs.ring_cookie, ++ &rxnfc->fs.ring_cookie, ++ (const void *)(&rxnfc->fs.location + 1) - ++ (const void *)&rxnfc->fs.ring_cookie) || ++ copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, ++ sizeof(rxnfc->rule_cnt))) ++ return -EFAULT; ++ ++ if (ethcmd == ETHTOOL_GRXCLSRLALL) { ++ /* As an optimisation, we only copy the actual ++ * number of rules that the underlying ++ * function returned. Since Mallory might ++ * change the rule count in user memory, we ++ * check that it is less than the rule count ++ * originally given (as the user buffer size), ++ * which has been range-checked. ++ */ ++ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) ++ return -EFAULT; ++ if (actual_rule_cnt < rule_cnt) ++ rule_cnt = actual_rule_cnt; ++ if (copy_in_user(&compat_rxnfc->rule_locs[0], ++ &rxnfc->rule_locs[0], ++ rule_cnt * sizeof(u32))) ++ return -EFAULT; ++ } ++ } ++ ++ return 0; + } + + static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index be96d42..1e336a0 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport) + if (sk == NULL) + return; + ++ transport->srcport = 0; ++ + write_lock_bh(&sk->sk_callback_lock); + transport->inet = NULL; + transport->sock = NULL; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index acd2099..c2eb6a7 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -16085,9 +16085,12 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec, + return err; + } else { + const char *name = pfx; +- if (!name) ++ int index = i; ++ if (!name) { + name = chname[i]; +- err = __alc861_create_out_sw(codec, name, nid, i, 3); ++ index = 0; ++ } ++ err = __alc861_create_out_sw(codec, name, nid, index, 3); + if (err < 0) + return err; + } +@@ -17238,16 +17241,19 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, + return err; + } else { + const char *name = pfx; +- if (!name) ++ int index = i; ++ if (!name) { + name = chname[i]; ++ index = 0; ++ } + err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, +- name, i, ++ name, index, + HDA_COMPOSE_AMP_VAL(nid_v, 3, 0, + HDA_OUTPUT)); + if (err < 0) + return err; + err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, +- name, i, ++ name, index, + HDA_COMPOSE_AMP_VAL(nid_s, 3, 2, + HDA_INPUT)); + if (err < 0) +@@ -19296,12 +19302,15 @@ static int alc662_auto_create_multi_out_ctls(struct 
hda_codec *codec, + return err; + } else { + const char *name = pfx; +- if (!name) ++ int index = i; ++ if (!name) { + name = chname[i]; +- err = __alc662_add_vol_ctl(spec, name, nid, i, 3); ++ index = 0; ++ } ++ err = __alc662_add_vol_ctl(spec, name, nid, index, 3); + if (err < 0) + return err; +- err = __alc662_add_sw_ctl(spec, name, mix, i, 3); ++ err = __alc662_add_sw_ctl(spec, name, mix, index, 3); + if (err < 0) + return err; + } +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 052062d..8566119 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -94,6 +94,7 @@ enum { + STAC_92HD83XXX_REF, + STAC_92HD83XXX_PWR_REF, + STAC_DELL_S14, ++ STAC_DELL_E5520M, + STAC_92HD83XXX_HP, + STAC_HP_DV7_4000, + STAC_92HD83XXX_MODELS +@@ -1657,6 +1658,13 @@ static unsigned int dell_s14_pin_configs[10] = { + 0x40f000f0, 0x40f000f0, + }; + ++/* Switch int mic from 0x20 to 0x11 */ ++static unsigned int dell_e5520m_pin_configs[10] = { ++ 0x04a11020, 0x0421101f, 0x400000f0, 0x90170110, ++ 0x23011050, 0x23a1102e, 0x400000f3, 0xd5a30130, ++ 0x400000f0, 0x40f000f0, ++}; ++ + static unsigned int hp_dv7_4000_pin_configs[10] = { + 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110, + 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140, +@@ -1667,6 +1675,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { + [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, + [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, + [STAC_DELL_S14] = dell_s14_pin_configs, ++ [STAC_DELL_E5520M] = dell_e5520m_pin_configs, + [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, + }; + +@@ -1675,6 +1684,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { + [STAC_92HD83XXX_REF] = "ref", + [STAC_92HD83XXX_PWR_REF] = "mic-ref", + [STAC_DELL_S14] = "dell-s14", ++ [STAC_DELL_E5520M] = "dell-e5520m", + [STAC_92HD83XXX_HP] = "hp", + [STAC_HP_DV7_4000] = "hp-dv7-4000", + }; +@@ -1687,6 +1697,14 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { + "DFI LanParty", STAC_92HD83XXX_REF), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, + "unknown Dell", STAC_DELL_S14), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049a, ++ "Dell E5520", STAC_DELL_E5520M), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049b, ++ "Dell E5420", STAC_DELL_E5520M), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04eb, ++ "Dell E5420m", STAC_DELL_E5520M), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04ec, ++ "Dell E5520m", STAC_DELL_E5520M), + SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600, + "HP", STAC_92HD83XXX_HP), + {} /* terminator */ +diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c +index 63b0054..acc4579 100644 +--- a/sound/pci/hda/patch_via.c ++++ b/sound/pci/hda/patch_via.c +@@ -159,6 +159,7 @@ struct via_spec { + #endif + }; + ++static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec); + static struct via_spec * via_new_spec(struct hda_codec *codec) + { + struct via_spec *spec; +@@ -169,6 +170,10 @@ static struct via_spec * via_new_spec(struct hda_codec *codec) + + codec->spec = spec; + spec->codec = codec; ++ spec->codec_type = get_codec_type(codec); ++ /* VT1708BCE & VT1708S are almost same */ ++ if (spec->codec_type == VT1708BCE) ++ spec->codec_type = VT1708S; + return spec; + } + +@@ -1101,6 +1106,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol, + struct hda_codec *codec = snd_kcontrol_chip(kcontrol); + struct via_spec *spec = codec->spec; + unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ++ int ret; + + if 
(!spec->mux_nids[adc_idx]) + return -EINVAL; +@@ -1109,12 +1115,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol, + AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0) + snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0, + AC_VERB_SET_POWER_STATE, AC_PWRST_D0); +- /* update jack power state */ +- set_jack_power_state(codec); + +- return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, ++ ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, + spec->mux_nids[adc_idx], + &spec->cur_mux[adc_idx]); ++ /* update jack power state */ ++ set_jack_power_state(codec); ++ ++ return ret; + } + + static int via_independent_hp_info(struct snd_kcontrol *kcontrol, +@@ -1188,8 +1196,16 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol, + /* Get Independent Mode index of headphone pin widget */ + spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel + ? 1 : 0; +- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel); ++ if (spec->codec_type == VT1718S) ++ snd_hda_codec_write(codec, nid, 0, ++ AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0); ++ else ++ snd_hda_codec_write(codec, nid, 0, ++ AC_VERB_SET_CONNECT_SEL, pinsel); + ++ if (spec->codec_type == VT1812) ++ snd_hda_codec_write(codec, 0x35, 0, ++ AC_VERB_SET_CONNECT_SEL, pinsel); + if (spec->multiout.hp_nid && spec->multiout.hp_nid + != spec->multiout.dac_nids[HDA_FRONT]) + snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid, +@@ -1208,6 +1224,8 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol, + activate_ctl(codec, "Headphone Playback Switch", + spec->hp_independent_mode); + } ++ /* update jack power state */ ++ set_jack_power_state(codec); + return 0; + } + +@@ -1248,9 +1266,12 @@ static int via_hp_build(struct hda_codec *codec) + break; + } + +- nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS); +- if (nums <= 1) +- return 0; ++ if (spec->codec_type != VT1708) { ++ nums = snd_hda_get_connections(codec, nid, ++ conn, HDA_MAX_CONNECTIONS); ++ if (nums <= 1) ++ return 0; ++ } + + knew = via_clone_control(spec, &via_hp_mixer[0]); + if (knew == NULL) +@@ -1310,6 +1331,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute) + start_idx = 2; + end_idx = 4; + break; ++ case VT1718S: ++ nid_mixer = 0x21; ++ start_idx = 1; ++ end_idx = 3; ++ break; + default: + return; + } +@@ -2185,10 +2211,6 @@ static int via_init(struct hda_codec *codec) + for (i = 0; i < spec->num_iverbs; i++) + snd_hda_sequence_write(codec, spec->init_verbs[i]); + +- spec->codec_type = get_codec_type(codec); +- if (spec->codec_type == VT1708BCE) +- spec->codec_type = VT1708S; /* VT1708BCE & VT1708S are almost +- same */ + /* Lydia Add for EAPD enable */ + if (!spec->dig_in_nid) { /* No Digital In connection */ + if (spec->dig_in_pin) { +@@ -2438,7 +2460,14 @@ static int vt_auto_create_analog_input_ctls(struct hda_codec *codec, + else + type_idx = 0; + label = hda_get_autocfg_input_label(codec, cfg, i); +- err = via_new_analog_input(spec, label, type_idx, idx, cap_nid); ++ if (spec->codec_type == VT1708S || ++ spec->codec_type == VT1702 || ++ spec->codec_type == VT1716S) ++ err = via_new_analog_input(spec, label, type_idx, ++ idx+1, cap_nid); ++ else ++ err = via_new_analog_input(spec, label, type_idx, ++ idx, cap_nid); + if (err < 0) + return err; + snd_hda_add_imux_item(imux, label, idx, NULL); +diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c +index e76847a..48ffd40 100644 +--- a/sound/soc/codecs/uda134x.c ++++ b/sound/soc/codecs/uda134x.c +@@ 
-486,7 +486,8 @@ static struct snd_soc_dai_driver uda134x_dai = { + static int uda134x_soc_probe(struct snd_soc_codec *codec) + { + struct uda134x_priv *uda134x; +- struct uda134x_platform_data *pd = dev_get_drvdata(codec->card->dev); ++ struct uda134x_platform_data *pd = codec->card->dev->platform_data; ++ + int ret; + + printk(KERN_INFO "UDA134X SoC Audio Codec\n"); +diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c +index 2c09e93..86f1dc4 100644 +--- a/sound/soc/samsung/s3c24xx_uda134x.c ++++ b/sound/soc/samsung/s3c24xx_uda134x.c +@@ -226,7 +226,7 @@ static struct snd_soc_ops s3c24xx_uda134x_ops = { + static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = { + .name = "UDA134X", + .stream_name = "UDA134X", +- .codec_name = "uda134x-hifi", ++ .codec_name = "uda134x-codec", + .codec_dai_name = "uda134x-hifi", + .cpu_dai_name = "s3c24xx-iis", + .ops = &s3c24xx_uda134x_ops, +@@ -321,6 +321,7 @@ static int s3c24xx_uda134x_probe(struct platform_device *pdev) + + platform_set_drvdata(s3c24xx_uda134x_snd_device, + &snd_soc_s3c24xx_uda134x); ++ platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x)); + ret = platform_device_add(s3c24xx_uda134x_snd_device); + if (ret) { + printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n"); diff --git a/patches.kernel.org/patch-2.6.38.2-3 b/patches.kernel.org/patch-2.6.38.2-3 new file mode 100644 index 0000000..10d4ea6 --- /dev/null +++ b/patches.kernel.org/patch-2.6.38.2-3 @@ -0,0 +1,3905 @@ +From: Greg Kroah-Hartman +Subject: Linux 2.6.38.3 +Patch-mainline: Linux 2.6.38.3 + + +Signed-off-by: Greg Kroah-Hartman + +diff --git a/Makefile b/Makefile +index 6c15525..e47e39e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + VERSION = 2 + PATCHLEVEL = 6 + SUBLEVEL = 38 +-EXTRAVERSION = .2 ++EXTRAVERSION = .3 + NAME = Flesh-Eating Bats with Fangs + + # *DOCUMENTATION* +diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c +index 09d31db..aa92696 100644 +--- a/arch/powerpc/kernel/time.c ++++ b/arch/powerpc/kernel/time.c +@@ -356,7 +356,7 @@ void account_system_vtime(struct task_struct *tsk) + } + get_paca()->user_time_scaled += user_scaled; + +- if (in_irq() || idle_task(smp_processor_id()) != tsk) { ++ if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { + account_system_time(tsk, 0, delta, sys_scaled); + if (stolen) + account_steal_time(stolen); +diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S +index 8fe2a49..4292df7 100644 +--- a/arch/x86/crypto/aesni-intel_asm.S ++++ b/arch/x86/crypto/aesni-intel_asm.S +@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt: + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10, %xmm0 + ++ + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) + sub $16, %r11 + add %r13, %r11 +@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt: + # GHASH computation for the last <16 byte block + sub %r13, %r11 + add $16, %r11 +- PSHUFB_XMM %xmm10, %xmm1 ++ ++ movdqa SHUF_MASK(%rip), %xmm10 ++ PSHUFB_XMM %xmm10, %xmm0 + + # shuffle xmm0 back to output as ciphertext + +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c +index e1e60c7..b375b2a 100644 +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm) + struct cryptd_aead *cryptd_tfm; + struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) + PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); ++ struct crypto_aead 
*cryptd_child; ++ struct aesni_rfc4106_gcm_ctx *child_ctx; + cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); ++ ++ cryptd_child = cryptd_aead_child(cryptd_tfm); ++ child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); ++ memcpy(child_ctx, ctx, sizeof(*ctx)); + ctx->cryptd_tfm = cryptd_tfm; + tfm->crt_aead.reqsize = sizeof(struct aead_request) + + crypto_aead_reqsize(&cryptd_tfm->base); +@@ -925,6 +931,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, + int ret = 0; + struct crypto_tfm *tfm = crypto_aead_tfm(parent); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); ++ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); ++ struct aesni_rfc4106_gcm_ctx *child_ctx = ++ aesni_rfc4106_gcm_ctx_get(cryptd_child); + u8 *new_key_mem = NULL; + + if (key_len < 4) { +@@ -968,6 +977,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, + goto exit; + } + ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); ++ memcpy(child_ctx, ctx, sizeof(*ctx)); + exit: + kfree(new_key_mem); + return ret; +@@ -999,7 +1009,6 @@ static int rfc4106_encrypt(struct aead_request *req) + int ret; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); +- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + + if (!irq_fpu_usable()) { + struct aead_request *cryptd_req = +@@ -1008,6 +1017,7 @@ static int rfc4106_encrypt(struct aead_request *req) + aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_aead_encrypt(cryptd_req); + } else { ++ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + kernel_fpu_begin(); + ret = cryptd_child->base.crt_aead.encrypt(req); + kernel_fpu_end(); +@@ -1020,7 +1030,6 @@ static int rfc4106_decrypt(struct aead_request *req) + int ret; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); +- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + + if (!irq_fpu_usable()) { + struct aead_request *cryptd_req = +@@ -1029,6 +1038,7 @@ static int rfc4106_decrypt(struct aead_request *req) + aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_aead_decrypt(cryptd_req); + } else { ++ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + kernel_fpu_begin(); + ret = cryptd_child->base.crt_aead.decrypt(req); + kernel_fpu_end(); +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c +index bebabec..151787e 100644 +--- a/arch/x86/kernel/cpu/mtrr/main.c ++++ b/arch/x86/kernel/cpu/mtrr/main.c +@@ -292,14 +292,24 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ + + /* + * HACK! +- * We use this same function to initialize the mtrrs on boot. +- * The state of the boot cpu's mtrrs has been saved, and we want +- * to replicate across all the APs. +- * If we're doing that @reg is set to something special... ++ * ++ * We use this same function to initialize the mtrrs during boot, ++ * resume, runtime cpu online and on an explicit request to set a ++ * specific MTRR. ++ * ++ * During boot or suspend, the state of the boot cpu's mtrrs has been ++ * saved, and we want to replicate that across all the cpus that come ++ * online (either at the end of boot or resume or during a runtime cpu ++ * online). 
If we're doing that, @reg is set to something special and on ++ * this cpu we still do mtrr_if->set_all(). During boot/resume, this ++ * is unnecessary if at this point we are still on the cpu that started ++ * the boot/resume sequence. But there is no guarantee that we are still ++ * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be ++ * sure that we are in sync with everyone else. + */ + if (reg != ~0U) + mtrr_if->set(reg, base, size, type); +- else if (!mtrr_aps_delayed_init) ++ else + mtrr_if->set_all(); + + /* Wait for the others */ +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c +index 5655c22..2d2673c 100644 +--- a/arch/x86/kernel/head64.c ++++ b/arch/x86/kernel/head64.c +@@ -77,6 +77,9 @@ void __init x86_64_start_kernel(char * real_mode_data) + /* Make NULL pointers segfault */ + zap_identity_mappings(); + ++ /* Cleanup the over mapped high alias */ ++ cleanup_highmap(); ++ + max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; + + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index e543fe9..d3cfe26 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -297,9 +297,6 @@ static void __init init_gbpages(void) + static inline void init_gbpages(void) + { + } +-static void __init cleanup_highmap(void) +-{ +-} + #endif + + static void __init reserve_brk(void) +@@ -925,8 +922,6 @@ void __init setup_arch(char **cmdline_p) + */ + reserve_brk(); + +- cleanup_highmap(); +- + memblock.current_limit = get_max_mapped(); + memblock_x86_fill(); + +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index f13ff3a..947f42a 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -279,6 +279,25 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, + load_cr3(swapper_pg_dir); + #endif + ++#ifdef CONFIG_X86_64 ++ if (!after_bootmem && !start) { ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ mmu_cr4_features = read_cr4(); ++ ++ /* ++ * _brk_end cannot change anymore, but it and _end may be ++ * located on different 2M pages. cleanup_highmap(), however, ++ * can only consider _end when it runs, so destroy any ++ * mappings beyond _brk_end here. ++ */ ++ pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); ++ pmd = pmd_offset(pud, _brk_end - 1); ++ while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) ++ pmd_clear(pmd); ++ } ++#endif + __flush_tlb_all(); + + if (!after_bootmem && e820_table_end > e820_table_start) +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index 68f9921..c14a542 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -51,7 +51,6 @@ + #include + #include + #include +-#include + + static int __init parse_direct_gbpages_off(char *arg) + { +@@ -294,18 +293,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) + * to the compile time generated pmds. This results in invalid pmds up + * to the point where we hit the physaddr 0 mapping. + * +- * We limit the mappings to the region from _text to _brk_end. _brk_end +- * is rounded up to the 2MB boundary. This catches the invalid pmds as ++ * We limit the mappings to the region from _text to _end. _end is ++ * rounded up to the 2MB boundary. 
This catches the invalid pmds as + * well, as they are located before _text: + */ + void __init cleanup_highmap(void) + { + unsigned long vaddr = __START_KERNEL_map; +- unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); +- unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; ++ unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; + pmd_t *pmd = level2_kernel_pgt; ++ pmd_t *last_pmd = pmd + PTRS_PER_PMD; + +- for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { ++ for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) { + if (pmd_none(*pmd)) + continue; + if (vaddr < (unsigned long) _text || vaddr > end) +diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c +index 8524939..c7358dd 100644 +--- a/drivers/acpi/pci_root.c ++++ b/drivers/acpi/pci_root.c +@@ -564,7 +564,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) + /* Indicate support for various _OSC capabilities. */ + if (pci_ext_cfg_avail(root->bus->self)) + flags |= OSC_EXT_PCI_CONFIG_SUPPORT; +- if (pcie_aspm_enabled()) ++ if (pcie_aspm_support_enabled()) + flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | + OSC_CLOCK_PWR_CAPABILITY_SUPPORT; + if (pci_msi_enabled()) +diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c +index 25ef1a4..b836e11 100644 +--- a/drivers/atm/solos-pci.c ++++ b/drivers/atm/solos-pci.c +@@ -165,7 +165,6 @@ static uint32_t fpga_tx(struct solos_card *); + static irqreturn_t solos_irq(int irq, void *dev_id); + static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); + static int list_vccs(int vci); +-static void release_vccs(struct atm_dev *dev); + static int atm_init(struct solos_card *, struct device *); + static void atm_remove(struct solos_card *); + static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); +@@ -384,7 +383,6 @@ static int process_status(struct solos_card *card, int port, struct sk_buff *skb + /* Anything but 'Showtime' is down */ + if (strcmp(state_str, "Showtime")) { + atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST); +- release_vccs(card->atmdev[port]); + dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str); + return 0; + } +@@ -697,7 +695,7 @@ void solos_bh(unsigned long card_arg) + size); + } + if (atmdebug) { +- dev_info(&card->dev->dev, "Received: device %d\n", port); ++ dev_info(&card->dev->dev, "Received: port %d\n", port); + dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", + size, le16_to_cpu(header->vpi), + le16_to_cpu(header->vci)); +@@ -830,28 +828,6 @@ static int list_vccs(int vci) + return num_found; + } + +-static void release_vccs(struct atm_dev *dev) +-{ +- int i; +- +- write_lock_irq(&vcc_sklist_lock); +- for (i = 0; i < VCC_HTABLE_SIZE; i++) { +- struct hlist_head *head = &vcc_hash[i]; +- struct hlist_node *node, *tmp; +- struct sock *s; +- struct atm_vcc *vcc; +- +- sk_for_each_safe(s, node, tmp, head) { +- vcc = atm_sk(s); +- if (vcc->dev == dev) { +- vcc_release_async(vcc, -EPIPE); +- sk_del_node_init(s); +- } +- } +- } +- write_unlock_irq(&vcc_sklist_lock); +-} +- + + static int popen(struct atm_vcc *vcc) + { +@@ -1018,8 +994,15 @@ static uint32_t fpga_tx(struct solos_card *card) + + /* Clean up and free oldskb now it's gone */ + if (atmdebug) { ++ struct pkt_hdr *header = (void *)oldskb->data; ++ int size = le16_to_cpu(header->size); ++ ++ skb_pull(oldskb, sizeof(*header)); + dev_info(&card->dev->dev, "Transmitted: port %d\n", + port); ++ dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", ++ size, 
le16_to_cpu(header->vpi), ++ le16_to_cpu(header->vci)); + print_buffer(oldskb); + } + +@@ -1262,7 +1245,7 @@ static int atm_init(struct solos_card *card, struct device *parent) + card->atmdev[i]->ci_range.vci_bits = 16; + card->atmdev[i]->dev_data = card; + card->atmdev[i]->phy_data = (void *)(unsigned long)i; +- atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_UNKNOWN); ++ atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND); + + skb = alloc_skb(sizeof(*header), GFP_ATOMIC); + if (!skb) { +diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h +index 579f749..554bbd9 100644 +--- a/drivers/block/cciss.h ++++ b/drivers/block/cciss.h +@@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) + h->ctlr, c->busaddr); + #endif /* CCISS_DEBUG */ + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); ++ readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); + h->commands_outstanding++; + if ( h->commands_outstanding > h->max_outstanding) + h->max_outstanding = h->commands_outstanding; +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 700a384..f44ca40 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -71,6 +71,9 @@ static struct usb_device_id btusb_table[] = { + /* Apple MacBookAir3,1, MacBookAir3,2 */ + { USB_DEVICE(0x05ac, 0x821b) }, + ++ /* Apple MacBookPro8,2 */ ++ { USB_DEVICE(0x05ac, 0x821a) }, ++ + /* AVM BlueFRITZ! USB v2.0 */ + { USB_DEVICE(0x057c, 0x3800) }, + +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c +index 1f46f1c..7beb0e2 100644 +--- a/drivers/char/tpm/tpm.c ++++ b/drivers/char/tpm/tpm.c +@@ -980,7 +980,7 @@ int tpm_open(struct inode *inode, struct file *file) + return -EBUSY; + } + +- chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); ++ chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); + if (chip->data_buffer == NULL) { + clear_bit(0, &chip->is_open); + put_device(chip->dev); +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index 23e0355..7e0e660 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -2765,7 +2765,7 @@ static int __init amd64_edac_init(void) + mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); + ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); + if (!(mcis && ecc_stngs)) +- goto err_ret; ++ goto err_free; + + msrs = msrs_alloc(); + if (!msrs) +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 02d5c41..99768d9 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -675,7 +675,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + ATOM_ENCODER_CAP_RECORD *cap_record; + u16 caps = 0; + +- while (record->ucRecordType > 0 && ++ while (record->ucRecordSize > 0 && ++ record->ucRecordType > 0 && + record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { + switch (record->ucRecordType) { + case ATOM_ENCODER_CAP_RECORD_TYPE: +@@ -720,7 +721,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + break; + } + +- while (record->ucRecordType > 0 && ++ while (record->ucRecordSize > 0 && ++ record->ucRecordType > 0 && + record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { + switch (record->ucRecordType) { + case ATOM_I2C_RECORD_TYPE: +@@ -782,10 +784,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + ATOM_HPD_INT_RECORD *hpd_record; + ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; + +- while 
(record->ucRecordType > 0 +- && record-> +- ucRecordType <= +- ATOM_MAX_OBJECT_RECORD_NUMBER) { ++ while (record->ucRecordSize > 0 && ++ record->ucRecordType > 0 && ++ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { + switch (record->ucRecordType) { + case ATOM_I2C_RECORD_TYPE: + i2c_record = +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 318cc40..418c399 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -418,6 +418,8 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h + input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, + 2565, 4, 0); + } ++ ++ input_set_events_per_packet(input, 60); + } + + if (report_undeciphered) { +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index aa186cf..e06e045 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -836,8 +836,8 @@ static const struct dmi_system_id __initconst toshiba_dmi_table[] = { + }, + + }, +- { } + #endif ++ { } + }; + + static bool broken_olpc_ec; +@@ -851,8 +851,8 @@ static const struct dmi_system_id __initconst olpc_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "XO"), + }, + }, +- { } + #endif ++ { } + }; + + void __init synaptics_module_init(void) +diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c +index 80a3ae3..c0cff64 100644 +--- a/drivers/leds/leds-lp5521.c ++++ b/drivers/leds/leds-lp5521.c +@@ -534,7 +534,7 @@ static ssize_t lp5521_selftest(struct device *dev, + } + + /* led class device attributes */ +-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); ++static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); + static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); + + static struct attribute *lp5521_led_attributes[] = { +@@ -548,15 +548,15 @@ static struct attribute_group lp5521_led_attribute_group = { + }; + + /* device attributes */ +-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, + show_engine1_mode, store_engine1_mode); +-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, + show_engine2_mode, store_engine2_mode); +-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, + show_engine3_mode, store_engine3_mode); +-static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); +-static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); +-static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); ++static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load); ++static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); ++static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load); + static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); + + static struct attribute *lp5521_attributes[] = { +diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c +index d0c4068..e19fed2 100644 +--- a/drivers/leds/leds-lp5523.c ++++ b/drivers/leds/leds-lp5523.c +@@ -713,7 +713,7 @@ static ssize_t store_current(struct device *dev, + } + + /* led class device attributes */ +-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); ++static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); + static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); + + static struct attribute *lp5523_led_attributes[] = { 
+@@ -727,21 +727,21 @@ static struct attribute_group lp5523_led_attribute_group = { + }; + + /* device attributes */ +-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, + show_engine1_mode, store_engine1_mode); +-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, + show_engine2_mode, store_engine2_mode); +-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, + show_engine3_mode, store_engine3_mode); +-static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR, + show_engine1_leds, store_engine1_leds); +-static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR, + show_engine2_leds, store_engine2_leds); +-static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, ++static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR, + show_engine3_leds, store_engine3_leds); +-static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); +-static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); +-static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); ++static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load); ++static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); ++static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load); + static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); + + static struct attribute *lp5523_attributes[] = { +diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig +index ecdffa6..c090246 100644 +--- a/drivers/media/radio/Kconfig ++++ b/drivers/media/radio/Kconfig +@@ -441,6 +441,7 @@ config RADIO_TIMBERDALE + config RADIO_WL1273 + tristate "Texas Instruments WL1273 I2C FM Radio" + depends on I2C && VIDEO_V4L2 ++ select MFD_CORE + select MFD_WL1273_CORE + select FW_LOADER + ---help--- +diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c +index df33a1d..a794ae6 100644 +--- a/drivers/media/video/tlg2300/pd-video.c ++++ b/drivers/media/video/tlg2300/pd-video.c +@@ -764,10 +764,8 @@ static int pd_vidioc_s_fmt(struct poseidon *pd, struct v4l2_pix_format *pix) + } + ret |= send_set_req(pd, VIDEO_ROSOLU_SEL, + vid_resol, &cmd_status); +- if (ret || cmd_status) { +- mutex_unlock(&pd->lock); ++ if (ret || cmd_status) + return -EBUSY; +- } + + pix_def->pixelformat = pix->pixelformat; /* save it */ + pix->height = (context->tvnormid & V4L2_STD_525_60) ? 
480 : 576; +diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c +index 4193af5..1707d22 100644 +--- a/drivers/mfd/ab3100-core.c ++++ b/drivers/mfd/ab3100-core.c +@@ -613,7 +613,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) + ab3100_get_priv.ab3100 = ab3100; + ab3100_get_priv.mode = false; + ab3100_get_reg_file = debugfs_create_file("get_reg", +- S_IWUGO, ab3100_dir, &ab3100_get_priv, ++ S_IWUSR, ab3100_dir, &ab3100_get_priv, + &ab3100_get_set_reg_fops); + if (!ab3100_get_reg_file) { + err = -ENOMEM; +@@ -623,7 +623,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) + ab3100_set_priv.ab3100 = ab3100; + ab3100_set_priv.mode = true; + ab3100_set_reg_file = debugfs_create_file("set_reg", +- S_IWUGO, ab3100_dir, &ab3100_set_priv, ++ S_IWUSR, ab3100_dir, &ab3100_set_priv, + &ab3100_get_set_reg_fops); + if (!ab3100_set_reg_file) { + err = -ENOMEM; +diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c +index 5fbca34..681984d 100644 +--- a/drivers/mfd/ab3550-core.c ++++ b/drivers/mfd/ab3550-core.c +@@ -1053,17 +1053,17 @@ static inline void ab3550_setup_debugfs(struct ab3550 *ab) + goto exit_destroy_dir; + + ab3550_bank_file = debugfs_create_file("register-bank", +- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops); ++ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_bank_fops); + if (!ab3550_bank_file) + goto exit_destroy_reg; + + ab3550_address_file = debugfs_create_file("register-address", +- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops); ++ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_address_fops); + if (!ab3550_address_file) + goto exit_destroy_bank; + + ab3550_val_file = debugfs_create_file("register-value", +- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops); ++ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_val_fops); + if (!ab3550_val_file) + goto exit_destroy_address; + +diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c +index 3c1541a..64748e4 100644 +--- a/drivers/mfd/ab8500-debugfs.c ++++ b/drivers/mfd/ab8500-debugfs.c +@@ -585,18 +585,18 @@ static int __devinit ab8500_debug_probe(struct platform_device *plf) + goto exit_destroy_dir; + + ab8500_bank_file = debugfs_create_file("register-bank", +- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops); ++ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops); + if (!ab8500_bank_file) + goto exit_destroy_reg; + + ab8500_address_file = debugfs_create_file("register-address", +- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, ++ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, + &ab8500_address_fops); + if (!ab8500_address_file) + goto exit_destroy_bank; + + ab8500_val_file = debugfs_create_file("register-value", +- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops); ++ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops); + if (!ab8500_val_file) + goto exit_destroy_address; + +diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c +index 46b3439..16d7179 100644 +--- a/drivers/misc/ep93xx_pwm.c ++++ b/drivers/misc/ep93xx_pwm.c +@@ -249,11 +249,11 @@ static ssize_t ep93xx_pwm_set_invert(struct device *dev, + + static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); + static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); +-static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO, ++static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO, + ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); +-static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO, ++static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO, + 
ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); +-static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO, ++static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO, + ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); + + static struct attribute *ep93xx_pwm_attrs[] = { +diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c +index ea5cfe2..24386a8 100644 +--- a/drivers/net/myri10ge/myri10ge.c ++++ b/drivers/net/myri10ge/myri10ge.c +@@ -3645,6 +3645,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) + dma_free_coherent(&pdev->dev, bytes, + ss->fw_stats, ss->fw_stats_bus); + ss->fw_stats = NULL; ++ netif_napi_del(&ss->napi); + } + } + kfree(mgp->ss); +diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c +index 587498e..3de98cb 100644 +--- a/drivers/net/netxen/netxen_nic_ethtool.c ++++ b/drivers/net/netxen/netxen_nic_ethtool.c +@@ -901,7 +901,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data) + struct netxen_adapter *adapter = netdev_priv(netdev); + int hw_lro; + +- if (data & ~ETH_FLAG_LRO) ++ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) + return -EINVAL; + + if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) +diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c +index 4c14510..45b2755 100644 +--- a/drivers/net/qlcnic/qlcnic_ethtool.c ++++ b/drivers/net/qlcnic/qlcnic_ethtool.c +@@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int hw_lro; + +- if (data & ~ETH_FLAG_LRO) ++ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) + return -EINVAL; + + if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) +diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c +index 39c17ce..0cdff2b 100644 +--- a/drivers/net/s2io.c ++++ b/drivers/net/s2io.c +@@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data) + int rc = 0; + int changed = 0; + +- if (data & ~ETH_FLAG_LRO) ++ if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO)) + return -EINVAL; + + if (data & ETH_FLAG_LRO) { +diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c +index 81254be..51f2ef1 100644 +--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c ++++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c +@@ -304,8 +304,8 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) + u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 
0 : 1; + unsigned long flags; + +- if (data & ~ETH_FLAG_LRO) +- return -EOPNOTSUPP; ++ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) ++ return -EINVAL; + + if (lro_requested ^ lro_present) { + /* toggle the LRO feature*/ +diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c +index 1dd3a21..c5eb034 100644 +--- a/drivers/net/vxge/vxge-ethtool.c ++++ b/drivers/net/vxge/vxge-ethtool.c +@@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data) + struct vxgedev *vdev = netdev_priv(dev); + enum vxge_hw_status status; + +- if (data & ~ETH_FLAG_RXHASH) +- return -EOPNOTSUPP; ++ if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH)) ++ return -EINVAL; + + if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) + return 0; +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index a09d15f..0848e09 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -1063,6 +1063,8 @@ static int ath9k_start(struct ieee80211_hw *hw) + "Starting driver with initial channel: %d MHz\n", + curchan->center_freq); + ++ ath9k_ps_wakeup(sc); ++ + mutex_lock(&sc->mutex); + + if (ath9k_wiphy_started(sc)) { +@@ -1179,6 +1181,8 @@ static int ath9k_start(struct ieee80211_hw *hw) + mutex_unlock: + mutex_unlock(&sc->mutex); + ++ ath9k_ps_restore(sc); ++ + return r; + } + +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c +index 07b7804..5c9d83b 100644 +--- a/drivers/net/wireless/ath/ath9k/xmit.c ++++ b/drivers/net/wireless/ath/ath9k/xmit.c +@@ -1699,8 +1699,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, + u8 tidno; + + spin_lock_bh(&txctl->txq->axq_lock); +- +- if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) { ++ if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && ++ ieee80211_is_data_qos(hdr->frame_control)) { + tidno = ieee80211_get_qos_ctl(hdr)[0] & + IEEE80211_QOS_CTL_TID_MASK; + tid = ATH_AN_2_TID(txctl->an, tidno); +diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c +index 3d5566e..ff0f5ba 100644 +--- a/drivers/net/wireless/b43/dma.c ++++ b/drivers/net/wireless/b43/dma.c +@@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) + dmaaddr = meta->dmaaddr; + goto drop_recycle_buffer; + } +- if (unlikely(len > ring->rx_buffersize)) { ++ if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) { + /* The data did not fit into one descriptor buffer + * and is split over multiple buffers. 
+ * This should never happen, as we try to allocate buffers +diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h +index a01c210..e8a80a1 100644 +--- a/drivers/net/wireless/b43/dma.h ++++ b/drivers/net/wireless/b43/dma.h +@@ -163,7 +163,7 @@ struct b43_dmadesc_generic { + /* DMA engine tuning knobs */ + #define B43_TXRING_SLOTS 256 + #define B43_RXRING_SLOTS 64 +-#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN ++#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN) + + /* Pointer poison */ + #define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM)) +diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h +index 9e6f313..c0cd307 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h ++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h +@@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr { + + /* 6x00 Specific */ + #define EEPROM_6000_TX_POWER_VERSION (4) +-#define EEPROM_6000_EEPROM_VERSION (0x434) ++#define EEPROM_6000_EEPROM_VERSION (0x423) + + /* 6x50 Specific */ + #define EEPROM_6050_TX_POWER_VERSION (4) +diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c +index 9b344a9..e183587 100644 +--- a/drivers/net/wireless/p54/p54usb.c ++++ b/drivers/net/wireless/p54/p54usb.c +@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { + {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ + {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ + {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ ++ {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */ + {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ + {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ + {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ +@@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { + {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ + {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ + {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ ++ {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */ + {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ + {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ + +diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c +index 54917a2..e2a528d 100644 +--- a/drivers/net/wireless/rt2x00/rt2800lib.c ++++ b/drivers/net/wireless/rt2x00/rt2800lib.c +@@ -2810,10 +2810,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) + + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); +- rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); +- rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); +- rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + /* Wait for DMA, ignore error */ +@@ -2823,9 +2820,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 0); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 0); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); +- +- rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); +- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); + } + EXPORT_SYMBOL_GPL(rt2800_disable_radio); + +diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c +index 3b3f1e4..37a38b5 100644 +--- 
a/drivers/net/wireless/rt2x00/rt2800pci.c ++++ b/drivers/net/wireless/rt2x00/rt2800pci.c +@@ -475,39 +475,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) + + static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) + { +- u32 reg; +- +- rt2800_disable_radio(rt2x00dev); +- +- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); +- +- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); +- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); +- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); +- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); +- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); +- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); +- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); +- rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); +- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); +- +- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); +- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); ++ if (rt2x00_is_soc(rt2x00dev)) { ++ rt2800_disable_radio(rt2x00dev); ++ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); ++ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); ++ } + } + + static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) + { +- /* +- * Always put the device to sleep (even when we intend to wakeup!) +- * if the device is booting and wasn't asleep it will return +- * failure when attempting to wakeup. +- */ +- rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2); +- + if (state == STATE_AWAKE) { +- rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); ++ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02); + rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); ++ } else if (state == STATE_SLEEP) { ++ rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); ++ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); ++ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01); + } + + return 0; +diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c +index 9597a03..2b77a29 100644 +--- a/drivers/net/wireless/rt2x00/rt2x00dev.c ++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c +@@ -1031,8 +1031,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) + * Stop all work. + */ + cancel_work_sync(&rt2x00dev->intf_work); +- cancel_work_sync(&rt2x00dev->rxdone_work); +- cancel_work_sync(&rt2x00dev->txdone_work); ++ if (rt2x00_is_usb(rt2x00dev)) { ++ cancel_work_sync(&rt2x00dev->rxdone_work); ++ cancel_work_sync(&rt2x00dev->txdone_work); ++ } + + /* + * Free the tx status fifo. 
+diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c +index e64403b..6ec06a4 100644 +--- a/drivers/net/wireless/wl12xx/testmode.c ++++ b/drivers/net/wireless/wl12xx/testmode.c +@@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) + + kfree(wl->nvs); + +- wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); ++ if (len != sizeof(struct wl1271_nvs_file)) ++ return -EINVAL; ++ ++ wl->nvs = kzalloc(len, GFP_KERNEL); + if (!wl->nvs) { + wl1271_error("could not allocate memory for the nvs file"); + ret = -ENOMEM; +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 3188cd9..bbdb4fd 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -69,6 +69,7 @@ struct pcie_link_state { + }; + + static int aspm_disabled, aspm_force, aspm_clear_state; ++static bool aspm_support_enabled = true; + static DEFINE_MUTEX(aspm_lock); + static LIST_HEAD(link_list); + +@@ -896,6 +897,7 @@ static int __init pcie_aspm_disable(char *str) + { + if (!strcmp(str, "off")) { + aspm_disabled = 1; ++ aspm_support_enabled = false; + printk(KERN_INFO "PCIe ASPM is disabled\n"); + } else if (!strcmp(str, "force")) { + aspm_force = 1; +@@ -930,3 +932,8 @@ int pcie_aspm_enabled(void) + } + EXPORT_SYMBOL(pcie_aspm_enabled); + ++bool pcie_aspm_support_enabled(void) ++{ ++ return aspm_support_enabled; ++} ++EXPORT_SYMBOL(pcie_aspm_support_enabled); +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index 38b34a7..fa54ba7 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -222,6 +222,7 @@ struct acer_debug { + static struct rfkill *wireless_rfkill; + static struct rfkill *bluetooth_rfkill; + static struct rfkill *threeg_rfkill; ++static bool rfkill_inited; + + /* Each low-level interface must define at least some of the following */ + struct wmi_interface { +@@ -1161,9 +1162,13 @@ static int acer_rfkill_set(void *data, bool blocked) + { + acpi_status status; + u32 cap = (unsigned long)data; +- status = set_u32(!blocked, cap); +- if (ACPI_FAILURE(status)) +- return -ENODEV; ++ ++ if (rfkill_inited) { ++ status = set_u32(!blocked, cap); ++ if (ACPI_FAILURE(status)) ++ return -ENODEV; ++ } ++ + return 0; + } + +@@ -1187,14 +1192,16 @@ static struct rfkill *acer_rfkill_register(struct device *dev, + return ERR_PTR(-ENOMEM); + + status = get_device_status(&state, cap); +- if (ACPI_SUCCESS(status)) +- rfkill_init_sw_state(rfkill_dev, !state); + + err = rfkill_register(rfkill_dev); + if (err) { + rfkill_destroy(rfkill_dev); + return ERR_PTR(err); + } ++ ++ if (ACPI_SUCCESS(status)) ++ rfkill_set_sw_state(rfkill_dev, !state); ++ + return rfkill_dev; + } + +@@ -1229,6 +1236,8 @@ static int acer_rfkill_init(struct device *dev) + } + } + ++ rfkill_inited = true; ++ + schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); + + return 0; +diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c +index 37268e9..afeb546 100644 +--- a/drivers/rtc/rtc-ds1511.c ++++ b/drivers/rtc/rtc-ds1511.c +@@ -485,7 +485,7 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj, + static struct bin_attribute ds1511_nvram_attr = { + .attr = { + .name = "nvram", +- .mode = S_IRUGO | S_IWUGO, ++ .mode = S_IRUGO | S_IWUSR, + }, + .size = DS1511_RAM_MAX, + .read = ds1511_nvram_read, +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index f905ecb..01543d2 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ 
b/drivers/scsi/scsi_transport_iscsi.c +@@ -1847,7 +1847,7 @@ store_priv_session_##field(struct device *dev, \ + #define iscsi_priv_session_rw_attr(field, format) \ + iscsi_priv_session_attr_show(field, format) \ + iscsi_priv_session_attr_store(field) \ +-static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ ++static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ + show_priv_session_##field, \ + store_priv_session_##field) + iscsi_priv_session_rw_attr(recovery_tmo, "%d"); +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c +index 7f5a6a8..3b00e90 100644 +--- a/drivers/scsi/ses.c ++++ b/drivers/scsi/ses.c +@@ -390,9 +390,9 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, + len = (desc_ptr[2] << 8) + desc_ptr[3]; + /* skip past overall descriptor */ + desc_ptr += len + 4; +- if (ses_dev->page10) +- addl_desc_ptr = ses_dev->page10 + 8; + } ++ if (ses_dev->page10) ++ addl_desc_ptr = ses_dev->page10 + 8; + type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; + components = 0; + for (i = 0; i < types; i++, type_ptr += 4) { +diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c +index 45a627d..09e596a 100644 +--- a/drivers/staging/hv/channel.c ++++ b/drivers/staging/hv/channel.c +@@ -76,14 +76,14 @@ static void vmbus_setevent(struct vmbus_channel *channel) + + if (channel->offermsg.monitor_allocated) { + /* Each u32 represents 32 channels */ +- set_bit(channel->offermsg.child_relid & 31, ++ sync_set_bit(channel->offermsg.child_relid & 31, + (unsigned long *) gVmbusConnection.SendInterruptPage + + (channel->offermsg.child_relid >> 5)); + + monitorpage = gVmbusConnection.MonitorPages; + monitorpage++; /* Get the child to parent monitor page */ + +- set_bit(channel->monitor_bit, ++ sync_set_bit(channel->monitor_bit, + (unsigned long *)&monitorpage->trigger_group + [channel->monitor_grp].pending); + +@@ -99,7 +99,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) + + if (Channel->offermsg.monitor_allocated) { + /* Each u32 represents 32 channels */ +- clear_bit(Channel->offermsg.child_relid & 31, ++ sync_clear_bit(Channel->offermsg.child_relid & 31, + (unsigned long *)gVmbusConnection.SendInterruptPage + + (Channel->offermsg.child_relid >> 5)); + +@@ -107,7 +107,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) + (struct hv_monitor_page *)gVmbusConnection.MonitorPages; + monitorPage++; /* Get the child to parent monitor page */ + +- clear_bit(Channel->monitor_bit, ++ sync_clear_bit(Channel->monitor_bit, + (unsigned long *)&monitorPage->trigger_group + [Channel->monitor_grp].Pending); + } +diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c +index c2e298f..0739eb7 100644 +--- a/drivers/staging/hv/connection.c ++++ b/drivers/staging/hv/connection.c +@@ -281,7 +281,7 @@ void VmbusOnEvents(void) + for (dword = 0; dword < maxdword; dword++) { + if (recvInterruptPage[dword]) { + for (bit = 0; bit < 32; bit++) { +- if (test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) { ++ if (sync_test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) { + relid = (dword << 5) + bit; + DPRINT_DBG(VMBUS, "event detected for relid - %d", relid); + +@@ -320,7 +320,7 @@ int VmbusPostMessage(void *buffer, size_t bufferLen) + int VmbusSetEvent(u32 childRelId) + { + /* Each u32 represents 32 channels */ +- set_bit(childRelId & 31, ++ sync_set_bit(childRelId & 31, + (unsigned long *)gVmbusConnection.SendInterruptPage + + (childRelId >> 5)); + +diff --git 
a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c +index b41c964..f433add 100644 +--- a/drivers/staging/hv/netvsc_drv.c ++++ b/drivers/staging/hv/netvsc_drv.c +@@ -46,6 +46,7 @@ struct net_device_context { + /* point back to our device context */ + struct vm_device *device_ctx; + unsigned long avail; ++ struct work_struct work; + }; + + struct netvsc_driver_context { +@@ -225,6 +226,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, + unsigned int status) + { + struct vm_device *device_ctx = to_vm_device(device_obj); ++ struct net_device_context *ndev_ctx; + struct net_device *net = dev_get_drvdata(&device_ctx->device); + + if (!net) { +@@ -237,6 +239,8 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, + netif_carrier_on(net); + netif_wake_queue(net); + netif_notify_peers(net); ++ ndev_ctx = netdev_priv(net); ++ schedule_work(&ndev_ctx->work); + } else { + netif_carrier_off(net); + netif_stop_queue(net); +@@ -336,6 +340,25 @@ static const struct net_device_ops device_ops = { + .ndo_set_mac_address = eth_mac_addr, + }; + ++/* ++ * Send GARP packet to network peers after migrations. ++ * After Quick Migration, the network is not immediately operational in the ++ * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add ++ * another netif_notify_peers() into a scheduled work, otherwise GARP packet ++ * will not be sent after quick migration, and cause network disconnection. ++ */ ++static void netvsc_send_garp(struct work_struct *w) ++{ ++ struct net_device_context *ndev_ctx; ++ struct net_device *net; ++ ++ msleep(20); ++ ndev_ctx = container_of(w, struct net_device_context, work); ++ net = dev_get_drvdata(&ndev_ctx->device_ctx->device); ++ netif_notify_peers(net); ++} ++ ++ + static int netvsc_probe(struct device *device) + { + struct driver_context *driver_ctx = +@@ -364,6 +387,7 @@ static int netvsc_probe(struct device *device) + net_device_ctx->device_ctx = device_ctx; + net_device_ctx->avail = ring_size; + dev_set_drvdata(device, net); ++ INIT_WORK(&net_device_ctx->work, netvsc_send_garp); + + /* Notify the netvsc driver of the new device */ + ret = net_drv_obj->base.OnDeviceAdd(device_obj, &device_info); +diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c +index 84fdb64..87e6cf2 100644 +--- a/drivers/staging/hv/vmbus_drv.c ++++ b/drivers/staging/hv/vmbus_drv.c +@@ -291,7 +291,7 @@ static int vmbus_on_isr(struct hv_driver *drv) + event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; + + /* Since we are a child, we only need to check bit 0 */ +- if (test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { ++ if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { + DPRINT_DBG(VMBUS, "received event %d", event->flags32[0]); + ret |= 0x2; + } +diff --git a/drivers/staging/hv/vmbus_private.h b/drivers/staging/hv/vmbus_private.h +index 07f6d22..c75b2d7 100644 +--- a/drivers/staging/hv/vmbus_private.h ++++ b/drivers/staging/hv/vmbus_private.h +@@ -31,6 +31,7 @@ + #include "channel_mgmt.h" + #include "ring_buffer.h" + #include ++#include + + + /* +diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h +index 6ff33e1..90e90f0 100644 +--- a/drivers/staging/iio/imu/adis16400.h ++++ b/drivers/staging/iio/imu/adis16400.h +@@ -17,7 +17,8 @@ + #ifndef SPI_ADIS16400_H_ + #define SPI_ADIS16400_H_ + +-#define ADIS16400_STARTUP_DELAY 220 /* ms */ ++#define ADIS16400_STARTUP_DELAY 290 /* ms */ ++#define ADIS16400_MTEST_DELAY 90 /* 
ms */ + + #define ADIS16400_READ_REG(a) a + #define ADIS16400_WRITE_REG(a) ((a) | 0x80) +diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c +index cfb108a..2107edb 100644 +--- a/drivers/staging/iio/imu/adis16400_core.c ++++ b/drivers/staging/iio/imu/adis16400_core.c +@@ -93,7 +93,6 @@ static int adis16400_spi_write_reg_16(struct device *dev, + .tx_buf = st->tx + 2, + .bits_per_word = 8, + .len = 2, +- .cs_change = 1, + }, + }; + +@@ -137,7 +136,6 @@ static int adis16400_spi_read_reg_16(struct device *dev, + .rx_buf = st->rx, + .bits_per_word = 8, + .len = 2, +- .cs_change = 1, + }, + }; + +@@ -375,7 +373,7 @@ static int adis16400_self_test(struct device *dev) + dev_err(dev, "problem starting self test"); + goto err_ret; + } +- ++ msleep(ADIS16400_MTEST_DELAY); + adis16400_check_status(dev); + + err_ret: +@@ -497,12 +495,12 @@ err_ret: + _reg) + + static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_X, ADIS16400_XGYRO_OFF); +-static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_XGYRO_OFF); +-static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_XGYRO_OFF); ++static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_YGYRO_OFF); ++static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_ZGYRO_OFF); + + static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_X, ADIS16400_XACCL_OFF); +-static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_XACCL_OFF); +-static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_XACCL_OFF); ++static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_YACCL_OFF); ++static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_ZACCL_OFF); + + + static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16400_read_14bit_signed, +diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c +index 33293fb..da28cb4 100644 +--- a/drivers/staging/iio/imu/adis16400_ring.c ++++ b/drivers/staging/iio/imu/adis16400_ring.c +@@ -122,12 +122,10 @@ static int adis16400_spi_read_burst(struct device *dev, u8 *rx) + .tx_buf = st->tx, + .bits_per_word = 8, + .len = 2, +- .cs_change = 0, + }, { + .rx_buf = rx, + .bits_per_word = 8, + .len = 24, +- .cs_change = 1, + }, + }; + +@@ -162,9 +160,10 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s) + work_trigger_to_ring); + struct iio_ring_buffer *ring = st->indio_dev->ring; + +- int i = 0; ++ int i = 0, j; + s16 *data; + size_t datasize = ring->access.get_bytes_per_datum(ring); ++ unsigned long mask = ring->scan_mask; + + data = kmalloc(datasize , GFP_KERNEL); + if (data == NULL) { +@@ -174,9 +173,12 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s) + + if (ring->scan_count) + if (adis16400_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0) +- for (; i < ring->scan_count; i++) ++ for (; i < ring->scan_count; i++) { ++ j = __ffs(mask); ++ mask &= ~(1 << j); + data[i] = be16_to_cpup( +- (__be16 *)&(st->rx[i*2])); ++ (__be16 *)&(st->rx[j*2])); ++ } + + /* Guaranteed to be aligned with 8 byte boundary */ + if (ring->scan_timestamp) +diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c +index ae6ac82..8e60332 100644 +--- a/drivers/staging/usbip/stub_rx.c ++++ b/drivers/staging/usbip/stub_rx.c +@@ -170,33 +170,23 @@ static int tweak_set_configuration_cmd(struct urb *urb) + + static int tweak_reset_device_cmd(struct urb *urb) + { +- struct usb_ctrlrequest *req; +- __u16 value; +- __u16 index; +- int ret; +- +- req = (struct usb_ctrlrequest *) urb->setup_packet; +- value = le16_to_cpu(req->wValue); +- index = le16_to_cpu(req->wIndex); +- +- 
usbip_uinfo("reset_device (port %d) to %s\n", index, +- dev_name(&urb->dev->dev)); ++ struct stub_priv *priv = (struct stub_priv *) urb->context; ++ struct stub_device *sdev = priv->sdev; + +- /* all interfaces should be owned by usbip driver, so just reset it. */ +- ret = usb_lock_device_for_reset(urb->dev, NULL); +- if (ret < 0) { +- dev_err(&urb->dev->dev, "lock for reset\n"); +- return ret; +- } +- +- /* try to reset the device */ +- ret = usb_reset_device(urb->dev); +- if (ret < 0) +- dev_err(&urb->dev->dev, "device reset\n"); ++ usbip_uinfo("reset_device %s\n", dev_name(&urb->dev->dev)); + +- usb_unlock_device(urb->dev); +- +- return ret; ++ /* ++ * usb_lock_device_for_reset caused a deadlock: it causes the driver ++ * to unbind. In the shutdown the rx thread is signalled to shut down ++ * but this thread is pending in the usb_lock_device_for_reset. ++ * ++ * Instead queue the reset. ++ * ++ * Unfortunatly an existing usbip connection will be dropped due to ++ * driver unbinding. ++ */ ++ usb_queue_reset_device(sdev->interface); ++ return 0; + } + + /* +diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c +index d7136e2..b7a493c 100644 +--- a/drivers/staging/usbip/stub_tx.c ++++ b/drivers/staging/usbip/stub_tx.c +@@ -169,7 +169,6 @@ static int stub_send_ret_submit(struct stub_device *sdev) + struct stub_priv *priv, *tmp; + + struct msghdr msg; +- struct kvec iov[3]; + size_t txsize; + + size_t total_size = 0; +@@ -179,28 +178,73 @@ static int stub_send_ret_submit(struct stub_device *sdev) + struct urb *urb = priv->urb; + struct usbip_header pdu_header; + void *iso_buffer = NULL; ++ struct kvec *iov = NULL; ++ int iovnum = 0; + + txsize = 0; + memset(&pdu_header, 0, sizeof(pdu_header)); + memset(&msg, 0, sizeof(msg)); +- memset(&iov, 0, sizeof(iov)); + +- usbip_dbg_stub_tx("setup txdata urb %p\n", urb); ++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) ++ iovnum = 2 + urb->number_of_packets; ++ else ++ iovnum = 2; ++ ++ iov = kzalloc(iovnum * sizeof(struct kvec), GFP_KERNEL); + ++ if (!iov) { ++ usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); ++ return -1; ++ } ++ ++ iovnum = 0; + + /* 1. setup usbip_header */ + setup_ret_submit_pdu(&pdu_header, urb); ++ usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", ++ pdu_header.base.seqnum, urb); ++ /*usbip_dump_header(pdu_header);*/ + usbip_header_correct_endian(&pdu_header, 1); + +- iov[0].iov_base = &pdu_header; +- iov[0].iov_len = sizeof(pdu_header); ++ iov[iovnum].iov_base = &pdu_header; ++ iov[iovnum].iov_len = sizeof(pdu_header); ++ iovnum++; + txsize += sizeof(pdu_header); + + /* 2. setup transfer buffer */ +- if (usb_pipein(urb->pipe) && urb->actual_length > 0) { +- iov[1].iov_base = urb->transfer_buffer; +- iov[1].iov_len = urb->actual_length; ++ if (usb_pipein(urb->pipe) && ++ usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS && ++ urb->actual_length > 0) { ++ iov[iovnum].iov_base = urb->transfer_buffer; ++ iov[iovnum].iov_len = urb->actual_length; ++ iovnum++; + txsize += urb->actual_length; ++ } else if (usb_pipein(urb->pipe) && ++ usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { ++ /* ++ * For isochronous packets: actual length is the sum of ++ * the actual length of the individual, packets, but as ++ * the packet offsets are not changed there will be ++ * padding between the packets. To optimally use the ++ * bandwidth the padding is not transmitted. 
++ */ ++ ++ int i; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ iov[iovnum].iov_base = urb->transfer_buffer + urb->iso_frame_desc[i].offset; ++ iov[iovnum].iov_len = urb->iso_frame_desc[i].actual_length; ++ iovnum++; ++ txsize += urb->iso_frame_desc[i].actual_length; ++ } ++ ++ if (txsize != sizeof(pdu_header) + urb->actual_length) { ++ dev_err(&sdev->interface->dev, ++ "actual length of urb (%d) does not match iso packet sizes (%d)\n", ++ urb->actual_length, txsize-sizeof(pdu_header)); ++ kfree(iov); ++ usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); ++ return -1; ++ } + } + + /* 3. setup iso_packet_descriptor */ +@@ -211,32 +255,34 @@ static int stub_send_ret_submit(struct stub_device *sdev) + if (!iso_buffer) { + usbip_event_add(&sdev->ud, + SDEV_EVENT_ERROR_MALLOC); ++ kfree(iov); + return -1; + } + +- iov[2].iov_base = iso_buffer; +- iov[2].iov_len = len; ++ iov[iovnum].iov_base = iso_buffer; ++ iov[iovnum].iov_len = len; + txsize += len; ++ iovnum++; + } + +- ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov, +- 3, txsize); ++ ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, ++ iov, iovnum, txsize); + if (ret != txsize) { + dev_err(&sdev->interface->dev, + "sendmsg failed!, retval %d for %zd\n", + ret, txsize); ++ kfree(iov); + kfree(iso_buffer); + usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); + return -1; + } + ++ kfree(iov); + kfree(iso_buffer); +- usbip_dbg_stub_tx("send txdata\n"); + + total_size += txsize; + } + +- + spin_lock_irqsave(&sdev->priv_lock, flags); + + list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) { +diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c +index 210ef16..2108ca1 100644 +--- a/drivers/staging/usbip/usbip_common.c ++++ b/drivers/staging/usbip/usbip_common.c +@@ -334,10 +334,11 @@ void usbip_dump_header(struct usbip_header *pdu) + usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); + break; + case USBIP_RET_SUBMIT: +- usbip_udbg("RET_SUBMIT: st %d al %u sf %d ec %d\n", ++ usbip_udbg("RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n", + pdu->u.ret_submit.status, + pdu->u.ret_submit.actual_length, + pdu->u.ret_submit.start_frame, ++ pdu->u.ret_submit.number_of_packets, + pdu->u.ret_submit.error_count); + case USBIP_RET_UNLINK: + usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); +@@ -625,6 +626,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, + rpdu->status = urb->status; + rpdu->actual_length = urb->actual_length; + rpdu->start_frame = urb->start_frame; ++ rpdu->number_of_packets = urb->number_of_packets; + rpdu->error_count = urb->error_count; + } else { + /* vhci_rx.c */ +@@ -632,6 +634,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, + urb->status = rpdu->status; + urb->actual_length = rpdu->actual_length; + urb->start_frame = rpdu->start_frame; ++ urb->number_of_packets = rpdu->number_of_packets; + urb->error_count = rpdu->error_count; + } + } +@@ -700,11 +703,13 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu, + cpu_to_be32s(&pdu->status); + cpu_to_be32s(&pdu->actual_length); + cpu_to_be32s(&pdu->start_frame); ++ cpu_to_be32s(&pdu->number_of_packets); + cpu_to_be32s(&pdu->error_count); + } else { + be32_to_cpus(&pdu->status); + be32_to_cpus(&pdu->actual_length); + be32_to_cpus(&pdu->start_frame); ++ cpu_to_be32s(&pdu->number_of_packets); + be32_to_cpus(&pdu->error_count); + } + } +@@ -830,6 +835,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) + int size = np 
* sizeof(*iso); + int i; + int ret; ++ int total_length = 0; + + if (!usb_pipeisoc(urb->pipe)) + return 0; +@@ -859,19 +865,75 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) + return -EPIPE; + } + ++ + for (i = 0; i < np; i++) { + iso = buff + (i * sizeof(*iso)); + + usbip_iso_pakcet_correct_endian(iso, 0); + usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0); ++ total_length += urb->iso_frame_desc[i].actual_length; + } + + kfree(buff); + ++ if (total_length != urb->actual_length) { ++ dev_err(&urb->dev->dev, ++ "total length of iso packets (%d) not equal to actual length of buffer (%d)\n", ++ total_length, urb->actual_length); ++ ++ if (ud->side == USBIP_STUB) ++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); ++ else ++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); ++ ++ return -EPIPE; ++ } ++ + return ret; + } + EXPORT_SYMBOL_GPL(usbip_recv_iso); + ++/* ++ * This functions restores the padding which was removed for optimizing ++ * the bandwidth during transfer over tcp/ip ++ * ++ * buffer and iso packets need to be stored and be in propeper endian in urb ++ * before calling this function ++ */ ++int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) ++{ ++ int np = urb->number_of_packets; ++ int i; ++ int ret; ++ int actualoffset = urb->actual_length; ++ ++ if (!usb_pipeisoc(urb->pipe)) ++ return 0; ++ ++ /* if no packets or length of data is 0, then nothing to unpack */ ++ if (np == 0 || urb->actual_length == 0) ++ return 0; ++ ++ /* ++ * if actual_length is transfer_buffer_length then no padding is ++ * present. ++ */ ++ if (urb->actual_length == urb->transfer_buffer_length) ++ return 0; ++ ++ /* ++ * loop over all packets from last to first (to prevent overwritting ++ * memory when padding) and move them into the proper place ++ */ ++ for (i = np-1; i > 0; i--) { ++ actualoffset -= urb->iso_frame_desc[i].actual_length; ++ memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset, ++ urb->transfer_buffer + actualoffset, ++ urb->iso_frame_desc[i].actual_length); ++ } ++ return ret; ++} ++EXPORT_SYMBOL_GPL(usbip_pad_iso); + + /* some members of urb must be substituted before. */ + int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) +diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h +index d280e23..baa4c09 100644 +--- a/drivers/staging/usbip/usbip_common.h ++++ b/drivers/staging/usbip/usbip_common.h +@@ -393,6 +393,8 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send); + int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); + /* some members of urb must be substituted before. */ + int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); ++/* some members of urb must be substituted before. 
*/ ++int usbip_pad_iso(struct usbip_device *ud, struct urb *urb); + void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); + + +diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c +index bf69914..109002a 100644 +--- a/drivers/staging/usbip/vhci_rx.c ++++ b/drivers/staging/usbip/vhci_rx.c +@@ -99,6 +99,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, + if (usbip_recv_iso(ud, urb) < 0) + return; + ++ /* restore the padding in iso packets */ ++ if (usbip_pad_iso(ud, urb) < 0) ++ return; + + if (usbip_dbg_flag_vhci_rx) + usbip_dump_urb(urb); +diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c +index 596ba60..51b5551 100644 +--- a/drivers/watchdog/davinci_wdt.c ++++ b/drivers/watchdog/davinci_wdt.c +@@ -202,7 +202,6 @@ static struct miscdevice davinci_wdt_miscdev = { + static int __devinit davinci_wdt_probe(struct platform_device *pdev) + { + int ret = 0, size; +- struct resource *res; + struct device *dev = &pdev->dev; + + wdt_clk = clk_get(dev, NULL); +@@ -216,31 +215,31 @@ static int __devinit davinci_wdt_probe(struct platform_device *pdev) + + dev_info(dev, "heartbeat %d sec\n", heartbeat); + +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- if (res == NULL) { ++ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (wdt_mem == NULL) { + dev_err(dev, "failed to get memory region resource\n"); + return -ENOENT; + } + +- size = resource_size(res); +- wdt_mem = request_mem_region(res->start, size, pdev->name); +- +- if (wdt_mem == NULL) { ++ size = resource_size(wdt_mem); ++ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { + dev_err(dev, "failed to get memory region\n"); + return -ENOENT; + } + +- wdt_base = ioremap(res->start, size); ++ wdt_base = ioremap(wdt_mem->start, size); + if (!wdt_base) { + dev_err(dev, "failed to map memory region\n"); ++ release_mem_region(wdt_mem->start, size); ++ wdt_mem = NULL; + return -ENOMEM; + } + + ret = misc_register(&davinci_wdt_miscdev); + if (ret < 0) { + dev_err(dev, "cannot register misc device\n"); +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, size); ++ wdt_mem = NULL; + } else { + set_bit(WDT_DEVICE_INITED, &wdt_status); + } +@@ -253,8 +252,7 @@ static int __devexit davinci_wdt_remove(struct platform_device *pdev) + { + misc_deregister(&davinci_wdt_miscdev); + if (wdt_mem) { +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); + wdt_mem = NULL; + } + +diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c +index 3053ff0..1fe9bc5 100644 +--- a/drivers/watchdog/max63xx_wdt.c ++++ b/drivers/watchdog/max63xx_wdt.c +@@ -270,7 +270,6 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) + { + int ret = 0; + int size; +- struct resource *res; + struct device *dev = &pdev->dev; + struct max63xx_timeout *table; + +@@ -294,21 +293,19 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) + + max63xx_pdev = pdev; + +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- if (res == NULL) { ++ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (wdt_mem == NULL) { + dev_err(dev, "failed to get memory region resource\n"); + return -ENOENT; + } + +- size = resource_size(res); +- wdt_mem = request_mem_region(res->start, size, pdev->name); +- +- if (wdt_mem == NULL) { ++ size = resource_size(wdt_mem); ++ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { + dev_err(dev, "failed 
to get memory region\n"); + return -ENOENT; + } + +- wdt_base = ioremap(res->start, size); ++ wdt_base = ioremap(wdt_mem->start, size); + if (!wdt_base) { + dev_err(dev, "failed to map memory region\n"); + ret = -ENOMEM; +@@ -326,8 +323,8 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) + out_unmap: + iounmap(wdt_base); + out_request: +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, size); ++ wdt_mem = NULL; + + return ret; + } +@@ -336,8 +333,7 @@ static int __devexit max63xx_wdt_remove(struct platform_device *pdev) + { + misc_deregister(&max63xx_wdt_miscdev); + if (wdt_mem) { +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); + wdt_mem = NULL; + } + +diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c +index bf5b97c..8c8c7d5 100644 +--- a/drivers/watchdog/pnx4008_wdt.c ++++ b/drivers/watchdog/pnx4008_wdt.c +@@ -254,7 +254,6 @@ static struct miscdevice pnx4008_wdt_miscdev = { + static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) + { + int ret = 0, size; +- struct resource *res; + + if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) + heartbeat = DEFAULT_HEARTBEAT; +@@ -262,42 +261,42 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) + printk(KERN_INFO MODULE_NAME + "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); + +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- if (res == NULL) { ++ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (wdt_mem == NULL) { + printk(KERN_INFO MODULE_NAME + "failed to get memory region resouce\n"); + return -ENOENT; + } + +- size = resource_size(res); +- wdt_mem = request_mem_region(res->start, size, pdev->name); ++ size = resource_size(wdt_mem); + +- if (wdt_mem == NULL) { ++ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { + printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); + return -ENOENT; + } +- wdt_base = (void __iomem *)IO_ADDRESS(res->start); ++ wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start); + + wdt_clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(wdt_clk)) { + ret = PTR_ERR(wdt_clk); +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, size); ++ wdt_mem = NULL; + goto out; + } + + ret = clk_enable(wdt_clk); + if (ret) { +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, size); ++ wdt_mem = NULL; ++ clk_put(wdt_clk); + goto out; + } + + ret = misc_register(&pnx4008_wdt_miscdev); + if (ret < 0) { + printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, size); ++ wdt_mem = NULL; + clk_disable(wdt_clk); + clk_put(wdt_clk); + } else { +@@ -320,8 +319,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev) + clk_put(wdt_clk); + + if (wdt_mem) { +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); + wdt_mem = NULL; + } + return 0; +diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c +index ae53662..8303c57 100644 +--- a/drivers/watchdog/s3c2410_wdt.c ++++ b/drivers/watchdog/s3c2410_wdt.c +@@ -402,7 +402,6 @@ static inline void s3c2410wdt_cpufreq_deregister(void) + + static int __devinit s3c2410wdt_probe(struct platform_device *pdev) + { +- struct resource *res; + struct device *dev; + unsigned int wtcon; + int started = 0; +@@ -416,20 +415,19 @@ static 
int __devinit s3c2410wdt_probe(struct platform_device *pdev) + + /* get the memory region for the watchdog timer */ + +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- if (res == NULL) { ++ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (wdt_mem == NULL) { + dev_err(dev, "no memory resource specified\n"); + return -ENOENT; + } + +- size = resource_size(res); +- wdt_mem = request_mem_region(res->start, size, pdev->name); +- if (wdt_mem == NULL) { ++ size = resource_size(wdt_mem); ++ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { + dev_err(dev, "failed to get memory region\n"); + return -EBUSY; + } + +- wdt_base = ioremap(res->start, size); ++ wdt_base = ioremap(wdt_mem->start, size); + if (wdt_base == NULL) { + dev_err(dev, "failed to ioremap() region\n"); + ret = -EINVAL; +@@ -524,8 +522,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) + iounmap(wdt_base); + + err_req: +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, size); ++ wdt_mem = NULL; + + return ret; + } +@@ -545,8 +543,7 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev) + + iounmap(wdt_base); + +- release_resource(wdt_mem); +- kfree(wdt_mem); ++ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); + wdt_mem = NULL; + return 0; + } +diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c +index 8083728..c7ea4be 100644 +--- a/drivers/watchdog/sp5100_tco.c ++++ b/drivers/watchdog/sp5100_tco.c +@@ -42,6 +42,7 @@ + #define PFX TCO_MODULE_NAME ": " + + /* internal variables */ ++static u32 tcobase_phys; + static void __iomem *tcobase; + static unsigned int pm_iobase; + static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ +@@ -305,10 +306,18 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) + /* Low three bits of BASE0 are reserved. */ + val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); + ++ if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, ++ "SP5100 TCO")) { ++ printk(KERN_ERR PFX "mmio address 0x%04x already in use\n", ++ val); ++ goto unreg_region; ++ } ++ tcobase_phys = val; ++ + tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); + if (tcobase == 0) { + printk(KERN_ERR PFX "failed to get tcobase address\n"); +- goto unreg_region; ++ goto unreg_mem_region; + } + + /* Enable watchdog decode bit */ +@@ -346,7 +355,8 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) + /* Done */ + return 1; + +- iounmap(tcobase); ++unreg_mem_region: ++ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); + unreg_region: + release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); + exit: +@@ -401,6 +411,7 @@ static int __devinit sp5100_tco_init(struct platform_device *dev) + + exit: + iounmap(tcobase); ++ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); + release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); + return ret; + } +@@ -414,6 +425,7 @@ static void __devexit sp5100_tco_cleanup(void) + /* Deregister */ + misc_deregister(&sp5100_tco_miscdev); + iounmap(tcobase); ++ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); + release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); + } + +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 7f78cc7..bd64b41 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -1284,6 +1284,8 @@ struct btrfs_root { + #define BTRFS_INODE_NOATIME (1 << 9) + #define BTRFS_INODE_DIRSYNC (1 << 10) + ++#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) ++ + /* some macros to generate set/get funcs for the struct fields. 
This + * assumes there is a lefoo_to_cpu for every type, so lets make a simple + * one for u8: +@@ -2355,6 +2357,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); + int btrfs_find_orphan_roots(struct btrfs_root *tree_root); + int btrfs_set_root_node(struct btrfs_root_item *item, + struct extent_buffer *node); ++void btrfs_check_and_init_root_item(struct btrfs_root_item *item); ++ + /* dir-item.c */ + int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index e1aa8d6..edd9efa 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1184,8 +1184,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, + root->commit_root = btrfs_root_node(root); + BUG_ON(!root->node); + out: +- if (location->objectid != BTRFS_TREE_LOG_OBJECTID) ++ if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { + root->ref_cows = 1; ++ btrfs_check_and_init_root_item(&root->root_item); ++ } + + return root; + } +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 5fdb2ab..2ff51e6 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -294,6 +294,10 @@ static noinline int create_subvol(struct btrfs_root *root, + inode_item->nbytes = cpu_to_le64(root->leafsize); + inode_item->mode = cpu_to_le32(S_IFDIR | 0755); + ++ root_item.flags = 0; ++ root_item.byte_limit = 0; ++ inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT); ++ + btrfs_set_root_bytenr(&root_item, leaf->start); + btrfs_set_root_generation(&root_item, trans->transid); + btrfs_set_root_level(&root_item, 0); +diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c +index 6a1086e..3e45c32 100644 +--- a/fs/btrfs/root-tree.c ++++ b/fs/btrfs/root-tree.c +@@ -471,3 +471,21 @@ again: + btrfs_free_path(path); + return 0; + } ++ ++/* ++ * Old btrfs forgets to init root_item->flags and root_item->byte_limit ++ * for subvolumes. To work around this problem, we steal a bit from ++ * root_item->inode_item->flags, and use it to indicate if those fields ++ * have been properly initialized. 
++ */ ++void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item) ++{ ++ u64 inode_flags = le64_to_cpu(root_item->inode.flags); ++ ++ if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) { ++ inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT; ++ root_item->inode.flags = cpu_to_le64(inode_flags); ++ root_item->flags = 0; ++ root_item->byte_limit = 0; ++ } ++} +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 3d73c8d..f3d6681 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -970,6 +970,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + record_root_in_trans(trans, root); + btrfs_set_root_last_snapshot(&root->root_item, trans->transid); + memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); ++ btrfs_check_and_init_root_item(new_root_item); + + root_flags = btrfs_root_flags(new_root_item); + if (pending->readonly) +diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c +index c1436cf..4feb78c 100644 +--- a/fs/ecryptfs/keystore.c ++++ b/fs/ecryptfs/keystore.c +@@ -1563,6 +1563,7 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, + printk(KERN_ERR "Could not find key with description: [%s]\n", + sig); + rc = process_request_key_err(PTR_ERR(*auth_tok_key)); ++ (*auth_tok_key) = NULL; + goto out; + } + (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); +diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c +index cc64fca..eb9d967 100644 +--- a/fs/ecryptfs/mmap.c ++++ b/fs/ecryptfs/mmap.c +@@ -374,6 +374,11 @@ static int ecryptfs_write_begin(struct file *file, + && (pos != 0)) + zero_user(page, 0, PAGE_CACHE_SIZE); + out: ++ if (unlikely(rc)) { ++ unlock_page(page); ++ page_cache_release(page); ++ *pagep = NULL; ++ } + return rc; + } + +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 9f7f9e4..fee51db 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -5460,13 +5460,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, + /* if nrblocks are contiguous */ + if (chunk) { + /* +- * With N contiguous data blocks, it need at most +- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks +- * 2 dindirect blocks +- * 1 tindirect block ++ * With N contiguous data blocks, we need at most ++ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, ++ * 2 dindirect blocks, and 1 tindirect block + */ +- indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); +- return indirects + 3; ++ return DIV_ROUND_UP(nrblocks, ++ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; + } + /* + * if nrblocks are not contiguous, worse case, each block touch +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 4381efe..243deb0 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2978,6 +2978,12 @@ static int ext4_register_li_request(struct super_block *sb, + mutex_unlock(&ext4_li_info->li_list_mtx); + + sbi->s_li_request = elr; ++ /* ++ * set elr to NULL here since it has been inserted to ++ * the request_list and the removal and free of it is ++ * handled by ext4_clear_request_list from now on. 
++ */ ++ elr = NULL; + + if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { + ret = ext4_run_lazyinit_thread(); +diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c +index 0c6d816..7c831a2 100644 +--- a/fs/nfsd/lockd.c ++++ b/fs/nfsd/lockd.c +@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp) + exp_readlock(); + nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp); + fh_put(&fh); +- rqstp->rq_client = NULL; + exp_readunlock(); + /* We return nlm error codes as nlm doesn't know + * about nfsd, but nfsd does know about nlm.. +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index f0e448a..96aaaa4 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp) + + static void free_generic_stateid(struct nfs4_stateid *stp) + { +- int oflag = nfs4_access_bmap_to_omode(stp); ++ int oflag; + +- nfs4_file_put_access(stp->st_file, oflag); +- put_nfs4_file(stp->st_file); ++ if (stp->st_access_bmap) { ++ oflag = nfs4_access_bmap_to_omode(stp); ++ nfs4_file_put_access(stp->st_file, oflag); ++ put_nfs4_file(stp->st_file); ++ } + kmem_cache_free(stateid_slab, stp); + } + +diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c +index 2f560c9..f49e628 100644 +--- a/fs/nilfs2/file.c ++++ b/fs/nilfs2/file.c +@@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) + /* + * check to see if the page is mapped already (no holes) + */ +- if (PageMappedToDisk(page)) { +- unlock_page(page); ++ if (PageMappedToDisk(page)) + goto mapped; +- } ++ + if (page_has_buffers(page)) { + struct buffer_head *bh, *head; + int fully_mapped = 1; +@@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) + + if (fully_mapped) { + SetPageMappedToDisk(page); +- unlock_page(page); + goto mapped; + } + } +@@ -105,16 +103,17 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) + return VM_FAULT_SIGBUS; + + ret = block_page_mkwrite(vma, vmf, nilfs_get_block); +- if (unlikely(ret)) { ++ if (ret != VM_FAULT_LOCKED) { + nilfs_transaction_abort(inode->i_sb); + return ret; + } ++ nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits)); + nilfs_transaction_commit(inode->i_sb); + + mapped: + SetPageChecked(page); + wait_on_page_writeback(page); +- return 0; ++ return VM_FAULT_LOCKED; + } + + static const struct vm_operations_struct nilfs_file_vm_ops = { +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c +index a91b69a..0348d0c 100644 +--- a/fs/notify/inotify/inotify_fsnotify.c ++++ b/fs/notify/inotify/inotify_fsnotify.c +@@ -198,6 +198,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group) + idr_for_each(&group->inotify_data.idr, idr_callback, group); + idr_remove_all(&group->inotify_data.idr); + idr_destroy(&group->inotify_data.idr); ++ atomic_dec(&group->inotify_data.user->inotify_devs); + free_uid(group->inotify_data.user); + } + +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c +index 4cd5d5d..aec9b4a 100644 +--- a/fs/notify/inotify/inotify_user.c ++++ b/fs/notify/inotify/inotify_user.c +@@ -290,7 +290,6 @@ static int inotify_fasync(int fd, struct file *file, int on) + static int inotify_release(struct inode *ignored, struct file *file) + { + struct fsnotify_group *group = file->private_data; +- struct user_struct *user = group->inotify_data.user; + + pr_debug("%s: group=%p\n", __func__, group); + +@@ 
-299,8 +298,6 @@ static int inotify_release(struct inode *ignored, struct file *file) + /* free this group, matching get was inotify_init->fsnotify_obtain_group */ + fsnotify_put_group(group); + +- atomic_dec(&user->inotify_devs); +- + return 0; + } + +@@ -697,7 +694,7 @@ retry: + return ret; + } + +-static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events) ++static struct fsnotify_group *inotify_new_group(unsigned int max_events) + { + struct fsnotify_group *group; + +@@ -710,8 +707,14 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign + spin_lock_init(&group->inotify_data.idr_lock); + idr_init(&group->inotify_data.idr); + group->inotify_data.last_wd = 0; +- group->inotify_data.user = user; + group->inotify_data.fa = NULL; ++ group->inotify_data.user = get_current_user(); ++ ++ if (atomic_inc_return(&group->inotify_data.user->inotify_devs) > ++ inotify_max_user_instances) { ++ fsnotify_put_group(group); ++ return ERR_PTR(-EMFILE); ++ } + + return group; + } +@@ -721,7 +724,6 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign + SYSCALL_DEFINE1(inotify_init1, int, flags) + { + struct fsnotify_group *group; +- struct user_struct *user; + int ret; + + /* Check the IN_* constants for consistency. */ +@@ -731,31 +733,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags) + if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) + return -EINVAL; + +- user = get_current_user(); +- if (unlikely(atomic_read(&user->inotify_devs) >= +- inotify_max_user_instances)) { +- ret = -EMFILE; +- goto out_free_uid; +- } +- + /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ +- group = inotify_new_group(user, inotify_max_queued_events); +- if (IS_ERR(group)) { +- ret = PTR_ERR(group); +- goto out_free_uid; +- } +- +- atomic_inc(&user->inotify_devs); ++ group = inotify_new_group(inotify_max_queued_events); ++ if (IS_ERR(group)) ++ return PTR_ERR(group); + + ret = anon_inode_getfd("inotify", &inotify_fops, group, + O_RDONLY | flags); +- if (ret >= 0) +- return ret; ++ if (ret < 0) ++ fsnotify_put_group(group); + +- fsnotify_put_group(group); +- atomic_dec(&user->inotify_devs); +-out_free_uid: +- free_uid(user); + return ret; + } + +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c +index 1fbb0e2..bbba782 100644 +--- a/fs/ocfs2/aops.c ++++ b/fs/ocfs2/aops.c +@@ -1026,6 +1026,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, + ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, + &cluster_start, &cluster_end); + ++ /* treat the write as new if a hole/lseek spanned across ++ * the page boundary.
++ */ ++ new = new | ((i_size_read(inode) <= page_offset(page)) && ++ (page_offset(page) <= user_pos)); ++ + if (page == wc->w_target_page) { + map_from = user_pos & (PAGE_CACHE_SIZE - 1); + map_to = map_from + user_len; +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c +index a2a622e..b59ee61 100644 +--- a/fs/quota/dquot.c ++++ b/fs/quota/dquot.c +@@ -442,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire); + */ + int dquot_commit(struct dquot *dquot) + { +- int ret = 0, ret2 = 0; ++ int ret = 0; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + mutex_lock(&dqopt->dqio_mutex); +@@ -454,15 +454,10 @@ int dquot_commit(struct dquot *dquot) + spin_unlock(&dq_list_lock); + /* Inactive dquot can be only if there was error during read/init + * => we have better not writing it */ +- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { ++ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) + ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); +- if (info_dirty(&dqopt->info[dquot->dq_type])) { +- ret2 = dqopt->ops[dquot->dq_type]->write_file_info( +- dquot->dq_sb, dquot->dq_type); +- } +- if (ret >= 0) +- ret = ret2; +- } ++ else ++ ret = -EIO; + out_sem: + mutex_unlock(&dqopt->dqio_mutex); + return ret; +diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c +index 0dc340a..3f79cd1 100644 +--- a/fs/squashfs/dir.c ++++ b/fs/squashfs/dir.c +@@ -172,6 +172,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) + length += sizeof(dirh); + + dir_count = le32_to_cpu(dirh.count) + 1; ++ ++ /* dir_count should never be larger than 256 */ ++ if (dir_count > 256) ++ goto failed_read; ++ + while (dir_count--) { + /* + * Read directory entry. +@@ -183,6 +188,10 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) + + size = le16_to_cpu(dire->size) + 1; + ++ /* size should never be larger than SQUASHFS_NAME_LEN */ ++ if (size > SQUASHFS_NAME_LEN) ++ goto failed_read; ++ + err = squashfs_read_metadata(inode->i_sb, dire->name, + &block, &offset, size); + if (err < 0) +diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c +index 7a9464d..5d922a6 100644 +--- a/fs/squashfs/namei.c ++++ b/fs/squashfs/namei.c +@@ -176,6 +176,11 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, + length += sizeof(dirh); + + dir_count = le32_to_cpu(dirh.count) + 1; ++ ++ /* dir_count should never be larger than 256 */ ++ if (dir_count > 256) ++ goto data_error; ++ + while (dir_count--) { + /* + * Read directory entry. 
+@@ -187,6 +192,10 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, + + size = le16_to_cpu(dire->size) + 1; + ++ /* size should never be larger than SQUASHFS_NAME_LEN */ ++ if (size > SQUASHFS_NAME_LEN) ++ goto data_error; ++ + err = squashfs_read_metadata(dir->i_sb, dire->name, + &block, &offset, size); + if (err < 0) +@@ -228,6 +237,9 @@ exit_lookup: + d_add(dentry, inode); + return ERR_PTR(0); + ++data_error: ++ err = -EIO; ++ + read_failure: + ERROR("Unable to read directory block [%llx:%x]\n", + squashfs_i(dir)->start + msblk->directory_table, +diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c +index 4661ae2..04ae9a5 100644 +--- a/fs/squashfs/zlib_wrapper.c ++++ b/fs/squashfs/zlib_wrapper.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + #include "squashfs_fs.h" + #include "squashfs_fs_sb.h" +@@ -37,8 +38,7 @@ static void *zlib_init(struct squashfs_sb_info *dummy) + z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL); + if (stream == NULL) + goto failed; +- stream->workspace = kmalloc(zlib_inflate_workspacesize(), +- GFP_KERNEL); ++ stream->workspace = vmalloc(zlib_inflate_workspacesize()); + if (stream->workspace == NULL) + goto failed; + +@@ -56,7 +56,7 @@ static void zlib_free(void *strm) + z_stream *stream = strm; + + if (stream) +- kfree(stream->workspace); ++ vfree(stream->workspace); + kfree(stream); + } + +diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c +index 02429d8..32bcb2c 100644 +--- a/fs/ubifs/commit.c ++++ b/fs/ubifs/commit.c +@@ -521,7 +521,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) + size_t sz; + + if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX)) +- goto out; ++ return 0; + + INIT_LIST_HEAD(&list); + +diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c +index 0bee4db..5b9e985 100644 +--- a/fs/ubifs/debug.c ++++ b/fs/ubifs/debug.c +@@ -961,11 +961,39 @@ void dbg_dump_index(struct ubifs_info *c) + void dbg_save_space_info(struct ubifs_info *c) + { + struct ubifs_debug_info *d = c->dbg; +- +- ubifs_get_lp_stats(c, &d->saved_lst); ++ int freeable_cnt; + + spin_lock(&c->space_lock); ++ memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats)); ++ ++ /* ++ * We use a dirty hack here and zero out @c->freeable_cnt, because it ++ * affects the free space calculations, and UBIFS might not know about ++ * all freeable eraseblocks. Indeed, we know about freeable eraseblocks ++ * only when we read their lprops, and we do this only lazily, upon the ++ * need. So at any given point of time @c->freeable_cnt might be not ++ * exactly accurate. ++ * ++ * Just one example about the issue we hit when we did not zero ++ * @c->freeable_cnt. ++ * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the ++ * amount of free space in @d->saved_free ++ * 2. We re-mount R/W, which makes UBIFS to read the "lsave" ++ * information from flash, where we cache LEBs from various ++ * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()' ++ * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()' ++ * -> 'ubifs_get_pnode()' -> 'update_cats()' ++ * -> 'ubifs_add_to_cat()'). ++ * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt ++ * becomes %1. ++ * 4. We calculate the amount of free space when the re-mount is ++ * finished in 'dbg_check_space_info()' and it does not match ++ * @d->saved_free. 
++ */ ++ freeable_cnt = c->freeable_cnt; ++ c->freeable_cnt = 0; + d->saved_free = ubifs_get_free_space_nolock(c); ++ c->freeable_cnt = freeable_cnt; + spin_unlock(&c->space_lock); + } + +@@ -982,12 +1010,15 @@ int dbg_check_space_info(struct ubifs_info *c) + { + struct ubifs_debug_info *d = c->dbg; + struct ubifs_lp_stats lst; +- long long avail, free; ++ long long free; ++ int freeable_cnt; + + spin_lock(&c->space_lock); +- avail = ubifs_calc_available(c, c->min_idx_lebs); ++ freeable_cnt = c->freeable_cnt; ++ c->freeable_cnt = 0; ++ free = ubifs_get_free_space_nolock(c); ++ c->freeable_cnt = freeable_cnt; + spin_unlock(&c->space_lock); +- free = ubifs_get_free_space(c); + + if (free != d->saved_free) { + ubifs_err("free space changed from %lld to %lld", +diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c +index 72775d3..ef5155e 100644 +--- a/fs/ubifs/lpt.c ++++ b/fs/ubifs/lpt.c +@@ -1270,10 +1270,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) + lnum = branch->lnum; + offs = branch->offs; + pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS); +- if (!pnode) { +- err = -ENOMEM; +- goto out; +- } ++ if (!pnode) ++ return -ENOMEM; ++ + if (lnum == 0) { + /* + * This pnode was not written which just means that the LEB +diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c +index 9731898..ad485b6 100644 +--- a/fs/xfs/linux-2.6/xfs_super.c ++++ b/fs/xfs/linux-2.6/xfs_super.c +@@ -1551,10 +1551,14 @@ xfs_fs_fill_super( + if (error) + goto out_free_sb; + +- error = xfs_mountfs(mp); +- if (error) +- goto out_filestream_unmount; +- ++ /* ++ * we must configure the block size in the superblock before we run the ++ * full mount process as the mount process can lookup and cache inodes. ++ * For the same reason we must also initialise the syncd and register ++ * the inode cache shrinker so that inodes can be reclaimed during ++ * operations like a quotacheck that iterate all inodes in the ++ * filesystem. ++ */ + sb->s_magic = XFS_SB_MAGIC; + sb->s_blocksize = mp->m_sb.sb_blocksize; + sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; +@@ -1562,6 +1566,16 @@ xfs_fs_fill_super( + sb->s_time_gran = 1; + set_posix_acl_flag(sb); + ++ error = xfs_syncd_init(mp); ++ if (error) ++ goto out_filestream_unmount; ++ ++ xfs_inode_shrinker_register(mp); ++ ++ error = xfs_mountfs(mp); ++ if (error) ++ goto out_syncd_stop; ++ + root = igrab(VFS_I(mp->m_rootip)); + if (!root) { + error = ENOENT; +@@ -1577,14 +1591,11 @@ xfs_fs_fill_super( + goto fail_vnrele; + } + +- error = xfs_syncd_init(mp); +- if (error) +- goto fail_vnrele; +- +- xfs_inode_shrinker_register(mp); +- + return 0; + ++ out_syncd_stop: ++ xfs_inode_shrinker_unregister(mp); ++ xfs_syncd_stop(mp); + out_filestream_unmount: + xfs_filestream_unmount(mp); + out_free_sb: +@@ -1608,6 +1619,9 @@ xfs_fs_fill_super( + } + + fail_unmount: ++ xfs_inode_shrinker_unregister(mp); ++ xfs_syncd_stop(mp); ++ + /* + * Blow away any referenced inode in the filestreams cache. 
+ * This can and will cause log traffic as inodes go inactive +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +index 5ff1194..6724bf3 100644 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -458,6 +458,8 @@ + {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ ++ {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ ++ {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0, 0, 0} + + #define r128_PCI_IDS \ +diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h +index 475f8c4..381f4ce 100644 +--- a/include/linux/atmdev.h ++++ b/include/linux/atmdev.h +@@ -443,6 +443,7 @@ void atm_dev_signal_change(struct atm_dev *dev, char signal); + + void vcc_insert_socket(struct sock *sk); + ++void atm_dev_release_vccs(struct atm_dev *dev); + + /* + * This is approximately the algorithm used by alloc_skb. +diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h +index a3c1874..a04b6ce 100644 +--- a/include/linux/ethtool.h ++++ b/include/linux/ethtool.h +@@ -591,6 +591,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data); + u32 ethtool_op_get_flags(struct net_device *dev); + int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); + void ethtool_ntuple_flush(struct net_device *dev); ++bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); + + /** + * ðtool_ops - Alter and report network device settings +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 559d028..6002bca 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -1002,12 +1002,11 @@ extern bool pcie_ports_auto; + #endif + + #ifndef CONFIG_PCIEASPM +-static inline int pcie_aspm_enabled(void) +-{ +- return 0; +-} ++static inline int pcie_aspm_enabled(void) { return 0; } ++static inline bool pcie_aspm_support_enabled(void) { return false; } + #else + extern int pcie_aspm_enabled(void); ++extern bool pcie_aspm_support_enabled(void); + #endif + + #ifdef CONFIG_PCIEAER +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h +index 85867dc..bfd36ff 100644 +--- a/include/scsi/scsi_device.h ++++ b/include/scsi/scsi_device.h +@@ -461,7 +461,7 @@ static inline int scsi_device_qas(struct scsi_device *sdev) + } + static inline int scsi_device_enclosure(struct scsi_device *sdev) + { +- return sdev->inquiry[6] & (1<<6); ++ return sdev->inquiry ? 
(sdev->inquiry[6] & (1<<6)) : 1; + } + + static inline int scsi_device_protection(struct scsi_device *sdev) +diff --git a/include/sound/pcm.h b/include/sound/pcm.h +index e731f8d..ec26781 100644 +--- a/include/sound/pcm.h ++++ b/include/sound/pcm.h +@@ -1030,9 +1030,7 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s + #define snd_pcm_lib_mmap_iomem NULL + #endif + +-int snd_pcm_lib_mmap_noncached(struct snd_pcm_substream *substream, +- struct vm_area_struct *area); +-#define snd_pcm_lib_mmap_vmalloc snd_pcm_lib_mmap_noncached ++#define snd_pcm_lib_mmap_vmalloc NULL + + static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) + { +diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h +index 8031769..60f94fb 100644 +--- a/include/sound/soc-dapm.h ++++ b/include/sound/soc-dapm.h +@@ -45,25 +45,25 @@ + /* platform domain */ + #define SND_SOC_DAPM_INPUT(wname) \ + { .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0} ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM } + #define SND_SOC_DAPM_OUTPUT(wname) \ + { .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0} ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM } + #define SND_SOC_DAPM_MIC(wname, wevent) \ + { .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0, .event = wevent, \ ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ + .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} + #define SND_SOC_DAPM_HP(wname, wevent) \ + { .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0, .event = wevent, \ ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ + .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} + #define SND_SOC_DAPM_SPK(wname, wevent) \ + { .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0, .event = wevent, \ ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ + .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} + #define SND_SOC_DAPM_LINE(wname, wevent) \ + { .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0, .event = wevent, \ ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ + .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} + + /* path domain */ +@@ -177,11 +177,11 @@ + /* events that are pre and post DAPM */ + #define SND_SOC_DAPM_PRE(wname, wevent) \ + { .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0, .event = wevent, \ ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ + .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD} + #define SND_SOC_DAPM_POST(wname, wevent) \ + { .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \ +- .num_kcontrols = 0, .event = wevent, \ ++ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ + .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD} + + /* stream domain */ +diff --git a/kernel/perf_event.c b/kernel/perf_event.c +index ad02fea..b2536bd 100644 +--- a/kernel/perf_event.c ++++ b/kernel/perf_event.c +@@ -62,7 +62,8 @@ static struct srcu_struct pmus_srcu; + */ + int sysctl_perf_event_paranoid __read_mostly = 1; + +-int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ ++/* Minimum for 512 kiB + 1 user control page */ ++int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ + + /* + * max perf event sample rate +@@ -5916,6 +5917,11 @@ 
SYSCALL_DEFINE5(perf_event_open, + goto err_alloc; + } + ++ if (task) { ++ put_task_struct(task); ++ task = NULL; ++ } ++ + /* + * Look up the group leader (we will attach this event to it): + */ +diff --git a/kernel/signal.c b/kernel/signal.c +index 3175186..bf11d269 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -2423,7 +2423,7 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, + /* Not even root can pretend to send signals from the kernel. + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ +- if (info.si_code != SI_QUEUE) { ++ if (info.si_code >= 0 || info.si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info.si_code < 0); + return -EPERM; +@@ -2443,7 +2443,7 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) + /* Not even root can pretend to send signals from the kernel. + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ +- if (info->si_code != SI_QUEUE) { ++ if (info->si_code >= 0 || info->si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info->si_code < 0); + return -EPERM; +diff --git a/mm/mremap.c b/mm/mremap.c +index 1de98d4..a7c1f9f 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -277,9 +277,16 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, + if (old_len > vma->vm_end - addr) + goto Efault; + +- if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) { +- if (new_len > old_len) ++ /* Need to be careful about a growing mapping */ ++ if (new_len > old_len) { ++ unsigned long pgoff; ++ ++ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) + goto Efault; ++ pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; ++ pgoff += vma->vm_pgoff; ++ if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) ++ goto Einval; + } + + if (vma->vm_flags & VM_LOCKED) { +diff --git a/net/atm/common.c b/net/atm/common.c +index 1b9c52a..22b963d 100644 +--- a/net/atm/common.c ++++ b/net/atm/common.c +@@ -252,6 +252,7 @@ void atm_dev_release_vccs(struct atm_dev *dev) + } + write_unlock_irq(&vcc_sklist_lock); + } ++EXPORT_SYMBOL(atm_dev_release_vccs); + + static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) + { +diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c +index 2862f53..d935da7 100644 +--- a/net/bluetooth/bnep/sock.c ++++ b/net/bluetooth/bnep/sock.c +@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long + sockfd_put(nsock); + return -EBADFD; + } ++ ca.device[sizeof(ca.device)-1] = 0; + + err = bnep_add_connection(&ca, nsock); + if (!err) { +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c +index 960c6d1..926ed39 100644 +--- a/net/bluetooth/sco.c ++++ b/net/bluetooth/sco.c +@@ -703,6 +703,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user + break; + } + ++ memset(&cinfo, 0, sizeof(cinfo)); + cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; + memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); + +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index 16df053..47acf4a 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user, + if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) + return -ENOMEM; + ++ tmp.name[sizeof(tmp.name) - 1] = 0; ++ + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; + newinfo = vmalloc(sizeof(*newinfo) + countersize); + if (!newinfo) +diff --git 
a/net/core/ethtool.c b/net/core/ethtool.c +index ff23029..6c7c610 100644 +--- a/net/core/ethtool.c ++++ b/net/core/ethtool.c +@@ -146,9 +146,24 @@ u32 ethtool_op_get_flags(struct net_device *dev) + } + EXPORT_SYMBOL(ethtool_op_get_flags); + ++/* Check if device can enable (or disable) particular feature coded in "data" ++ * argument. Flags "supported" describe features that can be toggled by device. ++ * If a feature cannot be toggled, its state (enabled or disabled) must match ++ * the hardcoded device features state, otherwise the flags are marked as invalid. ++ */ ++bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) ++{ ++ u32 features = dev->features & flags_dup_features; ++ /* "data" can contain only flags_dup_features bits, ++ * see __ethtool_set_flags */ ++ ++ return (features & ~supported) != (data & ~supported); ++} ++EXPORT_SYMBOL(ethtool_invalid_flags); ++ + int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) + { +- if (data & ~supported) ++ if (ethtool_invalid_flags(dev, data, supported)) + return -EINVAL; + + dev->features = ((dev->features & ~flags_dup_features) | +diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c +index 0c28263..116d3fd 100644 +--- a/net/econet/af_econet.c ++++ b/net/econet/af_econet.c +@@ -435,10 +435,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, + udpdest.sin_addr.s_addr = htonl(network | addr.station); + } + ++ memset(&ah, 0, sizeof(ah)); + ah.port = port; + ah.cb = cb & 0x7f; + ah.code = 2; /* magic */ +- ah.pad = 0; + + /* tack our header on the front of the iovec */ + size = sizeof(struct aunhdr); +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index e855fff..6d79aa1 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -1065,6 +1065,7 @@ static int do_replace(struct net *net, const void __user *user, + /* overflow check */ + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) + return -ENOMEM; ++ tmp.name[sizeof(tmp.name)-1] = 0; + + newinfo = xt_alloc_table_info(tmp.size); + if (!newinfo) +@@ -1486,6 +1487,7 @@ static int compat_do_replace(struct net *net, void __user *user, + return -ENOMEM; + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) + return -ENOMEM; ++ tmp.name[sizeof(tmp.name)-1] = 0; + + newinfo = xt_alloc_table_info(tmp.size); + if (!newinfo) +@@ -1738,6 +1740,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len + ret = -EFAULT; + break; + } ++ rev.name[sizeof(rev.name)-1] = 0; + + try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, + rev.revision, 1, &ret), +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index 652efea..92fb4c5 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -387,7 +387,7 @@ ipt_do_table(struct sk_buff *skb, + verdict = (unsigned)(-v) - 1; + break; + } +- if (*stackptr == 0) { ++ if (*stackptr <= origptr) { + e = get_entry(table_base, + private->underflow[hook]); + pr_debug("Underflow (this is normal) " +@@ -427,10 +427,10 @@ ipt_do_table(struct sk_buff *skb, + /* Verdict */ + break; + } while (!acpar.hotdrop); +- xt_info_rdunlock_bh(); + pr_debug("Exiting %s; resetting sp from %u to %u\n", + __func__, *stackptr, origptr); + *stackptr = origptr; ++ xt_info_rdunlock_bh(); + #ifdef DEBUG_ALLOW_ALL + return NF_ACCEPT; + #else +@@ -1261,6 +1261,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) + /* overflow check */ + if
(tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) + return -ENOMEM; ++ tmp.name[sizeof(tmp.name)-1] = 0; + + newinfo = xt_alloc_table_info(tmp.size); + if (!newinfo) +@@ -1805,6 +1806,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) + return -ENOMEM; + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) + return -ENOMEM; ++ tmp.name[sizeof(tmp.name)-1] = 0; + + newinfo = xt_alloc_table_info(tmp.size); + if (!newinfo) +@@ -2034,6 +2036,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + ret = -EFAULT; + break; + } ++ rev.name[sizeof(rev.name)-1] = 0; + + if (cmd == IPT_SO_GET_REVISION_TARGET) + target = 1; +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c +index 1e26a48..af7dec6 100644 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c +@@ -669,8 +669,11 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input, + char buffer[PROC_WRITELEN+1]; + unsigned long nodenum; + +- if (copy_from_user(buffer, input, PROC_WRITELEN)) ++ if (size > PROC_WRITELEN) ++ return -EIO; ++ if (copy_from_user(buffer, input, size)) + return -EFAULT; ++ buffer[size] = 0; + + if (*buffer == '+') { + nodenum = simple_strtoul(buffer+1, NULL, 10); +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index 7d227c6..eadafbf 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -410,7 +410,7 @@ ip6t_do_table(struct sk_buff *skb, + verdict = (unsigned)(-v) - 1; + break; + } +- if (*stackptr == 0) ++ if (*stackptr <= origptr) + e = get_entry(table_base, + private->underflow[hook]); + else +@@ -441,8 +441,8 @@ ip6t_do_table(struct sk_buff *skb, + break; + } while (!acpar.hotdrop); + +- xt_info_rdunlock_bh(); + *stackptr = origptr; ++ xt_info_rdunlock_bh(); + + #ifdef DEBUG_ALLOW_ALL + return NF_ACCEPT; +@@ -1274,6 +1274,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) + /* overflow check */ + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) + return -ENOMEM; ++ tmp.name[sizeof(tmp.name)-1] = 0; + + newinfo = xt_alloc_table_info(tmp.size); + if (!newinfo) +@@ -1820,6 +1821,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) + return -ENOMEM; + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) + return -ENOMEM; ++ tmp.name[sizeof(tmp.name)-1] = 0; + + newinfo = xt_alloc_table_info(tmp.size); + if (!newinfo) +@@ -2049,6 +2051,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + ret = -EFAULT; + break; + } ++ rev.name[sizeof(rev.name)-1] = 0; + + if (cmd == IP6T_SO_GET_REVISION_TARGET) + target = 1; +diff --git a/net/irda/iriap.c b/net/irda/iriap.c +index 5b743bd..3647753 100644 +--- a/net/irda/iriap.c ++++ b/net/irda/iriap.c +@@ -656,10 +656,16 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, + n = 1; + + name_len = fp[n++]; ++ ++ IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;); ++ + memcpy(name, fp+n, name_len); n+=name_len; + name[name_len] = '\0'; + + attr_len = fp[n++]; ++ ++ IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;); ++ + memcpy(attr, fp+n, attr_len); n+=attr_len; + attr[attr_len] = '\0'; + +diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c +index 7c567b8..2bb2beb 100644 +--- a/net/irda/irnet/irnet_ppp.c ++++ b/net/irda/irnet/irnet_ppp.c +@@ -105,6 +105,9 @@ irnet_ctrl_write(irnet_socket * ap, + while(isspace(start[length - 1])) + 
length--; + ++ DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, ++ -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); ++ + /* Copy the name for later reuse */ + memcpy(ap->rname, start + 5, length - 5); + ap->rname[length - 5] = '\0'; +diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c +index 165a451..cac35ff 100644 +--- a/net/mac80211/rc80211_minstrel_ht.c ++++ b/net/mac80211/rc80211_minstrel_ht.c +@@ -639,18 +639,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; + struct ieee80211_local *local = hw_to_local(mp->hw); + u16 sta_cap = sta->ht_cap.cap; ++ int n_supported = 0; + int ack_dur; + int stbc; + int i; + + /* fall back to the old minstrel for legacy stations */ +- if (!sta->ht_cap.ht_supported) { +- msp->is_ht = false; +- memset(&msp->legacy, 0, sizeof(msp->legacy)); +- msp->legacy.r = msp->ratelist; +- msp->legacy.sample_table = msp->sample_table; +- return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); +- } ++ if (!sta->ht_cap.ht_supported) ++ goto use_legacy; + + BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != + MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); +@@ -705,7 +701,22 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, + + mi->groups[i].supported = + mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; ++ ++ if (mi->groups[i].supported) ++ n_supported++; + } ++ ++ if (!n_supported) ++ goto use_legacy; ++ ++ return; ++ ++use_legacy: ++ msp->is_ht = false; ++ memset(&msp->legacy, 0, sizeof(msp->legacy)); ++ msp->legacy.r = msp->ratelist; ++ msp->legacy.sample_table = msp->sample_table; ++ return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); + } + + static void +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index c426504..604216e 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -243,6 +243,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + memcpy(sta->sta.addr, addr, ETH_ALEN); + sta->local = local; + sta->sdata = sdata; ++ sta->last_rx = jiffies; + + ewma_init(&sta->avg_signal, 1024, 8); + +diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c +index 8678823..bcd5ed6 100644 +--- a/net/netfilter/nf_conntrack_h323_asn1.c ++++ b/net/netfilter/nf_conntrack_h323_asn1.c +@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f, + CHECK_BOUND(bs, 2); + count = *bs->cur++; + count <<= 8; +- count = *bs->cur++; ++ count += *bs->cur++; + break; + case SEMI: + BYTE_ALIGN(bs); +diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c +index 1734abb..174d51c 100644 +--- a/net/rose/rose_subr.c ++++ b/net/rose/rose_subr.c +@@ -290,10 +290,15 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct * + facilities->source_ndigis = 0; + facilities->dest_ndigis = 0; + for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { +- if (pt[6] & AX25_HBIT) ++ if (pt[6] & AX25_HBIT) { ++ if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) ++ return -1; + memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); +- else ++ } else { ++ if (facilities->source_ndigis >= ROSE_MAX_DIGIS) ++ return -1; + memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); ++ } + } + } + p += l + 2; +@@ -333,6 +338,11 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac + + case 0xC0: + l = p[1]; ++ ++ /* 
Prevent overflows*/ ++ if (l < 10 || l > 20) ++ return -1; ++ + if (*p == FAC_CCITT_DEST_NSAP) { + memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); + memcpy(callsign, p + 12, l - 10); +@@ -373,12 +383,16 @@ int rose_parse_facilities(unsigned char *p, + switch (*p) { + case FAC_NATIONAL: /* National */ + len = rose_parse_national(p + 1, facilities, facilities_len - 1); ++ if (len < 0) ++ return 0; + facilities_len -= len + 1; + p += len + 1; + break; + + case FAC_CCITT: /* CCITT */ + len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); ++ if (len < 0) ++ return 0; + facilities_len -= len + 1; + p += len + 1; + break; +diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c +index f375dec..778e5df 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_mech.c ++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c +@@ -427,7 +427,7 @@ static int + context_derive_keys_rc4(struct krb5_ctx *ctx) + { + struct crypto_hash *hmac; +- static const char sigkeyconstant[] = "signaturekey"; ++ char sigkeyconstant[] = "signaturekey"; + int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ + struct hash_desc desc; + struct scatterlist sg[1]; +diff --git a/sound/core/init.c b/sound/core/init.c +index 3e65da2..a0080aa 100644 +--- a/sound/core/init.c ++++ b/sound/core/init.c +@@ -848,6 +848,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file) + return -ENOMEM; + mfile->file = file; + mfile->disconnected_f_op = NULL; ++ INIT_LIST_HEAD(&mfile->shutdown_list); + spin_lock(&card->files_lock); + if (card->shutdown) { + spin_unlock(&card->files_lock); +@@ -883,6 +884,9 @@ int snd_card_file_remove(struct snd_card *card, struct file *file) + list_for_each_entry(mfile, &card->files_list, list) { + if (mfile->file == file) { + list_del(&mfile->list); ++ spin_lock(&shutdown_lock); ++ list_del(&mfile->shutdown_list); ++ spin_unlock(&shutdown_lock); + if (mfile->disconnected_f_op) + fops_put(mfile->disconnected_f_op); + found = mfile; +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index a82e3756..64449cb 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -375,6 +375,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, + } + + if (runtime->no_period_wakeup) { ++ snd_pcm_sframes_t xrun_threshold; + /* + * Without regular period interrupts, we have to check + * the elapsed time to detect xruns. 
+@@ -383,7 +384,8 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, + if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) + goto no_delta_check; + hdelta = jdelta - delta * HZ / runtime->rate; +- while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) { ++ xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1; ++ while (hdelta > xrun_threshold) { + delta += runtime->buffer_size; + hw_base += runtime->buffer_size; + if (hw_base >= runtime->boundary) +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 4be45e7..6848dd9 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -3201,15 +3201,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, + EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); + #endif /* SNDRV_PCM_INFO_MMAP */ + +-/* mmap callback with pgprot_noncached */ +-int snd_pcm_lib_mmap_noncached(struct snd_pcm_substream *substream, +- struct vm_area_struct *area) +-{ +- area->vm_page_prot = pgprot_noncached(area->vm_page_prot); +- return snd_pcm_default_mmap(substream, area); +-} +-EXPORT_SYMBOL(snd_pcm_lib_mmap_noncached); +- + /* + * mmap DMA buffer + */ +diff --git a/sound/oss/dev_table.h b/sound/oss/dev_table.h +index b7617be..0199a31 100644 +--- a/sound/oss/dev_table.h ++++ b/sound/oss/dev_table.h +@@ -271,7 +271,7 @@ struct synth_operations + void (*reset) (int dev); + void (*hw_control) (int dev, unsigned char *event); + int (*load_patch) (int dev, int format, const char __user *addr, +- int offs, int count, int pmgr_flag); ++ int count, int pmgr_flag); + void (*aftertouch) (int dev, int voice, int pressure); + void (*controller) (int dev, int voice, int ctrl_num, int value); + void (*panning) (int dev, int voice, int value); +diff --git a/sound/oss/midi_synth.c b/sound/oss/midi_synth.c +index 3c09374..2292c23 100644 +--- a/sound/oss/midi_synth.c ++++ b/sound/oss/midi_synth.c +@@ -476,7 +476,7 @@ EXPORT_SYMBOL(midi_synth_hw_control); + + int + midi_synth_load_patch(int dev, int format, const char __user *addr, +- int offs, int count, int pmgr_flag) ++ int count, int pmgr_flag) + { + int orig_dev = synth_devs[dev]->midi_dev; + +@@ -491,33 +491,29 @@ midi_synth_load_patch(int dev, int format, const char __user *addr, + if (!prefix_cmd(orig_dev, 0xf0)) + return 0; + ++ /* Invalid patch format */ + if (format != SYSEX_PATCH) +- { +-/* printk("MIDI Error: Invalid patch format (key) 0x%x\n", format);*/ + return -EINVAL; +- } ++ ++ /* Patch header too short */ + if (count < hdr_size) +- { +-/* printk("MIDI Error: Patch header too short\n");*/ + return -EINVAL; +- } ++ + count -= hdr_size; + + /* +- * Copy the header from user space but ignore the first bytes which have +- * been transferred already. 
++ * Copy the header from user space + */ + +- if(copy_from_user(&((char *) &sysex)[offs], &(addr)[offs], hdr_size - offs)) ++ if (copy_from_user(&sysex, addr, hdr_size)) + return -EFAULT; +- +- if (count < sysex.len) +- { +-/* printk(KERN_WARNING "MIDI Warning: Sysex record too short (%d<%d)\n", count, (int) sysex.len);*/ ++ ++ /* Sysex record too short */ ++ if ((unsigned)count < (unsigned)sysex.len) + sysex.len = count; +- } +- left = sysex.len; +- src_offs = 0; ++ ++ left = sysex.len; ++ src_offs = 0; + + for (i = 0; i < left && !signal_pending(current); i++) + { +diff --git a/sound/oss/midi_synth.h b/sound/oss/midi_synth.h +index 6bc9d00..b64ddd6 100644 +--- a/sound/oss/midi_synth.h ++++ b/sound/oss/midi_synth.h +@@ -8,7 +8,7 @@ int midi_synth_open (int dev, int mode); + void midi_synth_close (int dev); + void midi_synth_hw_control (int dev, unsigned char *event); + int midi_synth_load_patch (int dev, int format, const char __user * addr, +- int offs, int count, int pmgr_flag); ++ int count, int pmgr_flag); + void midi_synth_panning (int dev, int channel, int pressure); + void midi_synth_aftertouch (int dev, int channel, int pressure); + void midi_synth_controller (int dev, int channel, int ctrl_num, int value); +diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c +index 938c48c..407cd67 100644 +--- a/sound/oss/opl3.c ++++ b/sound/oss/opl3.c +@@ -820,7 +820,7 @@ static void opl3_hw_control(int dev, unsigned char *event) + } + + static int opl3_load_patch(int dev, int format, const char __user *addr, +- int offs, int count, int pmgr_flag) ++ int count, int pmgr_flag) + { + struct sbi_instrument ins; + +@@ -830,11 +830,7 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, + return -EINVAL; + } + +- /* +- * What the fuck is going on here? We leave junk in the beginning +- * of ins and then check the field pretty close to that beginning? 
+- */ +- if(copy_from_user(&((char *) &ins)[offs], addr + offs, sizeof(ins) - offs)) ++ if (copy_from_user(&ins, addr, sizeof(ins))) + return -EFAULT; + + if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) +@@ -849,6 +845,10 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, + + static void opl3_panning(int dev, int voice, int value) + { ++ ++ if (voice < 0 || voice >= devc->nr_voice) ++ return; ++ + devc->voc[voice].panning = value; + } + +@@ -1066,8 +1066,15 @@ static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info + + static void opl3_setup_voice(int dev, int voice, int chn) + { +- struct channel_info *info = +- &synth_devs[dev]->chn_info[chn]; ++ struct channel_info *info; ++ ++ if (voice < 0 || voice >= devc->nr_voice) ++ return; ++ ++ if (chn < 0 || chn > 15) ++ return; ++ ++ info = &synth_devs[dev]->chn_info[chn]; + + opl3_set_instr(dev, voice, info->pgm_num); + +diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c +index 5ea1098..30bcfe4 100644 +--- a/sound/oss/sequencer.c ++++ b/sound/oss/sequencer.c +@@ -241,7 +241,7 @@ int sequencer_write(int dev, struct file *file, const char __user *buf, int coun + return -ENXIO; + + fmt = (*(short *) &event_rec[0]) & 0xffff; +- err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0); ++ err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0); + if (err < 0) + return err; + +diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c +index 537cfba..863eafe 100644 +--- a/sound/pci/ens1370.c ++++ b/sound/pci/ens1370.c +@@ -229,6 +229,7 @@ MODULE_PARM_DESC(lineio, "Line In to Rear Out (0 = auto, 1 = force)."); + #define ES_REG_1371_CODEC 0x14 /* W/R: Codec Read/Write register address */ + #define ES_1371_CODEC_RDY (1<<31) /* codec ready */ + #define ES_1371_CODEC_WIP (1<<30) /* codec register access in progress */ ++#define EV_1938_CODEC_MAGIC (1<<26) + #define ES_1371_CODEC_PIRD (1<<23) /* codec read/write select register */ + #define ES_1371_CODEC_WRITE(a,d) ((((a)&0x7f)<<16)|(((d)&0xffff)<<0)) + #define ES_1371_CODEC_READS(a) ((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD) +@@ -603,12 +604,18 @@ static void snd_es1370_codec_write(struct snd_ak4531 *ak4531, + + #ifdef CHIP1371 + ++static inline bool is_ev1938(struct ensoniq *ensoniq) ++{ ++ return ensoniq->pci->device == 0x8938; ++} ++ + static void snd_es1371_codec_write(struct snd_ac97 *ac97, + unsigned short reg, unsigned short val) + { + struct ensoniq *ensoniq = ac97->private_data; +- unsigned int t, x; ++ unsigned int t, x, flag; + ++ flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0; + mutex_lock(&ensoniq->src_mutex); + for (t = 0; t < POLL_COUNT; t++) { + if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) { +@@ -630,7 +637,8 @@ static void snd_es1371_codec_write(struct snd_ac97 *ac97, + 0x00010000) + break; + } +- outl(ES_1371_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1371_CODEC)); ++ outl(ES_1371_CODEC_WRITE(reg, val) | flag, ++ ES_REG(ensoniq, 1371_CODEC)); + /* restore SRC reg */ + snd_es1371_wait_src_ready(ensoniq); + outl(x, ES_REG(ensoniq, 1371_SMPRATE)); +@@ -647,8 +655,9 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, + unsigned short reg) + { + struct ensoniq *ensoniq = ac97->private_data; +- unsigned int t, x, fail = 0; ++ unsigned int t, x, flag, fail = 0; + ++ flag = is_ev1938(ensoniq) ? 
EV_1938_CODEC_MAGIC : 0; + __again: + mutex_lock(&ensoniq->src_mutex); + for (t = 0; t < POLL_COUNT; t++) { +@@ -671,7 +680,8 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, + 0x00010000) + break; + } +- outl(ES_1371_CODEC_READS(reg), ES_REG(ensoniq, 1371_CODEC)); ++ outl(ES_1371_CODEC_READS(reg) | flag, ++ ES_REG(ensoniq, 1371_CODEC)); + /* restore SRC reg */ + snd_es1371_wait_src_ready(ensoniq); + outl(x, ES_REG(ensoniq, 1371_SMPRATE)); +@@ -683,6 +693,11 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, + /* now wait for the stinkin' data (RDY) */ + for (t = 0; t < POLL_COUNT; t++) { + if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) { ++ if (is_ev1938(ensoniq)) { ++ for (t = 0; t < 100; t++) ++ inl(ES_REG(ensoniq, CONTROL)); ++ x = inl(ES_REG(ensoniq, 1371_CODEC)); ++ } + mutex_unlock(&ensoniq->src_mutex); + return ES_1371_CODEC_READ(x); + } +diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c +index 8dabab7..7aee900 100644 +--- a/sound/pci/hda/patch_analog.c ++++ b/sound/pci/hda/patch_analog.c +@@ -4353,6 +4353,84 @@ static int ad1984a_thinkpad_init(struct hda_codec *codec) + } + + /* ++ * Precision R5500 ++ * 0x12 - HP/line-out ++ * 0x13 - speaker (mono) ++ * 0x15 - mic-in ++ */ ++ ++static struct hda_verb ad1984a_precision_verbs[] = { ++ /* Unmute main output path */ ++ {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */ ++ {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x1f}, /* 0dB */ ++ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5) + 0x17}, /* 0dB */ ++ /* Analog mixer; mute as default */ ++ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, ++ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, ++ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, ++ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, ++ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, ++ /* Select mic as input */ ++ {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1}, ++ {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x27}, /* 0dB */ ++ /* Configure as mic */ ++ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, ++ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */ ++ /* HP unmute */ ++ {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, ++ /* turn on EAPD */ ++ {0x13, AC_VERB_SET_EAPD_BTLENABLE, 0x02}, ++ /* unsolicited event for pin-sense */ ++ {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT}, ++ { } /* end */ ++}; ++ ++static struct snd_kcontrol_new ad1984a_precision_mixers[] = { ++ HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT), ++ HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT), ++ HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT), ++ HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT), ++ HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT), ++ HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT), ++ HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT), ++ HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT), ++ HDA_CODEC_VOLUME("Speaker Playback Volume", 0x13, 0x0, HDA_OUTPUT), ++ HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), ++ HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), ++ { } /* end */ ++}; ++ ++ ++/* mute internal speaker if HP is plugged */ ++static void ad1984a_precision_automute(struct hda_codec *codec) ++{ ++ unsigned int present; ++ ++ present = snd_hda_jack_detect(codec, 0x12); ++ snd_hda_codec_amp_stereo(codec, 0x13, HDA_OUTPUT, 0, ++ HDA_AMP_MUTE, present ? 
HDA_AMP_MUTE : 0); ++} ++ ++ ++/* unsolicited event for HP jack sensing */ ++static void ad1984a_precision_unsol_event(struct hda_codec *codec, ++ unsigned int res) ++{ ++ if ((res >> 26) != AD1884A_HP_EVENT) ++ return; ++ ad1984a_precision_automute(codec); ++} ++ ++/* initialize jack-sensing, too */ ++static int ad1984a_precision_init(struct hda_codec *codec) ++{ ++ ad198x_init(codec); ++ ad1984a_precision_automute(codec); ++ return 0; ++} ++ ++ ++/* + * HP Touchsmart + * port-A (0x11) - front hp-out + * port-B (0x14) - unused +@@ -4481,6 +4559,7 @@ enum { + AD1884A_MOBILE, + AD1884A_THINKPAD, + AD1984A_TOUCHSMART, ++ AD1984A_PRECISION, + AD1884A_MODELS + }; + +@@ -4490,9 +4569,11 @@ static const char * const ad1884a_models[AD1884A_MODELS] = { + [AD1884A_MOBILE] = "mobile", + [AD1884A_THINKPAD] = "thinkpad", + [AD1984A_TOUCHSMART] = "touchsmart", ++ [AD1984A_PRECISION] = "precision", + }; + + static struct snd_pci_quirk ad1884a_cfg_tbl[] = { ++ SND_PCI_QUIRK(0x1028, 0x04ac, "Precision R5500", AD1984A_PRECISION), + SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE), + SND_PCI_QUIRK(0x103c, 0x3037, "HP 2230s", AD1884A_LAPTOP), + SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE), +@@ -4586,6 +4667,14 @@ static int patch_ad1884a(struct hda_codec *codec) + codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event; + codec->patch_ops.init = ad1984a_thinkpad_init; + break; ++ case AD1984A_PRECISION: ++ spec->mixers[0] = ad1984a_precision_mixers; ++ spec->init_verbs[spec->num_init_verbs++] = ++ ad1984a_precision_verbs; ++ spec->multiout.dig_out_nid = 0; ++ codec->patch_ops.unsol_event = ad1984a_precision_unsol_event; ++ codec->patch_ops.init = ad1984a_precision_init; ++ break; + case AD1984A_TOUCHSMART: + spec->mixers[0] = ad1984a_touchsmart_mixers; + spec->init_verbs[0] = ad1984a_touchsmart_verbs; +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 4d5004e..e33d69e 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -3130,6 +3130,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), + SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ + {} +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index ec0fa2d..520f94a 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -1276,6 +1276,39 @@ static int simple_playback_pcm_prepare(struct hda_pcm_stream *hinfo, + stream_tag, format, substream); + } + ++static void nvhdmi_8ch_7x_set_info_frame_parameters(struct hda_codec *codec, ++ int channels) ++{ ++ unsigned int chanmask; ++ int chan = channels ? (channels - 1) : 1; ++ ++ switch (channels) { ++ default: ++ case 0: ++ case 2: ++ chanmask = 0x00; ++ break; ++ case 4: ++ chanmask = 0x08; ++ break; ++ case 6: ++ chanmask = 0x0b; ++ break; ++ case 8: ++ chanmask = 0x13; ++ break; ++ } ++ ++ /* Set the audio infoframe channel allocation and checksum fields. The ++ * channel count is computed implicitly by the hardware. 
*/ ++ snd_hda_codec_write(codec, 0x1, 0, ++ Nv_VERB_SET_Channel_Allocation, chanmask); ++ ++ snd_hda_codec_write(codec, 0x1, 0, ++ Nv_VERB_SET_Info_Frame_Checksum, ++ (0x71 - chan - chanmask)); ++} ++ + static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) +@@ -1294,6 +1327,10 @@ static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, + AC_VERB_SET_STREAM_FORMAT, 0); + } + ++ /* The audio hardware sends a channel count of 0x7 (8ch) when all the ++ * streams are disabled. */ ++ nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); ++ + return snd_hda_multi_out_dig_close(codec, &spec->multiout); + } + +@@ -1304,37 +1341,16 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, + struct snd_pcm_substream *substream) + { + int chs; +- unsigned int dataDCC1, dataDCC2, chan, chanmask, channel_id; ++ unsigned int dataDCC1, dataDCC2, channel_id; + int i; + + mutex_lock(&codec->spdif_mutex); + + chs = substream->runtime->channels; +- chan = chs ? (chs - 1) : 1; + +- switch (chs) { +- default: +- case 0: +- case 2: +- chanmask = 0x00; +- break; +- case 4: +- chanmask = 0x08; +- break; +- case 6: +- chanmask = 0x0b; +- break; +- case 8: +- chanmask = 0x13; +- break; +- } + dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT; + dataDCC2 = 0x2; + +- /* set the Audio InforFrame Channel Allocation */ +- snd_hda_codec_write(codec, 0x1, 0, +- Nv_VERB_SET_Channel_Allocation, chanmask); +- + /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */ + if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE)) + snd_hda_codec_write(codec, +@@ -1409,10 +1425,7 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, + } + } + +- /* set the Audio Info Frame Checksum */ +- snd_hda_codec_write(codec, 0x1, 0, +- Nv_VERB_SET_Info_Frame_Checksum, +- (0x71 - chan - chanmask)); ++ nvhdmi_8ch_7x_set_info_frame_parameters(codec, chs); + + mutex_unlock(&codec->spdif_mutex); + return 0; +@@ -1508,6 +1521,11 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec) + spec->multiout.max_channels = 8; + spec->pcm_playback = &nvhdmi_pcm_playback_8ch_7x; + codec->patch_ops = nvhdmi_patch_ops_8ch_7x; ++ ++ /* Initialize the audio infoframe channel mask and checksum to something ++ * valid */ ++ nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); ++ + return 0; + } + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index c2eb6a7..e164a4b 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -1360,7 +1360,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) + case 0x10ec0883: + case 0x10ec0885: + case 0x10ec0887: +- case 0x10ec0889: ++ /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ + alc889_coef_init(codec); + break; + case 0x10ec0888: +@@ -14191,7 +14191,7 @@ static hda_nid_t alc269vb_capsrc_nids[1] = { + }; + + static hda_nid_t alc269_adc_candidates[] = { +- 0x08, 0x09, 0x07, ++ 0x08, 0x09, 0x07, 0x11, + }; + + #define alc269_modes alc260_modes +diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c +index 671ef8d..aab7765 100644 +--- a/sound/soc/imx/imx-pcm-dma-mx2.c ++++ b/sound/soc/imx/imx-pcm-dma-mx2.c +@@ -110,12 +110,12 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream, + slave_config.direction = DMA_TO_DEVICE; + slave_config.dst_addr = dma_params->dma_addr; + slave_config.dst_addr_width = buswidth; +- slave_config.dst_maxburst = dma_params->burstsize; ++ 
slave_config.dst_maxburst = dma_params->burstsize * buswidth; + } else { + slave_config.direction = DMA_FROM_DEVICE; + slave_config.src_addr = dma_params->dma_addr; + slave_config.src_addr_width = buswidth; +- slave_config.src_maxburst = dma_params->burstsize; ++ slave_config.src_maxburst = dma_params->burstsize * buswidth; + } + + ret = dmaengine_slave_config(iprtd->dma_chan, &slave_config); +@@ -303,6 +303,11 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = { + + static int __devinit imx_soc_platform_probe(struct platform_device *pdev) + { ++ struct imx_ssi *ssi = platform_get_drvdata(pdev); ++ ++ ssi->dma_params_tx.burstsize = 6; ++ ssi->dma_params_rx.burstsize = 4; ++ + return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); + } + +diff --git a/sound/soc/imx/imx-ssi.h b/sound/soc/imx/imx-ssi.h +index a4406a1..dc8a875 100644 +--- a/sound/soc/imx/imx-ssi.h ++++ b/sound/soc/imx/imx-ssi.h +@@ -234,7 +234,4 @@ void imx_pcm_free(struct snd_pcm *pcm); + */ + #define IMX_SSI_DMABUF_SIZE (64 * 1024) + +-#define DMA_RXFIFO_BURST 0x4 +-#define DMA_TXFIFO_BURST 0x6 +- + #endif /* _IMX_SSI_H */ +diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c +index 784cff5..9027da4 100644 +--- a/sound/soc/pxa/corgi.c ++++ b/sound/soc/pxa/corgi.c +@@ -310,7 +310,7 @@ static struct snd_soc_dai_link corgi_dai = { + .cpu_dai_name = "pxa2xx-i2s", + .codec_dai_name = "wm8731-hifi", + .platform_name = "pxa-pcm-audio", +- .codec_name = "wm8731-codec-0.001b", ++ .codec_name = "wm8731-codec.0-001b", + .init = corgi_wm8731_init, + .ops = &corgi_ops, + }; diff --git a/patches.rpmify/buildhost b/patches.rpmify/buildhost index 4e82893..5f64d12 100644 --- a/patches.rpmify/buildhost +++ b/patches.rpmify/buildhost @@ -7,21 +7,20 @@ and "geeko". Signed-off-by: Andreas Gruenbacher - scripts/mkcompile_h | 17 +++-------------- - 1 file changed, 3 insertions(+), 14 deletions(-) + scripts/mkcompile_h | 16 +++------------- + 1 file changed, 3 insertions(+), 13 deletions(-) --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h -@@ -64,20 +64,9 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" +@@ -64,19 +64,9 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" echo \#define LINUX_COMPILE_TIME \"`date +%T`\" - echo \#define LINUX_COMPILE_BY \"`whoami`\" - echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\" - -- if [ -x /bin/dnsdomainname ]; then -- domain=`dnsdomainname 2> /dev/null` -- elif [ -x /bin/domainname ]; then +- domain=`dnsdomainname 2> /dev/null` +- if [ -z "$domain" ]; then - domain=`domainname 2> /dev/null` - fi - diff --git a/patches.rpmify/cloneconfig.diff b/patches.rpmify/cloneconfig.diff index 468ef03..4bfb615 100644 --- a/patches.rpmify/cloneconfig.diff +++ b/patches.rpmify/cloneconfig.diff @@ -1,22 +1,25 @@ From: Andreas Gruenbacher Subject: Add ``cloneconfig'' target -Patch-mainline: not yet +Patch-mainline: Submitted 24 Feb 2011 Cloneconfig takes the first configuration it finds which appears to belong to the running kernel, and configures the kernel sources to match this configuration as closely as possible. 
Signed-off-by: Andreas Gruenbacher +Signed-off-by: Jeff Mahoney +--- - scripts/kconfig/Makefile | 16 ++++++++++++++++ - 1 file changed, 16 insertions(+) + scripts/kconfig/Makefile | 17 +++++++++++++++++ + 1 file changed, 17 insertions(+) --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile -@@ -106,6 +106,22 @@ allnoconfig: $(obj)/conf - allmodconfig: $(obj)/conf - $< -m $(Kconfig) +@@ -99,6 +99,23 @@ PHONY += allnoconfig allyesconfig allmod + allnoconfig allyesconfig allmodconfig alldefconfig randconfig: $(obj)/conf + $< --$@ $(Kconfig) ++ +UNAME_RELEASE := $(shell uname -r) +CLONECONFIG := $(firstword $(wildcard /proc/config.gz \ + /lib/modules/$(UNAME_RELEASE)/.config \ @@ -31,8 +34,8 @@ Signed-off-by: Andreas Gruenbacher + *) cat $(CLONECONFIG) > .config.running ;; \ + esac && \ + echo -e "Cloning configuration file $(CLONECONFIG)\n" -+ $(Q)$< -D .config.running arch/$(SRCARCH)/Kconfig ++ $(Q)$< --defconfig=.config.running arch/$(SRCARCH)/Kconfig + - defconfig: $(obj)/conf - ifeq ($(KBUILD_DEFCONFIG),) - $< -d $(Kconfig) + + PHONY += listnewconfig oldnoconfig savedefconfig defconfig + diff --git a/patches.rpmify/dmar-fix-section-mismatch b/patches.rpmify/dmar-fix-section-mismatch deleted file mode 100644 index 39f69b4..0000000 --- a/patches.rpmify/dmar-fix-section-mismatch +++ /dev/null @@ -1,26 +0,0 @@ -From: Jeff Mahoney -Subject: dmar: Fix section mismatch -Patch-mainline: Next linux-next sync -Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/sfr/linux-next.git -Git-commit: 0b8973a81876d90f916507ac40d1381068dc986a - - dmar_ir_support uses dmar_tbl, which is __initdata. dmar_ir_support is - only called by intr_remapping_supported, which is __init. So, we mark - dmar_ir_support as __init as well. - -Signed-off-by: Jeff Mahoney ---- - drivers/pci/dmar.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/pci/dmar.c -+++ b/drivers/pci/dmar.c -@@ -1456,7 +1456,7 @@ int dmar_reenable_qi(struct intel_iommu - /* - * Check interrupt remapping support in DMAR table description. - */ --int dmar_ir_support(void) -+int __init dmar_ir_support(void) - { - struct acpi_table_dmar *dmar; - dmar = (struct acpi_table_dmar *)dmar_tbl; diff --git a/patches.rpmify/dw_spi-fix-PPC-build.patch b/patches.rpmify/dw_spi-fix-PPC-build.patch new file mode 100644 index 0000000..dfaf46d --- /dev/null +++ b/patches.rpmify/dw_spi-fix-PPC-build.patch @@ -0,0 +1,37 @@ +From: Jiri Slaby +Date: Fri, 18 Mar 2011 10:37:34 +0100 +Subject: SPI: dw_spi, fix PPC build +Patch-mainline: submitted Mar 18 + +Currently, build on PPC dies with: +In file included from drivers/spi/dw_spi_mmio.c:16: +include/linux/spi/dw_spi.h:147: error: field ‘tx_sgl’ has incomplete type +include/linux/spi/dw_spi.h:149: error: field ‘rx_sgl’ has incomplete type + +Add linux/scatterlist.h include to dw_spi.h, because we need to know +the contents of the structure. 
+ +Signed-off-by: Jiri Slaby +Cc: David Brownell +Cc: Grant Likely +Cc: Benjamin Herrenschmidt +Cc: Paul Mackerras +--- + include/linux/spi/dw_spi.h | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) + +diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h +index 6cd10f6..fb0bce5 100644 +--- a/include/linux/spi/dw_spi.h ++++ b/include/linux/spi/dw_spi.h +@@ -2,6 +2,7 @@ + #define DW_SPI_HEADER_H + + #include ++#include + + /* Bit fields in CTRLR0 */ + #define SPI_DFS_OFFSET 0 +-- +1.7.4.1 + diff --git a/patches.rpmify/firmware-path b/patches.rpmify/firmware-path index 0c9509e..1aa3d9f 100644 --- a/patches.rpmify/firmware-path +++ b/patches.rpmify/firmware-path @@ -15,7 +15,7 @@ Signed-off-by: Jeff Mahoney --- a/Makefile +++ b/Makefile -@@ -1028,7 +1028,7 @@ depend dep: +@@ -975,7 +975,7 @@ depend dep: # --------------------------------------------------------------------------- # Firmware install diff --git a/patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning b/patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning index 9c710da..88149bb 100644 --- a/patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning +++ b/patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning @@ -1,10 +1,20 @@ From: Jeff Mahoney Subject: ia64/mca: Fix cast from integer to pointer warning -Patch-mainline: not yet +Patch-mainline: Submitted 24 Feb 2011 - __get_free_pages() returns an unsigned long that is the address of the - pages returned. ia64_mca_cpu_init wants to use it as a data pointer, so - we cast it as void *. + ia64_mca_cpu_init has a void *data local variable that is assigned + the value from either __get_free_pages() or mca_bootmem(). The problem + is that __get_free_pages returns an unsigned long and mca_bootmem, via + alloc_bootmem(), returns a void *. format_mca_init_stack takes the void *, + and it's also used with __pa(), but that casts it to long anyway. + + This results in the following build warning: + + arch/ia64/kernel/mca.c:1898: warning: assignment makes pointer from + integer without a cast + + This patch casts the return of __get_free_pages to a void * to avoid + the warning. Signed-off-by: Jeff Mahoney --- @@ -13,7 +23,7 @@ Signed-off-by: Jeff Mahoney --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c -@@ -1858,7 +1858,8 @@ ia64_mca_cpu_init(void *cpu_data) +@@ -1859,7 +1859,8 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else diff --git a/patches.rpmify/powerpc-kvm-build-failure-workaround b/patches.rpmify/powerpc-kvm-build-failure-workaround deleted file mode 100644 index e0c61b2..0000000 --- a/patches.rpmify/powerpc-kvm-build-failure-workaround +++ /dev/null @@ -1,31 +0,0 @@ -From: Jeff Mahoney -Subject: powerpc: kvm build failure workaround -Patch-mainline: Hopefully never - - This patch works around an issue with gcc 4.5 that is failing the build - with: - arch/powerpc/kvm/book3s.c:1102:23: error: 'ext_bkp.vrsave' may be used uninitialized in this function - - The warning is incorrect, so we work around it by explicitly setting it to - 0. 
- -Signed-off-by: Jeff Mahoney ---- - arch/powerpc/kvm/book3s.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/arch/powerpc/kvm/book3s.c -+++ b/arch/powerpc/kvm/book3s.c -@@ -1104,6 +1104,12 @@ int __kvmppc_vcpu_run(struct kvm_run *kv - bool save_vsx = current->thread.used_vsr; - ulong ext_msr; - -+#ifdef CONFIG_ALTIVEC -+ /* JDM This is functionally unnecessary but works around an -+ * over-eager unintialized usage checker in gcc 4.5 */ -+ ext_bkp.vrsave = current->thread.vrsave; -+#endif -+ - /* No need to go into the guest when all we do is going out */ - if (signal_pending(current)) { - kvm_run->exit_reason = KVM_EXIT_INTR; diff --git a/patches.rpmify/ppc-crashdump-typefix b/patches.rpmify/ppc-crashdump-typefix deleted file mode 100644 index ff83706..0000000 --- a/patches.rpmify/ppc-crashdump-typefix +++ /dev/null @@ -1,23 +0,0 @@ -From: Jeff Mahoney -Subject: powerpc: use min_t in copy_oldmem_page -Patch-mainline: not yet - - The gcc used in Factory considers the comparison of csize and PAGE_SIZE - to be invalid and causes a build failure. This patch forces it to use size_t. - -Signed-off-by: Jeff Mahoney ---- - arch/powerpc/kernel/crash_dump.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/powerpc/kernel/crash_dump.c -+++ b/arch/powerpc/kernel/crash_dump.c -@@ -128,7 +128,7 @@ ssize_t copy_oldmem_page(unsigned long p - if (!csize) - return 0; - -- csize = min(csize, PAGE_SIZE); -+ csize = min_t(size_t, csize, PAGE_SIZE); - - if (pfn < max_pfn) { - vaddr = __va(pfn << PAGE_SHIFT); diff --git a/patches.rpmify/qla4xx-missing-readq-definition b/patches.rpmify/qla4xx-missing-readq-definition new file mode 100644 index 0000000..2fa247f --- /dev/null +++ b/patches.rpmify/qla4xx-missing-readq-definition @@ -0,0 +1,38 @@ +From: Jeff Mahoney +Subject: qla4xxx: add workaround for missing readq/writeq +Patch-mainline: submitted Sep 21, 2010 + + Commit f4f5df23 added support for ISP82XX devices but unconditionally + used readq/writeq without defining it for architectures that don't + support it natively. + + This patch copies the readq/writeq definitions from the qla2xxx driver + to allow the code to build on e.g. ppc32 hardware. 
+ +Signed-off-by: Jeff Mahoney +--- + drivers/scsi/qla4xxx/ql4_nx.h | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +--- a/drivers/scsi/qla4xxx/ql4_nx.h ++++ b/drivers/scsi/qla4xxx/ql4_nx.h +@@ -776,4 +776,19 @@ struct crb_addr_pair { + #define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) + #define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) + ++#ifndef readq ++static inline u64 readq(void __iomem *addr) ++{ ++ return readl(addr) | (((u64) readl(addr + 4)) << 32LL); ++} ++#endif ++ ++#ifndef writeq ++static inline void writeq(u64 val, void __iomem *addr) ++{ ++ writel(((u32) (val)), (addr)); ++ writel(((u32) (val >> 32)), (addr + 4)); ++} ++#endif ++ + #endif diff --git a/patches.rpmify/rpm-kernel-config b/patches.rpmify/rpm-kernel-config index e3ef193..63e25ac 100644 --- a/patches.rpmify/rpm-kernel-config +++ b/patches.rpmify/rpm-kernel-config @@ -13,10 +13,9 @@ Signed-off-by: Andreas Gruenbacher --- a/init/Kconfig +++ b/init/Kconfig -@@ -1,3 +1,7 @@ +@@ -1,3 +1,6 @@ +config SUSE_KERNEL -+ bool -+ default y ++ def_bool y + config ARCH string diff --git a/patches.rpmify/split-package b/patches.rpmify/split-package index b777922..2c636eb 100644 --- a/patches.rpmify/split-package +++ b/patches.rpmify/split-package @@ -12,9 +12,9 @@ Signed-off-by: Jeff Mahoney --- a/init/Kconfig +++ b/init/Kconfig -@@ -2,6 +2,18 @@ config SUSE_KERNEL - bool - default y +@@ -1,6 +1,18 @@ config SUSE_KERNEL + config SUSE_KERNEL + def_bool y +config SPLIT_PACKAGE + bool "Split the kernel package into multiple RPMs" diff --git a/patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings b/patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings index c8bfe29..2af1b42 100644 --- a/patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings +++ b/patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings @@ -1,6 +1,6 @@ From: Jeff Mahoney Subject: tioca: Fix assignment from incompatible pointer warnings -Patch-mainline: not yet +Patch-mainline: Submitted 24 Feb 2011 The prototype for sn_pci_provider->{dma_map,dma_map_consistent} expects an unsigned long instead of a u64. @@ -12,7 +12,7 @@ Signed-off-by: Jeff Mahoney --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c -@@ -508,7 +508,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dm +@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dm * use the GART mapped mode. */ static u64 diff --git a/patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch b/patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch new file mode 100644 index 0000000..792ea23 --- /dev/null +++ b/patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch @@ -0,0 +1,221 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:44 +0530 +Subject: [PATCH 01/16] vfs: Hooks for more fine-grained directory permission checking +Patch-mainline: not yet + +Add iop->may_create and iop->may_delete for overriding the POSIX file +permission checks when creating and deleting files. File systems can +implement these hooks to support permission models which use different +rules for file creation and deletion. + +When these hooks are not used, the vfs behavior remains unchanged. 
+ +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/namei.c | 82 ++++++++++++++++++++++++++++++++++++++++++----------- + include/linux/fs.h | 4 ++ + 2 files changed, 69 insertions(+), 17 deletions(-) + +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -1283,6 +1283,26 @@ static inline int check_sticky(struct in + } + + /* ++ * Do the directory specific tests of inode_permission() and call the ++ * may_delete inode operation. The may_delete inode operation must do the ++ * sticky check when needed. ++ */ ++static int may_delete_iop(struct inode *dir, struct inode *inode, int replace) ++{ ++ int error; ++ ++ if (IS_RDONLY(dir)) ++ return -EROFS; ++ if (IS_IMMUTABLE(dir)) ++ return -EACCES; ++ error = dir->i_op->may_delete(dir, inode, replace); ++ if (!error) ++ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC); ++ ++ return error; ++} ++ ++/* + * Check whether we can remove a link victim from directory dir, check + * whether the type of victim is right. + * 1. We can't do it if dir is read-only (done in permission()) +@@ -1301,7 +1321,8 @@ static inline int check_sticky(struct in + * 10. We don't allow removal of NFS sillyrenamed files; it's handled by + * nfs_async_unlink(). + */ +-static int may_delete(struct inode *dir,struct dentry *victim,int isdir) ++static int may_delete(struct inode *dir, struct dentry *victim, ++ int isdir, int replace) + { + int error; + +@@ -1310,14 +1331,19 @@ static int may_delete(struct inode *dir, + + BUG_ON(victim->d_parent->d_inode != dir); + audit_inode_child(victim, dir); +- +- error = inode_permission(dir, MAY_WRITE | MAY_EXEC); ++ if (dir->i_op->may_delete) ++ error = may_delete_iop(dir, victim->d_inode, replace); ++ else { ++ error = inode_permission(dir, MAY_WRITE | MAY_EXEC); ++ if (!error && check_sticky(dir, victim->d_inode)) ++ error = -EPERM; ++ } + if (error) + return error; + if (IS_APPEND(dir)) + return -EPERM; +- if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)|| +- IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) ++ if (IS_APPEND(victim->d_inode) || IS_IMMUTABLE(victim->d_inode) || ++ IS_SWAPFILE(victim->d_inode)) + return -EPERM; + if (isdir) { + if (!S_ISDIR(victim->d_inode->i_mode)) +@@ -1333,6 +1359,25 @@ static int may_delete(struct inode *dir, + return 0; + } + ++/* ++ * Do the directory specific tests of inode_permission() and call the ++ * may_create inode operation. ++ */ ++static int may_create_iop(struct inode *dir, int isdir) ++{ ++ int error; ++ ++ if (IS_RDONLY(dir)) ++ return -EROFS; ++ if (IS_IMMUTABLE(dir)) ++ return -EACCES; ++ error = dir->i_op->may_create(dir, isdir); ++ if (!error) ++ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC); ++ ++ return error; ++} ++ + /* Check whether we can create an object with dentry child in directory + * dir. + * 1. We can't do it if child already exists (open has special treatment for +@@ -1341,13 +1386,16 @@ static int may_delete(struct inode *dir, + * 3. We should have write and exec permissions on dir + * 4. 
We can't do it if dir is immutable (done in permission()) + */ +-static inline int may_create(struct inode *dir, struct dentry *child) ++static inline int may_create(struct inode *dir, struct dentry *child, int isdir) + { + if (child->d_inode) + return -EEXIST; + if (IS_DEADDIR(dir)) + return -ENOENT; +- return inode_permission(dir, MAY_WRITE | MAY_EXEC); ++ if (dir->i_op->may_create) ++ return may_create_iop(dir, isdir); ++ else ++ return inode_permission(dir, MAY_WRITE | MAY_EXEC); + } + + /* +@@ -1395,7 +1443,7 @@ void unlock_rename(struct dentry *p1, st + int vfs_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *nd) + { +- int error = may_create(dir, dentry); ++ int error = may_create(dir, dentry, 0); + + if (error) + return error; +@@ -1957,7 +2005,7 @@ EXPORT_SYMBOL_GPL(lookup_create); + + int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) + { +- int error = may_create(dir, dentry); ++ int error = may_create(dir, dentry, 0); + + if (error) + return error; +@@ -2061,7 +2109,7 @@ SYSCALL_DEFINE3(mknod, const char __user + + int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) + { +- int error = may_create(dir, dentry); ++ int error = may_create(dir, dentry, 1); + + if (error) + return error; +@@ -2151,7 +2199,7 @@ void dentry_unhash(struct dentry *dentry + + int vfs_rmdir(struct inode *dir, struct dentry *dentry) + { +- int error = may_delete(dir, dentry, 1); ++ int error = may_delete(dir, dentry, 1, 0); + + if (error) + return error; +@@ -2238,7 +2286,7 @@ SYSCALL_DEFINE1(rmdir, const char __user + + int vfs_unlink(struct inode *dir, struct dentry *dentry) + { +- int error = may_delete(dir, dentry, 0); ++ int error = may_delete(dir, dentry, 0, 0); + + if (error) + return error; +@@ -2346,7 +2394,7 @@ SYSCALL_DEFINE1(unlink, const char __use + + int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) + { +- int error = may_create(dir, dentry); ++ int error = may_create(dir, dentry, 0); + + if (error) + return error; +@@ -2419,7 +2467,7 @@ int vfs_link(struct dentry *old_dentry, + if (!inode) + return -ENOENT; + +- error = may_create(dir, new_dentry); ++ error = may_create(dir, new_dentry, S_ISDIR(inode->i_mode)); + if (error) + return error; + +@@ -2630,14 +2678,14 @@ int vfs_rename(struct inode *old_dir, st + if (old_dentry->d_inode == new_dentry->d_inode) + return 0; + +- error = may_delete(old_dir, old_dentry, is_dir); ++ error = may_delete(old_dir, old_dentry, is_dir, 0); + if (error) + return error; + + if (!new_dentry->d_inode) +- error = may_create(new_dir, new_dentry); ++ error = may_create(new_dir, new_dentry, is_dir); + else +- error = may_delete(new_dir, new_dentry, is_dir); ++ error = may_delete(new_dir, new_dentry, is_dir, 1); + if (error) + return error; + +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -1542,6 +1542,10 @@ struct inode_operations { + void (*truncate_range)(struct inode *, loff_t, loff_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, + u64 len); ++ int (*may_create) (struct inode *, int); ++ int (*may_delete) (struct inode *, struct inode *, int); ++ ++ + } ____cacheline_aligned; + + struct seq_file; diff --git a/patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch b/patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch new file mode 100644 index 0000000..2146eef --- /dev/null +++ b/patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch @@ -0,0 +1,73 @@ +From: Andreas Gruenbacher +Date: Fri, 
11 Jun 2010 16:12:45 +0530 +Subject: [PATCH 02/16] vfs: Add generic IS_ACL() test for acl support +Patch-mainline: not yet + +When IS_POSIXACL() is true, the vfs does not apply the umask. Other acl +models will need the same exception, so introduce a separate IS_ACL() +test. + +The IS_POSIX_ACL() test is still needed so that nfsd can determine when +the underlying file system supports POSIX ACLs (as opposed to some other +kind). + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/namei.c | 6 +++--- + include/linux/fs.h | 8 +++++++- + 2 files changed, 10 insertions(+), 4 deletions(-) + +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -2215,7 +2215,7 @@ static int __open_namei_create(struct na + int error; + struct dentry *dir = nd->path.dentry; + +- if (!IS_POSIXACL(dir->d_inode)) ++ if (!IS_ACL(dir->d_inode)) + mode &= ~current_umask(); + error = security_path_mknod(&nd->path, path->dentry, mode, 0); + if (error) +@@ -2749,7 +2749,7 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const + error = PTR_ERR(dentry); + goto out_unlock; + } +- if (!IS_POSIXACL(nd.path.dentry->d_inode)) ++ if (!IS_ACL(nd.path.dentry->d_inode)) + mode &= ~current_umask(); + error = may_mknod(mode); + if (error) +@@ -2826,7 +2826,7 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const + if (IS_ERR(dentry)) + goto out_unlock; + +- if (!IS_POSIXACL(nd.path.dentry->d_inode)) ++ if (!IS_ACL(nd.path.dentry->d_inode)) + mode &= ~current_umask(); + error = mnt_want_write(nd.path.mnt); + if (error) +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -205,7 +205,7 @@ struct inodes_stat_t { + #define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. */ + #define MS_SILENT 32768 +-#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ ++#define MS_POSIXACL (1<<16) /* Supports POSIX ACLs */ + #define MS_UNBINDABLE (1<<17) /* change to unbindable */ + #define MS_PRIVATE (1<<18) /* change to private */ + #define MS_SLAVE (1<<19) /* change to slave */ +@@ -280,6 +280,12 @@ struct inodes_stat_t { + #define IS_IMA(inode) ((inode)->i_flags & S_IMA) + #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) + ++/* ++ * IS_ACL() tells the VFS to not apply the umask ++ * and use iop->check_acl for acl permission checks when defined. ++ */ ++#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL) ++ + /* the read-only stuff doesn't really belong here, but any other place is + probably as bad and I don't want to create yet another include file. */ + diff --git a/patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch b/patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch new file mode 100644 index 0000000..9707339 --- /dev/null +++ b/patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch @@ -0,0 +1,42 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:45 +0530 +Subject: [PATCH 03/16] vfs: Add IS_RICHACL() test for richacl support +Patch-mainline: not yet + +Introduce a new MS_RICHACL super-block flag and a new IS_RICHACL() test +which file systems like nfs can use. IS_ACL() is true if IS_POSIXACL() +or IS_RICHACL() is true. 
+ +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + include/linux/fs.h | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -217,6 +217,7 @@ struct inodes_stat_t { + #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ + #define MS_I_VERSION (1<<23) /* Update inode I_version field */ + #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ ++#define MS_RICHACL (1<<25) /* Supports richacls */ + #define MS_BORN (1<<29) + #define MS_ACTIVE (1<<30) + #define MS_NOUSER (1<<31) +@@ -273,6 +274,7 @@ struct inodes_stat_t { + #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) + #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) + #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL) ++#define IS_RICHACL(inode) __IS_FLG(inode, MS_RICHACL) + + #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) + #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) +@@ -283,7 +285,7 @@ struct inodes_stat_t { + * IS_ACL() tells the VFS to not apply the umask + * and use iop->check_acl for acl permission checks when defined. + */ +-#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL) ++#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL | MS_RICHACL) + + /* the read-only stuff doesn't really belong here, but any other place is + probably as bad and I don't want to create yet another include file. */ diff --git a/patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch b/patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch new file mode 100644 index 0000000..67206b5 --- /dev/null +++ b/patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch @@ -0,0 +1,415 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:46 +0530 +Subject: [PATCH 04/16] richacl: In-memory representation and helper functions +Patch-mainline: not yet + +A richacl consists of an NFSv4 acl and an owner, group, and other mask. +These three masks correspond to the owner, group, and other file +permission bits, but they contain NFSv4 permissions instead of POSIX +permissions. + +Each entry in the NFSv4 acl applies to the file owner (OWNER@), the +owning group (GROUP@), literally everyone (EVERYONE@), or to a specific +uid or gid. + +As in the standard POSIX file permission model, each process is the +owner, group, or other file class. A richacl grants a requested access +only if the NFSv4 acl in the richacl grants the access (according to the +NFSv4 permission check algorithm), and the file mask that applies to the +process includes the requested permissions. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/Kconfig | 4 + fs/Makefile | 3 + fs/richacl_base.c | 109 +++++++++++++++++++++ + include/linux/richacl.h | 245 ++++++++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 361 insertions(+) + create mode 100644 fs/richacl_base.c + create mode 100644 include/linux/richacl.h + +--- a/fs/Kconfig ++++ b/fs/Kconfig +@@ -39,6 +39,9 @@ config FS_POSIX_ACL + source "fs/reiserfs/Kconfig" + source "fs/jfs/Kconfig" + ++config FS_RICHACL ++ bool ++ + source "fs/xfs/Kconfig" + source "fs/gfs2/Kconfig" + source "fs/ocfs2/Kconfig" +--- a/fs/Makefile ++++ b/fs/Makefile +@@ -51,6 +51,9 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl. 
+ obj-$(CONFIG_NFS_COMMON) += nfs_common/ + obj-$(CONFIG_GENERIC_ACL) += generic_acl.o + ++obj-$(CONFIG_FS_RICHACL) += richacl.o ++richacl-y := richacl_base.o ++ + obj-y += quota/ + + obj-$(CONFIG_PROC_FS) += proc/ +--- /dev/null ++++ b/fs/richacl_base.c +@@ -0,0 +1,109 @@ ++/* ++ * Copyright (C) 2006, 2010 Novell, Inc. ++ * Written by Andreas Gruenbacher ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include ++ ++MODULE_LICENSE("GPL"); ++ ++/* ++ * Special e_who identifiers: ACEs which have ACE4_SPECIAL_WHO set in ++ * ace->e_flags use these constants in ace->u.e_who. ++ * ++ * For efficiency, we compare pointers instead of comparing strings. ++ */ ++const char richace_owner_who[] = "OWNER@"; ++EXPORT_SYMBOL_GPL(richace_owner_who); ++const char richace_group_who[] = "GROUP@"; ++EXPORT_SYMBOL_GPL(richace_group_who); ++const char richace_everyone_who[] = "EVERYONE@"; ++EXPORT_SYMBOL_GPL(richace_everyone_who); ++ ++/** ++ * richacl_alloc - allocate a richacl ++ * @count: number of entries ++ */ ++struct richacl * ++richacl_alloc(int count) ++{ ++ size_t size = sizeof(struct richacl) + count * sizeof(struct richace); ++ struct richacl *acl = kzalloc(size, GFP_KERNEL); ++ ++ if (acl) { ++ atomic_set(&acl->a_refcount, 1); ++ acl->a_count = count; ++ } ++ return acl; ++} ++EXPORT_SYMBOL_GPL(richacl_alloc); ++ ++/** ++ * richacl_clone - create a copy of a richacl ++ */ ++static struct richacl * ++richacl_clone(const struct richacl *acl) ++{ ++ int count = acl->a_count; ++ size_t size = sizeof(struct richacl) + count * sizeof(struct richace); ++ struct richacl *dup = kmalloc(size, GFP_KERNEL); ++ ++ if (dup) { ++ memcpy(dup, acl, size); ++ atomic_set(&dup->a_refcount, 1); ++ } ++ return dup; ++} ++ ++/** ++ * richace_is_same_identifier - are both identifiers the same? ++ */ ++int ++richace_is_same_identifier(const struct richace *a, const struct richace *b) ++{ ++#define WHO_FLAGS (ACE4_SPECIAL_WHO | ACE4_IDENTIFIER_GROUP) ++ if ((a->e_flags & WHO_FLAGS) != (b->e_flags & WHO_FLAGS)) ++ return 0; ++ if (a->e_flags & ACE4_SPECIAL_WHO) ++ return a->u.e_who == b->u.e_who; ++ else ++ return a->u.e_id == b->u.e_id; ++#undef WHO_FLAGS ++} ++ ++/** ++ * richacl_set_who - set a special who value ++ * @ace: acl entry ++ * @who: who value to use ++ */ ++int ++richace_set_who(struct richace *ace, const char *who) ++{ ++ if (!strcmp(who, richace_owner_who)) ++ who = richace_owner_who; ++ else if (!strcmp(who, richace_group_who)) ++ who = richace_group_who; ++ else if (!strcmp(who, richace_everyone_who)) ++ who = richace_everyone_who; ++ else ++ return -EINVAL; ++ ++ ace->u.e_who = who; ++ ace->e_flags |= ACE4_SPECIAL_WHO; ++ ace->e_flags &= ~ACE4_IDENTIFIER_GROUP; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(richace_set_who); +--- /dev/null ++++ b/include/linux/richacl.h +@@ -0,0 +1,245 @@ ++/* ++ * Copyright (C) 2006, 2010 Novell, Inc. 
++ * Written by Andreas Gruenbacher ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ */ ++ ++#ifndef __RICHACL_H ++#define __RICHACL_H ++#include ++ ++struct richace { ++ unsigned short e_type; ++ unsigned short e_flags; ++ unsigned int e_mask; ++ union { ++ unsigned int e_id; ++ const char *e_who; ++ } u; ++}; ++ ++struct richacl { ++ atomic_t a_refcount; ++ unsigned int a_owner_mask; ++ unsigned int a_group_mask; ++ unsigned int a_other_mask; ++ unsigned short a_count; ++ unsigned short a_flags; ++ struct richace a_entries[0]; ++}; ++ ++#define richacl_for_each_entry(_ace, _acl) \ ++ for (_ace = _acl->a_entries; \ ++ _ace != _acl->a_entries + _acl->a_count; \ ++ _ace++) ++ ++#define richacl_for_each_entry_reverse(_ace, _acl) \ ++ for (_ace = _acl->a_entries + _acl->a_count - 1; \ ++ _ace != _acl->a_entries - 1; \ ++ _ace--) ++ ++/* e_type values */ ++#define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000 ++#define ACE4_ACCESS_DENIED_ACE_TYPE 0x0001 ++/*#define ACE4_SYSTEM_AUDIT_ACE_TYPE 0x0002*/ ++/*#define ACE4_SYSTEM_ALARM_ACE_TYPE 0x0003*/ ++ ++/* e_flags bitflags */ ++#define ACE4_FILE_INHERIT_ACE 0x0001 ++#define ACE4_DIRECTORY_INHERIT_ACE 0x0002 ++#define ACE4_NO_PROPAGATE_INHERIT_ACE 0x0004 ++#define ACE4_INHERIT_ONLY_ACE 0x0008 ++/*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/ ++/*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/ ++#define ACE4_IDENTIFIER_GROUP 0x0040 ++/* in-memory representation only */ ++#define ACE4_SPECIAL_WHO 0x4000 ++ ++#define ACE4_VALID_FLAGS ( \ ++ ACE4_FILE_INHERIT_ACE | \ ++ ACE4_DIRECTORY_INHERIT_ACE | \ ++ ACE4_NO_PROPAGATE_INHERIT_ACE | \ ++ ACE4_INHERIT_ONLY_ACE | \ ++ ACE4_IDENTIFIER_GROUP) ++ ++/* e_mask bitflags */ ++#define ACE4_READ_DATA 0x00000001 ++#define ACE4_LIST_DIRECTORY 0x00000001 ++#define ACE4_WRITE_DATA 0x00000002 ++#define ACE4_ADD_FILE 0x00000002 ++#define ACE4_APPEND_DATA 0x00000004 ++#define ACE4_ADD_SUBDIRECTORY 0x00000004 ++#define ACE4_READ_NAMED_ATTRS 0x00000008 ++#define ACE4_WRITE_NAMED_ATTRS 0x00000010 ++#define ACE4_EXECUTE 0x00000020 ++#define ACE4_DELETE_CHILD 0x00000040 ++#define ACE4_READ_ATTRIBUTES 0x00000080 ++#define ACE4_WRITE_ATTRIBUTES 0x00000100 ++#define ACE4_WRITE_RETENTION 0x00000200 ++#define ACE4_WRITE_RETENTION_HOLD 0x00000400 ++#define ACE4_DELETE 0x00010000 ++#define ACE4_READ_ACL 0x00020000 ++#define ACE4_WRITE_ACL 0x00040000 ++#define ACE4_WRITE_OWNER 0x00080000 ++#define ACE4_SYNCHRONIZE 0x00100000 ++ ++/* Valid ACE4_* flags for directories and non-directories */ ++#define ACE4_VALID_MASK ( \ ++ ACE4_READ_DATA | ACE4_LIST_DIRECTORY | \ ++ ACE4_WRITE_DATA | ACE4_ADD_FILE | \ ++ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \ ++ ACE4_READ_NAMED_ATTRS | \ ++ ACE4_WRITE_NAMED_ATTRS | \ ++ ACE4_EXECUTE | \ ++ ACE4_DELETE_CHILD | \ ++ ACE4_READ_ATTRIBUTES | \ ++ ACE4_WRITE_ATTRIBUTES | \ ++ ACE4_WRITE_RETENTION | \ ++ ACE4_WRITE_RETENTION_HOLD | \ ++ ACE4_DELETE | \ ++ ACE4_READ_ACL | \ ++ ACE4_WRITE_ACL | \ ++ ACE4_WRITE_OWNER | \ ++ ACE4_SYNCHRONIZE) ++ ++/* These permissions are always allowed no matter what the acl says. 
*/ ++#define ACE4_POSIX_ALWAYS_ALLOWED ( \ ++ ACE4_SYNCHRONIZE | \ ++ ACE4_READ_ATTRIBUTES | \ ++ ACE4_READ_ACL) ++ ++/** ++ * richacl_get - grab another reference to a richacl handle ++ */ ++static inline struct richacl * ++richacl_get(struct richacl *acl) ++{ ++ if (acl) ++ atomic_inc(&acl->a_refcount); ++ return acl; ++} ++ ++/** ++ * richacl_put - free a richacl handle ++ */ ++static inline void ++richacl_put(struct richacl *acl) ++{ ++ if (acl && atomic_dec_and_test(&acl->a_refcount)) ++ kfree(acl); ++} ++ ++/* ++ * Special e_who identifiers: we use these pointer values in comparisons ++ * instead of doing a strcmp. ++ */ ++extern const char richace_owner_who[]; ++extern const char richace_group_who[]; ++extern const char richace_everyone_who[]; ++ ++/** ++ * richace_is_owner - check if @ace is an OWNER@ entry ++ */ ++static inline int ++richace_is_owner(const struct richace *ace) ++{ ++ return (ace->e_flags & ACE4_SPECIAL_WHO) && ++ ace->u.e_who == richace_owner_who; ++} ++ ++/** ++ * richace_is_group - check if @ace is a GROUP@ entry ++ */ ++static inline int ++richace_is_group(const struct richace *ace) ++{ ++ return (ace->e_flags & ACE4_SPECIAL_WHO) && ++ ace->u.e_who == richace_group_who; ++} ++ ++/** ++ * richace_is_everyone - check if @ace is an EVERYONE@ entry ++ */ ++static inline int ++richace_is_everyone(const struct richace *ace) ++{ ++ return (ace->e_flags & ACE4_SPECIAL_WHO) && ++ ace->u.e_who == richace_everyone_who; ++} ++ ++/** ++ * richace_is_unix_id - check if @ace applies to a specific uid or gid ++ */ ++static inline int ++richace_is_unix_id(const struct richace *ace) ++{ ++ return !(ace->e_flags & ACE4_SPECIAL_WHO); ++} ++ ++/** ++ * richace_is_inherit_only - check if @ace is for inheritance only ++ * ++ * ACEs with the %ACE4_INHERIT_ONLY_ACE flag set have no effect during ++ * permission checking. 
++ */ ++static inline int ++richace_is_inherit_only(const struct richace *ace) ++{ ++ return ace->e_flags & ACE4_INHERIT_ONLY_ACE; ++} ++ ++/** ++ * richace_is_inheritable - check if @ace is inheritable ++ */ ++static inline int ++richace_is_inheritable(const struct richace *ace) ++{ ++ return ace->e_flags & (ACE4_FILE_INHERIT_ACE | ++ ACE4_DIRECTORY_INHERIT_ACE); ++} ++ ++/** ++ * richace_clear_inheritance_flags - clear all inheritance flags in @ace ++ */ ++static inline void ++richace_clear_inheritance_flags(struct richace *ace) ++{ ++ ace->e_flags &= ~(ACE4_FILE_INHERIT_ACE | ++ ACE4_DIRECTORY_INHERIT_ACE | ++ ACE4_NO_PROPAGATE_INHERIT_ACE | ++ ACE4_INHERIT_ONLY_ACE); ++} ++ ++/** ++ * richace_is_allow - check if @ace is an %ALLOW type entry ++ */ ++static inline int ++richace_is_allow(const struct richace *ace) ++{ ++ return ace->e_type == ACE4_ACCESS_ALLOWED_ACE_TYPE; ++} ++ ++/** ++ * richace_is_deny - check if @ace is a %DENY type entry ++ */ ++static inline int ++richace_is_deny(const struct richace *ace) ++{ ++ return ace->e_type == ACE4_ACCESS_DENIED_ACE_TYPE; ++} ++ ++extern struct richacl *richacl_alloc(int); ++extern int richace_is_same_identifier(const struct richace *, ++ const struct richace *); ++extern int richace_set_who(struct richace *, const char *); ++ ++#endif /* __RICHACL_H */ diff --git a/patches.suse/0005-richacl-Permission-mapping-functions.patch b/patches.suse/0005-richacl-Permission-mapping-functions.patch new file mode 100644 index 0000000..cb7adc8 --- /dev/null +++ b/patches.suse/0005-richacl-Permission-mapping-functions.patch @@ -0,0 +1,167 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:47 +0530 +Subject: [PATCH 05/16] richacl: Permission mapping functions +Patch-mainline: not yet + +We need to map from POSIX permissions to NFSv4 permissions when a +chmod() is done, from NFSv4 permissions to POSIX permissions when an acl +is set (which implicitly sets the file permission bits), and from the +MAY_READ/MAY_WRITE/MAY_EXEC/MAY_APPEND flags to NFSv4 permissions when +doing an access check in a richacl. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/richacl.h | 27 +++++++++++++ + 2 files changed, 125 insertions(+) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -69,6 +69,104 @@ richacl_clone(const struct richacl *acl) + } + + /** ++ * richacl_mask_to_mode - compute the file permission bits which correspond to @mask ++ * @mask: %ACE4_* permission mask ++ * ++ * See richacl_masks_to_mode(). ++ */ ++static int ++richacl_mask_to_mode(unsigned int mask) ++{ ++ int mode = 0; ++ ++ if (mask & ACE4_POSIX_MODE_READ) ++ mode |= MAY_READ; ++ if (mask & ACE4_POSIX_MODE_WRITE) ++ mode |= MAY_WRITE; ++ if (mask & ACE4_POSIX_MODE_EXEC) ++ mode |= MAY_EXEC; ++ ++ return mode; ++} ++ ++/** ++ * richacl_masks_to_mode - compute the file permission bits from the file masks ++ * ++ * When setting a richacl, we set the file permission bits to indicate maximum ++ * permissions: for example, we set the Write permission when a mask contains ++ * ACE4_APPEND_DATA even if it does not also contain ACE4_WRITE_DATA. ++ * ++ * Permissions which are not in ACE4_POSIX_MODE_READ, ACE4_POSIX_MODE_WRITE, or ++ * ACE4_POSIX_MODE_EXEC cannot be represented in the file permission bits. ++ * Such permissions can still be effective, but not for new files or after a ++ * chmod(), and only if they were set explicitly, for example, by setting a ++ * richacl. 
++ */ ++int ++richacl_masks_to_mode(const struct richacl *acl) ++{ ++ return richacl_mask_to_mode(acl->a_owner_mask) << 6 | ++ richacl_mask_to_mode(acl->a_group_mask) << 3 | ++ richacl_mask_to_mode(acl->a_other_mask); ++} ++EXPORT_SYMBOL_GPL(richacl_masks_to_mode); ++ ++/** ++ * richacl_mode_to_mask - compute a file mask from the lowest three mode bits ++ * ++ * When the file permission bits of a file are set with chmod(), this specifies ++ * the maximum permissions that processes will get. All permissions beyond ++ * that will be removed from the file masks, and become ineffective. ++ * ++ * We also add in the permissions which are always allowed no matter what the ++ * acl says. ++ */ ++unsigned int ++richacl_mode_to_mask(mode_t mode) ++{ ++ unsigned int mask = ACE4_POSIX_ALWAYS_ALLOWED; ++ ++ if (mode & MAY_READ) ++ mask |= ACE4_POSIX_MODE_READ; ++ if (mode & MAY_WRITE) ++ mask |= ACE4_POSIX_MODE_WRITE; ++ if (mode & MAY_EXEC) ++ mask |= ACE4_POSIX_MODE_EXEC; ++ ++ return mask; ++} ++ ++/** ++ * richacl_want_to_mask - convert the iop->permission want argument to a mask ++ * @want: @want argument of the permission inode operation ++ * ++ * When checking for append, @want is (MAY_WRITE | MAY_APPEND). ++ * ++ * Richacls use the iop->may_create and iop->may_delete hooks which are ++ * used for checking if creating and deleting files is allowed. These hooks do ++ * not use richacl_want_to_mask(), so we do not have to deal with mapping ++ * MAY_WRITE to ACE4_ADD_FILE, ACE4_ADD_SUBDIRECTORY, and ACE4_DELETE_CHILD ++ * here. ++ */ ++unsigned int ++richacl_want_to_mask(int want) ++{ ++ unsigned int mask = 0; ++ ++ if (want & MAY_READ) ++ mask |= ACE4_READ_DATA; ++ if (want & MAY_APPEND) ++ mask |= ACE4_APPEND_DATA; ++ else if (want & MAY_WRITE) ++ mask |= ACE4_WRITE_DATA; ++ if (want & MAY_EXEC) ++ mask |= ACE4_EXECUTE; ++ ++ return mask; ++} ++EXPORT_SYMBOL_GPL(richacl_want_to_mask); ++ ++/** + * richace_is_same_identifier - are both identifiers the same? + */ + int +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -111,6 +111,30 @@ struct richacl { + ACE4_WRITE_OWNER | \ + ACE4_SYNCHRONIZE) + ++/* ++ * The POSIX permissions are supersets of the following NFSv4 permissions: ++ * ++ * - MAY_READ maps to READ_DATA or LIST_DIRECTORY, depending on the type ++ * of the file system object. ++ * ++ * - MAY_WRITE maps to WRITE_DATA or ACE4_APPEND_DATA for files, and to ++ * ADD_FILE, ACE4_ADD_SUBDIRECTORY, or ACE4_DELETE_CHILD for directories. ++ * ++ * - MAY_EXECUTE maps to ACE4_EXECUTE. ++ * ++ * (Some of these NFSv4 permissions have the same bit values.) ++ */ ++#define ACE4_POSIX_MODE_READ ( \ ++ ACE4_READ_DATA | ACE4_LIST_DIRECTORY) ++#define ACE4_POSIX_MODE_WRITE ( \ ++ ACE4_WRITE_DATA | ACE4_ADD_FILE | \ ++ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \ ++ ACE4_DELETE_CHILD) ++#define ACE4_POSIX_MODE_EXEC ( \ ++ ACE4_EXECUTE) ++#define ACE4_POSIX_MODE_ALL (ACE4_POSIX_MODE_READ | ACE4_POSIX_MODE_WRITE | \ ++ ACE4_POSIX_MODE_EXEC) ++ + /* These permissions are always allowed no matter what the acl says. 
*/ + #define ACE4_POSIX_ALWAYS_ALLOWED ( \ + ACE4_SYNCHRONIZE | \ +@@ -241,5 +265,8 @@ extern struct richacl *richacl_alloc(int + extern int richace_is_same_identifier(const struct richace *, + const struct richace *); + extern int richace_set_who(struct richace *, const char *); ++extern int richacl_masks_to_mode(const struct richacl *); ++extern unsigned int richacl_mode_to_mask(mode_t); ++extern unsigned int richacl_want_to_mask(int); + + #endif /* __RICHACL_H */ diff --git a/patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch b/patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch new file mode 100644 index 0000000..de094b3 --- /dev/null +++ b/patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch @@ -0,0 +1,164 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:47 +0530 +Subject: [PATCH 06/16] richacl: Compute maximum file masks from an acl +Patch-mainline: not yet + +Compute upper bound owner, group, and other file masks with as few +permissions as possible without denying any permissions that the NFSv4 +acl in a richacl grants. + +This algorithm is used when a file inherits an acl at create time and +when an acl is set via a mechanism that does not specify file modes +(such as via nfsd). When user-space sets an acl, the file masks are +passed in as part of the xattr. + +When setting a richacl, the file masks determine what the file +permission bits will be set to; see richacl_masks_to_mode(). + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/richacl.h | 1 + 2 files changed, 126 insertions(+) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -205,3 +205,128 @@ richace_set_who(struct richace *ace, con + return 0; + } + EXPORT_SYMBOL_GPL(richace_set_who); ++ ++/** ++ * richacl_allowed_to_who - mask flags allowed to a specific who value ++ * ++ * Computes the mask values allowed to a specific who value, taking ++ * EVERYONE@ entries into account. ++ */ ++static unsigned int richacl_allowed_to_who(struct richacl *acl, ++ struct richace *who) ++{ ++ struct richace *ace; ++ unsigned int allowed = 0; ++ ++ richacl_for_each_entry_reverse(ace, acl) { ++ if (richace_is_inherit_only(ace)) ++ continue; ++ if (richace_is_same_identifier(ace, who) || ++ richace_is_everyone(ace)) { ++ if (richace_is_allow(ace)) ++ allowed |= ace->e_mask; ++ else if (richace_is_deny(ace)) ++ allowed &= ~ace->e_mask; ++ } ++ } ++ return allowed; ++} ++ ++/** ++ * richacl_group_class_allowed - maximum permissions the group class is allowed ++ * ++ * See richacl_compute_max_masks(). 
++ */ ++static unsigned int richacl_group_class_allowed(struct richacl *acl) ++{ ++ struct richace *ace; ++ unsigned int everyone_allowed = 0, group_class_allowed = 0; ++ int had_group_ace = 0; ++ ++ richacl_for_each_entry_reverse(ace, acl) { ++ if (richace_is_inherit_only(ace) || ++ richace_is_owner(ace)) ++ continue; ++ ++ if (richace_is_everyone(ace)) { ++ if (richace_is_allow(ace)) ++ everyone_allowed |= ace->e_mask; ++ else if (richace_is_deny(ace)) ++ everyone_allowed &= ~ace->e_mask; ++ } else { ++ group_class_allowed |= ++ richacl_allowed_to_who(acl, ace); ++ ++ if (richace_is_group(ace)) ++ had_group_ace = 1; ++ } ++ } ++ if (!had_group_ace) ++ group_class_allowed |= everyone_allowed; ++ return group_class_allowed; ++} ++ ++/** ++ * richacl_compute_max_masks - compute upper bound masks ++ * ++ * Computes upper bound owner, group, and other masks so that none of ++ * the mask flags allowed by the acl are disabled (for any choice of the ++ * file owner or group membership). ++ */ ++void richacl_compute_max_masks(struct richacl *acl) ++{ ++ unsigned int gmask = ~0; ++ struct richace *ace; ++ ++ /* ++ * @gmask contains all permissions which the group class is ever ++ * allowed. We use it to avoid adding permissions to the group mask ++ * from everyone@ allow aces which the group class is always denied ++ * through other aces. For example, the following acl would otherwise ++ * result in a group mask or rw: ++ * ++ * group@:w::deny ++ * everyone@:rw::allow ++ * ++ * Avoid computing @gmask for acls which do not include any group class ++ * deny aces: in such acls, the group class is never denied any ++ * permissions from everyone@ allow aces. ++ */ ++ ++restart: ++ acl->a_owner_mask = 0; ++ acl->a_group_mask = 0; ++ acl->a_other_mask = 0; ++ ++ richacl_for_each_entry_reverse(ace, acl) { ++ if (richace_is_inherit_only(ace)) ++ continue; ++ ++ if (richace_is_owner(ace)) { ++ if (richace_is_allow(ace)) ++ acl->a_owner_mask |= ace->e_mask; ++ else if (richace_is_deny(ace)) ++ acl->a_owner_mask &= ~ace->e_mask; ++ } else if (richace_is_everyone(ace)) { ++ if (richace_is_allow(ace)) { ++ acl->a_owner_mask |= ace->e_mask; ++ acl->a_group_mask |= ace->e_mask & gmask; ++ acl->a_other_mask |= ace->e_mask; ++ } else if (richace_is_deny(ace)) { ++ acl->a_owner_mask &= ~ace->e_mask; ++ acl->a_group_mask &= ~ace->e_mask; ++ acl->a_other_mask &= ~ace->e_mask; ++ } ++ } else { ++ if (richace_is_allow(ace)) { ++ acl->a_owner_mask |= ace->e_mask & gmask; ++ acl->a_group_mask |= ace->e_mask & gmask; ++ } else if (richace_is_deny(ace) && gmask == ~0) { ++ gmask = richacl_group_class_allowed(acl); ++ if (likely(gmask != ~0)) /* should always be true */ ++ goto restart; ++ } ++ } ++ } ++} ++EXPORT_SYMBOL_GPL(richacl_compute_max_masks); +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -268,5 +268,6 @@ extern int richace_set_who(struct richac + extern int richacl_masks_to_mode(const struct richacl *); + extern unsigned int richacl_mode_to_mask(mode_t); + extern unsigned int richacl_want_to_mask(int); ++extern void richacl_compute_max_masks(struct richacl *); + + #endif /* __RICHACL_H */ diff --git a/patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch b/patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch new file mode 100644 index 0000000..281d4cb --- /dev/null +++ b/patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch @@ -0,0 +1,79 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:47 +0530 +Subject: [PATCH 07/16] richacl: Update the file masks in 
chmod() +Patch-mainline: not yet + +Doing a chmod() sets the file mode, which includes the file permission +bits. When a file has a richacl, the permissions that the richacl +grants need to be limited to what the new file permission bits allow. + +This is done by setting the file masks in the richacl to what the file +permission bits map to. The richacl access check algorithm takes the +file masks into account, which ensures that the richacl cannot grant too +many permissions. + +It is possible to explicitly add permissions to the file masks which go +beyond what the file permission bits can grant (like the ACE4_WRITE_ACL +permission). The POSIX.1 standard calls this an alternate file access +control mechanism. A subsequent chmod() would ensure that those +permissions are disabled again. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 37 +++++++++++++++++++++++++++++++++++++ + include/linux/richacl.h | 1 + + 2 files changed, 38 insertions(+) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -330,3 +330,40 @@ restart: + } + } + EXPORT_SYMBOL_GPL(richacl_compute_max_masks); ++ ++/** ++ * richacl_chmod - update the file masks to reflect the new mode ++ * @mode: new file permission bits ++ * ++ * Return a copy of @acl where the file masks have been replaced by the file ++ * masks corresponding to the file permission bits in @mode, or returns @acl ++ * itself if the file masks are already up to date. Takes over a reference ++ * to @acl. ++ */ ++struct richacl * ++richacl_chmod(struct richacl *acl, mode_t mode) ++{ ++ unsigned int owner_mask, group_mask, other_mask; ++ struct richacl *clone; ++ ++ owner_mask = richacl_mode_to_mask(mode >> 6); ++ group_mask = richacl_mode_to_mask(mode >> 3); ++ other_mask = richacl_mode_to_mask(mode); ++ ++ if (acl->a_owner_mask == owner_mask && ++ acl->a_group_mask == group_mask && ++ acl->a_other_mask == other_mask) ++ return acl; ++ ++ clone = richacl_clone(acl); ++ richacl_put(acl); ++ if (!clone) ++ return ERR_PTR(-ENOMEM); ++ ++ clone->a_owner_mask = owner_mask; ++ clone->a_group_mask = group_mask; ++ clone->a_other_mask = other_mask; ++ ++ return clone; ++} ++EXPORT_SYMBOL_GPL(richacl_chmod); +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -269,5 +269,6 @@ extern int richacl_masks_to_mode(const s + extern unsigned int richacl_mode_to_mask(mode_t); + extern unsigned int richacl_want_to_mask(int); + extern void richacl_compute_max_masks(struct richacl *); ++extern struct richacl *richacl_chmod(struct richacl *, mode_t); + + #endif /* __RICHACL_H */ diff --git a/patches.suse/0008-richacl-Permission-check-algorithm.patch b/patches.suse/0008-richacl-Permission-check-algorithm.patch new file mode 100644 index 0000000..8d5eb7f --- /dev/null +++ b/patches.suse/0008-richacl-Permission-check-algorithm.patch @@ -0,0 +1,130 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:48 +0530 +Subject: [PATCH 08/16] richacl: Permission check algorithm +Patch-mainline: not yet + +As in the standard POSIX file permission model, each process is the +owner, group, or other file class. A process is + + - in the owner file class if it owns the file, + - in the group file class if it is in the file's owning group or it + matches any of the user or group entries, and + - in the other file class otherwise. + +Each file class is associated with a file mask. 
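
As a rough illustration (not part of the code added by this patch), the
class-to-mask selection described above boils down to the following
sketch; the struct and field names are the ones used by the patch:

	/*
	 * Pick the file mask that applies to the current process, given
	 * the file class it falls into (sketch only).
	 */
	static unsigned int applicable_file_mask(const struct richacl *acl,
						 int is_owner,
						 int in_group_class)
	{
		if (is_owner)
			return acl->a_owner_mask;	/* owner file class */
		if (in_group_class)
			return acl->a_group_mask;	/* group file class */
		return acl->a_other_mask;		/* other file class */
	}
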
+ +A richacl grants a requested access if the NFSv4 acl in the richacl +grants the requested permissions (according to the NFSv4 permission +check algorithm) and the file mask that applies to the process includes +the requested permissions. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/richacl.h | 2 + + 2 files changed, 89 insertions(+) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -367,3 +367,90 @@ richacl_chmod(struct richacl *acl, mode_ + return clone; + } + EXPORT_SYMBOL_GPL(richacl_chmod); ++ ++/** ++ * richacl_permission - richacl permission check algorithm ++ * @inode: inode to check ++ * @acl: rich acl of the inode ++ * @mask: requested access (ACE4_* bitmask) ++ * ++ * Checks if the current process is granted @mask flags in @acl. ++ */ ++int ++richacl_permission(struct inode *inode, const struct richacl *acl, ++ unsigned int mask) ++{ ++ const struct richace *ace; ++ unsigned int file_mask, requested = mask, denied = 0; ++ int in_owning_group = in_group_p(inode->i_gid); ++ int in_owner_or_group_class = in_owning_group; ++ ++ /* ++ * A process is ++ * - in the owner file class if it owns the file, ++ * - in the group file class if it is in the file's owning group or ++ * it matches any of the user or group entries, and ++ * - in the other file class otherwise. ++ */ ++ ++ /* ++ * Check if the acl grants the requested access and determine which ++ * file class the process is in. ++ */ ++ richacl_for_each_entry(ace, acl) { ++ unsigned int ace_mask = ace->e_mask; ++ ++ if (richace_is_inherit_only(ace)) ++ continue; ++ if (richace_is_owner(ace)) { ++ if (current_fsuid() != inode->i_uid) ++ continue; ++ goto is_owner; ++ } else if (richace_is_group(ace)) { ++ if (!in_owning_group) ++ continue; ++ } else if (richace_is_unix_id(ace)) { ++ if (ace->e_flags & ACE4_IDENTIFIER_GROUP) { ++ if (!in_group_p(ace->u.e_id)) ++ continue; ++ } else { ++ if (current_fsuid() != ace->u.e_id) ++ continue; ++ } ++ } else ++ goto is_everyone; ++ ++is_owner: ++ /* The process is in the owner or group file class. */ ++ in_owner_or_group_class = 1; ++ ++is_everyone: ++ /* Check which mask flags the ACE allows or denies. */ ++ if (richace_is_deny(ace)) ++ denied |= ace_mask & mask; ++ mask &= ~ace_mask; ++ ++ /* ++ * Keep going until we know which file class ++ * the process is in. ++ */ ++ if (!mask && in_owner_or_group_class) ++ break; ++ } ++ denied |= mask; ++ ++ /* ++ * The file class a process is in determines which file mask applies. ++ * Check if that file mask also grants the requested access. ++ */ ++ if (current_fsuid() == inode->i_uid) ++ file_mask = acl->a_owner_mask; ++ else if (in_owner_or_group_class) ++ file_mask = acl->a_group_mask; ++ else ++ file_mask = acl->a_other_mask; ++ denied |= requested & ~file_mask; ++ ++ return denied ? 
-EACCES : 0; ++} ++EXPORT_SYMBOL_GPL(richacl_permission); +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -270,5 +270,7 @@ extern unsigned int richacl_mode_to_mask + extern unsigned int richacl_want_to_mask(int); + extern void richacl_compute_max_masks(struct richacl *); + extern struct richacl *richacl_chmod(struct richacl *, mode_t); ++extern int richacl_permission(struct inode *, const struct richacl *, ++ unsigned int); + + #endif /* __RICHACL_H */ diff --git a/patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch b/patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch new file mode 100644 index 0000000..4f96841 --- /dev/null +++ b/patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch @@ -0,0 +1,252 @@ +From: Andreas Gruenbacher +Date: Sat, 12 Jun 2010 19:48:47 +0200 +Subject: [PATCH 09/16] richacl: Helper functions for implementing richacl inode operations +Patch-mainline: not yet + +These functions are supposed to be used by file systems so that the file +system independent code remains in the vfs. + +Signed-off-by: Andreas Gruenbacher +--- + fs/Makefile | 2 + fs/richacl_inode.c | 194 ++++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/richacl.h | 21 +++++ + 3 files changed, 216 insertions(+), 1 deletion(-) + create mode 100644 fs/richacl_inode.c + +--- a/fs/Makefile ++++ b/fs/Makefile +@@ -52,7 +52,7 @@ obj-$(CONFIG_NFS_COMMON) += nfs_common/ + obj-$(CONFIG_GENERIC_ACL) += generic_acl.o + + obj-$(CONFIG_FS_RICHACL) += richacl.o +-richacl-y := richacl_base.o ++richacl-y := richacl_base.o richacl_inode.o + + obj-y += quota/ + +--- /dev/null ++++ b/fs/richacl_inode.c +@@ -0,0 +1,194 @@ ++/* ++ * Copyright (C) 2010 Novell, Inc. ++ * Written by Andreas Gruenbacher ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include ++ ++/** ++ * richacl_may_create - helper for implementing iop->may_create ++ */ ++int ++richacl_may_create(struct inode *dir, int isdir, ++ int (*richacl_permission)(struct inode *, unsigned int)) ++{ ++ if (IS_RICHACL(dir)) ++ return richacl_permission(dir, ++ ACE4_EXECUTE | (isdir ? 
++ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); ++ else ++ return generic_permission(dir, MAY_WRITE | MAY_EXEC, ++ dir->i_op->check_acl); ++} ++EXPORT_SYMBOL(richacl_may_create); ++ ++static int ++check_sticky(struct inode *dir, struct inode *inode) ++{ ++ if (!(dir->i_mode & S_ISVTX)) ++ return 0; ++ if (inode->i_uid == current_fsuid()) ++ return 0; ++ if (dir->i_uid == current_fsuid()) ++ return 0; ++ return !capable(CAP_FOWNER); ++} ++ ++/** ++ * richacl_may_delete - helper for implementing iop->may_delete ++ */ ++int ++richacl_may_delete(struct inode *dir, struct inode *inode, int replace, ++ int (*richacl_permission)(struct inode *, unsigned int)) ++{ ++ int error; ++ ++ if (IS_RICHACL(inode)) { ++ error = richacl_permission(dir, ++ ACE4_EXECUTE | ACE4_DELETE_CHILD); ++ if (!error && check_sticky(dir, inode)) ++ error = -EPERM; ++ if (error && !richacl_permission(inode, ACE4_DELETE)) ++ error = 0; ++ if (!error && replace) ++ error = richacl_permission(dir, ++ ACE4_EXECUTE | (S_ISDIR(inode->i_mode) ? ++ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); ++ } else { ++ error = generic_permission(dir, MAY_WRITE | MAY_EXEC, ++ dir->i_op->check_acl); ++ if (!error && check_sticky(dir, inode)) ++ error = -EPERM; ++ } ++ ++ return error; ++} ++EXPORT_SYMBOL(richacl_may_delete); ++ ++/** ++ * richacl_inode_permission - helper for implementing iop->permission ++ * @inode: inode to check ++ * @acl: rich acl of the inode (may be NULL) ++ * @mask: requested access (ACE4_* bitmask) ++ * ++ * This function is supposed to be used by file systems for implementing the ++ * permission inode operation. ++ */ ++int ++richacl_inode_permission(struct inode *inode, const struct richacl *acl, ++ unsigned int mask) ++{ ++ if (acl) { ++ if (!richacl_permission(inode, acl, mask)) ++ return 0; ++ } else { ++ int mode = inode->i_mode; ++ ++ if (current_fsuid() == inode->i_uid) ++ mode >>= 6; ++ else if (in_group_p(inode->i_gid)) ++ mode >>= 3; ++ if (!(mask & ~richacl_mode_to_mask(mode))) ++ return 0; ++ } ++ ++ /* ++ * Keep in sync with the capability checks in generic_permission(). ++ */ ++ if (!(mask & ~ACE4_POSIX_MODE_ALL)) { ++ /* ++ * Read/write DACs are always overridable. ++ * Executable DACs are overridable if at ++ * least one exec bit is set. ++ */ ++ if (!(mask & ACE4_POSIX_MODE_EXEC) || execute_ok(inode)) ++ if (capable(CAP_DAC_OVERRIDE)) ++ return 0; ++ } ++ /* ++ * Searching includes executable on directories, else just read. ++ */ ++ if (!(mask & ~(ACE4_READ_DATA | ACE4_LIST_DIRECTORY | ACE4_EXECUTE)) && ++ (S_ISDIR(inode->i_mode) || !(mask & ACE4_EXECUTE))) ++ if (capable(CAP_DAC_READ_SEARCH)) ++ return 0; ++ ++ return -EACCES; ++} ++EXPORT_SYMBOL_GPL(richacl_inode_permission); ++ ++/** ++ * richacl_inode_change_ok - helper for implementing iop->setattr ++ * @inode: inode to check ++ * @attr: requested inode attribute changes ++ * @richacl_permission: permission function taking an inode and ACE4_* flags ++ * ++ * Keep in sync with inode_change_ok(). ++ */ ++int ++richacl_inode_change_ok(struct inode *inode, struct iattr *attr, ++ int (*richacl_permission)(struct inode *, unsigned int)) ++{ ++ unsigned int ia_valid = attr->ia_valid; ++ ++ /* If force is set do it anyway. */ ++ if (ia_valid & ATTR_FORCE) ++ return 0; ++ ++ /* Make sure a caller can chown. 
*/ ++ if ((ia_valid & ATTR_UID) && ++ (current_fsuid() != inode->i_uid || ++ attr->ia_uid != inode->i_uid) && ++ (current_fsuid() != attr->ia_uid || ++ richacl_permission(inode, ACE4_WRITE_OWNER)) && ++ !capable(CAP_CHOWN)) ++ goto error; ++ ++ /* Make sure caller can chgrp. */ ++ if ((ia_valid & ATTR_GID)) { ++ int in_group = in_group_p(attr->ia_gid); ++ if ((current_fsuid() != inode->i_uid || ++ (!in_group && attr->ia_gid != inode->i_gid)) && ++ (!in_group || ++ richacl_permission(inode, ACE4_WRITE_OWNER)) && ++ !capable(CAP_CHOWN)) ++ goto error; ++ } ++ ++ /* Make sure a caller can chmod. */ ++ if (ia_valid & ATTR_MODE) { ++ if (current_fsuid() != inode->i_uid && ++ richacl_permission(inode, ACE4_WRITE_ACL) && ++ !capable(CAP_FOWNER)) ++ goto error; ++ /* Also check the setgid bit! */ ++ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : ++ inode->i_gid) && !capable(CAP_FSETID)) ++ attr->ia_mode &= ~S_ISGID; ++ } ++ ++ /* Check for setting the inode time. */ ++ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) { ++ if (current_fsuid() != inode->i_uid && ++ richacl_permission(inode, ACE4_WRITE_ATTRIBUTES) && ++ !capable(CAP_FOWNER)) ++ goto error; ++ } ++ return 0; ++error: ++ return -EPERM; ++} ++EXPORT_SYMBOL_GPL(richacl_inode_change_ok); +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -273,4 +273,25 @@ extern struct richacl *richacl_chmod(str + extern int richacl_permission(struct inode *, const struct richacl *, + unsigned int); + ++/* richacl_inode.c */ ++ ++#ifdef CONFIG_FS_RICHACL ++extern int richacl_may_create(struct inode *, int, ++ int (*)(struct inode *, unsigned int)); ++extern int richacl_may_delete(struct inode *, struct inode *, int, ++ int (*)(struct inode *, unsigned int)); ++extern int richacl_inode_permission(struct inode *, const struct richacl *, ++ unsigned int); ++extern int richacl_inode_change_ok(struct inode *, struct iattr *, ++ int (*)(struct inode *, unsigned int)); ++#else ++static inline int ++richacl_inode_change_ok(struct inode *inode, struct iattr *attr, ++ int (*richacl_permission)(struct inode *inode, ++ unsigned int mask)) ++{ ++ return -EPERM; ++} ++#endif ++ + #endif /* __RICHACL_H */ diff --git a/patches.suse/0010-richacl-Create-time-inheritance.patch b/patches.suse/0010-richacl-Create-time-inheritance.patch new file mode 100644 index 0000000..5e81a2b --- /dev/null +++ b/patches.suse/0010-richacl-Create-time-inheritance.patch @@ -0,0 +1,127 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:49 +0530 +Subject: [PATCH 10/16] richacl: Create-time inheritance +Patch-mainline: not yet + +When a new file is created, it can inherit an acl from its parent +directory; this is similar to how default acls work in POSIX (draft) +ACLs. + +As with POSIX ACLs, if a file inherits an acl from its parent directory, +the intersection between the create mode and the permissions granted by +the inherited acl determines the file masks and file permission bits, +and the umask is ignored. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/richacl.h | 1 + 2 files changed, 91 insertions(+) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -454,3 +454,93 @@ is_everyone: + return denied ? 
-EACCES : 0; + } + EXPORT_SYMBOL_GPL(richacl_permission); ++ ++/** ++ * richacl_inherit - compute the inherited acl of a new file ++ * @dir_acl: acl of the containing direcory ++ * @inode: inode of the new file (create mode in i_mode) ++ * ++ * A directory can have acl entries which files and/or directories created ++ * inside the directory will inherit. This function computes the acl for such ++ * a new file. If there is no inheritable acl, it will return %NULL. ++ * ++ * The file permission bits in inode->i_mode must be set to the create mode. ++ * If there is an inheritable acl, the maximum permissions that the acl grants ++ * will be computed and permissions not granted by the acl will be removed from ++ * inode->i_mode. If there is no inheritable acl, the umask will be applied ++ * instead. ++ */ ++struct richacl * ++richacl_inherit(const struct richacl *dir_acl, struct inode *inode) ++{ ++ const struct richace *dir_ace; ++ struct richacl *acl = NULL; ++ struct richace *ace; ++ int count = 0; ++ mode_t mask = ~current_umask(); ++ ++ if (S_ISDIR(inode->i_mode)) { ++ richacl_for_each_entry(dir_ace, dir_acl) { ++ if (!richace_is_inheritable(dir_ace)) ++ continue; ++ count++; ++ } ++ if (!count) ++ goto mask; ++ acl = richacl_alloc(count); ++ if (!acl) ++ return ERR_PTR(-ENOMEM); ++ ace = acl->a_entries; ++ richacl_for_each_entry(dir_ace, dir_acl) { ++ if (!richace_is_inheritable(dir_ace)) ++ continue; ++ memcpy(ace, dir_ace, sizeof(struct richace)); ++ if (dir_ace->e_flags & ACE4_NO_PROPAGATE_INHERIT_ACE) ++ richace_clear_inheritance_flags(ace); ++ if ((dir_ace->e_flags & ACE4_FILE_INHERIT_ACE) && ++ !(dir_ace->e_flags & ACE4_DIRECTORY_INHERIT_ACE)) ++ ace->e_flags |= ACE4_INHERIT_ONLY_ACE; ++ ace++; ++ } ++ } else { ++ richacl_for_each_entry(dir_ace, dir_acl) { ++ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE)) ++ continue; ++ count++; ++ } ++ if (!count) ++ goto mask; ++ acl = richacl_alloc(count); ++ if (!acl) ++ return ERR_PTR(-ENOMEM); ++ ace = acl->a_entries; ++ richacl_for_each_entry(dir_ace, dir_acl) { ++ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE)) ++ continue; ++ memcpy(ace, dir_ace, sizeof(struct richace)); ++ richace_clear_inheritance_flags(ace); ++ /* ++ * ACE4_DELETE_CHILD is meaningless for ++ * non-directories, so clear it. ++ */ ++ ace->e_mask &= ~ACE4_DELETE_CHILD; ++ ace++; ++ } ++ } ++ ++ richacl_compute_max_masks(acl); ++ ++ /* ++ * Ensure that the acl will not grant any permissions beyond the create ++ * mode. 
++ */ ++ acl->a_owner_mask &= richacl_mode_to_mask(inode->i_mode >> 6); ++ acl->a_group_mask &= richacl_mode_to_mask(inode->i_mode >> 3); ++ acl->a_other_mask &= richacl_mode_to_mask(inode->i_mode); ++ mask = ~S_IRWXUGO | richacl_masks_to_mode(acl); ++ ++mask: ++ inode->i_mode &= mask; ++ return acl; ++} ++EXPORT_SYMBOL_GPL(richacl_inherit); +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -272,6 +272,7 @@ extern void richacl_compute_max_masks(st + extern struct richacl *richacl_chmod(struct richacl *, mode_t); + extern int richacl_permission(struct inode *, const struct richacl *, + unsigned int); ++extern struct richacl *richacl_inherit(const struct richacl *, struct inode *); + + /* richacl_inode.c */ + diff --git a/patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch b/patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch new file mode 100644 index 0000000..cc53564 --- /dev/null +++ b/patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch @@ -0,0 +1,79 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:50 +0530 +Subject: [PATCH 11/16] richacl: Check if an acl is equivalent to a file mode +Patch-mainline: not yet + +This function is used to avoid storing richacls on disk if the acl can +be computed from the file permission bits. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/richacl.h | 1 + + 2 files changed, 49 insertions(+) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -544,3 +544,51 @@ mask: + return acl; + } + EXPORT_SYMBOL_GPL(richacl_inherit); ++ ++/** ++ * richacl_equiv_mode - check if @acl is equivalent to file permission bits ++ * @mode_p: the file mode (including the file type) ++ * ++ * If @acl can be fully represented by file permission bits, this function ++ * returns 0, and the file permission bits in @mode_p are set to the equivalent ++ * of @acl. ++ * ++ * This function is used to avoid storing richacls on disk if the acl can be ++ * computed from the file permission bits. It allows user-space to make sure ++ * that a file has no explicit richacl set. ++ */ ++int ++richacl_equiv_mode(const struct richacl *acl, mode_t *mode_p) ++{ ++ const struct richace *ace = acl->a_entries; ++ unsigned int x; ++ mode_t mode; ++ ++ if (acl->a_count != 1 || ++ acl->a_flags || ++ !richace_is_everyone(ace) || ++ !richace_is_allow(ace) || ++ ace->e_flags & ~ACE4_SPECIAL_WHO) ++ return -1; ++ ++ /* ++ * Figure out the permissions we care about: ACE4_DELETE_CHILD is ++ * meaningless for non-directories, so we ignore it. 
++ */ ++ x = ~ACE4_POSIX_ALWAYS_ALLOWED; ++ if (!S_ISDIR(*mode_p)) ++ x &= ~ACE4_DELETE_CHILD; ++ ++ if ((ace->e_mask & x) != (ACE4_POSIX_MODE_ALL & x)) ++ return -1; ++ ++ mode = richacl_masks_to_mode(acl); ++ if ((acl->a_owner_mask & x) != (richacl_mode_to_mask(mode >> 6) & x) || ++ (acl->a_group_mask & x) != (richacl_mode_to_mask(mode >> 3) & x) || ++ (acl->a_other_mask & x) != (richacl_mode_to_mask(mode) & x)) ++ return -1; ++ ++ *mode_p = (*mode_p & ~S_IRWXUGO) | mode; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(richacl_equiv_mode); +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -273,6 +273,7 @@ extern struct richacl *richacl_chmod(str + extern int richacl_permission(struct inode *, const struct richacl *, + unsigned int); + extern struct richacl *richacl_inherit(const struct richacl *, struct inode *); ++extern int richacl_equiv_mode(const struct richacl *, mode_t *); + + /* richacl_inode.c */ + diff --git a/patches.suse/0012-richacl-Automatic-Inheritance.patch b/patches.suse/0012-richacl-Automatic-Inheritance.patch new file mode 100644 index 0000000..8e23aef --- /dev/null +++ b/patches.suse/0012-richacl-Automatic-Inheritance.patch @@ -0,0 +1,143 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:52 +0530 +Subject: [PATCH 12/16] richacl: Automatic Inheritance +Patch-mainline: not yet + +Automatic Inheritance (AI) allows changes to the acl of a directory to +recursively propagate down to files and directories in the directory. + +To implement this, the kernel keeps track of which permissions have been +inherited, and makes sure that permission propagation is turned off when +the file permission bits of a file are changed (upon create or chmod). + +The actual permission propagation is implemented in user space. + +AI works as follows: + + - When the ACL4_AUTO_INHERIT flag in the acl of a file is cleared, the + file is not affected by AI. + + - When the ACL4_AUTO_INHERIT flag in the acl of a directory is set and + a file or subdirectory is created in that directory, files created in + the directory will have the ACL4_AUTO_INHERIT flag set, and all + inherited aces will have the ACE4_INHERITED_ACE flag set. This + allows user space to distinguish between aces which have been + inherited, and aces which have been explicitly added. + + - When the ACL4_PROTECTED acl flag in the acl of a file is set, AI will + not modify the acl of the file. This does not affect propagation of + permissions from the file to its children (if the file is a + directory). + +Linux does not have a way of creating files without setting the file +permission bits, so all files created inside a directory with +ACL4_AUTO_INHERIT set will also have the ACL4_PROTECTED flag set. This +effectively disables AI. + +Protocols which support creating files without specifying permissions +can explicitly clear the ACL4_PROTECTED flag after creating a file (and +reset the file masks to "undo" applying the create mode; see +richacl_compute_max_masks()). This is a workaround; a per-create or +per-process flag indicating to ignore the create mode when AI is in +effect would fix this problem. 
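
As a rough sketch (not part of this patch; the real helpers are
richacl_is_auto_inherit() and richacl_is_protected() below), the two acl
flags combine as follows when deciding whether propagation may touch a
file's acl:

	/* Propagation only applies to auto-inherit acls that are not protected. */
	static int auto_inheritance_applies(const struct richacl *acl)
	{
		return (acl->a_flags & ACL4_AUTO_INHERIT) &&
		       !(acl->a_flags & ACL4_PROTECTED);
	}
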
+ +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 15 ++++++++++++++- + include/linux/richacl.h | 25 ++++++++++++++++++++++++- + 2 files changed, 38 insertions(+), 2 deletions(-) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -352,7 +352,8 @@ richacl_chmod(struct richacl *acl, mode_ + + if (acl->a_owner_mask == owner_mask && + acl->a_group_mask == group_mask && +- acl->a_other_mask == other_mask) ++ acl->a_other_mask == other_mask && ++ (!richacl_is_auto_inherit(acl) || richacl_is_protected(acl))) + return acl; + + clone = richacl_clone(acl); +@@ -363,6 +364,8 @@ richacl_chmod(struct richacl *acl, mode_ + clone->a_owner_mask = owner_mask; + clone->a_group_mask = group_mask; + clone->a_other_mask = other_mask; ++ if (richacl_is_auto_inherit(clone)) ++ clone->a_flags |= ACL4_PROTECTED; + + return clone; + } +@@ -539,6 +542,16 @@ richacl_inherit(const struct richacl *di + acl->a_other_mask &= richacl_mode_to_mask(inode->i_mode); + mask = ~S_IRWXUGO | richacl_masks_to_mode(acl); + ++ if (richacl_is_auto_inherit(dir_acl)) { ++ /* ++ * We need to set ACL4_PROTECTED because we are ++ * doing an implicit chmod ++ */ ++ acl->a_flags = ACL4_AUTO_INHERIT | ACL4_PROTECTED; ++ richacl_for_each_entry(ace, acl) ++ ace->e_flags |= ACE4_INHERITED_ACE; ++ } ++ + mask: + inode->i_mode &= mask; + return acl; +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -47,6 +47,15 @@ struct richacl { + _ace != _acl->a_entries - 1; \ + _ace--) + ++/* a_flags values */ ++#define ACL4_AUTO_INHERIT 0x01 ++#define ACL4_PROTECTED 0x02 ++/*#define ACL4_DEFAULTED 0x04*/ ++ ++#define ACL4_VALID_FLAGS ( \ ++ ACL4_AUTO_INHERIT | \ ++ ACL4_PROTECTED) ++ + /* e_type values */ + #define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000 + #define ACE4_ACCESS_DENIED_ACE_TYPE 0x0001 +@@ -61,6 +70,7 @@ struct richacl { + /*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/ + /*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/ + #define ACE4_IDENTIFIER_GROUP 0x0040 ++#define ACE4_INHERITED_ACE 0x0080 + /* in-memory representation only */ + #define ACE4_SPECIAL_WHO 0x4000 + +@@ -69,7 +79,8 @@ struct richacl { + ACE4_DIRECTORY_INHERIT_ACE | \ + ACE4_NO_PROPAGATE_INHERIT_ACE | \ + ACE4_INHERIT_ONLY_ACE | \ +- ACE4_IDENTIFIER_GROUP) ++ ACE4_IDENTIFIER_GROUP | \ ++ ACE4_INHERITED_ACE) + + /* e_mask bitflags */ + #define ACE4_READ_DATA 0x00000001 +@@ -162,6 +173,18 @@ richacl_put(struct richacl *acl) + kfree(acl); + } + ++static inline int ++richacl_is_auto_inherit(const struct richacl *acl) ++{ ++ return acl->a_flags & ACL4_AUTO_INHERIT; ++} ++ ++static inline int ++richacl_is_protected(const struct richacl *acl) ++{ ++ return acl->a_flags & ACL4_PROTECTED; ++} ++ + /* + * Special e_who identifiers: we use these pointer values in comparisons + * instead of doing a strcmp. diff --git a/patches.suse/0013-richacl-Restrict-access-check-algorithm.patch b/patches.suse/0013-richacl-Restrict-access-check-algorithm.patch new file mode 100644 index 0000000..9d77726 --- /dev/null +++ b/patches.suse/0013-richacl-Restrict-access-check-algorithm.patch @@ -0,0 +1,52 @@ +From: Andreas Gruenbacher +Date: Mon, 14 Jun 2010 09:22:14 +0530 +Subject: [PATCH 13/16] richacl: Restrict access check algorithm +Patch-mainline: not yet + +We want to avoid applying the file masks to an acl when changing the +file permission bits or performing an access check. 
On the other hand, +when we *do* apply the file masks to the acl, we want the resulting acl +to produce the same access check results with the standard nfs4 access +check algorithm as the richacl access check algorithm with the original +acl. This is already the case, except in the following scenario: + +With file masks equivalent to file mode 0600, the following acl would +grant the owner rw access if the owner is in the owning group: + + group@:rw::allow + +There is no way to express this in an nfs4 acl; the result is always a +more restrictive acl. There are two approaches to deal with this +difference: either accept that it exists and that applying the file +masks is imperfect, or change the richacl access check algorithm so that +such accesses are denied. + +This patch denies such accesses and makes sure that the richacl access +check algorithm grants the same accesses as the nfsv4 acl with the file +masks applied. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/richacl_base.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -423,6 +423,16 @@ richacl_permission(struct inode *inode, + } else + goto is_everyone; + ++ /* ++ * Apply the group file mask to entries other than OWNER@ and ++ * EVERYONE@. This is not required for correct access checking ++ * but ensures that we grant the same permissions as the acl ++ * computed by richacl_apply_masks() would grant. See ++ * richacl_apply_masks() for a more detailed explanation. ++ */ ++ if (richace_is_allow(ace)) ++ ace_mask &= acl->a_group_mask; ++ + is_owner: + /* The process is in the owner or group file class. */ + in_owner_or_group_class = 1; diff --git a/patches.suse/0014-richacl-xattr-mapping-functions.patch b/patches.suse/0014-richacl-xattr-mapping-functions.patch new file mode 100644 index 0000000..c2eb771 --- /dev/null +++ b/patches.suse/0014-richacl-xattr-mapping-functions.patch @@ -0,0 +1,237 @@ +From: Andreas Gruenbacher +Date: Fri, 11 Jun 2010 16:12:50 +0530 +Subject: [PATCH 14/16] richacl: xattr mapping functions +Patch-mainline: not yet + +Map between "system.richacl" xattrs and the in-kernel representation. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Aneesh Kumar K.V +--- + fs/Makefile | 2 + fs/richacl_xattr.c | 156 ++++++++++++++++++++++++++++++++++++++++++ + include/linux/richacl_xattr.h | 47 ++++++++++++ + 3 files changed, 204 insertions(+), 1 deletion(-) + create mode 100644 fs/richacl_xattr.c + create mode 100644 include/linux/richacl_xattr.h + +--- a/fs/Makefile ++++ b/fs/Makefile +@@ -52,7 +52,7 @@ obj-$(CONFIG_NFS_COMMON) += nfs_common/ + obj-$(CONFIG_GENERIC_ACL) += generic_acl.o + + obj-$(CONFIG_FS_RICHACL) += richacl.o +-richacl-y := richacl_base.o richacl_inode.o ++richacl-y := richacl_base.o richacl_inode.o richacl_xattr.o + + obj-y += quota/ + +--- /dev/null ++++ b/fs/richacl_xattr.c +@@ -0,0 +1,156 @@ ++/* ++ * Copyright (C) 2006, 2010 Novell, Inc. ++ * Written by Andreas Gruenbacher ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++MODULE_LICENSE("GPL"); ++ ++/** ++ * richacl_from_xattr - convert a richacl xattr into the in-memory representation ++ */ ++struct richacl * ++richacl_from_xattr(const void *value, size_t size) ++{ ++ const struct richacl_xattr *xattr_acl = value; ++ const struct richace_xattr *xattr_ace = (void *)(xattr_acl + 1); ++ struct richacl *acl; ++ struct richace *ace; ++ int count; ++ ++ if (size < sizeof(struct richacl_xattr) || ++ xattr_acl->a_version != ACL4_XATTR_VERSION || ++ (xattr_acl->a_flags & ~ACL4_VALID_FLAGS)) ++ return ERR_PTR(-EINVAL); ++ ++ count = le16_to_cpu(xattr_acl->a_count); ++ if (count > ACL4_XATTR_MAX_COUNT) ++ return ERR_PTR(-EINVAL); ++ ++ acl = richacl_alloc(count); ++ if (!acl) ++ return ERR_PTR(-ENOMEM); ++ ++ acl->a_flags = xattr_acl->a_flags; ++ acl->a_owner_mask = le32_to_cpu(xattr_acl->a_owner_mask); ++ if (acl->a_owner_mask & ~ACE4_VALID_MASK) ++ goto fail_einval; ++ acl->a_group_mask = le32_to_cpu(xattr_acl->a_group_mask); ++ if (acl->a_group_mask & ~ACE4_VALID_MASK) ++ goto fail_einval; ++ acl->a_other_mask = le32_to_cpu(xattr_acl->a_other_mask); ++ if (acl->a_other_mask & ~ACE4_VALID_MASK) ++ goto fail_einval; ++ ++ richacl_for_each_entry(ace, acl) { ++ const char *who = (void *)(xattr_ace + 1), *end; ++ ssize_t used = (void *)who - value; ++ ++ if (used > size) ++ goto fail_einval; ++ end = memchr(who, 0, size - used); ++ if (!end) ++ goto fail_einval; ++ ++ ace->e_type = le16_to_cpu(xattr_ace->e_type); ++ ace->e_flags = le16_to_cpu(xattr_ace->e_flags); ++ ace->e_mask = le32_to_cpu(xattr_ace->e_mask); ++ ace->u.e_id = le32_to_cpu(xattr_ace->e_id); ++ ++ if (ace->e_flags & ~ACE4_VALID_FLAGS) ++ goto fail_einval; ++ if (ace->e_type > ACE4_ACCESS_DENIED_ACE_TYPE || ++ (ace->e_mask & ~ACE4_VALID_MASK)) ++ goto fail_einval; ++ ++ if (who == end) { ++ if (ace->u.e_id == -1) ++ goto fail_einval; /* uid/gid needed */ ++ } else if (richace_set_who(ace, who)) ++ goto fail_einval; ++ ++ xattr_ace = (void *)who + ALIGN(end - who + 1, 4); ++ } ++ ++ return acl; ++ ++fail_einval: ++ richacl_put(acl); ++ return ERR_PTR(-EINVAL); ++} ++EXPORT_SYMBOL_GPL(richacl_from_xattr); ++ ++/** ++ * richacl_xattr_size - compute the size of the xattr representation of @acl ++ */ ++size_t ++richacl_xattr_size(const struct richacl *acl) ++{ ++ size_t size = sizeof(struct richacl_xattr); ++ const struct richace *ace; ++ ++ richacl_for_each_entry(ace, acl) { ++ size += sizeof(struct richace_xattr) + ++ (richace_is_unix_id(ace) ? 
4 : ++ ALIGN(strlen(ace->u.e_who) + 1, 4)); ++ } ++ return size; ++} ++EXPORT_SYMBOL_GPL(richacl_xattr_size); ++ ++/** ++ * richacl_to_xattr - convert @acl into its xattr representation ++ * @acl: the richacl to convert ++ * @buffer: buffer of size richacl_xattr_size(@acl) for the result ++ */ ++void ++richacl_to_xattr(const struct richacl *acl, void *buffer) ++{ ++ struct richacl_xattr *xattr_acl = buffer; ++ struct richace_xattr *xattr_ace; ++ const struct richace *ace; ++ ++ xattr_acl->a_version = ACL4_XATTR_VERSION; ++ xattr_acl->a_flags = acl->a_flags; ++ xattr_acl->a_count = cpu_to_le16(acl->a_count); ++ ++ xattr_acl->a_owner_mask = cpu_to_le32(acl->a_owner_mask); ++ xattr_acl->a_group_mask = cpu_to_le32(acl->a_group_mask); ++ xattr_acl->a_other_mask = cpu_to_le32(acl->a_other_mask); ++ ++ xattr_ace = (void *)(xattr_acl + 1); ++ richacl_for_each_entry(ace, acl) { ++ xattr_ace->e_type = cpu_to_le16(ace->e_type); ++ xattr_ace->e_flags = cpu_to_le16(ace->e_flags & ++ ACE4_VALID_FLAGS); ++ xattr_ace->e_mask = cpu_to_le32(ace->e_mask); ++ if (richace_is_unix_id(ace)) { ++ xattr_ace->e_id = cpu_to_le32(ace->u.e_id); ++ memset(xattr_ace->e_who, 0, 4); ++ xattr_ace = (void *)xattr_ace->e_who + 4; ++ } else { ++ int sz = ALIGN(strlen(ace->u.e_who) + 1, 4); ++ ++ xattr_ace->e_id = cpu_to_le32(-1); ++ memset(xattr_ace->e_who + sz - 4, 0, 4); ++ strcpy(xattr_ace->e_who, ace->u.e_who); ++ xattr_ace = (void *)xattr_ace->e_who + sz; ++ } ++ } ++} ++EXPORT_SYMBOL_GPL(richacl_to_xattr); +--- /dev/null ++++ b/include/linux/richacl_xattr.h +@@ -0,0 +1,47 @@ ++/* ++ * Copyright (C) 2006, 2010 Novell, Inc. ++ * Written by Andreas Gruenbacher ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ */ ++ ++#ifndef __RICHACL_XATTR_H ++#define __RICHACL_XATTR_H ++ ++#include ++ ++#define RICHACL_XATTR "system.richacl" ++ ++struct richace_xattr { ++ __le16 e_type; ++ __le16 e_flags; ++ __le32 e_mask; ++ __le32 e_id; ++ char e_who[0]; ++}; ++ ++struct richacl_xattr { ++ unsigned char a_version; ++ unsigned char a_flags; ++ __le16 a_count; ++ __le32 a_owner_mask; ++ __le32 a_group_mask; ++ __le32 a_other_mask; ++}; ++ ++#define ACL4_XATTR_VERSION 0 ++#define ACL4_XATTR_MAX_COUNT 1024 ++ ++extern struct richacl *richacl_from_xattr(const void *, size_t); ++extern size_t richacl_xattr_size(const struct richacl *acl); ++extern void richacl_to_xattr(const struct richacl *, void *); ++ ++#endif /* __RICHACL_XATTR_H */ diff --git a/patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch b/patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch new file mode 100644 index 0000000..9c04b2c --- /dev/null +++ b/patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch @@ -0,0 +1,156 @@ +From: Aneesh Kumar K.V +Date: Fri, 11 Jun 2010 16:12:51 +0530 +Subject: [PATCH 15/16] ext4: Use IS_POSIXACL() to check for POSIX ACL support +Patch-mainline: not yet + +Use IS_POSIXACL() instead of a file system specific mount flag since we +have IS_POSIXACL() in the vfs already, anyway. 
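
For reference, IS_POSIXACL() is just a test of the MS_POSIXACL
superblock flag, roughly as follows (paraphrased, not part of this
patch), which is why the duplicate per-filesystem mount bit can go:

	/* Equivalent, in spirit, to the VFS IS_POSIXACL() check. */
	#define SB_HAS_POSIXACL(inode)	((inode)->i_sb->s_flags & MS_POSIXACL)
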
+ +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: Andreas Gruenbacher +--- + fs/ext4/acl.c | 16 ++++++++-------- + fs/ext4/ext4.h | 1 - + fs/ext4/super.c | 16 +++++----------- + 3 files changed, 13 insertions(+), 20 deletions(-) + +--- a/fs/ext4/acl.c ++++ b/fs/ext4/acl.c +@@ -139,7 +139,7 @@ ext4_get_acl(struct inode *inode, int ty + struct posix_acl *acl; + int retval; + +- if (!test_opt(inode->i_sb, POSIX_ACL)) ++ if (!IS_POSIXACL(inode)) + return NULL; + + acl = get_cached_acl(inode, type); +@@ -266,7 +266,7 @@ ext4_init_acl(handle_t *handle, struct i + int error = 0; + + if (!S_ISLNK(inode->i_mode)) { +- if (test_opt(dir->i_sb, POSIX_ACL)) { ++ if (IS_POSIXACL(inode)) { + acl = ext4_get_acl(dir, ACL_TYPE_DEFAULT); + if (IS_ERR(acl)) + return PTR_ERR(acl); +@@ -274,7 +274,7 @@ ext4_init_acl(handle_t *handle, struct i + if (!acl) + inode->i_mode &= ~current_umask(); + } +- if (test_opt(inode->i_sb, POSIX_ACL) && acl) { ++ if (IS_POSIXACL(inode) && acl) { + struct posix_acl *clone; + mode_t mode; + +@@ -328,7 +328,7 @@ ext4_acl_chmod(struct inode *inode) + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; +- if (!test_opt(inode->i_sb, POSIX_ACL)) ++ if (!IS_POSIXACL(inode)) + return 0; + acl = ext4_get_acl(inode, ACL_TYPE_ACCESS); + if (IS_ERR(acl) || !acl) +@@ -370,7 +370,7 @@ ext4_xattr_list_acl_access(struct dentry + { + const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS); + +- if (!test_opt(dentry->d_sb, POSIX_ACL)) ++ if (!IS_POSIXACL(dentry->d_inode)) + return 0; + if (list && size <= list_len) + memcpy(list, POSIX_ACL_XATTR_ACCESS, size); +@@ -383,7 +383,7 @@ ext4_xattr_list_acl_default(struct dentr + { + const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT); + +- if (!test_opt(dentry->d_sb, POSIX_ACL)) ++ if (!IS_POSIXACL(dentry->d_inode)) + return 0; + if (list && size <= list_len) + memcpy(list, POSIX_ACL_XATTR_DEFAULT, size); +@@ -399,7 +399,7 @@ ext4_xattr_get_acl(struct dentry *dentry + + if (strcmp(name, "") != 0) + return -EINVAL; +- if (!test_opt(dentry->d_sb, POSIX_ACL)) ++ if (!IS_POSIXACL(dentry->d_inode)) + return -EOPNOTSUPP; + + acl = ext4_get_acl(dentry->d_inode, type); +@@ -424,7 +424,7 @@ ext4_xattr_set_acl(struct dentry *dentry + + if (strcmp(name, "") != 0) + return -EINVAL; +- if (!test_opt(inode->i_sb, POSIX_ACL)) ++ if (!IS_POSIXACL(dentry->d_inode)) + return -EOPNOTSUPP; + if (!is_owner_or_cap(inode)) + return -EPERM; +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -875,7 +875,6 @@ struct ext4_inode_info { + #define EXT4_MOUNT_UPDATE_JOURNAL 0x01000 /* Update the journal format */ + #define EXT4_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */ + #define EXT4_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */ +-#define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */ + #define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */ + #define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */ + #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -974,9 +974,9 @@ static int ext4_show_options(struct seq_ + } + #endif + #ifdef CONFIG_EXT4_FS_POSIX_ACL +- if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL)) ++ if ((sb->s_flags & MS_POSIXACL) && !(def_mount_opts & EXT4_DEFM_ACL)) + seq_puts(seq, ",acl"); +- if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL)) ++ if (!(sb->s_flags & MS_POSIXACL) && (def_mount_opts & EXT4_DEFM_ACL)) + seq_puts(seq, ",noacl"); + #endif + if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { +@@ -1481,10 
+1481,10 @@ static int parse_options(char *options, + #endif + #ifdef CONFIG_EXT4_FS_POSIX_ACL + case Opt_acl: +- set_opt(sb, POSIX_ACL); ++ sb->s_flags |= MS_POSIXACL; + break; + case Opt_noacl: +- clear_opt(sb, POSIX_ACL); ++ sb->s_flags &= ~MS_POSIXACL; + break; + #else + case Opt_acl: +@@ -2644,7 +2644,7 @@ static int ext4_fill_super(struct super_ + #endif + #ifdef CONFIG_EXT4_FS_POSIX_ACL + if (def_mount_opts & EXT4_DEFM_ACL) +- set_opt(sb, POSIX_ACL); ++ sb->s_flags |= MS_POSIXACL; + #endif + if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) + set_opt(sb, JOURNAL_DATA); +@@ -2691,9 +2691,6 @@ static int ext4_fill_super(struct super_ + &journal_ioprio, NULL, 0)) + goto failed_mount; + +- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | +- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); +- + if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && + (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || + EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) || +@@ -3753,9 +3750,6 @@ static int ext4_remount(struct super_blo + if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) + ext4_abort(sb, "Abort forced by user"); + +- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | +- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); +- + es = sbi->s_es; + + if (sbi->s_journal) { diff --git a/patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch b/patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch new file mode 100644 index 0000000..74601f4 --- /dev/null +++ b/patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch @@ -0,0 +1,690 @@ +From: Aneesh Kumar K.V +Date: Fri, 11 Jun 2010 16:12:52 +0530 +Subject: [PATCH 16/16] ext4: Implement richacl support in ext4 +Patch-mainline: not yet + +Support the richacl permission model in ext4. The richacls are stored +in "system.richacl" xattrs. + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: Andreas Gruenbacher +--- + fs/ext4/Kconfig | 10 + + fs/ext4/Makefile | 1 + fs/ext4/ext4.h | 4 + fs/ext4/file.c | 4 + fs/ext4/ialloc.c | 7 + + fs/ext4/inode.c | 19 ++- + fs/ext4/namei.c | 7 + + fs/ext4/richacl.c | 293 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ + fs/ext4/richacl.h | 56 ++++++++++ + fs/ext4/super.c | 45 ++++++-- + fs/ext4/xattr.c | 6 + + fs/ext4/xattr.h | 5 + 12 files changed, 441 insertions(+), 16 deletions(-) + create mode 100644 fs/ext4/richacl.c + create mode 100644 fs/ext4/richacl.h + +--- a/fs/ext4/Kconfig ++++ b/fs/ext4/Kconfig +@@ -83,3 +83,13 @@ config EXT4_DEBUG + + If you select Y here, then you will be able to turn on debugging + with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug" ++ ++config EXT4_FS_RICHACL ++ bool "Ext4 Rich Access Control Lists (EXPERIMENTAL)" ++ depends on EXT4_FS_XATTR && EXPERIMENTAL ++ select FS_RICHACL ++ help ++ Rich ACLs are an implementation of NFSv4 ACLs, extended by file masks ++ to fit into the standard POSIX file permission model. They are ++ designed to work seamlessly locally as well as across the NFSv4 and ++ CIFS/SMB2 network file system protocols. 
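
The mount handling added by this patch (see the fs/ext4/super.c hunks
further down) selects richacls per superblock, so the per-inode
IS_RICHACL()/EXT4_IS_RICHACL() test presumably reduces to a
superblock-flag check along these lines (assumed sketch only, mirroring
IS_POSIXACL()):

	/* Assumed shape of the IS_RICHACL() test used by this patch. */
	#define SB_HAS_RICHACL(inode)	((inode)->i_sb->s_flags & MS_RICHACL)
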
+--- a/fs/ext4/Makefile ++++ b/fs/ext4/Makefile +@@ -11,3 +11,4 @@ ext4-y := balloc.o bitmap.o dir.o file.o + ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o + ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o + ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o ++ext4-$(CONFIG_EXT4_FS_RICHACL) += richacl.o +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -841,6 +841,10 @@ struct ext4_inode_info { + */ + tid_t i_sync_tid; + tid_t i_datasync_tid; ++#ifdef CONFIG_EXT4_FS_RICHACL ++ struct richacl *i_richacl; ++#endif ++ + }; + + /* +--- a/fs/ext4/file.c ++++ b/fs/ext4/file.c +@@ -28,6 +28,7 @@ + #include "ext4_jbd2.h" + #include "xattr.h" + #include "acl.h" ++#include "richacl.h" + + /* + * Called when an inode is released. Note that this is different +@@ -161,5 +162,8 @@ const struct inode_operations ext4_file_ + #endif + .check_acl = ext4_check_acl, + .fiemap = ext4_fiemap, ++ .permission = ext4_permission, ++ .may_create = ext4_may_create, ++ .may_delete = ext4_may_delete, + }; + +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -28,6 +28,7 @@ + #include "ext4_jbd2.h" + #include "xattr.h" + #include "acl.h" ++#include "richacl.h" + + #include + +@@ -1023,7 +1024,11 @@ got: + if (err) + goto fail_drop; + +- err = ext4_init_acl(handle, inode, dir); ++ if (EXT4_IS_RICHACL(dir)) ++ err = ext4_init_richacl(handle, inode, dir); ++ else ++ err = ext4_init_acl(handle, inode, dir); ++ + if (err) + goto fail_free_drop; + +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -45,6 +45,7 @@ + #include "xattr.h" + #include "acl.h" + #include "ext4_extents.h" ++#include "richacl.h" + + #include + +@@ -5041,6 +5042,9 @@ struct inode *ext4_iget(struct super_blo + inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); + + ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ++#ifdef CONFIG_EXT4_FS_RICHACL ++ ei->i_richacl = EXT4_RICHACL_NOT_CACHED; ++#endif + ei->i_dir_start_lookup = 0; + ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); + /* We now have enough fields to check if the inode was active or not. +@@ -5466,7 +5470,11 @@ int ext4_setattr(struct dentry *dentry, + int orphan = 0; + const unsigned int ia_valid = attr->ia_valid; + +- error = inode_change_ok(inode, attr); ++ if (EXT4_IS_RICHACL(inode)) ++ error = richacl_inode_change_ok(inode, attr, ++ ext4_richacl_permission); ++ else ++ error = inode_change_ok(inode, attr); + if (error) + return error; + +@@ -5563,9 +5571,12 @@ int ext4_setattr(struct dentry *dentry, + if (orphan && inode->i_nlink) + ext4_orphan_del(NULL, inode); + +- if (!rc && (ia_valid & ATTR_MODE)) +- rc = ext4_acl_chmod(inode); +- ++ if (!rc && (ia_valid & ATTR_MODE)) { ++ if (EXT4_IS_RICHACL(inode)) ++ rc = ext4_richacl_chmod(inode); ++ else ++ rc = ext4_acl_chmod(inode); ++ } + err_out: + ext4_std_error(inode->i_sb, error); + if (!error) +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -39,6 +39,7 @@ + + #include "xattr.h" + #include "acl.h" ++#include "richacl.h" + + /* + * define how far ahead to read directories while searching them. 
+@@ -2522,6 +2523,9 @@ const struct inode_operations ext4_dir_i + #endif + .check_acl = ext4_check_acl, + .fiemap = ext4_fiemap, ++ .permission = ext4_permission, ++ .may_create = ext4_may_create, ++ .may_delete = ext4_may_delete, + }; + + const struct inode_operations ext4_special_inode_operations = { +@@ -2533,4 +2537,7 @@ const struct inode_operations ext4_speci + .removexattr = generic_removexattr, + #endif + .check_acl = ext4_check_acl, ++ .permission = ext4_permission, ++ .may_create = ext4_may_create, ++ .may_delete = ext4_may_delete, + }; +--- /dev/null ++++ b/fs/ext4/richacl.c +@@ -0,0 +1,293 @@ ++/* ++ * Copyright IBM Corporation, 2010 ++ * Author Aneesh Kumar K.V ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of version 2.1 of the GNU Lesser General Public License ++ * as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it would be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++ * ++ */ ++ ++#include ++#include ++#include ++ ++#include "ext4.h" ++#include "ext4_jbd2.h" ++#include "xattr.h" ++#include "acl.h" ++#include "richacl.h" ++ ++static inline struct richacl * ++ext4_iget_richacl(struct inode *inode) ++{ ++ struct richacl *acl = EXT4_RICHACL_NOT_CACHED; ++ struct ext4_inode_info *ei = EXT4_I(inode); ++ ++ spin_lock(&inode->i_lock); ++ if (ei->i_richacl != EXT4_RICHACL_NOT_CACHED) ++ acl = richacl_get(ei->i_richacl); ++ spin_unlock(&inode->i_lock); ++ ++ return acl; ++} ++ ++static inline void ++ext4_iset_richacl(struct inode *inode, struct richacl *acl) ++{ ++ struct ext4_inode_info *ei = EXT4_I(inode); ++ ++ spin_lock(&inode->i_lock); ++ if (ei->i_richacl != EXT4_RICHACL_NOT_CACHED) ++ richacl_put(ei->i_richacl); ++ ei->i_richacl = richacl_get(acl); ++ spin_unlock(&inode->i_lock); ++} ++ ++static struct richacl * ++ext4_get_richacl(struct inode *inode) ++{ ++ const int name_index = EXT4_XATTR_INDEX_RICHACL; ++ void *value = NULL; ++ struct richacl *acl; ++ int retval; ++ ++ if (!IS_RICHACL(inode)) ++ return ERR_PTR(-EOPNOTSUPP); ++ acl = ext4_iget_richacl(inode); ++ if (acl != EXT4_RICHACL_NOT_CACHED) ++ return acl; ++ retval = ext4_xattr_get(inode, name_index, "", NULL, 0); ++ if (retval > 0) { ++ value = kmalloc(retval, GFP_KERNEL); ++ if (!value) ++ return ERR_PTR(-ENOMEM); ++ retval = ext4_xattr_get(inode, name_index, "", value, retval); ++ } ++ if (retval > 0) { ++ acl = richacl_from_xattr(value, retval); ++ if (acl == ERR_PTR(-EINVAL)) ++ acl = ERR_PTR(-EIO); ++ } else if (retval == -ENODATA || retval == -ENOSYS) ++ acl = NULL; ++ else ++ acl = ERR_PTR(retval); ++ kfree(value); ++ ++ if (!IS_ERR_OR_NULL(acl)) ++ ext4_iset_richacl(inode, acl); ++ ++ return acl; ++} ++ ++static int ++ext4_set_richacl(handle_t *handle, struct inode *inode, struct richacl *acl) ++{ ++ const int name_index = EXT4_XATTR_INDEX_RICHACL; ++ size_t size = 0; ++ void *value = NULL; ++ int retval; ++ ++ if (acl) { ++ mode_t mode = inode->i_mode; ++ if (richacl_equiv_mode(acl, &mode) == 0) { ++ inode->i_mode = mode; ++ ext4_mark_inode_dirty(handle, inode); ++ acl = NULL; ++ } ++ } ++ if (acl) { ++ size = richacl_xattr_size(acl); ++ value = kmalloc(size, GFP_KERNEL); ++ if (!value) ++ return -ENOMEM; ++ richacl_to_xattr(acl, value); ++ } ++ if (handle) ++ retval = ext4_xattr_set_handle(handle, inode, name_index, "", ++ value, size, 0); ++ else ++ retval = ext4_xattr_set(inode, name_index, "", value, size, 
0); ++ kfree(value); ++ if (!retval) ++ ext4_iset_richacl(inode, acl); ++ ++ return retval; ++} ++ ++int ++ext4_richacl_permission(struct inode *inode, unsigned int mask) ++{ ++ struct richacl *acl; ++ int retval; ++ ++ if (!IS_RICHACL(inode)) ++ BUG(); ++ ++ acl = ext4_get_richacl(inode); ++ if (acl && IS_ERR(acl)) ++ retval = PTR_ERR(acl); ++ else { ++ retval = richacl_inode_permission(inode, acl, mask); ++ richacl_put(acl); ++ } ++ ++ return retval; ++} ++ ++int ext4_permission(struct inode *inode, int mask) ++{ ++ if (IS_RICHACL(inode)) ++ return ext4_richacl_permission(inode, ++ richacl_want_to_mask(mask)); ++ else ++ return generic_permission(inode, mask, ext4_check_acl); ++} ++ ++int ext4_may_create(struct inode *dir, int isdir) ++{ ++ return richacl_may_create(dir, isdir, ext4_richacl_permission); ++} ++ ++int ext4_may_delete(struct inode *dir, struct inode *inode, int replace) ++{ ++ return richacl_may_delete(dir, inode, replace, ext4_richacl_permission); ++} ++ ++int ++ext4_init_richacl(handle_t *handle, struct inode *inode, struct inode *dir) ++{ ++ struct richacl *dir_acl = NULL; ++ ++ if (!S_ISLNK(inode->i_mode)) { ++ dir_acl = ext4_get_richacl(dir); ++ if (IS_ERR(dir_acl)) ++ return PTR_ERR(dir_acl); ++ } ++ if (dir_acl) { ++ struct richacl *acl; ++ int retval; ++ ++ acl = richacl_inherit(dir_acl, inode); ++ richacl_put(dir_acl); ++ ++ retval = PTR_ERR(acl); ++ if (acl && !IS_ERR(acl)) { ++ retval = ext4_set_richacl(handle, inode, acl); ++ richacl_put(acl); ++ } ++ return retval; ++ } else { ++ inode->i_mode &= ~current_umask(); ++ return 0; ++ } ++} ++ ++int ++ext4_richacl_chmod(struct inode *inode) ++{ ++ struct richacl *acl; ++ int retval; ++ ++ if (S_ISLNK(inode->i_mode)) ++ return -EOPNOTSUPP; ++ acl = ext4_get_richacl(inode); ++ if (IS_ERR_OR_NULL(acl)) ++ return PTR_ERR(acl); ++ acl = richacl_chmod(acl, inode->i_mode); ++ if (IS_ERR(acl)) ++ return PTR_ERR(acl); ++ retval = ext4_set_richacl(NULL, inode, acl); ++ richacl_put(acl); ++ ++ return retval; ++} ++ ++static size_t ++ext4_xattr_list_richacl(struct dentry *dentry, char *list, size_t list_len, ++ const char *name, size_t name_len, int type) ++{ ++ const size_t size = sizeof(RICHACL_XATTR); ++ if (!IS_RICHACL(dentry->d_inode)) ++ return 0; ++ if (list && size <= list_len) ++ memcpy(list, RICHACL_XATTR, size); ++ return size; ++} ++ ++static int ++ext4_xattr_get_richacl(struct dentry *dentry, const char *name, void *buffer, ++ size_t buffer_size, int type) ++{ ++ struct richacl *acl; ++ size_t size; ++ ++ if (strcmp(name, "") != 0) ++ return -EINVAL; ++ acl = ext4_get_richacl(dentry->d_inode); ++ if (IS_ERR(acl)) ++ return PTR_ERR(acl); ++ if (acl == NULL) ++ return -ENODATA; ++ size = richacl_xattr_size(acl); ++ if (buffer) { ++ if (size > buffer_size) ++ return -ERANGE; ++ richacl_to_xattr(acl, buffer); ++ } ++ richacl_put(acl); ++ ++ return size; ++} ++ ++static int ++ext4_xattr_set_richacl(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags, int type) ++{ ++ handle_t *handle; ++ struct richacl *acl = NULL; ++ int retval, retries = 0; ++ struct inode *inode = dentry->d_inode; ++ ++ if (!IS_RICHACL(dentry->d_inode)) ++ return -EOPNOTSUPP; ++ if (S_ISLNK(inode->i_mode)) ++ return -EOPNOTSUPP; ++ if (strcmp(name, "") != 0) ++ return -EINVAL; ++ if (current_fsuid() != inode->i_uid && ++ ext4_richacl_permission(inode, ACE4_WRITE_ACL) && ++ !capable(CAP_FOWNER)) ++ return -EPERM; ++ if (value) { ++ acl = richacl_from_xattr(value, size); ++ if (IS_ERR(acl)) ++ return PTR_ERR(acl); ++ 
++ inode->i_mode &= ~S_IRWXUGO; ++ inode->i_mode |= richacl_masks_to_mode(acl); ++ } ++ ++retry: ++ handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb)); ++ if (IS_ERR(handle)) ++ return PTR_ERR(handle); ++ ext4_mark_inode_dirty(handle, inode); ++ retval = ext4_set_richacl(handle, inode, acl); ++ ext4_journal_stop(handle); ++ if (retval == ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) ++ goto retry; ++ richacl_put(acl); ++ return retval; ++} ++ ++const struct xattr_handler ext4_richacl_xattr_handler = { ++ .prefix = RICHACL_XATTR, ++ .list = ext4_xattr_list_richacl, ++ .get = ext4_xattr_get_richacl, ++ .set = ext4_xattr_set_richacl, ++}; +--- /dev/null ++++ b/fs/ext4/richacl.h +@@ -0,0 +1,56 @@ ++/* ++ * Copyright IBM Corporation, 2010 ++ * Author Aneesh Kumar K.V ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of version 2.1 of the GNU Lesser General Public License ++ * as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it would be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++ * ++ */ ++ ++#ifndef __FS_EXT4_RICHACL_H ++#define __FS_EXT4_RICHACL_H ++ ++#include ++ ++#ifdef CONFIG_EXT4_FS_RICHACL ++ ++# define EXT4_IS_RICHACL(inode) IS_RICHACL(inode) ++ ++/* Value for i_richacl if RICHACL has not been cached */ ++# define EXT4_RICHACL_NOT_CACHED ((void *)-1) ++ ++extern int ext4_permission(struct inode *, int); ++extern int ext4_richacl_permission(struct inode *, unsigned int); ++extern int ext4_may_create(struct inode *, int); ++extern int ext4_may_delete(struct inode *, struct inode *, int); ++extern int ext4_init_richacl(handle_t *, struct inode *, struct inode *); ++extern int ext4_richacl_chmod(struct inode *); ++ ++#else /* CONFIG_FS_EXT4_RICHACL */ ++ ++# define EXT4_IS_RICHACL(inode) (0) ++ ++# define ext4_permission NULL ++# define ext4_may_create NULL ++# define ext4_may_delete NULL ++# define ext4_richacl_permission NULL ++ ++static inline int ++ext4_init_richacl(handle_t *handle, struct inode *inode, struct inode *dir) ++{ ++ return 0; ++} ++ ++static inline int ++ext4_richacl_chmod(struct inode *inode) ++{ ++ return 0; ++} ++ ++#endif /* CONFIG_FS_EXT4_RICHACL */ ++#endif /* __FS_EXT4_RICHACL_H */ +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -46,6 +46,7 @@ + #include "xattr.h" + #include "acl.h" + #include "mballoc.h" ++#include "richacl.h" + + #define CREATE_TRACE_POINTS + #include +@@ -795,7 +796,9 @@ static struct inode *ext4_alloc_inode(st + ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); + if (!ei) + return NULL; +- ++#ifdef CONFIG_EXT4_FS_RICHACL ++ ei->i_richacl = EXT4_RICHACL_NOT_CACHED; ++#endif + ei->vfs_inode.i_version = 1; + ei->vfs_inode.i_data.writeback_index = 0; + memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); +@@ -873,6 +876,13 @@ void ext4_clear_inode(struct inode *inod + invalidate_inode_buffers(inode); + end_writeback(inode); + dquot_drop(inode); ++#ifdef CONFIG_EXT4_FS_RICHACL ++ if (EXT4_I(inode)->i_richacl && ++ EXT4_I(inode)->i_richacl != EXT4_RICHACL_NOT_CACHED) { ++ richacl_put(EXT4_I(inode)->i_richacl); ++ EXT4_I(inode)->i_richacl = EXT4_RICHACL_NOT_CACHED; ++ } ++#endif + ext4_discard_preallocations(inode); + if (EXT4_I(inode)->jinode) { + jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), +@@ -973,10 +983,13 @@ static int ext4_show_options(struct seq_ + seq_puts(seq, ",nouser_xattr"); + } + #endif 
+-#ifdef CONFIG_EXT4_FS_POSIX_ACL +- if ((sb->s_flags & MS_POSIXACL) && !(def_mount_opts & EXT4_DEFM_ACL)) +- seq_puts(seq, ",acl"); +- if (!(sb->s_flags & MS_POSIXACL) && (def_mount_opts & EXT4_DEFM_ACL)) ++#if defined(CONFIG_EXT4_FS_POSIX_ACL) || defined(CONFIG_EXT4_FS_RICHACL) ++ if (sb->s_flags & MS_POSIXACL) { ++ if (!(def_mount_opts & EXT4_DEFM_ACL)) ++ seq_puts(seq, ",acl"); ++ } else if (sb->s_flags & MS_RICHACL) ++ seq_puts(seq, ",richacl"); ++ else if (def_mount_opts & EXT4_DEFM_ACL) + seq_puts(seq, ",noacl"); + #endif + if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { +@@ -1203,7 +1216,7 @@ enum { + Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, + Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, + Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov, +- Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, ++ Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_richacl, Opt_noacl, + Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh, + Opt_commit, Opt_min_batch_time, Opt_max_batch_time, + Opt_journal_update, Opt_journal_dev, +@@ -1241,6 +1254,7 @@ static const match_table_t tokens = { + {Opt_user_xattr, "user_xattr"}, + {Opt_nouser_xattr, "nouser_xattr"}, + {Opt_acl, "acl"}, ++ {Opt_richacl, "richacl"}, + {Opt_noacl, "noacl"}, + {Opt_noload, "noload"}, + {Opt_noload, "norecovery"}, +@@ -1479,17 +1493,26 @@ static int parse_options(char *options, + ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported"); + break; + #endif +-#ifdef CONFIG_EXT4_FS_POSIX_ACL ++#if defined(CONFIG_EXT4_FS_POSIX_ACL) || defined(CONFIG_EXT4_FS_RICHACL) ++# ifdef CONFIG_EXT4_FS_POSIX_ACL + case Opt_acl: +- sb->s_flags |= MS_POSIXACL; ++ if (!(sb->s_flags & MS_RICHACL)) ++ sb->s_flags |= MS_POSIXACL; + break; +- case Opt_noacl: ++# endif ++# ifdef CONFIG_EXT4_FS_RICHACL ++ case Opt_richacl: + sb->s_flags &= ~MS_POSIXACL; ++ sb->s_flags |= MS_RICHACL; ++ break; ++# endif ++ case Opt_noacl: ++ sb->s_flags &= ~(MS_POSIXACL | MS_RICHACL); + break; + #else + case Opt_acl: + case Opt_noacl: +- ext4_msg(sb, KERN_ERR, "(no)acl options not supported"); ++ ext4_msg(sb, KERN_ERR, "(no)acl/richacl options not supported"); + break; + #endif + case Opt_journal_update: +@@ -2642,7 +2665,7 @@ static int ext4_fill_super(struct super_ + if (def_mount_opts & EXT4_DEFM_XATTR_USER) + set_opt(sb, XATTR_USER); + #endif +-#ifdef CONFIG_EXT4_FS_POSIX_ACL ++#if defined(CONFIG_EXT4_FS_POSIX_ACL) + if (def_mount_opts & EXT4_DEFM_ACL) + sb->s_flags |= MS_POSIXACL; + #endif +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -107,6 +107,9 @@ static const struct xattr_handler *ext4_ + #ifdef CONFIG_EXT4_FS_SECURITY + [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler, + #endif ++#ifdef CONFIG_EXT4_FS_RICHACL ++ [EXT4_XATTR_INDEX_RICHACL] = &ext4_richacl_xattr_handler, ++#endif + }; + + const struct xattr_handler *ext4_xattr_handlers[] = { +@@ -119,6 +122,9 @@ const struct xattr_handler *ext4_xattr_h + #ifdef CONFIG_EXT4_FS_SECURITY + &ext4_xattr_security_handler, + #endif ++#ifdef CONFIG_EXT4_FS_RICHACL ++ &ext4_richacl_xattr_handler, ++#endif + NULL + }; + +--- a/fs/ext4/xattr.h ++++ b/fs/ext4/xattr.h +@@ -21,6 +21,7 @@ + #define EXT4_XATTR_INDEX_TRUSTED 4 + #define EXT4_XATTR_INDEX_LUSTRE 5 + #define EXT4_XATTR_INDEX_SECURITY 6 ++#define EXT4_XATTR_INDEX_RICHACL 7 + + struct ext4_xattr_header { + __le32 h_magic; /* magic number for identification */ +@@ -70,6 +71,10 @@ extern const struct xattr_handler ext4_x + extern const struct xattr_handler 
ext4_xattr_acl_access_handler; + extern const struct xattr_handler ext4_xattr_acl_default_handler; + extern const struct xattr_handler ext4_xattr_security_handler; ++extern const struct xattr_handler ext4_xattr_acl_access_handler; ++extern const struct xattr_handler ext4_xattr_acl_default_handler; ++extern const struct xattr_handler ext4_xattr_security_handler; ++extern const struct xattr_handler ext4_richacl_xattr_handler; + + extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); + diff --git a/patches.suse/8250-sysrq-ctrl_o.patch b/patches.suse/8250-sysrq-ctrl_o.patch index f06115c..da7db84 100644 --- a/patches.suse/8250-sysrq-ctrl_o.patch +++ b/patches.suse/8250-sysrq-ctrl_o.patch @@ -24,7 +24,7 @@ Signed-off-by: Olaf Hering --- arch/powerpc/include/asm/serial.h | 6 ++++ arch/powerpc/kernel/legacy_serial.c | 52 ++++++++++++++++++++++++++++++++++++ - drivers/serial/8250.c | 6 ++++ + drivers/tty/serial/8250.c | 6 ++++ 3 files changed, 64 insertions(+) --- a/arch/powerpc/include/asm/serial.h @@ -44,7 +44,7 @@ Signed-off-by: Olaf Hering #else --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c -@@ -494,6 +494,55 @@ device_initcall(serial_dev_init); +@@ -495,6 +495,55 @@ device_initcall(serial_dev_init); #ifdef CONFIG_SERIAL_8250_CONSOLE @@ -100,7 +100,7 @@ Signed-off-by: Olaf Hering /* * This is called very early, as part of console_init() (typically just after * time_init()). This function is respondible for trying to find a good -@@ -562,6 +611,9 @@ static int __init check_legacy_serial_co +@@ -563,6 +612,9 @@ static int __init check_legacy_serial_co if (i >= legacy_serial_count) goto not_found; @@ -110,9 +110,9 @@ Signed-off-by: Olaf Hering of_node_put(prom_stdout); DBG("Found serial console at ttyS%d\n", offset); ---- a/drivers/serial/8250.c -+++ b/drivers/serial/8250.c -@@ -100,6 +100,8 @@ static unsigned int skip_txen_test; /* f +--- a/drivers/tty/serial/8250.c ++++ b/drivers/tty/serial/8250.c +@@ -102,6 +102,8 @@ static unsigned int skip_txen_test; /* f #define CONFIG_SERIAL_MANY_PORTS 1 #endif @@ -121,7 +121,7 @@ Signed-off-by: Olaf Hering /* * HUB6 is always on. This will be removed once the header * files have been cleaned. 
-@@ -1386,7 +1388,11 @@ receive_chars(struct uart_8250_port *up, +@@ -1423,7 +1425,11 @@ receive_chars(struct uart_8250_port *up, do { if (likely(lsr & UART_LSR_DR)) diff --git a/patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch b/patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch index 185a8ea..c9484a4 100644 --- a/patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch +++ b/patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch @@ -16,7 +16,7 @@ Patch-mainline: not yet Acked-by: Michal Marek --- - drivers/char/vt.c | 10 + drivers/tty/vt/vt.c | 10 drivers/video/bootsplash/bootsplash.c | 407 +++++++++++++++++++++--------- drivers/video/bootsplash/bootsplash.h | 23 - drivers/video/bootsplash/decode-jpg.c | 50 +++ @@ -27,9 +27,9 @@ Acked-by: Michal Marek include/linux/fb.h | 3 9 files changed, 652 insertions(+), 316 deletions(-) ---- a/drivers/char/vt.c -+++ b/drivers/char/vt.c -@@ -4104,7 +4104,7 @@ void vcs_scr_writew(struct vc_data *vc, +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -4216,7 +4216,7 @@ void vcs_scr_updated(struct vc_data *vc) #ifdef CONFIG_BOOTSPLASH void con_remap_def_color(struct vc_data *vc, int new_color) { @@ -38,7 +38,7 @@ Acked-by: Michal Marek unsigned c, len = vc->vc_screenbuf_size >> 1; int old_color; -@@ -4112,11 +4112,13 @@ void con_remap_def_color(struct vc_data +@@ -4224,11 +4224,13 @@ void con_remap_def_color(struct vc_data old_color = vc->vc_def_color << 8; new_color <<= 8; while(len--) { @@ -220,7 +220,7 @@ Acked-by: Michal Marek - return 0; + SPLASH_DEBUG(); + if (!oops_in_progress) -+ acquire_console_sem(); ++ console_lock(); - vc = vc_cons[0].d; + if (!splash_usesilent) @@ -261,7 +261,7 @@ Acked-by: Michal Marek + + done: + if (!oops_in_progress) -+ release_console_sem(); ++ console_unlock(); + + return ret; +} @@ -519,7 +519,7 @@ Acked-by: Michal Marek if (!strncmp(buffer, "redraw", 6)) { + SPLASH_DEBUG( " redraw"); splash_status(vc); - release_console_sem(); + console_unlock(); return count; @@ -851,6 +975,7 @@ static int splash_write_proc(struct file if (!strncmp(buffer, "show", 4) || !strncmp(buffer, "hide", 4)) { @@ -530,7 +530,7 @@ Acked-by: Michal Marek pe = 0; else if (buffer[4] == '\n') @@ -867,51 +992,77 @@ static int splash_write_proc(struct file - release_console_sem(); + console_unlock(); return count; } + @@ -546,7 +546,7 @@ Acked-by: Michal Marek + splash_status(vc); + } } - release_console_sem(); + console_unlock(); return count; } if (!strncmp(buffer,"freesilent\n",11)) { @@ -570,7 +570,7 @@ Acked-by: Michal Marek + } + vc->vc_splash_data->splash_dosilent = 0; } - release_console_sem(); + console_unlock(); return count; } - @@ -631,20 +631,20 @@ Acked-by: Michal Marek + } } - } -- release_console_sem(); +- console_unlock(); - return count; -+ release_console_sem(); ++ console_unlock(); + return count; } if (!vc->vc_splash_data) { - release_console_sem(); + console_unlock(); @@ -919,6 +1070,7 @@ static int splash_write_proc(struct file } if (buffer[0] == 't') { vc->vc_splash_data->splash_state ^= 1; + SPLASH_DEBUG(" t"); splash_status(vc); - release_console_sem(); + console_unlock(); return count; @@ -959,6 +1111,8 @@ static int splash_proc_unregister(void) # endif @@ -674,13 +674,13 @@ Acked-by: Michal Marek @@ -1004,7 +1161,9 @@ void splash_init(void) mem = vmalloc(len); if (mem) { - acquire_console_sem(); + console_lock(); - if ((int)sys_read(fd, mem, len) == len && splash_getraw((unsigned char *)mem, (unsigned char *)mem + len, (int *)0) == 0 && vc->vc_splash_data) + if 
((int)sys_read(fd, mem, len) == len + && splash_getraw((unsigned char *)mem, (unsigned char *)mem + len, (int *)0) == INIT_CONSOLE + && vc->vc_splash_data) vc->vc_splash_data->splash_state = splash_default & 1; - release_console_sem(); + console_unlock(); vfree(mem); --- a/drivers/video/bootsplash/bootsplash.h +++ b/drivers/video/bootsplash/bootsplash.h @@ -1395,7 +1395,7 @@ Acked-by: Michal Marek - --- a/drivers/video/console/bitblit.c +++ b/drivers/video/console/bitblit.c -@@ -52,7 +52,7 @@ static void bit_bmove(struct vc_data *vc +@@ -53,7 +53,7 @@ static void bit_bmove(struct vc_data *vc #ifdef CONFIG_BOOTSPLASH if (info->splash_data) { @@ -1404,7 +1404,7 @@ Acked-by: Michal Marek sy, sx, dy, dx, height, width); return; } -@@ -75,8 +75,8 @@ static void bit_clear(struct vc_data *vc +@@ -76,8 +76,8 @@ static void bit_clear(struct vc_data *vc #ifdef CONFIG_BOOTSPLASH if (info->splash_data) { @@ -1415,7 +1415,7 @@ Acked-by: Michal Marek return; } #endif -@@ -179,7 +179,7 @@ static void bit_putcs(struct vc_data *vc +@@ -180,7 +180,7 @@ static void bit_putcs(struct vc_data *vc #ifdef CONFIG_BOOTSPLASH if (info->splash_data) { @@ -1424,7 +1424,7 @@ Acked-by: Michal Marek return; } #endif -@@ -239,7 +239,7 @@ static void bit_clear_margins(struct vc_ +@@ -240,7 +240,7 @@ static void bit_clear_margins(struct vc_ #ifdef CONFIG_BOOTSPLASH if (info->splash_data) { @@ -1433,7 +1433,7 @@ Acked-by: Michal Marek return; } #endif -@@ -412,7 +412,7 @@ static void bit_cursor(struct vc_data *v +@@ -413,7 +413,7 @@ static void bit_cursor(struct vc_data *v #ifdef CONFIG_BOOTSPLASH if (info->splash_data) { @@ -1444,7 +1444,7 @@ Acked-by: Michal Marek } --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c -@@ -2072,7 +2072,7 @@ static void fbcon_bmove_rec(struct vc_da +@@ -2074,7 +2074,7 @@ static void fbcon_bmove_rec(struct vc_da #ifdef CONFIG_BOOTSPLASH if (info->splash_data && sy == dy && height == 1) { /* must use slower redraw bmove to keep background pic intact */ @@ -1453,7 +1453,7 @@ Acked-by: Michal Marek return; } #endif -@@ -2323,7 +2323,7 @@ static void fbcon_generic_blank(struct v +@@ -2325,7 +2325,7 @@ static void fbcon_generic_blank(struct v #ifdef CONFIG_BOOTSPLASH if (info->splash_data) { @@ -1464,7 +1464,7 @@ Acked-by: Michal Marek #endif --- a/drivers/video/vesafb.c +++ b/drivers/video/vesafb.c -@@ -182,10 +182,7 @@ static void vesafb_destroy(struct fb_inf +@@ -181,10 +181,7 @@ static void vesafb_destroy(struct fb_inf framebuffer_release(info); } @@ -1476,7 +1476,7 @@ Acked-by: Michal Marek .owner = THIS_MODULE, .fb_destroy = vesafb_destroy, .fb_setcolreg = vesafb_setcolreg, -@@ -270,9 +267,6 @@ static int __devinit vesafb_probe(struct +@@ -269,9 +266,6 @@ static int __init vesafb_probe(struct pl * option to simply use size_total as that * wastes plenty of kernel address space. 
*/ size_remap = size_vmode * 2; @@ -1488,7 +1488,7 @@ Acked-by: Michal Marek if (size_remap < size_vmode) --- a/include/linux/fb.h +++ b/include/linux/fb.h -@@ -863,8 +863,7 @@ struct fb_info { +@@ -879,8 +879,7 @@ struct fb_info { struct splash_data *splash_data; unsigned char *splash_pic; int splash_pic_size; diff --git a/patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch b/patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch index c5b97b8..8e16adf 100644 --- a/patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch +++ b/patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch @@ -14,7 +14,7 @@ Signed-off-by: Suresh Jayaraman --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -148,6 +148,7 @@ static char * const zone_names[MAX_NR_ZO +@@ -165,6 +165,7 @@ static char * const zone_names[MAX_NR_ZO "Movable", }; @@ -22,7 +22,7 @@ Signed-off-by: Suresh Jayaraman int min_free_kbytes = 1024; static unsigned long __meminitdata nr_kernel_pages; -@@ -4609,13 +4610,13 @@ static void setup_per_zone_lowmem_reserv +@@ -4845,13 +4846,13 @@ static void setup_per_zone_lowmem_reserv } /** @@ -38,7 +38,7 @@ Signed-off-by: Suresh Jayaraman { unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; -@@ -4713,6 +4714,15 @@ static void __init setup_per_zone_inacti +@@ -4949,6 +4950,15 @@ static void __init setup_per_zone_inacti calculate_zone_inactive_ratio(zone); } @@ -54,7 +54,7 @@ Signed-off-by: Suresh Jayaraman /* * Initialise min_free_kbytes. * -@@ -4748,7 +4758,7 @@ static int __init init_per_zone_wmark_mi +@@ -4984,7 +4994,7 @@ static int __init init_per_zone_wmark_mi min_free_kbytes = 128; if (min_free_kbytes > 65536) min_free_kbytes = 65536; diff --git a/patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch b/patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch index 812df3b..c7982ec 100644 --- a/patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch +++ b/patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch @@ -38,7 +38,7 @@ Signed-off-by: Suresh Jayaraman MMINIT_WARNING, --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -1261,19 +1261,6 @@ failed: +@@ -1345,19 +1345,6 @@ failed: return NULL; } @@ -58,13 +58,13 @@ Signed-off-by: Suresh Jayaraman #ifdef CONFIG_FAIL_PAGE_ALLOC static struct fail_page_alloc_attr { -@@ -1768,8 +1755,7 @@ void wake_all_kswapd(unsigned int order, - wakeup_kswapd(zone, order); +@@ -1911,8 +1898,7 @@ void wake_all_kswapd(unsigned int order, + wakeup_kswapd(zone, order, classzone_idx); } -static inline int -gfp_to_alloc_flags(gfp_t gfp_mask) +int gfp_to_alloc_flags(gfp_t gfp_mask) { - struct task_struct *p = current; int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; + const gfp_t wait = gfp_mask & __GFP_WAIT; diff --git a/patches.suse/SoN-04-page_alloc-reserve.patch b/patches.suse/SoN-04-page_alloc-reserve.patch index efd1381..c780d6d 100644 --- a/patches.suse/SoN-04-page_alloc-reserve.patch +++ b/patches.suse/SoN-04-page_alloc-reserve.patch @@ -29,7 +29,7 @@ Signed-off-by: Suresh Jayaraman * protected by zone->lru_lock ! 
--- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -1572,8 +1572,10 @@ zonelist_scan: +@@ -1656,8 +1656,10 @@ zonelist_scan: try_this_zone: page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask, migratetype); diff --git a/patches.suse/SoN-05-reserve-slub.patch b/patches.suse/SoN-05-reserve-slub.patch index fe30a17..fd47f19 100644 --- a/patches.suse/SoN-05-reserve-slub.patch +++ b/patches.suse/SoN-05-reserve-slub.patch @@ -20,12 +20,14 @@ Signed-off-by: Suresh Jayaraman include/linux/slub_def.h | 1 mm/slab.c | 61 ++++++++++++++++++++++++++++++++++++++++------- mm/slob.c | 16 +++++++++++- - mm/slub.c | 43 +++++++++++++++++++++++++++------ - 4 files changed, 104 insertions(+), 17 deletions(-) + mm/slub.c | 41 ++++++++++++++++++++++++++----- + 4 files changed, 103 insertions(+), 16 deletions(-) ---- a/include/linux/slub_def.h -+++ b/include/linux/slub_def.h -@@ -38,6 +38,7 @@ struct kmem_cache_cpu { +Index: linux-2.6.35-master/include/linux/slub_def.h +=================================================================== +--- linux-2.6.35-master.orig/include/linux/slub_def.h ++++ linux-2.6.35-master/include/linux/slub_def.h +@@ -39,6 +39,7 @@ struct kmem_cache_cpu { void **freelist; /* Pointer to first free per cpu object */ struct page *page; /* The slab from which we are allocating */ int node; /* The node of the page (or -1 for debug) */ @@ -33,8 +35,10 @@ Signed-off-by: Suresh Jayaraman #ifdef CONFIG_SLUB_STATS unsigned stat[NR_SLUB_STAT_ITEMS]; #endif ---- a/mm/slab.c -+++ b/mm/slab.c +Index: linux-2.6.35-master/mm/slab.c +=================================================================== +--- linux-2.6.35-master.orig/mm/slab.c ++++ linux-2.6.35-master/mm/slab.c @@ -120,6 +120,8 @@ #include #include @@ -44,7 +48,7 @@ Signed-off-by: Suresh Jayaraman /* * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. * 0 for faster, smaller code (especially in the critical paths). -@@ -268,7 +270,8 @@ struct array_cache { +@@ -244,7 +246,8 @@ struct array_cache { unsigned int avail; unsigned int limit; unsigned int batchcount; @@ -54,7 +58,7 @@ Signed-off-by: Suresh Jayaraman spinlock_t lock; void *entry[]; /* * Must have this definition in here for the proper -@@ -704,6 +707,27 @@ static inline struct array_cache *cpu_ca +@@ -680,6 +683,27 @@ static inline struct array_cache *cpu_ca return cachep->array[smp_processor_id()]; } @@ -82,7 +86,7 @@ Signed-off-by: Suresh Jayaraman static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags) { -@@ -910,6 +934,7 @@ static struct array_cache *alloc_arrayca +@@ -886,6 +910,7 @@ static struct array_cache *alloc_arrayca nc->limit = entries; nc->batchcount = batchcount; nc->touched = 0; @@ -90,7 +94,7 @@ Signed-off-by: Suresh Jayaraman spin_lock_init(&nc->lock); } return nc; -@@ -1606,7 +1631,8 @@ __initcall(cpucache_init); +@@ -1674,7 +1699,8 @@ __initcall(cpucache_init); * did not request dmaable memory, we might get it, but that * would be relatively rare and ignorable. 
*/ @@ -100,7 +104,7 @@ Signed-off-by: Suresh Jayaraman { struct page *page; int nr_pages; -@@ -1628,6 +1654,7 @@ static void *kmem_getpages(struct kmem_c +@@ -1696,6 +1722,7 @@ static void *kmem_getpages(struct kmem_c if (!page) return NULL; @@ -108,7 +112,7 @@ Signed-off-by: Suresh Jayaraman nr_pages = (1 << cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) add_zone_page_state(page_zone(page), -@@ -2060,6 +2087,7 @@ static int __init_refok setup_cpu_cache( +@@ -2128,6 +2155,7 @@ static int __init_refok setup_cpu_cache( cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; cpu_cache_get(cachep)->batchcount = 1; cpu_cache_get(cachep)->touched = 0; @@ -116,7 +120,7 @@ Signed-off-by: Suresh Jayaraman cachep->batchcount = 1; cachep->limit = BOOT_CPUCACHE_ENTRIES; return 0; -@@ -2745,6 +2773,7 @@ static int cache_grow(struct kmem_cache +@@ -2813,6 +2841,7 @@ static int cache_grow(struct kmem_cache size_t offset; gfp_t local_flags; struct kmem_list3 *l3; @@ -124,7 +128,7 @@ Signed-off-by: Suresh Jayaraman /* * Be lazy and only check for valid flags here, keeping it out of the -@@ -2783,7 +2812,7 @@ static int cache_grow(struct kmem_cache +@@ -2851,7 +2880,7 @@ static int cache_grow(struct kmem_cache * 'nodeid'. */ if (!objp) @@ -133,7 +137,7 @@ Signed-off-by: Suresh Jayaraman if (!objp) goto failed; -@@ -2800,6 +2829,7 @@ static int cache_grow(struct kmem_cache +@@ -2868,6 +2897,7 @@ static int cache_grow(struct kmem_cache if (local_flags & __GFP_WAIT) local_irq_disable(); check_irq_off(); @@ -141,7 +145,7 @@ Signed-off-by: Suresh Jayaraman spin_lock(&l3->list_lock); /* Make slab active. */ -@@ -2934,7 +2964,8 @@ bad: +@@ -3002,7 +3032,8 @@ bad: #define check_slabp(x,y) do { } while(0) #endif @@ -151,16 +155,16 @@ Signed-off-by: Suresh Jayaraman { int batchcount; struct kmem_list3 *l3; -@@ -2944,6 +2975,8 @@ static void *cache_alloc_refill(struct k +@@ -3012,6 +3043,8 @@ static void *cache_alloc_refill(struct k retry: check_irq_off(); - node = numa_node_id(); + node = numa_mem_id(); + if (unlikely(must_refill)) + goto force_grow; ac = cpu_cache_get(cachep); batchcount = ac->batchcount; if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { -@@ -3013,11 +3046,14 @@ alloc_done: +@@ -3081,11 +3114,14 @@ alloc_done: if (unlikely(!ac->avail)) { int x; @@ -176,7 +180,7 @@ Signed-off-by: Suresh Jayaraman return NULL; if (!ac->avail) /* objects refilled by interrupt? */ -@@ -3107,17 +3143,18 @@ static inline void *____cache_alloc(stru +@@ -3175,17 +3211,18 @@ static inline void *____cache_alloc(stru { void *objp; struct array_cache *ac; @@ -197,7 +201,7 @@ Signed-off-by: Suresh Jayaraman /* * the 'ac' may be updated by cache_alloc_refill(), * and kmemleak_erase() requires its correct value. 
-@@ -3173,7 +3210,7 @@ static void *fallback_alloc(struct kmem_ +@@ -3243,7 +3280,7 @@ static void *fallback_alloc(struct kmem_ struct zone *zone; enum zone_type high_zoneidx = gfp_zone(flags); void *obj = NULL; @@ -206,12 +210,12 @@ Signed-off-by: Suresh Jayaraman if (flags & __GFP_THISNODE) return NULL; -@@ -3209,10 +3246,12 @@ retry: +@@ -3280,10 +3317,12 @@ retry: if (local_flags & __GFP_WAIT) local_irq_enable(); kmem_flagcheck(cache, flags); -- obj = kmem_getpages(cache, local_flags, numa_node_id()); -+ obj = kmem_getpages(cache, local_flags, numa_node_id(), +- obj = kmem_getpages(cache, local_flags, numa_mem_id()); ++ obj = kmem_getpages(cache, local_flags, numa_mem_id(), + &reserve); if (local_flags & __GFP_WAIT) local_irq_disable(); @@ -220,7 +224,7 @@ Signed-off-by: Suresh Jayaraman /* * Insert into the appropriate per node queues */ -@@ -3251,6 +3290,9 @@ static void *____cache_alloc_node(struct +@@ -3323,6 +3362,9 @@ static void *____cache_alloc_node(struct l3 = cachep->nodelists[nodeid]; BUG_ON(!l3); @@ -230,7 +234,7 @@ Signed-off-by: Suresh Jayaraman retry: check_irq_off(); spin_lock(&l3->list_lock); -@@ -3288,6 +3330,7 @@ retry: +@@ -3360,6 +3402,7 @@ retry: must_grow: spin_unlock(&l3->list_lock); @@ -238,17 +242,19 @@ Signed-off-by: Suresh Jayaraman x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); if (x) goto retry; ---- a/mm/slob.c -+++ b/mm/slob.c -@@ -69,6 +69,7 @@ - #include - #include +Index: linux-2.6.35-master/mm/slob.c +=================================================================== +--- linux-2.6.35-master.orig/mm/slob.c ++++ linux-2.6.35-master/mm/slob.c +@@ -71,6 +71,7 @@ + #include + #include +#include "internal.h" /* * slob_block has a field 'units', which indicates size of block if +ve, -@@ -191,6 +192,11 @@ struct slob_rcu { +@@ -193,6 +194,11 @@ struct slob_rcu { static DEFINE_SPINLOCK(slob_lock); /* @@ -260,7 +266,7 @@ Signed-off-by: Suresh Jayaraman * Encode the given size and next info into a free slob block s. 
*/ static void set_slob(slob_t *s, slobidx_t size, slob_t *next) -@@ -240,7 +246,7 @@ static int slob_last(slob_t *s) +@@ -242,7 +248,7 @@ static int slob_last(slob_t *s) static void *slob_new_pages(gfp_t gfp, int order, int node) { @@ -269,7 +275,7 @@ Signed-off-by: Suresh Jayaraman #ifdef CONFIG_NUMA if (node != -1) -@@ -252,6 +258,8 @@ static void *slob_new_pages(gfp_t gfp, i +@@ -254,6 +260,8 @@ static void *slob_new_pages(gfp_t gfp, i if (!page) return NULL; @@ -278,7 +284,7 @@ Signed-off-by: Suresh Jayaraman return page_address(page); } -@@ -324,6 +332,11 @@ static void *slob_alloc(size_t size, gfp +@@ -326,6 +334,11 @@ static void *slob_alloc(size_t size, gfp slob_t *b = NULL; unsigned long flags; @@ -290,7 +296,7 @@ Signed-off-by: Suresh Jayaraman if (size < SLOB_BREAK1) slob_list = &free_slob_small; else if (size < SLOB_BREAK2) -@@ -362,6 +375,7 @@ static void *slob_alloc(size_t size, gfp +@@ -364,6 +377,7 @@ static void *slob_alloc(size_t size, gfp } spin_unlock_irqrestore(&slob_lock, flags); @@ -298,18 +304,20 @@ Signed-off-by: Suresh Jayaraman /* Not enough space: must allocate a new page */ if (!b) { b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -28,6 +28,8 @@ +Index: linux-2.6.35-master/mm/slub.c +=================================================================== +--- linux-2.6.35-master.orig/mm/slub.c ++++ linux-2.6.35-master/mm/slub.c +@@ -27,6 +27,8 @@ #include #include #include +#include "internal.h" + - /* - * Lock order: -@@ -1148,7 +1150,8 @@ static void setup_object(struct kmem_cac + #include + +@@ -1145,7 +1147,8 @@ static void setup_object(struct kmem_cac s->ctor(object); } @@ -319,7 +327,7 @@ Signed-off-by: Suresh Jayaraman { struct page *page; void *start; -@@ -1162,6 +1165,8 @@ static struct page *new_slab(struct kmem +@@ -1159,6 +1162,8 @@ static struct page *new_slab(struct kmem if (!page) goto out; @@ -328,7 +336,7 @@ Signed-off-by: Suresh Jayaraman inc_slabs_node(s, page_to_nid(page), page->objects); page->slab = s; page->flags |= 1 << PG_slab; -@@ -1611,10 +1616,20 @@ static void *__slab_alloc(struct kmem_ca +@@ -1607,10 +1612,20 @@ static void *__slab_alloc(struct kmem_ca { void **object; struct page *new; @@ -349,22 +357,21 @@ Signed-off-by: Suresh Jayaraman if (!c->page) goto new_slab; -@@ -1628,8 +1643,8 @@ load_freelist: +@@ -1624,7 +1639,7 @@ load_freelist: object = c->page->freelist; if (unlikely(!object)) goto another_slab; -- if (unlikely(SLABDEBUG && PageSlubDebug(c->page))) -- goto debug; -+ if (unlikely(PageSlubDebug(c->page) || c->reserve)) -+ goto slow_path; +- if (kmem_cache_debug(s)) ++ if (unlikely(kmem_cache_debug(s) || c->reserve)) + goto debug; c->freelist = get_freepointer(s, object); - c->page->inuse = c->page->objects; -@@ -1651,16 +1666,18 @@ new_slab: +@@ -1647,17 +1662,19 @@ new_slab: goto load_freelist; } +grow_slab: + gfpflags &= gfp_allowed_mask; if (gfpflags & __GFP_WAIT) local_irq_enable(); @@ -380,15 +387,14 @@ Signed-off-by: Suresh Jayaraman stat(s, ALLOC_SLAB); if (c->page) flush_slab(s, c); -@@ -1672,10 +1689,21 @@ new_slab: +@@ -1668,10 +1685,21 @@ new_slab: if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) slab_out_of_memory(s, gfpflags, node); return NULL; --debug: -- if (!alloc_debug_processing(s, c->page, object, addr)) + -+slow_path: -+ if (PageSlubDebug(c->page) && + debug: +- if (!alloc_debug_processing(s, c->page, object, addr)) ++ if (kmem_cache_debug(s) && + !alloc_debug_processing(s, c->page, object, addr)) goto another_slab; @@ -403,17 +409,17 @@ Signed-off-by: 
Suresh Jayaraman + c->page->inuse++; c->page->freelist = get_freepointer(s, object); - c->node = -1; -@@ -2100,10 +2128,11 @@ static void early_kmem_cache_node_alloc( + c->node = NUMA_NO_NODE; +@@ -2096,10 +2124,11 @@ static void early_kmem_cache_node_alloc( struct page *page; struct kmem_cache_node *n; unsigned long flags; + int reserve; - BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); + BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); -- page = new_slab(kmalloc_caches, gfpflags, node); -+ page = new_slab(kmalloc_caches, gfpflags, node, &reserve); +- page = new_slab(kmem_cache_node, GFP_NOWAIT, node); ++ page = new_slab(kmem_cache_node, GFP_NOWAIT, node, &reserve); BUG_ON(!page); if (page_to_nid(page) != node) { diff --git a/patches.suse/SoN-06-mm-kmem_estimate_pages.patch b/patches.suse/SoN-06-mm-kmem_estimate_pages.patch index 806175e..7cd202c 100644 --- a/patches.suse/SoN-06-mm-kmem_estimate_pages.patch +++ b/patches.suse/SoN-06-mm-kmem_estimate_pages.patch @@ -2,6 +2,9 @@ From: Peter Zijlstra Subject: [PATCH 06/31] mm: kmem_alloc_estimate() Patch-mainline: not yet +Feb 8 2011: Refreshed patch to accomodate an upstream change - commit 55136592 +removed dynamic dma slab allocation. + Provide a method to get the upper bound on the pages needed to allocate a given number of objects from a given kmem_cache. @@ -18,18 +21,20 @@ Signed-off-by: Suresh Jayaraman mm/slub.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 233 insertions(+) ---- a/include/linux/slab.h -+++ b/include/linux/slab.h -@@ -107,6 +107,8 @@ void kmem_cache_free(struct kmem_cache * +Index: linux-2.6.37-master/include/linux/slab.h +=================================================================== +--- linux-2.6.37-master.orig/include/linux/slab.h ++++ linux-2.6.37-master/include/linux/slab.h +@@ -106,6 +106,8 @@ int kmem_cache_shrink(struct kmem_cache + void kmem_cache_free(struct kmem_cache *, void *); + unsigned int kmem_cache_size(struct kmem_cache *); const char *kmem_cache_name(struct kmem_cache *); - int kern_ptr_validate(const void *ptr, unsigned long size); - int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); +unsigned kmem_alloc_estimate(struct kmem_cache *cachep, + gfp_t flags, int objects); /* * Please use this macro to create slab caches. Simply specify the -@@ -143,6 +145,8 @@ void * __must_check krealloc(const void +@@ -142,6 +144,8 @@ void * __must_check krealloc(const void void kfree(const void *); void kzfree(const void *); size_t ksize(const void *); @@ -38,9 +43,11 @@ Signed-off-by: Suresh Jayaraman /* * Allocator specific definitions. These are mainly used to establish optimized ---- a/mm/slab.c -+++ b/mm/slab.c -@@ -3850,6 +3850,81 @@ const char *kmem_cache_name(struct kmem_ +Index: linux-2.6.37-master/mm/slab.c +=================================================================== +--- linux-2.6.37-master.orig/mm/slab.c ++++ linux-2.6.37-master/mm/slab.c +@@ -3890,6 +3890,81 @@ const char *kmem_cache_name(struct kmem_ EXPORT_SYMBOL_GPL(kmem_cache_name); /* @@ -122,9 +129,11 @@ Signed-off-by: Suresh Jayaraman * This initializes kmem_list3 or resizes various caches for all nodes. 
*/ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) ---- a/mm/slob.c -+++ b/mm/slob.c -@@ -702,6 +702,73 @@ int slab_is_available(void) +Index: linux-2.6.37-master/mm/slob.c +=================================================================== +--- linux-2.6.37-master.orig/mm/slob.c ++++ linux-2.6.37-master/mm/slob.c +@@ -699,6 +699,73 @@ int slab_is_available(void) return slob_ready; } @@ -198,9 +207,11 @@ Signed-off-by: Suresh Jayaraman void __init kmem_cache_init(void) { slob_ready = 1; ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -2449,6 +2449,42 @@ const char *kmem_cache_name(struct kmem_ +Index: linux-2.6.37-master/mm/slub.c +=================================================================== +--- linux-2.6.37-master.orig/mm/slub.c ++++ linux-2.6.37-master/mm/slub.c +@@ -2434,6 +2434,42 @@ const char *kmem_cache_name(struct kmem_ } EXPORT_SYMBOL(kmem_cache_name); @@ -243,7 +254,7 @@ Signed-off-by: Suresh Jayaraman static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { -@@ -2879,6 +2915,57 @@ void kfree(const void *x) +@@ -2783,6 +2819,57 @@ void kfree(const void *x) EXPORT_SYMBOL(kfree); /* @@ -284,10 +295,10 @@ Signed-off-by: Suresh Jayaraman + +#ifdef CONFIG_ZONE_DMA + if (unlikely(flags & SLUB_DMA)) -+ s = dma_kmalloc_cache(i, flags); ++ s = kmalloc_dma_caches[i]; + else +#endif -+ s = &kmalloc_caches[i]; ++ s = kmalloc_caches[i]; + + if (s) + pages += kmem_alloc_estimate(s, flags, 0); diff --git a/patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch b/patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch index 927a060..7e0dfe7 100644 --- a/patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch +++ b/patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch @@ -32,7 +32,7 @@ Signed-off-by: Suresh Jayaraman --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1772,6 +1772,13 @@ static inline void rcu_copy_process(stru +@@ -1761,6 +1761,13 @@ static inline void rcu_copy_process(stru #endif @@ -60,21 +60,21 @@ Signed-off-by: Suresh Jayaraman @@ -246,6 +248,7 @@ restart: account_system_vtime(current); - _local_bh_enable(); + __local_bh_enable(SOFTIRQ_OFFSET); + tsk_restore_flags(current, pflags, PF_MEMALLOC); } #ifndef __ARCH_HAS_DO_SOFTIRQ --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -1785,9 +1785,10 @@ int gfp_to_alloc_flags(gfp_t gfp_mask) +@@ -1928,9 +1928,10 @@ int gfp_to_alloc_flags(gfp_t gfp_mask) alloc_flags |= ALLOC_HARDER; if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { - if (!in_interrupt() && -- ((p->flags & PF_MEMALLOC) || +- ((current->flags & PF_MEMALLOC) || - unlikely(test_thread_flag(TIF_MEMDIE)))) -+ if (!in_irq() && (p->flags & PF_MEMALLOC)) ++ if (!in_irq() && (current->flags & PF_MEMALLOC)) + alloc_flags |= ALLOC_NO_WATERMARKS; + else if (!in_interrupt() && + unlikely(test_thread_flag(TIF_MEMDIE))) diff --git a/patches.suse/SoN-08-mm-page_alloc-emerg.patch b/patches.suse/SoN-08-mm-page_alloc-emerg.patch index e9e8882..08e34a0 100644 --- a/patches.suse/SoN-08-mm-page_alloc-emerg.patch +++ b/patches.suse/SoN-08-mm-page_alloc-emerg.patch @@ -12,13 +12,13 @@ Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- include/linux/mmzone.h | 3 + - mm/page_alloc.c | 84 +++++++++++++++++++++++++++++++++++++++++++------ + mm/page_alloc.c | 85 +++++++++++++++++++++++++++++++++++++++++++------ mm/vmstat.c | 6 +-- - 3 files changed, 80 insertions(+), 13 deletions(-) + 3 files changed, 81 insertions(+), 13 deletions(-) --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h -@@ -279,6 +279,7 @@ struct zone_reclaim_stat { +@@ -282,6 +282,7 
@@ struct zone_reclaim_stat { struct zone { /* Fields commonly accessed by the page allocator */ @@ -26,7 +26,7 @@ Signed-off-by: Suresh Jayaraman /* zone watermarks, access with *_wmark_pages(zone) macros */ unsigned long watermark[NR_WMARK]; -@@ -756,6 +757,8 @@ int sysctl_min_unmapped_ratio_sysctl_han +@@ -776,6 +777,8 @@ int sysctl_min_unmapped_ratio_sysctl_han int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); @@ -37,7 +37,7 @@ Signed-off-by: Suresh Jayaraman extern char numa_zonelist_order[]; --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -150,6 +150,8 @@ static char * const zone_names[MAX_NR_ZO +@@ -173,6 +173,8 @@ static char * const zone_names[MAX_NR_ZO static DEFINE_SPINLOCK(min_free_lock); int min_free_kbytes = 1024; @@ -46,16 +46,16 @@ Signed-off-by: Suresh Jayaraman static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; -@@ -1373,7 +1375,7 @@ int zone_watermark_ok(struct zone *z, in +@@ -1457,7 +1459,7 @@ static bool __zone_watermark_ok(struct z if (alloc_flags & ALLOC_HARDER) min -= min / 4; - if (free_pages <= min + z->lowmem_reserve[classzone_idx]) + if (free_pages <= min+z->lowmem_reserve[classzone_idx]+z->pages_emerg) - return 0; + return false; for (o = 0; o < order; o++) { /* At the next order, this order's pages become unavailable */ -@@ -1803,7 +1805,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, u +@@ -1985,7 +1987,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, u { const gfp_t wait = gfp_mask & __GFP_WAIT; struct page *page = NULL; @@ -63,19 +63,20 @@ Signed-off-by: Suresh Jayaraman + int alloc_flags = 0; unsigned long pages_reclaimed = 0; unsigned long did_some_progress; - struct task_struct *p = current; -@@ -1924,8 +1926,8 @@ nopage: + bool sync_migration = false; +@@ -2144,8 +2146,9 @@ nopage: printk(KERN_INFO "perfectly reliable and the kernel is designed to handle that.\n"); } printk(KERN_INFO "%s: page allocation failure." - " order:%d, mode:0x%x\n", -- p->comm, order, gfp_mask); +- current->comm, order, gfp_mask); + " order:%d, mode:0x%x, alloc_flags:0x%x pflags:0x%x\n", -+ p->comm, order, gfp_mask, alloc_flags, p->flags); ++ current->comm, order, gfp_mask, alloc_flags, ++ current->flags); dump_stack(); show_mem(); } -@@ -2256,9 +2258,9 @@ void show_free_areas(void) +@@ -2480,9 +2483,9 @@ void show_free_areas(void) "\n", zone->name, K(zone_page_state(zone, NR_FREE_PAGES)), @@ -88,7 +89,7 @@ Signed-off-by: Suresh Jayaraman K(zone_page_state(zone, NR_ACTIVE_ANON)), K(zone_page_state(zone, NR_INACTIVE_ANON)), K(zone_page_state(zone, NR_ACTIVE_FILE)), -@@ -4549,7 +4551,7 @@ static void calculate_totalreserve_pages +@@ -4863,7 +4866,7 @@ static void calculate_totalreserve_pages } /* we treat the high watermark as reserved pages. 
*/ @@ -97,7 +98,7 @@ Signed-off-by: Suresh Jayaraman if (max > zone->present_pages) max = zone->present_pages; -@@ -4607,7 +4609,8 @@ static void setup_per_zone_lowmem_reserv +@@ -4921,7 +4924,8 @@ static void setup_per_zone_lowmem_reserv */ static void __setup_per_zone_wmarks(void) { @@ -107,7 +108,7 @@ Signed-off-by: Suresh Jayaraman unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; -@@ -4619,11 +4622,13 @@ static void __setup_per_zone_wmarks(void +@@ -4933,11 +4937,13 @@ static void __setup_per_zone_wmarks(void } for_each_zone(zone) { @@ -122,7 +123,7 @@ Signed-off-by: Suresh Jayaraman if (is_highmem(zone)) { /* * __GFP_HIGH and PF_MEMALLOC allocations usually don't -@@ -4642,12 +4647,14 @@ static void __setup_per_zone_wmarks(void +@@ -4956,12 +4962,14 @@ static void __setup_per_zone_wmarks(void if (min_pages > 128) min_pages = 128; zone->watermark[WMARK_MIN] = min_pages; @@ -137,7 +138,7 @@ Signed-off-by: Suresh Jayaraman } zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); -@@ -4712,6 +4719,63 @@ void setup_per_zone_wmarks(void) +@@ -5026,6 +5034,63 @@ void setup_per_zone_wmarks(void) spin_unlock_irqrestore(&min_free_lock, flags); } @@ -203,7 +204,7 @@ Signed-off-by: Suresh Jayaraman * --- a/mm/vmstat.c +++ b/mm/vmstat.c -@@ -721,9 +721,9 @@ static void zoneinfo_show_print(struct s +@@ -957,9 +957,9 @@ static void zoneinfo_show_print(struct s "\n spanned %lu" "\n present %lu", zone_page_state(zone, NR_FREE_PAGES), diff --git a/patches.suse/SoN-08a-mm-page_alloc-emerg.patch b/patches.suse/SoN-08a-mm-page_alloc-emerg.patch new file mode 100644 index 0000000..96cfc1b --- /dev/null +++ b/patches.suse/SoN-08a-mm-page_alloc-emerg.patch @@ -0,0 +1,30 @@ +From 4d2cfa9116b4651cf959e5a02feac0334590dbd9 Mon Sep 17 00:00:00 2001 +From: Mel Gorman +Date: Wed, 9 Mar 2011 19:23:55 +0000 +Subject: [PATCH] collapse: mm: Report the low watermark correctly. +Patch-mainline: Not yet +References: bnc#678497 + +Report the correct low watermark plus the emergency pool offset +properly. Currently it is printing out an offset from the min watermark. 
+ +Signed-off-by: Mel Gorman +Reviewed-by: NeilBrown +Signed-off-by: Suresh Jayaraman +--- + mm/vmstat.c | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) + +Index: linux-2.6.37-openSUSE-11.4/mm/vmstat.c +=================================================================== +--- linux-2.6.37-openSUSE-11.4.orig/mm/vmstat.c ++++ linux-2.6.37-openSUSE-11.4/mm/vmstat.c +@@ -880,7 +880,7 @@ static void zoneinfo_show_print(struct s + "\n present %lu", + zone_page_state(zone, NR_FREE_PAGES), + zone->pages_emerg + min_wmark_pages(zone), +- zone->pages_emerg + min_wmark_pages(zone), ++ zone->pages_emerg + low_wmark_pages(zone), + zone->pages_emerg + high_wmark_pages(zone), + zone->pages_scanned, + zone->spanned_pages, diff --git a/patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch b/patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch index 9c1c5d6..f5530f3 100644 --- a/patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch +++ b/patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch @@ -22,7 +22,7 @@ Signed-off-by: Suresh Jayaraman --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -1852,6 +1852,11 @@ restart: +@@ -1995,6 +1995,11 @@ restart: rebalance: /* Allocate without watermarks if the context allows */ if (alloc_flags & ALLOC_NO_WATERMARKS) { diff --git a/patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch b/patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch index 995aba9..074af89 100644 --- a/patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch +++ b/patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch @@ -11,21 +11,29 @@ flags as opposed to task related flags, such as sk->sk_allocation. Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- - include/linux/gfp.h | 3 ++- + include/linux/gfp.h | 4 +++- mm/page_alloc.c | 4 +++- - 2 files changed, 5 insertions(+), 2 deletions(-) + 2 files changed, 6 insertions(+), 2 deletions(-) --- a/include/linux/gfp.h +++ b/include/linux/gfp.h -@@ -47,6 +47,7 @@ struct vm_area_struct; - #define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */ - #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */ - #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */ -+#define __GFP_MEMALLOC ((__force gfp_t)0x2000u)/* Use emergency reserves */ - #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ - #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ - #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ -@@ -98,7 +99,7 @@ struct vm_area_struct; +@@ -23,6 +23,7 @@ struct vm_area_struct; + #define ___GFP_REPEAT 0x400u + #define ___GFP_NOFAIL 0x800u + #define ___GFP_NORETRY 0x1000u ++#define ___GFP_MEMALLOC 0x2000u + #define ___GFP_COMP 0x4000u + #define ___GFP_ZERO 0x8000u + #define ___GFP_NOMEMALLOC 0x10000u +@@ -73,6 +74,7 @@ struct vm_area_struct; + #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ + #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ + #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ ++#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Use emergency reserves */ + #define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ + #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ + #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */ +@@ -119,7 +121,7 @@ struct vm_area_struct; /* Control page allocator reclaim behavior */ #define GFP_RECLAIM_MASK 
(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ @@ -33,17 +41,17 @@ Signed-off-by: Suresh Jayaraman + __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) /* Control slab gfp mask during early boot */ - #define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) + #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -1787,7 +1787,9 @@ int gfp_to_alloc_flags(gfp_t gfp_mask) +@@ -1944,7 +1944,9 @@ int gfp_to_alloc_flags(gfp_t gfp_mask) alloc_flags |= ALLOC_HARDER; if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { -- if (!in_irq() && (p->flags & PF_MEMALLOC)) +- if (!in_irq() && (current->flags & PF_MEMALLOC)) + if (gfp_mask & __GFP_MEMALLOC) + alloc_flags |= ALLOC_NO_WATERMARKS; -+ else if (!in_irq() && (p->flags & PF_MEMALLOC)) ++ else if (!in_irq() && (current->flags & PF_MEMALLOC)) alloc_flags |= ALLOC_NO_WATERMARKS; else if (!in_interrupt() && unlikely(test_thread_flag(TIF_MEMDIE))) diff --git a/patches.suse/SoN-11-mm-reserve.patch b/patches.suse/SoN-11-mm-reserve.patch index fb7bcd5..ac7e64d 100644 --- a/patches.suse/SoN-11-mm-reserve.patch +++ b/patches.suse/SoN-11-mm-reserve.patch @@ -13,15 +13,14 @@ It should also allow for a Banker's algorithm replacement of __GFP_NOFAIL. Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- - include/linux/reserve.h | 198 ++++++++++++++ + include/linux/reserve.h | 197 ++++++++++++++ mm/Makefile | 2 mm/reserve.c | 637 ++++++++++++++++++++++++++++++++++++++++++++++++ - mm/slub.c | 2 - 4 files changed, 837 insertions(+), 2 deletions(-) + 3 files changed, 835 insertions(+), 1 deletion(-) --- /dev/null +++ b/include/linux/reserve.h -@@ -0,0 +1,198 @@ +@@ -0,0 +1,197 @@ +/* + * Memory reserve management. + * @@ -105,8 +104,7 @@ Signed-off-by: Suresh Jayaraman + * Returns NULL on failure + */ +#define kmalloc_reserve(size, gfp, node, res, emerg) \ -+ __kmalloc_reserve(size, gfp, node, \ -+ __builtin_return_address(0), res, emerg) ++ __kmalloc_reserve(size, gfp, node, _RET_IP_, res, emerg) + +void __kfree_reserve(void *obj, struct mem_reserve *res, int emerg); + @@ -222,18 +220,18 @@ Signed-off-by: Suresh Jayaraman +#endif /* _LINUX_RESERVE_H */ --- a/mm/Makefile +++ b/mm/Makefile -@@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o - maccess.o page_alloc.o page-writeback.o \ +@@ -12,7 +12,7 @@ obj-y := bootmem.o filemap.o mempool.o readahead.o swap.o truncate.o vmscan.o shmem.o \ prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ -- page_isolation.o mm_init.o mmu_context.o \ -+ page_isolation.o mm_init.o mmu_context.o reserve.o \ - $(mmu-y) + page_isolation.o mm_init.o mmu_context.o percpu.o \ +- $(mmu-y) ++ reserve.o $(mmu-y) obj-y += init-mm.o + obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o --- /dev/null +++ b/mm/reserve.c -@@ -0,0 +1,637 @@ +@@ -0,0 +1,639 @@ +/* + * Memory reserve management. 
+ * @@ -864,6 +862,7 @@ Signed-off-by: Suresh Jayaraman +out: + return page; +} ++EXPORT_SYMBOL_GPL(__alloc_pages_reserve); + +void __free_pages_reserve(struct page *page, int order, + struct mem_reserve *res, int emerg) @@ -871,21 +870,4 @@ Signed-off-by: Suresh Jayaraman + __free_pages(page, order); + mem_reserve_pages_charge(res, -(1 << order)); +} ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -2810,6 +2810,7 @@ void *__kmalloc(size_t size, gfp_t flags - } - EXPORT_SYMBOL(__kmalloc); - -+#ifdef CONFIG_NUMA - static void *kmalloc_large_node(size_t size, gfp_t flags, int node) - { - struct page *page; -@@ -2824,7 +2825,6 @@ static void *kmalloc_large_node(size_t s - return ptr; - } - --#ifdef CONFIG_NUMA - void *__kmalloc_node(size_t size, gfp_t flags, int node) - { - struct kmem_cache *s; ++EXPORT_SYMBOL_GPL(__free_pages_reserve); diff --git a/patches.suse/SoN-13-net-ps_rx.patch b/patches.suse/SoN-13-net-ps_rx.patch index 2166ff4..086f061 100644 --- a/patches.suse/SoN-13-net-ps_rx.patch +++ b/patches.suse/SoN-13-net-ps_rx.patch @@ -23,25 +23,25 @@ Signed-off-by: Suresh Jayaraman --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c -@@ -2656,7 +2656,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, stru +@@ -2676,7 +2676,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, stru struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; struct rx_bd *rxbd = &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; -- struct page *page = alloc_page(GFP_ATOMIC); -+ struct page *page = netdev_alloc_page(bp->dev); +- struct page *page = alloc_page(gfp); ++ struct page *page = __netdev_alloc_page(bp->dev, gfp); if (!page) return -ENOMEM; -@@ -2686,7 +2686,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struc - pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE, - PCI_DMA_FROMDEVICE); +@@ -2706,7 +2706,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struc + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), + PAGE_SIZE, PCI_DMA_FROMDEVICE); - __free_page(page); + netdev_free_page(bp->dev, page); rx_pg->page = NULL; } -@@ -3019,7 +3019,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2 +@@ -3041,7 +3041,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2 if (i == pages - 1) frag_len -= 4; @@ -50,7 +50,7 @@ Signed-off-by: Suresh Jayaraman rx_pg->page = NULL; err = bnx2_alloc_rx_page(bp, rxr, -@@ -3036,9 +3036,6 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2 +@@ -3059,9 +3059,6 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2 PAGE_SIZE, PCI_DMA_FROMDEVICE); frag_size -= frag_len; @@ -62,7 +62,7 @@ Signed-off-by: Suresh Jayaraman pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons)); --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c -@@ -241,7 +241,7 @@ static void e1000_alloc_rx_buffers_ps(st +@@ -604,7 +604,7 @@ static void e1000_alloc_rx_buffers_ps(st continue; } if (!ps_page->page) { @@ -71,9 +71,9 @@ Signed-off-by: Suresh Jayaraman if (!ps_page->page) { adapter->alloc_rx_buff_failed++; goto no_buffers; -@@ -834,11 +834,8 @@ static bool e1000_clean_rx_irq_ps(struct - pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, - PCI_DMA_FROMDEVICE); +@@ -1188,11 +1188,8 @@ static bool e1000_clean_rx_irq_ps(struct + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); ps_page->dma = 0; - skb_fill_page_desc(skb, j, ps_page->page, 0, length); + skb_add_rx_frag(skb, j, ps_page->page, 0, length); @@ -86,16 +86,16 @@ Signed-off-by: Suresh Jayaraman /* strip the ethernet crc, problem is we're using pages now so --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c -@@ -5251,7 +5251,7 @@ static bool igb_clean_rx_irq_adv(struct - 
PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); +@@ -5594,7 +5594,7 @@ static bool igb_clean_rx_irq_adv(struct + PAGE_SIZE / 2, DMA_FROM_DEVICE); buffer_info->page_dma = 0; -- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, -+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags++, +- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, ++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buffer_info->page, buffer_info->page_offset, length); -@@ -5261,10 +5261,6 @@ static bool igb_clean_rx_irq_adv(struct +@@ -5604,10 +5604,6 @@ static bool igb_clean_rx_irq_adv(struct buffer_info->page = NULL; else get_page(buffer_info->page); @@ -108,41 +108,24 @@ Signed-off-by: Suresh Jayaraman if (!(staterr & E1000_RXD_STAT_EOP)) { --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c -@@ -696,6 +696,7 @@ static void ixgbe_alloc_rx_buffers(struc - int cleaned_count) - { - struct pci_dev *pdev = adapter->pdev; -+ struct net_device *netdev = adapter->netdev; - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *bi; - unsigned int i; -@@ -709,7 +710,7 @@ static void ixgbe_alloc_rx_buffers(struc - if (!bi->page_dma && - (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { - if (!bi->page) { -- bi->page = alloc_page(GFP_ATOMIC); -+ bi->page = netdev_alloc_page(netdev); - if (!bi->page) { - adapter->alloc_rx_page_failed++; - goto no_buffers; -@@ -896,10 +897,10 @@ static bool ixgbe_clean_rx_irq(struct ix - pci_unmap_page(pdev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); +@@ -1243,10 +1244,10 @@ static bool ixgbe_clean_rx_irq(struct ix + PAGE_SIZE / 2, + DMA_FROM_DEVICE); rx_buffer_info->page_dma = 0; - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, -- rx_buffer_info->page, -- rx_buffer_info->page_offset, -- upper_len); +- rx_buffer_info->page, +- rx_buffer_info->page_offset, +- upper_len); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_buffer_info->page, + rx_buffer_info->page_offset, + upper_len); - if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || - (page_count(rx_buffer_info->page) != 1)) -@@ -907,9 +908,6 @@ static bool ixgbe_clean_rx_irq(struct ix + if ((page_count(rx_buffer_info->page) == 1) && + (page_to_nid(rx_buffer_info->page) == current_node)) +@@ -1254,9 +1255,6 @@ static bool ixgbe_clean_rx_irq(struct ix else - get_page(rx_buffer_info->page); + rx_buffer_info->page = NULL; - skb->len += upper_len; - skb->data_len += upper_len; @@ -152,7 +135,7 @@ Signed-off-by: Suresh Jayaraman i++; --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c -@@ -1344,7 +1344,7 @@ static struct sk_buff *sky2_rx_alloc(str +@@ -1394,7 +1394,7 @@ static struct sk_buff *sky2_rx_alloc(str skb_reserve(skb, NET_IP_ALIGN); for (i = 0; i < sky2->rx_nfrags; i++) { @@ -161,7 +144,7 @@ Signed-off-by: Suresh Jayaraman if (!page) goto free_partial; -@@ -2304,8 +2304,8 @@ static struct sk_buff *receive_copy(stru +@@ -2353,8 +2353,8 @@ static struct sk_buff *receive_copy(stru } /* Adjust length of skb with fragments to match received data */ @@ -172,7 +155,7 @@ Signed-off-by: Suresh Jayaraman { int i, num_frags; unsigned int size; -@@ -2322,15 +2322,11 @@ static void skb_put_frags(struct sk_buff +@@ -2371,15 +2371,11 @@ static void skb_put_frags(struct sk_buff if (length == 0) { /* don't need this page */ @@ -190,7 +173,7 @@ Signed-off-by: Suresh Jayaraman length -= size; } } -@@ -2358,7 +2354,7 @@ static struct sk_buff *receive_new(struc +@@ -2407,7 +2403,7 @@ static struct sk_buff *receive_new(struc *re = nre; if (skb_shinfo(skb)->nr_frags) @@ -199,15 +182,3 @@ Signed-off-by: Suresh Jayaraman else 
skb_put(skb, length); return skb; ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -1069,6 +1069,9 @@ static inline void skb_fill_page_desc(st - extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, - int off, int size); - -+extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, -+ int off, int size); -+ - #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) - #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb)) - #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) diff --git a/patches.suse/SoN-14-net-sk_allocation.patch b/patches.suse/SoN-14-net-sk_allocation.patch index 47f616f..a4de1d7 100644 --- a/patches.suse/SoN-14-net-sk_allocation.patch +++ b/patches.suse/SoN-14-net-sk_allocation.patch @@ -16,7 +16,7 @@ Signed-off-by: Suresh Jayaraman --- a/include/net/sock.h +++ b/include/net/sock.h -@@ -556,6 +556,11 @@ static inline int sock_flag(struct sock +@@ -565,6 +565,11 @@ static inline int sock_flag(struct sock return test_bit(flag, &sk->sk_flags); } @@ -30,7 +30,7 @@ Signed-off-by: Suresh Jayaraman sk->sk_ack_backlog--; --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c -@@ -682,7 +682,8 @@ struct sk_buff *sk_stream_alloc_skb(stru +@@ -685,7 +685,8 @@ struct sk_buff *sk_stream_alloc_skb(stru /* The TCP header must be at least 32-bit aligned. */ size = ALIGN(size, 4); @@ -42,7 +42,7 @@ Signed-off-by: Suresh Jayaraman /* --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c -@@ -2306,7 +2306,7 @@ void tcp_send_fin(struct sock *sk) +@@ -2313,7 +2313,7 @@ void tcp_send_fin(struct sock *sk) /* Socket is locked, keep trying until memory is available. */ for (;;) { skb = alloc_skb_fclone(MAX_TCP_HEADER, @@ -51,7 +51,7 @@ Signed-off-by: Suresh Jayaraman if (skb) break; yield(); -@@ -2332,7 +2332,7 @@ void tcp_send_active_reset(struct sock * +@@ -2339,7 +2339,7 @@ void tcp_send_active_reset(struct sock * struct sk_buff *skb; /* NOTE: No TCP options attached and we never retransmit this. */ @@ -60,7 +60,7 @@ Signed-off-by: Suresh Jayaraman if (!skb) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); return; -@@ -2405,7 +2405,8 @@ struct sk_buff *tcp_make_synack(struct s +@@ -2412,7 +2412,8 @@ struct sk_buff *tcp_make_synack(struct s if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) s_data_desired = cvp->s_data_desired; @@ -70,7 +70,7 @@ Signed-off-by: Suresh Jayaraman if (skb == NULL) return NULL; -@@ -2685,7 +2686,7 @@ void tcp_send_ack(struct sock *sk) +@@ -2694,7 +2695,7 @@ void tcp_send_ack(struct sock *sk) * tcp_transmit_skb() will set the ownership to this * sock. */ @@ -79,7 +79,7 @@ Signed-off-by: Suresh Jayaraman if (buff == NULL) { inet_csk_schedule_ack(sk); inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; -@@ -2720,7 +2721,7 @@ static int tcp_xmit_probe_skb(struct soc +@@ -2729,7 +2730,7 @@ static int tcp_xmit_probe_skb(struct soc struct sk_buff *skb; /* We don't queue it, tcp_transmit_skb() sets ownership. */ @@ -90,7 +90,7 @@ Signed-off-by: Suresh Jayaraman --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c -@@ -594,7 +594,8 @@ static int tcp_v6_md5_do_add(struct sock +@@ -589,7 +589,8 @@ static int tcp_v6_md5_do_add(struct sock } else { /* reallocate new list if current one is full. 
*/ if (!tp->md5sig_info) { @@ -100,7 +100,7 @@ Signed-off-by: Suresh Jayaraman if (!tp->md5sig_info) { kfree(newkey); return -ENOMEM; -@@ -607,7 +608,8 @@ static int tcp_v6_md5_do_add(struct sock +@@ -602,7 +603,8 @@ static int tcp_v6_md5_do_add(struct sock } if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) * @@ -110,7 +110,7 @@ Signed-off-by: Suresh Jayaraman if (!keys) { tcp_free_md5sig_pool(); -@@ -731,7 +733,8 @@ static int tcp_v6_parse_md5_keys (struct +@@ -726,7 +728,8 @@ static int tcp_v6_parse_md5_keys (struct struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *p; @@ -120,7 +120,7 @@ Signed-off-by: Suresh Jayaraman if (!p) return -ENOMEM; -@@ -998,6 +1001,7 @@ static void tcp_v6_send_response(struct +@@ -997,6 +1000,7 @@ static void tcp_v6_send_response(struct unsigned int tot_len = sizeof(struct tcphdr); struct dst_entry *dst; __be32 *topt; @@ -128,7 +128,7 @@ Signed-off-by: Suresh Jayaraman if (ts) tot_len += TCPOLEN_TSTAMP_ALIGNED; -@@ -1007,7 +1011,7 @@ static void tcp_v6_send_response(struct +@@ -1006,7 +1010,7 @@ static void tcp_v6_send_response(struct #endif buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, @@ -137,7 +137,7 @@ Signed-off-by: Suresh Jayaraman if (buff == NULL) return; -@@ -1085,6 +1089,7 @@ static void tcp_v6_send_reset(struct soc +@@ -1083,6 +1087,7 @@ static void tcp_v6_send_reset(struct soc struct tcphdr *th = tcp_hdr(skb); u32 seq = 0, ack_seq = 0; struct tcp_md5sig_key *key = NULL; @@ -145,7 +145,7 @@ Signed-off-by: Suresh Jayaraman if (th->rst) return; -@@ -1096,6 +1101,8 @@ static void tcp_v6_send_reset(struct soc +@@ -1094,6 +1099,8 @@ static void tcp_v6_send_reset(struct soc if (sk) key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr); #endif diff --git a/patches.suse/SoN-15-netvm-reserve.patch b/patches.suse/SoN-15-netvm-reserve.patch index df242ca..0ab6f12 100644 --- a/patches.suse/SoN-15-netvm-reserve.patch +++ b/patches.suse/SoN-15-netvm-reserve.patch @@ -33,13 +33,13 @@ Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- include/net/sock.h | 43 ++++++++++++++++++++- - net/Kconfig | 3 + + net/Kconfig | 2 net/core/sock.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 152 insertions(+), 1 deletion(-) + 3 files changed, 151 insertions(+), 1 deletion(-) --- a/include/net/sock.h +++ b/include/net/sock.h -@@ -51,6 +51,7 @@ +@@ -52,6 +52,7 @@ #include #include #include @@ -47,7 +47,7 @@ Signed-off-by: Suresh Jayaraman #include #include -@@ -525,6 +526,7 @@ enum sock_flags { +@@ -534,6 +535,7 @@ enum sock_flags { SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ @@ -55,7 +55,7 @@ Signed-off-by: Suresh Jayaraman SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */ SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */ SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */ -@@ -556,9 +558,48 @@ static inline int sock_flag(struct sock +@@ -565,9 +567,48 @@ static inline int sock_flag(struct sock return test_bit(flag, &sk->sk_flags); } @@ -107,13 +107,13 @@ Signed-off-by: Suresh Jayaraman static inline void sk_acceptq_removed(struct sock *sk) --- a/net/Kconfig +++ b/net/Kconfig -@@ -276,4 +276,7 @@ source "net/wimax/Kconfig" - source "net/rfkill/Kconfig" - source "net/9p/Kconfig" +@@ -294,5 +294,7 @@ source "net/rfkill/Kconfig" + source "net/caif/Kconfig" + source 
"net/ceph/Kconfig" +config NETVM -+ def_bool n -+ ++ bool + endif # if NET --- a/net/core/sock.c +++ b/net/core/sock.c @@ -122,12 +122,12 @@ Signed-off-by: Suresh Jayaraman #include #include +#include + #include #include - #include -@@ -217,6 +218,105 @@ __u32 sysctl_rmem_default __read_mostly - int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); - EXPORT_SYMBOL(sysctl_optmem_max); +@@ -224,6 +225,105 @@ int net_cls_subsys_id = -1; + EXPORT_SYMBOL_GPL(net_cls_subsys_id); + #endif +static struct mem_reserve net_reserve; +struct mem_reserve net_rx_reserve; @@ -231,7 +231,7 @@ Signed-off-by: Suresh Jayaraman static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) { struct timeval tv; -@@ -1074,6 +1174,7 @@ static void __sk_free(struct sock *sk) +@@ -1121,6 +1221,7 @@ static void __sk_free(struct sock *sk) { struct sk_filter *filter; @@ -239,7 +239,7 @@ Signed-off-by: Suresh Jayaraman if (sk->sk_destruct) sk->sk_destruct(sk); -@@ -1249,6 +1350,12 @@ void __init sk_init(void) +@@ -1300,6 +1401,12 @@ void __init sk_init(void) sysctl_wmem_max = 131071; sysctl_rmem_max = 131071; } diff --git a/patches.suse/SoN-16-netvm-reserve-inet.patch b/patches.suse/SoN-16-netvm-reserve-inet.patch index 0e665ee..aa12583 100644 --- a/patches.suse/SoN-16-netvm-reserve-inet.patch +++ b/patches.suse/SoN-16-netvm-reserve-inet.patch @@ -78,8 +78,8 @@ Signed-off-by: Suresh Jayaraman + + struct mem_reserve ip6_rt_reserve; #ifdef CONFIG_IPV6_MROUTE - struct sock *mroute6_sk; - struct mfc6_cache **mfc6_cache_array; + #ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES + struct mr6_table *mrt6; --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -20,6 +20,7 @@ @@ -102,16 +102,16 @@ Signed-off-by: Suresh Jayaraman --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -45,6 +45,8 @@ - #include #include #include + #include +#include +#include /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c -@@ -626,6 +628,34 @@ int ip_defrag(struct sk_buff *skb, u32 u - } +@@ -635,6 +637,34 @@ int ip_defrag(struct sk_buff *skb, u32 u + EXPORT_SYMBOL(ip_defrag); #ifdef CONFIG_SYSCTL +static int @@ -145,7 +145,7 @@ Signed-off-by: Suresh Jayaraman static int zero; static struct ctl_table ip4_frags_ns_ctl_table[] = { -@@ -634,7 +664,7 @@ static struct ctl_table ip4_frags_ns_ctl +@@ -643,7 +673,7 @@ static struct ctl_table ip4_frags_ns_ctl .data = &init_net.ipv4.frags.high_thresh, .maxlen = sizeof(int), .mode = 0644, @@ -154,7 +154,7 @@ Signed-off-by: Suresh Jayaraman }, { .procname = "ipfrag_low_thresh", -@@ -732,6 +762,8 @@ static inline void ip4_frags_ctl_registe +@@ -741,6 +771,8 @@ static inline void ip4_frags_ctl_registe static int __net_init ipv4_frags_init_net(struct net *net) { @@ -163,7 +163,7 @@ Signed-off-by: Suresh Jayaraman /* * Fragment cache limits. We will commit 256K at one time. Should we * cross that limit we will prune down to 192K. 
This should cope with -@@ -749,11 +781,31 @@ static int __net_init ipv4_frags_init_ne +@@ -758,11 +790,31 @@ static int __net_init ipv4_frags_init_ne inet_frags_init_net(&net->ipv4.frags); @@ -206,7 +206,7 @@ Signed-off-by: Suresh Jayaraman #define RT_FL_TOS(oldflp) \ ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK))) -@@ -225,6 +226,7 @@ struct rt_hash_bucket { +@@ -224,6 +225,7 @@ struct rt_hash_bucket { # define RT_HASH_LOCK_SZ 256 # endif #endif @@ -214,7 +214,7 @@ Signed-off-by: Suresh Jayaraman static spinlock_t *rt_hash_locks; # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)] -@@ -271,6 +273,10 @@ static inline int rt_genid(struct net *n +@@ -268,6 +270,10 @@ static inline int rt_genid(struct net *n return atomic_read(&net->ipv4.rt_genid); } @@ -225,7 +225,7 @@ Signed-off-by: Suresh Jayaraman #ifdef CONFIG_PROC_FS struct rt_cache_iter_state { struct seq_net_private p; -@@ -400,6 +406,36 @@ static int rt_cache_seq_show(struct seq_ +@@ -398,6 +404,36 @@ static int rt_cache_seq_show(struct seq_ return 0; } @@ -262,7 +262,7 @@ Signed-off-by: Suresh Jayaraman static const struct seq_operations rt_cache_seq_ops = { .start = rt_cache_seq_start, .next = rt_cache_seq_next, -@@ -3157,7 +3193,7 @@ static ctl_table ipv4_route_table[] = { +@@ -3103,7 +3139,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_max_size, .maxlen = sizeof(int), .mode = 0644, @@ -271,7 +271,7 @@ Signed-off-by: Suresh Jayaraman }, { /* Deprecated. Use gc_min_interval_ms */ -@@ -3194,7 +3230,7 @@ static ctl_table ipv4_route_table[] = { +@@ -3140,7 +3176,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_redirect_load, .maxlen = sizeof(int), .mode = 0644, @@ -280,7 +280,7 @@ Signed-off-by: Suresh Jayaraman }, { .procname = "redirect_number", -@@ -3414,6 +3450,24 @@ int __init ip_rt_init(void) +@@ -3334,6 +3370,24 @@ int __init ip_rt_init(void) ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); ip_rt_max_size = (rt_hash_mask + 1) * 16; @@ -315,7 +315,7 @@ Signed-off-by: Suresh Jayaraman #include #include -@@ -632,13 +633,41 @@ static const struct inet6_protocol frag_ +@@ -639,13 +640,41 @@ static const struct inet6_protocol frag_ }; #ifdef CONFIG_SYSCTL @@ -358,7 +358,7 @@ Signed-off-by: Suresh Jayaraman }, { .procname = "ip6frag_low_thresh", -@@ -743,17 +772,39 @@ static inline void ip6_frags_sysctl_unre +@@ -750,17 +779,39 @@ static inline void ip6_frags_sysctl_unre static int __net_init ipv6_frags_init_net(struct net *net) { @@ -409,8 +409,8 @@ Signed-off-by: Suresh Jayaraman #include #include #include -@@ -2537,6 +2538,34 @@ int ipv6_sysctl_rtcache_flush(ctl_table - return -EINVAL; +@@ -2532,6 +2533,34 @@ int ipv6_sysctl_rtcache_flush(ctl_table + return 0; } +static int @@ -444,7 +444,7 @@ Signed-off-by: Suresh Jayaraman ctl_table ipv6_route_table_template[] = { { .procname = "flush", -@@ -2557,7 +2586,7 @@ ctl_table ipv6_route_table_template[] = +@@ -2552,7 +2581,7 @@ ctl_table ipv6_route_table_template[] = .data = &init_net.ipv6.sysctl.ip6_rt_max_size, .maxlen = sizeof(int), .mode = 0644, @@ -453,7 +453,7 @@ Signed-off-by: Suresh Jayaraman }, { .procname = "gc_min_interval", -@@ -2632,6 +2661,8 @@ struct ctl_table * __net_init ipv6_route +@@ -2627,6 +2656,8 @@ struct ctl_table * __net_init ipv6_route table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; } @@ -462,7 +462,7 @@ Signed-off-by: Suresh Jayaraman return table; } #endif -@@ -2681,6 +2712,14 @@ static int __net_init ip6_route_net_init +@@ -2676,6 +2707,14 @@ static int __net_init ip6_route_net_init 
net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ; net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; @@ -477,7 +477,7 @@ Signed-off-by: Suresh Jayaraman #ifdef CONFIG_PROC_FS proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops); proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); -@@ -2691,12 +2730,15 @@ static int __net_init ip6_route_net_init +@@ -2686,12 +2725,15 @@ static int __net_init ip6_route_net_init out: return ret; @@ -491,10 +491,10 @@ Signed-off-by: Suresh Jayaraman - kfree(net->ipv6.ip6_null_entry); #endif + kfree(net->ipv6.ip6_null_entry); + out_ip6_dst_entries: + dst_entries_destroy(&net->ipv6.ip6_dst_ops); out_ip6_dst_ops: - goto out; - } -@@ -2707,6 +2749,7 @@ static void __net_exit ip6_route_net_exi +@@ -2702,6 +2744,7 @@ static void __net_exit ip6_route_net_exi proc_net_remove(net, "ipv6_route"); proc_net_remove(net, "rt6_stats"); #endif diff --git a/patches.suse/SoN-16a-netvm-reserve-inet.patch b/patches.suse/SoN-16a-netvm-reserve-inet.patch new file mode 100644 index 0000000..1f31d06 --- /dev/null +++ b/patches.suse/SoN-16a-netvm-reserve-inet.patch @@ -0,0 +1,71 @@ +From: Mel Gorman +Subject: [PATCH] netvm: Remove duplicated initialization in net/ipv4/route.c. +Patch-mainline: Not yet +References: bnc#678970 + +Calling mem_reserve_init() twice causes list_add corruption error and +unnecessarily increases reserves. Remove one initialisation. + +Signed-off-by: Tetsuo Handa +Signed-off-by: Mel Gorman +Reviewed-by: NeilBrown +Signed-off-by: Suresh Jayaraman +--- + net/ipv4/route.c | 9 --------- + 1 files changed, 0 insertions(+), 9 deletions(-) + +Index: linux-2.6.37-openSUSE-11.4/net/ipv4/route.c +=================================================================== +--- linux-2.6.37-openSUSE-11.4.orig/net/ipv4/route.c ++++ linux-2.6.37-openSUSE-11.4/net/ipv4/route.c +@@ -224,7 +224,6 @@ struct rt_hash_bucket { + # define RT_HASH_LOCK_SZ 256 + # endif + #endif +-#include + + static spinlock_t *rt_hash_locks; + # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)] +@@ -271,8 +270,6 @@ static inline int rt_genid(struct net *n + + static struct mem_reserve ipv4_route_reserve; + +-static struct mem_reserve ipv4_route_reserve; +- + #ifdef CONFIG_PROC_FS + struct rt_cache_iter_state { + struct seq_net_private p; +@@ -431,8 +428,6 @@ proc_dointvec_route(struct ctl_table *ta + return ret; + } + +-static struct mutex ipv4_route_lock; +- + static const struct seq_operations rt_cache_seq_ops = { + .start = rt_cache_seq_start, + .next = rt_cache_seq_next, +@@ -3167,7 +3162,7 @@ static ctl_table ipv4_route_table[] = { + .data = &ip_rt_redirect_load, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec_route, ++ .proc_handler = proc_dointvec, + }, + { + .procname = "redirect_number", +@@ -3369,15 +3364,6 @@ int __init ip_rt_init(void) + + #ifdef CONFIG_PROCFS + mutex_init(&ipv4_route_lock); +-#endif +- +- mem_reserve_init(&ipv4_route_reserve, "IPv4 route cache", +- &net_rx_reserve); +- mem_reserve_kmem_cache_set(&ipv4_route_reserve, +- ipv4_dst_ops.kmem_cachep, ip_rt_max_size); +- +-#ifdef CONFIG_PROCFS +- mutex_init(&ipv4_route_lock); + #endif + + mem_reserve_init(&ipv4_route_reserve, "IPv4 route cache", diff --git a/patches.suse/SoN-17-netvm-reserve-inet.patch-fix b/patches.suse/SoN-17-netvm-reserve-inet.patch-fix index 8394d71..93240de 100644 --- a/patches.suse/SoN-17-netvm-reserve-inet.patch-fix +++ b/patches.suse/SoN-17-netvm-reserve-inet.patch-fix @@ -12,7 +12,7 @@ Signed-off-by: Suresh Jayaraman 
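Illustrative sketch (not a hunk of this series): the SoN-16/16a changes above repeat one pattern, a sysctl handler that first lets proc_dointvec() update the tunable and then resizes the matching mem_reserve so the emergency pool tracks the new limit. Reconstructed here for the IPv4 route cache case, assuming the handler, lock and reserve names match the hunks above:

    static int proc_dointvec_route(struct ctl_table *table, int write,
                                   void __user *buffer, size_t *lenp,
                                   loff_t *ppos)
    {
            int ret;

            mutex_lock(&ipv4_route_lock);
            ret = proc_dointvec(table, write, buffer, lenp, ppos);
            if (write && !ret)
                    /* keep the emergency pool sized to ip_rt_max_size objects */
                    mem_reserve_kmem_cache_set(&ipv4_route_reserve,
                                               ipv4_dst_ops.kmem_cachep,
                                               ip_rt_max_size);
            mutex_unlock(&ipv4_route_lock);
            return ret;
    }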
--- a/net/ipv4/route.c +++ b/net/ipv4/route.c -@@ -3437,7 +3437,7 @@ int __init ip_rt_init(void) +@@ -3370,7 +3370,7 @@ int __init ip_rt_init(void) ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); ip_rt_max_size = (rt_hash_mask + 1) * 16; diff --git a/patches.suse/SoN-18-netvm-skbuff-reserve.patch b/patches.suse/SoN-18-netvm-skbuff-reserve.patch index 1c36141..db98044 100644 --- a/patches.suse/SoN-18-netvm-skbuff-reserve.patch +++ b/patches.suse/SoN-18-netvm-skbuff-reserve.patch @@ -19,10 +19,11 @@ the accounting overhead to be limited to the later kind. Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- + include/linux/mm_types.h | 1 - include/linux/skbuff.h | 25 +++++++- - net/core/skbuff.c | 137 +++++++++++++++++++++++++++++++++++++---------- - 3 files changed, 132 insertions(+), 31 deletions(-) + include/linux/skbuff.h | 35 +++++++++++-- + net/core/skbuff.c | 121 +++++++++++++++++++++++++++++++++++++---------- + 3 files changed, 128 insertions(+), 29 deletions(-) --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -36,17 +37,29 @@ Signed-off-by: Suresh Jayaraman * protected by zone->lru_lock ! --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -380,6 +380,9 @@ struct sk_buff { - #ifdef CONFIG_IPV6_NDISC_NODETYPE - __u8 ndisc_nodetype:2; +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + + /* Don't change this without changing skb_csum_unnecessary! */ + #define CHECKSUM_NONE 0 +@@ -386,9 +386,12 @@ struct sk_buff { + __u8 deliver_no_wcard:1; #endif + __u8 ooo_okay:1; +#ifdef CONFIG_NETVM + __u8 emergency:1; +#endif kmemcheck_bitfield_end(flags2); - /* 0/14 bit hole */ -@@ -417,6 +420,18 @@ struct sk_buff { +- /* 0/13 bit hole */ ++ /* 0/12 bit hole */ + + #ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +@@ -423,6 +426,18 @@ struct sk_buff { #include @@ -62,10 +75,10 @@ Signed-off-by: Suresh Jayaraman +#endif +} + - static inline struct dst_entry *skb_dst(const struct sk_buff *skb) - { - return (struct dst_entry *)skb->_skb_dst; -@@ -436,7 +451,7 @@ extern void kfree_skb(struct sk_buff *sk + /* + * skb might have a dst pointer attached, refcounted or not. 
+ * _skb_refdst low order bit is set if refcount was _not_ taken +@@ -480,7 +495,7 @@ extern void kfree_skb(struct sk_buff *sk extern void consume_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, @@ -74,16 +87,16 @@ Signed-off-by: Suresh Jayaraman static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) { -@@ -446,7 +461,7 @@ static inline struct sk_buff *alloc_skb( +@@ -490,7 +505,7 @@ static inline struct sk_buff *alloc_skb( static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { -- return __alloc_skb(size, priority, 1, -1); -+ return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, -1); +- return __alloc_skb(size, priority, 1, NUMA_NO_NODE); ++ return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); } - extern int skb_recycle_check(struct sk_buff *skb, int skb_size); -@@ -1456,7 +1471,8 @@ static inline void __skb_queue_purge(str + extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); +@@ -1511,7 +1526,8 @@ static inline void __skb_queue_purge(str static inline struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask) { @@ -93,16 +106,34 @@ Signed-off-by: Suresh Jayaraman if (likely(skb)) skb_reserve(skb, NET_SKB_PAD); return skb; -@@ -1497,6 +1513,7 @@ static inline struct sk_buff *netdev_all +@@ -1551,6 +1567,8 @@ static inline struct sk_buff *netdev_all + return skb; } - extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask); -+extern void __netdev_free_page(struct net_device *dev, struct page *page); ++extern struct mem_reserve net_skb_reserve; ++ + /** + * __netdev_alloc_page - allocate a page for ps-rx on a specific device + * @dev: network device to receive on +@@ -1562,7 +1580,8 @@ static inline struct sk_buff *netdev_all + */ + static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) + { +- return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0); ++ return alloc_pages_reserve(NUMA_NO_NODE, gfp_mask | __GFP_MEMALLOC, 0, ++ &net_skb_reserve, NULL); + } /** - * netdev_alloc_page - allocate a page for ps-rx on a specific device -@@ -1513,7 +1530,7 @@ static inline struct page *netdev_alloc_ +@@ -1578,9 +1597,14 @@ static inline struct page *netdev_alloc_ + return __netdev_alloc_page(dev, GFP_ATOMIC); + } ++static inline void __netdev_free_page(struct net_device *dev, struct page *page) ++{ ++ free_pages_reserve(page, 0, &net_skb_reserve, page->reserve); ++} ++ static inline void netdev_free_page(struct net_device *dev, struct page *page) { - __free_page(page); @@ -112,7 +143,7 @@ Signed-off-by: Suresh Jayaraman /** --- a/net/core/skbuff.c +++ b/net/core/skbuff.c -@@ -170,23 +170,29 @@ EXPORT_SYMBOL(skb_under_panic); +@@ -168,14 +168,21 @@ static void skb_under_panic(struct sk_bu * %GFP_ATOMIC. */ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, @@ -136,8 +167,9 @@ Signed-off-by: Suresh Jayaraman /* Get the HEAD */ skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); - if (!skb) +@@ -183,9 +190,8 @@ struct sk_buff *__alloc_skb(unsigned int goto out; + prefetchw(skb); - size = SKB_DATA_ALIGN(size); - data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), @@ -146,7 +178,7 @@ Signed-off-by: Suresh Jayaraman + gfp_mask, node, &net_skb_reserve, &emergency); if (!data) goto nodata; - + prefetchw(data + size); @@ -196,6 +202,9 @@ struct sk_buff *__alloc_skb(unsigned int * the tail pointer in struct sk_buff! 
*/ @@ -157,16 +189,16 @@ Signed-off-by: Suresh Jayaraman skb->truesize = size + sizeof(struct sk_buff); atomic_set(&skb->users, 1); skb->head = data; -@@ -220,7 +229,7 @@ struct sk_buff *__alloc_skb(unsigned int - skb_frag_list_init(skb); - memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); +@@ -211,7 +220,7 @@ struct sk_buff *__alloc_skb(unsigned int + atomic_set(&shinfo->dataref, 1); + kmemcheck_annotate_variable(shinfo->destructor_arg); - if (fclone) { + if (flags & SKB_ALLOC_FCLONE) { struct sk_buff *child = skb + 1; atomic_t *fclone_ref = (atomic_t *) (child + 1); -@@ -230,6 +239,9 @@ struct sk_buff *__alloc_skb(unsigned int +@@ -221,6 +230,9 @@ struct sk_buff *__alloc_skb(unsigned int atomic_set(fclone_ref, 1); child->fclone = SKB_FCLONE_UNAVAILABLE; @@ -176,37 +208,16 @@ Signed-off-by: Suresh Jayaraman } out: return skb; -@@ -259,7 +271,7 @@ struct sk_buff *__netdev_alloc_skb(struc - int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; +@@ -249,7 +261,7 @@ struct sk_buff *__netdev_alloc_skb(struc + { struct sk_buff *skb; -- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node); -+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, SKB_ALLOC_RX, node); +- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); ++ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; -@@ -273,11 +285,19 @@ struct page *__netdev_alloc_page(struct - int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; - struct page *page; - -- page = alloc_pages_node(node, gfp_mask, 0); -+ page = alloc_pages_reserve(node, gfp_mask | __GFP_MEMALLOC, 0, -+ &net_skb_reserve, NULL); -+ - return page; - } - EXPORT_SYMBOL(__netdev_alloc_page); - -+void __netdev_free_page(struct net_device *dev, struct page *page) -+{ -+ free_pages_reserve(page, 0, &net_skb_reserve, page->reserve); -+} -+EXPORT_SYMBOL(__netdev_free_page); -+ - void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, - int size) - { -@@ -285,6 +305,27 @@ void skb_add_rx_frag(struct sk_buff *skb +@@ -265,6 +277,27 @@ void skb_add_rx_frag(struct sk_buff *skb skb->len += size; skb->data_len += size; skb->truesize += size; @@ -234,7 +245,7 @@ Signed-off-by: Suresh Jayaraman } EXPORT_SYMBOL(skb_add_rx_frag); -@@ -336,21 +377,38 @@ static void skb_clone_fraglist(struct sk +@@ -316,21 +349,38 @@ static void skb_clone_fraglist(struct sk skb_get(list); } @@ -268,7 +279,7 @@ Signed-off-by: Suresh Jayaraman + } } - if (skb_has_frags(skb)) + if (skb_has_frag_list(skb)) skb_drop_fraglist(skb); - kfree(skb->head); @@ -276,7 +287,7 @@ Signed-off-by: Suresh Jayaraman } } -@@ -547,6 +605,9 @@ static void __copy_skb_header(struct sk_ +@@ -524,6 +574,9 @@ static void __copy_skb_header(struct sk_ #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) new->ipvs_property = old->ipvs_property; #endif @@ -286,7 +297,7 @@ Signed-off-by: Suresh Jayaraman new->protocol = old->protocol; new->mark = old->mark; new->skb_iif = old->skb_iif; -@@ -641,6 +702,9 @@ struct sk_buff *skb_clone(struct sk_buff +@@ -618,6 +671,9 @@ struct sk_buff *skb_clone(struct sk_buff n->fclone = SKB_FCLONE_CLONE; atomic_inc(fclone_ref); } else { @@ -296,7 +307,7 @@ Signed-off-by: Suresh Jayaraman n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); if (!n) return NULL; -@@ -677,6 +741,14 @@ static void copy_skb_header(struct sk_bu +@@ -654,6 +710,14 @@ static void copy_skb_header(struct sk_bu skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; } @@ -311,44 
+322,27 @@ Signed-off-by: Suresh Jayaraman /** * skb_copy - create private copy of an sk_buff * @skb: buffer to copy -@@ -697,15 +769,17 @@ static void copy_skb_header(struct sk_bu - struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) +@@ -675,7 +739,8 @@ struct sk_buff *skb_copy(const struct sk { - int headerlen = skb->data - skb->head; -+ int size; - /* - * Allocate the copy buffer - */ - struct sk_buff *n; - #ifdef NET_SKBUFF_DATA_USES_OFFSET -- n = alloc_skb(skb->end + skb->data_len, gfp_mask); -+ size = skb->end + skb->data_len; - #else -- n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask); -+ size = skb->end - skb->head + skb->data_len; - #endif -+ n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), -1); + int headerlen = skb_headroom(skb); + unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len; +- struct sk_buff *n = alloc_skb(size, gfp_mask); ++ struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), ++ NUMA_NO_NODE); + if (!n) return NULL; +@@ -709,7 +774,8 @@ EXPORT_SYMBOL(skb_copy); + struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) + { + unsigned int size = skb_end_pointer(skb) - skb->head; +- struct sk_buff *n = alloc_skb(size, gfp_mask); ++ struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), ++ NUMA_NO_NODE); -@@ -740,12 +814,14 @@ struct sk_buff *pskb_copy(struct sk_buff - /* - * Allocate the copy buffer - */ -+ int size; - struct sk_buff *n; - #ifdef NET_SKBUFF_DATA_USES_OFFSET -- n = alloc_skb(skb->end, gfp_mask); -+ size = skb->end; - #else -- n = alloc_skb(skb->end - skb->head, gfp_mask); -+ size = skb->end - skb->head; - #endif -+ n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), -1); if (!n) goto out; - -@@ -764,8 +840,9 @@ struct sk_buff *pskb_copy(struct sk_buff +@@ -729,8 +795,9 @@ struct sk_buff *pskb_copy(struct sk_buff int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { @@ -360,9 +354,9 @@ Signed-off-by: Suresh Jayaraman } skb_shinfo(n)->nr_frags = i; } -@@ -816,7 +893,11 @@ int pskb_expand_head(struct sk_buff *skb - - size = SKB_DATA_ALIGN(size); +@@ -778,7 +845,11 @@ int pskb_expand_head(struct sk_buff *skb + goto adjust_others; + } - data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); + if (skb_emergency(skb)) @@ -373,16 +367,16 @@ Signed-off-by: Suresh Jayaraman if (!data) goto nodata; -@@ -831,7 +912,7 @@ int pskb_expand_head(struct sk_buff *skb - sizeof(struct skb_shared_info)); - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) -- get_page(skb_shinfo(skb)->frags[i].page); -+ skb_get_page(skb, skb_shinfo(skb)->frags[i].page); +@@ -806,7 +877,7 @@ int pskb_expand_head(struct sk_buff *skb + kfree(skb->head); + } else { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) +- get_page(skb_shinfo(skb)->frags[i].page); ++ skb_get_page(skb, skb_shinfo(skb)->frags[i].page); - if (skb_has_frags(skb)) - skb_clone_fraglist(skb); -@@ -912,8 +993,8 @@ struct sk_buff *skb_copy_expand(const st + if (skb_has_frag_list(skb)) + skb_clone_fraglist(skb); +@@ -889,8 +960,8 @@ struct sk_buff *skb_copy_expand(const st /* * Allocate the copy buffer */ @@ -393,16 +387,16 @@ Signed-off-by: Suresh Jayaraman int oldheadroom = skb_headroom(skb); int head_copy_len, head_copy_off; int off; -@@ -1105,7 +1186,7 @@ drop_pages: +@@ -1083,7 +1154,7 @@ drop_pages: skb_shinfo(skb)->nr_frags = i; for (; i < nfrags; i++) - put_page(skb_shinfo(skb)->frags[i].page); + skb_put_page(skb, skb_shinfo(skb)->frags[i].page); - if (skb_has_frags(skb)) + if (skb_has_frag_list(skb)) 
skb_drop_fraglist(skb); -@@ -1274,7 +1355,7 @@ pull_pages: +@@ -1252,7 +1323,7 @@ pull_pages: k = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { if (skb_shinfo(skb)->frags[i].size <= eat) { @@ -411,7 +405,7 @@ Signed-off-by: Suresh Jayaraman eat -= skb_shinfo(skb)->frags[i].size; } else { skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; -@@ -2052,6 +2133,7 @@ static inline void skb_split_no_header(s +@@ -2034,6 +2105,7 @@ static inline void skb_split_no_header(s skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; if (pos < len) { @@ -419,7 +413,7 @@ Signed-off-by: Suresh Jayaraman /* Split frag. * We have two variants in this case: * 1. Move all the frag to the second -@@ -2060,7 +2142,7 @@ static inline void skb_split_no_header(s +@@ -2042,7 +2114,7 @@ static inline void skb_split_no_header(s * where splitting is expensive. * 2. Split is accurately. We make this. */ @@ -428,7 +422,7 @@ Signed-off-by: Suresh Jayaraman skb_shinfo(skb1)->frags[0].page_offset += len - pos; skb_shinfo(skb1)->frags[0].size -= len - pos; skb_shinfo(skb)->frags[i].size = len - pos; -@@ -2559,8 +2641,9 @@ struct sk_buff *skb_segment(struct sk_bu +@@ -2540,8 +2612,9 @@ struct sk_buff *skb_segment(struct sk_bu skb_release_head_state(nskb); __skb_push(nskb, doffset); } else { @@ -440,7 +434,7 @@ Signed-off-by: Suresh Jayaraman if (unlikely(!nskb)) goto err; -@@ -2602,7 +2685,7 @@ struct sk_buff *skb_segment(struct sk_bu +@@ -2587,7 +2660,7 @@ struct sk_buff *skb_segment(struct sk_bu while (pos < offset + len && i < nfrags) { *frag = skb_shinfo(skb)->frags[i]; diff --git a/patches.suse/SoN-19-netvm-sk_filter.patch b/patches.suse/SoN-19-netvm-sk_filter.patch index 4de0a16..74adfbe 100644 --- a/patches.suse/SoN-19-netvm-sk_filter.patch +++ b/patches.suse/SoN-19-netvm-sk_filter.patch @@ -16,7 +16,7 @@ Signed-off-by: Suresh Jayaraman --- a/net/core/filter.c +++ b/net/core/filter.c -@@ -81,6 +81,9 @@ int sk_filter(struct sock *sk, struct sk +@@ -82,6 +82,9 @@ int sk_filter(struct sock *sk, struct sk int err; struct sk_filter *filter; diff --git a/patches.suse/SoN-20-netvm-tcp-deadlock.patch b/patches.suse/SoN-20-netvm-tcp-deadlock.patch index 302301f..8820bf4 100644 --- a/patches.suse/SoN-20-netvm-tcp-deadlock.patch +++ b/patches.suse/SoN-20-netvm-tcp-deadlock.patch @@ -12,15 +12,17 @@ Fix this by exempting the SOCK_MEMALLOC sockets from the rmem limit. 
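Illustrative sketch (a reconstruction, not text from the patch; the hunks below only show the call sites being switched from a size to an skb): after this change sk_rmem_schedule() is expected to take the skb so that an emergency skb queued to a SOCK_MEMALLOC socket can bypass the receive-buffer limit instead of being dropped:

    static inline int sk_rmem_schedule(struct sock *sk, struct sk_buff *skb)
    {
            if (!sk_has_account(sk))
                    return 1;
            /* emergency skbs may overshoot the limit; dropping them here
             * could deadlock swap-over-network writeback */
            return skb->truesize <= sk->sk_forward_alloc ||
                   __sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV) ||
                   skb_emergency(skb);
    }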
Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- - include/net/sock.h | 7 ++++--- - net/core/sock.c | 2 +- - net/ipv4/tcp_input.c | 12 ++++++------ - net/sctp/ulpevent.c | 2 +- - 4 files changed, 12 insertions(+), 11 deletions(-) + + include/net/sock.h | 7 ++++--- + net/caif/caif_socket.c | 2 +- + net/core/sock.c | 2 +- + net/ipv4/tcp_input.c | 12 ++++++------ + net/sctp/ulpevent.c | 2 +- + 5 files changed, 13 insertions(+), 12 deletions(-) --- a/include/net/sock.h +++ b/include/net/sock.h -@@ -923,12 +923,13 @@ static inline int sk_wmem_schedule(struc +@@ -980,12 +980,13 @@ static inline int sk_wmem_schedule(struc __sk_mem_schedule(sk, size, SK_MEM_SEND); } @@ -39,7 +41,7 @@ Signed-off-by: Suresh Jayaraman static inline void sk_mem_reclaim(struct sock *sk) --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -392,7 +392,7 @@ int sock_queue_rcv_skb(struct sock *sk, +@@ -399,7 +399,7 @@ int sock_queue_rcv_skb(struct sock *sk, if (err) return err; @@ -50,7 +52,7 @@ Signed-off-by: Suresh Jayaraman } --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c -@@ -4335,19 +4335,19 @@ static void tcp_ofo_queue(struct sock *s +@@ -4347,19 +4347,19 @@ static void tcp_ofo_queue(struct sock *s static int tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); @@ -74,7 +76,7 @@ Signed-off-by: Suresh Jayaraman return -1; } } -@@ -4399,7 +4399,7 @@ static void tcp_data_queue(struct sock * +@@ -4412,7 +4412,7 @@ static void tcp_data_queue(struct sock * if (eaten <= 0) { queue_and_out: if (eaten < 0 && @@ -83,7 +85,7 @@ Signed-off-by: Suresh Jayaraman goto drop; skb_set_owner_r(skb, sk); -@@ -4470,7 +4470,7 @@ drop: +@@ -4483,7 +4483,7 @@ drop: TCP_ECN_check_ce(tp, skb); @@ -94,7 +96,7 @@ Signed-off-by: Suresh Jayaraman /* Disable header prediction. 
*/ --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c -@@ -701,7 +701,7 @@ struct sctp_ulpevent *sctp_ulpevent_make +@@ -702,7 +702,7 @@ struct sctp_ulpevent *sctp_ulpevent_make if (rx_count >= asoc->base.sk->sk_rcvbuf) { if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || @@ -103,3 +105,14 @@ Signed-off-by: Suresh Jayaraman goto fail; } +--- a/net/caif/caif_socket.c ++++ b/net/caif/caif_socket.c +@@ -170,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc + err = sk_filter(sk, skb); + if (err) + return err; +- if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { ++ if (!sk_rmem_schedule(sk, skb) && rx_flow_is_on(cf_sk)) { + set_rx_flow_off(cf_sk); + pr_debug("sending flow OFF due to rmem_schedule\n"); + dbfs_atomic_inc(&cnt.num_rx_flow_off); diff --git a/patches.suse/SoN-21-emergency-nf_queue.patch b/patches.suse/SoN-21-emergency-nf_queue.patch index 3d247b8..56c0d08 100644 --- a/patches.suse/SoN-21-emergency-nf_queue.patch +++ b/patches.suse/SoN-21-emergency-nf_queue.patch @@ -14,13 +14,15 @@ Signed-off-by: Suresh Jayaraman --- a/net/netfilter/core.c +++ b/net/netfilter/core.c -@@ -175,9 +175,12 @@ next_hook: +@@ -176,11 +176,14 @@ next_hook: if (verdict == NF_ACCEPT || verdict == NF_STOP) { ret = 1; - } else if (verdict == NF_DROP) { + } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { +drop: kfree_skb(skb); - ret = -EPERM; + ret = -(verdict >> NF_VERDICT_BITS); + if (ret == 0) + ret = -EPERM; } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { + if (skb_emergency(skb)) + goto drop; diff --git a/patches.suse/SoN-22-netvm.patch b/patches.suse/SoN-22-netvm.patch index c92c9ab..6543971 100644 --- a/patches.suse/SoN-22-netvm.patch +++ b/patches.suse/SoN-22-netvm.patch @@ -9,17 +9,18 @@ Use the (new) sk_backlog_rcv() wrapper to ensure this for backlog processing. Skip taps, since those are user-space again. +Signed-off-by: Jiri Slaby [lock imbalance fix] Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- include/net/sock.h | 5 ++++ - net/core/dev.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++----- - net/core/sock.c | 16 ++++++++++++++ - 3 files changed, 73 insertions(+), 5 deletions(-) + net/core/dev.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++---- + net/core/sock.c | 16 +++++++++++++++ + 3 files changed, 72 insertions(+), 4 deletions(-) --- a/include/net/sock.h +++ b/include/net/sock.h -@@ -660,8 +660,13 @@ static inline __must_check int sk_add_ba +@@ -682,8 +682,13 @@ static inline __must_check int sk_add_ba return 0; } @@ -35,9 +36,9 @@ Signed-off-by: Suresh Jayaraman --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -2464,6 +2464,30 @@ void netif_nit_deliver(struct sk_buff *s - rcu_read_unlock(); +@@ -2814,6 +2814,30 @@ int __skb_bond_should_drop(struct sk_buf } + EXPORT_SYMBOL(__skb_bond_should_drop); +/* + * Filter the protocols for which the reserves are adequate. 
@@ -63,20 +64,20 @@ Signed-off-by: Suresh Jayaraman + return 1; +} + - /** - * netif_receive_skb - process receive buffer from network - * @skb: buffer to process -@@ -2487,6 +2511,7 @@ int netif_receive_skb(struct sk_buff *sk - struct net_device *null_or_bond; + static int __netif_receive_skb(struct sk_buff *skb) + { + struct packet_type *ptype, *pt_prev; +@@ -2824,6 +2848,7 @@ static int __netif_receive_skb(struct sk + struct net_device *orig_or_bond; int ret = NET_RX_DROP; __be16 type; + unsigned long pflags = current->flags; - if (!skb->tstamp.tv64) - net_timestamp(skb); -@@ -2494,9 +2519,21 @@ int netif_receive_skb(struct sk_buff *sk - if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) - return NET_RX_SUCCESS; + if (!netdev_tstamp_prequeue) + net_timestamp_check(skb); +@@ -2831,9 +2856,21 @@ static int __netif_receive_skb(struct sk + + trace_netif_receive_skb(skb); + /* Emergency skb are special, they should + * - be delivered to SOCK_MEMALLOC sockets only @@ -97,7 +98,7 @@ Signed-off-by: Suresh Jayaraman if (!skb->skb_iif) skb->skb_iif = skb->dev->ifindex; -@@ -2527,6 +2564,9 @@ int netif_receive_skb(struct sk_buff *sk +@@ -2875,6 +2912,9 @@ static int __netif_receive_skb(struct sk } #endif @@ -107,7 +108,7 @@ Signed-off-by: Suresh Jayaraman list_for_each_entry_rcu(ptype, &ptype_all, list) { if (ptype->dev == null_or_orig || ptype->dev == skb->dev || ptype->dev == orig_dev) { -@@ -2536,19 +2576,23 @@ int netif_receive_skb(struct sk_buff *sk +@@ -2884,13 +2924,17 @@ static int __netif_receive_skb(struct sk } } @@ -123,26 +124,27 @@ Signed-off-by: Suresh Jayaraman + if (!skb_emergency_protocol(skb)) + goto drop; + - skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); - if (!skb) -- goto out; -+ goto unlock; - skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev); - if (!skb) -- goto out; -+ goto unlock; + /* Handle special case of bridge or macvlan */ + rx_handler = rcu_dereference(skb->dev->rx_handler); + if (rx_handler) { +@@ -2900,7 +2944,7 @@ ncls: + } + skb = rx_handler(skb); + if (!skb) +- goto out; ++ goto unlock; + } - /* - * Make sure frames received on VLAN interfaces stacked on -@@ -2577,6 +2621,7 @@ ncls: + if (vlan_tx_tag_present(skb)) { +@@ -2930,6 +2974,7 @@ ncls: if (pt_prev) { ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { +drop: + atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); /* Jamal, now you will not able to escape explaining - * me how you were going to use this. 
:-) -@@ -2584,8 +2629,10 @@ ncls: +@@ -2937,8 +2982,10 @@ ncls: ret = NET_RX_DROP; } @@ -153,10 +155,10 @@ Signed-off-by: Suresh Jayaraman + tsk_restore_flags(current, pflags, PF_MEMALLOC); return ret; } - EXPORT_SYMBOL(netif_receive_skb); + --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -315,6 +315,22 @@ int sk_clear_memalloc(struct sock *sk) +@@ -322,6 +322,22 @@ int sk_clear_memalloc(struct sock *sk) return set; } EXPORT_SYMBOL_GPL(sk_clear_memalloc); diff --git a/patches.suse/SoN-23-mm-swapfile.patch b/patches.suse/SoN-23-mm-swapfile.patch index 45cccc2..e018fba 100644 --- a/patches.suse/SoN-23-mm-swapfile.patch +++ b/patches.suse/SoN-23-mm-swapfile.patch @@ -48,21 +48,21 @@ Signed-off-by: Suresh Jayaraman --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking -@@ -174,6 +174,10 @@ prototypes: - int (*direct_IO)(int, struct kiocb *, const struct iovec *iov, - loff_t offset, unsigned long nr_segs); - int (*launder_page) (struct page *); +@@ -198,6 +198,10 @@ prototypes: + int (*launder_page)(struct page *); + int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long); + int (*error_remove_page)(struct address_space *, struct page *); + int (*swapon) (struct file *); + int (*swapoff) (struct file *); + int (*swap_out) (struct file *, struct page *, struct writeback_control *); + int (*swap_in) (struct file *, struct page *); locking rules: - All except set_page_dirty may block -@@ -193,6 +197,10 @@ invalidatepage: no yes - releasepage: no yes - direct_IO: no - launder_page: no yes + All except set_page_dirty and freepage may block +@@ -221,6 +225,10 @@ migratepage: yes (both) + launder_page: yes + is_partially_uptodate: yes + error_remove_page: yes +swapon no +swapoff no +swap_out no yes, unlocks @@ -70,7 +70,7 @@ Signed-off-by: Suresh Jayaraman ->write_begin(), ->write_end(), ->sync_page() and ->readpage() may be called from the request handler (/dev/loop). -@@ -292,6 +300,20 @@ cleaned, or an error value if not. Note +@@ -322,6 +330,20 @@ cleaned, or an error value if not. Note getting mapped back in and redirtied, it needs to be kept locked across the entire operation. @@ -88,12 +88,12 @@ Signed-off-by: Suresh Jayaraman + ->swap_in() when swapon() returned success, this method is used to +read the swap page. + - Note: currently almost all instances of address_space methods are - using BKL for internal serialization and that's one of the worst sources - of contention. Normally they are calling library functions (in fs/buffer.c) + ----------------------- file_lock_operations ------------------------------ + prototypes: + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt -@@ -537,6 +537,11 @@ struct address_space_operations { +@@ -543,6 +543,11 @@ struct address_space_operations { int (*migratepage) (struct page *, struct page *); int (*launder_page) (struct page *); int (*error_remove_page) (struct mapping *mapping, struct page *page); @@ -105,7 +105,7 @@ Signed-off-by: Suresh Jayaraman }; writepage: called by the VM to write a dirty page to backing store. -@@ -701,6 +706,19 @@ struct address_space_operations { +@@ -712,6 +717,19 @@ struct address_space_operations { unless you have them locked or reference counts increased. 
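Illustrative sketch (an assumption about intended usage, not part of the series) of how the swap code drives the ->swap_out() hook documented above. SWP_FILE and page_swap_info() come from the mm hunks further below; __swap_writepage_bio() is a hypothetical stand-in for the existing bio-based path:

    int swap_writepage(struct page *page, struct writeback_control *wbc)
    {
            struct swap_info_struct *sis = page_swap_info(page);

            if (sis->flags & SWP_FILE) {
                    struct file *swap_file = sis->swap_file;
                    struct address_space *mapping = swap_file->f_mapping;

                    /* hand the swapcache page to the filesystem */
                    return mapping->a_ops->swap_out(swap_file, page, wbc);
            }

            /* block-backed swap keeps using the bio path */
            return __swap_writepage_bio(page, wbc); /* hypothetical helper */
    }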
@@ -127,7 +127,7 @@ Signed-off-by: Suresh Jayaraman --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h -@@ -339,6 +339,7 @@ static inline int inode_has_buffers(stru +@@ -330,6 +330,7 @@ static inline int inode_has_buffers(stru static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } @@ -137,7 +137,7 @@ Signed-off-by: Suresh Jayaraman #endif /* _LINUX_BUFFER_HEAD_H */ --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -608,6 +608,15 @@ struct address_space_operations { +@@ -622,6 +622,15 @@ struct address_space_operations { int (*is_partially_uptodate) (struct page *, read_descriptor_t *, unsigned long); int (*error_remove_page)(struct address_space *, struct page *); @@ -155,15 +155,15 @@ Signed-off-by: Suresh Jayaraman /* --- a/include/linux/swap.h +++ b/include/linux/swap.h -@@ -146,6 +146,7 @@ enum { - SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ +@@ -148,6 +148,7 @@ enum { SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ -+ SWP_FILE = (1 << 6), /* file swap area */ + SWP_BLKDEV = (1 << 6), /* its a block device */ ++ SWP_FILE = (1 << 7), /* file swap area */ /* add others here before... */ SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ }; -@@ -291,6 +292,8 @@ extern void swap_unplug_io_fn(struct bac +@@ -303,6 +304,8 @@ extern void swap_unplug_io_fn(struct bac /* linux/mm/page_io.c */ extern int swap_readpage(struct page *); extern int swap_writepage(struct page *page, struct writeback_control *wbc); @@ -172,7 +172,7 @@ Signed-off-by: Suresh Jayaraman extern void end_swap_bio_read(struct bio *bio, int err); /* linux/mm/swap_state.c */ -@@ -327,6 +330,7 @@ extern int swap_type_of(dev_t, sector_t, +@@ -339,6 +342,7 @@ extern int swap_type_of(dev_t, sector_t, extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); @@ -182,7 +182,7 @@ Signed-off-by: Suresh Jayaraman struct backing_dev_info; --- a/mm/page_io.c +++ b/mm/page_io.c -@@ -16,6 +16,7 @@ +@@ -17,6 +17,7 @@ #include #include #include @@ -190,7 +190,7 @@ Signed-off-by: Suresh Jayaraman #include #include -@@ -92,11 +93,23 @@ int swap_writepage(struct page *page, st +@@ -93,11 +94,23 @@ int swap_writepage(struct page *page, st { struct bio *bio; int ret = 0, rw = WRITE; @@ -214,7 +214,7 @@ Signed-off-by: Suresh Jayaraman bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); if (bio == NULL) { set_page_dirty(page); -@@ -114,13 +127,52 @@ out: +@@ -115,13 +128,52 @@ out: return ret; } @@ -269,7 +269,7 @@ Signed-off-by: Suresh Jayaraman unlock_page(page); --- a/mm/swap_state.c +++ b/mm/swap_state.c -@@ -28,8 +28,8 @@ +@@ -29,8 +29,8 @@ */ static const struct address_space_operations swap_aops = { .writepage = swap_writepage, @@ -282,7 +282,7 @@ Signed-off-by: Suresh Jayaraman --- a/mm/swapfile.c +++ b/mm/swapfile.c -@@ -1346,6 +1346,14 @@ static void destroy_swap_extents(struct +@@ -1373,6 +1373,14 @@ static void destroy_swap_extents(struct list_del(&se->list); kfree(se); } @@ -297,7 +297,7 @@ Signed-off-by: Suresh Jayaraman } /* -@@ -1427,7 +1435,9 @@ add_swap_extent(struct swap_info_struct +@@ -1454,7 +1462,9 @@ add_swap_extent(struct swap_info_struct */ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) { @@ -308,7 
+308,7 @@ Signed-off-by: Suresh Jayaraman unsigned blocks_per_page; unsigned long page_no; unsigned blkbits; -@@ -1438,13 +1448,22 @@ static int setup_swap_extents(struct swa +@@ -1465,13 +1475,22 @@ static int setup_swap_extents(struct swa int nr_extents = 0; int ret; @@ -332,7 +332,7 @@ Signed-off-by: Suresh Jayaraman blkbits = inode->i_blkbits; blocks_per_page = PAGE_SIZE >> blkbits; -@@ -2220,6 +2239,13 @@ int swapcache_prepare(swp_entry_t entry) +@@ -2290,6 +2309,13 @@ int swapcache_prepare(swp_entry_t entry) return __swap_duplicate(entry, SWAP_HAS_CACHE); } diff --git a/patches.suse/SoN-24-mm-page_file_methods.patch b/patches.suse/SoN-24-mm-page_file_methods.patch index 949df32..c16612e 100644 --- a/patches.suse/SoN-24-mm-page_file_methods.patch +++ b/patches.suse/SoN-24-mm-page_file_methods.patch @@ -29,7 +29,7 @@ Signed-off-by: Suresh Jayaraman --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -662,6 +662,17 @@ static inline void *page_rmapping(struct +@@ -663,6 +663,17 @@ static inline void *page_rmapping(struct return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); } @@ -47,7 +47,7 @@ Signed-off-by: Suresh Jayaraman static inline int PageAnon(struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; -@@ -678,6 +689,20 @@ static inline pgoff_t page_index(struct +@@ -679,6 +690,20 @@ static inline pgoff_t page_index(struct return page->index; } @@ -70,7 +70,7 @@ Signed-off-by: Suresh Jayaraman * so that transitions both from it and to it can be tracked, --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h -@@ -281,6 +281,11 @@ static inline loff_t page_offset(struct +@@ -282,6 +282,11 @@ static inline loff_t page_offset(struct return ((loff_t)page->index) << PAGE_CACHE_SHIFT; } @@ -79,12 +79,12 @@ Signed-off-by: Suresh Jayaraman + return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; +} + - static inline pgoff_t linear_page_index(struct vm_area_struct *vma, - unsigned long address) - { + extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, + unsigned long address); + --- a/mm/swapfile.c +++ b/mm/swapfile.c -@@ -2247,6 +2247,25 @@ struct swap_info_struct *page_swap_info( +@@ -2307,6 +2307,25 @@ struct swap_info_struct *page_swap_info( } /* diff --git a/patches.suse/SoN-25-nfs-swapcache.patch b/patches.suse/SoN-25-nfs-swapcache.patch index 193bd41..bb56be1 100644 --- a/patches.suse/SoN-25-nfs-swapcache.patch +++ b/patches.suse/SoN-25-nfs-swapcache.patch @@ -17,7 +17,7 @@ Signed-off-by: Suresh Jayaraman --- a/fs/nfs/file.c +++ b/fs/nfs/file.c -@@ -476,7 +476,7 @@ static void nfs_invalidate_page(struct p +@@ -472,7 +472,7 @@ static void nfs_invalidate_page(struct p if (offset != 0) return; /* Cancel any unstarted writes on this page */ @@ -26,7 +26,7 @@ Signed-off-by: Suresh Jayaraman nfs_fscache_invalidate_page(page, page->mapping->host); } -@@ -509,7 +509,7 @@ static int nfs_release_page(struct page +@@ -514,7 +514,7 @@ static int nfs_release_page(struct page */ static int nfs_launder_page(struct page *page) { @@ -35,7 +35,7 @@ Signed-off-by: Suresh Jayaraman struct nfs_inode *nfsi = NFS_I(inode); dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n", -@@ -558,7 +558,7 @@ static int nfs_vm_page_mkwrite(struct vm +@@ -563,7 +563,7 @@ static int nfs_vm_page_mkwrite(struct vm nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page); lock_page(page); @@ -46,7 +46,7 @@ Signed-off-by: Suresh Jayaraman --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h -@@ -344,13 +344,14 @@ void nfs_super_set_maxbytes(struct super +@@ 
-343,13 +343,14 @@ void nfs_super_set_maxbytes(struct super static inline unsigned int nfs_page_length(struct page *page) { @@ -66,7 +66,7 @@ Signed-off-by: Suresh Jayaraman return 0; --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c -@@ -76,11 +76,11 @@ nfs_create_request(struct nfs_open_conte +@@ -70,11 +70,11 @@ nfs_create_request(struct nfs_open_conte * update_nfs_request below if the region is not locked. */ req->wb_page = page; atomic_set(&req->wb_complete, 0); @@ -91,7 +91,7 @@ Signed-off-by: Suresh Jayaraman * --- a/fs/nfs/read.c +++ b/fs/nfs/read.c -@@ -501,11 +501,11 @@ static const struct rpc_call_ops nfs_rea +@@ -502,11 +502,11 @@ static const struct rpc_call_ops nfs_rea int nfs_readpage(struct file *file, struct page *page) { struct nfs_open_context *ctx; @@ -105,7 +105,7 @@ Signed-off-by: Suresh Jayaraman nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); nfs_add_stats(inode, NFSIOS_READPAGES, 1); -@@ -559,7 +559,7 @@ static int +@@ -560,7 +560,7 @@ static int readpage_async_filler(void *data, struct page *page) { struct nfs_readdesc *desc = (struct nfs_readdesc *)data; @@ -162,8 +162,8 @@ Signed-off-by: Suresh Jayaraman + struct inode *inode = page_file_mapping(page)->host; struct nfs_server *nfss = NFS_SERVER(inode); - page_cache_get(page); -@@ -212,7 +212,7 @@ static int nfs_set_page_writeback(struct + page_cache_get(page); +@@ -213,7 +213,7 @@ static int nfs_set_page_writeback(struct static void nfs_end_page_writeback(struct page *page) { @@ -172,31 +172,32 @@ Signed-off-by: Suresh Jayaraman struct nfs_server *nfss = NFS_SERVER(inode); end_page_writeback(page); -@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struc +@@ -224,7 +224,7 @@ static void nfs_end_page_writeback(struc - static struct nfs_page *nfs_find_and_lock_request(struct page *page) + static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) { - struct inode *inode = page->mapping->host; + struct inode *inode = page_file_mapping(page)->host; struct nfs_page *req; int ret; -@@ -280,12 +280,12 @@ out: +@@ -285,13 +285,13 @@ out: static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) { - struct inode *inode = page->mapping->host; + struct inode *inode = page_file_mapping(page)->host; + int ret; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - nfs_pageio_cond_complete(pgio, page->index); + nfs_pageio_cond_complete(pgio, page_file_index(page)); - return nfs_page_async_flush(pgio, page); - } - -@@ -297,7 +297,8 @@ static int nfs_writepage_locked(struct p + ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); + if (ret == -EAGAIN) { + redirty_page_for_writepage(wbc, page); +@@ -310,7 +310,8 @@ static int nfs_writepage_locked(struct p struct nfs_pageio_descriptor pgio; int err; @@ -206,7 +207,7 @@ Signed-off-by: Suresh Jayaraman err = nfs_do_writepage(page, wbc, &pgio); nfs_pageio_complete(&pgio); if (err < 0) -@@ -441,7 +442,8 @@ nfs_mark_request_commit(struct nfs_page +@@ -455,7 +456,8 @@ nfs_mark_request_commit(struct nfs_page nfsi->ncommit++; spin_unlock(&inode->i_lock); inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); @@ -216,7 +217,7 @@ Signed-off-by: Suresh Jayaraman __mark_inode_dirty(inode, I_DIRTY_DATASYNC); } -@@ -452,7 +454,8 @@ nfs_clear_request_commit(struct nfs_page +@@ -466,7 +468,8 @@ nfs_clear_request_commit(struct nfs_page if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) { dec_zone_page_state(page, NR_UNSTABLE_NFS); @@ -226,7 +227,7 @@ Signed-off-by: 
Suresh Jayaraman return 1; } return 0; -@@ -513,7 +516,7 @@ nfs_need_commit(struct nfs_inode *nfsi) +@@ -527,7 +530,7 @@ nfs_need_commit(struct nfs_inode *nfsi) * nfs_scan_commit - Scan an inode for commit requests * @inode: NFS inode to scan * @dst: destination list @@ -235,7 +236,7 @@ Signed-off-by: Suresh Jayaraman * @npages: idx_start + npages sets the upper bound to scan. * * Moves requests from the inode's 'commit' request list. -@@ -633,7 +636,7 @@ out_err: +@@ -647,7 +650,7 @@ out_err: static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx, struct page *page, unsigned int offset, unsigned int bytes) { @@ -244,7 +245,7 @@ Signed-off-by: Suresh Jayaraman struct nfs_page *req; int error; -@@ -688,7 +691,7 @@ int nfs_flush_incompatible(struct file * +@@ -706,7 +709,7 @@ int nfs_flush_incompatible(struct file * nfs_release_request(req); if (!do_flush) return 0; @@ -253,7 +254,7 @@ Signed-off-by: Suresh Jayaraman } while (status == 0); return status; } -@@ -714,7 +717,7 @@ int nfs_updatepage(struct file *file, st +@@ -732,7 +735,7 @@ int nfs_updatepage(struct file *file, st unsigned int offset, unsigned int count) { struct nfs_open_context *ctx = nfs_file_open_context(file); @@ -262,7 +263,7 @@ Signed-off-by: Suresh Jayaraman int status = 0; nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); -@@ -722,7 +725,7 @@ int nfs_updatepage(struct file *file, st +@@ -740,7 +743,7 @@ int nfs_updatepage(struct file *file, st dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n", file->f_path.dentry->d_parent->d_name.name, file->f_path.dentry->d_name.name, count, @@ -271,7 +272,7 @@ Signed-off-by: Suresh Jayaraman /* If we're not using byte range locks, and we know the page * is up to date, it may be more efficient to extend the write -@@ -997,7 +1000,7 @@ static void nfs_writeback_release_partia +@@ -1023,7 +1026,7 @@ static void nfs_writeback_release_partia } if (nfs_write_need_commit(data)) { @@ -280,7 +281,7 @@ Signed-off-by: Suresh Jayaraman spin_lock(&inode->i_lock); if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { -@@ -1278,7 +1281,7 @@ nfs_commit_list(struct inode *inode, str +@@ -1321,7 +1324,7 @@ nfs_commit_list(struct inode *inode, str nfs_list_remove_request(req); nfs_mark_request_commit(req); dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); diff --git a/patches.suse/SoN-25a-nfs-swapcache.patch b/patches.suse/SoN-25a-nfs-swapcache.patch new file mode 100644 index 0000000..9283c6b --- /dev/null +++ b/patches.suse/SoN-25a-nfs-swapcache.patch @@ -0,0 +1,31 @@ +From: Mel Gorman +Subject: [PATCH] nfs: Convert nfs_mark_request_dirty() to use page_file_mapping() +Patch-mainline: Not yet +References: bnc#677738 + +nfs_mark_request_dirty() uses page->mapping directly. If the page is a +PageSwapCache page, it triggers as oops as the mapping must be retrieved +from the swap info instead. This patch uses page_file_mapping() thus +preventing the oops. 
+ +Signed-off-by: Mel Gorman +Reviewed-by: NeilBrown +Signed-off-by: Suresh Jayaraman +--- + fs/nfs/write.c | 3 ++- + 1 files changed, 2 insertions(+), 1 deletions(-) + +Index: linux-2.6.37-openSUSE-11.4/fs/nfs/write.c +=================================================================== +--- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/write.c ++++ linux-2.6.37-openSUSE-11.4/fs/nfs/write.c +@@ -432,7 +432,8 @@ static void + nfs_mark_request_dirty(struct nfs_page *req) + { + __set_page_dirty_nobuffers(req->wb_page); +- __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC); ++ __mark_inode_dirty(page_file_mapping(req->wb_page)->host, ++ I_DIRTY_DATASYNC); + } + + #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) diff --git a/patches.suse/SoN-25b-nfs-swapcache.patch b/patches.suse/SoN-25b-nfs-swapcache.patch new file mode 100644 index 0000000..bc252a8 --- /dev/null +++ b/patches.suse/SoN-25b-nfs-swapcache.patch @@ -0,0 +1,29 @@ +From: Mel Gorman +Date: Wed, 9 Mar 2011 19:36:49 +0000 +Subject: [PATCH] nfs: Use page_file_offset during page writeback +Patch-mainline: Not yet +References: bnc#677738 + +nfs_wb_page could conceivably be called for a PageSwapCache page so play +it safe and use page_file_offset() to lookup the correct index. + +Signed-off-by: Mel Gorman +Reviewed-by: NeilBrown +Signed-off-by: Suresh Jayaraman +--- + fs/nfs/write.c | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) + +Index: linux-2.6.37-openSUSE-11.4/fs/nfs/write.c +=================================================================== +--- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/write.c ++++ linux-2.6.37-openSUSE-11.4/fs/nfs/write.c +@@ -1520,7 +1520,7 @@ int nfs_wb_page_cancel(struct inode *ino + */ + int nfs_wb_page(struct inode *inode, struct page *page) + { +- loff_t range_start = page_offset(page); ++ loff_t range_start = page_file_offset(page); + loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, diff --git a/patches.suse/SoN-26-nfs-swapper.patch b/patches.suse/SoN-26-nfs-swapper.patch index be8e767..6673192 100644 --- a/patches.suse/SoN-26-nfs-swapper.patch +++ b/patches.suse/SoN-26-nfs-swapper.patch @@ -19,12 +19,12 @@ Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- fs/nfs/inode.c | 6 ++++ - fs/nfs/write.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++---------- - 2 files changed, 65 insertions(+), 12 deletions(-) + fs/nfs/write.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++----------- + 2 files changed, 67 insertions(+), 14 deletions(-) --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c -@@ -780,6 +780,12 @@ int nfs_revalidate_mapping(struct inode +@@ -868,6 +868,12 @@ int nfs_revalidate_mapping(struct inode struct nfs_inode *nfsi = NFS_I(inode); int ret = 0; @@ -35,11 +35,11 @@ Signed-off-by: Suresh Jayaraman + goto out; + if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) - || nfs_attribute_timeout(inode) || NFS_STALE(inode)) { - ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode); + || nfs_attribute_cache_expired(inode) + || NFS_STALE(inode)) { --- a/fs/nfs/write.c +++ b/fs/nfs/write.c -@@ -109,25 +109,64 @@ static void nfs_context_set_write_error( +@@ -107,25 +107,64 @@ static void nfs_context_set_write_error( set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); } @@ -119,10 +119,11 @@ Signed-off-by: Suresh Jayaraman if (req == NULL) break; if (nfs_set_page_tag_locked(req)) -@@ -382,8 +421,14 @@ static int nfs_inode_add_request(struct +@@ -391,9 +430,15 @@ static int nfs_inode_add_request(struct 
if (nfs_have_delegation(inode, FMODE_WRITE)) nfsi->change_attr++; } +- set_bit(PG_MAPPED, &req->wb_flags); - SetPagePrivate(req->wb_page); - set_page_private(req->wb_page, (unsigned long)req); + /* @@ -130,26 +131,29 @@ Signed-off-by: Suresh Jayaraman + * with invalidate/truncate. + */ + if (likely(!PageSwapCache(req->wb_page))) { ++ set_bit(PG_MAPPED, &req->wb_flags); + SetPagePrivate(req->wb_page); + set_page_private(req->wb_page, (unsigned long)req); + } nfsi->npages++; kref_get(&req->wb_kref); radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, -@@ -405,8 +450,10 @@ static void nfs_inode_remove_request(str +@@ -415,9 +460,11 @@ static void nfs_inode_remove_request(str BUG_ON (!NFS_WBACK_BUSY(req)); spin_lock(&inode->i_lock); - set_page_private(req->wb_page, 0); - ClearPagePrivate(req->wb_page); +- clear_bit(PG_MAPPED, &req->wb_flags); + if (likely(!PageSwapCache(req->wb_page))) { + set_page_private(req->wb_page, 0); + ClearPagePrivate(req->wb_page); ++ clear_bit(PG_MAPPED, &req->wb_flags); + } radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); nfsi->npages--; if (!nfsi->npages) { -@@ -574,7 +621,7 @@ static struct nfs_page *nfs_try_to_updat +@@ -585,7 +632,7 @@ static struct nfs_page *nfs_try_to_updat spin_lock(&inode->i_lock); for (;;) { diff --git a/patches.suse/SoN-27-nfs-swap_ops.patch b/patches.suse/SoN-27-nfs-swap_ops.patch index 110378c..68f2a8a 100644 --- a/patches.suse/SoN-27-nfs-swap_ops.patch +++ b/patches.suse/SoN-27-nfs-swap_ops.patch @@ -16,20 +16,21 @@ Signed-off-by: Peter Zijlstra Signed-off-by: Suresh Jayaraman --- fs/nfs/Kconfig | 10 ++++++ - fs/nfs/file.c | 18 +++++++++++ - fs/nfs/write.c | 22 +++++++++++++ + fs/nfs/file.c | 18 ++++++++++++ + fs/nfs/write.c | 22 ++++++++++++++ include/linux/nfs_fs.h | 2 + include/linux/sunrpc/xprt.h | 5 ++- net/sunrpc/Kconfig | 5 +++ - net/sunrpc/sched.c | 9 ++++- - net/sunrpc/xprtsock.c | 70 ++++++++++++++++++++++++++++++++++++++++++++ - 8 files changed, 138 insertions(+), 3 deletions(-) + net/sunrpc/clnt.c | 2 + + net/sunrpc/sched.c | 7 +++- + net/sunrpc/xprtsock.c | 65 ++++++++++++++++++++++++++++++++++++++++++++ + 9 files changed, 133 insertions(+), 3 deletions(-) --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -74,6 +74,16 @@ config NFS_V4 - If unsure, say N. + If unsure, say Y. 
+config NFS_SWAP + bool "Provide swap over NFS support" @@ -42,11 +43,11 @@ Signed-off-by: Suresh Jayaraman + For more details, see Documentation/network-swap.txt + config NFS_V4_1 - bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)" - depends on NFS_V4 && EXPERIMENTAL + bool "NFS client support for NFSv4.1 (EXPERIMENTAL)" + depends on NFS_FS && NFS_V4 && EXPERIMENTAL --- a/fs/nfs/file.c +++ b/fs/nfs/file.c -@@ -519,6 +519,18 @@ static int nfs_launder_page(struct page +@@ -529,6 +529,18 @@ static int nfs_launder_page(struct page return nfs_wb_page(inode, page); } @@ -65,7 +66,7 @@ Signed-off-by: Suresh Jayaraman const struct address_space_operations nfs_file_aops = { .readpage = nfs_readpage, .readpages = nfs_readpages, -@@ -533,6 +545,12 @@ const struct address_space_operations nf +@@ -543,6 +555,12 @@ const struct address_space_operations nf .migratepage = nfs_migrate_page, .launder_page = nfs_launder_page, .error_remove_page = generic_error_remove_page, @@ -80,7 +81,7 @@ Signed-off-by: Suresh Jayaraman /* --- a/fs/nfs/write.c +++ b/fs/nfs/write.c -@@ -356,6 +356,28 @@ int nfs_writepage(struct page *page, str +@@ -365,6 +365,28 @@ int nfs_writepage(struct page *page, str return ret; } @@ -111,7 +112,7 @@ Signed-off-by: Suresh Jayaraman int ret; --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h -@@ -469,6 +469,8 @@ extern int nfs_writepages(struct addres +@@ -502,6 +502,8 @@ extern int nfs_writepages(struct addres extern int nfs_flush_incompatible(struct file *file, struct page *page); extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); @@ -122,7 +123,7 @@ Signed-off-by: Suresh Jayaraman * Try to write back everything synchronously (but check the --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h -@@ -168,7 +168,9 @@ struct rpc_xprt { +@@ -172,7 +172,9 @@ struct rpc_xprt { unsigned int max_reqs; /* total slots */ unsigned long state; /* transport state */ unsigned char shutdown : 1, /* being shut down */ @@ -133,7 +134,7 @@ Signed-off-by: Suresh Jayaraman unsigned int bind_index; /* bind function index */ /* -@@ -302,6 +304,7 @@ void xprt_release_rqst_cong(struct rpc +@@ -308,6 +310,7 @@ void xprt_release_rqst_cong(struct rpc void xprt_disconnect_done(struct rpc_xprt *xprt); void xprt_force_disconnect(struct rpc_xprt *xprt); void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); @@ -148,16 +149,27 @@ Signed-off-by: Suresh Jayaraman If unsure, say N. 
+config SUNRPC_SWAP -+ def_bool n ++ bool + depends on SUNRPC + select NETVM + config RPCSEC_GSS_KRB5 - tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" - depends on SUNRPC && EXPERIMENTAL + tristate + depends on SUNRPC && CRYPTO +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -590,6 +590,8 @@ void rpc_task_set_client(struct rpc_task + atomic_inc(&clnt->cl_count); + if (clnt->cl_softrtry) + task->tk_flags |= RPC_TASK_SOFT; ++ if (task->tk_client->cl_xprt->swapper) ++ task->tk_flags |= RPC_TASK_SWAPPER; + /* Add to the client's list of all tasks */ + spin_lock(&clnt->cl_lock); + list_add_tail(&task->tk_task, &clnt->cl_tasks); --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c -@@ -747,7 +747,10 @@ static void rpc_async_schedule(struct wo +@@ -728,7 +728,10 @@ static void rpc_async_schedule(struct wo void *rpc_malloc(struct rpc_task *task, size_t size) { struct rpc_buffer *buf; @@ -169,16 +181,7 @@ Signed-off-by: Suresh Jayaraman size += sizeof(struct rpc_buffer); if (size <= RPC_BUFFER_MAXSIZE) -@@ -818,6 +821,8 @@ static void rpc_init_task(struct rpc_tas - kref_get(&task->tk_client->cl_kref); - if (task->tk_client->cl_softrtry) - task->tk_flags |= RPC_TASK_SOFT; -+ if (task->tk_client->cl_xprt->swapper) -+ task->tk_flags |= RPC_TASK_SWAPPER; - } - - if (task->tk_ops->rpc_call_prepare != NULL) -@@ -843,7 +848,7 @@ static void rpc_init_task(struct rpc_tas +@@ -807,7 +810,7 @@ static void rpc_init_task(struct rpc_tas static struct rpc_task * rpc_alloc_task(void) { @@ -189,9 +192,9 @@ Signed-off-by: Suresh Jayaraman /* --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c -@@ -1642,6 +1642,57 @@ static inline void xs_reclassify_socket6 +@@ -1641,6 +1641,57 @@ out: + return ERR_PTR(err); } - #endif +#ifdef CONFIG_SUNRPC_SWAP +static void xs_set_memalloc(struct rpc_xprt *xprt) @@ -247,7 +250,7 @@ Signed-off-by: Suresh Jayaraman static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); -@@ -1666,6 +1717,8 @@ static void xs_udp_finish_connecting(str +@@ -1665,6 +1716,8 @@ static void xs_udp_finish_connecting(str transport->sock = sock; transport->inet = sk; @@ -256,36 +259,12 @@ Signed-off-by: Suresh Jayaraman write_unlock_bh(&sk->sk_callback_lock); } xs_udp_do_set_buffer_size(xprt); -@@ -1683,11 +1736,15 @@ static void xs_udp_connect_worker4(struc - container_of(work, struct sock_xprt, connect_worker.work); - struct rpc_xprt *xprt = &transport->xprt; - struct socket *sock = transport->sock; -+ unsigned long pflags = current->flags; - int err, status = -EIO; - - if (xprt->shutdown) - goto out; - -+ if (xprt->swapper) -+ current->flags |= PF_MEMALLOC; -+ - /* Start by resetting any existing state */ - xs_reset_transport(transport); - -@@ -1714,6 +1771,7 @@ static void xs_udp_connect_worker4(struc - out: - xprt_clear_connecting(xprt); - xprt_wake_pending_tasks(xprt, status); -+ tsk_restore_flags(current, pflags, PF_MEMALLOC); - } - - /** -@@ -1728,11 +1786,15 @@ static void xs_udp_connect_worker6(struc +@@ -1676,11 +1729,15 @@ static void xs_udp_setup_socket(struct w container_of(work, struct sock_xprt, connect_worker.work); struct rpc_xprt *xprt = &transport->xprt; struct socket *sock = transport->sock; + unsigned long pflags = current->flags; - int err, status = -EIO; + int status = -EIO; if (xprt->shutdown) goto out; @@ -295,8 +274,8 @@ Signed-off-by: Suresh Jayaraman + /* Start by resetting any existing state */ xs_reset_transport(transport); - -@@ -1759,6 +1821,7 @@ static void 
xs_udp_connect_worker6(struc + sock = xs_create_sock(xprt, transport, +@@ -1699,6 +1756,7 @@ static void xs_udp_setup_socket(struct w out: xprt_clear_connecting(xprt); xprt_wake_pending_tasks(xprt, status); @@ -304,7 +283,7 @@ Signed-off-by: Suresh Jayaraman } /* -@@ -1833,6 +1896,8 @@ static int xs_tcp_finish_connecting(stru +@@ -1788,6 +1846,8 @@ static int xs_tcp_finish_connecting(stru if (!xprt_bound(xprt)) return -ENOTCONN; @@ -313,10 +292,10 @@ Signed-off-by: Suresh Jayaraman /* Tell the socket layer to start connecting... */ xprt->stat.connect_count++; xprt->stat.connect_start = jiffies; -@@ -1853,11 +1918,15 @@ static void xs_tcp_setup_socket(struct r - struct sock_xprt *)) - { +@@ -1808,11 +1868,15 @@ static void xs_tcp_setup_socket(struct w + container_of(work, struct sock_xprt, connect_worker.work); struct socket *sock = transport->sock; + struct rpc_xprt *xprt = &transport->xprt; + unsigned long pflags = current->flags; int status = -EIO; @@ -328,12 +307,12 @@ Signed-off-by: Suresh Jayaraman + if (!sock) { clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); - sock = create_sock(xprt, transport); -@@ -1918,6 +1987,7 @@ out_eagain: + sock = xs_create_sock(xprt, transport, +@@ -1874,6 +1938,7 @@ out_eagain: out: xprt_clear_connecting(xprt); xprt_wake_pending_tasks(xprt, status); + tsk_restore_flags(current, pflags, PF_MEMALLOC); } - static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt, + /** diff --git a/patches.suse/SoN-27a-nfs-swap_ops.patch b/patches.suse/SoN-27a-nfs-swap_ops.patch new file mode 100644 index 0000000..bee43d5 --- /dev/null +++ b/patches.suse/SoN-27a-nfs-swap_ops.patch @@ -0,0 +1,46 @@ +From: Mel Gorman +Date: Wed, 9 Mar 2011 12:26:23 +0000 +Subject: [PATCH] netvm: Do not mark requests for swapfile writes as dirty or kswapd fails to free the page +Patch-mainline: Not yet +References: bnc#678472 + +When writing back NFS pages from kswapd, the inode and pages are getting +marked dirty before IO has even started. The expectation of kswapd is +that it calls clear_page_dirty_for_io(), submits IO and the filesystem +remarks the page dirty if necessary. Without this patch, the page always +comes back under writeback and still dirty. kswapd continually launders +but never frees leading to deadlock. + +Signed-off-by: Mel Gorman +--- + fs/nfs/write.c | 14 ++++++++++++-- + 1 files changed, 12 insertions(+), 2 deletions(-) + +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 2375c7d..fe05d78 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -744,11 +744,21 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, + req = nfs_setup_write_request(ctx, page, offset, count); + if (IS_ERR(req)) + return PTR_ERR(req); +- nfs_mark_request_dirty(req); ++ ++ /* ++ * There is no need to mark swapfile requests as dirty like normal ++ * writepage requests as page dirtying and cleaning is managed ++ * from the mm. 
If a PageSwapCache page is marked dirty like this, ++ * it will still be dirty after kswapd calls writepage and may ++ * never be released ++ */ ++ if (!PageSwapCache(page)) ++ nfs_mark_request_dirty(req); + /* Update file length */ + nfs_grow_file(page, offset, count); + nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); +- nfs_mark_request_dirty(req); ++ if (!PageSwapCache(page)) ++ nfs_mark_request_dirty(req); + nfs_clear_page_tag_locked(req); + return 0; + } diff --git a/patches.suse/SoN-28-nfs-alloc-recursions.patch b/patches.suse/SoN-28-nfs-alloc-recursions.patch index 4356761..523c3f5 100644 --- a/patches.suse/SoN-28-nfs-alloc-recursions.patch +++ b/patches.suse/SoN-28-nfs-alloc-recursions.patch @@ -19,14 +19,14 @@ Signed-off-by: Suresh Jayaraman --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -27,7 +27,7 @@ static inline struct nfs_page * + static inline struct nfs_page * nfs_page_alloc(void) { - struct nfs_page *p; -- p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL); -+ p = kmem_cache_alloc(nfs_page_cachep, GFP_NOIO); - if (p) { - memset(p, 0, sizeof(*p)); +- struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL); ++ struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO); + if (p) INIT_LIST_HEAD(&p->wb_list); + return p; --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -50,7 +50,7 @@ static mempool_t *nfs_commit_mempool; diff --git a/patches.suse/SoN-29-fix-swap_sync_page-race b/patches.suse/SoN-29-fix-swap_sync_page-race index bbf0567..2544dab 100644 --- a/patches.suse/SoN-29-fix-swap_sync_page-race +++ b/patches.suse/SoN-29-fix-swap_sync_page-race @@ -23,7 +23,7 @@ Signed-off-by: Suresh Jayaraman --- a/mm/page_io.c +++ b/mm/page_io.c -@@ -127,10 +127,15 @@ out: +@@ -128,10 +128,15 @@ out: return ret; } @@ -41,7 +41,7 @@ Signed-off-by: Suresh Jayaraman --- a/mm/swapfile.c +++ b/mm/swapfile.c -@@ -2242,7 +2242,13 @@ int swapcache_prepare(swp_entry_t entry) +@@ -2302,7 +2302,13 @@ int swapcache_prepare(swp_entry_t entry) struct swap_info_struct *page_swap_info(struct page *page) { swp_entry_t swap = { .val = page_private(page) }; diff --git a/patches.suse/SoN-30-fix-uninitialized-var.patch b/patches.suse/SoN-30-fix-uninitialized-var.patch index 415991c..a484ffd 100644 --- a/patches.suse/SoN-30-fix-uninitialized-var.patch +++ b/patches.suse/SoN-30-fix-uninitialized-var.patch @@ -19,7 +19,7 @@ Signed-off-by: Suresh Jayaraman --- a/mm/slab.c +++ b/mm/slab.c -@@ -2773,7 +2773,7 @@ static int cache_grow(struct kmem_cache +@@ -2841,7 +2841,7 @@ static int cache_grow(struct kmem_cache size_t offset; gfp_t local_flags; struct kmem_list3 *l3; @@ -28,7 +28,7 @@ Signed-off-by: Suresh Jayaraman /* * Be lazy and only check for valid flags here, keeping it out of the -@@ -2829,7 +2829,8 @@ static int cache_grow(struct kmem_cache +@@ -2897,7 +2897,8 @@ static int cache_grow(struct kmem_cache if (local_flags & __GFP_WAIT) local_irq_disable(); check_irq_off(); diff --git a/patches.suse/SoN-31-fix-null-pointer-dereference b/patches.suse/SoN-31-fix-null-pointer-dereference new file mode 100644 index 0000000..0c88a66 --- /dev/null +++ b/patches.suse/SoN-31-fix-null-pointer-dereference @@ -0,0 +1,37 @@ +From: Xiaotian Feng +Subject: fix null pointer deref in swap_entry_free +Patch-mainline: Not yet + +Commit b3a27d uses p->bdev->bd_disk, this will lead a null pointer +deref with swap over nfs. 
+ +Signed-off-by: Xiaotian Feng +Signed-off-by: Suresh Jayaraman +-- +Index: linux-2.6.35-master/mm/swapfile.c +=================================================================== +--- linux-2.6.35-master.orig/mm/swapfile.c ++++ linux-2.6.35-master/mm/swapfile.c +@@ -574,7 +574,6 @@ static unsigned char swap_entry_free(str + + /* free if no reference */ + if (!usage) { +- struct gendisk *disk = p->bdev->bd_disk; + if (offset < p->lowest_bit) + p->lowest_bit = offset; + if (offset > p->highest_bit) +@@ -584,9 +583,11 @@ static unsigned char swap_entry_free(str + swap_list.next = p->type; + nr_swap_pages++; + p->inuse_pages--; +- if ((p->flags & SWP_BLKDEV) && +- disk->fops->swap_slot_free_notify) +- disk->fops->swap_slot_free_notify(p->bdev, offset); ++ if (p->flags & SWP_BLKDEV) { ++ struct gendisk *disk = p->bdev->bd_disk; ++ if (disk->fops->swap_slot_free_notify) ++ disk->fops->swap_slot_free_notify(p->bdev, offset); ++ } + } + + return usage; diff --git a/patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles b/patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles new file mode 100644 index 0000000..a0f82de --- /dev/null +++ b/patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles @@ -0,0 +1,45 @@ +From: Xiaotian Feng +Subject: fix mess up on swap with multi files from same nfs server +Patch-mainline: Not yet + +xs_swapper() will set xprt->swapper when swapon nfs files, unset xprt->swapper +when swapoff nfs files. This will lead a bug if we swapon multi files from +the same nfs server, they had the same xprt, then the reserved memory could +not be disconnected when we swapoff all files. + +Signed-off-by: Xiaotian Feng +--- +Index: linux-2.6.35-master/include/linux/sunrpc/xprt.h +=================================================================== +--- linux-2.6.35-master.orig/include/linux/sunrpc/xprt.h ++++ linux-2.6.35-master/include/linux/sunrpc/xprt.h +@@ -172,8 +172,8 @@ struct rpc_xprt { + unsigned int max_reqs; /* total slots */ + unsigned long state; /* transport state */ + unsigned char shutdown : 1, /* being shut down */ +- resvport : 1, /* use a reserved port */ +- swapper : 1; /* we're swapping over this ++ resvport : 1; /* use a reserved port */ ++ unsigned int swapper; /* we're swapping over this + transport */ + unsigned int bind_index; /* bind function index */ + +Index: linux-2.6.35-master/net/sunrpc/xprtsock.c +=================================================================== +--- linux-2.6.35-master.orig/net/sunrpc/xprtsock.c ++++ linux-2.6.35-master/net/sunrpc/xprtsock.c +@@ -1665,11 +1665,11 @@ int xs_swapper(struct rpc_xprt *xprt, in + */ + err = sk_adjust_memalloc(1, RPC_RESERVE_PAGES); + if (!err) { +- xprt->swapper = 1; ++ xprt->swapper++; + xs_set_memalloc(xprt); + } + } else if (xprt->swapper) { +- xprt->swapper = 0; ++ xprt->swapper--; + sk_clear_memalloc(transport->inet); + sk_adjust_memalloc(-1, -RPC_RESERVE_PAGES); + } diff --git a/patches.suse/SoN-33-slab-leak-fix.patch b/patches.suse/SoN-33-slab-leak-fix.patch new file mode 100644 index 0000000..4cb0e44 --- /dev/null +++ b/patches.suse/SoN-33-slab-leak-fix.patch @@ -0,0 +1,45 @@ +From: Nick Piggin +Subject: Fix almost-infinite slab cache growing +References: bnc#554081 +Patch-mainline: never + +If we get into cache_alloc_refill() with must_refill set, we end up in an +almost endless loop adding more and more pages to the slab. The loop can be +broken only by a failure to allocate more pages or an interrupt refilling the +slab's allocation cache. 
+ +Fix the issue by jumping to a more appropriate place when the allocation cache +is not refilled by an interrupt. + +Signed-off-by: Nick Piggin + +Index: linux-2.6.38-master/mm/slab.c +=================================================================== +--- linux-2.6.38-master.orig/mm/slab.c ++++ linux-2.6.38-master/mm/slab.c +@@ -3104,11 +3104,11 @@ static void *cache_alloc_refill(struct k + struct array_cache *ac; + int node; + +-retry: + check_irq_off(); + node = numa_slab_nid(cachep, flags); + if (unlikely(must_refill)) + goto force_grow; ++retry: + ac = cpu_cache_get(cachep); + batchcount = ac->batchcount; + if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { +@@ -3188,8 +3188,10 @@ force_grow: + if (!x && (ac->avail == 0 || must_refill)) + return NULL; + +- if (!ac->avail) /* objects refilled by interrupt? */ ++ if (!ac->avail) { /* objects refilled by interrupt? */ ++ node = numa_node_id(); + goto retry; ++ } + } + ac->touched = 1; + return ac->entry[--ac->avail]; diff --git a/patches.suse/SoN-fix b/patches.suse/SoN-fix new file mode 100644 index 0000000..f75ae54 --- /dev/null +++ b/patches.suse/SoN-fix @@ -0,0 +1,22 @@ +From: Jeff Mahoney +Subject: SoN: wakeup_kswapd takes a zone index now +Patch-mainline: Depends on local patches + + This patch fixes the build with SoN applied. + +Signed-off-by: Jeff Mahoney +--- + mm/page_alloc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -5054,7 +5054,7 @@ static int test_reserve_limits(void) + int node; + + for_each_zone(zone) +- wakeup_kswapd(zone, 0); ++ wakeup_kswapd(zone, 0, zone_idx(zone)); + + for_each_online_node(node) { + struct page *page = alloc_pages_node(node, GFP_KERNEL, 0); diff --git a/patches.suse/acpi-don-t-preempt-until-the-system-is-up b/patches.suse/acpi-don-t-preempt-until-the-system-is-up index 9888d0e..9e28565 100644 --- a/patches.suse/acpi-don-t-preempt-until-the-system-is-up +++ b/patches.suse/acpi-don-t-preempt-until-the-system-is-up @@ -1,12 +1,12 @@ From: Jeff Mahoney Subject: acpi: don't preempt until the system is up +Patch-mainline: Probably never This is needed to avoid scheduling while atomic BUGs with the DSDT in initramfs patches. Signed-off-by: Jeff Mahoney -Acked-by: Jeff Mahoney --- drivers/acpi/acpica/psloop.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/patches.suse/acpi-generic-initramfs-table-override-support b/patches.suse/acpi-generic-initramfs-table-override-support index 0f63b2c..ee6d39b 100644 --- a/patches.suse/acpi-generic-initramfs-table-override-support +++ b/patches.suse/acpi-generic-initramfs-table-override-support @@ -130,7 +130,7 @@ Signed-off-by: Jeff Mahoney +for use with the CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS method. --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt -@@ -217,6 +217,10 @@ and is between 256 and 4096 characters. +@@ -220,6 +220,10 @@ and is between 256 and 4096 characters. 
acpi_no_auto_ssdt [HW,ACPI] Disable automatic loading of SSDT @@ -143,7 +143,7 @@ Signed-off-by: Jeff Mahoney --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig -@@ -260,6 +260,19 @@ config ACPI_CUSTOM_DSDT +@@ -271,6 +271,19 @@ config ACPI_CUSTOM_DSDT bool default ACPI_CUSTOM_DSDT_FILE != "" @@ -165,7 +165,7 @@ Signed-off-by: Jeff Mahoney default 0 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c -@@ -665,6 +665,13 @@ void __init acpi_early_init(void) +@@ -866,6 +866,13 @@ void __init acpi_early_init(void) goto error0; } @@ -229,7 +229,7 @@ Signed-off-by: Jeff Mahoney /* * The story of _OSI(Linux) * -@@ -352,6 +379,146 @@ acpi_os_predefined_override(const struct +@@ -326,6 +353,146 @@ acpi_os_predefined_override(const struct return AE_OK; } @@ -320,7 +320,7 @@ Signed-off-by: Jeff Mahoney + int i; + + /* This is early enough that we don't need the mutex yet */ -+ for (i = 0; i < acpi_gbl_root_table_list.count; ++i) { ++ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { + if (acpi_tb_is_table_loaded(i)) + continue; + @@ -376,7 +376,7 @@ Signed-off-by: Jeff Mahoney acpi_status acpi_os_table_override(struct acpi_table_header * existing_table, struct acpi_table_header ** new_table) -@@ -365,6 +532,10 @@ acpi_os_table_override(struct acpi_table +@@ -339,6 +506,10 @@ acpi_os_table_override(struct acpi_table if (strncmp(existing_table->signature, "DSDT", 4) == 0) *new_table = (struct acpi_table_header *)AmlCode; #endif diff --git a/patches.suse/add-initramfs-file_read_write b/patches.suse/add-initramfs-file_read_write index c67c7bd..7caf883 100644 --- a/patches.suse/add-initramfs-file_read_write +++ b/patches.suse/add-initramfs-file_read_write @@ -11,8 +11,8 @@ Patch-mainline: Probably never Signed-off-by: Jeff Mahoney --- - init/initramfs.c | 150 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- - 1 file changed, 147 insertions(+), 3 deletions(-) + init/initramfs.c | 152 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 149 insertions(+), 3 deletions(-) --- a/init/initramfs.c +++ b/init/initramfs.c @@ -32,7 +32,7 @@ Signed-off-by: Jeff Mahoney static __initdata char *message; static void __init error(char *x) -@@ -333,10 +335,152 @@ static int __init do_name(void) +@@ -333,10 +336,153 @@ static int __init do_name(void) return 0; } @@ -67,8 +67,9 @@ Signed-off-by: Jeff Mahoney + break; + } + -+ data = page_address(page); ++ data = kmap_atomic(page, KM_USER0); + memcpy(i.iov->iov_base + i.iov_offset, data + offset, bytes); ++ kunmap_atomic(data, KM_USER0); + + iov_iter_advance(&i, bytes); + pos += bytes; @@ -120,9 +121,9 @@ Signed-off-by: Jeff Mahoney + &page, &fsdata); + if (unlikely(status)) + break; -+ data = page_address(page); -+ ++ data = kmap_atomic(page, KM_USER0); + memcpy(data + offset, i.iov->iov_base + i.iov_offset, bytes); ++ kunmap_atomic(data, KM_USER0); + copied = bytes; + + status = simple_write_end(file, mapping, pos, bytes, copied, @@ -186,7 +187,7 @@ Signed-off-by: Jeff Mahoney sys_close(wfd); do_utime(vcollected, mtime); kfree(vcollected); -@@ -344,7 +488,7 @@ static int __init do_copy(void) +@@ -344,7 +490,7 @@ static int __init do_copy(void) state = SkipIt; return 0; } else { @@ -195,7 +196,7 @@ Signed-off-by: Jeff Mahoney body_len -= count; eat(count); return 1; -@@ -589,7 +733,7 @@ static int __init populate_rootfs(void) +@@ -592,7 +738,7 @@ static int __init populate_rootfs(void) "; looks like an initrd\n", err); fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700); if (fd >= 0) { diff --git 
a/patches.suse/audit-export-logging.patch b/patches.suse/audit-export-logging.patch index 4748d7c..96cc886 100644 --- a/patches.suse/audit-export-logging.patch +++ b/patches.suse/audit-export-logging.patch @@ -17,7 +17,7 @@ Signed-Off-by: Tony Jones --- a/include/linux/audit.h +++ b/include/linux/audit.h -@@ -585,6 +585,9 @@ extern void audit_log(struct audit_ +@@ -577,6 +577,9 @@ extern void audit_log(struct audit_ __attribute__((format(printf,4,5))); extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); diff --git a/patches.suse/b43-missing-firmware-info.patch b/patches.suse/b43-missing-firmware-info.patch index 243ce26..ba32b2d 100644 --- a/patches.suse/b43-missing-firmware-info.patch +++ b/patches.suse/b43-missing-firmware-info.patch @@ -15,9 +15,9 @@ Signed-off-by: Jiri Benc drivers/net/wireless/b43/main.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) ---- linux-2.6.34-master.orig/drivers/net/wireless/b43/main.c -+++ linux-2.6.34-master/drivers/net/wireless/b43/main.c -@@ -1976,10 +1976,13 @@ static void b43_release_firmware(struct +--- a/drivers/net/wireless/b43/main.c ++++ b/drivers/net/wireless/b43/main.c +@@ -1976,10 +1976,13 @@ static void b43_release_firmware(struct static void b43_print_fw_helptext(struct b43_wl *wl, bool error) { const char text[] = diff --git a/patches.suse/bootsplash b/patches.suse/bootsplash index 7eb6cc0..fe90f66 100644 --- a/patches.suse/bootsplash +++ b/patches.suse/bootsplash @@ -9,15 +9,15 @@ when installing a new jpeg. Provide splash_set_percent function. Signed-off-by: mls@suse.de --- - drivers/char/keyboard.c | 9 - drivers/char/n_tty.c | 9 - drivers/char/vt.c | 25 + drivers/tty/n_tty.c | 9 + drivers/tty/vt/keyboard.c | 9 + drivers/tty/vt/vt.c | 25 drivers/video/Kconfig | 4 drivers/video/Makefile | 1 drivers/video/bootsplash/Kconfig | 17 drivers/video/bootsplash/Makefile | 5 drivers/video/bootsplash/bootsplash.c | 1017 ++++++++++++++++++++++++++++++++++ - drivers/video/bootsplash/bootsplash.h | 44 + + drivers/video/bootsplash/bootsplash.h | 41 + drivers/video/bootsplash/decode-jpg.c | 957 +++++++++++++++++++++++++++++++ drivers/video/bootsplash/decode-jpg.h | 35 + drivers/video/bootsplash/render.c | 328 ++++++++++ @@ -28,29 +28,11 @@ Signed-off-by: mls@suse.de include/linux/console_struct.h | 3 include/linux/fb.h | 8 kernel/panic.c | 13 - 19 files changed, 2601 insertions(+), 2 deletions(-) + 19 files changed, 2598 insertions(+), 2 deletions(-) ---- a/drivers/char/keyboard.c -+++ b/drivers/char/keyboard.c -@@ -1190,6 +1190,15 @@ static void kbd_keycode(unsigned int key - if (keycode < BTN_MISC && printk_ratelimit()) - printk(KERN_WARNING "keyboard.c: can't emulate rawmode for keycode %d\n", keycode); - -+#ifdef CONFIG_BOOTSPLASH -+ /* This code has to be redone for some non-x86 platforms */ -+ if (down == 1 && (keycode == 0x3c || keycode == 0x01)) { /* F2 and ESC on PC keyboard */ -+ extern int splash_verbose(void); -+ if (splash_verbose()) -+ return; -+ } -+#endif -+ - #ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */ - if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) { - if (!sysrq_down) { ---- a/drivers/char/n_tty.c -+++ b/drivers/char/n_tty.c -@@ -1779,6 +1779,15 @@ do_it_again: +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -1790,6 +1790,15 @@ do_it_again: tty->minimum_to_wake = (minimum - (b - buf)); if (!input_available_p(tty, 0)) { @@ -66,10 +48,28 @@ Signed-off-by: mls@suse.de if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { retval = -EIO; break; 
---- a/drivers/char/vt.c -+++ b/drivers/char/vt.c -@@ -4101,6 +4101,31 @@ void vcs_scr_writew(struct vc_data *vc, - } +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -1202,6 +1202,15 @@ static void kbd_keycode(unsigned int key + pr_warning("can't emulate rawmode for keycode %d\n", + keycode); + ++#ifdef CONFIG_BOOTSPLASH ++ /* This code has to be redone for some non-x86 platforms */ ++ if (down == 1 && (keycode == 0x3c || keycode == 0x01)) { /* F2 and ESC on PC keyboard */ ++ extern int splash_verbose(void); ++ if (splash_verbose()) ++ return; ++ } ++#endif ++ + #ifdef CONFIG_SPARC + if (keycode == KEY_A && sparc_l1_a_state) { + sparc_l1_a_state = false; +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -4213,6 +4213,31 @@ void vcs_scr_updated(struct vc_data *vc) + notify_update(vc); } +#ifdef CONFIG_BOOTSPLASH @@ -102,7 +102,7 @@ Signed-off-by: mls@suse.de */ --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig -@@ -2228,4 +2228,8 @@ if FB || SGI_NEWPORT_CONSOLE +@@ -2379,4 +2379,8 @@ if FB || SGI_NEWPORT_CONSOLE source "drivers/video/logo/Kconfig" endif @@ -980,7 +980,7 @@ Signed-off-by: mls@suse.de + if (!buffer || !splash_default) + return count; + -+ acquire_console_sem(); ++ console_lock(); + unit = 0; + if (buffer[0] == '@' && buffer[1] >= '0' && buffer[1] <= '9') { + unit = buffer[1] - '0'; @@ -992,14 +992,14 @@ Signed-off-by: mls@suse.de + if (*buffer == ' ') + buffer++; + if (unit >= MAX_NR_CONSOLES || !vc_cons[unit].d) { -+ release_console_sem(); ++ console_unlock(); + return count; + } + } + vc = vc_cons[unit].d; + if (!strncmp(buffer, "redraw", 6)) { + splash_status(vc); -+ release_console_sem(); ++ console_unlock(); + return count; + } + if (!strncmp(buffer, "show", 4) || !strncmp(buffer, "hide", 4)) { @@ -1018,7 +1018,7 @@ Signed-off-by: mls@suse.de + if (*buffer == 'h') + pe = 65535 - pe; + splash_set_percent(vc, pe); -+ release_console_sem(); ++ console_unlock(); + return count; + } + if (!strncmp(buffer,"silent\n",7) || !strncmp(buffer,"verbose\n",8)) { @@ -1028,7 +1028,7 @@ Signed-off-by: mls@suse.de + splash_status(vc); + } + } -+ release_console_sem(); ++ console_unlock(); + return count; + } + if (!strncmp(buffer,"freesilent\n",11)) { @@ -1042,7 +1042,7 @@ Signed-off-by: mls@suse.de + splash_status(vc); + vc->vc_splash_data->splash_dosilent = 0; + } -+ release_console_sem(); ++ console_unlock(); + return count; + } + @@ -1064,17 +1064,17 @@ Signed-off-by: mls@suse.de + boxit(info->screen_base, info->fix.line_length, vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount, vc->vc_splash_data->splash_percent, 1); + } + } -+ release_console_sem(); ++ console_unlock(); + return count; + } + if (!vc->vc_splash_data) { -+ release_console_sem(); ++ console_unlock(); + return count; + } + if (buffer[0] == 't') { + vc->vc_splash_data->splash_state ^= 1; + splash_status(vc); -+ release_console_sem(); ++ console_unlock(); + return count; + } + new = simple_strtoul(buffer, NULL, 0); @@ -1089,7 +1089,7 @@ Signed-off-by: mls@suse.de + vc->vc_splash_data->splash_state = new & 1; + splash_status(vc); + } -+ release_console_sem(); ++ console_unlock(); + return count; +} + @@ -1157,10 +1157,10 @@ Signed-off-by: mls@suse.de + + mem = vmalloc(len); + if (mem) { -+ acquire_console_sem(); ++ console_lock(); + if ((int)sys_read(fd, mem, len) == len && splash_getraw((unsigned char *)mem, (unsigned char *)mem + len, (int *)0) == 0 && vc->vc_splash_data) + vc->vc_splash_data->splash_state = splash_default & 1; -+ release_console_sem(); ++ 
console_unlock(); + vfree(mem); + } + sys_close(fd); @@ -1171,7 +1171,7 @@ Signed-off-by: mls@suse.de + --- /dev/null +++ b/drivers/video/bootsplash/bootsplash.h -@@ -0,0 +1,44 @@ +@@ -0,0 +1,41 @@ +/* + * linux/drivers/video/bootsplash/bootsplash.h - splash screen definition. + * @@ -1212,9 +1212,6 @@ Signed-off-by: mls@suse.de +/* vt.c */ +extern void con_remap_def_color(struct vc_data *vc, int new_color); + -+extern void acquire_console_sem(void); -+extern void release_console_sem(void); -+ +#endif --- /dev/null +++ b/drivers/video/bootsplash/decode-jpg.c @@ -2547,7 +2544,7 @@ Signed-off-by: mls@suse.de + --- a/drivers/video/console/bitblit.c +++ b/drivers/video/console/bitblit.c -@@ -17,6 +17,9 @@ +@@ -18,6 +18,9 @@ #include #include #include "fbcon.h" @@ -2557,7 +2554,7 @@ Signed-off-by: mls@suse.de /* * Accelerated handlers. -@@ -47,6 +50,13 @@ static void bit_bmove(struct vc_data *vc +@@ -48,6 +51,13 @@ static void bit_bmove(struct vc_data *vc { struct fb_copyarea area; @@ -2571,7 +2568,7 @@ Signed-off-by: mls@suse.de area.sx = sx * vc->vc_font.width; area.sy = sy * vc->vc_font.height; area.dx = dx * vc->vc_font.width; -@@ -63,6 +73,13 @@ static void bit_clear(struct vc_data *vc +@@ -64,6 +74,13 @@ static void bit_clear(struct vc_data *vc int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; struct fb_fillrect region; @@ -2585,7 +2582,7 @@ Signed-off-by: mls@suse.de region.color = attr_bgcol_ec(bgshift, vc, info); region.dx = sx * vc->vc_font.width; region.dy = sy * vc->vc_font.height; -@@ -160,6 +177,13 @@ static void bit_putcs(struct vc_data *vc +@@ -161,6 +178,13 @@ static void bit_putcs(struct vc_data *vc image.height = vc->vc_font.height; image.depth = 1; @@ -2599,7 +2596,7 @@ Signed-off-by: mls@suse.de if (attribute) { buf = kmalloc(cellsize, GFP_KERNEL); if (!buf) -@@ -213,6 +237,13 @@ static void bit_clear_margins(struct vc_ +@@ -214,6 +238,13 @@ static void bit_clear_margins(struct vc_ unsigned int bs = info->var.yres - bh; struct fb_fillrect region; @@ -2613,7 +2610,7 @@ Signed-off-by: mls@suse.de region.color = attr_bgcol_ec(bgshift, vc, info); region.rop = ROP_COPY; -@@ -379,6 +410,14 @@ static void bit_cursor(struct vc_data *v +@@ -380,6 +411,14 @@ static void bit_cursor(struct vc_data *v cursor.image.depth = 1; cursor.rop = ROP_XOR; @@ -2652,7 +2649,7 @@ Signed-off-by: mls@suse.de static signed char con2fb_map_boot[MAX_NR_CONSOLES]; static int logo_lines; -@@ -537,6 +544,10 @@ static int fbcon_takeover(int show_logo) +@@ -538,6 +545,10 @@ static int fbcon_takeover(int show_logo) for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map[i] = info_idx; @@ -2663,7 +2660,7 @@ Signed-off-by: mls@suse.de err = take_over_console(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); -@@ -1099,6 +1110,16 @@ static void fbcon_init(struct vc_data *v +@@ -1101,6 +1112,16 @@ static void fbcon_init(struct vc_data *v new_cols /= vc->vc_font.width; new_rows /= vc->vc_font.height; @@ -2680,7 +2677,7 @@ Signed-off-by: mls@suse.de /* * We must always set the mode. 
The mode of the previous console * driver could be in the same resolution but we are using different -@@ -1800,6 +1821,10 @@ static int fbcon_scroll(struct vc_data * +@@ -1802,6 +1823,10 @@ static int fbcon_scroll(struct vc_data * fbcon_softback_note(vc, t, count); if (logo_shown >= 0) goto redraw_up; @@ -2691,7 +2688,7 @@ Signed-off-by: mls@suse.de switch (p->scrollmode) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, t, b - t - count, -@@ -1891,6 +1916,10 @@ static int fbcon_scroll(struct vc_data * +@@ -1893,6 +1918,10 @@ static int fbcon_scroll(struct vc_data * count = vc->vc_rows; if (logo_shown >= 0) goto redraw_down; @@ -2702,7 +2699,7 @@ Signed-off-by: mls@suse.de switch (p->scrollmode) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, -@@ -2039,6 +2068,14 @@ static void fbcon_bmove_rec(struct vc_da +@@ -2041,6 +2070,14 @@ static void fbcon_bmove_rec(struct vc_da } return; } @@ -2717,7 +2714,7 @@ Signed-off-by: mls@suse.de ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, height, width); } -@@ -2147,6 +2184,10 @@ static int fbcon_switch(struct vc_data * +@@ -2149,6 +2186,10 @@ static int fbcon_switch(struct vc_data * info = registered_fb[con2fb_map[vc->vc_num]]; ops = info->fbcon_par; @@ -2728,7 +2725,7 @@ Signed-off-by: mls@suse.de if (softback_top) { if (softback_lines) fbcon_set_origin(vc); -@@ -2280,6 +2321,12 @@ static void fbcon_generic_blank(struct v +@@ -2282,6 +2323,12 @@ static void fbcon_generic_blank(struct v { struct fb_event event; @@ -2741,7 +2738,7 @@ Signed-off-by: mls@suse.de if (blank) { unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; -@@ -2481,6 +2528,12 @@ static int fbcon_do_set_font(struct vc_d +@@ -2507,6 +2554,12 @@ static int fbcon_do_set_font(struct vc_d cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); @@ -2793,7 +2790,7 @@ Signed-off-by: mls@suse.de const u_char *fontdata; --- a/drivers/video/vesafb.c +++ b/drivers/video/vesafb.c -@@ -182,7 +182,10 @@ static void vesafb_destroy(struct fb_inf +@@ -181,7 +181,10 @@ static void vesafb_destroy(struct fb_inf framebuffer_release(info); } @@ -2805,7 +2802,7 @@ Signed-off-by: mls@suse.de .owner = THIS_MODULE, .fb_destroy = vesafb_destroy, .fb_setcolreg = vesafb_setcolreg, -@@ -267,6 +270,9 @@ static int __devinit vesafb_probe(struct +@@ -266,6 +269,9 @@ static int __init vesafb_probe(struct pl * option to simply use size_total as that * wastes plenty of kernel address space. */ size_remap = size_vmode * 2; @@ -2817,19 +2814,19 @@ Signed-off-by: mls@suse.de if (size_remap < size_vmode) --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h -@@ -105,6 +105,9 @@ struct vc_data { +@@ -106,6 +106,9 @@ struct vc_data { struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ unsigned long vc_uni_pagedir; unsigned long *vc_uni_pagedir_loc; /* [!] 
Location of uni_pagedir variable for this console */ +#ifdef CONFIG_BOOTSPLASH + struct splash_data *vc_splash_data; +#endif + bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ /* additional information is in vt_kern.h */ }; - --- a/include/linux/fb.h +++ b/include/linux/fb.h -@@ -859,6 +859,14 @@ struct fb_info { +@@ -875,6 +875,14 @@ struct fb_info { void *fbcon_par; /* fbcon use-only private area */ /* From here on everything is device dependent */ void *par; @@ -2846,7 +2843,7 @@ Signed-off-by: mls@suse.de allocated inside the aperture so may not actually overlap */ --- a/kernel/panic.c +++ b/kernel/panic.c -@@ -122,7 +122,12 @@ NORET_TYPE void panic(const char * fmt, +@@ -110,7 +110,12 @@ NORET_TYPE void panic(const char * fmt, * We can't use the "normal" timers since we just panicked. */ printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout); @@ -2857,10 +2854,10 @@ Signed-off-by: mls@suse.de + (void)splash_verbose(); + } +#endif - for (i = 0; i < panic_timeout; i++) { + for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { touch_nmi_watchdog(); - panic_blink_one_second(); -@@ -151,6 +156,12 @@ NORET_TYPE void panic(const char * fmt, + if (i >= i_next) { +@@ -143,6 +148,12 @@ NORET_TYPE void panic(const char * fmt, } #endif local_irq_enable(); @@ -2870,6 +2867,6 @@ Signed-off-by: mls@suse.de + (void)splash_verbose(); + } +#endif - while (1) { + for (i = 0; ; i += PANIC_TIMER_STEP) { touch_softlockup_watchdog(); - panic_blink_one_second(); + if (i >= i_next) { diff --git a/patches.suse/bootsplash-console-fix b/patches.suse/bootsplash-console-fix index c5b3e63..b7becf2 100644 --- a/patches.suse/bootsplash-console-fix +++ b/patches.suse/bootsplash-console-fix @@ -16,7 +16,7 @@ Signed-off-by: Takashi Iwai --- a/drivers/video/bootsplash/render.c +++ b/drivers/video/bootsplash/render.c -@@ -210,11 +210,7 @@ +@@ -210,11 +210,7 @@ void splashcopy(u8 *dst, u8 *src, int he union pt p, q; p.ul = (u32 *)dst; q.ul = (u32 *)src; @@ -29,7 +29,7 @@ Signed-off-by: Takashi Iwai fb_writel(*q.ul++,p.ul++); if (width & 2) fb_writew(*q.us++,p.us++); -@@ -234,12 +230,8 @@ +@@ -234,12 +230,8 @@ static void splashset(u8 *dst, int heigh while (height-- > 0) { union pt p; p.ul = (u32 *)dst; @@ -44,7 +44,7 @@ Signed-off-by: Takashi Iwai fb_writel(bgx,p.ul++); if (width & 2) fb_writew(bgx,p.us++); -@@ -248,7 +240,7 @@ +@@ -248,7 +240,7 @@ static void splashset(u8 *dst, int heigh dst += dstbytes; } else { /* slow! */ for (i=0; i < width; i++) @@ -53,7 +53,7 @@ Signed-off-by: Takashi Iwai } } } -@@ -398,8 +390,7 @@ +@@ -398,8 +390,7 @@ int splash_cursor(struct fb_info *info, void splash_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) { struct splash_data *sd; diff --git a/patches.suse/bootsplash-keep-multiple-data b/patches.suse/bootsplash-keep-multiple-data index 57dedcd..30f8d6a 100644 --- a/patches.suse/bootsplash-keep-multiple-data +++ b/patches.suse/bootsplash-keep-multiple-data @@ -291,7 +291,7 @@ Signed-off-by: Takashi Iwai #endif --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c -@@ -2185,7 +2185,16 @@ static int fbcon_switch(struct vc_data * +@@ -2187,7 +2187,16 @@ static int fbcon_switch(struct vc_data * ops = info->fbcon_par; #ifdef CONFIG_BOOTSPLASH diff --git a/patches.suse/bootsplash-scaler b/patches.suse/bootsplash-scaler index 3c50c3e..9d249f0 100644 --- a/patches.suse/bootsplash-scaler +++ b/patches.suse/bootsplash-scaler @@ -19,12 +19,12 @@ resolutions can be derived from it. 
Acked-by: Michal Marek --- - drivers/video/bootsplash/bootsplash.c | 858 +++++++++++++++++++++++++++++++--- + drivers/video/bootsplash/bootsplash.c | 855 +++++++++++++++++++++++++++++++--- drivers/video/bootsplash/decode-jpg.c | 4 drivers/video/bootsplash/render.c | 16 drivers/video/console/fbcon.h | 11 include/linux/fb.h | 3 - 5 files changed, 810 insertions(+), 82 deletions(-) + 5 files changed, 807 insertions(+), 82 deletions(-) --- a/drivers/video/bootsplash/bootsplash.c +++ b/drivers/video/bootsplash/bootsplash.c @@ -36,7 +36,7 @@ Acked-by: Michal Marek * * Ideas & SuSE screen work by Ken Wimer, * -@@ -55,7 +56,9 @@ static unsigned char *jpg_errors[] = { +@@ -55,7 +56,9 @@ "wrong marker", "no EOI", "bad tables", @@ -47,7 +47,7 @@ Acked-by: Michal Marek }; static struct jpeg_decdata *decdata = 0; /* private decoder data */ -@@ -64,7 +67,9 @@ static int splash_registered = 0; +@@ -64,7 +67,9 @@ static int splash_usesilent = 0; /* shall we display the silentjpeg? */ int splash_default = 0xf01; @@ -58,7 +58,7 @@ Acked-by: Michal Marek static int __init splash_setup(char *options) { -@@ -120,7 +125,8 @@ static int boxextract(unsigned char *buf +@@ -120,7 +125,8 @@ return 12; } @@ -68,7 +68,7 @@ Acked-by: Michal Marek { int x, y, p, doblend, r, g, b, a, add; unsigned int i = 0; -@@ -245,7 +251,7 @@ static void boxit(unsigned char *pic, in +@@ -245,7 +251,7 @@ } add = (xs & 1); add ^= (add ^ y) & 1 ? 1 : 3; /* 2x2 ordered dithering */ @@ -77,7 +77,7 @@ Acked-by: Michal Marek for (x = xs; x <= xe; x++) { if (!(sti & 0x80000000)) { sti <<= 1; -@@ -310,19 +316,172 @@ static void boxit(unsigned char *pic, in +@@ -310,19 +316,172 @@ } } @@ -253,7 +253,7 @@ Acked-by: Michal Marek if ((err = jpeg_decode(jpeg, mem, ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d)\n",jpg_errors[err - 1], err); vfree(mem); -@@ -337,6 +496,7 @@ static void splash_free(struct vc_data * +@@ -337,6 +496,7 @@ for (sd = vc->vc_splash_data; sd; sd = next) { next = sd->next; vfree(sd->splash_sboxes); @@ -261,7 +261,7 @@ Acked-by: Michal Marek vfree(sd); } vc->vc_splash_data = 0; -@@ -432,6 +592,11 @@ static void splash_pivot_current(struct +@@ -432,6 +592,11 @@ state = sd->splash_state; percent = sd->splash_percent; silent = sd->splash_dosilent; @@ -273,7 +273,7 @@ Acked-by: Michal Marek for (; sd->next; sd = sd->next) { if (sd->next == new) { sd->next = new->next; -@@ -441,6 +606,17 @@ static void splash_pivot_current(struct +@@ -441,6 +606,17 @@ new->splash_state = state; new->splash_percent = percent; new->splash_dosilent = silent; @@ -291,7 +291,7 @@ Acked-by: Michal Marek return; } } -@@ -459,7 +635,7 @@ static int splash_getraw(unsigned char * +@@ -459,7 +635,7 @@ int palcnt; int i, len; const int *offsets; @@ -300,7 +300,7 @@ Acked-by: Michal Marek struct fb_info *info; struct splash_data *sd; struct splash_data *splash_found = NULL; -@@ -489,7 +665,16 @@ static int splash_getraw(unsigned char * +@@ -489,7 +665,16 @@ vc_allocate(unit); } vc = vc_cons[unit].d; @@ -317,17 +317,7 @@ Acked-by: Michal Marek width = info->var.xres; height = info->var.yres; splash_size = splash_geti(ndata, SPLASH_OFF_SIZE); -@@ -539,6 +724,9 @@ static int splash_getraw(unsigned char * - } - if (update) - *update = up; -+ vfree(sd->splash_pic); -+ sd->splash_pic = NULL; -+ sd->splash_pic_size = 0; - } - return unit; - } -@@ -579,6 +767,12 @@ static int splash_getraw(unsigned char * +@@ -579,6 +764,12 @@ memset(sd, 0, sizeof(*sd)); jpeg_get_size(ndata + len + 
boxcnt * 12 + palcnt, &sd->splash_width, &sd->splash_height); @@ -340,7 +330,7 @@ Acked-by: Michal Marek if (silentsize) { sd->splash_silentjpeg = vmalloc(silentsize); if (sd->splash_silentjpeg) { -@@ -596,6 +790,8 @@ static int splash_getraw(unsigned char * +@@ -596,6 +787,8 @@ sd->splash_text_yo = splash_gets(ndata, SPLASH_OFF_YO); sd->splash_text_wi = splash_gets(ndata, SPLASH_OFF_WI); sd->splash_text_he = splash_gets(ndata, SPLASH_OFF_HE); @@ -349,7 +339,7 @@ Acked-by: Michal Marek sd->splash_percent = oldpercent == -1 ? splash_gets(ndata, SPLASH_OFF_PERCENT) : oldpercent; if (version == 1) { sd->splash_text_xo *= 8; -@@ -606,6 +802,9 @@ static int splash_getraw(unsigned char * +@@ -606,6 +799,9 @@ sd->splash_fg_color = (splash_default >> 4) & 0x0f; sd->splash_state = splash_default & 1; } @@ -359,7 +349,7 @@ Acked-by: Michal Marek /* fake penguin box for older formats */ if (version == 1) boxcnt = splash_mkpenguin(sd, sd->splash_text_xo + 10, sd->splash_text_yo + 10, sd->splash_text_wi - 20, sd->splash_text_he - 20, 0xf0, 0xf0, 0xf0); -@@ -627,15 +826,6 @@ static int splash_getraw(unsigned char * +@@ -627,15 +823,6 @@ ndata += len + splash_size - 1; continue; } @@ -375,7 +365,7 @@ Acked-by: Michal Marek printk(KERN_INFO "bootsplash: ...found (%dx%d, %d bytes, v%d).\n", width, height, splash_size, version); if (version == 1) { printk(KERN_WARNING "bootsplash: Using deprecated v1 header. Updating your splash utility recommended.\n"); -@@ -649,6 +839,16 @@ static int splash_getraw(unsigned char * +@@ -649,6 +836,16 @@ if (splash_found) { splash_pivot_current(vc, splash_found); return unit_found; @@ -392,7 +382,7 @@ Acked-by: Michal Marek } printk(KERN_ERR "bootsplash: ...no good signature found.\n"); -@@ -715,27 +915,71 @@ int splash_verbose(void) +@@ -715,27 +912,71 @@ return 0; } @@ -473,7 +463,7 @@ Acked-by: Michal Marek return -1; } -@@ -743,13 +987,14 @@ int splash_prepare(struct vc_data *vc, s +@@ -743,13 +984,14 @@ { int err; int width, height, depth, octpp, size, sbytes; @@ -489,7 +479,7 @@ Acked-by: Michal Marek return -1; } -@@ -759,52 +1004,62 @@ int splash_prepare(struct vc_data *vc, s +@@ -759,52 +1001,62 @@ octpp = (depth + 1) >> 3; if (depth == 24 || depth < 15) { /* Other targets might need fixing */ @@ -568,7 +558,7 @@ Acked-by: Michal Marek info->var.yres, info->var.xres, info->fix.line_length, sbytes, -@@ -813,27 +1068,43 @@ int splash_prepare(struct vc_data *vc, s +@@ -813,27 +1065,43 @@ } else vc->vc_splash_data->splash_dosilent = 0; @@ -624,7 +614,7 @@ Acked-by: Michal Marek return -5; } return 0; -@@ -856,12 +1127,16 @@ static struct proc_dir_entry *proc_splas +@@ -856,12 +1124,16 @@ static int splash_recolor(struct vc_data *vc) { @@ -642,7 +632,7 @@ Acked-by: Michal Marek if (fg_console == vc->vc_num) { update_region(vc, vc->vc_origin + vc->vc_size_row * vc->vc_top, -@@ -884,10 +1159,6 @@ static int splash_status(struct vc_data +@@ -884,10 +1156,6 @@ splash_prepare(vc, info); if (vc->vc_splash_data && vc->vc_splash_data->splash_state) { if (info->splash_data) { @@ -653,7 +643,7 @@ Acked-by: Michal Marek if (fg_console == vc->vc_num) { update_region(vc, vc->vc_origin + vc->vc_size_row * vc->vc_top, -@@ -895,11 +1166,9 @@ static int splash_status(struct vc_data +@@ -895,11 +1163,9 @@ splash_clear_margins(vc, info, 0); } } @@ -668,7 +658,7 @@ Acked-by: Michal Marek return 0; } -@@ -956,10 +1225,9 @@ void splash_set_percent(struct vc_data * +@@ -956,10 +1222,9 @@ || pe < oldpe) { if (splash_hasinter(vc->vc_splash_data->splash_boxes, 
vc->vc_splash_data->splash_boxcount)) { @@ -682,7 +672,7 @@ Acked-by: Michal Marek } else { int octpp = (info->var.bits_per_pixel + 1) >> 3; if (info->splash_data) { -@@ -970,6 +1238,8 @@ void splash_set_percent(struct vc_data * +@@ -970,6 +1235,8 @@ info->splash_data->splash_sboxes, info->splash_data->splash_sboxcount, info->splash_data->splash_percent, @@ -691,7 +681,7 @@ Acked-by: Michal Marek 1, octpp); #if 0 -@@ -979,6 +1249,8 @@ void splash_set_percent(struct vc_data * +@@ -979,6 +1246,8 @@ info->splash_data->splash_boxes, info->splash_data->splash_boxcount, info->splash_data->splash_percent, @@ -700,7 +690,7 @@ Acked-by: Michal Marek 1, octpp); #endif -@@ -1100,6 +1372,8 @@ static int splash_write_proc(struct file +@@ -1100,6 +1369,8 @@ info->splash_data->splash_sboxes, info->splash_data->splash_sboxcount, info->splash_data->splash_percent, @@ -709,7 +699,7 @@ Acked-by: Michal Marek 1, octpp); } else if ((up & 1) != 0) { -@@ -1108,6 +1382,8 @@ static int splash_write_proc(struct file +@@ -1108,6 +1379,8 @@ info->splash_data->splash_boxes, info->splash_data->splash_boxcount, info->splash_data->splash_percent, @@ -718,7 +708,7 @@ Acked-by: Michal Marek 1, octpp); } -@@ -1226,3 +1502,447 @@ void splash_init(void) +@@ -1226,3 +1499,447 @@ return; } @@ -1168,7 +1158,7 @@ Acked-by: Michal Marek +} --- a/drivers/video/bootsplash/decode-jpg.c +++ b/drivers/video/bootsplash/decode-jpg.c -@@ -888,9 +888,9 @@ PREC q[][64]; +@@ -888,9 +888,9 @@ #define PIC_32(yin, xin, p, xout) \ ( \ y = outy[(yin) * 8 + xin], \ @@ -1182,7 +1172,7 @@ Acked-by: Michal Marek --- a/drivers/video/bootsplash/render.c +++ b/drivers/video/bootsplash/render.c -@@ -45,7 +45,7 @@ void splash_putcs(struct vc_data *vc, st +@@ -45,7 +45,7 @@ transparent = sd->splash_color == bg_color; xpos = xpos * vc->vc_font.width + sd->splash_text_xo; ypos = ypos * vc->vc_font.height + sd->splash_text_yo; @@ -1191,7 +1181,7 @@ Acked-by: Michal Marek dst.ub = (u8 *)(info->screen_base + ypos * info->fix.line_length + xpos * octpp); fgx = ((u32 *)info->pseudo_palette)[fg_color]; if (transparent && sd->splash_color == 15) { -@@ -109,10 +109,10 @@ void splash_putcs(struct vc_data *vc, st +@@ -109,10 +109,10 @@ } } dst.ub += info->fix.line_length - vc->vc_font.width * octpp; @@ -1204,7 +1194,7 @@ Acked-by: Michal Marek } } -@@ -136,7 +136,7 @@ static void splash_renderc(struct fb_inf +@@ -136,7 +136,7 @@ sd = info->splash_data; transparent = sd->splash_color == bg_color; @@ -1213,7 +1203,7 @@ Acked-by: Michal Marek dst.ub = (u8*)(info->screen_base + ypos * info->fix.line_length + xpos * octpp); fgx = ((u32 *)info->pseudo_palette)[fg_color]; if (transparent && sd->splash_color == 15) { -@@ -197,7 +197,7 @@ static void splash_renderc(struct fb_inf +@@ -197,7 +197,7 @@ } } dst.ub += info->fix.line_length - width * octpp; @@ -1222,7 +1212,7 @@ Acked-by: Michal Marek } } -@@ -255,10 +255,11 @@ static void splashset(u8 *dst, int heigh +@@ -255,10 +255,11 @@ static void splashfill(struct fb_info *info, int sy, int sx, int height, int width) { int octpp = (info->var.bits_per_pixel + 1) >> 3; @@ -1236,7 +1226,7 @@ Acked-by: Michal Marek octpp); } -@@ -442,6 +443,7 @@ void splash_bmove_redraw(struct vc_data +@@ -442,6 +443,7 @@ void splash_blank(struct vc_data *vc, struct fb_info *info, int blank) { SPLASH_DEBUG(); @@ -1246,7 +1236,7 @@ Acked-by: Michal Marek info->var.yres, info->var.xres, --- a/drivers/video/console/fbcon.h +++ b/drivers/video/console/fbcon.h -@@ -34,8 +34,10 @@ struct splash_data { +@@ -34,8 +34,10 @@ int splash_height; /* height of 
image */ int splash_text_xo; /* text area origin */ int splash_text_yo; @@ -1258,7 +1248,7 @@ Acked-by: Michal Marek int splash_showtext; /* silent/verbose mode */ int splash_boxcount; int splash_percent; -@@ -45,12 +47,19 @@ struct splash_data { +@@ -45,12 +47,19 @@ unsigned char *splash_boxes; unsigned char *splash_jpeg; /* jpeg */ unsigned char *splash_palette; /* palette for 8-bit */ @@ -1280,7 +1270,7 @@ Acked-by: Michal Marek --- a/include/linux/fb.h +++ b/include/linux/fb.h -@@ -861,9 +861,6 @@ struct fb_info { +@@ -877,9 +877,6 @@ void *par; #ifdef CONFIG_BOOTSPLASH struct splash_data *splash_data; diff --git a/patches.suse/cgroup-disable-memory.patch b/patches.suse/cgroup-disable-memory.patch deleted file mode 100644 index 18db6f0..0000000 --- a/patches.suse/cgroup-disable-memory.patch +++ /dev/null @@ -1,89 +0,0 @@ -From: Balbir Singh -Date: Thu, 01 May 2008 02:48:58 -0700 -Subject: memcg: disable the memory controller by default -Patch-mainline: not yet - -Due to the overhead of the memory controller the memory controller is now -disabled by default. This patch adds cgroup_enable. - -[akpm@linux-foundation.org: `inline __init' doesn't make sense] -Signed-off-by: Balbir Singh -Cc: AMAMOTO Takashi -Acked-by: Paul Menage -Cc: Pavel Emelianov -Acked-by: KAMEZAWA Hiroyuki -Cc: -Signed-off-by: Andrew Morton -Signed-off-by: Jiri Slaby ---- - - Documentation/kernel-parameters.txt | 3 +++ - kernel/cgroup.c | 17 +++++++++++++---- - mm/memcontrol.c | 1 + - 3 files changed, 17 insertions(+), 4 deletions(-) - ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -449,8 +449,11 @@ and is between 256 and 4096 characters. - See Documentation/s390/CommonIO for details. - - cgroup_disable= [KNL] Disable a particular controller -+ cgroup_enable= [KNL] Enable a particular controller -+ For both cgroup_enable and cgroup_enable - Format: {name of the controller(s) to disable} - {Currently supported controllers - "memory"} -+ {Memory controller is disabled by default} - - checkreqprot [SELINUX] Set initial checkreqprot flag value. - Format: { "0" | "1" } ---- a/kernel/cgroup.c -+++ b/kernel/cgroup.c -@@ -4394,7 +4394,7 @@ static void cgroup_release_agent(struct - mutex_unlock(&cgroup_mutex); - } - --static int __init cgroup_disable(char *str) -+static int __init cgroup_turnonoff(char *str, int disable) - { - int i; - char *token; -@@ -4410,17 +4410,26 @@ static int __init cgroup_disable(char *s - struct cgroup_subsys *ss = subsys[i]; - - if (!strcmp(token, ss->name)) { -- ss->disabled = 1; -- printk(KERN_INFO "Disabling %s control group" -- " subsystem\n", ss->name); -+ ss->disabled = disable; - break; - } - } - } - return 1; - } -+ -+static int __init cgroup_disable(char *str) -+{ -+ return cgroup_turnonoff(str, 1); -+} - __setup("cgroup_disable=", cgroup_disable); - -+static int __init cgroup_enable(char *str) -+{ -+ return cgroup_turnonoff(str, 0); -+} -+__setup("cgroup_enable=", cgroup_enable); -+ - /* - * Functons for CSS ID. 
- */ ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -4343,6 +4343,7 @@ struct cgroup_subsys mem_cgroup_subsys = - .attach = mem_cgroup_move_task, - .early_init = 0, - .use_id = 1, -+ .disabled = 1, - }; - - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP diff --git a/patches.suse/connector-read-mostly b/patches.suse/connector-read-mostly index ceaf851..1cefe96 100644 --- a/patches.suse/connector-read-mostly +++ b/patches.suse/connector-read-mostly @@ -12,7 +12,7 @@ Acked-by: Jeff Mahoney --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c -@@ -34,7 +34,7 @@ +@@ -35,7 +35,7 @@ #define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event)) diff --git a/patches.suse/crasher-26.diff b/patches.suse/crasher-26.diff index 77b5555..74572be 100644 --- a/patches.suse/crasher-26.diff +++ b/patches.suse/crasher-26.diff @@ -10,9 +10,9 @@ Patch-mainline: probably never --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig -@@ -1113,5 +1113,10 @@ config DEVPORT - - source "drivers/s390/char/Kconfig" +@@ -1129,5 +1129,10 @@ config RAMOOPS + This enables panic and oops messages to be logged to a circular + buffer in RAM where it can be read back at some later point. +config CRASHER + tristate "Crasher Module" @@ -23,14 +23,14 @@ Patch-mainline: probably never --- a/drivers/char/Makefile +++ b/drivers/char/Makefile -@@ -105,6 +105,7 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/ +@@ -108,6 +108,7 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o obj-$(CONFIG_TCG_TPM) += tpm/ +obj-$(CONFIG_CRASHER) += crasher.o obj-$(CONFIG_PS3_FLASH) += ps3flash.o - + obj-$(CONFIG_RAMOOPS) += ramoops.o --- /dev/null +++ b/drivers/char/crasher.c @@ -0,0 +1,228 @@ diff --git a/patches.suse/dm-emulate-blkrrpart-ioctl b/patches.suse/dm-emulate-blkrrpart-ioctl index 16e84ff..395d483 100644 --- a/patches.suse/dm-emulate-blkrrpart-ioctl +++ b/patches.suse/dm-emulate-blkrrpart-ioctl @@ -15,7 +15,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/md/dm.c +++ b/drivers/md/dm.c -@@ -409,19 +409,25 @@ static int dm_blk_ioctl(struct block_dev +@@ -421,19 +421,25 @@ static int dm_blk_ioctl(struct block_dev if (!map || !dm_table_get_size(map)) goto out; diff --git a/patches.suse/dm-mpath-accept-failed-paths b/patches.suse/dm-mpath-accept-failed-paths index 7494f1d..58b6e63 100644 --- a/patches.suse/dm-mpath-accept-failed-paths +++ b/patches.suse/dm-mpath-accept-failed-paths @@ -106,7 +106,7 @@ Signed-off-by: Hannes Reinecke return p; bad: -@@ -976,7 +1011,7 @@ static int fail_path(struct pgpath *pgpa +@@ -978,7 +1013,7 @@ static int fail_path(struct pgpath *pgpa if (!pgpath->is_active) goto out; @@ -115,7 +115,7 @@ Signed-off-by: Hannes Reinecke pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); pgpath->is_active = 0; -@@ -988,7 +1023,7 @@ static int fail_path(struct pgpath *pgpa +@@ -990,7 +1025,7 @@ static int fail_path(struct pgpath *pgpa m->current_pgpath = NULL; dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, @@ -124,7 +124,7 @@ Signed-off-by: Hannes Reinecke schedule_work(&m->trigger_event); queue_work(kmultipathd, &pgpath->deactivate_path); -@@ -1013,6 +1048,12 @@ static int reinstate_path(struct pgpath +@@ -1015,6 +1050,12 @@ static int reinstate_path(struct pgpath if (pgpath->is_active) goto out; @@ -137,7 +137,7 @@ Signed-off-by: Hannes Reinecke if (!pgpath->pg->ps.type->reinstate_path) { DMWARN("Reinstate path not supported by path selector %s", pgpath->pg->ps.type->name); -@@ -1035,7 +1076,7 @@ static int reinstate_path(struct pgpath +@@ -1037,7 +1078,7 @@ static 
int reinstate_path(struct pgpath } dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, @@ -146,7 +146,7 @@ Signed-off-by: Hannes Reinecke schedule_work(&m->trigger_event); -@@ -1055,6 +1096,9 @@ static int action_dev(struct multipath * +@@ -1057,6 +1098,9 @@ static int action_dev(struct multipath * struct pgpath *pgpath; struct priority_group *pg; @@ -156,7 +156,7 @@ Signed-off-by: Hannes Reinecke list_for_each_entry(pg, &m->priority_groups, list) { list_for_each_entry(pgpath, &pg->pgpaths, list) { if (pgpath->path.dev == dev) -@@ -1239,8 +1283,9 @@ static void activate_path(struct work_st +@@ -1241,8 +1285,9 @@ static void activate_path(struct work_st struct pgpath *pgpath = container_of(work, struct pgpath, activate_path); @@ -168,7 +168,7 @@ Signed-off-by: Hannes Reinecke } /* -@@ -1415,7 +1460,7 @@ static int multipath_status(struct dm_ta +@@ -1426,7 +1471,7 @@ static int multipath_status(struct dm_ta pg->ps.type->info_args); list_for_each_entry(p, &pg->pgpaths, list) { @@ -177,7 +177,7 @@ Signed-off-by: Hannes Reinecke p->is_active ? "A" : "F", p->fail_count); if (pg->ps.type->status) -@@ -1441,7 +1486,7 @@ static int multipath_status(struct dm_ta +@@ -1452,7 +1497,7 @@ static int multipath_status(struct dm_ta pg->ps.type->table_args); list_for_each_entry(p, &pg->pgpaths, list) { @@ -186,7 +186,7 @@ Signed-off-by: Hannes Reinecke if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, &p->path, type, result + sz, -@@ -1533,7 +1578,7 @@ static int multipath_ioctl(struct dm_tar +@@ -1544,7 +1589,7 @@ static int multipath_ioctl(struct dm_tar if (!m->current_pgpath) __choose_pgpath(m, 0); @@ -207,7 +207,7 @@ Signed-off-by: Hannes Reinecke }; --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c -@@ -538,9 +538,12 @@ int dm_get_device(struct dm_target *ti, +@@ -541,9 +541,12 @@ int dm_get_device(struct dm_target *ti, */ void dm_put_device(struct dm_target *ti, struct dm_dev *d) { diff --git a/patches.suse/dm-mpath-evaluate-request-result-and-sense b/patches.suse/dm-mpath-evaluate-request-result-and-sense index e518e22..9d91a57 100644 --- a/patches.suse/dm-mpath-evaluate-request-result-and-sense +++ b/patches.suse/dm-mpath-evaluate-request-result-and-sense @@ -41,7 +41,7 @@ Signed-off-by: Hannes Reinecke }; typedef int (*action_fn) (struct pgpath *pgpath); -@@ -995,6 +997,9 @@ static int multipath_map(struct dm_targe +@@ -997,6 +999,9 @@ static int multipath_map(struct dm_targe map_context->ptr = mpio; clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; @@ -51,7 +51,7 @@ Signed-off-by: Hannes Reinecke r = map_io(m, clone, mpio, 0); if (r < 0 || r == DM_MAPIO_REQUEUE) mempool_free(mpio, m->mpio_pool); -@@ -1293,6 +1298,44 @@ static void activate_path(struct work_st +@@ -1295,6 +1300,44 @@ static void activate_path(struct work_st } /* @@ -96,7 +96,7 @@ Signed-off-by: Hannes Reinecke * end_io handling */ static int do_end_io(struct multipath *m, struct request *clone, -@@ -1318,6 +1361,10 @@ static int do_end_io(struct multipath *m +@@ -1320,6 +1363,10 @@ static int do_end_io(struct multipath *m if (error == -EOPNOTSUPP) return error; @@ -104,10 +104,10 @@ Signed-off-by: Hannes Reinecke + if (r != DM_ENDIO_REQUEUE) + return r; + - if (mpio->pgpath) - fail_path(mpio->pgpath); - -@@ -1344,6 +1391,10 @@ static int multipath_end_io(struct dm_ta + if (clone->cmd_flags & REQ_DISCARD) + /* + * Pass all discard request failures up. 
+@@ -1355,6 +1402,10 @@ static int multipath_end_io(struct dm_ta if (ps->type->end_io) ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); } @@ -124,7 +124,7 @@ Signed-off-by: Hannes Reinecke sense_deferred = scsi_sense_is_deferred(&sshdr); } -- if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ +- if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ - req->errors = result; - if (result) { - if (sense_valid && req->sense) { @@ -135,12 +135,6 @@ Signed-off-by: Hannes Reinecke + req->errors = result; + if (sense_valid && req->sense) { + int len = 8 + cmd->sense_buffer[7]; -+ -+ if (len > SCSI_SENSE_BUFFERSIZE) -+ len = SCSI_SENSE_BUFFERSIZE; -+ memcpy(req->sense, cmd->sense_buffer, len); -+ req->sense_len = len; -+ } - if (len > SCSI_SENSE_BUFFERSIZE) - len = SCSI_SENSE_BUFFERSIZE; @@ -150,7 +144,13 @@ Signed-off-by: Hannes Reinecke - if (!sense_deferred) - error = -EIO; - } -+ if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ ++ if (len > SCSI_SENSE_BUFFERSIZE) ++ len = SCSI_SENSE_BUFFERSIZE; ++ memcpy(req->sense, cmd->sense_buffer, len); ++ req->sense_len = len; ++ } ++ ++ if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ + if ((result) && (!sense_deferred)) + error = -EIO; diff --git a/patches.suse/dm-mpath-leastpending-path-update b/patches.suse/dm-mpath-leastpending-path-update index 78bd6cd..88d84e4 100644 --- a/patches.suse/dm-mpath-leastpending-path-update +++ b/patches.suse/dm-mpath-leastpending-path-update @@ -29,7 +29,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/md/Makefile +++ b/drivers/md/Makefile -@@ -37,7 +37,7 @@ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o +@@ -29,7 +29,7 @@ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o obj-$(CONFIG_DM_DELAY) += dm-delay.o diff --git a/patches.suse/dm-mpath-no-activate-for-offlined-paths b/patches.suse/dm-mpath-no-activate-for-offlined-paths index 58a6486..cc885dd 100644 --- a/patches.suse/dm-mpath-no-activate-for-offlined-paths +++ b/patches.suse/dm-mpath-no-activate-for-offlined-paths @@ -17,7 +17,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c -@@ -1228,8 +1228,9 @@ static void pg_init_done(void *data, int +@@ -1230,8 +1230,9 @@ static void pg_init_done(void *data, int errors = 0; break; } @@ -29,7 +29,7 @@ Signed-off-by: Hannes Reinecke /* * Fail path for now, so we do not ping pong */ -@@ -1242,6 +1243,10 @@ static void pg_init_done(void *data, int +@@ -1244,6 +1245,10 @@ static void pg_init_done(void *data, int */ bypass_pg(m, pg, 1); break; @@ -37,10 +37,10 @@ Signed-off-by: Hannes Reinecke + DMWARN("Device %s offlined.", pgpath->path.pdev); + errors = 0; + break; - /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */ case SCSI_DH_RETRY: - case SCSI_DH_IMM_RETRY: -@@ -1262,7 +1267,8 @@ static void pg_init_done(void *data, int + /* Wait before retrying. 
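/*
 * Illustrative aside on the scsi_io_completion() hunk quoted above (not part
 * of any patch in this series; the helper name is made up): the sense-buffer
 * copy that the hunk moves out of the BLOCK_PC-only branch boils down to the
 * following. Byte 7 of fixed-format sense data is the "additional sense
 * length", so 8 header bytes plus that value are copied, clamped to
 * SCSI_SENSE_BUFFERSIZE.
 */
static inline unsigned int sense_copy_len(const unsigned char *sense_buffer)
{
	unsigned int len = 8 + sense_buffer[7];

	return len > SCSI_SENSE_BUFFERSIZE ? SCSI_SENSE_BUFFERSIZE : len;
}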
*/ + delay_retry = 1; +@@ -1264,7 +1269,8 @@ static void pg_init_done(void *data, int spin_lock_irqsave(&m->lock, flags); if (errors) { if (pgpath == m->current_pgpath) { @@ -52,7 +52,7 @@ Signed-off-by: Hannes Reinecke } --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c -@@ -411,14 +411,18 @@ static int upgrade_mode(struct dm_dev_in +@@ -414,14 +414,18 @@ static int upgrade_mode(struct dm_dev_in dd_new = dd_old = *dd; @@ -74,7 +74,7 @@ Signed-off-by: Hannes Reinecke close_dev(&dd_old, md); return 0; -@@ -480,7 +484,7 @@ static int __table_get_device(struct dm_ +@@ -483,7 +487,7 @@ static int __table_get_device(struct dm_ atomic_set(&dd->count, 0); list_add(&dd->list, &t->devices); diff --git a/patches.suse/dm-mpath-no-partitions-feature b/patches.suse/dm-mpath-no-partitions-feature index 2acdfbb..a12a696 100644 --- a/patches.suse/dm-mpath-no-partitions-feature +++ b/patches.suse/dm-mpath-no-partitions-feature @@ -31,14 +31,14 @@ Signed-off-by: Hannes Reinecke struct multipath { struct list_head list; @@ -83,6 +85,7 @@ struct multipath { - unsigned saved_queue_if_no_path;/* Saved state during suspension */ unsigned pg_init_retries; /* Number of times to retry pg_init */ unsigned pg_init_count; /* Number of times pg_init called */ + unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ + unsigned features; /* Additional selected features */ struct work_struct process_queued_ios; struct list_head queued_ios; -@@ -851,6 +854,10 @@ static int parse_features(struct arg_set +@@ -852,6 +855,10 @@ static int parse_features(struct arg_set continue; } @@ -49,12 +49,12 @@ Signed-off-by: Hannes Reinecke if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && (argc >= 1)) { r = read_param(_params + 1, shift(as), -@@ -1475,11 +1482,14 @@ static int multipath_status(struct dm_ta - DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); +@@ -1486,11 +1493,14 @@ static int multipath_status(struct dm_ta else { DMEMIT("%u ", m->queue_if_no_path + -- (m->pg_init_retries > 0) * 2); -+ (m->pg_init_retries > 0) * 2 + + (m->pg_init_retries > 0) * 2 + +- (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2); ++ (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + + (m->features & FEATURE_NO_PARTITIONS)); if (m->queue_if_no_path) DMEMIT("queue_if_no_path "); @@ -62,6 +62,6 @@ Signed-off-by: Hannes Reinecke DMEMIT("pg_init_retries %u ", m->pg_init_retries); + if (m->features & FEATURE_NO_PARTITIONS) + DMEMIT("no_partitions "); + if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) + DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); } - - if (!m->hw_handler_name || type == STATUSTYPE_INFO) diff --git a/patches.suse/dm-mpath-null-pgs b/patches.suse/dm-mpath-null-pgs index 9de800b..09902d0 100644 --- a/patches.suse/dm-mpath-null-pgs +++ b/patches.suse/dm-mpath-null-pgs @@ -14,7 +14,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c -@@ -869,8 +869,8 @@ static int multipath_ctr(struct dm_targe +@@ -870,8 +870,8 @@ static int multipath_ctr(struct dm_targe { /* target parameters */ static struct param _params[] = { diff --git a/patches.suse/dm-raid45-26-Nov-2009.patch b/patches.suse/dm-raid45-26-Nov-2009.patch new file mode 100644 index 0000000..fe0af4f --- /dev/null +++ b/patches.suse/dm-raid45-26-Nov-2009.patch @@ -0,0 +1,5286 @@ +From: Heinz Mauelshagen +Subject: DMRAID45 module +Patch-mainline: No. 
https://www.redhat.com/archives/dm-devel/2009-November/msg00270.html +References: bnc#615906 + bnc#565962 + +X-URL: http://people.redhat.com/~heinzm/sw/dm/dm-raid45/ +Patch-mainline: No. https://www.redhat.com/archives/dm-devel/2009-November/msg00270.html +Date: Thu, 26 Nov 2009 14:45:12 +0100 + + DM-RAID 45 module. + + This driver is used for "Fake RAID" devices. + +Signed-off-by: Heinz Mauelshagen +Signed-off-by: Nikanth Karthikesan + +--- + drivers/md/Kconfig | 9 + drivers/md/Makefile | 1 + drivers/md/dm-memcache.c | 302 ++ + drivers/md/dm-memcache.h | 68 + drivers/md/dm-raid45.c | 4721 +++++++++++++++++++++++++++++++++++++++++ + drivers/md/dm-raid45.h | 30 + drivers/md/dm-region-hash.c | 21 + drivers/md/dm.c | 1 + include/linux/dm-region-hash.h | 4 + 9 files changed, 5154 insertions(+), 3 deletions(-) + +--- a/drivers/md/Kconfig ++++ b/drivers/md/Kconfig +@@ -321,6 +321,15 @@ config DM_DELAY + + If unsure, say N. + ++config DM_RAID45 ++ tristate "RAID 4/5 target (EXPERIMENTAL)" ++ depends on BLK_DEV_DM && EXPERIMENTAL ++ select ASYNC_XOR ++ ---help--- ++ A target that supports RAID4 and RAID5 mappings. ++ ++ If unsure, say N. ++ + config DM_UEVENT + bool "DM uevents (EXPERIMENTAL)" + depends on BLK_DEV_DM && EXPERIMENTAL +--- a/drivers/md/Makefile ++++ b/drivers/md/Makefile +@@ -37,6 +37,7 @@ obj-$(CONFIG_DM_MIRROR) += dm-mirror.o + obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o + obj-$(CONFIG_DM_ZERO) += dm-zero.o + obj-$(CONFIG_DM_RAID) += dm-raid.o ++obj-$(CONFIG_DM_RAID45) += dm-raid45.o dm-log.o dm-memcache.o + + ifeq ($(CONFIG_DM_UEVENT),y) + dm-mod-objs += dm-uevent.o +--- /dev/null ++++ b/drivers/md/dm-memcache.c +@@ -0,0 +1,302 @@ ++/* ++ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. ++ * ++ * Module Author: Heinz Mauelshagen ++ * ++ * Device-mapper memory object handling: ++ * ++ * o allocate/free total_pages in a per client page pool. ++ * ++ * o allocate/free memory objects with chunks (1..n) of ++ * pages_per_chunk pages hanging off. ++ * ++ * This file is released under the GPL. ++ */ ++ ++#define DM_MEM_CACHE_VERSION "0.2" ++ ++#include "dm.h" ++#include "dm-memcache.h" ++#include ++#include ++ ++struct dm_mem_cache_client { ++ spinlock_t lock; ++ mempool_t *objs_pool; ++ struct page_list *free_list; ++ unsigned objects; ++ unsigned chunks; ++ unsigned pages_per_chunk; ++ unsigned free_pages; ++ unsigned total_pages; ++}; ++ ++/* ++ * Free pages and page_list elements of client. ++ */ ++static void free_cache_pages(struct page_list *list) ++{ ++ while (list) { ++ struct page_list *pl = list; ++ ++ list = pl->next; ++ BUG_ON(!pl->page); ++ __free_page(pl->page); ++ kfree(pl); ++ } ++} ++ ++/* ++ * Alloc number of pages and page_list elements as required by client. ++ */ ++static struct page_list *alloc_cache_pages(unsigned pages) ++{ ++ struct page_list *pl, *ret = NULL; ++ struct page *page; ++ ++ while (pages--) { ++ page = alloc_page(GFP_NOIO); ++ if (!page) ++ goto err; ++ ++ pl = kmalloc(sizeof(*pl), GFP_NOIO); ++ if (!pl) { ++ __free_page(page); ++ goto err; ++ } ++ ++ pl->page = page; ++ pl->next = ret; ++ ret = pl; ++ } ++ ++ return ret; ++ ++err: ++ free_cache_pages(ret); ++ return NULL; ++} ++ ++/* ++ * Allocate page_list elements from the pool to chunks of the memory object. 
++ */ ++static void alloc_chunks(struct dm_mem_cache_client *cl, ++ struct dm_mem_cache_object *obj) ++{ ++ unsigned chunks = cl->chunks; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ local_irq_disable(); ++ while (chunks--) { ++ unsigned p = cl->pages_per_chunk; ++ ++ obj[chunks].pl = NULL; ++ ++ while (p--) { ++ struct page_list *pl; ++ ++ /* Take next element from free list */ ++ spin_lock(&cl->lock); ++ pl = cl->free_list; ++ BUG_ON(!pl); ++ cl->free_list = pl->next; ++ spin_unlock(&cl->lock); ++ ++ pl->next = obj[chunks].pl; ++ obj[chunks].pl = pl; ++ } ++ } ++ ++ local_irq_restore(flags); ++} ++ ++/* ++ * Free page_list elements putting them back onto free list ++ */ ++static void free_chunks(struct dm_mem_cache_client *cl, ++ struct dm_mem_cache_object *obj) ++{ ++ unsigned chunks = cl->chunks; ++ unsigned long flags; ++ struct page_list *next, *pl; ++ ++ local_irq_save(flags); ++ local_irq_disable(); ++ while (chunks--) { ++ for (pl = obj[chunks].pl; pl; pl = next) { ++ next = pl->next; ++ ++ spin_lock(&cl->lock); ++ pl->next = cl->free_list; ++ cl->free_list = pl; ++ cl->free_pages++; ++ spin_unlock(&cl->lock); ++ } ++ } ++ ++ local_irq_restore(flags); ++} ++ ++/* ++ * Create/destroy dm memory cache client resources. ++ */ ++struct dm_mem_cache_client * ++dm_mem_cache_client_create(unsigned objects, unsigned chunks, ++ unsigned pages_per_chunk) ++{ ++ unsigned total_pages = objects * chunks * pages_per_chunk; ++ struct dm_mem_cache_client *client; ++ ++ BUG_ON(!total_pages); ++ client = kzalloc(sizeof(*client), GFP_KERNEL); ++ if (!client) ++ return ERR_PTR(-ENOMEM); ++ ++ client->objs_pool = mempool_create_kmalloc_pool(objects, ++ chunks * sizeof(struct dm_mem_cache_object)); ++ if (!client->objs_pool) ++ goto err; ++ ++ client->free_list = alloc_cache_pages(total_pages); ++ if (!client->free_list) ++ goto err1; ++ ++ spin_lock_init(&client->lock); ++ client->objects = objects; ++ client->chunks = chunks; ++ client->pages_per_chunk = pages_per_chunk; ++ client->free_pages = client->total_pages = total_pages; ++ return client; ++ ++err1: ++ mempool_destroy(client->objs_pool); ++err: ++ kfree(client); ++ return ERR_PTR(-ENOMEM); ++} ++EXPORT_SYMBOL(dm_mem_cache_client_create); ++ ++void dm_mem_cache_client_destroy(struct dm_mem_cache_client *cl) ++{ ++ BUG_ON(cl->free_pages != cl->total_pages); ++ free_cache_pages(cl->free_list); ++ mempool_destroy(cl->objs_pool); ++ kfree(cl); ++} ++EXPORT_SYMBOL(dm_mem_cache_client_destroy); ++ ++/* ++ * Grow a clients cache by an amount of pages. ++ * ++ * Don't call from interrupt context! 
++ */ ++int dm_mem_cache_grow(struct dm_mem_cache_client *cl, unsigned objects) ++{ ++ unsigned pages = objects * cl->chunks * cl->pages_per_chunk; ++ struct page_list *pl, *last; ++ ++ BUG_ON(!pages); ++ pl = alloc_cache_pages(pages); ++ if (!pl) ++ return -ENOMEM; ++ ++ last = pl; ++ while (last->next) ++ last = last->next; ++ ++ spin_lock_irq(&cl->lock); ++ last->next = cl->free_list; ++ cl->free_list = pl; ++ cl->free_pages += pages; ++ cl->total_pages += pages; ++ cl->objects += objects; ++ spin_unlock_irq(&cl->lock); ++ ++ mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO); ++ return 0; ++} ++EXPORT_SYMBOL(dm_mem_cache_grow); ++ ++/* Shrink a clients cache by an amount of pages */ ++int dm_mem_cache_shrink(struct dm_mem_cache_client *cl, unsigned objects) ++{ ++ int r; ++ unsigned pages = objects * cl->chunks * cl->pages_per_chunk, p = pages; ++ unsigned long flags; ++ struct page_list *last = NULL, *pl, *pos; ++ ++ BUG_ON(!pages); ++ ++ spin_lock_irqsave(&cl->lock, flags); ++ pl = pos = cl->free_list; ++ while (p-- && pos->next) { ++ last = pos; ++ pos = pos->next; ++ } ++ ++ if (++p) ++ r = -ENOMEM; ++ else { ++ r = 0; ++ cl->free_list = pos; ++ cl->free_pages -= pages; ++ cl->total_pages -= pages; ++ cl->objects -= objects; ++ last->next = NULL; ++ } ++ spin_unlock_irqrestore(&cl->lock, flags); ++ ++ if (!r) { ++ free_cache_pages(pl); ++ mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO); ++ } ++ ++ return r; ++} ++EXPORT_SYMBOL(dm_mem_cache_shrink); ++ ++/* ++ * Allocate/free a memory object ++ * ++ * Can be called from interrupt context ++ */ ++struct dm_mem_cache_object *dm_mem_cache_alloc(struct dm_mem_cache_client *cl) ++{ ++ int r = 0; ++ unsigned pages = cl->chunks * cl->pages_per_chunk; ++ unsigned long flags; ++ struct dm_mem_cache_object *obj; ++ ++ obj = mempool_alloc(cl->objs_pool, GFP_NOIO); ++ if (!obj) ++ return ERR_PTR(-ENOMEM); ++ ++ spin_lock_irqsave(&cl->lock, flags); ++ if (pages > cl->free_pages) ++ r = -ENOMEM; ++ else ++ cl->free_pages -= pages; ++ spin_unlock_irqrestore(&cl->lock, flags); ++ ++ if (r) { ++ mempool_free(obj, cl->objs_pool); ++ return ERR_PTR(r); ++ } ++ ++ alloc_chunks(cl, obj); ++ return obj; ++} ++EXPORT_SYMBOL(dm_mem_cache_alloc); ++ ++void dm_mem_cache_free(struct dm_mem_cache_client *cl, ++ struct dm_mem_cache_object *obj) ++{ ++ free_chunks(cl, obj); ++ mempool_free(obj, cl->objs_pool); ++} ++EXPORT_SYMBOL(dm_mem_cache_free); ++ ++MODULE_DESCRIPTION(DM_NAME " dm memory cache"); ++MODULE_AUTHOR("Heinz Mauelshagen "); ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/drivers/md/dm-memcache.h +@@ -0,0 +1,68 @@ ++/* ++ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. ++ * ++ * Module Author: Heinz Mauelshagen ++ * ++ * Device-mapper memory object handling: ++ * ++ * o allocate/free total_pages in a per client page pool. ++ * ++ * o allocate/free memory objects with chunks (1..n) of ++ * pages_per_chunk pages hanging off. ++ * ++ * This file is released under the GPL. ++ */ ++ ++#ifndef _DM_MEM_CACHE_H ++#define _DM_MEM_CACHE_H ++ ++#define DM_MEM_CACHE_H_VERSION "0.1" ++ ++#include "dm.h" ++#include ++ ++static inline struct page_list *pl_elem(struct page_list *pl, unsigned p) ++{ ++ while (pl && p--) ++ pl = pl->next; ++ ++ return pl; ++} ++ ++struct dm_mem_cache_object { ++ struct page_list *pl; /* Dynamically allocated array */ ++ void *private; /* Caller context reference */ ++}; ++ ++struct dm_mem_cache_client; ++ ++/* ++ * Create/destroy dm memory cache client resources. 
++ * ++ * On creation, a number of @objects with @chunks of ++ * @pages_per_chunk pages will be allocated. ++ */ ++struct dm_mem_cache_client * ++dm_mem_cache_client_create(unsigned objects, unsigned chunks, ++ unsigned pages_per_chunk); ++void dm_mem_cache_client_destroy(struct dm_mem_cache_client *client); ++ ++/* ++ * Grow/shrink a dm memory cache client resources ++ * by @objetcs amount of objects. ++ */ ++int dm_mem_cache_grow(struct dm_mem_cache_client *client, unsigned objects); ++int dm_mem_cache_shrink(struct dm_mem_cache_client *client, unsigned objects); ++ ++/* ++ * Allocate/free a memory object ++ * ++ * On allocation one object with an amount of chunks and ++ * an amount of pages per chunk will be returned on success. ++ */ ++struct dm_mem_cache_object * ++dm_mem_cache_alloc(struct dm_mem_cache_client *client); ++void dm_mem_cache_free(struct dm_mem_cache_client *client, ++ struct dm_mem_cache_object *object); ++ ++#endif +--- /dev/null ++++ b/drivers/md/dm-raid45.c +@@ -0,0 +1,4721 @@ ++/* ++ * Copyright (C) 2005-2009 Red Hat, Inc. All rights reserved. ++ * ++ * Module Author: Heinz Mauelshagen ++ * ++ * This file is released under the GPL. ++ * ++ * ++ * Linux 2.6 Device Mapper RAID4 and RAID5 target. ++ * ++ * Tested-by: Intel; Marcin.Labun@intel.com, krzysztof.wojcik@intel.com ++ * ++ * ++ * Supports the following ATARAID vendor solutions (and SNIA DDF): ++ * ++ * Adaptec HostRAID ASR ++ * SNIA DDF1 ++ * Hiphpoint 37x ++ * Hiphpoint 45x ++ * Intel IMSM ++ * Jmicron ATARAID ++ * LSI Logic MegaRAID ++ * NVidia RAID ++ * Promise FastTrack ++ * Silicon Image Medley ++ * VIA Software RAID ++ * ++ * via the dmraid application. ++ * ++ * ++ * Features: ++ * ++ * o RAID4 with dedicated and selectable parity device ++ * o RAID5 with rotating parity (left+right, symmetric+asymmetric) ++ * o recovery of out of sync device for initial ++ * RAID set creation or after dead drive replacement ++ * o run time optimization of xor algorithm used to calculate parity ++ * ++ * ++ * Thanks to MD for: ++ * o the raid address calculation algorithm ++ * o the base of the biovec <-> page list copier. ++ * ++ * ++ * Uses region hash to keep track of how many writes are in flight to ++ * regions in order to use dirty log to keep state of regions to recover: ++ * ++ * o clean regions (those which are synchronized ++ * and don't have write io in flight) ++ * o dirty regions (those with write io in flight) ++ * ++ * ++ * On startup, any dirty regions are migrated to the ++ * 'nosync' state and are subject to recovery by the daemon. ++ * ++ * See raid_ctr() for table definition. ++ * ++ * ANALYZEME: recovery bandwidth ++ */ ++ ++static const char *version = "v0.2597k"; ++ ++#include "dm.h" ++#include "dm-memcache.h" ++#include "dm-raid45.h" ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++ ++/* ++ * Configurable parameters ++ */ ++ ++/* Minimum/maximum and default # of selectable stripes. */ ++#define STRIPES_MIN 8 ++#define STRIPES_MAX 16384 ++#define STRIPES_DEFAULT 80 ++ ++/* Maximum and default chunk size in sectors if not set in constructor. */ ++#define CHUNK_SIZE_MIN 8 ++#define CHUNK_SIZE_MAX 16384 ++#define CHUNK_SIZE_DEFAULT 64 ++ ++/* Default io size in sectors if not set in constructor. */ ++#define IO_SIZE_MIN CHUNK_SIZE_MIN ++#define IO_SIZE_DEFAULT IO_SIZE_MIN ++ ++/* Recover io size default in sectors. 
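/*
 * Usage sketch for the dm-memcache interface above (illustrative only, not
 * part of the patch; function and variable names are made up): a client is
 * created for a fixed number of objects, each consisting of `chunks` chunks
 * of `pages_per_chunk` pre-allocated pages, and objects are then taken from
 * and returned to that pool.
 */
static int example_memcache_usage(void)
{
	struct dm_mem_cache_client *cl;
	struct dm_mem_cache_object *obj;

	/* 16 objects, 4 chunks per object, 2 pages per chunk. */
	cl = dm_mem_cache_client_create(16, 4, 2);
	if (IS_ERR(cl))
		return PTR_ERR(cl);

	obj = dm_mem_cache_alloc(cl);	/* may be called from interrupt context */
	if (IS_ERR(obj)) {
		dm_mem_cache_client_destroy(cl);
		return PTR_ERR(obj);
	}

	/* obj[0..3].pl each now point to a list of 2 pages. */

	dm_mem_cache_free(cl, obj);
	dm_mem_cache_client_destroy(cl);
	return 0;
}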
*/ ++#define RECOVER_IO_SIZE_MIN 64 ++#define RECOVER_IO_SIZE_DEFAULT 256 ++ ++/* Default, minimum and maximum percentage of recover io bandwidth. */ ++#define BANDWIDTH_DEFAULT 10 ++#define BANDWIDTH_MIN 1 ++#define BANDWIDTH_MAX 100 ++ ++/* # of parallel recovered regions */ ++#define RECOVERY_STRIPES_MIN 1 ++#define RECOVERY_STRIPES_MAX 64 ++#define RECOVERY_STRIPES_DEFAULT RECOVERY_STRIPES_MIN ++/* ++ * END Configurable parameters ++ */ ++ ++#define TARGET "dm-raid45" ++#define DAEMON "kraid45d" ++#define DM_MSG_PREFIX TARGET ++ ++#define SECTORS_PER_PAGE (PAGE_SIZE >> SECTOR_SHIFT) ++ ++/* Amount/size for __xor(). */ ++#define XOR_SIZE PAGE_SIZE ++ ++/* Ticks to run xor_speed() test for. */ ++#define XOR_SPEED_TICKS 5 ++ ++/* Check value in range. */ ++#define range_ok(i, min, max) (i >= min && i <= max) ++ ++/* Structure access macros. */ ++/* Derive raid_set from stripe_cache pointer. */ ++#define RS(x) container_of(x, struct raid_set, sc) ++ ++/* Page reference. */ ++#define PAGE(stripe, p) ((stripe)->obj[p].pl->page) ++ ++/* Stripe chunk reference. */ ++#define CHUNK(stripe, p) ((stripe)->chunk + p) ++ ++/* Bio list reference. */ ++#define BL(stripe, p, rw) (stripe->chunk[p].bl + rw) ++#define BL_CHUNK(chunk, rw) (chunk->bl + rw) ++ ++/* Page list reference. */ ++#define PL(stripe, p) (stripe->obj[p].pl) ++/* END: structure access macros. */ ++ ++/* Factor out to dm-bio-list.h */ ++static inline void bio_list_push(struct bio_list *bl, struct bio *bio) ++{ ++ bio->bi_next = bl->head; ++ bl->head = bio; ++ ++ if (!bl->tail) ++ bl->tail = bio; ++} ++ ++/* Factor out to dm.h */ ++#define TI_ERR_RET(str, ret) \ ++ do { ti->error = str; return ret; } while (0); ++#define TI_ERR(str) TI_ERR_RET(str, -EINVAL) ++ ++/* Macro to define access IO flags access inline functions. */ ++#define BITOPS(name, what, var, flag) \ ++static inline int TestClear ## name ## what(struct var *v) \ ++{ return test_and_clear_bit(flag, &v->io.flags); } \ ++static inline int TestSet ## name ## what(struct var *v) \ ++{ return test_and_set_bit(flag, &v->io.flags); } \ ++static inline void Clear ## name ## what(struct var *v) \ ++{ clear_bit(flag, &v->io.flags); } \ ++static inline void Set ## name ## what(struct var *v) \ ++{ set_bit(flag, &v->io.flags); } \ ++static inline int name ## what(struct var *v) \ ++{ return test_bit(flag, &v->io.flags); } ++ ++/*----------------------------------------------------------------- ++ * Stripe cache ++ * ++ * Cache for all reads and writes to raid sets (operational or degraded) ++ * ++ * We need to run all data to and from a RAID set through this cache, ++ * because parity chunks need to get calculated from data chunks ++ * or, in the degraded/resynchronization case, missing chunks need ++ * to be reconstructed using the other chunks of the stripe. ++ *---------------------------------------------------------------*/ ++/* Unique kmem cache name suffix # counter. */ ++static atomic_t _stripe_sc_nr = ATOMIC_INIT(-1); /* kmem cache # counter. */ ++ ++/* A chunk within a stripe (holds bios hanging off). */ ++/* IO status flags for chunks of a stripe. */ ++enum chunk_flags { ++ CHUNK_DIRTY, /* Pages of chunk dirty; need writing. */ ++ CHUNK_ERROR, /* IO error on any chunk page. */ ++ CHUNK_IO, /* Allow/prohibit IO on chunk pages. */ ++ CHUNK_LOCKED, /* Chunk pages locked during IO. */ ++ CHUNK_MUST_IO, /* Chunk must io. */ ++ CHUNK_UNLOCK, /* Enforce chunk unlock. */ ++ CHUNK_UPTODATE, /* Chunk pages are uptodate. 
*/ ++}; ++ ++#if READ != 0 || WRITE != 1 ++#error dm-raid45: READ/WRITE != 0/1 used as index!!! ++#endif ++ ++enum bl_type { ++ WRITE_QUEUED = WRITE + 1, ++ WRITE_MERGED, ++ NR_BL_TYPES, /* Must be last one! */ ++}; ++struct stripe_chunk { ++ atomic_t cnt; /* Reference count. */ ++ struct stripe *stripe; /* Backpointer to stripe for endio(). */ ++ /* Bio lists for reads, writes, and writes merged. */ ++ struct bio_list bl[NR_BL_TYPES]; ++ struct { ++ unsigned long flags; /* IO status flags. */ ++ } io; ++}; ++ ++/* Define chunk bit operations. */ ++BITOPS(Chunk, Dirty, stripe_chunk, CHUNK_DIRTY) ++BITOPS(Chunk, Error, stripe_chunk, CHUNK_ERROR) ++BITOPS(Chunk, Io, stripe_chunk, CHUNK_IO) ++BITOPS(Chunk, Locked, stripe_chunk, CHUNK_LOCKED) ++BITOPS(Chunk, MustIo, stripe_chunk, CHUNK_MUST_IO) ++BITOPS(Chunk, Unlock, stripe_chunk, CHUNK_UNLOCK) ++BITOPS(Chunk, Uptodate, stripe_chunk, CHUNK_UPTODATE) ++ ++/* ++ * Stripe linked list indexes. Keep order, because the stripe ++ * and the stripe cache rely on the first 3! ++ */ ++enum list_types { ++ LIST_FLUSH, /* Stripes to flush for io. */ ++ LIST_ENDIO, /* Stripes to endio. */ ++ LIST_LRU, /* Least recently used stripes. */ ++ SC_NR_LISTS, /* # of lists in stripe cache. */ ++ LIST_HASH = SC_NR_LISTS, /* Hashed stripes. */ ++ LIST_RECOVER = LIST_HASH, /* For recovery type stripes only. */ ++ STRIPE_NR_LISTS,/* To size array in struct stripe. */ ++}; ++ ++/* Adressing region recovery. */ ++struct recover_addr { ++ struct dm_region *reg; /* Actual region to recover. */ ++ sector_t pos; /* Position within region to recover. */ ++ sector_t end; /* End of region to recover. */ ++}; ++ ++/* A stripe: the io object to handle all reads and writes to a RAID set. */ ++struct stripe { ++ atomic_t cnt; /* Reference count. */ ++ struct stripe_cache *sc; /* Backpointer to stripe cache. */ ++ ++ /* ++ * 4 linked lists: ++ * o io list to flush io ++ * o endio list ++ * o LRU list to put stripes w/o reference count on ++ * o stripe cache hash ++ */ ++ struct list_head lists[STRIPE_NR_LISTS]; ++ ++ sector_t key; /* Hash key. */ ++ region_t region; /* Region stripe is mapped to. */ ++ ++ struct { ++ unsigned long flags; /* Stripe state flags (see below). */ ++ ++ /* ++ * Pending ios in flight: ++ * ++ * used to control move of stripe to endio list ++ */ ++ atomic_t pending; ++ ++ /* Sectors to read and write for multi page stripe sets. */ ++ unsigned size; ++ } io; ++ ++ /* Address region recovery. */ ++ struct recover_addr *recover; ++ ++ /* Lock on stripe (Future: for clustering). */ ++ void *lock; ++ ++ struct { ++ unsigned short parity; /* Parity chunk index. */ ++ short recover; /* Recovery chunk index. */ ++ } idx; ++ ++ /* ++ * This stripe's memory cache object (dm-mem-cache); ++ * i.e. the io chunk pages. ++ */ ++ struct dm_mem_cache_object *obj; ++ ++ /* Array of stripe sets (dynamically allocated). */ ++ struct stripe_chunk chunk[0]; ++}; ++ ++/* States stripes can be in (flags field). */ ++enum stripe_states { ++ STRIPE_ERROR, /* io error on stripe. */ ++ STRIPE_MERGED, /* Writes got merged to be written. */ ++ STRIPE_RBW, /* Read-before-write stripe. */ ++ STRIPE_RECONSTRUCT, /* Reconstruct of a missing chunk required. */ ++ STRIPE_RECONSTRUCTED, /* Reconstructed of a missing chunk. */ ++ STRIPE_RECOVER, /* Stripe used for RAID set recovery. */ ++}; ++ ++/* Define stripe bit operations. 
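/*
 * For reference (not part of the patch): one BITOPS() invocation from above,
 * e.g. BITOPS(Chunk, Dirty, stripe_chunk, CHUNK_DIRTY), expands to the five
 * inline helpers below, all operating atomically on chunk->io.flags.
 */
static inline int TestClearChunkDirty(struct stripe_chunk *v)
{ return test_and_clear_bit(CHUNK_DIRTY, &v->io.flags); }
static inline int TestSetChunkDirty(struct stripe_chunk *v)
{ return test_and_set_bit(CHUNK_DIRTY, &v->io.flags); }
static inline void ClearChunkDirty(struct stripe_chunk *v)
{ clear_bit(CHUNK_DIRTY, &v->io.flags); }
static inline void SetChunkDirty(struct stripe_chunk *v)
{ set_bit(CHUNK_DIRTY, &v->io.flags); }
static inline int ChunkDirty(struct stripe_chunk *v)
{ return test_bit(CHUNK_DIRTY, &v->io.flags); }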
*/ ++BITOPS(Stripe, Error, stripe, STRIPE_ERROR) ++BITOPS(Stripe, Merged, stripe, STRIPE_MERGED) ++BITOPS(Stripe, RBW, stripe, STRIPE_RBW) ++BITOPS(Stripe, Reconstruct, stripe, STRIPE_RECONSTRUCT) ++BITOPS(Stripe, Reconstructed, stripe, STRIPE_RECONSTRUCTED) ++BITOPS(Stripe, Recover, stripe, STRIPE_RECOVER) ++ ++/* A stripe hash. */ ++struct stripe_hash { ++ struct list_head *hash; ++ unsigned buckets; ++ unsigned mask; ++ unsigned prime; ++ unsigned shift; ++}; ++ ++enum sc_lock_types { ++ LOCK_ENDIO, /* Protect endio list. */ ++ NR_LOCKS, /* To size array in struct stripe_cache. */ ++}; ++ ++/* A stripe cache. */ ++struct stripe_cache { ++ /* Stripe hash. */ ++ struct stripe_hash hash; ++ ++ spinlock_t locks[NR_LOCKS]; /* Locks to protect lists. */ ++ ++ /* Stripes with io to flush, stripes to endio and LRU lists. */ ++ struct list_head lists[SC_NR_LISTS]; ++ ++ /* Slab cache to allocate stripes from. */ ++ struct { ++ struct kmem_cache *cache; /* Cache itself. */ ++ char name[32]; /* Unique name. */ ++ } kc; ++ ++ struct dm_io_client *dm_io_client; /* dm-io client resource context. */ ++ ++ /* dm-mem-cache client resource context. */ ++ struct dm_mem_cache_client *mem_cache_client; ++ ++ int stripes_parm; /* # stripes parameter from constructor. */ ++ atomic_t stripes; /* actual # of stripes in cache. */ ++ atomic_t stripes_to_set; /* # of stripes to resize cache to. */ ++ atomic_t stripes_last; /* last # of stripes in cache. */ ++ atomic_t active_stripes; /* actual # of active stripes in cache. */ ++ ++ /* REMOVEME: */ ++ atomic_t active_stripes_max; /* actual # of active stripes in cache. */ ++}; ++ ++/* Flag specs for raid_dev */ ; ++enum raid_dev_flags { ++ DEV_FAILED, /* Device failed. */ ++ DEV_IO_QUEUED, /* Io got queued to device. */ ++}; ++ ++/* The raid device in a set. */ ++struct raid_dev { ++ struct dm_dev *dev; ++ sector_t start; /* Offset to map to. */ ++ struct { /* Using struct to be able to BITOPS(). */ ++ unsigned long flags; /* raid_dev_flags. */ ++ } io; ++}; ++ ++BITOPS(Dev, Failed, raid_dev, DEV_FAILED) ++BITOPS(Dev, IoQueued, raid_dev, DEV_IO_QUEUED) ++ ++/* Flags spec for raid_set. */ ++enum raid_set_flags { ++ RS_CHECK_OVERWRITE, /* Check for chunk overwrites. */ ++ RS_DEAD, /* RAID set inoperational. */ ++ RS_DEAD_ENDIO_MESSAGE, /* RAID set dead endio one-off message. */ ++ RS_DEGRADED, /* Io errors on RAID device. */ ++ RS_DEVEL_STATS, /* REMOVEME: display status information. */ ++ RS_ENFORCE_PARITY_CREATION,/* Enforce parity creation. */ ++ RS_PROHIBIT_WRITES, /* Prohibit writes on device failure. */ ++ RS_RECOVER, /* Do recovery. */ ++ RS_RECOVERY_BANDWIDTH, /* Allow recovery bandwidth (delayed bios). */ ++ RS_SC_BUSY, /* Stripe cache busy -> send an event. */ ++ RS_SUSPEND, /* Suspend RAID set. */ ++}; ++ ++/* REMOVEME: devel stats counters. */ ++enum stats_types { ++ S_BIOS_READ, ++ S_BIOS_ADDED_READ, ++ S_BIOS_ENDIO_READ, ++ S_BIOS_WRITE, ++ S_BIOS_ADDED_WRITE, ++ S_BIOS_ENDIO_WRITE, ++ S_CAN_MERGE, ++ S_CANT_MERGE, ++ S_CONGESTED, ++ S_DM_IO_READ, ++ S_DM_IO_WRITE, ++ S_BANDWIDTH, ++ S_BARRIER, ++ S_BIO_COPY_PL_NEXT, ++ S_DEGRADED, ++ S_DELAYED_BIOS, ++ S_FLUSHS, ++ S_HITS_1ST, ++ S_IOS_POST, ++ S_INSCACHE, ++ S_MAX_LOOKUP, ++ S_CHUNK_LOCKED, ++ S_NO_BANDWIDTH, ++ S_NOT_CONGESTED, ++ S_NO_RW, ++ S_NOSYNC, ++ S_OVERWRITE, ++ S_PROHIBITCHUNKIO, ++ S_RECONSTRUCT_EI, ++ S_RECONSTRUCT_DEV, ++ S_RECONSTRUCT_SET, ++ S_RECONSTRUCTED, ++ S_REQUEUE, ++ S_STRIPE_ERROR, ++ S_SUM_DELAYED_BIOS, ++ S_XORS, ++ S_NR_STATS, /* # of stats counters. Must be last! 
*/ ++}; ++ ++/* Status type -> string mappings. */ ++struct stats_map { ++ const enum stats_types type; ++ const char *str; ++}; ++ ++static struct stats_map stats_map[] = { ++ { S_BIOS_READ, "r=" }, ++ { S_BIOS_ADDED_READ, "/" }, ++ { S_BIOS_ENDIO_READ, "/" }, ++ { S_BIOS_WRITE, " w=" }, ++ { S_BIOS_ADDED_WRITE, "/" }, ++ { S_BIOS_ENDIO_WRITE, "/" }, ++ { S_DM_IO_READ, " rc=" }, ++ { S_DM_IO_WRITE, " wc=" }, ++ { S_BANDWIDTH, "\nbw=" }, ++ { S_NO_BANDWIDTH, " no_bw=" }, ++ { S_BARRIER, "\nbarrier=" }, ++ { S_BIO_COPY_PL_NEXT, "\nbio_cp_next=" }, ++ { S_CAN_MERGE, "\nmerge=" }, ++ { S_CANT_MERGE, "/no_merge=" }, ++ { S_CHUNK_LOCKED, "\nchunk_locked=" }, ++ { S_CONGESTED, "\ncgst=" }, ++ { S_NOT_CONGESTED, "/not_cgst=" }, ++ { S_DEGRADED, "\ndegraded=" }, ++ { S_DELAYED_BIOS, "\ndel_bios=" }, ++ { S_SUM_DELAYED_BIOS, "/sum_del_bios=" }, ++ { S_FLUSHS, "\nflushs=" }, ++ { S_HITS_1ST, "\nhits_1st=" }, ++ { S_IOS_POST, " ios_post=" }, ++ { S_INSCACHE, " inscache=" }, ++ { S_MAX_LOOKUP, " maxlookup=" }, ++ { S_NO_RW, "\nno_rw=" }, ++ { S_NOSYNC, " nosync=" }, ++ { S_OVERWRITE, " ovr=" }, ++ { S_PROHIBITCHUNKIO, " prhbt_io=" }, ++ { S_RECONSTRUCT_EI, "\nrec_ei=" }, ++ { S_RECONSTRUCT_DEV, " rec_dev=" }, ++ { S_RECONSTRUCT_SET, " rec_set=" }, ++ { S_RECONSTRUCTED, " rec=" }, ++ { S_REQUEUE, " requeue=" }, ++ { S_STRIPE_ERROR, " stripe_err=" }, ++ { S_XORS, " xors=" }, ++}; ++ ++/* ++ * A RAID set. ++ */ ++#define dm_rh_client dm_region_hash ++enum count_type { IO_WORK = 0, IO_RECOVER, IO_NR_COUNT }; ++typedef void (*xor_function_t)(unsigned count, unsigned long **data); ++struct raid_set { ++ struct dm_target *ti; /* Target pointer. */ ++ ++ struct { ++ unsigned long flags; /* State flags. */ ++ struct mutex in_lock; /* Protects central input list below. */ ++ struct mutex xor_lock; /* Protects xor algorithm set. */ ++ struct bio_list in; /* Pending ios (central input list). */ ++ struct bio_list work; /* ios work set. */ ++ wait_queue_head_t suspendq; /* suspend synchronization. */ ++ atomic_t in_process; /* counter of queued bios (suspendq). */ ++ atomic_t in_process_max;/* counter of queued bios max. */ ++ ++ /* io work. */ ++ struct workqueue_struct *wq; ++ struct delayed_work dws_do_raid; /* For main worker. */ ++ struct work_struct ws_do_table_event; /* For event worker. */ ++ } io; ++ ++ /* Stripe locking abstraction. */ ++ struct dm_raid45_locking_type *locking; ++ ++ struct stripe_cache sc; /* Stripe cache for this set. */ ++ ++ /* Xor optimization. */ ++ struct { ++ struct xor_func *f; ++ unsigned chunks; ++ unsigned speed; ++ } xor; ++ ++ /* Recovery parameters. */ ++ struct recover { ++ struct dm_dirty_log *dl; /* Dirty log. */ ++ struct dm_rh_client *rh; /* Region hash. */ ++ ++ struct dm_io_client *dm_io_client; /* recovery dm-io client. */ ++ /* dm-mem-cache client resource context for recovery stripes. */ ++ struct dm_mem_cache_client *mem_cache_client; ++ ++ struct list_head stripes; /* List of recovery stripes. */ ++ ++ region_t nr_regions; ++ region_t nr_regions_to_recover; ++ region_t nr_regions_recovered; ++ unsigned long start_jiffies; ++ unsigned long end_jiffies; ++ ++ unsigned bandwidth; /* Recovery bandwidth [%]. */ ++ unsigned bandwidth_work; /* Recovery bandwidth [factor]. */ ++ unsigned bandwidth_parm; /* " constructor parm. */ ++ unsigned io_size; /* recovery io size <= region size. */ ++ unsigned io_size_parm; /* recovery io size ctr parameter. */ ++ unsigned recovery; /* Recovery allowed/prohibited. 
*/ ++ unsigned recovery_stripes; /* # of parallel recovery stripes. */ ++ ++ /* recovery io throttling. */ ++ atomic_t io_count[IO_NR_COUNT]; /* counter recover/regular io.*/ ++ unsigned long last_jiffies; ++ } recover; ++ ++ /* RAID set parameters. */ ++ struct { ++ struct raid_type *raid_type; /* RAID type (eg, RAID4). */ ++ unsigned raid_parms; /* # variable raid parameters. */ ++ ++ unsigned chunk_size; /* Sectors per chunk. */ ++ unsigned chunk_size_parm; ++ unsigned chunk_shift; /* rsector chunk size shift. */ ++ ++ unsigned io_size; /* Sectors per io. */ ++ unsigned io_size_parm; ++ unsigned io_mask; /* Mask for bio_copy_page_list(). */ ++ unsigned io_inv_mask; /* Mask for raid_address(). */ ++ ++ sector_t sectors_per_dev; /* Sectors per device. */ ++ ++ atomic_t failed_devs; /* Amount of devices failed. */ ++ ++ /* Index of device to initialize. */ ++ int dev_to_init; ++ int dev_to_init_parm; ++ ++ /* Raid devices dynamically allocated. */ ++ unsigned raid_devs; /* # of RAID devices below. */ ++ unsigned data_devs; /* # of RAID data devices. */ ++ ++ int ei; /* index of failed RAID device. */ ++ ++ /* Index of dedicated parity device (i.e. RAID4). */ ++ int pi; ++ int pi_parm; /* constructor parm for status output. */ ++ } set; ++ ++ /* REMOVEME: devel stats counters. */ ++ atomic_t stats[S_NR_STATS]; ++ ++ /* Dynamically allocated temporary pointers for xor(). */ ++ unsigned long **data; ++ ++ /* Dynamically allocated RAID devices. Alignment? */ ++ struct raid_dev dev[0]; ++}; ++ ++/* Define RAID set bit operations. */ ++BITOPS(RS, Bandwidth, raid_set, RS_RECOVERY_BANDWIDTH) ++BITOPS(RS, CheckOverwrite, raid_set, RS_CHECK_OVERWRITE) ++BITOPS(RS, Dead, raid_set, RS_DEAD) ++BITOPS(RS, DeadEndioMessage, raid_set, RS_DEAD_ENDIO_MESSAGE) ++BITOPS(RS, Degraded, raid_set, RS_DEGRADED) ++BITOPS(RS, DevelStats, raid_set, RS_DEVEL_STATS) ++BITOPS(RS, EnforceParityCreation, raid_set, RS_ENFORCE_PARITY_CREATION) ++BITOPS(RS, ProhibitWrites, raid_set, RS_PROHIBIT_WRITES) ++BITOPS(RS, Recover, raid_set, RS_RECOVER) ++BITOPS(RS, ScBusy, raid_set, RS_SC_BUSY) ++BITOPS(RS, Suspend, raid_set, RS_SUSPEND) ++#undef BITOPS ++ ++/*----------------------------------------------------------------- ++ * Raid-4/5 set structures. ++ *---------------------------------------------------------------*/ ++/* RAID level definitions. */ ++enum raid_level { ++ raid4, ++ raid5, ++}; ++ ++/* Symmetric/Asymmetric, Left/Right parity rotating algorithms. */ ++enum raid_algorithm { ++ none, ++ left_asym, ++ right_asym, ++ left_sym, ++ right_sym, ++}; ++ ++struct raid_type { ++ const char *name; /* RAID algorithm. */ ++ const char *descr; /* Descriptor text for logging. */ ++ const unsigned parity_devs; /* # of parity devices. */ ++ const unsigned minimal_devs; /* minimal # of devices in set. */ ++ const enum raid_level level; /* RAID level. */ ++ const enum raid_algorithm algorithm; /* RAID algorithm. */ ++}; ++ ++/* Supported raid types and properties. */ ++static struct raid_type raid_types[] = { ++ {"raid4", "RAID4 (dedicated parity disk)", 1, 3, raid4, none}, ++ {"raid5_la", "RAID5 (left asymmetric)", 1, 3, raid5, left_asym}, ++ {"raid5_ra", "RAID5 (right asymmetric)", 1, 3, raid5, right_asym}, ++ {"raid5_ls", "RAID5 (left symmetric)", 1, 3, raid5, left_sym}, ++ {"raid5_rs", "RAID5 (right symmetric)", 1, 3, raid5, right_sym}, ++}; ++ ++/* Address as calculated by raid_address(). */ ++struct raid_address { ++ sector_t key; /* Hash key (address of stripe % chunk_size). 
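/*
 * Worked example for the address calculation (illustrative, not part of the
 * patch; see raid_address() further down, helper name made up): with the
 * default 64-sector chunks (chunk_shift = 6) on a four-disk "raid5_ls" set
 * (data_devs = 3, raid_devs = 4), sector 1000 maps as
 *
 *	chunk  = 1000 >> 6          = 15
 *	di     = 15 % 3             = 0,  stripe = 15 / 3 = 5
 *	pi     = 5 % 4              = 1
 *	left symmetric: pi = 3 - 1  = 2
 *	                di = (2 + 0 + 1) % 4 = 3
 *
 * i.e. the data chunk lands on member 3 with its parity on member 2. The
 * sketch below mirrors just that left-symmetric branch.
 */
static void example_left_sym_address(sector_t sector, unsigned chunk_shift,
				     unsigned data_devs, unsigned raid_devs,
				     unsigned *di, unsigned *pi)
{
	sector_t stripe = sector >> chunk_shift;
	unsigned d = sector_div(stripe, data_devs);	/* chunk % data_devs */

	*pi = data_devs - sector_div(stripe, raid_devs);
	*di = (*pi + d + 1) % raid_devs;
}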
*/ ++ unsigned di, pi; /* Data and parity disks index. */ ++}; ++ ++/* REMOVEME: reset statistics counters. */ ++static void stats_reset(struct raid_set *rs) ++{ ++ unsigned s = S_NR_STATS; ++ ++ while (s--) ++ atomic_set(rs->stats + s, 0); ++} ++ ++/*---------------------------------------------------------------- ++ * RAID set management routines. ++ *--------------------------------------------------------------*/ ++/* ++ * Begin small helper functions. ++ */ ++/* No need to be called from region hash indirectly at dm_rh_dec(). */ ++static void wake_dummy(void *context) {} ++ ++/* Return # of io reference. */ ++static int io_ref(struct raid_set *rs) ++{ ++ return atomic_read(&rs->io.in_process); ++} ++ ++/* Get an io reference. */ ++static void io_get(struct raid_set *rs) ++{ ++ int p = atomic_inc_return(&rs->io.in_process); ++ ++ if (p > atomic_read(&rs->io.in_process_max)) ++ atomic_set(&rs->io.in_process_max, p); /* REMOVEME: max. */ ++} ++ ++/* Put the io reference and conditionally wake io waiters. */ ++static void io_put(struct raid_set *rs) ++{ ++ /* Intel: rebuild data corrupter? */ ++ if (atomic_dec_and_test(&rs->io.in_process)) ++ wake_up(&rs->io.suspendq); ++ else ++ BUG_ON(io_ref(rs) < 0); ++} ++ ++/* Wait until all io has been processed. */ ++static void wait_ios(struct raid_set *rs) ++{ ++ wait_event(rs->io.suspendq, !io_ref(rs)); ++} ++ ++/* Queue (optionally delayed) io work. */ ++static void wake_do_raid_delayed(struct raid_set *rs, unsigned long delay) ++{ ++ queue_delayed_work(rs->io.wq, &rs->io.dws_do_raid, delay); ++} ++ ++/* Queue io work immediately (called from region hash too). */ ++static void wake_do_raid(void *context) ++{ ++ struct raid_set *rs = context; ++ ++ queue_work(rs->io.wq, &rs->io.dws_do_raid.work); ++} ++ ++/* Calculate device sector offset. */ ++static sector_t _sector(struct raid_set *rs, struct bio *bio) ++{ ++ sector_t sector = bio->bi_sector; ++ ++ sector_div(sector, rs->set.data_devs); ++ return sector; ++} ++ ++/* Return # of active stripes in stripe cache. */ ++static int sc_active(struct stripe_cache *sc) ++{ ++ return atomic_read(&sc->active_stripes); ++} ++ ++/* Stripe cache busy indicator. */ ++static int sc_busy(struct raid_set *rs) ++{ ++ return sc_active(&rs->sc) > ++ atomic_read(&rs->sc.stripes) - (STRIPES_MIN / 2); ++} ++ ++/* Set chunks states. */ ++enum chunk_dirty_type { CLEAN, DIRTY, ERROR }; ++static void chunk_set(struct stripe_chunk *chunk, enum chunk_dirty_type type) ++{ ++ switch (type) { ++ case CLEAN: ++ ClearChunkDirty(chunk); ++ break; ++ case DIRTY: ++ SetChunkDirty(chunk); ++ break; ++ case ERROR: ++ SetChunkError(chunk); ++ SetStripeError(chunk->stripe); ++ return; ++ default: ++ BUG(); ++ } ++ ++ SetChunkUptodate(chunk); ++ SetChunkIo(chunk); ++ ClearChunkError(chunk); ++} ++ ++/* Return region state for a sector. */ ++static int region_state(struct raid_set *rs, sector_t sector, ++ enum dm_rh_region_states state) ++{ ++ struct dm_rh_client *rh = rs->recover.rh; ++ region_t region = dm_rh_sector_to_region(rh, sector); ++ ++ return !!(dm_rh_get_state(rh, region, 1) & state); ++} ++ ++/* ++ * Return true in case a chunk should be read/written ++ * ++ * Conditions to read/write: ++ * o chunk not uptodate ++ * o chunk dirty ++ * ++ * Conditios to avoid io: ++ * o io already ongoing on chunk ++ * o io explitely prohibited ++ */ ++static int chunk_io(struct stripe_chunk *chunk) ++{ ++ /* 2nd run optimization (flag set below on first run). 
*/ ++ if (TestClearChunkMustIo(chunk)) ++ return 1; ++ ++ /* Avoid io if prohibited or a locked chunk. */ ++ if (!ChunkIo(chunk) || ChunkLocked(chunk)) ++ return 0; ++ ++ if (!ChunkUptodate(chunk) || ChunkDirty(chunk)) { ++ SetChunkMustIo(chunk); /* 2nd run optimization. */ ++ return 1; ++ } ++ ++ return 0; ++} ++ ++/* Call a function on each chunk needing io unless device failed. */ ++static unsigned for_each_io_dev(struct stripe *stripe, ++ void (*f_io)(struct stripe *stripe, unsigned p)) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ unsigned p, r = 0; ++ ++ for (p = 0; p < rs->set.raid_devs; p++) { ++ if (chunk_io(CHUNK(stripe, p)) && !DevFailed(rs->dev + p)) { ++ f_io(stripe, p); ++ r++; ++ } ++ } ++ ++ return r; ++} ++ ++/* ++ * Index of device to calculate parity on. ++ * ++ * Either the parity device index *or* the selected ++ * device to init after a spare replacement. ++ */ ++static int dev_for_parity(struct stripe *stripe, int *sync) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ int r = region_state(rs, stripe->key, DM_RH_NOSYNC | DM_RH_RECOVERING); ++ ++ *sync = !r; ++ ++ /* Reconstruct a particular device ?. */ ++ if (r && rs->set.dev_to_init > -1) ++ return rs->set.dev_to_init; ++ else if (rs->set.raid_type->level == raid4) ++ return rs->set.pi; ++ else if (!StripeRecover(stripe)) ++ return stripe->idx.parity; ++ else ++ return -1; ++} ++ ++/* RAID set congested function. */ ++static int rs_congested(void *congested_data, int bdi_bits) ++{ ++ int r; ++ unsigned p; ++ struct raid_set *rs = congested_data; ++ ++ if (sc_busy(rs) || RSSuspend(rs) || RSProhibitWrites(rs)) ++ r = 1; ++ else for (r = 0, p = rs->set.raid_devs; !r && p--; ) { ++ /* If any of our component devices are overloaded. */ ++ struct request_queue *q = bdev_get_queue(rs->dev[p].dev->bdev); ++ ++ r |= bdi_congested(&q->backing_dev_info, bdi_bits); ++ } ++ ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + (r ? S_CONGESTED : S_NOT_CONGESTED)); ++ return r; ++} ++ ++/* RAID device degrade check. */ ++static void rs_check_degrade_dev(struct raid_set *rs, ++ struct stripe *stripe, unsigned p) ++{ ++ if (TestSetDevFailed(rs->dev + p)) ++ return; ++ ++ /* Through an event in case of member device errors. */ ++ if ((atomic_inc_return(&rs->set.failed_devs) > ++ rs->set.raid_type->parity_devs) && ++ !TestSetRSDead(rs)) { ++ /* Display RAID set dead message once. */ ++ unsigned p; ++ char buf[BDEVNAME_SIZE]; ++ ++ DMERR("FATAL: too many devices failed -> RAID set broken"); ++ for (p = 0; p < rs->set.raid_devs; p++) { ++ if (DevFailed(rs->dev + p)) ++ DMERR("device /dev/%s failed", ++ bdevname(rs->dev[p].dev->bdev, buf)); ++ } ++ } ++ ++ /* Only log the first member error. */ ++ if (!TestSetRSDegraded(rs)) { ++ char buf[BDEVNAME_SIZE]; ++ ++ /* Store index for recovery. */ ++ rs->set.ei = p; ++ DMERR("CRITICAL: %sio error on device /dev/%s " ++ "in region=%llu; DEGRADING RAID set\n", ++ stripe ? "" : "FAKED ", ++ bdevname(rs->dev[p].dev->bdev, buf), ++ (unsigned long long) (stripe ? stripe->key : 0)); ++ DMERR("further device error messages suppressed"); ++ } ++ ++ /* Prohibit further writes to allow for userpace to update metadata. */ ++ SetRSProhibitWrites(rs); ++ schedule_work(&rs->io.ws_do_table_event); ++} ++ ++/* RAID set degrade check. 
*/ ++static void rs_check_degrade(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ unsigned p = rs->set.raid_devs; ++ ++ while (p--) { ++ if (ChunkError(CHUNK(stripe, p))) ++ rs_check_degrade_dev(rs, stripe, p); ++ } ++} ++ ++/* Lookup a RAID device by name or by major:minor number. */ ++static int raid_dev_lookup(struct raid_set *rs, struct raid_dev *dev_lookup) ++{ ++ unsigned p; ++ struct raid_dev *dev; ++ ++ /* ++ * Must be an incremental loop, because the device array ++ * can have empty slots still on calls from raid_ctr() ++ */ ++ for (dev = rs->dev, p = 0; ++ dev->dev && p < rs->set.raid_devs; ++ dev++, p++) { ++ if (dev_lookup->dev->bdev->bd_dev == dev->dev->bdev->bd_dev) ++ return p; ++ } ++ ++ return -ENODEV; ++} ++/* ++ * End small helper functions. ++ */ ++ ++/* ++ * Stripe hash functions ++ */ ++/* Initialize/destroy stripe hash. */ ++static int hash_init(struct stripe_hash *hash, unsigned stripes) ++{ ++ unsigned buckets = roundup_pow_of_two(stripes >> 1); ++ static unsigned hash_primes[] = { ++ /* Table of primes for hash_fn/table size optimization. */ ++ 1, 2, 3, 7, 13, 27, 53, 97, 193, 389, 769, ++ 1543, 3079, 6151, 12289, 24593, 49157, 98317, ++ }; ++ ++ /* Allocate stripe hash buckets. */ ++ hash->hash = vmalloc(buckets * sizeof(*hash->hash)); ++ if (!hash->hash) ++ return -ENOMEM; ++ ++ hash->buckets = buckets; ++ hash->mask = buckets - 1; ++ hash->shift = ffs(buckets); ++ if (hash->shift > ARRAY_SIZE(hash_primes)) ++ hash->shift = ARRAY_SIZE(hash_primes) - 1; ++ ++ BUG_ON(hash->shift < 2); ++ hash->prime = hash_primes[hash->shift]; ++ ++ /* Initialize buckets. */ ++ while (buckets--) ++ INIT_LIST_HEAD(hash->hash + buckets); ++ return 0; ++} ++ ++static void hash_exit(struct stripe_hash *hash) ++{ ++ if (hash->hash) { ++ vfree(hash->hash); ++ hash->hash = NULL; ++ } ++} ++ ++static unsigned hash_fn(struct stripe_hash *hash, sector_t key) ++{ ++ return (unsigned) (((key * hash->prime) >> hash->shift) & hash->mask); ++} ++ ++static struct list_head *hash_bucket(struct stripe_hash *hash, sector_t key) ++{ ++ return hash->hash + hash_fn(hash, key); ++} ++ ++/* Insert an entry into a hash. */ ++static void stripe_insert(struct stripe_hash *hash, struct stripe *stripe) ++{ ++ list_add(stripe->lists + LIST_HASH, hash_bucket(hash, stripe->key)); ++} ++ ++/* Lookup an entry in the stripe hash. */ ++static struct stripe *stripe_lookup(struct stripe_cache *sc, sector_t key) ++{ ++ unsigned look = 0; ++ struct stripe *stripe; ++ struct list_head *bucket = hash_bucket(&sc->hash, key); ++ ++ list_for_each_entry(stripe, bucket, lists[LIST_HASH]) { ++ look++; ++ ++ if (stripe->key == key) { ++ /* REMOVEME: statisics. */ ++ if (look > atomic_read(RS(sc)->stats + S_MAX_LOOKUP)) ++ atomic_set(RS(sc)->stats + S_MAX_LOOKUP, look); ++ return stripe; ++ } ++ } ++ ++ return NULL; ++} ++ ++/* Resize the stripe cache hash on size changes. */ ++static int sc_hash_resize(struct stripe_cache *sc) ++{ ++ /* Resize indicated ? */ ++ if (atomic_read(&sc->stripes) != atomic_read(&sc->stripes_last)) { ++ int r; ++ struct stripe_hash hash; ++ ++ r = hash_init(&hash, atomic_read(&sc->stripes)); ++ if (r) ++ return r; ++ ++ if (sc->hash.hash) { ++ unsigned b = sc->hash.buckets; ++ struct list_head *pos, *tmp; ++ ++ /* Walk old buckets and insert into new. 
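/*
 * Sizing example for the stripe hash above (illustrative, not part of the
 * patch): with the default cache of STRIPES_DEFAULT = 80 stripes, hash_init()
 * chooses
 *
 *	buckets = roundup_pow_of_two(80 >> 1) = 64	(mask = 63)
 *	shift   = ffs(64)                     = 7
 *	prime   = hash_primes[7]              = 97
 *
 * so hash_fn() reduces to ((key * 97) >> 7) & 63, i.e. roughly one bucket per
 * two cached stripes; sc_hash_resize() recomputes this whenever the cache is
 * grown or shrunk.
 */
static inline unsigned example_hash_80_stripes(sector_t key)
{
	return (unsigned)(((key * 97) >> 7) & 63);	/* hash_fn() for 80 stripes */
}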
*/ ++ while (b--) { ++ list_for_each_safe(pos, tmp, sc->hash.hash + b) ++ stripe_insert(&hash, ++ list_entry(pos, struct stripe, ++ lists[LIST_HASH])); ++ } ++ ++ } ++ ++ hash_exit(&sc->hash); ++ memcpy(&sc->hash, &hash, sizeof(sc->hash)); ++ atomic_set(&sc->stripes_last, atomic_read(&sc->stripes)); ++ } ++ ++ return 0; ++} ++/* End hash stripe hash function. */ ++ ++/* List add, delete, push and pop functions. */ ++/* Add stripe to flush list. */ ++#define DEL_LIST(lh) \ ++ if (!list_empty(lh)) \ ++ list_del_init(lh); ++ ++/* Delete stripe from hash. */ ++static void stripe_hash_del(struct stripe *stripe) ++{ ++ DEL_LIST(stripe->lists + LIST_HASH); ++} ++ ++/* Return stripe reference count. */ ++static inline int stripe_ref(struct stripe *stripe) ++{ ++ return atomic_read(&stripe->cnt); ++} ++ ++static void stripe_flush_add(struct stripe *stripe) ++{ ++ struct stripe_cache *sc = stripe->sc; ++ struct list_head *lh = stripe->lists + LIST_FLUSH; ++ ++ if (!StripeReconstruct(stripe) && list_empty(lh)) ++ list_add_tail(lh, sc->lists + LIST_FLUSH); ++} ++ ++/* ++ * Add stripe to LRU (inactive) list. ++ * ++ * Need lock, because of concurrent access from message interface. ++ */ ++static void stripe_lru_add(struct stripe *stripe) ++{ ++ if (!StripeRecover(stripe)) { ++ struct list_head *lh = stripe->lists + LIST_LRU; ++ ++ if (list_empty(lh)) ++ list_add_tail(lh, stripe->sc->lists + LIST_LRU); ++ } ++} ++ ++#define POP_LIST(list) \ ++ do { \ ++ if (list_empty(sc->lists + (list))) \ ++ stripe = NULL; \ ++ else { \ ++ stripe = list_first_entry(sc->lists + (list), \ ++ struct stripe, \ ++ lists[(list)]); \ ++ list_del_init(stripe->lists + (list)); \ ++ } \ ++ } while (0); ++ ++/* Pop an available stripe off the LRU list. */ ++static struct stripe *stripe_lru_pop(struct stripe_cache *sc) ++{ ++ struct stripe *stripe; ++ ++ POP_LIST(LIST_LRU); ++ return stripe; ++} ++ ++/* Pop an available stripe off the io list. */ ++static struct stripe *stripe_io_pop(struct stripe_cache *sc) ++{ ++ struct stripe *stripe; ++ ++ POP_LIST(LIST_FLUSH); ++ return stripe; ++} ++ ++/* Push a stripe safely onto the endio list to be handled by do_endios(). */ ++static void stripe_endio_push(struct stripe *stripe) ++{ ++ unsigned long flags; ++ struct stripe_cache *sc = stripe->sc; ++ struct list_head *stripe_list = stripe->lists + LIST_ENDIO, ++ *sc_list = sc->lists + LIST_ENDIO; ++ spinlock_t *lock = sc->locks + LOCK_ENDIO; ++ ++ /* This runs in parallel with do_endios(). */ ++ spin_lock_irqsave(lock, flags); ++ if (list_empty(stripe_list)) ++ list_add_tail(stripe_list, sc_list); ++ spin_unlock_irqrestore(lock, flags); ++ ++ wake_do_raid(RS(sc)); /* Wake myself. */ ++} ++ ++/* Pop a stripe off safely off the endio list. */ ++static struct stripe *stripe_endio_pop(struct stripe_cache *sc) ++{ ++ struct stripe *stripe; ++ spinlock_t *lock = sc->locks + LOCK_ENDIO; ++ ++ /* This runs in parallel with endio(). */ ++ spin_lock_irq(lock); ++ POP_LIST(LIST_ENDIO) ++ spin_unlock_irq(lock); ++ return stripe; ++} ++#undef POP_LIST ++ ++/* ++ * Stripe cache locking functions ++ */ ++/* Dummy lock function for single host RAID4+5. */ ++static void *no_lock(sector_t key, enum dm_lock_type type) ++{ ++ return &no_lock; ++} ++ ++/* Dummy unlock function for single host RAID4+5. */ ++static void no_unlock(void *lock_handle) ++{ ++} ++ ++/* No locking (for single host RAID 4+5). */ ++static struct dm_raid45_locking_type locking_none = { ++ .lock = no_lock, ++ .unlock = no_unlock, ++}; ++ ++/* Lock a stripe (for clustering). 
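/*
 * Illustrative consumer loop (not part of the patch; the function name is
 * made up): stripe_endio_push() above runs from the bio completion path via
 * stripe_io_put(), while the worker drains the list roughly as below in
 * do_endios(); the LOCK_ENDIO spinlock taken inside push/pop is what makes
 * that handoff safe.
 */
static void example_drain_endios(struct stripe_cache *sc)
{
	struct stripe *stripe;

	while ((stripe = stripe_endio_pop(sc)) != NULL) {
		/* Per-stripe completion handling lives in do_endios(). */
	}
}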
*/ ++static int ++stripe_lock(struct stripe *stripe, int rw, sector_t key) ++{ ++ stripe->lock = RS(stripe->sc)->locking->lock(key, rw == READ ? DM_RAID45_SHARED : DM_RAID45_EX); ++ return stripe->lock ? 0 : -EPERM; ++} ++ ++/* Unlock a stripe (for clustering). */ ++static void stripe_unlock(struct stripe *stripe) ++{ ++ RS(stripe->sc)->locking->unlock(stripe->lock); ++ stripe->lock = NULL; ++} ++ ++/* Test io pending on stripe. */ ++static int stripe_io_ref(struct stripe *stripe) ++{ ++ return atomic_read(&stripe->io.pending); ++} ++ ++static void stripe_io_get(struct stripe *stripe) ++{ ++ if (atomic_inc_return(&stripe->io.pending) == 1) ++ /* REMOVEME: statistics */ ++ atomic_inc(&stripe->sc->active_stripes); ++ else ++ BUG_ON(stripe_io_ref(stripe) < 0); ++} ++ ++static void stripe_io_put(struct stripe *stripe) ++{ ++ if (atomic_dec_and_test(&stripe->io.pending)) { ++ if (unlikely(StripeRecover(stripe))) ++ /* Don't put recovery stripe on endio list. */ ++ wake_do_raid(RS(stripe->sc)); ++ else ++ /* Add regular stripe to endio list and wake daemon. */ ++ stripe_endio_push(stripe); ++ ++ /* REMOVEME: statistics */ ++ atomic_dec(&stripe->sc->active_stripes); ++ } else ++ BUG_ON(stripe_io_ref(stripe) < 0); ++} ++ ++/* Take stripe reference out. */ ++static int stripe_get(struct stripe *stripe) ++{ ++ int r; ++ struct list_head *lh = stripe->lists + LIST_LRU; ++ ++ /* Delete stripe from LRU (inactive) list if on. */ ++ DEL_LIST(lh); ++ BUG_ON(stripe_ref(stripe) < 0); ++ ++ /* Lock stripe on first reference */ ++ r = (atomic_inc_return(&stripe->cnt) == 1) ? ++ stripe_lock(stripe, WRITE, stripe->key) : 0; ++ ++ return r; ++} ++#undef DEL_LIST ++ ++/* Return references on a chunk. */ ++static int chunk_ref(struct stripe_chunk *chunk) ++{ ++ return atomic_read(&chunk->cnt); ++} ++ ++/* Take out reference on a chunk. */ ++static int chunk_get(struct stripe_chunk *chunk) ++{ ++ return atomic_inc_return(&chunk->cnt); ++} ++ ++/* Drop reference on a chunk. */ ++static void chunk_put(struct stripe_chunk *chunk) ++{ ++ BUG_ON(atomic_dec_return(&chunk->cnt) < 0); ++} ++ ++/* ++ * Drop reference on a stripe. ++ * ++ * Move it to list of LRU stripes if zero. ++ */ ++static void stripe_put(struct stripe *stripe) ++{ ++ if (atomic_dec_and_test(&stripe->cnt)) { ++ BUG_ON(stripe_io_ref(stripe)); ++ stripe_unlock(stripe); ++ } else ++ BUG_ON(stripe_ref(stripe) < 0); ++} ++ ++/* Helper needed by for_each_io_dev(). */ ++static void stripe_get_references(struct stripe *stripe, unsigned p) ++{ ++ ++ /* ++ * Another one to reference the stripe in ++ * order to protect vs. LRU list moves. ++ */ ++ io_get(RS(stripe->sc)); /* Global io references. */ ++ stripe_get(stripe); ++ stripe_io_get(stripe); /* One for each chunk io. */ ++} ++ ++/* Helper for endio() to put all take references. */ ++static void stripe_put_references(struct stripe *stripe) ++{ ++ stripe_io_put(stripe); /* One for each chunk io. */ ++ stripe_put(stripe); ++ io_put(RS(stripe->sc)); ++} ++ ++/* ++ * Stripe cache functions. ++ */ ++/* ++ * Invalidate all chunks (i.e. their pages) of a stripe. ++ * ++ * I only keep state for the whole chunk. ++ */ ++static inline void stripe_chunk_invalidate(struct stripe_chunk *chunk) ++{ ++ chunk->io.flags = 0; ++} ++ ++static void ++stripe_chunks_invalidate(struct stripe *stripe) ++{ ++ unsigned p = RS(stripe->sc)->set.raid_devs; ++ ++ while (p--) ++ stripe_chunk_invalidate(CHUNK(stripe, p)); ++} ++ ++/* Prepare stripe for (re)use. 
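/*
 * Reference/lock pairing sketch (illustrative, not part of the patch; error
 * handling simplified, function name made up): the first stripe_get() on a
 * stripe also takes the stripe lock and the final stripe_put() drops it, so
 * users bracket their work like this:
 */
static int example_stripe_use(struct stripe *stripe)
{
	if (stripe_get(stripe))
		return -EPERM;		/* could not lock the stripe */

	/* ... queue bios to the stripe's chunks, flush io ... */

	stripe_put(stripe);
	return 0;
}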
*/ ++static void stripe_invalidate(struct stripe *stripe) ++{ ++ stripe->io.flags = 0; ++ stripe->idx.parity = stripe->idx.recover = -1; ++ stripe_chunks_invalidate(stripe); ++} ++ ++/* ++ * Allow io on all chunks of a stripe. ++ * If not set, IO will not occur; i.e. it's prohibited. ++ * ++ * Actual IO submission for allowed chunks depends ++ * on their !uptodate or dirty state. ++ */ ++static void stripe_allow_io(struct stripe *stripe) ++{ ++ unsigned p = RS(stripe->sc)->set.raid_devs; ++ ++ while (p--) ++ SetChunkIo(CHUNK(stripe, p)); ++} ++ ++/* Initialize a stripe. */ ++static void stripe_init(struct stripe_cache *sc, struct stripe *stripe) ++{ ++ unsigned i, p = RS(sc)->set.raid_devs; ++ ++ /* Work all io chunks. */ ++ while (p--) { ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ ++ atomic_set(&chunk->cnt, 0); ++ chunk->stripe = stripe; ++ i = ARRAY_SIZE(chunk->bl); ++ while (i--) ++ bio_list_init(chunk->bl + i); ++ } ++ ++ stripe->sc = sc; ++ ++ i = ARRAY_SIZE(stripe->lists); ++ while (i--) ++ INIT_LIST_HEAD(stripe->lists + i); ++ ++ stripe->io.size = RS(sc)->set.io_size; ++ atomic_set(&stripe->cnt, 0); ++ atomic_set(&stripe->io.pending, 0); ++ stripe_invalidate(stripe); ++} ++ ++/* Number of pages per chunk. */ ++static inline unsigned chunk_pages(unsigned sectors) ++{ ++ return dm_div_up(sectors, SECTORS_PER_PAGE); ++} ++ ++/* Number of pages per stripe. */ ++static inline unsigned stripe_pages(struct raid_set *rs, unsigned io_size) ++{ ++ return chunk_pages(io_size) * rs->set.raid_devs; ++} ++ ++/* Initialize part of page_list (recovery). */ ++static void stripe_zero_pl_part(struct stripe *stripe, int p, ++ unsigned start, unsigned count) ++{ ++ unsigned o = start / SECTORS_PER_PAGE, pages = chunk_pages(count); ++ /* Get offset into the page_list. */ ++ struct page_list *pl = pl_elem(PL(stripe, p), o); ++ ++ BUG_ON(!pl); ++ while (pl && pages--) { ++ BUG_ON(!pl->page); ++ memset(page_address(pl->page), 0, PAGE_SIZE); ++ pl = pl->next; ++ } ++} ++ ++/* Initialize parity chunk of stripe. */ ++static void stripe_zero_chunk(struct stripe *stripe, int p) ++{ ++ if (p > -1) ++ stripe_zero_pl_part(stripe, p, 0, stripe->io.size); ++} ++ ++/* Return dynamic stripe structure size. */ ++static size_t stripe_size(struct raid_set *rs) ++{ ++ return sizeof(struct stripe) + ++ rs->set.raid_devs * sizeof(struct stripe_chunk); ++} ++ ++/* Allocate a stripe and its memory object. */ ++/* XXX adjust to cope with stripe cache and recovery stripe caches. */ ++enum grow { SC_GROW, SC_KEEP }; ++static struct stripe *stripe_alloc(struct stripe_cache *sc, ++ struct dm_mem_cache_client *mc, ++ enum grow grow) ++{ ++ int r; ++ struct stripe *stripe; ++ ++ stripe = kmem_cache_zalloc(sc->kc.cache, GFP_KERNEL); ++ if (stripe) { ++ /* Grow the dm-mem-cache by one object. */ ++ if (grow == SC_GROW) { ++ r = dm_mem_cache_grow(mc, 1); ++ if (r) ++ goto err_free; ++ } ++ ++ stripe->obj = dm_mem_cache_alloc(mc); ++ if (IS_ERR(stripe->obj)) ++ goto err_shrink; ++ ++ stripe_init(sc, stripe); ++ } ++ ++ return stripe; ++ ++err_shrink: ++ if (grow == SC_GROW) ++ dm_mem_cache_shrink(mc, 1); ++err_free: ++ kmem_cache_free(sc->kc.cache, stripe); ++ return NULL; ++} ++ ++/* ++ * Free a stripes memory object, shrink the ++ * memory cache and free the stripe itself. ++ */ ++static void stripe_free(struct stripe *stripe, struct dm_mem_cache_client *mc) ++{ ++ dm_mem_cache_free(mc, stripe->obj); ++ dm_mem_cache_shrink(mc, 1); ++ kmem_cache_free(stripe->sc->kc.cache, stripe); ++} ++ ++/* Free the recovery stripe. 
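/*
 * Footprint example for the page math above (illustrative, not part of the
 * patch; assumes 4 KiB pages, i.e. SECTORS_PER_PAGE = 8): with the default
 * io_size of IO_SIZE_DEFAULT = 8 sectors, chunk_pages(8) = 1, so one stripe
 * on a four-disk set pins stripe_pages() = 1 * 4 = 4 pages, and the default
 * cache of STRIPES_DEFAULT = 80 stripes holds 320 pages (about 1.25 MiB),
 * plus the recovery stripes sized by RECOVER_IO_SIZE_*.
 */
static inline unsigned example_cache_pages(unsigned stripes, unsigned io_size,
					   unsigned raid_devs)
{
	return stripes * chunk_pages(io_size) * raid_devs;	/* 80 * 1 * 4 = 320 */
}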
*/ ++static void stripe_recover_free(struct raid_set *rs) ++{ ++ struct recover *rec = &rs->recover; ++ struct dm_mem_cache_client *mc; ++ ++ mc = rec->mem_cache_client; ++ rec->mem_cache_client = NULL; ++ if (mc) { ++ struct stripe *stripe; ++ ++ while (!list_empty(&rec->stripes)) { ++ stripe = list_first_entry(&rec->stripes, struct stripe, ++ lists[LIST_RECOVER]); ++ list_del(stripe->lists + LIST_RECOVER); ++ kfree(stripe->recover); ++ stripe_free(stripe, mc); ++ } ++ ++ dm_mem_cache_client_destroy(mc); ++ dm_io_client_destroy(rec->dm_io_client); ++ rec->dm_io_client = NULL; ++ } ++} ++ ++/* Grow stripe cache. */ ++static int sc_grow(struct stripe_cache *sc, unsigned stripes, enum grow grow) ++{ ++ int r = 0; ++ ++ /* Try to allocate this many (additional) stripes. */ ++ while (stripes--) { ++ struct stripe *stripe = ++ stripe_alloc(sc, sc->mem_cache_client, grow); ++ ++ if (likely(stripe)) { ++ stripe_lru_add(stripe); ++ atomic_inc(&sc->stripes); ++ } else { ++ r = -ENOMEM; ++ break; ++ } ++ } ++ ++ return r ? r : sc_hash_resize(sc); ++} ++ ++/* Shrink stripe cache. */ ++static int sc_shrink(struct stripe_cache *sc, unsigned stripes) ++{ ++ int r = 0; ++ ++ /* Try to get unused stripe from LRU list. */ ++ while (stripes--) { ++ struct stripe *stripe; ++ ++ stripe = stripe_lru_pop(sc); ++ if (stripe) { ++ /* An LRU stripe may never have ios pending! */ ++ BUG_ON(stripe_io_ref(stripe)); ++ BUG_ON(stripe_ref(stripe)); ++ atomic_dec(&sc->stripes); ++ /* Remove from hash if on before deletion. */ ++ stripe_hash_del(stripe); ++ stripe_free(stripe, sc->mem_cache_client); ++ } else { ++ r = -ENOENT; ++ break; ++ } ++ } ++ ++ /* Check if stats are still sane. */ ++ if (atomic_read(&sc->active_stripes_max) > ++ atomic_read(&sc->stripes)) ++ atomic_set(&sc->active_stripes_max, 0); ++ ++ if (r) ++ return r; ++ ++ return atomic_read(&sc->stripes) ? sc_hash_resize(sc) : 0; ++} ++ ++/* Create stripe cache and recovery. */ ++static int sc_init(struct raid_set *rs, unsigned stripes) ++{ ++ unsigned i, r, rstripes; ++ struct stripe_cache *sc = &rs->sc; ++ struct stripe *stripe; ++ struct recover *rec = &rs->recover; ++ struct mapped_device *md; ++ struct gendisk *disk; ++ ++ ++ /* Initialize lists and locks. */ ++ i = ARRAY_SIZE(sc->lists); ++ while (i--) ++ INIT_LIST_HEAD(sc->lists + i); ++ ++ INIT_LIST_HEAD(&rec->stripes); ++ ++ /* Initialize endio and LRU list locks. */ ++ i = NR_LOCKS; ++ while (i--) ++ spin_lock_init(sc->locks + i); ++ ++ /* Initialize atomic variables. */ ++ atomic_set(&sc->stripes, 0); ++ atomic_set(&sc->stripes_to_set, 0); ++ atomic_set(&sc->active_stripes, 0); ++ atomic_set(&sc->active_stripes_max, 0); /* REMOVEME: statistics. */ ++ ++ /* ++ * We need a runtime unique # to suffix the kmem cache name ++ * because we'll have one for each active RAID set. ++ */ ++ md = dm_table_get_md(rs->ti->table); ++ disk = dm_disk(md); ++ snprintf(sc->kc.name, sizeof(sc->kc.name), "%s-%d.%d", TARGET, ++ disk->first_minor, atomic_inc_return(&_stripe_sc_nr)); ++ dm_put(md); ++ sc->kc.cache = kmem_cache_create(sc->kc.name, stripe_size(rs), ++ 0, 0, NULL); ++ if (!sc->kc.cache) ++ return -ENOMEM; ++ ++ /* Create memory cache client context for RAID stripe cache. */ ++ sc->mem_cache_client = ++ dm_mem_cache_client_create(stripes, rs->set.raid_devs, ++ chunk_pages(rs->set.io_size)); ++ if (IS_ERR(sc->mem_cache_client)) ++ return PTR_ERR(sc->mem_cache_client); ++ ++ /* Create memory cache client context for RAID recovery stripe(s). 
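sc_grow() and sc_shrink() above resize the cache one stripe at a time, and shrinking can only reclaim stripes that sit unused on the LRU list; once none are idle it gives up with -ENOENT and the cache stays at its current size. A toy model of that policy, with plain counters standing in for the real LRU list and mem-cache client:

#include <stdio.h>

/* Toy cache: 'total' objects, 'idle' of them sitting on the LRU list. */
struct cache { int total, idle; };

static int cache_grow(struct cache *c, int n)
{
    c->total += n;
    c->idle  += n;      /* new objects start out unused */
    return 0;
}

/* Shrinking can only reclaim idle objects, mirroring sc_shrink(). */
static int cache_shrink(struct cache *c, int n)
{
    while (n--) {
        if (!c->idle)
            return -1;  /* cf. -ENOENT: nothing left on the LRU */
        c->idle--;
        c->total--;
    }
    return 0;
}

int main(void)
{
    struct cache c = { 0, 0 };

    cache_grow(&c, 8);
    c.idle -= 3;        /* pretend 3 stripes are busy */
    printf("shrink by 8 -> %d (only 5 were idle)\n", cache_shrink(&c, 8));
    printf("left: total=%d idle=%d\n", c.total, c.idle);
    return 0;
}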
*/ ++ rstripes = rec->recovery_stripes; ++ rec->mem_cache_client = ++ dm_mem_cache_client_create(rstripes, rs->set.raid_devs, ++ chunk_pages(rec->io_size)); ++ if (IS_ERR(rec->mem_cache_client)) ++ return PTR_ERR(rec->mem_cache_client); ++ ++ /* Create dm-io client context for IO stripes. */ ++ sc->dm_io_client = ++ dm_io_client_create((stripes > 32 ? 32 : stripes) * ++ rs->set.raid_devs * ++ chunk_pages(rs->set.io_size)); ++ if (IS_ERR(sc->dm_io_client)) ++ return PTR_ERR(sc->dm_io_client); ++ ++ /* FIXME: intermingeled with stripe cache initialization. */ ++ /* Create dm-io client context for recovery stripes. */ ++ rec->dm_io_client = ++ dm_io_client_create(rstripes * rs->set.raid_devs * ++ chunk_pages(rec->io_size)); ++ if (IS_ERR(rec->dm_io_client)) ++ return PTR_ERR(rec->dm_io_client); ++ ++ /* Allocate stripes for set recovery. */ ++ while (rstripes--) { ++ stripe = stripe_alloc(sc, rec->mem_cache_client, SC_KEEP); ++ if (!stripe) ++ return -ENOMEM; ++ ++ stripe->recover = kzalloc(sizeof(*stripe->recover), GFP_KERNEL); ++ if (!stripe->recover) { ++ stripe_free(stripe, rec->mem_cache_client); ++ return -ENOMEM; ++ } ++ ++ SetStripeRecover(stripe); ++ stripe->io.size = rec->io_size; ++ list_add_tail(stripe->lists + LIST_RECOVER, &rec->stripes); ++ /* Don't add recovery stripes to LRU list! */ ++ } ++ ++ /* ++ * Allocate the stripe objetcs from the ++ * cache and add them to the LRU list. ++ */ ++ r = sc_grow(sc, stripes, SC_KEEP); ++ if (!r) ++ atomic_set(&sc->stripes_last, stripes); ++ ++ return r; ++} ++ ++/* Destroy the stripe cache. */ ++static void sc_exit(struct stripe_cache *sc) ++{ ++ struct raid_set *rs = RS(sc); ++ ++ if (sc->kc.cache) { ++ stripe_recover_free(rs); ++ BUG_ON(sc_shrink(sc, atomic_read(&sc->stripes))); ++ kmem_cache_destroy(sc->kc.cache); ++ sc->kc.cache = NULL; ++ ++ if (sc->mem_cache_client && !IS_ERR(sc->mem_cache_client)) ++ dm_mem_cache_client_destroy(sc->mem_cache_client); ++ ++ if (sc->dm_io_client && !IS_ERR(sc->dm_io_client)) ++ dm_io_client_destroy(sc->dm_io_client); ++ ++ hash_exit(&sc->hash); ++ } ++} ++ ++/* ++ * Calculate RAID address ++ * ++ * Delivers tuple with the index of the data disk holding the chunk ++ * in the set, the parity disks index and the start of the stripe ++ * within the address space of the set (used as the stripe cache hash key). ++ */ ++/* thx MD. */ ++static struct raid_address *raid_address(struct raid_set *rs, sector_t sector, ++ struct raid_address *addr) ++{ ++ sector_t stripe, tmp; ++ ++ /* ++ * chunk_number = sector / chunk_size ++ * stripe_number = chunk_number / data_devs ++ * di = stripe % data_devs; ++ */ ++ stripe = sector >> rs->set.chunk_shift; ++ addr->di = sector_div(stripe, rs->set.data_devs); ++ ++ switch (rs->set.raid_type->level) { ++ case raid4: ++ addr->pi = rs->set.pi; ++ goto check_shift_di; ++ case raid5: ++ tmp = stripe; ++ addr->pi = sector_div(tmp, rs->set.raid_devs); ++ ++ switch (rs->set.raid_type->algorithm) { ++ case left_asym: /* Left asymmetric. */ ++ addr->pi = rs->set.data_devs - addr->pi; ++ case right_asym: /* Right asymmetric. */ ++check_shift_di: ++ if (addr->di >= addr->pi) ++ addr->di++; ++ break; ++ case left_sym: /* Left symmetric. */ ++ addr->pi = rs->set.data_devs - addr->pi; ++ case right_sym: /* Right symmetric. */ ++ addr->di = (addr->pi + addr->di + 1) % ++ rs->set.raid_devs; ++ break; ++ case none: /* Ain't happen: RAID4 algorithm placeholder. 
*/ ++ BUG(); ++ } ++ } ++ ++ /* ++ * Start offset of the stripes chunk on any single device of the RAID ++ * set, adjusted in case io size differs from chunk size. ++ */ ++ addr->key = (stripe << rs->set.chunk_shift) + ++ (sector & rs->set.io_inv_mask); ++ return addr; ++} ++ ++/* ++ * Copy data across between stripe pages and bio vectors. ++ * ++ * Pay attention to data alignment in stripe and bio pages. ++ */ ++static void bio_copy_page_list(int rw, struct stripe *stripe, ++ struct page_list *pl, struct bio *bio) ++{ ++ unsigned i, page_offset; ++ void *page_addr; ++ struct raid_set *rs = RS(stripe->sc); ++ struct bio_vec *bv; ++ ++ /* Get start page in page list for this sector. */ ++ i = (bio->bi_sector & rs->set.io_mask) / SECTORS_PER_PAGE; ++ pl = pl_elem(pl, i); ++ BUG_ON(!pl); ++ BUG_ON(!pl->page); ++ ++ page_addr = page_address(pl->page); ++ page_offset = to_bytes(bio->bi_sector & (SECTORS_PER_PAGE - 1)); ++ ++ /* Walk all segments and copy data across between bio_vecs and pages. */ ++ bio_for_each_segment(bv, bio, i) { ++ int len = bv->bv_len, size; ++ unsigned bio_offset = 0; ++ void *bio_addr = __bio_kmap_atomic(bio, i, KM_USER0); ++redo: ++ size = (page_offset + len > PAGE_SIZE) ? ++ PAGE_SIZE - page_offset : len; ++ ++ if (rw == READ) ++ memcpy(bio_addr + bio_offset, ++ page_addr + page_offset, size); ++ else ++ memcpy(page_addr + page_offset, ++ bio_addr + bio_offset, size); ++ ++ page_offset += size; ++ if (page_offset == PAGE_SIZE) { ++ /* ++ * We reached the end of the chunk page -> ++ * need to refer to the next one to copy more data. ++ */ ++ len -= size; ++ if (len) { ++ /* Get next page. */ ++ pl = pl->next; ++ BUG_ON(!pl); ++ BUG_ON(!pl->page); ++ page_addr = page_address(pl->page); ++ page_offset = 0; ++ bio_offset += size; ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_BIO_COPY_PL_NEXT); ++ goto redo; ++ } ++ } ++ ++ __bio_kunmap_atomic(bio_addr, KM_USER0); ++ } ++} ++ ++/* ++ * Xor optimization macros. ++ */ ++/* Xor data pointer declaration and initialization macros. */ ++#define DECLARE_2 unsigned long *d0 = data[0], *d1 = data[1] ++#define DECLARE_3 DECLARE_2, *d2 = data[2] ++#define DECLARE_4 DECLARE_3, *d3 = data[3] ++#define DECLARE_5 DECLARE_4, *d4 = data[4] ++#define DECLARE_6 DECLARE_5, *d5 = data[5] ++#define DECLARE_7 DECLARE_6, *d6 = data[6] ++#define DECLARE_8 DECLARE_7, *d7 = data[7] ++ ++/* Xor unrole macros. */ ++#define D2(n) d0[n] = d0[n] ^ d1[n] ++#define D3(n) D2(n) ^ d2[n] ++#define D4(n) D3(n) ^ d3[n] ++#define D5(n) D4(n) ^ d4[n] ++#define D6(n) D5(n) ^ d5[n] ++#define D7(n) D6(n) ^ d6[n] ++#define D8(n) D7(n) ^ d7[n] ++ ++#define X_2(macro, offset) macro(offset); macro(offset + 1); ++#define X_4(macro, offset) X_2(macro, offset); X_2(macro, offset + 2); ++#define X_8(macro, offset) X_4(macro, offset); X_4(macro, offset + 4); ++#define X_16(macro, offset) X_8(macro, offset); X_8(macro, offset + 8); ++#define X_32(macro, offset) X_16(macro, offset); X_16(macro, offset + 16); ++#define X_64(macro, offset) X_32(macro, offset); X_32(macro, offset + 32); ++ ++/* Define a _xor_#chunks_#xors_per_run() function. */ ++#define _XOR(chunks, xors_per_run) \ ++static void _xor ## chunks ## _ ## xors_per_run(unsigned long **data) \ ++{ \ ++ unsigned end = XOR_SIZE / sizeof(data[0]), i; \ ++ DECLARE_ ## chunks; \ ++\ ++ for (i = 0; i < end; i += xors_per_run) { \ ++ X_ ## xors_per_run(D ## chunks, i); \ ++ } \ ++} ++ ++/* Define xor functions for 2 - 8 chunks and xors per run. 
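raid_address() above turns a set-relative sector into the tuple (data device di, parity device pi, per-device stripe key); the RAID5 algorithms differ only in which direction parity rotates and whether data indices are shifted past or rotated behind the parity device. A standalone model of the left-symmetric case (the classic MD default) with example geometry; the driver's key additionally folds in an io-size sub-offset when io_size is smaller than chunk_size, which this sketch omits:

#include <stdio.h>

struct addr { unsigned di, pi; unsigned long long key; };

/*
 * Left-symmetric RAID5: parity rotates "backwards" across the devices
 * and data chunks continue on the device after the parity one.
 */
static struct addr map_left_sym(unsigned long long sector,
                                unsigned chunk_sectors, unsigned raid_devs)
{
    unsigned data_devs = raid_devs - 1;
    unsigned long long chunk  = sector / chunk_sectors;
    unsigned long long stripe = chunk / data_devs;
    unsigned di0 = (unsigned)(chunk % data_devs);
    struct addr a;

    a.pi  = data_devs - (unsigned)(stripe % raid_devs);
    a.di  = (a.pi + di0 + 1) % raid_devs;
    a.key = stripe * chunk_sectors;  /* per-device start of this stripe's chunk */
    return a;
}

int main(void)
{
    /* 5 devices, 64-sector chunks: first chunk of six consecutive stripes. */
    for (unsigned long long st = 0; st < 6; st++) {
        struct addr a = map_left_sym(st * 4 * 64, 64, 5);

        printf("stripe %llu: parity on dev %u, first data chunk on dev %u\n",
               st, a.pi, a.di);
    }
    return 0;
}

Walking stripe numbers shows the parity device stepping backwards one device per stripe, which is what spreads parity (and therefore parity-update load) evenly in RAID5, as opposed to RAID4's fixed parity disk selected by rs->set.pi.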
*/ ++#define MAKE_XOR_PER_RUN(xors_per_run) \ ++ _XOR(2, xors_per_run); _XOR(3, xors_per_run); \ ++ _XOR(4, xors_per_run); _XOR(5, xors_per_run); \ ++ _XOR(6, xors_per_run); _XOR(7, xors_per_run); \ ++ _XOR(8, xors_per_run); ++ ++MAKE_XOR_PER_RUN(8) /* Define _xor_*_8() functions. */ ++MAKE_XOR_PER_RUN(16) /* Define _xor_*_16() functions. */ ++MAKE_XOR_PER_RUN(32) /* Define _xor_*_32() functions. */ ++MAKE_XOR_PER_RUN(64) /* Define _xor_*_64() functions. */ ++ ++#define MAKE_XOR(xors_per_run) \ ++struct { \ ++ void (*f)(unsigned long **); \ ++} static xor_funcs ## xors_per_run[] = { \ ++ { NULL }, /* NULL pointers to optimize indexing in xor(). */ \ ++ { NULL }, \ ++ { _xor2_ ## xors_per_run }, \ ++ { _xor3_ ## xors_per_run }, \ ++ { _xor4_ ## xors_per_run }, \ ++ { _xor5_ ## xors_per_run }, \ ++ { _xor6_ ## xors_per_run }, \ ++ { _xor7_ ## xors_per_run }, \ ++ { _xor8_ ## xors_per_run }, \ ++}; \ ++\ ++static void xor_ ## xors_per_run(unsigned n, unsigned long **data) \ ++{ \ ++ /* Call respective function for amount of chunks. */ \ ++ xor_funcs ## xors_per_run[n].f(data); \ ++} ++ ++/* Define xor_8() - xor_64 functions. */ ++MAKE_XOR(8) ++MAKE_XOR(16) ++MAKE_XOR(32) ++MAKE_XOR(64) ++/* ++ * END xor optimization macros. ++ */ ++ ++/* Maximum number of chunks, which can be xor'ed in one go. */ ++#define XOR_CHUNKS_MAX (ARRAY_SIZE(xor_funcs8) - 1) ++ ++/* xor_blocks wrapper to allow for using that crypto library function. */ ++static void xor_blocks_wrapper(unsigned n, unsigned long **data) ++{ ++ BUG_ON(n < 2 || n > MAX_XOR_BLOCKS + 1); ++ xor_blocks(n - 1, XOR_SIZE, (void *) data[0], (void **) data + 1); ++} ++ ++struct xor_func { ++ xor_function_t f; ++ const char *name; ++} static xor_funcs[] = { ++ { xor_64, "xor_64" }, ++ { xor_32, "xor_32" }, ++ { xor_16, "xor_16" }, ++ { xor_8, "xor_8" }, ++ { xor_blocks_wrapper, "xor_blocks" }, ++}; ++ ++/* ++ * Check, if chunk has to be xored in/out: ++ * ++ * o if writes are queued ++ * o if writes are merged ++ * o if stripe is to be reconstructed ++ * o if recovery stripe ++ */ ++static inline int chunk_must_xor(struct stripe_chunk *chunk) ++{ ++ if (ChunkUptodate(chunk)) { ++ BUG_ON(!bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED)) && ++ !bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED))); ++ ++ if (!bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED)) || ++ !bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED))) ++ return 1; ++ ++ if (StripeReconstruct(chunk->stripe) || ++ StripeRecover(chunk->stripe)) ++ return 1; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Calculate crc. ++ * ++ * This indexes into the chunks of a stripe and their pages. ++ * ++ * All chunks will be xored into the indexed (@pi) ++ * chunk in maximum groups of xor.chunks. ++ * ++ */ ++static void xor(struct stripe *stripe, unsigned pi, unsigned sector) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ unsigned max_chunks = rs->xor.chunks, n = 1, ++ o = sector / SECTORS_PER_PAGE, /* Offset into the page_list. */ ++ p = rs->set.raid_devs; ++ unsigned long **d = rs->data; ++ xor_function_t xor_f = rs->xor.f->f; ++ ++ BUG_ON(sector > stripe->io.size); ++ ++ /* Address of parity page to xor into. */ ++ d[0] = page_address(pl_elem(PL(stripe, pi), o)->page); ++ ++ while (p--) { ++ /* Preset pointers to data pages. */ ++ if (p != pi && chunk_must_xor(CHUNK(stripe, p))) ++ d[n++] = page_address(pl_elem(PL(stripe, p), o)->page); ++ ++ /* If max chunks -> xor. 
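The macro block above generates one _xor<chunks>_<unroll>() function per combination so that xor() can call a fully unrolled loop for the chunk count at hand; functionally every one of them XORs chunks 1..n-1 into chunk 0, XOR_SIZE bytes at a time. A plain, unoptimized reference of that operation, with a 4 KiB block size assumed for the sketch:

#include <stddef.h>
#include <stdio.h>

#define XOR_BYTES 4096
#define XOR_WORDS (XOR_BYTES / sizeof(unsigned long))

/* XOR data[1..n-1] into data[0]; this is what _xor<n>_<unroll>() computes. */
static void xor_ref(unsigned n, unsigned long **data)
{
    for (size_t i = 0; i < XOR_WORDS; i++)
        for (unsigned c = 1; c < n; c++)
            data[0][i] ^= data[c][i];
}

int main(void)
{
    static unsigned long a[XOR_WORDS], b[XOR_WORDS], c[XOR_WORDS];
    unsigned long *chunks[3] = { a, b, c };

    for (size_t i = 0; i < XOR_WORDS; i++) {
        b[i] = i;
        c[i] = ~i;
    }
    xor_ref(3, chunks);                 /* a = b ^ c */
    printf("a[1] = %lx\n", a[1]);       /* ~0UL, since i ^ ~i == ~0 */
    return 0;
}

The unrolling (X_8 through X_64) only trades code size for fewer loop-control branches; xor_optimize() further down times each variant, including the crypto layer's xor_blocks(), and keeps whichever runs fastest on the machine at hand.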
*/ ++ if (n == max_chunks) { ++ mutex_lock(&rs->io.xor_lock); ++ xor_f(n, d); ++ mutex_unlock(&rs->io.xor_lock); ++ n = 1; ++ } ++ } ++ ++ /* If chunks -> xor. */ ++ if (n > 1) { ++ mutex_lock(&rs->io.xor_lock); ++ xor_f(n, d); ++ mutex_unlock(&rs->io.xor_lock); ++ } ++} ++ ++/* Common xor loop through all stripe page lists. */ ++static void common_xor(struct stripe *stripe, sector_t count, ++ unsigned off, unsigned pi) ++{ ++ unsigned sector; ++ ++ BUG_ON(!count); ++ for (sector = off; sector < count; sector += SECTORS_PER_PAGE) ++ xor(stripe, pi, sector); ++ ++ /* Set parity page uptodate and clean. */ ++ chunk_set(CHUNK(stripe, pi), CLEAN); ++ atomic_inc(RS(stripe->sc)->stats + S_XORS); /* REMOVEME: statistics. */ ++} ++ ++/* ++ * Calculate parity sectors on intact stripes. ++ * ++ * Need to calculate raid address for recover stripe, because its ++ * chunk sizes differs and is typically larger than io chunk size. ++ */ ++static void parity_xor(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ int size_differs = stripe->io.size != rs->set.io_size; ++ unsigned chunk_size = rs->set.chunk_size, io_size = stripe->io.size, ++ xor_size = chunk_size > io_size ? io_size : chunk_size; ++ sector_t off; ++ ++ /* This can be the recover stripe with a larger io size. */ ++ for (off = 0; off < io_size; off += xor_size) { ++ /* ++ * Recover stripe is likely bigger than regular io ++ * ones and has no precalculated parity disk index -> ++ * need to calculate RAID address. ++ */ ++ if (unlikely(size_differs)) { ++ struct raid_address addr; ++ ++ raid_address(rs, (stripe->key + off) * ++ rs->set.data_devs, &addr); ++ stripe->idx.parity = addr.pi; ++ stripe_zero_pl_part(stripe, addr.pi, off, xor_size); ++ } ++ ++ common_xor(stripe, xor_size, off, stripe->idx.parity); ++ chunk_set(CHUNK(stripe, stripe->idx.parity), DIRTY); ++ } ++} ++ ++/* Reconstruct missing chunk. */ ++static void stripe_reconstruct(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ int p = rs->set.raid_devs, pr = stripe->idx.recover; ++ ++ BUG_ON(pr < 0); ++ ++ /* Check if all but the chunk to be reconstructed are uptodate. */ ++ while (p--) ++ BUG_ON(p != pr && !ChunkUptodate(CHUNK(stripe, p))); ++ ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + (RSDegraded(rs) ? S_RECONSTRUCT_EI : ++ S_RECONSTRUCT_DEV)); ++ /* Zero chunk to be reconstructed. */ ++ stripe_zero_chunk(stripe, pr); ++ common_xor(stripe, stripe->io.size, 0, pr); ++} ++ ++/* ++ * Recovery io throttling ++ */ ++/* Conditionally reset io counters. */ ++static int recover_io_reset(struct raid_set *rs) ++{ ++ unsigned long j = jiffies; ++ ++ /* Pay attention to jiffies overflows. */ ++ if (j > rs->recover.last_jiffies + HZ || ++ j < rs->recover.last_jiffies) { ++ atomic_set(rs->recover.io_count + IO_WORK, 0); ++ atomic_set(rs->recover.io_count + IO_RECOVER, 0); ++ rs->recover.last_jiffies = j; ++ return 1; ++ } ++ ++ return 0; ++} ++ ++/* Count ios. */ ++static void recover_io_count(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ ++ atomic_inc(rs->recover.io_count + ++ (StripeRecover(stripe) ? IO_RECOVER : IO_WORK)); ++} ++ ++/* Try getting a stripe either from the hash or from the LRU list. */ ++static struct stripe *stripe_find(struct raid_set *rs, ++ struct raid_address *addr) ++{ ++ int r; ++ struct stripe_cache *sc = &rs->sc; ++ struct stripe *stripe; ++ ++ /* Try stripe from hash. 
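stripe_reconstruct() above relies on the usual RAID4/5 identity: because parity is the XOR of all data chunks, any single missing chunk equals the XOR of everything that survived, so zeroing the target and XOR-ing the remaining chunks into it (which is what common_xor() does) recreates it bit for bit. A one-byte numeric check of that identity:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned char d[4] = { 0x12, 0x34, 0x56, 0x78 };    /* data chunks */
    unsigned char p = d[0] ^ d[1] ^ d[2] ^ d[3];        /* parity */

    /* "Lose" d[2]; rebuild it from parity and the surviving chunks. */
    unsigned char rebuilt = p ^ d[0] ^ d[1] ^ d[3];

    assert(rebuilt == d[2]);
    printf("rebuilt 0x%02x == 0x%02x\n", rebuilt, d[2]);
    return 0;
}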
*/ ++ stripe = stripe_lookup(sc, addr->key); ++ if (stripe) { ++ r = stripe_get(stripe); ++ if (r) ++ goto get_lock_failed; ++ ++ atomic_inc(rs->stats + S_HITS_1ST); /* REMOVEME: statistics. */ ++ } else { ++ /* Not in hash -> try to get an LRU stripe. */ ++ stripe = stripe_lru_pop(sc); ++ if (stripe) { ++ /* ++ * An LRU stripe may not be referenced ++ * and may never have ios pending! ++ */ ++ BUG_ON(stripe_ref(stripe)); ++ BUG_ON(stripe_io_ref(stripe)); ++ ++ /* Remove from hash if on before reuse. */ ++ stripe_hash_del(stripe); ++ ++ /* Invalidate before reinserting with changed key. */ ++ stripe_invalidate(stripe); ++ ++ stripe->key = addr->key; ++ stripe->region = dm_rh_sector_to_region(rs->recover.rh, ++ addr->key); ++ stripe->idx.parity = addr->pi; ++ r = stripe_get(stripe); ++ if (r) ++ goto get_lock_failed; ++ ++ /* Insert stripe into the stripe hash. */ ++ stripe_insert(&sc->hash, stripe); ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_INSCACHE); ++ } ++ } ++ ++ return stripe; ++ ++get_lock_failed: ++ stripe_put(stripe); ++ return NULL; ++} ++ ++/* ++ * Process end io ++ * ++ * I need to do it here because I can't in interrupt ++ */ ++/* End io all bios on a bio list. */ ++static void bio_list_endio(struct stripe *stripe, struct bio_list *bl, ++ int p, int error) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ struct bio *bio; ++ struct page_list *pl = PL(stripe, p); ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ ++ /* Update region counters. */ ++ while ((bio = bio_list_pop(bl))) { ++ if (bio_data_dir(bio) == WRITE) ++ /* Drop io pending count for any writes. */ ++ dm_rh_dec(rs->recover.rh, stripe->region); ++ else if (!error) ++ /* Copy data accross. */ ++ bio_copy_page_list(READ, stripe, pl, bio); ++ ++ bio_endio(bio, error); ++ ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + (bio_data_dir(bio) == READ ? ++ S_BIOS_ENDIO_READ : S_BIOS_ENDIO_WRITE)); ++ ++ chunk_put(chunk); ++ stripe_put(stripe); ++ io_put(rs); /* Wake any suspend waiters on last bio. */ ++ } ++} ++ ++/* ++ * End io all reads/writes on a stripe copying ++ * read data accross from stripe to bios and ++ * decrementing region counters for writes. ++ * ++ * Processing of ios depeding on state: ++ * o no chunk error -> endio ok ++ * o degraded: ++ * - chunk error and read -> ignore to be requeued ++ * - chunk error and write -> endio ok ++ * o dead (more than parity_devs failed) and chunk_error-> endio failed ++ */ ++static void stripe_endio(int rw, struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ unsigned p = rs->set.raid_devs; ++ int write = (rw != READ); ++ ++ while (p--) { ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ struct bio_list *bl; ++ ++ BUG_ON(ChunkLocked(chunk)); ++ ++ bl = BL_CHUNK(chunk, rw); ++ if (bio_list_empty(bl)) ++ continue; ++ ++ if (unlikely(ChunkError(chunk) || !ChunkUptodate(chunk))) { ++ /* RAID set dead. */ ++ if (unlikely(RSDead(rs))) ++ bio_list_endio(stripe, bl, p, -EIO); ++ /* RAID set degraded. */ ++ else if (write) ++ bio_list_endio(stripe, bl, p, 0); ++ } else { ++ BUG_ON(!RSDegraded(rs) && ChunkDirty(chunk)); ++ bio_list_endio(stripe, bl, p, 0); ++ } ++ } ++} ++ ++/* Fail all ios hanging off all bio lists of a stripe. */ ++static void stripe_fail_io(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ unsigned p = rs->set.raid_devs; ++ ++ while (p--) { ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ int i = ARRAY_SIZE(chunk->bl); ++ ++ /* Fail all bios on all bio lists of the stripe. 
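stripe_find() above tries the hash first and only then recycles an idle stripe from the LRU list, re-keying and re-inserting it; if neither works, the caller has to bounce the bio back to the input queue. A deliberately tiny model of that lookup-or-recycle policy, in which a linear scan stands in for the hash and a reference count of zero stands in for "on the LRU list":

#include <stdio.h>

#define NENTRIES 4

struct entry { unsigned long long key; int valid, refs; };

static struct entry cache[NENTRIES];

/* Hit in the "hash" (a linear scan here), else recycle an idle entry. */
static struct entry *cache_find(unsigned long long key)
{
    for (int i = 0; i < NENTRIES; i++)
        if (cache[i].valid && cache[i].key == key)
            return &cache[i];           /* cache hit */

    for (int i = 0; i < NENTRIES; i++)
        if (!cache[i].refs) {           /* idle: re-key and reuse */
            cache[i].key = key;
            cache[i].valid = 1;
            return &cache[i];
        }
    return NULL;    /* everything busy: caller must requeue the bio */
}

int main(void)
{
    struct entry *e = cache_find(4096);

    e->refs++;                          /* like stripe_get() */
    printf("same entry on re-lookup: %d\n", e == cache_find(4096));
    return 0;
}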
*/ ++ while (i--) { ++ struct bio_list *bl = chunk->bl + i; ++ ++ if (!bio_list_empty(bl)) ++ bio_list_endio(stripe, bl, p, -EIO); ++ } ++ } ++ ++ /* Put stripe on LRU list. */ ++ BUG_ON(stripe_io_ref(stripe)); ++ BUG_ON(stripe_ref(stripe)); ++} ++ ++/* Unlock all required chunks. */ ++static void stripe_chunks_unlock(struct stripe *stripe) ++{ ++ unsigned p = RS(stripe->sc)->set.raid_devs; ++ struct stripe_chunk *chunk; ++ ++ while (p--) { ++ chunk = CHUNK(stripe, p); ++ ++ if (TestClearChunkUnlock(chunk)) ++ ClearChunkLocked(chunk); ++ } ++} ++ ++/* ++ * Queue reads and writes to a stripe by hanging ++ * their bios off the stripesets read/write lists. ++ */ ++static int stripe_queue_bio(struct raid_set *rs, struct bio *bio, ++ struct bio_list *reject) ++{ ++ struct raid_address addr; ++ struct stripe *stripe; ++ ++ stripe = stripe_find(rs, raid_address(rs, bio->bi_sector, &addr)); ++ if (stripe) { ++ int r = 0, rw = bio_data_dir(bio); ++ ++ /* Distinguish reads and writes. */ ++ bio_list_add(BL(stripe, addr.di, rw), bio); ++ ++ if (rw == READ) ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_BIOS_ADDED_READ); ++ else { ++ /* Inrement pending write count on region. */ ++ dm_rh_inc(rs->recover.rh, stripe->region); ++ r = 1; ++ ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_BIOS_ADDED_WRITE); ++ } ++ ++ /* ++ * Put on io (flush) list in case of ++ * initial bio queued to chunk. ++ */ ++ if (chunk_get(CHUNK(stripe, addr.di)) == 1) ++ stripe_flush_add(stripe); ++ ++ return r; ++ } ++ ++ /* Got no stripe from cache or failed to lock it -> reject bio. */ ++ bio_list_add(reject, bio); ++ atomic_inc(rs->stats + S_IOS_POST); /* REMOVEME: statistics. */ ++ return 0; ++} ++ ++/* ++ * Handle all stripes by handing them to the daemon, because we can't ++ * map their chunk pages to copy the data in interrupt context. ++ * ++ * We don't want to handle them here either, while interrupts are disabled. ++ */ ++ ++/* Read/write endio function for dm-io (interrupt context). */ ++static void endio(unsigned long error, void *context) ++{ ++ struct stripe_chunk *chunk = context; ++ ++ if (unlikely(error)) { ++ chunk_set(chunk, ERROR); ++ /* REMOVEME: statistics. */ ++ atomic_inc(RS(chunk->stripe->sc)->stats + S_STRIPE_ERROR); ++ } else ++ chunk_set(chunk, CLEAN); ++ ++ /* ++ * For recovery stripes, I need to reset locked locked ++ * here, because those aren't processed in do_endios(). ++ */ ++ if (unlikely(StripeRecover(chunk->stripe))) ++ ClearChunkLocked(chunk); ++ else ++ SetChunkUnlock(chunk); ++ ++ /* Indirectly puts stripe on cache's endio list via stripe_io_put(). */ ++ stripe_put_references(chunk->stripe); ++} ++ ++/* Read/Write a chunk asynchronously. */ ++static void stripe_chunk_rw(struct stripe *stripe, unsigned p) ++{ ++ struct stripe_cache *sc = stripe->sc; ++ struct raid_set *rs = RS(sc); ++ struct dm_mem_cache_object *obj = stripe->obj + p; ++ struct page_list *pl = obj->pl; ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ struct raid_dev *dev = rs->dev + p; ++ struct dm_io_region io = { ++ .bdev = dev->dev->bdev, ++ .sector = stripe->key, ++ .count = stripe->io.size, ++ }; ++ struct dm_io_request control = { ++ .bi_rw = ChunkDirty(chunk) ? WRITE : READ, ++ .mem = { ++ .type = DM_IO_PAGE_LIST, ++ .ptr.pl = pl, ++ .offset = 0, ++ }, ++ .notify = { ++ .fn = endio, ++ .context = chunk, ++ }, ++ .client = StripeRecover(stripe) ? 
rs->recover.dm_io_client : ++ sc->dm_io_client, ++ }; ++ ++ BUG_ON(ChunkLocked(chunk)); ++ BUG_ON(!ChunkUptodate(chunk) && ChunkDirty(chunk)); ++ BUG_ON(ChunkUptodate(chunk) && !ChunkDirty(chunk)); ++ ++ /* ++ * Don't rw past end of device, which can happen, because ++ * typically sectors_per_dev isn't divisible by io_size. ++ */ ++ if (unlikely(io.sector + io.count > rs->set.sectors_per_dev)) ++ io.count = rs->set.sectors_per_dev - io.sector; ++ ++ BUG_ON(!io.count); ++ io.sector += dev->start; /* Add . */ ++ if (RSRecover(rs)) ++ recover_io_count(stripe); /* Recovery io accounting. */ ++ ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + (ChunkDirty(chunk) ? S_DM_IO_WRITE : ++ S_DM_IO_READ)); ++ SetChunkLocked(chunk); ++ SetDevIoQueued(dev); ++ BUG_ON(dm_io(&control, 1, &io, NULL)); ++} ++ ++/* ++ * Write dirty or read not uptodate page lists of a stripe. ++ */ ++static int stripe_chunks_rw(struct stripe *stripe) ++{ ++ int r; ++ struct raid_set *rs = RS(stripe->sc); ++ ++ /* ++ * Increment the pending count on the stripe ++ * first, so that we don't race in endio(). ++ * ++ * An inc (IO) is needed for any chunk unless !ChunkIo(chunk): ++ * ++ * o not uptodate ++ * o dirtied by writes merged ++ * o dirtied by parity calculations ++ */ ++ r = for_each_io_dev(stripe, stripe_get_references); ++ if (r) { ++ /* Io needed: chunks are either not uptodate or dirty. */ ++ int max; /* REMOVEME: */ ++ struct stripe_cache *sc = &rs->sc; ++ ++ /* Submit actual io. */ ++ for_each_io_dev(stripe, stripe_chunk_rw); ++ ++ /* REMOVEME: statistics */ ++ max = sc_active(sc); ++ if (atomic_read(&sc->active_stripes_max) < max) ++ atomic_set(&sc->active_stripes_max, max); ++ ++ atomic_inc(rs->stats + S_FLUSHS); ++ /* END REMOVEME: statistics */ ++ } ++ ++ return r; ++} ++ ++/* Merge in all writes hence dirtying respective chunks. */ ++static void stripe_merge_writes(struct stripe *stripe) ++{ ++ unsigned p = RS(stripe->sc)->set.raid_devs; ++ ++ while (p--) { ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ struct bio_list *write = BL_CHUNK(chunk, WRITE_QUEUED); ++ ++ if (!bio_list_empty(write)) { ++ struct bio *bio; ++ struct page_list *pl = stripe->obj[p].pl; ++ ++ /* ++ * We can play with the lists without holding a lock, ++ * because it is just us accessing them anyway. ++ */ ++ bio_list_for_each(bio, write) ++ bio_copy_page_list(WRITE, stripe, pl, bio); ++ ++ bio_list_merge(BL_CHUNK(chunk, WRITE_MERGED), write); ++ bio_list_init(write); ++ chunk_set(chunk, DIRTY); ++ } ++ } ++} ++ ++/* Queue all writes to get merged. */ ++static int stripe_queue_writes(struct stripe *stripe) ++{ ++ int r = 0; ++ unsigned p = RS(stripe->sc)->set.raid_devs; ++ ++ while (p--) { ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ struct bio_list *write = BL_CHUNK(chunk, WRITE); ++ ++ if (!bio_list_empty(write)) { ++ bio_list_merge(BL_CHUNK(chunk, WRITE_QUEUED), write); ++ bio_list_init(write); ++SetChunkIo(chunk); ++ r = 1; ++ } ++ } ++ ++ return r; ++} ++ ++ ++/* Check, if a chunk gets completely overwritten. */ ++static int stripe_check_chunk_overwrite(struct stripe *stripe, unsigned p) ++{ ++ unsigned sectors = 0; ++ struct bio *bio; ++ struct bio_list *bl = BL(stripe, p, WRITE_QUEUED); ++ ++ bio_list_for_each(bio, bl) ++ sectors += bio_sectors(bio); ++ ++ BUG_ON(sectors > RS(stripe->sc)->set.io_size); ++ return sectors == RS(stripe->sc)->set.io_size; ++} ++ ++/* ++ * Avoid io on broken/reconstructed drive in order to ++ * reconstruct date on endio. 
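stripe_check_chunk_overwrite() above merely sums the sectors of all queued writes against a chunk and declares a full overwrite when they add up to the chunk io size (the driver asserts that the bios never overlap, so the sum cannot exceed it); the point of the check is that fully overwritten chunks need no read before write. A sketch of the same test with a hypothetical write list:

#include <stdio.h>

struct pending_write { unsigned sectors; struct pending_write *next; };

/* A chunk is fully overwritten iff its queued writes cover io_size sectors. */
static int fully_overwritten(const struct pending_write *q, unsigned io_size)
{
    unsigned sectors = 0;

    for (; q; q = q->next)
        sectors += q->sectors;
    return sectors == io_size;
}

int main(void)
{
    struct pending_write w2 = { 32, NULL }, w1 = { 32, &w2 };

    /* Two 32-sector writes against a 64-sector chunk io: no read needed. */
    printf("full overwrite: %d\n", fully_overwritten(&w1, 64));
    return 0;
}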
++ * ++ * (*1*) We set StripeReconstruct() in here, so that _do_endios() ++ * will trigger a reconstruct call before resetting it. ++ */ ++static int stripe_chunk_set_io_flags(struct stripe *stripe, int pr) ++{ ++ struct stripe_chunk *chunk = CHUNK(stripe, pr); ++ ++ /* ++ * Allow io on all chunks but the indexed one, ++ * because we're either degraded or prohibit it ++ * on the one for later reconstruction. ++ */ ++ /* Includes ClearChunkIo(), ClearChunkUptodate(). */ ++ stripe_chunk_invalidate(chunk); ++ stripe->idx.recover = pr; ++ SetStripeReconstruct(stripe); ++ ++ /* REMOVEME: statistics. */ ++ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO); ++ return -EPERM; ++} ++ ++/* Chunk locked/uptodate and device failed tests. */ ++static struct stripe_chunk * ++stripe_chunk_check(struct stripe *stripe, unsigned p, unsigned *chunks_uptodate) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ struct stripe_chunk *chunk = CHUNK(stripe, p); ++ ++ /* Can't access active chunks. */ ++ if (ChunkLocked(chunk)) { ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_CHUNK_LOCKED); ++ return NULL; ++ } ++ ++ /* Can't access broken devive. */ ++ if (ChunkError(chunk) || DevFailed(rs->dev + p)) ++ return NULL; ++ ++ /* Can access uptodate chunks. */ ++ if (ChunkUptodate(chunk)) { ++ (*chunks_uptodate)++; ++ return NULL; ++ } ++ ++ return chunk; ++} ++ ++/* ++ * Degraded/reconstruction mode. ++ * ++ * Check stripe state to figure which chunks don't need IO. ++ * ++ * Returns 0 for fully operational, -EPERM for degraded/resynchronizing. ++ */ ++static int stripe_check_reconstruct(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ ++ if (RSDead(rs)) { ++ ClearStripeReconstruct(stripe); ++ ClearStripeReconstructed(stripe); ++ stripe_allow_io(stripe); ++ return 0; ++ } ++ ++ /* Avoid further reconstruction setting, when already set. */ ++ if (StripeReconstruct(stripe)) { ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_RECONSTRUCT_SET); ++ return -EBUSY; ++ } ++ ++ /* Initially allow io on all chunks. */ ++ stripe_allow_io(stripe); ++ ++ /* Return if stripe is already reconstructed. */ ++ if (StripeReconstructed(stripe)) { ++ atomic_inc(rs->stats + S_RECONSTRUCTED); ++ return 0; ++ } ++ ++ /* ++ * Degraded/reconstruction mode (device failed) -> ++ * avoid io on the failed device. ++ */ ++ if (unlikely(RSDegraded(rs))) { ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_DEGRADED); ++ /* Allow IO on all devices but the dead one. */ ++ BUG_ON(rs->set.ei < 0); ++ return stripe_chunk_set_io_flags(stripe, rs->set.ei); ++ } else { ++ int sync, pi = dev_for_parity(stripe, &sync); ++ ++ /* ++ * Reconstruction mode (ie. a particular (replaced) device or ++ * some (rotating) parity chunk is being resynchronized) -> ++ * o make sure all needed chunks are read in ++ * o cope with 3/4 disk array special case where it ++ * doesn't make a difference to read in parity ++ * to xor data in/out ++ */ ++ if (RSEnforceParityCreation(rs) || !sync) { ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_NOSYNC); ++ /* Allow IO on all devs but the one to reconstruct. */ ++ return stripe_chunk_set_io_flags(stripe, pi); ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * Check, if stripe is ready to merge writes. ++ * I.e. if all chunks present to allow to merge bios. 
++ * ++ * We prohibit io on: ++ * ++ * o chunks without bios ++ * o chunks which get completely written over ++ */ ++static int stripe_merge_possible(struct stripe *stripe, int nosync) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ unsigned chunks_overwrite = 0, chunks_prohibited = 0, ++ chunks_uptodate = 0, p = rs->set.raid_devs; ++ ++ /* Walk all chunks. */ ++ while (p--) { ++ struct stripe_chunk *chunk; ++ ++ /* Prohibit io on broken devices. */ ++ if (DevFailed(rs->dev + p)) { ++ chunk = CHUNK(stripe, p); ++ goto prohibit_io; ++ } ++ ++ /* We can't optimize any further if no chunk. */ ++ chunk = stripe_chunk_check(stripe, p, &chunks_uptodate); ++ if (!chunk || nosync) ++ continue; ++ ++ /* ++ * We have a chunk, which is not uptodate. ++ * ++ * If this is not parity and we don't have ++ * reads queued, we can optimize further. ++ */ ++ if (p != stripe->idx.parity && ++ bio_list_empty(BL_CHUNK(chunk, READ)) && ++ bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED))) { ++ if (bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED))) ++ goto prohibit_io; ++ else if (RSCheckOverwrite(rs) && ++ stripe_check_chunk_overwrite(stripe, p)) ++ /* Completely overwritten chunk. */ ++ chunks_overwrite++; ++ } ++ ++ /* Allow io for chunks with bios and overwritten ones. */ ++ SetChunkIo(chunk); ++ continue; ++ ++prohibit_io: ++ /* No io for broken devices or for chunks w/o bios. */ ++ ClearChunkIo(chunk); ++ chunks_prohibited++; ++ /* REMOVEME: statistics. */ ++ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO); ++ } ++ ++ /* All data chunks will get written over. */ ++ if (chunks_overwrite == rs->set.data_devs) ++ atomic_inc(rs->stats + S_OVERWRITE); /* REMOVEME: statistics.*/ ++ else if (chunks_uptodate + chunks_prohibited < rs->set.raid_devs) { ++ /* We don't have enough chunks to merge. */ ++ atomic_inc(rs->stats + S_CANT_MERGE); /* REMOVEME: statistics.*/ ++ return -EPERM; ++ } ++ ++ /* ++ * If we have all chunks up to date or overwrite them, we ++ * just zero the parity chunk and let stripe_rw() recreate it. ++ */ ++ if (chunks_uptodate == rs->set.raid_devs || ++ chunks_overwrite == rs->set.data_devs) { ++ stripe_zero_chunk(stripe, stripe->idx.parity); ++ BUG_ON(StripeReconstruct(stripe)); ++ SetStripeReconstruct(stripe); /* Enforce xor in caller. */ ++ } else { ++ /* ++ * With less chunks, we xor parity out. ++ * ++ * (*4*) We rely on !StripeReconstruct() in chunk_must_xor(), ++ * so that only chunks with queued or merged writes ++ * are being xored. ++ */ ++ parity_xor(stripe); ++ } ++ ++ /* ++ * We do have enough chunks to merge. ++ * All chunks are uptodate or get written over. ++ */ ++ atomic_inc(rs->stats + S_CAN_MERGE); /* REMOVEME: statistics. */ ++ return 0; ++} ++ ++/* ++ * Avoid reading chunks in case we're fully operational. ++ * ++ * We prohibit io on any chunks without bios but the parity chunk. ++ */ ++static void stripe_avoid_reads(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ unsigned dummy = 0, p = rs->set.raid_devs; ++ ++ /* Walk all chunks. */ ++ while (p--) { ++ struct stripe_chunk *chunk = ++ stripe_chunk_check(stripe, p, &dummy); ++ ++ if (!chunk) ++ continue; ++ ++ /* If parity or any bios pending -> allow io. */ ++ if (chunk_ref(chunk) || p == stripe->idx.parity) ++ SetChunkIo(chunk); ++ else { ++ ClearChunkIo(chunk); ++ /* REMOVEME: statistics. */ ++ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO); ++ } ++ } ++} ++ ++/* ++ * Read/write a stripe. 
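stripe_merge_possible() above picks between two parity strategies: with every chunk uptodate (or every data chunk fully overwritten) it zeroes the parity chunk and recomputes it from scratch, otherwise it XORs the about-to-change chunks out of the existing parity and, once the writes are merged, back in. Both paths rest on the same algebra: XOR-ing old data into parity removes its contribution, XOR-ing new data adds it. A one-byte check that read-modify-write and full recomputation agree:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned char d0 = 0xaa, d1 = 0x55, d2 = 0x0f;  /* data chunks */
    unsigned char p  = d0 ^ d1 ^ d2;                /* current parity */
    unsigned char d1_new = 0x3c;                    /* chunk being rewritten */

    /* Read-modify-write: xor the old contribution out, the new one in. */
    unsigned char p_rmw  = p ^ d1 ^ d1_new;
    /* Full recomputation, as after a complete overwrite. */
    unsigned char p_full = d0 ^ d1_new ^ d2;

    assert(p_rmw == p_full);
    printf("parity 0x%02x either way\n", p_rmw);
    return 0;
}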
++ * ++ * All stripe read/write activity goes through this function ++ * unless recovery, which has to call stripe_chunk_rw() directly. ++ * ++ * Make sure we don't try already merged stripes in order ++ * to avoid data corruption. ++ * ++ * Check the state of the RAID set and if degraded (or ++ * resynchronizing for reads), read in all other chunks but ++ * the one on the dead/resynchronizing device in order to be ++ * able to reconstruct the missing one in _do_endios(). ++ * ++ * Can be called on active stripes in order ++ * to dispatch new io on inactive chunks. ++ * ++ * States to cover: ++ * o stripe to read and/or write ++ * o stripe with error to reconstruct ++ */ ++static int stripe_rw(struct stripe *stripe) ++{ ++ int nosync, r; ++ struct raid_set *rs = RS(stripe->sc); ++ ++ /* ++ * Check, if a chunk needs to be reconstructed ++ * because of a degraded set or a region out of sync. ++ */ ++ nosync = stripe_check_reconstruct(stripe); ++ switch (nosync) { ++ case -EBUSY: ++ return 0; /* Wait for stripe reconstruction to finish. */ ++ case -EPERM: ++ goto io; ++ } ++ ++ /* ++ * If we don't have merged writes pending, we can schedule ++ * queued writes to be merged next without corrupting data. ++ */ ++ if (!StripeMerged(stripe)) { ++ r = stripe_queue_writes(stripe); ++ if (r) ++ /* Writes got queued -> flag RBW. */ ++ SetStripeRBW(stripe); ++ } ++ ++ /* ++ * Merge all writes hanging off uptodate/overwritten ++ * chunks of the stripe. ++ */ ++ if (StripeRBW(stripe)) { ++ r = stripe_merge_possible(stripe, nosync); ++ if (!r) { /* Merge possible. */ ++ struct stripe_chunk *chunk; ++ ++ /* ++ * I rely on valid parity in order ++ * to xor a fraction of chunks out ++ * of parity and back in. ++ */ ++ stripe_merge_writes(stripe); /* Merge writes in. */ ++ parity_xor(stripe); /* Update parity. */ ++ ClearStripeReconstruct(stripe); /* Reset xor enforce. */ ++ SetStripeMerged(stripe); /* Writes merged. */ ++ ClearStripeRBW(stripe); /* Disable RBW. */ ++ ++ /* ++ * REMOVEME: sanity check on parity chunk ++ * states after writes got merged. ++ */ ++ chunk = CHUNK(stripe, stripe->idx.parity); ++ BUG_ON(ChunkLocked(chunk)); ++ BUG_ON(!ChunkUptodate(chunk)); ++ BUG_ON(!ChunkDirty(chunk)); ++ BUG_ON(!ChunkIo(chunk)); ++ } ++ } else if (!nosync && !StripeMerged(stripe)) ++ /* Read avoidance if not degraded/resynchronizing/merged. */ ++ stripe_avoid_reads(stripe); ++ ++io: ++ /* Now submit any reads/writes for non-uptodate or dirty chunks. */ ++ r = stripe_chunks_rw(stripe); ++ if (!r) { ++ /* ++ * No io submitted because of chunk io ++ * prohibited or locked chunks/failed devices ++ * -> push to end io list for processing. ++ */ ++ stripe_endio_push(stripe); ++ atomic_inc(rs->stats + S_NO_RW); /* REMOVEME: statistics. */ ++ } ++ ++ return r; ++} ++ ++/* ++ * Recovery functions ++ */ ++/* Read a stripe off a raid set for recovery. */ ++static int stripe_recover_read(struct stripe *stripe, int pi) ++{ ++ BUG_ON(stripe_io_ref(stripe)); ++ ++ /* Invalidate all chunks so that they get read in. */ ++ stripe_chunks_invalidate(stripe); ++ stripe_allow_io(stripe); /* Allow io on all recovery chunks. */ ++ ++ /* ++ * If we are reconstructing a perticular device, we can avoid ++ * reading the respective chunk in, because we're going to ++ * reconstruct it anyway. ++ * ++ * We can't do that for resynchronization of rotating parity, ++ * because the recovery stripe chunk size is typically larger ++ * than the sets chunk size. 
++ */ ++ if (pi > -1) ++ ClearChunkIo(CHUNK(stripe, pi)); ++ ++ return stripe_chunks_rw(stripe); ++} ++ ++/* Write a stripe to a raid set for recovery. */ ++static int stripe_recover_write(struct stripe *stripe, int pi) ++{ ++ BUG_ON(stripe_io_ref(stripe)); ++ ++ /* ++ * If this is a reconstruct of a particular device, then ++ * reconstruct the respective chunk, else create parity chunk. ++ */ ++ if (pi > -1) { ++ stripe_zero_chunk(stripe, pi); ++ common_xor(stripe, stripe->io.size, 0, pi); ++ chunk_set(CHUNK(stripe, pi), DIRTY); ++ } else ++ parity_xor(stripe); ++ ++ return stripe_chunks_rw(stripe); ++} ++ ++/* Read/write a recovery stripe. */ ++static int stripe_recover_rw(struct stripe *stripe) ++{ ++ int r = 0, sync = 0; ++ ++ /* Read/write flip-flop. */ ++ if (TestClearStripeRBW(stripe)) { ++ SetStripeMerged(stripe); ++ stripe->key = stripe->recover->pos; ++ r = stripe_recover_read(stripe, dev_for_parity(stripe, &sync)); ++ BUG_ON(!r); ++ } else if (TestClearStripeMerged(stripe)) { ++ r = stripe_recover_write(stripe, dev_for_parity(stripe, &sync)); ++ BUG_ON(!r); ++ } ++ ++ BUG_ON(sync); ++ return r; ++} ++ ++/* Recover bandwidth available ?. */ ++static int recover_bandwidth(struct raid_set *rs) ++{ ++ int r, work; ++ ++ /* On reset or when bios delayed -> allow recovery. */ ++ r = recover_io_reset(rs); ++ if (r || RSBandwidth(rs)) ++ goto out; ++ ++ work = atomic_read(rs->recover.io_count + IO_WORK); ++ if (work) { ++ /* Pay attention to larger recover stripe size. */ ++ int recover = atomic_read(rs->recover.io_count + IO_RECOVER) * ++ rs->recover.io_size / rs->set.io_size; ++ ++ /* ++ * Don't use more than given bandwidth ++ * of the work io for recovery. ++ */ ++ if (recover > work / rs->recover.bandwidth_work) { ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_NO_BANDWIDTH); ++ return 0; ++ } ++ } ++ ++out: ++ atomic_inc(rs->stats + S_BANDWIDTH); /* REMOVEME: statistics. */ ++ return 1; ++} ++ ++/* Try to get a region to recover. */ ++static int stripe_recover_get_region(struct stripe *stripe) ++{ ++ struct raid_set *rs = RS(stripe->sc); ++ struct recover *rec = &rs->recover; ++ struct recover_addr *addr = stripe->recover; ++ struct dm_dirty_log *dl = rec->dl; ++ struct dm_rh_client *rh = rec->rh; ++ ++ BUG_ON(!dl); ++ BUG_ON(!rh); ++ ++ /* Return, that we have region first to finish it during suspension. */ ++ if (addr->reg) ++ return 1; ++ ++ if (RSSuspend(rs)) ++ return -EPERM; ++ ++ if (dl->type->get_sync_count(dl) >= rec->nr_regions) ++ return -ENOENT; ++ ++ /* If we don't have enough bandwidth, we don't proceed recovering. */ ++ if (!recover_bandwidth(rs)) ++ return -EAGAIN; ++ ++ /* Start quiescing a region. */ ++ dm_rh_recovery_prepare(rh); ++ addr->reg = dm_rh_recovery_start(rh); ++ if (!addr->reg) ++ return -EAGAIN; ++ ++ addr->pos = dm_rh_region_to_sector(rh, dm_rh_get_region_key(addr->reg)); ++ addr->end = addr->pos + dm_rh_get_region_size(rh); ++ ++ /* ++ * Take one global io reference out for the ++ * whole region, which is going to be released ++ * when the region is completely done with. ++ */ ++ io_get(rs); ++ return 0; ++} ++ ++/* Update region hash state. 
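recover_bandwidth() above throttles resynchronization by comparing, per one-second window, the recovery io count (scaled to application-io units, since recovery stripes are larger) against a configured fraction of the application io count. A standalone sketch of that comparison; the counts and the 1/10 share are example values, and the real function additionally short-circuits on the window reset and on the RSBandwidth flag:

#include <stdio.h>

/*
 * Allow another recovery io only if, scaled to application-io units,
 * recovery stays within 1/bandwidth_work of the application traffic
 * seen in the current window.
 */
static int recovery_allowed(unsigned work_ios, unsigned recover_ios,
                            unsigned recover_io_size, unsigned io_size,
                            unsigned bandwidth_work)
{
    unsigned scaled = recover_ios * recover_io_size / io_size;

    if (!work_ios)
        return 1;       /* idle set: recover at full speed */
    return scaled <= work_ios / bandwidth_work;
}

int main(void)
{
    /* 100 application ios; 2 recovery ios of 256 sectors vs. 64-sector ios
     * and a 1/10 share -> 8 <= 10, recovery may proceed. */
    printf("%d\n", recovery_allowed(100, 2, 256, 64, 10));
    /* 3 recovery ios scale to 12 > 10 -> throttled. */
    printf("%d\n", recovery_allowed(100, 3, 256, 64, 10));
    return 0;
}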
*/ ++enum recover_type { REC_FAILURE = 0, REC_SUCCESS = 1 }; ++static void recover_rh_update(struct stripe *stripe, enum recover_type success) ++{ ++ struct recover_addr *addr = stripe->recover; ++ struct raid_set *rs = RS(stripe->sc); ++ struct recover *rec = &rs->recover; ++ ++ if (!addr->reg) { ++ DMERR("%s- Called w/o region", __func__); ++ return; ++ } ++ ++ dm_rh_recovery_end(addr->reg, success); ++ if (success) ++ rec->nr_regions_recovered++; ++ ++ addr->reg = NULL; ++ ++ /* ++ * Completely done with this region -> ++ * release the 1st io reference. ++ */ ++ io_put(rs); ++} ++ ++/* Set start of recovery state. */ ++static void set_start_recovery(struct raid_set *rs) ++{ ++ /* Initialize recovery. */ ++ rs->recover.start_jiffies = jiffies; ++ rs->recover.end_jiffies = 0; ++} ++ ++/* Set end of recovery state. */ ++static void set_end_recovery(struct raid_set *rs) ++{ ++ ClearRSRecover(rs); ++/* Achtung: nicht mehr zurück setzten -> 'i' belibt in status output und userpace könnte sich darauf verlassen, das es verschiwndet!!!! */ ++ rs->set.dev_to_init = -1; ++ ++ /* Check for jiffies overrun. */ ++ rs->recover.end_jiffies = jiffies; ++ if (rs->recover.end_jiffies < rs->recover.start_jiffies) ++ rs->recover.end_jiffies = ~0; ++} ++ ++/* Handle recovery on one recovery stripe. */ ++static int _do_recovery(struct stripe *stripe) ++{ ++ int r; ++ struct raid_set *rs = RS(stripe->sc); ++ struct recover_addr *addr = stripe->recover; ++ ++ /* If recovery is active -> return. */ ++ if (stripe_io_ref(stripe)) ++ return 1; ++ ++ /* IO error is fatal for recovery -> stop it. */ ++ if (unlikely(StripeError(stripe))) ++ goto err; ++ ++ /* Recovery end required. */ ++ if (unlikely(RSDegraded(rs))) ++ goto err; ++ ++ /* Get a region to recover. */ ++ r = stripe_recover_get_region(stripe); ++ switch (r) { ++ case 0: /* Got a new region: flag initial read before write. */ ++ SetStripeRBW(stripe); ++ case 1: /* Have a region in the works. */ ++ break; ++ case -EAGAIN: ++ /* No bandwidth/quiesced region yet, try later. */ ++ if (!io_ref(rs)) ++ wake_do_raid_delayed(rs, HZ / 4); ++ case -EPERM: ++ /* Suspend. */ ++ return 1; ++ case -ENOENT: /* No more regions to recover. */ ++ schedule_work(&rs->io.ws_do_table_event); ++ return 0; ++ default: ++ BUG(); ++ } ++ ++ /* Read/write a recover stripe. */ ++ r = stripe_recover_rw(stripe); ++ if (r) ++ /* IO initiated. */ ++ return 1; ++ ++ /* Read and write finished-> update recovery position within region. */ ++ addr->pos += stripe->io.size; ++ ++ /* If we're at end of region, update region hash. */ ++ if (addr->pos >= addr->end || ++ addr->pos >= rs->set.sectors_per_dev) ++ recover_rh_update(stripe, REC_SUCCESS); ++ else ++ /* Prepare to read next region segment. */ ++ SetStripeRBW(stripe); ++ ++ /* Schedule myself for another round... */ ++ wake_do_raid(rs); ++ return 1; ++ ++err: ++ /* FIXME: rather try recovering other regions on error? */ ++ rs_check_degrade(stripe); ++ recover_rh_update(stripe, REC_FAILURE); ++ ++ /* Check state of partially recovered array. */ ++ if (RSDegraded(rs) && !RSDead(rs) && ++ rs->set.dev_to_init != -1 && ++ rs->set.ei != rs->set.dev_to_init) { ++ /* Broken drive != drive to recover -> FATAL. 
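_do_recovery() above drives each recovery stripe as a small state machine: grab a quiesced region, then alternate a read pass and a write pass (the StripeRBW flag) while advancing the position by the recovery io size until the region ends, at which point the region hash is updated and the next region is fetched. A compact model of that read/write flip-flop over one hypothetical region:

#include <stdio.h>

/* One recovery region walked in io_size steps, read pass then write pass. */
int main(void)
{
    unsigned long long pos = 0, end = 1024;     /* sectors (example region) */
    unsigned io_size = 256;
    int rbw = 1;                                /* 1 = read next, 0 = write back */

    while (pos < end) {
        if (rbw) {
            printf("read  stripe @%llu\n", pos);
            rbw = 0;            /* cf. TestClearStripeRBW() */
        } else {
            printf("write stripe @%llu\n", pos);
            rbw = 1;
            pos += io_size;     /* advance once the write pass finished */
        }
    }
    printf("region done -> dm_rh_recovery_end()\n");
    return 0;
}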
*/ ++ SetRSDead(rs); ++ DMERR("FATAL: failed device != device to initialize -> " ++ "RAID set broken"); ++ } ++ ++ if (StripeError(stripe) || RSDegraded(rs)) { ++ char buf[BDEVNAME_SIZE]; ++ ++ DMERR("stopping recovery due to " ++ "ERROR on /dev/%s, stripe at offset %llu", ++ bdevname(rs->dev[rs->set.ei].dev->bdev, buf), ++ (unsigned long long) stripe->key); ++ ++ } ++ ++ /* Make sure, that all quiesced regions get released. */ ++ while (addr->reg) { ++ dm_rh_recovery_end(addr->reg, -EIO); ++ addr->reg = dm_rh_recovery_start(rs->recover.rh); ++ } ++ ++ return 0; ++} ++ ++/* Called by main io daemon to recover regions. */ ++static int do_recovery(struct raid_set *rs) ++{ ++ if (RSRecover(rs)) { ++ int r = 0; ++ struct stripe *stripe; ++ ++ list_for_each_entry(stripe, &rs->recover.stripes, ++ lists[LIST_RECOVER]) ++ r += _do_recovery(stripe); ++ ++ if (r) ++ return r; ++ ++ set_end_recovery(rs); ++ stripe_recover_free(rs); ++ } ++ ++ return 0; ++} ++ ++/* ++ * END recovery functions ++ */ ++ ++/* End io process all stripes handed in by endio() callback. */ ++static void _do_endios(struct raid_set *rs, struct stripe *stripe, ++ struct list_head *flush_list) ++{ ++ /* First unlock all required chunks. */ ++ stripe_chunks_unlock(stripe); ++ ++ /* ++ * If an io error on a stripe occured, degrade the RAID set ++ * and try to endio as many bios as possible. If any bios can't ++ * be endio processed, requeue the stripe (stripe_ref() != 0). ++ */ ++ if (TestClearStripeError(stripe)) { ++ /* ++ * FIXME: if read, rewrite the failed chunk after reconstruction ++ * in order to trigger disk bad sector relocation. ++ */ ++ rs_check_degrade(stripe); /* Resets ChunkError(). */ ++ ClearStripeReconstruct(stripe); ++ ClearStripeReconstructed(stripe); ++ ++ /* ++ * FIXME: if write, don't endio writes in flight and don't ++ * allow for new writes until userspace has updated ++ * its metadata. ++ */ ++ } ++ ++ /* Got to reconstruct a missing chunk. */ ++ if (StripeReconstruct(stripe)) { ++ /* ++ * (*2*) We use StripeReconstruct() to allow for ++ * all chunks to be xored into the reconstructed ++ * one (see chunk_must_xor()). ++ */ ++ stripe_reconstruct(stripe); ++ ++ /* ++ * (*3*) Now we reset StripeReconstruct() and flag ++ * StripeReconstructed() to show to stripe_rw(), ++ * that we have reconstructed a missing chunk. ++ */ ++ ClearStripeReconstruct(stripe); ++ SetStripeReconstructed(stripe); ++ ++ /* FIXME: reschedule to be written in case of read. */ ++ /* if (!RSDead && RSDegraded(rs) !StripeRBW(stripe)) { ++ chunk_set(CHUNK(stripe, stripe->idx.recover), DIRTY); ++ stripe_chunks_rw(stripe); ++ } */ ++ ++ stripe->idx.recover = -1; ++ } ++ ++ /* ++ * Now that we eventually got a complete stripe, we ++ * can process the rest of the end ios on reads. ++ */ ++ stripe_endio(READ, stripe); ++ ++ /* End io all merged writes if not prohibited. */ ++ if (!RSProhibitWrites(rs) && StripeMerged(stripe)) { ++ ClearStripeMerged(stripe); ++ stripe_endio(WRITE_MERGED, stripe); ++ } ++ ++ /* If RAID set is dead -> fail any ios to dead drives. */ ++ if (RSDead(rs)) { ++ if (!TestSetRSDeadEndioMessage(rs)) ++ DMERR("RAID set dead: failing ios to dead devices"); ++ ++ stripe_fail_io(stripe); ++ } ++ ++ /* ++ * We have stripe references still, ++ * beacuse of read before writes or IO errors -> ++ * got to put on flush list for processing. 
++ */ ++ if (stripe_ref(stripe)) { ++ BUG_ON(!list_empty(stripe->lists + LIST_LRU)); ++ list_add_tail(stripe->lists + LIST_FLUSH, flush_list); ++ atomic_inc(rs->stats + S_REQUEUE); /* REMOVEME: statistics. */ ++ } else ++ stripe_lru_add(stripe); ++} ++ ++/* Pop any endio stripes off of the endio list and belabour them. */ ++static void do_endios(struct raid_set *rs) ++{ ++ struct stripe_cache *sc = &rs->sc; ++ struct stripe *stripe; ++ /* IO flush list for sorted requeued stripes. */ ++ struct list_head flush_list; ++ ++ INIT_LIST_HEAD(&flush_list); ++ ++ while ((stripe = stripe_endio_pop(sc))) { ++ /* Avoid endio on stripes with newly io'ed chunks. */ ++ if (!stripe_io_ref(stripe)) ++ _do_endios(rs, stripe, &flush_list); ++ } ++ ++ /* ++ * Insert any requeued stripes in the proper ++ * order at the beginning of the io (flush) list. ++ */ ++ list_splice(&flush_list, sc->lists + LIST_FLUSH); ++} ++ ++/* Flush any stripes on the io list. */ ++static int do_flush(struct raid_set *rs) ++{ ++ int r = 0; ++ struct stripe *stripe; ++ ++ while ((stripe = stripe_io_pop(&rs->sc))) ++ r += stripe_rw(stripe); /* Read/write stripe. */ ++ ++ return r; ++} ++ ++/* Stripe cache resizing. */ ++static void do_sc_resize(struct raid_set *rs) ++{ ++ unsigned set = atomic_read(&rs->sc.stripes_to_set); ++ ++ if (set) { ++ unsigned cur = atomic_read(&rs->sc.stripes); ++ int r = (set > cur) ? sc_grow(&rs->sc, set - cur, SC_GROW) : ++ sc_shrink(&rs->sc, cur - set); ++ ++ /* Flag end of resizeing if ok. */ ++ if (!r) ++ atomic_set(&rs->sc.stripes_to_set, 0); ++ } ++} ++ ++/* ++ * Process all ios ++ * ++ * We do different things with the io depending ++ * on the state of the region that it is in: ++ * ++ * o reads: hang off stripe cache or postpone if full ++ * ++ * o writes: ++ * ++ * CLEAN/DIRTY/NOSYNC: increment pending and hang io off stripe's stripe set. ++ * In case stripe cache is full or busy, postpone the io. ++ * ++ * RECOVERING: delay the io until recovery of the region completes. ++ * ++ */ ++static void do_ios(struct raid_set *rs, struct bio_list *ios) ++{ ++ int r; ++ unsigned flush = 0, delay = 0; ++ sector_t sector; ++ struct dm_rh_client *rh = rs->recover.rh; ++ struct bio *bio; ++ struct bio_list reject; ++ ++ bio_list_init(&reject); ++ ++ /* ++ * Classify each io: ++ * o delay writes to recovering regions (let reads go through) ++ * o queue io to all other regions ++ */ ++ while ((bio = bio_list_pop(ios))) { ++ /* ++ * In case we get a barrier bio, push it back onto ++ * the input queue unless all work queues are empty ++ * and the stripe cache is inactive. ++ */ ++ if (unlikely(bio_empty_barrier(bio))) { ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + S_BARRIER); ++ if (delay || ++ !list_empty(rs->sc.lists + LIST_FLUSH) || ++ !bio_list_empty(&reject) || ++ sc_active(&rs->sc)) { ++ bio_list_push(ios, bio); ++ break; ++ } ++ } ++ ++ /* If writes prohibited because of failures -> postpone. */ ++ if (RSProhibitWrites(rs) && bio_data_dir(bio) == WRITE) { ++ bio_list_add(&reject, bio); ++ continue; ++ } ++ ++ /* Check for recovering regions. */ ++ sector = _sector(rs, bio); ++ r = region_state(rs, sector, DM_RH_RECOVERING); ++ if (unlikely(r)) { ++ delay++; ++ /* Wait writing to recovering regions. */ ++ dm_rh_delay_by_region(rh, bio, ++ dm_rh_sector_to_region(rh, ++ sector)); ++ /* REMOVEME: statistics.*/ ++ atomic_inc(rs->stats + S_DELAYED_BIOS); ++ atomic_inc(rs->stats + S_SUM_DELAYED_BIOS); ++ ++ /* Force bandwidth tests in recovery. 
*/ ++ SetRSBandwidth(rs); ++ } else { ++ /* ++ * Process ios to non-recovering regions by queueing ++ * them to stripes (does dm_rh_inc()) for writes). ++ */ ++ flush += stripe_queue_bio(rs, bio, &reject); ++ } ++ } ++ ++ if (flush) { ++ /* FIXME: better error handling. */ ++ r = dm_rh_flush(rh); /* Writes got queued -> flush dirty log. */ ++ if (r) ++ DMERR_LIMIT("dirty log flush"); ++ } ++ ++ /* Merge any rejected bios back to the head of the input list. */ ++ bio_list_merge_head(ios, &reject); ++} ++ ++/* Unplug: let any queued io role on the sets devices. */ ++static void do_unplug(struct raid_set *rs) ++{ ++ struct raid_dev *dev = rs->dev + rs->set.raid_devs; ++ ++ while (dev-- > rs->dev) { ++ /* Only call any device unplug function, if io got queued. */ ++ if (TestClearDevIoQueued(dev)) ++ blk_unplug(bdev_get_queue(dev->dev->bdev)); ++ } ++} ++ ++/* Send an event in case we're getting too busy. */ ++static void do_busy_event(struct raid_set *rs) ++{ ++ if (sc_busy(rs)) { ++ if (!TestSetRSScBusy(rs)) ++ schedule_work(&rs->io.ws_do_table_event); ++ } else ++ ClearRSScBusy(rs); ++} ++ ++/* Throw an event. */ ++static void do_table_event(struct work_struct *ws) ++{ ++ struct raid_set *rs = container_of(ws, struct raid_set, ++ io.ws_do_table_event); ++ dm_table_event(rs->ti->table); ++} ++ ++ ++/*----------------------------------------------------------------- ++ * RAID daemon ++ *---------------------------------------------------------------*/ ++/* ++ * o belabour all end ios ++ * o update the region hash states ++ * o optionally shrink the stripe cache ++ * o optionally do recovery ++ * o unplug any component raid devices with queued bios ++ * o grab the input queue ++ * o work an all requeued or new ios and perform stripe cache flushs ++ * o unplug any component raid devices with queued bios ++ * o check, if the stripe cache gets too busy and throw an event if so ++ */ ++static void do_raid(struct work_struct *ws) ++{ ++ int r; ++ struct raid_set *rs = container_of(ws, struct raid_set, ++ io.dws_do_raid.work); ++ struct bio_list *ios = &rs->io.work, *ios_in = &rs->io.in; ++ ++ /* ++ * We always need to end io, so that ios can get errored in ++ * case the set failed and the region counters get decremented ++ * before we update region hash states and go any further. ++ */ ++ do_endios(rs); ++ dm_rh_update_states(rs->recover.rh, 1); ++ ++ /* ++ * Now that we've end io'd, which may have put stripes on the LRU list ++ * to allow for shrinking, we resize the stripe cache if requested. ++ */ ++ do_sc_resize(rs); ++ ++ /* Try to recover regions. */ ++ r = do_recovery(rs); ++ if (r) ++ do_unplug(rs); /* Unplug the sets device queues. */ ++ ++ /* Quickly grab all new ios queued and add them to the work list. */ ++ mutex_lock(&rs->io.in_lock); ++ bio_list_merge(ios, ios_in); ++ bio_list_init(ios_in); ++ mutex_unlock(&rs->io.in_lock); ++ ++ if (!bio_list_empty(ios)) ++ do_ios(rs, ios); /* Got ios to work into the cache. */ ++ ++ r = do_flush(rs); /* Flush any stripes on io list. */ ++ if (r) ++ do_unplug(rs); /* Unplug the sets device queues. */ ++ ++ do_busy_event(rs); /* Check if we got too busy. */ ++} ++ ++/* ++ * Callback for region hash to dispatch ++ * delayed bios queued to recovered regions ++ * (gets called via dm_rh_update_states()). ++ */ ++static void dispatch_delayed_bios(void *context, struct bio_list *bl) ++{ ++ struct raid_set *rs = context; ++ struct bio *bio; ++ ++ /* REMOVEME: statistics; decrement pending delayed bios counter. 
*/ ++ bio_list_for_each(bio, bl) ++ atomic_dec(rs->stats + S_DELAYED_BIOS); ++ ++ /* Merge region hash private list to work list. */ ++ bio_list_merge_head(&rs->io.work, bl); ++ bio_list_init(bl); ++ ClearRSBandwidth(rs); ++} ++ ++/************************************************************* ++ * Constructor helpers ++ *************************************************************/ ++/* Calculate MB/sec. */ ++static unsigned mbpers(struct raid_set *rs, unsigned io_size) ++{ ++ return to_bytes((rs->xor.speed * rs->set.data_devs * ++ io_size * HZ / XOR_SPEED_TICKS) >> 10) >> 10; ++} ++ ++/* ++ * Discover fastest xor algorithm and # of chunks combination. ++ */ ++/* Calculate speed of particular algorithm and # of chunks. */ ++static unsigned xor_speed(struct stripe *stripe) ++{ ++ int ticks = XOR_SPEED_TICKS; ++ unsigned p = RS(stripe->sc)->set.raid_devs, r = 0; ++ unsigned long j; ++ ++ /* Set uptodate so that common_xor()->xor() will belabour chunks. */ ++ while (p--) ++ SetChunkUptodate(CHUNK(stripe, p)); ++ ++ /* Wait for next tick. */ ++ for (j = jiffies; j == jiffies; ); ++ ++ /* Do xors for a few ticks. */ ++ while (ticks--) { ++ unsigned xors = 0; ++ ++ for (j = jiffies; j == jiffies; ) { ++ mb(); ++ common_xor(stripe, stripe->io.size, 0, 0); ++ mb(); ++ xors++; ++ mb(); ++ } ++ ++ if (xors > r) ++ r = xors; ++ } ++ ++ return r; ++} ++ ++/* Define for xor multi recovery stripe optimization runs. */ ++#define DMRAID45_XOR_TEST ++ ++/* Optimize xor algorithm for this RAID set. */ ++static unsigned xor_optimize(struct raid_set *rs) ++{ ++ unsigned chunks_max = 2, speed_max = 0; ++ struct xor_func *f = ARRAY_END(xor_funcs), *f_max = NULL; ++ struct stripe *stripe; ++ unsigned io_size = 0, speed_hm = 0, speed_min = ~0, speed_xor_blocks = 0; ++ ++ BUG_ON(list_empty(&rs->recover.stripes)); ++#ifndef DMRAID45_XOR_TEST ++ stripe = list_first_entry(&rs->recover.stripes, struct stripe, ++ lists[LIST_RECOVER]); ++#endif ++ ++ /* Try all xor functions. */ ++ while (f-- > xor_funcs) { ++ unsigned speed; ++ ++#ifdef DMRAID45_XOR_TEST ++ list_for_each_entry(stripe, &rs->recover.stripes, ++ lists[LIST_RECOVER]) { ++ io_size = stripe->io.size; ++#endif ++ ++ /* Set actual xor function for common_xor(). */ ++ rs->xor.f = f; ++ rs->xor.chunks = (f->f == xor_blocks_wrapper ? ++ (MAX_XOR_BLOCKS + 1) : ++ XOR_CHUNKS_MAX); ++ if (rs->xor.chunks > rs->set.raid_devs) ++ rs->xor.chunks = rs->set.raid_devs; ++ ++ for ( ; rs->xor.chunks > 1; rs->xor.chunks--) { ++ speed = xor_speed(stripe); ++ ++#ifdef DMRAID45_XOR_TEST ++ if (f->f == xor_blocks_wrapper) { ++ if (speed > speed_xor_blocks) ++ speed_xor_blocks = speed; ++ } else if (speed > speed_hm) ++ speed_hm = speed; ++ ++ if (speed < speed_min) ++ speed_min = speed; ++#endif ++ ++ if (speed > speed_max) { ++ speed_max = speed; ++ chunks_max = rs->xor.chunks; ++ f_max = f; ++ } ++ } ++#ifdef DMRAID45_XOR_TEST ++ } ++#endif ++ } ++ ++ /* Memorize optimal parameters. */ ++ rs->xor.f = f_max; ++ rs->xor.chunks = chunks_max; ++#ifdef DMRAID45_XOR_TEST ++ DMINFO("%s stripes=%u/size=%u min=%u xor_blocks=%u hm=%u max=%u", ++ speed_max == speed_hm ? "HM" : "NB", ++ rs->recover.recovery_stripes, io_size, speed_min, ++ speed_xor_blocks, speed_hm, speed_max); ++#endif ++ return speed_max; ++} ++ ++/* ++ * Allocate a RAID context (a RAID set) ++ */ ++/* Structure for variable RAID parameters. 
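xor_speed() above counts how many common_xor() passes over one stripe io fit into a jiffy, and mbpers() turns that into the MB/s figure printed at construction time. The essential conversion is sectors-per-jiffy times 512 bytes times jiffies-per-second; the driver folds the data-device count into the same product and divides by its XOR_SPEED_TICKS sampling constant, which the sketch below (with an assumed HZ and example numbers) glosses over:

#include <stdio.h>

#define HZ            250u     /* assumed jiffy rate */
#define SECTOR_BYTES  512u

/* Convert "sectors xor-ed per jiffy" into MB/s. */
static unsigned mb_per_sec(unsigned sectors_per_jiffy)
{
    unsigned long long bytes = (unsigned long long)sectors_per_jiffy *
                               SECTOR_BYTES * HZ;
    return (unsigned)(bytes >> 20);
}

int main(void)
{
    /* e.g. 40 xor passes over a 64-sector io across 4 data disks per jiffy */
    unsigned sectors = 40 * 64 * 4;

    printf("~%u MB/s\n", mb_per_sec(sectors));  /* ~1250 MB/s */
    return 0;
}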
*/ ++struct variable_parms { ++ int bandwidth; ++ int bandwidth_parm; ++ int chunk_size; ++ int chunk_size_parm; ++ int io_size; ++ int io_size_parm; ++ int stripes; ++ int stripes_parm; ++ int recover_io_size; ++ int recover_io_size_parm; ++ int raid_parms; ++ int recovery; ++ int recovery_stripes; ++ int recovery_stripes_parm; ++}; ++ ++static struct raid_set * ++context_alloc(struct raid_type *raid_type, struct variable_parms *p, ++ unsigned raid_devs, sector_t sectors_per_dev, ++ struct dm_target *ti, unsigned dl_parms, char **argv) ++{ ++ int r; ++ size_t len; ++ sector_t region_size, ti_len; ++ struct raid_set *rs = NULL; ++ struct dm_dirty_log *dl; ++ struct recover *rec; ++ ++ /* ++ * Create the dirty log ++ * ++ * We need to change length for the dirty log constructor, ++ * because we want an amount of regions for all stripes derived ++ * from the single device size, so that we can keep region ++ * size = 2^^n independant of the number of devices ++ */ ++ ti_len = ti->len; ++ ti->len = sectors_per_dev; ++ dl = dm_dirty_log_create(argv[0], ti, dl_parms, argv + 2); ++ ti->len = ti_len; ++ if (!dl) ++ goto bad_dirty_log; ++ ++ /* Chunk size *must* be smaller than region size. */ ++ region_size = dl->type->get_region_size(dl); ++ if (p->chunk_size > region_size) ++ goto bad_chunk_size; ++ ++ /* Recover io size *must* be smaller than region size as well. */ ++ if (p->recover_io_size > region_size) ++ goto bad_recover_io_size; ++ ++ /* Size and allocate the RAID set structure. */ ++ len = sizeof(*rs->data) + sizeof(*rs->dev); ++ if (dm_array_too_big(sizeof(*rs), len, raid_devs)) ++ goto bad_array; ++ ++ len = sizeof(*rs) + raid_devs * len; ++ rs = kzalloc(len, GFP_KERNEL); ++ if (!rs) ++ goto bad_alloc; ++ ++ rec = &rs->recover; ++ atomic_set(&rs->io.in_process, 0); ++ atomic_set(&rs->io.in_process_max, 0); ++ rec->io_size = p->recover_io_size; ++ ++ /* Pointer to data array. */ ++ rs->data = (unsigned long **) ++ ((void *) rs->dev + raid_devs * sizeof(*rs->dev)); ++ rec->dl = dl; ++ rs->set.raid_devs = raid_devs; ++ rs->set.data_devs = raid_devs - raid_type->parity_devs; ++ rs->set.raid_type = raid_type; ++ ++ rs->set.raid_parms = p->raid_parms; ++ rs->set.chunk_size_parm = p->chunk_size_parm; ++ rs->set.io_size_parm = p->io_size_parm; ++ rs->sc.stripes_parm = p->stripes_parm; ++ rec->io_size_parm = p->recover_io_size_parm; ++ rec->bandwidth_parm = p->bandwidth_parm; ++ rec->recovery = p->recovery; ++ rec->recovery_stripes = p->recovery_stripes; ++ ++ /* ++ * Set chunk and io size and respective shifts ++ * (used to avoid divisions) ++ */ ++ rs->set.chunk_size = p->chunk_size; ++ rs->set.chunk_shift = ffs(p->chunk_size) - 1; ++ ++ rs->set.io_size = p->io_size; ++ rs->set.io_mask = p->io_size - 1; ++ /* Mask to adjust address key in case io_size != chunk_size. */ ++ rs->set.io_inv_mask = (p->chunk_size - 1) & ~rs->set.io_mask; ++ ++ rs->set.sectors_per_dev = sectors_per_dev; ++ ++ rs->set.ei = -1; /* Indicate no failed device. */ ++ atomic_set(&rs->set.failed_devs, 0); ++ ++ rs->ti = ti; ++ ++ atomic_set(rec->io_count + IO_WORK, 0); ++ atomic_set(rec->io_count + IO_RECOVER, 0); ++ ++ /* Initialize io lock and queues. */ ++ mutex_init(&rs->io.in_lock); ++ mutex_init(&rs->io.xor_lock); ++ bio_list_init(&rs->io.in); ++ bio_list_init(&rs->io.work); ++ ++ init_waitqueue_head(&rs->io.suspendq); /* Suspend waiters (dm-io). 
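++	 * Waiters block here until all in-flight ios have drained
++	 * (see wait_ios() called from raid_presuspend()).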
*/ ++ ++ rec->nr_regions = dm_sector_div_up(sectors_per_dev, region_size); ++ rec->rh = dm_region_hash_create(rs, dispatch_delayed_bios, ++ wake_dummy, wake_do_raid, 0, p->recovery_stripes, ++ dl, region_size, rec->nr_regions); ++ if (IS_ERR(rec->rh)) ++ goto bad_rh; ++ ++ /* Initialize stripe cache. */ ++ r = sc_init(rs, p->stripes); ++ if (r) ++ goto bad_sc; ++ ++ /* REMOVEME: statistics. */ ++ stats_reset(rs); ++ ClearRSDevelStats(rs); /* Disnable development status. */ ++ return rs; ++ ++bad_dirty_log: ++ TI_ERR_RET("Error creating dirty log", ERR_PTR(-ENOMEM)); ++ ++bad_chunk_size: ++ dm_dirty_log_destroy(dl); ++ TI_ERR_RET("Chunk size larger than region size", ERR_PTR(-EINVAL)); ++ ++bad_recover_io_size: ++ dm_dirty_log_destroy(dl); ++ TI_ERR_RET("Recover stripe io size larger than region size", ++ ERR_PTR(-EINVAL)); ++ ++bad_array: ++ dm_dirty_log_destroy(dl); ++ TI_ERR_RET("Arry too big", ERR_PTR(-EINVAL)); ++ ++bad_alloc: ++ dm_dirty_log_destroy(dl); ++ TI_ERR_RET("Cannot allocate raid context", ERR_PTR(-ENOMEM)); ++ ++bad_rh: ++ dm_dirty_log_destroy(dl); ++ ti->error = DM_MSG_PREFIX "Error creating dirty region hash"; ++ goto free_rs; ++ ++bad_sc: ++ dm_region_hash_destroy(rec->rh); /* Destroys dirty log too. */ ++ sc_exit(&rs->sc); ++ ti->error = DM_MSG_PREFIX "Error creating stripe cache"; ++free_rs: ++ kfree(rs); ++ return ERR_PTR(-ENOMEM); ++} ++ ++/* Free a RAID context (a RAID set). */ ++static void context_free(struct raid_set *rs, unsigned p) ++{ ++ while (p--) ++ dm_put_device(rs->ti, rs->dev[p].dev); ++ ++ sc_exit(&rs->sc); ++ dm_region_hash_destroy(rs->recover.rh); /* Destroys dirty log too. */ ++ kfree(rs); ++} ++ ++/* Create work queue and initialize delayed work. */ ++static int rs_workqueue_init(struct raid_set *rs) ++{ ++ struct dm_target *ti = rs->ti; ++ ++ rs->io.wq = create_singlethread_workqueue(DAEMON); ++ if (!rs->io.wq) ++ TI_ERR_RET("failed to create " DAEMON, -ENOMEM); ++ ++ INIT_DELAYED_WORK(&rs->io.dws_do_raid, do_raid); ++ INIT_WORK(&rs->io.ws_do_table_event, do_table_event); ++ return 0; ++} ++ ++/* Return pointer to raid_type structure for raid name. */ ++static struct raid_type *get_raid_type(char *name) ++{ ++ struct raid_type *r = ARRAY_END(raid_types); ++ ++ while (r-- > raid_types) { ++ if (!strcmp(r->name, name)) ++ return r; ++ } ++ ++ return NULL; ++} ++ ++/* FIXME: factor out to dm core. */ ++static int multiple(sector_t a, sector_t b, sector_t *n) ++{ ++ sector_t r = a; ++ ++ sector_div(r, b); ++ *n = r; ++ return a == r * b; ++} ++ ++/* Log RAID set information to kernel log. */ ++static void rs_log(struct raid_set *rs, unsigned io_size) ++{ ++ unsigned p; ++ char buf[BDEVNAME_SIZE]; ++ ++ for (p = 0; p < rs->set.raid_devs; p++) ++ DMINFO("/dev/%s is raid disk %u%s", ++ bdevname(rs->dev[p].dev->bdev, buf), p, ++ (p == rs->set.pi) ? " (parity)" : ""); ++ ++ DMINFO("%d/%d/%d sectors chunk/io/recovery size, %u stripes\n" ++ "algorithm \"%s\", %u chunks with %uMB/s\n" ++ "%s set with net %u/%u devices", ++ rs->set.chunk_size, rs->set.io_size, rs->recover.io_size, ++ atomic_read(&rs->sc.stripes), ++ rs->xor.f->name, rs->xor.chunks, mbpers(rs, io_size), ++ rs->set.raid_type->descr, rs->set.data_devs, rs->set.raid_devs); ++} ++ ++/* Get all devices and offsets. 
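++ * Expects raid_devs <device_path> <offset> pairs; each offset is checked
++ * against sectors_per_dev and duplicate devices are rejected via
++ * raid_dev_lookup().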
*/ ++static int dev_parms(struct raid_set *rs, char **argv, int *p) ++{ ++ struct dm_target *ti = rs->ti; ++ ++DMINFO("rs->set.sectors_per_dev=%llu", (unsigned long long) rs->set.sectors_per_dev); ++ for (*p = 0; *p < rs->set.raid_devs; (*p)++, argv += 2) { ++ int r; ++ unsigned long long tmp; ++ struct raid_dev *dev = rs->dev + *p; ++ ++ /* Get offset and device. */ ++ if (sscanf(argv[1], "%llu", &tmp) != 1 || ++ tmp > rs->set.sectors_per_dev) ++ TI_ERR("Invalid RAID device offset parameter"); ++ ++ dev->start = tmp; ++ r = dm_get_device(ti, argv[0], dev->start, ++ rs->set.sectors_per_dev, ++ dm_table_get_mode(ti->table), &dev->dev); ++ if (r) ++ TI_ERR_RET("RAID device lookup failure", r); ++ ++ r = raid_dev_lookup(rs, dev); ++ if (r != -ENODEV && r < *p) { ++ (*p)++; /* Ensure dm_put_device() on actual device. */ ++ TI_ERR_RET("Duplicate RAID device", -ENXIO); ++ } ++ } ++ ++ return 0; ++} ++ ++/* Set recovery bandwidth. */ ++static void ++recover_set_bandwidth(struct raid_set *rs, unsigned bandwidth) ++{ ++ rs->recover.bandwidth = bandwidth; ++ rs->recover.bandwidth_work = 100 / bandwidth; ++} ++ ++/* Handle variable number of RAID parameters. */ ++static int get_raid_variable_parms(struct dm_target *ti, char **argv, ++ struct variable_parms *vp) ++{ ++ int p, value; ++ struct { ++ int action; /* -1: skip, 0: no power2 check, 1: power2 check */ ++ char *errmsg; ++ int min, max; ++ int *var, *var2, *var3; ++ } argctr[] = { ++ { 1, ++ "Invalid chunk size; must be -1 or 2^^n and <= 16384", ++ IO_SIZE_MIN, CHUNK_SIZE_MAX, ++ &vp->chunk_size_parm, &vp->chunk_size, &vp->io_size }, ++ { 0, ++ "Invalid number of stripes: must be -1 or >= 8 and <= 16384", ++ STRIPES_MIN, STRIPES_MAX, ++ &vp->stripes_parm, &vp->stripes, NULL }, ++ { 1, ++ "Invalid io size; must -1 or >= 8, 2^^n and less equal " ++ "min(BIO_MAX_SECTORS/2, chunk size)", ++ IO_SIZE_MIN, 0, /* Needs to be updated in loop below. */ ++ &vp->io_size_parm, &vp->io_size, NULL }, ++ { 1, ++ "Invalid recovery io size; must be -1 or " ++ "2^^n and less equal BIO_MAX_SECTORS/2", ++ RECOVER_IO_SIZE_MIN, BIO_MAX_SECTORS / 2, ++ &vp->recover_io_size_parm, &vp->recover_io_size, NULL }, ++ { 0, ++ "Invalid recovery bandwidth percentage; " ++ "must be -1 or > 0 and <= 100", ++ BANDWIDTH_MIN, BANDWIDTH_MAX, ++ &vp->bandwidth_parm, &vp->bandwidth, NULL }, ++ /* Handle sync argument seperately in loop. */ ++ { -1, ++ "Invalid recovery switch; must be \"sync\" or \"nosync\"" }, ++ { 0, ++ "Invalid number of recovery stripes;" ++ "must be -1, > 0 and <= 64", ++ RECOVERY_STRIPES_MIN, RECOVERY_STRIPES_MAX, ++ &vp->recovery_stripes_parm, &vp->recovery_stripes, NULL }, ++ }, *varp; ++ ++ /* Fetch # of variable raid parameters. */ ++ if (sscanf(*(argv++), "%d", &vp->raid_parms) != 1 || ++ !range_ok(vp->raid_parms, 0, 7)) ++ TI_ERR("Bad variable raid parameters number"); ++ ++ /* Preset variable RAID parameters. */ ++ vp->chunk_size = CHUNK_SIZE_DEFAULT; ++ vp->io_size = IO_SIZE_DEFAULT; ++ vp->stripes = STRIPES_DEFAULT; ++ vp->recover_io_size = RECOVER_IO_SIZE_DEFAULT; ++ vp->bandwidth = BANDWIDTH_DEFAULT; ++ vp->recovery = 1; ++ vp->recovery_stripes = RECOVERY_STRIPES_DEFAULT; ++ ++ /* Walk the array of argument constraints for all given ones. */ ++ for (p = 0, varp = argctr; p < vp->raid_parms; p++, varp++) { ++ BUG_ON(varp >= ARRAY_END(argctr)); ++ ++ /* Special case for "[no]sync" string argument. 
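++		 * ("sync" leaves recovery enabled, "nosync" clears
++		 * vp->recovery; no numeric conversion or range check
++		 * applies to this slot.)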
*/ ++ if (varp->action < 0) { ++ if (!strcmp(*argv, "sync")) ++ ; ++ else if (!strcmp(*argv, "nosync")) ++ vp->recovery = 0; ++ else ++ TI_ERR(varp->errmsg); ++ ++ argv++; ++ continue; ++ } ++ ++ /* ++ * Special case for io_size depending ++ * on previously set chunk size. ++ */ ++ if (p == 2) ++ varp->max = min(BIO_MAX_SECTORS / 2, vp->chunk_size); ++ ++ if (sscanf(*(argv++), "%d", &value) != 1 || ++ (value != -1 && ++ ((varp->action && !is_power_of_2(value)) || ++ !range_ok(value, varp->min, varp->max)))) ++ TI_ERR(varp->errmsg); ++ ++ *varp->var = value; ++ if (value != -1) { ++ if (varp->var2) ++ *varp->var2 = value; ++ if (varp->var3) ++ *varp->var3 = value; ++ } ++ } ++ ++ return 0; ++} ++ ++/* Parse optional locking parameters. */ ++static int get_raid_locking_parms(struct dm_target *ti, char **argv, ++ int *locking_parms, ++ struct dm_raid45_locking_type **locking_type) ++{ ++ if (!strnicmp(argv[0], "locking", strlen(argv[0]))) { ++ char *lckstr = argv[1]; ++ size_t lcksz = strlen(lckstr); ++ ++ if (!strnicmp(lckstr, "none", lcksz)) { ++ *locking_type = &locking_none; ++ *locking_parms = 2; ++ } else if (!strnicmp(lckstr, "cluster", lcksz)) { ++ DMERR("locking type \"%s\" not yet implemented", ++ lckstr); ++ return -EINVAL; ++ } else { ++ DMERR("unknown locking type \"%s\"", lckstr); ++ return -EINVAL; ++ } ++ } ++ ++ *locking_parms = 0; ++ *locking_type = &locking_none; ++ return 0; ++} ++ ++/* Set backing device read ahead properties of RAID set. */ ++static void rs_set_read_ahead(struct raid_set *rs, ++ unsigned sectors, unsigned stripes) ++{ ++ unsigned ra_pages = dm_div_up(sectors, SECTORS_PER_PAGE); ++ struct mapped_device *md = dm_table_get_md(rs->ti->table); ++ struct backing_dev_info *bdi = &dm_disk(md)->queue->backing_dev_info; ++ ++ /* Set read-ahead for the RAID set and the component devices. */ ++ if (ra_pages) { ++ unsigned p = rs->set.raid_devs; ++ ++ bdi->ra_pages = stripes * ra_pages * rs->set.data_devs; ++ ++ while (p--) { ++ struct request_queue *q = ++ bdev_get_queue(rs->dev[p].dev->bdev); ++ ++ q->backing_dev_info.ra_pages = ra_pages; ++ } ++ } ++ ++ dm_put(md); ++} ++ ++/* Set congested function. */ ++static void rs_set_congested_fn(struct raid_set *rs) ++{ ++ struct mapped_device *md = dm_table_get_md(rs->ti->table); ++ struct backing_dev_info *bdi = &dm_disk(md)->queue->backing_dev_info; ++ ++ /* Set congested function and data. 
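++	 * (rs_congested() is consulted by the block layer's bdi congestion
++	 * checks to decide whether io to this set should be throttled.)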
*/ ++ bdi->congested_fn = rs_congested; ++ bdi->congested_data = rs; ++ dm_put(md); ++} ++ ++/* ++ * Construct a RAID4/5 mapping: ++ * ++ * log_type #log_params \ ++ * raid_type [#parity_dev] #raid_variable_params \ ++ * [locking "none"/"cluster"] ++ * #raid_devs #dev_to_initialize [ ]{3,} ++ * ++ * log_type = "core"/"disk", ++ * #log_params = 1-3 (1-2 for core dirty log type, 3 for disk dirty log only) ++ * log_params = [dirty_log_path] region_size [[no]sync]) ++ * ++ * raid_type = "raid4", "raid5_la", "raid5_ra", "raid5_ls", "raid5_rs" ++ * ++ * #parity_dev = N if raid_type = "raid4" ++ * o N = -1: pick default = last device ++ * o N >= 0 and < #raid_devs: parity device index ++ * ++ * #raid_variable_params = 0-7; raid_params (-1 = default): ++ * [chunk_size [#stripes [io_size [recover_io_size \ ++ * [%recovery_bandwidth [recovery_switch [#recovery_stripes]]]]]]] ++ * o chunk_size (unit to calculate drive addresses; must be 2^^n, > 8 ++ * and <= CHUNK_SIZE_MAX) ++ * o #stripes is number of stripes allocated to stripe cache ++ * (must be > 1 and < STRIPES_MAX) ++ * o io_size (io unit size per device in sectors; must be 2^^n and > 8) ++ * o recover_io_size (io unit size per device for recovery in sectors; ++ must be 2^^n, > SECTORS_PER_PAGE and <= region_size) ++ * o %recovery_bandwith is the maximum amount spend for recovery during ++ * application io (1-100%) ++ * o recovery switch = [sync|nosync] ++ * o #recovery_stripes is the number of recovery stripes used for ++ * parallel recovery of the RAID set ++ * If raid_variable_params = 0, defaults will be used. ++ * Any raid_variable_param can be set to -1 to apply a default ++ * ++ * #raid_devs = N (N >= 3) ++ * ++ * #dev_to_initialize = N ++ * -1: initialize parity on all devices ++ * >= 0 and < #raid_devs: initialize raid_path; used to force reconstruction ++ * of a failed devices content after replacement ++ * ++ * = device_path (eg, /dev/sdd1) ++ * = begin at offset on ++ * ++ */ ++#define MIN_PARMS 13 ++static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) ++{ ++ int dev_to_init, dl_parms, i, locking_parms, ++ parity_parm, pi = -1, r, raid_devs; ++ sector_t tmp, sectors_per_dev; ++ struct dm_raid45_locking_type *locking; ++ struct raid_set *rs; ++ struct raid_type *raid_type; ++ struct variable_parms parms; ++ ++ /* Ensure minimum number of parameters. */ ++ if (argc < MIN_PARMS) ++ TI_ERR("Not enough parameters"); ++ ++ /* Fetch # of dirty log parameters. */ ++ if (sscanf(argv[1], "%d", &dl_parms) != 1 || ++ !range_ok(dl_parms, 1, 4711)) /* ;-) */ ++ TI_ERR("Bad dirty log parameters number"); ++ ++ /* Check raid_type. */ ++ raid_type = get_raid_type(argv[dl_parms + 2]); ++ if (!raid_type) ++ TI_ERR("Bad raid type"); ++ ++ /* In case of RAID4, parity drive is selectable. */ ++ parity_parm = !!(raid_type->level == raid4); ++ ++ /* Handle variable number of RAID parameters. */ ++ r = get_raid_variable_parms(ti, argv + dl_parms + parity_parm + 3, ++ &parms); ++ if (r) ++ return r; ++ ++ /* Handle any locking parameters. */ ++ r = get_raid_locking_parms(ti, ++ argv + dl_parms + parity_parm + ++ parms.raid_parms + 4, ++ &locking_parms, &locking); ++ if (r) ++ return r; ++ ++ /* # of raid devices. */ ++ i = dl_parms + parity_parm + parms.raid_parms + locking_parms + 4; ++ if (sscanf(argv[i], "%d", &raid_devs) != 1 || ++ raid_devs < raid_type->minimal_devs) ++ TI_ERR("Invalid number of raid devices"); ++ ++ /* In case of RAID4, check parity drive index is in limits. 
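++	 * An illustrative raid4 target parameter line (hypothetical devices,
++	 * all variable parameters left at their defaults, parity on the
++	 * last device) would be:
++	 *   core 2 8192 sync raid4 -1 0 3 -1 \
++	 *     /dev/sda1 0 /dev/sdb1 0 /dev/sdc1 0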
*/ ++ if (raid_type->level == raid4) { ++ /* Fetch index of parity device. */ ++ if (sscanf(argv[dl_parms + 3], "%d", &pi) != 1 || ++ (pi != -1 && !range_ok(pi, 0, raid_devs - 1))) ++ TI_ERR("Invalid RAID4 parity device index"); ++ } ++ ++ /* ++ * Index of device to initialize starts at 0 ++ * ++ * o -1 -> don't initialize a selected device; ++ * initialize parity conforming to algorithm ++ * o 0..raid_devs-1 -> initialize respective device ++ * (used for reconstruction of a replaced device) ++ */ ++ if (sscanf(argv[dl_parms + parity_parm + parms.raid_parms + ++ locking_parms + 5], "%d", &dev_to_init) != 1 || ++ !range_ok(dev_to_init, -1, raid_devs - 1)) ++ TI_ERR("Invalid number for raid device to initialize"); ++ ++ /* Check # of raid device arguments. */ ++ if (argc - dl_parms - parity_parm - parms.raid_parms - 6 != ++ 2 * raid_devs) ++ TI_ERR("Wrong number of raid device/offset arguments"); ++ ++ /* ++ * Check that the table length is devisable ++ * w/o rest by (raid_devs - parity_devs) ++ */ ++ if (!multiple(ti->len, raid_devs - raid_type->parity_devs, ++ §ors_per_dev)) ++ TI_ERR("Target length not divisible by number of data devices"); ++ ++ /* ++ * Check that the device size is ++ * devisable w/o rest by chunk size ++ */ ++ if (!multiple(sectors_per_dev, parms.chunk_size, &tmp)) ++ TI_ERR("Device length not divisible by chunk_size"); ++ ++ /**************************************************************** ++ * Now that we checked the constructor arguments -> ++ * let's allocate the RAID set ++ ****************************************************************/ ++ rs = context_alloc(raid_type, &parms, raid_devs, sectors_per_dev, ++ ti, dl_parms, argv); ++ if (IS_ERR(rs)) ++ return PTR_ERR(rs); ++ ++ ++ rs->set.dev_to_init = rs->set.dev_to_init_parm = dev_to_init; ++ rs->set.pi = rs->set.pi_parm = pi; ++ ++ /* Set RAID4 parity drive index. */ ++ if (raid_type->level == raid4) ++ rs->set.pi = (pi == -1) ? rs->set.data_devs : pi; ++ ++ recover_set_bandwidth(rs, parms.bandwidth); ++ ++ /* Use locking type to lock stripe access. */ ++ rs->locking = locking; ++ ++ /* Get the device/offset tupels. */ ++ argv += dl_parms + 6 + parity_parm + parms.raid_parms; ++ r = dev_parms(rs, argv, &i); ++ if (r) ++ goto err; ++ ++ /* Set backing device information (eg. read ahead). */ ++ rs_set_read_ahead(rs, 2 * rs->set.chunk_size /* sectors per device */, ++ 2 /* # of stripes */); ++ rs_set_congested_fn(rs); /* Set congested function. */ ++ SetRSCheckOverwrite(rs); /* Allow chunk overwrite checks. */ ++ rs->xor.speed = xor_optimize(rs); /* Select best xor algorithm. */ ++ ++ /* Set for recovery of any nosync regions. */ ++ if (parms.recovery) ++ SetRSRecover(rs); ++ else { ++ /* ++ * Need to free recovery stripe(s) here in case ++ * of nosync, because xor_optimize uses one. ++ */ ++ set_start_recovery(rs); ++ set_end_recovery(rs); ++ stripe_recover_free(rs); ++ } ++ ++ /* ++ * Enable parity chunk creation enformcement for ++ * little numbers of array members where it doesn'ti ++ * gain us performance to xor parity out and back in as ++ * with larger array member numbers. ++ */ ++ if (rs->set.raid_devs <= rs->set.raid_type->minimal_devs + 1) ++ SetRSEnforceParityCreation(rs); ++ ++ /* ++ * Make sure that dm core only hands maximum io size ++ * length down and pays attention to io boundaries. ++ */ ++ ti->split_io = rs->set.io_size; ++ ti->private = rs; ++ ++ /* Initialize work queue to handle this RAID set's io. 
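++	 * (a single-threaded workqueue named DAEMON; do_raid() is run from
++	 * it via the dws_do_raid delayed work item.)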
*/ ++ r = rs_workqueue_init(rs); ++ if (r) ++ goto err; ++ ++ rs_log(rs, rs->recover.io_size); /* Log information about RAID set. */ ++ return 0; ++ ++err: ++ context_free(rs, i); ++ return r; ++} ++ ++/* ++ * Destruct a raid mapping ++ */ ++static void raid_dtr(struct dm_target *ti) ++{ ++ struct raid_set *rs = ti->private; ++ ++ destroy_workqueue(rs->io.wq); ++ context_free(rs, rs->set.raid_devs); ++} ++ ++/* Raid mapping function. */ ++static int raid_map(struct dm_target *ti, struct bio *bio, ++ union map_info *map_context) ++{ ++ /* I don't want to waste stripe cache capacity. */ ++ if (bio_rw(bio) == READA) ++ return -EIO; ++ else { ++ struct raid_set *rs = ti->private; ++ ++ /* ++ * Get io reference to be waiting for to drop ++ * to zero on device suspension/destruction. ++ */ ++ io_get(rs); ++ bio->bi_sector -= ti->begin; /* Remap sector. */ ++ ++ /* Queue io to RAID set. */ ++ mutex_lock(&rs->io.in_lock); ++ bio_list_add(&rs->io.in, bio); ++ mutex_unlock(&rs->io.in_lock); ++ ++ /* Wake daemon to process input list. */ ++ wake_do_raid(rs); ++ ++ /* REMOVEME: statistics. */ ++ atomic_inc(rs->stats + (bio_data_dir(bio) == READ ? ++ S_BIOS_READ : S_BIOS_WRITE)); ++ return DM_MAPIO_SUBMITTED; /* Handle later. */ ++ } ++} ++ ++/* Device suspend. */ ++static void raid_presuspend(struct dm_target *ti) ++{ ++ struct raid_set *rs = ti->private; ++ struct dm_dirty_log *dl = rs->recover.dl; ++ ++ SetRSSuspend(rs); ++ ++ if (RSRecover(rs)) ++ dm_rh_stop_recovery(rs->recover.rh); ++ ++ cancel_delayed_work(&rs->io.dws_do_raid); ++ flush_workqueue(rs->io.wq); ++ wait_ios(rs); /* Wait for completion of all ios being processed. */ ++ ++ if (dl->type->presuspend && dl->type->presuspend(dl)) ++ /* FIXME: need better error handling. */ ++ DMWARN("log presuspend failed"); ++} ++ ++static void raid_postsuspend(struct dm_target *ti) ++{ ++ struct raid_set *rs = ti->private; ++ struct dm_dirty_log *dl = rs->recover.dl; ++ ++ if (dl->type->postsuspend && dl->type->postsuspend(dl)) ++ /* FIXME: need better error handling. */ ++ DMWARN("log postsuspend failed"); ++ ++} ++ ++/* Device resume. */ ++static void raid_resume(struct dm_target *ti) ++{ ++ struct raid_set *rs = ti->private; ++ struct recover *rec = &rs->recover; ++ struct dm_dirty_log *dl = rec->dl; ++ ++DMINFO("%s...", __func__); ++ if (dl->type->resume && dl->type->resume(dl)) ++ /* Resume dirty log. */ ++ /* FIXME: need better error handling. */ ++ DMWARN("log resume failed"); ++ ++ rec->nr_regions_to_recover = ++ rec->nr_regions - dl->type->get_sync_count(dl); ++ ++ /* Restart any unfinished recovery. */ ++ if (RSRecover(rs)) { ++ set_start_recovery(rs); ++ dm_rh_start_recovery(rec->rh); ++ } ++ ++ ClearRSSuspend(rs); ++} ++ ++/* Return stripe cache size. */ ++static unsigned sc_size(struct raid_set *rs) ++{ ++ return to_sector(atomic_read(&rs->sc.stripes) * ++ (sizeof(struct stripe) + ++ (sizeof(struct stripe_chunk) + ++ (sizeof(struct page_list) + ++ to_bytes(rs->set.io_size) * ++ rs->set.raid_devs)) + ++ (rs->recover.end_jiffies ? ++ 0 : rs->recover.recovery_stripes * ++ to_bytes(rs->set.raid_devs * rs->recover.io_size)))); ++} ++ ++/* REMOVEME: status output for development. 
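++ * (toggled at run time with the "statistics {on,off,reset}" message;
++ * see raid_message() and devel_flags().)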
*/ ++static void raid_devel_stats(struct dm_target *ti, char *result, ++ unsigned *size, unsigned maxlen) ++{ ++ unsigned sz = *size; ++ unsigned long j; ++ char buf[BDEVNAME_SIZE], *p; ++ struct stats_map *sm; ++ struct raid_set *rs = ti->private; ++ struct recover *rec = &rs->recover; ++ struct timespec ts; ++ ++ DMEMIT("%s %s=%u bw=%u\n", ++ version, rs->xor.f->name, rs->xor.chunks, rs->recover.bandwidth); ++ DMEMIT("act_ios=%d ", io_ref(rs)); ++ DMEMIT("act_ios_max=%d\n", atomic_read(&rs->io.in_process_max)); ++ DMEMIT("act_stripes=%d ", sc_active(&rs->sc)); ++ DMEMIT("act_stripes_max=%d\n", ++ atomic_read(&rs->sc.active_stripes_max)); ++ ++ for (sm = stats_map; sm < ARRAY_END(stats_map); sm++) ++ DMEMIT("%s%d", sm->str, atomic_read(rs->stats + sm->type)); ++ ++ DMEMIT(" checkovr=%s\n", RSCheckOverwrite(rs) ? "on" : "off"); ++ DMEMIT("sc=%u/%u/%u/%u/%u/%u/%u\n", rs->set.chunk_size, ++ atomic_read(&rs->sc.stripes), rs->set.io_size, ++ rec->recovery_stripes, rec->io_size, rs->sc.hash.buckets, ++ sc_size(rs)); ++ ++ j = (rec->end_jiffies ? rec->end_jiffies : jiffies) - ++ rec->start_jiffies; ++ jiffies_to_timespec(j, &ts); ++ sprintf(buf, "%ld.%ld", ts.tv_sec, ts.tv_nsec); ++ p = strchr(buf, '.'); ++ p[3] = 0; ++ ++ DMEMIT("rg=%llu/%llu/%llu/%u %s\n", ++ (unsigned long long) rec->nr_regions_recovered, ++ (unsigned long long) rec->nr_regions_to_recover, ++ (unsigned long long) rec->nr_regions, rec->bandwidth, buf); ++ ++ *size = sz; ++} ++ ++static int raid_status(struct dm_target *ti, status_type_t type, ++ char *result, unsigned maxlen) ++{ ++ unsigned p, sz = 0; ++ char buf[BDEVNAME_SIZE]; ++ struct raid_set *rs = ti->private; ++ struct dm_dirty_log *dl = rs->recover.dl; ++ int raid_parms[] = { ++ rs->set.chunk_size_parm, ++ rs->sc.stripes_parm, ++ rs->set.io_size_parm, ++ rs->recover.io_size_parm, ++ rs->recover.bandwidth_parm, ++ -2, ++ rs->recover.recovery_stripes, ++ }; ++ ++ switch (type) { ++ case STATUSTYPE_INFO: ++ /* REMOVEME: statistics. */ ++ if (RSDevelStats(rs)) ++ raid_devel_stats(ti, result, &sz, maxlen); ++ ++ DMEMIT("%u ", rs->set.raid_devs); ++ ++ for (p = 0; p < rs->set.raid_devs; p++) ++ DMEMIT("%s ", ++ format_dev_t(buf, rs->dev[p].dev->bdev->bd_dev)); ++ ++ DMEMIT("2 "); ++ for (p = 0; p < rs->set.raid_devs; p++) { ++ DMEMIT("%c", !DevFailed(rs->dev + p) ? 'A' : 'D'); ++ ++ if (p == rs->set.pi) ++ DMEMIT("p"); ++ ++ if (p == rs->set.dev_to_init) ++ DMEMIT("i"); ++ } ++ ++ DMEMIT(" %llu/%llu ", ++ (unsigned long long) dl->type->get_sync_count(dl), ++ (unsigned long long) rs->recover.nr_regions); ++ ++ sz += dl->type->status(dl, type, result+sz, maxlen-sz); ++ break; ++ case STATUSTYPE_TABLE: ++ sz = rs->recover.dl->type->status(rs->recover.dl, type, ++ result, maxlen); ++ DMEMIT("%s %u ", rs->set.raid_type->name, rs->set.raid_parms); ++ ++ for (p = 0; p < rs->set.raid_parms; p++) { ++ if (raid_parms[p] > -2) ++ DMEMIT("%d ", raid_parms[p]); ++ else ++ DMEMIT("%s ", rs->recover.recovery ? ++ "sync" : "nosync"); ++ } ++ ++ DMEMIT("%u %d ", rs->set.raid_devs, rs->set.dev_to_init); ++ ++ for (p = 0; p < rs->set.raid_devs; p++) ++ DMEMIT("%s %llu ", ++ format_dev_t(buf, rs->dev[p].dev->bdev->bd_dev), ++ (unsigned long long) rs->dev[p].start); ++ } ++ ++ return 0; ++} ++ ++/* ++ * Message interface ++ */ ++/* Turn a delta into an absolute value. */ ++static int _absolute(char *action, int act, int r) ++{ ++ size_t len = strlen(action); ++ ++ if (len < 2) ++ len = 2; ++ ++ /* Make delta absolute. 
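++	 * ("set" takes the given value as is, "grow" adds it to the current
++	 * value, "shrink" subtracts it from the current value; anything
++	 * else returns -EINVAL.)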
*/ ++ if (!strncmp("set", action, len)) ++ ; ++ else if (!strncmp("grow", action, len)) ++ r += act; ++ else if (!strncmp("shrink", action, len)) ++ r = act - r; ++ else ++ r = -EINVAL; ++ ++ return r; ++} ++ ++ /* Change recovery io bandwidth. */ ++static int bandwidth_change(struct raid_set *rs, int argc, char **argv, ++ enum raid_set_flags flag) ++{ ++ int act = rs->recover.bandwidth, bandwidth; ++ ++ if (argc != 2) ++ return -EINVAL; ++ ++ if (sscanf(argv[1], "%d", &bandwidth) == 1 && ++ range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) { ++ /* Make delta bandwidth absolute. */ ++ bandwidth = _absolute(argv[0], act, bandwidth); ++ ++ /* Check range. */ ++ if (range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) { ++ recover_set_bandwidth(rs, bandwidth); ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++/* Set/reset development feature flags. */ ++static int devel_flags(struct raid_set *rs, int argc, char **argv, ++ enum raid_set_flags flag) ++{ ++ size_t len; ++ ++ if (argc != 1) ++ return -EINVAL; ++ ++ len = strlen(argv[0]); ++ if (len < 2) ++ len = 2; ++ ++ if (!strncmp(argv[0], "on", len)) ++ return test_and_set_bit(flag, &rs->io.flags) ? -EPERM : 0; ++ else if (!strncmp(argv[0], "off", len)) ++ return test_and_clear_bit(flag, &rs->io.flags) ? 0 : -EPERM; ++ else if (!strncmp(argv[0], "reset", len)) { ++ if (flag == RS_DEVEL_STATS) { ++ if (test_bit(flag, &rs->io.flags)) { ++ stats_reset(rs); ++ return 0; ++ } else ++ return -EPERM; ++ } else { ++ set_bit(flag, &rs->io.flags); ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++/* Resize the stripe cache. */ ++static int sc_resize(struct raid_set *rs, int argc, char **argv, ++ enum raid_set_flags flag) ++{ ++ int act, stripes; ++ ++ if (argc != 2) ++ return -EINVAL; ++ ++ /* Deny permission in case the daemon is still resizing!. */ ++ if (atomic_read(&rs->sc.stripes_to_set)) ++ return -EPERM; ++ ++ if (sscanf(argv[1], "%d", &stripes) == 1 && ++ stripes > 0) { ++ act = atomic_read(&rs->sc.stripes); ++ ++ /* Make delta stripes absolute. */ ++ stripes = _absolute(argv[0], act, stripes); ++ ++ /* ++ * Check range and that the # of stripes changes. ++ * We leave the resizing to the wroker. ++ */ ++ if (range_ok(stripes, STRIPES_MIN, STRIPES_MAX) && ++ stripes != atomic_read(&rs->sc.stripes)) { ++ atomic_set(&rs->sc.stripes_to_set, stripes); ++ wake_do_raid(rs); ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++/* Change xor algorithm and number of chunks. 
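++ * (message form: "xor <algorithm> <#chunks>", e.g. "xor xor_8 5"; the new
++ * settings are benchmarked once via xor_speed() and logged by rs_log().)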
*/ ++static int xor_set(struct raid_set *rs, int argc, char **argv, ++ enum raid_set_flags flag) ++{ ++ if (argc == 2) { ++ int chunks; ++ char *algorithm = argv[0]; ++ struct xor_func *f = ARRAY_END(xor_funcs); ++ ++ if (sscanf(argv[1], "%d", &chunks) == 1 && ++ range_ok(chunks, 2, XOR_CHUNKS_MAX) && ++ chunks <= rs->set.raid_devs) { ++ while (f-- > xor_funcs) { ++ if (!strcmp(algorithm, f->name)) { ++ unsigned io_size = 0; ++ struct stripe *stripe = stripe_alloc(&rs->sc, rs->sc.mem_cache_client, SC_GROW); ++ ++ DMINFO("xor: %s", f->name); ++ if (f->f == xor_blocks_wrapper && ++ chunks > MAX_XOR_BLOCKS + 1) { ++ DMERR("chunks > MAX_XOR_BLOCKS" ++ " + 1"); ++ break; ++ } ++ ++ mutex_lock(&rs->io.xor_lock); ++ rs->xor.f = f; ++ rs->xor.chunks = chunks; ++ rs->xor.speed = 0; ++ mutex_unlock(&rs->io.xor_lock); ++ ++ if (stripe) { ++ rs->xor.speed = xor_speed(stripe); ++ io_size = stripe->io.size; ++ stripe_free(stripe, rs->sc.mem_cache_client); ++ } ++ ++ rs_log(rs, io_size); ++ return 0; ++ } ++ } ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++/* ++ * Allow writes after they got prohibited because of a device failure. ++ * ++ * This needs to be called after userspace updated metadata state ++ * based on an event being thrown during device failure processing. ++ */ ++static int allow_writes(struct raid_set *rs, int argc, char **argv, ++ enum raid_set_flags flag) ++{ ++ if (TestClearRSProhibitWrites(rs)) { ++DMINFO("%s waking", __func__); ++ wake_do_raid(rs); ++ return 0; ++ } ++ ++ return -EPERM; ++} ++ ++/* Parse the RAID message. */ ++/* ++ * 'all[ow_writes]' ++ * 'ba[ndwidth] {se[t],g[row],sh[rink]} #' # e.g 'ba se 50' ++ * "o[verwrite] {on,of[f],r[eset]}' # e.g. 'o of' ++ * 'sta[tistics] {on,of[f],r[eset]}' # e.g. 'stat of' ++ * 'str[ipecache] {se[t],g[row],sh[rink]} #' # e.g. 'stripe set 1024' ++ * 'xor algorithm #chunks' # e.g. 'xor xor_8 5' ++ * ++ */ ++static int raid_message(struct dm_target *ti, unsigned argc, char **argv) ++{ ++ if (argc) { ++ size_t len = strlen(argv[0]); ++ struct raid_set *rs = ti->private; ++ struct { ++ const char *name; ++ int (*f) (struct raid_set *rs, int argc, char **argv, ++ enum raid_set_flags flag); ++ enum raid_set_flags flag; ++ } msg_descr[] = { ++ { "allow_writes", allow_writes, 0 }, ++ { "bandwidth", bandwidth_change, 0 }, ++ { "overwrite", devel_flags, RS_CHECK_OVERWRITE }, ++ { "statistics", devel_flags, RS_DEVEL_STATS }, ++ { "stripe_cache", sc_resize, 0 }, ++ { "xor", xor_set, 0 }, ++ }, *m = ARRAY_END(msg_descr); ++ ++ if (len < 3) ++ len = 3; ++ ++ while (m-- > msg_descr) { ++ if (!strncmp(argv[0], m->name, len)) ++ return m->f(rs, argc - 1, argv + 1, m->flag); ++ } ++ ++ } ++ ++ return -EINVAL; ++} ++/* ++ * END message interface ++ */ ++ ++/* Provide io hints. 
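++ * (io_min is set to the chunk size and io_opt to a full stripe width,
++ * i.e. chunk size times the number of data devices.)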
*/ ++static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) ++{ ++ struct raid_set *rs = ti->private; ++ ++ blk_limits_io_min(limits, rs->set.chunk_size); ++ blk_limits_io_opt(limits, rs->set.chunk_size * rs->set.data_devs); ++} ++ ++static struct target_type raid_target = { ++ .name = "raid45", ++ .version = {1, 0, 0}, ++ .module = THIS_MODULE, ++ .ctr = raid_ctr, ++ .dtr = raid_dtr, ++ .map = raid_map, ++ .presuspend = raid_presuspend, ++ .postsuspend = raid_postsuspend, ++ .resume = raid_resume, ++ .status = raid_status, ++ .message = raid_message, ++ .io_hints = raid_io_hints, ++}; ++ ++static void init_exit(const char *bad_msg, const char *good_msg, int r) ++{ ++ if (r) ++ DMERR("Failed to %sregister target [%d]", bad_msg, r); ++ else ++ DMINFO("%s %s", good_msg, version); ++} ++ ++static int __init dm_raid_init(void) ++{ ++ int r = dm_register_target(&raid_target); ++ ++ init_exit("", "initialized", r); ++ return r; ++} ++ ++static void __exit dm_raid_exit(void) ++{ ++ dm_unregister_target(&raid_target); ++ init_exit("un", "exit", 0); ++} ++ ++/* Module hooks. */ ++module_init(dm_raid_init); ++module_exit(dm_raid_exit); ++ ++MODULE_DESCRIPTION(DM_NAME " raid4/5 target"); ++MODULE_AUTHOR("Heinz Mauelshagen "); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("dm-raid4"); ++MODULE_ALIAS("dm-raid5"); +--- /dev/null ++++ b/drivers/md/dm-raid45.h +@@ -0,0 +1,30 @@ ++/* ++ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. ++ * ++ * Module Author: Heinz Mauelshagen (Mauelshagen@RedHat.com) ++ * ++ * Locking definitions for the device-mapper RAID45 target. ++ * ++ * This file is released under the GPL. ++ * ++ */ ++ ++#ifndef _DM_RAID45_H ++#define _DM_RAID45_H ++ ++/* Factor out to dm.h! */ ++#define STR_LEN(ptr, str) (ptr), (str), strlen((ptr)) ++/* Reference to array end. */ ++#define ARRAY_END(a) ((a) + ARRAY_SIZE(a)) ++ ++enum dm_lock_type { DM_RAID45_EX, DM_RAID45_SHARED }; ++ ++struct dm_raid45_locking_type { ++ /* Request a lock on a stripe. */ ++ void* (*lock)(sector_t key, enum dm_lock_type type); ++ ++ /* Release a lock on a stripe. 
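++	 * (takes the opaque handle returned by the lock() call above.)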
*/ ++ void (*unlock)(void *lock_handle); ++}; ++ ++#endif +--- a/drivers/md/dm-region-hash.c ++++ b/drivers/md/dm-region-hash.c +@@ -113,10 +113,11 @@ struct dm_region { + /* + * Conversion fns + */ +-static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) ++region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) + { + return sector >> rh->region_shift; + } ++EXPORT_SYMBOL_GPL(dm_rh_sector_to_region); + + sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) + { +@@ -496,7 +497,7 @@ void dm_rh_update_states(struct dm_regio + } + EXPORT_SYMBOL_GPL(dm_rh_update_states); + +-static void rh_inc(struct dm_region_hash *rh, region_t region) ++void dm_rh_inc(struct dm_region_hash *rh, region_t region) + { + struct dm_region *reg; + +@@ -518,6 +519,7 @@ static void rh_inc(struct dm_region_hash + + read_unlock(&rh->hash_lock); + } ++EXPORT_SYMBOL_GPL(dm_rh_inc); + + void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) + { +@@ -526,7 +528,7 @@ void dm_rh_inc_pending(struct dm_region_ + for (bio = bios->head; bio; bio = bio->bi_next) { + if (bio->bi_rw & REQ_FLUSH) + continue; +- rh_inc(rh, dm_rh_bio_to_region(rh, bio)); ++ dm_rh_inc(rh, dm_rh_bio_to_region(rh, bio)); + } + } + EXPORT_SYMBOL_GPL(dm_rh_inc_pending); +@@ -694,6 +696,19 @@ void dm_rh_delay(struct dm_region_hash * + } + EXPORT_SYMBOL_GPL(dm_rh_delay); + ++void dm_rh_delay_by_region(struct dm_region_hash *rh, ++ struct bio *bio, region_t region) ++{ ++ struct dm_region *reg; ++ ++ /* FIXME: locking. */ ++ read_lock(&rh->hash_lock); ++ reg = __rh_find(rh, region); ++ bio_list_add(®->delayed_bios, bio); ++ read_unlock(&rh->hash_lock); ++} ++EXPORT_SYMBOL_GPL(dm_rh_delay_by_region); ++ + void dm_rh_stop_recovery(struct dm_region_hash *rh) + { + int i; +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2606,6 +2606,7 @@ struct gendisk *dm_disk(struct mapped_de + { + return md->disk; + } ++EXPORT_SYMBOL_GPL(dm_disk); + + struct kobject *dm_kobject(struct mapped_device *md) + { +--- a/include/linux/dm-region-hash.h ++++ b/include/linux/dm-region-hash.h +@@ -49,6 +49,7 @@ struct dm_dirty_log *dm_rh_dirty_log(str + */ + region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio); + sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); ++region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector); + void *dm_rh_region_context(struct dm_region *reg); + + /* +@@ -72,11 +73,14 @@ void dm_rh_update_states(struct dm_regio + int dm_rh_flush(struct dm_region_hash *rh); + + /* Inc/dec pending count on regions. */ ++void dm_rh_inc(struct dm_region_hash *rh, region_t region); + void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios); + void dm_rh_dec(struct dm_region_hash *rh, region_t region); + + /* Delay bios on regions. */ + void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); ++void dm_rh_delay_by_region(struct dm_region_hash *rh, struct bio *bio, ++ region_t region); + + void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio); + diff --git a/patches.suse/dm-raid45-api-update-no-barriers b/patches.suse/dm-raid45-api-update-no-barriers new file mode 100644 index 0000000..92874bb --- /dev/null +++ b/patches.suse/dm-raid45-api-update-no-barriers @@ -0,0 +1,34 @@ +From: Jeff Mahoney +Subject: dm-raid45: api update after removal of barriers +Patch-mainline: Whenever dmraid45 is + + This patch updates dm-raid45 to deal with the removal of the + barrier interface. 
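+ (In the hunks below the empty-barrier test bio_empty_barrier() is
+ replaced by checking bio->bi_rw for REQ_FLUSH, and the compile-time
+ READ/WRITE index assertion is dropped.)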
+ +Signed-off-by: Jeff Mahoney +--- + drivers/md/dm-raid45.c | 13 ++++--------- + 1 file changed, 4 insertions(+), 9 deletions(-) + +--- a/drivers/md/dm-raid45.c ++++ b/drivers/md/dm-raid45.c +@@ -196,10 +195,6 @@ enum chunk_flags { + CHUNK_UPTODATE, /* Chunk pages are uptodate. */ + }; + +-#if READ != 0 || WRITE != 1 +-#error dm-raid45: READ/WRITE != 0/1 used as index!!! +-#endif +- + enum bl_type { + WRITE_QUEUED = WRITE + 1, + WRITE_MERGED, +@@ -3276,7 +3271,7 @@ static void do_ios(struct raid_set *rs, + * the input queue unless all work queues are empty + * and the stripe cache is inactive. + */ +- if (unlikely(bio_empty_barrier(bio))) { ++ if (bio->bi_rw & REQ_FLUSH) { + /* REMOVEME: statistics. */ + atomic_inc(rs->stats + S_BARRIER); + if (delay || diff --git a/patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md b/patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md new file mode 100644 index 0000000..c9d9cac --- /dev/null +++ b/patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md @@ -0,0 +1,45 @@ +From: Jeff Mahoney +Subject: [PATCH] dm-raid45: API update: Remove dm_put after dm_table_get_md +References: bnc#615656 +Patch-mainline: depends on dm-raid45 being upstream + + Commit ecdb2e257abc33ae6798d3ccba87bdafa40ef6b6, for 2.6.34, removed + the dm_get() call from dm_table_get_md(). The dm-raid45 code still has + matching dm_put() calls for the dm_table_get_md() calls. This patch removes + the dm_put() calls as it's causing too many reference drops and BUG_ONs. + +Signed-off-by: Jeff Mahoney +--- + drivers/md/dm-raid45.c | 4 ---- + 1 file changed, 4 deletions(-) + + drivers/md/dm-raid45.c | 4 ---- + 1 file changed, 4 deletions(-) + +--- a/drivers/md/dm-raid45.c ++++ b/drivers/md/dm-raid45.c +@@ -1567,7 +1567,6 @@ static int sc_init(struct raid_set *rs, + disk = dm_disk(md); + snprintf(sc->kc.name, sizeof(sc->kc.name), "%s-%d.%d", TARGET, + disk->first_minor, atomic_inc_return(&_stripe_sc_nr)); +- dm_put(md); + sc->kc.cache = kmem_cache_create(sc->kc.name, stripe_size(rs), + 0, 0, NULL); + if (!sc->kc.cache) +@@ -3981,8 +3980,6 @@ static void rs_set_read_ahead(struct rai + q->backing_dev_info.ra_pages = ra_pages; + } + } +- +- dm_put(md); + } + + /* Set congested function. */ +@@ -3994,7 +3991,6 @@ static void rs_set_congested_fn(struct r + /* Set congested function and data. */ + bdi->congested_fn = rs_congested; + bdi->congested_data = rs; +- dm_put(md); + } + + /* diff --git a/patches.suse/dm-raid45_2.6.27_20081027.patch b/patches.suse/dm-raid45_2.6.27_20081027.patch deleted file mode 100644 index f6a7e8c..0000000 --- a/patches.suse/dm-raid45_2.6.27_20081027.patch +++ /dev/null @@ -1,5606 +0,0 @@ -From: "Heinz Mauelshagen -Subject: DMRAID45 module -X-URL: http://people.redhat.com/~heinzm/sw/dm/dm-raid45/ -Patch-mainline: not yet - - DM-RAID 45 module. - - This driver is used for "Fake RAID" devices. 
- -Acked-by: Jeff Mahoney - ---- - - drivers/md/Kconfig | 15 - drivers/md/Makefile | 4 - drivers/md/dm-memcache.c | 301 ++ - drivers/md/dm-memcache.h | 68 - drivers/md/dm-message.c | 182 + - drivers/md/dm-message.h | 91 - drivers/md/dm-raid45.c | 4523 +++++++++++++++++++++++++++++++++++++++++ - drivers/md/dm-raid45.h | 28 - drivers/md/dm-region-hash.c | 108 - drivers/md/dm.c | 1 - include/linux/dm-region-hash.h | 109 - 11 files changed, 5314 insertions(+), 116 deletions(-) - ---- a/drivers/md/Kconfig -+++ b/drivers/md/Kconfig -@@ -120,7 +120,6 @@ config MD_RAID10 - - config MD_RAID456 - tristate "RAID-4/RAID-5/RAID-6 mode" -- depends on BLK_DEV_MD - select MD_RAID6_PQ - select ASYNC_MEMCPY - select ASYNC_XOR -@@ -249,9 +248,14 @@ config DM_SNAPSHOT - ---help--- - Allow volume managers to take writable snapshots of a device. - -+config DM_RAID -+ tristate -+ depends on BLK_DEV_DM -+ - config DM_MIRROR - tristate "Mirror target" - depends on BLK_DEV_DM -+ select DM_RAID - ---help--- - Allow volume managers to mirror logical volumes, also - needed for live data migration tools such as 'pvmove'. -@@ -313,6 +317,15 @@ config DM_DELAY - - If unsure, say N. - -+config DM_RAID45 -+ tristate "RAID 4/5 target (EXPERIMENTAL)" -+ depends on DM_RAID -+ depends on BLK_DEV_DM && EXPERIMENTAL -+ ---help--- -+ A target that supports RAID4 and RAID5 mappings. -+ -+ If unsure, say N. -+ - config DM_UEVENT - bool "DM uevents (EXPERIMENTAL)" - depends on BLK_DEV_DM && EXPERIMENTAL ---- a/drivers/md/Makefile -+++ b/drivers/md/Makefile -@@ -41,7 +41,9 @@ obj-$(CONFIG_DM_MULTIPATH) += dm-multipa - obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o - obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o - obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o --obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o -+obj-$(CONFIG_DM_RAID) += dm-region-hash.o dm-log.o -+obj-$(CONFIG_DM_MIRROR) += dm-mirror.o -+obj-$(CONFIG_DM_RAID45) += dm-raid45.o dm-memcache.o dm-message.o - obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o - obj-$(CONFIG_DM_ZERO) += dm-zero.o - ---- /dev/null -+++ b/drivers/md/dm-memcache.c -@@ -0,0 +1,302 @@ -+/* -+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * Device-mapper memory object handling: -+ * -+ * o allocate/free total_pages in a per client page pool. -+ * -+ * o allocate/free memory objects with chunks (1..n) of -+ * pages_per_chunk pages hanging off. -+ * -+ * This file is released under the GPL. -+ */ -+ -+#define DM_MEM_CACHE_VERSION "0.2" -+ -+#include "dm.h" -+#include "dm-memcache.h" -+#include -+#include -+ -+struct dm_mem_cache_client { -+ spinlock_t lock; -+ mempool_t *objs_pool; -+ struct page_list *free_list; -+ unsigned objects; -+ unsigned chunks; -+ unsigned pages_per_chunk; -+ unsigned free_pages; -+ unsigned total_pages; -+}; -+ -+/* -+ * Free pages and page_list elements of client. -+ */ -+static void free_cache_pages(struct page_list *list) -+{ -+ while (list) { -+ struct page_list *pl = list; -+ -+ list = pl->next; -+ BUG_ON(!pl->page); -+ __free_page(pl->page); -+ kfree(pl); -+ } -+} -+ -+/* -+ * Alloc number of pages and page_list elements as required by client. 
-+ */ -+static struct page_list *alloc_cache_pages(unsigned pages) -+{ -+ struct page_list *pl, *ret = NULL; -+ struct page *page; -+ -+ while (pages--) { -+ page = alloc_page(GFP_NOIO); -+ if (!page) -+ goto err; -+ -+ pl = kmalloc(sizeof(*pl), GFP_NOIO); -+ if (!pl) { -+ __free_page(page); -+ goto err; -+ } -+ -+ pl->page = page; -+ pl->next = ret; -+ ret = pl; -+ } -+ -+ return ret; -+ -+err: -+ free_cache_pages(ret); -+ return NULL; -+} -+ -+/* -+ * Allocate page_list elements from the pool to chunks of the memory object. -+ */ -+static void alloc_chunks(struct dm_mem_cache_client *cl, -+ struct dm_mem_cache_object *obj) -+{ -+ unsigned chunks = cl->chunks; -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ local_irq_disable(); -+ while (chunks--) { -+ unsigned p = cl->pages_per_chunk; -+ -+ obj[chunks].pl = NULL; -+ -+ while (p--) { -+ struct page_list *pl; -+ -+ /* Take next element from free list */ -+ spin_lock(&cl->lock); -+ pl = cl->free_list; -+ BUG_ON(!pl); -+ cl->free_list = pl->next; -+ spin_unlock(&cl->lock); -+ -+ pl->next = obj[chunks].pl; -+ obj[chunks].pl = pl; -+ } -+ } -+ -+ local_irq_restore(flags); -+} -+ -+/* -+ * Free page_list elements putting them back onto free list -+ */ -+static void free_chunks(struct dm_mem_cache_client *cl, -+ struct dm_mem_cache_object *obj) -+{ -+ unsigned chunks = cl->chunks; -+ unsigned long flags; -+ struct page_list *next, *pl; -+ -+ local_irq_save(flags); -+ local_irq_disable(); -+ while (chunks--) { -+ for (pl = obj[chunks].pl; pl; pl = next) { -+ next = pl->next; -+ -+ spin_lock(&cl->lock); -+ pl->next = cl->free_list; -+ cl->free_list = pl; -+ cl->free_pages++; -+ spin_unlock(&cl->lock); -+ } -+ } -+ -+ local_irq_restore(flags); -+} -+ -+/* -+ * Create/destroy dm memory cache client resources. -+ */ -+struct dm_mem_cache_client * -+dm_mem_cache_client_create(unsigned objects, unsigned chunks, -+ unsigned pages_per_chunk) -+{ -+ unsigned total_pages = objects * chunks * pages_per_chunk; -+ struct dm_mem_cache_client *client; -+ -+ BUG_ON(!total_pages); -+ client = kzalloc(sizeof(*client), GFP_KERNEL); -+ if (!client) -+ return ERR_PTR(-ENOMEM); -+ -+ client->objs_pool = mempool_create_kmalloc_pool(objects, -+ chunks * sizeof(struct dm_mem_cache_object)); -+ if (!client->objs_pool) -+ goto err; -+ -+ client->free_list = alloc_cache_pages(total_pages); -+ if (!client->free_list) -+ goto err1; -+ -+ spin_lock_init(&client->lock); -+ client->objects = objects; -+ client->chunks = chunks; -+ client->pages_per_chunk = pages_per_chunk; -+ client->free_pages = client->total_pages = total_pages; -+ return client; -+ -+err1: -+ mempool_destroy(client->objs_pool); -+err: -+ kfree(client); -+ return ERR_PTR(-ENOMEM); -+} -+EXPORT_SYMBOL(dm_mem_cache_client_create); -+ -+void dm_mem_cache_client_destroy(struct dm_mem_cache_client *cl) -+{ -+ BUG_ON(cl->free_pages != cl->total_pages); -+ free_cache_pages(cl->free_list); -+ mempool_destroy(cl->objs_pool); -+ kfree(cl); -+} -+EXPORT_SYMBOL(dm_mem_cache_client_destroy); -+ -+/* -+ * Grow a clients cache by an amount of pages. -+ * -+ * Don't call from interrupt context! 
-+ */ -+int dm_mem_cache_grow(struct dm_mem_cache_client *cl, unsigned objects) -+{ -+ unsigned pages = objects * cl->chunks * cl->pages_per_chunk; -+ struct page_list *pl, *last; -+ -+ BUG_ON(!pages); -+ pl = alloc_cache_pages(pages); -+ if (!pl) -+ return -ENOMEM; -+ -+ last = pl; -+ while (last->next) -+ last = last->next; -+ -+ spin_lock_irq(&cl->lock); -+ last->next = cl->free_list; -+ cl->free_list = pl; -+ cl->free_pages += pages; -+ cl->total_pages += pages; -+ cl->objects++; -+ spin_unlock_irq(&cl->lock); -+ -+ mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO); -+ return 0; -+} -+EXPORT_SYMBOL(dm_mem_cache_grow); -+ -+/* Shrink a clients cache by an amount of pages */ -+int dm_mem_cache_shrink(struct dm_mem_cache_client *cl, unsigned objects) -+{ -+ int r; -+ unsigned pages = objects * cl->chunks * cl->pages_per_chunk, p = pages; -+ unsigned long flags; -+ struct page_list *last = NULL, *pl, *pos; -+ -+ BUG_ON(!pages); -+ -+ spin_lock_irqsave(&cl->lock, flags); -+ pl = pos = cl->free_list; -+ while (p-- && pos->next) { -+ last = pos; -+ pos = pos->next; -+ } -+ -+ if (++p) -+ r = -ENOMEM; -+ else { -+ r = 0; -+ cl->free_list = pos; -+ cl->free_pages -= pages; -+ cl->total_pages -= pages; -+ cl->objects--; -+ last->next = NULL; -+ } -+ spin_unlock_irqrestore(&cl->lock, flags); -+ -+ if (!r) { -+ free_cache_pages(pl); -+ mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO); -+ } -+ -+ return r; -+} -+EXPORT_SYMBOL(dm_mem_cache_shrink); -+ -+/* -+ * Allocate/free a memory object -+ * -+ * Can be called from interrupt context -+ */ -+struct dm_mem_cache_object *dm_mem_cache_alloc(struct dm_mem_cache_client *cl) -+{ -+ int r = 0; -+ unsigned pages = cl->chunks * cl->pages_per_chunk; -+ unsigned long flags; -+ struct dm_mem_cache_object *obj; -+ -+ obj = mempool_alloc(cl->objs_pool, GFP_NOIO); -+ if (!obj) -+ return ERR_PTR(-ENOMEM); -+ -+ spin_lock_irqsave(&cl->lock, flags); -+ if (pages > cl->free_pages) -+ r = -ENOMEM; -+ else -+ cl->free_pages -= pages; -+ spin_unlock_irqrestore(&cl->lock, flags); -+ -+ if (r) { -+ mempool_free(obj, cl->objs_pool); -+ return ERR_PTR(r); -+ } -+ -+ alloc_chunks(cl, obj); -+ return obj; -+} -+EXPORT_SYMBOL(dm_mem_cache_alloc); -+ -+void dm_mem_cache_free(struct dm_mem_cache_client *cl, -+ struct dm_mem_cache_object *obj) -+{ -+ free_chunks(cl, obj); -+ mempool_free(obj, cl->objs_pool); -+} -+EXPORT_SYMBOL(dm_mem_cache_free); -+ -+MODULE_DESCRIPTION(DM_NAME " dm memory cache"); -+MODULE_AUTHOR("Heinz Mauelshagen "); -+MODULE_LICENSE("GPL"); ---- /dev/null -+++ b/drivers/md/dm-memcache.h -@@ -0,0 +1,68 @@ -+/* -+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * Device-mapper memory object handling: -+ * -+ * o allocate/free total_pages in a per client page pool. -+ * -+ * o allocate/free memory objects with chunks (1..n) of -+ * pages_per_chunk pages hanging off. -+ * -+ * This file is released under the GPL. -+ */ -+ -+#ifndef _DM_MEM_CACHE_H -+#define _DM_MEM_CACHE_H -+ -+#define DM_MEM_CACHE_H_VERSION "0.1" -+ -+#include "dm.h" -+#include -+ -+static inline struct page_list *pl_elem(struct page_list *pl, unsigned p) -+{ -+ while (pl && p--) -+ pl = pl->next; -+ -+ return pl; -+} -+ -+struct dm_mem_cache_object { -+ struct page_list *pl; /* Dynamically allocated array */ -+ void *private; /* Caller context reference */ -+}; -+ -+struct dm_mem_cache_client; -+ -+/* -+ * Create/destroy dm memory cache client resources. 
-+ * -+ * On creation, a number of @objects with @chunks of -+ * @pages_per_chunk pages will be allocated. -+ */ -+struct dm_mem_cache_client * -+dm_mem_cache_client_create(unsigned objects, unsigned chunks, -+ unsigned pages_per_chunk); -+void dm_mem_cache_client_destroy(struct dm_mem_cache_client *client); -+ -+/* -+ * Grow/shrink a dm memory cache client resources -+ * by @objetcs amount of objects. -+ */ -+int dm_mem_cache_grow(struct dm_mem_cache_client *client, unsigned objects); -+int dm_mem_cache_shrink(struct dm_mem_cache_client *client, unsigned objects); -+ -+/* -+ * Allocate/free a memory object -+ * -+ * On allocation one object with an amount of chunks and -+ * an amount of pages per chunk will be returned on success. -+ */ -+struct dm_mem_cache_object * -+dm_mem_cache_alloc(struct dm_mem_cache_client *client); -+void dm_mem_cache_free(struct dm_mem_cache_client *client, -+ struct dm_mem_cache_object *object); -+ -+#endif ---- /dev/null -+++ b/drivers/md/dm-message.c -@@ -0,0 +1,182 @@ -+/* -+ * Copyright (C) 2007,2008 Red Hat Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * General device-mapper message interface argument parser. -+ * -+ * This file is released under the GPL. -+ * -+ * device-mapper message parser. -+ * -+ */ -+ -+#include "dm.h" -+#include "dm-message.h" -+#include -+ -+#define DM_MSG_PREFIX "dm_message" -+ -+/* Basename of a path. */ -+static inline char * -+basename(char *s) -+{ -+ char *p = strrchr(s, '/'); -+ -+ return p ? p + 1 : s; -+} -+ -+/* Get an argument depending on type. */ -+static void -+message_arguments(struct dm_msg *msg, int argc, char **argv) -+{ -+ -+ if (argc) { -+ int i; -+ struct dm_message_argument *args = msg->spec->args; -+ -+ for (i = 0; i < args->num_args; i++) { -+ int r; -+ unsigned long **ptr = args->ptr; -+ enum dm_message_argument_type type = args->types[i]; -+ -+ switch (type) { -+ case dm_msg_base_t: -+ ((char **) ptr)[i] = basename(argv[i]); -+ break; -+ -+ case dm_msg_str_t: -+ ((char **) ptr)[i] = argv[i]; -+ break; -+ -+ case dm_msg_int_t: -+ r = sscanf(argv[i], "%d", ((int **) ptr)[i]); -+ goto check; -+ -+ case dm_msg_uint_t: -+ r = sscanf(argv[i], "%u", -+ ((unsigned **) ptr)[i]); -+ goto check; -+ -+ case dm_msg_uint64_t: -+ r = sscanf(argv[i], "%llu", -+ ((unsigned long long **) ptr)[i]); -+ -+check: -+ if (r != 1) { -+ set_bit(dm_msg_ret_undef, &msg->ret); -+ set_bit(dm_msg_ret_arg, &msg->ret); -+ } -+ } -+ } -+ } -+} -+ -+/* Parse message options. */ -+static void -+message_options_parse(struct dm_msg *msg, int argc, char **argv) -+{ -+ int hit = 0; -+ unsigned long *action; -+ size_t l1 = strlen(*argv), l_hit = 0; -+ struct dm_message_option *o = msg->spec->options; -+ char **option, **option_end = o->options + o->num_options; -+ -+ for (option = o->options, action = o->actions; -+ option < option_end; option++, action++) { -+ size_t l2 = strlen(*option); -+ -+ if (!strnicmp(*argv, *option, min(l1, l2))) { -+ hit++; -+ l_hit = l2; -+ set_bit(*action, &msg->action); -+ } -+ } -+ -+ /* Assume error. */ -+ msg->ret = 0; -+ set_bit(dm_msg_ret_option, &msg->ret); -+ if (!hit || l1 > l_hit) -+ set_bit(dm_msg_ret_undef, &msg->ret); /* Undefined option. */ -+ else if (hit > 1) -+ set_bit(dm_msg_ret_ambiguous, &msg->ret); /* Ambiguous option.*/ -+ else { -+ clear_bit(dm_msg_ret_option, &msg->ret); /* Option OK. 
*/ -+ message_arguments(msg, --argc, ++argv); -+ } -+} -+ -+static inline void -+print_ret(const char *caller, unsigned long ret) -+{ -+ struct { -+ unsigned long err; -+ const char *err_str; -+ } static err_msg[] = { -+ { dm_msg_ret_ambiguous, "message ambiguous" }, -+ { dm_msg_ret_inval, "message invalid" }, -+ { dm_msg_ret_undef, "message undefined" }, -+ { dm_msg_ret_arg, "message argument" }, -+ { dm_msg_ret_argcount, "message argument count" }, -+ { dm_msg_ret_option, "option" }, -+ }, *e = ARRAY_END(err_msg); -+ -+ while (e-- > err_msg) { -+ if (test_bit(e->err, &ret)) -+ DMERR("%s %s", caller, e->err_str); -+ } -+} -+ -+/* Parse a message action. */ -+int -+dm_message_parse(const char *caller, struct dm_msg *msg, void *context, -+ int argc, char **argv) -+{ -+ int hit = 0; -+ size_t l1 = strlen(*argv), l_hit = 0; -+ struct dm_msg_spec *s, *s_hit = NULL, -+ *s_end = msg->specs + msg->num_specs; -+ -+ if (argc < 2) -+ return -EINVAL; -+ -+ for (s = msg->specs; s < s_end; s++) { -+ size_t l2 = strlen(s->cmd); -+ -+ if (!strnicmp(*argv, s->cmd, min(l1, l2))) { -+ hit++; -+ l_hit = l2; -+ s_hit = s; -+ } -+ } -+ -+ msg->ret = 0; -+ if (!hit || l1 > l_hit) /* No hit or message string too long. */ -+ set_bit(dm_msg_ret_undef, &msg->ret); -+ else if (hit > 1) /* Ambiguous message. */ -+ set_bit(dm_msg_ret_ambiguous, &msg->ret); -+ else if (argc - 2 != s_hit->args->num_args) { -+ set_bit(dm_msg_ret_undef, &msg->ret); -+ set_bit(dm_msg_ret_argcount, &msg->ret); -+ } -+ -+ if (msg->ret) -+ goto bad; -+ -+ msg->action = 0; -+ msg->spec = s_hit; -+ set_bit(s_hit->action, &msg->action); -+ message_options_parse(msg, --argc, ++argv); -+ -+ if (!msg->ret) -+ return msg->spec->f(msg, context); -+ -+bad: -+ print_ret(caller, msg->ret); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dm_message_parse); -+ -+MODULE_DESCRIPTION(DM_NAME " device-mapper target message parser"); -+MODULE_AUTHOR("Heinz Mauelshagen "); -+MODULE_LICENSE("GPL"); ---- /dev/null -+++ b/drivers/md/dm-message.h -@@ -0,0 +1,91 @@ -+/* -+ * Copyright (C) 2007,2008 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * General device-mapper message interface argument parser. -+ * -+ * This file is released under the GPL. -+ * -+ */ -+ -+#ifndef DM_MESSAGE_H -+#define DM_MESSAGE_H -+ -+/* Factor out to dm.h. */ -+/* Reference to array end. */ -+#define ARRAY_END(a) ((a) + ARRAY_SIZE(a)) -+ -+/* Message return bits. */ -+enum dm_message_return { -+ dm_msg_ret_ambiguous, /* Action ambiguous. */ -+ dm_msg_ret_inval, /* Action invalid. */ -+ dm_msg_ret_undef, /* Action undefined. */ -+ -+ dm_msg_ret_option, /* Option error. */ -+ dm_msg_ret_arg, /* Argument error. */ -+ dm_msg_ret_argcount, /* Argument count error. */ -+}; -+ -+/* Message argument type conversions. */ -+enum dm_message_argument_type { -+ dm_msg_base_t, /* Basename string. */ -+ dm_msg_str_t, /* String. */ -+ dm_msg_int_t, /* Signed int. */ -+ dm_msg_uint_t, /* Unsigned int. */ -+ dm_msg_uint64_t, /* Unsigned int 64. */ -+}; -+ -+/* A message option. */ -+struct dm_message_option { -+ unsigned num_options; -+ char **options; -+ unsigned long *actions; -+}; -+ -+/* Message arguments and types. */ -+struct dm_message_argument { -+ unsigned num_args; -+ unsigned long **ptr; -+ enum dm_message_argument_type types[]; -+}; -+ -+/* Client message. */ -+struct dm_msg { -+ unsigned long action; /* Identified action. */ -+ unsigned long ret; /* Return bits. */ -+ unsigned num_specs; /* # of sepcifications listed. 
*/ -+ struct dm_msg_spec *specs; /* Specification list. */ -+ struct dm_msg_spec *spec; /* Specification selected. */ -+}; -+ -+/* Secification of the message. */ -+struct dm_msg_spec { -+ const char *cmd; /* Name of the command (i.e. 'bandwidth'). */ -+ unsigned long action; -+ struct dm_message_option *options; -+ struct dm_message_argument *args; -+ unsigned long parm; /* Parameter to pass through to callback. */ -+ /* Function to process for action. */ -+ int (*f) (struct dm_msg *msg, void *context); -+}; -+ -+/* Parameter access macros. */ -+#define DM_MSG_PARM(msg) ((msg)->spec->parm) -+ -+#define DM_MSG_STR_ARGS(msg, idx) ((char *) *(msg)->spec->args->ptr[idx]) -+#define DM_MSG_INT_ARGS(msg, idx) ((int) *(msg)->spec->args->ptr[idx]) -+#define DM_MSG_UINT_ARGS(msg, idx) ((unsigned) DM_MSG_INT_ARG(msg, idx)) -+#define DM_MSG_UINT64_ARGS(msg, idx) ((uint64_t) *(msg)->spec->args->ptr[idx]) -+ -+#define DM_MSG_STR_ARG(msg) DM_MSG_STR_ARGS(msg, 0) -+#define DM_MSG_INT_ARG(msg) DM_MSG_INT_ARGS(msg, 0) -+#define DM_MSG_UINT_ARG(msg) DM_MSG_UINT_ARGS(msg, 0) -+#define DM_MSG_UINT64_ARG(msg) DM_MSG_UINT64_ARGS(msg, 0) -+ -+ -+/* Parse a message and its options and optionally call a function back. */ -+int dm_message_parse(const char *caller, struct dm_msg *msg, void *context, -+ int argc, char **argv); -+ -+#endif ---- /dev/null -+++ b/drivers/md/dm-raid45.c -@@ -0,0 +1,4524 @@ -+/* -+ * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * This file is released under the GPL. -+ * -+ * -+ * Linux 2.6 Device Mapper RAID4 and RAID5 target. -+ * -+ * Supports: -+ * o RAID4 with dedicated and selectable parity device -+ * o RAID5 with rotating parity (left+right, symmetric+asymmetric) -+ * o run time optimization of xor algorithm used to calculate parity -+ * -+ * -+ * Thanks to MD for: -+ * o the raid address calculation algorithm -+ * o the base of the biovec <-> page list copier. -+ * -+ * -+ * Uses region hash to keep track of how many writes are in flight to -+ * regions in order to use dirty log to keep state of regions to recover: -+ * -+ * o clean regions (those which are synchronized -+ * and don't have write io in flight) -+ * o dirty regions (those with write io in flight) -+ * -+ * -+ * On startup, any dirty regions are migrated to the 'nosync' state -+ * and are subject to recovery by the daemon. -+ * -+ * See raid_ctr() for table definition. -+ * -+ * -+ * FIXME: -+ * o add virtual interface for locking -+ * o remove instrumentation (REMOVEME:) -+ * -+ */ -+ -+static const char *version = "v0.2431"; -+ -+#include "dm.h" -+#include "dm-memcache.h" -+#include "dm-message.h" -+#include "dm-raid45.h" -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+/* # of parallel recovered regions */ -+/* FIXME: cope with multiple recovery stripes in raid_set struct. */ -+#define MAX_RECOVER 1 /* needs to be 1! */ -+ -+/* -+ * Configurable parameters -+ */ -+#define INLINE -+ -+/* Default # of stripes if not set in constructor. */ -+#define STRIPES 64 -+ -+/* Minimum/maximum # of selectable stripes. */ -+#define STRIPES_MIN 8 -+#define STRIPES_MAX 16384 -+ -+/* Default chunk size in sectors if not set in constructor. */ -+#define CHUNK_SIZE 64 -+ -+/* Default io size in sectors if not set in constructor. */ -+#define IO_SIZE_MIN SECTORS_PER_PAGE -+#define IO_SIZE IO_SIZE_MIN -+ -+/* Maximum setable chunk size in sectors. */ -+#define CHUNK_SIZE_MAX 16384 -+ -+/* Recover io size default in sectors. 
*/ -+#define RECOVER_IO_SIZE_MIN 64 -+#define RECOVER_IO_SIZE 256 -+ -+/* Default percentage recover io bandwidth. */ -+#define BANDWIDTH 10 -+#define BANDWIDTH_MIN 1 -+#define BANDWIDTH_MAX 100 -+/* -+ * END Configurable parameters -+ */ -+ -+#define TARGET "dm-raid45" -+#define DAEMON "kraid45d" -+#define DM_MSG_PREFIX TARGET -+ -+#define SECTORS_PER_PAGE (PAGE_SIZE >> SECTOR_SHIFT) -+ -+/* Amount/size for __xor(). */ -+#define SECTORS_PER_XOR SECTORS_PER_PAGE -+#define XOR_SIZE PAGE_SIZE -+ -+/* Derive raid_set from stripe_cache pointer. */ -+#define RS(x) container_of(x, struct raid_set, sc) -+ -+/* Check value in range. */ -+#define range_ok(i, min, max) (i >= min && i <= max) -+ -+/* Page reference. */ -+#define PAGE(stripe, p) ((stripe)->obj[p].pl->page) -+ -+/* Bio list reference. */ -+#define BL(stripe, p, rw) (stripe->ss[p].bl + rw) -+ -+/* Page list reference. */ -+#define PL(stripe, p) (stripe->obj[p].pl) -+ -+/* Check argument is power of 2. */ -+#define POWER_OF_2(a) (!(a & (a - 1))) -+ -+/* Factor out to dm-bio-list.h */ -+static inline void bio_list_push(struct bio_list *bl, struct bio *bio) -+{ -+ bio->bi_next = bl->head; -+ bl->head = bio; -+ -+ if (!bl->tail) -+ bl->tail = bio; -+} -+ -+/* Factor out to dm.h */ -+#define TI_ERR_RET(str, ret) \ -+ do { ti->error = DM_MSG_PREFIX ": " str; return ret; } while (0); -+#define TI_ERR(str) TI_ERR_RET(str, -EINVAL) -+ -+/*----------------------------------------------------------------- -+ * Stripe cache -+ * -+ * Cache for all reads and writes to raid sets (operational or degraded) -+ * -+ * We need to run all data to and from a RAID set through this cache, -+ * because parity chunks need to get calculated from data chunks -+ * or, in the degraded/resynchronization case, missing chunks need -+ * to be reconstructed using the other chunks of the stripe. -+ *---------------------------------------------------------------*/ -+/* Protect kmem cache # counter. */ -+static atomic_t _stripe_sc_nr = ATOMIC_INIT(-1); /* kmem cache # counter. */ -+ -+/* A stripe set (holds bios hanging off). */ -+struct stripe_set { -+ struct stripe *stripe; /* Backpointer to stripe for endio(). */ -+ struct bio_list bl[3]; /* Reads, writes, and writes merged. */ -+#define WRITE_MERGED 2 -+}; -+ -+#if READ != 0 || WRITE != 1 -+#error dm-raid45: READ/WRITE != 0/1 used as index!!! -+#endif -+ -+/* -+ * Stripe linked list indexes. Keep order, because the stripe -+ * and the stripe cache rely on the first 3! -+ */ -+enum list_types { -+ LIST_IO = 0, /* Stripes with io pending. */ -+ LIST_ENDIO, /* Stripes to endio. */ -+ LIST_LRU, /* Least recently used stripes. */ -+ LIST_HASH, /* Hashed stripes. */ -+ LIST_RECOVER = LIST_HASH, /* For recovery type stripes only. */ -+ NR_LISTS, /* To size array in struct stripe. */ -+}; -+ -+enum lock_types { -+ LOCK_ENDIO = 0, /* Protect endio list. */ -+ LOCK_LRU, /* Protect lru list. */ -+ NR_LOCKS, /* To size array in struct stripe_cache. */ -+}; -+ -+/* A stripe: the io object to handle all reads and writes to a RAID set. */ -+struct stripe { -+ struct stripe_cache *sc; /* Backpointer to stripe cache. */ -+ -+ sector_t key; /* Hash key. */ -+ region_t region; /* Region stripe is mapped to. */ -+ -+ /* Reference count. */ -+ atomic_t cnt; -+ -+ struct { -+ unsigned long flags; /* flags (see below). */ -+ -+ /* -+ * Pending ios in flight: -+ * -+ * used as a 'lock' to control move of stripe to endio list -+ */ -+ atomic_t pending; /* Pending ios in flight. */ -+ -+ /* Sectors to read and write for multi page stripe sets. 
*/ -+ unsigned size; -+ } io; -+ -+ /* Lock on stripe (for clustering). */ -+ void *lock; -+ -+ /* -+ * 4 linked lists: -+ * o io list to flush io -+ * o endio list -+ * o LRU list to put stripes w/o reference count on -+ * o stripe cache hash -+ */ -+ struct list_head lists[NR_LISTS]; -+ -+ struct { -+ unsigned short parity; /* Parity chunk index. */ -+ short recover; /* Recovery chunk index. */ -+ } idx; -+ -+ /* This sets memory cache object (dm-mem-cache). */ -+ struct dm_mem_cache_object *obj; -+ -+ /* Array of stripe sets (dynamically allocated). */ -+ struct stripe_set ss[0]; -+}; -+ -+/* States stripes can be in (flags field). */ -+enum stripe_states { -+ STRIPE_ACTIVE, /* Active io on stripe. */ -+ STRIPE_ERROR, /* io error on stripe. */ -+ STRIPE_MERGED, /* Writes got merged. */ -+ STRIPE_READ, /* Read. */ -+ STRIPE_RBW, /* Read-before-write. */ -+ STRIPE_RECONSTRUCT, /* reconstruct of a missing chunk required. */ -+ STRIPE_RECOVER, /* Stripe used for RAID set recovery. */ -+}; -+ -+/* ... and macros to access them. */ -+#define BITOPS(name, what, var, flag) \ -+static inline int TestClear ## name ## what(struct var *v) \ -+{ return test_and_clear_bit(flag, &v->io.flags); } \ -+static inline int TestSet ## name ## what(struct var *v) \ -+{ return test_and_set_bit(flag, &v->io.flags); } \ -+static inline void Clear ## name ## what(struct var *v) \ -+{ clear_bit(flag, &v->io.flags); } \ -+static inline void Set ## name ## what(struct var *v) \ -+{ set_bit(flag, &v->io.flags); } \ -+static inline int name ## what(struct var *v) \ -+{ return test_bit(flag, &v->io.flags); } -+ -+ -+BITOPS(Stripe, Active, stripe, STRIPE_ACTIVE) -+BITOPS(Stripe, Merged, stripe, STRIPE_MERGED) -+BITOPS(Stripe, Error, stripe, STRIPE_ERROR) -+BITOPS(Stripe, Read, stripe, STRIPE_READ) -+BITOPS(Stripe, RBW, stripe, STRIPE_RBW) -+BITOPS(Stripe, Reconstruct, stripe, STRIPE_RECONSTRUCT) -+BITOPS(Stripe, Recover, stripe, STRIPE_RECOVER) -+ -+/* A stripe hash. */ -+struct stripe_hash { -+ struct list_head *hash; -+ unsigned buckets; -+ unsigned mask; -+ unsigned prime; -+ unsigned shift; -+}; -+ -+/* A stripe cache. */ -+struct stripe_cache { -+ /* Stripe hash. */ -+ struct stripe_hash hash; -+ -+ /* Stripes with io to flush, stripes to endio and LRU lists. */ -+ struct list_head lists[3]; -+ -+ /* Locks to protect endio and lru lists. */ -+ spinlock_t locks[NR_LOCKS]; -+ -+ /* Slab cache to allocate stripes from. */ -+ struct { -+ struct kmem_cache *cache; /* Cache itself. */ -+ char name[32]; /* Unique name. */ -+ } kc; -+ -+ struct dm_io_client *dm_io_client; /* dm-io client resource context. */ -+ -+ /* dm-mem-cache client resource context. */ -+ struct dm_mem_cache_client *mem_cache_client; -+ -+ int stripes_parm; /* # stripes parameter from constructor. */ -+ atomic_t stripes; /* actual # of stripes in cache. */ -+ atomic_t stripes_to_shrink; /* # of stripes to shrink cache by. */ -+ atomic_t stripes_last; /* last # of stripes in cache. */ -+ atomic_t active_stripes; /* actual # of active stripes in cache. */ -+ -+ /* REMOVEME: */ -+ atomic_t max_active_stripes; /* actual # of active stripes in cache. */ -+}; -+ -+/* Flag specs for raid_dev */ ; -+enum raid_dev_flags { DEVICE_FAILED, IO_QUEUED }; -+ -+/* The raid device in a set. */ -+struct raid_dev { -+ struct dm_dev *dev; -+ unsigned long flags; /* raid_dev_flags. */ -+ sector_t start; /* offset to map to. */ -+}; -+ -+/* Flags spec for raid_set. */ -+enum raid_set_flags { -+ RS_CHECK_OVERWRITE, /* Check for chunk overwrites. 
*/ -+ RS_DEAD, /* RAID set inoperational. */ -+ RS_DEVEL_STATS, /* REMOVEME: display status information. */ -+ RS_IO_ERROR, /* io error on set. */ -+ RS_RECOVER, /* Do recovery. */ -+ RS_RECOVERY_BANDWIDTH, /* Allow recovery bandwidth (delayed bios). */ -+ RS_REGION_GET, /* get a region to recover. */ -+ RS_SC_BUSY, /* stripe cache busy -> send an event. */ -+ RS_SUSPENDED, /* RAID set suspendedn. */ -+}; -+ -+/* REMOVEME: devel stats counters. */ -+enum stats_types { -+ S_BIOS_READ, -+ S_BIOS_ADDED_READ, -+ S_BIOS_ENDIO_READ, -+ S_BIOS_WRITE, -+ S_BIOS_ADDED_WRITE, -+ S_BIOS_ENDIO_WRITE, -+ S_CAN_MERGE, -+ S_CANT_MERGE, -+ S_CONGESTED, -+ S_DM_IO_READ, -+ S_DM_IO_WRITE, -+ S_ACTIVE_READS, -+ S_BANDWIDTH, -+ S_BARRIER, -+ S_BIO_COPY_PL_NEXT, -+ S_DEGRADED, -+ S_DELAYED_BIOS, -+ S_EVICT, -+ S_FLUSHS, -+ S_HITS_1ST, -+ S_IOS_POST, -+ S_INSCACHE, -+ S_MAX_LOOKUP, -+ S_MERGE_PAGE_LOCKED, -+ S_NO_BANDWIDTH, -+ S_NOT_CONGESTED, -+ S_NO_RW, -+ S_NOSYNC, -+ S_PROHIBITPAGEIO, -+ S_RECONSTRUCT_EI, -+ S_RECONSTRUCT_DEV, -+ S_REDO, -+ S_REQUEUE, -+ S_STRIPE_ERROR, -+ S_SUM_DELAYED_BIOS, -+ S_XORS, -+ S_NR_STATS, /* # of stats counters. */ -+}; -+ -+/* Status type -> string mappings. */ -+struct stats_map { -+ const enum stats_types type; -+ const char *str; -+}; -+ -+static struct stats_map stats_map[] = { -+ { S_BIOS_READ, "r=" }, -+ { S_BIOS_ADDED_READ, "/" }, -+ { S_BIOS_ENDIO_READ, "/" }, -+ { S_BIOS_WRITE, " w=" }, -+ { S_BIOS_ADDED_WRITE, "/" }, -+ { S_BIOS_ENDIO_WRITE, "/" }, -+ { S_DM_IO_READ, " rc=" }, -+ { S_DM_IO_WRITE, " wc=" }, -+ { S_ACTIVE_READS, " active_reads=" }, -+ { S_BANDWIDTH, " bandwidth=" }, -+ { S_NO_BANDWIDTH, " no_bandwidth=" }, -+ { S_BARRIER, " barrier=" }, -+ { S_BIO_COPY_PL_NEXT, " bio_copy_pl_next=" }, -+ { S_CAN_MERGE, " can_merge=" }, -+ { S_MERGE_PAGE_LOCKED, "/page_locked=" }, -+ { S_CANT_MERGE, "/cant_merge=" }, -+ { S_CONGESTED, " congested=" }, -+ { S_NOT_CONGESTED, "/not_congested=" }, -+ { S_DEGRADED, " degraded=" }, -+ { S_DELAYED_BIOS, " delayed_bios=" }, -+ { S_SUM_DELAYED_BIOS, "/sum_delayed_bios=" }, -+ { S_EVICT, " evict=" }, -+ { S_FLUSHS, " flushs=" }, -+ { S_HITS_1ST, " hits_1st=" }, -+ { S_IOS_POST, " ios_post=" }, -+ { S_INSCACHE, " inscache=" }, -+ { S_MAX_LOOKUP, " max_lookup=" }, -+ { S_NO_RW, " no_rw=" }, -+ { S_NOSYNC, " nosync=" }, -+ { S_PROHIBITPAGEIO, " ProhibitPageIO=" }, -+ { S_RECONSTRUCT_EI, " reconstruct_ei=" }, -+ { S_RECONSTRUCT_DEV, " reconstruct_dev=" }, -+ { S_REDO, " redo=" }, -+ { S_REQUEUE, " requeue=" }, -+ { S_STRIPE_ERROR, " stripe_error=" }, -+ { S_XORS, " xors=" }, -+}; -+ -+/* -+ * A RAID set. -+ */ -+typedef void (*xor_function_t)(unsigned count, unsigned long **data); -+struct raid_set { -+ struct dm_target *ti; /* Target pointer. */ -+ -+ struct { -+ unsigned long flags; /* State flags. */ -+ spinlock_t in_lock; /* Protects central input list below. */ -+ struct bio_list in; /* Pending ios (central input list). */ -+ struct bio_list work; /* ios work set. */ -+ wait_queue_head_t suspendq; /* suspend synchronization. */ -+ atomic_t in_process; /* counter of queued bios (suspendq). */ -+ atomic_t in_process_max;/* counter of queued bios max. */ -+ -+ /* io work. */ -+ struct workqueue_struct *wq; -+ struct delayed_work dws; -+ } io; -+ -+ /* External locking. */ -+ struct dm_raid45_locking_type *locking; -+ -+ struct stripe_cache sc; /* Stripe cache for this set. */ -+ -+ /* Xor optimization. */ -+ struct { -+ struct xor_func *f; -+ unsigned chunks; -+ unsigned speed; -+ } xor; -+ -+ /* Recovery parameters. 
*/ -+ struct recover { -+ struct dm_dirty_log *dl; /* Dirty log. */ -+ struct dm_region_hash *rh; /* Region hash. */ -+ -+ /* dm-mem-cache client resource context for recovery stripes. */ -+ struct dm_mem_cache_client *mem_cache_client; -+ -+ struct list_head stripes; /* List of recovery stripes. */ -+ -+ region_t nr_regions; -+ region_t nr_regions_to_recover; -+ region_t nr_regions_recovered; -+ unsigned long start_jiffies; -+ unsigned long end_jiffies; -+ -+ unsigned bandwidth; /* Recovery bandwidth [%]. */ -+ unsigned bandwidth_work; /* Recovery bandwidth [factor]. */ -+ unsigned bandwidth_parm; /* " constructor parm. */ -+ unsigned io_size; /* io size <= chunk size. */ -+ unsigned io_size_parm; /* io size ctr parameter. */ -+ -+ /* recovery io throttling. */ -+ atomic_t io_count[2]; /* counter recover/regular io. */ -+ unsigned long last_jiffies; -+ -+ struct dm_region *reg; /* Actual region to recover. */ -+ sector_t pos; /* Position within region to recover. */ -+ sector_t end; /* End of region to recover. */ -+ } recover; -+ -+ /* RAID set parameters. */ -+ struct { -+ struct raid_type *raid_type; /* RAID type (eg, RAID4). */ -+ unsigned raid_parms; /* # variable raid parameters. */ -+ -+ unsigned chunk_size; /* Sectors per chunk. */ -+ unsigned chunk_size_parm; -+ unsigned chunk_mask; /* Mask for amount. */ -+ unsigned chunk_shift; /* rsector chunk size shift. */ -+ -+ unsigned io_size; /* Sectors per io. */ -+ unsigned io_size_parm; -+ unsigned io_mask; /* Mask for amount. */ -+ unsigned io_shift_mask; /* Mask for raid_address(). */ -+ unsigned io_shift; /* rsector io size shift. */ -+ unsigned pages_per_io; /* Pages per io. */ -+ -+ sector_t sectors_per_dev; /* Sectors per device. */ -+ -+ atomic_t failed_devs; /* Amount of devices failed. */ -+ -+ /* Index of device to initialize. */ -+ int dev_to_init; -+ int dev_to_init_parm; -+ -+ /* Raid devices dynamically allocated. */ -+ unsigned raid_devs; /* # of RAID devices below. */ -+ unsigned data_devs; /* # of RAID data devices. */ -+ -+ int ei; /* index of failed RAID device. */ -+ -+ /* index of dedicated parity device (i.e. RAID4). */ -+ int pi; -+ int pi_parm; /* constructor parm for status output. */ -+ } set; -+ -+ /* REMOVEME: devel stats counters. */ -+ atomic_t stats[S_NR_STATS]; -+ -+ /* Dynamically allocated temporary pointers for xor(). */ -+ unsigned long **data; -+ -+ /* Dynamically allocated RAID devices. Alignment? */ -+ struct raid_dev dev[0]; -+}; -+ -+ -+BITOPS(RS, Bandwidth, raid_set, RS_RECOVERY_BANDWIDTH) -+BITOPS(RS, CheckOverwrite, raid_set, RS_CHECK_OVERWRITE) -+BITOPS(RS, Dead, raid_set, RS_DEAD) -+BITOPS(RS, DevelStats, raid_set, RS_DEVEL_STATS) -+BITOPS(RS, IoError, raid_set, RS_IO_ERROR) -+BITOPS(RS, Recover, raid_set, RS_RECOVER) -+BITOPS(RS, RegionGet, raid_set, RS_REGION_GET) -+BITOPS(RS, ScBusy, raid_set, RS_SC_BUSY) -+BITOPS(RS, Suspended, raid_set, RS_SUSPENDED) -+#undef BITOPS -+ -+#define PageIO(page) PageChecked(page) -+#define AllowPageIO(page) SetPageChecked(page) -+#define ProhibitPageIO(page) ClearPageChecked(page) -+ -+/*----------------------------------------------------------------- -+ * Raid-4/5 set structures. -+ *---------------------------------------------------------------*/ -+/* RAID level definitions. */ -+enum raid_level { -+ raid4, -+ raid5, -+}; -+ -+/* Symmetric/Asymmetric, Left/Right parity rotating algorithms. */ -+enum raid_algorithm { -+ none, -+ left_asym, -+ right_asym, -+ left_sym, -+ right_sym, -+}; -+ -+struct raid_type { -+ const char *name; /* RAID algorithm. 
*/ -+ const char *descr; /* Descriptor text for logging. */ -+ const unsigned parity_devs; /* # of parity devices. */ -+ const unsigned minimal_devs; /* minimal # of devices in set. */ -+ const enum raid_level level; /* RAID level. */ -+ const enum raid_algorithm algorithm; /* RAID algorithm. */ -+}; -+ -+/* Supported raid types and properties. */ -+static struct raid_type raid_types[] = { -+ {"raid4", "RAID4 (dedicated parity disk)", 1, 3, raid4, none}, -+ {"raid5_la", "RAID5 (left asymmetric)", 1, 3, raid5, left_asym}, -+ {"raid5_ra", "RAID5 (right asymmetric)", 1, 3, raid5, right_asym}, -+ {"raid5_ls", "RAID5 (left symmetric)", 1, 3, raid5, left_sym}, -+ {"raid5_rs", "RAID5 (right symmetric)", 1, 3, raid5, right_sym}, -+}; -+ -+/* Address as calculated by raid_address(). */ -+struct address { -+ sector_t key; /* Hash key (start address of stripe). */ -+ unsigned di, pi; /* Data and parity disks index. */ -+}; -+ -+/* REMOVEME: reset statistics counters. */ -+static void stats_reset(struct raid_set *rs) -+{ -+ unsigned s = S_NR_STATS; -+ -+ while (s--) -+ atomic_set(rs->stats + s, 0); -+} -+ -+/*---------------------------------------------------------------- -+ * RAID set management routines. -+ *--------------------------------------------------------------*/ -+/* -+ * Begin small helper functions. -+ */ -+/* Queue (optionally delayed) io work. */ -+static void wake_do_raid_delayed(struct raid_set *rs, unsigned long delay) -+{ -+ struct delayed_work *dws = &rs->io.dws; -+ -+ cancel_delayed_work(dws); -+ queue_delayed_work(rs->io.wq, dws, delay); -+} -+ -+/* Queue io work immediately (called from region hash too). */ -+static INLINE void wake_do_raid(void *context) -+{ -+ wake_do_raid_delayed(context, 0); -+} -+ -+/* Wait until all io has been processed. */ -+static INLINE void wait_ios(struct raid_set *rs) -+{ -+ wait_event(rs->io.suspendq, !atomic_read(&rs->io.in_process)); -+} -+ -+/* Declare io queued to device. */ -+static INLINE void io_dev_queued(struct raid_dev *dev) -+{ -+ set_bit(IO_QUEUED, &dev->flags); -+} -+ -+/* Io on device and reset ? */ -+static inline int io_dev_clear(struct raid_dev *dev) -+{ -+ return test_and_clear_bit(IO_QUEUED, &dev->flags); -+} -+ -+/* Get an io reference. */ -+static INLINE void io_get(struct raid_set *rs) -+{ -+ int p = atomic_inc_return(&rs->io.in_process); -+ -+ if (p > atomic_read(&rs->io.in_process_max)) -+ atomic_set(&rs->io.in_process_max, p); /* REMOVEME: max. */ -+} -+ -+/* Put the io reference and conditionally wake io waiters. */ -+static INLINE void io_put(struct raid_set *rs) -+{ -+ /* Intel: rebuild data corrupter? */ -+ if (!atomic_read(&rs->io.in_process)) { -+ DMERR("%s would go negative!!!", __func__); -+ return; -+ } -+ -+ if (atomic_dec_and_test(&rs->io.in_process)) -+ wake_up(&rs->io.suspendq); -+} -+ -+/* Calculate device sector offset. */ -+static INLINE sector_t _sector(struct raid_set *rs, struct bio *bio) -+{ -+ sector_t sector = bio->bi_sector; -+ -+ sector_div(sector, rs->set.data_devs); -+ return sector; -+} -+ -+/* Test device operational. */ -+static INLINE int dev_operational(struct raid_set *rs, unsigned p) -+{ -+ return !test_bit(DEVICE_FAILED, &rs->dev[p].flags); -+} -+ -+/* Return # of active stripes in stripe cache. */ -+static INLINE int sc_active(struct stripe_cache *sc) -+{ -+ return atomic_read(&sc->active_stripes); -+} -+ -+/* Test io pending on stripe. 
*/ -+static INLINE int stripe_io(struct stripe *stripe) -+{ -+ return atomic_read(&stripe->io.pending); -+} -+ -+static INLINE void stripe_io_inc(struct stripe *stripe) -+{ -+ atomic_inc(&stripe->io.pending); -+} -+ -+static INLINE void stripe_io_dec(struct stripe *stripe) -+{ -+ atomic_dec(&stripe->io.pending); -+} -+ -+/* Wrapper needed by for_each_io_dev(). */ -+static void _stripe_io_inc(struct stripe *stripe, unsigned p) -+{ -+ stripe_io_inc(stripe); -+} -+ -+/* Error a stripe. */ -+static INLINE void stripe_error(struct stripe *stripe, struct page *page) -+{ -+ SetStripeError(stripe); -+ SetPageError(page); -+ atomic_inc(RS(stripe->sc)->stats + S_STRIPE_ERROR); -+} -+ -+/* Page IOed ok. */ -+enum dirty_type { CLEAN, DIRTY }; -+static INLINE void page_set(struct page *page, enum dirty_type type) -+{ -+ switch (type) { -+ case DIRTY: -+ SetPageDirty(page); -+ AllowPageIO(page); -+ break; -+ -+ case CLEAN: -+ ClearPageDirty(page); -+ break; -+ -+ default: -+ BUG(); -+ } -+ -+ SetPageUptodate(page); -+ ClearPageError(page); -+} -+ -+/* Return region state for a sector. */ -+static INLINE int -+region_state(struct raid_set *rs, sector_t sector, unsigned long state) -+{ -+ struct dm_region_hash *rh = rs->recover.rh; -+ -+ return RSRecover(rs) ? -+ (dm_rh_get_state(rh, dm_rh_sector_to_region(rh, sector), 1) & -+ state) : 0; -+} -+ -+/* Check maximum devices which may fail in a raid set. */ -+static inline int raid_set_degraded(struct raid_set *rs) -+{ -+ return RSIoError(rs); -+} -+ -+/* Check # of devices which may fail in a raid set. */ -+static INLINE int raid_set_operational(struct raid_set *rs) -+{ -+ /* Too many failed devices -> BAD. */ -+ return atomic_read(&rs->set.failed_devs) <= -+ rs->set.raid_type->parity_devs; -+} -+ -+/* -+ * Return true in case a page_list should be read/written -+ * -+ * Conditions to read/write: -+ * o 1st page in list not uptodate -+ * o 1st page in list dirty -+ * o if we optimized io away, we flag it using the pages checked bit. -+ */ -+static INLINE unsigned page_io(struct page *page) -+{ -+ /* Optimization: page was flagged to need io during first run. */ -+ if (PagePrivate(page)) { -+ ClearPagePrivate(page); -+ return 1; -+ } -+ -+ /* Avoid io if prohibited or a locked page. */ -+ if (!PageIO(page) || PageLocked(page)) -+ return 0; -+ -+ if (!PageUptodate(page) || PageDirty(page)) { -+ /* Flag page needs io for second run optimization. */ -+ SetPagePrivate(page); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/* Call a function on each page list needing io. */ -+static INLINE unsigned -+for_each_io_dev(struct raid_set *rs, struct stripe *stripe, -+ void (*f_io)(struct stripe *stripe, unsigned p)) -+{ -+ unsigned p = rs->set.raid_devs, r = 0; -+ -+ while (p--) { -+ if (page_io(PAGE(stripe, p))) { -+ f_io(stripe, p); -+ r++; -+ } -+ } -+ -+ return r; -+} -+ -+/* Reconstruct a particular device ?. */ -+static INLINE int dev_to_init(struct raid_set *rs) -+{ -+ return rs->set.dev_to_init > -1; -+} -+ -+/* -+ * Index of device to calculate parity on. -+ * Either the parity device index *or* the selected device to init -+ * after a spare replacement. -+ */ -+static INLINE unsigned dev_for_parity(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ -+ return dev_to_init(rs) ? rs->set.dev_to_init : stripe->idx.parity; -+} -+ -+/* Return the index of the device to be recovered. */ -+static int idx_get(struct raid_set *rs) -+{ -+ /* Avoid to read in the pages to be reconstructed anyway. 
*/ -+ if (dev_to_init(rs)) -+ return rs->set.dev_to_init; -+ else if (rs->set.raid_type->level == raid4) -+ return rs->set.pi; -+ -+ return -1; -+} -+ -+/* RAID set congested function. */ -+static int raid_set_congested(void *congested_data, int bdi_bits) -+{ -+ struct raid_set *rs = congested_data; -+ int r = 0; /* Assume uncongested. */ -+ unsigned p = rs->set.raid_devs; -+ -+ /* If any of our component devices are overloaded. */ -+ while (p--) { -+ struct request_queue *q = bdev_get_queue(rs->dev[p].dev->bdev); -+ -+ r |= bdi_congested(&q->backing_dev_info, bdi_bits); -+ } -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (r ? S_CONGESTED : S_NOT_CONGESTED)); -+ return r; -+} -+ -+/* Display RAID set dead message once. */ -+static void raid_set_dead(struct raid_set *rs) -+{ -+ if (!TestSetRSDead(rs)) { -+ unsigned p; -+ char buf[BDEVNAME_SIZE]; -+ -+ DMERR("FATAL: too many devices failed -> RAID set dead"); -+ -+ for (p = 0; p < rs->set.raid_devs; p++) { -+ if (!dev_operational(rs, p)) -+ DMERR("device /dev/%s failed", -+ bdevname(rs->dev[p].dev->bdev, buf)); -+ } -+ } -+} -+ -+/* RAID set degrade check. */ -+static INLINE int -+raid_set_check_and_degrade(struct raid_set *rs, -+ struct stripe *stripe, unsigned p) -+{ -+ if (test_and_set_bit(DEVICE_FAILED, &rs->dev[p].flags)) -+ return -EPERM; -+ -+ /* Through an event in case of member device errors. */ -+ dm_table_event(rs->ti->table); -+ atomic_inc(&rs->set.failed_devs); -+ -+ /* Only log the first member error. */ -+ if (!TestSetRSIoError(rs)) { -+ char buf[BDEVNAME_SIZE]; -+ -+ /* Store index for recovery. */ -+ mb(); -+ rs->set.ei = p; -+ mb(); -+ -+ DMERR("CRITICAL: %sio error on device /dev/%s " -+ "in region=%llu; DEGRADING RAID set", -+ stripe ? "" : "FAKED ", -+ bdevname(rs->dev[p].dev->bdev, buf), -+ (unsigned long long) (stripe ? stripe->key : 0)); -+ DMERR("further device error messages suppressed"); -+ } -+ -+ return 0; -+} -+ -+static void -+raid_set_check_degrade(struct raid_set *rs, struct stripe *stripe) -+{ -+ unsigned p = rs->set.raid_devs; -+ -+ while (p--) { -+ struct page *page = PAGE(stripe, p); -+ -+ if (PageError(page)) { -+ ClearPageError(page); -+ raid_set_check_and_degrade(rs, stripe, p); -+ } -+ } -+} -+ -+/* RAID set upgrade check. */ -+static int raid_set_check_and_upgrade(struct raid_set *rs, unsigned p) -+{ -+ if (!test_and_clear_bit(DEVICE_FAILED, &rs->dev[p].flags)) -+ return -EPERM; -+ -+ if (atomic_dec_and_test(&rs->set.failed_devs)) { -+ ClearRSIoError(rs); -+ rs->set.ei = -1; -+ } -+ -+ return 0; -+} -+ -+/* Lookup a RAID device by name or by major:minor number. */ -+union dev_lookup { -+ const char *dev_name; -+ struct raid_dev *dev; -+}; -+enum lookup_type { byname, bymajmin, bynumber }; -+static int raid_dev_lookup(struct raid_set *rs, enum lookup_type by, -+ union dev_lookup *dl) -+{ -+ unsigned p; -+ -+ /* -+ * Must be an incremental loop, because the device array -+ * can have empty slots still on calls from raid_ctr() -+ */ -+ for (p = 0; p < rs->set.raid_devs; p++) { -+ char buf[BDEVNAME_SIZE]; -+ struct raid_dev *dev = rs->dev + p; -+ -+ if (!dev->dev) -+ break; -+ -+ /* Format dev string appropriately if necessary. */ -+ if (by == byname) -+ bdevname(dev->dev->bdev, buf); -+ else if (by == bymajmin) -+ format_dev_t(buf, dev->dev->bdev->bd_dev); -+ -+ /* Do the actual check. 
*/ -+ if (by == bynumber) { -+ if (dl->dev->dev->bdev->bd_dev == -+ dev->dev->bdev->bd_dev) -+ return p; -+ } else if (!strcmp(dl->dev_name, buf)) -+ return p; -+ } -+ -+ return -ENODEV; -+} -+ -+/* End io wrapper. */ -+static INLINE void -+_bio_endio(struct raid_set *rs, struct bio *bio, int error) -+{ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (bio_data_dir(bio) == WRITE ? -+ S_BIOS_ENDIO_WRITE : S_BIOS_ENDIO_READ)); -+ bio_endio(bio, error); -+ io_put(rs); /* Wake any suspend waiters. */ -+} -+ -+/* -+ * End small helper functions. -+ */ -+ -+ -+/* -+ * Stripe hash functions -+ */ -+/* Initialize/destroy stripe hash. */ -+static int hash_init(struct stripe_hash *hash, unsigned stripes) -+{ -+ unsigned buckets = 2, max_buckets = stripes / 4; -+ unsigned hash_primes[] = { -+ /* Table of primes for hash_fn/table size optimization. */ -+ 3, 7, 13, 27, 53, 97, 193, 389, 769, -+ 1543, 3079, 6151, 12289, 24593, -+ }; -+ -+ /* Calculate number of buckets (2^^n <= stripes / 4). */ -+ while (buckets < max_buckets) -+ buckets <<= 1; -+ -+ /* Allocate stripe hash. */ -+ hash->hash = vmalloc(buckets * sizeof(*hash->hash)); -+ if (!hash->hash) -+ return -ENOMEM; -+ -+ hash->buckets = buckets; -+ hash->mask = buckets - 1; -+ hash->shift = ffs(buckets); -+ if (hash->shift > ARRAY_SIZE(hash_primes) + 1) -+ hash->shift = ARRAY_SIZE(hash_primes) + 1; -+ -+ BUG_ON(hash->shift - 2 > ARRAY_SIZE(hash_primes) + 1); -+ hash->prime = hash_primes[hash->shift - 2]; -+ -+ /* Initialize buckets. */ -+ while (buckets--) -+ INIT_LIST_HEAD(hash->hash + buckets); -+ -+ return 0; -+} -+ -+static INLINE void hash_exit(struct stripe_hash *hash) -+{ -+ if (hash->hash) { -+ vfree(hash->hash); -+ hash->hash = NULL; -+ } -+} -+ -+/* List add (head/tail/locked/unlocked) inlines. */ -+enum list_lock_type { LIST_LOCKED, LIST_UNLOCKED }; -+#define LIST_DEL(name, list) \ -+static void stripe_ ## name ## _del(struct stripe *stripe, \ -+ enum list_lock_type lock) { \ -+ struct list_head *lh = stripe->lists + (list); \ -+ spinlock_t *l = NULL; \ -+\ -+ if (lock == LIST_LOCKED) { \ -+ l = stripe->sc->locks + LOCK_LRU; \ -+ spin_lock_irq(l); \ -+ } \ -+\ -+\ -+ if (!list_empty(lh)) \ -+ list_del_init(lh); \ -+\ -+ if (lock == LIST_LOCKED) \ -+ spin_unlock_irq(l); \ -+} -+ -+LIST_DEL(hash, LIST_HASH) -+LIST_DEL(lru, LIST_LRU) -+#undef LIST_DEL -+ -+enum list_pos_type { POS_HEAD, POS_TAIL }; -+#define LIST_ADD(name, list) \ -+static void stripe_ ## name ## _add(struct stripe *stripe, \ -+ enum list_pos_type pos, \ -+ enum list_lock_type lock) { \ -+ struct list_head *lh = stripe->lists + (list); \ -+ struct stripe_cache *sc = stripe->sc; \ -+ spinlock_t *l = NULL; \ -+\ -+ if (lock == LIST_LOCKED) { \ -+ l = sc->locks + LOCK_LRU; \ -+ spin_lock_irq(l); \ -+ } \ -+\ -+ if (list_empty(lh)) { \ -+ if (pos == POS_HEAD) \ -+ list_add(lh, sc->lists + (list)); \ -+ else \ -+ list_add_tail(lh, sc->lists + (list)); \ -+ } \ -+\ -+ if (lock == LIST_LOCKED) \ -+ spin_unlock_irq(l); \ -+} -+ -+LIST_ADD(endio, LIST_ENDIO) -+LIST_ADD(io, LIST_IO) -+LIST_ADD(lru, LIST_LRU) -+#undef LIST_ADD -+ -+#define POP(list) \ -+ do { \ -+ if (list_empty(sc->lists + list)) \ -+ stripe = NULL; \ -+ else { \ -+ stripe = list_first_entry(&sc->lists[list], \ -+ struct stripe, \ -+ lists[list]); \ -+ list_del_init(&stripe->lists[list]); \ -+ } \ -+ } while (0); -+ -+/* Pop an available stripe off the lru list. 
*/ -+static struct stripe *stripe_lru_pop(struct stripe_cache *sc) -+{ -+ struct stripe *stripe; -+ spinlock_t *lock = sc->locks + LOCK_LRU; -+ -+ spin_lock_irq(lock); -+ POP(LIST_LRU); -+ spin_unlock_irq(lock); -+ -+ if (stripe) -+ /* Remove from hash before reuse. */ -+ stripe_hash_del(stripe, LIST_UNLOCKED); -+ -+ return stripe; -+} -+ -+static inline unsigned hash_fn(struct stripe_hash *hash, sector_t key) -+{ -+ return (unsigned) (((key * hash->prime) >> hash->shift) & hash->mask); -+} -+ -+static inline struct list_head * -+hash_bucket(struct stripe_hash *hash, sector_t key) -+{ -+ return hash->hash + hash_fn(hash, key); -+} -+ -+/* Insert an entry into a hash. */ -+static inline void hash_insert(struct stripe_hash *hash, struct stripe *stripe) -+{ -+ list_add(stripe->lists + LIST_HASH, hash_bucket(hash, stripe->key)); -+} -+ -+/* Insert an entry into the stripe hash. */ -+static inline void -+sc_insert(struct stripe_cache *sc, struct stripe *stripe) -+{ -+ hash_insert(&sc->hash, stripe); -+} -+ -+/* Lookup an entry in the stripe hash. */ -+static inline struct stripe * -+stripe_lookup(struct stripe_cache *sc, sector_t key) -+{ -+ unsigned c = 0; -+ struct stripe *stripe; -+ struct list_head *bucket = hash_bucket(&sc->hash, key); -+ -+ list_for_each_entry(stripe, bucket, lists[LIST_HASH]) { -+ /* REMOVEME: statisics. */ -+ if (++c > atomic_read(RS(sc)->stats + S_MAX_LOOKUP)) -+ atomic_set(RS(sc)->stats + S_MAX_LOOKUP, c); -+ -+ if (stripe->key == key) -+ return stripe; -+ } -+ -+ return NULL; -+} -+ -+/* Resize the stripe cache hash on size changes. */ -+static int hash_resize(struct stripe_cache *sc) -+{ -+ /* Resize threshold reached? */ -+ if (atomic_read(&sc->stripes) > 2 * atomic_read(&sc->stripes_last) -+ || atomic_read(&sc->stripes) < atomic_read(&sc->stripes_last) / 4) { -+ int r; -+ struct stripe_hash hash, hash_tmp; -+ spinlock_t *lock; -+ -+ r = hash_init(&hash, atomic_read(&sc->stripes)); -+ if (r) -+ return r; -+ -+ lock = sc->locks + LOCK_LRU; -+ spin_lock_irq(lock); -+ if (sc->hash.hash) { -+ unsigned b = sc->hash.buckets; -+ struct list_head *pos, *tmp; -+ -+ /* Walk old buckets and insert into new. */ -+ while (b--) { -+ list_for_each_safe(pos, tmp, sc->hash.hash + b) -+ hash_insert(&hash, -+ list_entry(pos, struct stripe, -+ lists[LIST_HASH])); -+ } -+ -+ } -+ -+ memcpy(&hash_tmp, &sc->hash, sizeof(hash_tmp)); -+ memcpy(&sc->hash, &hash, sizeof(sc->hash)); -+ atomic_set(&sc->stripes_last, atomic_read(&sc->stripes)); -+ spin_unlock_irq(lock); -+ -+ hash_exit(&hash_tmp); -+ } -+ -+ return 0; -+} -+ -+/* -+ * Stripe cache locking functions -+ */ -+/* Dummy lock function for local RAID4+5. */ -+static void *no_lock(sector_t key, enum dm_lock_type type) -+{ -+ return &no_lock; -+} -+ -+/* Dummy unlock function for local RAID4+5. */ -+static void no_unlock(void *lock_handle) -+{ -+} -+ -+/* No locking (for local RAID 4+5). */ -+static struct dm_raid45_locking_type locking_none = { -+ .lock = no_lock, -+ .unlock = no_unlock, -+}; -+ -+/* Clustered RAID 4+5. */ -+/* FIXME: code this. */ -+static struct dm_raid45_locking_type locking_cluster = { -+ .lock = no_lock, -+ .unlock = no_unlock, -+}; -+ -+/* Lock a stripe (for clustering). */ -+static int -+stripe_lock(struct raid_set *rs, struct stripe *stripe, int rw, sector_t key) -+{ -+ stripe->lock = rs->locking->lock(key, rw == READ ? DM_RAID45_SHARED : -+ DM_RAID45_EX); -+ return stripe->lock ? 0 : -EPERM; -+} -+ -+/* Unlock a stripe (for clustering). 
*/ -+static void stripe_unlock(struct raid_set *rs, struct stripe *stripe) -+{ -+ rs->locking->unlock(stripe->lock); -+ stripe->lock = NULL; -+} -+ -+/* -+ * Stripe cache functions. -+ */ -+/* -+ * Invalidate all page lists pages of a stripe. -+ * -+ * I only keep state for the whole list in the first page. -+ */ -+static INLINE void -+stripe_pages_invalidate(struct stripe *stripe) -+{ -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) { -+ struct page *page = PAGE(stripe, p); -+ -+ ProhibitPageIO(page); -+ ClearPageChecked(page); -+ ClearPageDirty(page); -+ ClearPageError(page); -+ __clear_page_locked(page); -+ ClearPagePrivate(page); -+ ClearPageUptodate(page); -+ } -+} -+ -+/* Prepare stripe for (re)use. */ -+static INLINE void stripe_invalidate(struct stripe *stripe) -+{ -+ stripe->io.flags = 0; -+ stripe_pages_invalidate(stripe); -+} -+ -+/* Allow io on all chunks of a stripe. */ -+static INLINE void stripe_allow_io(struct stripe *stripe) -+{ -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) -+ AllowPageIO(PAGE(stripe, p)); -+} -+ -+/* Initialize a stripe. */ -+static void -+stripe_init(struct stripe_cache *sc, struct stripe *stripe) -+{ -+ unsigned p = RS(sc)->set.raid_devs; -+ unsigned i; -+ -+ /* Work all io chunks. */ -+ while (p--) { -+ struct stripe_set *ss = stripe->ss + p; -+ -+ stripe->obj[p].private = ss; -+ ss->stripe = stripe; -+ -+ i = ARRAY_SIZE(ss->bl); -+ while (i--) -+ bio_list_init(ss->bl + i); -+ } -+ -+ stripe->sc = sc; -+ -+ i = ARRAY_SIZE(stripe->lists); -+ while (i--) -+ INIT_LIST_HEAD(stripe->lists + i); -+ -+ atomic_set(&stripe->cnt, 0); -+ atomic_set(&stripe->io.pending, 0); -+ -+ stripe_invalidate(stripe); -+} -+ -+/* Number of pages per chunk. */ -+static inline unsigned chunk_pages(unsigned io_size) -+{ -+ return dm_div_up(io_size, SECTORS_PER_PAGE); -+} -+ -+/* Number of pages per stripe. */ -+static inline unsigned stripe_pages(struct raid_set *rs, unsigned io_size) -+{ -+ return chunk_pages(io_size) * rs->set.raid_devs; -+} -+ -+/* Initialize part of page_list (recovery). */ -+static INLINE void stripe_zero_pl_part(struct stripe *stripe, unsigned p, -+ unsigned start, unsigned count) -+{ -+ unsigned pages = chunk_pages(count); -+ /* Get offset into the page_list. */ -+ struct page_list *pl = pl_elem(PL(stripe, p), start / SECTORS_PER_PAGE); -+ -+ BUG_ON(!pl); -+ while (pl && pages--) { -+ BUG_ON(!pl->page); -+ memset(page_address(pl->page), 0, PAGE_SIZE); -+ pl = pl->next; -+ } -+} -+ -+/* Initialize parity chunk of stripe. */ -+static INLINE void stripe_zero_chunk(struct stripe *stripe, unsigned p) -+{ -+ stripe_zero_pl_part(stripe, p, 0, stripe->io.size); -+} -+ -+/* Return dynamic stripe structure size. */ -+static INLINE size_t stripe_size(struct raid_set *rs) -+{ -+ return sizeof(struct stripe) + -+ rs->set.raid_devs * sizeof(struct stripe_set); -+} -+ -+/* Allocate a stripe and its memory object. */ -+/* XXX adjust to cope with stripe cache and recovery stripe caches. */ -+enum grow { SC_GROW, SC_KEEP }; -+static struct stripe *stripe_alloc(struct stripe_cache *sc, -+ struct dm_mem_cache_client *mc, -+ enum grow grow) -+{ -+ int r; -+ struct stripe *stripe; -+ -+ stripe = kmem_cache_zalloc(sc->kc.cache, GFP_KERNEL); -+ if (stripe) { -+ /* Grow the dm-mem-cache by one object. 
*/ -+ if (grow == SC_GROW) { -+ r = dm_mem_cache_grow(mc, 1); -+ if (r) -+ goto err_free; -+ } -+ -+ stripe->obj = dm_mem_cache_alloc(mc); -+ if (!stripe->obj) -+ goto err_shrink; -+ -+ stripe_init(sc, stripe); -+ } -+ -+ return stripe; -+ -+err_shrink: -+ if (grow == SC_GROW) -+ dm_mem_cache_shrink(mc, 1); -+err_free: -+ kmem_cache_free(sc->kc.cache, stripe); -+ return NULL; -+} -+ -+/* -+ * Free a stripes memory object, shrink the -+ * memory cache and free the stripe itself -+ */ -+static void stripe_free(struct stripe *stripe, struct dm_mem_cache_client *mc) -+{ -+ dm_mem_cache_free(mc, stripe->obj); -+ dm_mem_cache_shrink(mc, 1); -+ kmem_cache_free(stripe->sc->kc.cache, stripe); -+} -+ -+/* Free the recovery stripe. */ -+static void stripe_recover_free(struct raid_set *rs) -+{ -+ struct recover *rec = &rs->recover; -+ struct list_head *stripes = &rec->stripes; -+ -+ while (!list_empty(stripes)) { -+ struct stripe *stripe = list_first_entry(stripes, struct stripe, -+ lists[LIST_RECOVER]); -+ list_del(stripe->lists + LIST_RECOVER); -+ stripe_free(stripe, rec->mem_cache_client); -+ } -+} -+ -+/* Push a stripe safely onto the endio list to be handled by do_endios(). */ -+static INLINE void stripe_endio_push(struct stripe *stripe) -+{ -+ int wake; -+ unsigned long flags; -+ struct stripe_cache *sc = stripe->sc; -+ spinlock_t *lock = sc->locks + LOCK_ENDIO; -+ -+ spin_lock_irqsave(lock, flags); -+ wake = list_empty(sc->lists + LIST_ENDIO); -+ stripe_endio_add(stripe, POS_HEAD, LIST_UNLOCKED); -+ spin_unlock_irqrestore(lock, flags); -+ -+ if (wake) -+ wake_do_raid(RS(sc)); -+} -+ -+/* Protected check for stripe cache endio list empty. */ -+static INLINE int stripe_endio_empty(struct stripe_cache *sc) -+{ -+ int r; -+ spinlock_t *lock = sc->locks + LOCK_ENDIO; -+ -+ spin_lock_irq(lock); -+ r = list_empty(sc->lists + LIST_ENDIO); -+ spin_unlock_irq(lock); -+ -+ return r; -+} -+ -+/* Pop a stripe off safely off the endio list. */ -+static struct stripe *stripe_endio_pop(struct stripe_cache *sc) -+{ -+ struct stripe *stripe; -+ spinlock_t *lock = sc->locks + LOCK_ENDIO; -+ -+ /* This runs in parallel with endio(). */ -+ spin_lock_irq(lock); -+ POP(LIST_ENDIO) -+ spin_unlock_irq(lock); -+ return stripe; -+} -+ -+#undef POP -+ -+/* Evict stripe from cache. */ -+static void stripe_evict(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ stripe_hash_del(stripe, LIST_UNLOCKED); /* Take off hash. */ -+ -+ if (list_empty(stripe->lists + LIST_LRU)) { -+ stripe_lru_add(stripe, POS_TAIL, LIST_LOCKED); -+ atomic_inc(rs->stats + S_EVICT); /* REMOVEME: statistics. */ -+ } -+} -+ -+/* Grow stripe cache. */ -+static int -+sc_grow(struct stripe_cache *sc, unsigned stripes, enum grow grow) -+{ -+ int r = 0; -+ struct raid_set *rs = RS(sc); -+ -+ /* Try to allocate this many (additional) stripes. */ -+ while (stripes--) { -+ struct stripe *stripe = -+ stripe_alloc(sc, sc->mem_cache_client, grow); -+ -+ if (likely(stripe)) { -+ stripe->io.size = rs->set.io_size; -+ stripe_lru_add(stripe, POS_TAIL, LIST_LOCKED); -+ atomic_inc(&sc->stripes); -+ } else { -+ r = -ENOMEM; -+ break; -+ } -+ } -+ -+ ClearRSScBusy(rs); -+ return r ? r : hash_resize(sc); -+} -+ -+/* Shrink stripe cache. */ -+static int sc_shrink(struct stripe_cache *sc, unsigned stripes) -+{ -+ int r = 0; -+ -+ /* Try to get unused stripe from LRU list. */ -+ while (stripes--) { -+ struct stripe *stripe; -+ -+ stripe = stripe_lru_pop(sc); -+ if (stripe) { -+ /* An lru stripe may never have ios pending! 
*/ -+ BUG_ON(stripe_io(stripe)); -+ stripe_free(stripe, sc->mem_cache_client); -+ atomic_dec(&sc->stripes); -+ } else { -+ r = -ENOENT; -+ break; -+ } -+ } -+ -+ /* Check if stats are still sane. */ -+ if (atomic_read(&sc->max_active_stripes) > -+ atomic_read(&sc->stripes)) -+ atomic_set(&sc->max_active_stripes, 0); -+ -+ if (r) -+ return r; -+ -+ ClearRSScBusy(RS(sc)); -+ return hash_resize(sc); -+} -+ -+/* Create stripe cache. */ -+static int sc_init(struct raid_set *rs, unsigned stripes) -+{ -+ unsigned i, nr; -+ struct stripe_cache *sc = &rs->sc; -+ struct stripe *stripe; -+ struct recover *rec = &rs->recover; -+ -+ /* Initialize lists and locks. */ -+ i = ARRAY_SIZE(sc->lists); -+ while (i--) -+ INIT_LIST_HEAD(sc->lists + i); -+ -+ i = NR_LOCKS; -+ while (i--) -+ spin_lock_init(sc->locks + i); -+ -+ /* Initialize atomic variables. */ -+ atomic_set(&sc->stripes, 0); -+ atomic_set(&sc->stripes_last, 0); -+ atomic_set(&sc->stripes_to_shrink, 0); -+ atomic_set(&sc->active_stripes, 0); -+ atomic_set(&sc->max_active_stripes, 0); /* REMOVEME: statistics. */ -+ -+ /* -+ * We need a runtime unique # to suffix the kmem cache name -+ * because we'll have one for each active RAID set. -+ */ -+ nr = atomic_inc_return(&_stripe_sc_nr); -+ sprintf(sc->kc.name, "%s_%d", TARGET, nr); -+ sc->kc.cache = kmem_cache_create(sc->kc.name, stripe_size(rs), -+ 0, 0, NULL); -+ if (!sc->kc.cache) -+ return -ENOMEM; -+ -+ /* Create memory cache client context for RAID stripe cache. */ -+ sc->mem_cache_client = -+ dm_mem_cache_client_create(stripes, rs->set.raid_devs, -+ chunk_pages(rs->set.io_size)); -+ if (IS_ERR(sc->mem_cache_client)) -+ return PTR_ERR(sc->mem_cache_client); -+ -+ /* Create memory cache client context for RAID recovery stripe(s). */ -+ rec->mem_cache_client = -+ dm_mem_cache_client_create(MAX_RECOVER, rs->set.raid_devs, -+ chunk_pages(rec->io_size)); -+ if (IS_ERR(rec->mem_cache_client)) -+ return PTR_ERR(rec->mem_cache_client); -+ -+ /* Allocate stripe for set recovery. */ -+ /* XXX: cope with MAX_RECOVERY. */ -+ INIT_LIST_HEAD(&rec->stripes); -+ for (i = 0; i < MAX_RECOVER; i++) { -+ stripe = stripe_alloc(sc, rec->mem_cache_client, SC_KEEP); -+ if (!stripe) -+ return -ENOMEM; -+ -+ SetStripeRecover(stripe); -+ stripe->io.size = rec->io_size; -+ list_add(stripe->lists + LIST_RECOVER, &rec->stripes); -+ } -+ -+ /* -+ * Allocate the stripe objetcs from the -+ * cache and add them to the LRU list. -+ */ -+ return sc_grow(sc, stripes, SC_KEEP); -+} -+ -+/* Destroy the stripe cache. */ -+static void sc_exit(struct stripe_cache *sc) -+{ -+ if (sc->kc.cache) { -+ BUG_ON(sc_shrink(sc, atomic_read(&sc->stripes))); -+ kmem_cache_destroy(sc->kc.cache); -+ } -+ -+ if (sc->mem_cache_client) -+ dm_mem_cache_client_destroy(sc->mem_cache_client); -+ -+ ClearRSRecover(RS(sc)); -+ stripe_recover_free(RS(sc)); -+ if (RS(sc)->recover.mem_cache_client) -+ dm_mem_cache_client_destroy(RS(sc)->recover.mem_cache_client); -+ -+ hash_exit(&sc->hash); -+} -+ -+/* -+ * Calculate RAID address -+ * -+ * Delivers tuple with the index of the data disk holding the chunk -+ * in the set, the parity disks index and the start of the stripe -+ * within the address space of the set (used as the stripe cache hash key). -+ */ -+/* thx MD. 
*/ -+static struct address * -+raid_address(struct raid_set *rs, sector_t sector, struct address *addr) -+{ -+ unsigned data_devs = rs->set.data_devs, di, pi, -+ raid_devs = rs->set.raid_devs; -+ sector_t stripe, tmp; -+ -+ /* -+ * chunk_number = sector / chunk_size -+ * stripe = chunk_number / data_devs -+ * di = stripe % data_devs; -+ */ -+ stripe = sector >> rs->set.chunk_shift; -+ di = sector_div(stripe, data_devs); -+ -+ switch (rs->set.raid_type->level) { -+ case raid5: -+ tmp = stripe; -+ pi = sector_div(tmp, raid_devs); -+ -+ switch (rs->set.raid_type->algorithm) { -+ case left_asym: /* Left asymmetric. */ -+ pi = data_devs - pi; -+ case right_asym: /* Right asymmetric. */ -+ if (di >= pi) -+ di++; -+ break; -+ -+ case left_sym: /* Left symmetric. */ -+ pi = data_devs - pi; -+ case right_sym: /* Right symmetric. */ -+ di = (pi + di + 1) % raid_devs; -+ break; -+ -+ default: -+ DMERR("Unknown RAID algorithm %d", -+ rs->set.raid_type->algorithm); -+ goto out; -+ } -+ -+ break; -+ -+ case raid4: -+ pi = rs->set.pi; -+ if (di >= pi) -+ di++; -+ break; -+ -+ default: -+ DMERR("Unknown RAID level %d", rs->set.raid_type->level); -+ goto out; -+ } -+ -+ /* -+ * Hash key = start offset on any single device of the RAID set; -+ * adjusted in case io size differs from chunk size. -+ */ -+ addr->key = (stripe << rs->set.chunk_shift) + -+ (sector & rs->set.io_shift_mask); -+ addr->di = di; -+ addr->pi = pi; -+ -+out: -+ return addr; -+} -+ -+/* -+ * Copy data across between stripe pages and bio vectors. -+ * -+ * Pay attention to data alignment in stripe and bio pages. -+ */ -+static void -+bio_copy_page_list(int rw, struct stripe *stripe, -+ struct page_list *pl, struct bio *bio) -+{ -+ unsigned i, page_offset; -+ void *page_addr; -+ struct raid_set *rs = RS(stripe->sc); -+ struct bio_vec *bv; -+ -+ /* Get start page in page list for this sector. */ -+ i = (bio->bi_sector & rs->set.io_mask) / SECTORS_PER_PAGE; -+ pl = pl_elem(pl, i); -+ -+ page_addr = page_address(pl->page); -+ page_offset = to_bytes(bio->bi_sector & (SECTORS_PER_PAGE - 1)); -+ -+ /* Walk all segments and copy data across between bio_vecs and pages. */ -+ bio_for_each_segment(bv, bio, i) { -+ int len = bv->bv_len, size; -+ unsigned bio_offset = 0; -+ void *bio_addr = __bio_kmap_atomic(bio, i, KM_USER0); -+redo: -+ size = (page_offset + len > PAGE_SIZE) ? -+ PAGE_SIZE - page_offset : len; -+ -+ if (rw == READ) -+ memcpy(bio_addr + bio_offset, -+ page_addr + page_offset, size); -+ else -+ memcpy(page_addr + page_offset, -+ bio_addr + bio_offset, size); -+ -+ page_offset += size; -+ if (page_offset == PAGE_SIZE) { -+ /* -+ * We reached the end of the chunk page -> -+ * need refer to the next one to copy more data. -+ */ -+ len -= size; -+ if (len) { -+ /* Get next page. */ -+ pl = pl->next; -+ BUG_ON(!pl); -+ page_addr = page_address(pl->page); -+ page_offset = 0; -+ bio_offset += size; -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_BIO_COPY_PL_NEXT); -+ goto redo; -+ } -+ } -+ -+ __bio_kunmap_atomic(bio_addr, KM_USER0); -+ } -+} -+ -+/* -+ * Xor optimization macros. -+ */ -+/* Xor data pointer declaration and initialization macros. */ -+#define DECLARE_2 unsigned long *d0 = data[0], *d1 = data[1] -+#define DECLARE_3 DECLARE_2, *d2 = data[2] -+#define DECLARE_4 DECLARE_3, *d3 = data[3] -+#define DECLARE_5 DECLARE_4, *d4 = data[4] -+#define DECLARE_6 DECLARE_5, *d5 = data[5] -+#define DECLARE_7 DECLARE_6, *d6 = data[6] -+#define DECLARE_8 DECLARE_7, *d7 = data[7] -+ -+/* Xor unrole macros. 
*/ -+#define D2(n) d0[n] = d0[n] ^ d1[n] -+#define D3(n) D2(n) ^ d2[n] -+#define D4(n) D3(n) ^ d3[n] -+#define D5(n) D4(n) ^ d4[n] -+#define D6(n) D5(n) ^ d5[n] -+#define D7(n) D6(n) ^ d6[n] -+#define D8(n) D7(n) ^ d7[n] -+ -+#define X_2(macro, offset) macro(offset); macro(offset + 1); -+#define X_4(macro, offset) X_2(macro, offset); X_2(macro, offset + 2); -+#define X_8(macro, offset) X_4(macro, offset); X_4(macro, offset + 4); -+#define X_16(macro, offset) X_8(macro, offset); X_8(macro, offset + 8); -+#define X_32(macro, offset) X_16(macro, offset); X_16(macro, offset + 16); -+#define X_64(macro, offset) X_32(macro, offset); X_32(macro, offset + 32); -+ -+/* Define a _xor_#chunks_#xors_per_run() function. */ -+#define _XOR(chunks, xors_per_run) \ -+static void _xor ## chunks ## _ ## xors_per_run(unsigned long **data) \ -+{ \ -+ unsigned end = XOR_SIZE / sizeof(data[0]), i; \ -+ DECLARE_ ## chunks; \ -+\ -+ for (i = 0; i < end; i += xors_per_run) { \ -+ X_ ## xors_per_run(D ## chunks, i); \ -+ } \ -+} -+ -+/* Define xor functions for 2 - 8 chunks. */ -+#define MAKE_XOR_PER_RUN(xors_per_run) \ -+ _XOR(2, xors_per_run); _XOR(3, xors_per_run); \ -+ _XOR(4, xors_per_run); _XOR(5, xors_per_run); \ -+ _XOR(6, xors_per_run); _XOR(7, xors_per_run); \ -+ _XOR(8, xors_per_run); -+ -+MAKE_XOR_PER_RUN(8) /* Define _xor_*_8() functions. */ -+MAKE_XOR_PER_RUN(16) /* Define _xor_*_16() functions. */ -+MAKE_XOR_PER_RUN(32) /* Define _xor_*_32() functions. */ -+MAKE_XOR_PER_RUN(64) /* Define _xor_*_64() functions. */ -+ -+#define MAKE_XOR(xors_per_run) \ -+struct { \ -+ void (*f)(unsigned long **); \ -+} static xor_funcs ## xors_per_run[] = { \ -+ { NULL }, \ -+ { NULL }, \ -+ { _xor2_ ## xors_per_run }, \ -+ { _xor3_ ## xors_per_run }, \ -+ { _xor4_ ## xors_per_run }, \ -+ { _xor5_ ## xors_per_run }, \ -+ { _xor6_ ## xors_per_run }, \ -+ { _xor7_ ## xors_per_run }, \ -+ { _xor8_ ## xors_per_run }, \ -+}; \ -+\ -+static void xor_ ## xors_per_run(unsigned n, unsigned long **data) \ -+{ \ -+ /* Call respective function for amount of chunks. */ \ -+ xor_funcs ## xors_per_run[n].f(data); \ -+} -+ -+/* Define xor_8() - xor_64 functions. */ -+MAKE_XOR(8) -+MAKE_XOR(16) -+MAKE_XOR(32) -+MAKE_XOR(64) -+ -+/* Maximum number of chunks, which can be xor'ed in one go. */ -+#define XOR_CHUNKS_MAX (ARRAY_SIZE(xor_funcs8) - 1) -+ -+struct xor_func { -+ xor_function_t f; -+ const char *name; -+} static xor_funcs[] = { -+ {xor_8, "xor_8"}, -+ {xor_16, "xor_16"}, -+ {xor_32, "xor_32"}, -+ {xor_64, "xor_64"}, -+}; -+ -+/* -+ * Calculate crc. -+ * -+ * This indexes into the page list of the stripe. -+ * -+ * All chunks will be xored into the parity chunk -+ * in maximum groups of xor.chunks. -+ * -+ * FIXME: try mapping the pages on discontiguous memory. -+ */ -+static void xor(struct stripe *stripe, unsigned pi, unsigned sector) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned max_chunks = rs->xor.chunks, n, p; -+ unsigned o = sector / SECTORS_PER_PAGE; /* Offset into the page_list. */ -+ unsigned long **d = rs->data; -+ xor_function_t xor_f = rs->xor.f->f; -+ -+ /* Address of parity page to xor into. */ -+ d[0] = page_address(pl_elem(PL(stripe, pi), o)->page); -+ -+ /* Preset pointers to data pages. */ -+ for (n = 1, p = rs->set.raid_devs; p--; ) { -+ if (p != pi && PageIO(PAGE(stripe, p))) -+ d[n++] = page_address(pl_elem(PL(stripe, p), o)->page); -+ -+ /* If max chunks -> xor .*/ -+ if (n == max_chunks) { -+ xor_f(n, d); -+ n = 1; -+ } -+ } -+ -+ /* If chunks -> xor. 
*/ -+ if (n > 1) -+ xor_f(n, d); -+ -+ /* Set parity page uptodate and clean. */ -+ page_set(PAGE(stripe, pi), CLEAN); -+} -+ -+/* Common xor loop through all stripe page lists. */ -+static void common_xor(struct stripe *stripe, sector_t count, -+ unsigned off, unsigned p) -+{ -+ unsigned sector; -+ -+ for (sector = off; sector < count; sector += SECTORS_PER_XOR) -+ xor(stripe, p, sector); -+ -+ atomic_inc(RS(stripe->sc)->stats + S_XORS); /* REMOVEME: statistics. */ -+} -+ -+/* -+ * Calculate parity sectors on intact stripes. -+ * -+ * Need to calculate raid address for recover stripe, because its -+ * chunk sizes differs and is typically larger than io chunk size. -+ */ -+static void parity_xor(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned chunk_size = rs->set.chunk_size, -+ io_size = stripe->io.size, -+ xor_size = chunk_size > io_size ? io_size : chunk_size; -+ sector_t off; -+ -+ /* This can be the recover stripe with a larger io size. */ -+ for (off = 0; off < io_size; off += xor_size) { -+ unsigned pi; -+ -+ /* -+ * Recover stripe likely is bigger than regular io -+ * ones and has no precalculated parity disk index -> -+ * need to calculate RAID address. -+ */ -+ if (unlikely(StripeRecover(stripe))) { -+ struct address addr; -+ -+ raid_address(rs, -+ (stripe->key + off) * rs->set.data_devs, -+ &addr); -+ pi = addr.pi; -+ stripe_zero_pl_part(stripe, pi, off, -+ rs->set.chunk_size); -+ } else -+ pi = stripe->idx.parity; -+ -+ common_xor(stripe, xor_size, off, pi); -+ page_set(PAGE(stripe, pi), DIRTY); -+ } -+} -+ -+/* Reconstruct missing chunk. */ -+static void reconstruct_xor(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ int p = stripe->idx.recover; -+ -+ BUG_ON(p < 0); -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (raid_set_degraded(rs) ? -+ S_RECONSTRUCT_EI : S_RECONSTRUCT_DEV)); -+ -+ /* Zero chunk to be reconstructed. */ -+ stripe_zero_chunk(stripe, p); -+ common_xor(stripe, stripe->io.size, 0, p); -+} -+ -+/* -+ * Try getting a stripe either from the hash or from the lru list -+ */ -+static inline void _stripe_get(struct stripe *stripe) -+{ -+ atomic_inc(&stripe->cnt); -+} -+ -+static struct stripe *stripe_get(struct raid_set *rs, struct address *addr) -+{ -+ struct stripe_cache *sc = &rs->sc; -+ struct stripe *stripe; -+ -+ stripe = stripe_lookup(sc, addr->key); -+ if (stripe) { -+ _stripe_get(stripe); -+ /* Remove from the lru list if on. */ -+ stripe_lru_del(stripe, LIST_LOCKED); -+ atomic_inc(rs->stats + S_HITS_1ST); /* REMOVEME: statistics. */ -+ } else { -+ /* Second try to get an LRU stripe. */ -+ stripe = stripe_lru_pop(sc); -+ if (stripe) { -+ _stripe_get(stripe); -+ /* Invalidate before reinserting with changed key. */ -+ stripe_invalidate(stripe); -+ stripe->key = addr->key; -+ stripe->region = dm_rh_sector_to_region(rs->recover.rh, -+ addr->key); -+ stripe->idx.parity = addr->pi; -+ sc_insert(sc, stripe); -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_INSCACHE); -+ } -+ } -+ -+ return stripe; -+} -+ -+/* -+ * Decrement reference count on a stripe. -+ * -+ * Move it to list of LRU stripes if zero. -+ */ -+static void stripe_put(struct stripe *stripe) -+{ -+ if (atomic_dec_and_test(&stripe->cnt)) { -+ if (TestClearStripeActive(stripe)) -+ atomic_dec(&stripe->sc->active_stripes); -+ -+ /* Put stripe onto the LRU list. 
*/ -+ stripe_lru_add(stripe, POS_TAIL, LIST_LOCKED); -+ } -+ -+ BUG_ON(atomic_read(&stripe->cnt) < 0); -+} -+ -+/* -+ * Process end io -+ * -+ * I need to do it here because I can't in interrupt -+ * -+ * Read and write functions are split in order to avoid -+ * conditionals in the main loop for performamce reasons. -+ */ -+ -+/* Helper read bios on a page list. */ -+static void _bio_copy_page_list(struct stripe *stripe, struct page_list *pl, -+ struct bio *bio) -+{ -+ bio_copy_page_list(READ, stripe, pl, bio); -+} -+ -+/* Helper write bios on a page list. */ -+static void _rh_dec(struct stripe *stripe, struct page_list *pl, -+ struct bio *bio) -+{ -+ dm_rh_dec(RS(stripe->sc)->recover.rh, stripe->region); -+} -+ -+/* End io all bios on a page list. */ -+static inline int -+page_list_endio(int rw, struct stripe *stripe, unsigned p, unsigned *count) -+{ -+ int r = 0; -+ struct bio_list *bl = BL(stripe, p, rw); -+ -+ if (!bio_list_empty(bl)) { -+ struct page_list *pl = PL(stripe, p); -+ struct page *page = pl->page; -+ -+ if (PageLocked(page)) -+ r = -EBUSY; -+ /* -+ * FIXME: PageUptodate() not cleared -+ * properly for missing chunks ? -+ */ -+ else if (PageUptodate(page)) { -+ struct bio *bio; -+ struct raid_set *rs = RS(stripe->sc); -+ void (*h_f)(struct stripe *, struct page_list *, -+ struct bio *) = -+ (rw == READ) ? _bio_copy_page_list : _rh_dec; -+ -+ while ((bio = bio_list_pop(bl))) { -+ h_f(stripe, pl, bio); -+ _bio_endio(rs, bio, 0); -+ stripe_put(stripe); -+ if (count) -+ (*count)++; -+ } -+ } else -+ r = -EAGAIN; -+ } -+ -+ return r; -+} -+ -+/* -+ * End io all reads/writes on a stripe copying -+ * read date accross from stripe to bios. -+ */ -+static int stripe_endio(int rw, struct stripe *stripe, unsigned *count) -+{ -+ int r = 0; -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) { -+ int rr = page_list_endio(rw, stripe, p, count); -+ -+ if (rr && r != -EIO) -+ r = rr; -+ } -+ -+ return r; -+} -+ -+/* Fail all ios on a bio list and return # of bios. */ -+static unsigned -+bio_list_fail(struct raid_set *rs, struct stripe *stripe, struct bio_list *bl) -+{ -+ unsigned r; -+ struct bio *bio; -+ -+ raid_set_dead(rs); -+ -+ /* Update region counters. */ -+ if (stripe) { -+ struct dm_region_hash *rh = rs->recover.rh; -+ -+ bio_list_for_each(bio, bl) { -+ if (bio_data_dir(bio) == WRITE) -+ dm_rh_dec(rh, stripe->region); -+ } -+ } -+ -+ /* Error end io all bios. */ -+ for (r = 0; (bio = bio_list_pop(bl)); r++) -+ _bio_endio(rs, bio, -EIO); -+ -+ return r; -+} -+ -+/* Fail all ios of a bio list of a stripe and drop io pending count. */ -+static void -+stripe_bio_list_fail(struct raid_set *rs, struct stripe *stripe, -+ struct bio_list *bl) -+{ -+ unsigned put = bio_list_fail(rs, stripe, bl); -+ -+ while (put--) -+ stripe_put(stripe); -+} -+ -+/* Fail all ios hanging off all bio lists of a stripe. */ -+static void stripe_fail_io(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned p = rs->set.raid_devs; -+ -+ stripe_evict(stripe); -+ -+ while (p--) { -+ struct stripe_set *ss = stripe->ss + p; -+ int i = ARRAY_SIZE(ss->bl); -+ -+ while (i--) -+ stripe_bio_list_fail(rs, stripe, ss->bl + i); -+ } -+} -+ -+/* -+ * Handle all stripes by handing them to the daemon, because we can't -+ * map their pages to copy the data in interrupt context. -+ * -+ * We don't want to handle them here either, while interrupts are disabled. -+ */ -+ -+/* Read/write endio function for dm-io (interrupt context). 
*/ -+static void endio(unsigned long error, void *context) -+{ -+ struct dm_mem_cache_object *obj = context; -+ struct stripe_set *ss = obj->private; -+ struct stripe *stripe = ss->stripe; -+ struct page *page = obj->pl->page; -+ -+ if (unlikely(error)) -+ stripe_error(stripe, page); -+ else -+ page_set(page, CLEAN); -+ -+ __clear_page_locked(page); -+ stripe_io_dec(stripe); -+ -+ /* Add stripe to endio list and wake daemon. */ -+ stripe_endio_push(stripe); -+} -+ -+/* -+ * Recovery io throttling -+ */ -+/* Conditionally reset io counters. */ -+enum count_type { IO_WORK = 0, IO_RECOVER }; -+static int recover_io_reset(struct raid_set *rs) -+{ -+ unsigned long j = jiffies; -+ -+ /* Pay attention to jiffies overflows. */ -+ if (j > rs->recover.last_jiffies + HZ -+ || j < rs->recover.last_jiffies) { -+ rs->recover.last_jiffies = j; -+ atomic_set(rs->recover.io_count + IO_WORK, 0); -+ atomic_set(rs->recover.io_count + IO_RECOVER, 0); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/* Count ios. */ -+static INLINE void -+recover_io_count(struct raid_set *rs, struct stripe *stripe) -+{ -+ if (RSRecover(rs)) { -+ recover_io_reset(rs); -+ atomic_inc(rs->recover.io_count + -+ (StripeRecover(stripe) ? IO_RECOVER : IO_WORK)); -+ } -+} -+ -+/* Read/Write a page_list asynchronously. */ -+static void page_list_rw(struct stripe *stripe, unsigned p) -+{ -+ struct stripe_cache *sc = stripe->sc; -+ struct raid_set *rs = RS(sc); -+ struct dm_mem_cache_object *obj = stripe->obj + p; -+ struct page_list *pl = obj->pl; -+ struct page *page = pl->page; -+ struct raid_dev *dev = rs->dev + p; -+ struct dm_io_region io = { -+ .bdev = dev->dev->bdev, -+ .sector = stripe->key, -+ .count = stripe->io.size, -+ }; -+ struct dm_io_request control = { -+ .bi_rw = PageDirty(page) ? WRITE : READ, -+ .mem.type = DM_IO_PAGE_LIST, -+ .mem.ptr.pl = pl, -+ .mem.offset = 0, -+ .notify.fn = endio, -+ .notify.context = obj, -+ .client = sc->dm_io_client, -+ }; -+ -+ BUG_ON(PageLocked(page)); -+ -+ /* -+ * Don't rw past end of device, which can happen, because -+ * typically sectors_per_dev isn't divisable by io_size. -+ */ -+ if (unlikely(io.sector + io.count > rs->set.sectors_per_dev)) -+ io.count = rs->set.sectors_per_dev - io.sector; -+ -+ io.sector += dev->start; /* Add . */ -+ recover_io_count(rs, stripe); /* Recovery io accounting. */ -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + -+ (PageDirty(page) ? S_DM_IO_WRITE : S_DM_IO_READ)); -+ -+ ClearPageError(page); -+ __set_page_locked(page); -+ io_dev_queued(dev); -+ BUG_ON(dm_io(&control, 1, &io, NULL)); -+} -+ -+/* -+ * Write dirty / read not uptodate page lists of a stripe. -+ */ -+static unsigned stripe_page_lists_rw(struct raid_set *rs, struct stripe *stripe) -+{ -+ unsigned r; -+ -+ /* -+ * Increment the pending count on the stripe -+ * first, so that we don't race in endio(). -+ * -+ * An inc (IO) is needed for any page: -+ * -+ * o not uptodate -+ * o dirtied by writes merged -+ * o dirtied by parity calculations -+ */ -+ r = for_each_io_dev(rs, stripe, _stripe_io_inc); -+ if (r) { -+ /* io needed: chunks are not uptodate/dirty. */ -+ int max; /* REMOVEME: */ -+ struct stripe_cache *sc = &rs->sc; -+ -+ if (!TestSetStripeActive(stripe)) -+ atomic_inc(&sc->active_stripes); -+ -+ /* Take off the lru list in case it got added there. */ -+ stripe_lru_del(stripe, LIST_LOCKED); -+ -+ /* Submit actual io. 
*/ -+ for_each_io_dev(rs, stripe, page_list_rw); -+ -+ /* REMOVEME: statistics */ -+ max = sc_active(sc); -+ if (atomic_read(&sc->max_active_stripes) < max) -+ atomic_set(&sc->max_active_stripes, max); -+ -+ atomic_inc(rs->stats + S_FLUSHS); -+ /* END REMOVEME: statistics */ -+ } -+ -+ return r; -+} -+ -+/* Work in all pending writes. */ -+static INLINE void _writes_merge(struct stripe *stripe, unsigned p) -+{ -+ struct bio_list *write = BL(stripe, p, WRITE); -+ -+ if (!bio_list_empty(write)) { -+ struct page_list *pl = stripe->obj[p].pl; -+ struct bio *bio; -+ struct bio_list *write_merged = BL(stripe, p, WRITE_MERGED); -+ -+ /* -+ * We can play with the lists without holding a lock, -+ * because it is just us accessing them anyway. -+ */ -+ bio_list_for_each(bio, write) -+ bio_copy_page_list(WRITE, stripe, pl, bio); -+ -+ bio_list_merge(write_merged, write); -+ bio_list_init(write); -+ page_set(pl->page, DIRTY); -+ } -+} -+ -+/* Merge in all writes hence dirtying respective pages. */ -+static INLINE void writes_merge(struct stripe *stripe) -+{ -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) -+ _writes_merge(stripe, p); -+} -+ -+/* Check, if a chunk gets completely overwritten. */ -+static INLINE int stripe_check_overwrite(struct stripe *stripe, unsigned p) -+{ -+ unsigned sectors = 0; -+ struct bio *bio; -+ struct bio_list *bl = BL(stripe, p, WRITE); -+ -+ bio_list_for_each(bio, bl) -+ sectors += bio_sectors(bio); -+ -+ return sectors == RS(stripe->sc)->set.io_size; -+} -+ -+/* -+ * Prepare stripe to avoid io on broken/reconstructed -+ * drive in order to reconstruct date on endio. -+ */ -+enum prepare_type { IO_ALLOW, IO_PROHIBIT }; -+static void stripe_prepare(struct stripe *stripe, unsigned p, -+ enum prepare_type type) -+{ -+ struct page *page = PAGE(stripe, p); -+ -+ switch (type) { -+ case IO_PROHIBIT: -+ /* -+ * In case we prohibit, we gotta make sure, that -+ * io on all other chunks than the one which failed -+ * or is being reconstructed is allowed and that it -+ * doesn't have state uptodate. -+ */ -+ stripe_allow_io(stripe); -+ ClearPageUptodate(page); -+ ProhibitPageIO(page); -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITPAGEIO); -+ stripe->idx.recover = p; -+ SetStripeReconstruct(stripe); -+ break; -+ -+ case IO_ALLOW: -+ AllowPageIO(page); -+ stripe->idx.recover = -1; -+ ClearStripeReconstruct(stripe); -+ break; -+ -+ default: -+ BUG(); -+ } -+} -+ -+/* -+ * Degraded/reconstruction mode. -+ * -+ * Check stripe state to figure which chunks don't need IO. -+ */ -+static INLINE void stripe_check_reconstruct(struct stripe *stripe, -+ int prohibited) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ -+ /* -+ * Degraded mode (device(s) failed) -> -+ * avoid io on the failed device. -+ */ -+ if (unlikely(raid_set_degraded(rs))) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_DEGRADED); -+ stripe_prepare(stripe, rs->set.ei, IO_PROHIBIT); -+ return; -+ } else { -+ /* -+ * Reconstruction mode (ie. a particular device or -+ * some (rotating) parity chunk is being resynchronized) -> -+ * o make sure all needed pages are read in -+ * o writes are allowed to go through -+ */ -+ int r = region_state(rs, stripe->key, DM_RH_NOSYNC); -+ -+ if (r) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_NOSYNC); -+ stripe_prepare(stripe, dev_for_parity(stripe), -+ IO_PROHIBIT); -+ return; -+ } -+ } -+ -+ /* -+ * All disks good. Avoid reading parity chunk and reconstruct it -+ * unless we have prohibited io to chunk(s). 
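
stripe_check_overwrite() above adds up the sectors of all writes queued against a chunk and compares the sum with the chunk io size; only a completely overwritten chunk may skip the read-before-write. A standalone analogue of that test, using a plain array of write lengths instead of a bio list (all names here are illustrative):

        #include <stdio.h>

        /* 1 if the queued writes cover exactly one full chunk of io_size sectors. */
        static int chunk_fully_overwritten(const unsigned *write_sectors,
                                           unsigned nr_writes, unsigned io_size)
        {
                unsigned i, sectors = 0;

                for (i = 0; i < nr_writes; i++)
                        sectors += write_sectors[i];

                return sectors == io_size;
        }

        int main(void)
        {
                unsigned writes[] = { 8, 8, 16 };       /* sectors per queued write */

                printf("%d\n", chunk_fully_overwritten(writes, 3, 32)); /* 1 */
                printf("%d\n", chunk_fully_overwritten(writes, 2, 32)); /* 0 */
                return 0;
        }
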
-+ */ -+ if (!prohibited) { -+ if (StripeMerged(stripe)) -+ stripe_prepare(stripe, stripe->idx.parity, IO_ALLOW); -+ else { -+ stripe_prepare(stripe, stripe->idx.parity, IO_PROHIBIT); -+ -+ /* -+ * Overrule stripe_prepare to reconstruct the -+ * parity chunk, because it'll be created new anyway. -+ */ -+ ClearStripeReconstruct(stripe); -+ } -+ } -+} -+ -+/* Check, if stripe is ready to merge writes. */ -+static INLINE int stripe_check_merge(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ int prohibited = 0; -+ unsigned chunks = 0, p = rs->set.raid_devs; -+ -+ /* Walk all chunks. */ -+ while (p--) { -+ struct page *page = PAGE(stripe, p); -+ -+ /* Can't merge active chunks. */ -+ if (PageLocked(page)) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_MERGE_PAGE_LOCKED); -+ break; -+ } -+ -+ /* Can merge uptodate chunks and have to count parity chunk. */ -+ if (PageUptodate(page) || p == stripe->idx.parity) { -+ chunks++; -+ continue; -+ } -+ -+ /* Read before write ordering. */ -+ if (RSCheckOverwrite(rs) && -+ bio_list_empty(BL(stripe, p, READ))) { -+ int r = stripe_check_overwrite(stripe, p); -+ -+ if (r) { -+ chunks++; -+ /* REMOVEME: statistics. */ -+ atomic_inc(RS(stripe->sc)->stats + -+ S_PROHIBITPAGEIO); -+ ProhibitPageIO(page); -+ prohibited = 1; -+ } -+ } -+ } -+ -+ if (chunks == rs->set.raid_devs) { -+ /* All pages are uptodate or get written over or mixture. */ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_CAN_MERGE); -+ return 0; -+ } else -+ /* REMOVEME: statistics.*/ -+ atomic_inc(rs->stats + S_CANT_MERGE); -+ -+ return prohibited ? 1 : -EPERM; -+} -+ -+/* Check, if stripe is ready to merge writes. */ -+static INLINE int stripe_check_read(struct stripe *stripe) -+{ -+ int r = 0; -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ /* Walk all chunks. */ -+ while (p--) { -+ struct page *page = PAGE(stripe, p); -+ -+ if (!PageLocked(page) && -+ bio_list_empty(BL(stripe, p, READ))) { -+ ProhibitPageIO(page); -+ r = 1; -+ } -+ } -+ -+ return r; -+} -+ -+/* -+ * Read/write a stripe. -+ * -+ * All stripe read/write activity goes through this function. -+ * -+ * States to cover: -+ * o stripe to read and/or write -+ * o stripe with error to reconstruct -+ */ -+static int stripe_rw(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ int prohibited = 0, r; -+ -+ /* -+ * Check the state of the RAID set and if degraded (or -+ * resynchronizing for reads), read in all other chunks but -+ * the one on the dead/resynchronizing device in order to be -+ * able to reconstruct the missing one. -+ * -+ * Merge all writes hanging off uptodate pages of the stripe. -+ */ -+ -+ /* Initially allow io on all chunks and prohibit below, if necessary. */ -+ stripe_allow_io(stripe); -+ -+ if (StripeRBW(stripe)) { -+ r = stripe_check_merge(stripe); -+ if (!r) { -+ /* -+ * If I could rely on valid parity (which would only -+ * be sure in case of a full synchronization), -+ * I could xor a fraction of chunks out of -+ * parity and back in. -+ * -+ * For the time being, I got to redo parity... -+ */ -+ /* parity_xor(stripe); */ /* Xor chunks out. */ -+ stripe_zero_chunk(stripe, stripe->idx.parity); -+ writes_merge(stripe); /* Merge writes in. */ -+ parity_xor(stripe); /* Update parity. */ -+ ClearStripeRBW(stripe); /* Disable RBW. */ -+ SetStripeMerged(stripe); /* Writes merged. */ -+ } -+ -+ if (r > 0) -+ prohibited = 1; -+ } else if (!raid_set_degraded(rs)) -+ /* Only allow for read avoidance if not degraded. 
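
The read-before-write branch above rebuilds parity from scratch: zero the parity chunk, merge the queued writes into the data chunks, then xor all data chunks into parity. Reconstructing a lost chunk later uses the same arithmetic, since xoring the surviving chunks and the parity chunk yields the missing data. A minimal userspace sketch of that xor math (chunk size and device count are made up for the example):

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>

        #define CHUNK 8         /* bytes per chunk, tiny for the example    */
        #define DEVS  4         /* three data chunks plus one parity chunk  */

        /* Recompute chunk[target] as the xor of all other chunks. */
        static void xor_into(uint8_t chunk[DEVS][CHUNK], unsigned target)
        {
                unsigned p, i;

                memset(chunk[target], 0, CHUNK);
                for (p = 0; p < DEVS; p++) {
                        if (p == target)
                                continue;
                        for (i = 0; i < CHUNK; i++)
                                chunk[target][i] ^= chunk[p][i];
                }
        }

        int main(void)
        {
                uint8_t chunk[DEVS][CHUNK] = { "data-00", "data-01", "data-02" };
                uint8_t saved[CHUNK];

                xor_into(chunk, 3);                     /* compute parity       */
                memcpy(saved, chunk[1], CHUNK);
                memset(chunk[1], 0, CHUNK);             /* "lose" a data chunk  */
                xor_into(chunk, 1);                     /* reconstruct it       */
                printf("%s\n", memcmp(saved, chunk[1], CHUNK) ? "bad" : "ok");
                return 0;
        }
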
*/ -+ prohibited = stripe_check_read(stripe); -+ -+ /* -+ * Check, if io needs to be allowed/prohibeted on certain chunks -+ * because of a degraded set or reconstruction on a region. -+ */ -+ stripe_check_reconstruct(stripe, prohibited); -+ -+ /* Now submit any reads/writes. */ -+ r = stripe_page_lists_rw(rs, stripe); -+ if (!r) { -+ /* -+ * No io submitted because of chunk io prohibited or -+ * locked pages -> push to end io list for processing. -+ */ -+ atomic_inc(rs->stats + S_NO_RW); /* REMOVEME: statistics. */ -+ stripe_endio_push(stripe); -+ wake_do_raid(rs); /* Wake myself. */ -+ } -+ -+ return 0; -+} -+ -+/* Flush stripe either via flush list or imeediately. */ -+enum flush_type { FLUSH_DELAY, FLUSH_NOW }; -+static int stripe_flush(struct stripe *stripe, enum flush_type type) -+{ -+ int r = 0; -+ -+ stripe_lru_del(stripe, LIST_LOCKED); -+ -+ /* Immediately flush. */ -+ if (type == FLUSH_NOW) { -+ if (likely(raid_set_operational(RS(stripe->sc)))) -+ r = stripe_rw(stripe); /* Read/write stripe. */ -+ else -+ /* Optimization: Fail early on failed sets. */ -+ stripe_fail_io(stripe); -+ /* Delay flush by putting it on io list for later processing. */ -+ } else if (type == FLUSH_DELAY) -+ stripe_io_add(stripe, POS_TAIL, LIST_UNLOCKED); -+ else -+ BUG(); -+ -+ return r; -+} -+ -+/* -+ * Queue reads and writes to a stripe by hanging -+ * their bios off the stripsets read/write lists. -+ * -+ * Endio reads on uptodate chunks. -+ */ -+static INLINE int stripe_queue_bio(struct raid_set *rs, struct bio *bio, -+ struct bio_list *reject) -+{ -+ int r = 0; -+ struct address addr; -+ struct stripe *stripe = -+ stripe_get(rs, raid_address(rs, bio->bi_sector, &addr)); -+ -+ if (stripe) { -+ int rr, rw = bio_data_dir(bio); -+ -+ rr = stripe_lock(rs, stripe, rw, addr.key); /* Lock stripe */ -+ if (rr) { -+ stripe_put(stripe); -+ goto out; -+ } -+ -+ /* Distinguish read and write cases. */ -+ bio_list_add(BL(stripe, addr.di, rw), bio); -+ -+ /* REMOVEME: statistics */ -+ atomic_inc(rs->stats + (rw == WRITE ? -+ S_BIOS_ADDED_WRITE : S_BIOS_ADDED_READ)); -+ -+ if (rw == READ) -+ SetStripeRead(stripe); -+ else { -+ SetStripeRBW(stripe); -+ -+ /* Inrement pending write count on region. */ -+ dm_rh_inc(rs->recover.rh, stripe->region); -+ r = 1; /* Region hash needs a flush. */ -+ } -+ -+ /* -+ * Optimize stripe flushing: -+ * -+ * o directly start io for read stripes. -+ * -+ * o put stripe onto stripe caches io_list for RBW, -+ * so that do_flush() can belabour it after we put -+ * more bios to the stripe for overwrite optimization. -+ */ -+ stripe_flush(stripe, -+ StripeRead(stripe) ? FLUSH_NOW : FLUSH_DELAY); -+ -+ /* Got no stripe from cache -> reject bio. */ -+ } else { -+out: -+ bio_list_add(reject, bio); -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_IOS_POST); -+ } -+ -+ return r; -+} -+ -+/* -+ * Recovery functions -+ */ -+/* Read a stripe off a raid set for recovery. */ -+static int recover_read(struct raid_set *rs, struct stripe *stripe, int idx) -+{ -+ /* Invalidate all pages so that they get read in. */ -+ stripe_pages_invalidate(stripe); -+ -+ /* Allow io on all recovery chunks. */ -+ stripe_allow_io(stripe); -+ -+ if (idx > -1) -+ ProhibitPageIO(PAGE(stripe, idx)); -+ -+ stripe->key = rs->recover.pos; -+ return stripe_page_lists_rw(rs, stripe); -+} -+ -+/* Write a stripe to a raid set for recovery. 
*/ -+static int recover_write(struct raid_set *rs, struct stripe *stripe, int idx) -+{ -+ /* -+ * If this is a reconstruct of a particular device, then -+ * reconstruct the respective page(s), else create parity page(s). -+ */ -+ if (idx > -1) { -+ struct page *page = PAGE(stripe, idx); -+ -+ AllowPageIO(page); -+ stripe_zero_chunk(stripe, idx); -+ common_xor(stripe, stripe->io.size, 0, idx); -+ page_set(page, DIRTY); -+ } else -+ parity_xor(stripe); -+ -+ return stripe_page_lists_rw(rs, stripe); -+} -+ -+/* Recover bandwidth available ?. */ -+static int recover_bandwidth(struct raid_set *rs) -+{ -+ int r, work; -+ -+ /* On reset -> allow recovery. */ -+ r = recover_io_reset(rs); -+ if (r || RSBandwidth(rs)) -+ goto out; -+ -+ work = atomic_read(rs->recover.io_count + IO_WORK); -+ if (work) { -+ /* Pay attention to larger recover stripe size. */ -+ int recover = -+ atomic_read(rs->recover.io_count + IO_RECOVER) * -+ rs->recover.io_size / -+ rs->set.io_size; -+ -+ /* -+ * Don't use more than given bandwidth of -+ * the work io for recovery. -+ */ -+ if (recover > work / rs->recover.bandwidth_work) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_NO_BANDWIDTH); -+ return 0; -+ } -+ } -+ -+out: -+ atomic_inc(rs->stats + S_BANDWIDTH); /* REMOVEME: statistics. */ -+ return 1; -+} -+ -+/* Try to get a region to recover. */ -+static int recover_get_region(struct raid_set *rs) -+{ -+ struct recover *rec = &rs->recover; -+ struct dm_region_hash *rh = rec->rh; -+ -+ /* Start quiescing some regions. */ -+ if (!RSRegionGet(rs)) { -+ int r = recover_bandwidth(rs); /* Enough bandwidth ?. */ -+ -+ if (r) { -+ r = dm_rh_recovery_prepare(rh); -+ if (r < 0) { -+ DMINFO("No %sregions to recover", -+ rec->nr_regions_to_recover ? -+ "more " : ""); -+ return -ENOENT; -+ } -+ } else -+ return -EAGAIN; -+ -+ SetRSRegionGet(rs); -+ } -+ -+ if (!rec->reg) { -+ rec->reg = dm_rh_recovery_start(rh); -+ if (rec->reg) { -+ /* -+ * A reference for the the region I'll -+ * keep till I've completely synced it. -+ */ -+ io_get(rs); -+ rec->pos = dm_rh_region_to_sector(rh, -+ dm_rh_get_region_key(rec->reg)); -+ rec->end = rec->pos + dm_rh_get_region_size(rh); -+ return 1; -+ } else -+ return -EAGAIN; -+ } -+ -+ return 0; -+} -+ -+/* Read/write a recovery stripe. */ -+static INLINE int recover_stripe_rw(struct raid_set *rs, struct stripe *stripe) -+{ -+ /* Read/write flip-flop. */ -+ if (TestClearStripeRBW(stripe)) { -+ SetStripeRead(stripe); -+ return recover_read(rs, stripe, idx_get(rs)); -+ } else if (TestClearStripeRead(stripe)) -+ return recover_write(rs, stripe, idx_get(rs)); -+ -+ return 0; -+} -+ -+/* Reset recovery variables. */ -+static void recovery_region_reset(struct raid_set *rs) -+{ -+ rs->recover.reg = NULL; -+ ClearRSRegionGet(rs); -+} -+ -+/* Update region hash state. */ -+static void recover_rh_update(struct raid_set *rs, int error) -+{ -+ struct recover *rec = &rs->recover; -+ struct dm_region *reg = rec->reg; -+ -+ if (reg) { -+ dm_rh_recovery_end(reg, error); -+ if (!error) -+ rec->nr_regions_recovered++; -+ -+ recovery_region_reset(rs); -+ } -+ -+ dm_rh_update_states(reg->rh, 1); -+ dm_rh_flush(reg->rh); -+ io_put(rs); /* Release the io reference for the region. */ -+} -+ -+/* Called by main io daemon to recover regions. */ -+/* FIXME: cope with MAX_RECOVER > 1. */ -+static INLINE void _do_recovery(struct raid_set *rs, struct stripe *stripe) -+{ -+ int r; -+ struct recover *rec = &rs->recover; -+ -+ /* If recovery is active -> return. 
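
recover_bandwidth() above limits recovery to a share of the application io seen in the current window: the recovery io count is first scaled by the ratio of recovery io size to normal io size, then compared against the work io count divided by bandwidth_work, which recover_set_bandwidth() derives as 100 / bandwidth percent. A standalone sketch of that decision (all names and numbers are illustrative):

        #include <stdio.h>

        /* 1 if another recovery io fits into the configured bandwidth share. */
        static int recovery_allowed(unsigned work_ios, unsigned recover_ios,
                                    unsigned recover_io_size, unsigned io_size,
                                    unsigned bandwidth_percent)
        {
                unsigned bandwidth_work = 100 / bandwidth_percent;
                unsigned scaled = recover_ios * recover_io_size / io_size;

                if (!work_ios)
                        return 1;       /* no application io, recover freely */

                return scaled <= work_ios / bandwidth_work;
        }

        int main(void)
        {
                /* 10% bandwidth: 100 work ios leave room for ~10 normal-sized recovery ios. */
                printf("%d\n", recovery_allowed(100, 4, 64, 32, 10));   /* 1 */
                printf("%d\n", recovery_allowed(100, 8, 64, 32, 10));   /* 0 */
                return 0;
        }
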
*/ -+ if (StripeActive(stripe)) -+ return; -+ -+ /* io error is fatal for recovery -> stop it. */ -+ if (unlikely(StripeError(stripe))) -+ goto err; -+ -+ /* Get a region to recover. */ -+ r = recover_get_region(rs); -+ switch (r) { -+ case 1: /* Got a new region. */ -+ /* Flag read before write. */ -+ ClearStripeRead(stripe); -+ SetStripeRBW(stripe); -+ break; -+ -+ case 0: -+ /* Got a region in the works. */ -+ r = recover_bandwidth(rs); -+ if (r) /* Got enough bandwidth. */ -+ break; -+ -+ case -EAGAIN: -+ /* No bandwidth/quiesced region yet, try later. */ -+ wake_do_raid_delayed(rs, HZ / 10); -+ return; -+ -+ case -ENOENT: /* No more regions. */ -+ dm_table_event(rs->ti->table); -+ goto free; -+ } -+ -+ /* Read/write a recover stripe. */ -+ r = recover_stripe_rw(rs, stripe); -+ if (r) { -+ /* IO initiated, get another reference for the IO. */ -+ io_get(rs); -+ return; -+ } -+ -+ /* Update recovery position within region. */ -+ rec->pos += stripe->io.size; -+ -+ /* If we're at end of region, update region hash. */ -+ if (rec->pos >= rec->end || -+ rec->pos >= rs->set.sectors_per_dev) -+ recover_rh_update(rs, 0); -+ else -+ SetStripeRBW(stripe); -+ -+ /* Schedule myself for another round... */ -+ wake_do_raid(rs); -+ return; -+ -+err: -+ raid_set_check_degrade(rs, stripe); -+ -+ { -+ char buf[BDEVNAME_SIZE]; -+ -+ DMERR("stopping recovery due to " -+ "ERROR on /dev/%s, stripe at offset %llu", -+ bdevname(rs->dev[rs->set.ei].dev->bdev, buf), -+ (unsigned long long) stripe->key); -+ -+ } -+ -+ /* Make sure, that all quiesced regions get released. */ -+ do { -+ if (rec->reg) -+ dm_rh_recovery_end(rec->reg, -EIO); -+ -+ rec->reg = dm_rh_recovery_start(rec->rh); -+ } while (rec->reg); -+ -+ recover_rh_update(rs, -EIO); -+free: -+ rs->set.dev_to_init = -1; -+ -+ /* Check for jiffies overrun. */ -+ rs->recover.end_jiffies = jiffies; -+ if (rs->recover.end_jiffies < rs->recover.start_jiffies) -+ rs->recover.end_jiffies = ~0; -+ -+ ClearRSRecover(rs); -+} -+ -+static INLINE void do_recovery(struct raid_set *rs) -+{ -+ struct stripe *stripe; -+ -+ list_for_each_entry(stripe, &rs->recover.stripes, lists[LIST_RECOVER]) -+ _do_recovery(rs, stripe); -+ -+ if (!RSRecover(rs)) -+ stripe_recover_free(rs); -+} -+ -+/* -+ * END recovery functions -+ */ -+ -+/* End io process all stripes handed in by endio() callback. */ -+static void do_endios(struct raid_set *rs) -+{ -+ struct stripe_cache *sc = &rs->sc; -+ struct stripe *stripe; -+ -+ while ((stripe = stripe_endio_pop(sc))) { -+ unsigned count; -+ -+ /* Recovery stripe special case. */ -+ if (unlikely(StripeRecover(stripe))) { -+ if (stripe_io(stripe)) -+ continue; -+ -+ io_put(rs); /* Release region io reference. */ -+ ClearStripeActive(stripe); -+ -+ /* REMOVEME: statistics*/ -+ atomic_dec(&sc->active_stripes); -+ continue; -+ } -+ -+ /* Early end io all reads on any uptodate chunks. */ -+ stripe_endio(READ, stripe, (count = 0, &count)); -+ if (stripe_io(stripe)) { -+ if (count) /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_ACTIVE_READS); -+ -+ continue; -+ } -+ -+ /* Set stripe inactive after all io got processed. */ -+ if (TestClearStripeActive(stripe)) -+ atomic_dec(&sc->active_stripes); -+ -+ /* Unlock stripe (for clustering). */ -+ stripe_unlock(rs, stripe); -+ -+ /* -+ * If an io error on a stripe occured and the RAID set -+ * is still operational, requeue the stripe for io. 
-+ */ -+ if (TestClearStripeError(stripe)) { -+ raid_set_check_degrade(rs, stripe); -+ ClearStripeReconstruct(stripe); -+ -+ if (!StripeMerged(stripe) && -+ raid_set_operational(rs)) { -+ stripe_pages_invalidate(stripe); -+ stripe_flush(stripe, FLUSH_DELAY); -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_REQUEUE); -+ continue; -+ } -+ } -+ -+ /* Check if the RAID set is inoperational to error ios. */ -+ if (!raid_set_operational(rs)) { -+ ClearStripeReconstruct(stripe); -+ stripe_fail_io(stripe); -+ BUG_ON(atomic_read(&stripe->cnt)); -+ continue; -+ } -+ -+ /* Got to reconstruct a missing chunk. */ -+ if (TestClearStripeReconstruct(stripe)) -+ reconstruct_xor(stripe); -+ -+ /* -+ * Now that we've got a complete stripe, we can -+ * process the rest of the end ios on reads. -+ */ -+ BUG_ON(stripe_endio(READ, stripe, NULL)); -+ ClearStripeRead(stripe); -+ -+ /* -+ * Read-before-write stripes need to be flushed again in -+ * order to work the write data into the pages *after* -+ * they were read in. -+ */ -+ if (TestClearStripeMerged(stripe)) -+ /* End io all bios which got merged already. */ -+ BUG_ON(stripe_endio(WRITE_MERGED, stripe, NULL)); -+ -+ /* Got to put on flush list because of new writes. */ -+ if (StripeRBW(stripe)) -+ stripe_flush(stripe, FLUSH_DELAY); -+ } -+} -+ -+/* -+ * Stripe cache shrinking. -+ */ -+static INLINE void do_sc_shrink(struct raid_set *rs) -+{ -+ unsigned shrink = atomic_read(&rs->sc.stripes_to_shrink); -+ -+ if (shrink) { -+ unsigned cur = atomic_read(&rs->sc.stripes); -+ -+ sc_shrink(&rs->sc, shrink); -+ shrink -= cur - atomic_read(&rs->sc.stripes); -+ atomic_set(&rs->sc.stripes_to_shrink, shrink); -+ -+ /* -+ * Wake myself up in case we failed to shrink the -+ * requested amount in order to try again later. -+ */ -+ if (shrink) -+ wake_do_raid(rs); -+ } -+} -+ -+ -+/* -+ * Process all ios -+ * -+ * We do different things with the io depending on the -+ * state of the region that it's in: -+ * -+ * o reads: hang off stripe cache or postpone if full -+ * -+ * o writes: -+ * -+ * CLEAN/DIRTY/NOSYNC: increment pending and hang io off stripe's stripe set. -+ * In case stripe cache is full or busy, postpone the io. -+ * -+ * RECOVERING: delay the io until recovery of the region completes. -+ * -+ */ -+static INLINE void do_ios(struct raid_set *rs, struct bio_list *ios) -+{ -+ int r; -+ unsigned flush = 0; -+ struct dm_region_hash *rh = rs->recover.rh; -+ struct bio *bio; -+ struct bio_list delay, reject; -+ -+ bio_list_init(&delay); -+ bio_list_init(&reject); -+ -+ /* -+ * Classify each io: -+ * o delay to recovering regions -+ * o queue to all other regions -+ */ -+ while ((bio = bio_list_pop(ios))) { -+ /* -+ * In case we get a barrier bio, push it back onto -+ * the input queue unless all work queues are empty -+ * and the stripe cache is inactive. -+ */ -+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_BARRIER); -+ if (!list_empty(rs->sc.lists + LIST_IO) || -+ !bio_list_empty(&delay) || -+ !bio_list_empty(&reject) || -+ sc_active(&rs->sc)) { -+ bio_list_push(ios, bio); -+ break; -+ } -+ } -+ -+ r = region_state(rs, _sector(rs, bio), DM_RH_RECOVERING); -+ if (unlikely(r)) { -+ /* Got to wait for recovering regions. */ -+ bio_list_add(&delay, bio); -+ SetRSBandwidth(rs); -+ } else { -+ /* -+ * Process ios to non-recovering regions by queueing -+ * them to stripes (does rh_inc()) for writes). 
-+ */ -+ flush += stripe_queue_bio(rs, bio, &reject); -+ } -+ } -+ -+ if (flush) { -+ r = dm_rh_flush(rh); /* Writes got queued -> flush dirty log. */ -+ if (r) -+ DMERR("dirty log flush"); -+ } -+ -+ /* Delay ios to regions which are recovering. */ -+ while ((bio = bio_list_pop(&delay))) { -+ /* REMOVEME: statistics.*/ -+ atomic_inc(rs->stats + S_DELAYED_BIOS); -+ atomic_inc(rs->stats + S_SUM_DELAYED_BIOS); -+ dm_rh_delay(rh, bio); -+ -+ } -+ -+ /* Merge any rejected bios back to the head of the input list. */ -+ bio_list_merge_head(ios, &reject); -+} -+ -+/* Flush any stripes on the io list. */ -+static INLINE void do_flush(struct raid_set *rs) -+{ -+ struct list_head *list = rs->sc.lists + LIST_IO, *pos, *tmp; -+ -+ list_for_each_safe(pos, tmp, list) { -+ int r = stripe_flush(list_entry(pos, struct stripe, -+ lists[LIST_IO]), FLUSH_NOW); -+ -+ /* Remove from the list only if the stripe got processed. */ -+ if (!r) -+ list_del_init(pos); -+ } -+} -+ -+/* Send an event in case we're getting too busy. */ -+static INLINE void do_busy_event(struct raid_set *rs) -+{ -+ if ((sc_active(&rs->sc) > atomic_read(&rs->sc.stripes) * 4 / 5)) { -+ if (!TestSetRSScBusy(rs)) -+ dm_table_event(rs->ti->table); -+ } else -+ ClearRSScBusy(rs); -+} -+ -+/* Unplug: let the io role on the sets devices. */ -+static INLINE void do_unplug(struct raid_set *rs) -+{ -+ struct raid_dev *dev = rs->dev + rs->set.raid_devs; -+ -+ while (dev-- > rs->dev) { -+ /* Only call any device unplug function, if io got queued. */ -+ if (io_dev_clear(dev)) -+ blk_unplug(bdev_get_queue(dev->dev->bdev)); -+ } -+} -+ -+/*----------------------------------------------------------------- -+ * RAID daemon -+ *---------------------------------------------------------------*/ -+/* -+ * o belabour all end ios -+ * o optionally shrink the stripe cache -+ * o update the region hash states -+ * o optionally do recovery -+ * o grab the input queue -+ * o work an all requeued or new ios and perform stripe cache flushs -+ * unless the RAID set is inoperational (when we error ios) -+ * o check, if the stripe cache gets too busy and throw an event if so -+ * o unplug any component raid devices with queued bios -+ */ -+static void do_raid(struct work_struct *ws) -+{ -+ struct raid_set *rs = container_of(ws, struct raid_set, io.dws.work); -+ struct bio_list *ios = &rs->io.work, *ios_in = &rs->io.in; -+ spinlock_t *lock = &rs->io.in_lock; -+ -+ /* -+ * We always need to end io, so that ios -+ * can get errored in case the set failed -+ * and the region counters get decremented -+ * before we update the region hash states. -+ */ -+redo: -+ do_endios(rs); -+ -+ /* -+ * Now that we've end io'd, which may have put stripes on -+ * the LRU list, we shrink the stripe cache if requested. -+ */ -+ do_sc_shrink(rs); -+ -+ /* Update region hash states before we go any further. */ -+ dm_rh_update_states(rs->recover.rh, 1); -+ -+ /* Try to recover regions. */ -+ if (RSRecover(rs)) -+ do_recovery(rs); -+ -+ /* More endios -> process. */ -+ if (!stripe_endio_empty(&rs->sc)) { -+ atomic_inc(rs->stats + S_REDO); -+ goto redo; -+ } -+ -+ /* Quickly grab all new ios queued and add them to the work list. */ -+ spin_lock_irq(lock); -+ bio_list_merge(ios, ios_in); -+ bio_list_init(ios_in); -+ spin_unlock_irq(lock); -+ -+ /* Let's assume we're operational most of the time ;-). */ -+ if (likely(raid_set_operational(rs))) { -+ /* If we got ios, work them into the cache. */ -+ if (!bio_list_empty(ios)) { -+ do_ios(rs, ios); -+ do_unplug(rs); /* Unplug the sets device queues. 
*/ -+ } -+ -+ do_flush(rs); /* Flush any stripes on io list. */ -+ do_unplug(rs); /* Unplug the sets device queues. */ -+ do_busy_event(rs); /* Check if we got too busy. */ -+ -+ /* More endios -> process. */ -+ if (!stripe_endio_empty(&rs->sc)) { -+ atomic_inc(rs->stats + S_REDO); -+ goto redo; -+ } -+ } else -+ /* No way to reconstruct data with too many devices failed. */ -+ bio_list_fail(rs, NULL, ios); -+} -+ -+/* -+ * Callback for region hash to dispatch -+ * delayed bios queued to recovered regions -+ * (Gets called via rh_update_states()). -+ */ -+static void dispatch_delayed_bios(void *context, struct bio_list *bl) -+{ -+ struct raid_set *rs = context; -+ struct bio *bio; -+ -+ /* REMOVEME: decrement pending delayed bios counter. */ -+ bio_list_for_each(bio, bl) -+ atomic_dec(rs->stats + S_DELAYED_BIOS); -+ -+ /* Merge region hash private list to work list. */ -+ bio_list_merge_head(&rs->io.work, bl); -+ bio_list_init(bl); -+ ClearRSBandwidth(rs); -+} -+ -+/************************************************************* -+ * Constructor helpers -+ *************************************************************/ -+/* Calculate MB/sec. */ -+static INLINE unsigned mbpers(struct raid_set *rs, unsigned speed) -+{ -+ return to_bytes(speed * rs->set.data_devs * -+ rs->recover.io_size * HZ >> 10) >> 10; -+} -+ -+/* -+ * Discover fastest xor algorithm and # of chunks combination. -+ */ -+/* Calculate speed for algorithm and # of chunks. */ -+static INLINE unsigned xor_speed(struct stripe *stripe) -+{ -+ unsigned r = 0; -+ unsigned long j; -+ -+ /* Wait for next tick. */ -+ for (j = jiffies; j == jiffies;) -+ ; -+ -+ /* Do xors for a full tick. */ -+ for (j = jiffies; j == jiffies;) { -+ mb(); -+ common_xor(stripe, stripe->io.size, 0, 0); -+ mb(); -+ r++; -+ mb(); -+ } -+ -+ return r; -+} -+ -+/* Optimize xor algorithm for this RAID set. */ -+static unsigned xor_optimize(struct raid_set *rs) -+{ -+ unsigned chunks_max = 2, speed_max = 0; -+ struct xor_func *f = ARRAY_END(xor_funcs), *f_max = NULL; -+ struct stripe *stripe; -+ -+ BUG_ON(list_empty(&rs->recover.stripes)); -+ stripe = list_first_entry(&rs->recover.stripes, struct stripe, -+ lists[LIST_RECOVER]); -+ -+ /* -+ * Got to allow io on all chunks, so that -+ * xor() will actually work on them. -+ */ -+ stripe_allow_io(stripe); -+ -+ /* Try all xor functions. */ -+ while (f-- > xor_funcs) { -+ unsigned speed; -+ -+ /* Set actual xor function for common_xor(). */ -+ rs->xor.f = f; -+ rs->xor.chunks = XOR_CHUNKS_MAX + 1; -+ -+ while (rs->xor.chunks-- > 2) { -+ speed = xor_speed(stripe); -+ if (speed > speed_max) { -+ speed_max = speed; -+ chunks_max = rs->xor.chunks; -+ f_max = f; -+ } -+ } -+ } -+ -+ /* Memorize optimum parameters. 
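
xor_speed()/xor_optimize() above calibrate the xor routine empirically: wait for a tick boundary, count how many full xor passes complete within one tick, and keep the function and chunk count with the highest count. A rough userspace analogue of that calibration loop, timing a trivial stand-in function over a fixed 10 ms slice (the stand-in is not one of the patch's xor_funcs):

        #include <stdio.h>
        #include <stdint.h>
        #include <time.h>

        #define SLICE_NS 10000000L      /* 10 ms measurement window */
        #define LEN 4096

        static uint8_t a[LEN], b[LEN];

        /* Trivial stand-in for one xor pass over a stripe. */
        static void xor_pass(void)
        {
                int i;

                for (i = 0; i < LEN; i++)
                        a[i] ^= b[i];
        }

        static long elapsed_ns(const struct timespec *t0)
        {
                struct timespec t1;

                clock_gettime(CLOCK_MONOTONIC, &t1);
                return (t1.tv_sec - t0->tv_sec) * 1000000000L +
                       (t1.tv_nsec - t0->tv_nsec);
        }

        int main(void)
        {
                struct timespec t0;
                unsigned long passes = 0;

                clock_gettime(CLOCK_MONOTONIC, &t0);
                while (elapsed_ns(&t0) < SLICE_NS) {
                        xor_pass();
                        passes++;
                }
                printf("%lu passes in 10 ms\n", passes);
                return 0;
        }
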
*/ -+ rs->xor.f = f_max; -+ rs->xor.chunks = chunks_max; -+ return speed_max; -+} -+ -+static inline int array_too_big(unsigned long fixed, unsigned long obj, -+ unsigned long num) -+{ -+ return (num > (ULONG_MAX - fixed) / obj); -+} -+ -+static void wakeup_all_recovery_waiters(void *context) -+{ -+} -+ -+/* -+ * Allocate a RAID context (a RAID set) -+ */ -+static int -+context_alloc(struct raid_set **raid_set, struct raid_type *raid_type, -+ unsigned stripes, unsigned chunk_size, unsigned io_size, -+ unsigned recover_io_size, unsigned raid_devs, -+ sector_t sectors_per_dev, -+ struct dm_target *ti, unsigned dl_parms, char **argv) -+{ -+ int r; -+ unsigned p; -+ size_t len; -+ sector_t region_size, ti_len; -+ struct raid_set *rs = NULL; -+ struct dm_dirty_log *dl; -+ struct recover *rec; -+ -+ /* -+ * Create the dirty log -+ * -+ * We need to change length for the dirty log constructor, -+ * because we want an amount of regions for all stripes derived -+ * from the single device size, so that we can keep region -+ * size = 2^^n independant of the number of devices -+ */ -+ ti_len = ti->len; -+ ti->len = sectors_per_dev; -+ dl = dm_dirty_log_create(argv[0], ti, dl_parms, argv + 2); -+ ti->len = ti_len; -+ if (!dl) -+ goto bad_dirty_log; -+ -+ /* Chunk size *must* be smaller than region size. */ -+ region_size = dl->type->get_region_size(dl); -+ if (chunk_size > region_size) -+ goto bad_chunk_size; -+ -+ /* Recover io size *must* be smaller than region size as well. */ -+ if (recover_io_size > region_size) -+ goto bad_recover_io_size; -+ -+ /* Size and allocate the RAID set structure. */ -+ len = sizeof(*rs->data) + sizeof(*rs->dev); -+ if (array_too_big(sizeof(*rs), len, raid_devs)) -+ goto bad_array; -+ -+ len = sizeof(*rs) + raid_devs * len; -+ rs = kzalloc(len, GFP_KERNEL); -+ if (!rs) -+ goto bad_alloc; -+ -+ rec = &rs->recover; -+ atomic_set(&rs->io.in_process, 0); -+ atomic_set(&rs->io.in_process_max, 0); -+ rec->io_size = recover_io_size; -+ -+ /* Pointer to data array. */ -+ rs->data = (unsigned long **) -+ ((void *) rs->dev + raid_devs * sizeof(*rs->dev)); -+ rec->dl = dl; -+ rs->set.raid_devs = p = raid_devs; -+ rs->set.data_devs = raid_devs - raid_type->parity_devs; -+ rs->set.raid_type = raid_type; -+ -+ /* -+ * Set chunk and io size and respective shifts -+ * (used to avoid divisions) -+ */ -+ rs->set.chunk_size = chunk_size; -+ rs->set.chunk_mask = chunk_size - 1; -+ rs->set.chunk_shift = ffs(chunk_size) - 1; -+ -+ rs->set.io_size = io_size; -+ rs->set.io_mask = io_size - 1; -+ rs->set.io_shift = ffs(io_size) - 1; -+ rs->set.io_shift_mask = rs->set.chunk_mask & ~rs->set.io_mask; -+ -+ rs->set.pages_per_io = chunk_pages(io_size); -+ rs->set.sectors_per_dev = sectors_per_dev; -+ -+ rs->set.ei = -1; /* Indicate no failed device. */ -+ atomic_set(&rs->set.failed_devs, 0); -+ -+ rs->ti = ti; -+ -+ atomic_set(rec->io_count + IO_WORK, 0); -+ atomic_set(rec->io_count + IO_RECOVER, 0); -+ -+ /* Initialize io lock and queues. */ -+ spin_lock_init(&rs->io.in_lock); -+ bio_list_init(&rs->io.in); -+ bio_list_init(&rs->io.work); -+ -+ init_waitqueue_head(&rs->io.suspendq); /* Suspend waiters (dm-io). */ -+ -+ rec->nr_regions = dm_sector_div_up(sectors_per_dev, region_size); -+ -+ rec->rh = dm_region_hash_create(rs, dispatch_delayed_bios, wake_do_raid, -+ wakeup_all_recovery_waiters, -+ rs->ti->begin, MAX_RECOVER, dl, -+ region_size, rs->recover.nr_regions); -+ if (IS_ERR(rec->rh)) -+ goto bad_rh; -+ -+ /* Initialize stripe cache. 
*/ -+ r = sc_init(rs, stripes); -+ if (r) -+ goto bad_sc; -+ -+ /* Create dm-io client context. */ -+ rs->sc.dm_io_client = dm_io_client_create(rs->set.raid_devs * -+ rs->set.pages_per_io); -+ if (IS_ERR(rs->sc.dm_io_client)) -+ goto bad_dm_io_client; -+ -+ /* REMOVEME: statistics. */ -+ stats_reset(rs); -+ ClearRSDevelStats(rs); /* Disnable development status. */ -+ -+ *raid_set = rs; -+ return 0; -+ -+bad_dirty_log: -+ TI_ERR_RET("Error creating dirty log", -ENOMEM); -+ -+ -+bad_chunk_size: -+ dm_dirty_log_destroy(dl); -+ TI_ERR("Chunk size larger than region size"); -+ -+bad_recover_io_size: -+ dm_dirty_log_destroy(dl); -+ TI_ERR("Recover stripe io size larger than region size"); -+ -+bad_array: -+ dm_dirty_log_destroy(dl); -+ TI_ERR("Arry too big"); -+ -+bad_alloc: -+ dm_dirty_log_destroy(dl); -+ TI_ERR_RET("Cannot allocate raid context", -ENOMEM); -+ -+bad_rh: -+ dm_dirty_log_destroy(dl); -+ ti->error = DM_MSG_PREFIX "Error creating dirty region hash"; -+ goto free_rs; -+ -+bad_sc: -+ ti->error = DM_MSG_PREFIX "Error creating stripe cache"; -+ goto free; -+ -+bad_dm_io_client: -+ ti->error = DM_MSG_PREFIX "Error allocating dm-io resources"; -+free: -+ dm_region_hash_destroy(rec->rh); -+ sc_exit(&rs->sc); -+ dm_region_hash_destroy(rec->rh); /* Destroys dirty log as well. */ -+free_rs: -+ kfree(rs); -+ return -ENOMEM; -+} -+ -+/* Free a RAID context (a RAID set). */ -+static void -+context_free(struct raid_set *rs, struct dm_target *ti, unsigned r) -+{ -+ while (r--) -+ dm_put_device(ti, rs->dev[r].dev); -+ -+ dm_io_client_destroy(rs->sc.dm_io_client); -+ sc_exit(&rs->sc); -+ dm_region_hash_destroy(rs->recover.rh); -+ dm_dirty_log_destroy(rs->recover.dl); -+ kfree(rs); -+} -+ -+/* Create work queue and initialize work. */ -+static int rs_workqueue_init(struct raid_set *rs) -+{ -+ struct dm_target *ti = rs->ti; -+ -+ rs->io.wq = create_singlethread_workqueue(DAEMON); -+ if (!rs->io.wq) -+ TI_ERR_RET("failed to create " DAEMON, -ENOMEM); -+ -+ INIT_DELAYED_WORK(&rs->io.dws, do_raid); -+ return 0; -+} -+ -+/* Return pointer to raid_type structure for raid name. */ -+static struct raid_type *get_raid_type(char *name) -+{ -+ struct raid_type *r = ARRAY_END(raid_types); -+ -+ while (r-- > raid_types) { -+ if (!strnicmp(STR_LEN(r->name, name))) -+ return r; -+ } -+ -+ return NULL; -+} -+ -+/* FIXME: factor out to dm core. */ -+static int multiple(sector_t a, sector_t b, sector_t *n) -+{ -+ sector_t r = a; -+ -+ sector_div(r, b); -+ *n = r; -+ return a == r * b; -+} -+ -+/* Log RAID set information to kernel log. */ -+static void raid_set_log(struct raid_set *rs, unsigned speed) -+{ -+ unsigned p; -+ char buf[BDEVNAME_SIZE]; -+ -+ for (p = 0; p < rs->set.raid_devs; p++) -+ DMINFO("/dev/%s is raid disk %u", -+ bdevname(rs->dev[p].dev->bdev, buf), p); -+ -+ DMINFO("%d/%d/%d sectors chunk/io/recovery size, %u stripes", -+ rs->set.chunk_size, rs->set.io_size, rs->recover.io_size, -+ atomic_read(&rs->sc.stripes)); -+ DMINFO("algorithm \"%s\", %u chunks with %uMB/s", rs->xor.f->name, -+ rs->xor.chunks, mbpers(rs, speed)); -+ DMINFO("%s set with net %u/%u devices", rs->set.raid_type->descr, -+ rs->set.data_devs, rs->set.raid_devs); -+} -+ -+/* Get all devices and offsets. */ -+static int -+dev_parms(struct dm_target *ti, struct raid_set *rs, -+ char **argv, int *p) -+{ -+ for (*p = 0; *p < rs->set.raid_devs; (*p)++, argv += 2) { -+ int r; -+ unsigned long long tmp; -+ struct raid_dev *dev = rs->dev + *p; -+ union dev_lookup dl = {.dev = dev }; -+ -+ /* Get offset and device. 
*/ -+ r = sscanf(argv[1], "%llu", &tmp); -+ if (r != 1) -+ TI_ERR("Invalid RAID device offset parameter"); -+ -+ dev->start = tmp; -+ r = dm_get_device(ti, argv[0], dev->start, -+ rs->set.sectors_per_dev, -+ dm_table_get_mode(ti->table), &dev->dev); -+ if (r) -+ TI_ERR_RET("RAID device lookup failure", r); -+ -+ r = raid_dev_lookup(rs, bynumber, &dl); -+ if (r != -ENODEV && r < *p) { -+ (*p)++; /* Ensure dm_put_device() on actual device. */ -+ TI_ERR_RET("Duplicate RAID device", -ENXIO); -+ } -+ } -+ -+ return 0; -+} -+ -+/* Set recovery bandwidth. */ -+static INLINE void -+recover_set_bandwidth(struct raid_set *rs, unsigned bandwidth) -+{ -+ rs->recover.bandwidth = bandwidth; -+ rs->recover.bandwidth_work = 100 / bandwidth; -+} -+ -+/* Handle variable number of RAID parameters. */ -+static int -+raid_variable_parms(struct dm_target *ti, char **argv, -+ unsigned i, int *raid_parms, -+ int *chunk_size, int *chunk_size_parm, -+ int *stripes, int *stripes_parm, -+ int *io_size, int *io_size_parm, -+ int *recover_io_size, int *recover_io_size_parm, -+ int *bandwidth, int *bandwidth_parm) -+{ -+ /* Fetch # of variable raid parameters. */ -+ if (sscanf(argv[i++], "%d", raid_parms) != 1 || -+ !range_ok(*raid_parms, 0, 5)) -+ TI_ERR("Bad variable raid parameters number"); -+ -+ if (*raid_parms) { -+ /* -+ * If we've got variable RAID parameters, -+ * chunk size is the first one -+ */ -+ if (sscanf(argv[i++], "%d", chunk_size) != 1 || -+ (*chunk_size != -1 && -+ (!POWER_OF_2(*chunk_size) || -+ !range_ok(*chunk_size, IO_SIZE_MIN, CHUNK_SIZE_MAX)))) -+ TI_ERR("Invalid chunk size; must be 2^^n and <= 16384"); -+ -+ *chunk_size_parm = *chunk_size; -+ if (*chunk_size == -1) -+ *chunk_size = CHUNK_SIZE; -+ -+ /* -+ * In case we've got 2 or more variable raid -+ * parameters, the number of stripes is the second one -+ */ -+ if (*raid_parms > 1) { -+ if (sscanf(argv[i++], "%d", stripes) != 1 || -+ (*stripes != -1 && -+ !range_ok(*stripes, STRIPES_MIN, -+ STRIPES_MAX))) -+ TI_ERR("Invalid number of stripes: must " -+ "be >= 8 and <= 8192"); -+ } -+ -+ *stripes_parm = *stripes; -+ if (*stripes == -1) -+ *stripes = STRIPES; -+ -+ /* -+ * In case we've got 3 or more variable raid -+ * parameters, the io size is the third one. 
-+ */ -+ if (*raid_parms > 2) { -+ if (sscanf(argv[i++], "%d", io_size) != 1 || -+ (*io_size != -1 && -+ (!POWER_OF_2(*io_size) || -+ !range_ok(*io_size, IO_SIZE_MIN, -+ min(BIO_MAX_SECTORS / 2, -+ *chunk_size))))) -+ TI_ERR("Invalid io size; must " -+ "be 2^^n and less equal " -+ "min(BIO_MAX_SECTORS/2, chunk size)"); -+ } else -+ *io_size = *chunk_size; -+ -+ *io_size_parm = *io_size; -+ if (*io_size == -1) -+ *io_size = *chunk_size; -+ -+ /* -+ * In case we've got 4 variable raid parameters, -+ * the recovery stripe io_size is the fourth one -+ */ -+ if (*raid_parms > 3) { -+ if (sscanf(argv[i++], "%d", recover_io_size) != 1 || -+ (*recover_io_size != -1 && -+ (!POWER_OF_2(*recover_io_size) || -+ !range_ok(*recover_io_size, RECOVER_IO_SIZE_MIN, -+ BIO_MAX_SECTORS / 2)))) -+ TI_ERR("Invalid recovery io size; must be " -+ "2^^n and less equal BIO_MAX_SECTORS/2"); -+ } -+ -+ *recover_io_size_parm = *recover_io_size; -+ if (*recover_io_size == -1) -+ *recover_io_size = RECOVER_IO_SIZE; -+ -+ /* -+ * In case we've got 5 variable raid parameters, -+ * the recovery io bandwidth is the fifth one -+ */ -+ if (*raid_parms > 4) { -+ if (sscanf(argv[i++], "%d", bandwidth) != 1 || -+ (*bandwidth != -1 && -+ !range_ok(*bandwidth, BANDWIDTH_MIN, -+ BANDWIDTH_MAX))) -+ TI_ERR("Invalid recovery bandwidth " -+ "percentage; must be > 0 and <= 100"); -+ } -+ -+ *bandwidth_parm = *bandwidth; -+ if (*bandwidth == -1) -+ *bandwidth = BANDWIDTH; -+ } -+ -+ return 0; -+} -+ -+/* Parse optional locking parameters. */ -+static int -+raid_locking_parms(struct dm_target *ti, char **argv, -+ unsigned i, int *locking_parms, -+ struct dm_raid45_locking_type **locking_type) -+{ -+ *locking_parms = 0; -+ *locking_type = &locking_none; -+ -+ if (!strnicmp(argv[i], "none", strlen(argv[i]))) -+ *locking_parms = 1; -+ else if (!strnicmp(argv[i + 1], "locking", strlen(argv[i + 1]))) { -+ *locking_type = &locking_none; -+ *locking_parms = 2; -+ } else if (!strnicmp(argv[i + 1], "cluster", strlen(argv[i + 1]))) { -+ *locking_type = &locking_cluster; -+ /* FIXME: namespace. */ -+ *locking_parms = 3; -+ } -+ -+ return *locking_parms == 1 ? -EINVAL : 0; -+} -+ -+/* Set backing device information properties of RAID set. */ -+static void rs_set_bdi(struct raid_set *rs, unsigned stripes, unsigned chunks) -+{ -+ unsigned p, ra_pages; -+ struct mapped_device *md = dm_table_get_md(rs->ti->table); -+ struct backing_dev_info *bdi = &dm_disk(md)->queue->backing_dev_info; -+ -+ /* Set read-ahead for the RAID set and the component devices. */ -+ bdi->ra_pages = stripes * stripe_pages(rs, rs->set.io_size); -+ ra_pages = chunks * chunk_pages(rs->set.io_size); -+ for (p = rs->set.raid_devs; p--; ) { -+ struct request_queue *q = bdev_get_queue(rs->dev[p].dev->bdev); -+ -+ q->backing_dev_info.ra_pages = ra_pages; -+ } -+ -+ /* Set congested function and data. */ -+ bdi->congested_fn = raid_set_congested; -+ bdi->congested_data = rs; -+ -+ dm_put(md); -+} -+ -+/* Get backing device information properties of RAID set. 
*/ -+static void rs_get_ra(struct raid_set *rs, unsigned *stripes, unsigned *chunks) -+{ -+ struct mapped_device *md = dm_table_get_md(rs->ti->table); -+ -+ *stripes = dm_disk(md)->queue->backing_dev_info.ra_pages -+ / stripe_pages(rs, rs->set.io_size); -+ *chunks = bdev_get_queue(rs->dev->dev->bdev)->backing_dev_info.ra_pages -+ / chunk_pages(rs->set.io_size); -+ -+ dm_put(md); -+} -+ -+/* -+ * Construct a RAID4/5 mapping: -+ * -+ * log_type #log_params \ -+ * raid_type [#parity_dev] #raid_variable_params \ -+ * [locking "none"/"cluster"] -+ * #raid_devs #dev_to_initialize [ ]{3,} -+ * -+ * log_type = "core"/"disk", -+ * #log_params = 1-3 (1-2 for core dirty log type, 3 for disk dirty log only) -+ * log_params = [dirty_log_path] region_size [[no]sync]) -+ * -+ * raid_type = "raid4", "raid5_la", "raid5_ra", "raid5_ls", "raid5_rs" -+ * -+ * #parity_dev = N if raid_type = "raid4" -+ * o N = -1: pick default = last device -+ * o N >= 0 and < #raid_devs: parity device index -+ * -+ * #raid_variable_params = 0-5; raid_params (-1 = default): -+ * [chunk_size [#stripes [io_size [recover_io_size [%recovery_bandwidth]]]]] -+ * o chunk_size (unit to calculate drive addresses; must be 2^^n, > 8 -+ * and <= CHUNK_SIZE_MAX) -+ * o #stripes is number of stripes allocated to stripe cache -+ * (must be > 1 and < STRIPES_MAX) -+ * o io_size (io unit size per device in sectors; must be 2^^n and > 8) -+ * o recover_io_size (io unit size per device for recovery in sectors; -+ must be 2^^n, > SECTORS_PER_PAGE and <= region_size) -+ * o %recovery_bandwith is the maximum amount spend for recovery during -+ * application io (1-100%) -+ * If raid_variable_params = 0, defaults will be used. -+ * Any raid_variable_param can be set to -1 to apply a default -+ * -+ * #raid_devs = N (N >= 3) -+ * -+ * #dev_to_initialize = N -+ * -1: initialize parity on all devices -+ * >= 0 and < #raid_devs: initialize raid_path; used to force reconstruction -+ * of a failed devices content after replacement -+ * -+ * = device_path (eg, /dev/sdd1) -+ * = begin at offset on -+ * -+ */ -+#define MIN_PARMS 13 -+static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) -+{ -+ int bandwidth = BANDWIDTH, bandwidth_parm = -1, -+ chunk_size = CHUNK_SIZE, chunk_size_parm = -1, -+ dev_to_init, dl_parms, locking_parms, parity_parm, pi = -1, -+ i, io_size = IO_SIZE, io_size_parm = -1, -+ r, raid_devs, raid_parms, -+ recover_io_size = RECOVER_IO_SIZE, recover_io_size_parm = -1, -+ stripes = STRIPES, stripes_parm = -1; -+ unsigned speed; -+ sector_t tmp, sectors_per_dev; -+ struct dm_raid45_locking_type *locking; -+ struct raid_set *rs; -+ struct raid_type *raid_type; -+ -+ /* Ensure minimum number of parameters. */ -+ if (argc < MIN_PARMS) -+ TI_ERR("Not enough parameters"); -+ -+ /* Fetch # of dirty log parameters. */ -+ if (sscanf(argv[1], "%d", &dl_parms) != 1 -+ || !range_ok(dl_parms, 1, 4711)) -+ TI_ERR("Bad dirty log parameters number"); -+ -+ /* Check raid_type. */ -+ raid_type = get_raid_type(argv[dl_parms + 2]); -+ if (!raid_type) -+ TI_ERR("Bad raid type"); -+ -+ /* In case of RAID4, parity drive is selectable. */ -+ parity_parm = !!(raid_type->level == raid4); -+ -+ /* Handle variable number of RAID parameters. 
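
Putting the constructor documentation above together, a table line for a three-disk raid5_la set with a core dirty log (8192-sector regions, nosync), all RAID parameters left at their defaults and no device forced to initialize could be loaded as shown below; the device names, offsets and the 1 GiB length are only an example:

        echo "0 2097152 raid45 core 2 8192 nosync raid5_la 0 3 -1 /dev/sda1 0 /dev/sdb1 0 /dev/sdc1 0" | dmsetup create r45

Reading the fields against the parsing code above: "core 2 8192 nosync" is the dirty log type with its two parameters, the "0" after the raid type requests zero variable RAID parameters (defaults), and "3 -1" gives the number of raid devices and the device-to-initialize index, followed by one path/offset pair per device.
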
*/ -+ r = raid_variable_parms(ti, argv, dl_parms + parity_parm + 3, -+ &raid_parms, -+ &chunk_size, &chunk_size_parm, -+ &stripes, &stripes_parm, -+ &io_size, &io_size_parm, -+ &recover_io_size, &recover_io_size_parm, -+ &bandwidth, &bandwidth_parm); -+ if (r) -+ return r; -+ -+ r = raid_locking_parms(ti, argv, -+ dl_parms + parity_parm + raid_parms + 4, -+ &locking_parms, &locking); -+ if (r) -+ return r; -+ -+ /* # of raid devices. */ -+ i = dl_parms + parity_parm + raid_parms + locking_parms + 4; -+ if (sscanf(argv[i], "%d", &raid_devs) != 1 || -+ raid_devs < raid_type->minimal_devs) -+ TI_ERR("Invalid number of raid devices"); -+ -+ /* In case of RAID4, check parity drive index is in limits. */ -+ if (raid_type->level == raid4) { -+ /* Fetch index of parity device. */ -+ if (sscanf(argv[dl_parms + 3], "%d", &pi) != 1 || -+ !range_ok(pi, 0, raid_devs - 1)) -+ TI_ERR("Invalid RAID4 parity device index"); -+ } -+ -+ /* -+ * Index of device to initialize starts at 0 -+ * -+ * o -1 -> don't initialize a particular device, -+ * o 0..raid_devs-1 -> initialize respective device -+ * (used for reconstruction of a replaced device) -+ */ -+ if (sscanf -+ (argv[dl_parms + parity_parm + raid_parms + locking_parms + 5], -+ "%d", &dev_to_init) != 1 -+ || !range_ok(dev_to_init, -1, raid_devs - 1)) -+ TI_ERR("Invalid number for raid device to initialize"); -+ -+ /* Check # of raid device arguments. */ -+ if (argc - dl_parms - parity_parm - raid_parms - 6 != -+ 2 * raid_devs) -+ TI_ERR("Wrong number of raid device/offset arguments"); -+ -+ /* -+ * Check that the table length is devisable -+ * w/o rest by (raid_devs - parity_devs) -+ */ -+ if (!multiple(ti->len, raid_devs - raid_type->parity_devs, -+ §ors_per_dev)) -+ TI_ERR -+ ("Target length not divisable by number of data devices"); -+ -+ /* -+ * Check that the device size is -+ * devisable w/o rest by chunk size -+ */ -+ if (!multiple(sectors_per_dev, chunk_size, &tmp)) -+ TI_ERR("Device length not divisable by chunk_size"); -+ -+ /**************************************************************** -+ * Now that we checked the constructor arguments -> -+ * let's allocate the RAID set -+ ****************************************************************/ -+ r = context_alloc(&rs, raid_type, stripes, chunk_size, io_size, -+ recover_io_size, raid_devs, sectors_per_dev, -+ ti, dl_parms, argv); -+ if (r) -+ return r; -+ -+ /* -+ * Set these here in order to avoid passing -+ * too many arguments to context_alloc() -+ */ -+ rs->set.dev_to_init_parm = dev_to_init; -+ rs->set.dev_to_init = dev_to_init; -+ rs->set.pi_parm = pi; -+ rs->set.pi = (pi == -1) ? rs->set.data_devs : pi; -+ rs->set.raid_parms = raid_parms; -+ rs->set.chunk_size_parm = chunk_size_parm; -+ rs->set.io_size_parm = io_size_parm; -+ rs->sc.stripes_parm = stripes_parm; -+ rs->recover.io_size_parm = recover_io_size_parm; -+ rs->recover.bandwidth_parm = bandwidth_parm; -+ recover_set_bandwidth(rs, bandwidth); -+ -+ /* Use locking type to lock stripe access. */ -+ rs->locking = locking; -+ -+ /* Get the device/offset tupels. */ -+ argv += dl_parms + 6 + parity_parm + raid_parms; -+ r = dev_parms(ti, rs, argv, &i); -+ if (r) -+ goto err; -+ -+ /* Initialize recovery. */ -+ rs->recover.start_jiffies = jiffies; -+ rs->recover.end_jiffies = 0; -+ recovery_region_reset(rs); -+ -+ /* Allow for recovery of any nosync regions. */ -+ SetRSRecover(rs); -+ -+ /* Set backing device information (eg. read ahead). 
*/ -+ rs_set_bdi(rs, chunk_size * 2, io_size * 4); -+ SetRSCheckOverwrite(rs); /* Allow chunk overwrite checks. */ -+ -+ speed = xor_optimize(rs); /* Select best xor algorithm. */ -+ -+ /* Initialize work queue to handle this RAID set's io. */ -+ r = rs_workqueue_init(rs); -+ if (r) -+ goto err; -+ -+ raid_set_log(rs, speed); /* Log information about RAID set. */ -+ -+ /* -+ * Make sure that dm core only hands maximum io size -+ * length down and pays attention to io boundaries. -+ */ -+ ti->split_io = rs->set.io_size; -+ ti->private = rs; -+ return 0; -+ -+err: -+ context_free(rs, ti, i); -+ return r; -+} -+ -+/* -+ * Destruct a raid mapping -+ */ -+static void raid_dtr(struct dm_target *ti) -+{ -+ struct raid_set *rs = ti->private; -+ -+ /* Indicate recovery end so that ios in flight drain. */ -+ ClearRSRecover(rs); -+ -+ wake_do_raid(rs); /* Wake daemon. */ -+ wait_ios(rs); /* Wait for any io still being processed. */ -+ destroy_workqueue(rs->io.wq); -+ context_free(rs, ti, rs->set.raid_devs); -+} -+ -+/* Queues ios to RAID sets. */ -+static inline void queue_bio(struct raid_set *rs, struct bio *bio) -+{ -+ int wake; -+ struct bio_list *in = &rs->io.in; -+ spinlock_t *in_lock = &rs->io.in_lock; -+ -+ spin_lock_irq(in_lock); -+ wake = bio_list_empty(in); -+ bio_list_add(in, bio); -+ spin_unlock_irq(in_lock); -+ -+ /* Wake daemon if input list was empty. */ -+ if (wake) -+ wake_do_raid(rs); -+} -+ -+/* Raid mapping function. */ -+static int raid_map(struct dm_target *ti, struct bio *bio, -+ union map_info *map_context) -+{ -+ /* I don't want to waste stripe cache capacity. */ -+ if (bio_rw(bio) == READA) -+ return -EIO; -+ else { -+ struct raid_set *rs = ti->private; -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + -+ (bio_data_dir(bio) == WRITE ? -+ S_BIOS_WRITE : S_BIOS_READ)); -+ -+ /* -+ * Get io reference to be waiting for to drop -+ * to zero on device suspension/destruction. -+ */ -+ io_get(rs); -+ bio->bi_sector -= ti->begin; /* Remap sector. */ -+ queue_bio(rs, bio); /* Queue to the daemon. */ -+ return DM_MAPIO_SUBMITTED; /* Handle later. */ -+ } -+} -+ -+/* Device suspend. */ -+static void raid_postsuspend(struct dm_target *ti) -+{ -+ struct raid_set *rs = ti->private; -+ struct dm_dirty_log *dl = rs->recover.dl; -+ -+ SetRSSuspended(rs); -+ -+ if (RSRecover(rs)) -+ dm_rh_stop_recovery(rs->recover.rh); /* Wakes do_raid(). */ -+ else -+ wake_do_raid(rs); -+ -+ wait_ios(rs); /* Wait for completion of all ios being processed. */ -+ if (dl->type->postsuspend && dl->type->postsuspend(dl)) -+ /* Suspend dirty log. */ -+ /* FIXME: need better error handling. */ -+ DMWARN("log suspend failed"); -+} -+ -+/* Device resume. */ -+static void raid_resume(struct dm_target *ti) -+{ -+ struct raid_set *rs = ti->private; -+ struct recover *rec = &rs->recover; -+ struct dm_dirty_log *dl = rec->dl; -+ -+ if (dl->type->resume && dl->type->resume(dl)) -+ /* Resume dirty log. */ -+ /* FIXME: need better error handling. */ -+ DMWARN("log resume failed"); -+ -+ rec->nr_regions_to_recover = -+ rec->nr_regions - dl->type->get_sync_count(dl); -+ -+ ClearRSSuspended(rs); -+ -+ /* Reset any unfinished recovery. */ -+ if (RSRecover(rs)) { -+ recovery_region_reset(rs); -+ dm_rh_start_recovery(rec->rh);/* Calls wake_do_raid(). 
*/ -+ } else -+ wake_do_raid(rs); -+} -+ -+static INLINE unsigned sc_size(struct raid_set *rs) -+{ -+ return to_sector(atomic_read(&rs->sc.stripes) * -+ (sizeof(struct stripe) + -+ (sizeof(struct stripe_set) + -+ (sizeof(struct page_list) + -+ to_bytes(rs->set.io_size) * -+ rs->set.raid_devs)) + -+ (rs->recover. -+ end_jiffies ? 0 : to_bytes(rs->set.raid_devs * -+ rs->recover. -+ io_size)))); -+} -+ -+/* REMOVEME: status output for development. */ -+static void -+raid_devel_stats(struct dm_target *ti, char *result, -+ unsigned *size, unsigned maxlen) -+{ -+ unsigned chunks, stripes, sz = *size; -+ unsigned long j; -+ char buf[BDEVNAME_SIZE], *p; -+ struct stats_map *sm, *sm_end = ARRAY_END(stats_map); -+ struct raid_set *rs = ti->private; -+ struct recover *rec = &rs->recover; -+ struct timespec ts; -+ -+ DMEMIT("%s ", version); -+ DMEMIT("io_inprocess=%d ", atomic_read(&rs->io.in_process)); -+ DMEMIT("io_inprocess_max=%d ", atomic_read(&rs->io.in_process_max)); -+ -+ for (sm = stats_map; sm < sm_end; sm++) -+ DMEMIT("%s%d", sm->str, atomic_read(rs->stats + sm->type)); -+ -+ DMEMIT(" overwrite=%s ", RSCheckOverwrite(rs) ? "on" : "off"); -+ DMEMIT("sc=%u/%u/%u/%u/%u ", rs->set.chunk_size, rs->set.io_size, -+ atomic_read(&rs->sc.stripes), rs->sc.hash.buckets, -+ sc_size(rs)); -+ -+ j = (rec->end_jiffies ? rec->end_jiffies : jiffies) - -+ rec->start_jiffies; -+ jiffies_to_timespec(j, &ts); -+ sprintf(buf, "%ld.%ld", ts.tv_sec, ts.tv_nsec); -+ p = strchr(buf, '.'); -+ p[3] = 0; -+ -+ DMEMIT("rg=%llu%s/%llu/%llu/%u %s ", -+ (unsigned long long) rec->nr_regions_recovered, -+ RSRegionGet(rs) ? "+" : "", -+ (unsigned long long) rec->nr_regions_to_recover, -+ (unsigned long long) rec->nr_regions, rec->bandwidth, buf); -+ -+ rs_get_ra(rs, &stripes, &chunks); -+ DMEMIT("ra=%u/%u ", stripes, chunks); -+ -+ *size = sz; -+} -+ -+static int -+raid_status(struct dm_target *ti, status_type_t type, -+ char *result, unsigned maxlen) -+{ -+ unsigned i, sz = 0; -+ char buf[BDEVNAME_SIZE]; -+ struct raid_set *rs = ti->private; -+ -+ switch (type) { -+ case STATUSTYPE_INFO: -+ /* REMOVEME: statistics. */ -+ if (RSDevelStats(rs)) -+ raid_devel_stats(ti, result, &sz, maxlen); -+ -+ DMEMIT("%u ", rs->set.raid_devs); -+ -+ for (i = 0; i < rs->set.raid_devs; i++) -+ DMEMIT("%s ", -+ format_dev_t(buf, rs->dev[i].dev->bdev->bd_dev)); -+ -+ DMEMIT("1 "); -+ for (i = 0; i < rs->set.raid_devs; i++) { -+ DMEMIT("%c", dev_operational(rs, i) ? 
'A' : 'D'); -+ -+ if (rs->set.raid_type->level == raid4 && -+ i == rs->set.pi) -+ DMEMIT("p"); -+ -+ if (rs->set.dev_to_init == i) -+ DMEMIT("i"); -+ } -+ -+ break; -+ -+ case STATUSTYPE_TABLE: -+ sz = rs->recover.dl->type->status(rs->recover.dl, type, -+ result, maxlen); -+ DMEMIT("%s %u ", rs->set.raid_type->name, -+ rs->set.raid_parms); -+ -+ if (rs->set.raid_type->level == raid4) -+ DMEMIT("%d ", rs->set.pi_parm); -+ -+ if (rs->set.raid_parms) -+ DMEMIT("%d ", rs->set.chunk_size_parm); -+ -+ if (rs->set.raid_parms > 1) -+ DMEMIT("%d ", rs->sc.stripes_parm); -+ -+ if (rs->set.raid_parms > 2) -+ DMEMIT("%d ", rs->set.io_size_parm); -+ -+ if (rs->set.raid_parms > 3) -+ DMEMIT("%d ", rs->recover.io_size_parm); -+ -+ if (rs->set.raid_parms > 4) -+ DMEMIT("%d ", rs->recover.bandwidth_parm); -+ -+ DMEMIT("%u %d ", rs->set.raid_devs, rs->set.dev_to_init); -+ -+ for (i = 0; i < rs->set.raid_devs; i++) -+ DMEMIT("%s %llu ", -+ format_dev_t(buf, -+ rs->dev[i].dev->bdev->bd_dev), -+ (unsigned long long) rs->dev[i].start); -+ } -+ -+ return 0; -+} -+ -+/* -+ * Message interface -+ */ -+enum raid_msg_actions { -+ act_bw, /* Recovery bandwidth switch. */ -+ act_dev, /* Device failure switch. */ -+ act_overwrite, /* Stripe overwrite check. */ -+ act_read_ahead, /* Set read ahead. */ -+ act_stats, /* Development statistics switch. */ -+ act_sc, /* Stripe cache switch. */ -+ -+ act_on, /* Set entity on. */ -+ act_off, /* Set entity off. */ -+ act_reset, /* Reset entity. */ -+ -+ act_set = act_on, /* Set # absolute. */ -+ act_grow = act_off, /* Grow # by an amount. */ -+ act_shrink = act_reset, /* Shrink # by an amount. */ -+}; -+ -+/* Turn a delta to absolute. */ -+static int _absolute(unsigned long action, int act, int r) -+{ -+ /* Make delta absolute. */ -+ if (test_bit(act_set, &action)) -+ ; -+ else if (test_bit(act_grow, &action)) -+ r += act; -+ else if (test_bit(act_shrink, &action)) -+ r = act - r; -+ else -+ r = -EINVAL; -+ -+ return r; -+} -+ -+ /* Change recovery io bandwidth. */ -+static int bandwidth_change(struct dm_msg *msg, void *context) -+{ -+ struct raid_set *rs = context; -+ int act = rs->recover.bandwidth; -+ int bandwidth = DM_MSG_INT_ARG(msg); -+ -+ if (range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) { -+ /* Make delta bandwidth absolute. */ -+ bandwidth = _absolute(msg->action, act, bandwidth); -+ -+ /* Check range. */ -+ if (range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) { -+ recover_set_bandwidth(rs, bandwidth); -+ return 0; -+ } -+ } -+ -+ set_bit(dm_msg_ret_arg, &msg->ret); -+ set_bit(dm_msg_ret_inval, &msg->ret); -+ return -EINVAL; -+} -+ -+/* Change state of a device (running/offline). */ -+/* FIXME: this only works while recovering!. */ -+static int device_state(struct dm_msg *msg, void *context) -+{ -+ int r; -+ const char *str = "is already "; -+ union dev_lookup dl = { .dev_name = DM_MSG_STR_ARG(msg) }; -+ struct raid_set *rs = context; -+ -+ r = raid_dev_lookup(rs, strchr(dl.dev_name, ':') ? -+ bymajmin : byname, &dl); -+ if (r == -ENODEV) { -+ DMERR("device %s is no member of this set", dl.dev_name); -+ return r; -+ } -+ -+ if (test_bit(act_off, &msg->action)) { -+ if (dev_operational(rs, r)) -+ str = ""; -+ } else if (!dev_operational(rs, r)) -+ str = ""; -+ -+ DMINFO("/dev/%s %s%s", dl.dev_name, str, -+ test_bit(act_off, &msg->action) ? "offline" : "running"); -+ -+ return test_bit(act_off, &msg->action) ? -+ raid_set_check_and_degrade(rs, NULL, r) : -+ raid_set_check_and_upgrade(rs, r); -+} -+ -+/* Set/reset development feature flags. 
*/ -+static int devel_flags(struct dm_msg *msg, void *context) -+{ -+ struct raid_set *rs = context; -+ -+ if (test_bit(act_on, &msg->action)) -+ return test_and_set_bit(msg->spec->parm, -+ &rs->io.flags) ? -EPERM : 0; -+ else if (test_bit(act_off, &msg->action)) -+ return test_and_clear_bit(msg->spec->parm, -+ &rs->io.flags) ? 0 : -EPERM; -+ else if (test_bit(act_reset, &msg->action)) { -+ if (test_bit(act_stats, &msg->action)) { -+ stats_reset(rs); -+ goto on; -+ } else if (test_bit(act_overwrite, &msg->action)) { -+on: -+ set_bit(msg->spec->parm, &rs->io.flags); -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+ /* Set stripe and chunk read ahead pages. */ -+static int read_ahead_set(struct dm_msg *msg, void *context) -+{ -+ int stripes = DM_MSG_INT_ARGS(msg, 0); -+ int chunks = DM_MSG_INT_ARGS(msg, 1); -+ -+ if (range_ok(stripes, 1, 512) && -+ range_ok(chunks, 1, 512)) { -+ rs_set_bdi(context, stripes, chunks); -+ return 0; -+ } -+ -+ set_bit(dm_msg_ret_arg, &msg->ret); -+ set_bit(dm_msg_ret_inval, &msg->ret); -+ return -EINVAL; -+} -+ -+/* Resize the stripe cache. */ -+static int stripecache_resize(struct dm_msg *msg, void *context) -+{ -+ int act, stripes; -+ struct raid_set *rs = context; -+ -+ /* Deny permission in case the daemon is still shrinking!. */ -+ if (atomic_read(&rs->sc.stripes_to_shrink)) -+ return -EPERM; -+ -+ stripes = DM_MSG_INT_ARG(msg); -+ if (stripes > 0) { -+ act = atomic_read(&rs->sc.stripes); -+ -+ /* Make delta stripes absolute. */ -+ stripes = _absolute(msg->action, act, stripes); -+ -+ /* -+ * Check range and that the # of stripes changes. -+ * We can grow from gere but need to leave any -+ * shrinking to the worker for synchronization. -+ */ -+ if (range_ok(stripes, STRIPES_MIN, STRIPES_MAX)) { -+ if (stripes > act) -+ return sc_grow(&rs->sc, stripes - act, SC_GROW); -+ else if (stripes < act) { -+ atomic_set(&rs->sc.stripes_to_shrink, -+ act - stripes); -+ wake_do_raid(rs); -+ } -+ -+ return 0; -+ } -+ } -+ -+ set_bit(dm_msg_ret_arg, &msg->ret); -+ set_bit(dm_msg_ret_inval, &msg->ret); -+ return -EINVAL; -+} -+ -+/* Parse the RAID message action. */ -+/* -+ * 'ba[ndwidth] {se[t],g[row],sh[rink]} #' # e.g 'ba se 50' -+ * 'de{vice] o[ffline]/r[unning] DevName/maj:min' # e.g 'device o /dev/sda' -+ * "o[verwrite] {on,of[f],r[eset]}' # e.g. 'o of' -+ * "r[ead_ahead] set #stripes #chunks # e.g. 'r se 3 2' -+ * 'sta[tistics] {on,of[f],r[eset]}' # e.g. 'stat of' -+ * 'str[ipecache] {se[t],g[row],sh[rink]} #' # e.g. 'stripe set 1024' -+ * -+ */ -+static int -+raid_message(struct dm_target *ti, unsigned argc, char **argv) -+{ -+ /* Variables to store the parsed parameters im. */ -+ static int i[2]; -+ static unsigned long *i_arg[] = { -+ (unsigned long *) i + 0, -+ (unsigned long *) i + 1, -+ }; -+ static char *p; -+ static unsigned long *p_arg[] = { (unsigned long *) &p }; -+ -+ /* Declare all message option strings. */ -+ static char *str_sgs[] = { "set", "grow", "shrink" }; -+ static char *str_dev[] = { "running", "offline" }; -+ static char *str_oor[] = { "on", "off", "reset" }; -+ -+ /* Declare all actions. */ -+ static unsigned long act_sgs[] = { act_set, act_grow, act_shrink }; -+ static unsigned long act_oor[] = { act_on, act_off, act_reset }; -+ -+ /* Bandwidth option. */ -+ static struct dm_message_option bw_opt = { 3, str_sgs, act_sgs }; -+ static struct dm_message_argument bw_args = { -+ 1, i_arg, { dm_msg_int_t } -+ }; -+ -+ /* Device option. 
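
The message specs below are driven through the standard dmsetup message interface; sector 0 and the full keyword forms are used here, and the abbreviations listed in the comment above work as well. The mapped device name r45 is just an example:

        dmsetup message r45 0 bandwidth set 50      # recovery may use 50% of the io bandwidth
        dmsetup message r45 0 statistics reset      # reset and switch on the development counters
        dmsetup message r45 0 stripecache grow 256  # add 256 stripes to the stripe cache
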
*/ -+ static struct dm_message_option dev_opt = { 2, str_dev, act_oor }; -+ static struct dm_message_argument dev_args = { -+ 1, p_arg, { dm_msg_base_t } -+ }; -+ -+ /* Read ahead option. */ -+ static struct dm_message_option ra_opt = { 1, str_sgs, act_sgs }; -+ static struct dm_message_argument ra_args = { -+ 2, i_arg, { dm_msg_int_t, dm_msg_int_t } -+ }; -+ -+ static struct dm_message_argument null_args = { -+ 0, NULL, { dm_msg_int_t } -+ }; -+ -+ /* Overwrite and statistics option. */ -+ static struct dm_message_option ovr_stats_opt = { 3, str_oor, act_oor }; -+ -+ /* Sripecache option. */ -+ static struct dm_message_option stripe_opt = { 3, str_sgs, act_sgs }; -+ -+ /* Declare messages. */ -+ static struct dm_msg_spec specs[] = { -+ { "bandwidth", act_bw, &bw_opt, &bw_args, -+ 0, bandwidth_change }, -+ { "device", act_dev, &dev_opt, &dev_args, -+ 0, device_state }, -+ { "overwrite", act_overwrite, &ovr_stats_opt, &null_args, -+ RS_CHECK_OVERWRITE, devel_flags }, -+ { "read_ahead", act_read_ahead, &ra_opt, &ra_args, -+ 0, read_ahead_set }, -+ { "statistics", act_stats, &ovr_stats_opt, &null_args, -+ RS_DEVEL_STATS, devel_flags }, -+ { "stripecache", act_sc, &stripe_opt, &bw_args, -+ 0, stripecache_resize }, -+ }; -+ -+ /* The message for the parser. */ -+ struct dm_msg msg = { -+ .num_specs = ARRAY_SIZE(specs), -+ .specs = specs, -+ }; -+ -+ return dm_message_parse(TARGET, &msg, ti->private, argc, argv); -+} -+/* -+ * END message interface -+ */ -+ -+static struct target_type raid_target = { -+ .name = "raid45", -+ .version = {1, 0, 0}, -+ .module = THIS_MODULE, -+ .ctr = raid_ctr, -+ .dtr = raid_dtr, -+ .map = raid_map, -+ .postsuspend = raid_postsuspend, -+ .resume = raid_resume, -+ .status = raid_status, -+ .message = raid_message, -+}; -+ -+static void init_exit(const char *bad_msg, const char *good_msg, int r) -+{ -+ if (r) -+ DMERR("Failed to %sregister target [%d]", bad_msg, r); -+ else -+ DMINFO("%s %s", good_msg, version); -+} -+ -+static int __init dm_raid_init(void) -+{ -+ int r; -+ -+ r = dm_register_target(&raid_target); -+ init_exit("", "initialized", r); -+ return r; -+} -+ -+static void __exit dm_raid_exit(void) -+{ -+ dm_unregister_target(&raid_target); -+ init_exit("un", "exit", 0); -+} -+ -+/* Module hooks. */ -+module_init(dm_raid_init); -+module_exit(dm_raid_exit); -+ -+MODULE_DESCRIPTION(DM_NAME " raid4/5 target"); -+MODULE_AUTHOR("Heinz Mauelshagen "); -+MODULE_LICENSE("GPL"); ---- /dev/null -+++ b/drivers/md/dm-raid45.h -@@ -0,0 +1,28 @@ -+/* -+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen (Mauelshagen@RedHat.com) -+ * -+ * Locking definitions for the device-mapper RAID45 target. -+ * -+ * This file is released under the GPL. -+ * -+ */ -+ -+#ifndef _DM_RAID45_H -+#define _DM_RAID45_H -+ -+/* Factor out to dm.h! */ -+#define STR_LEN(ptr, str) (ptr), (str), strlen((ptr)) -+ -+enum dm_lock_type { DM_RAID45_EX, DM_RAID45_SHARED }; -+ -+struct dm_raid45_locking_type { -+ /* Request a lock on a stripe. */ -+ void* (*lock)(sector_t key, enum dm_lock_type type); -+ -+ /* Release a lock on a stripe. */ -+ void (*unlock)(void *lock_handle); -+}; -+ -+#endif ---- a/drivers/md/dm-region-hash.c -+++ b/drivers/md/dm-region-hash.c -@@ -53,100 +53,6 @@ - * 'delayed_bios' fields of the regions. This is used from irq - * context, so all other uses will have to suspend local irqs. 
- *---------------------------------------------------------------*/ --struct dm_region_hash { -- uint32_t region_size; -- unsigned region_shift; -- -- /* holds persistent region state */ -- struct dm_dirty_log *log; -- -- /* hash table */ -- rwlock_t hash_lock; -- mempool_t *region_pool; -- unsigned mask; -- unsigned nr_buckets; -- unsigned prime; -- unsigned shift; -- struct list_head *buckets; -- -- unsigned max_recovery; /* Max # of regions to recover in parallel */ -- -- spinlock_t region_lock; -- atomic_t recovery_in_flight; -- struct semaphore recovery_count; -- struct list_head clean_regions; -- struct list_head quiesced_regions; -- struct list_head recovered_regions; -- struct list_head failed_recovered_regions; -- -- /* -- * If there was a barrier failure no regions can be marked clean. -- */ -- int barrier_failure; -- -- void *context; -- sector_t target_begin; -- -- /* Callback function to schedule bios writes */ -- void (*dispatch_bios)(void *context, struct bio_list *bios); -- -- /* Callback function to wakeup callers worker thread. */ -- void (*wakeup_workers)(void *context); -- -- /* Callback function to wakeup callers recovery waiters. */ -- void (*wakeup_all_recovery_waiters)(void *context); --}; -- --struct dm_region { -- struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */ -- region_t key; -- int state; -- -- struct list_head hash_list; -- struct list_head list; -- -- atomic_t pending; -- struct bio_list delayed_bios; --}; -- --/* -- * Conversion fns -- */ --static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) --{ -- return sector >> rh->region_shift; --} -- --sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) --{ -- return region << rh->region_shift; --} --EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); -- --region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) --{ -- return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); --} --EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); -- --void *dm_rh_region_context(struct dm_region *reg) --{ -- return reg->rh->context; --} --EXPORT_SYMBOL_GPL(dm_rh_region_context); -- --region_t dm_rh_get_region_key(struct dm_region *reg) --{ -- return reg->key; --} --EXPORT_SYMBOL_GPL(dm_rh_get_region_key); -- --sector_t dm_rh_get_region_size(struct dm_region_hash *rh) --{ -- return rh->region_size; --} --EXPORT_SYMBOL_GPL(dm_rh_get_region_size); -- - /* - * FIXME: shall we pass in a structure instead of all these args to - * dm_region_hash_create()???? 
-@@ -495,7 +401,7 @@ void dm_rh_update_states(struct dm_regio - } - EXPORT_SYMBOL_GPL(dm_rh_update_states); - --static void rh_inc(struct dm_region_hash *rh, region_t region) -+void dm_rh_inc(struct dm_region_hash *rh, region_t region) - { - struct dm_region *reg; - -@@ -517,6 +423,7 @@ static void rh_inc(struct dm_region_hash - - read_unlock(&rh->hash_lock); - } -+EXPORT_SYMBOL_GPL(dm_rh_inc); - - void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) - { -@@ -525,7 +432,7 @@ void dm_rh_inc_pending(struct dm_region_ - for (bio = bios->head; bio; bio = bio->bi_next) { - if (bio_empty_barrier(bio)) - continue; -- rh_inc(rh, dm_rh_bio_to_region(rh, bio)); -+ dm_rh_inc(rh, dm_rh_bio_to_region(rh, bio)); - } - } - EXPORT_SYMBOL_GPL(dm_rh_inc_pending); -@@ -614,8 +521,9 @@ static int __rh_recovery_prepare(struct - return 1; - } - --void dm_rh_recovery_prepare(struct dm_region_hash *rh) -+int dm_rh_recovery_prepare(struct dm_region_hash *rh) - { -+ int r = 0; - /* Extra reference to avoid race with dm_rh_stop_recovery */ - atomic_inc(&rh->recovery_in_flight); - -@@ -624,13 +532,17 @@ void dm_rh_recovery_prepare(struct dm_re - if (__rh_recovery_prepare(rh) <= 0) { - atomic_dec(&rh->recovery_in_flight); - up(&rh->recovery_count); -+ r = -ENOENT; - break; - } - } - - /* Drop the extra reference */ -- if (atomic_dec_and_test(&rh->recovery_in_flight)) -+ if (atomic_dec_and_test(&rh->recovery_in_flight)) { - rh->wakeup_all_recovery_waiters(rh->context); -+ r = -ESRCH; -+ } -+ return r; - } - EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare); - ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -2673,6 +2673,7 @@ struct gendisk *dm_disk(struct mapped_de - { - return md->disk; - } -+EXPORT_SYMBOL_GPL(dm_disk); - - struct kobject *dm_kobject(struct mapped_device *md) - { ---- a/include/linux/dm-region-hash.h -+++ b/include/linux/dm-region-hash.h -@@ -15,8 +15,62 @@ - /*----------------------------------------------------------------- - * Region hash - *----------------------------------------------------------------*/ --struct dm_region_hash; --struct dm_region; -+struct dm_region_hash { -+ uint32_t region_size; -+ unsigned region_shift; -+ -+ /* holds persistent region state */ -+ struct dm_dirty_log *log; -+ -+ /* hash table */ -+ rwlock_t hash_lock; -+ mempool_t *region_pool; -+ unsigned mask; -+ unsigned nr_buckets; -+ unsigned prime; -+ unsigned shift; -+ struct list_head *buckets; -+ -+ unsigned max_recovery; /* Max # of regions to recover in parallel */ -+ -+ spinlock_t region_lock; -+ atomic_t recovery_in_flight; -+ struct semaphore recovery_count; -+ struct list_head clean_regions; -+ struct list_head quiesced_regions; -+ struct list_head recovered_regions; -+ struct list_head failed_recovered_regions; -+ -+ /* -+ * If there was a barrier failure no regions can be marked clean. -+ */ -+ int barrier_failure; -+ -+ void *context; -+ sector_t target_begin; -+ -+ /* Callback function to schedule bios writes */ -+ void (*dispatch_bios)(void *context, struct bio_list *bios); -+ -+ /* Callback function to wakeup callers worker thread. */ -+ void (*wakeup_workers)(void *context); -+ -+ /* Callback function to wakeup callers recovery waiters. */ -+ void (*wakeup_all_recovery_waiters)(void *context); -+}; -+ -+struct dm_region { -+ struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */ -+ region_t key; -+ int state; -+ -+ struct list_head hash_list; -+ struct list_head list; -+ -+ atomic_t pending; -+ struct bio_list delayed_bios; -+}; -+ - - /* - * States a region can have. 
-@@ -45,19 +99,6 @@ void dm_region_hash_destroy(struct dm_re - struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh); - - /* -- * Conversion functions. -- */ --region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio); --sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); --void *dm_rh_region_context(struct dm_region *reg); -- --/* -- * Get region size and key (ie. number of the region). -- */ --sector_t dm_rh_get_region_size(struct dm_region_hash *rh); --region_t dm_rh_get_region_key(struct dm_region *reg); -- --/* - * Get/set/update region state (and dirty log). - * - */ -@@ -73,6 +114,7 @@ int dm_rh_flush(struct dm_region_hash *r - - /* Inc/dec pending count on regions. */ - void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios); -+void dm_rh_inc(struct dm_region_hash *rh, region_t region); - void dm_rh_dec(struct dm_region_hash *rh, region_t region); - - /* Delay bios on regions. */ -@@ -85,7 +127,7 @@ void dm_rh_mark_nosync(struct dm_region_ - */ - - /* Prepare some regions for recovery by starting to quiesce them. */ --void dm_rh_recovery_prepare(struct dm_region_hash *rh); -+int dm_rh_recovery_prepare(struct dm_region_hash *rh); - - /* Try fetching a quiesced region for recovery. */ - struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh); -@@ -100,4 +142,39 @@ int dm_rh_recovery_in_flight(struct dm_r - void dm_rh_start_recovery(struct dm_region_hash *rh); - void dm_rh_stop_recovery(struct dm_region_hash *rh); - -+/* -+ * Conversion fns -+ */ -+static inline region_t dm_rh_sector_to_region(struct dm_region_hash *rh, -+ sector_t sector) -+{ -+ return sector >> rh->region_shift; -+} -+ -+static inline sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, -+ region_t region) -+{ -+ return region << rh->region_shift; -+} -+ -+static inline region_t dm_rh_bio_to_region(struct dm_region_hash *rh, -+ struct bio *bio) -+{ -+ return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); -+} -+ -+static inline void *dm_rh_region_context(struct dm_region *reg) -+{ -+ return reg->rh->context; -+} -+ -+static inline region_t dm_rh_get_region_key(struct dm_region *reg) -+{ -+ return reg->key; -+} -+ -+static inline sector_t dm_rh_get_region_size(struct dm_region_hash *rh) -+{ -+ return rh->region_size; -+} - #endif /* DM_REGION_HASH_H */ diff --git a/patches.suse/dmraid45-dm_dirty_log_create-api-fix b/patches.suse/dmraid45-dm_dirty_log_create-api-fix index f0b1d01..bc7b5d1 100644 --- a/patches.suse/dmraid45-dm_dirty_log_create-api-fix +++ b/patches.suse/dmraid45-dm_dirty_log_create-api-fix @@ -14,7 +14,7 @@ Signed-off-by: Jeff Mahoney --- a/drivers/md/dm-raid45.c +++ b/drivers/md/dm-raid45.c -@@ -3367,7 +3367,7 @@ context_alloc(struct raid_set **raid_set +@@ -3598,7 +3598,7 @@ context_alloc(struct raid_type *raid_typ */ ti_len = ti->len; ti->len = sectors_per_dev; diff --git a/patches.suse/dmraid45-dm_get_device-takes-fewer-arguments b/patches.suse/dmraid45-dm_get_device-takes-fewer-arguments index 14b91e1..06dc87d 100644 --- a/patches.suse/dmraid45-dm_get_device-takes-fewer-arguments +++ b/patches.suse/dmraid45-dm_get_device-takes-fewer-arguments @@ -12,7 +12,7 @@ Acked-by: Jeff Mahoney --- a/drivers/md/dm-raid45.c +++ b/drivers/md/dm-raid45.c -@@ -3588,9 +3588,8 @@ dev_parms(struct dm_target *ti, struct r +@@ -3810,9 +3810,8 @@ DMINFO("rs->set.sectors_per_dev=%llu", ( TI_ERR("Invalid RAID device offset parameter"); dev->start = tmp; diff --git a/patches.suse/elousb-2.6.35-api-changes 
b/patches.suse/elousb-2.6.35-api-changes new file mode 100644 index 0000000..d73a4a9 --- /dev/null +++ b/patches.suse/elousb-2.6.35-api-changes @@ -0,0 +1,51 @@ +From: Jeff Mahoney +Subject: elousb: API Changes for 2.6.35 +Patch-mainline: Whenever the driver makes it upstream + + This patch contains API fixes for 2.6.35. + +Acked-by: Jeff Mahoney +--- + drivers/input/touchscreen/elousb.c | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +--- a/drivers/input/touchscreen/elousb.c ++++ b/drivers/input/touchscreen/elousb.c +@@ -168,7 +168,7 @@ static int elousb_probe(struct usb_inter + if (!elo || !input_dev) + goto fail1; + +- elo->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &elo->data_dma); ++ elo->data = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &elo->data_dma); + if (!elo->data) + goto fail1; + +@@ -242,7 +242,9 @@ static int elousb_probe(struct usb_inter + elo->irq->transfer_dma = elo->data_dma; + elo->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + +- input_register_device(elo->dev); ++ error = input_register_device(elo->dev); ++ if (error) ++ goto fail4; + + usb_set_intfdata(intf, elo); + return 0; +@@ -252,7 +254,7 @@ fail4: + fail3: + usb_free_urb(elo->irq); + fail2: +- usb_buffer_free(dev, 8, elo->data, elo->data_dma); ++ usb_free_coherent(dev, 8, elo->data, elo->data_dma); + fail1: + input_free_device(input_dev); + kfree(elo); +@@ -268,7 +270,7 @@ static void elousb_disconnect(struct usb + usb_kill_urb(elo->irq); + input_unregister_device(elo->dev); + usb_free_urb(elo->irq); +- usb_buffer_free(interface_to_usbdev(intf), 8, elo->data, elo->data_dma); ++ usb_free_coherent(interface_to_usbdev(intf), 8, elo->data, elo->data_dma); + kfree(elo); + } + } diff --git a/patches.suse/export-release_open_intent b/patches.suse/export-release_open_intent index ba8da1c..9258456 100644 --- a/patches.suse/export-release_open_intent +++ b/patches.suse/export-release_open_intent @@ -13,11 +13,11 @@ Signed-off-by: Jeff Mahoney --- a/fs/namei.c +++ b/fs/namei.c -@@ -386,6 +386,7 @@ void release_open_intent(struct nameidat - else - fput(nd->intent.open.file); +@@ -562,6 +562,7 @@ void release_open_intent(struct nameidat + fput(file); + } } +EXPORT_SYMBOL_GPL(release_open_intent); - static inline struct dentry * - do_revalidate(struct dentry *dentry, struct nameidata *nd) + static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) + { diff --git a/patches.suse/export-security_inode_permission b/patches.suse/export-security_inode_permission index 2ad17d6..bca9a33 100644 --- a/patches.suse/export-security_inode_permission +++ b/patches.suse/export-security_inode_permission @@ -11,11 +11,11 @@ Signed-off-by: Jeff Mahoney --- a/security/security.c +++ b/security/security.c -@@ -564,6 +564,7 @@ int security_inode_permission(struct ino +@@ -528,6 +528,7 @@ int security_inode_permission(struct ino return 0; return security_ops->inode_permission(inode, mask); } +EXPORT_SYMBOL_GPL(security_inode_permission); - int security_inode_setattr(struct dentry *dentry, struct iattr *attr) + int security_inode_exec_permission(struct inode *inode, unsigned int flags) { diff --git a/patches.suse/export-sync_page_range b/patches.suse/export-sync_page_range deleted file mode 100644 index 3b04206..0000000 --- a/patches.suse/export-sync_page_range +++ /dev/null @@ -1,178 +0,0 @@ -From 48b8926b6b02382bc774efee2ed2cd6be8770ac0 Mon Sep 17 00:00:00 2001 -From: Michal Marek -Date: Fri, 20 Nov 2009 17:25:21 +0100 -Subject: [PATCH] Revert "vfs: Remove generic_osync_inode() and 
sync_page_range{_nolock}()" -References: bnc#557231 - -Commit 18f2ee705d98034b0f229a3202d827468d4bffd9 broke iscsitarget, revert -it temporarily. The exports are marged _GPL though. - -Signed-off-by: Michal Marek - ---- - fs/fs-writeback.c | 54 ++++++++++++++++++++++++++++++++++++++ - include/linux/fs.h | 5 +++ - include/linux/writeback.h | 4 ++ - mm/filemap.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++ - 4 files changed, 127 insertions(+) - ---- a/fs/fs-writeback.c -+++ b/fs/fs-writeback.c -@@ -1280,3 +1280,57 @@ int sync_inode(struct inode *inode, stru - return ret; - } - EXPORT_SYMBOL(sync_inode); -+ -+/** -+ * generic_osync_inode - flush all dirty data for a given inode to disk -+ * @inode: inode to write -+ * @mapping: the address_space that should be flushed -+ * @what: what to write and wait upon -+ * -+ * This can be called by file_write functions for files which have the -+ * O_SYNC flag set, to flush dirty writes to disk. -+ * -+ * @what is a bitmask, specifying which part of the inode's data should be -+ * written and waited upon. -+ * -+ * OSYNC_DATA: i_mapping's dirty data -+ * OSYNC_METADATA: the buffers at i_mapping->private_list -+ * OSYNC_INODE: the inode itself -+ */ -+ -+int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what) -+{ -+ int err = 0; -+ int need_write_inode_now = 0; -+ int err2; -+ -+ if (what & OSYNC_DATA) -+ err = filemap_fdatawrite(mapping); -+ if (what & (OSYNC_METADATA|OSYNC_DATA)) { -+ err2 = sync_mapping_buffers(mapping); -+ if (!err) -+ err = err2; -+ } -+ if (what & OSYNC_DATA) { -+ err2 = filemap_fdatawait(mapping); -+ if (!err) -+ err = err2; -+ } -+ -+ spin_lock(&inode_lock); -+ if ((inode->i_state & I_DIRTY) && -+ ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC))) -+ need_write_inode_now = 1; -+ spin_unlock(&inode_lock); -+ -+ if (need_write_inode_now) { -+ err2 = write_inode_now(inode, 1); -+ if (!err) -+ err = err2; -+ } -+ else -+ inode_sync_wait(inode); -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(generic_osync_inode); ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1459,6 +1459,11 @@ int fiemap_check_flags(struct fiemap_ext - #define DT_SOCK 12 - #define DT_WHT 14 - -+#define OSYNC_METADATA (1<<0) -+#define OSYNC_DATA (1<<1) -+#define OSYNC_INODE (1<<2) -+int generic_osync_inode(struct inode *, struct address_space *, int); -+ - /* - * This is the "filldir" function type, used by readdir() to let - * the kernel specify what kind of dirent layout it wants to have. ---- a/include/linux/writeback.h -+++ b/include/linux/writeback.h -@@ -148,6 +148,10 @@ int write_cache_pages(struct address_spa - struct writeback_control *wbc, writepage_t writepage, - void *data); - int do_writepages(struct address_space *mapping, struct writeback_control *wbc); -+int sync_page_range(struct inode *inode, struct address_space *mapping, -+ loff_t pos, loff_t count); -+int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, -+ loff_t pos, loff_t count); - void set_page_dirty_balance(struct page *page, int page_mkwrite); - void writeback_set_ratelimit(void); - ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -334,6 +334,70 @@ int filemap_fdatawait_range(struct addre - EXPORT_SYMBOL(filemap_fdatawait_range); - - /** -+ * sync_page_range - write and wait on all pages in the passed range -+ * @inode: target inode -+ * @mapping: target address_space -+ * @pos: beginning offset in pages to write -+ * @count: number of bytes to write -+ * -+ * Write and wait upon all the pages in the passed range. 
This is a "data -+ * integrity" operation. It waits upon in-flight writeout before starting and -+ * waiting upon new writeout. If there was an IO error, return it. -+ * -+ * We need to re-take i_mutex during the generic_osync_inode list walk because -+ * it is otherwise livelockable. -+ */ -+int sync_page_range(struct inode *inode, struct address_space *mapping, -+ loff_t pos, loff_t count) -+{ -+ pgoff_t start = pos >> PAGE_CACHE_SHIFT; -+ pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; -+ int ret; -+ -+ if (!mapping_cap_writeback_dirty(mapping) || !count) -+ return 0; -+ ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1); -+ if (ret == 0) { -+ mutex_lock(&inode->i_mutex); -+ ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); -+ mutex_unlock(&inode->i_mutex); -+ } -+ if (ret == 0) -+ ret = wait_on_page_writeback_range(mapping, start, end); -+ return ret; -+} -+EXPORT_SYMBOL_GPL(sync_page_range); -+ -+/** -+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking -+ * @inode: target inode -+ * @mapping: target address_space -+ * @pos: beginning offset in pages to write -+ * @count: number of bytes to write -+ * -+ * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea -+ * as it forces O_SYNC writers to different parts of the same file -+ * to be serialised right until io completion. -+ */ -+int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, -+ loff_t pos, loff_t count) -+{ -+ pgoff_t start = pos >> PAGE_CACHE_SHIFT; -+ pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; -+ int ret; -+ -+ if (!mapping_cap_writeback_dirty(mapping) || !count) -+ return 0; -+ ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1); -+ if (ret == 0) -+ ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); -+ if (ret == 0) -+ ret = wait_on_page_writeback_range(mapping, start, end); -+ return ret; -+} -+EXPORT_SYMBOL_GPL(sync_page_range_nolock); -+ -+/** - * filemap_fdatawait - wait for all under-writeback pages to complete - * @mapping: address space structure to wait for - * diff --git a/patches.suse/ext3-barrier-default b/patches.suse/ext3-barrier-default index 5c48c05..7041431 100644 --- a/patches.suse/ext3-barrier-default +++ b/patches.suse/ext3-barrier-default @@ -23,7 +23,7 @@ Acked-by: Jeff Mahoney --- a/fs/ext3/Kconfig +++ b/fs/ext3/Kconfig -@@ -49,6 +49,28 @@ config EXT3_DEFAULTS_TO_ORDERED +@@ -50,6 +50,28 @@ config EXT3_DEFAULTS_TO_ORDERED privacy issues of data=writeback and are willing to make that trade off, answer 'n'. @@ -64,7 +64,7 @@ Acked-by: Jeff Mahoney --- a/fs/ext3/super.c +++ b/fs/ext3/super.c -@@ -1688,6 +1688,10 @@ static int ext3_fill_super (struct super +@@ -1693,6 +1693,10 @@ static int ext3_fill_super (struct super sbi->s_resuid = le16_to_cpu(es->s_def_resuid); sbi->s_resgid = le16_to_cpu(es->s_def_resgid); diff --git a/patches.suse/file-capabilities-disable-by-default.diff b/patches.suse/file-capabilities-disable-by-default.diff index d8930d8..4b767cb 100644 --- a/patches.suse/file-capabilities-disable-by-default.diff +++ b/patches.suse/file-capabilities-disable-by-default.diff @@ -14,7 +14,7 @@ Signed-off-by: Andreas Gruenbacher --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt -@@ -1697,7 +1697,13 @@ and is between 256 and 4096 characters. +@@ -1659,7 +1659,13 @@ and is between 256 and 4096 characters. no_file_caps Tells the kernel not to honor file capabilities. 
The only way then for a file to be executed with privilege @@ -31,7 +31,7 @@ Signed-off-by: Andreas Gruenbacher function PAL_HALT_LIGHT when idle. This increases --- a/kernel/capability.c +++ b/kernel/capability.c -@@ -29,7 +29,7 @@ EXPORT_SYMBOL(__cap_empty_set); +@@ -28,7 +28,7 @@ EXPORT_SYMBOL(__cap_empty_set); EXPORT_SYMBOL(__cap_full_set); EXPORT_SYMBOL(__cap_init_eff_set); @@ -40,7 +40,7 @@ Signed-off-by: Andreas Gruenbacher static int __init file_caps_disable(char *str) { -@@ -38,6 +38,13 @@ static int __init file_caps_disable(char +@@ -37,6 +37,13 @@ static int __init file_caps_disable(char } __setup("no_file_caps", file_caps_disable); diff --git a/patches.suse/fs-knows-MAY_APPEND.diff b/patches.suse/fs-knows-MAY_APPEND.diff deleted file mode 100644 index f439724..0000000 --- a/patches.suse/fs-knows-MAY_APPEND.diff +++ /dev/null @@ -1,59 +0,0 @@ -From: Andreas Gruenbacher -Subject: Allow filesystems to handle MAY_APPEND -Patch-mainline: not yet - -The MS_WITHAPPEND super_block flag tells the vfs that the permission -inode operation understands the MAY_APPEND flag. This is required for -implementing permission models which go beyond the traditional UNIX -semantics. - -If a filesystem does not set the flag, the behavior is unchanged. - -Signed-off-by: Andreas Gruenbacher - ---- - fs/namei.c | 6 +++++- - include/linux/fs.h | 2 ++ - 2 files changed, 7 insertions(+), 1 deletion(-) - ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -252,6 +252,7 @@ int generic_permission(struct inode *ino - int inode_permission(struct inode *inode, int mask) - { - int retval; -+ int submask = mask; - - if (mask & MAY_WRITE) { - umode_t mode = inode->i_mode; -@@ -270,8 +271,11 @@ int inode_permission(struct inode *inode - return -EACCES; - } - -+ if (!IS_WITHAPPEND(inode)) -+ submask &= ~MAY_APPEND; -+ - if (inode->i_op->permission) -- retval = inode->i_op->permission(inode, mask); -+ retval = inode->i_op->permission(inode, submask); - else - retval = generic_permission(inode, mask, inode->i_op->check_acl); - ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -209,6 +209,7 @@ struct inodes_stat_t { - #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ - #define MS_I_VERSION (1<<23) /* Update inode I_version field */ - #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ -+#define MS_WITHAPPEND (1<<25) /* iop->permission() understands MAY_APPEND */ - #define MS_ACTIVE (1<<30) - #define MS_NOUSER (1<<31) - -@@ -259,6 +260,7 @@ struct inodes_stat_t { - #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK) - #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME) - #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION) -+#define IS_WITHAPPEND(inode) __IS_FLG(inode, MS_WITHAPPEND) - - #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) - #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) diff --git a/patches.suse/fs-may_iops.diff b/patches.suse/fs-may_iops.diff deleted file mode 100644 index b96b1a0..0000000 --- a/patches.suse/fs-may_iops.diff +++ /dev/null @@ -1,145 +0,0 @@ -From: Andreas Gruenbacher -Subject: VFS hooks for per-filesystem permission models -Patch-mainline: Not yet - -Add may_create and may_delete inode operations that filesystems can -implement in order to override the vfs provided default behavior. -This is required for implementing permission models which go beyond -the traditional UNIX semantics. - -If a filesystem does not implement these hooks, the behavior is -unchanged. 
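(Editorial sketch, not part of this patch: "examplefs" and its policy helper are invented names; only the two hook signatures come from the struct inode_operations change in the patch itself.) A filesystem opting in to the new hooks would wire them up roughly like this:

        #include <linux/fs.h>

        /* Site-specific policy would go here; this stub allows everything. */
        static int examplefs_policy_check(struct inode *dir, struct inode *victim,
                                          int isdir)
        {
                return 0;
        }

        /* Used on the create paths: create, mknod, mkdir, symlink, link, rename. */
        static int examplefs_may_create(struct inode *dir, int isdir)
        {
                /* Return 0 to allow, or a -errno such as -EACCES to deny. */
                return examplefs_policy_check(dir, NULL, isdir);
        }

        /* Used on the delete paths: unlink, rmdir, rename. */
        static int examplefs_may_delete(struct inode *dir, struct inode *victim)
        {
                return examplefs_policy_check(dir, victim, S_ISDIR(victim->i_mode));
        }

        static const struct inode_operations examplefs_dir_iops = {
                /* ...the usual lookup/create/unlink/mkdir operations... */
                .may_create     = examplefs_may_create,
                .may_delete     = examplefs_may_delete,
        };

When the hooks are present, the modified may_create()/may_delete() in fs/namei.c below call them (plus the security hook) instead of the generic inode_permission() and sticky-bit checks.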
- -Signed-off-by: Andreas Gruenbacher - ---- - fs/namei.c | 48 +++++++++++++++++++++++++++++++++++++----------- - include/linux/fs.h | 2 ++ - 2 files changed, 39 insertions(+), 11 deletions(-) - ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -1320,13 +1320,24 @@ static int may_delete(struct inode *dir, - BUG_ON(victim->d_parent->d_inode != dir); - audit_inode_child(victim, dir); - -- error = inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ if (dir->i_op->may_delete) { -+ if (IS_RDONLY(dir)) -+ return -EROFS; -+ if (IS_IMMUTABLE(dir)) -+ return -EACCES; -+ error = dir->i_op->may_delete(dir, victim->d_inode); -+ if (!error) -+ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ } else { -+ error = inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ if (!error && check_sticky(dir, victim->d_inode)) -+ error = -EPERM; -+ } - if (error) - return error; - if (IS_APPEND(dir)) - return -EPERM; -- if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)|| -- IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) -+ if (IS_APPEND(victim->d_inode) || IS_IMMUTABLE(victim->d_inode)) - return -EPERM; - if (isdir) { - if (!S_ISDIR(victim->d_inode->i_mode)) -@@ -1350,13 +1361,28 @@ static int may_delete(struct inode *dir, - * 3. We should have write and exec permissions on dir - * 4. We can't do it if dir is immutable (done in permission()) - */ --static inline int may_create(struct inode *dir, struct dentry *child) -+static inline int may_create(struct inode *dir, struct dentry *child, -+ int isdir) - { -+ int error; -+ - if (child->d_inode) - return -EEXIST; - if (IS_DEADDIR(dir)) - return -ENOENT; -- return inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ -+ if (dir->i_op->may_create) { -+ if (IS_RDONLY(dir)) -+ return -EROFS; -+ if (IS_IMMUTABLE(dir)) -+ return -EACCES; -+ error = dir->i_op->may_create(dir, isdir); -+ if (!error) -+ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ } else -+ error = inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ -+ return error; - } - - /* -@@ -1404,7 +1430,7 @@ void unlock_rename(struct dentry *p1, st - int vfs_create(struct inode *dir, struct dentry *dentry, int mode, - struct nameidata *nd) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 0); - - if (error) - return error; -@@ -1963,7 +1989,7 @@ EXPORT_SYMBOL_GPL(lookup_create); - - int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 0); - - if (error) - return error; -@@ -2067,7 +2093,7 @@ SYSCALL_DEFINE3(mknod, const char __user - - int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 1); - - if (error) - return error; -@@ -2350,7 +2376,7 @@ SYSCALL_DEFINE1(unlink, const char __use - - int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 0); - - if (error) - return error; -@@ -2423,7 +2449,7 @@ int vfs_link(struct dentry *old_dentry, - if (!inode) - return -ENOENT; - -- error = may_create(dir, new_dentry); -+ error = may_create(dir, new_dentry, S_ISDIR(inode->i_mode)); - if (error) - return error; - -@@ -2636,7 +2662,7 @@ int vfs_rename(struct inode *old_dir, st - return error; - - if (!new_dentry->d_inode) -- error = may_create(new_dir, new_dentry); -+ error = may_create(new_dir, new_dentry, is_dir); - else - error = may_delete(new_dir, 
new_dentry, is_dir); - if (error) ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1531,6 +1531,8 @@ struct inode_operations { - void (*truncate) (struct inode *); - int (*permission) (struct inode *, int); - int (*check_acl)(struct inode *, int); -+ int (*may_create) (struct inode *, int); -+ int (*may_delete) (struct inode *, struct inode *); - int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); - int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); diff --git a/patches.suse/genksyms-add-override-flag.diff b/patches.suse/genksyms-add-override-flag.diff index daad3f7..f716b7f 100644 --- a/patches.suse/genksyms-add-override-flag.diff +++ b/patches.suse/genksyms-add-override-flag.diff @@ -15,12 +15,12 @@ but it doesn't abort the build if a symtype cannot be preserved --- a/scripts/Makefile.build +++ b/scripts/Makefile.build -@@ -158,6 +158,7 @@ cmd_gensymtypes = +@@ -161,6 +161,7 @@ cmd_gensymtypes = $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ - $(GENKSYMS) $(if $(strip $(1)), -T $(@:.o=.symtypes)) -a $(ARCH) \ + $(GENKSYMS) $(if $(1), -T $(2)) -a $(ARCH) \ $(if $(KBUILD_PRESERVE),-p) \ + $(if $(KBUILD_OVERRIDE),-o) \ - -r $(firstword $(wildcard $(basename $@).symref /dev/null)) + -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ --- a/scripts/genksyms/genksyms.c diff --git a/patches.suse/gfs2-ro-mounts-only.patch b/patches.suse/gfs2-ro-mounts-only.patch deleted file mode 100644 index d6f1a3c..0000000 --- a/patches.suse/gfs2-ro-mounts-only.patch +++ /dev/null @@ -1,39 +0,0 @@ -From: Mark Fasheh -Date: Tue Dec 1 16:15:11 PST 2009 -Subject: gfs2: allow spectator mounts for migration to ocfs2 -Patch-mainline: never -References: FATE#307584 - -Lock out all writeable gfs2 mounts. This allows only spectator mounts, which -should never modify the disks. We do this to support minimal use of GFS2 for -migration of data. No performance bugs will be fixed. Any writeable mounts -are strictly prohibited. - -Signed-off-by: Mark Fasheh - ---- - fs/gfs2/ops_fstype.c | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - ---- a/fs/gfs2/ops_fstype.c -+++ b/fs/gfs2/ops_fstype.c -@@ -1134,6 +1134,19 @@ static int fill_super(struct super_block - } - sdp->sd_args = *args; - -+ if (!sdp->sd_args.ar_spectator || !(sb->s_flags & MS_RDONLY) || -+ sdp->sd_args.ar_ignore_local_fs) { -+ printk(KERN_WARNING "Only read-only GFS2 mounts are " -+ "supported.\nPlease mount with the \"spectator\" and " -+ "\"ro\" mount options\n"); -+ goto fail; -+ } -+ -+ printk(KERN_WARNING -+ "WARNING: GFS2 mounts are ONLY supported for single-node " -+ "migration of data!\nNo performance or write bugs will be " -+ "considered.\n"); -+ - if (sdp->sd_args.ar_spectator) { - sb->s_flags |= MS_RDONLY; - set_bit(SDF_NORECOVERY, &sdp->sd_flags); diff --git a/patches.suse/hung_task_timeout-configurable-default b/patches.suse/hung_task_timeout-configurable-default index cb6418c..989a0e0 100644 --- a/patches.suse/hung_task_timeout-configurable-default +++ b/patches.suse/hung_task_timeout-configurable-default @@ -31,7 +31,7 @@ Signed-off-by: Jeff Mahoney --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -209,6 +209,20 @@ config DETECT_HUNG_TASK +@@ -214,6 +214,20 @@ config DETECT_HUNG_TASK enabled then all held locks will also be reported. This feature has negligible overhead. 
diff --git a/patches.suse/init-move-populate_rootfs-back-to-start_kernel b/patches.suse/init-move-populate_rootfs-back-to-start_kernel index ca039cc..0a1a0ec 100644 --- a/patches.suse/init-move-populate_rootfs-back-to-start_kernel +++ b/patches.suse/init-move-populate_rootfs-back-to-start_kernel @@ -25,12 +25,12 @@ Patch-mainline: Probably never Signed-off-by: Jeff Mahoney --- - include/linux/init.h | 1 - include/linux/kmod.h | 2 - init/initramfs.c | 153 +++++++++++++++++++++++++++++++++++++++++++++++++-- - init/main.c | 10 +++ - kernel/kmod.c | 4 + - 5 files changed, 161 insertions(+), 9 deletions(-) + include/linux/init.h | 1 + + include/linux/kmod.h | 2 -- + init/initramfs.c | 3 +-- + init/main.c | 10 +++++++++- + kernel/kmod.c | 4 +++- + 5 files changed, 14 insertions(+), 6 deletions(-) --- a/include/linux/init.h +++ b/include/linux/init.h @@ -44,18 +44,18 @@ Signed-off-by: Jeff Mahoney --- a/include/linux/kmod.h +++ b/include/linux/kmod.h -@@ -98,8 +98,6 @@ call_usermodehelper_keys(char *path, cha - return call_usermodehelper_exec(info, wait); +@@ -109,8 +109,6 @@ call_usermodehelper(char *path, char **a + NULL, NULL, NULL); } -extern void usermodehelper_init(void); - - struct file; - extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[], - struct file **filp); + extern int usermodehelper_disable(void); + extern void usermodehelper_enable(void); + --- a/init/initramfs.c +++ b/init/initramfs.c -@@ -565,7 +709,7 @@ static void __init clean_rootfs(void) +@@ -714,7 +714,7 @@ static void __init clean_rootfs(void) } #endif @@ -64,14 +64,14 @@ Signed-off-by: Jeff Mahoney { char *err = unpack_to_rootfs(__initramfs_start, __initramfs_end - __initramfs_start); -@@ -605,4 +749,3 @@ static int __init populate_rootfs(void) +@@ -754,4 +754,3 @@ static int __init populate_rootfs(void) } return 0; } -rootfs_initcall(populate_rootfs); --- a/init/main.c +++ b/init/main.c -@@ -713,6 +713,15 @@ asmlinkage void __init start_kernel(void +@@ -696,6 +696,15 @@ asmlinkage void __init start_kernel(void check_bugs(); @@ -87,9 +87,9 @@ Signed-off-by: Jeff Mahoney acpi_early_init(); /* before LAPIC and SMP init */ sfi_init_late(); -@@ -810,7 +819,6 @@ static void __init do_basic_setup(void) +@@ -793,7 +802,6 @@ static void __init do_initcalls(void) + static void __init do_basic_setup(void) { - init_workqueues(); cpuset_init_smp(); - usermodehelper_init(); init_tmpfs(); @@ -97,9 +97,9 @@ Signed-off-by: Jeff Mahoney init_irq_proc(); --- a/kernel/kmod.c +++ b/kernel/kmod.c -@@ -531,8 +531,10 @@ int call_usermodehelper_pipe(char *path, +@@ -416,8 +416,10 @@ unlock: } - EXPORT_SYMBOL(call_usermodehelper_pipe); + EXPORT_SYMBOL(call_usermodehelper_exec); -void __init usermodehelper_init(void) +static int __init usermodehelper_init(void) diff --git a/patches.suse/kbd-ignore-gfx.patch b/patches.suse/kbd-ignore-gfx.patch index d519b18..f0b44bd 100644 --- a/patches.suse/kbd-ignore-gfx.patch +++ b/patches.suse/kbd-ignore-gfx.patch @@ -17,7 +17,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c -@@ -1261,7 +1261,7 @@ static int vgacon_font_set(struct vc_dat +@@ -1259,7 +1259,7 @@ static int vgacon_font_set(struct vc_dat unsigned charcount = font->charcount; int rc; @@ -26,7 +26,7 @@ Signed-off-by: Hannes Reinecke return -EINVAL; if (font->width != VGA_FONTWIDTH || -@@ -1279,7 +1279,7 @@ static int vgacon_font_set(struct vc_dat +@@ -1277,7 +1277,7 @@ static int vgacon_font_set(struct vc_dat static int vgacon_font_get(struct vc_data *c, struct 
console_font *font) { diff --git a/patches.suse/kconfig-automate-kernel-desktop b/patches.suse/kconfig-automate-kernel-desktop index 895b795..5b1dccf 100644 --- a/patches.suse/kconfig-automate-kernel-desktop +++ b/patches.suse/kconfig-automate-kernel-desktop @@ -15,22 +15,20 @@ Signed-off-by: Suresh Jayaraman --- a/init/Kconfig +++ b/init/Kconfig -@@ -498,6 +498,8 @@ config HAVE_UNSTABLE_SCHED_CLOCK +@@ -537,6 +537,7 @@ config HAVE_UNSTABLE_SCHED_CLOCK menuconfig CGROUPS boolean "Control Group support" depends on EVENTFD -+ default n if KERNEL_DESKTOP -+ default y ++ default !KERNEL_DESKTOP help This option adds support for grouping sets of processes together, for use with process control subsystems such as Cpusets, CFS, memory -@@ -619,7 +621,8 @@ config CGROUP_MEM_RES_CTLR_SWAP +@@ -651,7 +653,7 @@ config CGROUP_MEM_RES_CTLR_SWAP menuconfig CGROUP_SCHED bool "Group CPU scheduler" - depends on EXPERIMENTAL && CGROUPS + depends on EXPERIMENTAL - default n -+ default n if KERNEL_DESKTOP -+ default y ++ default !KERNEL_DESKTOP help This feature lets CPU scheduler recognize task groups and control CPU bandwidth allocation to such task groups. It uses cgroups to group diff --git a/patches.suse/kdb-build-fixes b/patches.suse/kdb-build-fixes deleted file mode 100644 index 5874f92..0000000 --- a/patches.suse/kdb-build-fixes +++ /dev/null @@ -1,37 +0,0 @@ -From: Jeff Mahoney -Subject: kdb: Build fixes -Patch-mainline: not yet, whenever KDB is upstream - - The includes as provided don't work when building out-of-tree, as we do - while packaging. Also, struct file->f_count is an atomic_long_t. - -Signed-off-by: Jeff Mahoney ---- - kdb/modules/kdbm_vm.c | 9 +++++---- - 1 file changed, 5 insertions(+), 4 deletions(-) - ---- a/kdb/modules/kdbm_vm.c -+++ b/kdb/modules/kdbm_vm.c -@@ -16,7 +16,9 @@ - #include - #include - --#include -+#include -+#include -+#include - #include - #include - -@@ -749,9 +751,8 @@ kdbm_filp(int argc, const char **argv) - kdb_printf(" f_dentry = 0x%p f_vfsmnt = 0x%p f_op = 0x%p\n", - f.f_dentry, f.f_vfsmnt, f.f_op); - -- kdb_printf(" f_count = " kdb_f_count_fmt -- " f_flags = 0x%x f_mode = 0x%x\n", -- atomic_long_read(&f.f_count), f.f_flags, f.f_mode); -+ kdb_printf(" f_count = %ld f_flags = 0x%x f_mode = 0x%x\n", -+ f.f_count, f.f_flags, f.f_mode); - - kdb_printf(" f_pos = %Ld\n", f.f_pos); - #ifdef CONFIG_SECURITY diff --git a/patches.suse/kdb-common b/patches.suse/kdb-common deleted file mode 100644 index 8e59168..0000000 --- a/patches.suse/kdb-common +++ /dev/null @@ -1,32572 +0,0 @@ -From: Martin Hicks -Date: Mon, 07 Dec 2009 11:52:50 -0600 -Subject: kdb-v4.4-2.6.32-common-3 -References: FATE#303971 -X-URL: ftp://oss.sgi.com/www/projects/kdb/download/v4.4/ -Patch-mainline: Probably never - -The KDB common code. - -Differences: -- We remove the binary sysctl. It changes with every release, which makes - it useless. -- Use debugger_syslog_data as provided by - patches/patches.arch/ppc64-xmon-dmesg-printing.patch. It's the same function. 
- -Acked-by: Jeff Mahoney ---- - - Documentation/kdb/bt_x86 | 1837 ++++++++ - Documentation/kdb/kdb.mm | 492 ++ - Documentation/kdb/kdb_bp.man | 197 - Documentation/kdb/kdb_bt.man | 315 + - Documentation/kdb/kdb_env.man | 46 - Documentation/kdb/kdb_ll.man | 134 - Documentation/kdb/kdb_md.man | 136 - Documentation/kdb/kdb_ps.man | 96 - Documentation/kdb/kdb_rd.man | 170 - Documentation/kdb/kdb_sr.man | 68 - Documentation/kdb/kdb_ss.man | 109 - Documentation/kdb/slides | 1382 ++++++ - Makefile | 1 - drivers/char/keyboard.c | 10 - drivers/hid/usbhid/hid-core.c | 40 - drivers/hid/usbhid/usbkbd.c | 17 - drivers/serial/8250.c | 57 - drivers/serial/8250_early.c | 29 - drivers/serial/sn_console.c | 73 - drivers/usb/core/hcd.c | 71 - drivers/usb/core/hcd.h | 11 - drivers/usb/host/ehci-hcd.c | 42 - drivers/usb/host/ehci-pci.c | 8 - drivers/usb/host/ehci-q.c | 222 + - drivers/usb/host/ohci-hcd.c | 67 - drivers/usb/host/ohci-pci.c | 8 - drivers/usb/host/ohci-q.c | 62 - drivers/usb/host/uhci-hcd.c | 218 + - drivers/usb/host/uhci-q.c | 164 - fs/proc/meminfo.c | 145 - fs/proc/mmu.c | 16 - include/asm-generic/kmap_types.h | 3 - include/linux/console.h | 5 - include/linux/dis-asm.h | 347 + - include/linux/kdb.h | 184 - include/linux/kdbprivate.h | 518 ++ - include/linux/reboot.h | 7 - init/main.c | 32 - kdb/ChangeLog | 2040 +++++++++ - kdb/Makefile | 43 - kdb/kdb_bp.c | 661 +++ - kdb/kdb_bt.c | 180 - kdb/kdb_cmds | 33 - kdb/kdb_id.c | 236 + - kdb/kdb_io.c | 859 ++++ - kdb/kdbdereference.c | 7258 ++++++++++++++++++++++++++++++++++ - kdb/kdbmain.c | 4333 ++++++++++++++++++++ - kdb/kdbsupport.c | 1155 +++++ - kdb/modules/Makefile | 14 - kdb/modules/kdbm_debugtypes.c | 388 + - kdb/modules/kdbm_pg.c | 683 +++ - kdb/modules/kdbm_sched.c | 57 - kdb/modules/kdbm_task.c | 195 - kdb/modules/kdbm_vm.c | 1040 ++++ - kdb/modules/kdbm_x86.c | 1093 +++++ - kdb/modules/lcrash/README | 3 - kdb/modules/lcrash/asm/README | 1 - kdb/modules/lcrash/asm/kl_dump_ia64.h | 199 - kdb/modules/lcrash/asm/kl_types.h | 48 - kdb/modules/lcrash/kl_alloc.h | 124 - kdb/modules/lcrash/kl_bfd.h | 31 - kdb/modules/lcrash/kl_btnode.h | 95 - kdb/modules/lcrash/kl_cmp.h | 102 - kdb/modules/lcrash/kl_copt.h | 29 - kdb/modules/lcrash/kl_debug.h | 168 - kdb/modules/lcrash/kl_dump.h | 511 ++ - kdb/modules/lcrash/kl_dump_arch.h | 124 - kdb/modules/lcrash/kl_dump_ia64.h | 199 - kdb/modules/lcrash/kl_dwarfs.h | 27 - kdb/modules/lcrash/kl_error.h | 266 + - kdb/modules/lcrash/kl_htnode.h | 71 - kdb/modules/lcrash/kl_lib.h | 65 - kdb/modules/lcrash/kl_libutil.h | 40 - kdb/modules/lcrash/kl_mem.h | 104 - kdb/modules/lcrash/kl_mem_ia64.h | 149 - kdb/modules/lcrash/kl_module.h | 69 - kdb/modules/lcrash/kl_queue.h | 89 - kdb/modules/lcrash/kl_stabs.h | 122 - kdb/modules/lcrash/kl_stringtab.h | 68 - kdb/modules/lcrash/kl_sym.h | 131 - kdb/modules/lcrash/kl_task.h | 39 - kdb/modules/lcrash/kl_typeinfo.h | 199 - kdb/modules/lcrash/kl_types.h | 54 - kdb/modules/lcrash/klib.h | 480 ++ - kdb/modules/lcrash/lc_eval.h | 225 + - kernel/exit.c | 3 - kernel/kallsyms.c | 23 - kernel/kexec.c | 15 - kernel/module.c | 19 - kernel/panic.c | 9 - kernel/sched.c | 109 - kernel/signal.c | 49 - lib/bug.c | 8 - mm/hugetlb.c | 22 - mm/mmzone.c | 4 - mm/swapfile.c | 22 - 96 files changed, 31713 insertions(+), 9 deletions(-) - ---- /dev/null -+++ b/Documentation/kdb/bt_x86 -@@ -0,0 +1,1837 @@ -+Copyright Keith Owens, 2007. 
-+ -+How the KDB backtrace for x86 works, how to diagnose problems and submit a bug -+============================================================================== -+ -+Unlike ia64, x86 architectures do not mandate unwind information in the kernel. -+gcc will include some unwind information for C functions, but not for assembler -+code. Attempts have been made to add unwind information to the assembler code -+by hand, with little success. Eventually Linus rejected the x86 unwind code -+because it was breaking too often and destroying useful debugging data. -+ -+Even if the x86 unwinder worked correctly, it would only give an accurate -+backtrace, it would not give function arguments. Needless to say, function -+arguments are what people really want. To get function arguments requires much -+more support from the compiler than simple unwind data, the compiler has to -+track line by line where each argument is held and make that data available to -+the debugger. Compiling with gcc -g will provide that information, but the -+resulting kernel is several times larger than normal. -+ -+Although the gcc -g data can be stored on another machine, there are constructs -+in the kernel that cannot be tracked by this method. i386 builds with 4K stacks -+and all x86_64 builds have multiple kernel stacks. The compiler knows nothing -+about these extra stacks and cannot backtrace through them correctly. The -+assembler code in arch/{i386,x86_64}/kernel/entry.S is a maze of twisty logic -+paths, some of which eventually jump to common labels. Describing this twisty -+logic to an unwinder is very difficult, expecially when you try to describe -+where arguments and/or registers are held). -+ -+KDB gets an accurate x86 backtrace and extracts the arguments by performing code -+decomposition and analysis at run time. This avoids the need to bloat the -+running kernel to several times its normal size with gcc -g data. KDB (unlike -+gdb) also knows about the additional kernel stacks and twisty assembler code -+paths. -+ -+The x86 backtrace code for i386 is very similar to the x86_64 code, with 80% -+common code and data. Most of the differences between the backtrace for the two -+architectures is due to the assembler code and stack handling. To avoid code -+duplication between KDB patches, the x86 backtrace code is actually stored in -+the kdb common patch, in source kdb/kdba_bt_x86.c. kdb/Makefile only builds -+kdba_bt_x86.o for i386 or x86_64. Most of the code reads as if the architecture -+is x86_64, using register names like rsp and rip. i386 is treated as a subset -+of x86_64, with fewer registers and printing the names as esp and eip. When -+this documentation refers to rsp and rip, read it as esp and eip for i386. The -+20% of code and data that is different in held in two large #ifdef sections, -+scan kdba_bt_x86.c for CONFIG_X86_64. Be careful when changing anything in the -+architecture specific sections, you will need to review the other architecture -+to see if it needs changes as well. -+ -+The idea behind the x86 backtrace is to trace one function at a time, which -+gives us the calling function. Then apply the same algorithm to the calling -+function until you unwind to the first function in the process. The starting -+point for tracing any process is to extract the current stack pointer and -+current instruction pointer (rsp and rip). 
The way that these values are -+extracted varies between running tasks and blocked tasks, the method is -+described later (Process Starting Point) but ignore it for now, just assume that -+we have a starting rsp and rip. -+ -+Given the instruction pointer (rip), we identify the start and end of the kernel -+or module function it is in, using the kernel symbol table. This is easy for C -+code, it is significantly harder for assembler code because of the twisty code -+paths that branch to common labels. The method of identifying the current -+function is described later (Identifying The Current Function) but ignore it for -+now, just assumes that we have the start and end address of the function plus -+its name. -+ -+After the rip has been mapped to a function name with sensible start and end -+addresses, the next step is to analyse the code paths in that function. KDB -+already has a built in disassembler (copied with slight modifications from -+binutils) which knows how to decode each x86 instruction. Instead of -+duplicating that logic in kdba_bt_x86, it takes advantage of the fact that you -+can override the disassembler's print function, sending the output line to a -+buffer instead of printing it. kdba_bt_x86 stills has to decode the buffer but -+that is a lot easier than decoding the x86 instruction set. -+ -+The code analysis consists of two main passes. There are example below of the -+analysis with basic block (bb) debugging activated (Examples of Basic Block -+Debugging Output). -+ -+The first pass (bb_pass1) identifies all the basic blocks in the function. For -+our purposes, a basic block has a single entry point and one or more exit -+points. The start of the function is the start of basic block 0, all other -+blocks are the target of jump instructions (conditional or unconditional) from -+within the rest of the code. A block ends with an unconditional jump or with a -+terminating instruction such as ret, iret, sysexit, sysret or ud2a (BUG). A -+block can also end because the next instruction is the start of a new block -+(target of jmp or jcc), in this case there is an implied drop through from one -+block to the next. -+ -+Although a call instruction also transfers control, it returns to the next -+instruction so call is not treated as a transfer. Instead call is treated as a -+normal instruction with side effects, the scratch registers are cleared after a -+call. -+ -+At the end of the first pass over the function we have a directed graph that -+starts at bb[0]. The nodes of the graph (bb_list[]) are the basic blocks with -+their start and end address. The vertices are the jmp or jcc instructions -+(bb_jmp_list[]) that transfer control between blocks, plus any implied drop -+through transfers between consecutive blocks. This graph can have cycles, many -+functions have loops in them which transfer control back to earlier in the code -+body. -+ -+The second pass (bb_pass2) runs over the directed graph analysing the effect of -+each instruction on the register and memory state. It is important to -+understand that the analysis in this pass is an abstract one, it does not use -+actual hex values for the register contents, instead it uses symbolic values. -+When the basic block code says that "register rsi contains value rax" it means -+that whatever value was in rax on entry to the function has also been copied to -+register rsi at this point in the logic flow. 
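(Editorial sketch, not part of the kdb patch: the names below are invented for illustration and are not the real kdba_bt_x86.c data structures.) The symbolic state can be pictured as a per-register record of which original input value is currently held, plus a constant offset; tracing an instruction updates that record rather than computing hex values:

        /* Toy model of the pass 2 bookkeeping, illustrative only. */
        enum toy_value {
                TOY_RAX, TOY_RBX, TOY_RBP, TOY_OSP,     /* original input values */
                TOY_UNDEFINED                           /* not derivable from any input */
        };

        struct toy_reg {
                enum toy_value base;    /* which original input value is held */
                int offset;             /* constant delta applied to that value */
        };

        enum toy_regno { R_RAX, R_RBX, R_RBP, R_RSP, R_NR };

        static struct toy_reg state[R_NR] = {
                [R_RAX] = { TOY_RAX, 0 },
                [R_RBX] = { TOY_RBX, 0 },
                [R_RBP] = { TOY_RBP, 0 },
                [R_RSP] = { TOY_OSP, 0 },       /* rsp enters the function as osp+0x0 */
        };

        /* "mov %rsp,%rbp": rbp now holds whatever symbolic value rsp holds. */
        static void toy_mov_reg(int src, int dst)
        {
                state[dst] = state[src];
        }

        /* "sub $0x18,%rsp": the symbolic base survives, only the offset changes. */
        static void toy_sub_imm(int dst, int imm)
        {
                state[dst].offset -= imm;
        }

        /* "imul %rbx,%rax": the result cannot be written as input value + offset. */
        static void toy_invalidate(int dst)
        {
                state[dst].base = TOY_UNDEFINED;
                state[dst].offset = 0;
        }

The offset field anticipates the "original stack pointer" (osp) bookkeeping described below; the real code also tracks stack memory slots and the clearing of scratch registers after calls, which this toy model leaves out.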
-+ -+At an abstract level, all C functions start with exactly the same state, each -+register contains its own symbolic value (except for the stack pointer, see -+later) with no local stack variables defined yet. Assembler functions tend to -+have unusual starting points, with some registers and/or memory contents defined -+differently on entry. For example, ret_from_intr on i386 already has a struct -+pt_regs on its stack, ret_from_intr on x86_64 already has a partial struct -+pt_regs plus another two words stacked on top of it. The special starting cases -+are listed in the arch specific bb_special_cases[]. -+ -+Once the input state of bb[0] has been defined (including any special cases), -+bb_pass2_do_changed_blocks() runs over all the nodes in bb_list[]. Each -+instruction in each block is analysed (Tracking the Effects of Instructions) to -+see what effect it has on the abstract register state, the analysis of each -+instruction is done in bb_usage(). An instruction can copy one register to -+another, it can copy a register to stack, move from stack to a register or it -+can invalidate the contents of a register or memory location. A general rule in -+bb_usage() is that any operation whose results cannot be calculated in terms of -+an original input value gives an undefined result. Remember that it is the -+abstract value that becomes undefined, moving a constant to a register gives a -+defined value for the view of the program but it is undefined as far as the -+abstract state is concerned. -+ -+References to data on the stack are a little awkward because the stack pointer -+frequently changes. To overcome this, kdba_bt_x86 defines a pseudo register -+called the 'original stack pointer' (osp). This always represents the stack -+pointer on entry to the function, so on entry rsp contains osp+0x0. As rsp is -+modified, it still points at osp, but its offset from osp changes. Copying rsp -+to another register (e.g. mov %rsp,%rbp) copies the osp offset as well. At the -+point that this function calls the next function down the stack, kdba_bt_x86 -+knows the delta from osp to rsp. Applying that delta to the actual value of the -+stack pointer gives the stack pointer value on input to the current function, -+that location contains the return address so we can go up one stack frame and -+repeat the process. -+ -+After doing basic block analysis on the current function, kdba_bt_x86 knows what -+the abstract register and memory state is at the point this function was -+interrupted or it called the next function down the stack, this is the exit -+state. For an interrupt the actual register values are saved in a struct -+pt_regs, for a call we have unwound from the KDB interrupt back to the called -+function so we have some idea of what the register values are in the called -+function. The abstract exit state is merged with the known actual register -+values to derive the original stack pointer. That in turn gives us any -+registers that were saved on stack. The original stack pointer gives the return -+address from the calling function, go up one stack frame and repeat the -+analysis. -+ -+ -+Process Starting Point -+====================== -+ -+All backtrace code needs a starting point which defines at least the stack -+pointer and instruction pointer, it may define other registers as well. The -+first part of kdba_bt_stack() extracts the starting point. 
Processes can be in -+one of three states, running (currently on a cpu), blocked (sleeping or ready to -+run but not currently on a cpu) or unknown. -+ -+For running processes, the current rsp and rip are dynamic. Because KDB stops -+the entire machine by sending an interrupt to the other cpus, KDB can save the -+rsp and rip for each cpu at the point where KDB is entered. This data is held -+in array kdb_running_process and is stored by kdb_save_running() and the arch -+specific kdba_save_running() functions. When backtracing a running process, KDB -+uses the data in kdb_running_process as its starting point. -+ -+For blocked processes we always have the saved rsp, it is held in the process's -+thread_info. For i386 blocked processes, thread_info also contains the saved -+rip. For x86_64 blocked processes, rip is no longer saved in thread_info, it is -+assumed that all blocked processes will resume at assembler label thread_return, -+so that rip is used on x86_64. See arch specific kdba_bt_stack_rip(). -+ -+Unknown process state only occurs when the user does 'bt '. -+Unlike other bt commands, 'bt ' does not identify any specific -+process, instead it identifies a kernel stack. must be inside a -+valid kernel stack and must point to a saved rip from a call instruction. -+kdba_bt_x86.c uses the common kdba_get_stack_info() and arch specific -+kdba_get_stack_info_alternate() functions to check that the address falls within -+a valid kernel stack. If the user gives a stack address that does not point to -+a saved rip from a call instruction then the backtrace will be garbage. -+ -+ -+Identifying The Current Function -+================================ -+ -+Given a rip value, KDB uses the kallsyms data to find the start of the function -+(first address <= rip) and the end of the function (next symbol in kallsyms). -+This works for plain C code because gcc only generates one label per function. -+It does not work for assembler code or for assembler code embedded in C -+functions, because the assembler labels appear as global entries in kallsyms. -+For example, arch/i386/kernel/entry.S has function ret_from_exception which -+contains three global labels ret_from_intr, check_userspace and -+resume_userspace. If rip points to any of those global labels, KDB wants the -+start of the real function, i.e. ret_from_exception. In addition, if rip points -+to ret_from_exception, KDB wants the end of the function to be after the last -+global label in that function, i.e. after resume_userspace. -+ -+The simplest way to handle these unwanted global labels is to list the spurious -+assembler labels, which is done in the arch specific array bb_spurious. After -+mapping rip to the nearest start and end labels from kallsyms, kdb_bb() works -+backwards until it finds a non-spurious label then works forwards to the next -+non-spurious label. That gives a real start and end address and a real name for -+the current function. -+ -+Note that this algorithm only applies in kdb_bb() when it maps rip to a suitable -+start and end address. When disassembling the code, you will still see the -+spurious label names, users need to see the extra labels. 
ret_from_exception on -+i386 disassembles like this (2.6.22) :- -+ -+[0]kdb> id ret_from_exception -+0xc0102554 ret_from_exception: cli -+0xc0102555 ret_from_intr: mov $0xfffff000,%ebp -+0xc010255a ret_from_intr+0x5: and %esp,%ebp -+0xc010255c check_userspace: mov 0x34(%esp),%eax -+0xc0102560 check_userspace+0x4: mov 0x30(%esp),%al -+0xc0102564 check_userspace+0x8: and $0x20003,%eax -+0xc0102569 check_userspace+0xd: cmp $0x3,%eax -+0xc010256c check_userspace+0x10: jb 0xc010258c resume_kernel -+0xc0102572 check_userspace+0x16: mov %esi,%esi -+0xc0102574 resume_userspace: cli -+0xc0102575 resume_userspace+0x1: mov 0x8(%ebp),%ecx -+0xc0102578 resume_userspace+0x4: and $0xfe3e,%ecx -+0xc010257e resume_userspace+0xa: jne 0xc01026f4 work_pending -+0xc0102584 resume_userspace+0x10: jmp 0xc01026a7 restore_all -+0xc0102589 resume_userspace+0x15: lea 0x0(%esi),%esi -+0xc010258c resume_kernel: cli -+ -+For the purposes of kdba_bt_x86.c, any rip from 0xc0102554 to 0xc0102589 needs -+to map to the range 0xc0102554 (start) to 0xc010258c (end) with function name -+ret_from_exception. Therefore ret_from_intr, check_userspace and -+resume_userspace are listed in bb_spurious[] for i386 so those symbols are -+ignored. The comments in bb_spurious[] list the function that encloses each -+spurious label, those comments are only for humans, they do not affect the code. -+ -+Once rip has been mapped to non-spurious labels, the module name, function name, -+start and end address are stored in variables bb_mod_name, bb_func_name, -+bb_func_start, bb_func_end. These variables are used throughout kdba_bt_x86.c -+for processing each function in turn. -+ -+Watch for changes to assembler code, especially in arch/i386/kernel/entry.S, -+arch/x86_64/kernel/entry.S and arch/x86_64/ia32/ia32entry.S. When new labels -+are added you may need to adjust bb_spurious[] for that architecture. Running -+bb_all can help identify assembler labels that have been added or deleted. -+ -+ -+Tracking the Effects of Instructions -+==================================== -+ -+bb_pass2_do_changed_blocks() uses the KDB disassembler to decode the x86 -+instructions to something a human can read. bb_dis_pass2() is used as a print -+routine to store data for a single instruction in a buffer then -+bb_parse_buffer() starts the analysis. Any instruction prefixes like lock or -+rep are stripped out. The opcode string is isolated then up to 3 operands are -+extracted (imul can have 3 operands), these are src, dst and dst2. The operand -+is matched against table bb_opcode_usage_all[] which lists all the instructions -+that actually appear in i386 and x86_64 kernels. A lot of the x86 instrcution -+set is not used by the kernel so instructions such as SSE do not appear in -+bb_opcode_usage_all[]. -+ -+Each operand is decoded by bb_parse_operand() to see whether it has a segment -+prefix, displacement, base, index or scale. An indirect call or jmp is -+identified. Operands consisting only of a register are classified as 'reg' -+type, displacements starting with '$' are immediate values otherwise the operand -+refers to a memory location. Any base or index register name is mapped to the -+abstract register name that contains it, this takes care of mapping (say) %ah to -+rax. -+ -+After decoding the opcode and all its operands, bb_usage() decides what effect -+the instruction will have on the abstract state machine. Some x86 instructions -+list all the affected registers in their operands and these can be handled as -+generic cases. 
Alas many x86 instructions have side effects and change -+registers that are not listed in the operands, these have to be handled as -+special cases. enum bb_operand_usage lists the generic and special cases. -+ -+bb_usage() is basically one huge switch statement over the special values in -+enum bb_operand_usage. For each special case it tracks the side effects of the -+instruction. Once all the special cases have been handled and converted to -+generic cases then bb_usage() handles the generic cases. -+ -+bb_usage() detects when a register is copied to another register, a register is -+copied to stack or a known stack value is copied to a register and updates the -+state data accordingly. It is particularly important that all stack pointer -+updates and copies of the stack pointer are tracked, much of the saved state is -+on stack and can be accessed via any register that points to the stack, not just -+via rsp. -+ -+i386 built with 4K stacks and all x86_64 builds have multiple kernel stacks. -+bb_usage() knows which instructions or locations are used to switch stacks and -+pretends that these instructions have no effect on the contents of rsp. The -+higher level backtrace code knows how to handle stack switching, it is too -+complicated for basic block analysis. -+ -+ -+Transfer of Control Outside the Current Function -+================================================ -+ -+Ignoring call instructions, most C code does not transfer control outside the -+current function, IOW there are no jump instructions to instructions outside the -+function. There are a few cases that this can occur for C code, inline -+assembler and tail recursion. -+ -+Tail recursion occurs when a function ends by returning the value from a second -+function and that second function has exactly the same arguments and return -+value as the current function. For example, -+ -+ int bar(int i, char *p) -+ { -+ ... do some work and return an int ... -+ } -+ -+ int foo(int i, char *p) -+ { -+ return bar(i, p); -+ } -+ -+If tail recursion is active (gcc -foptimize-sibling-calls) then instead of foo -+calling bar, bar returning to foo then foo returning to its caller, gcc will end -+foo with a direct jmp to bar. The source code says that something called foo -+but the stack trace will show bar is active, with no sign of foo on stack. When -+bar returns it will use the return address from the code that called foo. -+ -+bb_transfer() detects an unconditional jmp to code outside the function body and -+assumes that this represents tail recursion. For tail recursion to work -+correctly, all the preserved registers must contain their original values, -+bb_sanity_check() validates this. Any deviation from the expected state will -+stop basic block analysis and fall back on the old unreliable backtrace code. -+ -+Besides tail recursion in C code, assembler code can jump to labels outside the -+current function. Unfortunately this occurs all the time in the twisty -+assembler code and, to make things worse, many of these transfers are done with -+non-standard register or memory state. bb_special_case() and the arch specific -+bb_special_cases[] handle all the known special cases, including what the -+register and/or memory state should be. Any deviation from the expected state -+will stop basic block analysis and fall back on the old unreliable backtrace -+code. -+ -+ -+Locating Arguments -+================== -+ -+Function arguments can be passed in registers or on stack. 
The full ABI for
-+passing arguments is described in
-+
-+	http://www.caldera.com/developers/devspecs/abi386-4.pdf
-+	http://www.x86-64.org/documentation/abi.pdf
-+
-+The short description, ignoring special cases like passing structures by name
-+and floating point arguments which tend not to apply to the kernel, is :-
-+
-+i386.	With -mregparm=0, all arguments are passed on stack, except for
-+	functions defined as FASTCALL, where the first 3 arguments are passed in
-+	registers.
-+
-+	With -mregparm=3, the first 3 arguments are passed in registers except
-+	for functions defined as asmlinkage or with variable number of
-+	arguments, when arguments are still passed on stack.  -mregparm=3 used
-+	to be a config option, in recent kernels it is the default.
-+
-+	Arguments defined as long long (64 bit) are passed in two registers or
-+	in two locations on stack.  Being passed in two pieces makes a 64 bit
-+	argument look like two 32 bit arguments to KDB, it will be printed as
-+	two separate arguments.
-+
-+	When compiled with -mregparm=3, if a 64 bit argument is argument number
-+	2 then it will not be split between register and stack, instead it will
-+	all be on stack and the third argument register will not be used.  This
-+	makes it look like there is an extra argument in the list.  There is
-+	nothing that KDB can do to detect these corner cases with 64 bit
-+	arguments on i386, which is a pity because they can confuse users.
-+
-+	The preserved registers are ebx, ebp, esp, esi, edi.  Arguments are
-+	passed in eax, edx, ecx.  The return value is in eax.
-+
-+x86_64.	The first 6 arguments are passed in registers, the 7th and later
-+	arguments are passed on stack.  Except for functions with a variable
-+	number of arguments (e.g. printk) where all arguments are on stack
-+	except for rax which contains the number of SSE arguments (always 0 for
-+	the kernel).
-+
-+	The preserved registers are rbx, rbp, rsp, r12, r13, r14, r15.
-+	Arguments are passed in rdi, rsi, rdx, rcx, r8, r9.  The return value is
-+	in rax.
-+
-+For both architectures, kdba_bt detects an argument that is passed in a register
-+by the fact that the function code reads from that argument type register while
-+it contains its original value.  IOW, testing the value of rax, copying rax to
-+another register or storing it on stack without first overwriting rax means that
-+rax contains a useful input value.  Reading from memory which is above the
-+original stack pointer means that there is an argument at that location on
-+stack.
-+
-+There are some functions in the kernel whose definition contains arguments that
-+are not actually used.  Typically these functions are instantiations of generic
-+function definitions where some, but not all, instantiations use all the
-+arguments.  For example, a filesystem function may take flags that are not used
-+by this particular filesystem, but the function definition has to match the
-+generic filesystem declarations.  If the unused arguments are at the end of the
-+list then there is no way of telling from the object code that they exist, the
-+function that does not use the trailing arguments will have no code that refers
-+to them.  KDB will print a truncated argument list for this case.
-+
-+If the unused arguments are not at the end of the list then KDB can detect the
-+presence of the unused arguments, because there is code that refers to later
-+arguments.  KDB will print the unused argument, although gcc may have
-+overwritten the register it is in, in which case KDB prints "invalid".
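-+
-+To make the register argument detection more concrete, here is a rough sketch
-+in C of the rule "a parameter register that is read while it still holds its
-+original value is an input argument".  The names bb_param_reg[] and
-+bb_reg_read() appear in the worked example later in this document, but the
-+types and layout below are invented for illustration and do not match the
-+real kdba_bt_x86.c :-
-+
-+	/* Illustrative sketch only, not the real kdba_bt_x86.c code.  At
-+	 * function entry every bb_value[r] is initialised to r, meaning
-+	 * "register r still holds its original input value".
-+	 */
-+	enum bb_reg { BB_RAX, BB_RBX, BB_RCX, BB_RDX,
-+		      BB_RDI, BB_RSI, BB_RBP, BB_RSP, BB_NR_REGS };
-+
-+	/* i386 with -mregparm=3 passes the first arguments in eax, edx, ecx. */
-+	static const enum bb_reg bb_param_reg[] = { BB_RAX, BB_RDX, BB_RCX };
-+
-+	static enum bb_reg bb_value[BB_NR_REGS];
-+	static int bb_reg_params;		/* register arguments seen so far */
-+
-+	static void bb_reg_read(enum bb_reg reg)
-+	{
-+		unsigned int i;
-+
-+		if (bb_value[reg] != reg)	/* overwritten, no longer an input */
-+			return;
-+		for (i = 0; i < sizeof(bb_param_reg) / sizeof(bb_param_reg[0]); ++i)
-+			if (bb_param_reg[i] == reg && (int)(i + 1) > bb_reg_params)
-+				bb_reg_params = i + 1;
-+	}
-+
-+The final value of bb_reg_params is the number of register arguments to
-+report, which is where the 'bb_reg_params 1' line in the getname() trace later
-+in this document comes from.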
-+ -+Originally kdba_bt_x86 would detect that there was no reference to arguments in -+registers but there were still references to arguments on stack and assume that -+the function had all its arguments on stack. Unfortunately this did not work -+with the large number of 'pass through' functions in the kernel. A 'pass -+through' function is one which calls another function with roughly the same -+argument list and makes no other reference to the register arguments. For -+example, ipv4_doint_and_flush_strategy() takes 7 arguments, calls -+devinet_conf_sysctl() with those 7 arguments in the same order and has no other -+reference to any of its arguments. -+ -+Pass through functions do not touch the arguments that are passed in registers -+because they are already in the right location for the routine about to be -+called, so the pass through function has no code that references the argument -+registers. No code means that kdba_bt_x86 cannot tell if the function has -+register arguments or not. The arguments passed on stack must always be copied -+to the new stack frame, even for pass through functions, so the arguments on -+stack can always be detected. -+ -+kdba_bt_x86 was changed to assume that if there are any arguments on stack then -+there are always arguments in registers, except for a list of functions that are -+known to be asmlinkage or to have a variable number of arguments. -+bb_assume_pass_through() ignores the known special cases, for other functions -+which have stack arguments but no register arguments it assumes the function is -+pass through and prints a warning about that assumption. -+ -+The above heuristics mean that there is one case that kdba_bt_x86 cannot detect: -+pass through functions where all the arguments are in registers. These have no -+argument references at all in their code, so they are printed with no arguments. -+All they do is call another function so this class of functions never fails, or -+if it does fail then it is due to something that is not argument related. If -+the failure is further down the call stack then the arguments are printed at the -+next function down the stack, so the user still has the arguments. -+ -+This list of limitations on getting the x86 arguments may seem to be a long one, -+but kdba_bt_x86 gives sensible results for most functions. For kernel -+debugging, any arguments are far better than none at all. -+ -+ -+Kernel Stack Switching -+====================== -+ -+Understanding the unusual way that x86 kernel stacks are used is very important -+when diagnosing backtrace problems. Every process has its own normal kernel -+stack, even processes that run entirely within the kernel such as kthread or the -+per cpu migration processes. The normal stacks are 4K or 8K on i386 (depending -+on CONFIG_4KSTACKS) and 8K on x86_64. The normal stacks are global, they are -+not tied to any cpu. -+ -+For i386 with 8K stacks there are no other kernel stacks so there is no stack -+switching to worry about. -+ -+For i386 with 4K process stacks, each cpu also has a 4K soft irq stack and a 4K -+hard irq stack. It is possible for a process to be running on its own process -+stack, for the process to be interrupted by a soft irq which is then interrupted -+by a hard irq. At that point the backtrace is split between the hard irq, the -+soft irq and the normal normal stacks. -+ -+On x86_64, each cpu always has stacks for stackfault, doublefault, nmi, debug, -+mce and interrupts. See Documentation/x86_64/kernel-stacks. 
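-+
-+Working out which of these stacks an address lies on (the job of the arch
-+specific kdba_get_stack_info_alternate() described below) is essentially a
-+range check against each candidate stack.  The sketch below is illustrative
-+only; the field names mirror the ar->stack output shown in the bug report
-+section later on, everything else is invented :-
-+
-+	#include <stddef.h>
-+
-+	/* Sketch only, not the real kdba_get_stack_info_alternate(). */
-+	struct stack_range {
-+		unsigned long physical_start;	/* lowest address of this stack */
-+		unsigned long physical_end;	/* one byte past the top */
-+		unsigned long logical_start;	/* first word usable for backtrace */
-+		unsigned long logical_end;
-+		unsigned long next;		/* saved rsp pointing at the next stack */
-+		const char *id;			/* e.g. "hardirq_ctx" */
-+	};
-+
-+	static const struct stack_range *
-+	classify_stack(unsigned long addr, const struct stack_range *s, int n)
-+	{
-+		int i;
-+
-+		for (i = 0; i < n; ++i)
-+			if (addr >= s[i].physical_start && addr < s[i].physical_end)
-+				return &s[i];
-+		return NULL;			/* not on any known kernel stack */
-+	}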
-+ -+The arch specific kdba_get_stack_info_alternate() function works out which stack -+the backtrace starts on, how big the stack is and how to switch to the next -+stack. This information is stored in the kdb_activation_record and used by the -+higher level backtrace code to detect a stack switch. -+ -+The normal stack has some padding at the end, this reflects the stack pointer -+when the process was created in the kernel. kdba_bt_x86 cannot backtrace -+through this padding data, mainly because the code that set the nitial stack -+pointer no longer exists after boot. ARCH_NORMAL_PADDING defines how many words -+to ignore at the end of the normal stack. -+ -+ -+Debugging KDB -+============= -+ -+KDB has conditional debugging print statements scattered throughout the code. -+If KDB is not behaving as expected, you can turn on debugging and rerun the -+command. Before debugging KDB, set LINES 10000 and capture the output via a -+serial console. If using minicom, turn on word wrap (control-A W) and capture -+mode (control-A L). If you are using a serial console via a serial to Ethernet -+interface using ssh or telnet, use the 'script' command to start the session. -+ -+The various KDB_DEBUG_FLAG_* flags are listed in include/linux/kdbprivate.h. -+You set debug with 'set KDBDEBUG 0xnn' where nn is the or'd value of the desired -+flags. 'set KDBDEBUG 0' turns off KDB debugging. When diagnosing x86 backtrace -+problems, the most useful debug flags are -+ -+ KDB_DEBUG_FLAG_ARA 0x10 Activation record, arch specific -+ KDB_DEBUG_FLAG_BB_SUMM 0x04 Basic block analysis, summary only -+ KDB_DEBUG_FLAG_BB 0x20 All basic block analysis -+ -+ARA prints information about the different kernel stacks as kdba_bt_x86 unwinds -+through the switched kernel stacks. BB_SUMM prints a summary of the basic block -+analysis for each function, including the abstract exit state and the rollback -+calculations. BB prints a huge amount of basic block debugging, you probably -+only want to turn this for the full backtrace on as a last resort. -+ -+I find 'set KDBDEBUG 0x14' to be best to get an overview of a problem. It gives -+both the kernel stack information plus the abstract state and actual location of -+data for each function. -+ -+Command 'bb1' does a detailed debug session for a single function, bb1 takes a -+single parameter, the address of the exit point from the function, by number, -+not by name. bb1 turns on KDB_DEBUG_FLAG_BB, does basic block analysis for the -+function that contains the exit point then resets the debug flags to their -+previous value. -+ -+Command 'bb_all' runs through every function in the base kernel (not module -+functions) and does a basic block analysis of every function. It also validates -+the various tables in kdba_bt_x86 where possible. bb_all is meant for the KDB -+maintainer to check that all the base kernel function pass the sanity checks, it -+can also be used by end users when reporting a bug. bb_all takes no parameters. -+It prints a '.' for every 100 functions it has analysed and allows for up to 20 -+errors before giving up. The output from bb_all also includes the config -+variables that affect basic block analysis plus any assumptions about 'pass -+through' functions. -+ -+ -+Submitting a Bug Report Against kdba_bt_x86 -+=========================================== -+ -+Capture the KDB output via a serial console. -+ -+set LINES 10000 -+set BTSP 1 -+set KDBDEBUG 0x14 -+Reproduce the problem. 
-+set KDBDEBUG 0 -+bb_all -+ -+If you can identify the rip/eip where kdba_bt_x86 gets confused, run bb1 with -+that address. -+ -+Find each set of output from kdba_get_stack_info in the trace, extract the last -+two lines and type those lines into KDB. That will give a hex and symbolic dump -+of the raw kernel stacks. For example, if the trace data is -+ -+kdba_get_stack_info: esp=0xc04fbef8 cpu=0 task=c047b3e0 -+kdba_get_stack_info: ar->stack -+ physical_start=0xc04fb000 -+ physical_end=0xc04fc000 -+ logical_start=0xc04fb038 -+ logical_end=0xc04fc000 -+ next=0xc04b4f44 -+ id=hardirq_ctx -+ set MDCOUNT 1024 -+ mds 0xc04fb000 -+ -+then type the last two lines into KDB. Repeat this for each stack listed by -+kdba_get_stack_info on the failing backtrace. -+ -+Send all the console output to the KDB maintainer. -+ -+ -+Examples of Basic Block Debugging Output -+======================================== -+ -+Example of the basic block analysis of fs/namei::getname() on i386. Kernel -+2.6.22, i386, compiled with frame pointers, gcc 4.1.0. -+ -+Basic block debugging is very verbose, so set a high number of output lines. -+You really need a reliable serial console to capture this amount of output. -+ -+ [0]kdb> set LINES 10000 -+ -+A simple disassemble of getname(). This is not required for debugging purposes -+since each instruction is printed as part of basic block debugging, but this can -+be easier to read. -+ -+ [0]kdb> id getname -+ 0xc015cce8 getname: push %ebp -+ 0xc015cce9 getname+0x1: mov %esp,%ebp -+ 0xc015cceb getname+0x3: push %edi -+ 0xc015ccec getname+0x4: push %esi -+ 0xc015cced getname+0x5: push %ebx -+ 0xc015ccee getname+0x6: sub $0x4,%esp -+ 0xc015ccf1 getname+0x9: mov %eax,%edi -+ 0xc015ccf3 getname+0xb: mov $0xd0,%edx -+ 0xc015ccf8 getname+0x10: mov 0xc04b2120,%eax -+ 0xc015ccfd getname+0x15: call 0xc0153009 kmem_cache_alloc -+ 0xc015cd02 getname+0x1a: mov %eax,0xfffffff0(%ebp) -+ 0xc015cd05 getname+0x1d: mov $0xfffffff4,%eax -+ 0xc015cd0a getname+0x22: cmpl $0x0,0xfffffff0(%ebp) -+ 0xc015cd0e getname+0x26: je 0xc015cd7d getname+0x95 -+ 0xc015cd10 getname+0x28: mov %esp,%eax -+ 0xc015cd12 getname+0x2a: and $0xfffff000,%eax -+ 0xc015cd17 getname+0x2f: cmpl $0xffffffff,0x18(%eax) -+ 0xc015cd1b getname+0x33: je 0xc015cd39 getname+0x51 -+ 0xc015cd1d getname+0x35: mov $0xfffffff2,%esi -+ 0xc015cd22 getname+0x3a: cmp $0xbfffffff,%edi -+ 0xc015cd28 getname+0x40: ja 0xc015cd60 getname+0x78 -+ 0xc015cd2a getname+0x42: mov $0xc0000000,%ebx -+ 0xc015cd2f getname+0x47: sub %edi,%ebx -+ 0xc015cd31 getname+0x49: cmp $0xfff,%ebx -+ 0xc015cd37 getname+0x4f: jbe 0xc015cd3e getname+0x56 -+ 0xc015cd39 getname+0x51: mov $0x1000,%ebx -+ 0xc015cd3e getname+0x56: mov %ebx,%ecx -+ 0xc015cd40 getname+0x58: mov %edi,%edx -+ 0xc015cd42 getname+0x5a: mov 0xfffffff0(%ebp),%eax -+ 0xc015cd45 getname+0x5d: call 0xc023dbb4 strncpy_from_user -+ 0xc015cd4a getname+0x62: cmp $0x0,%eax -+ 0xc015cd4d getname+0x65: jle 0xc015cd5a getname+0x72 -+ 0xc015cd4f getname+0x67: mov $0xffffffdc,%esi -+ 0xc015cd54 getname+0x6c: cmp %ebx,%eax -+ 0xc015cd56 getname+0x6e: jae 0xc015cd60 getname+0x78 -+ 0xc015cd58 getname+0x70: jmp 0xc015cd71 getname+0x89 -+ 0xc015cd5a getname+0x72: je 0xc015cd76 getname+0x8e -+ 0xc015cd5c getname+0x74: jge 0xc015cd71 getname+0x89 -+ 0xc015cd5e getname+0x76: mov %eax,%esi -+ 0xc015cd60 getname+0x78: mov 0xfffffff0(%ebp),%edx -+ 0xc015cd63 getname+0x7b: mov 0xc04b2120,%eax -+ 0xc015cd68 getname+0x80: call 0xc01521f1 kmem_cache_free -+ 0xc015cd6d getname+0x85: mov %esi,%eax -+ 0xc015cd6f 
getname+0x87: jmp 0xc015cd7d getname+0x95 -+ 0xc015cd71 getname+0x89: mov 0xfffffff0(%ebp),%eax -+ 0xc015cd74 getname+0x8c: jmp 0xc015cd7d getname+0x95 -+ 0xc015cd76 getname+0x8e: mov $0xfffffffe,%esi -+ 0xc015cd7b getname+0x93: jmp 0xc015cd60 getname+0x78 -+ 0xc015cd7d getname+0x95: pop %edx -+ 0xc015cd7e getname+0x96: pop %ebx -+ 0xc015cd7f getname+0x97: pop %esi -+ 0xc015cd80 getname+0x98: pop %edi -+ 0xc015cd81 getname+0x99: pop %ebp -+ 0xc015cd82 getname+0x9a: ret -+ -+The bb1 command only one argument which must be an address, not a name. bb1 -+turns on full basic block debugging and analyses the function containing the -+supplied address. Give bb1 the address of the exit point from this function, -+IOW the return address that is stored on stack due to a call from this function -+to the next function down the call stack. Assume that getname() has called -+kmem_cache_free() and something went wrong in kmem_cache_free() or one of the -+functions that it calls. The call to kmem_cache_free is at 0xc015cd68 and the -+return address on stack is the instruction after the call, i.e. 0xc015cd6d, so -+ -+ [0]kdb> bb1 0xc015cd6d -+ bb_pass1: func_name getname func_start 0xc015cce8 func_end 0xc015cd83 -+ -+bb_pass1 has identified the function name and its start and end address. For C -+functions these are just the function start address and the next symbol in -+kallsyms. For Assembler code there may be spurious labels so the function name -+may not match the label prior to the address given to bb1. For an example of -+that on i386, find the address of resume_userspace then pass that address to the -+bb1 KDB command. -+ -+ bb_pass1: end -+ bb[0] start 0xc015cce8 end 0xc015cd38 drop_through 1 -+ bb[1] start 0xc015cd39 end 0xc015cd3d drop_through 1 -+ bb[2] start 0xc015cd3e end 0xc015cd58 drop_through 0 -+ bb[3] start 0xc015cd5a end 0xc015cd5f drop_through 1 -+ bb[4] start 0xc015cd60 end 0xc015cd6f drop_through 0 -+ bb[5] start 0xc015cd71 end 0xc015cd74 drop_through 0 -+ bb[6] start 0xc015cd76 end 0xc015cd7b drop_through 0 -+ bb[7] start 0xc015cd7d end 0xc015cd82 drop_through 0 -+ bb_jmp[0] from 0xc015cd0e to 0xc015cd7d drop_through 0 -+ bb_jmp[1] from 0xc015cd1b to 0xc015cd39 drop_through 0 -+ bb_jmp[2] from 0xc015cd28 to 0xc015cd60 drop_through 0 -+ bb_jmp[3] from 0xc015cd37 to 0xc015cd3e drop_through 0 -+ bb_jmp[4] from 0xc015cd4d to 0xc015cd5a drop_through 0 -+ bb_jmp[5] from 0xc015cd56 to 0xc015cd60 drop_through 0 -+ bb_jmp[6] from 0xc015cd58 to 0xc015cd71 drop_through 0 -+ bb_jmp[7] from 0xc015cd5a to 0xc015cd76 drop_through 0 -+ bb_jmp[8] from 0xc015cd5c to 0xc015cd71 drop_through 0 -+ bb_jmp[9] from 0xc015cd6f to 0xc015cd7d drop_through 0 -+ bb_jmp[10] from 0xc015cd74 to 0xc015cd7d drop_through 0 -+ bb_jmp[11] from 0xc015cd7b to 0xc015cd60 drop_through 0 -+ bb_jmp[12] from 0xc015cd38 to 0xc015cd39 drop_through 1 -+ bb_jmp[13] from 0xc015cd3d to 0xc015cd3e drop_through 1 -+ bb_jmp[14] from 0xc015cd5f to 0xc015cd60 drop_through 1 -+ -+After analysing the logic flow, we can see that getname() consists of 8 basic -+blocks (nodes in bb_list[]). 5 of these blocks end in unconditional jumps, the -+other 3 drop through to the next block. There are 15 transfers of control -+(vertices in bb_jmp_list[]). 12 of these transfers are explicit jmp or jcc -+instructions, the other 3 are implicit transfers when dropping through from one -+block to the next. The node list is sorted by start address, the vertex list is -+not sorted. 
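-+
-+The two lists are easier to read with the shape of the data in mind.  Roughly
-+(this is a sketch, the real definitions in kdba_bt_x86.c carry more fields, in
-+particular the saved register state that is attached to each vertex) :-
-+
-+	/* Sketch of the lists built by pass 1. */
-+	struct bb {				/* node: one basic block */
-+		unsigned long start;		/* address of first instruction */
-+		unsigned long end;		/* address of last instruction */
-+		unsigned int drop_through;	/* 1: falls into the next block */
-+	};
-+
-+	struct bb_jmp {				/* vertex: one transfer of control */
-+		unsigned long from;		/* address the transfer is made from */
-+		unsigned long to;		/* start address of the target block */
-+		unsigned int drop_through;	/* 1: implicit fall through */
-+	};
-+
-+	static struct bb bb_list[256];		/* sorted by start address */
-+	static struct bb_jmp bb_jmp_list[256];	/* discovery order, unsorted */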
-+ -+Basic block 0 starts at the function start (0xc015cce8) and ends at 0xc015cd38. -+0xc015cd39 is the target of a jump instruction (0xc015cd1b: je 0xc015cd39) so -+0xc015cd39 starts a new block, which means that 0xc015cd38 becomes the end of -+the previous block. Because bb[0] does not end in an explicit jmp instruction, -+there is a drop through from the end of bb[0] to the start of bb[1], see -+bb_jmp[12]. -+ -+ bb_pass2: start -+ -+To get the most accurate results from pass2, try to scan the directed graph by -+only looking at nodes whose inputs are all defined. Initially only process -+nodes with no missing inputs. -+ -+ bb_pass2_do_changed_blocks: allow_missing 0 -+ -+ bb[0] -+ bb_reg_state c07282e0 -+ rax = rax -+ rbx = rbx -+ rcx = rcx -+ rdx = rdx -+ rdi = rdi -+ rsi = rsi -+ rbp = rbp -+ rsp = osp+0x0 -+ -+The initial state for bb[0] is the same for all C functions. Each register -+contains its own abstract value, except for rsp which is defined in terms of the -+original stack pointer (osp). -+ -+ '0xc015cce8 getname: push %ebp' -+ -+The first instruction of getname() saves the frame pointer. -+ -+ opcode 'push' matched by 'push', usage 44 -+ src R: %ebp base_rc 8 (rbp) -+ -+bb_usage() reports how the instruction was recognised and how its operands were -+decoded. Although this is i386 (ebp), it is reported as rbp. Using the x86_64 -+names for registers throughout makes it easier to create common code for the two -+architecures. -+ -+ rsp osp offset +0x0 -> -0x4 -+ -+A push instruction decrements rsp by 4 (i386) or 8 (x86_64) bytes. rsp -+originally contained the original stack pointer (osp), now it contains the -+original stack pointer - 4. -+ -+ *(rsp+0x0 osp-0x4) = rbp slot 0 -+ -+The stack location pointed to by *rsp now contains the original value of rbp. -+Since rsp contains (osp-0x4), *(osp-0x4) contains rbp. It is slot 0 in the -+memory array associated with the register state. -+ -+ '0xc015cce9 getname+0x1: mov %esp,%ebp' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %esp base_rc 9 (rsp) -+ dst R: %ebp base_rc 8 (rbp) -+ rbp = rsp (osp-0x4) -+ -+Copy esp (rsp) to ebp (rbp). rsp contained (osp-0x4) so rbp also contains -+(osp-0x4). Any reference to data via either rbp or rsp will now be tracked as a -+stack location. -+ -+ '0xc015cceb getname+0x3: push %edi' -+ opcode 'push' matched by 'push', usage 44 -+ src R: %edi base_rc 6 (rdi) -+ rsp osp offset -0x4 -> -0x8 -+ *(rsp+0x0 osp-0x8) = rdi slot 1 -+ '0xc015ccec getname+0x4: push %esi' -+ opcode 'push' matched by 'push', usage 44 -+ src R: %esi base_rc 7 (rsi) -+ rsp osp offset -0x8 -> -0xc -+ *(rsp+0x0 osp-0xc) = rsi slot 2 -+ '0xc015cced getname+0x5: push %ebx' -+ opcode 'push' matched by 'push', usage 44 -+ src R: %ebx base_rc 3 (rbx) -+ rsp osp offset -0xc -> -0x10 -+ *(rsp+0x0 osp-0x10) = rbx slot 3 -+ -+Push 3 registers to stack. rsp is adjusted for each push and stack locations -+are assigned to contain the values of edi, esi and ebx. This sequence is very -+common in i386 C functions. edi, esi and ebx are preserved registers on i386, -+but gcc wants to use them for scratch space. The original contents iof these -+registers must be saved on stack and restored before returning to our caller. -+ -+ '0xc015ccee getname+0x6: sub $0x4,%esp' -+ opcode 'sub' matched by 'sub', usage 51 -+ src I: $0x4 -+ dst R: %esp base_rc 9 (rsp) -+ rsp osp offset -0x10 -> -0x14 -+ -+Subtract 4 bytes from esp. This defines the local stack variables. Sorry, -+names for local stack variables are not available to KDB. 
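-+
-+The prologue handling above boils down to "move the rsp offset down by one
-+word and remember which register now lives at that osp offset".  A minimal
-+sketch with invented names (the real code keeps this in bb_reg_state and its
-+memory slots) :-
-+
-+	/* Sketch: effect of 'push %reg' on the abstract state. */
-+	#define WORD_SIZE	4		/* 4 on i386, 8 on x86_64 */
-+	#define MAX_SLOTS	64
-+
-+	struct mem_slot {
-+		long offset;			/* offset of the slot from osp */
-+		int reg;			/* original register saved there */
-+	};
-+
-+	static long rsp_offset;			/* rsp == osp + rsp_offset */
-+	static struct mem_slot slot[MAX_SLOTS];
-+	static int nr_slots;
-+
-+	static void track_push(int reg)
-+	{
-+		rsp_offset -= WORD_SIZE;	/* push decrements rsp by one word */
-+		if (nr_slots < MAX_SLOTS) {	/* *(osp + rsp_offset) now holds reg */
-+			slot[nr_slots].offset = rsp_offset;
-+			slot[nr_slots].reg = reg;
-+			++nr_slots;
-+		}
-+	}
-+
-+Copying rsp to another register, as the 'mov %esp,%ebp' above does, just
-+copies rsp_offset, which is why later references through ebp can still be
-+resolved to osp offsets.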
-+ -+ '0xc015ccf1 getname+0x9: mov %eax,%edi' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %eax base_rc 2 (rax) -+ dst R: %edi base_rc 6 (rdi) -+ rdi = rax (rax) -+ -+Having saved edi on stack, gcc now overwrites edi with eax. At this point rax -+still contains its original value, so rdi now contains a copy of rax, as well as -+the original value which is still in rax. This is a common sequence in C code. -+rax contains argument 0 but it is also a scratch register. If the code needs to -+use argument 0 later then its value must be saved somewhere before executing any -+instruction that changes rax. edi is a preserved register so its contents will -+not be changed by any function that we call, or if it is changed then it will be -+restored before returning to this function. -+ -+rax is listed in the arch specific bb_param_reg[] list and the code is reading -+from rax while it still contains its original value. The only way that makes -+any sense is when rax is an input argument to getname(). We note that fact in -+bb_reg_read(). -+ -+ '0xc015ccf3 getname+0xb: mov $0xd0,%edx' -+ opcode 'mov' matched by 'mov', usage 36 -+ src I: $0xd0 -+ dst R: %edx base_rc 5 (rdx) -+ rdx = undefined -+ -+Moving an constant value to edx. Although this is a constant, it does not refer -+to any of the original values that were supplied to this function. Therefore -+rdx becomes undefined for the purposes of the code analysis. -+ -+ '0xc015ccf8 getname+0x10: mov 0xc04b2120,%eax' -+ opcode 'mov' matched by 'mov', usage 36 -+ src M: 0xc04b2120 -+ dst R: %eax base_rc 2 (rax) -+ rax = undefined -+ -+Moving a constant value to eax makes rax undefined. -+ -+ '0xc015ccfd getname+0x15: call 0xc0153009 ' -+ opcode 'call' matched by 'call', usage 17 -+ src M: 0xc0153009 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = rbx -+ rcx = rcx -+ rdx = undefined -+ rdi = rax -+ rsi = rsi -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ -+Basic block debugging prints the register and memory state when transfering -+control between blocks and when issuing call instructions. The call state is -+mainly useful when C code calls assembler routines, especially if you are not -+sure what state the assembler code expects. Not all of our assembler is as well -+documented as it could be :( -+ -+ rax = undefined -+ rcx = undefined -+ rdx = undefined -+ -+The i386 ABI says that some registers are preserved across calls, see the arch -+specific bb_preserved_reg[] list. Any registers not in that list automatically -+become undefined after a call instruction. -+ -+ '0xc015cd02 getname+0x1a: mov %eax,0xfffffff0(%ebp)' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %eax base_rc 2 (rax) -+ dst M: 0xfffffff0(%ebp) base_rc 8 (rbp) -+ -+eax is the return value from the call, it is being saved at offset 0xfffffff0 -+(-0x10) from ebp. Since rbp contains (osp-0x4) the return value is being stored -+at (osp-0x14). This is a stack location but we have no record of any data being -+held at that location, it is part of the local stack variables. 
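-+
-+As noted above for the call instruction, everything outside bb_preserved_reg[]
-+loses its tracked value.  A sketch of that step; the register numbering and
-+the helper name are invented, the preserved list is the i386 one quoted
-+earlier (ebx, ebp, esp, esi, edi) :-
-+
-+	/* Sketch: after a call, non-preserved registers become undefined. */
-+	enum { RAX, RBX, RCX, RDX, RDI, RSI, RBP, RSP, NR_REGS };
-+
-+	static const int bb_preserved_reg[] = { RBX, RBP, RSP, RSI, RDI };
-+	static int reg_defined[NR_REGS];	/* 1 while the abstract value is valid */
-+
-+	static void invalidate_after_call(void)
-+	{
-+		int reg, preserved;
-+		unsigned int i;
-+
-+		for (reg = 0; reg < NR_REGS; ++reg) {
-+			preserved = 0;
-+			for (i = 0; i < sizeof(bb_preserved_reg) / sizeof(bb_preserved_reg[0]); ++i)
-+				if (bb_preserved_reg[i] == reg)
-+					preserved = 1;
-+			if (!preserved)
-+				reg_defined[reg] = 0;	/* rax, rcx, rdx above */
-+		}
-+	}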
-+ -+ '0xc015cd05 getname+0x1d: mov $0xfffffff4,%eax' -+ opcode 'mov' matched by 'mov', usage 36 -+ src I: $0xfffffff4 -+ dst R: %eax base_rc 2 (rax) -+ rax = undefined -+ '0xc015cd0a getname+0x22: cmpl $0x0,0xfffffff0(%ebp)' -+ opcode 'cmpl' matched by 'cmp', usage 3 -+ src I: $0x0 -+ dst M: 0xfffffff0(%ebp) base_rc 8 (rbp) -+ '0xc015cd0e getname+0x26: je 0xc015cd7d ' -+ opcode 'je' matched by 'j', usage 28 -+ src M: 0xc015cd7d -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = rbx -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = rsi -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ -+Transfer of control, print the register and memory state. -+ -+ matched: from 0xc015cd0e to 0xc015cd7d drop_through 0 bb_jmp[0] -+ -+Which bb_jmp_list[] entry matches this transfer. -+ -+ new state c07286b8 -+ -+The current abstract register and memory state is cloned at address c07286b8. -+This state becomes one of the inputs to the basic block whose start address is -+0xc015cd7d. -+ -+ '0xc015cd10 getname+0x28: mov %esp,%eax' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %esp base_rc 9 (rsp) -+ dst R: %eax base_rc 2 (rax) -+ rax = rsp (osp-0x14) -+ -+Copy rsp which contains (osp-0x14) to rax. rax contains a valid stack pointer. -+ -+ '0xc015cd12 getname+0x2a: and $0xfffff000,%eax' -+ opcode 'and' matched by 'and', usage 11 -+ src I: $0xfffff000 -+ dst R: %eax base_rc 2 (rax) -+ rax = undefined -+ -+But not for long. -+ -+ '0xc015cd17 getname+0x2f: cmpl $0xffffffff,0x18(%eax)' -+ opcode 'cmpl' matched by 'cmp', usage 3 -+ src I: $0xffffffff -+ dst M: 0x18(%eax) base_rc 2 (rax) -+ '0xc015cd1b getname+0x33: je 0xc015cd39 ' -+ opcode 'je' matched by 'j', usage 28 -+ src M: 0xc015cd39 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = rbx -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = rsi -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ -+Another transfer of control, print the state. -+ -+ matched: from 0xc015cd1b to 0xc015cd39 drop_through 0 bb_jmp[1] -+ -+Which bb_jmp_list[] entry was used. -+ -+ reuse bb_jmp[0] -+ -+To save space, we only clone the state if it is different. Otherwise we reuse -+the state from another vertex and bump the reference count. -+ -+ '0xc015cd1d getname+0x35: mov $0xfffffff2,%esi' -+ opcode 'mov' matched by 'mov', usage 36 -+ src I: $0xfffffff2 -+ dst R: %esi base_rc 7 (rsi) -+ rsi = undefined -+ -+Using esi as a scratch register, even though the i386 ABi says that esi is a -+preserved register. Not to worry, the original value of rsi was saved on stack -+on entry and it will be restored before exit. 
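-+
-+The 'reuse bb_jmp[0]' line above is the space optimisation in action:
-+identical states are shared and reference counted instead of being cloned.  A
-+rough sketch of the idea, with invented types and a userspace malloc purely
-+for illustration :-
-+
-+	#include <stdlib.h>
-+	#include <string.h>
-+
-+	struct state {
-+		int ref_count;
-+		unsigned char reg[16];		/* abstract value per register */
-+	};
-+
-+	/* Attach the current state to a vertex: reuse an identical saved state
-+	 * if one exists ("reuse bb_jmp[n]"), otherwise clone it ("new state").
-+	 */
-+	static struct state *save_state(const struct state *cur,
-+					struct state **saved, int nr_saved)
-+	{
-+		struct state *s;
-+		int i;
-+
-+		for (i = 0; i < nr_saved; ++i) {
-+			if (memcmp(saved[i]->reg, cur->reg, sizeof(cur->reg)) == 0) {
-+				++saved[i]->ref_count;
-+				return saved[i];
-+			}
-+		}
-+		s = malloc(sizeof(*s));
-+		if (!s)
-+			return NULL;
-+		memcpy(s->reg, cur->reg, sizeof(s->reg));
-+		s->ref_count = 1;
-+		return s;
-+	}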
-+ -+ '0xc015cd22 getname+0x3a: cmp $0xbfffffff,%edi' -+ opcode 'cmp' matched by 'cmp', usage 3 -+ src I: $0xbfffffff -+ dst R: %edi base_rc 6 (rdi) -+ '0xc015cd28 getname+0x40: ja 0xc015cd60 ' -+ opcode 'ja' matched by 'j', usage 28 -+ src M: 0xc015cd60 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = rbx -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd28 to 0xc015cd60 drop_through 0 bb_jmp[2] -+ new state c0728710 -+ -+This state is different from any states already saved, clone to a new entry. -+ -+ '0xc015cd2a getname+0x42: mov $0xc0000000,%ebx' -+ opcode 'mov' matched by 'mov', usage 36 -+ src I: $0xc0000000 -+ dst R: %ebx base_rc 3 (rbx) -+ rbx = undefined -+ '0xc015cd2f getname+0x47: sub %edi,%ebx' -+ opcode 'sub' matched by 'sub', usage 51 -+ src R: %edi base_rc 6 (rdi) -+ dst R: %ebx base_rc 3 (rbx) -+ rbx = undefined -+ '0xc015cd31 getname+0x49: cmp $0xfff,%ebx' -+ opcode 'cmp' matched by 'cmp', usage 3 -+ src I: $0xfff -+ dst R: %ebx base_rc 3 (rbx) -+ '0xc015cd37 getname+0x4f: jbe 0xc015cd3e ' -+ opcode 'jbe' matched by 'j', usage 28 -+ src M: 0xc015cd3e -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd37 to 0xc015cd3e drop_through 0 bb_jmp[3] -+ new state c0728768 -+ -+This state is different from any states already saved, clone to a new entry. -+ -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd38 to 0xc015cd39 drop_through 1 bb_jmp[12] -+ reuse bb_jmp[3] -+ -+Basic block 0 drops through to basic block 1, treat it as an implicit transfer -+of control. The state is the same as the previous jump instruction so reuse it -+and bump the reference count. -+ -+That ends basic block 0, now pick the next block in the list that (a) needs to -+be scanned and (b) has all its input states defined. In this case bb[1]. -+ -+ bb[1] -+ -+bb[1] starts at 0xc015cd39 and has two paths that transfer control to it. -+bb_jmp[1] from an explicit jump at 0xc015cd1b and a drop through at bb_jmp[12]. -+Where there is more than one input state we have to merge them and reconcile the -+final value. -+ -+ first state c07286b8 -+ -+The first input state is stored at c07286b8. Looking back through the trace we -+find that entry associated with bb_jmp[0], not bb_jmp[1] as expected. However -+bb_jmp[1] reused the state that was stored for bb_jmp[0] so all is well. -+ -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = rbx -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = rsi -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ -+The first state for bb[1]. -+ -+ merging state c0728768 -+ -+Now merge the second state, which is held at c0728768. 
-+ -+ rbx = undefined -+ rsi = undefined -+ -+The two states disagree on the values being tracked in rbx and rsi. Compiler -+theory 101 says that if two or more paths to a basic block have different values -+for a register then that register cannot be relied on at the start of the block, -+so make it undefined. The same logic applies to memory locations. -+ -+ final state -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ -+After merging all the input states, this is the final starting state for bb[1]. -+Now track what bb[1] does to the state. -+ -+ '0xc015cd39 getname+0x51: mov $0x1000,%ebx' -+ opcode 'mov' matched by 'mov', usage 36 -+ src I: $0x1000 -+ dst R: %ebx base_rc 3 (rbx) -+ rbx = undefined -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd3d to 0xc015cd3e drop_through 1 bb_jmp[13] -+ reuse bb_jmp[3] -+ -+bb[1] is a single instruction which drops through to bb[2]. -+ -+ bb[2] -+ first state c0728768 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ merging state c0728768 -+ -+bb[2] has two inputs, both vertices are pointing to input state c0728768. -+Merging an entry with itself has no effect. 
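-+
-+The merge rule is mechanical: any register or memory slot that the incoming
-+states disagree on becomes undefined at the start of the block.  As a sketch
-+(names invented) :-
-+
-+	#define UNDEFINED	0		/* sketch: 0 marks an unknown value */
-+	#define NR_TRACKED	16		/* registers plus memory slots */
-+
-+	struct merge_state {
-+		int value[NR_TRACKED];
-+	};
-+
-+	/* Merge one more incoming state into the block's starting state. */
-+	static void merge_into(struct merge_state *dst, const struct merge_state *src)
-+	{
-+		int i;
-+
-+		for (i = 0; i < NR_TRACKED; ++i)
-+			if (dst->value[i] != src->value[i])
-+				dst->value[i] = UNDEFINED;	/* the paths disagree */
-+	}
-+
-+Merging a state with itself, as for bb[2] above, changes nothing, which is why
-+the final state there is the same as the first state.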
-+ -+ '0xc015cd3e getname+0x56: mov %ebx,%ecx' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %ebx base_rc 3 (rbx) -+ dst R: %ecx base_rc 4 (rcx) -+ rcx = rbx (undefined) -+ '0xc015cd40 getname+0x58: mov %edi,%edx' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %edi base_rc 6 (rdi) -+ dst R: %edx base_rc 5 (rdx) -+ rdx = rdi (rax) -+ '0xc015cd42 getname+0x5a: mov 0xfffffff0(%ebp),%eax' -+ opcode 'mov' matched by 'mov', usage 36 -+ src M: 0xfffffff0(%ebp) base_rc 8 (rbp) -+ dst R: %eax base_rc 2 (rax) -+ rax = *(rbp-0x10) (osp-0x14) rax = undefined -+ '0xc015cd45 getname+0x5d: call 0xc023dbb4 ' -+ opcode 'call' matched by 'call', usage 17 -+ src M: 0xc023dbb4 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = rax -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ rax = undefined -+ rcx = undefined -+ rdx = undefined -+ '0xc015cd4a getname+0x62: cmp $0x0,%eax' -+ opcode 'cmp' matched by 'cmp', usage 3 -+ src I: $0x0 -+ dst R: %eax base_rc 2 (rax) -+ '0xc015cd4d getname+0x65: jle 0xc015cd5a ' -+ opcode 'jle' matched by 'j', usage 28 -+ src M: 0xc015cd5a -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd4d to 0xc015cd5a drop_through 0 bb_jmp[4] -+ reuse bb_jmp[3] -+ '0xc015cd4f getname+0x67: mov $0xffffffdc,%esi' -+ opcode 'mov' matched by 'mov', usage 36 -+ src I: $0xffffffdc -+ dst R: %esi base_rc 7 (rsi) -+ rsi = undefined -+ '0xc015cd54 getname+0x6c: cmp %ebx,%eax' -+ opcode 'cmp' matched by 'cmp', usage 3 -+ src R: %ebx base_rc 3 (rbx) -+ dst R: %eax base_rc 2 (rax) -+ '0xc015cd56 getname+0x6e: jae 0xc015cd60 ' -+ opcode 'jae' matched by 'j', usage 28 -+ src M: 0xc015cd60 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd56 to 0xc015cd60 drop_through 0 bb_jmp[5] -+ reuse bb_jmp[3] -+ '0xc015cd58 getname+0x70: jmp 0xc015cd71 ' -+ opcode 'jmp' matched by 'j', usage 28 -+ src M: 0xc015cd71 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd58 to 0xc015cd71 drop_through 0 bb_jmp[6] -+ reuse bb_jmp[3] -+ -+ bb[3] -+ first state c0728768 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ -+bb[3] only has one input, nothing to merge. 
-+ -+ '0xc015cd5a getname+0x72: je 0xc015cd76 ' -+ opcode 'je' matched by 'j', usage 28 -+ src M: 0xc015cd76 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd5a to 0xc015cd76 drop_through 0 bb_jmp[7] -+ reuse bb_jmp[3] -+ '0xc015cd5c getname+0x74: jge 0xc015cd71 ' -+ opcode 'jge' matched by 'j', usage 28 -+ src M: 0xc015cd71 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd5c to 0xc015cd71 drop_through 0 bb_jmp[8] -+ reuse bb_jmp[3] -+ '0xc015cd5e getname+0x76: mov %eax,%esi' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %eax base_rc 2 (rax) -+ dst R: %esi base_rc 7 (rsi) -+ rsi = rax (undefined) -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd5f to 0xc015cd60 drop_through 1 bb_jmp[14] -+ reuse bb_jmp[3] -+ -+ bb[5] -+ first state c0728768 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ merging state c0728768 -+ '0xc015cd71 getname+0x89: mov 0xfffffff0(%ebp),%eax' -+ opcode 'mov' matched by 'mov', usage 36 -+ src M: 0xfffffff0(%ebp) base_rc 8 (rbp) -+ dst R: %eax base_rc 2 (rax) -+ rax = *(rbp-0x10) (osp-0x14) rax = undefined -+ '0xc015cd74 getname+0x8c: jmp 0xc015cd7d ' -+ opcode 'jmp' matched by 'j', usage 28 -+ src M: 0xc015cd7d -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd74 to 0xc015cd7d drop_through 0 bb_jmp[10] -+ reuse bb_jmp[3] -+ -+ bb[6] -+ first state c0728768 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ '0xc015cd76 getname+0x8e: mov $0xfffffffe,%esi' -+ opcode 'mov' matched by 'mov', usage 36 -+ src I: $0xfffffffe -+ dst R: %esi base_rc 7 (rsi) -+ rsi = undefined -+ '0xc015cd7b getname+0x93: jmp 0xc015cd60 ' -+ opcode 'jmp' matched by 'j', usage 28 -+ src M: 0xc015cd60 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 
offset_address -0x10 rbx -+ matched: from 0xc015cd7b to 0xc015cd60 drop_through 0 bb_jmp[11] -+ reuse bb_jmp[3] -+ -+ bb[4] -+ first state c0728710 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = rbx -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ merging state c0728768 -+ rbx = undefined -+ merging state c0728768 -+ merging state c0728768 -+ -+bb[4] has 4 inputs, 3 of which have the same state. One one path (state -+c0728710) rbx is defined, on the others (c0728768) rbx is undefined so the final -+state has rbx as undefined. -+ -+ final state -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ '0xc015cd60 getname+0x78: mov 0xfffffff0(%ebp),%edx' -+ opcode 'mov' matched by 'mov', usage 36 -+ src M: 0xfffffff0(%ebp) base_rc 8 (rbp) -+ dst R: %edx base_rc 5 (rdx) -+ rdx = *(rbp-0x10) (osp-0x14) rdx = undefined -+ '0xc015cd63 getname+0x7b: mov 0xc04b2120,%eax' -+ opcode 'mov' matched by 'mov', usage 36 -+ src M: 0xc04b2120 -+ dst R: %eax base_rc 2 (rax) -+ rax = undefined -+ '0xc015cd68 getname+0x80: call 0xc01521f1 ' -+ opcode 'call' matched by 'call', usage 17 -+ src M: 0xc01521f1 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ rax = undefined -+ rcx = undefined -+ rdx = undefined -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ '0xc015cd6d getname+0x85: mov %esi,%eax' -+ opcode 'mov' matched by 'mov', usage 36 -+ src R: %esi base_rc 7 (rsi) -+ dst R: %eax base_rc 2 (rax) -+ rax = rsi (undefined) -+ '0xc015cd6f getname+0x87: jmp 0xc015cd7d ' -+ opcode 'jmp' matched by 'j', usage 28 -+ src M: 0xc015cd7d -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ matched: from 0xc015cd6f to 0xc015cd7d drop_through 0 bb_jmp[9] -+ reuse bb_jmp[3] -+ -+ bb[7] -+ first state c07286b8 -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = rbx -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = rsi -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ merging state c0728768 -+ rbx = undefined -+ rsi = undefined -+ merging state c0728768 -+ final state -+ bb_reg_state c0728658 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 
offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ '0xc015cd7d getname+0x95: pop %edx' -+ opcode 'pop' matched by 'pop', usage 42 -+ src R: %edx base_rc 5 (rdx) -+ rdx = *(rsp+0x0) (osp-0x14) rdx = undefined -+ rsp osp offset -0x14 -> -0x10 -+ -+This instruction is a bit misleading. It looks like gcc is restoring a value -+from the stack *(osp-0x14) to edx, but we have no record of any useful data -+being stored at osp-0x14. In fact gcc is just reducing the stack pointer by 4 -+bytes to reverse the effect of 0xc015ccee: sub $0x4,%esp, the value popped into -+edx contains nothing useful. Why gcc does pop instead of add $0x4,%esp is a -+puzzle, probably some micro optimization. -+ -+ '0xc015cd7e getname+0x96: pop %ebx' -+ opcode 'pop' matched by 'pop', usage 42 -+ src R: %ebx base_rc 3 (rbx) -+ rbx = *(rsp+0x0) (osp-0x10) value rbx -+ rsp osp offset -0x10 -> -0xc -+ delete rbx from osp-0x10 slot 3 -+ -+This pop is doing something useful. It is restoring the original value of the -+preserved register ebx from stack, reversing 0xc015cced: push %ebx. Note that -+incrementing rsp from osp-0x10 to osp-0xc invalidates the data held in memory at -+osp-0x10, so we delete our record of it. -+ -+ '0xc015cd7f getname+0x97: pop %esi' -+ opcode 'pop' matched by 'pop', usage 42 -+ src R: %esi base_rc 7 (rsi) -+ rsi = *(rsp+0x0) (osp-0xc) value rsi -+ rsp osp offset -0xc -> -0x8 -+ delete rsi from osp-0xc slot 2 -+ '0xc015cd80 getname+0x98: pop %edi' -+ opcode 'pop' matched by 'pop', usage 42 -+ src R: %edi base_rc 6 (rdi) -+ rdi = *(rsp+0x0) (osp-0x8) value rdi -+ rsp osp offset -0x8 -> -0x4 -+ delete rdi from osp-0x8 slot 1 -+ -+Pop the other preserved registers, in reverse order to the push sequence at the -+start. -+ -+ '0xc015cd81 getname+0x99: pop %ebp' -+ opcode 'pop' matched by 'pop', usage 42 -+ src R: %ebp base_rc 8 (rbp) -+ rbp = *(rsp+0x0) (osp-0x4) value rbp -+ rsp osp offset -0x4 -> +0x0 -+ delete rbp from osp-0x4 slot 0 -+ -+Pop the previous frame pointer. -+ -+ '0xc015cd82 getname+0x9a: ret ' -+ opcode 'ret' matched by 'ret', usage 48 -+ -+When a ret instruction is executed, all the preserved registers must be back to -+their original value and the stack pointer must contain osp+0. -+bb_sanity_check() will complain and abort the backtrace if this is not true. No -+complaints here. -+ -+ bb_pass2: end bb_reg_params 1 bb_memory_params 0 -+ -+We identified one argument passed in a register (the read of rax at 0xc015ccf1) -+and no reference to memory locations above the stack frame. So we have one -+argument being passed in a register and no arguments being passed on stack. -+This matches -+ -+ char * getname(const char __user * filename) -+ -+ bb_pass2: bb_exit_state at 0xc015cd6d -+ bb_reg_state c07287c0 -+ rax = undefined -+ rbx = undefined -+ rcx = undefined -+ rdx = undefined -+ rdi = rax -+ rsi = undefined -+ rbp = osp-0x4 -+ rsp = osp-0x14 -+ slot 0 offset_address -0x4 rbp -+ slot 1 offset_address -0x8 rdi -+ slot 2 offset_address -0xc rsi -+ slot 3 offset_address -0x10 rbx -+ -+We told bb1 that the exit address from this function is 0xc015cd6d. The -+abstract state at this exit point was saved, it defines how we rollback the -+actual register values from the next function down the stack (kmem_cache_free) -+to get the actual register values on entry to this function (getname). See -+bb_actual_rollback() which updates bb_actual[]. -+ -+Looking at the exit state above, we see that rsp contains the abstracte value -+osp-0x14. 
It is a given that we have the actual value of rsp after the call -+from getname() to kmem_cache_free(), otherwise we would not have found the -+return address on stack and we would not be analysing getname(). Adding 0x14 -+(the delta from osp to rsp) to our current actual rsp gives us the actual value -+of osp on entry to getname(). -+ -+The main aim of doing all this work is to track the function arguments so we can -+print them if possible. getname() only has one argument which was passed in -+eax. According to the abstract exit state, the original value of rax is -+currently in rdi, so by looking at the actual value of rdi from the next stack -+frame down we are able to get the argument to getname(). -+ -+It is not always possible to get register arguments, gcc will only preserve -+input arguments as long as it needs them so there may be no saved copy of -+arguments that are passed in register. In this case, bt_print_one() prints -+"invalid". -+ -+If basic block analysis detected any arguments were passed on stack, their -+contents can now be extracted based on the known value of the stack pointer. -+bt_print_one() prints the arguments, if BT_ARGS is non-zero then any argument -+that might be a kernel address is printed as a symbol. -+ -+Once rsp has been rolled back to osp, we can calculate that actual address of -+the stack locations that contain useful data. The previous values of rbp, rdi, -+rsi and rbx are then copied from those locations into bb_actual[]. That gives -+the values for those registers at the exit point from the function that called -+getname(). Go up one level and repeat the analysis. -+ -+There are two references to rdi in the exit state, which can be confusing. -+ -+ rdi = rax -+ slot 1 offset_address -0x8 rdi -+ -+The first reference says that "register rdi contains the original value of rax", -+the second reference says that "*(osp-0x8) contains the original value of rdi". -+Do not confuse the two, one is by name, the other is by value. -+ -+getname() is a fairly simple function, it has no loops. __follow_mount is more -+complicated, it has loops as well as BUG() statements. 
-+ -+ [0]kdb> id __follow_mount -+ 0xc015be76 __follow_mount: push %ebp -+ 0xc015be77 __follow_mount+0x1: mov %esp,%ebp -+ 0xc015be79 __follow_mount+0x3: push %edi -+ 0xc015be7a __follow_mount+0x4: push %esi -+ 0xc015be7b __follow_mount+0x5: push %ebx -+ 0xc015be7c __follow_mount+0x6: mov %eax,%esi -+ 0xc015be7e __follow_mount+0x8: xor %edi,%edi -+ 0xc015be80 __follow_mount+0xa: jmp 0xc015beca __follow_mount+0x54 -+ 0xc015be82 __follow_mount+0xc: mov (%esi),%eax -+ 0xc015be84 __follow_mount+0xe: call 0xc0169664 lookup_mnt -+ 0xc015be89 __follow_mount+0x13: mov %eax,%ebx -+ 0xc015be8b __follow_mount+0x15: test %eax,%eax -+ 0xc015be8d __follow_mount+0x17: je 0xc015bed3 __follow_mount+0x5d -+ 0xc015be8f __follow_mount+0x19: mov 0x4(%esi),%eax -+ 0xc015be92 __follow_mount+0x1c: call 0xc0163de2 dput -+ 0xc015be97 __follow_mount+0x21: test %edi,%edi -+ 0xc015be99 __follow_mount+0x23: je 0xc015bead __follow_mount+0x37 -+ 0xc015be9b __follow_mount+0x25: mov (%esi),%eax -+ 0xc015be9d __follow_mount+0x27: test %eax,%eax -+ 0xc015be9f __follow_mount+0x29: je 0xc015bead __follow_mount+0x37 -+ 0xc015bea1 __follow_mount+0x2b: movl $0x0,0x64(%eax) -+ 0xc015bea8 __follow_mount+0x32: call 0xc016835b mntput_no_expire -+ 0xc015bead __follow_mount+0x37: mov %ebx,(%esi) -+ 0xc015beaf __follow_mount+0x39: mov 0x10(%ebx),%eax -+ 0xc015beb2 __follow_mount+0x3c: test %eax,%eax -+ 0xc015beb4 __follow_mount+0x3e: je 0xc015bec2 __follow_mount+0x4c -+ 0xc015beb6 __follow_mount+0x40: cmpl $0x0,(%eax) -+ 0xc015beb9 __follow_mount+0x43: jne 0xc015bebf __follow_mount+0x49 -+ 0xc015bebb __follow_mount+0x45: ud2a -+ 0xc015bebd __follow_mount+0x47: jmp 0xc015bebd __follow_mount+0x47 -+ 0xc015bebf __follow_mount+0x49: lock incl (%eax) -+ 0xc015bec2 __follow_mount+0x4c: mov %eax,0x4(%esi) -+ 0xc015bec5 __follow_mount+0x4f: mov $0x1,%edi -+ 0xc015beca __follow_mount+0x54: mov 0x4(%esi),%edx -+ 0xc015becd __follow_mount+0x57: cmpl $0x0,0x74(%edx) -+ 0xc015bed1 __follow_mount+0x5b: jne 0xc015be82 __follow_mount+0xc -+ 0xc015bed3 __follow_mount+0x5d: mov %edi,%eax -+ 0xc015bed5 __follow_mount+0x5f: pop %ebx -+ 0xc015bed6 __follow_mount+0x60: pop %esi -+ 0xc015bed7 __follow_mount+0x61: pop %edi -+ 0xc015bed8 __follow_mount+0x62: pop %ebp -+ 0xc015bed9 __follow_mount+0x63: ret -+ -+ [0]kdb> bb1 0xc015bed9 -+ bb_pass1: func_name __follow_mount func_start 0xc015be76 func_end 0xc015beda -+ bb_pass1: end -+ bb[0] start 0xc015be76 end 0xc015be80 drop_through 0 -+ bb[1] start 0xc015be82 end 0xc015beac drop_through 1 -+ bb[2] start 0xc015bead end 0xc015bebb drop_through 0 -+ -+Note that the ud2a (BUG) instruction at 0xc015bebb ends bb[2]. -+ -+ bb[3] start 0xc015bebd end 0xc015bebd drop_through 0 -+ -+bb[3] is peculiar, it is a jmp to itself, nothing else refers to 0xc015bebd and -+you cannot drop through from the previous instruction because ud2a kills the -+kernel. The i386 and x86_64 BUG() macros contain for(;;) after ud2a, for no -+good reason that I can see (is there old hardware that does not abort on ud2a?). -+ia64 and the generic versions of BUG() do not contain for(;;). for(;;) after -+ud2a generates a branch to itself than can never be executed. 
-+ -+ bb[4] start 0xc015bebf end 0xc015bec1 drop_through 1 -+ bb[5] start 0xc015bec2 end 0xc015bec9 drop_through 1 -+ bb[6] start 0xc015beca end 0xc015bed2 drop_through 1 -+ bb[7] start 0xc015bed3 end 0xc015bed9 drop_through 0 -+ bb_jmp[0] from 0xc015be80 to 0xc015beca drop_through 0 -+ bb_jmp[1] from 0xc015be8d to 0xc015bed3 drop_through 0 -+ bb_jmp[2] from 0xc015be99 to 0xc015bead drop_through 0 -+ bb_jmp[3] from 0xc015be9f to 0xc015bead drop_through 0 -+ bb_jmp[4] from 0xc015beb4 to 0xc015bec2 drop_through 0 -+ bb_jmp[5] from 0xc015beb9 to 0xc015bebf drop_through 0 -+ bb_jmp[6] from 0xc015bebd to 0xc015bebd drop_through 0 -+ bb_jmp[7] from 0xc015bed1 to 0xc015be82 drop_through 0 -+ bb_jmp[8] from 0xc015beac to 0xc015bead drop_through 1 -+ bb_jmp[9] from 0xc015bec1 to 0xc015bec2 drop_through 1 -+ bb_jmp[10] from 0xc015bec9 to 0xc015beca drop_through 1 -+ bb_jmp[11] from 0xc015bed2 to 0xc015bed3 drop_through 1 -+ -+Apart from bb[0] and the special case bb[3], all the other blocks are part of a -+cycle. That cycle goes bb[0] -> bb[6]. bb[6] -> {bb[1], bb[7]}. bb[1] -> -+{bb[2], bb[7]}. bb[2] -> {bb[4], bb[5]}. bb[4] -> bb[5]. bb[5] -> bb[6] and -+back to the start. bb[7] ends with 'ret', it does not feed into other blocks. -+ -+ bb_pass2: start -+ -+ bb_pass2_do_changed_blocks: allow_missing 0 -+ -+ bb[0] -+ [ ... detail snipped ... ] -+ matched: from 0xc015be80 to 0xc015beca drop_through 0 bb_jmp[0] -+ new state c07286d8 -+ -+ bb_pass2_do_changed_blocks: allow_missing 1 -+ -+Because of the cycle, only bb[0] can be processed with 0 missing inputs, all the -+other blocks have at least one missing input. Call bb_pass2_do_changed_blocks() -+again, this time allowing one missing input per blocks. -+ -+ bb[6] -+ first state c07286d8 -+ [ ... detail snipped ... ] -+ matched: from 0xc015bed2 to 0xc015bed3 drop_through 1 bb_jmp[11] -+ reuse bb_jmp[7] -+ -+ bb[7] -+ first state c0728730 -+ [ ... detail snipped ... ] -+ -+ bb[1] -+ first state c0728730 -+ [ ... detail snipped ... ] -+ matched: from 0xc015beac to 0xc015bead drop_through 1 bb_jmp[8] -+ reuse bb_jmp[1] -+ -+ bb[2] -+ first state c0728788 -+ [ ... detail snipped ... ] -+ merging state c0728788 -+ merging state c0728788 -+ [ ... detail snipped ... ] -+ matched: from 0xc015beb9 to 0xc015bebf drop_through 0 bb_jmp[5] -+ reuse bb_jmp[1] -+ -+ bb[4] -+ first state c0728788 -+ [ ... detail snipped ... ] -+ matched: from 0xc015bec1 to 0xc015bec2 drop_through 1 bb_jmp[9] -+ reuse bb_jmp[1] -+ -+ bb[5] -+ first state c0728788 -+ [ ... detail snipped ... ] -+ merging state c0728788 -+ [ ... detail snipped ... ] -+ matched: from 0xc015bec9 to 0xc015beca drop_through 1 bb_jmp[10] -+ reuse bb_jmp[1] -+ -+ bb[6] -+ first state c07286d8 -+ [ ... detail snipped ... ] -+ merging state c0728788 -+ matched: from 0xc015bed2 to 0xc015bed3 drop_through 1 bb_jmp[11] -+ reuse bb_jmp[1] -+ -+Note the rescan of bb[6]. The first scan only had one input from bb[0]. After -+traversing the cycle and getting back from bb[5] to bb[6], bb[6] now has more -+inputs so we need to rescan it. With the additional input, the output state -+from bb[6] has changed since the first scan, which means that every block it -+feeds has to be rescanned. bb[6] feeds bb[1] and bb[7]. -+ -+ bb[7] -+ first state c0728788 -+ [ ... detail snipped ... ] -+ merging state c0728788 -+ [ ... detail snipped ... ] -+ -+bb[7] being rescanned, this time it has data for both its inputs. -+ -+ bb[1] -+ first state c0728788 -+ [ ... detail snipped ... 
] -+ matched: from 0xc015beac to 0xc015bead drop_through 1 bb_jmp[8] -+ no state change -+ -+bb[1] is being rescanned because the input from bb[6] has changed, however the -+rescan of bb[1] reports 'no state change', the changed input from bb[6] did not -+affect the final output state from bb[1]. Because the output state from bb[1] -+has not changed since the previous scan, there is no need to rescan bb[2], bb[7] -+or bb[4]. Since bb[4] is not being rescanned, there is no need to rescan bb[5] -+or bb[6] and the cycle is closed. ---- /dev/null -+++ b/Documentation/kdb/kdb.mm -@@ -0,0 +1,492 @@ -+.TH KDB 8 "September 21, 2005" -+.hy 0 -+.SH NAME -+Built-in Kernel Debugger for Linux - v4.4 -+.SH "Overview" -+This document describes the built-in kernel debugger available -+for linux. This debugger allows the programmer to interactively -+examine kernel memory, disassemble kernel functions, set breakpoints -+in the kernel code and display and modify register contents. -+.P -+A symbol table is included in the kernel image and in modules which -+enables all non-stack symbols (including static symbols) to be used as -+arguments to the kernel debugger commands. -+.SH "Getting Started" -+To include the kernel debugger in a linux kernel, use a -+configuration mechanism (e.g. xconfig, menuconfig, et. al.) -+to enable the \fBCONFIG_KDB\fP option. Additionally, for accurate -+stack tracebacks, it is recommended that the \fBCONFIG_FRAME_POINTER\fP -+option be enabled (if present). \fBCONFIG_FRAME_POINTER\fP changes the compiler -+flags so that the frame pointer register will be used as a frame -+pointer rather than a general purpose register. -+.P -+After linux has been configured to include the kernel debugger, -+make a new kernel with the new configuration file (a make clean -+is recommended before making the kernel), and install the kernel -+as normal. -+.P -+You can compile a kernel with kdb support but have kdb off by default, -+select \fBCONFIG_KDB_OFF\fR. Then the user has to explicitly activate -+kdb by booting with the 'kdb=on' flag or, after /proc is mounted, by -+.nf -+ echo "1" > /proc/sys/kernel/kdb -+.fi -+You can also do the reverse, compile a kernel with kdb on and -+deactivate kdb with the boot flag 'kdb=off' or, after /proc is mounted, -+by -+.nf -+ echo "0" > /proc/sys/kernel/kdb -+.fi -+.P -+When booting the new kernel, the 'kdb=early' flag -+may be added after the image name on the boot line to -+force the kernel to stop in the kernel debugger early in the -+kernel initialization process. 'kdb=early' implies 'kdb=on'. -+If the 'kdb=early' flag isn't provided, then kdb will automatically be -+invoked upon system panic or when the \fBPAUSE\fP key is used from the -+keyboard, assuming that kdb is on. Older versions of kdb used just a -+boot flag of 'kdb' to activate kdb early, this is no longer supported. -+.P -+KDB can also be used via the serial port. Set up the system to -+have a serial console (see \fIDocumentation/serial-console.txt\fP), you -+must also have a user space program such as agetty set up to read from -+the serial console. -+The control sequence \fBKDB\fP on the serial port will cause the -+kernel debugger to be entered, assuming that kdb is on, that some -+program is reading from the serial console, at least one cpu is -+accepting interrupts and the serial console driver is still usable. 
-+.P -+\fBNote:\fR\ When the serial console sequence consists of multiple -+characters such as KDB then all but the last character are passed -+through to the application that is reading from the serial console. -+After exiting from kdb, you should use backspace to delete the rest of -+the control sequence. -+.P -+You can boot with kdb activated but without the ability to enter kdb -+via any keyboard sequence. -+In this mode, kdb will only be entered after a system failure. -+Booting with kdb=on-nokey will activate kdb but ignore keyboard -+sequences that would normally drop you into kdb. -+kdb=on-nokey is mainly useful when you are using a PC keyboard and your -+application needs to use the Pause key. -+You can also activate this mode by -+.nf -+ echo "2" > /proc/sys/kernel/kdb -+.fi -+.P -+If the console is sitting on the login prompt when you enter kdb, then -+the login command may switch into upper case mode. -+This is not a kdb bug, it is a "feature" of login - if the userid is -+all upper case then login assumes that you using a TeleType (circa -+1960) which does not have lower case characters. -+Wait 60 seconds for login to timeout and it will switch back to lower -+case mode. -+.P -+\fBNote:\fR\ Your distributor may have chosen a different kdb -+activation sequence for the serial console. -+Consult your distribution documentation. -+.P -+If you have both a keyboard+video and a serial console, you can use -+either for kdb. -+Define both video and serial consoles with boot parameters -+.P -+.nf -+ console=tty0 console=ttyS0,38400 -+.fi -+.P -+Any kdb data entered on the keyboard or the serial console will be echoed -+to both. -+.P -+If you are using a USB keyboard then kdb commands cannot be entered -+until the kernel has initialised the USB subsystem and recognised the -+keyboard. -+Using kdb=early with a USB keyboard will not work, the USB subsystem is -+initialised too late. -+.P -+While kdb is active, the keyboard (not serial console) indicators may strobe. -+The caps lock and scroll lock lights will turn on and off, num lock is not used -+because it can confuse laptop keyboards where the numeric keypad is mapped over -+the normal keys. -+On exit from kdb the keyboard indicators will probably be wrong, they will not match the kernel state. -+Pressing caps lock twice should get the indicators back in sync with -+the kernel. -+.SH "Basic Commands" -+There are several categories of commands available to the -+kernel debugger user including commands providing memory -+display and modification, register display and modification, -+instruction disassemble, breakpoints and stack tracebacks. -+Any command can be prefixed with '-' which will cause kdb to ignore any -+errors on that command, this is useful when packaging commands using -+defcmd. -+A line whose first non-space character is '#' is printed and ignored. -+.P -+The following table shows the currently implemented standard commands, -+these are always available. Other commands can be added by extra -+debugging modules, type '?' at the kdb prompt to get a list of all -+available commands. -+.DS -+.TS -+box, center; -+l | l -+l | l. 
-+Command Description -+_ -+bc Clear Breakpoint -+bd Disable Breakpoint -+be Enable Breakpoint -+bl Display breakpoints -+bp Set or Display breakpoint -+bph Set or Display hardware breakpoint -+bpa Set or Display breakpoint globally -+bpha Set or Display hardware breakpoint globally -+bt Stack backtrace for current process -+btp Stack backtrace for specific process -+bta Stack backtrace for all processes -+btc Cycle over all live cpus and backtrace each one -+cpu Display or switch cpus -+dmesg Display system messages -+defcmd Define a command as a set of other commands -+ef Print exception frame -+env Show environment -+go Restart execution -+handlers Control the display of IA64 MCA/INIT handlers -+help Display help message -+id Disassemble Instructions -+kill Send a signal to a process -+ll Follow Linked Lists -+lsmod List loaded modules -+md Display memory contents -+mdWcN Display memory contents with width W and count N. -+mdp Display memory based on a physical address -+mdr Display raw memory contents -+mds Display memory contents symbolically -+mm Modify memory contents, words -+mmW Modify memory contents, bytes -+per_cpu Display per_cpu variables -+pid Change the default process context -+ps Display process status -+reboot Reboot the machine -+rd Display register contents -+rm Modify register contents -+rq Display runqueue for one cpu -+rqa Display runqueue for all cpus -+set Add/change environment variable -+sr Invoke SysReq commands -+ss Single step a cpu -+ssb Single step a cpu until a branch instruction -+stackdepth Print the stack depth for selected processes -+summary Summarize the system -+.TE -+.DE -+.P -+Some commands can be abbreviated, such commands are indicated by a -+non-zero \fIminlen\fP parameter to \fBkdb_register\fP; the value of -+\fIminlen\fP being the minimum length to which the command can be -+abbreviated (for example, the \fBgo\fP command can be abbreviated -+legally to \fBg\fP). -+.P -+If an input string does not match a command in the command table, -+it is treated as an address expression and the corresponding address -+value and nearest symbol are shown. -+.P -+Some of the commands are described here. -+Information on the more complicated commands can be found in the -+appropriate manual pages. -+.TP 8 -+cpu -+With no parameters, it lists the available cpus. -+\&'*' after a cpu number indicates a cpu that did not respond to the kdb -+stop signal. -+\&'+' after a cpu number indicates a cpu for which kdb has some data, but -+that cpu is no longer responding to kdb, so you cannot switch to it. -+This could be a cpu that has failed after entering kdb, or the cpu may -+have saved its state for debugging then entered the prom, this is -+normal for an IA64 MCA event. -+\&'I' after a cpu number means that the cpu was idle before it entered -+kdb, it is unlikely to contain any useful data. -+\&'F' after a cpu number means that the cpu is offline. -+There is currenly no way to distinguish between cpus that used to be -+online but are now offline and cpus that were never online, the kernel -+does not maintain the information required to separate those two cases. -+.I cpu -+followed by a number will switch to that cpu, you cannot switch to -+a cpu marked '*', '+' or 'F'. -+This command is only available if the kernel was configured for SMP. -+.TP 8 -+dmesg [lines] [adjust] -+Displays the system messages from the kernel buffer. -+If kdb logging is on, it is disabled by dmesg and is left as disabled. 
-+With no parameters or a zero value for 'lines', dmesg dumps the entire -+kernel buffer. -+If lines is specified and is positive, dmesg dumps the last 'lines' -+from the buffer. -+If lines is specified and is negative, dmesg dumps the first 'lines' -+from the buffer. -+If adjust is specified, adjust the starting point for the lines that -+are printed. -+When 'lines' is positive, move the starting point back by 'adjust' -+lines, when 'lines' is negative, move the starting point forward by -+\&'adjust' lines. -+.I dmesg -100 -+will dump 100 lines, from the start of the buffer. -+.I dmesg 100 -+will dump 100 lines, starting 100 lines from the end of the buffer, -+.I dmesg 100 100 -+will dump 100 lines, starting 200 lines from the end of the buffer. -+.I dmesg -100 100 -+will dump 100 lines, starting 100 lines from the start of the buffer. -+.TP 8 -+defcmd -+Defines a new command as a set of other commands, all input until -+.I endefcmd -+is saved and executed as a package. -+.I defcmd -+takes three parameters, the command name to be defined and used to -+invoke the package, a quoted string containing the usage text and a -+quoted string containing the help text for the command. -+When using defcmd, it is a good idea to prefix commands that might fail -+with '-', this ignores errors so the following commands are still -+executed. -+For example, -+.P -+.nf -+ defcmd diag "" "Standard diagnostics" -+ set LINES 2000 -+ set BTAPROMPT 0 -+ -id %eip-0x40 -+ -cpu -+ -ps -+ -dmesg 80 -+ -bt -+ -bta -+ endefcmd -+.fi -+.P -+When used with no parameters, defcmd prints all the defined commands. -+.TP 8 -+go -+Continue normal execution. -+Active breakpoints are reestablished and the processor(s) allowed to -+run normally. -+To continue at a specific address, use -+.I rm -+to change the instruction pointer then go. -+.TP 8 -+handlers -+Control the display of IA64 MCA/INIT handlers. -+The IA64 MCA/INIT handlers run on separate tasks. -+During an MCA/INIT event, the active tasks are typically the handlers, -+rather than the original tasks, which is not very useful for debugging. -+By default, KDB hides the MCA/INIT handlers so commands such as ps and -+btc will display the original task. -+You can change this behaviour by using -+.I handlers show -+to display the MCA/INIT handlers instead of the original tasks or use -+.I handlers hide -+(the default) to hide the MCA/INIT handlers and display the original -+tasks. -+.I handlers status -+will list the address of the handler task and the original task for -+each cpu. -+\fBNote:\fR\ If the original task was running in user space or it -+failed any of the MCA/INIT verification tests then there is no original -+task to display. -+In this case, the handler will be displayed even if -+.I handlers hide -+is set and -+.I handlers status -+will not show an original task. -+.TP 8 -+id -+Disassemble instructions starting at an address. -+Environment variable IDCOUNT controls how many lines of disassembly -+output the command produces. -+.TP 8 -+kill -+Internal command to send a signal (like kill(1)) to a process. -+kill -signal pid. -+.TP 8 -+lsmod -+Internal command to list modules. -+This does not use any kernel or user space services so can be used at any time. -+.TP 8 -+per_cpu <variable_name> [<length>] [<cpu>] -+Display the values of a per_cpu variable, the variable_name is -+specified without the \fIper_cpu__\fR prefix. -+Length is the length of the variable, 1-8, if omitted or 0 it defaults -+to the size of the machine's register.
-+To display the variable on a specific cpu, the third parameter is the -+cpu number. -+When the third parameter is omitted, the variable's value is printed -+from all cpus, except that zero values are suppressed. -+For each cpu, per_cpu prints the cpu number, the address of the -+variable and its value. -+.TP 8 -+pid -+Change the current process context, with no parameters it displays the -+current process. -+The current process is used to display registers, both kernel and user -+space. -+It is also used when dumping user pages. -+.I pid R -+resets to the original process that was running when kdb was entered. -+This command is useful if you have been looking at other processes and/or -+cpus and you want to get back to the original process. -+It does not switch cpus, it only resets the context to the original process. -+.TP 8 -+reboot -+Reboot the system, with no attempt to do a clean close down. -+.TP 8 -+rq -+Display the runqueues for the specified cpu. -+.TP 8 -+rqa -+Display the runqueues for all cpus. -+.TP 8 -+stackdepth -+Print the stack usage for processes using more than the specified -+percentage of their stack. -+If percentage is not supplied, it defaults to 60. -+This command is only implemented on i386 and ia64 architectures, -+patches for other architectures will be gratefully accepted. -+.TP 8 -+summary -+Print a summary of the system, including the time (no timezone is -+applied), uname information and various critical system counters. -+.SH INITIAL KDB COMMANDS -+kdb/kdb_cmds is a plain text file where you can define kdb commands -+which are to be issued during kdb_init(). One command per line, blank -+lines are ignored, lines starting with '#' are ignored. kdb_cmds is -+intended for per user customization of kdb, you can use it to set -+environment variables to suit your hardware or to set standard -+breakpoints for the problem you are debugging. This file is converted -+to a small C object, compiled and linked into the kernel. You must -+rebuild and reinstall the kernel after changing kdb_cmds. This file -+will never be shipped with any useful data so you can always override -+it with your local copy. Sample kdb_cmds: -+.P -+.nf -+# Initial commands for kdb, alter to suit your needs. -+# These commands are executed in kdb_init() context, no SMP, no -+# processes. Commands that require process data (including stack or -+# registers) are not reliable this early. set and bp commands should -+# be safe. Global breakpoint commands affect each cpu as it is booted. -+ -+set LINES=50 -+set MDCOUNT=25 -+set RECURSE=1 -+bp sys_init_module -+.fi -+.SH INTERRUPTS AND KDB -+When a kdb event occurs, one cpu (the initial cpu) enters kdb state. -+It uses a cross system interrupt to interrupt the -+other cpus and bring them all into kdb state. All cpus run with -+interrupts disabled while they are inside kdb, this prevents most -+external events from disturbing the kernel while kdb is running. -+.B Note: -+Disabled interrupts means that any I/O that relies on interrupts cannot -+proceed while kdb is in control, devices can time out. The clock tick -+is also disabled, machines will lose track of time while they are -+inside kdb. -+.P -+Even with interrupts disabled, some non-maskable interrupt events will -+still occur, these can disturb the kernel while you are debugging it. -+The initial cpu will still accept NMI events, assuming that kdb was not -+entered for an NMI event. 
Any cpu where you use the SS or SSB commands -+will accept NMI events, even after the instruction has finished and the -+cpu is back in kdb. This is an unavoidable side effect of the fact that -+doing SS[B] requires the cpu to drop all the way out of kdb, including -+exiting from the event that brought the cpu into kdb. Under normal -+circumstances the only NMI event is for the NMI oopser and that is kdb -+aware so it does not disturb the kernel while kdb is running. -+.P -+Sometimes doing SS or SSB on ix86 will allow one interrupt to proceed, -+even though the cpu is disabled for interrupts. I have not been able -+to track this one down but I suspect that the interrupt was pending -+when kdb was entered and it runs when kdb exits through IRET even -+though the popped flags are marked as cli(). If any ix86 hardware -+expert can shed some light on this problem, please notify the kdb -+maintainer. -+.SH RECOVERING FROM KDB ERRORS -+If a kdb command breaks and kdb has enough of a recovery environment -+then kdb will abort the command and drop back into mainline kdb code. -+This means that user written kdb commands can follow bad pointers -+without killing kdb. Ideally all code should verify that data areas -+are valid (using kdb_getarea) before accessing it but lots of calls to -+kdb_getarea can be clumsy. -+.P -+The sparc64 port does not currently provide this error recovery. -+If someone would volunteer to write the necessary longjmp/setjmp -+code, their efforts would be greatly appreciated. In the -+meantime, it is possible for kdb to trigger a panic by accessing -+a bad address. -+.SH DEBUGGING THE DEBUGGER -+kdb has limited support for debugging problems within kdb. If you -+suspect that kdb is failing, you can set environment variable KDBDEBUG -+to a bit pattern which will activate kdb_printf statements within kdb. -+See include/linux/kdb.h, KDB_DEBUG_FLAG_xxx defines. For example -+.nf -+ set KDBDEBUG=0x60 -+.fi -+activates the event callbacks into kdb plus state tracing in sections -+of kdb. -+.nf -+ set KDBDEBUG=0x18 -+.fi -+gives lots of tracing as kdb tries to decode the process stack. -+.P -+You can also perform one level of recursion in kdb. If environment -+variable RECURSE is not set or is 0 then kdb will either recover from -+an error (if the recovery environment is satisfactory) or kdb will -+allow the error to percolate, usually resulting in a dead system. When -+RECURSE is 1 then kdb will recover from an error or, if there is no -+satisfactory recovery environment, it will drop into kdb state to let -+you diagnose the problem. When RECURSE is 2 then all errors drop into -+kdb state, kdb does not attempt recovery first. Errors while in -+recursive state all drop through, kdb does not even attempt to recover -+from recursive errors. -+.SH KEYBOARD EDITING -+kdb supports a command history, which can be accessed via keyboard -+sequences. -+It supports the special keys on PC keyboards, control characters and -+vt100 sequences on a serial console or a PC keyboard. -+.P -+.DS -+.TS -+box, center; -+l | l | l l | l -+l | l | l l | l. 
-+PC Special keys Control VT100 key Codes Action -+_ -+Backspace ctrl-H Backspace 0x7f Delete character to the left of the cursor -+Delete ctrl-D Delete \\e[3~ Delete character to the right of the cursor -+Home ctrl-A Home \\e[1~ Go to start of line -+End ctrl-E End \\e[4~ Go to end of line -+Up arrow ctrl-P Up arrow \\e[A Up one command in history -+Down arrow ctrl-N Down arrow \\e[B Down one command in history -+Left arrow ctrl-B Left arrow \\e[D Left one character in current command -+Right arrow ctrl-F Right arrow \\e[C Right one character in current command -+.TE -+.DE -+.P -+There is no toggle for insert/replace mode, kdb editing is always in -+insert mode. -+Use delete and backspace to delete characters. -+.P -+kdb also supports tab completion for kernel symbols -+Type the start of a kernel symbol and press tab (ctrl-I) to complete -+the name -+If there is more than one possible match, kdb will append any common -+characters and wait for more input, pressing tab a second time will -+display the possible matches -+The number of matches is limited by environment variable DTABCOUNT, -+with a default of 30 if that variable is not set. -+.SH AUTHORS -+Scott Lurndal, Richard Bass, Scott Foehner, Srinivasa Thirumalachar, -+Masahiro Adegawa, Marc Esipovich, Ted Kline, Steve Lord, Andi Kleen, -+Sonic Zhang. -+.br -+Keith Owens - kdb maintainer. -+.SH SEE ALSO -+.P -+linux/Documentation/kdb/kdb_{bp,bt,env,ll,md,ps,rd,sr,ss}.man ---- /dev/null -+++ b/Documentation/kdb/kdb_bp.man -@@ -0,0 +1,197 @@ -+.TH BD 1 "July 12, 2004" -+.SH NAME -+bp, bpa, bph, bpha, bd, bc, be, bl \- breakpoint commands -+.SH SYNOPSIS -+bp \fIaddress-expression\fP -+.LP -+bpa \fIaddress-expression\fP -+.LP -+bph \fIaddress-expression\fP [\f(CWDATAR|DATAW|DATAA|IO\fP [\fIlength\fP]] -+.LP -+bpha \fIaddress-expression\fP [\f(CWDATAR|DATAW|DATAA|IO\fP [\fIlength\fP]] -+.LP -+bd \fIbreakpoint-number\fP -+.LP -+bc \fIbreakpoint-number\fP -+.LP -+be \fIbreakpoint-number\fP -+.LP -+bl -+.SH DESCRIPTION -+.hy 0 -+The -+.B bp -+family of commands are used to establish a breakpoint. -+The \fIaddress-expression\fP may be a numeric value (decimal or -+hexidecimal), a symbol name, a register name preceeded by a -+percent symbol '%', or a simple expression consisting of a -+symbol name, an addition or subtraction character and a numeric -+value (decimal or hexidecimal). -+.P -+\fBbph\fP and \fBbpha\fP will force the use of a hardware register, provided -+the processor architecture supports them. -+.P -+The \fIaddress-expression\fP may also consist of a single -+asterisk '*' symbol which indicates that the command should -+operate on all existing breakpoints (valid only for \fBbc\fP, -+\fBbd\fP and \fBbe\fP). -+.P -+Four different types of -+breakpoints may be set: -+ -+.TP 8 -+Instruction -+Causes the kernel debugger to be invoked from the debug exception -+path when an instruction is fetched from the specified address. This -+is the default if no other type of breakpoint is requested or when -+the \fBbp\fP command is used. -+ -+.TP 8 -+DATAR -+Causes the kernel debugger to be entered when data of length -+\fIlength\fP is read from or written to the specified address. -+This type of breakpoint must use a processor debug register which -+places an architecture dependent limit on the number of data and I/O -+breakpoints that may be established. On arm mode XScale platform -+(thumb mode is not supported yet), -+debugger is triggered by reading from the specified address. -+The \fBbph\fP or \fBbpha\fP commands must be used. 
-+ -+.TP 8 -+DATAW -+Enters the kernel debugger when data of length \fIlength\fP -+is written to the specified address. \fIlength\fP defaults -+to four bytes if it is not explicitly specified. -+Note that the processor may have already overwritten the prior data at -+the breakpoint location before the kernel debugger is invoked. -+The prior data should be saved before establishing the breakpoint, if -+required. On arm mode XScale platform, the debugger is triggered -+after having overwritten the specified address. -+The \fBbph\fP or \fBbpha\fP commands must be used. -+ -+.TP 8 -+IO -+Enters the kernel debugger when an \fBin\fP or \fBout\fP instruction -+targets the specified I/O address. The \fBbph\fP or \fBbpha\fP -+commands must be used. This type of breakpoint is not valid in -+arm mode XScale platform. -+ -+.TP 8 -+DATAA -+Enters the kernel debugger after the data in the specified address has -+been accessed (read or write), this option is only used in arm -+mode XScale platform. -+ -+.P -+The -+.B bpha -+command will establish a breakpoint on all processors in an -+SMP system. This command is not available in a uniprocessor -+kernel. -+.P -+The -+.B bd -+command will disable a breakpoint without removing it from the kernel -+debugger's breakpoint table. -+This can be used to keep breakpoints in the table without exceeding the -+architecture limit on breakpoint registers. -+A breakpoint-number of \fI*\fR will disable all break points. -+.P -+The -+.B be -+command will re-enable a disabled breakpoint. -+A breakpoint-number of \fI*\fR will enable all break points. -+.P -+The -+.B bc -+command will clear a breakpoint from the breakpoint table. -+A breakpoint-number of \fI*\fR will clear all break points. -+.P -+The -+.B bl -+command will list the existing set of breakpoints. -+.SH LIMITATIONS -+There is a compile time limit of sixteen entries in the -+breakpoint table at any one time. -+.P -+There are architecture dependent limits on the number of hardware -+breakpoints that can be set. -+.IP ix86 8 -+Four. -+.PD 0 -+.IP xscale 8 -+Two for instruction breakpoints and another two for data breakpoints. -+.PD 0 -+.IP ia64 8 -+? -+.PD 0 -+.IP sparc64 8 -+None. -+.PD 1 -+When issuing the "go" command after entering the debugger due to -+a breakpoint, kdb will silently perform a single step in order to -+reapply the breakpoint. The sparc64 port has some limitations on -+single stepping, which may limit where a breakpoint may be safely -+set. Please read the man page for \fBss\fP for more information. -+.SH ENVIRONMENT -+The breakpoint subsystem does not currently use any environment -+variables. -+.SH SMP CONSIDERATIONS -+Using -+.B bc -+is risky on SMP systems. -+If you clear a breakpoint when another cpu has hit that breakpoint but -+the hit has not been processed then it may not be recognised as a kdb -+breakpoint, usually resulting in incorrect program counters and kernel -+panics. -+It is safer to disable the breakpoint with -+.BR bd , -+then -+.B go -+to let any other processors that are waiting on the breakpoint -+clear. -+After all processors are clear of the disabled breakpoint then it is -+safe to clear it using -+.BR bc . -+.P -+Breakpoints which use the processor breakpoint registers -+are only established on the processor which is -+currently active. If you wish breakpoints to be universal -+use the -+.B bpa -+or -+.B bpha -+commands.
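-+.P -+As an illustration of the \fBbd\fR/\fBgo\fR/\fBbc\fR sequence described above (the breakpoint number 1 -+used here is only an example, use the number shown by \fBbl\fR on your -+system), a safe way to remove a breakpoint that other cpus may have hit -+is\ :- -+.nf -+ bd 1 -+ go -+.fi -+then, at the next entry to kdb, once no processor is still waiting on the -+disabled breakpoint\ :- -+.nf -+ bc 1 -+.fi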
-+.SH EXAMPLES -+.TP 8 -+bp schedule -+Sets an instruction breakpoint at the beginning of the -+function \fBschedule\fP. -+ -+.TP 8 -+bp schedule+0x12e -+Sets an instruction breakpoint at the instruction located -+at \fBschedule\fP+\fI0x12e\fP. -+ -+.TP 8 -+bph ttybuffer+0x24 dataw -+Sets a data write breakpoint at the location referenced by -+\fBttybuffer\fP+\fI0x24\fP for a length of four bytes. -+ -+.TP 8 -+bph 0xc0254010 datar 1 -+Establishes a data reference breakpoint at address \fB0xc0254010\fP -+for a length of one byte. -+ -+.TP 8 -+bp -+List current breakpoint table. -+ -+.TP 8 -+bd 0 -+Disable breakpoint #0. -+ -+.TP 8 -+bc * -+Clear all breakpoints. ---- /dev/null -+++ b/Documentation/kdb/kdb_bt.man -@@ -0,0 +1,315 @@ -+.TH BT 1 "July 20, 2007" -+.SH NAME -+bt \- Stack Traceback command -+.SH SYNOPSIS -+bt [ <stack-frame-address> ] -+.LP -+btp <pid> -+.LP -+btt <address> -+.LP -+bta [ DRSTZUIMA ] -+.LP -+btc [<cpu>] -+.SH DESCRIPTION -+.hy 0 -+The -+.B bt -+command is used to print a stack traceback. It uses the -+current registers (see \fBrd\fP command) to determine -+the starting context and attempts to provide a complete -+stack traceback for the active thread. If \fIstack-frame-address\fP -+is supplied, it is assumed to point to the start of a valid -+stack frame and the stack will be traced back from that -+point. -+On x86 architecture, \fIstack-frame-address\fP must be the stack address of a -+saved \fB%eip\fP (\fB%rip\fP for x86_64) value from a \fBcall\fP instruction. -+.P -+The \fBbtp\fP command will analyze the stack for the given -+process identification (see the \fBps\fP command). -+\fBbtp\fP sets the current process for any following register display or update -+commands. -+.P -+The \fBbtt\fP command will analyze the stack for the given task -+structure. -+It is exactly equivalent to \fBbtp\fR on the pid extracted from the -+task structure. -+\fBbtt\fP sets the current process for any following register display or update -+commands. -+.P -+The \fBbta\fP command lists the stack for all processes in the desired -+state. -+Without any parameters, \fBbta\fP gives a backtrace for all useful processes. -+If a parameter is specified, it is a single string consisting of the -+letters D, R, S, T, Z, U, I, M and A in any order. -+See the kdb \fBps\fR man page for more details. -+\fBbta\fP does not change the current process. -+.P -+The \fBbtc\fP command will analyze the stack for the current process on -+a specified cpu or, if no cpu number is supplied, for the current -+process on all cpus. -+It does not switch to the other cpus, instead it uses the task -+structures to identify and issue \fBbtt\fR against the current task on -+the desired cpus. -+\fBbtc\fP with no arguments does not change the current process. -+\fBbtc\fP with a cpu number sets the current process for any following register -+display or update commands. -+.P -+For each function, the stack trace prints at least two lines. -+The first line contains four or five fields\ :- -+.IP * 3 -+The pointer to the stack frame. -+.PD 0 -+.IP * 3 -+The current address within this frame. -+.IP * 3 -+The address converted to a function name (actually the first non-local -+label which is <= the address). -+.IP * 3 -+The offset of the address within the function. -+.IP * 3 -+Any parameters to the function.
-+.PD 1 -+.PP -+If environment variable NOSECT is set to 0 then the next line contains -+five fields which are designed to make it easier to match the trace -+against the kernel code\ :- -+.IP * 3 -+The module name that contains the address, "kernel" if it is in the -+base kernel. -+.PD 0 -+.IP * 3 -+The section name that contains the address (not available on 2.6 kernels). -+.IP * 3 -+The start address of the section (not available on 2.6 kernels). -+.IP * 3 -+The start address of the function. -+.IP * 3 -+The end address of the function (the first non-local label which is > -+the address). -+.PD 1 -+.PP -+If arguments are being converted to symbols, any argument which -+converts to a kernel or module address is printed as\ :- -+.IP * 3 -+Argument address. -+.PD 0 -+.IP * 3 -+The module name that contains the address, "kernel" if it is in the -+base kernel. -+.IP * 3 -+The symbol name the argument maps to. -+.IP * 3 -+The offset of the argument from the symbol, suppressed if 0. -+.PD 1 -+.P -+On architectures that use nested stacks, the backtrace will indicate a -+switch to a new stack by printing a line of equal signs and the type of -+stack. -+.SH MATCHING TRACE TO KERNEL CODE -+The command "objdump\ -S" will disassemble an object and, if the code -+was compiled with debugging (gcc flag -g), objdump will interleave the -+C source lines with the generated object. -+.PP -+A complete objdump of the kernel or a module is too big, normally you -+only want specific functions. -+By default objdump will only print the .text section but Linux uses -+other section names for executable code. -+When objdump prints relocatable objects (modules) it uses an offset of -+0 which is awkward to relate to the stack trace. -+The five fields which are printed for each function are designed to -+make it easier to match the stack trace against the kernel code using -+"objdump\ -S". -+.PP -+If the function is in the kernel then you need the section name, the -+start and end address of the function. The command is -+.PP -+.nf -+ objdump -S -j <section-name> \\ -+ --start-address=<function-start-address> \\ -+ --stop-address=<function-end-address> \\ -+ /usr/src/linux/vmlinux -+.fi -+.PP -+If the function is in a module then you need the section name, the -+start address of the section, the start and end address of the -+function, the module name. The command is -+.PP -+.nf -+ objdump -S -j <section-name> \\ -+ --adjust-vma=<section-start-address> \\ -+ --start-address=<function-start-address> \\ -+ --stop-address=<function-end-address> \\ -+ /path/to/module/<module-name>.o -+.fi -+.PP -+Unfortunately the 2.6 kernel does not provide the information required -+to locate the start of the section, which makes it very difficult to -+perform a reliable objdump on a module. -+.PP -+All addresses to objdump must be preceded by '0x' if they are in hex, -+objdump does not assume hex. -+The stack trace values are printed with leading '0x' to make it easy to -+run objdump. -+.SH LIMITATIONS -+Some architectures pass parameters in registers; ia64, x86_64 and i386 (with -+gcc flag -mregparm) fall into this category. -+On these architectures, the compiler may reuse input parameter registers as -+scratch space. -+For example, if a function takes a pointer to a structure and only accesses one -+field in that structure, the compiler may calculate the address of the field by -+adding a value to the input register. -+Once the input register has been updated, it no longer points to the -+start of the structure, but to some field within it.
-+This also occurs with array pointers, the compiler may update the input pointer -+directly, leaving it pointing to some element of the array instead of the start -+of the array. -+Always treat parameter values that have been passed in registers with extreme -+suspicion, the compiler may have changed the value. -+The x86 backtrace can generally identify register parameters that are no longer -+valid, it prints them as 'invalid' instead of as a misleading number. -+The ia64 backtrace cannot identify parameter registers that have been -+overwritten. -+.P -+x86 architectures do not have full unwind information in the kernel. -+The KDB backtrace on x86 performs code decomposition and analysis to track the -+frames on the call stack (including stack switches) and to locate parameters. -+If this code analysis does not yield a valid result, KDB falls back on the old -+method of scanning the process stack and printing anything that looks like a -+kernel address. -+This old method is unreliable (it produces lots of false positives in the -+trace) and cannot track parameters at all, so no parameters are printed. -+If you get an x86 backtrace that falls back to the old method, read -+Documentation/kdb/bt_x86 and follow the steps listed to get diagnostics and to -+submit a bug report. -+.P -+There are a lot of functions in the kernel which take some arguments then do -+nothing except call another function with the same initial arguments, sometimes -+adding parameters at the end. For example\ :- -+.nf -+.na -+.ft CW -+ -+int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen, -+ void __user *oldval, size_t __user *oldlenp, -+ void __user *newval, size_t newlen) -+{ -+ int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp, -+ newval, newlen); -+ -+ if (ret == 1) -+ rt_cache_flush(0); -+ -+ return ret; -+} -+.ad b -+.fi -+.P -+ipv4_doint_and_flush_strategy() passes all its parameters directly to -+devinet_conf_sysctl() and makes no other use of those parameters, -+so ipv4_doint_and_flush_strategy is a 'pass through' function. -+The x86_64 calling sequence mandates that the first 6 parameters are passed in -+registers, with other parameters being passed on stack. -+The i386 calling sequence with -mregparm=3 (which is the default since about -+2.6.18) passes the first 3 parameters in registers, with other parameters being -+passed on stack. -+The only exceptions to the above calling sequence are for functions declared as -+asmlinkage or functions with a variable number of parameters (e.g. printk). -+.P -+When a pass through function calls another function, the first 3 (i386) or 6 -+(x86_64) parameters are already in their correct registers so the pass through -+function does not need to access the registers, which means that there are no -+references to these registers in the assembler code for the function. -+Users still want to see those arguments so the x86 backtrace has to assume that -+if\ :- -+.IP * 2 -+There are parameters passed on the stack and -+.IP * -+There are no code references to parameters passed in registers and -+.IP * -+The function is not a known asmlinkage or variadic function, then -+there are pass through register arguments. -+.P -+The x86 backtrace will warn you when it makes this assumption, like this\ :- -+.nf -+.na -+.ft CW -+ -+ <function-name> has memory parameters but no register parameters.
-+ Assuming it is a 'pass through' function that does not refer to its register -+ parameters and setting register parameters -+.ad b -+.fi -+.P -+The above 3 line message is only printed once, any future assumptions will -+print a shorter message. -+.P -+The \fBbt\fP command may print more or less arguments for a function -+than that function accepts. -+For x86, trailing arguments that are passed in but not used by the function -+will not be printed, resulting in fewer arguments than expected. -+For ia64, the hardware does not distinguish between input and local registers, -+some local registers may be printed as function arguments, resulting in more -+arguments than expected. -+.P -+On i386, 64 bit arguments (long long) occupy two adjacent 32 bit fields. -+There is no way for KDB to tell that this has occurred, so 64 bit arguments -+will be printed as two separate 32 bit arguments. -+.SH ENVIRONMENT -+The \fBBTARGS\fP environment variable governs the maximum number -+of arguments that are printed for any single function. -+On IA64 hardware, there is no difference between input and local registers, the -+first \fBBTARGS\fP registers are printed, up to the total limit of input plus -+local registers. -+Use a large value for \fBBTARGS\fP if you want to see the local registers on -+IA64. -+.PP -+If the \fBBTSP\fP environment variable is non-zero then the entire backtrace is -+printed, otherwise only the backtrace to the point of the last interrupt is -+printed. -+Printing the entire backtrace with 'set\ BTSP\ 1' is useful for diagnosing -+problems with the backtrace algorithms. -+In addition, when BTSP is non-zero, each backtrace frame may print extra lines -+giving information about the stack pointers, this is architecture specific. -+.PP -+If the \fBBTSYMARG\fP environment variable is non-zero then any -+arguments that fall within the kernel or modules are converted to symbols. -+.PP -+If the \fBNOSECT\fP environment variable is non-zero then the -+section information is suppressed. -+The default is NOSECT=1 so section data is suppressed; use set\ NOSECT=0 -+to see section information. -+.PP -+The \fBBTAPROMPT\fP environment variable controls the prompt after each -+process is listed by the \fBbta\fP command. If \fBBTAPROMPT\fP is not -+set or is non-zero then \fBbta\fP issues a prompt after each process is -+listed. If \fBBTAPROMPT\fP is set to zero then no prompt is issued and -+all processes are listed without human intervention. -+.PP -+\fBbt\fR with no parameters uses the \fBPS\fR environment variable, see -+the kdb \fBps\fR man page. -+.SH SMP CONSIDERATIONS -+None. -+.SH EXAMPLES -+.nf -+.na -+.ft CW -+[0]kdb> bt -+Stack traceback for pid 2873 -+0xc2efc0f0 2873 2836 1 0 R 0xc2efc2a0 *mount -+esp eip Function (args) -+0xf65a3c88 0xc0201f9f xfs_mount_validate_sb (0xf68bcb08, 0xf68bcb48, 0x0) -+0xf65a3c94 0xc0202f17 xfs_readsb+0x9d (0xf68bcb08, 0x0) -+0xf65a3cc0 0xc020a72e xfs_mount+0x21d (invalid, 0xf68bc2f0, 0x0) -+0xf65a3cf4 0xc021a84a vfs_mount+0x1a (invalid) -+0xf65a3d04 0xc021a721 xfs_fs_fill_super+0x76 (0xf76b6200, invalid, invalid) -+0xf65a3d78 0xc015ad81 get_sb_bdev+0xd4 (invalid, invalid, invalid, 0xf7257000, 0xc021a6ab, 0xf7594b38) -+ xfs_fs_get_sb has memory parameters but no register parameters. 
-+ Assuming it is a 'pass through' function that does not refer to its register -+ parameters and setting 3 register parameters -+0xf65a3db4 0xc0219a3a xfs_fs_get_sb+0x21 (invalid, invalid, invalid, 0xf7257000, 0xf7594b38) -+0xf65a3dcc 0xc015a992 vfs_kern_mount+0x41 (0xc04847e0, 0x0, 0xf68e9000, 0xf7257000) -+0xf65a3df0 0xc015aa11 do_kern_mount+0x38 (0xf6818000, 0x0, 0xf68e9000, 0xf7257000) -+0xf65a3e10 0xc016c8b0 do_mount+0x5df (0xf68e9000, 0xf65d6000, 0xf6818000, 0xc0ed0000, 0xf7257000) -+0xf65a3f90 0xc016c996 sys_mount+0x6f (0x8069b50, 0x8069b60, 0x8069b70, 0xc0ed0000, 0x8069ba0) -+0xf65a3fb4 0xc0102646 sysenter_past_esp+0x5f (invalid, invalid, invalid, 0x73, 0x246, 0xbfe52f50) ---- /dev/null -+++ b/Documentation/kdb/kdb_env.man -@@ -0,0 +1,46 @@ -+.TH ENV 1 "24 September 2000" -+.SH NAME -+env, set \- Environment manipulation commands -+.SH SYNOPSIS -+env -+.LP -+set \fIenvironment-variable\fP=\fIvalue\fP -+.SH DESCRIPTION -+The kernel debugger contains an environment which contains a series -+of name-value pairs. Some environment variables are known to the -+various kernel debugger commands and have specific meaning to the -+command; such are enumerated in the respective reference material. -+.P -+Arbitrary environment variables may be created and used with -+many commands (those which require an \fIaddress-expression\fP). -+.P -+The -+.B env -+command is used to display the current environment. -+.P -+The -+.B set -+command is used to alter an existing environment variable or -+establish a new environment variable. -+.SH LIMITATIONS -+There is a compile-time limit of 33 environment variables. -+.P -+There is a compile-time limit of 512 bytes (\fBKDB_ENVBUFSIZE\fP) -+of heap space available for new environment variables and for -+environment variables changed from their compile-time values. -+.SH ENVIRONMENT -+These commands explicitly manipulate the environment. -+.SH SMP CONSIDERATIONS -+None. -+.SH USER SETTINGS -+You can include "set" commands in kdb/kdb_cmds (see kdb.mm) to define -+your environment variables at kernel startup. -+.SH EXAMPLES -+.TP 8 -+env -+Display current environment settings. -+ -+.TP 8 -+set IDCOUNT=100 -+Set the number of lines to display for the \fBid\fP command -+to the value \fI100\fP. ---- /dev/null -+++ b/Documentation/kdb/kdb_ll.man -@@ -0,0 +1,134 @@ -+.TH LL 1 "19 April 1999" -+.SH NAME -+ll \- Linked List examination -+.SH SYNOPSIS -+ll <first-element> <linked-list-offset> <command> -+.SH DESCRIPTION -+The -+.B ll -+command is used to execute a single command repetitively for -+each element of a linked list. -+.P -+The command specified by <command> will be executed with a single -+argument, the address of the current element. -+.SH LIMITATIONS -+Be careful if using this command recursively. -+.SH ENVIRONMENT -+None. -+.SH SMP CONSIDERATIONS -+None.
-+.SH EXAMPLES -+.nf -+.na -+.ft CW -+# cd modules -+# insmod kdbm_vm.o -+# Entering kdb on processor 0 due to PAUSE -+kdb> ps -+Task Addr Pid Parent cpu lcpu Tss Command -+0xc03de000 0000000001 0000000000 0000 0000 0xc03de2d4 init -+0xc0090000 0000000002 0000000001 0000 0000 0xc00902d4 kflushd -+0xc000e000 0000000003 0000000001 0000 0000 0xc000e2d4 kpiod -+0xc000c000 0000000004 0000000001 0000 0000 0xc000c2d4 kswapd -+0xc7de2000 0000000056 0000000001 0000 0000 0xc7de22d4 kerneld -+0xc7d3a000 0000000179 0000000001 0000 0000 0xc7d3a2d4 syslogd -+0xc7a7e000 0000000188 0000000001 0000 0000 0xc7a7e2d4 klogd -+0xc7a04000 0000000199 0000000001 0000 0000 0xc7a042d4 atd -+0xc7b84000 0000000210 0000000001 0000 0000 0xc7b842d4 crond -+0xc79d6000 0000000221 0000000001 0000 0000 0xc79d62d4 portmap -+0xc798e000 0000000232 0000000001 0000 0000 0xc798e2d4 snmpd -+0xc7904000 0000000244 0000000001 0000 0000 0xc79042d4 inetd -+0xc78fc000 0000000255 0000000001 0000 0000 0xc78fc2d4 lpd -+0xc77ec000 0000000270 0000000001 0000 0000 0xc77ec2d4 sendmail -+0xc77b8000 0000000282 0000000001 0000 0000 0xc77b82d4 gpm -+0xc7716000 0000000300 0000000001 0000 0000 0xc77162d4 smbd -+0xc7ee2000 0000000322 0000000001 0000 0000 0xc7ee22d4 mingetty -+0xc7d6e000 0000000323 0000000001 0000 0000 0xc7d6e2d4 login -+0xc778c000 0000000324 0000000001 0000 0000 0xc778c2d4 mingetty -+0xc78b6000 0000000325 0000000001 0000 0000 0xc78b62d4 mingetty -+0xc77e8000 0000000326 0000000001 0000 0000 0xc77e82d4 mingetty -+0xc7708000 0000000327 0000000001 0000 0000 0xc77082d4 mingetty -+0xc770e000 0000000328 0000000001 0000 0000 0xc770e2d4 mingetty -+0xc76b0000 0000000330 0000000001 0000 0000 0xc76b02d4 update -+0xc7592000 0000000331 0000000323 0000 0000 0xc75922d4 ksh -+0xc7546000 0000000338 0000000331 0000 0000 0xc75462d4 su -+0xc74dc000 0000000339 0000000338 0000 0000 0xc74dc2d4 ksh -+kdb> md 0xc74dc2d4 -+c74dc2d4: 00000000 c74de000 00000018 00000000 .....`MG........ -+c74dc2e4: 00000000 00000000 00000000 074de000 .............`M. -+c74dc2f4: c01123ff 00000000 00000000 00000000 #.@............ -+c74dc304: 00000000 00000000 c74dded0 00000000 ........P^MG.... -+[omitted] -+c74dc474: 00000000 00000000 00000000 00000000 ................ -+c74dc484: 00000000 c7c15d00 c77b0900 c026fbe0 .....]AG..{G`{&@ -+c74dc494: 00000000 c76c2000 00000000 00000000 ..... lG........ -+c74dc4a4: 00000000 00000000 00000000 c74dc4ac ............,DMG -+kdb> md 0xc026fbe0 -+c026fbe0: c0262b60 00000000 c7594940 c74de000 @HYG....@IYG.`MG -+[omitted] -+kdb> md 0xc0262b60 -+c0262b60: c0266660 08048000 0804c000 c7bec360 `f&@.....@..`C>G -+kdb> ll c0262b60 12 md -+c0262b60: c0266660 08048000 0804c000 c7bec360 `f&@.....@..`C>G -+c7bec360: c0266660 0804c000 0804d000 c7becb20 `f&@.@...P.. K>G -+c7becb20: c0266660 0804d000 08050000 c7bec3a0 `f&@.P...... C>G -+c7bec3a0: c0266660 40000000 40009000 c7bec420 `f&@...@...@ D>G -+c7bec420: c0266660 40009000 4000b000 c7bec4a0 `f&@...@.0.@ D>G -+c7bec4a0: c0266660 4000b000 40010000 c7bec8e0 `f&@.0.@...@`H>G -+c7bec8e0: c0266660 40010000 400a1000 c7becbe0 `f&@...@...@`K>G -+c7becbe0: c0266660 400a1000 400a8000 c7becc60 `f&@...@...@`L>G -+c7becc60: c0266660 400a8000 400b4000 c7952300 `f&@...@.@.@.#.G -+c7952300: c0266660 400b5000 400bc000 c79521c0 `f&@.P.@.@.@@!.G -+c79521c0: c0266660 400bc000 400bd000 c7bec6e0 `f&@.@.@.P.@`F>G -+c7bec6e0: c0266660 bffff000 c0000000 00000000 `f&@.p?...@.... 
-+kdb> -+kdb> ll c0262b60 12 vm -+struct vm_area_struct at 0xc0262b60 for 56 bytes -+vm_start = 0x8048000 vm_end = 0x804c000 -+page_prot = 0x25 avl_height = 2244 vm_offset = 0x0 -+flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE -+struct vm_area_struct at 0xc7bec360 for 56 bytes -+vm_start = 0x804c000 vm_end = 0x804d000 -+page_prot = 0x25 avl_height = -31808 vm_offset = 0x3000 -+flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE -+struct vm_area_struct at 0xc7becb20 for 56 bytes -+vm_start = 0x804d000 vm_end = 0x8050000 -+page_prot = 0x25 avl_height = -28664 vm_offset = 0x0 -+flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC -+struct vm_area_struct at 0xc7bec3a0 for 56 bytes -+vm_start = 0x40000000 vm_end = 0x40009000 -+page_prot = 0x25 avl_height = 30126 vm_offset = 0x0 -+flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE -+struct vm_area_struct at 0xc7bec420 for 56 bytes -+vm_start = 0x40009000 vm_end = 0x4000b000 -+page_prot = 0x25 avl_height = 30126 vm_offset = 0x8000 -+flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE -+struct vm_area_struct at 0xc7bec4a0 for 56 bytes -+vm_start = 0x4000b000 vm_end = 0x40010000 -+page_prot = 0x25 avl_height = 26853 vm_offset = 0x0 -+flags: READ MAYREAD MAYWRITE MAYEXEC -+struct vm_area_struct at 0xc7bec8e0 for 56 bytes -+vm_start = 0x40010000 vm_end = 0x400a1000 -+page_prot = 0x25 avl_height = 2244 vm_offset = 0x0 -+flags: READ EXEC MAYREAD MAYWRITE MAYEXEC -+struct vm_area_struct at 0xc7becbe0 for 56 bytes -+vm_start = 0x400a1000 vm_end = 0x400a8000 -+page_prot = 0x25 avl_height = 30126 vm_offset = 0x90000 -+flags: READ WRITE MAYREAD MAYWRITE MAYEXEC -+struct vm_area_struct at 0xc7becc60 for 56 bytes -+vm_start = 0x400a8000 vm_end = 0x400b4000 -+page_prot = 0x25 avl_height = 2244 vm_offset = 0x0 -+flags: READ WRITE MAYREAD MAYWRITE MAYEXEC -+struct vm_area_struct at 0xc7952300 for 56 bytes -+vm_start = 0x400b5000 vm_end = 0x400bc000 -+page_prot = 0x25 avl_height = 30126 vm_offset = 0x0 -+flags: READ EXEC MAYREAD MAYWRITE MAYEXEC -+struct vm_area_struct at 0xc79521c0 for 56 bytes -+vm_start = 0x400bc000 vm_end = 0x400bd000 -+page_prot = 0x25 avl_height = -16344 vm_offset = 0x6000 -+flags: READ WRITE MAYREAD MAYWRITE MAYEXEC -+struct vm_area_struct at 0xc7bec6e0 for 56 bytes -+vm_start = 0xbffff000 vm_end = 0xc0000000 -+page_prot = 0x25 avl_height = 2244 vm_offset = 0x0 -+flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC GROWSDOWN -+kdb> ---- /dev/null -+++ b/Documentation/kdb/kdb_md.man -@@ -0,0 +1,136 @@ -+.TH MD 1 "August 4, 2004" -+.SH NAME -+md, mdWcN, mdr, mds, mm, mmW\- Memory manipulation commands -+.SH SYNOPSIS -+md [ \fIaddress-expression\fP [ \fIline-count\fP [\fIoutput-radix\fP ] ] ] -+.LP -+md\fIW\fRc\fIn\fR [ \fIaddress-expression\fP [ \fIline-count\fP [\fIoutput-radix\fP ] ] ] -+.LP -+mdp \fIphysical-address-expression\fP,\fIbytes\fP -+.LP -+mdr \fIaddress-expression\fP,\fIbytes\fP -+.LP -+mds [ \fIaddress-expression\fP [ \fIline-count\fP [\fIoutput-radix\fP ] ] ] -+.LP -+mm \fIaddress-expression\fP \fInew-contents\fP -+.LP -+mm\fIW\fR \fIaddress-expression\fP \fInew-contents\fP -+.SH DESCRIPTION -+The -+.B md -+command is used to display the contents of memory. 
-+The \fIaddress-expression\fP may be a numeric value (decimal or -+hexidecimal), a symbol name, a register name preceeded by one or more -+percent symbols '%', an environment variable name preceeded by -+a currency symbol '$', or a simple expression consisting of a -+symbol name, an addition or subtraction character and a numeric -+value (decimal or hexidecimal). -+.P -+If an address is specified and the \fIline-count\fP or \fIradix\fP arguments -+are omitted, they default to the values of the \fBMDCOUNT\fP and \fBRADIX\fP -+environment variables respectively. If the \fBMDCOUNT\fP or \fBRADIX\fP -+environment variables are unset, the appropriate defaults will be used [see -+\fBENVIRONMENT\fP below]. If no address is specified then md resumes -+after the last address printed, using the previous values of count and -+radix. The start address is rounded down to a multiple of the -+BYTESPERWORD (md) or width (md\fIW\fR). -+.P -+md uses the current value of environment variable \fBBYTESPERWORD\fP to -+read the data. When reading hardware registers that require special -+widths, it is more convenient to use md\fIW\fRc\fIn\fR where \fIW\fR is -+the width for this command and \fRc\fIn\fR is the number of entries to -+read. For example, md1c20 reads 20 bytes, 1 at a time. To continue -+printing just type md, the width and count apply to following md -+commands with no parameters. \fBNote:\fR The count is the number of -+repeats of the width, unlike MDCOUNT which gives the number of md lines -+to print. -+.P -+The -+.B mdp -+command displays the contents of physical memory, starting at the -+specified physical address for the specified number of bytes. -+The address is preceded by 'phys'. -+.P -+The -+.B mdr -+command displays the raw contents of memory, starting at the specified -+address for the specified number of bytes. -+The data is printed in one line without a leading address and no -+trailing character conversion. -+.B mdr -+is intended for interfacing with external debuggers, it is of little -+use to humans. -+.P -+The -+.B mds -+command displays the contents of memory one word per line and -+attempts to correlate the contents of each word with a symbol -+in the symbol table. If no symbol is found, the ascii representation -+of the word is printed, otherwise the symbol name and offset from -+symbol value are printed. -+By default the section data is printed for kernel symbols. -+.P -+The -+.B mm -+and -+\fBmm\fIW\fR -+commands allow modification of memory. The bytes at the address -+represented by \fIaddress-expression\fP are changed to -+\fInew-contents\fP. \fInew-contents\fP is allowed to be an -+\fIaddress-expression\fP. -+.B mm -+changes a machine word, \fBmm\fIW\fR changes \fIW\fR bytes at that -+address. -+.SH LIMITATIONS -+None. -+.SH ENVIRONMENT -+.TP 8 -+MDCOUNT -+This environment variable (default=8) defines the number of lines -+that will be displayed by each invocation of the \fBmd\fP command. -+ -+.TP 8 -+RADIX -+This environment variable (default=16) defines the radix used to -+print the memory contents. -+ -+.TP 8 -+BYTESPERWORD -+This environment variable (default=4) selects the width of output -+data when printing memory contents. Select the value two to get -+16-bit word output, select the value one to get byte output. -+ -+.TP 8 -+LINES -+This environment variable governs the number of lines of output -+that will be presented before the kernel debugger built-in pager -+pauses the output. 
This variable only affects the functioning -+of the \fBmd\fP and \fBmds\fP if the \fBMDCOUNT\fP variable -+is set to a value greater than the \fBLINES\fP variable. -+ -+.TP 8 -+NOSECT -+If the \fBNOSECT\fP environment variable is non-zero then the -+section information is suppressed. -+The default is NOSECT=1 so section data is suppressed; use set\ NOSECT=0 -+to see section information. -+.SH SMP CONSIDERATIONS -+None. -+.SH EXAMPLES -+.TP 8 -+md %edx -+Display memory starting at the address contained in register \fB%edx\fP. -+ -+.TP 8 -+mds %esp -+Display stack contents symbolically. This command is quite useful -+in manual stack traceback. -+ -+.TP 8 -+mm 0xc0252110 0x25 -+Change the memory location at 0xc0252110 to the value 0x25. -+ -+.TP 8 -+md chrdev_table 15 -+Display 15 lines (at 16 bytes per line) starting at address -+represented by the symbol \fIchrdev_table\fP. ---- /dev/null -+++ b/Documentation/kdb/kdb_ps.man -@@ -0,0 +1,96 @@ -+.TH PS 1 "September 14, 2004" -+.SH NAME -+ps \- Display processes -+.SH SYNOPSIS -+ps [ DRSTCZEUIMA ] -+.SH DESCRIPTION -+The -+.B ps -+command displays the status of all processes in the desired state. -+This command does not take any locks (all cpus should be frozen while -+kdb is running) so it can safely be used to debug lock problems with -+the process table. -+.P -+Without any parameters, \fBps\fP displays all the interesting -+processes, excluding idle tasks and sleeping system daemons. -+If a parameter is specified, it is a single string consisting of the -+letters D, R, S, T, C, Z, E, U, I and M, in any order. -+Each letter selects processes in a specific state, when multiple -+letters are specified, a process will be displayed if it is in any of -+the specified states. -+The states are\ :- -+.P -+.DS -+.TS -+box, center; -+l | l -+l | l. -+D Uninterruptible sleep -+R Running -+S Interruptible sleep -+T Stopped -+C Traced -+Z Zombie -+E Dead -+U Unrunnable -+I Idle task -+M Sleeping system daemon -+A All -+.TE -+.DE -+.P -+For state R (running), the process may not be on a cpu at the moment, -+but it is ready to run. -+The header line above the backtrace contains '1' in the fourth field if -+the process is actually on a cpu. -+.P -+The idle task is run on each cpu when there is no work for that cpu to do. -+Unless the idle task is servicing an interrupt, there is no point in -+printing the idle task. -+An idle task that is not servicing an interrupt is marked as state I, -+while servicing an interrupt it is in state R. -+By default, idle tasks are not printed, use \fBps\ I\fR to print them. -+If the idle tasks are not being printed, the start of the \fBps\fR -+output contains a list of which cpus are idle. -+.P -+Each cpu has one or more system daemons to handle per cpu work such as -+soft irqs. -+A system daemon (identified by a NULL mm pointer) that is sleeping is -+marked as state M. -+These processes rarely have any useful data and generate a lot of -+output on large machines, so sleeping system daemons are not printed by -+default. -+Use \fBps\ M\fR to print them. -+.P -+At the start of the \fBps\fR output is a line giving the cpu status, -+see the kdb \fBcpu\fR command. -+.SH LIMITATIONS -+None. -+.SH ENVIRONMENT -+.TP 8 -+PS -+This environment variable (default=DRSTCZEU) is used when \fBps\fR -+is issued with no parameters. -+ -+.SH SMP CONSIDERATIONS -+None. -+.SH EXAMPLES -+.TP 8 -+\fBps\fR -+displays the useful tasks, suppressing idle tasks and sleeping -+system daemons.
-+ -+.TP 8 -+\fBps\ RD\fR -+displays only tasks that are running or are in an uninterruptible -+sleep. -+ -+.TP 8 -+\fBps\ DRSTCZEUIM\fR -+displays all tasks. -+ -+.TP 8 -+\fBps\ A\fR -+displays all tasks. -+This is easier than remembering DRSTCZEUIM. -+ ---- /dev/null -+++ b/Documentation/kdb/kdb_rd.man -@@ -0,0 +1,170 @@ -+.TH RD 1 "September 20, 2005" -+.SH NAME -+rd, rm \- Register manipulation commands -+.SH SYNOPSIS -+rd [[c [n]]|d|u] -+.LP -+rm \fIregister-name\fP \fInew-contents\fP -+.LP -+ef
-+.SH DESCRIPTION -+The -+.B rd -+command is used to display the contents of processor and coprocessor registers. -+Without any arguments, the rd command displays the contents of the general -+register set at the point at which the kernel debugger was entered. -+If the bt* or pid commands have been used to change the current process then -+.B rd -+and -+.B rm -+may not be able to display any registers. -+The 'n' argument is only used on the XScale platform to identify the desired -+coprocessor number, while the 'd' option is not valid on the XScale platform. -+.P -+On IA32 and IA64, with the 'c' argument, the processor control registers -+%cr0, %cr1, %cr2 and %cr4 are displayed, while with the 'd' argument -+the processor debug registers are displayed. If the 'u' argument -+is supplied, the registers for the current task as of the last -+time the current task entered the kernel are displayed. -+.P -+On XScale, the 'c' argument is used to display either all coprocessor control -+registers or the coprocessor registers selected by -+argument 'n'. Argument 'u' is used to display the -+registers for the current task as of the last time the current task -+entered the kernel. Argument 'd' is not supported. -+.P -+On ix86, the -+.B rm -+command allows modification of a register. The following -+register names are valid: \fB%eax\fP, \fB%ebx\fP, \fB%ecx\fP, -+\fB%edx\fP, \fB%esi\fP, \fB%edi\fP, \fB%esp\fP, \fB%eip\fP, -+and \fB%ebp\fP. Note that if two '%' symbols are used -+consecutively, the register set displayed by the 'u' argument -+to the \fBrd\fP command is modified. -+.P -+The debug registers, \fBdr0\fP through \fBdr3\fP and both -+\fBdr6\fP and \fBdr7\fP, can also be modified with the \fBrm\fP -+command. -+.P -+On sparc64, the valid registers are named \fB%g0\fP through -+\fB%g7\fP, \fB%l0\fP through \fB%l7\fP, \fB%o0\fP through -+\fB%o7\fP, and \fB%i0\fP through \fB%i7\fP, with the exceptions -+that \fB%o6\fP is called \fB%sp\fP and that \fB%i6\fP is called -+\fB%fp\fP. The registers \fB%tstate\fP, \fB%tpc\fP, \fB%tnpc\fP, -+\fB%y\fP, and \fB%fprs\fP provide state information at the time -+the system entered kdb. Additionally, when viewing registers, two -+convenience names are provided: \fB%&regs\fP shows the -+address on the stack of the current registers, and \fB%csp\fP -+shows the current stack pointer within kdb itself. -+.P -+On XScale, both the cpu registers and most coprocessor -+registers can be modified. \fIregister-name\fP can be one of -+r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, -+r15 or cpsr to address the cpu registers. For the coprocessor registers on XScale, -+either an alias name or \fICpcc[CRndd[CRmbb[Opaa]]]\fP can be used to address -+the register in coprocessor cc with CRn=dd, CRm=bb and opcode2=aa. Each of aa, bb, cc and dd can be -+1 or 2 decimal digits; the default value is 0 when any of them is omitted. The names -+acc0_h and acc0_l are used to identify the high byte and -+low word of the accumulator in coprocessor 0. -+.P -+On IA64, the parameter to -+.B rd -+can be d (debug registers), u (user registers at most recent entry to kernel), -+i (interrupt registers), %isr (current interrupt status), s (stacked -+registers), k (kernel registers).
You can also specify these individual -+registers - -+psr, -+ifs, -+ip, -+unat, -+pfs, -+rsc, -+rnat, -+bsps, -+pr, -+ldrs, -+ccv, -+fpsr, -+b0, -+b6, -+b7, -+r1, -+r2, -+r3, -+r8, -+r9, -+r10, -+r11, -+r12, -+r13, -+r14, -+r15, -+r16, -+r17, -+r18, -+r19, -+r20, -+r21, -+r22, -+r23, -+r24, -+r25, -+r26, -+r27, -+r28, -+r29, -+r30, -+r31. -+.B rm -+can change any of the individual registers or the stacked registers. -+.P -+The -+.B ef -+command displays an exception frame at the specified address. -+.SH LIMITATIONS -+Currently the \fBrm\fP command will not allow modification of the -+control registers. -+.P -+Currently neither the \fBrd\fP command nor the \fBrm\fP command will -+display or modify the model specific registers on the Pentium -+and Pentium Pro families. -+.SH ENVIRONMENT -+None. -+.SH SMP CONSIDERATIONS -+None. -+.SH EXAMPLES -+.TP 8 -+rd -+Display general register set from kdb's current task. -+ -+.TP 8 -+rd c 0 -+Display coprocessor 0 registers. -+ -+.TP 8 -+rm %eax 0 -+Set the contents of \fB%eax\fP to zero. This will be the -+value of %eax when kdb returns from the condition which -+invoked it. -+ -+.TP 8 -+rm %%eax 0 -+Set the value of the \fB%eax\fP register to zero. This will -+be the value the user-mode application will see upon returning -+from the kernel. -+ -+.TP 8 -+rm %acc0_h 0 -+Set the contents of high byte of accumulator to zero. -+ -+.TP 8 -+rm dr0 0xc1287220 -+Set the value of the \fBdr0\fB register to \f(CW0xc1287220\fP. -+ -+.TP 8 -+rm %InVLD_BTB 0 -+Write 0 to coprocessor 15 register with CRn=7, CRm=5, opcode2=6. -+ -+.TP 8 -+rm %CP15CRn7CRm5Op6 0 -+Same with above. ---- /dev/null -+++ b/Documentation/kdb/kdb_sr.man -@@ -0,0 +1,68 @@ -+.TH SR 1 "7 October 2002" -+.SH NAME -+sr \- invoke sysrq commands from kdb -+.SH SYNOPSIS -+sr \fIx\fP -+.SH DESCRIPTION -+.hy 0 -+The -+.B sr -+command invokes the existing sysrq handler code in the kernel. -+This command takes a single character which is passed to sysrq -+processing, as if you had entered the sysrq key sequence followed by -+that character. -+.P -+.B Caveats: -+.P -+kdb will always call the sysrq code but sysrq may be disabled. -+If you expect to use sysrq functions during debugging then -+.IP "" -+echo "1" > /proc/sys/kernel/sysrq -+.P -+before starting the debug session. -+Alternatively issue -+.IP "" -+mm4 sysrq_enabled 1 -+.P -+during debugging. -+.P -+The sysrq code prints a heading using console loglevel 7 then reverts -+to the original loglevel for the rest of the sysrq processing. -+If the rest of the sysrq output is printed at a level below your -+current loglevel then you will not see the output on the kdb console, -+the output will only appear in the printk buffer. -+It is the user's responsibility to set the loglevel correctly if they -+want to see the sysrq output on the console. -+Issue -+.IP "" -+sr 7 -+.P -+before any other -+.B sr -+commands if you want to see the output on the console. -+You may even have to adjust the default message loglevel in order to -+see any output from -+.BR sr . -+See Documentation/sysctl/kernel.txt for details on setting console -+loglevels via /proc. -+You can also adjust the loglevel variables via kdb -+.BR mm ; -+on older kernels there are variables such as default_message_level, on -+newer kernels all the loglevel variables are in array console_printk, -+see kernel/printk.c for your kernel. 
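For comparison with the echo command shown above, here is a minimal user-space sketch (it is not part of the kdb patch) that enables sysrq and then exercises one handler by writing the command character to /proc/sysrq-trigger; these are the same handlers that the sr command reaches from inside kdb, using only the standard procfs paths named in this man page.

        /*
         * Hedged sketch: user-space counterpart of "sr x".  Assumes root
         * privileges and the standard /proc paths mentioned above.
         */
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        static void write_str(const char *path, const char *s, size_t len)
        {
                int fd = open(path, O_WRONLY);

                if (fd < 0) {
                        perror(path);
                        return;
                }
                if (write(fd, s, len) != (ssize_t)len)
                        perror(path);
                close(fd);
        }

        int main(int argc, char **argv)
        {
                const char *cmd = (argc > 1) ? argv[1] : "h"; /* 'h' prints sysrq help */

                write_str("/proc/sys/kernel/sysrq", "1\n", 2); /* like: echo "1" > ... */
                write_str("/proc/sysrq-trigger", cmd, 1);      /* invoke one handler */
                return 0;
        }

This only exercises the handlers on a running system; from inside kdb the sr command calls them directly, subject to the caveats that follow.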
-+.P -+Operations that require interrupt driven I/O can be invoked from kdb -+.BR sr , -+but they will not do anything until you type 'go' to exit from kdb -+(interrupts are disabled while in kdb). -+There is no guarantee that these operations will work; if the machine -+entered kdb because of an error then interrupt driven I/O may already -+be dead. -+Do not assume that -+.B sr\ s -+does anything useful. -+.P -+The sysrq handler uses locks and calls printk which also uses locks. -+If the sysrq handler or any of the sysrq functions have to wait for a -+lock then they will never return and kdb will appear to hang. -+Invoking sysrq code from kdb is inherently unsafe. ---- /dev/null -+++ b/Documentation/kdb/kdb_ss.man -@@ -0,0 +1,109 @@ -+.TH SS 1 "17 January 2002" -+.SH NAME -+ss, ssb \- Single Step -+.SH SYNOPSIS -+ss -+.LP -+ssb -+.SH DESCRIPTION -+The -+.B ss -+command is used to execute a single instruction and return -+to the kernel debugger. -+.P -+Both the instruction that was single-stepped and the next -+instruction to execute are printed. -+.P -+The \fBssb\fP command will execute instructions from the -+current value of the instruction pointer. Each instruction -+may be printed as it is executed, depending upon architecture; -+execution will stop at any instruction which would cause the flow -+of control to change (e.g. branch, call, interrupt instruction, -+return, etc.). -+.SH LIMITATIONS -+On sparc64, there are some circumstances where single-stepping -+can be dangerous. Do not single-step across an instruction which -+changes the interrupt-enable bit in %tstate. Do not single step -+through code which is invoked when entering or leaving the -+kernel, particularly any kernel entry code before %tl is set to -+0, or any kernel exit code after %tl is set to 1. -+.SH ENVIRONMENT -+None. -+.SH SMP CONSIDERATIONS -+Other processors are held in the kernel debugger when the instruction -+is traced. Single stepping through code that requires a lock which is -+in use by another processor is an exercise in futility; it will never -+succeed. -+.SH INTERRUPT CONSIDERATIONS -+When a kdb event occurs, one cpu (the initial cpu) enters kdb state. -+It uses a cross system interrupt to interrupt the -+other cpus and bring them all into kdb state. All cpus run with -+interrupts disabled while they are inside kdb; this prevents most -+external events from disturbing the kernel while kdb is running. -+.B Note: -+Disabled interrupts mean that any I/O that relies on interrupts cannot -+proceed while kdb is in control; devices can time out. The clock tick -+is also disabled; machines will lose track of time while they are -+inside kdb. -+.P -+Even with interrupts disabled, some non-maskable interrupt events -+will still occur; these can disturb the kernel while you are -+debugging it. The initial cpu will still accept NMI events, -+assuming that kdb was not entered for an NMI event. Any cpu -+where you use the SS or SSB commands will accept NMI events, even -+after the instruction has finished and the cpu is back in kdb. -+This is an unavoidable side effect of the fact that doing SS[B] -+requires the cpu to drop all the way out of kdb, including -+exiting from the NMI event that brought the cpu into kdb. Under -+normal circumstances the only NMI event is for the NMI oopser and -+that is kdb aware so it does not disturb the kernel while kdb is -+running. -+.P -+Sometimes doing SS or SSB on ix86 will allow one interrupt to proceed, -+even though the cpu is disabled for interrupts.
I have not been able -+to track this one down but I suspect that the interrupt was pending -+when kdb was entered and it runs when kdb exits through IRET even -+though the popped flags are marked as cli(). If any ix86 hardware -+expert can shed some light on this problem, please notify the kdb -+maintainer. -+.SH EXAMPLES -+.nf -+.na -+.ft CW -+kdb> bp gendisk_head datar 4 -+Data Access Breakpoint #0 at 0xc024ddf4 (gendisk_head) in dr0 is enabled on cpu 0 -+for 4 bytes -+kdb> go -+... -+[root@host /root]# cat /proc/partitions -+Entering kdb on processor 0 due to Debug Exception @ 0xc01845e3 -+Read/Write breakpoint #0 at 0xc024ddf4 -+[0]kdb> ssb -+sd_finish+0x7b: movzbl 0xc02565d4,%edx -+sd_finish+0x82: leal 0xf(%edx),%eax -+sd_finish+0x85: sarl $0x4,%eax -+sd_finish+0x88: movl 0xc0256654,%ecx -+sd_finish+0x8e: leal (%eax,%eax,4),%edx -+sd_finish+0x91: leal (%eax,%edx,2),%edx -+sd_finish+0x94: movl 0xc0251108,%eax -+sd_finish+0x99: movl %eax,0xffffffc(%ecx,%edx,4) -+sd_finish+0x9d: movl %ecx,0xc0251108 -+sd_finish+0xa3: xorl %ebx,%ebx -+sd_finish+0xa5: cmpb $0x0,0xc02565d4 -+[0]kdb> go -+[root@host /root]# -+ -+[0]kdb> ss -+sys_read: pushl %ebp -+SS trap at 0xc01274c1 -+sys_read+0x1: movl %esp,%ebp -+[0]kdb> ss -+sys_read+0x1: movl %esp,%ebp -+SS trap at 0xc01274c3 -+sys_read+0x3: subl $0xc,%esp -+[0]kdb> ss -+sys_read+0x3: subl $0xc,%esp -+SS trap at 0xc01274c6 -+sys_read+0x6: pushl %edi -+[0]kdb> -+ ---- /dev/null -+++ b/Documentation/kdb/slides -@@ -0,0 +1,1382 @@ -+#! /opt/cpg/bin/do-mgp -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%% -+%deffont "standard" tfont "comic.ttf" -+%deffont "thick" tfont "arialb.ttf" -+%deffont "typewriter" xfont "courier new-bold-r" -+%deffont "type2writer" xfont "arial narrow-bold-r" -+%% -+%% Default settings per each line numbers. -+%% -+#%default 1 leftfill, size 2, fore "black", back "LemonChiffon2", font "thick" -+%default 1 leftfill, size 2, fore "black", back "white", font "thick" -+%default 2 size 10, vgap 10, prefix " ", center -+%default 3 size 2, bar "gray70", vgap 10 -+%default 4 size 6, fore "black", vgap 30, prefix " ", font "standard", left -+%% -+%% Default settings that are applied to TAB-indented lines. -+%% -+%tab 1 size 4, vgap 35, prefix " ", icon arc "red" 40 -+%tab 2 size 4, vgap 20, prefix " ", icon delta3 "blue" 40 -+%tab 3 size 4, vgap 20, prefix " ", icon delta3 "green" 40 -+%% -+%% -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+KDB - Kernel Debugger -+ -+ -+ -+%size 7,center, font "thick" -+Introduction -+ -+And -+ -+Demonstration -+ -+ -+%size 3 -+ -+February 5, 2002 IBM Linux Technology Center Paul Dorwin -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+IBM Legal -+ -+ -+ IBM Legal requires this information: -+ -+%size 3 -+ -+ THE INFORMATION IN THE FOLLOWING PRESENTATION IS PREPARED -+ SOLELY FOR THE INFORMATION OF THE READER, AND COMES "AS IS" -+ AND WITHOUT WARRANTY OR REPRESENATION OF ANY KIND. 
-+ -+ ANY PARTY USING THE MATERIALS IN THIS PRESENTATION DOES SO -+ AT ITS OWN RISK LIABILITY AND THE PROVIDER OF THE MATERIALS -+ ACCEPTS NO RISK OR LIABILITY FOR SUCH USE OR RESULTING FROM -+ DISSEMINATION TO OR USE BY ANY OTHER PARTY -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Agenda -+ -+%size 5 -+ -+ Installing and Configuring KDB -+ -+ KDB Commands -+ -+ Scull Demo -+ -+ Setting Breakpoints -+ -+ Displaying Data Structures -+ -+ Kernel Data structures -+ -+ Take a walk through an IO operation -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Installing Configuring KDB -+ -+ -+ Install KDB patch. -+ Start with a clean source tree -+ Apply architecture specific patches -+ Obtain patch for your kernel version -+ see http://oss.sgi.com/projects/kdb/ -+ Apply the kdb patch -+ patch -p 1 -N -u -i /path/to/patch -+ Apply any other patches -+ Build and reboot on your kdb enabled kernel -+ Man pages can be found at Documentation/kdb -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Configuring KDB -+ -+ -+ Config kernel with the following options: -+ These are documented in Documentation/Configure.help -+ -+ CONFIG_KDB=y -+ Enable compilation of KDB in the kernel.. -+ Setting this also sets CONFIG_KALLSYMS=y. -+ CONFIG_KDB_MODULES=n -+ KDB may be extended, compiling kdb/modules. -+ CONFIG_KDB_OFF=n -+ y = KDB is disabled by default. -+ boot with kdb=on to enable at boot. -+ /proc/sys/kernel/kdb to enable/disable when system is up. -+ CONFIG_KALLSYMS=y -+ This causes all symbols to be exported. -+ CONFIG_FRAME_POINTER=y -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Invoking KDB -+ -+ -+ KDB can be invoked in the following ways: -+ -+ Early init with "kdb=early" lilo flag -+ Hits breakpoint prior to fork_init() (init/main.c) -+ -+ Serial console with CNTRL-A -+ -+ Console with PAUSE key -+ -+ When a pre-set breakpoint is hit -+ -+ On panic -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+KDB Commands -+ -+ -+ KDB environment -+ env Show environment variables -+ set Set environment variables -+ help Display Help Message -+ ? 
Display Help Message -+ -+ System related -+ sections List kernel and module sections -+ lsmod List loaded kernel modules -+ reboot Reboot the machine immediately -+ cpu Switch to new cpu -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+KDB Commands -+ -+ -+ Memory Manipulation -+ md Display Memory Contents -+ mdr Display Raw Memory -+ mds Display Symbolically -+ mm Modify Memory Contents -+ id Display Instructions -+ -+ Register Manipulation -+ rd Display Registers -+ rm Modify Registers -+ ef Display exception frame -+ -+ Stack -+ bt [] Stack traceback -+ btp Display stack for -+ bta Display all stacks -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+KDB Commands -+ -+ -+ Breakpoint -+ bc Clear Breakpoint -+ bd Disable Breakpoint -+ be Enable Breakpoint -+ bl [] Display breakpoints -+ bp [] Set/Display breakpoints -+ bpa [] Set/Display global breakpoints -+ bph [] Set hardware breakpoint -+ bpha [] Set global hardware breakpoint -+ bp* modifiers: -+ instruction - break on instruction fetch (default) -+ datar - break on read at vaddr -+ dataw - break on write at vaddr -+ IO - break on in or out op at vaddress -+ -+ Execution control -+ go [] Continue Execution -+ ss [<#steps>] Single Step -+ ssb Single step to branch/call -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+KDB Commands -+ -+ -+ Kernel structures -+ ll Traverse list and execute command -+ ps Display active task list -+ vm Display vm_area_struct -+ dentry Display interesting dentry stuff -+ filp Display interesting filp stuff -+ sh Show scsi_host -+ sd Show scsi_device -+ sc Show scsi_cmnd -+ kiobuf Display kiobuf -+ page Display page -+ inode Display inode -+ bh Display buffer head -+ inode_pages Display pages in an inode -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo -+ -+ -+ Objective -+ Find and display the data associated with a scull device -+ -+ The sequence of events -+ Populate the scull device with data -+ Identify the breakpoints -+ Set breakpoint in the device read function -+ Identify the data structure elements -+ Identify device structures used to track data -+ Display data structures containing the data -+ Show the usage of the filp command -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Populate Device -+ -+ -+ Obtain the code -+ Surf to http://examples.oreilly.com/linuxdrive2/ -+ Download the tarball -+ Untar it to /usr/src -+ -+ Build and install the module -+ cd /usr/src/ldd2-samples-1.0.1/scull -+ make -+ ./scull.init start -+ -+ Populate the scull device -+ cat main.c > /dev/scull0 -+ cat /dev/scull0 -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Driver Details -+ -+ -+ cat /dev/scull0 -+ fd = -+%fore "blue", cont -+open -+%fore "black", cont -+("/dev/scull0", O_RDONLY); -+ Kernel finds the file_operations structure -+ Kernel then invokes the open function -+%fore "blue" -+ read -+%fore "black", cont -+(fd, buf, size); -+ Kernel finds the file_operations structure -+ Kernel then invokes the read function -+ -+ Scull device file operations structure -+ -+%font "typewriter", size 3 -+ struct file_operations scull_fops = { -+ llseek: scull_llseek, -+%fore "blue" -+ read: scull_read, -+%fore "black" -+ write: scull_write, -+ ioctl: scull_ioctl, -+%fore "blue" -+ open: scull_open, -+%fore "black" -+ release: scull_release, -+ }; 
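The step "Kernel finds the file_operations structure" works because the driver registered scull_fops against its char-device major at load time. The following is a hedged sketch of that registration, assuming the 2.4-era register_chrdev() interface used by the ldd2 sample; the variable name scull_major and the function name scull_register are illustrative, and the real sample performs this in its module init function.

        /*
         * Hedged sketch (not copied from the ldd2 sample): bind scull_fops
         * to a char device major so that open()/read() on /dev/scull0
         * reach scull_open()/scull_read().
         */
        #include <linux/fs.h>
        #include <linux/module.h>

        static int scull_major;                 /* 0 means "assign a major dynamically" */
        extern struct file_operations scull_fops;

        static int __init scull_register(void)
        {
                int result = register_chrdev(scull_major, "scull", &scull_fops);

                if (result < 0)
                        return result;          /* registration failed */
                if (scull_major == 0)
                        scull_major = result;   /* remember the dynamic major */
                return 0;
        }

The scull.init script used above then creates the /dev/scull* nodes for that major, typically via mknod.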
-+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Driver Details -+ -+%font "typewriter", size 3 -+ scull_open code -+%font "typewriter", size 3 -+ int -+%fore "blue", cont -+scull_open -+%fore "black", cont -+(struct inode *inode, struct file *filp) -+ { -+ Scull_Dev *dev; /* device information */ -+ int num = NUM(inode->i_rdev); -+ -+ -+ -+ dev = (Scull_Dev *)filp->private_data; -+ if (!dev) { -+ if (num >= scull_nr_devs) return -ENODEV; -+%fore "blue" -+ dev = &scull_devices[num]; -+ filp->private_data = dev; -+%fore "black" -+ } -+ -+ -+ -+ } -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Driver Details -+ -+%font "typewriter", size 3 -+ scull_read code -+%font "typewriter", size 3 -+ ssize_t -+%fore "blue", cont -+scull_read -+%fore "black", cont -+(struct file *filp, char *buf, size_t count, -+ loff_t *f_pos) -+ { -+ -+%fore "blue", cont -+ Scull_Dev *dev = filp->private_data; -+%fore "black", cont -+ /* the first listitem */ -+%fore "blue" -+ Scull_Dev *dptr; -+%fore "black" -+ int quantum = dev->quantum; -+ int qset = dev->qset; -+ int itemsize = quantum * qset; -+ if (down_interruptible(&dev->sem)) -+ return -ERESTARTSYS; -+ if (*f_pos + count > dev->size) -+ count = dev->size - *f_pos; -+ -+ /* find listitem, qset index, and offset in the quantum */ -+ item = (long)*f_pos / itemsize; -+ rest = (long)*f_pos % itemsize; -+ s_pos = rest / quantum; q_pos = rest % quantum; -+ -+ /* follow the list up to the right position */ -+%fore "blue" -+ dptr = scull_follow(dev, item); -+%fore "black" -+ -+ -+ -+ } -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Breakpoints -+ -+ -+%font "typewriter", size 3 -+ Determine where to set breakpoint -+%font "typewriter", size 3 -+%fore "blue" -+ dptr = scull_follow(dev, item); -+%fore "black" -+ -+%font "typewriter", size 3 -+ Disassemble scull_read -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+id scull_read -+%fore "black" -+ 0xf8c083b4 scull_read: push %ebp -+ 0xf8c083b5 scull_read+0x1:mov %esp,%ebp -+ 0xf8c083b7 scull_read+0x3:push %edi -+ -+ 0xf8c08465 scull_read+0xb1:sub $0x8,%esp -+%fore "blue" -+ 0xf8c08468 scull_read+0xb4:push %ecx -+ 0xf8c08469 scull_read+0xb5:push %esi -+ 0xf8c0846a scull_read+0xb6:call 0xf8c08364 scull_follow: -+%fore "black" -+ 0xf8c0846f scull_read+0xbb:mov %eax, -+%fore "blue", cont -+ %edx -+%fore "black" -+ 0xf8c08471 -+%fore "blue", cont -+scull_read+0xbd -+%fore "black", cont -+:add $0x10,%esp -+ -+ -+ Set breakpoint in driver read -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue",cont -+bp scull_read+0xbd -+%fore "black" -+ Instruction(i) BP #0 at 0xf8c08471 ([scull]scull_read+0xbd) -+ is enabled globally adjust 1 -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Breakpoints -+ -+ -+%font "typewriter", size 3 -+ Restart the system -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+go -+%fore "black" -+ -+ Hit the Breakpoint -+%font "typewriter", size 3 -+ [root@elm3b77 root]# -+%fore "blue", cont -+cat /dev/scull0 -+%fore "black" -+ Instruction(i) breakpoint #0 at 0xf8c08471 (adjusted) -+ 0xf8c08471 scull_read+0xbd:int3 -+ Entering kdb (current=0xf73ec000, pid 1249) on processor 2 -+ due to Breakpoint @ 0xf8c08471 -+ -+ Display the registers -+%font "typewriter", size 3 -+ [2]kdb> -+%fore "blue", cont -+rd -+%fore "black" -+ eax = 0xf77d7b60 ebx = 0x00000000 ecx = 0x00000000 edx = 
-+%fore "blue", cont -+0xf77d7b60 -+%fore "black" -+ esi = -+%fore "blue", cont -+0xf77d7b60 -+%fore "black", cont -+ edi = 0x00001000 esp = 0xf7415f40 eip = 0xf8c08471 -+ ebp = 0xf7415f78 xss = 0x00000018 xcs = 0x00000010 eflags = 0x00000246 -+ xds = 0xf7590018 xes = 0x00000018 origeax = 0xffffffff ®s = 0xf7415f0c -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Data Structures -+ -+%font "typewriter", size 3 -+ Display the Scull_Dev structure -+%font "typewriter", size 3 -+ [2]kdb> -+%fore "blue", cont -+md 0xf77d7b60 2 -+%fore "black" -+ 0xf77d7b60 -+%fore "blue", cont -+f7400000 -+%fore "black", cont -+ 00000000 00000fa0 000003e8 ..@w.... ...h... -+ 0xf77d7b70 0000534e 00000000 00000000 00000000 NS.............. -+ -+ Scull Device Structure -+%font "typewriter", size 3 -+ typedef struct Scull_Dev { -+%fore "blue" -+ void **data; -+%fore "black" -+ struct Scull_Dev *next; /* next listitem */ -+ int quantum; /* the current quantum size */ -+ int qset; /* the current array size */ -+ unsigned long size; -+ devfs_handle_t handle; /* only used if devfs is there */ -+ unsigned int access_key; /* used by sculluid and scullpriv */ -+ struct semaphore sem; /* mutual exclusion semaphore */ -+ } Scull_Dev; -+%size 6 -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: Data Structures -+ -+ -+%font "typewriter", size 3 -+ Display the quantum set (dev->data) -+%font "typewriter", size 3 -+ [2]kdb> -+%fore "blue", cont -+md f7400000 2 -+%fore "black" -+ 0xf7400000 -+%fore "blue", cont -+f73ea000 -+%fore "black", cont -+ f73f1000 f740c000 f7ab4000 . >w..?w.@@w.@+w -+ 0xf7400010 f73ef000 f755b000 00000000 00000000 .p>w.0Uw........ -+ -+ Display the first quantum (dev->data[0]) -+%font "typewriter", size 3 -+ [2]kdb> -+%fore "blue", cont -+md f73ea000 -+%fore "black" -+ 0xf73ea000 200a2a2f 616d202a 632e6e69 202d2d20 /*. * main.c -- -+ 0xf73ea010 20656874 65726162 75637320 63206c6c the bare scull c -+ 0xf73ea020 20726168 75646f6d 200a656c 2a200a2a har module. *. * -+ 0xf73ea030 706f4320 67697279 28207468 32202943 Copyright (C) 2 -+ 0xf73ea040 20313030 73656c41 646e6173 52206f72 001 Alessandro R -+ 0xf73ea050 6e696275 6e612069 6f4a2064 6874616e ubini and Jonath -+ 0xf73ea060 43206e61 6562726f 2a200a74 706f4320 an Corbet. * Cop -+ 0xf73ea070 67697279 28207468 32202943 20313030 yright (C) 2001 -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: filp command -+ -+ -+%font "typewriter", size 3 -+ Show filp usage - here is the scull_read prototype -+%font "typewriter", size 3 -+ ssize_t scull_read( -+%fore "blue", cont -+struct file *filp -+%fore "black", cont -+, char *buf, -+ size_t count, loff_t *f_pos); -+ Show the stack trace: -+%font "typewriter", size 3 -+[2]kdb> -+%fore "blue", cont -+bt -+%fore "black" -+ EBP EIP Function(args) -+ 0xee9dbf78 0xf8c08471 [scull]scull_read+0xbd ( -+%fore "blue", cont -+0xeaf6c0c0 -+%fore "black", cont -+, 0x804e128, -+ 0x1000, 0xeaf6c0e0, 0x804f000) -+ scull .text 0xf8c08060 0xf8c083b4 0xf8c084dc -+ 0xee9dbfbc 0xc0136278 sys_read+0x98 (0x3, 0x804e128, 0x1000, ... 
-+ kernel .text 0xc0100000 0xc01361e0 0xc01362b0 -+ 0xc010702b system_call+0x33 -+ kernel .text 0xc0100000 0xc0106ff8 0xc0107030 -+ And show the filp output -+%font "typewriter", size 3 -+ [2]kdb> -+%fore "blue", cont -+filp 0xeaf6c0c0 -+%fore "black" -+ name.name 0xe93889fc name.len 6 -+ File Pointer at 0xeaf6c0c0 -+ f_list.nxt = 0xe42deca0 f_list.prv = 0xf7e69070 -+%fore "blue" -+ f_dentry = 0xe93889a0 -+%fore "black", cont -+ f_op = 0xf8c0a200 -+ f_count = 2 f_flags = 0x8000 f_mode = 0x1 -+ f_pos = 0 f_reada = 0 f_ramax = 0 -+ f_raend = 0 f_ralen = 0 f_rawin = 0 -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Scull Demo: filp command -+ -+ -+%font "typewriter", size 3 -+ filp output - continued -+%font "typewriter", size 3 -+%fore "blue" -+ Directory Entry at 0xe93889a0 -+%fore "black" -+ d_name.len = 6 -+%fore "orange", cont -+d_name.name = 0xe93889fc -+%fore "black", cont -+> -+ d_count = 1 d_flags = 0x0 -+%fore "blue", cont -+d_inode = 0xe827b680 -+%fore "black" -+ d_hash.nxt = 0xc215aec8 d_hash.prv = 0xc215aec8 -+ d_lru.nxt = 0xe93889b8 d_lru.prv = 0xe93889b8 -+ d_child.nxt = 0xe89e1e80 d_child.prv = 0xe9388940 -+ d_subdirs.nxt = 0xe93889c8 d_subdirs.prv = 0xe93889c8 -+ d_alias.nxt = 0xe827b690 d_alias.prv = 0xe827b690 -+ d_op = 0x00000000 d_sb = 0xf7e69000 -+ -+%fore "blue" -+ Inode Entry at 0xe827b680 -+%fore "black" -+ i_mode = 0x21a4 i_nlink = 1 i_rdev = 0xfe00 -+ i_ino = 37182 i_count = 1 i_dev = 0x821 -+ i_hash.nxt = 0xc20e6be8 i_hash.prv = 0xc20e6be8 -+ i_list.nxt = 0xe827b2c8 i_list.prv = 0xe827b868 -+ i_dentry.nxt = 0xe93889d0 i_dentry.prv = 0xe93889d0 -+ -+ Check the filename (display d_name.name) -+%font "typewriter", size 3 -+ [2]kdb> -+%fore "orange", cont -+md 0xe93889fc 1 -+%fore "black" -+ 0xe93889fc 6c756373 0000306c 00000000 00000000 scull0.......... 
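The file, dentry and inode blocks above are connected by two pointer hops, which is the chain the filp command walks before formatting its output. A hedged kernel-side sketch of that chain follows, using the 2.4-era field names shown in the slide (current kernels reach the dentry through file->f_path.dentry); the function name show_filp_chain is made up for illustration.

        /*
         * Hedged sketch: follow the same pointers the kdb "filp" command
         * prints, struct file -> f_dentry -> d_inode, plus the name in d_name.
         */
        #include <linux/fs.h>
        #include <linux/dcache.h>
        #include <linux/kernel.h>

        static void show_filp_chain(struct file *filp)
        {
                struct dentry *dentry = filp->f_dentry; /* 0xe93889a0 in the slide */
                struct inode *inode = dentry->d_inode;  /* 0xe827b680 in the slide */

                printk(KERN_INFO "%s: ino %lu mode %#o\n",
                       dentry->d_name.name, inode->i_ino,
                       (unsigned int)inode->i_mode);
        }

The final md 0xe93889fc simply dumps the bytes that d_name.name points at, which is why the ASCII column reads scull0.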
-+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Kernel Structures -+ -+ -+ Objective -+ Show output from various kernel related kdb commands -+ -+ Sequence of events -+ Simple Program -+ Write a simple program which allocates memory and hangs -+ Show usage of the ps, vm, and ll commands -+ Walk an IO operation -+ Hit a breakpoint in qlogic driver (isp1020_queuecommand) -+ Show usage of scsi related commands (sc, sh, and sd) -+ Show usage of vm related commands (bh, page, inode, inode_pages) -+ -+ -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Simple program -+ -+%font "typewriter", size 3 -+ simple.c - simple program which allocates memory -+%font "typewriter", size 3 -+%fore "blue" -+ int foo_global[8192]; -+%fore "black" -+ main() -+ { -+ int * -+%fore "blue", cont -+foo_malloc; -+%fore "black" -+ int i; -+ foo_malloc = (int *)malloc(0x8192); -+ for(i = 0; i < 0x100; i++) { -+ foo_global[i] = 0xdead0000 | i; -+ foo_malloc[i] = 0xbeef0000 | i; -+ } -+ printf("foo_global at %x\n", (int)foo_global); -+ printf("foo_malloc at %x\n", (int)foo_malloc); -+ printf("sleep forever\n"); -+ sleep(2000000); -+ } -+ -+ simple output -+%font "typewriter", size 3 -+ [root@elm3b77 scull]# cc -o simple simple.c -+ [root@elm3b77 scull]# ./simple -+ foo_global at -+%fore "blue", cont -+8049780 -+%fore "black" -+ foo_malloc at -+%fore "blue", cont -+8051788 -+%fore "black" -+ sleep forever -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Simple Program -+ -+%font "typewriter", size 3 -+ Show the output of the ps command -+%font "typewriter", size 3 -+ Entering kdb (current=0xc2010000, pid 0) on processor 3 due to -+ Keyboard Entry -+ [3]kdb> -+%fore "blue", cont -+ps -+%fore "black" -+ Task Addr Pid Parent [*] cpu State Thread Command -+ 0xf7efe000 00000001 00000000 0 003 stop 0xf7efe370 init -+ 0xf7ef0000 00000002 00000001 0 001 stop 0xf7ef0370 keventd -+ 0xf7eec000 00000003 00000000 0 000 stop 0xf7eec370 ksoftirqd_CPU0 -+ 0xf7eea000 00000004 00000000 0 001 stop 0xf7eea370 ksoftirqd_CPU1 -+ 0xf7ee8000 00000005 00000000 0 002 stop 0xf7ee8370 ksoftirqd_CPU2 -+ 0xf7ee6000 00000006 00000000 0 003 stop 0xf7ee6370 ksoftirqd_CPU3 -+ -+ -+ -+ 0xf7b46000 00001006 00000737 0 003 stop 0xf7b46370 sshd -+ 0xf7ace000 00001007 00001006 0 000 stop 0xf7ace370 bash -+ 0xef06a000 00001066 00001007 0 003 stop 0xef06a370 su -+ 0xeef88000 00001067 00001066 0 000 stop 0xeef88370 bash -+ 0xeef64000 00001119 00000770 0 001 stop 0xeef64370 in.ftpd -+%fore "blue" -+ 0xeeeac000 -+%fore "black", cont -+ 00001138 00001067 0 001 stop 0xeeeac370 -+%fore "blue", cont -+simple -+%fore "black" -+ [3]kdb> -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Simple Program -+ -+%font "typewriter", size 3 -+ Display the task struct -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+md 0xeeeac000 -+%fore "black" -+ 0xeeeac000 00000001 00000000 00000000 c0000000 ................ -+ 0xeeeac010 c0339880 00000000 00000000 ffffffff ................ -+ 0xeeeac020 0000000a 00000000 00000000 -+%fore "blue", cont -+f7e10f00 -+%fore "black", cont -+ ..............aw -+ 0xeeeac030 00000001 ffffffff ffffffff 00000000 ................ 
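The highlighted word at offset 0x2c of this dump is the task's mm pointer; the next chunk derives that offset by hand from the structure layout. A hedged module sketch that confirms such offsets at build time with offsetof() is shown here — the module and function names are made up for illustration, and the 0x2c value is specific to the 2.4-era configuration used in these slides.

        /*
         * Hedged sketch: print the offset of ->mm inside struct task_struct
         * instead of counting fields by hand.
         */
        #include <linux/kernel.h>
        #include <linux/module.h>
        #include <linux/sched.h>
        #include <linux/stddef.h>

        static int __init mm_offset_init(void)
        {
                printk(KERN_INFO "offsetof(struct task_struct, mm) = %#lx\n",
                       (unsigned long)offsetof(struct task_struct, mm));
                return 0;
        }

        static void __exit mm_offset_exit(void)
        {
        }

        module_init(mm_offset_init);
        module_exit(mm_offset_exit);
        MODULE_LICENSE("GPL");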
-+ -+%font "typewriter", size 3 -+ Determine offset of mm_struct ptr in task_struct -+%font "typewriter", size 3 -+ struct task_struct { -+ [0] volatile long state; -+ [4] unsigned long flags; -+ [8] int sigpending; -+ [c] mm_segment_t addr_limit; -+ [10] struct exec_domain *exec_domain; -+ [14] volatile long need_resched; -+ [18] unsigned long ptrace; -+ [1c] int lock_depth; -+ [20] long counter; -+ [24] long nice; -+ [28] unsigned long policy; -+%fore "blue" -+ [2c] struct mm_struct *mm; -+%fore "black" -+ [30] int processor; -+ [34] unsigned long cpus_runnable, cpus_allowed; -+ -+ }; -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Simple Program -+ -+ -+%font "typewriter", size 3 -+ Display the mm_struct associated with simple process -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+md f7e10f00 -+%fore "black" -+ 0xf7e10f00 -+%fore "blue", cont -+e8357a80 -+%fore "black", cont -+ e8357978 f7ac77e0 eb15eac0 .z5hxy5h`w,w@j.k -+ 0xf7e10f10 00000001 00000002 0000000b 00000000 ................ -+ 0xf7e10f20 00000001 f7e10f24 f7e10f24 00000001 ................ -+ 0xf7e10f30 f7e35e70 eea7e8f0 08048000 0804862b ................ -+ 0xf7e10f40 0804962c 08049744 08051780 0805a000 ................ -+ 0xf7e10f50 bffffd10 bffffe00 bffffe09 bffffe09 ................ -+ 0xf7e10f60 bffffff3 0000005a 00000168 00000000 ................ -+ 0xf7e10f70 00000000 00000002 00000000 00000001 ................ -+ -+%font "typewriter", size 3 -+ Determine offset of the first vma in the process -+%font "typewriter", size 3 -+ struct mm_struct { -+%fore "blue" -+ struct vm_area_struct * mmap; -+%fore "black" -+ rb_root_t mm_rb; -+ struct vm_area_struct * mmap_cache; -+ -+ }; -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Simple Program -+ -+%font "typewriter", size 3 -+ Display the first vma using md -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+md e8357a80 -+%fore "black" -+ 0xe8357a80 f7e10f00 08048000 08049000 -+%fore "blue", cont -+e8727e00 -+%fore "black",cont -+ ..aw.........~rh -+ 0xe8357a90 00000025 00001875 e8727e18 00000001 %...u....~rh.... -+ -+ Display the first vma using vma -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+vma e8357a80 -+%fore "black" -+ struct vm_area_struct at 0xe8357a80 for 68 bytes -+ vm_start = 0x8048000 vm_end = 0x8049000 -+ page_prot = 0x25 -+ flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE -+%font "typewriter", size 3 -+ -+ Determine the offset to the vma list -+%font "typewriter", size 3 -+ struct vm_area_struct { -+ [0] struct mm_struct * vm_mm; -+ [4] unsigned long vm_start; -+ [8] unsigned long vm_end; -+%fore "blue" -+ [c] struct vm_area_struct *vm_next; -+%fore "black" -+ -+ }; -+ Display the next vma -+%font "typewriter", size 3 -+ [3]kdb> vma e8727e00 -+ struct vm_area_struct at 0xe8727e00 for 68 bytes -+ vm_start = 0x8049000 vm_end = 0x804a000 -+ page_prot = 0x25 -+ flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Simple Program -+ -+%font "typewriter", size 3 -+ Use the ll command to display the list of vma's -+%font "typewriter", size 3 -+ [3]kdb> ll e8357a80 0xc vma -+. -+ struct vm_area_struct at 0xe8357a80 for 68 bytes -+ vm_start = 0x8048000 vm_end = 0x8049000 -+ page_prot = 0x25 -+ flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE -+. 
-+ struct vm_area_struct at 0xe8727e00 for 68 bytes -+ vm_start = -+%fore "orange", cont -+0x8049000 -+%fore "black", cont -+ vm_end = -+%fore "orange", cont -+0x804a000 -+%fore "black" -+ page_prot = 0x25 -+ flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE -+. -+ struct vm_area_struct at 0xe8727c80 for 68 bytes -+ vm_start = -+%fore "blue", cont -+0x804a000 -+%fore "black", cont -+ vm_end = -+%fore "blue", cont -+0x805a000 -+%fore "black" -+ page_prot = 0x25 -+ flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC -+ -+ struct vm_area_struct at 0xe8357900 for 68 bytes -+ vm_start = 0xbfffe000 vm_end = 0xc0000000 -+ page_prot = 0x25 -+ flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC GROWSDOWN -+ -+ Match the vma to the displayed addresses -+%font "typewriter", size 3 -+ foo_global at -+%fore "orange", cont -+8049780 -+%fore "black" -+ foo_malloc at -+%fore "blue", cont -+8051788 -+%fore "black" -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+ Objective -+ Show usage of various scsi and vm related kdb commands -+ -+ Sequence: -+ Set a breakpoint in the scsi driver -+ Stops when queueing a command to the controller -+ Cause IO on an idle disk -+ Show various IO stack traces -+ Display the IO data structures -+ Display vm information about the data -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Set the breakpoint -+ -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+bp isp1020_queuecommand -+%fore "black" -+ Instruction(i) BP #0 at 0xc01ecfe0 (isp1020_queuecommand) -+ is enabled globally adjust 1 -+ -+%font "typewriter", size 3 -+ Create some activity on a previously unused disk -+ -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+go -+%fore "black" -+ [root@elm3b77 root]# -+%fore "blue", cont -+ls /rh62 -+%fore "black" -+ -+ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted) -+ 0xc01ecfe0 isp1020_queuecommand:int3 -+ -+ Entering kdb (current=0xf75ba000, pid 1181) on processor 3 due to -+ Breakpoint @ 0xc01ecfe0 -+ -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Show the stack. 
-+ This is a read of the /rh62 directory -+ -+%font "typewriter", size 3 -+ [1]kdb> -+%fore "blue", cont -+bt -+%fore "black" -+ EBP EIP Function(args) -+ 0xf75bbdf4 0xc01ecfe0 isp1020_queuecommand -+ 0xc01e2c77 scsi_dispatch_cmd+0x1f7 -+ 0xf75bbe24 0xc01e99b1 scsi_request_fn+0x2f1 -+ 0xf75bbe34 0xc01c84fd generic_unplug_device+0x2d -+ 0xf75bbe50 0xc011b3af __run_task_queue+0x5f -+ 0xf75bbe6c 0xc013a63c block_sync_page+0x1c -+ 0xf75bbe98 0xc0128127 __lock_page+0x77 -+ 0xf75bbea4 0xc0128178 lock_page+0x18 -+ 0xf75bbec8 0xc012a4b3 read_cache_page+0xc3 -+ 0xf75bbef4 0xc0168e23 ext2_get_page+0x23 -+ 0xf75bbf48 0xc0168fdd ext2_readdir+0xfd -+ 0xf75bbf68 0xc0143d2e vfs_readdir+0x7e -+ 0xf75bbfbc 0xc01442ed -+%fore "blue", cont -+sys_getdents64+0x4d -+%fore "black" -+ 0xc010702b system_call+0x33 -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Allow the operation to complete -+ -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+go -+%fore "black" -+ bench build etc lib mnt oldsys rh72 spv usr -+ bin data h linux mnt1 opt root test var -+ boot dev home lost+found mnt2 proc sbin tmp -+ -+%font "typewriter", size 3 -+ Force some more activity -+ -+%font "typewriter", size 3 -+ [root@elm3b77 root]# -+%fore "blue", cont -+cd /rh62/tmp -+%fore "black" -+ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted) -+ 0xc01ecfe0 isp1020_queuecommand:int3 -+ -+ Entering kdb (current=0xf768a000, pid 981) on processor 3 due to -+ Breakpoint @ 0xc01ecfe0 -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Show the stack. -+ This is an inode read for /rh62/tmp -+ -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+bt -+%fore "black" -+ EBP EIP Function(args) -+ 0xf768bd68 0xc01ecfe0 isp1020_queuecommand -+ 0xc01e2c77 scsi_dispatch_cmd+0x1f7 -+ 0xf768bd98 0xc01e99b1 scsi_request_fn+0x2f1 -+ 0xf768bda8 0xc01c84fd generic_unplug_device+0x2d -+ 0xf768bdc4 0xc011b3af __run_task_queue+0x5f -+ 0xf768bdfc 0xc0137216 __wait_on_buffer+0x56 -+ 0xf768be1c 0xc0138600 bread+0x50 -+ 0xf768be5c 0xc016b684 ext2_read_inode+0x114 -+ 0xf768bf0c 0xc013fbec real_lookup+0x7c -+ 0xf768bf78 0xc014035d link_path_walk+0x5ad -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Create a new file, causing yet more disk activity -+ -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+go -+%fore "black" -+ -+ [root@elm3b77 tmp]# -+%fore "blue", cont -+echo "Hello linux reading group" > j1;sync -+%fore "black" -+ -+ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted) -+ 0xc01ecfe0 isp1020_queuecommand:int3 -+ -+ Entering kdb (current=0xf768a000, pid 981) on processor 3 due to -+ Breakpoint @ 0xc01ecfe0 -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Show the stack -+ This is an inode read in response to the open -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+bt -+%fore "black" -+ EBP EIP Function(args) -+ 0xf768bd78 0xc01ecfe0 isp1020_queuecommand -+ 0xc01e2c77 scsi_dispatch_cmd+0x1f7 -+ 0xf768bda8 0xc01e99b1 scsi_request_fn+0x2f1 -+ 0xf768bdb8 0xc01c84fd generic_unplug_device+0x2d -+ 0xf768bdd4 0xc011b3af __run_task_queue+0x5f -+ 0xf768bdf0 0xc013a63c block_sync_page+0x1c -+ 0xf768be1c 0xc0128127 
__lock_page+0x77 -+ 0xf768be28 0xc0128178 lock_page+0x18 -+ 0xf768be4c 0xc012a4b3 read_cache_page+0xc3 -+ 0xf768be78 0xc0168e23 ext2_get_page+0x23 -+ 0xf768beb8 0xc01691ed ext2_find_entry+0x8d -+ 0xf768bed4 0xc016933a ext2_inode_by_name+0x1a -+ 0xf768befc 0xc016c077 ext2_lookup+0x27 -+ 0xf768bf1c 0xc014094a lookup_hash+0x9a -+ 0xf768bf64 0xc0140c4d open_namei+0xfd -+ 0xf768bfa0 0xc0135907 filp_open+0x37 -+ 0xf768bfbc 0xc0135c64 sys_open+0x34 -+ 0xc010702b system_call+0x33 -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Let the operation continue -+%font "typewriter", size 3 -+ [3]kdb> -+%fore "blue", cont -+go -+%fore "black" -+ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted) -+ 0xc01ecfe0 isp1020_queuecommand: int3 -+ Entering kdb (current=0xc0352000, pid 0) on processor 0 due to -+ Breakpoint @ 0xc01ecfe0 -+ Show the stack -+ This is an io completion queuing the next request -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+bt -+%fore "black" -+ EBP EIP Function(args) -+ 0xc0353df4 0xc01ecfe0 isp1020_queuecommand( -+%fore "blue", cont -+0xf7e63a00 -+%fore "black", cont -+,0xc01e7fc0... -+ 0xc01e2c77 scsi_dispatch_cmd+0x1f7 -+ 0xc0353e24 0xc01e99b1 scsi_request_fn+0x2f1 -+ 0xc0353e40 0xc01e8f6a -+%fore "blue", cont -+scsi_queue_next_request+0x4a -+%fore "black" -+ 0xc0353e5c 0xc01e9166 __scsi_end_request+0x116 -+ 0xc0353ea8 0xc01e93e0 -+%fore "blue", cont -+scsi_io_completion+0x170 -+%fore "black" -+ 0xc0353ecc 0xc01f658e rw_intr+0x14e -+ 0xc0353ef8 0xc01e8668 scsi_old_done+0x6a8 -+ 0xc0353fd4 0xc01052c2 cpu_idle+0x52 -+ Function prototype -+%font "typewriter", size 3 -+ int isp1020_queuecommand( -+%fore "blue", cont -+Scsi_Cmnd *Cmnd, -+%fore "black" -+ void (*done)(Scsi_Cmnd *)) -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Show the command being queued -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+sc 0xf7e63a00 -+%fore "black" -+ scsi_cmnd at 0xf7e63a00 -+%fore "blue" -+ host = 0xf7e91400 -+%fore "black", cont -+ state = 4099 owner = 258 -+%fore "blue", cont -+device = 0xf7ed5d80 -+%fore "black" -+ bnext = 0x00000000 reset_chain = 0x00000000 eh_state = 0 -+ done = 0xc01f6440 -+ serial_number = 3402 serial_num_at_to = 0 retries = 0 timeout = 0 -+ id/lun/cmnd = [0/0/0] cmd_len = 10 old_cmd_len = 10 -+ cmnd = [2a/00/00/28/00/3f/00/00/10/00/ef/f7] -+ data_cmnd = [2a/00/00/28/00/3f/00/00/10/00/ef/f7] -+ request_buffer = 0xc03fd000 bh_next = 0x00000000 -+ request_bufflen = 8192 -+ use_sg = 2 old_use_sg = 2 sglist_len = 512 abore_reason = 0 -+ bufflen = 8192 buffer = 0xc03fd000 underflow = 8192 -+ transfersize = 512 -+ tag = 0 pid = 3401 -+ request struct -+ rq_status = RQ_ACTIVE rq_dev = [8/1] errors = 1 cmd = 0 -+ sector = 2621440 nr_sectors = 16 current_nr_sectors = 8 -+ buffer = 0xf7599000 -+%fore "blue", cont -+bh = 0xf75ca300 -+%fore "black", cont -+ bhtail = 0xf75ca3c0 -+ -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Display the host adapter -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+sh 0xf7e91400 -+%fore "black" -+ Scsi_Host at 0xf7e91400 -+ next = 0x00000000 -+%fore "blue", cont -+host_queue = 0xf7ed5d80 -+%fore "black" -+ ehandler = 0x00000000 eh_wait = 0x00000000 en_notify = 0x00000000 -+ eh_action = 0x00000000 -+ h_active = 0x0 
host_wait = 0xc0353ac4 hostt = 0xc034bce0 -+ host_busy = 1 -+ host_failed = 0 extra_bytes = 524 host_no = 0 resetting = 0 -+ max id/lun/channel = [16/8/0] this_id = 7 -+ can_queue = 64 cmd_per_lun = 1 sg_tablesize = 427 u_isa_dma = 0 -+ host_blocked = 0 reverse_ordering = 0 -+ -+%font "typewriter", size 3 -+ Display the scsi device -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+sd 0xf7ed5d80 -+%fore "black" -+ scsi_device at 0xf7ed5d80 -+ next = 0xf7ed5c80 prev = 0x00000000 host = 0xf7e91400 -+ device_busy = 1 -+%fore "blue", cont -+device_queue 0xf7e63a00 -+%fore "black" -+ id/lun/chan = [0/0/0] single_lun = 0 device_blocked = 0 -+ queue_depth = 1 current_tag = 0 scsi_level = 4 -+ IBM DGHS18X 0360 -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Display the Buffer header associated with the command -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+bh 0xf75ca300 -+%fore "black" -+ buffer_head at 0xf75ca300 -+ next 0x00000000 bno 327680 rsec 2621440 size 4096 -+ dev 0x801 rdev 0x801 -+ count 2 state 0x1d [Uptodate Lock Req Mapped] ftime 0x7695e -+ b_list 1 b_reqnext 0xf75ca3c0 b_data 0xf7599000 -+%fore "blue" -+ b_page 0xc1dd6640 -+%fore "black", cont -+ b_this_page 0xf75ca300 b_private 0x00000000 -+ -+ Display the associated page structure -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+page 0xc1dd6640 -+%fore "black" -+ struct page at 0xc1dd6640 -+ next 0xc1dd7300 prev 0xc1dd6240 -+%fore "blue", cont -+addr space 0xf7af04d0 -+%fore "black" -+ index 327680 (offset 0x50000000) -+ count 2 flags PG_referenced PG_lru virtual 0xf7599000 -+ buffers 0xf75ca300 -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Display the Address space associated with the page -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+md 0xf7af04d0 -+%fore "black" -+ 0xf7af04d0 c1dd6240 c1dea740 f7af04d8 f7af04d8 @b]A@'^AX./wX./w -+ 0xf7af04e0 f7af04e0 f7af04e0 00000007 c033b700 `./w`./w.....73@ -+ 0xf7af04f0 -+%fore "blue", cont -+f7af0420 -+%fore "black", cont -+ 00000000 00000000 00000001 ./w............ -+ 0xf7af0500 000001d0 00000000 00000000 f7af050c P............./w -+ 0xf7af0510 f7af050c 00000000 f7a8afa0 00000000 ../w.... /(w.... -+ -+ The structure looks like: -+%size 3 -+ struct address_space { -+ struct list_head clean_pages; /* list of clean pages */ -+ struct list_head dirty_pages; /* list of dirty pages */ -+ struct list_head locked_pages;/* list of locked pages */ -+ unsigned long nrpages; /* number of total pages */ -+ spinlock_t page_lock; /* spinlock protecting them*/ -+ struct address_space_operations *a_ops; /* methods */ -+%fore "blue" -+ struct inode *host; /* owner: inode, block_dev */ -+%fore "black" -+ -+ }; -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Display the inode associated with the address space -+ I think htis is the inode for the block device. 
-+ -+%font "typewriter", size 3 -+ [1]kdb> -+%fore "blue", cont -+inode f7af0420 -+%fore "black" -+ struct inode at 0xf7af0420 -+ i_ino = 289 i_count = 1 i_dev = 0x801 i_size 4301789184 -+ i_mode = 0x8000 i_nlink = 1 i_rdev = 0x801 -+ i_hash.nxt = 0xf7af0420 i_hash.prv = 0xf7af0420 -+ i_list.nxt = 0xf7af0608 i_list.prv = 0xf7af0068 -+ i_dentry.nxt = 0xf7af0430 i_dentry.prv = 0xf7af0430 -+ i_dirty_buffers.nxt = 0xf7af0438 i_dirty_buffers.prv = 0xf7af0438 -+ i_sb = 0xc201f200 i_op = 0xc03cfdc0 i_data = 0xf7af04d0 nrpages = 6 -+ i_mapping = 0xf7af04d0 -+ i_flags 0x0 i_state 0x0 [] fs specific info @ 0xf7af0540 -+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -+%page -+ -+Walking IO structures -+ -+ -+%font "typewriter", size 3 -+ Display the page list associated with the inode -+%font "typewriter", size 3 -+ [0]kdb> -+%fore "blue", cont -+inode_pages f7af0420 -+%fore "black" -+CLEAN page_struct index cnt flags -+ 0xc1dd6240 327735 2 0x44 bh 0xf75caae0 bno 327735 -+ [Lock Req Mapped] -+%fore "blue" -+ 0xc1dd6640 327680 2 0x44 bh 0xf75ca300 bno 327680 -+ [Uptodate Lock Req Mapped] -+%fore "black" -+ 0xc1dd7300 327681 2 0x44 bh 0xf75ca3c0 bno 327681 -+ [Uptodate Lock Req Mapped] -+ 0xc1dd6e00 327684 2 0x44 bh 0xf75ca420 bno 327684 -+ [Uptodate Req Mapped] -+ 0xc1de8fc0 4 2 0xc0 bh 0xf7b5ade0 bno 4 -+ [Uptodate Req Mapped] -+ 0xc1dea700 1 2 0x44 bh 0xf7e02740 bno 1 -+ [Uptodate Req Mapped] -+ 0xc1dea740 0 2 0x44 bh 0xf7e028c0 bno 0 -+ [Uptodate Req Mapped] -+DIRTY page_struct index cnt flags -+LOCKED page_struct index cnt flags ---- a/Makefile -+++ b/Makefile -@@ -672,6 +672,7 @@ export mod_strip_cmd - - ifeq ($(KBUILD_EXTMOD),) - core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ -+core-$(CONFIG_KDB) += kdb/ - - vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ - $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ ---- a/drivers/char/keyboard.c -+++ b/drivers/char/keyboard.c -@@ -43,6 +43,9 @@ - #include - #include - #include -+#ifdef CONFIG_KDB -+#include -+#endif /* CONFIG_KDB */ - - extern void ctrl_alt_del(void); - -@@ -1199,6 +1202,13 @@ static void kbd_keycode(unsigned int key - } - #endif - -+#ifdef CONFIG_KDB -+ if (down && !rep && keycode == KEY_PAUSE && kdb_on == 1) { -+ kdb(KDB_REASON_KEYBOARD, 0, get_irq_regs()); -+ return; -+ } -+#endif /* CONFIG_KDB */ -+ - #ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */ - if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) { - if (!sysrq_down) { ---- a/drivers/hid/usbhid/hid-core.c -+++ b/drivers/hid/usbhid/hid-core.c -@@ -44,6 +44,10 @@ - #define DRIVER_DESC "USB HID core driver" - #define DRIVER_LICENSE "GPL" - -+#ifdef CONFIG_KDB_USB -+#include -+#endif -+ - /* - * Module parameters. 
- */ -@@ -1032,6 +1036,34 @@ static int usbhid_start(struct hid_devic - USB_INTERFACE_PROTOCOL_KEYBOARD) - usbhid_set_leds(hid); - -+#ifdef CONFIG_KDB_USB -+ /* Attach USB keyboards to kdb */ -+ if (intf->cur_altsetting->desc.bInterfaceProtocol == -+ USB_INTERFACE_PROTOCOL_KEYBOARD) { -+ int ret; -+ struct usbhid_device *usbhid = hid->driver_data; -+ extern void *usb_hcd_get_kdb_poll_func(struct usb_device *udev); -+ extern void * usb_hcd_get_kdb_completion_func(struct usb_device *udev); -+ extern int usb_hcd_check_uhci(struct usb_device *udev); -+ extern kdb_hc_keyboard_attach_t -+ usb_hcd_get_hc_keyboard_attach(struct usb_device *udev); -+ extern kdb_hc_keyboard_detach_t -+ usb_hcd_get_hc_keyboard_detach(struct usb_device *udev); -+ -+ ret = kdb_usb_keyboard_attach(usbhid->urbin, usbhid->inbuf, -+ usb_hcd_get_kdb_poll_func(interface_to_usbdev(intf)), -+ usb_hcd_get_kdb_completion_func(interface_to_usbdev(intf)), -+ usb_hcd_get_hc_keyboard_attach(interface_to_usbdev(intf)), -+ usb_hcd_get_hc_keyboard_detach(interface_to_usbdev(intf)), -+ usbhid->bufsize, -+ NULL); -+ -+ if (ret == -1) -+ printk(": FAILED to register keyboard (%s) " -+ "with KDB\n", hid->phys); -+ } -+#endif /* CONFIG_KDB_USB */ -+ - return 0; - - fail: -@@ -1051,6 +1083,14 @@ static void usbhid_stop(struct hid_devic - - if (WARN_ON(!usbhid)) - return; -+#ifdef CONFIG_KDB_USB -+ /* -+ * If the URB was for a Keyboard, detach it from kdb. -+ * If the URB was for another type of device, just -+ * allow kdb_usb_keyboard_detach() to silently fail. -+ */ -+ kdb_usb_keyboard_detach(usbhid->urbin); -+#endif - - clear_bit(HID_STARTED, &usbhid->iofl); - spin_lock_irq(&usbhid->lock); /* Sync with error handler */ ---- a/drivers/hid/usbhid/usbkbd.c -+++ b/drivers/hid/usbhid/usbkbd.c -@@ -30,6 +30,9 @@ - #include - #include - #include -+#ifdef CONFIG_KDB_USB -+#include -+#endif - - /* - * Version Information -@@ -292,6 +295,16 @@ static int usb_kbd_probe(struct usb_inte - usb_fill_int_urb(kbd->irq, dev, pipe, - kbd->new, (maxp > 8 ? 8 : maxp), - usb_kbd_irq, kbd, endpoint->bInterval); -+ -+#ifdef CONFIG_KDB_USB -+ /* Attach keyboard to kdb */ -+ extern void * usb_hcd_get_kdb_poll_func(struct usb_device *udev); -+ -+ kdb_usb_keyboard_attach(kbd->irq, kbd->new, -+ usb_hcd_get_kdb_poll_func(dev)); -+ -+#endif /* CONFIG_KDB_USB */ -+ - kbd->irq->transfer_dma = kbd->new_dma; - kbd->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - -@@ -329,6 +342,10 @@ static void usb_kbd_disconnect(struct us - - usb_set_intfdata(intf, NULL); - if (kbd) { -+#ifdef CONFIG_KDB_USB -+ /* Detach the keyboard from kdb */ -+ kdb_usb_keyboard_detach(kbd->irq); -+#endif /* CONFIG_KDB_USB */ - usb_kill_urb(kbd->irq); - input_unregister_device(kbd->dev); - usb_kbd_free_mem(interface_to_usbdev(intf), kbd); ---- a/drivers/serial/8250.c -+++ b/drivers/serial/8250.c -@@ -44,11 +44,26 @@ - #include - - #include "8250.h" -- - #ifdef CONFIG_SPARC - #include "suncore.h" - #endif - -+#ifdef CONFIG_KDB -+#include -+/* -+ * kdb_serial_line records the serial line number of the first serial console. -+ * NOTE: The kernel ignores characters on the serial line unless a user space -+ * program has opened the line first. To enter kdb before user space has opened -+ * the serial line, you can use the 'kdb=early' flag to lilo and set the -+ * appropriate breakpoints. 
-+ */ -+ -+static int kdb_serial_line = -1; -+static const char *kdb_serial_ptr = kdb_serial_str; -+#else -+#define KDB_8250() 0 -+#endif /* CONFIG_KDB */ -+ - /* - * Configuration: - * share_irqs - whether we pass IRQF_SHARED to request_irq(). This option -@@ -1403,6 +1418,20 @@ receive_chars(struct uart_8250_port *up, - * just force the read character to be 0 - */ - ch = 0; -+#ifdef CONFIG_KDB -+ if ((up->port.line == kdb_serial_line) && kdb_on == 1) { -+ if (ch == *kdb_serial_ptr) { -+ if (!(*++kdb_serial_ptr)) { -+ atomic_inc(&kdb_8250); -+ kdb(KDB_REASON_KEYBOARD, 0, get_irq_regs()); -+ atomic_dec(&kdb_8250); -+ kdb_serial_ptr = kdb_serial_str; -+ break; -+ } -+ } else -+ kdb_serial_ptr = kdb_serial_str; -+ } -+#endif /* CONFIG_KDB */ - - flag = TTY_NORMAL; - up->port.icount.rx++; -@@ -2776,7 +2805,7 @@ serial8250_console_write(struct console - if (up->port.sysrq) { - /* serial8250_handle_port() already took the lock */ - locked = 0; -- } else if (oops_in_progress) { -+ } else if (oops_in_progress || KDB_8250()) { - locked = spin_trylock(&up->port.lock); - } else - spin_lock(&up->port.lock); -@@ -2834,6 +2863,30 @@ static int __init serial8250_console_set - if (!port->iobase && !port->membase) - return -ENODEV; - -+#ifdef CONFIG_KDB -+ /* -+ * Remember the line number of the first serial -+ * console. We'll make this the kdb serial console too. -+ */ -+ if (co && kdb_serial_line == -1) { -+ kdb_serial_line = co->index; -+ kdb_serial.io_type = port->iotype; -+ switch (port->iotype) { -+ case SERIAL_IO_MEM: -+#ifdef SERIAL_IO_MEM32 -+ case SERIAL_IO_MEM32: -+#endif -+ kdb_serial.iobase = (unsigned long)(port->membase); -+ kdb_serial.ioreg_shift = port->regshift; -+ break; -+ default: -+ kdb_serial.iobase = port->iobase; -+ kdb_serial.ioreg_shift = 0; -+ break; -+ } -+ } -+#endif /* CONFIG_KDB */ -+ - if (options) - uart_parse_options(options, &baud, &parity, &bits, &flow); - ---- a/drivers/serial/8250_early.c -+++ b/drivers/serial/8250_early.c -@@ -38,6 +38,11 @@ - #include - #endif - -+#ifdef CONFIG_KDB -+#include -+static int kdb_serial_line = -1; -+#endif /* CONFIG_KDB */ -+ - struct early_serial8250_device { - struct uart_port port; - char options[16]; /* e.g., 115200n8 */ -@@ -231,6 +236,30 @@ int __init setup_early_serial8250_consol - - register_console(&early_serial8250_console); - -+#ifdef CONFIG_KDB -+ /* -+ * Remember the line number of the first serial -+ * console. We'll make this the kdb serial console too. -+ */ -+ if (kdb_serial_line == -1) { -+ kdb_serial_line = early_serial8250_console.index; -+ kdb_serial.io_type = early_device.port.iotype; -+ switch (early_device.port.iotype) { -+ case SERIAL_IO_MEM: -+#ifdef SERIAL_IO_MEM32 -+ case SERIAL_IO_MEM32: -+#endif -+ kdb_serial.iobase = (unsigned long)(early_device.port.membase); -+ kdb_serial.ioreg_shift = early_device.port.regshift; -+ break; -+ default: -+ kdb_serial.iobase = early_device.port.iobase; -+ kdb_serial.ioreg_shift = 0; -+ break; -+ } -+ } -+#endif /* CONFIG_KDB */ -+ - return 0; - } - ---- a/drivers/serial/sn_console.c -+++ b/drivers/serial/sn_console.c -@@ -48,6 +48,22 @@ - #include /* for mdelay */ - #include - #include -+#ifdef CONFIG_KDB -+#include -+#include -+#include -+/* -+ * kdb_serial_line records the serial line number of the first serial console. -+ * NOTE: The kernel ignores characters on the serial line unless a user space -+ * program has opened the line first. 
To enter kdb before user space has opened -+ * the serial line, you can use the 'kdb=early' flag to lilo and set the -+ * appropriate breakpoints. -+ */ -+ -+static int kdb_serial_line = -1; -+static char *kdb_serial_ptr = (char *)kdb_serial_str; -+#endif /* CONFIG_KDB */ -+ - - #include - #include -@@ -485,6 +501,26 @@ sn_receive_chars(struct sn_cons_port *po - "obtaining data from the console (0x%0x)\n", ch); - break; - } -+#ifdef CONFIG_KDB -+ if (kdb_on == 1) { -+ if (ch == *kdb_serial_ptr) { -+ if (!(*++kdb_serial_ptr)) { -+ spin_unlock_irqrestore(&port->sc_port.lock, flags); -+ if (!get_irq_regs()) { -+ KDB_STATE_SET(KEYBOARD); -+ KDB_ENTER(); /* to get some registers */ -+ } else -+ kdb(KDB_REASON_KEYBOARD, 0, get_irq_regs()); -+ kdb_serial_ptr = (char *)kdb_serial_str; -+ spin_lock_irqsave(&port->sc_port.lock, flags); -+ break; -+ } -+ } -+ else -+ kdb_serial_ptr = (char *)kdb_serial_str; -+ } -+#endif /* CONFIG_KDB */ -+ - #ifdef CONFIG_MAGIC_SYSRQ - if (sysrq_requested) { - unsigned long sysrq_timeout = sysrq_requested + HZ*5; -@@ -1008,6 +1044,15 @@ sn_sal_console_write(struct console *co, - */ - static int sn_sal_console_setup(struct console *co, char *options) - { -+#ifdef CONFIG_KDB -+ /* -+ * Remember the line number of the first serial -+ * console. We'll make this the kdb serial console too. -+ */ -+ if (kdb_serial_line == -1) { -+ kdb_serial_line = co->index; -+ } -+#endif /* CONFIG_KDB */ - return 0; - } - -@@ -1083,3 +1128,31 @@ static int __init sn_sal_serial_console_ - } - - console_initcall(sn_sal_serial_console_init); -+ -+#ifdef CONFIG_KDB -+int -+l1_control_in_polled(int offset) -+{ -+ int sal_call_status = 0, input; -+ int ret = 0; -+ if (offset == UART_LSR) { -+ ret = (UART_LSR_THRE | UART_LSR_TEMT); /* can send anytime */ -+ sal_call_status = ia64_sn_console_check(&input); -+ if (!sal_call_status && input) { -+ /* input pending */ -+ ret |= UART_LSR_DR; -+ } -+ } -+ return ret; -+} -+ -+int -+l1_serial_in_polled(void) -+{ -+ int ch; -+ if (!ia64_sn_console_getc(&ch)) -+ return ch; -+ else -+ return 0; -+} -+#endif /* CONFIG_KDB */ ---- a/drivers/usb/core/hcd.c -+++ b/drivers/usb/core/hcd.c -@@ -40,6 +40,9 @@ - #include - #include - #include -+#ifdef CONFIG_KDB_USB -+#include -+#endif - - #include - -@@ -2271,6 +2274,74 @@ usb_hcd_platform_shutdown(struct platfor - } - EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown); - -+#ifdef CONFIG_KDB_USB -+void * -+usb_hcd_get_kdb_poll_func(struct usb_device *udev) -+{ -+ struct usb_hcd *hcd = bus_to_hcd(udev->bus); -+ -+ if (hcd && hcd->driver) -+ return (void *)(hcd->driver->kdb_poll_char); -+ -+ return NULL; -+} -+EXPORT_SYMBOL_GPL (usb_hcd_get_kdb_poll_func); -+ -+void * -+usb_hcd_get_kdb_completion_func(struct usb_device *udev) -+{ -+ struct usb_hcd *hcd = bus_to_hcd(udev->bus); -+ -+ if (hcd && hcd->driver) -+ return (void *)(hcd->driver->kdb_completion); -+ -+ return NULL; -+} -+EXPORT_SYMBOL_GPL (usb_hcd_get_kdb_completion_func); -+ -+int -+usb_hcd_check_uhci(struct usb_device *udev) -+{ -+ struct usb_hcd *hcd = bus_to_hcd(udev->bus); -+ -+ if (hcd && hcd->driver){ -+ if (!(strcmp(hcd->driver->description, "uhci_hcd"))) -+ return 1; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL (usb_hcd_check_uhci); -+ -+kdb_hc_keyboard_attach_t -+usb_hcd_get_hc_keyboard_attach(struct usb_device *udev) -+{ -+ struct usb_hcd *hcd = bus_to_hcd(udev->bus); -+ -+ if (hcd && hcd->driver){ -+ return hcd->driver->kdb_hc_keyboard_attach; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL (usb_hcd_get_hc_keyboard_attach); -+ -+kdb_hc_keyboard_detach_t 
-+usb_hcd_get_hc_keyboard_detach(struct usb_device *udev) -+{ -+ struct usb_hcd *hcd = bus_to_hcd(udev->bus); -+ -+ if (hcd && hcd->driver){ -+ return hcd->driver->kdb_hc_keyboard_detach; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL (usb_hcd_get_hc_keyboard_detach); -+ -+ -+#endif /* CONFIG_KDB_USB */ -+ - /*-------------------------------------------------------------------------*/ - - #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) ---- a/drivers/usb/core/hcd.h -+++ b/drivers/usb/core/hcd.h -@@ -22,6 +22,9 @@ - #ifdef __KERNEL__ - - #include -+#ifdef CONFIG_KDB_USB -+#include -+#endif - - #define MAX_TOPO_LEVEL 6 - -@@ -287,6 +290,14 @@ struct hc_driver { - int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, - struct usb_tt *tt, gfp_t mem_flags); - int (*reset_device)(struct usb_hcd *, struct usb_device *); -+ -+#ifdef CONFIG_KDB_USB -+ /* KDB poll function for this HC */ -+ int (*kdb_poll_char)(struct urb *urb); -+ void (*kdb_completion)(struct urb *urb); -+ kdb_hc_keyboard_attach_t kdb_hc_keyboard_attach; -+ kdb_hc_keyboard_detach_t kdb_hc_keyboard_detach; -+#endif /* CONFIG_KDB_USB */ - }; - - extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); ---- a/drivers/usb/host/ehci-hcd.c -+++ b/drivers/usb/host/ehci-hcd.c -@@ -1092,6 +1092,48 @@ static int ehci_get_frame (struct usb_hc - ehci->periodic_size; - } - -+#ifdef CONFIG_KDB_USB -+ -+int -+ehci_kdb_poll_char(struct urb *urb) -+{ -+ struct ehci_hcd *ehci; -+ -+ /* just to make sure */ -+ if (!urb || !urb->dev || !urb->dev->bus) -+ return -1; -+ -+ ehci = (struct ehci_hcd *) hcd_to_ehci(bus_to_hcd(urb->dev->bus)); -+ -+ /* make sure */ -+ if (!ehci) -+ return -1; -+ -+ if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) -+ return -1; -+ -+ /* -+ * If ehci->lock is held coming into this routine, it could -+ * mean KDB was entered while the HC driver was in the midst -+ * of processing URBs. Therefore it could be dangerous to -+ * processes URBs from this poll routine. And, we can't wait on -+ * the lock since we are in KDB and kernel threads (including the -+ * one holding the lock) are suspended. -+ * So, we punt and return an error. Keyboards attached to this -+ * HC will not be useable from KDB at this time. -+ */ -+ if (spin_is_locked(&ehci->lock)) -+ return -EBUSY; -+ -+ /* processes the URB */ -+ if (qh_completions_kdb(ehci, urb->hcpriv, urb)) -+ return 0; -+ -+ return -1; -+} -+ -+#endif /* CONFIG_KDB_USB */ -+ - /*-------------------------------------------------------------------------*/ - - MODULE_DESCRIPTION(DRIVER_DESC); ---- a/drivers/usb/host/ehci-pci.c -+++ b/drivers/usb/host/ehci-pci.c -@@ -22,6 +22,10 @@ - #error "This file is PCI bus glue. CONFIG_PCI must be defined." - #endif - -+#ifdef CONFIG_KDB_USB -+#include -+#endif -+ - /*-------------------------------------------------------------------------*/ - - /* called after powerup, by probe or system-pm "wakeup" */ -@@ -412,6 +416,10 @@ static const struct hc_driver ehci_pci_h - .port_handed_over = ehci_port_handed_over, - - .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, -+ -+#ifdef CONFIG_KDB_USB -+ .kdb_poll_char = ehci_kdb_poll_char, -+#endif - }; - - /*-------------------------------------------------------------------------*/ ---- a/drivers/usb/host/ehci-q.c -+++ b/drivers/usb/host/ehci-q.c -@@ -577,6 +577,228 @@ halt: - return count; - } - -+#ifdef CONFIG_KDB_USB -+/* -+ * This routine is basically a copy of qh_completions() for use by KDB. 
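-+ * It is called from ehci_kdb_poll_char() while other kernel threads are -+ * suspended, so it must not assume that ehci->lock is held by the caller.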
-+ * It is modified to only work on qtds which are associated -+ * with 'kdburb'. Also, there are some fixups related to locking. -+ */ -+unsigned -+qh_completions_kdb(struct ehci_hcd *ehci, struct ehci_qh *qh, struct urb *kdburb) -+{ -+ struct ehci_qtd *last = NULL, *end = qh->dummy; -+ struct list_head *entry, *tmp; -+ int last_status = -EINPROGRESS; -+ int stopped; -+ unsigned count = 0; -+ int do_status = 0; -+ u8 state; -+ u32 halt = HALT_BIT(ehci); -+ -+ /* verify params are valid */ -+ if (!qh || !kdburb) -+ return 0; -+ -+ if (unlikely (list_empty (&qh->qtd_list))) -+ return count; -+ -+ /* completions (or tasks on other cpus) must never clobber HALT -+ * till we've gone through and cleaned everything up, even when -+ * they add urbs to this qh's queue or mark them for unlinking. -+ * -+ * NOTE: unlinking expects to be done in queue order. -+ */ -+ state = qh->qh_state; -+ qh->qh_state = QH_STATE_COMPLETING; -+ stopped = (state == QH_STATE_IDLE); -+ -+ /* remove de-activated QTDs from front of queue. -+ * after faults (including short reads), cleanup this urb -+ * then let the queue advance. -+ * if queue is stopped, handles unlinks. -+ */ -+ list_for_each_safe (entry, tmp, &qh->qtd_list) { -+ struct ehci_qtd *qtd; -+ struct urb *urb; -+ u32 token = 0; -+ int qtd_status; -+ -+ qtd = list_entry (entry, struct ehci_qtd, qtd_list); -+ urb = qtd->urb; -+ -+ if (urb != kdburb) -+ continue; -+ -+ /* clean up any state from previous QTD ...*/ -+ if (last) { -+ if (likely (last->urb != urb)) { -+ /* -+ * Lock hackery here... -+ * ehci_urb_done() makes the assumption -+ * that it's called with ehci->lock held. -+ * So, lock it if it isn't already. -+ */ -+ if (!spin_is_locked(&ehci->lock)) -+ spin_lock(&ehci->lock); -+ -+ ehci_urb_done(ehci, last->urb, last_status); -+ -+ /* -+ * ehci_urb_done() releases and reacquires -+ * ehci->lock, so release it here. -+ */ -+ if (spin_is_locked(&ehci->lock)) -+ spin_unlock (&ehci->lock); -+ -+ count++; -+ } -+ ehci_qtd_free (ehci, last); -+ last = NULL; -+ last_status = -EINPROGRESS; -+ } -+ -+ /* ignore urbs submitted during completions we reported */ -+ if (qtd == end) -+ break; -+ -+ /* hardware copies qtd out of qh overlay */ -+ rmb (); -+ token = hc32_to_cpu(ehci, qtd->hw_token); -+ -+ /* always clean up qtds the hc de-activated */ -+ if ((token & QTD_STS_ACTIVE) == 0) { -+ -+ if ((token & QTD_STS_HALT) != 0) { -+ stopped = 1; -+ -+ /* magic dummy for some short reads; qh won't advance. -+ * that silicon quirk can kick in with this dummy too. -+ */ -+ } else if (IS_SHORT_READ (token) -+ && !(qtd->hw_alt_next -+ & EHCI_LIST_END(ehci))) { -+ stopped = 1; -+ goto halt; -+ } -+ -+ /* stop scanning when we reach qtds the hc is using */ -+ } else if (likely (!stopped -+ && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) { -+ break; -+ -+ } else { -+ stopped = 1; -+ -+ if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) -+ last_status = -ESHUTDOWN; -+ -+ /* ignore active urbs unless some previous qtd -+ * for the urb faulted (including short read) or -+ * its urb was canceled. we may patch qh or qtds. 
-+ */ -+ if (likely(last_status == -EINPROGRESS && -+ !urb->unlinked)) -+ continue; -+ -+ /* issue status after short control reads */ -+ if (unlikely (do_status != 0) -+ && QTD_PID (token) == 0 /* OUT */) { -+ do_status = 0; -+ continue; -+ } -+ -+ /* token in overlay may be most current */ -+ if (state == QH_STATE_IDLE -+ && cpu_to_hc32(ehci, qtd->qtd_dma) -+ == qh->hw_current) -+ token = hc32_to_cpu(ehci, qh->hw_token); -+ -+ /* force halt for unlinked or blocked qh, so we'll -+ * patch the qh later and so that completions can't -+ * activate it while we "know" it's stopped. -+ */ -+ if ((halt & qh->hw_token) == 0) { -+halt: -+ qh->hw_token |= halt; -+ wmb (); -+ } -+ } -+ -+ /* remove it from the queue */ -+ qtd_status = qtd_copy_status(ehci, urb, qtd->length, token); -+ if (unlikely(qtd_status == -EREMOTEIO)) { -+ do_status = (!urb->unlinked && -+ usb_pipecontrol(urb->pipe)); -+ qtd_status = 0; -+ } -+ if (likely(last_status == -EINPROGRESS)) -+ last_status = qtd_status; -+ -+ if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { -+ last = list_entry (qtd->qtd_list.prev, -+ struct ehci_qtd, qtd_list); -+ last->hw_next = qtd->hw_next; -+ } -+ list_del (&qtd->qtd_list); -+ last = qtd; -+ } -+ -+ /* last urb's completion might still need calling */ -+ if (likely (last != NULL)) { -+ /* -+ * Lock hackery here... -+ * ehci_urb_done() makes the assumption -+ * that it's called with ehci->lock held. -+ * So, lock it if it isn't already. -+ */ -+ if (!spin_is_locked(&ehci->lock)) -+ spin_lock(&ehci->lock); -+ -+ ehci_urb_done(ehci, last->urb, last_status); -+ -+ /* -+ * ehci_urb_done() releases and reacquires -+ * ehci->lock, so release it here. -+ */ -+ if (spin_is_locked(&ehci->lock)) -+ spin_unlock (&ehci->lock); -+ -+ count++; -+ ehci_qtd_free (ehci, last); -+ } -+ -+ /* restore original state; caller must unlink or relink */ -+ qh->qh_state = state; -+ -+ /* be sure the hardware's done with the qh before refreshing -+ * it after fault cleanup, or recovering from silicon wrongly -+ * overlaying the dummy qtd (which reduces DMA chatter). -+ */ -+ if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) { -+ switch (state) { -+ case QH_STATE_IDLE: -+ qh_refresh(ehci, qh); -+ break; -+ case QH_STATE_LINKED: -+ /* should be rare for periodic transfers, -+ * except maybe high bandwidth ... 
-+ */ -+ if ((cpu_to_hc32(ehci, QH_SMASK) -+ & qh->hw_info2) != 0) { -+ intr_deschedule (ehci, qh); -+ (void) qh_schedule (ehci, qh); -+ } else -+ unlink_async (ehci, qh); -+ break; -+ /* otherwise, unlink already started */ -+ } -+ } -+ -+ return count; -+} -+ -+#endif /* CONFIG_KDB_USB */ -+ - /*-------------------------------------------------------------------------*/ - - // high bandwidth multiplier, as encoded in highspeed endpoint descriptors ---- a/drivers/usb/host/ohci-hcd.c -+++ b/drivers/usb/host/ohci-hcd.c -@@ -987,6 +987,73 @@ static int ohci_restart (struct ohci_hcd - - /*-------------------------------------------------------------------------*/ - -+#ifdef CONFIG_KDB_USB -+ -+int -+ohci_kdb_poll_char(struct urb *urb) -+{ -+ struct ohci_hcd *ohci; -+ struct ohci_regs * regs; -+ -+ /* just to make sure */ -+ if (!urb || !urb->dev || !urb->dev->bus) -+ return -1; -+ -+ ohci = (struct ohci_hcd *) hcd_to_ohci(bus_to_hcd(urb->dev->bus)); -+ -+ /* make sure */ -+ if (!ohci || !ohci->hcca) -+ return -1; -+ -+ if (!HC_IS_RUNNING (ohci_to_hcd(ohci)->state)) -+ return -1; -+ -+ /* -+ * If ohci->lock is held coming into this routine, it could -+ * mean KDB was entered while the HC driver was in the midst -+ * of processing URBs. Therefore it could be dangerous to -+ * processes URBs from this poll routine. And, we can't wait on -+ * the lock since we are in KDB and kernel threads (including the -+ * one holding the lock) are suspended. -+ * So, we punt and return an error. Keyboards attached to this -+ * HC will not be useable from KDB at this time. -+ */ -+ if (spin_is_locked(&ohci->lock)) -+ return -EBUSY; -+ -+ regs = ohci->regs; -+ -+ /* if the urb is not currently in progress resubmit it */ -+ if (urb->status != -EINPROGRESS) { -+ -+ if (usb_submit_urb (urb, GFP_ATOMIC)) -+ return -1; -+ -+ /* make sure the HC registers are set correctly */ -+ ohci_writel (ohci, OHCI_INTR_WDH, ®s->intrenable); -+ ohci_writel (ohci, OHCI_INTR_WDH, ®s->intrstatus); -+ ohci_writel (ohci, OHCI_INTR_MIE, ®s->intrenable); -+ -+ // flush those pci writes -+ (void) ohci_readl (ohci, &ohci->regs->control); -+ } -+ -+ if (ohci->hcca->done_head) { -+ dl_done_list_kdb (ohci, urb); -+ ohci_writel (ohci, OHCI_INTR_WDH, ®s->intrstatus); -+ // flush the pci write -+ (void) ohci_readl (ohci, &ohci->regs->control); -+ -+ return 0; -+ } -+ -+ return -1; -+} -+ -+#endif /* CONFIG_KDB_USB */ -+ -+/*-------------------------------------------------------------------------*/ -+ - MODULE_AUTHOR (DRIVER_AUTHOR); - MODULE_DESCRIPTION(DRIVER_DESC); - MODULE_LICENSE ("GPL"); ---- a/drivers/usb/host/ohci-pci.c -+++ b/drivers/usb/host/ohci-pci.c -@@ -21,6 +21,10 @@ - #include - #include - -+#ifdef CONFIG_KDB_USB -+#include -+#endif -+ - - /* constants used to work around PM-related transfer - * glitches in some AMD 700 series southbridges -@@ -387,6 +391,7 @@ static int __devinit ohci_pci_start (str - ohci_err (ohci, "can't start\n"); - ohci_stop (hcd); - } -+ - return ret; - } - -@@ -485,6 +490,9 @@ static const struct hc_driver ohci_pci_h - .bus_resume = ohci_bus_resume, - #endif - .start_port_reset = ohci_start_port_reset, -+#ifdef CONFIG_KDB_USB -+ .kdb_poll_char = ohci_kdb_poll_char, -+#endif - }; - - /*-------------------------------------------------------------------------*/ ---- a/drivers/usb/host/ohci-q.c -+++ b/drivers/usb/host/ohci-q.c -@@ -1134,3 +1134,65 @@ dl_done_list (struct ohci_hcd *ohci) - td = td_next; - } - } -+ -+ -+/*-------------------------------------------------------------------------*/ -+ -+#ifdef 
CONFIG_KDB_USB -+static void -+dl_done_list_kdb (struct ohci_hcd *ohci, struct urb *kdburb) -+{ -+ struct td *td = dl_reverse_done_list (ohci); -+ -+ while (td) { -+ struct td *td_next = td->next_dl_td; -+ struct urb *urb = td->urb; -+ urb_priv_t *urb_priv = urb->hcpriv; -+ struct ed *ed = td->ed; -+ -+ if (urb != kdburb) { -+ td = td_next; -+ continue; -+ } -+ -+ /* update URB's length and status from TD */ -+ td_done (ohci, urb, td); -+ urb_priv->td_cnt++; -+ -+ /* If all this urb's TDs are done, just resubmit it */ -+ if (urb_priv->td_cnt == urb_priv->length) { -+ urb->actual_length = 0; -+ urb->status = -EINPROGRESS; -+ td_submit_urb (ohci, urb); -+ } -+ -+ /* clean schedule: unlink EDs that are no longer busy */ -+ if (list_empty (&ed->td_list)) { -+ if (ed->state == ED_OPER) -+ start_ed_unlink (ohci, ed); -+ -+ /* ... reenabling halted EDs only after fault cleanup */ -+ } else if ((ed->hwINFO & cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE)) -+ == cpu_to_hc32 (ohci, ED_SKIP)) { -+ td = list_entry (ed->td_list.next, struct td, td_list); -+ if (!(td->hwINFO & cpu_to_hc32 (ohci, TD_DONE))) { -+ ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP); -+ /* ... hc may need waking-up */ -+ switch (ed->type) { -+ case PIPE_CONTROL: -+ ohci_writel (ohci, OHCI_CLF, -+ &ohci->regs->cmdstatus); -+ break; -+ case PIPE_BULK: -+ ohci_writel (ohci, OHCI_BLF, -+ &ohci->regs->cmdstatus); -+ break; -+ } -+ } -+ } -+ -+ td = td_next; -+ } -+} -+ -+#endif /* CONFIG_KDB_USB */ ---- a/drivers/usb/host/uhci-hcd.c -+++ b/drivers/usb/host/uhci-hcd.c -@@ -50,6 +50,11 @@ - #include "uhci-hcd.h" - #include "pci-quirks.h" - -+#ifdef CONFIG_KDB_USB -+#include -+#include -+#endif -+ - /* - * Version Information - */ -@@ -461,6 +466,213 @@ static irqreturn_t uhci_irq(struct usb_h - return IRQ_HANDLED; - } - -+#ifdef CONFIG_KDB_USB -+/* Unlink KDB QH from hardware and software scheduler */ -+static void kdb_unlink_uhci_qh(struct urb *urb, struct uhci_qh *qh) -+{ -+ unsigned long flags; -+ struct uhci_hcd *uhci; -+ -+ uhci = (struct uhci_hcd *) hcd_to_uhci(bus_to_hcd(urb->dev->bus)); -+ -+ spin_lock_irqsave(&uhci->lock, flags); -+ unlink_interrupt(NULL, qh); -+ list_del(&(qh->node)); -+ spin_unlock_irqrestore(&uhci->lock, flags); -+ -+} -+ -+static int uhci_kdb_poll_char(struct urb *urb) -+{ -+ if (!urb) /* can happen if no keyboard attached */ -+ return -1; -+ -+ return uhci_check_kdb_uhci_qh(kdb_uhci_keyboard_get_qh(urb)); -+} -+ -+/* Only 1 UHCI Keyboard supported */ -+static inline void kdb_usb_fill_int_urb (struct urb *urb, -+ struct usb_device *dev, -+ unsigned int pipe, -+ void *transfer_buffer, -+ int buffer_length, -+ usb_complete_t complete_fn, -+ void *context, -+ int interval) -+{ -+ urb->dev = dev; -+ urb->pipe = pipe; -+ urb->transfer_buffer = transfer_buffer; -+ urb->transfer_buffer_length = buffer_length; -+ urb->complete = complete_fn; -+ urb->context = context; -+ urb->interval = interval; -+ urb->start_frame = -1; -+} -+ -+static int kdb_uhci_keyboard_attach(int i, unsigned int usbhid_bufsize) -+{ -+ struct urb *kdb_urb; -+ unsigned char *kdb_buffer; -+ dma_addr_t uhci_inbuf_dma; -+ struct urb *hid_inurb = kdb_usb_kbds[i].urb; -+ int ret = -1; -+ -+ kdb_usb_kbds[i].hid_urb = hid_inurb; -+ -+ kdb_urb = NULL; -+ kdb_buffer = NULL; -+ if (!(kdb_buffer = usb_buffer_alloc(hid_inurb->dev, -+ usbhid_bufsize, GFP_ATOMIC, -+ &uhci_inbuf_dma))) -+ goto out; -+ -+ if (!(kdb_urb = usb_alloc_urb(0, GFP_KERNEL))) -+ goto out; -+ -+ kdb_usb_fill_int_urb(kdb_urb, -+ hid_inurb->dev, -+ hid_inurb->pipe, -+ kdb_buffer, -+ 
hid_inurb->transfer_buffer_length, -+ hid_inurb->complete, -+ hid_inurb->context, -+ hid_inurb->interval -+ ); -+ -+ (kdb_urb)->transfer_dma = uhci_inbuf_dma; -+ (kdb_urb)->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; -+ -+ kdb_usb_kbds[i].urb = kdb_urb; -+ kdb_usb_kbds[i].buffer = kdb_buffer; -+ -+ if (usb_submit_urb(kdb_urb, GFP_ATOMIC)){ -+ kdb_usb_keyboard_detach(hid_inurb); -+ goto out; -+ } -+ /* Remove KDB special URB from endpoin queue to -+ * prevent hang during hid_disconnect(). -+ */ -+ list_del(&(kdb_urb->urb_list)); -+ -+ ret = 0; -+ return ret; -+out: -+ /* Some Error Cleanup */ -+ ret = -1; -+ printk("KDB: Error, UHCI Keyboard HID won't work!\n"); -+ -+ if (kdb_buffer) -+ usb_buffer_free(hid_inurb->dev, -+ usbhid_bufsize, kdb_buffer, -+ uhci_inbuf_dma); -+ -+ if (kdb_urb) -+ usb_free_urb(kdb_urb); -+ -+ return ret; -+} -+ -+static int kdb_uhci_keyboard_detach(struct urb *urb, int i) -+{ -+ int ret; -+ -+ if (kdb_usb_kbds[i].qh && (kdb_usb_kbds[i].hid_urb == urb)) { -+ /* UHCI keyboard */ -+ kdb_unlink_uhci_qh(kdb_usb_kbds[i].urb, kdb_usb_kbds[i].qh); -+ ret = 0; -+ } -+ ret = -1; -+ -+ return ret; -+} -+ -+/* Check if URB is managed by KDB code */ -+static int kdb_uhci_keyboard_urb(struct urb *urb) -+{ -+ int i; -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ if (kdb_usb_kbds[i].urb && kdb_usb_kbds[i].urb == urb) -+ return i; -+ } -+ return -1; -+} -+ -+/* Check if UHCI QH is managed by KDB code */ -+static int kdb_uhci_keyboard_check_uhci_qh(struct uhci_qh *qh) -+{ -+ int i; -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ if (kdb_usb_kbds[i].urb && kdb_usb_kbds[i].qh == qh) -+ return i; -+ } -+ return -1; -+} -+ -+/* Set UHCI QH using URB pointer */ -+static int kdb_uhci_keyboard_set_qh(struct urb *urb, struct uhci_qh *qh) -+{ -+ int i; -+ -+ i = kdb_uhci_keyboard_urb(urb); -+ if (i != -1) -+ kdb_usb_kbds[i].qh = qh; -+ -+ return 0; -+} -+ -+/* Get UHCI QH using URB pointer */ -+static struct uhci_qh *kdb_uhci_keyboard_get_qh(struct urb *urb) -+{ -+ int i; -+ -+ i = kdb_uhci_keyboard_urb(urb); -+ if (i != -1) -+ return kdb_usb_kbds[i].qh; -+ -+ return NULL; -+} -+ -+/* Set UHCI hid_event using URB pointer */ -+static int kdb_uhci_keyboard_set_hid_event(struct urb *urb, int hid_event) -+{ -+ int i; -+ -+ i = kdb_uhci_keyboard_urb(urb); -+ if (i != -1) -+ kdb_usb_kbds[i].kdb_hid_event = hid_event; -+ -+ return 0; -+} -+/* Get UHCI hid_event using URB pointer */ -+static int kdb_uhci_keyboard_get_hid_event(struct urb *urb) -+{ -+ int i; -+ -+ i = kdb_uhci_keyboard_urb(urb); -+ if (i != -1) -+ return kdb_usb_kbds[i].kdb_hid_event; -+ -+ return 0; -+} -+ -+/* Set UHCI hid_event using UHCI QH pointer */ -+static int kdb_uhci_keyboard_set_hid_event_qh(struct uhci_qh *qh, int hid_event) -+{ -+ int i; -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ if (kdb_usb_kbds[i].urb && kdb_usb_kbds[i].qh == qh){ -+ kdb_usb_kbds[i].kdb_hid_event = hid_event; -+ return i; -+ } -+ } -+ return -1; -+} -+#endif -+ - /* - * Store the current frame number in uhci->frame_number if the controller - * is runnning. 
Expand from 11 bits (of which we use only 10) to a -@@ -935,6 +1147,12 @@ static const struct hc_driver uhci_drive - - .hub_status_data = uhci_hub_status_data, - .hub_control = uhci_hub_control, -+#ifdef CONFIG_KDB_USB -+ .kdb_poll_char = uhci_kdb_poll_char, -+ .kdb_completion = kdb_uhci_urb_complete, -+ .kdb_hc_keyboard_attach = kdb_uhci_keyboard_attach, -+ .kdb_hc_keyboard_detach = kdb_uhci_keyboard_detach, -+#endif - }; - - static const struct pci_device_id uhci_pci_ids[] = { { ---- a/drivers/usb/host/uhci-q.c -+++ b/drivers/usb/host/uhci-q.c -@@ -25,6 +25,17 @@ - * games with the FSBR code to make sure we get the correct order in all - * the cases. I don't think it's worth the effort - */ -+#ifdef CONFIG_KDB_USB -+/* KDB HID QH, managed by KDB code */ -+static int kdb_uhci_keyboard_check_uhci_qh(struct uhci_qh *qh); -+static int kdb_uhci_keyboard_set_qh(struct urb *urb, struct uhci_qh *qh); -+static struct uhci_qh *kdb_uhci_keyboard_get_qh(struct urb *urb); -+static int kdb_uhci_keyboard_set_hid_event(struct urb *urb, int hid_event); -+static int kdb_uhci_keyboard_get_hid_event(struct urb *urb); -+static int kdb_uhci_keyboard_set_hid_event_qh(struct uhci_qh *qh, int hid_event); -+static int kdb_uhci_keyboard_urb(struct urb *urb); -+#endif -+ - static void uhci_set_next_interrupt(struct uhci_hcd *uhci) - { - if (uhci->is_stopped) -@@ -288,6 +299,58 @@ static struct uhci_qh *uhci_alloc_qh(str - return qh; - } - -+#ifdef CONFIG_KDB_USB -+/* -+ * Same as uhci_alloc_qh execpt it doesn't change to hep->hcpriv -+ */ -+static struct uhci_qh *kdb_uhci_alloc_qh(struct uhci_hcd *uhci, -+ struct usb_device *udev, struct usb_host_endpoint *hep) -+{ -+ dma_addr_t dma_handle; -+ struct uhci_qh *qh; -+ -+ qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle); -+ if (!qh) -+ return NULL; -+ -+ memset(qh, 0, sizeof(*qh)); -+ qh->dma_handle = dma_handle; -+ -+ qh->element = UHCI_PTR_TERM; -+ qh->link = UHCI_PTR_TERM; -+ -+ INIT_LIST_HEAD(&qh->queue); -+ INIT_LIST_HEAD(&qh->node); -+ -+ if (udev) { /* Normal QH */ -+ qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; -+ if (qh->type != USB_ENDPOINT_XFER_ISOC) { -+ qh->dummy_td = uhci_alloc_td(uhci); -+ if (!qh->dummy_td) { -+ dma_pool_free(uhci->qh_pool, qh, dma_handle); -+ return NULL; -+ } -+ } -+ qh->state = QH_STATE_IDLE; -+ qh->hep = hep; -+ qh->udev = udev; -+ -+ if (qh->type == USB_ENDPOINT_XFER_INT || -+ qh->type == USB_ENDPOINT_XFER_ISOC) -+ qh->load = usb_calc_bus_time(udev->speed, -+ usb_endpoint_dir_in(&hep->desc), -+ qh->type == USB_ENDPOINT_XFER_ISOC, -+ le16_to_cpu(hep->desc.wMaxPacketSize)) -+ / 1000 + 1; -+ -+ } else { /* Skeleton QH */ -+ qh->state = QH_STATE_ACTIVE; -+ qh->type = -1; -+ } -+ return qh; -+} -+#endif -+ - static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) - { - WARN_ON(qh->state != QH_STATE_IDLE && qh->udev); -@@ -1394,6 +1457,21 @@ static int uhci_urb_enqueue(struct usb_h - if (!urbp) - goto done; - -+#ifdef CONFIG_KDB_USB -+ /* Always allocate new QH for KDB URB. -+ * KDB HQ will be managed by KDB poll code not by -+ * UHCI HCD Driver. 
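-+ * The normal QH scanning path skips this QH (see the KDB checks in -+ * uhci_advance_check()), so the kdb poll code can re-activate it safely.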
-+ */ -+ if (kdb_uhci_keyboard_urb(urb) != -1){ -+ /* KDB urb will be enqued only once */ -+ kdb_uhci_keyboard_set_qh(urb, NULL); -+ qh = kdb_uhci_alloc_qh(uhci, urb->dev, urb->ep); -+ if (!qh) -+ goto err_no_qh; -+ kdb_uhci_keyboard_set_qh(urb, qh); -+ } else -+#endif -+ - if (urb->ep->hcpriv) - qh = urb->ep->hcpriv; - else { -@@ -1641,6 +1719,14 @@ static int uhci_advance_check(struct uhc - int ret = 1; - unsigned status; - -+#ifdef CONFIG_KDB_USB -+ /* Don't manage KDB QH */ -+ if(kdb_uhci_keyboard_check_uhci_qh(qh) != -1){ -+ ret = 0; -+ goto done; -+ } -+#endif -+ - if (qh->type == USB_ENDPOINT_XFER_ISOC) - goto done; - -@@ -1733,6 +1819,11 @@ rescan: - uhci->next_qh = list_entry(qh->node.next, - struct uhci_qh, node); - -+#ifdef CONFIG_KDB_USB -+ /* Don't manage KDB QH */ -+ if(kdb_uhci_keyboard_check_uhci_qh(qh) != -1) -+ continue; -+#endif - if (uhci_advance_check(uhci, qh)) { - uhci_scan_qh(uhci, qh); - if (qh->state == QH_STATE_ACTIVE) { -@@ -1759,3 +1850,76 @@ rescan: - else - uhci_set_next_interrupt(uhci); - } -+ -+#ifdef CONFIG_KDB_USB -+/* -+ * Activate KDB UHCI QH, called by KDB poll code. -+ */ -+static void kdb_activate_uhci_qh(struct uhci_qh *qh) -+{ -+ struct urb_priv *urbp; -+ struct uhci_td *td; -+ __le32 status, token; -+ -+ urbp = list_entry(qh->queue.next, struct urb_priv, node); -+ -+ list_for_each_entry(td, &urbp->td_list, list){ -+ status = td->status; -+ token = td->token; -+ barrier(); -+ /* Clear Status and ActLen */ -+ status &= cpu_to_le32(0xff000000); -+ /* Make TD Active */ -+ status |= cpu_to_le32(TD_CTRL_ACTIVE); -+ /* Clear TD Interrupt */ -+ status &= cpu_to_le32(~TD_CTRL_IOC); -+ /* Toggle Data Sycronization Bit */ -+ if (token & cpu_to_le32(TD_TOKEN_TOGGLE)) -+ token &= cpu_to_le32(~TD_TOKEN_TOGGLE); -+ else -+ token |= cpu_to_le32(TD_TOKEN_TOGGLE); -+ -+ td->token = token; -+ td->status = status; -+ barrier(); -+ } -+ /* Activate KDB UHCI Keyboard HID QH */ -+ td = list_entry(urbp->td_list.next, struct uhci_td, list); -+ qh->element = LINK_TO_TD(td); -+ barrier(); -+} -+ -+/* -+ * Called when KDB finishes process key press/release event. -+ */ -+static void -+kdb_uhci_urb_complete (struct urb *urb) -+{ -+ if (!kdb_uhci_keyboard_get_hid_event(urb)) -+ return; -+ -+ /* Activate KDB TD */ -+ kdb_activate_uhci_qh(kdb_uhci_keyboard_get_qh(urb)); -+ kdb_uhci_keyboard_set_hid_event(urb, 0); -+} -+ -+/* -+ * Check if state of KDB URB changed (key was pressed/released). -+ */ -+static int uhci_check_kdb_uhci_qh(struct uhci_qh *qh) -+{ -+ struct urb_priv *urbp = NULL; -+ struct uhci_td *td; -+ unsigned status; -+ -+ urbp = list_entry(qh->queue.next, struct urb_priv, node); -+ td = list_entry(urbp->td_list.next, struct uhci_td, list); -+ status = td_status(td); -+ if (!(status & TD_CTRL_ACTIVE)){ -+ /* We're okay, the queue has advanced */ -+ kdb_uhci_keyboard_set_hid_event_qh(qh, 1); -+ return 0; -+ } -+ return -1; -+} -+#endif ---- a/fs/proc/meminfo.c -+++ b/fs/proc/meminfo.c -@@ -161,6 +161,151 @@ static int meminfo_proc_show(struct seq_ - #undef K - } - -+#ifdef CONFIG_KDB -+#include -+#include -+/* Like meminfo_proc_show() but without the locks and using kdb_printf() */ -+void -+kdb_meminfo_proc_show(void) -+{ -+ struct sysinfo i; -+ unsigned long committed; -+ unsigned long allowed; -+ struct vmalloc_info vmi; -+ long cached; -+ unsigned long pages[NR_LRU_LISTS]; -+ int lru; -+ -+/* -+ * display in kilobytes. 
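-+ * e.g. with 4 KiB pages PAGE_SHIFT is 12, so K(x) is simply x << 2.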
-+ */ -+#define K(x) ((x) << (PAGE_SHIFT - 10)) -+ si_meminfo(&i); -+ kdb_si_swapinfo(&i); -+ committed = percpu_counter_read_positive(&vm_committed_as); -+ allowed = ((totalram_pages - hugetlb_total_pages()) -+ * sysctl_overcommit_ratio / 100) + total_swap_pages; -+ -+ cached = global_page_state(NR_FILE_PAGES) - -+ total_swapcache_pages - i.bufferram; -+ if (cached < 0) -+ cached = 0; -+ -+ get_vmalloc_info(&vmi); -+ -+ for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) -+ pages[lru] = global_page_state(NR_LRU_BASE + lru); -+ -+ kdb_printf( -+ "MemTotal: %8lu kB\n" -+ "MemFree: %8lu kB\n" -+ "Buffers: %8lu kB\n", -+ K(i.totalram), -+ K(i.freeram), -+ K(i.bufferram) -+ ); -+ kdb_printf( -+ "Cached: %8lu kB\n" -+ "SwapCached: %8lu kB\n" -+ "Active: %8lu kB\n" -+ "Inactive: %8lu kB\n", -+ K(cached), -+ K(total_swapcache_pages), -+ K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]), -+ K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]) -+ ); -+ kdb_printf( -+ "Active(anon): %8lu kB\n" -+ "Inactive(anon): %8lu kB\n" -+ "Active(file): %8lu kB\n" -+ "Inactive(file): %8lu kB\n", -+ K(pages[LRU_ACTIVE_ANON]), -+ K(pages[LRU_INACTIVE_ANON]), -+ K(pages[LRU_ACTIVE_FILE]), -+ K(pages[LRU_INACTIVE_FILE]) -+ ); -+#ifdef CONFIG_UNEVICTABLE_LRU -+ kdb_printf( -+ "Unevictable: %8lu kB\n" -+ "Mlocked: %8lu kB\n", -+ K(pages[LRU_UNEVICTABLE]), -+ K(global_page_state(NR_MLOCK)) -+ ); -+#endif -+#ifdef CONFIG_HIGHMEM -+ kdb_printf( -+ "HighTotal: %8lu kB\n" -+ "HighFree: %8lu kB\n" -+ "LowTotal: %8lu kB\n" -+ "LowFree: %8lu kB\n", -+ K(i.totalhigh), -+ K(i.freehigh), -+ K(i.totalram-i.totalhigh), -+ K(i.freeram-i.freehigh) -+ ); -+#endif -+ kdb_printf( -+ "SwapTotal: %8lu kB\n" -+ "SwapFree: %8lu kB\n" -+ "Dirty: %8lu kB\n", -+ K(i.totalswap), -+ K(i.freeswap), -+ K(global_page_state(NR_FILE_DIRTY)) -+ ); -+ kdb_printf( -+ "Writeback: %8lu kB\n" -+ "AnonPages: %8lu kB\n" -+ "Mapped: %8lu kB\n", -+ K(global_page_state(NR_WRITEBACK)), -+ K(global_page_state(NR_ANON_PAGES)), -+ K(global_page_state(NR_FILE_MAPPED)) -+ ); -+ kdb_printf( -+ "Slab: %8lu kB\n" -+ "SReclaimable: %8lu kB\n" -+ "SUnreclaim: %8lu kB\n", -+ K(global_page_state(NR_SLAB_RECLAIMABLE) + -+ global_page_state(NR_SLAB_UNRECLAIMABLE)), -+ K(global_page_state(NR_SLAB_RECLAIMABLE)), -+ K(global_page_state(NR_SLAB_UNRECLAIMABLE)) -+ ); -+ kdb_printf( -+ "PageTables: %8lu kB\n" -+#ifdef CONFIG_QUICKLIST -+ "Quicklists: %8lu kB\n" -+#endif -+ "NFS_Unstable: %8lu kB\n" -+ "Bounce: %8lu kB\n", -+ K(global_page_state(NR_PAGETABLE)), -+#ifdef CONFIG_QUICKLIST -+ K(quicklist_total_size()), -+#endif -+ K(global_page_state(NR_UNSTABLE_NFS)), -+ K(global_page_state(NR_BOUNCE)) -+ ); -+ kdb_printf( -+ "WritebackTmp: %8lu kB\n" -+ "CommitLimit: %8lu kB\n" -+ "Committed_AS: %8lu kB\n", -+ K(global_page_state(NR_WRITEBACK_TEMP)), -+ K(allowed), -+ K(committed) -+ ); -+ kdb_printf( -+ "VmallocTotal: %8lu kB\n" -+ "VmallocUsed: %8lu kB\n" -+ "VmallocChunk: %8lu kB\n", -+ (unsigned long)VMALLOC_TOTAL >> 10, -+ vmi.used >> 10, -+ vmi.largest_chunk >> 10 -+ ); -+ -+#ifdef CONFIG_HUGETLBFS -+ kdb_hugetlb_report_meminfo(); -+#endif -+} -+#endif /* CONFIG_KDB */ -+ - static int meminfo_proc_open(struct inode *inode, struct file *file) - { - return single_open(file, meminfo_proc_show, NULL); ---- a/fs/proc/mmu.c -+++ b/fs/proc/mmu.c -@@ -14,11 +14,21 @@ - #include - #include "internal.h" - -+#ifdef CONFIG_KDB -+#include -+#endif -+ - void get_vmalloc_info(struct vmalloc_info *vmi) - { - struct vm_struct *vma; - unsigned long free_area_size; - unsigned long prev_end; 
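-+ /* While kdb is running, the thread holding vmlist_lock may itself be -+  * stopped, so taking the lock here could deadlock; skip it in that case. */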
-+#ifdef CONFIG_KDB -+ int get_lock = !KDB_IS_RUNNING(); -+#else -+#define get_lock 1 -+#endif -+ - - vmi->used = 0; - -@@ -30,7 +40,8 @@ void get_vmalloc_info(struct vmalloc_inf - - prev_end = VMALLOC_START; - -- read_lock(&vmlist_lock); -+ if (get_lock) -+ read_lock(&vmlist_lock); - - for (vma = vmlist; vma; vma = vma->next) { - unsigned long addr = (unsigned long) vma->addr; -@@ -55,6 +66,7 @@ void get_vmalloc_info(struct vmalloc_inf - if (VMALLOC_END - prev_end > vmi->largest_chunk) - vmi->largest_chunk = VMALLOC_END - prev_end; - -- read_unlock(&vmlist_lock); -+ if (get_lock) -+ read_unlock(&vmlist_lock); - } - } ---- a/include/asm-generic/kmap_types.h -+++ b/include/asm-generic/kmap_types.h -@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY, - KMAP_D(16) KM_IRQ_PTE, - KMAP_D(17) KM_NMI, - KMAP_D(18) KM_NMI_PTE, --KMAP_D(19) KM_TYPE_NR -+KMAP_D(19) KM_KDB, -+KMAP_D(20) KM_TYPE_NR - }; - - #undef KMAP_D ---- a/include/linux/console.h -+++ b/include/linux/console.h -@@ -142,7 +142,12 @@ void vcs_remove_sysfs(int index); - - /* Some debug stub to catch some of the obvious races in the VT code */ - #if 1 -+#ifdef CONFIG_KDB -+#include -+#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress && !atomic_read(&kdb_event)) -+#else /* !CONFIG_KDB */ - #define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress) -+#endif /* CONFIG_KDB */ - #else - #define WARN_CONSOLE_UNLOCKED() - #endif ---- /dev/null -+++ b/include/linux/dis-asm.h -@@ -0,0 +1,347 @@ -+/* Interface between the opcode library and its callers. -+ -+ Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005 -+ Free Software Foundation, Inc. -+ -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 2, or (at your option) -+ any later version. -+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 51 Franklin Street - Fifth Floor, -+ Boston, MA 02110-1301, USA. -+ -+ Written by Cygnus Support, 1993. -+ -+ The opcode library (libopcodes.a) provides instruction decoders for -+ a large variety of instruction sets, callable with an identical -+ interface, for making instruction-processing programs more independent -+ of the instruction set being processed. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifndef DIS_ASM_H -+#define DIS_ASM_H -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#ifdef __KERNEL__ -+#include -+#include -+typedef void FILE; -+#else /* __KERNEL__ */ -+#include -+#include "bfd.h" -+#endif /* __KERNEL__ */ -+ -+typedef int (*fprintf_ftype) (void *, const char*, ...) 
ATTRIBUTE_FPTR_PRINTF_2; -+ -+enum dis_insn_type { -+ dis_noninsn, /* Not a valid instruction */ -+ dis_nonbranch, /* Not a branch instruction */ -+ dis_branch, /* Unconditional branch */ -+ dis_condbranch, /* Conditional branch */ -+ dis_jsr, /* Jump to subroutine */ -+ dis_condjsr, /* Conditional jump to subroutine */ -+ dis_dref, /* Data reference instruction */ -+ dis_dref2 /* Two data references in instruction */ -+}; -+ -+/* This struct is passed into the instruction decoding routine, -+ and is passed back out into each callback. The various fields are used -+ for conveying information from your main routine into your callbacks, -+ for passing information into the instruction decoders (such as the -+ addresses of the callback functions), or for passing information -+ back from the instruction decoders to their callers. -+ -+ It must be initialized before it is first passed; this can be done -+ by hand, or using one of the initialization macros below. */ -+ -+typedef struct disassemble_info { -+ fprintf_ftype fprintf_func; -+ void *stream; -+ void *application_data; -+ -+ /* Target description. We could replace this with a pointer to the bfd, -+ but that would require one. There currently isn't any such requirement -+ so to avoid introducing one we record these explicitly. */ -+ /* The bfd_flavour. This can be bfd_target_unknown_flavour. */ -+ enum bfd_flavour flavour; -+ /* The bfd_arch value. */ -+ enum bfd_architecture arch; -+ /* The bfd_mach value. */ -+ unsigned long mach; -+ /* Endianness (for bi-endian cpus). Mono-endian cpus can ignore this. */ -+ enum bfd_endian endian; -+ /* An arch/mach-specific bitmask of selected instruction subsets, mainly -+ for processors with run-time-switchable instruction sets. The default, -+ zero, means that there is no constraint. CGEN-based opcodes ports -+ may use ISA_foo masks. */ -+ unsigned long insn_sets; -+ -+ /* Some targets need information about the current section to accurately -+ display insns. If this is NULL, the target disassembler function -+ will have to make its best guess. */ -+ asection *section; -+ -+ /* An array of pointers to symbols either at the location being disassembled -+ or at the start of the function being disassembled. The array is sorted -+ so that the first symbol is intended to be the one used. The others are -+ present for any misc. purposes. This is not set reliably, but if it is -+ not NULL, it is correct. */ -+ asymbol **symbols; -+ /* Number of symbols in array. */ -+ int num_symbols; -+ -+ /* For use by the disassembler. -+ The top 16 bits are reserved for public use (and are documented here). -+ The bottom 16 bits are for the internal use of the disassembler. */ -+ unsigned long flags; -+#define INSN_HAS_RELOC 0x80000000 -+ void *private_data; -+ -+ /* Function used to get bytes to disassemble. MEMADDR is the -+ address of the stuff to be disassembled, MYADDR is the address to -+ put the bytes in, and LENGTH is the number of bytes to read. -+ INFO is a pointer to this struct. -+ Returns an errno value or 0 for success. */ -+ int (*read_memory_func) -+ (bfd_vma memaddr, bfd_byte *myaddr, unsigned int length, -+ struct disassemble_info *info); -+ -+ /* Function which should be called if we get an error that we can't -+ recover from. STATUS is the errno value from read_memory_func and -+ MEMADDR is the address that we were trying to read. INFO is a -+ pointer to this struct. 
*/ -+ void (*memory_error_func) -+ (int status, bfd_vma memaddr, struct disassemble_info *info); -+ -+ /* Function called to print ADDR. */ -+ void (*print_address_func) -+ (bfd_vma addr, struct disassemble_info *info); -+ -+ /* Function called to determine if there is a symbol at the given ADDR. -+ If there is, the function returns 1, otherwise it returns 0. -+ This is used by ports which support an overlay manager where -+ the overlay number is held in the top part of an address. In -+ some circumstances we want to include the overlay number in the -+ address, (normally because there is a symbol associated with -+ that address), but sometimes we want to mask out the overlay bits. */ -+ int (* symbol_at_address_func) -+ (bfd_vma addr, struct disassemble_info * info); -+ -+ /* Function called to check if a SYMBOL is can be displayed to the user. -+ This is used by some ports that want to hide special symbols when -+ displaying debugging outout. */ -+ bfd_boolean (* symbol_is_valid) -+ (asymbol *, struct disassemble_info * info); -+ -+ /* These are for buffer_read_memory. */ -+ bfd_byte *buffer; -+ bfd_vma buffer_vma; -+ unsigned int buffer_length; -+ -+ /* This variable may be set by the instruction decoder. It suggests -+ the number of bytes objdump should display on a single line. If -+ the instruction decoder sets this, it should always set it to -+ the same value in order to get reasonable looking output. */ -+ int bytes_per_line; -+ -+ /* The next two variables control the way objdump displays the raw data. */ -+ /* For example, if bytes_per_line is 8 and bytes_per_chunk is 4, the */ -+ /* output will look like this: -+ 00: 00000000 00000000 -+ with the chunks displayed according to "display_endian". */ -+ int bytes_per_chunk; -+ enum bfd_endian display_endian; -+ -+ /* Number of octets per incremented target address -+ Normally one, but some DSPs have byte sizes of 16 or 32 bits. */ -+ unsigned int octets_per_byte; -+ -+ /* The number of zeroes we want to see at the end of a section before we -+ start skipping them. */ -+ unsigned int skip_zeroes; -+ -+ /* The number of zeroes to skip at the end of a section. If the number -+ of zeroes at the end is between SKIP_ZEROES_AT_END and SKIP_ZEROES, -+ they will be disassembled. If there are fewer than -+ SKIP_ZEROES_AT_END, they will be skipped. This is a heuristic -+ attempt to avoid disassembling zeroes inserted by section -+ alignment. */ -+ unsigned int skip_zeroes_at_end; -+ -+ /* Results from instruction decoders. Not all decoders yet support -+ this information. This info is set each time an instruction is -+ decoded, and is only valid for the last such instruction. -+ -+ To determine whether this decoder supports this information, set -+ insn_info_valid to 0, decode an instruction, then check it. */ -+ -+ char insn_info_valid; /* Branch info has been set. */ -+ char branch_delay_insns; /* How many sequential insn's will run before -+ a branch takes effect. (0 = normal) */ -+ char data_size; /* Size of data reference in insn, in bytes */ -+ enum dis_insn_type insn_type; /* Type of instruction */ -+ bfd_vma target; /* Target address of branch or dref, if known; -+ zero if unknown. */ -+ bfd_vma target2; /* Second target address for dref2 */ -+ -+ /* Command line options specific to the target disassembler. */ -+ char * disassembler_options; -+ -+} disassemble_info; -+ -+ -+/* Standard disassemblers. Disassemble one instruction at the given -+ target address. Return number of octets processed. 
*/ -+typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *); -+ -+extern int print_insn_big_mips (bfd_vma, disassemble_info *); -+extern int print_insn_little_mips (bfd_vma, disassemble_info *); -+extern int print_insn_i386 (bfd_vma, disassemble_info *); -+extern int print_insn_i386_att (bfd_vma, disassemble_info *); -+extern int print_insn_i386_intel (bfd_vma, disassemble_info *); -+extern int print_insn_ia64 (bfd_vma, disassemble_info *); -+extern int print_insn_i370 (bfd_vma, disassemble_info *); -+extern int print_insn_m68hc11 (bfd_vma, disassemble_info *); -+extern int print_insn_m68hc12 (bfd_vma, disassemble_info *); -+extern int print_insn_m68k (bfd_vma, disassemble_info *); -+extern int print_insn_z8001 (bfd_vma, disassemble_info *); -+extern int print_insn_z8002 (bfd_vma, disassemble_info *); -+extern int print_insn_h8300 (bfd_vma, disassemble_info *); -+extern int print_insn_h8300h (bfd_vma, disassemble_info *); -+extern int print_insn_h8300s (bfd_vma, disassemble_info *); -+extern int print_insn_h8500 (bfd_vma, disassemble_info *); -+extern int print_insn_alpha (bfd_vma, disassemble_info *); -+extern int print_insn_big_arm (bfd_vma, disassemble_info *); -+extern int print_insn_little_arm (bfd_vma, disassemble_info *); -+extern int print_insn_sparc (bfd_vma, disassemble_info *); -+extern int print_insn_big_a29k (bfd_vma, disassemble_info *); -+extern int print_insn_little_a29k (bfd_vma, disassemble_info *); -+extern int print_insn_avr (bfd_vma, disassemble_info *); -+extern int print_insn_d10v (bfd_vma, disassemble_info *); -+extern int print_insn_d30v (bfd_vma, disassemble_info *); -+extern int print_insn_dlx (bfd_vma, disassemble_info *); -+extern int print_insn_fr30 (bfd_vma, disassemble_info *); -+extern int print_insn_hppa (bfd_vma, disassemble_info *); -+extern int print_insn_i860 (bfd_vma, disassemble_info *); -+extern int print_insn_i960 (bfd_vma, disassemble_info *); -+extern int print_insn_ip2k (bfd_vma, disassemble_info *); -+extern int print_insn_m32r (bfd_vma, disassemble_info *); -+extern int print_insn_m88k (bfd_vma, disassemble_info *); -+extern int print_insn_maxq_little (bfd_vma, disassemble_info *); -+extern int print_insn_maxq_big (bfd_vma, disassemble_info *); -+extern int print_insn_mcore (bfd_vma, disassemble_info *); -+extern int print_insn_mmix (bfd_vma, disassemble_info *); -+extern int print_insn_mn10200 (bfd_vma, disassemble_info *); -+extern int print_insn_mn10300 (bfd_vma, disassemble_info *); -+extern int print_insn_ms1 (bfd_vma, disassemble_info *); -+extern int print_insn_msp430 (bfd_vma, disassemble_info *); -+extern int print_insn_ns32k (bfd_vma, disassemble_info *); -+extern int print_insn_crx (bfd_vma, disassemble_info *); -+extern int print_insn_openrisc (bfd_vma, disassemble_info *); -+extern int print_insn_big_or32 (bfd_vma, disassemble_info *); -+extern int print_insn_little_or32 (bfd_vma, disassemble_info *); -+extern int print_insn_pdp11 (bfd_vma, disassemble_info *); -+extern int print_insn_pj (bfd_vma, disassemble_info *); -+extern int print_insn_big_powerpc (bfd_vma, disassemble_info *); -+extern int print_insn_little_powerpc (bfd_vma, disassemble_info *); -+extern int print_insn_rs6000 (bfd_vma, disassemble_info *); -+extern int print_insn_s390 (bfd_vma, disassemble_info *); -+extern int print_insn_sh (bfd_vma, disassemble_info *); -+extern int print_insn_tic30 (bfd_vma, disassemble_info *); -+extern int print_insn_tic4x (bfd_vma, disassemble_info *); -+extern int print_insn_tic54x (bfd_vma, disassemble_info *); -+extern int 
print_insn_tic80 (bfd_vma, disassemble_info *); -+extern int print_insn_v850 (bfd_vma, disassemble_info *); -+extern int print_insn_vax (bfd_vma, disassemble_info *); -+extern int print_insn_w65 (bfd_vma, disassemble_info *); -+extern int print_insn_xstormy16 (bfd_vma, disassemble_info *); -+extern int print_insn_xtensa (bfd_vma, disassemble_info *); -+extern int print_insn_sh64 (bfd_vma, disassemble_info *); -+extern int print_insn_sh64x_media (bfd_vma, disassemble_info *); -+extern int print_insn_frv (bfd_vma, disassemble_info *); -+extern int print_insn_iq2000 (bfd_vma, disassemble_info *); -+extern int print_insn_m32c (bfd_vma, disassemble_info *); -+ -+extern disassembler_ftype arc_get_disassembler (void *); -+extern disassembler_ftype cris_get_disassembler (bfd *); -+ -+extern void print_mips_disassembler_options (FILE *); -+extern void print_ppc_disassembler_options (FILE *); -+extern void print_arm_disassembler_options (FILE *); -+extern void parse_arm_disassembler_option (char *); -+extern int get_arm_regname_num_options (void); -+extern int set_arm_regname_option (int); -+extern int get_arm_regnames (int, const char **, const char **, const char *const **); -+extern bfd_boolean arm_symbol_is_valid (asymbol *, struct disassemble_info *); -+ -+/* Fetch the disassembler for a given BFD, if that support is available. */ -+extern disassembler_ftype disassembler (bfd *); -+ -+/* Amend the disassemble_info structure as necessary for the target architecture. -+ Should only be called after initialising the info->arch field. */ -+extern void disassemble_init_for_target (struct disassemble_info * info); -+ -+/* Document any target specific options available from the disassembler. */ -+extern void disassembler_usage (FILE *); -+ -+ -+/* This block of definitions is for particular callers who read instructions -+ into a buffer before calling the instruction decoder. */ -+ -+/* Here is a function which callers may wish to use for read_memory_func. -+ It gets bytes from a buffer. */ -+extern int buffer_read_memory -+ (bfd_vma, bfd_byte *, unsigned int, struct disassemble_info *); -+ -+/* This function goes with buffer_read_memory. -+ It prints a message using info->fprintf_func and info->stream. */ -+extern void perror_memory (int, bfd_vma, struct disassemble_info *); -+ -+ -+/* Just print the address in hex. This is included for completeness even -+ though both GDB and objdump provide their own (to print symbolic -+ addresses). */ -+extern void generic_print_address -+ (bfd_vma, struct disassemble_info *); -+ -+/* Always true. */ -+extern int generic_symbol_at_address -+ (bfd_vma, struct disassemble_info *); -+ -+/* Also always true. */ -+extern bfd_boolean generic_symbol_is_valid -+ (asymbol *, struct disassemble_info *); -+ -+/* Method to initialize a disassemble_info struct. This should be -+ called by all applications creating such a struct. */ -+extern void init_disassemble_info (struct disassemble_info *info, void *stream, -+ fprintf_ftype fprintf_func); -+ -+/* For compatibility with existing code. */ -+#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \ -+ init_disassemble_info (&(INFO), (STREAM), (fprintf_ftype) (FPRINTF_FUNC)) -+#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \ -+ init_disassemble_info (&(INFO), (STREAM), (fprintf_ftype) (FPRINTF_FUNC)) -+ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* ! 
defined (DIS_ASM_H) */ ---- /dev/null -+++ b/include/linux/kdb.h -@@ -0,0 +1,184 @@ -+#ifndef _KDB_H -+#define _KDB_H -+ -+/* -+ * Kernel Debugger Architecture Independent Global Headers -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. -+ * Copyright (C) 2000 Stephane Eranian -+ */ -+ -+#include -+#include -+#include -+ -+#ifdef CONFIG_KDB -+/* These are really private, but they must be defined before including -+ * asm-$(ARCH)/kdb.h, so make them public and put them here. -+ */ -+extern int kdb_getuserarea_size(void *, unsigned long, size_t); -+extern int kdb_putuserarea_size(unsigned long, void *, size_t); -+ -+#include -+#endif -+ -+#define KDB_MAJOR_VERSION 4 -+#define KDB_MINOR_VERSION 4 -+#define KDB_TEST_VERSION "" -+ -+/* -+ * kdb_initial_cpu is initialized to -1, and is set to the cpu -+ * number whenever the kernel debugger is entered. -+ */ -+extern volatile int kdb_initial_cpu; -+extern atomic_t kdb_event; -+extern atomic_t kdb_8250; -+#ifdef CONFIG_KDB -+#define KDB_IS_RUNNING() (kdb_initial_cpu != -1) -+#define KDB_8250() (atomic_read(&kdb_8250) != 0) -+#else -+#define KDB_IS_RUNNING() (0) -+#define KDB_8250() (0) -+#endif /* CONFIG_KDB */ -+ -+/* -+ * kdb_on -+ * -+ * Defines whether kdb is on or not. Default value -+ * is set by CONFIG_KDB_OFF. Boot with kdb=on/off/on-nokey -+ * or echo "[012]" > /proc/sys/kernel/kdb to change it. -+ */ -+extern int kdb_on; -+ -+#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_SGI_L1_CONSOLE) -+/* -+ * kdb_serial.iobase is initialized to zero, and is set to the I/O -+ * address of the serial port when the console is setup in -+ * serial_console_setup. -+ */ -+extern struct kdb_serial { -+ int io_type; -+ unsigned long iobase; -+ unsigned long ioreg_shift; -+} kdb_serial; -+#endif -+ -+/* -+ * kdb_diemsg -+ * -+ * Contains a pointer to the last string supplied to the -+ * kernel 'die' panic function. -+ */ -+extern const char *kdb_diemsg; -+ -+#define KDB_FLAG_EARLYKDB (1 << 0) /* set from boot parameter kdb=early */ -+#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */ -+#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */ -+#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */ -+#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when kdb is off */ -+#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available, kdb is disabled */ -+#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do not use keyboard */ -+#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do not use keyboard */ -+#define KDB_FLAG_RECOVERY (1 << 8) /* kdb is being entered for an error which has been recovered */ -+ -+extern volatile int kdb_flags; /* Global flags, see kdb_state for per cpu state */ -+ -+extern void kdb_save_flags(void); -+extern void kdb_restore_flags(void); -+ -+#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag) -+#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag)) -+#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag)) -+ -+/* -+ * External entry point for the kernel debugger. The pt_regs -+ * at the time of entry are supplied along with the reason for -+ * entry to the kernel debugger. 
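-+ * For example, the serial console pollers in this patch call -+ * kdb(KDB_REASON_KEYBOARD, 0, get_irq_regs()) when the kdb_serial_str -+ * sequence is seen on the console line.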
-+ */ -+ -+typedef enum { -+ KDB_REASON_ENTER=1, /* KDB_ENTER() trap/fault - regs valid */ -+ KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */ -+ KDB_REASON_BREAK, /* Breakpoint inst. - regs valid */ -+ KDB_REASON_DEBUG, /* Debug Fault - regs valid */ -+ KDB_REASON_OOPS, /* Kernel Oops - regs valid */ -+ KDB_REASON_SWITCH, /* CPU switch - regs valid*/ -+ KDB_REASON_KEYBOARD, /* Keyboard entry - regs valid */ -+ KDB_REASON_NMI, /* Non-maskable interrupt; regs valid */ -+ KDB_REASON_RECURSE, /* Recursive entry to kdb; regs probably valid */ -+ KDB_REASON_CPU_UP, /* Add one cpu to kdb; regs invalid */ -+ KDB_REASON_SILENT, /* Silent entry/exit to kdb; regs invalid - internal only */ -+} kdb_reason_t; -+ -+#ifdef CONFIG_KDB -+extern int kdb(kdb_reason_t, int, struct pt_regs *); -+#else -+#define kdb(reason,error_code,frame) (0) -+#endif -+ -+/* Mainly used by kdb code, but this function is sometimes used -+ * by hacked debug code so make it generally available, not private. -+ */ -+extern void kdb_printf(const char *,...) -+ __attribute__ ((format (printf, 1, 2))); -+typedef void (*kdb_printf_t)(const char *, ...) -+ __attribute__ ((format (printf, 1, 2))); -+extern void kdb_init(void); -+ -+#if defined(CONFIG_SMP) -+/* -+ * Kernel debugger non-maskable IPI handler. -+ */ -+extern int kdb_ipi(struct pt_regs *, void (*ack_interrupt)(void)); -+extern void smp_kdb_stop(void); -+#else /* CONFIG_SMP */ -+#define smp_kdb_stop() -+#endif /* CONFIG_SMP */ -+ -+#ifdef CONFIG_KDB_USB -+ -+#include -+ -+typedef int (*kdb_hc_keyboard_attach_t)(int i, unsigned int bufsize); -+typedef int (*kdb_hc_keyboard_detach_t)(struct urb *urb, int i); -+ -+extern int kdb_usb_keyboard_attach(struct urb *urb, unsigned char *buffer, -+ void *poll_func, void *compl_func, -+ kdb_hc_keyboard_attach_t kdb_hc_keyboard_attach, -+ kdb_hc_keyboard_detach_t kdb_hc_keyboard_detach, -+ unsigned int bufsize, -+ struct urb *hid_urb); -+ -+extern int kdb_usb_keyboard_detach(struct urb *urb); -+ -+#endif /* CONFIG_KDB_USB */ -+ -+static inline -+int kdb_process_cpu(const struct task_struct *p) -+{ -+ unsigned int cpu = task_thread_info(p)->cpu; -+ if (cpu > NR_CPUS) -+ cpu = 0; -+ return cpu; -+} -+ -+extern const char kdb_serial_str[]; -+ -+#ifdef CONFIG_KDB_KDUMP -+/* Define values for kdb_kdump_state */ -+extern int kdb_kdump_state; /* KDB kdump state */ -+#define KDB_KDUMP_RESET 0 -+#define KDB_KDUMP_KDUMP 1 -+ -+void kdba_kdump_prepare(struct pt_regs *); -+void machine_crash_shutdown(struct pt_regs *); -+void machine_crash_shutdown_begin(void); -+void machine_crash_shutdown_end(struct pt_regs *); -+ -+#endif /* CONFIG_KDB_KDUMP */ -+ -+#endif /* !_KDB_H */ ---- /dev/null -+++ b/include/linux/kdbprivate.h -@@ -0,0 +1,518 @@ -+#ifndef _KDBPRIVATE_H -+#define _KDBPRIVATE_H -+ -+/* -+ * Kernel Debugger Architecture Independent Private Headers -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+ -+#include -+#include -+#include -+ -+ /* -+ * Kernel Debugger Error codes. Must not overlap with command codes. 
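-+ * (The command codes below start at -1001, leaving -1 .. -999 free for errors.)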
-+ */ -+ -+#define KDB_NOTFOUND (-1) -+#define KDB_ARGCOUNT (-2) -+#define KDB_BADWIDTH (-3) -+#define KDB_BADRADIX (-4) -+#define KDB_NOTENV (-5) -+#define KDB_NOENVVALUE (-6) -+#define KDB_NOTIMP (-7) -+#define KDB_ENVFULL (-8) -+#define KDB_ENVBUFFULL (-9 ) -+#define KDB_TOOMANYBPT (-10) -+#define KDB_TOOMANYDBREGS (-11) -+#define KDB_DUPBPT (-12) -+#define KDB_BPTNOTFOUND (-13) -+#define KDB_BADMODE (-14) -+#define KDB_BADINT (-15) -+#define KDB_INVADDRFMT (-16) -+#define KDB_BADREG (-17) -+#define KDB_BADCPUNUM (-18) -+#define KDB_BADLENGTH (-19) -+#define KDB_NOBP (-20) -+#define KDB_BADADDR (-21) -+ -+ /* -+ * Kernel Debugger Command codes. Must not overlap with error codes. -+ */ -+#define KDB_CMD_GO (-1001) -+#define KDB_CMD_CPU (-1002) -+#define KDB_CMD_SS (-1003) -+#define KDB_CMD_SSB (-1004) -+ -+ /* -+ * Internal debug flags -+ */ -+/* KDB_DEBUG_FLAG_BT 0x0001 Was Stack traceback debug */ -+#define KDB_DEBUG_FLAG_BP 0x0002 /* Breakpoint subsystem debug */ -+#define KDB_DEBUG_FLAG_BB_SUMM 0x0004 /* Basic block analysis, summary only */ -+#define KDB_DEBUG_FLAG_AR 0x0008 /* Activation record, generic */ -+#define KDB_DEBUG_FLAG_ARA 0x0010 /* Activation record, arch specific */ -+#define KDB_DEBUG_FLAG_BB 0x0020 /* All basic block analysis */ -+#define KDB_DEBUG_FLAG_STATE 0x0040 /* State flags */ -+#define KDB_DEBUG_FLAG_MASK 0xffff /* All debug flags */ -+#define KDB_DEBUG_FLAG_SHIFT 16 /* Shift factor for dbflags */ -+ -+#define KDB_DEBUG(flag) (kdb_flags & (KDB_DEBUG_FLAG_##flag << KDB_DEBUG_FLAG_SHIFT)) -+#define KDB_DEBUG_STATE(text,value) if (KDB_DEBUG(STATE)) kdb_print_state(text, value) -+ -+typedef enum { -+ KDB_REPEAT_NONE = 0, /* Do not repeat this command */ -+ KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ -+ KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ -+} kdb_repeat_t; -+ -+typedef int (*kdb_func_t)(int, const char **); -+ -+ /* -+ * Symbol table format returned by kallsyms. -+ */ -+ -+typedef struct __ksymtab { -+ unsigned long value; /* Address of symbol */ -+ const char *mod_name; /* Module containing symbol or "kernel" */ -+ unsigned long mod_start; -+ unsigned long mod_end; -+ const char *sec_name; /* Section containing symbol */ -+ unsigned long sec_start; -+ unsigned long sec_end; -+ const char *sym_name; /* Full symbol name, including any version */ -+ unsigned long sym_start; -+ unsigned long sym_end; -+ } kdb_symtab_t; -+extern int kallsyms_symbol_next(char *prefix_name, int flag); -+extern int kallsyms_symbol_complete(char *prefix_name, int max_len); -+ -+ /* -+ * Exported Symbols for kernel loadable modules to use. -+ */ -+extern int kdb_register(char *, kdb_func_t, char *, char *, short); -+extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, short, kdb_repeat_t); -+extern int kdb_unregister(char *); -+ -+extern int kdb_getarea_size(void *, unsigned long, size_t); -+extern int kdb_putarea_size(unsigned long, void *, size_t); -+ -+/* Like get_user and put_user, kdb_getarea and kdb_putarea take variable -+ * names, not pointers. The underlying *_size functions take pointers. 
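-+ * For example, kdb_getarea(word, addr) copies sizeof(word) bytes from the -+ * kernel address addr into the local variable word.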
-+ */ -+#define kdb_getarea(x,addr) kdb_getarea_size(&(x), addr, sizeof((x))) -+#define kdb_putarea(addr,x) kdb_putarea_size(addr, &(x), sizeof((x))) -+ -+extern int kdb_getphysword(unsigned long *word, -+ unsigned long addr, size_t size); -+extern int kdb_getword(unsigned long *, unsigned long, size_t); -+extern int kdb_putword(unsigned long, unsigned long, size_t); -+ -+extern int kdbgetularg(const char *, unsigned long *); -+extern char *kdbgetenv(const char *); -+extern int kdbgetintenv(const char *, int *); -+extern int kdbgetaddrarg(int, const char**, int*, unsigned long *, -+ long *, char **); -+extern int kdbgetsymval(const char *, kdb_symtab_t *); -+extern int kdbnearsym(unsigned long, kdb_symtab_t *); -+extern void kdbnearsym_cleanup(void); -+extern char *kdb_read(char *buffer, size_t bufsize); -+extern char *kdb_strdup(const char *str, gfp_t type); -+extern void kdb_symbol_print(kdb_machreg_t, const kdb_symtab_t *, unsigned int); -+ -+ /* -+ * Do we have a set of registers? -+ */ -+ -+#define KDB_NULL_REGS(regs) \ -+ (regs == (struct pt_regs *)NULL ? kdb_printf("%s: null regs - should never happen\n", __FUNCTION__), 1 : 0) -+ -+ /* -+ * Routine for debugging the debugger state. -+ */ -+ -+extern void kdb_print_state(const char *, int); -+ -+ /* -+ * Per cpu kdb state. A cpu can be under kdb control but outside kdb, -+ * for example when doing single step. -+ */ -+volatile extern int kdb_state[ /*NR_CPUS*/ ]; -+#define KDB_STATE_KDB 0x00000001 /* Cpu is inside kdb */ -+#define KDB_STATE_LEAVING 0x00000002 /* Cpu is leaving kdb */ -+#define KDB_STATE_CMD 0x00000004 /* Running a kdb command */ -+#define KDB_STATE_KDB_CONTROL 0x00000008 /* This cpu is under kdb control */ -+#define KDB_STATE_HOLD_CPU 0x00000010 /* Hold this cpu inside kdb */ -+#define KDB_STATE_DOING_SS 0x00000020 /* Doing ss command */ -+#define KDB_STATE_DOING_SSB 0x00000040 /* Doing ssb command, DOING_SS is also set */ -+#define KDB_STATE_SSBPT 0x00000080 /* Install breakpoint after one ss, independent of DOING_SS */ -+#define KDB_STATE_REENTRY 0x00000100 /* Valid re-entry into kdb */ -+#define KDB_STATE_SUPPRESS 0x00000200 /* Suppress error messages */ -+#define KDB_STATE_LONGJMP 0x00000400 /* longjmp() data is available */ -+#define KDB_STATE_GO_SWITCH 0x00000800 /* go is switching back to initial cpu */ -+#define KDB_STATE_PRINTF_LOCK 0x00001000 /* Holds kdb_printf lock */ -+#define KDB_STATE_WAIT_IPI 0x00002000 /* Waiting for kdb_ipi() NMI */ -+#define KDB_STATE_RECURSE 0x00004000 /* Recursive entry to kdb */ -+#define KDB_STATE_IP_ADJUSTED 0x00008000 /* Restart IP has been adjusted */ -+#define KDB_STATE_GO1 0x00010000 /* go only releases one cpu */ -+#define KDB_STATE_KEYBOARD 0x00020000 /* kdb entered via keyboard on this cpu */ -+#define KDB_STATE_KEXEC 0x00040000 /* kexec issued */ -+#define KDB_STATE_ARCH 0xff000000 /* Reserved for arch specific use */ -+ -+#define KDB_STATE_CPU(flag,cpu) (kdb_state[cpu] & KDB_STATE_##flag) -+#define KDB_STATE_SET_CPU(flag,cpu) ((void)(kdb_state[cpu] |= KDB_STATE_##flag)) -+#define KDB_STATE_CLEAR_CPU(flag,cpu) ((void)(kdb_state[cpu] &= ~KDB_STATE_##flag)) -+ -+#define KDB_STATE(flag) KDB_STATE_CPU(flag,smp_processor_id()) -+#define KDB_STATE_SET(flag) KDB_STATE_SET_CPU(flag,smp_processor_id()) -+#define KDB_STATE_CLEAR(flag) KDB_STATE_CLEAR_CPU(flag,smp_processor_id()) -+ -+ /* -+ * kdb_nextline -+ * -+ * Contains the current line number on the screen. 
Used -+ * to handle the built-in pager (LINES env variable) -+ */ -+extern volatile int kdb_nextline; -+ -+ /* -+ * Breakpoint state -+ * -+ * Each active and inactive breakpoint is represented by -+ * an instance of the following data structure. -+ */ -+ -+typedef struct _kdb_bp { -+ bfd_vma bp_addr; /* Address breakpoint is present at */ -+ kdb_machinst_t bp_inst; /* Replaced instruction */ -+ -+ unsigned int bp_free:1; /* This entry is available */ -+ -+ unsigned int bp_enabled:1; /* Breakpoint is active in register */ -+ unsigned int bp_global:1; /* Global to all processors */ -+ -+ unsigned int bp_hardtype:1; /* Uses hardware register */ -+ unsigned int bp_forcehw:1; /* Force hardware register */ -+ unsigned int bp_installed:1; /* Breakpoint is installed */ -+ unsigned int bp_delay:1; /* Do delayed bp handling */ -+ unsigned int bp_delayed:1; /* Delayed breakpoint */ -+ -+ int bp_cpu; /* Cpu # (if bp_global == 0) */ -+ kdbhard_bp_t bp_template; /* Hardware breakpoint template */ -+ kdbhard_bp_t *bp_hard[NR_CPUS]; /* Hardware breakpoint structure */ -+ int bp_adjust; /* Adjustment to PC for real instruction */ -+} kdb_bp_t; -+ -+ /* -+ * Breakpoint handling subsystem global variables -+ */ -+extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */]; -+ -+ /* -+ * Breakpoint architecture dependent functions. Must be provided -+ * in some form for all architectures. -+ */ -+extern void kdba_initbp(void); -+extern void kdba_printbp(kdb_bp_t *); -+extern void kdba_alloc_hwbp(kdb_bp_t *bp, int *diagp); -+extern void kdba_free_hwbp(kdb_bp_t *bp); -+extern int kdba_parsebp(int, const char**, int *, kdb_bp_t*); -+extern char *kdba_bptype(kdbhard_bp_t *); -+extern void kdba_setsinglestep(struct pt_regs *); -+extern void kdba_clearsinglestep(struct pt_regs *); -+ -+ /* -+ * Adjust instruction pointer architecture dependent function. Must be -+ * provided in some form for all architectures. -+ */ -+extern void kdba_adjust_ip(kdb_reason_t, int, struct pt_regs *); -+ -+ /* -+ * KDB-only global function prototypes. -+ */ -+extern void kdb_id1(unsigned long); -+extern void kdb_id_init(void); -+ -+ /* -+ * Initialization functions. -+ */ -+extern void kdba_init(void); -+extern void kdb_io_init(void); -+ -+ /* -+ * Architecture specific function to read a string. -+ */ -+typedef int (*get_char_func)(void); -+extern get_char_func poll_funcs[]; -+ -+#ifndef CONFIG_IA64 -+ /* -+ * Data for a single activation record on stack. -+ */ -+ -+struct kdb_stack_info { -+ kdb_machreg_t physical_start; -+ kdb_machreg_t physical_end; -+ kdb_machreg_t logical_start; -+ kdb_machreg_t logical_end; -+ kdb_machreg_t next; -+ const char * id; -+}; -+ -+typedef struct { DECLARE_BITMAP(bits, KDBA_MAXARGS); } valid_t; -+ -+struct kdb_activation_record { -+ struct kdb_stack_info stack; /* information about current stack */ -+ int args; /* number of arguments detected */ -+ kdb_machreg_t arg[KDBA_MAXARGS]; /* -> arguments */ -+ valid_t valid; /* is argument n valid? */ -+}; -+#endif -+ -+ /* -+ * Architecture specific Stack Traceback functions. 
-+ */ -+ -+struct task_struct; -+ -+extern int kdba_bt_address(kdb_machreg_t, int); -+extern int kdba_bt_process(const struct task_struct *, int); -+ -+ /* -+ * KDB Command Table -+ */ -+ -+typedef struct _kdbtab { -+ char *cmd_name; /* Command name */ -+ kdb_func_t cmd_func; /* Function to execute command */ -+ char *cmd_usage; /* Usage String for this command */ -+ char *cmd_help; /* Help message for this command */ -+ short cmd_flags; /* Parsing flags */ -+ short cmd_minlen; /* Minimum legal # command chars required */ -+ kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */ -+} kdbtab_t; -+ -+ /* -+ * External command function declarations -+ */ -+ -+extern int kdb_id(int, const char **); -+extern int kdb_bt(int, const char **); -+ -+ /* -+ * External utility function declarations -+ */ -+extern char* kdb_getstr(char *, size_t, char *); -+ -+ /* -+ * Register contents manipulation -+ */ -+extern int kdba_getregcontents(const char *, struct pt_regs *, kdb_machreg_t *); -+extern int kdba_setregcontents(const char *, struct pt_regs *, kdb_machreg_t); -+extern int kdba_dumpregs(struct pt_regs *, const char *, const char *); -+extern int kdba_setpc(struct pt_regs *, kdb_machreg_t); -+extern kdb_machreg_t kdba_getpc(struct pt_regs *); -+ -+ /* -+ * Debug register handling. -+ */ -+extern void kdba_installdbreg(kdb_bp_t*); -+extern void kdba_removedbreg(kdb_bp_t*); -+ -+ /* -+ * Breakpoint handling - External interfaces -+ */ -+extern void kdb_initbptab(void); -+extern void kdb_bp_install_global(struct pt_regs *); -+extern void kdb_bp_install_local(struct pt_regs *); -+extern void kdb_bp_remove_global(void); -+extern void kdb_bp_remove_local(void); -+ -+ /* -+ * Breakpoint handling - Internal to kdb_bp.c/kdba_bp.c -+ */ -+extern int kdba_installbp(struct pt_regs *regs, kdb_bp_t *); -+extern int kdba_removebp(kdb_bp_t *); -+ -+ -+typedef enum { -+ KDB_DB_BPT, /* Breakpoint */ -+ KDB_DB_SS, /* Single-step trap */ -+ KDB_DB_SSB, /* Single step to branch */ -+ KDB_DB_SSBPT, /* Single step over breakpoint */ -+ KDB_DB_NOBPT /* Spurious breakpoint */ -+} kdb_dbtrap_t; -+ -+extern kdb_dbtrap_t kdba_db_trap(struct pt_regs *, int); /* DEBUG trap/fault handler */ -+extern kdb_dbtrap_t kdba_bp_trap(struct pt_regs *, int); /* Breakpoint trap/fault hdlr */ -+ -+ /* -+ * Interrupt Handling -+ */ -+typedef unsigned long kdb_intstate_t; -+ -+extern void kdba_disableint(kdb_intstate_t *); -+extern void kdba_restoreint(kdb_intstate_t *); -+ -+ /* -+ * SMP and process stack manipulation routines. -+ */ -+extern int kdba_ipi(struct pt_regs *, void (*)(void)); -+extern int kdba_main_loop(kdb_reason_t, kdb_reason_t, int, kdb_dbtrap_t, struct pt_regs *); -+extern int kdb_main_loop(kdb_reason_t, kdb_reason_t, int, kdb_dbtrap_t, struct pt_regs *); -+ -+ /* -+ * General Disassembler interfaces -+ */ -+extern int kdb_dis_fprintf(PTR, const char *, ...) __attribute__ ((format (printf, 2, 3))); -+extern int kdb_dis_fprintf_dummy(PTR, const char *, ...) 
__attribute__ ((format (printf, 2, 3))); -+extern disassemble_info kdb_di; -+ -+ /* -+ * Architecture Dependent Disassembler interfaces -+ */ -+extern int kdba_id_printinsn(kdb_machreg_t, disassemble_info *); -+extern int kdba_id_parsemode(const char *, disassemble_info*); -+extern void kdba_id_init(disassemble_info *); -+extern void kdba_check_pc(kdb_machreg_t *); -+ -+ /* -+ * Miscellaneous functions and data areas -+ */ -+extern char *kdb_cmds[]; -+extern void debugger_syslog_data(char *syslog_data[]); -+extern unsigned long kdb_task_state_string(const char *); -+extern char kdb_task_state_char (const struct task_struct *); -+extern unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask); -+extern void kdb_ps_suppressed(void); -+extern void kdb_ps1(const struct task_struct *p); -+extern int kdb_parse(const char *cmdstr); -+extern void kdb_print_nameval(const char *name, unsigned long val); -+extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info, int seqno); -+#ifdef CONFIG_SWAP -+extern void kdb_si_swapinfo(struct sysinfo *); -+#else -+#include -+#define kdb_si_swapinfo(x) si_swapinfo(x) -+#endif -+extern void kdb_meminfo_proc_show(void); -+#ifdef CONFIG_HUGETLB_PAGE -+extern void kdb_hugetlb_report_meminfo(void); -+#endif /* CONFIG_HUGETLB_PAGE */ -+extern const char *kdb_walk_kallsyms(loff_t *pos); -+ -+ /* -+ * Architecture Dependant Local Processor setup & cleanup interfaces -+ */ -+extern void kdba_local_arch_setup(void); -+extern void kdba_local_arch_cleanup(void); -+ -+ /* -+ * Defines for kdb_symbol_print. -+ */ -+#define KDB_SP_SPACEB 0x0001 /* Space before string */ -+#define KDB_SP_SPACEA 0x0002 /* Space after string */ -+#define KDB_SP_PAREN 0x0004 /* Parenthesis around string */ -+#define KDB_SP_VALUE 0x0008 /* Print the value of the address */ -+#define KDB_SP_SYMSIZE 0x0010 /* Print the size of the symbol */ -+#define KDB_SP_NEWLINE 0x0020 /* Newline after string */ -+#define KDB_SP_DEFAULT (KDB_SP_VALUE|KDB_SP_PAREN) -+ -+/* Save data about running processes */ -+ -+struct kdb_running_process { -+ struct task_struct *p; -+ struct pt_regs *regs; -+ int seqno; /* kdb sequence number */ -+ int irq_depth; /* irq count */ -+ struct kdba_running_process arch; /* arch dependent save data */ -+}; -+ -+extern struct kdb_running_process kdb_running_process[/* NR_CPUS */]; -+ -+extern int kdb_save_running(struct pt_regs *, kdb_reason_t, kdb_reason_t, int, kdb_dbtrap_t); -+extern void kdb_unsave_running(struct pt_regs *); -+extern struct task_struct *kdb_curr_task(int); -+ -+/* Incremented each time the main kdb loop is entered on the initial cpu, -+ * it gives some indication of how old the saved data is. -+ */ -+extern int kdb_seqno; -+ -+#define kdb_task_has_cpu(p) (task_curr(p)) -+extern void kdb_runqueue(unsigned long cpu, kdb_printf_t xxx_printf); -+ -+/* Simplify coexistence with NPTL */ -+#define kdb_do_each_thread(g, p) do_each_thread(g, p) -+#define kdb_while_each_thread(g, p) while_each_thread(g, p) -+ -+#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) -+ -+extern void *debug_kmalloc(size_t size, gfp_t flags); -+extern void debug_kfree(void *); -+extern void debug_kusage(void); -+ -+extern void kdba_set_current_task(const struct task_struct *); -+extern const struct task_struct *kdb_current_task; -+extern struct pt_regs *kdb_current_regs; -+ -+/* Functions to safely read and write kernel areas. The {to,from}_xxx -+ * addresses are not necessarily valid, these functions must check for -+ * validity. 
If the arch already supports get and put routines with suitable -+ * validation and/or recovery on invalid addresses then use those routines, -+ * otherwise check it yourself. -+ */ -+ -+extern int kdba_putarea_size(unsigned long to_xxx, void *from, size_t size); -+extern int kdba_getarea_size(void *to, unsigned long from_xxx, size_t size); -+extern int kdba_verify_rw(unsigned long addr, size_t size); -+ -+#ifndef KDB_RUNNING_PROCESS_ORIGINAL -+#define KDB_RUNNING_PROCESS_ORIGINAL kdb_running_process -+#endif -+ -+extern int kdb_wait_for_cpus_secs; -+extern void kdba_cpu_up(void); -+extern char kdb_prompt_str[]; -+ -+#define KDB_WORD_SIZE ((int)sizeof(kdb_machreg_t)) -+ -+#ifdef CONFIG_KDB_USB -+#include -+ -+/* support up to 8 USB keyboards (probably excessive, but...) */ -+#define KDB_USB_NUM_KEYBOARDS 8 -+ -+struct kdb_usb_kbd_info { -+ struct urb *urb; /* pointer to the URB */ -+ unsigned char *buffer; /* pointer to the kbd char buffer */ -+ int (*poll_func)(struct urb *urb); /* poll function to retrieve chars */ -+ int poll_ret; /* return val from poll_func */ -+ int caps_lock; /* state of the caps lock for this keyboard */ -+ struct uhci_qh *qh; -+ int kdb_hid_event; -+ struct urb *hid_urb; /* pointer to the HID URB */ -+ /* USB Host Controller specific callbacks */ -+ kdb_hc_keyboard_attach_t kdb_hc_keyboard_attach; -+ kdb_hc_keyboard_detach_t kdb_hc_keyboard_detach; -+ int (*kdb_hc_urb_complete)(struct urb *urb); /* called when URB int is -+ processed */ -+ -+}; -+ -+extern struct kdb_usb_kbd_info kdb_usb_kbds[KDB_USB_NUM_KEYBOARDS]; -+ -+#endif /* CONFIG_KDB_USB */ -+ -+#ifdef CONFIG_KDB_KDUMP -+#define KDUMP_REASON_RESET 0 -+extern void kdba_kdump_shutdown_slave(struct pt_regs *); -+#endif /* CONFIG_KDB_KDUMP */ -+ -+#endif /* !_KDBPRIVATE_H */ ---- a/include/linux/reboot.h -+++ b/include/linux/reboot.h -@@ -53,7 +53,14 @@ extern void machine_power_off(void); - - extern void machine_shutdown(void); - struct pt_regs; -+#ifdef CONFIG_KDB_KDUMP - extern void machine_crash_shutdown(struct pt_regs *); -+extern void machine_crash_shutdown_begin(void); -+extern void machine_crash_shutdown_other_cpu(struct pt_regs *); -+extern void machine_crash_shutdown_end(struct pt_regs *); -+#else -+extern void machine_crash_shutdown(struct pt_regs *); -+#endif /* !CONFIG_KDB_KDUMP */ - - /* - * Architecture independent implemenations of sys_reboot commands. 
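The two headers added above (include/linux/kdb.h and include/linux/kdbprivate.h) define the interface that KDB's own loadable modules build against: a kdb_func_t handler receives an argc/argv pair, kdb_register()/kdb_register_repeat() enter it into the command table, kdb_unregister() removes it, and kdb_printf() is the output path while the debugger owns the console. The sketch below is illustrative only and is not part of the patch; the module name, the "hello" command and the <linux/kdbprivate.h> include path are assumptions based purely on the declarations shown above.

/* Illustrative sketch, not part of the kdb patch: a minimal extension
 * module registering one command through the API declared above
 * (kdb_register_repeat, kdb_unregister, kdb_printf, KDB_ARGCOUNT,
 * KDB_REPEAT_NONE).
 */
#include <linux/module.h>
#include <linux/kdb.h>
#include <linux/kdbprivate.h>	/* assumed include path for the private header */

/* kdb_func_t convention: argc counts the arguments, argv[0] is the
 * command name, argv[1] is the first argument.
 */
static int kdb_hello(int argc, const char **argv)
{
	if (argc != 1)
		return KDB_ARGCOUNT;	/* negative KDB_* codes come from kdbprivate.h */
	kdb_printf("hello %s\n", argv[1]);
	return 0;
}

static int __init kdb_hello_init(void)
{
	/* name, handler, usage string, help text, minimum abbreviation
	 * length, and the repeat behaviour on a bare enter key
	 */
	return kdb_register_repeat("hello", kdb_hello, "<text>",
				   "Print a greeting from KDB", 0,
				   KDB_REPEAT_NONE);
}

static void __exit kdb_hello_exit(void)
{
	kdb_unregister("hello");
}

module_init(kdb_hello_init);
module_exit(kdb_hello_exit);
MODULE_LICENSE("GPL");

kdb_register() is the same call without the trailing kdb_repeat_t argument; the modules in kdb/modules (kdbm_pg, kdbm_vm, kdbm_task and friends, per the ChangeLog below) follow this same registration pattern.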
---- a/init/main.c -+++ b/init/main.c -@@ -101,6 +101,10 @@ extern void tc_init(void); - enum system_states system_state __read_mostly; - EXPORT_SYMBOL(system_state); - -+#ifdef CONFIG_KDB -+#include -+#endif /* CONFIG_KDB */ -+ - /* - * Boot command-line arguments - */ -@@ -203,6 +207,26 @@ static const char *panic_later, *panic_p - - extern struct obs_kernel_param __setup_start[], __setup_end[]; - -+#ifdef CONFIG_KDB -+static int __init kdb_setup(char *str) -+{ -+ if (strcmp(str, "on") == 0) { -+ kdb_on = 1; -+ } else if (strcmp(str, "on-nokey") == 0) { -+ kdb_on = 2; -+ } else if (strcmp(str, "off") == 0) { -+ kdb_on = 0; -+ } else if (strcmp(str, "early") == 0) { -+ kdb_on = 1; -+ kdb_flags |= KDB_FLAG_EARLYKDB; -+ } else -+ printk("kdb flag %s not recognised\n", str); -+ return 0; -+} -+ -+__setup("kdb=", kdb_setup); -+#endif /* CONFIG_KDB */ -+ - static int __init obsolete_checksetup(char *line) - { - struct obs_kernel_param *p; -@@ -664,6 +688,14 @@ asmlinkage void __init start_kernel(void - calibrate_delay(); - pidmap_init(); - anon_vma_init(); -+ -+#ifdef CONFIG_KDB -+ kdb_init(); -+ if (KDB_FLAG(EARLYKDB)) { -+ KDB_ENTER(); -+ } -+#endif /* CONFIG_KDB */ -+ - #ifdef CONFIG_X86 - if (efi_enabled) - efi_enter_virtual_mode(); ---- /dev/null -+++ b/kdb/ChangeLog -@@ -0,0 +1,2040 @@ -+2008-11-26 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc6-common-1. -+ -+2008-11-12 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc4-common-1. -+ -+2008-11-04 Jay Lan -+ -+ * medusa needs kdb to handle '\n' in kdb_read(), -+ Cliff Wickman -+ * kdb-v4.4-2.6.28-rc3-common-1. -+ -+2008-10-29 Jay Lan -+ -+ * "Commandeer vector 0xfe for KDB_VECTOR", version 2. -+ Cliff Wickman -+ * kdb-v4.4-2.6.28-rc2-common-2. -+ -+2008-10-27 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc2-common-1. -+ -+2008-10-20 Jay Lan -+ -+ * kdb-v4.4-2.6.27-common-1. -+ -+2008-09-30 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc8-common-1. -+ -+2008-09-22 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc7-common-1. -+ -+2008-09-03 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc5-common-1. -+ -+2008-08-19 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc3-common-1. -+ -+2008-08-15 Jay Lan -+ -+ * mm_online_pgdat_export_symbol, Jay Lan -+ - Fix compilation error by exporting first_online_pgdat & -+ next_online_pgdat for 'pgdat' command. -+ * kdb-v4.4-2.6.27-rc2-common-2.1. -+ -+2008-08-14 Jay Lan -+ -+ * Support 'kdump' command to take a kdump vmcore from KDB, -+ Dan Aloni (da-x@monatomic.org), -+ Jason Xiao (jidong.xiao@gmail.com), -+ Jay Lan (jlan@sgi.com) -+ * kdb-v4.4-2.6.27-rc2-common-2. -+ -+2008-08-06 Jay Lan -+ -+ * Fix up the NULL pointer deference issue in ohci_kdb_poll_char, -+ Jason Xiao -+ * kdb-v4.4-2.6.27-rc2-common-1. -+ -+2008-07-18 Jay Lan -+ -+ * support Hardware Breakpoint (bph/bpha) commands -+ IA64: Greg Banks -+ X86: Konstantin Baydarov -+ * kdb-v4.4-2.6.26-common-2. -+ -+2008-07-14 Jay Lan -+ -+ * kdb-v4.4-2.6.26-common-1. -+ -+2008-07-11 Jay Lan -+ -+ * New commands and some fixups and enhancements, -+ Joe Korty -+ John Blackwood -+ Jim Houston -+ - Use the non-sleeping copy_from_user_atomic. -+ - Enhance kdb_cmderror diagnostic output. -+ - Expand the KDB 'duplicate command' error message. -+ - Touch NMI watchdog in various KDB busy-loops. -+ - Support IMB HS20 Blade 8843 platform. -+ - Display exactly which cpus needed an NMI to get them into kdb. -+ - Better document that kdb's 'ps A' command can be used to show -+ _all_ processes and threads -+ - Suppress KDB boottime INFO messages if quiet boot. -+ - Add a KDB breakpoint to the OOPs path. -+ - Add CONFIG_DISCONTIGMEM support to kdbm_memmap. 
-+ - Extend the KDB task command to handle CONFIG_NUMA fields. -+ - Extend the KDB vm command to support NUMA stuff. -+ - Create the KDB mempolicy command. -+ - Create a pgdat command for KDB. -+ - Fix a hang on boot on some i386 systems. -+ * kdb-v4.4-2.6.26-rc9-common-1. -+ -+2008-06-30 Jay Lan -+ -+ * compilation warning cleanup, Cliff Wickman -+ * kdb-v4.4-2.6.26-rc8-common-1. -+ -+2008-06-25 Jay Lan -+ -+ * Added John Blackwood to the authors of -+ kdb-v4.4-2.6.26-rc4-common-2. -+ * kdb-v4.4-2.6.26-rc7-common-1. -+ -+2008-06-24 Jay Lan -+ -+ * support lcrash style debug_info file: Cliff Wickman -+ - It adds to kdb the ability to symbolically dereference structure -+ pointers through a lcrash-style debug_info file. -+ - Implements "print", "px", and "pd" print commands. -+ - Implements "walk" command to follow linked lists. -+ - Implements "whatis" to display a structure (with offsets). -+ - Implements "sizeof" for types (structures, typedefs, etc.). -+ * kdb-v4.4-2.6.26-rc5-common-2. -+ -+2008-06-06 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc5-common-1. -+ -+2008-06-05 Jay Lan -+ -+ * fixed 'rq/rqa' command runs off the end of runqueue's rt.active -+ priority bitmap array, John Blackwood & -+ Lachlan McIlroy -+ * kdb-v4.4-2.6.26-rc4-common-2. -+ -+2008-05-30 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc4-common-1. -+ -+2008-05-20 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc3-common-1. -+ -+2008-05-13 Jay Lan -+ -+ * XPC support is removed from KDB due to XPC changes in 2.6.26-rc1. -+ * kdb-v4.4-2.6.26-rc1-common-1. -+ -+2008-04-17 Jay Lan -+ -+ * kdb-v4.4-2.6.25-common-1. -+ -+2008-03-16 Jay Lan -+ -+ * kdb-v4.4-2.6.25-rc6-common-1. -+ -+2008-03-03 Jay Lan -+ -+ * kdb-v4.4-2.6.25-rc3-common-1. -+ -+2008-02-26 Jay Lan -+ -+ * remove 'fastcall' from kdb code. -+ * kdb-v4.4-2.6.25-rc2-common-1. -+ -+2008-02-19 Jay Lan -+ -+ * kdb-v4.4-2.6.25-rc1-common-1. -+ -+2008-02-06 Jay Lan -+ -+ * Backed out USB UHCI support since it caused dropped characters and -+ broke OHCI. -+ * Restored "archkdbcommon" commands for x86. It was lost at the x86 -+ merge. -+ * Detecting if the HC was "busy", Aaron Young -+ * kdb-v4.4-2.6.24-common-2. -+ -+2008-01-29 Jay Lan -+ -+ * kdb-v4.4-2.6.24-common-1. -+ -+2008-01-22 Jay Lan -+ -+ * USB UHCI kdb support, Konstantin Baydarov -+ * kdb-v4.4-2.6.24-rc8-common-3. -+ -+2008-01-18 Jay Lan -+ -+ * USB EHCI kdb support, Aaron Young -+ * kdb-v4.4-2.6.24-rc8-common-2. -+ -+2008-01-18 Jay Lan -+ -+ * kdb-v4.4-2.6.24-rc8-common-1. -+ -+2008-01-07 Jay Lan -+ -+ * kdb-v4.4-2.6.24-rc7-common-1. -+ -+2007-12-21 Jay Lan -+ -+ * Renamed kdb/kdba_bt_x86.c to arch/x86/kdba_bt.c. And thus, the x86 -+ backtrace code is now moved into the kdb x86 patch. -+ * kdb v4.4-2.6.24-rc6-common-1. -+ -+2007-12-12 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc5-common-1. -+ -+2007-12-05 Jay Lan -+ -+ * Fixed a 'sysctl table check failed' problem. -+ * kdb v4.4-2.6.24-rc4-common-1. -+ -+2007-11-26 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc3-common-1. -+ -+2007-11-13 Jay Lan -+ -+ * Back ported "New KDB USB interface" from Aaron Young in -+ v4.4-2.6.23-common-2 to 2.6.24 kdb patchset. -+ * kdb v4.4-2.6.24-rc2-common-2. -+ -+2007-11-12 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc2-common-1. -+ -+2007-11-09 Jay Lan -+ -+ * Rebase to 2.6.24-rc1 kernel -+ * - merged kdb-v4.4-2.6.23-i386-1 and kdb-v4.4-2.6.23-x86_64-1 -+ * into kdb-v4.4-2.6.24-rc1-x86-1 -+ * - Fields "done", "sglist_len", and "pid" are removed from -+ * struct scsi_cmnd. Thus, these fields are no longer displayed -+ * on "sc" command. -+ * kdb v4.4-2.6.24-rc1-common-1. 
-+ -+2007-11-08 Jay Lan -+ -+ * New KDB USB interface, Aaron Young -+ * 1. This patch allows KDB to work with any Host Contoller driver -+ * and call the correct HC driver poll routine (as long as the -+ * HC driver provides a .kdb_poll_char routine via it's -+ * associated hc_driver struct). -+ * 2. Hotplugged keyboards are now recognized by KDB. -+ * 3. Currently KDB can only make use of 1 USB type keyboard. -+ * New code can handle up to 8 attached keyboards - input is -+ * multiplexed from all of them while in kdb. -+ * kdb v4.4-2.6.23-common-2. -+ -+2007-10-24 Jay Lan -+ -+ * kdb v4.4-2.6.23-common-1. -+ -+2007-09-26 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc8-common-1. -+ -+2007-09-21 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc7-common-1. -+ -+2007-09-12 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc6-common-1. -+ -+2007-09-06 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc5-common-1. -+ -+2007-08-30 Keith Owens -+ -+ * New i386/x86_64 backtrace requires that kdb_save_running() does not -+ exit until after kdb_main_loop() has completed. -+ * List more noret functions in i386/x86_64 backtrace code. -+ * Call to a noret function ends a basic block. -+ * After a call to a noret function, eip/rip may be pointing at the next -+ function or not, depending on function alignment. Jay Lan. -+ * kdb v4.4-2.6.23-rc4-common-2. -+ -+2007-08-30 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc4-common-1. -+ -+2007-08-28 Keith Owens -+ -+ * kdb/kdba_bt_x86.c: -+ * Handle the variable amount of stack data that is pushed by x86_64 -+ * hardware on an interrupt. -+ * Add instruction vmsave. -+ * Handle pop to %rsp. -+ * Cope with return address for functions defined as ATTRIB_NORET. -+ * Include CONFIG_DEBUG_INFO in the summary line of bb_all. -+ * Check for an interrupt that was delivered while user space was in -+ * control. -+ * A return to child_rip ends a backtrace. -+ * Ignore level2_kernel_pgt and level3_kernel_pgt data areas if they -+ * occur within the text segment. -+ * kdb v4.4-2.6.23-rc3-common-2. -+ -+2007-08-24 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc3-common-1. -+ -+2007-08-24 Jay Lan -+ -+ * kdb/kdba_bt_x86.c: -+ * retint_kernel is only defined for CONFIG_PREEMPT. -+ * Handle assembler code for CONFIG_HIBERNATION=y. -+ * Handle assembler code for CONFIG_MATH_EMULATION=y. -+ * Handle assembler code for CONFIG_XEN=y. -+ * Handle assembler code for CONFIG_KPROBES=y. -+ * Add CC version to the bb_all header. -+ * Handle spurious label in jprobe_return. -+ * Handle stack switch in jprobe_return. -+ * Prefix register name with '%' in xadd/xchg temporary variable. -+ * Require bb_usage_mov() to handle all the special cases internally. -+ * Handle stack manipulation for kexec. -+ * Handle spurious label in kretprobe_trampoline_holder. -+ * Add instructions clgi, invlpga, rcl, rdpmc, stgi, vmclear, -+ * vmlaunch, vmload, vmptrld, vmread, vmresume, vmrun, vmwrite, -+ * xstore-rng. -+ * Exclude more 16 bit and/or real mode acpi functions from bb_all. -+ * Handle assembler stack switching code in i386 do_softirq. -+ * kdb/kdbmain.c: -+ * Add CC version to the summary output. -+ * Bump debug_kmalloc pool from 128K to 256K, some of the kernel -+ * functions have huge numbers of basic blocks and jumps between them. -+ * Correct reinstallation of breakpoints when exiting KDB. -+ * Keith Owens. -+ * kdb v4.4-2.6.23-rc2-common-2. -+ -+2007-08-07 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc2-common-1. -+ -+2007-08-03 Keith Owens -+ -+ * kdba_bt_x86.c: Rename some variables to make the code more readable. 
-+ Print more debug information when merging register states and when -+ calculating the new stack pointer. -+ * kdb v4.4-2.6.23-rc1-common-2. -+ -+2007-07-30 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc1-common-1. -+ -+2007-07-26 Keith Owens -+ -+ * New x86 backtrace code. -+ * kdb v4.4-2.6.22-common-4. -+ -+2007-07-17 Keith Owens -+ -+ * Make kdb_printf_lock an irq lock to keep lockdep happy. -+ * kdb v4.4-2.6.22-common-3. -+ -+2007-07-13 Keith Owens -+ -+ * Increase the size of the debug_alloc pool. -+ * Add the caller that obtained each entry in the debug_alloc pool. -+ * Poison entries in the debug_alloc pool. -+ * Track current and maximum usage in debug_alloc pool. -+ * Print the debug_alloc entries that are still in use when kdb exits -+ (memory leaks). -+ * Increase the default value of BTARGS to 9. -+ * kdb v4.4-2.6.22-common-2. -+ -+2007-07-09 Keith Owens -+ -+ * kdb v4.4-2.6.22-common-1. -+ -+2007-07-02 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc7-common-1. -+ -+2007-06-20 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc5-common-1. -+ -+2007-06-15 Keith Owens -+ -+ * Do not include asm/kdb.h unless CONFIG_KDB is on. Dave Jiang. -+ * kdb v4.4-2.6.22-rc4-common-2. -+ -+2007-06-08 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc4-common-1. -+ -+2007-05-28 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc3-common-1. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc2-common-1. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc1-common-1. -+ -+2007-05-17 Keith Owens -+ -+ * Add rdmsr and wrmsr commands for i386 and x86_64. Original patch by -+ Bernardo Innocenti for i386, reworked by Keith Owens to make it safe -+ on all cpu models and to handle both i386 and x86_64. -+ * kdb v4.4-2.6.21-common-3. -+ -+2007-05-15 Keith Owens -+ -+ * Correct alignment of debug_alloc_header. -+ * kdb v4.4-2.6.21-common-2. -+ -+2007-04-29 Keith Owens -+ -+ * kdb v4.4-2.6.21-common-1. -+ -+2007-04-16 Keith Owens -+ -+ * Remove dead symbol declarations. -+ * kdb v4.4-2.6.21-rc7-common-2. -+ -+2007-04-16 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc7-common-1. -+ -+2007-04-10 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc6-common-1. -+ -+2007-04-02 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc5-common-1. -+ -+2007-03-19 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc4-common-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc3-common-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc2-common-1. -+ -+2007-03-01 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc1-common-1. -+ -+2007-03-01 Keith Owens -+ -+ * Remove sparse warnings. -+ * kdb v4.4-2.6.20-common-6. -+ -+2007-02-27 Keith Owens -+ -+ * set_irq_regs() on entry to kdb() if they are not already set. -+ * kdb v4.4-2.6.20-common-5. -+ -+2007-02-22 Keith Owens -+ -+ * Initialise struct disassemble_info in kdb_id1(). -+ * kdb v4.4-2.6.20-common-4. -+ -+2007-02-16 Keith Owens -+ -+ * Clean up debug_alloc_pool code. -+ * kdb v4.4-2.6.20-common-3. -+ -+2007-02-16 Keith Owens -+ -+ * Initialise variable bits of struct disassemble_info each time. -+ * kdb v4.4-2.6.20-common-2. -+ -+2007-02-06 Keith Owens -+ -+ * kdb v4.4-2.6.20-common-1. -+ -+2007-02-01 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc7-common-1. -+ -+2007-01-08 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc4-common-1. -+ -+2007-01-02 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc3-common-1. -+ -+2006-12-21 Keith Owens -+ -+ * Initialize the debug_kmalloc pool on the first call, so it can be -+ used at any time. -+ * kdb v4.4-2.6.20-rc1-common-2. -+ -+2006-12-20 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc1-common-1. -+ -+2006-11-30 Keith Owens -+ -+ * kdb v4.4-2.6.19-common-1. 
-+ -+2006-11-30 Keith Owens -+ -+ * Do not access registers if kdb_current_regs is NULL. -+ * kdb v4.4-2.6.19-rc6-common-3. -+ -+2006-11-27 Keith Owens -+ -+ * Only use VT keyboard if the command line allows it and ACPI indicates -+ that there is an i8042. -+ * Optimize kdb_read() to reduce the risk of dropping input characters. -+ * Print cpumasks as lists instead of hex, also cope with long lists. -+ * kdb v4.4-2.6.19-rc6-common-2. -+ -+2006-11-20 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc6-common-1. -+ -+2006-11-09 Keith Owens -+ -+ * Change kdb() to fastcall. -+ * Correct loop in kdb_help(). Georg Nikodym. -+ * Only use VT console if the command line allows it. -+ * kdb v4.4-2.6.19-rc5-common-2. -+ -+2006-11-08 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc5-common-1. -+ -+2006-11-01 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc4-common-1. -+ -+2006-10-24 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc3-common-1. -+ -+2006-10-24 Keith Owens -+ -+ * Remove redundant regs and envp parameters. -+ * kdb v4.4-2.6.19-rc2-common-2. -+ -+2006-10-18 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc2-common-1. -+ -+2006-10-11 Keith Owens -+ -+ * Move kdbm_x86.c from the i386 to the common KDB patch. -+ * Expand kdbm_x86.c to work on x86_64 as well as i386. -+ * kdb v4.4-2.6.19-rc1-common-2. -+ -+2006-10-09 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc1-common-1. -+ -+2006-10-06 Keith Owens -+ -+ * Remove #include -+ * kdb v4.4-2.6.18-common-2. -+ -+2006-09-20 Keith Owens -+ -+ * kdb v4.4-2.6.18-common-1. -+ -+2006-09-15 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc7-common-1. -+ -+2006-08-29 Keith Owens -+ -+ * Rewrite all backtrace code. -+ * kdb v4.4-2.6.18-rc5-common-2. -+ -+2006-08-28 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc5-common-1. -+ -+2006-08-08 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc4-common-1. -+ -+2006-08-04 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc3-common-1. -+ -+2006-07-18 Keith Owens -+ -+ * 8250.c locking has been fixed so there is no need to break spinlocks -+ for keyboard entry. -+ * kdb v4.4-2.6.18-rc2-common-2. -+ -+2006-07-18 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc2-common-1. -+ -+2006-07-12 Keith Owens -+ -+ * Remove dead KDB_REASON codes. -+ * The main kdb() function is now always entered with interrupts -+ disabled, so there is no need to disable bottom halves. -+ * sparse cleanups. -+ * kdb v4.4-2.6.18-rc1-common-2. -+ -+2006-07-07 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc1-common-1. -+ -+2006-07-04 Keith Owens -+ -+ * Add KDB_REASON_CPU_UP and callbacks for cpus coming online. -+ * Relegate KDB_REASON_SILENT to KDB internal use only. -+ * Backout the v4.4-2.6.15-common-3 change that made KDB_REASON_SILENT -+ wait for cpus, the Dell Xeon problem has been fixed. -+ * notify_die() is not called for KDB_REASON_SILENT nor -+ KDB_REASON_CPU_UP, these events do not stay in KDB. -+ * Export kdb_current_task for kdbm_x86. SuSE patch -+ kdb-missing-export.diff -+ * Scale kdb_wait_for_cpus_secs by the number of online cpus. -+ * Delete kdb_enablehwfault, architectures now do their own setup. -+ * Delete kdba_enable_mce, architectures now do their own setup. -+ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr, -+ page_fault_mca. Only ever implemented on x86, difficult to maintain -+ and rarely used in the field. -+ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp. -+ * kdb v4.4-2.6.17-common-2. -+ -+2006-06-19 Keith Owens -+ -+ * kdb v4.4-2.6.17-common-1. -+ -+2006-05-31 Keith Owens -+ -+ * Break spinlocks for keyboard entry. Hopefully a temporary hack while -+ I track down why keyboard entry to KDB is hanging. 
-+ * kdb v4.4-2.6.17-rc5-common-2. -+ -+2006-05-25 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc5-common-1. -+ -+2006-05-15 Keith Owens -+ -+ * Refresh bfd related files from binutils 2.16.91.0.2. -+ * kdb v4.4-2.6.17-rc4-common-2. -+ -+2006-05-12 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc4-common-1. -+ -+2006-04-28 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc3-common-1. -+ -+2006-04-22 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc2-common-1. -+ -+2006-04-11 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc1-common-1. -+ -+2006-04-05 Keith Owens -+ -+ * More fixes for the timing race with KDB_ENTER_SLAVE. -+ * kdb v4.4-2.6.16-common-5. -+ -+2006-03-30 Keith Owens -+ -+ * Some code was testing KDB_IS_RUNNING() twice, which left it open to -+ races. Cache the result instead. -+ * kdb v4.4-2.6.16-common-4. -+ -+2006-03-30 Keith Owens -+ -+ * Change CONFIG_LKCD to CONFIG_LKCD_DUMP. -+ * kdb v4.4-2.6.16-common-3. -+ -+2006-03-22 Keith Owens -+ -+ * Add some more xpc flags. Dean Nelson, SGI. -+ * Replace open coded counter references with atomic_read(). -+ * Pass early_uart_console to early_uart_setup(). Francois -+ Wellenreiter, Bull. -+ * Replace open code with for_each_online_cpu(). -+ * If cpus do not come into kdb after a few seconds then let -+ architectures send a more forceful interrupt. -+ * Close a timing race with KDB_ENTER_SLAVE. -+ * kdb v4.4-2.6.16-common-2. -+ -+2006-03-21 Keith Owens -+ -+ * kdb v4.4-2.6.16-common-1. -+ -+2006-03-14 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc6-common-1. -+ -+2006-02-28 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc5-common-1. -+ -+2006-02-20 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc4-common-1. -+ -+2006-02-06 Keith Owens -+ -+ * Change CONFIG_CRASH_DUMP to CONFIG_LKCD. -+ * Remove obsolete kdb_notifier_list. -+ * kdb v4.4-2.6.16-rc2-common-2. -+ -+2006-02-06 Keith Owens -+ -+ * Add xpcusers command. Dean Nelson, SGI. -+ * kdb v4.4-2.6.16-rc2-common-1. -+ -+2006-02-02 Keith Owens -+ -+ * Check if we have a console before using it for KDB. -+ * kdb v4.4-2.6.16-rc1-common-3. -+ -+2006-02-01 Keith Owens -+ -+ * Add option 'R' to the pid command to reset to the original task. -+ * Include 'pid R' in archkdb* commands to reset up the original failing -+ task. Users may have switched to other cpus and/or tasks before -+ issuing archkdb. -+ * Compile fix for kdbm_pg.c on i386. -+ * kdb v4.4-2.6.16-rc1-common-2. -+ -+2006-01-18 Keith Owens -+ -+ * kdb v4.4-2.6.16-rc1-common-1. -+ -+2006-01-11 Keith Owens -+ -+ * Plug a timing race between KDB_ENTER_SLAVE and KDB_ENTER, and allow -+ the cpu command to switch to a slave cpu. -+ * KDB_REASON_SILENT now waits for other cpus, to avoid spurious NMI -+ events that were seen on some Xeon systems. -+ * kdb v4.4-2.6.15-common-3. -+ -+2006-01-08 Keith Owens -+ -+ * kdb mainline invokes DIE_KDEBUG_ENTER and DIE_KDEBUG_LEAVE via -+ notify_die. -+ * Move xpc debug support from xpc to mainline kdb. -+ * kdbm_cm.c: check if file_lock_operations or lock_manager_operations -+ are set before dereferencing them. Felix Blyakher, SGI. -+ * kdb v4.4-2.6.15-common-2. -+ -+2006-01-04 Keith Owens -+ -+ * Print all buffers on a page in inode pages and update formatting to be -+ legible, too. David Chinner, SGI. -+ * Update page flags in kdbm_pg. -+ * Remove inline from *.c files. -+ * kdb v4.4-2.6.15-common-1. -+ -+2005-12-25 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc7-common-1. -+ -+2005-12-20 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc6-common-1. -+ -+2005-12-10 Keith Owens -+ -+ * Update mapping of flags to strings in kdbm_pg.c and kdbm_vm.c. -+ * kdb v4.4-2.6.15-rc5-common-3. 
-+ -+2005-12-06 Keith Owens -+ -+ * Add RECOVERY flag to global KDB flags. -+ * Add kdb_{save,restore}_flags. -+ * kdb v4.4-2.6.15-rc5-common-2. -+ -+2005-12-05 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc5-common-1. -+ -+2005-12-02 Keith Owens -+ -+ * kdbm_vm.c: offsets of page macros should be unsigned long. Reported -+ by Dean Nelson, SGI. -+ * kdb v4.4-2.6.15-rc4-common-1. -+ -+2005-11-30 Keith Owens -+ -+ * New follow_page() API. -+ * kdb v4.4-2.6.15-rc3-common-1. -+ -+2005-11-21 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc2-common-1. -+ -+2005-11-15 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc1-common-1. -+ -+2005-11-15 Keith Owens -+ -+ * Allow kdb_printf() to be used outside kdb, in preemptible context. -+ * Build with CONFIG_SWAP=n. Reported by Leo Yuriev. -+ * kdb v4.4-2.6.14-common-2. -+ -+2005-10-28 Keith Owens -+ -+ * kdb v4.4-2.6.14-common-1. -+ -+2005-10-21 Keith Owens -+ -+ * kdb v4.4-2.6.14-rc5-common-1. -+ -+2005-10-11 Keith Owens -+ -+ * Handle removal of USB keyboard. Aaron Young, SGI. -+ * kdb v4.4-2.6.14-rc4-common-1. -+ -+2005-10-05 Keith Owens -+ -+ * Extend kdb_notifier_list() codes to include dumping. -+ * Use emergency_restart() for reboot, it can be called from interrupt -+ context, unlike machine_restart(). -+ * kdb v4.4-2.6.14-rc3-common-1. -+ -+2005-09-21 Keith Owens -+ -+ * Support kdb_current_task in register display and modify commands. -+ * Document what changes kdb's notion of the current task. -+ * Update rd documentation for IA64. -+ * Move some definictions to kdbprivate.h and remove some unused symbol -+ exports. -+ * kdb v4.4-2.6.14-rc2-common-1. -+ -+2005-09-20 Keith Owens -+ -+ * Document IA64 handlers command. -+ * Add more fields to the task command. -+ * Cope with MCA/INIT handlers in the ps command. -+ * Namespace cleanup, delete unused exports, make some functions static. -+ * Add a kdb_notifier_list callback when kdb is about to reboot the -+ system. -+ * kdb v4.4-2.6.14-rc1-common-1. -+ -+2005-08-29 Keith Owens -+ -+ * kdb v4.4-2.6.13-common-1. -+ -+2005-08-24 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc7-common-1. -+ -+2005-08-08 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc6-common-1. -+ -+2005-08-02 Keith Owens -+ -+ * Print more fields from filp, dentry. -+ * Add kdb=on-nokey to suppress kdb entry from the keyboard. -+ * kdb v4.4-2.6.13-rc5-common-1. -+ -+2005-07-30 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc4-common-1. -+ -+2005-07-26 Keith Owens -+ -+ * Fix compile problem with CONFIG_USB_KBD. -+ * kdb v4.4-2.6.13-rc3-common-3. -+ -+2005-07-22 Keith Owens -+ -+ * The asmlinkage kdb() patch was lost during packaging. Reinstate it. -+ * kdb v4.4-2.6.13-rc3-common-2. -+ -+2005-07-19 Keith Owens -+ -+ * Add support for USB keyboard (OHCI only). Aaron Young, SGI. -+ * kdb v4.4-2.6.13-rc3-common-1. -+ -+2005-07-08 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc2-common-1. -+ -+2005-07-01 Keith Owens -+ -+ * Make kdb() asmlinkage to avoid problems with CONFIG_REGPARM. -+ * Change some uses of smp_processor_id() to be preempt safe. -+ * Use DEFINE_SPINLOCK(). -+ * kdb v4.4-2.6.13-rc1-common-1. -+ -+2005-06-18 Keith Owens -+ -+ * kdb v4.4-2.6.12-common-1. -+ -+2005-06-08 Keith Owens -+ -+ * Correct early exit from bd *. -+ * kdb v4.4-2.6.12-rc6-common-1. -+ -+2005-05-25 Keith Owens -+ -+ * Delete Documentation/kdb/dump.txt. lkcd now has reasonable -+ integration with kdb. -+ * kdb v4.4-2.6.12-rc5-common-1. -+ -+2005-05-08 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc4-common-1. -+ -+2005-04-21 Keith Owens -+ -+ * Add rpte command (find the pte for a physical page). 
-+ * kdb v4.4-2.6.12-rc3-common-1. -+ -+2005-04-06 Keith Owens -+ -+ * Add rq and rqa commands. John Hawkes, SGI. -+ * kdb v4.4-2.6.12-rc2-common-1. -+ -+2005-03-29 Keith Owens -+ -+ * Use register_sysctl_table() instead of patching kernel/sysctl.c. -+ * Non-ASCII characters are not printable. -+ * kdb v4.4-2.6.12-rc1-common-1. -+ -+2005-03-15 Keith Owens -+ -+ * More coexistence patches for lkcd. Jason Uhlenkott, SGI. -+ * kdb v4.4-2.6.11-common-3. -+ -+2005-03-08 Keith Owens -+ -+ * Coexistence patches for lkcd. Jason Uhlenkott, SGI. -+ * kdb v4.4-2.6.11-common-2. -+ -+2005-03-03 Keith Owens -+ -+ * Add kdb to drivers/serial/8250_early.c. Francois Wellenreiter, Bull. -+ * kdb v4.4-2.6.11-common-1. -+ -+2005-02-14 Keith Owens -+ -+ * kdb v4.4-2.6.11-rc4-common-1. -+ -+2005-02-08 Keith Owens -+ -+ * kdb v4.4-2.6.11-rc3-bk4-common-1. -+ -+2005-02-03 Keith Owens -+ -+ * Print more superblock fields. Nathan Scott, SGI. -+ * Remove kallsyms correction for modules, Linus took it. -+ * kdb v4.4-2.6.11-rc3-common-1. -+ -+2005-01-27 Keith Owens -+ -+ * Add bio command. Nathan Scott, SGI. -+ * kdb v4.4-2.6.11-rc2-common-1. -+ -+2005-01-20 Keith Owens -+ -+ * Include kallsyms correction for modules until Linus takes it. -+ * kdb v4.4-2.6.11-rc1-bk7-common-1. -+ -+2005-01-12 Keith Owens -+ -+ * kallsyms now supports all symbols properly, remove kdb patch. -+ * Add last ditch allocator for debugging. -+ * Update kdb_meminfo_read_proc() for vmalloc changes. -+ * Update kdbm_vm.c for 4 level page tables. -+ * kdb v4.4-2.6.11-rc1-common-1. -+ -+2004-12-25 Keith Owens -+ -+ * Add kobject command. -+ * Ignore low addresses and large offsets in kdbnearsym(). -+ * Console updates for sn2 simulator. -+ * kdb v4.4-2.6.10-common-1. -+ -+2004-12-07 Keith Owens -+ -+ * kdb v4.4-2.6.10-rc3-common-1. -+ -+2004-11-23 Keith Owens -+ -+ * Remove warning message from kdb_get_one_user_page(), it was too noisy. -+ * kdb v4.4-2.6.10-rc2-common-1. -+ -+2004-11-02 Keith Owens -+ -+ * Build with kdb patch applied but CONFIG_KDB=n. -+ * kdb v4.4-2.6.10-rc1-common-2. -+ -+2004-10-29 Keith Owens -+ -+ * Handle new compression scheme for kallsyms. -+ * Handle move of DEAD and ZOMBIE for task->state to task->exit_state. -+ * Tweak the concept of a valid kernel address to get all symbols, -+ including the symbols in the ia64 gate page. -+ * kdb v4.4-2.6.10-rc1-common-1. -+ -+2004-10-21 Keith Owens -+ -+ * Handle variable size for the kernel log buffer. -+ * kdb v4.4-2.6.9-common-2. -+ -+2004-10-19 Keith Owens -+ -+ * kdb v4.4-2.6.9-common-1. -+ -+2004-10-12 Keith Owens -+ -+ * kdb v4.4-2.6.9-rc4-common-1. -+ -+2004-10-01 Keith Owens -+ -+ * kdb v4.4-2.6.9-rc3-common-1. -+ -+2004-09-30 Keith Owens -+ -+ * Add stackdepth command to Documentation/kdb/kdb.mm. stackdepth is -+ only supported on i386 and ia64 at the moment. -+ * Skip kdbm_pg memmap build on x86_64. Scott Lurndal, 3leafnetworks. -+ * Export kdb_serial_str for modular I/O. Bryan Cardillo, UPenn. -+ * Reinstate tab completion for symbols. -+ * kdb v4.4-2.6.9-rc2-common-2. -+ -+2004-09-14 Keith Owens -+ -+ * Add task states C (traCed) and E (dEad). -+ * kdb v4.4-2.6.9-rc2-common-1. -+ -+2004-08-27 Keith Owens -+ -+ * kdb v4.4-2.6.9-rc1-common-1. -+ -+2004-08-14 Keith Owens -+ -+ * kdb v4.4-2.6.8-common-1. -+ -+2004-08-12 Keith Owens -+ -+ * kdb v4.4-2.6.8-rc4-common-1. -+ -+2004-08-05 Keith Owens -+ -+ * Mark kdb_initcall as __attribute_used__ for newer gcc. -+ * kdb v4.4-2.6.8-rc3-common-2. -+ -+2004-08-04 Keith Owens -+ -+ * Add mdp (memory display physical) comnmand. 
-+ Ananth N Mavinakayanahalli, IBM. -+ * kdb v4.4-2.6.8-rc3-common-1. -+ -+2004-07-18 Keith Owens -+ -+ * Patch for new sn_console. Erik Jacobson. SGI. -+ * kdb v4.4-2.6.8-rc2-common-1. -+ -+2004-07-12 Keith Owens -+ -+ * Convert kdbm_task to standard cpumask_t. -+ * Document '*' (all breakpoints) option on bd/be/bc commands. -+ * kdb v4.4-2.6.8-rc1-common-1. -+ -+2004-06-30 Keith Owens -+ -+ * Common changes to help the x86-64 port. -+ * kdb v4.4-2.6.7-common-3. -+ -+2004-06-20 Keith Owens -+ -+ * Move kdb includes in mm/swapfile.c to reduce conflicts with other -+ SGI patches. -+ * kdb v4.4-2.6.7-common-2. -+ -+2004-06-16 Keith Owens -+ -+ * kdb v4.4-2.6.7-common-1. -+ -+2004-06-09 Keith Owens -+ -+ * kdb v4.4-2.6.7-rc3-common-1. -+ -+2004-06-09 Keith Owens -+ -+ * Namespace clean up. Mark code/variables as static when it is only -+ used in one file, delete dead code/variables. -+ * Saved interrupt state requires long, not int. -+ * kdb v4.4-2.6.7-rc2-common-3. -+ -+2004-06-08 Keith Owens -+ -+ * Whitespace clean up, no code changes. -+ * kdb v4.4-2.6.7-rc2-common-2. -+ -+2004-06-07 Keith Owens -+ -+ * kdb v4.4-2.6.7-rc2-common-1. -+ -+2004-06-06 Keith Owens -+ -+ * Avoid recursion problems in kdb_init(). -+ * Add standard archkdb commands. -+ * Add per_cpu command. -+ * Move kdb_{get,put}userarea_size definitions to linux/kdb.h. -+ * kdb v4.4-2.6.6-common-2. -+ -+2004-05-23 Keith Owens -+ -+ * Shrink the output from the cpu command. -+ * Add cpu state 'I', the cpu is idle. -+ * Add cpu state '+', some kdb data is available but the cpu is not -+ responding. -+ * Do not print tasks in state I or M by default in ps and bta commands. -+ * Add states I (idle task) and M (sleeping system daemon) to ps and -+ bta commands. -+ * Delete unused variables. -+ * Move private kdb fields from kdb.h to kdbprivate.h. -+ * Print 'for keyboard entry' for the special cases when KDB_ENTER() is -+ used to get registers. -+ * Move bfd.h and ansidecl.h from arch/$(ARCH)/kdb to include/asm-$(ARCH) -+ and remove -I arch/$(ARCH)/kdb. -+ * dmesg command now prints from either the start or end of dmesg, or at -+ an arbitrary point in the middle of the kernel log buffer. -+ * Sensible string dump for multi byte md commands. -+ * 'page' command handles ia64 correctly. -+ * Show some activity when waiting for cpus to enter kdb. -+ * Change the KDB entry code to KDB. -+ * Allow comment commands, starting with '#'. -+ * Commands defined using defcmd from kdb_cmds are not printed as they -+ are entered, use defcmd with no parameters to print all the defined -+ commands. -+ * Add summary command. -+ * Update copyright notices. -+ * Zero suppression on md command. -+ * Make set NOSECT=1 the default. -+ * PPC64 uses OF-stdout instead of console. Ananth N Mavinakayanahalli. -+ * kdb v4.4-2.6.6-common-1. -+ -+2004-05-10 Keith Owens -+ -+ * kdb v4.3-2.6.6-common-1. -+ -+2004-05-06 Keith Owens -+ -+ * kdb v4.3-2.6.6-rc3-common-1. -+ -+2004-05-06 Keith Owens -+ -+ * kdb v4.3-2.6.6-rc2-common-1. -+ -+2004-04-30 Keith Owens -+ -+ * Rewrite inode_pages command for new radix code in struct page. -+ * kdb v4.3-2.6.6-rc1-common-1. -+ -+2004-04-11 Keith Owens -+ -+ * Unlock sn_sal_lock before entering kdb from sn_serial. -+ * kdb v4.3-2.6.5-common-2. -+ -+2004-04-05 Keith Owens -+ -+ * kdb v4.3-2.6.5-common-1. -+ -+2004-03-22 Keith Owens -+ -+ * kdb v4.3-2.6.5-rc2-common-1. -+ -+2004-03-12 Keith Owens -+ -+ * More work to avoid spurious messages from WARN_CONSOLE_UNLOCKED(). -+ * bh command bug fixes. Nathan Scott. 
-+ * kdb v4.3-2.6.4-common-1. -+ -+2004-03-06 Keith Owens -+ -+ * Set KDB_IS_RUNNING() during kdb_init to avoid spurious messages from -+ WARN_CONSOLE_UNLOCKED(). -+ * Correct loss of symbol names in kdbnearsym. -+ * kdb v4.3-2.6.4-rc2-common-1. -+ -+2004-02-29 Keith Owens -+ -+ * kdb v4.3-2.6.4-rc1-common-1. -+ -+2004-02-21 Keith Owens -+ -+ * Correct build of kdb_cmds when using a separate object directory and -+ make it quiet. j-nomura (NEC), Keith Owens. -+ * kdb v4.3-2.6.3-common-2. -+ -+2004-02-18 Keith Owens -+ -+ * kdb v4.3-2.6.3-common-1. -+ -+2004-02-17 Keith Owens -+ -+ * Remove WAR for incorrect console registration patch. -+ * kdb v4.3-2.6.3-rc4-common-1. -+ -+2004-02-17 Keith Owens -+ -+ * Convert longjmp buffers from static to dynamic allocation, for large -+ cpu counts. -+ * Tweak kdbm_task for SMP/UP. -+ * Reconcile with kdb-v4.3 2.4.25-rc1-common-1. -+ * Simplify coexistence with NPTL patches. -+ * Support kill command on new scheduler. -+ * Do not refetch data when printing a value as characters. -+ * Document the pid command. -+ * Work around 2.6 kallsyms 'feature'. -+ * Upgrade to 2.6.3-rc3. -+ * WAR for incorrect console registration patch. -+ * kdb v4.3-2.6.3-rc3-common-1. -+ -+2003-12-03 Keith Owens -+ -+ * Reconcile 2.6-test versions from Xavier Bru (Bull), Greg Banks (SGI), -+ Jim Houston (Concurrent Computer Corp). -+ * Reconcile with kdb v4.3-2.4.23-common-2. -+ * Clean up CONFIG_KDB changes to {scripts,kernel}/kallsyms.c. -+ * Correct handling of kdb command line arguments. -+ * Make hooks into module code less intrusive. -+ * Delete kdb_active_task, not required with O(1) scheduler. -+ * Port kdbm_task.c from 2.4. -+ * Disable debug check in exit.c::next_thread() when kdb is running. -+ * Remove "only bh_disable when interrupts are set". BH must be disabled -+ in kdb to prevent deadlock on breakpoints in interrupt handlers. -+ * Add kdb to drivers/char/sn_serial.c. -+ * kdb v4.3-2.6.0-test11-common-1. -+ -+2003-11-11 Xavier Bru -+ * Merge to 2.6.0-test9 -+2003-10-17 Xavier Bru -+ * fix NUll ptr in kdb_ps at early prompt. -+2003-10-14 Xavier Bru -+ * fix NUll ptr in kdb_ps when cpu not present. -+2003-10-06 Xavier Bru -+ * Merge to 2.6.0-test5 -+ * fix compile error with CONFIG_MODULES not set. -+ -+2003-09-08 Xavier Bru -+ * Merge to 2.6.0-test4 -+ -+2003-07-10 Xavier Bru -+ -+ * Merge kdb v4.3 to 2.5.72 ia64 -+ * don't call local_bh_enable() with interrupts masked. -+ -+2003-04-07 Xavier Bru -+ -+ * Merge kdb v4.1 to 2.5.64 ia64 -+ * new kernel parameters support -+ * new module format -+ * new kallsyms support -+ -+2003-12-02 Keith Owens -+ -+ * Use correct page alignment in kdb_get_one_user_page(). -+ Prasanna S Panchamukhi, IBM. -+ * Split pte command into pte -m and pte -p. Dean Roe, SGI. -+ * kdb v4.3-2.4.23-common-2. -+ -+2003-12-01 Keith Owens -+ -+ * kdb v4.3-2.4.23-common-1. -+ -+2003-11-11 Keith Owens -+ -+ * Make KDB for USB keyboards build. Peter T. Breuer. -+ * Do not use USB keyboard if it has not been probed. -+ * kdb v4.3-2.4.23-rc1-common-1. -+ -+2003-10-10 Keith Owens -+ -+ * Sync with XFS 2.4.22 tree. -+ * kdb v4.3-2.4.22-common-2. -+ -+2003-08-29 Keith Owens -+ -+ * kdb v4.3-2.4.22-common-1. -+ -+2003-07-27 Keith Owens -+ -+ * kdb v4.3-2.4.22-pre8-common-8. -+ -+2003-07-20 Keith Owens -+ -+ * Make kdb_serial_str a common constant, the same for all consoles. -+ * Support SGI L1 console. -+ * kdb v4.3-2.4.21-common-8. -+ -+2003-07-14 Keith Owens -+ -+ * Correct ll command. -+ * kdb v4.3-2.4.21-common-7. 
-+ -+2003-07-08 Keith Owens -+ -+ * Export more kdb symbols. Vamsi Krishna S., IBM. -+ * kdb v4.3-2.4.21-common-6. -+ -+2003-07-07 Keith Owens -+ -+ * Tweak 'waiting for cpus' message. -+ * kdb v4.3-2.4.21-common-5. -+ -+2003-07-07 Keith Owens -+ -+ * 2.4.21-ia64-030702 patches common code that affects kdb. Workaround -+ this nuisance. -+ * kdb v4.3-2.4.21-common-4. -+ -+2003-06-24 Keith Owens -+ -+ * Add task and sigset commands. Mark Goodwin, SGI. -+ * kdb v4.3-2.4.21-common-3. -+ -+2003-06-23 Keith Owens -+ -+ * Sync with XFS 2.4.21 tree. -+ * kdb v4.3-2.4.21-common-2. -+ -+2003-06-20 Keith Owens -+ -+ * kdb v4.3-2.4.21-common-1. -+ -+2003-06-20 Keith Owens -+ -+ * More details on vm command, add vmp and pte commands. -+ Dean Nelson, Dean Roe, SGI. -+ * YAO1SCF (Yet Another O(1) Scheduler Coexistence Fix). -+ * Changes to common code to build on sparc. Tom Duffy. -+ * Move Tom Duffy's changes to drivers/sbus from the sparc64 -+ patch to the common patch to keep all the serial changes -+ together. -+ * Changes to common code to build on Xscale. Eddie Dong, Intel. -+ * Remove CROSS_COMPILE_INC. -+ * Remove obsolete boot parameter 'kdb', long since replaced by -+ 'kdb=on'. -+ * Remove obsolete kdb_eframe_t casts. -+ * Add CONFIG_KDB_CONTINUE_CATASTROPHIC. -+ * Wait a short interval for cpus to join kdb before proceeding. -+ * Automatically enable sysrq for sr command. -+ * Correct double free of kdb_printf lock, spotted by Richard Sanders. -+ * Add optional cpu parameter to btc command. -+ * kdb v4.3-2.4.20-common-1. -+ -+2003-05-02 Keith Owens -+ -+ * Some architectures have problems with the initial empty kallsyms -+ section so revert to three kallsyms passes. -+ * Flush buffered input at startup and at 'more' prompt. -+ * Only print 'more' prompt when longjmp data is available. -+ * Print more data for buffers and inodes. -+ * Disable kill command when O(1) scheduler is installed, the code -+ needs to be redone for O(1). -+ * The kernel has an undocumented assumption that enable_bh() is -+ always called with interrupts enabled, make it so. -+ * Print trailing punctuation even for symbols that are not in kernel. -+ * Add read/write access to user pages. Vamsi Krishna S., IBM -+ * Rename cpu_is_online to cpu_online, as in 2.5. -+ * O(1) scheduler removes init_task so kdb maintains its own list of -+ active tasks. -+ * Delete btp 0 option, it needed init_tasks. -+ * Clean up USB keyboard support. Steven Dake. -+ * Sync with XFS 2.4.20 tree. -+ * kdb v4.2-2.4.20-common-1. -+ -+2003-04-04 Keith Owens -+ -+ * Remove one kallsyms pass. -+ * Automatic detection of O(1) scheduler. -+ * Rename cpu_online to cpu_is_online. -+ * Workarounds for scheduler bugs. -+ * Tweak algorithm for detecting if cpu process data is available. -+ * Add 'kill' command. Sonic Zhang, Keith Owens. -+ * kdb v4.1-2.4.20-common-1. -+ -+2003-03-16 Keith Owens -+ -+ * Each cpu saves its state as it enters kdb or before it enters code -+ which cannot call kdb. -+ * Allow btp on process 0 for a specified cpu. -+ * Add btt command, backtrace given a struct task address. -+ * btc command no longer switches cpus, instead it uses the saved data. -+ * bta shows the idle task on each cpu as well as real tasks, the idle -+ task could be handling an interrupt. -+ * ps command shows the idle task on each cpu. -+ * ps checks that the saved data for a cpu matches the process running on -+ that cpu and warns about stale saved data or no saved data at all. 
-+ * Remove special cases for i386 backtrace from common code and simplify -+ common bt code. -+ * Clean up kdb interaction with CONFIG_SERIAL_CONSOLE. -+ * Do not automatically repeat commands after the user typed 'q'. -+ * O(1) scheduler patch changes the process cpu field but does not set -+ any indicator that O(1) is being used. Adjust kdb_process_cpu() by -+ hand after applying O(1). -+ * Add kdb_print_nameval() to common code. -+ * Convert tests of cpu_online_map to cpu_online() macro. -+ * module.h needs errno.h when compiling with CONFIG_MODULES=n. -+ * Correct duplicate breakpoint handling. -+ * Do not try to send IPI during a catastrophic error, send_ipi can hang -+ and take kdb with it. -+ * kdb memmap command is i386 only, restrict it. -+ * Add large block device (LBD) support from XFS tree. Eric Sandeen. -+ * kdb v4.0-2.4.20-common-1. -+ -+2003-02-03 Keith Owens -+ -+ * Register kdb commands early. -+ * Decode oops via kallsyms if it is available. -+ * Update copyright notices to 2003. -+ * Add defcmd/endefcmd to allow users to package their own macros. -+ * kdb commands that fail are ignored when prefixed with '-'. -+ * Add selection options to bta command. -+ * Add btc command (switch to each cpu and backtrace). -+ * Do real time detection of dead cpus. -+ * Clear ip adjusted flag when leaving kdb. -+ * Clean up ps command. -+ * Print ps output for each task when backtracing. -+ * Bump to version v3.0 to reduce confusion between kdb and kernel -+ version numbers. -+ * Add kdba_local_arch_setup/kdba_local_arch_cleanup to correct -+ keyboard freeze. Ashish Kalra. -+ * Refuse multiple breakpoints at the same address. -+ * Add fl (file_lock) command, from XFS development tree. -+ * Correct inode_pages, from XFS development tree. -+ * Add command history and editing. Sonic Zhang. -+ * Extend command history and editing to handle vt100 escape sequences. -+ * Allow tab completion at start of line. -+ * Touch nmi watchdog on long running bta and btc commands. -+ * Clean up ps output and standardize with bta codes. -+ * Correctly handle escaped characters in commands. -+ * Update man pages for btc and command history/editing. -+ * kdb v3.0-2.4.20-common-1. -+ -+2002-11-29 Keith Owens -+ -+ * Upgrade to 2.4.20. -+ * Correct Documentation/kdb/kdb_sr.man. -+ * Remove leading zeroes from pids, they are decimal, not octal. -+ * kdb v2.5-2.4.20-common-1. -+ -+2002-11-14 Keith Owens -+ -+ * Upgrade to 2.4.20-rc1. -+ * kdb v2.5-2.4.20-rc1-common-1. -+ -+2002-11-14 Keith Owens -+ -+ * Fix processing with O(1) scheduler. -+ * 'go' switches back to initial cpu first. -+ * 'go
' only allowed on initial cpu. -+ * 'go' installs the global breakpoints from the initial cpu before -+ releasing the other cpus. -+ * If 'go' has to single step over a breakpoint then it single steps just -+ the initial cpu, installs the global breakpoints then releases the -+ other cpus. -+ * General clean up of handling for breakpoints and single stepping over -+ software breakpoints. -+ * Add kdb_notifier_block so other code can tell when kdb is in control. -+ * kdb v2.5-2.4.19-common-1. -+ -+2002-11-02 Keith Owens -+ -+ * Correct build without CONFIG_KDB. -+ * kdb v2.4-2.4.19-common-3. -+ -+2002-11-01 Keith Owens -+ -+ * Minimize differences from 2.5.44. -+ * kdb v2.4-2.4.19-common-2. -+ -+2002-10-31 Keith Owens -+ -+ * Add defcmd/endefcmd feature. -+ * Remove kdb_eframe_t. -+ * Clear bp data before using. -+ * Sanity check if we have pt_regs. -+ * Force LINES > 1. -+ * Remove special case for KDB_REASON_PANIC, use KDB_ENTER() instead. -+ * Remove kdba_getcurrentframe(). -+ * Coexist with O(1) scheduler. -+ * Add lines option to dmesg, speed up dmesg. -+ * kdb v2.4-2.4.19-common-1. -+ -+2002-10-17 Keith Owens -+ -+ * Add selection critera to ps and bta commands. -+ * kdb v2.3-2.4.19-common-4. -+ -+2002-10-07 Keith Owens -+ -+ * New man page, Documentation/kdb/kdb_sr.man. -+ -+2002-10-04 Keith Owens -+ -+ * Minimize differences between patches for 2.4 and 2.5 kernels. -+ * Add Configure.help for CONFIG_KDB_USB. -+ * Reduce stack usage. -+ * kdb v2.3-2.4.19-common-3. -+ -+2002-08-10 Keith Owens -+ -+ * Replace kdb_port with kdb_serial to support memory mapped I/O. -+ David Mosberger. -+ * kdb v2.3-2.4.19-common-2. -+ -+2002-08-07 Keith Owens -+ -+ * Upgrade to 2.4.19. -+ * Remove individual SGI copyrights, the general SGI copyright applies. -+ * Handle md0. Reported by Hugh Dickins, different fix by Keith Owens. -+ * Use page_address() in kdbm_pg.c. Hugh Dickins. -+ * Remove debugging printk from kdbm_pg.c. Hugh Dickins. -+ * Move breakpoint address verification into arch dependent code. -+ * Dynamically resize kdb command table as required. -+ * Common code to support USB keyboard. Sebastien Lelarge. -+ * kdb v2.3-2.4.19-common-1. -+ -+2002-07-09 Keith Owens -+ -+ * Upgrade to 2.4.19-rc1. -+ * Add dmesg command. -+ * Clean up copyrights, Eric Sandeen. -+ * kdb v2.2-2.4.19-rc1-common-1. -+ -+2002-06-14 Keith Owens -+ -+ * Upgrade to 2.4.19-pre10. -+ * Sync with XFS. -+ * kdb v2.1-2.4.19-pre10-common-1. -+ -+2002-04-09 Keith Owens -+ -+ * Upgrade to 2.4.19-pre6. -+ * kdb v2.1-2.4.19-pre6-common-1. -+ -+2002-03-18 Keith Owens -+ -+ * Syntax check mdWcN commands. -+ -+2002-03-01 Keith Owens -+ -+ * Sync with XFS 2.4.18. -+ * kdb v2.1-2.4.18-common-2. -+ -+2002-02-26 Keith Owens -+ -+ * Upgrade to 2.4.18. -+ * Add Paul Dorwin (IBM) magicpoint slides on using kdb as -+ Documentation/kdb/slides. -+ * kdb v2.1-2.4.18-common-1. -+ -+2002-01-23 Keith Owens -+ -+ * Sync with XFS pagebuf changes. -+ * kdb v2.1-2.4.17-common-2. -+ -+2002-01-18 Keith Owens -+ -+ * Ignore single stepping during panic. -+ * Remove kdba_getword, kdba_putword. Replace with kdb_getword, -+ kdb_putword that rely on copy_xx_user. The new functions return -+ an error code, like copy_xx_user. -+ * New functions kdb_getarea, kdb_putarea for copying areas of data -+ such as structures. These functions also return an error code. -+ * Change all common code to use the new functions. -+ * bp command checks that it can read and write the word at the -+ breakpoint before accepting the address. 
-+ * Break points are now set FIFO and cleared LIFO so overlapping -+ entries give sensible results. -+ * Verify address before disassembling code. -+ * Common changes for sparc64. Ethan Solomita, Tom Duffy. -+ * Remove ss , never supported. -+ * Remove kallsyms entries from arch vmlinux.lds files. -+ * Specify which commands auto repeat. -+ * kdb v2.1-2.4.17-common-1. -+ -+2002-01-07 Keith Owens -+ -+ * Remove console semaphore code, not good in interrupt. -+ * Remove fragment of ia64 patch that had crept into kdb. -+ * Release as kdb v2.0-2.4.17-common-3. -+ -+2002-01-04 Keith Owens -+ -+ * Sync xfs <-> kdb common code. -+ -+2001-12-22 Keith Owens -+ -+ * Upgrade to 2.4.17. -+ * Clean up ifdef CONFIG_KDB. -+ * Add ifdef CONFIG_KDB around include kdb.h. -+ * Delete dummy kdb.h files for unsupported architectures. -+ * Delete arch i386 and ia64 specific files. This changelog now -+ applies to kdb common code only. -+ * Release as kdb v2.0-2.4.17-common-1. -+ -+2001-12-03 Keith Owens -+ -+ * Upgrade to 2.4.16. -+ * Add include/asm-um/kdb.h stub to allow XFS to be tested under UML. -+ * Check if an interrupt frame on i386 came from user space. -+ * Out of scope bug fix in kdb_id.c. Ethan Solomita. -+ * Changes to common code to support sparc64. Ethan Solomita. -+ * Change GFP_KERNEL to GFP_ATOMIC in disasm. Ethan Solomita. -+ -+2001-11-16 Keith Owens -+ -+ * Upgrade to 2.4.15-pre5. -+ * Wrap () around #define expressions with unary operators. -+ -+2001-11-13 Keith Owens -+ -+ * Upgrade to 2.4.15-pre4. -+ * kbdm_pg.c patch from Hugh Dickins. -+ -+2001-11-07 Keith Owens -+ -+ * Upgrade to 2.4.14-ia64-011105. -+ * Change name of l1 serial I/O routine, add ia64 init command. SGI. -+ * Sync kdbm_pg with XFS. -+ -+2001-11-06 Keith Owens -+ -+ * Upgrade to kernel 2.4.14. -+ -+2001-11-02 Keith Owens -+ -+ * Sync kdbm_pg.c with XFS. -+ -+2001-10-24 Keith Owens -+ -+ * Upgrade to kernel 2.4.13. -+ -+2001-10-14 Keith Owens -+ -+ * More use of TMPPREFIX in top level Makefile to speed up NFS compiles. -+ -+ * Correct repeat calculations in md/mds commands. -+ -+2001-10-10 Keith Owens -+ -+ * Copy bfd.h and ansidecl.h to arch/$(ARCH)/kdb, remove dependecies on -+ user space includes. -+ -+ * Update kdb v1.9 to kernel 2.4.11. -+ -+2001-10-01 Keith Owens -+ -+ * Update kdb v1.9 to kernel 2.4.11-pre1 and 2.4.10-ac1. -+ -+ * Correct loop in kdb_parse, reported by Tachino Nobuhiro. -+ -+2001-09-25 Keith Owens -+ -+ * Update kdb v1.8 to kernel 2.4.10. -+ -+ * kdbm_pg patch from Hugh Dickens. -+ -+ * DProbes patch from Bharata B Rao. -+ -+ * mdWcn and mmW patch from Vamsi Krishna S. -+ -+ * i386 disasm layout patch from Jean-Marc Saffroy. -+ -+ * Work around for 64 bit binutils, Simon Munton. -+ -+ * kdb.mm doc correction by Chris Pascoe. -+ -+ * Enter repeats the last command, IA64 disasm only prints one -+ instruction. Don Dugger. -+ -+ * Allow kdb/modules to be linked into vmlinux. -+ -+ * Remove obsolete code from kdb/modules/kdbm_{pg,vm}.c. -+ -+ * Warn when commands are entered at more prompt. -+ -+ * Add MODULE_AUTHOR, DESCRIPTION, LICENSE. -+ -+ * Release as kdb v1.9. -+ -+2001-02-27 Keith Owens -+ -+ * Update kdb v1.8 to kernel 2.4.2, sync kdb/modules with XFS. -+ -+ * Hook into panic() call. -+ -+2000-12-18 Keith Owens -+ -+ * Update kdb v1.7 to kernel 2.4.0-test13-pre3, sync kdb/modules with -+ XFS. -+ -+2000-11-18 Keith Owens -+ -+ * Update to kernel 2.4.0-test11-pre7, including forward port of -+ bug fixes from WIP 2.4.0-test9 tree. -+ -+ * Update to Cygnus CVS trees for disassembly code. 
-+ -+ * Bump to kdb v1.6. -+ -+2000-10-19 Keith Owens -+ -+ * Update to kernel 2.4.0-test10-pre4. -+ -+2000-10-15 Keith Owens -+ -+ * kdb/kdbmain.c (kdb_parse): Correctly handle blank input. -+ -+ * kdb/kdbmain.c (kdb_local, kdb): Reason SILENT can have NULL regs. -+ -+2000-10-13 Keith Owens -+ -+ * kdb/kdbmain.c: Reduce CMD_LEN to avoid overflowing kdb_printf buffer. -+ -+2000-10-11 Keith Owens -+ -+ * kdb/kdbmain.c (kdb): Test for userspace breakpoints before driving -+ other cpus into kdb. Speeds up gdb and avoids SMP race. -+ -+ * arch/i386/kdb/kdba_io.c (get_serial_char, get_kbd_char): Ignore -+ unprintable characters. -+ -+ * arch/i386/kdb/kdba_io.c (kdba_read): Better handling of buffer size. -+ -+2000-10-04 Keith Owens -+ -+ * arch/i386/kdb/kdba_bt.c (kdba_bt_process): Verify that esp is inside -+ task_struct. Original patch by Mike Galbraith. -+ -+ * kdb/kdb_io.c (kdb_getstr): Reset output line counter, remove -+ unnecessary prompts. -+ -+ * arch/i386/kdb/kdbasupport.c (kdb_getregcontents): Change " cs" to -+ "xcs", ditto ss, ds, es. gdb2kdb does not like leading spaces. -+ -+ * include/asm-xxx/kdb.h: Add dummy kdb.h for all architectures except -+ ix86. This allows #include to appear in arch independent -+ code without causing compile errors. -+ -+ * kdb/modules/kdbm_pg: Sync with XFS. -+ -+2000-10-03 Keith Owens -+ -+ * kdb/kdb_io.c (kdb_read): Ignore NMI while waiting for input. -+ -+ * kdb/kdb_io.c, kdb/Makefile: Export kdb_read. -+ -+2000-10-02 Keith Owens -+ -+ * arch/i386/kernel/smpboot.c (do_boot_cpu): Set nmi_watchdog_source to 2 -+ to avoid premature NMI oops during cpu bring up. We have to assume that -+ a box with more than 1 cpu has a working IO-APIC. -+ -+ * Documentation/kdb/{kdb.mm,kdb_md.man}: Add mdr command. -+ -+ * kdb/kdbmain.c (kdb_md): Add mdr command. -+ -+ * Release as kdb v1.5 against 2.4.0-test9-pre8. -+ -+ * arch/i386/kdb/kdba_io.c, arch/i386/kdb/kdbasupport.c, kdb/kdbmain.c, -+ kdb/kdb_io.c, kdb/kdb_id.c: Remove zero initializers for static -+ variables. -+ -+2000-09-28 Keith Owens -+ -+ * various: Add nmi_watchdog_source, 1 local APIC, 2 IO-APIC. -+ Test nmi_watchdog_source instead of nr_ioapics so UP works on SMP hardware. -+ -+ * arch/i386/kernel/io_apic.c: Rename setup_nmi to setup_nmi_io for clarity. -+ -+ * kdb/kdbmain.c (kdb_parse): Only set NO_WATCHDOG if it was already set. -+ -+ * kdb/kdbmain.c (kdb): Clear NO_WATCHDOG on all exit paths. -+ -+ * include/linux/kdb.h: Add KDB_REASON_SILENT. -+ -+ * kdb/kdbmain.c (kdb_local): Treat reason SILENT as immediate 'go'. -+ -+ * kdb/kdbmain.c (kdb_init): Invoke kdb with reason SILENT to instantiate -+ any breakpoints on boot cpu. -+ -+ * arch/i386/kernel/smpboot.c (smp_callin): Invoke kdb with reason SILENT -+ to instantiate any global breakpoints on this cpu. -+ -+ * kdb/kdb_cmds: Remove comment that said initial commands only worked on -+ boot cpu. -+ -+2000-09-27 Keith Owens -+ -+ * arch/i386/kernel/msr.c: Move {rd,wr}msr_eio to include/asm-i386/apic.h. -+ -+ * include/asm-i386/apic.h: Define NMI interfaces. -+ -+ * kernel/sysctl.c (kern_table): -+ * kernel/sysctl.c (do_proc_set_nmi_watchdog): -+ Add /proc/sys/kernel/nmi_watchdog. -+ -+ * arch/i386/kernel/apic.c: New routines set_nmi_counter_local, -+ setup_apic_nmi_watchdog. -+ -+ * arch/i386/kernel/traps.c: New routine set_nmi_watchdog(). Call apic -+ routines to set/clear local apic timer. -+ -+2000-09-26 Keith Owens -+ -+ * include/linux/sysctl.h (enum): Add NMI_WATCHDOG. 
-+ -+ * arch/i386/kernel/traps.c (nmi_watchdog_tick): Check nmi_watchdog is -+ still on. -+ -+ * arch/i386/config.in: Add CONFIG_UP_NMI_WATCHDOG. -+ -+ * Documentation/Configure.help: Add CONFIG_UP_NMI_WATCHDOG. -+ -+ * Documentation/nmi_watchdog.txt: Update for UP NMI watchdog. -+ -+2000-09-25 Keith Owens -+ -+ * arch/i386/kernel/apic.c (init_apic_mappings): -+ * arch/i386/kernel/io_apic.c (IO_APIC_init_uniprocessor): -+ Merge Keir Fraser's local APIC for uniprocessors patch. -+ -+2000-09-24 Keith Owens -+ -+ * Various: Declare initialization routines as __init. -+ -+ * Makefile: Define and export AWK. -+ -+ * kdb/Makefile: Generate gen-kdb_cmds.c from kdb/kdb_cmds. -+ -+ * kdb/kdbmain.c (kdb_init): Call new routine kdb_cmds_init to execute -+ whatever the user put in kdb/kdb_cmds. -+ -+ * arch/i386/kdb/kdba_bt.c (kdba_bt_stack): New parameter to -+ indicate if esp in regs is known to be valid or not. -+ -+ * kdb/kdb_bp.c, arch/i386/kdb/kdba_bp.c: More trace prints for -+ breakpoint handling. -+ -+ * arch/i386/kdb/kdba_bp.c (kdba_installbp): Finally found and fixed the -+ annoying breakpoint bug where breakpoints where not always installed -+ after 'go'. -+ -+ * Documentation/kdb: Update man pages kdb.mm, kdb_env.man, kdb_ss.man. -+ -+ * Released as kdb-v1.5-beta1-2.4.0-test8. -+ -+ * Sync to 2.4.0-test9-pre6 and release as kdb-v1.5-beta1-2.4.0-test9-pre6. -+ -+2000-09-23 Keith Owens -+ -+ * arch/i386/kdb/kdbasupport.c (kdba_getregcontents): New pseudo -+ registers cesp and ceflags to help with debugging the debugger. -+ -+ * kdb/kdbmain.c (kdb_local, kdb): Add KDB_REASON_RECURSE. Add -+ environment variable RECURSE. Add code to cope with some types of -+ recursion. -+ -+ * kdb/kdbmain.c (kdb), arch/i386/kdba/kdba_bp.c: Add -+ kdba_clearsinglestep. -+ -+2000-09-22 Keith Owens -+ -+ * drivers/video/vgacon.c (write_vga): No cli() if kdb is running, avoid -+ console deadlock. -+ -+ * arch/i386/kernel/irq.c (get_irqlock): Warn if kdb is running, may hang. -+ -+ * include/linux/kdb.h: Define KDB_IS_RUNNING as (0) if no CONFIG_KDB. -+ -+ * arch/i386/kdb/kdba_bt.c (kdba_bt_stack): Do not attempt a backtrace if -+ the code segment is not in the kernel. -+ -+ * kdb/modules: Change modules from MX_OBJS to M_OBJS. Remove EXPORT_NOSYMBOLS. -+ -+2000-09-21 Keith Owens -+ -+ * arch/i386/kernel/i386_ksyms.c: Move EXPORT_SYMBOLS for kdb to kdb/kdbmain.c. -+ -+ * kdb/Makefile: Change kdb/kdbmain.o from O_OBJS to OX_OBJS. -+ -+ * arch/i386/kernel/smp.c: Remove some #ifdef CONFIG_KDB. Remove kdbprivate.h. -+ -+ * include/linux/kdb.h: Add kdb_print_state. Add KDB_STATE_WAIT_IPI. -+ -+ * kdb/kdbmain.c (kdb): Only mark cpu as leaving if it is in KDB state. Maintain -+ WAIT_IPI state so a cpu is only driven through NMI once. -+ -+ * arch/i386/kernel/smp.c (smp_kdb_stop): All state fiddling moved to kdb(). -+ -+2000-09-20 Keith Owens -+ -+ * include/linux/kdb.h: #define kdb() as (0) if kdb is not configured. -+ -+ * arch/i386/kernel/traps.c: Remove some #ifdef CONFIG_KDB. -+ -+ * include/linux/kdbprivate.h: Move per cpu state to kdb.h. -+ -+ * include/linux/kdb.h: Add KDB_STATE_NO_WATCHDOG, KDB_STATE_PRINTF_LOCK. -+ Rename KDB_DEBUG_xxx to KDB_DEBUG_FLAG_xxx. Clean up debug flag -+ definitions. -+ -+ * arch/i386/kernel/traps.c (nmi_watchdog_tick): Check no watchdog. -+ -+ * kdb/kdbmain.c (kdb): Set no watchdog in normal kdb code. -+ -+ * kdb/kdbmain.c (kdb_parse): Allow watchdog in commands. -+ -+ * kdb/kdb_io.c (kdb_printf): No watchdog during printing. Clean up lock handling. 
-+ -+ * kdb/kdbmain.c (kdb_set): Clean up debug flag handling. -+ -+2000-09-19 Juan J. Quintela -+ -+ * kdb/arch/i386/kdb/kdba_io.c: Allow kdb to compile without CONFIG_VT and/or -+ serial console. -+ -+2000-09-19 Keith Owens -+ -+ * include/linux/kdb.h: Define KDB_DEBUG_STATE(). -+ -+ * kdb/kdbmain.c (kdb): Add kdb_print_state(), calls to KDB_DEBUG_STATE(). -+ -+2000-09-16 Keith Owens -+ -+ * Move to finer grained control over individual processors in kdb with -+ per cpu kdb state. Needed to allow ss[b] to only release one processor, -+ previously ss[b] released all processors. Also need to recover from -+ errors inside kdb commands, e.g. oops in kdbm_pg code. -+ -+ * various: -+ Move global flags KDB_FLAG_SSB, KDB_FLAG_SUPRESS, KDB_FLAG_FAULT, -+ KDB_FLAG_SS, KDB_FLAG_SSBPT, kdb_active, to per cpu state and macros -+ KDB_STATE(xxx). -+ Replace kdb_flags & KDB_FLAG_xxx with KDB_FLAG(xxx). -+ Replace kdb_flags & KDB_DEBUG_xxx with KDB_DEBUG(xxx). -+ Replace specific tests with wrapper KDB_IS_RUNNING(). -+ -+ * various: Remove #ifdef CONFIG_SMP from kdb code wherever -+ possible. Simplifies the code and makes it much more readable. -+ -+ * arch/i386/kdb/kdbasupport.c (kdb_setjmp): Record if we have reliable -+ longjmp data instead of assuming it is always set. -+ -+ * various: Replace smp_kdb_wait with per cpu state, HOLD_CPU. -+ -+ * init/main.c : Replace #ifdef KDB_DEBUG with KDB_DEBUG(CALLBACK). -+ -+ * include/linux/kdbprivate.h: Separate command return codes from error -+ codes. Add more detailed command codes. -+ -+ * arch/i386/kernel/traps.c (die): Change spin_lock_irq to -+ spin_lock_irqsave. Why did I do this? -+ -+ * kdb/kdbmain.c (kdb_parse): Set per cpu flag CMD before executing kdb -+ command. More detailed return codes for commands that affect -+ processors. -+ -+ * kdb/kdbmain.c (kdb_previous_event): New, check if any processors are -+ still executing the previous kdb event. Removes a race window where a -+ second event could enter kdb before the first had completely ended. -+ -+ * kdb/kdbmain.c (kdb): Document all the concurrency conditions and how -+ kdb handles them. ss[b] now releases only the current cpu. Do not set -+ breakpoints when releasing for ss[b]. Recover from errors in kdb -+ commands. Check that we have reliable longjmp data before using it. -+ -+ * various: Update return code documentation. -+ -+ * kdb/kdb_bp.c (kdb_ss): Separate ss and ssb return codes. -+ -+ * kdb/kdbsupport.c (kdb_ipi): Finer grained algorithm for deciding -+ whether to call send a stop signal to a cpu. -+ -+ * arch/i386/kdb/kdba_bp.c (kdba_db_trap): Separate ss and ssb return -+ codes. Reinstall delayed software breakpoints per cpu instead of -+ globally. Changed algorithm for handling ss[b]. -+ -+ * arch/i386/kdb/kdba_bp.c (kdba_bp_trap): Match software breakpoints per -+ cpu instead of globally. -+ -+ * include/linux/kdb.h: Bump version to kdb v1.5. -+ -+2000-09-16 Keith Owens -+ -+ * kernel/sysctl.c (kern_table): add /proc/sys/kernel/kdb. -+ -+ * init/main.c (parse_options): add boot flags kdb=on, kdb=off, -+ kdb=early. -+ -+ * include/linux/sysctl.h (enum): add KERN_KDB. -+ -+ * drivers/char/serial.c (receive_chars): check kdb_on. -+ -+ * drivers/char/keyboard.c (handle_scancode): check kdb_on. -+ -+ * arch/i386/kernel/traps.c (nmi_watchdog_tick): check kdb_on. -+ -+ * arch/i386/config.in: add CONFIG_KDB_OFF. -+ -+ * Documentation/Configure.help: add CONFIG_KDB_OFF. -+ -+ * kdb/kdbmain.c: add kdb_initial_cpu, kdb_on. -+ -+ * kdb/kdbmain.c (kdb): check kdb_on, set kdb_initial_cpu. 
-+ -+ * kdb/kdbmain.c (kdb_init): add Keith Owens to kdb banner. -+ -+ * kdb/kdb_io.c (kdb_printf): serialize kdb_printf output. -+ -+ * kdb/kdb_bt.c (kdb_bt): check environment variable BTAPROMPT. -+ -+ * kdb/kdbsupport.c (kdb_ipi): ignore NMI for kdb_initial_cpu. -+ -+ * kdb/modules/kdbm_pg.c (kdbm_page): merge updates from 2.4.0-test5-xfs. -+ -+ * kdb/kdb_bt.man: add btp, bta, BTAPROMPT. -+ -+ * kdb/kdb.mm: add CONFIG_KDB_OFF, boot flags, btp, bta. -+ -+ * include/linux/kdbprivate.h: add kdb_initial_cpu. -+ -+ * include/linux/kdb.h: add kdb_on, bump version to kdb v1.4. ---- /dev/null -+++ b/kdb/Makefile -@@ -0,0 +1,43 @@ -+# -+# This file is subject to the terms and conditions of the GNU General Public -+# License. See the file "COPYING" in the main directory of this archive -+# for more details. -+# -+# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+# -+ -+CCVERSION := $(shell $(CC) -v 2>&1 | sed -ne '$$p') -+obj-y := kdb_bt.o kdb_bp.o kdb_id.o kdbsupport.o gen-kdb_cmds.o kdbmain.o kdb_io.o kdbdereference.o -+CFLAGS_kdbmain.o += -DCCVERSION="$(CCVERSION)" -+ -+subdir-$(CONFIG_KDB_MODULES) := modules -+obj-y += $(addsuffix /built-in.o, $(subdir-y)) -+ -+clean-files := gen-kdb_cmds.c -+ -+override CFLAGS := $(CFLAGS:%-pg=% ) -+ -+# define architecture dependent kdb_cmds -+ifeq ($(CONFIG_IA64),y) -+ KDB_CMDS = ia64/kdb/kdb_cmds -+else -+ ifeq ($(CONFIG_X86_64),y) -+ KDB_CMDS = x86/kdb/kdb_cmds_64 -+ else -+ ifeq ($(CONFIG_X86_32),y) -+ KDB_CMDS = x86/kdb/kdb_cmds_32 -+ endif -+ endif -+endif -+ -+quiet_cmd_gen-kdb = GENKDB $@ -+ cmd_gen-kdb = $(AWK) 'BEGIN {print "\#include "; print "\#include "} \ -+ /^\#/{next} \ -+ /^[ \t]*$$/{next} \ -+ {gsub(/"/, "\\\"", $$0); \ -+ print "static __initdata char kdb_cmd" cmds++ "[] = \"" $$0 "\\n\";"} \ -+ END {print "extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {"; for (i = 0; i < cmds; ++i) {print " kdb_cmd" i ","}; print(" NULL\n};");}' \ -+ $(filter-out %/Makefile,$^) > $@ -+ -+$(obj)/gen-kdb_cmds.c: $(src)/kdb_cmds $(wildcard $(TOPDIR)/arch/$(KDB_CMDS)) $(src)/Makefile -+ $(call cmd,gen-kdb) ---- /dev/null -+++ b/kdb/kdb_bp.c -@@ -0,0 +1,661 @@ -+/* -+ * Kernel Debugger Architecture Independent Breakpoint Handler -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * Table of kdb_breakpoints -+ */ -+kdb_bp_t kdb_breakpoints[KDB_MAXBPT]; -+ -+/* -+ * Predicate to test whether a breakpoint should be installed -+ * on this CPU. -+ * -+ * Note that for purposes of installation, hardware breakpoints -+ * are treated as local (even if the global flag is set), on -+ * the assumption that the require per-cpu registers to be set. 
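 *
 * A brief summary for orientation (added commentary, not part of the
 * original patch): a plain "bpa" breakpoint is enabled and global with
 * bp_forcehw clear, so it is installed once from kdb_bp_install_global();
 * anything with bp_forcehw set ("bph"/"bpha"), and any non-global
 * breakpoint bound to a single cpu, is handled by kdb_bp_install_local(),
 * which runs once per processor because debug registers are per-cpu state.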
-+ */ -+ -+static inline int kdb_is_installable_global_bp(const kdb_bp_t *bp) -+{ -+ return (bp->bp_enabled && -+ bp->bp_global && -+ !bp->bp_forcehw); -+} -+ -+static int kdb_is_installable_local_bp(const kdb_bp_t *bp) -+{ -+ if (!bp->bp_enabled) -+ return 0; -+ -+ if (bp->bp_forcehw) { -+ if (bp->bp_cpu == smp_processor_id() || bp->bp_global) -+ return 1; -+ } else { -+ if (bp->bp_cpu == smp_processor_id() && !bp->bp_global) -+ return 1; -+ } -+ return 0; -+} -+ -+/* -+ * kdb_bp_install_global -+ * -+ * Install global kdb_breakpoints prior to returning from the -+ * kernel debugger. This allows the kdb_breakpoints to be set -+ * upon functions that are used internally by kdb, such as -+ * printk(). -+ * -+ * Parameters: -+ * regs Execution frame. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * This function is only called once per kdb session. -+ */ -+ -+void -+kdb_bp_install_global(struct pt_regs *regs) -+{ -+ int i; -+ -+ for(i=0; ibp_enabled, bp->bp_global); -+ } -+ /* HW BP local or global are installed in kdb_bp_install_local*/ -+ if (kdb_is_installable_global_bp(bp)) -+ kdba_installbp(regs, bp); -+ } -+} -+ -+/* -+ * kdb_bp_install_local -+ * -+ * Install local kdb_breakpoints prior to returning from the -+ * kernel debugger. This allows the kdb_breakpoints to be set -+ * upon functions that are used internally by kdb, such as -+ * printk(). -+ * -+ * Parameters: -+ * regs Execution frame. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * This function is called once per processor. -+ */ -+ -+void -+kdb_bp_install_local(struct pt_regs *regs) -+{ -+ int i; -+ -+ for(i=0; ibp_enabled, bp->bp_global, -+ smp_processor_id(), bp->bp_cpu); -+ } -+ if (kdb_is_installable_local_bp(bp)) -+ kdba_installbp(regs, bp); -+ } -+} -+ -+/* -+ * kdb_bp_remove_global -+ * -+ * Remove global kdb_breakpoints upon entry to the kernel debugger. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+void -+kdb_bp_remove_global(void) -+{ -+ int i; -+ -+ for(i=KDB_MAXBPT-1; i>=0; i--) { -+ kdb_bp_t *bp = &kdb_breakpoints[i]; -+ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdb_bp_remove_global bp %d bp_enabled %d bp_global %d\n", -+ i, bp->bp_enabled, bp->bp_global); -+ } -+ if (kdb_is_installable_global_bp(bp)) -+ kdba_removebp(bp); -+ } -+} -+ -+ -+/* -+ * kdb_bp_remove_local -+ * -+ * Remove local kdb_breakpoints upon entry to the kernel debugger. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+void -+kdb_bp_remove_local(void) -+{ -+ int i; -+ -+ for(i=KDB_MAXBPT-1; i>=0; i--) { -+ kdb_bp_t *bp = &kdb_breakpoints[i]; -+ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdb_bp_remove_local bp %d bp_enabled %d bp_global %d cpu %d bp_cpu %d\n", -+ i, bp->bp_enabled, bp->bp_global, -+ smp_processor_id(), bp->bp_cpu); -+ } -+ if (kdb_is_installable_local_bp(bp)) -+ kdba_removebp(bp); -+ } -+} -+ -+/* -+ * kdb_printbp -+ * -+ * Internal function to format and print a breakpoint entry. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. 
-+ * Remarks: -+ */ -+ -+static void -+kdb_printbp(kdb_bp_t *bp, int i) -+{ -+ if (bp->bp_forcehw) { -+ kdb_printf("Forced "); -+ } -+ -+ if (!bp->bp_template.bph_free) { -+ kdb_printf("%s ", kdba_bptype(&bp->bp_template)); -+ } else { -+ kdb_printf("Instruction(i) "); -+ } -+ -+ kdb_printf("BP #%d at ", i); -+ kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT); -+ -+ if (bp->bp_enabled) { -+ kdba_printbp(bp); -+ if (bp->bp_global) -+ kdb_printf(" globally"); -+ else -+ kdb_printf(" on cpu %d", bp->bp_cpu); -+ if (bp->bp_adjust) -+ kdb_printf(" adjust %d", bp->bp_adjust); -+ } else { -+ kdb_printf("\n is disabled"); -+ } -+ -+ kdb_printf("\taddr at %016lx, hardtype=%d, forcehw=%d, installed=%d, hard=%p\n", -+ bp->bp_addr, bp->bp_hardtype, bp->bp_forcehw, -+ bp->bp_installed, bp->bp_hard); -+ -+ kdb_printf("\n"); -+} -+ -+/* -+ * kdb_bp -+ * -+ * Handle the bp, and bpa commands. -+ * -+ * [bp|bpa|bph] [DATAR|DATAW|IO [length]] -+ * -+ * Parameters: -+ * argc Count of arguments in argv -+ * argv Space delimited command line arguments -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic if failure. -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * bp Set breakpoint. Only use hardware assist if necessary. -+ * bpa Set breakpoint on all cpus, only use hardware regs if necessary -+ * bph Set breakpoint - force hardware register -+ * bpha Set breakpoint on all cpus, force hardware register -+ */ -+ -+static int -+kdb_bp(int argc, const char **argv) -+{ -+ int i, bpno; -+ kdb_bp_t *bp, *bp_check; -+ int diag; -+ int free; -+ char *symname = NULL; -+ long offset = 0ul; -+ int nextarg; -+ static kdb_bp_t kdb_bp_template; -+ -+ if (argc == 0) { -+ /* -+ * Display breakpoint table -+ */ -+ for(bpno=0,bp=kdb_breakpoints; bpnobp_free) continue; -+ -+ kdb_printbp(bp, bpno); -+ } -+ -+ return 0; -+ } -+ -+ memset(&kdb_bp_template, 0, sizeof(kdb_bp_template)); -+ -+ kdb_bp_template.bp_global = ((strcmp(argv[0], "bpa") == 0) -+ || (strcmp(argv[0], "bpha") == 0)); -+ kdb_bp_template.bp_forcehw = ((strcmp(argv[0], "bph") == 0) -+ || (strcmp(argv[0], "bpha") == 0)); -+ -+ /* Fix me: "bp" is treated as "bpa" to avoid system freeze. -jlan */ -+ if (strcmp(argv[0], "bp") == 0) -+ kdb_bp_template.bp_global = 1; -+ -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &kdb_bp_template.bp_addr, -+ &offset, &symname); -+ if (diag) -+ return diag; -+ if (!kdb_bp_template.bp_addr) -+ return KDB_BADINT; -+ -+ /* -+ * Find an empty bp structure, to allocate -+ */ -+ free = KDB_MAXBPT; -+ for(bpno=0,bp=kdb_breakpoints; bpnobp_free) { -+ break; -+ } -+ } -+ -+ if (bpno == KDB_MAXBPT) -+ return KDB_TOOMANYBPT; -+ -+ /* -+ * Handle architecture dependent parsing -+ */ -+ diag = kdba_parsebp(argc, argv, &nextarg, &kdb_bp_template); -+ if (diag) { -+ return diag; -+ } -+ -+ -+ /* -+ * Check for clashing breakpoints. -+ * -+ * Note, in this design we can't have hardware breakpoints -+ * enabled for both read and write on the same address, even -+ * though ia64 allows this. 
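 *
 * Illustrative usage of the commands handled here (symbols and lengths
 * invented, keywords as documented in the header comment):
 *
 *	kdb> bp schedule		software breakpoint (per the FIXME
 *					above, currently treated like bpa)
 *	kdb> bpa sys_open		breakpoint installed on every cpu
 *	kdb> bph do_fork DATAW 4	force a hardware data-write breakpoint
 *	kdb> bc *			clear all breakpoints (see kdb_bc)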
-+ */ -+ for(i=0,bp_check=kdb_breakpoints; ibp_free && -+ bp_check->bp_addr == kdb_bp_template.bp_addr && -+ (bp_check->bp_global || -+ bp_check->bp_cpu == kdb_bp_template.bp_cpu)) { -+ kdb_printf("You already have a breakpoint at " -+ kdb_bfd_vma_fmt0 "\n", kdb_bp_template.bp_addr); -+ return KDB_DUPBPT; -+ } -+ } -+ -+ kdb_bp_template.bp_enabled = 1; -+ -+ /* -+ * Actually allocate the breakpoint found earlier -+ */ -+ *bp = kdb_bp_template; -+ bp->bp_free = 0; -+ -+ if (!bp->bp_global) { -+ bp->bp_cpu = smp_processor_id(); -+ } -+ -+ /* -+ * Allocate a hardware breakpoint. If one is not available, -+ * disable the breakpoint, but leave it in the breakpoint -+ * table. When the breakpoint is re-enabled (via 'be'), we'll -+ * attempt to allocate a hardware register for it. -+ */ -+ if (!bp->bp_template.bph_free) { -+ kdba_alloc_hwbp(bp, &diag); -+ if (diag) { -+ bp->bp_enabled = 0; -+ bp->bp_hardtype = 0; -+ kdba_free_hwbp(bp); -+ return diag; -+ } -+ } -+ -+ kdb_printbp(bp, bpno); -+ -+ return 0; -+} -+ -+/* -+ * kdb_bc -+ * -+ * Handles the 'bc', 'be', and 'bd' commands -+ * -+ * [bd|bc|be] -+ * [bd|bc|be] * -+ * -+ * Parameters: -+ * argc Count of arguments in argv -+ * argv Space delimited command line arguments -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic for failure -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+#define KDBCMD_BC 0 -+#define KDBCMD_BE 1 -+#define KDBCMD_BD 2 -+ -+static int -+kdb_bc(int argc, const char **argv) -+{ -+ kdb_machreg_t addr; -+ kdb_bp_t *bp = NULL; -+ int lowbp = KDB_MAXBPT; -+ int highbp = 0; -+ int done = 0; -+ int i; -+ int diag; -+ int cmd; /* KDBCMD_B? */ -+ -+ if (strcmp(argv[0], "be") == 0) { -+ cmd = KDBCMD_BE; -+ } else if (strcmp(argv[0], "bd") == 0) { -+ cmd = KDBCMD_BD; -+ } else -+ cmd = KDBCMD_BC; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ if (strcmp(argv[1], "*") == 0) { -+ lowbp = 0; -+ highbp = KDB_MAXBPT; -+ } else { -+ diag = kdbgetularg(argv[1], &addr); -+ if (diag) -+ return diag; -+ -+ /* -+ * For addresses less than the maximum breakpoint number, -+ * assume that the breakpoint number is desired. -+ */ -+ if (addr < KDB_MAXBPT) { -+ bp = &kdb_breakpoints[addr]; -+ lowbp = highbp = addr; -+ highbp++; -+ } else { -+ for(i=0, bp=kdb_breakpoints; ibp_addr == addr) { -+ lowbp = highbp = i; -+ highbp++; -+ break; -+ } -+ } -+ } -+ } -+ -+ /* -+ * Now operate on the set of breakpoints matching the input -+ * criteria (either '*' for all, or an individual breakpoint). -+ */ -+ for(bp=&kdb_breakpoints[lowbp], i=lowbp; -+ i < highbp; -+ i++, bp++) { -+ if (bp->bp_free) -+ continue; -+ -+ done++; -+ -+ switch (cmd) { -+ case KDBCMD_BC: -+ if (bp->bp_hardtype) -+ kdba_free_hwbp(bp); -+ -+ bp->bp_enabled = 0; -+ bp->bp_global = 0; -+ -+ kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " cleared\n", -+ i, bp->bp_addr); -+ -+ bp->bp_addr = 0; -+ bp->bp_free = 1; -+ -+ break; -+ case KDBCMD_BE: -+ /* -+ * Allocate a hardware breakpoint. If one is not -+ * available, don't enable the breakpoint. 
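 *
 * (Added note: "be 0" re-enables breakpoint slot 0 and, for bph/bpha
 * breakpoints, reacquires a debug register at that point; "bd 0" gives
 * the register back, as the KDBCMD_BD case below explains.)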
-+ */ -+ if (!bp->bp_template.bph_free -+ && !bp->bp_hardtype) { -+ kdba_alloc_hwbp(bp, &diag); -+ if (diag) { -+ bp->bp_enabled = 0; -+ bp->bp_hardtype = 0; -+ kdba_free_hwbp(bp); -+ return diag; -+ } -+ } -+ -+ bp->bp_enabled = 1; -+ -+ kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " enabled", -+ i, bp->bp_addr); -+ -+ kdb_printf("\n"); -+ break; -+ case KDBCMD_BD: -+ if (!bp->bp_enabled) -+ break; -+ -+ /* -+ * Since this breakpoint is now disabled, we can -+ * give up the hardware register which is allocated -+ * to it. -+ */ -+ if (bp->bp_hardtype) -+ kdba_free_hwbp(bp); -+ -+ bp->bp_enabled = 0; -+ -+ kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " disabled\n", -+ i, bp->bp_addr); -+ -+ break; -+ } -+ if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) { -+ bp->bp_delay = 0; -+ KDB_STATE_CLEAR(SSBPT); -+ } -+ } -+ -+ return (!done)?KDB_BPTNOTFOUND:0; -+} -+ -+/* -+ * kdb_ss -+ * -+ * Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch) -+ * commands. -+ * -+ * ss -+ * ssb -+ * -+ * Parameters: -+ * argc Argument count -+ * argv Argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * KDB_CMD_SS[B] for success, a kdb error if failure. -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * Set the arch specific option to trigger a debug trap after the next -+ * instruction. -+ * -+ * For 'ssb', set the trace flag in the debug trap handler -+ * after printing the current insn and return directly without -+ * invoking the kdb command processor, until a branch instruction -+ * is encountered. -+ */ -+ -+static int -+kdb_ss(int argc, const char **argv) -+{ -+ int ssb = 0; -+ struct pt_regs *regs = get_irq_regs(); -+ -+ ssb = (strcmp(argv[0], "ssb") == 0); -+ if (argc != 0) -+ return KDB_ARGCOUNT; -+ -+ if (!regs) { -+ kdb_printf("%s: pt_regs not available\n", __FUNCTION__); -+ return KDB_BADREG; -+ } -+ -+ /* -+ * Set trace flag and go. -+ */ -+ KDB_STATE_SET(DOING_SS); -+ if (ssb) -+ KDB_STATE_SET(DOING_SSB); -+ -+ kdba_setsinglestep(regs); /* Enable single step */ -+ -+ if (ssb) -+ return KDB_CMD_SSB; -+ return KDB_CMD_SS; -+} -+ -+/* -+ * kdb_initbptab -+ * -+ * Initialize the breakpoint table. Register breakpoint commands. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+void __init -+kdb_initbptab(void) -+{ -+ int i; -+ kdb_bp_t *bp; -+ -+ /* -+ * First time initialization. -+ */ -+ memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints)); -+ -+ for (i=0, bp=kdb_breakpoints; ibp_free = 1; -+ /* -+ * The bph_free flag is architecturally required. It -+ * is set by architecture-dependent code to false (zero) -+ * in the event a hardware breakpoint register is required -+ * for this breakpoint. -+ * -+ * The rest of the template is reserved to the architecture -+ * dependent code and _must_ not be touched by the architecture -+ * independent code. 
-+ */ -+ bp->bp_template.bph_free = 1; -+ } -+ -+ kdb_register_repeat("bp", kdb_bp, "[]", "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("bl", kdb_bp, "[]", "Display breakpoints", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("bpa", kdb_bp, "[]", "Set/Display global breakpoints", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("bph", kdb_bp, "[]", "Set hardware breakpoint", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("bpha", kdb_bp, "[]", "Set global hardware breakpoint", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("bc", kdb_bc, "", "Clear Breakpoint", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("be", kdb_bc, "", "Enable Breakpoint", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("bd", kdb_bc, "", "Disable Breakpoint", 0, KDB_REPEAT_NONE); -+ -+ kdb_register_repeat("ss", kdb_ss, "", "Single Step", 1, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("ssb", kdb_ss, "", "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS); -+ /* -+ * Architecture dependent initialization. -+ */ -+ kdba_initbp(); -+} ---- /dev/null -+++ b/kdb/kdb_bt.c -@@ -0,0 +1,180 @@ -+/* -+ * Kernel Debugger Architecture Independent Stack Traceback -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+/* -+ * kdb_bt -+ * -+ * This function implements the 'bt' command. Print a stack -+ * traceback. -+ * -+ * bt [] (addr-exp is for alternate stacks) -+ * btp Kernel stack for -+ * btt Kernel stack for task structure at -+ * bta [DRSTCZEUIMA] All useful processes, optionally filtered by state -+ * btc [] The current process on one cpu, default is all cpus -+ * -+ * bt refers to a address on the stack, that location -+ * is assumed to contain a return address. -+ * -+ * btt refers to the address of a struct task. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * Backtrack works best when the code uses frame pointers. But even -+ * without frame pointers we should get a reasonable trace. -+ * -+ * mds comes in handy when examining the stack to do a manual traceback or -+ * to get a starting point for bt . 
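 *
 * Illustrative session (pids and addresses invented):
 *
 *	kdb> bt				current task on this cpu
 *	kdb> btp 1234			kernel stack of pid 1234
 *	kdb> btt 0xffff88003a1e0000	task_struct at that address
 *	kdb> bta R			all tasks in state R, with a prompt
 *					between tasks if BTAPROMPT is set
 *	kdb> btc 2			current task on cpu 2 only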
-+ */ -+ -+static int -+kdb_bt1(const struct task_struct *p, unsigned long mask, int argcount, int btaprompt) -+{ -+ int diag; -+ char buffer[2]; -+ if (kdb_getarea(buffer[0], (unsigned long)p) || -+ kdb_getarea(buffer[0], (unsigned long)(p+1)-1)) -+ return KDB_BADADDR; -+ if (!kdb_task_state(p, mask)) -+ return 0; -+ kdb_printf("Stack traceback for pid %d\n", p->pid); -+ kdb_ps1(p); -+ diag = kdba_bt_process(p, argcount); -+ if (btaprompt) { -+ kdb_getstr(buffer, sizeof(buffer), "Enter to end, to continue:"); -+ if (buffer[0] == 'q') { -+ kdb_printf("\n"); -+ return 1; -+ } -+ } -+ touch_nmi_watchdog(); -+ return 0; -+} -+ -+int -+kdb_bt(int argc, const char **argv) -+{ -+ int diag; -+ int argcount = 5; -+ int btaprompt = 1; -+ int nextarg; -+ unsigned long addr; -+ long offset; -+ -+ kdbgetintenv("BTARGS", &argcount); /* Arguments to print */ -+ kdbgetintenv("BTAPROMPT", &btaprompt); /* Prompt after each proc in bta */ -+ -+ if (strcmp(argv[0], "bta") == 0) { -+ struct task_struct *g, *p; -+ unsigned long cpu; -+ unsigned long mask = kdb_task_state_string(argc ? argv[1] : NULL); -+ if (argc == 0) -+ kdb_ps_suppressed(); -+ /* Run the active tasks first */ -+ for (cpu = 0; cpu < NR_CPUS; ++cpu) { -+ if (!cpu_online(cpu)) -+ continue; -+ p = kdb_curr_task(cpu); -+ if (kdb_bt1(p, mask, argcount, btaprompt)) -+ return 0; -+ } -+ /* Now the inactive tasks */ -+ kdb_do_each_thread(g, p) { -+ if (task_curr(p)) -+ continue; -+ if (kdb_bt1(p, mask, argcount, btaprompt)) -+ return 0; -+ } kdb_while_each_thread(g, p); -+ } else if (strcmp(argv[0], "btp") == 0) { -+ struct task_struct *p; -+ unsigned long pid; -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ if ((diag = kdbgetularg((char *)argv[1], &pid))) -+ return diag; -+ if ((p = find_task_by_pid_ns(pid, &init_pid_ns))) { -+ kdba_set_current_task(p); -+ return kdb_bt1(p, ~0UL, argcount, 0); -+ } -+ kdb_printf("No process with pid == %ld found\n", pid); -+ return 0; -+ } else if (strcmp(argv[0], "btt") == 0) { -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ if ((diag = kdbgetularg((char *)argv[1], &addr))) -+ return diag; -+ kdba_set_current_task((struct task_struct *)addr); -+ return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0); -+ } else if (strcmp(argv[0], "btc") == 0) { -+ unsigned long cpu = ~0; -+ struct kdb_running_process *krp; -+ const struct task_struct *save_current_task = kdb_current_task; -+ char buf[80]; -+ if (argc > 1) -+ return KDB_ARGCOUNT; -+ if (argc == 1 && (diag = kdbgetularg((char *)argv[1], &cpu))) -+ return diag; -+ /* Recursive use of kdb_parse, do not use argv after this point */ -+ argv = NULL; -+ if (cpu != ~0) { -+ krp = kdb_running_process + cpu; -+ if (cpu >= NR_CPUS || !krp->seqno || !cpu_online(cpu)) { -+ kdb_printf("no process for cpu %ld\n", cpu); -+ return 0; -+ } -+ sprintf(buf, "btt 0x%p\n", krp->p); -+ kdb_parse(buf); -+ return 0; -+ } -+ kdb_printf("btc: cpu status: "); -+ kdb_parse("cpu\n"); -+ for (cpu = 0, krp = kdb_running_process; cpu < NR_CPUS; ++cpu, ++krp) { -+ if (!cpu_online(cpu) || !krp->seqno) -+ continue; -+ sprintf(buf, "btt 0x%p\n", krp->p); -+ kdb_parse(buf); -+ touch_nmi_watchdog(); -+ } -+ kdba_set_current_task(save_current_task); -+ return 0; -+ } else { -+ if (argc) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, -+ &offset, NULL); -+ if (diag) -+ return diag; -+ return kdba_bt_address(addr, argcount); -+ } else { -+ return kdb_bt1(kdb_current_task, ~0UL, argcount, 0); -+ } -+ } -+ -+ /* NOTREACHED */ -+ return 0; -+} ---- /dev/null -+++ b/kdb/kdb_cmds -@@ -0,0 
+1,33 @@ -+# Initial commands for kdb, alter to suit your needs. -+# These commands are executed in kdb_init() context, no SMP, no -+# processes. Commands that require process data (including stack or -+# registers) are not reliable this early. set and bp commands should -+# be safe. Global breakpoint commands affect each cpu as it is booted. -+ -+# Standard debugging information for first level support, just type archkdb -+# or archkdbcpu or archkdbshort at the kdb prompt. -+ -+defcmd archkdb "" "First line arch debugging" -+ set BTSYMARG 1 -+ set BTARGS 9 -+ pid R -+ -archkdbcommon -+ r -+ -bta -+endefcmd -+ -+defcmd archkdbcpu "" "archkdb with only tasks on cpus" -+ set BTSYMARG 1 -+ set BTARGS 9 -+ pid R -+ -archkdbcommon -+ -btc -+endefcmd -+ -+defcmd archkdbshort "" "archkdb with less detailed backtrace" -+ set BTSYMARG 0 -+ set BTARGS 0 -+ pid R -+ -archkdbcommon -+ -bta -+endefcmd ---- /dev/null -+++ b/kdb/kdb_id.c -@@ -0,0 +1,236 @@ -+/* -+ * Kernel Debugger Architecture Independent Instruction Disassembly -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+disassemble_info kdb_di; -+ -+/* -+ * kdb_id -+ * -+ * Handle the id (instruction display) command. -+ * -+ * id [] -+ * -+ * Parameters: -+ * argc Count of arguments in argv -+ * argv Space delimited command line arguments -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic if failure. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+int -+kdb_id(int argc, const char **argv) -+{ -+ kdb_machreg_t pc; -+ int icount; -+ int diag; -+ int i; -+ char *mode; -+ int nextarg; -+ long offset = 0; -+ static kdb_machreg_t lastpc; -+ struct disassemble_info *dip = &kdb_di; -+ char lastbuf[50]; -+ unsigned long word; -+ -+ kdb_di.fprintf_func = kdb_dis_fprintf; -+ kdba_id_init(&kdb_di); -+ -+ if (argc != 1) { -+ if (lastpc == 0) { -+ return KDB_ARGCOUNT; -+ } else { -+ sprintf(lastbuf, "0x%lx", lastpc); -+ argv[1] = lastbuf; -+ argc = 1; -+ } -+ } -+ -+ -+ /* -+ * Fetch PC. First, check to see if it is a symbol, if not, -+ * try address. -+ */ -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &pc, &offset, NULL); -+ if (diag) -+ return diag; -+ kdba_check_pc(&pc); -+ if (kdb_getarea(word, pc)) -+ return(0); -+ -+ /* -+ * Number of lines to display -+ */ -+ diag = kdbgetintenv("IDCOUNT", &icount); -+ if (diag) -+ return diag; -+ -+ mode = kdbgetenv("IDMODE"); -+ diag = kdba_id_parsemode(mode, dip); -+ if (diag) { -+ return diag; -+ } -+ -+ for(i=0; i -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+static struct console *kdbcons; -+ -+#ifdef CONFIG_PPC64 -+#include -+#endif -+ -+#define CMD_BUFLEN 256 -+char kdb_prompt_str[CMD_BUFLEN]; -+ -+extern int kdb_grepping_flag; -+extern char kdb_grep_string[]; -+extern int kdb_grep_leading; -+extern int kdb_grep_trailing; -+ -+/* -+ * kdb_read -+ * -+ * This function reads a string of characters, terminated by -+ * a newline, or by reaching the end of the supplied buffer, -+ * from the current kernel debugger console device. -+ * Parameters: -+ * buffer - Address of character buffer to receive input characters. 
-+ * bufsize - size, in bytes, of the character buffer -+ * Returns: -+ * Returns a pointer to the buffer containing the received -+ * character string. This string will be terminated by a -+ * newline character. -+ * Locking: -+ * No locks are required to be held upon entry to this -+ * function. It is not reentrant - it relies on the fact -+ * that while kdb is running on any one processor all other -+ * processors will be spinning at the kdb barrier. -+ * Remarks: -+ * -+ * Davidm asks, why doesn't kdb use the console abstraction; -+ * here are some reasons: -+ * - you cannot debug the console abstraction with kdb if -+ * kdb uses it. -+ * - you rely on the correct functioning of the abstraction -+ * in the presence of general system failures. -+ * - You must acquire the console spinlock thus restricting -+ * the usability - what if the kernel fails with the spinlock -+ * held - one still wishes to debug such situations. -+ * - How about debugging before the console(s) are registered? -+ * - None of the current consoles (sercons, vt_console_driver) -+ * have read functions defined. -+ * - The standard pc keyboard and terminal drivers are interrupt -+ * driven. We cannot enable interrupts while kdb is active, -+ * so the standard input functions cannot be used by kdb. -+ * -+ * An implementation could be improved by removing the need for -+ * lock acquisition - just keep a 'struct console *kdbconsole;' global -+ * variable which refers to the preferred kdb console. -+ * -+ * The bulk of this function is architecture dependent. -+ * -+ * The buffer size must be >= 2. A buffer size of 2 means that the caller only -+ * wants a single key. -+ * -+ * An escape key could be the start of a vt100 control sequence such as \e[D -+ * (left arrow) or it could be a character in its own right. The standard -+ * method for detecting the difference is to wait for 2 seconds to see if there -+ * are any other characters. kdb is complicated by the lack of a timer service -+ * (interrupts are off), by multiple input sources and by the need to sometimes -+ * return after just one key. Escape sequence processing has to be done as -+ * states in the polling loop. 
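 *
 * For reference, the sequences recognised by that state machine collapse
 * onto the same single-byte editing keys handled further down:
 *	\e[A -> 16 (up/^P)	\e[B -> 14 (down/^N)
 *	\e[C -> 6 (right/^F)	\e[D -> 2 (left/^B)
 *	\e[1~ -> 1 (home/^A)	\e[3~ -> 4 (delete/^D)	\e[4~ -> 5 (end/^E)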
-+ */ -+ -+char * -+kdb_read(char *buffer, size_t bufsize) -+{ -+ char *cp = buffer; -+ char *bufend = buffer+bufsize-2; /* Reserve space for newline and null byte */ -+ -+ char *lastchar; -+ char *p_tmp; -+ char tmp; -+ static char tmpbuffer[CMD_BUFLEN]; -+ int len = strlen(buffer); -+ int len_tmp; -+ int tab=0; -+ int count; -+ int i; -+ int diag, dtab_count; -+ -+#define ESCAPE_UDELAY 1000 -+#define ESCAPE_DELAY 2*1000000/ESCAPE_UDELAY /* 2 seconds worth of udelays */ -+ char escape_data[5]; /* longest vt100 escape sequence is 4 bytes */ -+ char *ped = escape_data; -+ int escape_delay = 0; -+ get_char_func *f, *f_escape = NULL; -+ -+ diag = kdbgetintenv("DTABCOUNT",&dtab_count); -+ if (diag) -+ dtab_count = 30; -+ -+ if (len > 0 ) { -+ cp += len; -+ if (*(buffer+len-1) == '\n') -+ cp--; -+ } -+ -+ lastchar = cp; -+ *cp = '\0'; -+ kdb_printf("%s", buffer); -+ -+ for (;;) { -+ int key; -+ for (f = &poll_funcs[0]; ; ++f) { -+ if (*f == NULL) { -+ /* Reset NMI watchdog once per poll loop */ -+ touch_nmi_watchdog(); -+ f = &poll_funcs[0]; -+ } -+ if (escape_delay == 2) { -+ *ped = '\0'; -+ ped = escape_data; -+ --escape_delay; -+ } -+ if (escape_delay == 1) { -+ key = *ped++; -+ if (!*ped) -+ --escape_delay; -+ break; -+ } -+ key = (*f)(); -+ if (key == -1) { -+ if (escape_delay) { -+ udelay(ESCAPE_UDELAY); -+ --escape_delay; -+ } -+ continue; -+ } -+ if (bufsize <= 2) { -+ if (key == '\r') -+ key = '\n'; -+ kdb_printf("%c", key); -+ *buffer++ = key; -+ *buffer = '\0'; -+ return buffer; -+ } -+ if (escape_delay == 0 && key == '\e') { -+ escape_delay = ESCAPE_DELAY; -+ ped = escape_data; -+ f_escape = f; -+ } -+ if (escape_delay) { -+ *ped++ = key; -+ if (f_escape != f) { -+ escape_delay = 2; -+ continue; -+ } -+ if (ped - escape_data == 1) { -+ /* \e */ -+ continue; -+ } -+ else if (ped - escape_data == 2) { -+ /* \e */ -+ if (key != '[') -+ escape_delay = 2; -+ continue; -+ } else if (ped - escape_data == 3) { -+ /* \e[ */ -+ int mapkey = 0; -+ switch (key) { -+ case 'A': mapkey = 16; break; /* \e[A, up arrow */ -+ case 'B': mapkey = 14; break; /* \e[B, down arrow */ -+ case 'C': mapkey = 6; break; /* \e[C, right arrow */ -+ case 'D': mapkey = 2; break; /* \e[D, left arrow */ -+ case '1': /* dropthrough */ -+ case '3': /* dropthrough */ -+ case '4': mapkey = -1; break; /* \e[<1,3,4>], may be home, del, end */ -+ } -+ if (mapkey != -1) { -+ if (mapkey > 0) { -+ escape_data[0] = mapkey; -+ escape_data[1] = '\0'; -+ } -+ escape_delay = 2; -+ } -+ continue; -+ } else if (ped - escape_data == 4) { -+ /* \e[<1,3,4> */ -+ int mapkey = 0; -+ if (key == '~') { -+ switch (escape_data[2]) { -+ case '1': mapkey = 1; break; /* \e[1~, home */ -+ case '3': mapkey = 4; break; /* \e[3~, del */ -+ case '4': mapkey = 5; break; /* \e[4~, end */ -+ } -+ } -+ if (mapkey > 0) { -+ escape_data[0] = mapkey; -+ escape_data[1] = '\0'; -+ } -+ escape_delay = 2; -+ continue; -+ } -+ } -+ break; /* A key to process */ -+ } -+ -+ if (key != 9) -+ tab = 0; -+ switch (key) { -+ case 8: /* backspace */ -+ if (cp > buffer) { -+ if (cp < lastchar) { -+ memcpy(tmpbuffer, cp, lastchar - cp); -+ memcpy(cp-1, tmpbuffer, lastchar - cp); -+ } -+ *(--lastchar) = '\0'; -+ --cp; -+ kdb_printf("\b%s \r", cp); -+ tmp = *cp; -+ *cp = '\0'; -+ kdb_printf(kdb_prompt_str); -+ kdb_printf("%s", buffer); -+ *cp = tmp; -+ } -+ break; -+ case 13: /* enter \r */ -+ case 10: /* enter \n */ -+ *lastchar++ = '\n'; -+ *lastchar++ = '\0'; -+ kdb_printf("\n"); -+ return buffer; -+ case 4: /* Del */ -+ if(cp < lastchar) { -+ memcpy(tmpbuffer, cp+1, 
lastchar - cp -1); -+ memcpy(cp, tmpbuffer, lastchar - cp -1); -+ *(--lastchar) = '\0'; -+ kdb_printf("%s \r", cp); -+ tmp = *cp; -+ *cp = '\0'; -+ kdb_printf(kdb_prompt_str); -+ kdb_printf("%s", buffer); -+ *cp = tmp; -+ } -+ break; -+ case 1: /* Home */ -+ if(cp > buffer) { -+ kdb_printf("\r"); -+ kdb_printf(kdb_prompt_str); -+ cp = buffer; -+ } -+ break; -+ case 5: /* End */ -+ if(cp < lastchar) { -+ kdb_printf("%s", cp); -+ cp = lastchar; -+ } -+ break; -+ case 2: /* Left */ -+ if (cp > buffer) { -+ kdb_printf("\b"); -+ --cp; -+ } -+ break; -+ case 14: /* Down */ -+ memset(tmpbuffer, ' ', strlen(kdb_prompt_str)+(lastchar-buffer)); -+ *(tmpbuffer+strlen(kdb_prompt_str)+(lastchar-buffer)) = '\0'; -+ kdb_printf("\r%s\r", tmpbuffer); -+ *lastchar = (char)key; -+ *(lastchar+1) = '\0'; -+ return lastchar; -+ case 6: /* Right */ -+ if (cp < lastchar) { -+ kdb_printf("%c", *cp); -+ ++cp; -+ } -+ break; -+ case 16: /* Up */ -+ memset(tmpbuffer, ' ', strlen(kdb_prompt_str)+(lastchar-buffer)); -+ *(tmpbuffer+strlen(kdb_prompt_str)+(lastchar-buffer)) = '\0'; -+ kdb_printf("\r%s\r", tmpbuffer); -+ *lastchar = (char)key; -+ *(lastchar+1) = '\0'; -+ return lastchar; -+ case 9: /* Tab */ -+ if (tab < 2) -+ ++tab; -+ p_tmp = buffer; -+ while(*p_tmp==' ') p_tmp++; -+ if (p_tmp<=cp) { -+ memcpy(tmpbuffer, p_tmp, cp-p_tmp); -+ *(tmpbuffer + (cp-p_tmp)) = '\0'; -+ p_tmp = strrchr(tmpbuffer, ' '); -+ if (p_tmp) -+ ++p_tmp; -+ else -+ p_tmp = tmpbuffer; -+ len = strlen(p_tmp); -+ count = kallsyms_symbol_complete(p_tmp, sizeof(tmpbuffer) - (p_tmp - tmpbuffer)); -+ if (tab == 2) { -+ if (count > 0) { -+ kdb_printf("\n%d symbols are found.", count); -+ if(count>dtab_count) { -+ count=dtab_count; -+ kdb_printf(" But only first %d symbols will be printed.\nYou can change the environment variable DTABCOUNT.", count); -+ } -+ kdb_printf("\n"); -+ for(i=0;i=dtab_count)kdb_printf("..."); -+ kdb_printf("\n"); -+ kdb_printf(kdb_prompt_str); -+ kdb_printf("%s", buffer); -+ } -+ } -+ else { -+ if (count > 0) { -+ len_tmp = strlen(p_tmp); -+ strncpy(p_tmp+len_tmp,cp, lastchar-cp+1); -+ len_tmp = strlen(p_tmp); -+ strncpy(cp, p_tmp+len, len_tmp-len+1); -+ len = len_tmp - len; -+ kdb_printf("%s", cp); -+ cp+=len; -+ lastchar+=len; -+ } -+ } -+ kdb_nextline = 1; /* reset output line number */ -+ } -+ break; -+ default: -+ if (key >= 32 &&lastchar < bufend) { -+ if (cp < lastchar) { -+ memcpy(tmpbuffer, cp, lastchar - cp); -+ memcpy(cp+1, tmpbuffer, lastchar - cp); -+ *++lastchar = '\0'; -+ *cp = key; -+ kdb_printf("%s\r", cp); -+ ++cp; -+ tmp = *cp; -+ *cp = '\0'; -+ kdb_printf(kdb_prompt_str); -+ kdb_printf("%s", buffer); -+ *cp = tmp; -+ } else { -+ *++lastchar = '\0'; -+ *cp++ = key; -+ kdb_printf("%c", key); -+ } -+ } -+ break; -+ } -+ } -+} -+ -+/* -+ * kdb_getstr -+ * -+ * Print the prompt string and read a command from the -+ * input device. -+ * -+ * Parameters: -+ * buffer Address of buffer to receive command -+ * bufsize Size of buffer in bytes -+ * prompt Pointer to string to use as prompt string -+ * Returns: -+ * Pointer to command buffer. -+ * Locking: -+ * None. -+ * Remarks: -+ * For SMP kernels, the processor number will be -+ * substituted for %d, %x or %o in the prompt. 
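 *
 * For example, a prompt string of "[%d]kdb> " is displayed as "[2]kdb> "
 * while processor 2 owns the debugger (illustrative value).
 *
 * Sketch of a typical caller, mirroring the BTAPROMPT handling in
 * kdb_bt.c (a 2-byte buffer means "return after a single key"):
 *
 *	char buf[2];
 *	kdb_getstr(buf, sizeof(buf), "Enter q to end, cr to continue:");
 *	if (buf[0] == 'q')
 *		return 1;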
-+ */ -+ -+char * -+kdb_getstr(char *buffer, size_t bufsize, char *prompt) -+{ -+ if(prompt && kdb_prompt_str!=prompt) -+ strncpy(kdb_prompt_str, prompt, CMD_BUFLEN); -+ kdb_printf(kdb_prompt_str); -+ kdb_nextline = 1; /* Prompt and input resets line number */ -+ return kdb_read(buffer, bufsize); -+} -+ -+/* -+ * kdb_input_flush -+ * -+ * Get rid of any buffered console input. -+ * -+ * Parameters: -+ * none -+ * Returns: -+ * nothing -+ * Locking: -+ * none -+ * Remarks: -+ * Call this function whenever you want to flush input. If there is any -+ * outstanding input, it ignores all characters until there has been no -+ * data for approximately half a second. -+ */ -+ -+#define FLUSH_UDELAY 100 -+#define FLUSH_DELAY 500000/FLUSH_UDELAY /* 0.5 seconds worth of udelays */ -+ -+static void -+kdb_input_flush(void) -+{ -+ get_char_func *f; -+ int flush_delay = 1; -+ while (flush_delay--) { -+ touch_nmi_watchdog(); -+ for (f = &poll_funcs[0]; *f; ++f) { -+ if ((*f)() != -1) { -+ flush_delay = FLUSH_DELAY; -+ break; -+ } -+ } -+ if (flush_delay) -+ udelay(FLUSH_UDELAY); -+ } -+} -+ -+/* -+ * kdb_printf -+ * -+ * Print a string to the output device(s). -+ * -+ * Parameters: -+ * printf-like format and optional args. -+ * Returns: -+ * 0 -+ * Locking: -+ * None. -+ * Remarks: -+ * use 'kdbcons->write()' to avoid polluting 'log_buf' with -+ * kdb output. -+ * -+ * If the user is doing a cmd args | grep srch -+ * then kdb_grepping_flag is set. -+ * In that case we need to accumulate full lines (ending in \n) before -+ * searching for the pattern. -+ */ -+ -+static char kdb_buffer[256]; /* A bit too big to go on stack */ -+static char *next_avail=kdb_buffer; -+static int size_avail; -+static int suspend_grep=0; -+ -+/* -+ * search arg1 to see if it contains arg2 -+ * (kdmain.c provides flags for ^pat and pat$) -+ * -+ * return 1 for found, 0 for not found -+ */ -+int -+kdb_search_string(char *searched, char *searchfor) -+{ -+ char firstchar, *cp; -+ int len1, len2; -+ -+ /* not counting the newline at the end of "searched" */ -+ len1 = strlen(searched)-1; -+ len2 = strlen(searchfor); -+ if (len1 < len2) return 0; -+ if (kdb_grep_leading && kdb_grep_trailing && len1 != len2) return 0; -+ -+ if (kdb_grep_leading) { -+ if (!strncmp(searched, searchfor, len2)) { -+ return 1; -+ } -+ } else if (kdb_grep_trailing) { -+ if (!strncmp(searched+len1-len2, searchfor, len2)) { -+ return 1; -+ } -+ } else { -+ firstchar = *searchfor; -+ cp = searched; -+ while ((cp = strchr(cp,firstchar))) { -+ if (!strncmp(cp, searchfor, len2)) { -+ return 1; -+ } -+ cp++; -+ } -+ } -+ return 0; -+} -+ -+void -+kdb_printf(const char *fmt, ...) -+{ -+ va_list ap; -+ int diag; -+ int linecount; -+ int logging, saved_loglevel = 0; -+ int do_longjmp = 0; -+ int got_printf_lock = 0; -+ int fnd, len; -+ char *cp, *cp2, *cphold = NULL, replaced_byte = ' '; -+ char *moreprompt = "more> "; -+ struct console *c = console_drivers; -+ static DEFINE_SPINLOCK(kdb_printf_lock); -+ unsigned long uninitialized_var(flags); -+ -+ preempt_disable(); -+ /* Serialize kdb_printf if multiple cpus try to write at once. -+ * But if any cpu goes recursive in kdb, just print the output, -+ * even if it is interleaved with any other text. 
-+ */ -+ if (!KDB_STATE(PRINTF_LOCK)) { -+ KDB_STATE_SET(PRINTF_LOCK); -+ spin_lock_irqsave(&kdb_printf_lock, flags); -+ got_printf_lock = 1; -+ atomic_inc(&kdb_event); -+ } else { -+ __acquire(kdb_printf_lock); -+ } -+ atomic_inc(&kdb_8250); -+ -+ diag = kdbgetintenv("LINES", &linecount); -+ if (diag || linecount <= 1) -+ linecount = 22; -+ -+ diag = kdbgetintenv("LOGGING", &logging); -+ if (diag) -+ logging = 0; -+ -+ if (!kdb_grepping_flag || suspend_grep) { -+ /* normally, every vsnprintf starts a new buffer */ -+ next_avail = kdb_buffer; -+ size_avail = sizeof(kdb_buffer); -+ } -+ va_start(ap, fmt); -+ vsnprintf(next_avail, size_avail, fmt, ap); -+ va_end(ap); -+ -+ /* -+ * If kdb_parse() found that the command was cmd xxx | grep yyy -+ * then kdb_grepping_flag is set, and kdb_grep_string contains yyy -+ * -+ * Accumulate the print data up to a newline before searching it. -+ * (vsnprintf does null-terminate the string that it generates) -+ */ -+ -+ /* skip the search if prints are temporarily unconditional */ -+ if (! suspend_grep) { -+ -+ if (kdb_grepping_flag) { -+ cp = strchr(kdb_buffer, '\n'); -+ if (!cp) { -+ /* -+ * Special cases that don't end with newlines -+ * but should be written without one: -+ * The "[nn]kdb> " prompt should -+ * appear at the front of the buffer. -+ * -+ * The "[nn]more " prompt should also be -+ * (MOREPROMPT -> moreprompt) -+ * written * but we print that ourselves, -+ * we set the suspend_grep flag to make -+ * it unconditional. -+ * -+ */ -+ if (next_avail == kdb_buffer) { -+ /* -+ * these should occur after a newline, -+ * so they will be at the front of -+ * the buffer -+ */ -+ cp2 = kdb_buffer; -+ len = strlen(kdb_prompt_str); -+ if (!strncmp(cp2,kdb_prompt_str, len)) { -+ /* -+ * We're about to start a new -+ * command, so we can go back -+ * to normal mode. -+ */ -+ kdb_grepping_flag = 0; -+ goto kdb_printit; -+ } -+ } -+ /* no newline; don't search/write the buffer -+ until one is there */ -+ len = strlen(kdb_buffer); -+ next_avail = kdb_buffer + len; -+ size_avail = sizeof(kdb_buffer) - len; -+ goto kdb_print_out; -+ } -+ -+ /* -+ * The newline is present; print through it or discard -+ * it, depending on the results of the search. -+ */ -+ cp++; /* to byte after the newline */ -+ replaced_byte = *cp; /* remember what/where it was */ -+ cphold = cp; -+ *cp = '\0'; /* end the string for our search */ -+ -+ /* -+ * We now have a newline at the end of the string -+ * Only continue with this output if it contains the -+ * search string. -+ */ -+ fnd = kdb_search_string(kdb_buffer, kdb_grep_string); -+ if (!fnd) { -+ /* -+ * At this point the complete line at the start -+ * of kdb_buffer can be discarded, as it does -+ * not contain what the user is looking for. -+ * Shift the buffer left. -+ */ -+ *cphold = replaced_byte; -+ strcpy(kdb_buffer, cphold); -+ len = strlen(kdb_buffer); -+ next_avail = kdb_buffer + len; -+ size_avail = sizeof(kdb_buffer) - len; -+ goto kdb_print_out; -+ } -+ /* -+ * at this point the string is a full line and -+ * should be printed, up to the null. -+ */ -+ } -+ } -+kdb_printit: -+ -+ /* -+ * Write to all consoles. 
-+ */ -+#ifdef CONFIG_SPARC64 -+ if (c == NULL) -+ prom_printf("%s", kdb_buffer); -+ else -+#endif -+ -+#ifdef CONFIG_PPC64 -+ if (udbg_write) -+ udbg_write(kdb_buffer, strlen(kdb_buffer)); -+ else -+#endif -+ -+ while (c) { -+ c->write(c, kdb_buffer, strlen(kdb_buffer)); -+ touch_nmi_watchdog(); -+ c = c->next; -+ } -+ if (logging) { -+ saved_loglevel = console_loglevel; -+ console_loglevel = 0; -+ printk("%s", kdb_buffer); -+ } -+ -+ if (KDB_STATE(LONGJMP) && strchr(kdb_buffer, '\n')) -+ kdb_nextline++; -+ -+ /* check for having reached the LINES number of printed lines */ -+ if (kdb_nextline == linecount) { -+ char buf1[16]=""; -+#if defined(CONFIG_SMP) -+ char buf2[32]; -+#endif -+ -+ /* Watch out for recursion here. Any routine that calls -+ * kdb_printf will come back through here. And kdb_read -+ * uses kdb_printf to echo on serial consoles ... -+ */ -+ kdb_nextline = 1; /* In case of recursion */ -+ -+ /* -+ * Pause until cr. -+ */ -+ moreprompt = kdbgetenv("MOREPROMPT"); -+ if (moreprompt == NULL) { -+ moreprompt = "more> "; -+ } -+ -+#if defined(CONFIG_SMP) -+ if (strchr(moreprompt, '%')) { -+ sprintf(buf2, moreprompt, get_cpu()); -+ put_cpu(); -+ moreprompt = buf2; -+ } -+#endif -+ -+ kdb_input_flush(); -+ c = console_drivers; -+#ifdef CONFIG_SPARC64 -+ if (c == NULL) -+ prom_printf("%s", moreprompt); -+ else -+#endif -+ -+#ifdef CONFIG_PPC64 -+ if (udbg_write) -+ udbg_write(moreprompt, strlen(moreprompt)); -+ else -+#endif -+ -+ while (c) { -+ c->write(c, moreprompt, strlen(moreprompt)); -+ touch_nmi_watchdog(); -+ c = c->next; -+ } -+ -+ if (logging) -+ printk("%s", moreprompt); -+ -+ kdb_read(buf1, 2); /* '2' indicates to return immediately after getting one key. */ -+ kdb_nextline = 1; /* Really set output line 1 */ -+ -+ /* empty and reset the buffer: */ -+ kdb_buffer[0] = '\0'; -+ next_avail = kdb_buffer; -+ size_avail = sizeof(kdb_buffer); -+ if ((buf1[0] == 'q') || (buf1[0] == 'Q')) { -+ /* user hit q or Q */ -+ do_longjmp = 1; -+ KDB_FLAG_SET(CMD_INTERRUPT); /* command was interrupted */ -+ /* end of command output; back to normal mode */ -+ kdb_grepping_flag = 0; -+ kdb_printf("\n"); -+ } else if (buf1[0] && buf1[0] != '\n') { -+ /* user hit something other than enter */ -+ suspend_grep = 1; /* for this recursion */ -+ kdb_printf("\nOnly 'q' or 'Q' are processed at more prompt, input ignored\n"); -+ } else if (kdb_grepping_flag) { -+ /* user hit enter */ -+ suspend_grep = 1; /* for this recursion */ -+ kdb_printf("\n"); -+ } -+ kdb_input_flush(); -+ } -+ -+ /* -+ * For grep searches, shift the printed string left. -+ * replaced_byte contains the character that was overwritten with -+ * the terminating null, and cphold points to the null. -+ * Then adjust the notion of available space in the buffer. 
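 *
 * Illustrative use of the grep support described above (pattern invented):
 *
 *	kdb> ps | grep bash		print only lines containing "bash"
 *	kdb> bta | grep ^Stack		anchored match, see kdb_grep_leading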
-+ */
-+	if (kdb_grepping_flag && !suspend_grep) {
-+		*cphold = replaced_byte;
-+		strcpy(kdb_buffer, cphold);
-+		len = strlen(kdb_buffer);
-+		next_avail = kdb_buffer + len;
-+		size_avail = sizeof(kdb_buffer) - len;
-+	}
-+
-+kdb_print_out:
-+	suspend_grep = 0; /* end of what may have been a recursive call */
-+	if (logging) {
-+		console_loglevel = saved_loglevel;
-+	}
-+	atomic_dec(&kdb_8250);
-+	if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
-+		got_printf_lock = 0;
-+		spin_unlock_irqrestore(&kdb_printf_lock, flags);
-+		KDB_STATE_CLEAR(PRINTF_LOCK);
-+		atomic_dec(&kdb_event);
-+	} else {
-+		__release(kdb_printf_lock);
-+	}
-+	preempt_enable();
-+	if (do_longjmp)
-+#ifdef kdba_setjmp
-+		kdba_longjmp(&kdbjmpbuf[smp_processor_id()], 1)
-+#endif /* kdba_setjmp */
-+		;
-+}
-+
-+/*
-+ * kdb_io_init
-+ *
-+ * Initialize kernel debugger output environment.
-+ *
-+ * Parameters:
-+ *	None.
-+ * Returns:
-+ *	None.
-+ * Locking:
-+ *	None.
-+ * Remarks:
-+ * Select a console device. Only use a VT console if the user specified
-+ * or defaulted console= /^tty[0-9]*$/
-+ */
-+
-+void __init
-+kdb_io_init(void)
-+{
-+	/*
-+	 * Select a console.
-+	 */
-+	struct console *c = console_drivers;
-+	int vt_console = 0;
-+
-+	while (c) {
-+		if ((c->flags & CON_CONSDEV) && !kdbcons)
-+			kdbcons = c;
-+		if ((c->flags & CON_ENABLED) &&
-+		    strncmp(c->name, "tty", 3) == 0) {
-+			char *p = c->name + 3;
-+			while (isdigit(*p))
-+				++p;
-+			if (*p == '\0')
-+				vt_console = 1;
-+		}
-+		c = c->next;
-+	}
-+
-+	if (kdbcons == NULL) {
-+		printk(KERN_ERR "kdb: Initialization failed - no console. kdb is disabled.\n");
-+		KDB_FLAG_SET(NO_CONSOLE);
-+		kdb_on = 0;
-+	}
-+	if (!vt_console)
-+		KDB_FLAG_SET(NO_VT_CONSOLE);
-+	kdb_input_flush();
-+	return;
-+}
-+
-+#ifdef CONFIG_KDB_USB
-+
-+int kdb_no_usb = 0;
-+
-+static int __init opt_kdbnousb(char *str)
-+{
-+	kdb_no_usb = 1;
-+	return 0;
-+}
-+
-+early_param("kdbnousb", opt_kdbnousb);
-+
-+#endif
-+
-+EXPORT_SYMBOL(kdb_read);
---- /dev/null
-+++ b/kdb/kdbdereference.c
-@@ -0,0 +1,7258 @@
-+/*
-+ *
-+ * Most of this code is borrowed and adapted from the lkcd command "lcrash"
-+ * and its supporting library.
-+ *
-+ * This provides the kdb commands for casting memory structures.
-+ * It provides -+ * "print" "px", "pd" * -+ * -+ * Careful of porting the klib KL_XXX functions (they call thru a jump table -+ * that we don't use here) -+ * -+ * The kernel type information is added be insmod'g the kdb debuginfo module -+ * It loads symbolic debugging info (provided from lcrash -o), -+ * (this information originally comes from the lcrash "kerntypes" file) -+ * -+ */ -+ -+#define VMALLOC_START_IA64 0xa000000200000000 -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "modules/lcrash/klib.h" -+#include "modules/lcrash/kl_stringtab.h" -+#include "modules/lcrash/kl_btnode.h" -+#include "modules/lcrash/lc_eval.h" -+ -+#undef next_node /* collision with nodemask.h */ -+int have_debug_file = 0; -+dbg_sym_t *types_tree_head; -+dbg_sym_t *typedefs_tree_head; -+kltype_t *kltype_array; -+dbg_sym_t *dsym_types_array; -+ -+ -+EXPORT_SYMBOL(types_tree_head); -+EXPORT_SYMBOL(typedefs_tree_head); -+EXPORT_SYMBOL(kltype_array); -+EXPORT_SYMBOL(dsym_types_array); -+ -+#define C_HEX 0x0002 -+#define C_WHATIS 0x0004 -+#define C_NOVARS 0x0008 -+#define C_SIZEOF 0x0010 -+#define C_SHOWOFFSET 0x0020 -+#define C_LISTHEAD 0x0040 -+#define C_LISTHEAD_N 0x0080 /* walk using list_head.next */ -+#define C_LISTHEAD_P 0x0100 /* walk using list_head.prev */ -+#define C_BINARY 0x0200 -+#define MAX_LONG_LONG 0xffffffffffffffffULL -+klib_t kdb_klib; -+klib_t *KLP = &kdb_klib; -+k_error_t klib_error = 0; -+dbg_sym_t *type_tree = (dbg_sym_t *)NULL; -+dbg_sym_t *typedef_tree = (dbg_sym_t *)NULL; -+dbg_sym_t *func_tree = (dbg_sym_t *)NULL; -+dbg_sym_t *srcfile_tree = (dbg_sym_t *)NULL; -+dbg_sym_t *var_tree = (dbg_sym_t *)NULL; -+dbg_sym_t *xtype_tree = (dbg_sym_t *)NULL; -+dbg_hashrec_t *dbg_hash[TYPE_NUM_SLOTS]; -+int all_count, deall_count; -+void single_type(char *str); -+void sizeof_type(char *str); -+typedef struct chunk_s { -+ struct chunk_s *next; /* Must be first */ -+ struct chunk_s *prev; /* Must be second */ -+ void *addr; -+ struct bucket_s *bucketp; -+ uint32_t chunksz; /* size of memory chunk (via malloc()) */ -+ uint32_t blksz; /* Not including header */ -+ short blkcount; /* Number of blksz blocks in chunk */ -+} chunk_t; -+ -+typedef struct blkhdr_s { -+ struct blkhdr_s *next; -+ union { -+ struct blkhdr_s *prev; -+ chunk_t *chunkp; -+ } b_un; -+ int flg; -+ int size; -+} blkhdr_t; -+ -+int ptrsz64 = ((int)sizeof(void *) == 8); -+alloc_functions_t alloc_functions; -+ -+/* -+ * return 1 if addr is invalid -+ */ -+static int -+invalid_address(kaddr_t addr, int count) -+{ -+ unsigned char c; -+ unsigned long lcount; -+ /* FIXME: untested? */ -+ lcount = count; -+ /* FIXME: use kdb_verify_area */ -+ while (count--) { -+ if (kdb_getarea(c, addr)) -+ return 1; -+ } -+ return 0; -+} -+ -+/* -+ * wrappers for calls to kernel-style allocation/deallocation -+ */ -+static void * -+kl_alloc_block(int size) -+{ -+ void *vp; -+ -+ vp = kmalloc(size, GFP_KERNEL); -+ if (!vp) { -+ kdb_printf ("kmalloc of %d bytes failed\n", size); -+ } -+ /* important: the lcrash code sometimes assumes that the -+ * allocation is zeroed out -+ */ -+ memset(vp, 0, size); -+ all_count++; -+ return vp; -+} -+static void -+kl_free_block(void *vp) -+{ -+ kfree(vp); -+ deall_count++; -+ return; -+} -+ -+int -+get_value(char *s, uint64_t *value) -+{ -+ return kl_get_value(s, NULL, 0, value); -+} -+ -+/* -+ * kl_get_block() -+ * -+ * Read a size block from virtual address addr in the system memory image. 
-+ */ -+k_error_t -+kl_get_block(kaddr_t addr, unsigned size, void *bp, void *mmap) -+{ -+ if (!bp) { -+ return(KLE_NULL_BUFF); -+ } else if (!size) { -+ return(KLE_ZERO_SIZE); -+ } -+ -+ memcpy(bp, (void *)addr, size); -+ -+ return(0); -+} -+ -+/* -+ * print_value() -+ */ -+void -+print_value(char *ldstr, uint64_t value, int width) -+{ -+ int w = 0; -+ char fmtstr[12], f, s[2]="\000\000"; -+ -+ if (ldstr) { -+ kdb_printf("%s", ldstr); -+ } -+ s[0] = '#'; -+ f = 'x'; -+ if (width) { -+ if (ptrsz64) { -+ w = 18; /* due to leading "0x" */ -+ } else { -+ w = 10; /* due to leading "0x" */ -+ } -+ } -+ if (w) { -+ sprintf(fmtstr, "%%%s%d"FMT64"%c", s, w, f); -+ } else { -+ sprintf(fmtstr, "%%%s"FMT64"%c", s, f); -+ } -+ kdb_printf(fmtstr, value); -+} -+ -+/* -+ * print_list_head() -+ */ -+void -+print_list_head(kaddr_t saddr) -+{ -+ print_value("STRUCT ADDR: ", (uint64_t)saddr, 8); -+ kdb_printf("\n"); -+} -+ -+/* -+ * check_prev_ptr() -+ */ -+void -+check_prev_ptr(kaddr_t ptr, kaddr_t prev) -+{ -+ if(ptr != prev) { -+ kdb_printf("\nWARNING: Pointer broken. %#"FMTPTR"x," -+ " SHOULD BE: %#"FMTPTR"x\n", prev, ptr); -+ } -+} -+ -+/* -+ * kl_kaddr() -- Return a kernel virtual address stored in a structure -+ * -+ * Pointer 'p' points to a kernel structure -+ * of type 's.' Get the kernel address located in member 'm.' -+ */ -+kaddr_t -+kl_kaddr(void *p, char *s, char *m) -+{ -+ uint64_t *u64p; -+ int offset; -+ -+ offset = kl_member_offset(s, m); -+ u64p = (uint64_t *)(p + offset); -+ return((kaddr_t)*u64p); -+} -+ -+/* -+ * walk_structs() -- walk linked lists of kernel data structures -+ */ -+int -+walk_structs(char *s, char *f, char *member, kaddr_t addr, int flags) -+{ -+ int size, offset, mem_offset=0; -+ kaddr_t last = 0, next; -+ kltype_t *klt = (kltype_t *)NULL, *memklt=(kltype_t *)NULL; -+ unsigned long long iter_threshold = 10000; -+ -+ int counter = 0; -+ kaddr_t head=0, head_next=0, head_prev=0, entry=0; -+ kaddr_t entry_next=0, entry_prev; -+ -+ /* field name of link pointer, determine its offset in the struct. 
*/ -+ if ((offset = kl_member_offset(s, f)) == -1) { -+ kdb_printf("Could not determine offset for member %s of %s.\n", -+ f, s); -+ return 0; -+ } -+ -+ /* Get the type of the enclosing structure */ -+ if (!(klt = kl_find_type(s, (KLT_STRUCT|KLT_UNION)))) { -+ kdb_printf("Could not find the type of %s\n", s); -+ return(1); -+ } -+ -+ /* Get the struct size */ -+ if ((size = kl_struct_len(s)) == 0) { -+ kdb_printf ("could not get the length of %s\n", s); -+ return(1); -+ } -+ -+ /* test for a named member of the structure that should be displayed */ -+ if (member) { -+ memklt = kl_get_member(klt, member); -+ if (!memklt) { -+ kdb_printf ("%s has no member %s\n", s, member); -+ return 1; -+ } -+ mem_offset = kl_get_member_offset(klt, member); -+ } -+ -+ if ((next = addr)) { -+ /* get head of list (anchor) when struct list_head is used */ -+ if (flags & C_LISTHEAD) { -+ head = next; -+ if (invalid_address(head, sizeof(head))) { -+ kdb_printf ("invalid address %#lx\n", -+ head); -+ return 1; -+ } -+ /* get contents of addr struct member */ -+ head_next = kl_kaddr((void *)head, "list_head", "next"); -+ if (invalid_address(head, sizeof(head_next))) { -+ kdb_printf ("invalid address %#lx\n", -+ head_next); -+ return 1; -+ } -+ /* get prev field of anchor */ -+ head_prev = kl_kaddr((void *)head, "list_head", "prev"); -+ if (invalid_address(head, sizeof(head_prev))) { -+ kdb_printf ("invalid address %#lx\n", -+ head_prev); -+ return 1; -+ } -+ entry = 0; -+ } -+ } -+ -+ while(next && counter < iter_threshold) { -+ counter++; -+ if (counter > iter_threshold) { -+ kdb_printf("\nWARNING: Iteration threshold reached.\n"); -+ kdb_printf("Current threshold: %lld\n", iter_threshold); -+ break; -+ } -+ if(flags & C_LISTHEAD) { -+ if(!(entry)){ -+ if(flags & C_LISTHEAD_N){ -+ entry = head_next; -+ } else { -+ entry = head_prev; -+ } -+ last = head; -+ } -+ -+ if(head == entry) { -+ if(flags & C_LISTHEAD_N){ -+ check_prev_ptr(last, head_prev); -+ } else { -+ check_prev_ptr(last, head_next); -+ } -+ break; -+ } -+ -+ next = entry - offset; /* next structure */ -+ /* check that the whole structure can be addressed */ -+ if (invalid_address(next, size)) { -+ kdb_printf( -+ "invalid struct address %#lx\n", next); -+ return 1; -+ } -+ /* and validate that it points to valid addresses */ -+ entry_next = kl_kaddr((void *)entry,"list_head","next"); -+ if (invalid_address(entry_next, sizeof(entry_next))) { -+ kdb_printf("invalid address %#lx\n", -+ entry_next); -+ return 1; -+ } -+ entry_prev = kl_kaddr((void *)entry,"list_head","prev"); -+ if (invalid_address(entry_prev, sizeof(entry_prev))) { -+ kdb_printf("invalid address %#lx\n", -+ entry_prev); -+ return 1; -+ } -+ if(flags & C_LISTHEAD_N){ -+ check_prev_ptr(last, entry_prev); -+ } else { -+ check_prev_ptr(last, entry_next); -+ } -+ print_list_head(next); -+ last = entry; -+ if(flags & C_LISTHEAD_N){ -+ entry = entry_next; /* next list_head */ -+ } else { -+ entry = entry_prev; /* next list_head */ -+ } -+ } -+ -+ if (memklt) { -+ /* print named sub-structure in C-like struct format. */ -+ kl_print_member( -+ (void *)((unsigned long)next+mem_offset), -+ memklt, 0, C_HEX); -+ } else { -+ /* print entire structure in C-like struct format. 
*/ -+ kl_print_type((void *)next, klt, 0, C_HEX); -+ } -+ -+ if(!(flags & C_LISTHEAD)) { -+ last = next; -+ next = (kaddr_t) (*(uint64_t*)(next + offset)); -+ } -+ } -+ -+ return(0); -+} -+ -+/* -+ * Implement the lcrash walk -s command -+ * see lcrash cmd_walk.c -+ */ -+int -+kdb_walk(int argc, const char **argv) -+{ -+ int i, nonoptc=0, optc=0, flags=0, init_len=0; -+ char *cmd, *arg, *structp=NULL, *forwp=NULL, *memberp=NULL; -+ char *addrp=NULL; -+ uint64_t value; -+ kaddr_t start_addr; -+ -+ all_count=0; -+ deall_count=0; -+ if (!have_debug_file) { -+ kdb_printf("no debuginfo file\n"); -+ return 0; -+ } -+ /* If there is nothing to evaluate, just return */ -+ if (argc == 0) { -+ return 0; -+ } -+ cmd = (char *)*argv; /* s/b "walk" */ -+ if (strcmp(cmd,"walk")) { -+ kdb_printf("got %s, not \"walk\"\n", cmd); -+ return 0; -+ } -+ -+ for (i=1; i<=argc; i++) { -+ arg = (char *)*(argv+i); -+ if (*arg == '-') { -+ optc++; -+ if (optc > 2) { -+ kdb_printf("too many options\n"); -+ kdb_printf("see 'walkhelp'\n"); -+ return 0; -+ } -+ if (*(arg+1) == 's') { -+ continue; /* ignore -s */ -+ } else if (*(arg+1) == 'h') { -+ if ((init_len=kl_struct_len("list_head")) -+ == 0) { -+ kdb_printf( -+ "could not find list_head\n"); -+ return 0; -+ } -+ if (*(arg+2) == 'p') { -+ flags = C_LISTHEAD; -+ flags |= C_LISTHEAD_P; -+ } else if (*(arg+2) == 'n') { -+ flags = C_LISTHEAD; -+ flags |= C_LISTHEAD_N; -+ } else { -+ kdb_printf("invalid -h option <%s>\n", -+ arg); -+ kdb_printf("see 'walkhelp'\n"); -+ return 0; -+ } -+ } else { -+ kdb_printf("invalid option <%s>\n", arg); -+ kdb_printf("see 'walkhelp'\n"); -+ return 0; -+ } -+ } else { -+ nonoptc++; -+ if (nonoptc > 4) { -+ kdb_printf("too many arguments\n"); -+ kdb_printf("see 'walkhelp'\n"); -+ return 0; -+ } -+ if (nonoptc == 1) { -+ structp = arg; -+ } else if (nonoptc == 2) { -+ forwp = arg; -+ } else if (nonoptc == 3) { -+ addrp = arg; -+ } else if (nonoptc == 4) { -+ /* the member is optional; if we get -+ a fourth, the previous was the member */ -+ memberp = addrp; -+ addrp = arg; -+ } else { -+ kdb_printf("invalid argument <%s>\n", arg); -+ kdb_printf("see 'walkhelp'\n"); -+ return 0; -+ } -+ } -+ } -+ if (nonoptc < 3) { -+ kdb_printf("too few arguments\n"); -+ kdb_printf("see 'walkhelp'\n"); -+ return 0; -+ } -+ if (!(flags & C_LISTHEAD)) { -+ if ((init_len=kl_struct_len(structp)) == 0) { -+ kdb_printf("could not find %s\n", structp); -+ return 0; -+ } -+ } -+ -+ /* Get the start address of the structure */ -+ if (get_value(addrp, &value)) { -+ kdb_printf ("address %s invalid\n", addrp); -+ return 0; -+ } -+ start_addr = (kaddr_t)value; -+ if (invalid_address(start_addr, init_len)) { -+ kdb_printf ("address %#lx invalid\n", start_addr); -+ return 0; -+ } -+ -+ if (memberp) { -+ } -+ -+ if (walk_structs(structp, forwp, memberp, start_addr, flags)) { -+ kdb_printf ("walk_structs failed\n"); -+ return 0; -+ } -+ /* kdb_printf("ptc allocated:%d deallocated:%d\n", -+ all_count, deall_count); */ -+ return 0; -+} -+ -+/* -+ * Implement the lcrash px (print, pd) command -+ * see lcrash cmd_print.c -+ * -+ * px -+ * e.g. px *(task_struct *)
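/*
 * Illustrative sketch, not part of the original patch: the core idea behind
 * the "walk -hn" traversal above is plain list_head arithmetic -- subtract
 * the offset of the embedded link member from the link's address to recover
 * the enclosing structure.  Everything below (struct item, walk_forward) is
 * a made-up userspace stand-in for whatever structure is actually walked.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct item {
	int value;
	struct list_head node;		/* embedded link, like task_struct.tasks */
};

static void walk_forward(struct list_head *anchor)
{
	size_t off = offsetof(struct item, node);
	struct list_head *entry;

	for (entry = anchor->next; entry != anchor; entry = entry->next) {
		/* same step as "next = entry - offset" in walk_structs() */
		struct item *ip = (struct item *)((char *)entry - off);
		printf("item at %p, value %d\n", (void *)ip, ip->value);
	}
}

int main(void)
{
	struct item a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };
	struct list_head head = { &a.node, &b.node };

	a.node.next = &b.node;	a.node.prev = &head;
	b.node.next = &head;	b.node.prev = &a.node;
	walk_forward(&head);
	return 0;
}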
-+ */ -+int -+kdb_debuginfo_print(int argc, const char **argv) -+{ -+ /* argc does not count the command itself, which is argv[0] */ -+ char *cmd, *next, *end, *exp, *cp; -+ unsigned char *buf; -+ int i, j, iflags; -+ node_t *np; -+ uint64_t flags = 0; -+ -+ /* If there is nothing to evaluate, just return */ -+ if (argc == 0) { -+ return 0; -+ } -+ all_count=0; -+ deall_count=0; -+ -+ cmd = (char *)*argv; -+ -+ /* Set up the flags value. If this command was invoked via -+ * "pd" or "px", then make sure the appropriate flag is set. -+ */ -+ flags = 0; -+ if (!strcmp(cmd, "pd") || !strcmp(cmd, "print")) { -+ flags = 0; -+ } else if (!strcmp(cmd, "px")) { -+ flags |= C_HEX; -+ } else if (!strcmp(cmd, "whatis")) { -+ if (argc != 1) { -+ kdb_printf("usage: whatis \n"); -+ return 0; -+ } -+ cp = (char *)*(argv+1); -+ single_type(cp); -+ /* kdb_printf("allocated:%d deallocated:%d\n", -+ all_count, deall_count); */ -+ return 0; -+ } else if (!strcmp(cmd, "sizeof")) { -+ if (!have_debug_file) { -+ kdb_printf("no debuginfo file\n"); -+ return 0; -+ } -+ if (argc != 1) { -+ kdb_printf("usage: sizeof type\n"); -+ return 0; -+ } -+ cp = (char *)*(argv+1); -+ sizeof_type(cp); -+ return 0; -+ } else { -+ kdb_printf("command error: %s\n", cmd); -+ return 0; -+ } -+ -+ /* -+ * Count the number of bytes necessary to hold the entire expression -+ * string. -+ */ -+ for (i=1, j=0; i <= argc; i++) { -+ j += (strlen(*(argv+i)) + 1); -+ } -+ -+ /* -+ * Allocate space for the expression string and copy the individual -+ * arguments into it. -+ */ -+ buf = kl_alloc_block(j); -+ if (!buf) { -+ return 0; -+ } -+ -+ for (i=1; i <= argc; i++) { -+ strcat(buf, *(argv+i)); -+ /* put spaces between arguments */ -+ if (i < argc) { -+ strcat(buf, " "); -+ } -+ } -+ -+ /* Walk through the expression string, expression by expression. -+ * Note that a comma (',') is the delimiting character between -+ * expressions. -+ */ -+ next = buf; -+ while (next) { -+ if ((end = strchr(next, ','))) { -+ *end = (char)0; -+ } -+ -+ /* Copy the next expression to a separate expression string. -+ * A separate expresison string is necessary because it is -+ * likely to get freed up in eval() when variables get expanded. -+ */ -+ i = strlen(next)+1; -+ exp = (char *)kl_alloc_block(i); -+ if (!exp) { -+ return 0; -+ } -+ strcpy(exp, next); -+ -+ /* Evaluate the expression */ -+ np = eval(&exp, 0); -+ if (!np || eval_error) { -+ print_eval_error(cmd, exp, -+ (error_token ? 
error_token : (char*)NULL), -+ eval_error, CMD_NAME_FLG); -+ if (np) { -+ free_nodes(np); -+ } -+ kl_free_block(buf); -+ kl_free_block(exp); -+ free_eval_memory(); -+ return 0; -+ } -+ iflags = flags; -+ if (print_eval_results(np, iflags)) { -+ free_nodes(np); -+ kl_free_block(buf); -+ free_eval_memory(); -+ return 0; -+ } -+ kl_free_block(exp); -+ -+ if (end) { -+ next = end + 1; -+ kdb_printf(" "); -+ } else { -+ next = (char*)NULL; -+ kdb_printf("\n"); -+ } -+ free_nodes(np); -+ } -+ free_eval_memory(); -+ kl_free_block(buf); -+ /* kdb_printf("allocated:%d deallocated:%d\n", -+ all_count, deall_count); */ -+ return 0; -+} -+ -+/* -+ * Display help for the px command -+ */ -+int -+kdb_pxhelp(int argc, const char **argv) -+{ -+ if (have_debug_file) { -+ kdb_printf ("Some examples of using the px command:\n"); -+ kdb_printf (" the whole structure:\n"); -+ kdb_printf (" px *(task_struct *)0xe0000...\n"); -+ kdb_printf (" one member:\n"); -+ kdb_printf (" px (*(task_struct *)0xe0000...)->comm\n"); -+ kdb_printf (" the address of a member\n"); -+ kdb_printf (" px &((task_struct *)0xe0000...)->children\n"); -+ kdb_printf (" a structure pointed to by a member:\n"); -+ kdb_printf (" px ((*(class_device *)0xe0000...)->class)->name\n"); -+ kdb_printf (" array element:\n"); -+ kdb_printf (" px (cache_sizes *)0xa0000...[0]\n"); -+ kdb_printf (" px (task_struct *)(0xe0000...)->cpus_allowed.bits[0]\n"); -+ } else { -+ kdb_printf ("There is no debug info file.\n"); -+ kdb_printf ("The px/pd/print commands can only evaluate "); -+ kdb_printf ("arithmetic expressions.\n"); -+ } -+ return 0; -+} -+ -+/* -+ * Display help for the walk command -+ */ -+int -+kdb_walkhelp(int argc, const char **argv) -+{ -+ if (!have_debug_file) { -+ kdb_printf("no debuginfo file\n"); -+ return 0; -+ } -+ kdb_printf ("Using the walk command:\n"); -+ kdb_printf (" (only the -s (symbolic) form is supported, so -s is ignored)\n"); -+ kdb_printf ("\n"); -+ kdb_printf (" If the list is not linked with list_head structures:\n"); -+ kdb_printf (" walk [-s] struct name-of-forward-pointer address\n"); -+ kdb_printf (" example: walk xyz_struct next 0xe00....\n"); -+ kdb_printf ("\n"); -+ kdb_printf (" If the list is linked with list_head structures, use -hn\n"); -+ kdb_printf (" to walk the 'next' list, -hp for the 'prev' list\n"); -+ kdb_printf (" walk -h[n|p] struct name-of-forward-pointer [member-to-show] address-of-list-head\n"); -+ kdb_printf (" example, to show the entire task_struct:\n"); -+ kdb_printf (" walk -hn task_struct tasks 0xe000....\n"); -+ kdb_printf (" example, to show the task_struct member comm:\n"); -+ kdb_printf (" walk -hn task_struct tasks comm 0xe000....\n"); -+ kdb_printf (" (address is not the address of first member's list_head, "); -+ kdb_printf ("but of the anchoring list_head\n"); -+ return 0; -+} -+ -+/* -+ * dup_block() -+ */ -+void * -+dup_block(void *b, int len) -+{ -+ void *b2; -+ -+ if ((b2 = kl_alloc_block(len))) { -+ memcpy(b2, b, len); /* dst, src, sz */ -+ } -+ return(b2); -+} -+ -+/* -+ * kl_reset_error() -+ */ -+void -+kl_reset_error(void) -+{ -+ klib_error = 0; -+} -+ -+/* -+ * given a symbol name, look up its address -+ * -+ * in lcrash, this would return a pointer to the syment_t in -+ * a binary tree of them -+ * -+ * In this one, look up the symbol in the standard kdb way, -+ * which fills in the kdb_symtab_t. -+ * Then fill in the global syment_t "lkup_syment" -- assuming -+ * we'll only need one at a time! 
-+ * -+ * kl_lkup_symname returns the address of syment_t if the symbol is -+ * found, else null. -+ * -+ * Note: we allocate a syment_t the caller should kfree it -+ */ -+syment_t * -+kl_lkup_symname (char *cp) -+{ -+ syment_t *sp; -+ kdb_symtab_t kdb_symtab; -+ -+ if (kdbgetsymval(cp, &kdb_symtab)) { -+ sp = (syment_t *)kl_alloc_block(sizeof(syment_t)); -+ sp->s_addr = (kaddr_t)kdb_symtab.sym_start; -+ KL_ERROR = 0; -+ return (sp); -+ } else { -+ /* returns 0 if the symbol is not found */ -+ KL_ERROR = KLE_INVALID_VALUE; -+ return ((syment_t *)0); -+ } -+} -+ -+/* -+ * kl_get_ra() -+ * -+ * This function returns its own return address. -+ * Usefule when trying to capture where we came from. -+ */ -+void* -+kl_get_ra(void) -+{ -+ return (__builtin_return_address(0)); -+} -+ -+/* start kl_util.c */ -+/* -+ * Definitions for the do_math() routine. -+ */ -+#define M_ADD '+' -+#define M_SUBTRACT '-' -+#define M_MULTIPLY '*' -+#define M_DIVIDE '/' -+ -+/* -+ * do_math() -- Calculate some math values based on a string argument -+ * passed into the function. For example, if you use: -+ * -+ * 0xffffc000*2+6/5-3*19-8 -+ * -+ * And you will get the value 0xffff7fc0 back. I could -+ * probably optimize this a bit more, but right now, it -+ * works, which is good enough for me. -+ */ -+static uint64_t -+do_math(char *str) -+{ -+ int i = 0; -+ char *buf, *loc; -+ uint64_t value1, value2; -+ syment_t *sp; -+ -+ buf = (char *)kl_alloc_block((strlen(str) + 1)); -+ sprintf(buf, "%s", str); -+ for (i = strlen(str); i >= 0; i--) { -+ if ((str[i] == M_ADD) || (str[i] == M_SUBTRACT)) { -+ buf[i] = '\0'; -+ value1 = do_math(buf); -+ value2 = do_math(&str[i+1]); -+ kl_free_block((void *)buf); -+ if (str[i] == M_SUBTRACT) { -+ return value1 - value2; -+ } else { -+ return value1 + value2; -+ } -+ } -+ } -+ -+ for (i = strlen(str); i >= 0; i--) { -+ if ((str[i] == M_MULTIPLY) || (str[i] == M_DIVIDE)) { -+ buf[i] = '\0'; -+ value1 = do_math(buf); -+ value2 = do_math(&str[i+1]); -+ kl_free_block((void *)buf); -+ if (str[i] == M_MULTIPLY) { -+ return (value1 * value2); -+ } else { -+ if (value2 == 0) { -+ /* handle divide by zero */ -+ /* XXX -- set proper error code */ -+ klib_error = 1; -+ return (0); -+ } else { -+ return (value1 / value2); -+ } -+ } -+ } -+ } -+ -+ /* -+ * Otherwise, just process the value, and return it. -+ */ -+ sp = kl_lkup_symname(buf); -+ if (KL_ERROR) { -+ KL_ERROR = 0; -+ value2 = kl_strtoull(buf, &loc, 10); -+ if (((!value2) && (buf[0] != '0')) || (*loc) || -+ (!strncmp(buf, "0x", 2)) || (!strncmp(buf, "0X", 2))) { -+ value1 = (kaddr_t)kl_strtoull(buf, (char**)NULL, 16); -+ } else { -+ value1 = (unsigned)kl_strtoull(buf, (char**)NULL, 10); -+ } -+ } else { -+ value1 = (kaddr_t)sp->s_addr; -+ kl_free_block((void *)sp); -+ } -+ kl_free_block((void *)buf); -+ return (value1); -+} -+/* -+ * kl_get_value() -- Translate numeric input strings -+ * -+ * A generic routine for translating an input string (param) in a -+ * number of dfferent ways. If the input string is an equation -+ * (contains the characters '+', '-', '/', and '*'), then perform -+ * the math evaluation and return one of the following modes (if -+ * mode is passed): -+ * -+ * 0 -- if the resulting value is <= elements, if elements (number -+ * of elements in a table) is passed. -+ * -+ * 1 -- if the first character in param is a pound sign ('#'). -+ * -+ * 3 -- the numeric result of an equation. 
-+ * -+ * If the input string is NOT an equation, mode (if passed) will be -+ * set in one of the following ways (depending on the contents of -+ * param and elements). -+ * -+ * o When the first character of param is a pound sign ('#'), mode -+ * is set equal to one and the trailing numeric value (assumed to -+ * be decimal) is returned. -+ * -+ * o When the first two characters in param are "0x" or "0X," or -+ * when when param contains one of the characers "abcdef," or when -+ * the length of the input value is eight characters. mode is set -+ * equal to two and the numeric value contained in param is -+ * translated as hexadecimal and returned. -+ * -+ * o The value contained in param is translated as decimal and mode -+ * is set equal to zero. The resulting value is then tested to see -+ * if it exceeds elements (if passed). If it does, then value is -+ * translated as hexadecimal and mode is set equal to two. -+ * -+ * Note that mode is only set when a pointer is passed in the mode -+ * paramater. Also note that when elements is set equal to zero, any -+ * non-hex (as determined above) value not starting with a pound sign -+ * will be translated as hexadecimal (mode will be set equal to two) -- -+ * IF the length of the string of characters is less than 16 (kaddr_t). -+ * -+ */ -+int -+kl_get_value(char *param, int *mode, int elements, uint64_t *value) -+{ -+ char *loc; -+ uint64_t v; -+ -+ kl_reset_error(); -+ -+ /* Check to see if we are going to need to do any math -+ */ -+ if (strpbrk(param, "+-/*")) { -+ if (!strncmp(param, "#", 1)) { -+ v = do_math(¶m[1]); -+ if (mode) { -+ *mode = 1; -+ } -+ } else { -+ v = do_math(param); -+ if (mode) { -+ if (elements && (*value <= elements)) { -+ *mode = 0; -+ } else { -+ *mode = 3; -+ } -+ } -+ } -+ } else { -+ if (!strncmp(param, "#", 1)) { -+ if (!strncmp(param, "0x", 2) -+ || !strncmp(param, "0X", 2) -+ || strpbrk(param, "abcdef")) { -+ v = kl_strtoull(¶m[1], &loc, 16); -+ } else { -+ v = kl_strtoull(¶m[1], &loc, 10); -+ } -+ if (loc) { -+ KL_ERROR = KLE_INVALID_VALUE; -+ return (1); -+ } -+ if (mode) { -+ *mode = 1; -+ } -+ } else if (!strncmp(param, "0x", 2) || !strncmp(param, "0X", 2) -+ || strpbrk(param, "abcdef")) { -+ v = kl_strtoull(param, &loc, 16); -+ if (loc) { -+ KL_ERROR = KLE_INVALID_VALUE; -+ return (1); -+ } -+ if (mode) { -+ *mode = 2; /* HEX VALUE */ -+ } -+ } else if (elements || (strlen(param) < 16) || -+ (strlen(param) > 16)) { -+ v = kl_strtoull(param, &loc, 10); -+ if (loc) { -+ KL_ERROR = KLE_INVALID_VALUE; -+ return (1); -+ } -+ if (elements && (v >= elements)) { -+ v = (kaddr_t)kl_strtoull(param, -+ (char**)NULL, 16); -+ if (mode) { -+ *mode = 2; /* HEX VALUE */ -+ } -+ } else if (mode) { -+ *mode = 0; -+ } -+ } else { -+ v = kl_strtoull(param, &loc, 16); -+ if (loc) { -+ KL_ERROR = KLE_INVALID_VALUE; -+ return (1); -+ } -+ if (mode) { -+ *mode = 2; /* ASSUME HEX VALUE */ -+ } -+ } -+ } -+ *value = v; -+ return (0); -+} -+/* end kl_util.c */ -+ -+/* start kl_libutil.c */ -+static int -+valid_digit(char c, int base) -+{ -+ switch(base) { -+ case 2: -+ if ((c >= '0') && (c <= '1')) { -+ return(1); -+ } else { -+ return(0); -+ } -+ case 8: -+ if ((c >= '0') && (c <= '7')) { -+ return(1); -+ } else { -+ return(0); -+ } -+ case 10: -+ if ((c >= '0') && (c <= '9')) { -+ return(1); -+ } else { -+ return(0); -+ } -+ case 16: -+ if (((c >= '0') && (c <= '9')) -+ || ((c >= 'a') && (c <= 'f')) -+ || ((c >= 'A') && (c <= 'F'))) { -+ return(1); -+ } else { -+ return(0); -+ } -+ } -+ return(0); -+} -+ -+static int 
-+digit_value(char c, int base, int *val) -+{ -+ if (!valid_digit(c, base)) { -+ return(1); -+ } -+ switch (base) { -+ case 2: -+ case 8: -+ case 10: -+ *val = (int)((int)(c - 48)); -+ break; -+ case 16: -+ if ((c >= 'a') && (c <= 'f')) { -+ *val = ((int)(c - 87)); -+ } else if ((c >= 'A') && (c <= 'F')) { -+ *val = ((int)(c - 55)); -+ } else { -+ *val = ((int)(c - 48)); -+ } -+ } -+ return(0); -+} -+ -+uint64_t -+kl_strtoull(char *str, char **loc, int base) -+{ -+ int dval; -+ uint64_t i = 1, v, value = 0; -+ char *c, *cp = str; -+ -+ *loc = (char *)NULL; -+ if (base == 0) { -+ if (!strncmp(cp, "0x", 2) || !strncmp(cp, "0X", 2)) { -+ base = 16; -+ } else if (cp[0] == '0') { -+ if (cp[1] == 'b') { -+ base = 2; -+ } else { -+ base = 8; -+ } -+ } else if (strpbrk(cp, "abcdefABCDEF")) { -+ base = 16; -+ } else { -+ base = 10; -+ } -+ } -+ if ((base == 8) && (*cp == '0')) { -+ cp += 1; -+ } else if ((base == 2) && !strncmp(cp, "0b", 2)) { -+ cp += 2; -+ } else if ((base == 16) && -+ (!strncmp(cp, "0x", 2) || !strncmp(cp, "0X", 2))) { -+ cp += 2; -+ } -+ c = &cp[strlen(cp) - 1]; -+ while (c >= cp) { -+ -+ if (digit_value(*c, base, &dval)) { -+ if (loc) { -+ *loc = c; -+ } -+ return(value); -+ } -+ v = dval * i; -+ if ((MAX_LONG_LONG - value) < v) { -+ return(MAX_LONG_LONG); -+ } -+ value += v; -+ i *= (uint64_t)base; -+ c--; -+ } -+ return(value); -+} -+/* end kl_libutil.c */ -+ -+/* -+ * dbg_hash_sym() -+ */ -+void -+dbg_hash_sym(uint64_t typenum, dbg_sym_t *stp) -+{ -+ dbg_hashrec_t *shp, *hshp; -+ -+ if ((typenum == 0) || (!stp)) { -+ return; -+ } -+ shp = (dbg_hashrec_t *)kl_alloc_block(sizeof(dbg_hashrec_t)); -+ shp->h_typenum = typenum; -+ shp->h_ptr = stp; -+ shp->h_next = (dbg_hashrec_t *)NULL; -+ if ((hshp = dbg_hash[TYPE_NUM_HASH(typenum)])) { -+ while (hshp->h_next) { -+ hshp = hshp->h_next; -+ } -+ hshp->h_next = shp; -+ } else { -+ dbg_hash[TYPE_NUM_HASH(typenum)] = shp; -+ } -+} -+ -+/* -+ * dbg_find_sym() -+ */ -+dbg_sym_t * -+dbg_find_sym(char *name, int type, uint64_t typenum) -+{ -+ dbg_sym_t *stp = (dbg_sym_t *)NULL; -+ -+ if (name && strlen(name)) { -+ /* Cycle through the type flags and see if any records are -+ * present. Note that if multiple type flags or DBG_ALL is -+ * passed in, only the first occurance of 'name' will be -+ * found and returned. If name exists in multiple trees, -+ * then multiple searches are necessary to find them. -+ */ -+ if (type & DBG_TYPE) { -+ if ((stp = (dbg_sym_t *)kl_find_btnode((btnode_t *) -+ type_tree, name, (int *)NULL))) { -+ goto found_sym; -+ } -+ } -+ if (type & DBG_TYPEDEF) { -+ if ((stp = (dbg_sym_t *)kl_find_btnode((btnode_t *) -+ typedef_tree, name, (int *)NULL))) { -+ goto found_sym; -+ } -+ } -+ if (!stp) { -+ return((dbg_sym_t*)NULL); -+ } -+ } -+found_sym: -+ if (typenum) { -+ dbg_hashrec_t *hshp; -+ -+ if (stp) { -+ if (stp->sym_typenum == typenum) { -+ return(stp); -+ } -+ } else if ((hshp = dbg_hash[TYPE_NUM_HASH(typenum)])) { -+ while (hshp) { -+ if (hshp->h_typenum == typenum) { -+ return(hshp->h_ptr); -+ } -+ hshp = hshp->h_next; -+ } -+ } -+ } -+ return(stp); -+} -+ -+/* -+ * kl_find_type() -- find a KLT type by name. 
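/*
 * Illustrative sketch, not part of the original patch: a simplified userspace
 * take on the right-to-left accumulation performed by kl_strtoull() above
 * (base guessed from a "0x"/"0b"/leading-zero prefix, overflow handling
 * omitted).  simple_strtoull() and digit_val() are made-up names.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int digit_val(char c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	if (c >= 'A' && c <= 'F') return c - 'A' + 10;
	return -1;
}

static uint64_t simple_strtoull(const char *s)
{
	int base = 10;
	uint64_t place = 1, value = 0;
	const char *c;

	if (!strncmp(s, "0x", 2) || !strncmp(s, "0X", 2)) { base = 16; s += 2; }
	else if (!strncmp(s, "0b", 2)) { base = 2; s += 2; }
	else if (s[0] == '0' && s[1]) { base = 8; s += 1; }

	if (!*s)
		return 0;
	/* walk from the least significant digit, as kl_strtoull() does */
	for (c = s + strlen(s) - 1; c >= s; c--) {
		int d = digit_val(*c);
		if (d < 0 || d >= base)
			break;		/* stop at the first non-digit */
		value += (uint64_t)d * place;
		place *= (uint64_t)base;
	}
	return value;
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       (unsigned long long)simple_strtoull("0x1f"),
	       (unsigned long long)simple_strtoull("0b101"),
	       (unsigned long long)simple_strtoull("42"));
	return 0;
}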
-+ */ -+kltype_t * -+kl_find_type(char *name, int tnum) -+{ -+ dbg_sym_t *stp; -+ kltype_t *kltp = (kltype_t *)NULL; -+ -+ if (!have_debug_file) { -+ kdb_printf("no debuginfo file\n"); -+ return kltp; -+ } -+ -+ if (!tnum || IS_TYPE(tnum)) { -+ if ((stp = dbg_find_sym(name, DBG_TYPE, 0))) { -+ kltp = (kltype_t *)stp->sym_kltype; -+ if (tnum && !(kltp->kl_type & tnum)) { -+ /* We have found a type by this name -+ * but it does not have the right -+ * type number (e.g., we're looking -+ * for a struct and we don't find -+ * a KLT_STRUCT type by this name). -+ */ -+ return((kltype_t *)NULL); -+ } -+ } -+ } -+ if (!tnum || IS_TYPEDEF(tnum)) { -+ if ((stp = dbg_find_sym(name, DBG_TYPEDEF, 0))) { -+ kltp = (kltype_t *)stp->sym_kltype; -+ } -+ } -+ return(kltp); -+} -+ -+/* -+ * kl_first_btnode() -- non-recursive implementation. -+ */ -+btnode_t * -+kl_first_btnode(btnode_t *np) -+{ -+ if (!np) { -+ return((btnode_t *)NULL); -+ } -+ -+ /* Walk down the left side 'til the end... -+ */ -+ while (np->bt_left) { -+ np = np->bt_left; -+ } -+ return(np); -+} -+ -+/* -+ * kl_next_btnode() -- non-recursive implementation. -+ */ -+btnode_t * -+kl_next_btnode(btnode_t *node) -+{ -+ btnode_t *np = node, *parent; -+ -+ if (np) { -+ if (np->bt_right) { -+ return(kl_first_btnode(np->bt_right)); -+ } else { -+ parent = np->bt_parent; -+next: -+ if (parent) { -+ if (parent->bt_left == np) { -+ return(parent); -+ } -+ np = parent; -+ parent = parent->bt_parent; -+ goto next; -+ } -+ } -+ } -+ return((btnode_t *)NULL); -+} -+ -+/* -+ * dbg_next_sym() -+ */ -+dbg_sym_t * -+dbg_next_sym(dbg_sym_t *stp) -+{ -+ dbg_sym_t *next_stp; -+ -+ next_stp = (dbg_sym_t *)kl_next_btnode((btnode_t *)stp); -+ return(next_stp); -+} -+ -+/* -+ * kl_prev_btnode() -- non-recursive implementation. -+ */ -+btnode_t * -+kl_prev_btnode(btnode_t *node) -+{ -+ btnode_t *np = node, *parent; -+ -+ if (np) { -+ if (np->bt_left) { -+ np = np->bt_left; -+ while (np->bt_right) { -+ np = np->bt_right; -+ } -+ return(np); -+ } -+ parent = np->bt_parent; -+next: -+ if (parent) { -+ if (parent->bt_right == np) { -+ return(parent); -+ } -+ np = parent; -+ parent = parent->bt_parent; -+ goto next; -+ } -+ } -+ return((btnode_t *)NULL); -+} -+ -+/* -+ * dbg_prev_sym() -+ */ -+dbg_sym_t * -+dbg_prev_sym(dbg_sym_t *stp) -+{ -+ dbg_sym_t *prev_stp; -+ -+ prev_stp = (dbg_sym_t *)kl_prev_btnode((btnode_t *)stp); -+ return(prev_stp); -+} -+ -+/* -+ * kl_find_next_type() -- find next KLT type -+ */ -+kltype_t * -+kl_find_next_type(kltype_t *kltp, int type) -+{ -+ kltype_t *nkltp = NULL; -+ dbg_sym_t *nstp; -+ -+ if (kltp && kltp->kl_ptr) { -+ nstp = (dbg_sym_t *)kltp->kl_ptr; -+ nkltp = (kltype_t *)nstp->sym_kltype; -+ if (type) { -+ while(nkltp && !(nkltp->kl_type & type)) { -+ if ((nstp = dbg_next_sym(nstp))) { -+ nkltp = (kltype_t *)nstp->sym_kltype; -+ } else { -+ nkltp = (kltype_t *)NULL; -+ } -+ } -+ } -+ } -+ return(nkltp); -+} -+ -+/* -+ * dbg_first_sym() -+ */ -+dbg_sym_t * -+dbg_first_sym(int type) -+{ -+ dbg_sym_t *stp = (dbg_sym_t *)NULL; -+ -+ switch(type) { -+ case DBG_TYPE: -+ stp = (dbg_sym_t *) -+ kl_first_btnode((btnode_t *)type_tree); -+ break; -+ case DBG_TYPEDEF: -+ stp = (dbg_sym_t *) -+ kl_first_btnode((btnode_t *)typedef_tree); -+ break; -+ } -+ return(stp); -+} -+ -+/* -+ * kl_first_type() -+ */ -+kltype_t * -+kl_first_type(int tnum) -+{ -+ kltype_t *kltp = NULL; -+ dbg_sym_t *stp; -+ -+ if (IS_TYPE(tnum)) { -+ /* If (tnum == KLT_TYPE), then return the first type -+ * record, regardless of the type. 
Otherwise, search
-+	 * for the first type that maps into tnum.
-+	 */
-+		if ((stp = dbg_first_sym(DBG_TYPE))) {
-+			kltp = (kltype_t *)stp->sym_kltype;
-+			if (tnum != KLT_TYPE) {
-+				while (kltp && !(kltp->kl_type & tnum)) {
-+					if ((stp = dbg_next_sym(stp))) {
-+						kltp = (kltype_t *)stp->sym_kltype;
-+					} else {
-+						kltp = (kltype_t *)NULL;
-+					}
-+				}
-+			}
-+		}
-+	} else if (IS_TYPEDEF(tnum)) {
-+		if ((stp = dbg_first_sym(DBG_TYPEDEF))) {
-+			kltp = (kltype_t *)stp->sym_kltype;
-+		}
-+	}
-+	return(kltp);
-+}
-+
-+/*
-+ * kl_next_type()
-+ */
-+kltype_t *
-+kl_next_type(kltype_t *kltp)
-+{
-+	dbg_sym_t *stp, *nstp;
-+	kltype_t *nkltp = (kltype_t *)NULL;
-+
-+	if (!kltp) {
-+		return((kltype_t *)NULL);
-+	}
-+	stp = (dbg_sym_t *)kltp->kl_ptr;
-+	if ((nstp = dbg_next_sym(stp))) {
-+		nkltp = (kltype_t *)nstp->sym_kltype;
-+	}
-+	return(nkltp);
-+}
-+
-+/*
-+ * kl_prev_type()
-+ */
-+kltype_t *
-+kl_prev_type(kltype_t *kltp)
-+{
-+	dbg_sym_t *stp, *pstp;
-+	kltype_t *pkltp = (kltype_t *)NULL;
-+
-+	if (!kltp) {
-+		return((kltype_t *)NULL);
-+	}
-+	stp = (dbg_sym_t *)kltp->kl_ptr;
-+	if ((pstp = dbg_prev_sym(stp))) {
-+		pkltp = (kltype_t *)pstp->sym_kltype;
-+	}
-+	return(pkltp);
-+}
-+
-+/*
-+ * kl_realtype()
-+ */
-+kltype_t *
-+kl_realtype(kltype_t *kltp, int tnum)
-+{
-+	kltype_t *rkltp = kltp;
-+
-+	while (rkltp) {
-+		if (tnum && (rkltp->kl_type == tnum)) {
-+			break;
-+		}
-+		if (!rkltp->kl_realtype) {
-+			break;
-+		}
-+		if (rkltp->kl_realtype == rkltp) {
-+			break;
-+		}
-+		rkltp = rkltp->kl_realtype;
-+		if (rkltp == kltp) {
-+			break;
-+		}
-+	}
-+	return(rkltp);
-+}
-+
-+/*
-+ * dbg_find_typenum()
-+ */
-+dbg_type_t *
-+dbg_find_typenum(uint64_t typenum)
-+{
-+	dbg_sym_t *stp;
-+	dbg_type_t *sp = (dbg_type_t *)NULL;
-+
-+	if ((stp = dbg_find_sym(0, DBG_TYPE, typenum))) {
-+		sp = (dbg_type_t *)stp->sym_kltype;
-+	}
-+	return(sp);
-+}
-+
-+/*
-+ * find type by typenum
-+ */
-+kltype_t *
-+kl_find_typenum(uint64_t typenum)
-+{
-+	kltype_t *kltp;
-+
-+	kltp = (kltype_t *)dbg_find_typenum(typenum);
-+	return(kltp);
-+}
-+
-+/*
-+ * kl_find_btnode() -- non-recursive implementation.
-+ */ -+btnode_t * -+_kl_find_btnode(btnode_t *np, char *key, int *max_depth, size_t len) -+{ -+ int ret; -+ btnode_t *next, *prev; -+ -+ if (np) { -+ if (max_depth) { -+ (*max_depth)++; -+ } -+ next = np; -+again: -+ if (len) { -+ ret = strncmp(key, next->bt_key, len); -+ } else { -+ ret = strcmp(key, next->bt_key); -+ } -+ if (ret == 0) { -+ if ((prev = kl_prev_btnode(next))) { -+ if (len) { -+ ret = strncmp(key, prev->bt_key, len); -+ } else { -+ ret = strcmp(key, prev->bt_key); -+ } -+ if (ret == 0) { -+ next = prev; -+ goto again; -+ } -+ } -+ return(next); -+ } else if (ret < 0) { -+ if ((next = next->bt_left)) { -+ goto again; -+ } -+ } else { -+ if ((next = next->bt_right)) { -+ goto again; -+ } -+ } -+ } -+ return((btnode_t *)NULL); -+} -+ -+/* -+ * kl_type_size() -+ */ -+int -+kl_type_size(kltype_t *kltp) -+{ -+ kltype_t *rkltp; -+ -+ if (!kltp) { -+ return(0); -+ } -+ if (!(rkltp = kl_realtype(kltp, 0))) { -+ return(0); -+ } -+ return(rkltp->kl_size); -+} -+ -+/* -+ * kl_struct_len() -+ */ -+int -+kl_struct_len(char *s) -+{ -+ kltype_t *kltp; -+ -+ if ((kltp = kl_find_type(s, (KLT_TYPES)))) { -+ return kl_type_size(kltp); -+ } -+ return(0); -+} -+ -+/* -+ * kl_get_member() -+ */ -+kltype_t * -+kl_get_member(kltype_t *kltp, char *f) -+{ -+ kltype_t *mp; -+ -+ if ((mp = kltp->kl_member)) { -+ while (mp) { -+ if (mp->kl_flags & TYP_ANONYMOUS_FLG) { -+ kltype_t *amp; -+ -+ if ((amp = kl_get_member(mp->kl_realtype, f))) { -+ return(amp); -+ } -+ } else if (!strcmp(mp->kl_name, f)) { -+ break; -+ } -+ mp = mp->kl_member; -+ } -+ } -+ return(mp); -+} -+ -+/* -+ * kl_member() -+ */ -+kltype_t * -+kl_member(char *s, char *f) -+{ -+ kltype_t *kltp, *mp = NULL; -+ -+ if (!(kltp = kl_find_type(s, (KLT_STRUCT|KLT_UNION)))) { -+ if ((kltp = kl_find_type(s, KLT_TYPEDEF))) { -+ kltp = kl_realtype(kltp, 0); -+ } -+ } -+ if (kltp) { -+ mp = kl_get_member(kltp, f); -+ } -+ return(mp); -+} -+ -+ -+/* -+ * kl_get_member_offset() -+ */ -+int -+kl_get_member_offset(kltype_t *kltp, char *f) -+{ -+ kltype_t *mp; -+ -+ if ((mp = kltp->kl_member)) { -+ while (mp) { -+ if (mp->kl_flags & TYP_ANONYMOUS_FLG) { -+ int off; -+ -+ /* Drill down to see if the member we are looking for is in -+ * an anonymous union or struct. Since this call is recursive, -+ * the drill down may actually be multi-layer. 
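/*
 * Illustrative sketch, not part of the original patch: the strcmp-driven
 * descent used by _kl_find_btnode() above -- go left when the key sorts
 * before the node, right when it sorts after, stop on an exact match.
 * struct node and bst_find() are made-up stand-ins for btnode_t.
 */
#include <stdio.h>
#include <string.h>

struct node {
	const char *key;
	struct node *left, *right;
};

static struct node *bst_find(struct node *np, const char *key)
{
	while (np) {
		int ret = strcmp(key, np->key);
		if (ret == 0)
			return np;
		np = (ret < 0) ? np->left : np->right;
	}
	return NULL;
}

int main(void)
{
	struct node a = { "alpha", NULL, NULL };
	struct node c = { "gamma", NULL, NULL };
	struct node b = { "beta", &a, &c };

	printf("%s\n", bst_find(&b, "gamma") ? "found" : "missing");
	return 0;
}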
-+ */ -+ off = kl_get_member_offset(mp->kl_realtype, f); -+ if (off >= 0) { -+ return(mp->kl_offset + off); -+ } -+ } else if (!strcmp(mp->kl_name, f)) { -+ return(mp->kl_offset); -+ } -+ mp = mp->kl_member; -+ } -+ } -+ return(-1); -+} -+ -+/* -+ * kl_member_offset() -+ */ -+int -+kl_member_offset(char *s, char *f) -+{ -+ int off = -1; -+ kltype_t *kltp; -+ -+ if (!(kltp = kl_find_type(s, (KLT_STRUCT|KLT_UNION)))) { -+ if ((kltp = kl_find_type(s, KLT_TYPEDEF))) { -+ kltp = kl_realtype(kltp, 0); -+ } -+ } -+ if (kltp) { -+ off = kl_get_member_offset(kltp, f); -+ } -+ return(off); -+} -+ -+/* -+ * kl_is_member() -+ */ -+int -+kl_is_member(char *s, char *f) -+{ -+ kltype_t *mp; -+ -+ if ((mp = kl_member(s, f))) { -+ return(1); -+ } -+ return(0); -+} -+ -+/* -+ * kl_member_size() -+ */ -+int -+kl_member_size(char *s, char *f) -+{ -+ kltype_t *mp; -+ -+ if ((mp = kl_member(s, f))) { -+ return(mp->kl_size); -+ } -+ return(0); -+} -+ -+#define TAB_SPACES 8 -+#define LEVEL_INDENT(level, flags) {\ -+ int i, j; \ -+ if (!(flags & NO_INDENT)) { \ -+ for (i = 0; i < level; i++) { \ -+ for (j = 0; j < TAB_SPACES; j++) { \ -+ kdb_printf(" "); \ -+ } \ -+ }\ -+ } \ -+} -+#define PRINT_NL(flags) \ -+ if (!(flags & SUPPRESS_NL)) { \ -+ kdb_printf("\n"); \ -+ } -+#define PRINT_SEMI_COLON(level, flags) \ -+ if (level && (!(flags & SUPPRESS_SEMI_COLON))) { \ -+ kdb_printf(";"); \ -+ } -+ -+/* -+ * print_realtype() -+ */ -+static void -+print_realtype(kltype_t *kltp) -+{ -+ kltype_t *rkltp; -+ -+ if ((rkltp = kltp->kl_realtype)) { -+ while (rkltp && rkltp->kl_realtype) { -+ rkltp = rkltp->kl_realtype; -+ } -+ if (rkltp->kl_type == KLT_BASE) { -+ kdb_printf(" (%s)", rkltp->kl_name); -+ } -+ } -+} -+ -+int align_chk = 0; -+/* -+ * kl_print_uint16() -+ * -+ */ -+void -+kl_print_uint16(void *ptr, int flags) -+{ -+ unsigned long long a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * * dump core) -+ * */ -+ if (align_chk && (uaddr_t)ptr % 16) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(unsigned long long *) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("%#llx", a); -+ } else if (flags & C_BINARY) { -+ kdb_printf("0b"); -+ kl_binary_print(a); -+ } else { -+ kdb_printf("%llu", a); -+ } -+} -+ -+#if 0 -+/* -+ * kl_print_float16() -+ * -+ */ -+void -+kl_print_float16(void *ptr, int flags) -+{ -+ double a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * * dump core) -+ * */ -+ if (align_chk && (uaddr_t)ptr % 16) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(double*) ptr; -+ kdb_printf("%f", a); -+} -+#endif -+ -+/* -+ * kl_print_int16() -+ * -+ */ -+void -+kl_print_int16(void *ptr, int flags) -+{ -+ long long a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * * dump core) -+ * */ -+ if (align_chk && (uaddr_t)ptr % 16) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(long long *) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("%#llx", a); -+ } else if (flags & C_BINARY) { -+ kdb_printf("0b"); -+ kl_binary_print(a); -+ } else { -+ kdb_printf("%lld", a); -+ } -+} -+ -+/* -+ * kl_print_int8() -+ */ -+void -+kl_print_int8(void *ptr, int flags) -+{ -+ long long a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core) -+ */ -+ if (align_chk && (uaddr_t)ptr % 8) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(long long *) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("%#llx", a); -+ } else if (flags & C_BINARY) { -+ 
kdb_printf("0b"); -+ kl_binary_print(a); -+ } else { -+ kdb_printf("%lld", a); -+ } -+} -+ -+#if 0 -+/* -+ * kl_print_float8() -+ */ -+void -+kl_print_float8(void *ptr, int flags) -+{ -+ double a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core) -+ */ -+ if (align_chk && (uaddr_t)ptr % 8) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(double*) ptr; -+ kdb_printf("%f", a); -+} -+#endif -+ -+/* -+ * kl_print_uint8() -+ */ -+void -+kl_print_uint8(void *ptr, int flags) -+{ -+ unsigned long long a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core) -+ */ -+ if (align_chk && (uaddr_t)ptr % 8) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(unsigned long long *) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("%#llx", a); -+ } else if (flags & C_BINARY) { -+ kdb_printf("0b"); -+ kl_binary_print(a); -+ } else { -+ kdb_printf("%llu", a); -+ } -+} -+ -+/* -+ * kl_print_int4() -+ */ -+void -+kl_print_int4(void *ptr, int flags) -+{ -+ int32_t a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core -+ */ -+ if (align_chk && (uaddr_t)ptr % 4) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(int32_t*) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("0x%x", a); -+ } else if (flags & C_BINARY) { -+ uint64_t value = a & 0xffffffff; -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ kdb_printf("%d", a); -+ } -+} -+ -+#if 0 -+/* -+ * kl_print_float4() -+ */ -+void -+kl_print_float4(void *ptr, int flags) -+{ -+ float a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core) -+ */ -+ if (align_chk && (uaddr_t)ptr % 4) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(float*) ptr; -+ kdb_printf("%f", a); -+} -+#endif -+ -+/* -+ * kl_print_uint4() -+ */ -+void -+kl_print_uint4(void *ptr, int flags) -+{ -+ uint32_t a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core) -+ */ -+ if (align_chk && (uaddr_t)ptr % 4) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(uint32_t*) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("0x%x", a); -+ } else if (flags & C_BINARY) { -+ uint64_t value = a & 0xffffffff; -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ kdb_printf("%u", a); -+ } -+} -+ -+/* -+ * kl_print_int2() -+ */ -+void -+kl_print_int2(void *ptr, int flags) -+{ -+ int16_t a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core -+ */ -+ if (align_chk && (uaddr_t)ptr % 2) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(int16_t*) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("0x%hx", a); -+ } else if (flags & C_BINARY) { -+ uint64_t value = a & 0xffff; -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ kdb_printf("%hd", a); -+ } -+} -+ -+/* -+ * kl_print_uint2() -+ */ -+void -+kl_print_uint2(void *ptr, int flags) -+{ -+ uint16_t a; -+ -+ /* Make sure the pointer is properly aligned (or we will -+ * dump core -+ */ -+ if (align_chk && (uaddr_t)ptr % 2) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ a = *(uint16_t*) ptr; -+ if (flags & C_HEX) { -+ kdb_printf("0x%hx", a); -+ } else if (flags & C_BINARY) { -+ uint64_t value = a & 0xffff; -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ kdb_printf("%hu", a); -+ } -+} -+ -+/* -+ * kl_print_char() -+ */ -+void -+kl_print_char(void *ptr, int flags) -+{ -+ char c; -+ -+ 
if (flags & C_HEX) { -+ kdb_printf("0x%x", (*(char *)ptr) & 0xff); -+ } else if (flags & C_BINARY) { -+ uint64_t value = (*(char *)ptr) & 0xff; -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ c = *(char *)ptr; -+ -+ kdb_printf("\'\\%03o\'", (unsigned char)c); -+ switch (c) { -+ case '\a' : -+ kdb_printf(" = \'\\a\'"); -+ break; -+ case '\b' : -+ kdb_printf(" = \'\\b\'"); -+ break; -+ case '\t' : -+ kdb_printf(" = \'\\t\'"); -+ break; -+ case '\n' : -+ kdb_printf(" = \'\\n\'"); -+ break; -+ case '\f' : -+ kdb_printf(" = \'\\f\'"); -+ break; -+ case '\r' : -+ kdb_printf(" = \'\\r\'"); -+ break; -+ case '\e' : -+ kdb_printf(" = \'\\e\'"); -+ break; -+ default : -+ if( !iscntrl((unsigned char) c) ) { -+ kdb_printf(" = \'%c\'", c); -+ } -+ break; -+ } -+ } -+} -+ -+/* -+ * kl_print_uchar() -+ */ -+void -+kl_print_uchar(void *ptr, int flags) -+{ -+ if (flags & C_HEX) { -+ kdb_printf("0x%x", *(unsigned char *)ptr); -+ } else if (flags & C_BINARY) { -+ uint64_t value = (*(unsigned char *)ptr) & 0xff; -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ kdb_printf("%u", *(unsigned char *)ptr); -+ } -+} -+ -+/* -+ * kl_print_base() -+ */ -+void -+kl_print_base(void *ptr, int size, int encoding, int flags) -+{ -+ /* FIXME: untested */ -+ if (invalid_address((kaddr_t)ptr, size)) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", (uaddr_t)ptr); -+ return; -+ } -+ switch (size) { -+ -+ case 1: -+ if (encoding == ENC_UNSIGNED) { -+ kl_print_uchar(ptr, flags); -+ } else { -+ kl_print_char(ptr, flags); -+ } -+ break; -+ -+ case 2: -+ if (encoding == ENC_UNSIGNED) { -+ kl_print_uint2(ptr, flags); -+ } else { -+ kl_print_int2(ptr, flags); -+ } -+ break; -+ -+ case 4: -+ if (encoding == ENC_UNSIGNED) { -+ kl_print_uint4(ptr, flags); -+ } else if (encoding == ENC_FLOAT) { -+ printk("error: print of 4-byte float\n"); -+ /* kl_print_float4(ptr, flags); */ -+ } else { -+ kl_print_int4(ptr, flags); -+ } -+ break; -+ -+ case 8: -+ if (encoding == ENC_UNSIGNED) { -+ kl_print_uint8(ptr, flags); -+ } else if (encoding == ENC_FLOAT) { -+ printk("error: print of 8-byte float\n"); -+ /* kl_print_float8(ptr, flags); */ -+ } else { -+ kl_print_int8(ptr, flags); -+ } -+ break; -+ -+ case 16: -+ if (encoding == ENC_UNSIGNED) { -+ /* Ex: unsigned long long */ -+ kl_print_uint16(ptr, flags); -+ } else if (encoding == ENC_FLOAT) { -+ printk("error: print of 16-byte float\n"); -+ /* Ex: long double */ -+ /* kl_print_float16(ptr, flags); */ -+ } else { -+ /* Ex: long long */ -+ kl_print_int16(ptr, flags); -+ } -+ break; -+ -+ default: -+ break; -+ } -+} -+ -+/* -+ * kl_print_base_value() -+ */ -+void -+kl_print_base_value(void *ptr, kltype_t *kltp, int flags) -+{ -+ kltype_t *rkltp=NULL; -+ -+ if (kltp->kl_type != KLT_BASE) { -+ if (!(rkltp = kltp->kl_realtype)) { -+ return; -+ } -+ if (rkltp->kl_type != KLT_BASE) { -+ return; -+ } -+ } else { -+ rkltp = kltp; -+ } -+ kl_print_base(ptr, rkltp->kl_size, rkltp->kl_encoding, flags); -+} -+ -+/* -+ * kl_print_typedef_type() -+ */ -+void -+kl_print_typedef_type( -+ void *ptr, -+ kltype_t *kltp, -+ int level, -+ int flags) -+{ -+ char *name; -+ kltype_t *rkltp; -+ -+ if (ptr) { -+ rkltp = kltp->kl_realtype; -+ while (rkltp->kl_type == KLT_TYPEDEF) { -+ if (rkltp->kl_realtype) { -+ rkltp = rkltp->kl_realtype; -+ } -+ } -+ if (rkltp->kl_type == KLT_POINTER) { -+ kl_print_pointer_type(ptr, kltp, level, flags); -+ return; -+ } -+ switch (rkltp->kl_type) { -+ case KLT_BASE: -+ kl_print_base_type(ptr, kltp, -+ level, flags); -+ break; -+ -+ case KLT_UNION: -+ case KLT_STRUCT: 
-+ kl_print_struct_type(ptr, kltp, -+ level, flags); -+ break; -+ -+ case KLT_ARRAY: -+ kl_print_array_type(ptr, kltp, -+ level, flags); -+ break; -+ -+ case KLT_ENUMERATION: -+ kl_print_enumeration_type(ptr, -+ kltp, level, flags); -+ break; -+ -+ default: -+ kl_print_base_type(ptr, kltp, -+ level, flags); -+ break; -+ } -+ } else { -+ LEVEL_INDENT(level, flags); -+ if (flags & NO_REALTYPE) { -+ rkltp = kltp; -+ } else { -+ rkltp = kltp->kl_realtype; -+ while (rkltp && rkltp->kl_type == KLT_POINTER) { -+ rkltp = rkltp->kl_realtype; -+ } -+ } -+ if (!rkltp) { -+ if (SUPPRESS_NAME) { -+ kdb_printf(""); -+ } else { -+ kdb_printf( "typedef %s;", -+ kltp->kl_name); -+ } -+ return; -+ } -+ if (rkltp->kl_type == KLT_FUNCTION) { -+ if (kltp->kl_realtype->kl_type == KLT_POINTER) { -+ kdb_printf("typedef %s(*%s)();", -+ kltp->kl_typestr, kltp->kl_name); -+ } else { -+ kdb_printf( "typedef %s(%s)();", -+ kltp->kl_typestr, kltp->kl_name); -+ } -+ } else if (rkltp->kl_type == KLT_ARRAY) { -+ kl_print_array_type(ptr, rkltp, level, flags); -+ } else if (rkltp->kl_type == KLT_TYPEDEF) { -+ if (!(name = rkltp->kl_name)) { -+ name = rkltp->kl_typestr; -+ } -+ -+ if (SUPPRESS_NAME) { -+ kdb_printf("%s", name); -+ } else { -+ kdb_printf("typedef %s%s;", -+ name, kltp->kl_name); -+ } -+ print_realtype(rkltp); -+ } else { -+ kl_print_type(ptr, rkltp, level, flags); -+ } -+ PRINT_NL(flags); -+ } -+} -+ -+/* -+ * kl_print_pointer_type() -+ */ -+void -+kl_print_pointer_type( -+ void *ptr, -+ kltype_t *kltp, -+ int level, -+ int flags) -+{ -+ kltype_t *itp; -+ -+ if (kltp->kl_type == KLT_MEMBER) { -+ itp = kltp->kl_realtype; -+ } else { -+ itp = kltp; -+ } -+ -+ /* See if this is a pointer to a function. If it is, then it -+ * has to be handled differently... -+ */ -+ while (itp->kl_type == KLT_POINTER) { -+ if ((itp = itp->kl_realtype)) { -+ if (itp->kl_type == KLT_FUNCTION) { -+ kl_print_function_type(ptr, -+ kltp, level, flags); -+ return; -+ } -+ } else { -+ LEVEL_INDENT(level, flags); -+ kdb_printf("%s%s;\n", -+ kltp->kl_typestr, kltp->kl_name); -+ return; -+ } -+ } -+ -+ LEVEL_INDENT(level, flags); -+ if (ptr) { -+ kaddr_t tmp; -+ tmp = *(kaddr_t *)ptr; -+ flags |= SUPPRESS_SEMI_COLON; -+ if(kltp->kl_name){ -+ if (*(kaddr_t *)ptr) { -+ kdb_printf("%s = 0x%"FMTPTR"x", -+ kltp->kl_name, tmp); -+ } else { -+ kdb_printf("%s = (nil)", kltp->kl_name); -+ } -+ } else { -+ if (tmp != 0) { -+ kdb_printf("0x%"FMTPTR"x", tmp); -+ } else { -+ kdb_printf( "(nil)"); -+ } -+ } -+ } else { -+ if (kltp->kl_typestr) { -+ if (kltp->kl_name && !(flags & SUPPRESS_NAME)) { -+ kdb_printf("%s%s", -+ kltp->kl_typestr, kltp->kl_name); -+ } else { -+ kdb_printf("%s", kltp->kl_typestr); -+ } -+ } else { -+ kdb_printf(""); -+ } -+ } -+ PRINT_SEMI_COLON(level, flags); -+ PRINT_NL(flags); -+} -+ -+/* -+ * kl_print_function_type() -+ */ -+void -+kl_print_function_type( -+ void *ptr, -+ kltype_t *kltp, -+ int level, -+ int flags) -+{ -+ LEVEL_INDENT(level, flags); -+ if (ptr) { -+ kaddr_t a; -+ -+ a = *(kaddr_t *)ptr; -+ kdb_printf("%s = 0x%"FMTPTR"x", kltp->kl_name, a); -+ } else { -+ if (flags & SUPPRESS_NAME) { -+ kdb_printf("%s(*)()", kltp->kl_typestr); -+ } else { -+ kdb_printf("%s(*%s)();", -+ kltp->kl_typestr, kltp->kl_name); -+ } -+ } -+ PRINT_NL(flags); -+} -+ -+/* -+ * kl_print_array_type() -+ */ -+void -+kl_print_array_type(void *ptr, kltype_t *kltp, int level, int flags) -+{ -+ int i, count = 0, anon = 0, size, low, high, multi = 0; -+ char typestr[128], *name, *p; -+ kltype_t *rkltp, *etp, *retp; -+ -+ if (kltp->kl_type != 
KLT_ARRAY) { -+ if ((rkltp = kltp->kl_realtype)) { -+ while (rkltp->kl_type != KLT_ARRAY) { -+ if (!(rkltp = rkltp->kl_realtype)) { -+ break; -+ } -+ } -+ } -+ if (!rkltp) { -+ LEVEL_INDENT(level, flags); -+ kdb_printf(""); -+ PRINT_SEMI_COLON(level, flags); -+ PRINT_NL(flags); -+ return; -+ } -+ } else { -+ rkltp = kltp; -+ } -+ -+ etp = rkltp->kl_elementtype; -+ if (!etp) { -+ LEVEL_INDENT(level, flags); -+ kdb_printf(" %s", rkltp->kl_name); -+ PRINT_SEMI_COLON(level, flags); -+ PRINT_NL(flags); -+ return; -+ } -+ -+ /* Set retp to point to the actual element type. This is necessary -+ * for multi-dimensional arrays, which link using the kl_elementtype -+ * member. -+ */ -+ retp = etp; -+ while (retp->kl_type == KLT_ARRAY) { -+ retp = retp->kl_elementtype; -+ } -+ low = rkltp->kl_low_bounds + 1; -+ high = rkltp->kl_high_bounds; -+ -+ if (ptr) { -+ -+ p = ptr; -+ -+ if ((retp->kl_size == 1) && (retp->kl_encoding == ENC_CHAR)) { -+ if (kltp->kl_type == KLT_MEMBER) { -+ LEVEL_INDENT(level, flags); -+ } -+ if (flags & SUPPRESS_NAME) { -+ kdb_printf("\""); -+ flags &= ~SUPPRESS_NAME; -+ } else { -+ kdb_printf("%s = \"", kltp->kl_name); -+ } -+ for (i = 0; i < high; i++) { -+ if (*(char*)p == 0) { -+ break; -+ } -+ kdb_printf("%c", *(char *)p); -+ p++; -+ } -+ kdb_printf("\""); -+ PRINT_NL(flags); -+ } else { -+ if (kltp->kl_type == KLT_MEMBER) { -+ LEVEL_INDENT(level, flags); -+ } -+ -+ if (flags & SUPPRESS_NAME) { -+ kdb_printf("{\n"); -+ flags &= ~SUPPRESS_NAME; -+ } else { -+ kdb_printf("%s = {\n", kltp->kl_name); -+ } -+ -+ if (retp->kl_type == KLT_POINTER) { -+ size = sizeof(void *); -+ } else { -+ while (retp->kl_realtype) { -+ retp = retp->kl_realtype; -+ } -+ size = retp->kl_size; -+ } -+ if ((retp->kl_type != KLT_STRUCT) && -+ (retp->kl_type != KLT_UNION)) { -+ /* Turn off the printing of names for all -+ * but structs and unions. 
-+ */ -+ flags |= SUPPRESS_NAME; -+ } -+ for (i = low; i <= high; i++) { -+ -+ LEVEL_INDENT(level + 1, flags); -+ kdb_printf("[%d] ", i); -+ -+ switch (retp->kl_type) { -+ case KLT_POINTER : -+ kl_print_pointer_type( -+ p, retp, level, -+ flags|NO_INDENT); -+ break; -+ -+ case KLT_TYPEDEF: -+ kl_print_typedef_type( -+ p, retp, level, -+ flags|NO_INDENT); -+ break; -+ -+ case KLT_BASE: -+ kl_print_base_value(p, -+ retp, flags|NO_INDENT); -+ kdb_printf("\n"); -+ break; -+ -+ case KLT_ARRAY: -+ kl_print_array_type(p, retp, -+ level + 1, -+ flags|SUPPRESS_NAME); -+ break; -+ -+ case KLT_STRUCT: -+ case KLT_UNION: -+ kl_print_struct_type(p, -+ retp, level + 1, -+ flags|NO_INDENT); -+ break; -+ -+ default: -+ kl_print_base_value( -+ p, retp, -+ flags|NO_INDENT); -+ kdb_printf("\n"); -+ break; -+ } -+ p = (void *)((uaddr_t)p + size); -+ } -+ LEVEL_INDENT(level, flags); -+ kdb_printf("}"); -+ PRINT_SEMI_COLON(level, flags); -+ PRINT_NL(flags); -+ } -+ } else { -+ if (rkltp) { -+ count = (rkltp->kl_high_bounds - -+ rkltp->kl_low_bounds) + 1; -+ } else { -+ count = 1; -+ } -+ -+ if (!strcmp(retp->kl_typestr, "struct ") || -+ !strcmp(retp->kl_typestr, "union ")) { -+ anon = 1; -+ } -+next_dimension: -+ switch (retp->kl_type) { -+ -+ case KLT_UNION: -+ case KLT_STRUCT: -+ if (anon) { -+ if (multi) { -+ kdb_printf("[%d]", count); -+ break; -+ } -+ kl_print_struct_type(ptr, retp, level, -+ flags| -+ SUPPRESS_NL| -+ SUPPRESS_SEMI_COLON); -+ if (kltp->kl_type == KLT_MEMBER) { -+ kdb_printf(" %s[%d]", -+ kltp->kl_name, count); -+ } else { -+ kdb_printf(" [%d]", count); -+ } -+ break; -+ } -+ /* else drop through */ -+ -+ default: -+ LEVEL_INDENT(level, flags); -+ if (multi) { -+ kdb_printf("[%d]", count); -+ break; -+ } -+ name = kltp->kl_name; -+ if (retp->kl_type == KLT_TYPEDEF) { -+ strcpy(typestr, retp->kl_name); -+ strcat(typestr, " "); -+ } else { -+ strcpy(typestr, retp->kl_typestr); -+ } -+ if (!name || (flags & SUPPRESS_NAME)) { -+ kdb_printf("%s[%d]", typestr, count); -+ } else { -+ kdb_printf("%s%s[%d]", -+ typestr, name, count); -+ } -+ } -+ if (etp->kl_type == KLT_ARRAY) { -+ count = etp->kl_high_bounds - etp->kl_low_bounds + 1; -+ etp = etp->kl_elementtype; -+ multi++; -+ goto next_dimension; -+ } -+ PRINT_SEMI_COLON(level, flags); -+ PRINT_NL(flags); -+ } -+} -+ -+/* -+ * kl_print_enumeration_type() -+ */ -+void -+kl_print_enumeration_type( -+ void *ptr, -+ kltype_t *kltp, -+ int level, -+ int flags) -+{ -+ unsigned long long val = 0; -+ kltype_t *mp, *rkltp; -+ -+ rkltp = kl_realtype(kltp, KLT_ENUMERATION); -+ if (ptr) { -+ switch (kltp->kl_size) { -+ case 1: -+ val = *(unsigned long long *)ptr; -+ break; -+ -+ case 2: -+ val = *(uint16_t *)ptr; -+ break; -+ -+ case 4: -+ val = *(uint32_t *)ptr; -+ break; -+ -+ case 8: -+ val = *(uint64_t *)ptr; -+ break; -+ } -+ mp = rkltp->kl_member; -+ while (mp) { -+ if (mp->kl_value == val) { -+ break; -+ } -+ mp = mp->kl_member; -+ } -+ LEVEL_INDENT(level, flags); -+ if (mp) { -+ kdb_printf("%s = (%s=%lld)", -+ kltp->kl_name, mp->kl_name, val); -+ } else { -+ kdb_printf("%s = %lld", kltp->kl_name, val); -+ } -+ PRINT_NL(flags); -+ } else { -+ LEVEL_INDENT(level, flags); -+ kdb_printf ("%s {", kltp->kl_typestr); -+ mp = rkltp->kl_member; -+ while (mp) { -+ kdb_printf("%s = %d", mp->kl_name, mp->kl_value); -+ if ((mp = mp->kl_member)) { -+ kdb_printf(", "); -+ } -+ } -+ mp = kltp; -+ if (level) { -+ kdb_printf("} %s;", mp->kl_name); -+ } else { -+ kdb_printf("};"); -+ } -+ PRINT_NL(flags); -+ } -+} -+ -+/* -+ * kl_binary_print() -+ */ -+void 
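/*
 * Illustrative sketch, not part of the original patch: mapping a raw integer
 * back to an enumerator name by scanning a name/value table, which is the
 * same lookup kl_print_enumeration_type() above performs along the kl_member
 * chain.  The table contents here are invented for the example.
 */
#include <stdio.h>

struct enum_member {
	const char *name;
	long long value;
};

static const char *enum_name(const struct enum_member *tab, int n, long long val)
{
	int i;

	for (i = 0; i < n; i++)
		if (tab[i].value == val)
			return tab[i].name;
	return NULL;	/* unnamed value: caller prints just the number */
}

int main(void)
{
	static const struct enum_member state[] = {
		{ "RUNNING", 0 }, { "SLEEPING", 1 }, { "STOPPED", 4 },
	};
	long long val = 4;
	const char *name = enum_name(state, 3, val);

	if (name)
		printf("state = (%s=%lld)\n", name, val);
	else
		printf("state = %lld\n", val);
	return 0;
}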
-+kl_binary_print(uint64_t num) -+{ -+ int i, pre = 1; -+ -+ for (i = 63; i >= 0; i--) { -+ if (num & ((uint64_t)1 << i)) { -+ kdb_printf("1"); -+ if (pre) { -+ pre = 0; -+ } -+ } else { -+ if (!pre) { -+ kdb_printf("0"); -+ } -+ } -+ } -+ if (pre) { -+ kdb_printf("0"); -+ } -+} -+ -+/* -+ * kl_get_bit_value() -+ * -+ * x = byte_size, y = bit_size, z = bit_offset -+ */ -+uint64_t -+kl_get_bit_value(void *ptr, unsigned int x, unsigned int y, unsigned int z) -+{ -+ uint64_t value=0, mask; -+ -+ /* handle x bytes of buffer -- doing just memcpy won't work -+ * on big endian architectures -+ */ -+ switch (x) { -+ case 5: -+ case 6: -+ case 7: -+ case 8: -+ x = 8; -+ value = *(uint64_t*) ptr; -+ break; -+ case 3: -+ case 4: -+ x = 4; -+ value = *(uint32_t*) ptr; -+ break; -+ case 2: -+ value = *(uint16_t*) ptr; -+ break; -+ case 1: -+ value = *(uint8_t *)ptr; -+ break; -+ default: -+ /* FIXME: set KL_ERROR */ -+ return(0); -+ } -+ /* -+ o FIXME: correct handling of overlapping fields -+ */ -+ -+ /* goto bit offset */ -+ value = value >> z; -+ -+ /* mask bit size bits */ -+ mask = (((uint64_t)1 << y) - 1); -+ return (value & mask); -+} -+ -+/* -+ * kl_print_bit_value() -+ * -+ * x = byte_size, y = bit_size, z = bit_offset -+ */ -+void -+kl_print_bit_value(void *ptr, int x, int y, int z, int flags) -+{ -+ unsigned long long value; -+ -+ value = kl_get_bit_value(ptr, x, y, z); -+ if (flags & C_HEX) { -+ kdb_printf("%#llx", value); -+ } else if (flags & C_BINARY) { -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ kdb_printf("%lld", value); -+ } -+} -+ -+/* -+ * kl_print_base_type() -+ */ -+void -+kl_print_base_type(void *ptr, kltype_t *kltp, int level, int flags) -+{ -+ LEVEL_INDENT(level, flags); -+ if (ptr) { -+ if (!(flags & SUPPRESS_NAME)) { -+ kdb_printf ("%s = ", kltp->kl_name); -+ } -+ } -+ if (kltp->kl_type == KLT_MEMBER) { -+ if (kltp->kl_bit_size < (kltp->kl_size * 8)) { -+ if (ptr) { -+ kl_print_bit_value(ptr, kltp->kl_size, -+ kltp->kl_bit_size, -+ kltp->kl_bit_offset, flags); -+ } else { -+ if (kltp->kl_name) { -+ kdb_printf ("%s%s :%d;", -+ kltp->kl_typestr, -+ kltp->kl_name, -+ kltp->kl_bit_size); -+ } else { -+ kdb_printf ("%s :%d;", -+ kltp->kl_typestr, -+ kltp->kl_bit_size); -+ } -+ } -+ PRINT_NL(flags); -+ return; -+ } -+ } -+ if (ptr) { -+ kltype_t *rkltp; -+ -+ rkltp = kl_realtype(kltp, 0); -+ if (rkltp->kl_encoding == ENC_UNDEFINED) { -+ /* This is a void value -+ */ -+ kdb_printf(""); -+ } else { -+ kl_print_base(ptr, kltp->kl_size, -+ rkltp->kl_encoding, flags); -+ } -+ } else { -+ if (kltp->kl_type == KLT_MEMBER) { -+ if (flags & SUPPRESS_NAME) { -+ kdb_printf ("%s", kltp->kl_typestr); -+ } else { -+ if (kltp->kl_name) { -+ kdb_printf("%s%s;", kltp->kl_typestr, -+ kltp->kl_name); -+ } else { -+ kdb_printf ("%s :%d;", -+ kltp->kl_typestr, -+ kltp->kl_bit_size); -+ } -+ } -+ } else { -+ if (SUPPRESS_NAME) { -+ kdb_printf("%s", kltp->kl_name); -+ } else { -+ kdb_printf("%s;", kltp->kl_name); -+ } -+ } -+ } -+ PRINT_NL(flags); -+} -+ -+/* -+ * kl_print_member() -+ */ -+void -+kl_print_member(void *ptr, kltype_t *mp, int level, int flags) -+{ -+ int kl_type = 0; -+ kltype_t *rkltp; -+ -+ if (flags & C_SHOWOFFSET) { -+ kdb_printf("%#x ", mp->kl_offset); -+ } -+ -+ if ((rkltp = mp->kl_realtype)) { -+ kl_type = rkltp->kl_type; -+ } else -+ kl_type = mp->kl_type; -+ switch (kl_type) { -+ case KLT_STRUCT: -+ case KLT_UNION: -+ kl_print_struct_type(ptr, mp, level, flags); -+ break; -+ case KLT_ARRAY: -+ kl_print_array_type(ptr, mp, level, flags); -+ break; -+ case 
KLT_POINTER: -+ kl_print_pointer_type(ptr, mp, level, flags); -+ break; -+ case KLT_FUNCTION: -+ kl_print_function_type(ptr, mp, level, flags); -+ break; -+ case KLT_BASE: -+ kl_print_base_type(ptr, mp, level, flags); -+ break; -+ case KLT_ENUMERATION: -+ kl_print_enumeration_type(ptr, mp, level, flags); -+ break; -+ case KLT_TYPEDEF: -+ while (rkltp && rkltp->kl_realtype) { -+ if (rkltp->kl_realtype == rkltp) { -+ break; -+ } -+ rkltp = rkltp->kl_realtype; -+ } -+ if (ptr) { -+ kl_print_typedef_type(ptr, mp, -+ level, flags); -+ break; -+ } -+ LEVEL_INDENT(level, flags); -+ if (flags & SUPPRESS_NAME) { -+ if (rkltp && (mp->kl_bit_size < -+ (rkltp->kl_size * 8))) { -+ kdb_printf ("%s :%d", -+ mp->kl_typestr, -+ mp->kl_bit_size); -+ } else { -+ kdb_printf("%s", -+ mp->kl_realtype->kl_name); -+ } -+ print_realtype(mp->kl_realtype); -+ } else { -+ if (rkltp && (mp->kl_bit_size < -+ (rkltp->kl_size * 8))) { -+ if (mp->kl_name) { -+ kdb_printf ("%s%s :%d;", -+ mp->kl_typestr, -+ mp->kl_name, -+ mp->kl_bit_size); -+ } else { -+ kdb_printf ("%s :%d;", -+ mp->kl_typestr, -+ mp->kl_bit_size); -+ } -+ } else { -+ kdb_printf("%s %s;", -+ mp->kl_realtype->kl_name, -+ mp->kl_name); -+ } -+ } -+ PRINT_NL(flags); -+ break; -+ -+ default: -+ LEVEL_INDENT(level, flags); -+ if (mp->kl_typestr) { -+ kdb_printf("%s%s;", -+ mp->kl_typestr, mp->kl_name); -+ } else { -+ kdb_printf("<\?\?\? kl_type:%d> %s;", -+ kl_type, mp->kl_name); -+ } -+ PRINT_NL(flags); -+ break; -+ } -+} -+ -+/* -+ * kl_print_struct_type() -+ */ -+void -+kl_print_struct_type(void *buf, kltype_t *kltp, int level, int flags) -+{ -+ int orig_flags = flags; -+ void *ptr = NULL; -+ kltype_t *mp, *rkltp; -+ -+ /* If we are printing out an actual struct, then don't print any -+ * semi colons. -+ */ -+ if (buf) { -+ flags |= SUPPRESS_SEMI_COLON; -+ } -+ -+ LEVEL_INDENT(level, flags); -+ if ((level == 0) || (flags & NO_INDENT)) { -+ kdb_printf("%s{\n", kltp->kl_typestr); -+ } else { -+ if (buf) { -+ if (level && !(kltp->kl_flags & TYP_ANONYMOUS_FLG)) { -+ kdb_printf("%s = %s{\n", -+ kltp->kl_name, kltp->kl_typestr); -+ } else { -+ kdb_printf("%s{\n", kltp->kl_typestr); -+ } -+ flags &= (~SUPPRESS_NL); -+ } else { -+ if (kltp->kl_typestr) { -+ kdb_printf("%s{\n", kltp->kl_typestr); -+ } else { -+ kdb_printf(" {\n"); -+ } -+ } -+ } -+ -+ /* If the SUPPRESS_NL, SUPPRESS_SEMI_COLON, and SUPPRESS_NAME flags -+ * are set and buf is NULL, then turn them off as they only apply -+ * at the end of the struct. We save the original flags for that -+ * purpose. -+ */ -+ if (!buf) { -+ flags &= ~(SUPPRESS_NL|SUPPRESS_SEMI_COLON|SUPPRESS_NAME); -+ } -+ -+ /* If the NO_INDENT is set, we need to turn it off at this -+ * point -- just in case we come across a member of this struct -+ * that is also a struct. 
-+ */ -+ if (flags & NO_INDENT) { -+ flags &= ~(NO_INDENT); -+ } -+ -+ if (kltp->kl_type == KLT_MEMBER) { -+ rkltp = kl_realtype(kltp, 0); -+ } else { -+ rkltp = kltp; -+ } -+ level++; -+ if ((mp = rkltp->kl_member)) { -+ while (mp) { -+ if (buf) { -+ ptr = buf + mp->kl_offset; -+ } -+ kl_print_member(ptr, mp, level, flags); -+ mp = mp->kl_member; -+ } -+ } else { -+ if (kltp->kl_flags & TYP_INCOMPLETE_FLG) { -+ LEVEL_INDENT(level, flags); -+ kdb_printf("\n"); -+ } -+ } -+ level--; -+ LEVEL_INDENT(level, flags); -+ -+ /* kl_size = 0 for empty structs */ -+ if (ptr || ((kltp->kl_size == 0) && buf)) { -+ kdb_printf("}"); -+ } else if ((kltp->kl_type == KLT_MEMBER) && -+ !(orig_flags & SUPPRESS_NAME) && -+ !(kltp->kl_flags & TYP_ANONYMOUS_FLG)) { -+ kdb_printf("} %s", kltp->kl_name); -+ } else { -+ kdb_printf("}"); -+ } -+ PRINT_SEMI_COLON(level, orig_flags); -+ PRINT_NL(orig_flags); -+} -+ -+/* -+ * kl_print_type() -+ */ -+void -+kl_print_type(void *buf, kltype_t *kltp, int level, int flags) -+{ -+ void *ptr; -+ -+ if (buf) { -+ if (kltp->kl_offset) { -+ ptr = (void *)((uaddr_t)buf + kltp->kl_offset); -+ } else { -+ ptr = buf; -+ } -+ } else { -+ ptr = 0; -+ } -+ -+ /* Only allow binary printing for base types -+ */ -+ if (kltp->kl_type != KLT_BASE) { -+ flags &= (~C_BINARY); -+ } -+ switch (kltp->kl_type) { -+ -+ case KLT_TYPEDEF: -+ kl_print_typedef_type(ptr, kltp, level, flags); -+ break; -+ -+ case KLT_STRUCT: -+ case KLT_UNION: -+ kl_print_struct_type(ptr, kltp, level, flags); -+ break; -+ -+ case KLT_MEMBER: -+ kl_print_member(ptr, kltp, level, flags); -+ break; -+ -+ case KLT_POINTER: -+ kl_print_pointer_type(ptr, kltp, level, flags); -+ break; -+ -+ case KLT_FUNCTION: -+ LEVEL_INDENT(level, flags); -+ kl_print_function_type(ptr, kltp, level, flags); -+ break; -+ -+ case KLT_ARRAY: -+ kl_print_array_type(ptr, kltp, level, flags); -+ break; -+ -+ case KLT_ENUMERATION: -+ kl_print_enumeration_type(ptr, -+ kltp, level, flags); -+ break; -+ -+ case KLT_BASE: -+ kl_print_base_type(ptr, kltp, level, flags); -+ break; -+ -+ default: -+ LEVEL_INDENT(level, flags); -+ if (flags & SUPPRESS_NAME) { -+ kdb_printf ("%s", kltp->kl_name); -+ } else { -+ kdb_printf ("%s %s;", -+ kltp->kl_name, kltp->kl_name); -+ } -+ PRINT_NL(flags); -+ } -+} -+ -+/* -+ * eval is from lcrash eval.c -+ */ -+ -+/* Forward declarations */ -+static void free_node(node_t *); -+static node_t *make_node(token_t *, int); -+static node_t *get_node_list(token_t *, int); -+static node_t *do_eval(int); -+static int is_unary(int); -+static int is_binary(int); -+static int precedence(int); -+static node_t *get_sizeof(void); -+static int replace_cast(node_t *, int); -+static int replace_unary(node_t *, int); -+static node_t *replace(node_t *, int); -+static void array_to_element(node_t*, node_t*); -+static int type_to_number(node_t *); -+kltype_t *number_to_type(node_t *); -+static type_t *eval_type(node_t *); -+static type_t *get_type(char *, int); -+static int add_rchild(node_t *, node_t *); -+static void free_nodelist(node_t *); -+ -+/* Global variables -+ */ -+static int logical_flag; -+static node_t *node_list = (node_t *)NULL; -+uint64_t eval_error; -+char *error_token; -+ -+/* -+ * set_eval_error() -+ */ -+static void -+set_eval_error(uint64_t ecode) -+{ -+ eval_error = ecode; -+} -+ -+/* -+ * is_typestr() -+ * -+ * We check for "struct", "union", etc. separately because they -+ * would not be an actual part of the type name. 
We also assume -+ * that the string passed in -+ * -+ * - does not have any leading blanks or tabs -+ * - is NULL terminated -+ * - contains only one type name to check -+ * - does not contain any '*' characters -+ */ -+static int -+is_typestr(char *str) -+{ -+ int len; -+ -+ len = strlen(str); -+ if ((len >= 6) && !strncmp(str, "struct", 6)) { -+ return(1); -+ } else if ((len >= 5) &&!strncmp(str, "union", 5)) { -+ return(1); -+ } else if ((len >= 5) &&!strncmp(str, "short", 5)) { -+ return(1); -+ } else if ((len >= 8) &&!strncmp(str, "unsigned", 8)) { -+ return(1); -+ } else if ((len >= 6) &&!strncmp(str, "signed", 6)) { -+ return(1); -+ } else if ((len >= 4) &&!strncmp(str, "long", 4)) { -+ return(1); -+ } -+ /* Strip off any trailing blanks -+ */ -+ while(*str && ((str[strlen(str) - 1] == ' ') -+ || (str[strlen(str) - 1] == '\t'))) { -+ str[strlen(str) - 1] = 0; -+ } -+ if (kl_find_type(str, KLT_TYPES)) { -+ return (1); -+ } -+ return(0); -+} -+ -+/* -+ * free_tokens() -+ */ -+static void -+free_tokens(token_t *tp) -+{ -+ token_t *t, *tnext; -+ -+ t = tp; -+ while (t) { -+ tnext = t->next; -+ if (t->string) { -+ kl_free_block((void *)t->string); -+ } -+ kl_free_block((void *)t); -+ t = tnext; -+ } -+} -+ -+/* -+ * process_text() -+ */ -+static int -+process_text(char **str, token_t *tok) -+{ -+ char *cp = *str; -+ char *s = NULL; -+ int len = 0; -+ -+ /* Check and see if this token is a STRING or CHARACTER -+ * type (beginning with a single or double quote). -+ */ -+ if (*cp == '\'') { -+ /* make sure that only a single character is between -+ * the single quotes (it can be an escaped character -+ * too). -+ */ -+ s = strpbrk((cp + 1), "\'"); -+ if (!s) { -+ set_eval_error(E_SINGLE_QUOTE); -+ error_token = tok->ptr; -+ return(1); -+ } -+ len = (uaddr_t)s - (uaddr_t)cp; -+ if ((*(cp+1) == '\\')) { -+ if (*(cp+2) == '0') { -+ long int val; -+ unsigned long uval; -+ char *ep; -+ -+ uval = kl_strtoull((char*)(cp+2), -+ (char **)&ep, 8); -+ val = uval; -+ if ((val > 255) || (*ep != '\'')) { -+ set_eval_error(E_BAD_CHAR); -+ error_token = tok->ptr; -+ return(1); -+ } -+ } else if (*(cp+3) != '\'') { -+ set_eval_error(E_BAD_CHAR); -+ error_token = tok->ptr; -+ return(1); -+ } -+ tok->type = CHARACTER; -+ } else if (len == 2) { -+ tok->type = CHARACTER; -+ } else { -+ -+ /* Treat as a single token entry. It's possible -+ * that what's between the single quotes is a -+ * type name. That will be determined later on. -+ */ -+ tok->type = STRING; -+ } -+ *str = cp + len; -+ } else if (*cp == '\"') { -+ s = strpbrk((cp + 1), "\""); -+ if (!s) { -+ set_eval_error(E_BAD_STRING); -+ error_token = tok->ptr; -+ return(1); -+ } -+ len = (uaddr_t)s - (uaddr_t)cp; -+ tok->type = TEXT; -+ *str = cp + len; -+ } -+ if ((tok->type == STRING) || (tok->type == TEXT)) { -+ -+ if ((tok->type == TEXT) && (strlen(cp) > (len + 1))) { -+ -+ /* Check to see if there is a comma or semi-colon -+ * directly following the string. If there is, -+ * then the string is OK (the following characters -+ * are part of the next expression). Also, it's OK -+ * to have trailing blanks as long as that's all -+ * threre is. -+ */ -+ char *c; -+ -+ c = s + 1; -+ while (*c) { -+ if ((*c == ',') || (*c == ';')) { -+ break; -+ } else if (*c != ' ') { -+ set_eval_error(E_END_EXPECTED); -+ tok->ptr = c; -+ error_token = tok->ptr; -+ return(1); -+ } -+ c++; -+ } -+ /* Truncate the trailing blanks (they are not -+ * part of the string). 
-+ */ -+ if (c != (s + 1)) { -+ *(s + 1) = 0; -+ } -+ } -+ tok->string = (char *)kl_alloc_block(len); -+ memcpy(tok->string, (cp + 1), len - 1); -+ tok->string[len - 1] = 0; -+ } -+ return(0); -+} -+ -+/* -+ * get_token_list() -+ */ -+static token_t * -+get_token_list(char *str) -+{ -+ int paren_count = 0; -+ char *cp; -+ token_t *tok = (token_t*)NULL, *tok_head = (token_t*)NULL; -+ token_t *tok_last = (token_t*)NULL; -+ -+ cp = str; -+ eval_error = 0; -+ -+ while (*cp) { -+ -+ /* Skip past any "white space" (spaces and tabs). -+ */ -+ switch (*cp) { -+ case ' ' : -+ case '\t' : -+ case '`' : -+ cp++; -+ continue; -+ default : -+ break; -+ } -+ -+ /* Allocate space for the next token */ -+ tok = (token_t *)kl_alloc_block(sizeof(token_t)); -+ tok->ptr = cp; -+ -+ switch(*cp) { -+ -+ /* Check for operators -+ */ -+ case '+' : -+ if (*((char*)cp + 1) == '+') { -+ -+ /* We aren't doing asignment here, -+ * so the ++ operator is not -+ * considered valid. -+ */ -+ set_eval_error(E_BAD_OPERATOR); -+ error_token = tok_last->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } else if (!tok_last || -+ (tok_last->operator && -+ (tok_last->operator != CLOSE_PAREN))) { -+ tok->operator = UNARY_PLUS; -+ } else { -+ tok->operator = ADD; -+ } -+ break; -+ -+ case '-' : -+ if (*((char*)cp + 1) == '-') { -+ -+ /* We aren't doing asignment here, so -+ * the -- operator is not considered -+ * valid. -+ */ -+ set_eval_error(E_BAD_OPERATOR); -+ error_token = tok_last->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } else if (*((char*)cp + 1) == '>') { -+ tok->operator = RIGHT_ARROW; -+ cp++; -+ } else if (!tok_last || (tok_last->operator && -+ (tok_last->operator != CLOSE_PAREN))) { -+ tok->operator = UNARY_MINUS; -+ } else { -+ tok->operator = SUBTRACT; -+ } -+ break; -+ -+ case '.' : -+ /* XXX - need to check to see if this is a -+ * decimal point in the middle fo a floating -+ * point value. -+ */ -+ tok->operator = DOT; -+ break; -+ -+ case '*' : -+ /* XXX - need a better way to tell if this is -+ * an INDIRECTION. perhaps check the next -+ * token? -+ */ -+ if (!tok_last || (tok_last->operator && -+ ((tok_last->operator != CLOSE_PAREN) && -+ (tok_last->operator != CAST)))) { -+ tok->operator = INDIRECTION; -+ } else { -+ tok->operator = MULTIPLY; -+ } -+ break; -+ -+ case '/' : -+ tok->operator = DIVIDE; -+ break; -+ -+ case '%' : -+ tok->operator = MODULUS; -+ break; -+ -+ case '(' : { -+ char *s, *s1, *s2; -+ int len; -+ -+ /* Make sure the previous token is an operator -+ */ -+ if (tok_last && !tok_last->operator) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = tok_last->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } -+ -+ if (tok_last && -+ ((tok_last->operator == RIGHT_ARROW) || -+ (tok_last->operator == DOT))) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = tok_last->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } -+ -+ /* Check here to see if following tokens -+ * constitute a cast. -+ */ -+ -+ /* Skip past any "white space" (spaces -+ * and tabs) -+ */ -+ while ((*(cp+1) == ' ') || (*(cp+1) == '\t')) { -+ cp++; -+ } -+ if ((*(cp+1) == '(') || isdigit(*(cp+1)) || -+ (*(cp+1) == '+') || (*(cp+1) == '-') || -+ (*(cp+1) == '*') || (*(cp+1) == '&') || -+ (*(cp+1) == ')')){ -+ tok->operator = OPEN_PAREN; -+ paren_count++; -+ break; -+ } -+ -+ /* Make sure we have a CLOSE_PAREN. 
-+ */ -+ if (!(s1 = strchr(cp+1, ')'))) { -+ set_eval_error(E_OPEN_PAREN); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } -+ /* Check to see if this is NOT a simple -+ * typecast. -+ */ -+ if (!(s2 = strchr(cp+1, '.'))) { -+ s2 = strstr(cp+1, "->"); -+ } -+ if (s2 && (s2 < s1)) { -+ tok->operator = OPEN_PAREN; -+ paren_count++; -+ break; -+ } -+ -+ if ((s = strpbrk(cp+1, "*)"))) { -+ char str[128]; -+ -+ len = (uaddr_t)s - (uaddr_t)(cp+1); -+ strncpy(str, cp+1, len); -+ str[len] = 0; -+ if (!is_typestr(str)) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } -+ if (!(s = strpbrk((cp+1), ")"))) { -+ set_eval_error(E_OPEN_PAREN); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } -+ len = (uaddr_t)s - (uaddr_t)(cp+1); -+ tok->string = (char *) -+ kl_alloc_block(len + 1); -+ memcpy(tok->string, (cp+1), len); -+ tok->string[len] = 0; -+ tok->operator = CAST; -+ cp = (char *)((uaddr_t)(cp+1) + len); -+ break; -+ } -+ tok->operator = OPEN_PAREN; -+ paren_count++; -+ break; -+ } -+ -+ case ')' : -+ if (tok_last && ((tok_last->operator == -+ RIGHT_ARROW) || -+ (tok_last->operator == DOT))) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = tok_last->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return ((token_t*)NULL); -+ } -+ tok->operator = CLOSE_PAREN; -+ paren_count--; -+ break; -+ -+ case '&' : -+ if (*((char*)cp + 1) == '&') { -+ tok->operator = LOGICAL_AND; -+ cp++; -+ } else if (!tok_last || (tok_last && -+ (tok_last->operator && -+ tok_last->operator != -+ CLOSE_PAREN))) { -+ tok->operator = ADDRESS; -+ } else { -+ tok->operator = BITWISE_AND; -+ } -+ break; -+ -+ case '|' : -+ if (*((char*)cp + 1) == '|') { -+ tok->operator = LOGICAL_OR; -+ cp++; -+ } else { -+ tok->operator = BITWISE_OR; -+ } -+ break; -+ -+ case '=' : -+ if (*((char*)cp + 1) == '=') { -+ tok->operator = EQUAL; -+ cp++; -+ } else { -+ /* ASIGNMENT -- NOT IMPLEMENTED -+ */ -+ tok->operator = NOT_YET; -+ } -+ break; -+ -+ case '<' : -+ if (*((char*)cp + 1) == '<') { -+ tok->operator = LEFT_SHIFT; -+ cp++; -+ } else if (*((char*)cp + 1) == '=') { -+ tok->operator = LESS_THAN_OR_EQUAL; -+ cp++; -+ } else { -+ tok->operator = LESS_THAN; -+ } -+ break; -+ -+ case '>' : -+ if (*((char*)(cp + 1)) == '>') { -+ tok->operator = RIGHT_SHIFT; -+ cp++; -+ } else if (*((char*)cp + 1) == '=') { -+ tok->operator = GREATER_THAN_OR_EQUAL; -+ cp++; -+ } else { -+ tok->operator = GREATER_THAN; -+ } -+ break; -+ -+ case '!' : -+ if (*((char*)cp + 1) == '=') { -+ tok->operator = NOT_EQUAL; -+ cp++; -+ } else { -+ tok->operator = LOGICAL_NEGATION; -+ } -+ break; -+ -+ case '$' : -+ set_eval_error(E_NOT_IMPLEMENTED); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return((token_t*)NULL); -+ case '~' : -+ tok->operator = ONES_COMPLEMENT; -+ break; -+ -+ case '^' : -+ tok->operator = BITWISE_EXCLUSIVE_OR; -+ break; -+ -+ case '?' 
: -+ set_eval_error(E_NOT_IMPLEMENTED); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return((token_t*)NULL); -+ case ':' : -+ set_eval_error(E_NOT_IMPLEMENTED); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return((token_t*)NULL); -+ case '[' : -+ tok->operator = OPEN_SQUARE_BRACKET;; -+ break; -+ -+ case ']' : -+ tok->operator = CLOSE_SQUARE_BRACKET;; -+ break; -+ -+ default: { -+ -+ char *s; -+ int len; -+ -+ /* See if the last token is a RIGHT_ARROW -+ * or a DOT. If it is, then this token must -+ * be the name of a struct/union member. -+ */ -+ if (tok_last && -+ ((tok_last->operator == RIGHT_ARROW) || -+ (tok_last->operator == DOT))) { -+ tok->type = MEMBER; -+ } else if (process_text(&cp, tok)) { -+ free_tokens(tok_head); -+ free_tokens(tok); -+ return((token_t*)NULL); -+ } -+ if (tok->type == TEXT) { -+ return(tok); -+ } else if (tok->type == STRING) { -+ if (is_typestr(tok->string)) { -+ tok->type = TYPE_DEF; -+ } else { -+ tok->operator = TEXT; -+ return(tok); -+ } -+ break; -+ } else if (tok->type == CHARACTER) { -+ break; -+ } -+ -+ /* Check and See if the entire string is -+ * a typename (valid only for whatis case). -+ */ -+ s = strpbrk(cp, -+ ".\t+-*/()[]|~!$&%^<>?:&=^\"\'"); -+ if (!s && !tok->type && is_typestr(cp)) { -+ tok->type = TYPE_DEF; -+ len = strlen(cp) + 1; -+ tok->string = (char *) -+ kl_alloc_block(len); -+ memcpy(tok->string, cp, len - 1); -+ tok->string[len - 1] = 0; -+ cp = (char *)((uaddr_t)cp + len - 2); -+ break; -+ } -+ -+ /* Now check for everything else -+ */ -+ if ((s = strpbrk(cp, -+ " .\t+-*/()[]|~!$&%^<>?:&=^\"\'"))) { -+ len = (uaddr_t)s - (uaddr_t)cp + 1; -+ } else { -+ len = strlen(cp) + 1; -+ } -+ -+ tok->string = -+ (char *)kl_alloc_block(len); -+ memcpy(tok->string, cp, len - 1); -+ tok->string[len - 1] = 0; -+ -+ cp = (char *)((uaddr_t)cp + len - 2); -+ -+ /* Check to see if this is the keyword -+ * "sizeof". If not, then check to see if -+ * the string is a member name. 
-+ */ -+ if (!strcmp(tok->string, "sizeof")) { -+ tok->operator = SIZEOF; -+ kl_free_block((void *)tok->string); -+ tok->string = 0; -+ } else if (tok_last && -+ ((tok_last->operator == RIGHT_ARROW) || -+ (tok_last->operator == DOT))) { -+ tok->type = MEMBER; -+ } else { -+ tok->type = STRING; -+ } -+ break; -+ } -+ } -+ if (!(tok->type)) { -+ tok->type = OPERATOR; -+ } -+ if (!tok_head) { -+ tok_head = tok_last = tok; -+ } else { -+ tok_last->next = tok; -+ tok_last = tok; -+ } -+ cp++; -+ } -+ if (paren_count < 0) { -+ set_eval_error(E_CLOSE_PAREN); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ return((token_t*)NULL); -+ } else if (paren_count > 0) { -+ set_eval_error(E_OPEN_PAREN); -+ error_token = tok->ptr; -+ free_tokens(tok_head); -+ return((token_t*)NULL); -+ } -+ return(tok_head); -+} -+ -+/* -+ * valid_binary_args() -+ */ -+int -+valid_binary_args(node_t *np, node_t *left, node_t *right) -+{ -+ int op = np->operator; -+ -+ if ((op == RIGHT_ARROW) || (op == DOT)) { -+ if (!left) { -+ set_eval_error(E_MISSING_STRUCTURE); -+ error_token = np->tok_ptr; -+ return(0); -+ } else if (!(left->node_type == TYPE_DEF) && -+ !(left->node_type == MEMBER) && -+ !(left->operator == CLOSE_PAREN) && -+ !(left->operator == CLOSE_SQUARE_BRACKET)) { -+ set_eval_error(E_BAD_STRUCTURE); -+ error_token = left->tok_ptr; -+ return(0); -+ } -+ if (!right || (!(right->node_type == MEMBER))) { -+ set_eval_error(E_BAD_MEMBER); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ return(1); -+ } -+ if (!left || !right) { -+ set_eval_error(E_MISSING_OPERAND); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ switch (left->operator) { -+ case CLOSE_PAREN: -+ case CLOSE_SQUARE_BRACKET: -+ break; -+ default: -+ switch(left->node_type) { -+ case NUMBER: -+ case STRING: -+ case TEXT: -+ case CHARACTER: -+ case EVAL_VAR: -+ case MEMBER: -+ break; -+ default: -+ set_eval_error(E_BAD_OPERAND); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ } -+ switch (right->operator) { -+ case OPEN_PAREN: -+ break; -+ default: -+ switch(right->node_type) { -+ case NUMBER: -+ case STRING: -+ case TEXT: -+ case CHARACTER: -+ case EVAL_VAR: -+ case MEMBER: -+ break; -+ default: -+ set_eval_error(E_BAD_OPERAND); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ } -+ return(1); -+} -+ -+/* -+ * get_node_list() -+ */ -+static node_t * -+get_node_list(token_t *tp, int flags) -+{ -+ node_t *root = (node_t *)NULL; -+ node_t *np = (node_t *)NULL; -+ node_t *last = (node_t *)NULL; -+ -+ /* Loop through the tokens and convert them to nodes. -+ */ -+ while (tp) { -+ np = make_node(tp, flags); -+ if (eval_error) { -+ return((node_t *)NULL); -+ } -+ if (root) { -+ last->next = np; -+ last = np; -+ } else { -+ root = last = np; -+ } -+ tp = tp->next; -+ } -+ last->next = (node_t *)NULL; /* cpw patch */ -+ last = (node_t *)NULL; -+ for (np = root; np; np = np->next) { -+ if (is_binary(np->operator)) { -+ if (!valid_binary_args(np, last, np->next)) { -+ free_nodelist(root); -+ return((node_t *)NULL); -+ } -+ } -+ last = np; -+ } -+ return(root); -+} -+ -+/* -+ * next_node() -+ */ -+static node_t * -+next_node(void) -+{ -+ node_t *np; -+ if ((np = node_list)) { -+ node_list = node_list->next; -+ np->next = (node_t*)NULL; -+ } -+ return(np); -+} -+ -+/* -+ * eval_unary() -+ */ -+static node_t * -+eval_unary(node_t *curnp, int flags) -+{ -+ node_t *n0, *n1; -+ -+ n0 = curnp; -+ -+ /* Peek ahead and make sure there is a next node. -+ * Also check to see if the next node requires -+ * a recursive call to do_eval(). 
If it does, we'll -+ * let the do_eval() call take care of pulling it -+ * off the list. -+ */ -+ if (!node_list) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = n0->tok_ptr; -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ if (n0->operator == CAST) { -+ if (node_list->operator == CLOSE_PAREN) { -+ -+ /* Free the CLOSE_PAREN and return -+ */ -+ free_node(next_node()); -+ return(n0); -+ } -+ if (!(node_list->node_type == NUMBER) && -+ !(node_list->node_type == VADDR) && -+ !((node_list->operator == ADDRESS) || -+ (node_list->operator == CAST) || -+ (node_list->operator == UNARY_MINUS) || -+ (node_list->operator == UNARY_PLUS) || -+ (node_list->operator == INDIRECTION) || -+ (node_list->operator == OPEN_PAREN))) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = node_list->tok_ptr; -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ } -+ if ((n0->operator == INDIRECTION) || -+ (n0->operator == ADDRESS) || -+ (n0->operator == OPEN_PAREN) || -+ is_unary(node_list->operator)) { -+ n1 = do_eval(flags); -+ if (eval_error) { -+ free_nodes(n0); -+ free_nodes(n1); -+ return((node_t*)NULL); -+ } -+ } else { -+ n1 = next_node(); -+ } -+ -+ if (n1->operator == OPEN_PAREN) { -+ /* Get the value contained within the parenthesis. -+ * If there was an error, just return. -+ */ -+ free_node(n1); -+ n1 = do_eval(flags); -+ if (eval_error) { -+ free_nodes(n1); -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ } -+ -+ n0->right = n1; -+ if (replace_unary(n0, flags) == -1) { -+ if (!eval_error) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = n0->tok_ptr; -+ } -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ return(n0); -+} -+ -+/* -+ * do_eval() -- Reduces an equation to a single value. -+ * -+ * Any parenthesis (and nested parenthesis) within the equation will -+ * be solved first via recursive calls to do_eval(). -+ */ -+static node_t * -+do_eval(int flags) -+{ -+ node_t *root = (node_t*)NULL, *curnp, *n0, *n1; -+ -+ /* Loop through the list of nodes until we run out of nodes -+ * or we hit a CLOSE_PAREN. If we hit an OPEN_PAREN, make a -+ * recursive call to do_eval(). -+ */ -+ curnp = next_node(); -+ while (curnp) { -+ n0 = n1 = (node_t *)NULL; -+ -+ if (curnp->operator == OPEN_PAREN) { -+ /* Get the value contained within the parenthesis. -+ * If there was an error, just return. -+ */ -+ free_node(curnp); -+ n0 = do_eval(flags); -+ if (eval_error) { -+ free_nodes(n0); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ -+ } else if (curnp->operator == SIZEOF) { -+ /* Free the SIZEOF node and then make a call -+ * to the get_sizeof() function (which will -+ * get the next node off the list). -+ */ -+ n0 = get_sizeof(); -+ if (eval_error) { -+ if (!error_token) { -+ error_token = curnp->tok_ptr; -+ } -+ free_node(curnp); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ free_node(curnp); -+ curnp = (node_t *)NULL; -+ } else if (is_unary(curnp->operator)) { -+ n0 = eval_unary(curnp, flags); -+ } else { -+ n0 = curnp; -+ curnp = (node_t *)NULL; -+ } -+ if (eval_error) { -+ free_nodes(n0); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ -+ /* n0 should now contain a non-operator node. Check to see if -+ * there is a next token. If there isn't, just add the last -+ * rchild and return. -+ */ -+ if (!node_list) { -+ if (root) { -+ add_rchild(root, n0); -+ } else { -+ root = n0; -+ } -+ replace(root, flags); -+ if (eval_error) { -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ return(root); -+ } -+ -+ /* Make sure the next token is an operator. 
-+ */ -+ if (!node_list->operator) { -+ free_nodes(root); -+ free_node(n0); -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = node_list->tok_ptr; -+ return((node_t *)NULL); -+ } else if ((node_list->operator == CLOSE_PAREN) || -+ (node_list->operator == CLOSE_SQUARE_BRACKET)) { -+ -+ if (root) { -+ add_rchild(root, n0); -+ } else { -+ root = n0; -+ } -+ -+ /* Reduce the resulting tree to a single value -+ */ -+ replace(root, flags); -+ if (eval_error) { -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ -+ /* Step over the CLOSE_PAREN or CLOSE_SQUARE_BRACKET -+ * and then return. -+ */ -+ free_node(next_node()); -+ return(root); -+ } else if (node_list->operator == OPEN_SQUARE_BRACKET) { -+next_dimension1: -+ /* skip over the OPEN_SQUARE_BRACKET token -+ */ -+ free_node(next_node()); -+ -+ /* Get the value contained within the brackets. This -+ * value must represent an array index (value or -+ * equation). -+ */ -+ n1 = do_eval(0); -+ if (eval_error) { -+ free_nodes(root); -+ free_node(n0); -+ free_node(n1); -+ return((node_t *)NULL); -+ } -+ -+ /* Convert the array (or pointer type) to an -+ * element type using the index value obtained -+ * above. Make sure that n0 contains some sort -+ * of type definition first, however. -+ */ -+ if (n0->node_type != TYPE_DEF) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n0->tok_ptr; -+ free_nodes(n0); -+ free_nodes(n1); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ array_to_element(n0, n1); -+ free_node(n1); -+ if (eval_error) { -+ free_nodes(root); -+ free_nodes(n0); -+ return((node_t *)NULL); -+ } -+ -+ /* If there aren't any more nodes, just -+ * return. -+ */ -+ if (!node_list) { -+ return(n0); -+ } -+ if (node_list->operator == OPEN_SQUARE_BRACKET) { -+ goto next_dimension1; -+ } -+ } else if (!is_binary(node_list->operator)) { -+ set_eval_error(E_BAD_OPERATOR); -+ error_token = node_list->tok_ptr; -+ free_nodes(root); -+ free_nodes(n0); -+ return((node_t *)NULL); -+ } -+ -+ /* Now get the operator node -+ */ -+ if (!(n1 = next_node())) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = n0->tok_ptr; -+ free_nodes(n0); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ -+ /* Check to see if this binary operator is RIGHT_ARROW or DOT. -+ * If it is, we need to reduce it to a single value node now. -+ */ -+ while ((n1->operator == RIGHT_ARROW) || (n1->operator == DOT)) { -+ -+ /* The next node must contain the name of the -+ * struct|union member. -+ */ -+ if (!node_list || (node_list->node_type != MEMBER)) { -+ set_eval_error(E_BAD_MEMBER); -+ error_token = n1->tok_ptr; -+ free_nodes(n0); -+ free_nodes(n1); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ n1->left = n0; -+ -+ /* Now get the next node and link it as the -+ * right child. -+ */ -+ if (!(n0 = next_node())) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = n1->tok_ptr; -+ free_nodes(n1); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ n1->right = n0; -+ if (!(n0 = replace(n1, flags))) { -+ if (!(eval_error)) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = n1->tok_ptr; -+ } -+ free_nodes(n1); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ n1 = (node_t *)NULL; -+ -+ /* Check to see if there is a next node. If there -+ * is, check to see if it is the operator CLOSE_PAREN. -+ * If it is, then return (skipping over the -+ * CLOSE_PAREN first). 
-+ */ -+ if (node_list && ((node_list->operator == CLOSE_PAREN) -+ || (node_list->operator == -+ CLOSE_SQUARE_BRACKET))) { -+ if (root) { -+ add_rchild(root, n0); -+ } else { -+ root = n0; -+ } -+ -+ /* Reduce the resulting tree to a single -+ * value -+ */ -+ replace(root, flags); -+ if (eval_error) { -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ -+ /* Advance the token pointer past the -+ * CLOSE_PAREN and then return. -+ */ -+ free_node(next_node()); -+ return(root); -+ } -+ -+ /* Check to see if the next node is an -+ * OPEN_SQUARE_BRACKET. If it is, then we have to -+ * reduce the contents of the square brackets to -+ * an index array. -+ */ -+ if (node_list && (node_list->operator -+ == OPEN_SQUARE_BRACKET)) { -+ -+ /* Advance the token pointer and call -+ * do_eval() again. -+ */ -+ free_node(next_node()); -+next_dimension2: -+ n1 = do_eval(0); -+ if (eval_error) { -+ free_node(n0); -+ free_node(n1); -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ -+ /* Convert the array (or pointer type) to -+ * an element type using the index value -+ * obtained above. Make sure that n0 -+ * contains some sort of type definition -+ * first, however. -+ */ -+ if (n0->node_type != TYPE_DEF) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n0->tok_ptr; -+ free_node(n0); -+ free_node(n1); -+ free_node(root); -+ return((node_t *)NULL); -+ } -+ array_to_element(n0, n1); -+ free_node(n1); -+ if (eval_error) { -+ free_node(n0); -+ free_node(root); -+ return((node_t *)NULL); -+ } -+ } -+ -+ /* Now get the next operator node (if there is one). -+ */ -+ if (!node_list) { -+ if (root) { -+ add_rchild(root, n0); -+ } else { -+ root = n0; -+ } -+ return(root); -+ } -+ n1 = next_node(); -+ if (n1->operator == OPEN_SQUARE_BRACKET) { -+ goto next_dimension2; -+ } -+ } -+ -+ if (n1 && ((n1->operator == CLOSE_PAREN) || -+ (n1->operator == CLOSE_SQUARE_BRACKET))) { -+ free_node(n1); -+ if (root) { -+ add_rchild(root, n0); -+ } else { -+ root = n0; -+ } -+ replace(root, flags); -+ if (eval_error) { -+ free_nodes(root); -+ return((node_t *)NULL); -+ } -+ return(root); -+ } -+ -+ if (!root) { -+ root = n1; -+ n1->left = n0; -+ } else if (precedence(root->operator) -+ >= precedence(n1->operator)) { -+ add_rchild(root, n0); -+ n1->left = root; -+ root = n1; -+ } else { -+ if (!root->right) { -+ n1->left = n0; -+ root->right = n1; -+ } else { -+ add_rchild(root, n0); -+ n1->left = root->right; -+ root->right = n1; -+ } -+ } -+ curnp = next_node(); -+ } /* while(curnp) */ -+ return(root); -+} -+ -+/* -+ * is_unary() -+ */ -+static int -+is_unary(int op) -+{ -+ switch (op) { -+ case LOGICAL_NEGATION : -+ case ADDRESS : -+ case INDIRECTION : -+ case UNARY_MINUS : -+ case UNARY_PLUS : -+ case ONES_COMPLEMENT : -+ case CAST : -+ return(1); -+ -+ default : -+ return(0); -+ } -+} -+ -+ -+/* -+ * is_binary() -+ */ -+static int -+is_binary(int op) -+{ -+ switch (op) { -+ -+ case BITWISE_OR : -+ case BITWISE_EXCLUSIVE_OR : -+ case BITWISE_AND : -+ case RIGHT_SHIFT : -+ case LEFT_SHIFT : -+ case ADD : -+ case SUBTRACT : -+ case MULTIPLY : -+ case DIVIDE : -+ case MODULUS : -+ case LOGICAL_OR : -+ case LOGICAL_AND : -+ case EQUAL : -+ case NOT_EQUAL : -+ case LESS_THAN : -+ case GREATER_THAN : -+ case LESS_THAN_OR_EQUAL : -+ case GREATER_THAN_OR_EQUAL : -+ case RIGHT_ARROW : -+ case DOT : -+ return(1); -+ -+ default : -+ return(0); -+ } -+} -+ -+/* -+ * precedence() -+ */ -+static int -+precedence(int a) -+{ -+ if ((a >= CONDITIONAL) && (a <= CONDITIONAL_ELSE)) { -+ return(1); -+ } else if (a == LOGICAL_OR) { -+ 
return(2); -+ } else if (a == LOGICAL_AND) { -+ return(3); -+ } else if (a == BITWISE_OR) { -+ return(4); -+ } else if (a == BITWISE_EXCLUSIVE_OR) { -+ return(5); -+ } else if (a == BITWISE_AND) { -+ return(6); -+ } else if ((a >= EQUAL) && (a <= NOT_EQUAL)) { -+ return(7); -+ } else if ((a >= LESS_THAN) && (a <= GREATER_THAN_OR_EQUAL)) { -+ return(8); -+ } else if ((a >= RIGHT_SHIFT) && (a <= LEFT_SHIFT)) { -+ return(9); -+ } else if ((a >= ADD) && (a <= SUBTRACT)) { -+ return(10); -+ } else if ((a >= MULTIPLY) && (a <= MODULUS)) { -+ return(11); -+ } else if ((a >= LOGICAL_NEGATION) && (a <= SIZEOF)) { -+ return(12); -+ } else if ((a >= RIGHT_ARROW) && (a <= DOT)) { -+ return(13); -+ } else { -+ return(0); -+ } -+} -+ -+/* -+ * esc_char() -+ */ -+char -+esc_char(char *str) -+{ -+ long int val; -+ unsigned long uval; -+ char ch; -+ -+ if (strlen(str) > 1) { -+ uval = kl_strtoull(str, (char **)NULL, 8); -+ val = uval; -+ ch = (char)val; -+ } else { -+ ch = str[0]; -+ } -+ switch (ch) { -+ case 'a' : -+ return((char)7); -+ case 'b' : -+ return((char)8); -+ case 't' : -+ return((char)9); -+ case 'n' : -+ return((char)10); -+ case 'f' : -+ return((char)12); -+ case 'r' : -+ return((char)13); -+ case 'e' : -+ return((char)27); -+ default: -+ return(ch); -+ } -+} -+ -+/* -+ * make_node() -+ */ -+static node_t * -+make_node(token_t *t, int flags) -+{ -+ node_t *np; -+ -+ set_eval_error(0); -+ np = (node_t*)kl_alloc_block(sizeof(*np)); -+ -+ if (t->type == OPERATOR) { -+ -+ /* Check to see if this token represents a typecast -+ */ -+ if (t->operator == CAST) { -+ type_t *tp; -+ -+ if (!(np->type = get_type(t->string, flags))) { -+ set_eval_error(E_BAD_CAST); -+ error_token = t->ptr; -+ free_nodes(np); -+ return((node_t*)NULL); -+ } -+ -+ /* Determin if this is a pointer to a type -+ */ -+ tp = np->type; -+ if (tp->flag == POINTER_FLAG) { -+ np->flags = POINTER_FLAG; -+ tp = tp->t_next; -+ while (tp->flag == POINTER_FLAG) { -+ tp = tp->t_next; -+ } -+ } -+ switch(tp->flag) { -+ case KLTYPE_FLAG: -+ np->flags |= KLTYPE_FLAG; -+ break; -+ -+ default: -+ free_nodes(np); -+ set_eval_error(E_BAD_CAST); -+ error_token = t->ptr; -+ return((node_t*)NULL); -+ } -+ if (!t->next) { -+ if (flags & C_WHATIS) { -+ np->node_type = TYPE_DEF; -+ } else { -+ set_eval_error(E_BAD_CAST); -+ error_token = t->ptr; -+ return((node_t*)NULL); -+ } -+ } else { -+ np->node_type = OPERATOR; -+ np->operator = CAST; -+ } -+ } else { -+ np->node_type = OPERATOR; -+ np->operator = t->operator; -+ } -+ } else if (t->type == MEMBER) { -+ np->name = (char *)dup_block((void *)t->string, strlen(t->string)+1); -+ np->node_type = MEMBER; -+ } else if ((t->type == STRING) || (t->type == TYPE_DEF)) { -+ syment_t *sp; -+ dbg_sym_t *stp; -+ dbg_type_t *sttp; -+ -+ if ((sp = kl_lkup_symname(t->string))) { -+ if (!(flags & C_NOVARS)) { -+ int has_type = 0; -+ -+ /* The string is a symbol name. We'll treat it as -+ * a global kernel variable and, at least, gather in -+ * the address of the symbol and the value it points -+ * to. -+ */ -+ np->address = sp->s_addr; -+ np->flags |= ADDRESS_FLAG; -+ np->name = t->string; -+ t->string = (char*)NULL; -+ -+ /* Need to see if there is type information available -+ * for this variable. Since this mapping is not -+ * available yet, we will just attach a type struct -+ * for either uint32_t or uint64_t (depending on the -+ * size of a kernel pointer). That will at least let -+ * us do something and will prevent the scenario where -+ * we have a type node with out a pointer to a type -+ * struct! 
-+ */ -+ np->node_type = TYPE_DEF; -+ np->flags |= KLTYPE_FLAG; -+ np->value = *((kaddr_t *)np->address); -+ /* try to get the actual type info for the variable */ -+ if(((stp = dbg_find_sym(sp->s_name, DBG_VAR, -+ (uint64_t)0)) != NULL)){ -+ if((sttp = (dbg_type_t *) -+ kl_find_typenum(stp->sym_typenum)) -+ != NULL){ -+ /* kl_get_typestring(sttp); */ -+ has_type = 1; -+ if(sttp->st_klt.kl_type == KLT_POINTER){ -+ np->flags ^= KLTYPE_FLAG; -+ np->flags |= POINTER_FLAG; -+ np->type = -+ get_type(sttp->st_typestr, -+ flags); -+ } else { -+ np->type = -+ kl_alloc_block(sizeof(type_t)); -+ np->type->un.kltp = -+ &sttp->st_klt; -+ } -+ } -+ } -+ /* no type info for the variable found */ -+ if(!has_type){ -+ if (ptrsz64) { -+ np->type = get_type("uint64_t", flags); -+ } else { -+ np->type = get_type("uint32_t", flags); -+ } -+ } -+ } -+ kl_free_block((void *)sp); -+ } else if (flags & (C_WHATIS|C_SIZEOF)) { -+ -+ kltype_t *kltp; -+ -+ if ((kltp = kl_find_type(t->string, KLT_TYPES))) { -+ -+ np->node_type = TYPE_DEF; -+ np->flags = KLTYPE_FLAG; -+ np->type = (type_t*) -+ kl_alloc_block(sizeof(type_t)); -+ np->type->flag = KLTYPE_FLAG; -+ np->type->t_kltp = kltp; -+ } else { -+ if (get_value(t->string, -+ (uint64_t *)&np->value)) { -+ set_eval_error(E_BAD_VALUE); -+ error_token = t->ptr; -+ free_nodes(np); -+ return((node_t*)NULL); -+ } -+ if (!strncmp(t->string, "0x", 2) || -+ !strncmp(t->string, "0X", 2)) { -+ np->flags |= UNSIGNED_FLAG; -+ } -+ np->node_type = NUMBER; -+ } -+ np->tok_ptr = t->ptr; -+ return(np); -+ } else { -+ if (get_value(t->string, (uint64_t *)&np->value)) { -+ set_eval_error(E_BAD_VALUE); -+ error_token = t->ptr; -+ free_nodes(np); -+ return((node_t*)NULL); -+ } -+ if (np->value > 0xffffffff) { -+ np->byte_size = 8; -+ } else { -+ np->byte_size = 4; -+ } -+ if (!strncmp(t->string, "0x", 2) || -+ !strncmp(t->string, "0X", 2)) { -+ np->flags |= UNSIGNED_FLAG; -+ } -+ np->node_type = NUMBER; -+ } -+ } else if (t->type == CHARACTER) { -+ char *cp; -+ -+ /* Step over the single quote -+ */ -+ cp = (t->ptr + 1); -+ if (*cp == '\\') { -+ int i = 0; -+ char str[16]; -+ -+ /* Step over the back slash -+ */ -+ cp++; -+ while (*cp != '\'') { -+ str[i++] = *cp++; -+ } -+ str[i] = 0; -+ np->value = esc_char(str); -+ } else { -+ np->value = *cp; -+ } -+ np->type = get_type("char", flags); -+ np->node_type = TYPE_DEF; -+ np->flags |= KLTYPE_FLAG; -+ } else if (t->type == TEXT) { -+ np->node_type = TEXT; -+ np->name = t->string; -+ /* So the block doesn't get freed twice */ -+ t->string = (char*)NULL; -+ } else { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = t->ptr; -+ return((node_t*)NULL); -+ } -+ np->tok_ptr = t->ptr; -+ return(np); -+} -+ -+/* -+ * add_node() -+ */ -+static int -+add_node(node_t *root, node_t *new_node) -+{ -+ node_t *n = root; -+ -+ /* Find the most lower-right node -+ */ -+ while (n->right) { -+ n = n->right; -+ } -+ -+ /* If the node we found is a leaf node, return an error (we will -+ * have to insert the node instead). 
-+ */ -+ if (n->node_type == NUMBER) { -+ return(-1); -+ } else { -+ n->right = new_node; -+ } -+ return(0); -+} -+ -+/* -+ * add_rchild() -+ */ -+static int -+add_rchild(node_t *root, node_t *new_node) -+{ -+ if (add_node(root, new_node) == -1) { -+ return(-1); -+ } -+ return(0); -+} -+ -+/* -+ * free_type() -+ */ -+static void -+free_type(type_t *head) -+{ -+ type_t *t0, *t1; -+ -+ t0 = head; -+ while(t0) { -+ if (t0->flag == POINTER_FLAG) { -+ t1 = t0->t_next; -+ kl_free_block((void *)t0); -+ t0 = t1; -+ } else { -+ if (t0->flag != KLTYPE_FLAG) { -+ kl_free_block((void *)t0->t_kltp); -+ } -+ kl_free_block((void *)t0); -+ t0 = (type_t *)NULL; -+ } -+ } -+ return; -+} -+ -+/* -+ * get_type() -- Convert a typecast string into a type. -+ * -+ * Returns a pointer to a struct containing type information. -+ * The type of struct returned is indicated by the contents -+ * of type. If the typecast contains an asterisk, set ptr_type -+ * equal to one, otherwise set it equal to zero. -+ */ -+static type_t * -+get_type(char *s, int flags) -+{ -+ int len, type = 0; -+ char *cp, typename[128]; -+ type_t *t, *head, *last; -+ kltype_t *kltp; -+ -+ head = last = (type_t *)NULL; -+ -+ /* Get the type string -+ */ -+ if (!strncmp(s, "struct", 6)) { -+ if ((cp = strpbrk(s + 7, " \t*"))) { -+ len = cp - (s + 7); -+ } else { -+ len = strlen(s + 7); -+ } -+ memcpy(typename, s + 7, len); -+ } else if (!strncmp(s, "union", 5)) { -+ if ((cp = strpbrk(s + 6, " \t*"))) { -+ len = cp - (s + 6); -+ } else { -+ len = strlen(s + 6); -+ } -+ memcpy(typename, s + 6, len); -+ } else { -+ if ((cp = strpbrk(s, "*)"))) { -+ len = cp - s; -+ } else { -+ len = strlen(s); -+ } -+ memcpy(typename, s, len); -+ } -+ -+ /* Strip off any trailing spaces -+ */ -+ while (len && ((typename[len - 1] == ' ') || -+ (typename[len - 1] == '\t'))) { -+ len--; -+ } -+ typename[len] = 0; -+ -+ if (!(kltp = kl_find_type(typename, KLT_TYPES))) { -+ return ((type_t *)NULL); -+ } -+ type = KLTYPE_FLAG; -+ -+ /* check to see if this cast is a pointer to a type, a pointer -+ * to a pointer to a type, etc. -+ */ -+ cp = s; -+ while ((cp = strpbrk(cp, "*"))) { -+ t = (type_t *)kl_alloc_block(sizeof(type_t)); -+ t->flag = POINTER_FLAG; -+ if (last) { -+ last->t_next = t; -+ last = t; -+ } else { -+ head = last = t; -+ } -+ cp++; -+ } -+ -+ /* Allocate a type block that will point to the type specific -+ * record. -+ */ -+ t = (type_t *)kl_alloc_block(sizeof(type_t)); -+ t->flag = type; -+ -+ switch (t->flag) { -+ -+ case KLTYPE_FLAG: -+ t->t_kltp = kltp; -+ break; -+ -+ default: -+ free_type(head); -+ return((type_t*)NULL); -+ } -+ if (last) { -+ last->t_next = t; -+ } else { -+ head = t; -+ } -+ return(head); -+} -+ -+/* -+ * free_node() -+ */ -+static void -+free_node(node_t *np) -+{ -+ /* If there is nothing to free, just return. -+ */ -+ if (!np) { -+ return; -+ } -+ if (np->name) { -+ kl_free_block((void *)np->name); -+ } -+ free_type(np->type); -+ kl_free_block((void *)np); -+} -+ -+/* -+ * free_nodes() -+ */ -+void -+free_nodes(node_t *np) -+{ -+ node_t *q; -+ -+ /* If there is nothing to free, just return. 
-+ */ -+ if (!np) { -+ return; -+ } -+ if ((q = np->left)) { -+ free_nodes(q); -+ } -+ if ((q = np->right)) { -+ free_nodes(q); -+ } -+ if (np->name) { -+ kl_free_block((void *)np->name); -+ } -+ free_type(np->type); -+ kl_free_block((void *)np); -+} -+ -+/* -+ * free_nodelist() -+ */ -+static void -+free_nodelist(node_t *np) -+{ -+ node_t *nnp; -+ -+ while(np) { -+ nnp = np->next; -+ free_node(np); -+ np = nnp; -+ } -+} -+ -+extern int alloc_debug; -+ -+/* -+ * free_eval_memory() -+ */ -+void -+free_eval_memory(void) -+{ -+ free_nodelist(node_list); -+ node_list = (node_t*)NULL; -+} -+ -+/* -+ * get_sizeof() -+ */ -+static node_t * -+get_sizeof() -+{ -+ node_t *curnp, *n0 = NULL; -+ -+ if (!(curnp = next_node())) { -+ set_eval_error(E_SYNTAX_ERROR); -+ return((node_t*)NULL); -+ } -+ -+ /* The next token should be a CAST or an open paren. -+ * If it's something else, then return an error. -+ */ -+ if (curnp->operator == OPEN_PAREN) { -+ free_nodes(curnp); -+ n0 = do_eval(C_SIZEOF); -+ if (eval_error) { -+ error_token = n0->tok_ptr; -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ } else if (curnp->operator == CAST) { -+ n0 = curnp; -+ } else { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n0->tok_ptr; -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ -+ if (!n0->type) { -+ set_eval_error(E_NOTYPE); -+ error_token = n0->tok_ptr; -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ -+ if (n0->type->flag & POINTER_FLAG) { -+ n0->value = sizeof(void *); -+ } else if (n0->type->flag & KLTYPE_FLAG) { -+ kltype_t *kltp; -+ -+ kltp = kl_realtype(n0->type->t_kltp, 0); -+ -+ if (kltp->kl_bit_size) { -+ n0->value = kltp->kl_bit_size / 8; -+ if (kltp->kl_bit_size % 8) { -+ n0->value += 1; -+ } -+ } else { -+ n0->value = kltp->kl_size; -+ } -+ } else { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n0->tok_ptr; -+ free_nodes(n0); -+ return((node_t*)NULL); -+ } -+ n0->node_type = NUMBER; -+ n0->flags = 0; -+ n0->operator = 0; -+ n0->byte_size = 0; -+ n0->address = 0; -+ if (n0->type) { -+ free_type(n0->type); -+ n0->type = 0; -+ } -+ return(n0); -+} -+ -+/* -+ * apply_unary() -+ */ -+static int -+apply_unary(node_t *n, uint64_t *value) -+{ -+ if (!n || !n->right) { -+ return(-1); -+ } -+ -+ switch (n->operator) { -+ -+ case UNARY_MINUS : -+ *value = (0 - n->right->value); -+ break; -+ -+ case UNARY_PLUS : -+ *value = (n->right->value); -+ break; -+ -+ case ONES_COMPLEMENT : -+ *value = ~(n->right->value); -+ break; -+ -+ case LOGICAL_NEGATION : -+ if (n->right->value) { -+ *value = 0; -+ } else { -+ *value = 1; -+ } -+ logical_flag++; -+ break; -+ -+ default : -+ break; -+ } -+ return(0); -+} -+ -+/* -+ * pointer_math() -+ */ -+static int -+pointer_math(node_t *np, uint64_t *value, int type, int flags) -+{ -+ int size; -+ uint64_t lvalue, rvalue; -+ type_t *tp = NULL, *tp1; -+ -+ if (type < 0) { -+ if (np->left->flags & POINTER_FLAG) { -+ -+ /* Since we only allow pointer math, -+ * anything other than a pointer causes -+ * failure. -+ */ -+ tp = (type_t*)np->left->type; -+ if (tp->flag != POINTER_FLAG) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = np->left->tok_ptr; -+ return(-1); -+ } -+ -+ tp = tp->t_next; -+ -+ switch (tp->flag) { -+ -+ case POINTER_FLAG : -+ size = sizeof(void *); -+ break; -+ -+ case KLTYPE_FLAG : { -+ /* Get the size of the real type, -+ * not just the size of a pointer -+ * If there isn't any type info, -+ * then just set size equal to the -+ * size of a pointer. 
-+ */ -+ kltype_t *kltp, *rkltp; -+ -+ kltp = tp->t_kltp; -+ rkltp = kl_realtype(kltp, 0); -+ if (!(size = rkltp->kl_size)) { -+ if (kltp != rkltp) { -+ size = kltp->kl_size; -+ } else { -+ size = sizeof(void *); -+ } -+ } -+ break; -+ } -+ -+ default : -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = np->left->tok_ptr; -+ return(-1); -+ } -+ lvalue = np->left->value; -+ } else { -+ size = sizeof(void *); -+ lvalue = np->left->address; -+ } -+ switch (np->operator) { -+ case ADD : -+ *value = lvalue + (np->right->value * size); -+ break; -+ -+ case SUBTRACT : -+ *value = lvalue - (np->right->value * size); -+ break; -+ -+ default : -+ set_eval_error(E_BAD_OPERATOR); -+ error_token = np->tok_ptr; -+ return(-1); -+ } -+ } else if (type > 0) { -+ if (np->right->flags & POINTER_FLAG) { -+ -+ /* Since we only allow pointer math, -+ * anything other than a pointer causes -+ * failure. -+ */ -+ tp = (type_t*)np->right->type; -+ if (tp->flag != POINTER_FLAG) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = np->right->tok_ptr; -+ return(-1); -+ } -+ -+ tp = tp->t_next; -+ -+ switch (tp->flag) { -+ -+ case POINTER_FLAG : -+ size = sizeof(void *); -+ break; -+ -+ case KLTYPE_FLAG : -+ size = tp->t_kltp->kl_size; -+ break; -+ -+ default : -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = np->right->tok_ptr; -+ return(-1); -+ } -+ rvalue = np->right->value; -+ } else { -+ size = sizeof(void *); -+ rvalue = np->right->address; -+ } -+ switch (np->operator) { -+ case ADD : -+ *value = rvalue + (np->left->value * size); -+ break; -+ -+ case SUBTRACT : -+ *value = rvalue - (np->left->value * size); -+ break; -+ -+ default : -+ set_eval_error(E_BAD_OPERATOR); -+ error_token = np->tok_ptr; -+ return(-1); -+ } -+ } else { -+ return(-1); -+ } -+ tp1 = (type_t *)kl_alloc_block(sizeof(type_t)); -+ tp1->flag = POINTER_FLAG; -+ np->type = tp1; -+ while (tp->flag == POINTER_FLAG) { -+ tp1->t_next = (type_t *)kl_alloc_block(sizeof(type_t)); -+ tp1->flag = POINTER_FLAG; -+ tp1 = tp1->t_next; -+ tp = tp->t_next; -+ } -+ if (tp) { -+ tp1->t_next = (type_t *)kl_alloc_block(sizeof(type_t)); -+ tp1 = tp1->t_next; -+ tp1->flag = KLTYPE_FLAG; -+ tp1->t_kltp = tp->t_kltp; -+ if (type < 0) { -+ if (np->left->flags & POINTER_FLAG) { -+ np->flags |= POINTER_FLAG; -+ } else { -+ np->flags |= VADDR; -+ } -+ } else { -+ if (np->right->flags & POINTER_FLAG) { -+ np->flags |= POINTER_FLAG; -+ } else { -+ np->flags |= VADDR; -+ } -+ } -+ } -+ return(0); -+} -+ -+/* -+ * check_unsigned() -+ */ -+int -+check_unsigned(node_t *np) -+{ -+ kltype_t *kltp, *rkltp; -+ -+ if (np->flags & UNSIGNED_FLAG) { -+ return(1); -+ } -+ if (!np->type) { -+ return(0); -+ } -+ if (np->type->flag == POINTER_FLAG) { -+ return(0); -+ } -+ kltp = np->type->t_kltp; -+ if ((rkltp = kl_realtype(kltp, 0))) { -+ if (rkltp->kl_encoding == ENC_UNSIGNED) { -+ np->flags |= UNSIGNED_FLAG; -+ return(1); -+ } -+ } -+ return(0); -+} -+ -+/* -+ * apply() -+ */ -+static int -+apply(node_t *np, uint64_t *value, int flags) -+{ -+ int ltype, rtype, do_signed = 0; -+ -+ /* There must be two operands -+ */ -+ if (!np->right || !np->left) { -+ set_eval_error(E_MISSING_OPERAND); -+ error_token = np->tok_ptr; -+ return(-1); -+ } -+ -+ if (np->right->node_type == OPERATOR) { -+ replace(np->right, flags); -+ if (eval_error) { -+ return(-1); -+ } -+ } -+ -+ ltype = np->left->node_type; -+ rtype = np->right->node_type; -+ if ((ltype == TYPE_DEF) || (ltype == VADDR)) { -+ if ((rtype == TYPE_DEF) || (rtype == VADDR)) { -+ set_eval_error(E_NO_VALUE); -+ error_token = 
np->tok_ptr; -+ return(-1); -+ } -+ if (check_unsigned(np->left)) { -+ np->flags |= UNSIGNED_FLAG; -+ } else { -+ do_signed++; -+ } -+ if (!type_to_number(np->left)) { -+ return(pointer_math(np, value, -1, flags)); -+ } -+ np->byte_size = np->left->byte_size; -+ } else if ((rtype == TYPE_DEF) || (rtype == VADDR)) { -+ if ((ltype == TYPE_DEF) || (ltype == VADDR)) { -+ error_token = np->tok_ptr; -+ set_eval_error(E_NO_VALUE); -+ return(-1); -+ } -+ if (check_unsigned(np->right)) { -+ np->flags |= UNSIGNED_FLAG; -+ } else { -+ do_signed++; -+ } -+ if (!type_to_number(np->right)) { -+ return(pointer_math(np, value, 1, flags)); -+ } -+ np->byte_size = np->right->byte_size; -+ } else if ((np->left->flags & UNSIGNED_FLAG) || -+ (np->right->flags & UNSIGNED_FLAG)) { -+ np->flags |= UNSIGNED_FLAG; -+ } else { -+ do_signed++; -+ } -+ -+ if (do_signed) { -+ switch (np->operator) { -+ case ADD : -+ *value = (int64_t)np->left->value + -+ (int64_t)np->right->value; -+ break; -+ -+ case SUBTRACT : -+ *value = (int64_t)np->left->value - -+ (int64_t)np->right->value; -+ break; -+ -+ case MULTIPLY : -+ *value = (int64_t)np->left->value * -+ (int64_t)np->right->value; -+ break; -+ -+ case DIVIDE : -+ if ((int64_t)np->right->value == 0) { -+ set_eval_error(E_DIVIDE_BY_ZERO); -+ error_token = np->right->tok_ptr; -+ return(-1); -+ } -+ *value = (int64_t)np->left->value / -+ (int64_t)np->right->value; -+ break; -+ -+ case BITWISE_OR : -+ *value = (int64_t)np->left->value | -+ (int64_t)np->right->value; -+ break; -+ -+ case BITWISE_AND : -+ *value = (int64_t)np->left->value & -+ (int64_t)np->right->value; -+ break; -+ -+ case MODULUS : -+ if ((int64_t)np->right->value == 0) { -+ set_eval_error(E_DIVIDE_BY_ZERO); -+ error_token = np->right->tok_ptr; -+ return(-1); -+ } -+ *value = (int64_t)np->left->value % -+ (int64_t)np->right->value; -+ break; -+ -+ case RIGHT_SHIFT : -+ *value = -+ (int64_t)np->left->value >> -+ (int64_t)np->right->value; -+ break; -+ -+ case LEFT_SHIFT : -+ *value = -+ (int64_t)np->left->value << -+ (int64_t)np->right->value; -+ break; -+ -+ case LOGICAL_OR : -+ if ((int64_t)np->left->value || -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case LOGICAL_AND : -+ if ((int64_t)np->left->value && -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case EQUAL : -+ if ((int64_t)np->left->value == -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case NOT_EQUAL : -+ if ((int64_t)np->left->value != -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case LESS_THAN : -+ if ((int64_t)np->left->value < -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case GREATER_THAN : -+ if ((int64_t)np->left->value > -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case LESS_THAN_OR_EQUAL : -+ if ((int64_t)np->left->value <= -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case GREATER_THAN_OR_EQUAL : -+ if ((int64_t)np->left->value >= -+ (int64_t)np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ default : -+ break; -+ } -+ } else { -+ switch (np->operator) { -+ case ADD : -+ *value = np->left->value + np->right->value; -+ 
break; -+ -+ case SUBTRACT : -+ *value = np->left->value - np->right->value; -+ break; -+ -+ case MULTIPLY : -+ *value = np->left->value * np->right->value; -+ break; -+ -+ case DIVIDE : -+ *value = np->left->value / np->right->value; -+ break; -+ -+ case BITWISE_OR : -+ *value = np->left->value | np->right->value; -+ break; -+ -+ case BITWISE_AND : -+ *value = np->left->value & np->right->value; -+ break; -+ -+ case MODULUS : -+ *value = np->left->value % np->right->value; -+ break; -+ -+ case RIGHT_SHIFT : -+ *value = np->left->value >> np->right->value; -+ break; -+ -+ case LEFT_SHIFT : -+ *value = np->left->value << np->right->value; -+ break; -+ -+ case LOGICAL_OR : -+ if (np->left->value || np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case LOGICAL_AND : -+ if (np->left->value && np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case EQUAL : -+ if (np->left->value == np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case NOT_EQUAL : -+ if (np->left->value != np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case LESS_THAN : -+ if (np->left->value < np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case GREATER_THAN : -+ if (np->left->value > np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case LESS_THAN_OR_EQUAL : -+ if (np->left->value <= np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ case GREATER_THAN_OR_EQUAL : -+ if (np->left->value >= np->right->value) { -+ *value = 1; -+ } else { -+ *value = 0; -+ } -+ logical_flag++; -+ break; -+ -+ default : -+ break; -+ } -+ } -+ return(0); -+} -+ -+/* -+ * member_to_type() -+ */ -+static type_t * -+member_to_type(kltype_t *kltp, int flags) -+{ -+ kltype_t *rkltp; -+ type_t *tp, *head = (type_t *)NULL, *last = (type_t *)NULL; -+ -+ /* Make sure this is a member -+ */ -+ if (kltp->kl_type != KLT_MEMBER) { -+ return((type_t *)NULL); -+ } -+ -+ rkltp = kltp->kl_realtype; -+ while (rkltp && rkltp->kl_type == KLT_POINTER) { -+ tp = (type_t *)kl_alloc_block(sizeof(type_t)); -+ tp->flag = POINTER_FLAG; -+ if (last) { -+ last->t_next = tp; -+ last = tp; -+ } else { -+ head = last = tp; -+ } -+ rkltp = rkltp->kl_realtype; -+ } -+ -+ /* If We step past all the pointer records and don't point -+ * at anything, this must be a void pointer. Setup a VOID -+ * type struct so that we can maintain a pointer to some -+ * type info. -+ */ -+ if (!rkltp) { -+ tp = (type_t *)kl_alloc_block(sizeof(type_t)); -+ tp->flag = VOID_FLAG; -+ tp->t_kltp = kltp; -+ if (last) { -+ last->t_next = tp; -+ last = tp; -+ } else { -+ head = last = tp; -+ } -+ return(head); -+ } -+ -+ tp = (type_t *)kl_alloc_block(sizeof(type_t)); -+ tp->flag = KLTYPE_FLAG; -+ tp->t_kltp = kltp; -+ if (last) { -+ last->t_next = tp; -+ } else { -+ head = tp; -+ } -+ return(head); -+} -+ -+/* -+ * replace() -- -+ * -+ * Replace the tree with a node containing the numerical result of -+ * the equation. If pointer math is performed, the result will have -+ * the same type as the pointer. 
-+ */ -+static node_t * -+replace(node_t *np, int flags) -+{ -+ int offset; -+ uint64_t value; -+ node_t *q; -+ -+ if (!np) { -+ return((node_t *)NULL); -+ } -+ -+ if (np->node_type == OPERATOR) { -+ if (!(q = np->left)) { -+ return((node_t *)NULL); -+ } -+ while (q) { -+ if (!replace(q, flags)) { -+ return((node_t *)NULL); -+ } -+ q = q->right; -+ } -+ -+ if ((np->operator == RIGHT_ARROW) || (np->operator == DOT)) { -+ kaddr_t addr = 0; -+ type_t *tp; -+ -+ if (!have_debug_file) { -+ kdb_printf("no debuginfo file\n"); -+ return 0; -+ } -+ -+ /* The left node must point to a TYPE_DEF -+ */ -+ if (np->left->node_type != TYPE_DEF) { -+ if (np->left->flags & NOTYPE_FLAG) { -+ set_eval_error(E_NOTYPE); -+ error_token = np->left->tok_ptr; -+ } else { -+ set_eval_error(E_BAD_TYPE); -+ error_token = np->left->tok_ptr; -+ } -+ return((node_t *)NULL); -+ } -+ -+ /* Get the type information. Check to see if we -+ * have a pointer to a type. If we do, we need -+ * to strip off the pointer and get the type info. -+ */ -+ if (np->left->type->flag == POINTER_FLAG) { -+ tp = np->left->type->t_next; -+ kl_free_block((void *)np->left->type); -+ } else { -+ tp = np->left->type; -+ } -+ -+ /* We need to zero out the left child's type pointer -+ * to prevent the type structs from being prematurely -+ * freed (upon success). We have to remember, however, -+ * to the free the type information before we return. -+ */ -+ np->left->type = (type_t*)NULL; -+ -+ /* tp should now point at a type_t struct that -+ * references a kltype_t struct. If it points -+ * to anything else, return failure. -+ * -+ */ -+ if (tp->flag != KLTYPE_FLAG) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = np->left->tok_ptr; -+ free_type(tp); -+ return((node_t *)NULL); -+ } -+ -+ switch (tp->flag) { -+ case KLTYPE_FLAG: { -+ /* Make sure that the type referenced -+ * is a struct, union, or pointer to -+ * a struct or union. If it isn't one -+ * of these, then return failure. -+ */ -+ kltype_t *kltp, *kltmp; -+ -+ kltp = kl_realtype(tp->t_kltp, 0); -+ if ((kltp->kl_type != KLT_STRUCT) && -+ (kltp->kl_type != KLT_UNION)) { -+ error_token = -+ np->left->tok_ptr; -+ set_eval_error(E_BAD_TYPE); -+ free_type(tp); -+ return((node_t *)NULL); -+ } -+ -+ /* Get type information for member. -+ * If member is a pointer to a type, -+ * get the pointer address and load -+ * it into value. In any event, load -+ * the struct/union address plus the -+ * offset of the member. -+ */ -+ kltmp = kl_get_member(kltp, -+ np->right->name); -+ if (!kltmp) { -+ set_eval_error(E_BAD_MEMBER); -+ error_token = -+ np->right->tok_ptr; -+ free_type(tp); -+ return((node_t *)NULL); -+ } -+ -+ /* We can't just use the offset value -+ * for the member. That's because it -+ * may be from an anonymous struct or -+ * union within another struct -+ * definition. 
-+ */ -+ offset = kl_get_member_offset(kltp, -+ np->right->name); -+ np->type = member_to_type(kltmp, flags); -+ if (!np->type) { -+ set_eval_error(E_BAD_MEMBER); -+ error_token = -+ np->right->tok_ptr; -+ free_type(tp); -+ return((node_t *)NULL); -+ } -+ -+ /* Now free the struct type information -+ */ -+ free_type(tp); -+ np->node_type = TYPE_DEF; -+ np->flags |= KLTYPE_FLAG; -+ np->operator = 0; -+ addr = 0; -+ if (np->left->flags & POINTER_FLAG) { -+ addr = np->left->value + -+ offset; -+ } else if (np->left->flags & -+ ADDRESS_FLAG) { -+ addr = np->left->address + -+ offset; -+ } -+ if (addr) { -+ np->address = addr; -+ np->flags |= ADDRESS_FLAG; -+ } -+ -+ if (np->type->flag == POINTER_FLAG) { -+ np->flags |= POINTER_FLAG; -+ np->value = *((kaddr_t *)addr); -+ } else { -+ np->value = addr; -+ } -+ break; -+ } -+ } -+ free_nodes(np->left); -+ free_nodes(np->right); -+ np->left = np->right = (node_t*)NULL; -+ return(np); -+ } else { -+ if (!np->left || !np->right) { -+ set_eval_error(E_MISSING_OPERAND); -+ error_token = np->tok_ptr; -+ return((node_t *)NULL); -+ } -+ if (np->left->byte_size && np->right->byte_size) { -+ if (np->left->byte_size > -+ np->right->byte_size) { -+ -+ /* Left byte_size is greater than right -+ */ -+ np->byte_size = np->left->byte_size; -+ np->type = np->left->type; -+ np->flags = np->left->flags; -+ free_type(np->right->type); -+ } else if (np->left->byte_size < -+ np->right->byte_size) { -+ -+ /* Right byte_size is greater than left -+ */ -+ np->byte_size = np->right->byte_size; -+ np->type = np->right->type; -+ np->flags = np->right->flags; -+ free_type(np->left->type); -+ } else { -+ -+ /* Left and right byte_size is equal -+ */ -+ if (np->left->flags & UNSIGNED_FLAG) { -+ np->byte_size = -+ np->left->byte_size; -+ np->type = np->left->type; -+ np->flags = np->left->flags; -+ free_type(np->right->type); -+ } else if (np->right->flags & -+ UNSIGNED_FLAG) { -+ np->byte_size = -+ np->right->byte_size; -+ np->type = np->right->type; -+ np->flags = np->right->flags; -+ free_type(np->left->type); -+ } else { -+ np->byte_size = -+ np->left->byte_size; -+ np->type = np->left->type; -+ np->flags = np->left->flags; -+ free_type(np->right->type); -+ } -+ } -+ } else if (np->left->byte_size) { -+ np->byte_size = np->left->byte_size; -+ np->type = np->left->type; -+ np->flags = np->left->flags; -+ free_type(np->right->type); -+ } else if (np->right->byte_size) { -+ np->byte_size = np->right->byte_size; -+ np->type = np->right->type; -+ np->flags = np->right->flags; -+ } else { -+ /* XXX - No byte sizes -+ */ -+ } -+ -+ if (apply(np, &value, flags)) { -+ return((node_t *)NULL); -+ } -+ } -+ np->right->type = np->left->type = (type_t*)NULL; -+ -+ /* Flesh out the rest of the node struct. 
-+ */ -+ if (np->type) { -+ np->node_type = TYPE_DEF; -+ np->flags |= KLTYPE_FLAG; -+ } else { -+ np->node_type = NUMBER; -+ np->flags &= ~(KLTYPE_FLAG); -+ } -+ np->operator = 0; -+ np->value = value; -+ kl_free_block((void *)np->left); -+ kl_free_block((void *)np->right); -+ np->left = np->right = (node_t*)NULL; -+ } -+ return(np); -+} -+ -+/* -+ * replace_cast() -+ */ -+static int -+replace_cast(node_t *n, int flags) -+{ -+ type_t *t; -+ -+ if (!n) { -+ set_eval_error(E_SYNTAX_ERROR); -+ return(-1); -+ } else if (!n->right) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = n->tok_ptr; -+ return(-1); -+ } -+ if (n->flags & POINTER_FLAG) { -+ if (n->right->node_type == VADDR) { -+ if (n->right->flags & ADDRESS_FLAG) { -+ n->value = n->right->address; -+ } else { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ -+ } else { -+ n->value = n->right->value; -+ n->address = 0; -+ } -+ } else if (n->right->flags & ADDRESS_FLAG) { -+ n->flags |= ADDRESS_FLAG; -+ n->address = n->right->address; -+ n->value = n->right->value; -+ } else { -+ kltype_t *kltp; -+ -+ if (!(t = eval_type(n))) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->tok_ptr; -+ return(-1); -+ } -+ if (t->t_kltp->kl_type != KLT_BASE) { -+ -+ kltp = kl_realtype(t->t_kltp, 0); -+ if (kltp->kl_type != KLT_BASE) { -+ set_eval_error(E_BAD_CAST); -+ error_token = n->tok_ptr; -+ return(-1); -+ } -+ } -+ n->value = n->right->value; -+ n->type = t; -+ } -+ n->node_type = TYPE_DEF; -+ n->operator = 0; -+ free_node(n->right); -+ n->right = (node_t *)NULL; -+ return(0); -+} -+ -+/* -+ * replace_indirection() -+ */ -+static int -+replace_indirection(node_t *n, int flags) -+{ -+ kaddr_t addr; -+ type_t *t, *tp, *rtp; -+ -+ /* Make sure there is a right child and that it is a TYPE_DEF. -+ */ -+ if (!n->right) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->tok_ptr; -+ return(-1); -+ } else if (n->right->node_type != TYPE_DEF) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ -+ /* Make sure the right node contains a pointer or address value. -+ * Note that it's possible for the whatis command to generate -+ * this case without any actual pointer/address value. -+ */ -+ if (!(n->right->flags & (POINTER_FLAG|ADDRESS_FLAG))) { -+ set_eval_error(E_BAD_POINTER); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ -+ /* Get the pointer to the first type struct and make sure -+ * it's a pointer. -+ */ -+ if (!(tp = n->right->type) || (tp->flag != POINTER_FLAG)) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ -+ /* Make sure we have a pointer to a type structure. -+ */ -+ if (!(n->right->flags & KLTYPE_FLAG)) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ -+ n->node_type = TYPE_DEF; -+ n->flags = KLTYPE_FLAG; -+ n->operator = 0; -+ -+ if (!(t = tp->t_next)) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ -+ if (!(rtp = eval_type(n->right))) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ -+ /* Zero out the type field in the right child so -+ * it won't accidentally be freed when the right child -+ * is freed (upon success).
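A reduced standalone sketch, not taken from the patch, of the ownership hand-off idiom that replace() and replace_indirection() rely on: clear the child's type pointer once the parent has taken it over, so freeing the child cannot free the shared record a second time. The struct names here are invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct type { int id; };
struct node { struct type *type; };

static void free_node(struct node *n)
{
	if (n) {
		free(n->type);	/* frees whatever the node still owns */
		free(n);
	}
}

int main(void)
{
	struct node *child = calloc(1, sizeof(*child));
	struct node *parent = calloc(1, sizeof(*parent));

	if (!child || !parent)
		return 1;
	child->type = malloc(sizeof(struct type));
	if (!child->type)
		return 1;
	child->type->id = 42;

	parent->type = child->type;	/* parent takes ownership...    */
	child->type = NULL;		/* ...so the child must drop it */
	free_node(child);		/* no double free later         */

	printf("parent's type id: %d\n", parent->type->id);
	free_node(parent);
	return 0;
}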
-+ */ -+ n->right->type = (type_t*)NULL; -+ -+ n->type = t; -+ -+ /* Free the pointer struct -+ */ -+ kl_free_block((void *)tp); -+ -+ /* Get the pointer address -+ */ -+ addr = n->address = n->right->value; -+ n->flags |= ADDRESS_FLAG; -+ -+ if (rtp->t_kltp->kl_type == KLT_MEMBER) { -+ /* If this is a member, we have to step over the KLT_MEMBER -+ * struct and then make sure we have a KLT_POINTER struct. -+ * If we do, we step over it too...otherwise return an -+ * error. -+ */ -+ if (rtp->t_kltp->kl_realtype->kl_type != KLT_POINTER) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ rtp->t_kltp = rtp->t_kltp->kl_realtype; -+ } -+ -+ if (rtp->t_kltp->kl_type == KLT_POINTER) { -+ /* Strip off the pointer type record so that -+ * we pick up the actual type definition with -+ * our indirection. -+ */ -+ rtp->t_kltp = rtp->t_kltp->kl_realtype; -+ if (rtp->t_kltp->kl_name && -+ !strcmp(rtp->t_kltp->kl_name, "char")) { -+ n->flags |= STRING_FLAG; -+ } -+ } -+ -+ -+ /* If this is a pointer to a pointer, get the next -+ * pointer value. -+ */ -+ if (n->type->flag == POINTER_FLAG) { -+ n->value = *((kaddr_t *)addr); -+ -+ /* Set the appropriate node flag values -+ */ -+ n->flags |= POINTER_FLAG; -+ free_node(n->right); -+ n->left = n->right = (node_t *)NULL; -+ return(0); -+ } -+ /* Zero out the type field in the right child so it doesn't -+ * accidently get freed up when the right child is freed -+ * (upon success). -+ */ -+ n->right->type = (type_t*)NULL; -+ free_node(n->right); -+ n->left = n->right = (node_t *)NULL; -+ return(0); -+} -+ -+/* -+ * replace_unary() -+ * -+ * Convert a unary operator node that contains a pointer to a value -+ * with a node containing the numerical result. Free the node that -+ * originally contained the value. 
-+ */ -+static int -+replace_unary(node_t *n, int flags) -+{ -+ uint64_t value; -+ -+ if (!n->right) { -+ set_eval_error(E_MISSING_OPERAND); -+ error_token = n->tok_ptr; -+ return(-1); -+ } -+ if (is_unary(n->right->operator)) { -+ if (replace_unary(n->right, flags) == -1) { -+ return(-1); -+ } -+ } -+ if (n->operator == CAST) { -+ return(replace_cast(n, flags)); -+ } else if (n->operator == INDIRECTION) { -+ return(replace_indirection(n, flags)); -+ } else if (n->operator == ADDRESS) { -+ type_t *t; -+ -+ if (n->right->node_type == TYPE_DEF) { -+ if (!(n->right->flags & ADDRESS_FLAG)) { -+ set_eval_error(E_NO_ADDRESS); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ t = n->right->type; -+ } else { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->right->tok_ptr; -+ return(-1); -+ } -+ n->type = (type_t*)kl_alloc_block(sizeof(type_t)); -+ n->type->flag = POINTER_FLAG; -+ n->type->t_next = t; -+ n->node_type = TYPE_DEF; -+ n->operator = 0; -+ n->value = n->right->address; -+ n->flags = POINTER_FLAG; -+ if (!(t = eval_type(n))) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = n->tok_ptr; -+ return(-1); -+ } -+ n->flags |= t->flag; -+ n->right->type = 0; -+ free_nodes(n->right); -+ n->left = n->right = (node_t *)NULL; -+ return(0); -+ } else if (apply_unary(n, &value) == -1) { -+ return(-1); -+ } -+ free_nodes(n->right); -+ n->node_type = NUMBER; -+ n->operator = 0; -+ n->left = n->right = (node_t *)NULL; -+ memcpy(&n->value, &value, sizeof(uint64_t)); -+ return(0); -+} -+ -+/* -+ * pointer_to_element() -+ */ -+static void -+pointer_to_element(node_t *n0, node_t *n1) -+{ -+ int size; -+ kltype_t *kltp, *rkltp; -+ type_t *tp; -+ -+ if (!(tp = n0->type)) { -+ set_eval_error(E_BAD_INDEX); -+ error_token = n0->tok_ptr; -+ return; -+ } -+ if (tp->t_next->flag == POINTER_FLAG) { -+ size = sizeof(void *); -+ } else { -+ kltp = tp->t_next->t_kltp; -+ if (!(rkltp = kl_realtype(kltp, 0))) { -+ set_eval_error(E_BAD_INDEX); -+ error_token = n0->tok_ptr; -+ return; -+ } -+ size = rkltp->kl_size; -+ } -+ -+ /* Get the details on the array element -+ */ -+ n0->flags |= ADDRESS_FLAG; -+ n0->address = n0->value + (n1->value * size); -+ n0->type = tp->t_next; -+ kl_free_block((char *)tp); -+ if (tp->t_next->flag == POINTER_FLAG) { -+ n0->flags |= POINTER_FLAG; -+ n0->value = *((kaddr_t *)n0->address); -+ } else { -+ n0->flags &= (~POINTER_FLAG); -+ n0->value = 0; -+ } -+} -+ -+/* -+ * array_to_element() -+ */ -+static void -+array_to_element(node_t *n0, node_t *n1) -+{ -+ kltype_t *kltp, *rkltp, *ip, *ep; -+ type_t *tp, *troot = (type_t *)NULL; -+ -+ if (!(tp = n0->type)) { -+ set_eval_error(E_BAD_INDEX); -+ error_token = n0->tok_ptr; -+ return; -+ } -+ -+ /* If we are indexing a pointer, then make a call to the -+ * pointer_to_element() and return. 
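For orientation, a standalone sketch (not part of the patch) of the address arithmetic that pointer_to_element() and array_to_element() below both reduce indexing to: the address of element i is the base address plus i times the element size, with sizeof(void *) used when the elements are themselves pointers. The array names are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int        values[8];
	int       *pointers[8];
	uintptr_t  base, elem3;

	/* element address = base + index * element size */
	base  = (uintptr_t)values;
	elem3 = base + 3 * sizeof(values[0]);
	printf("&values[3]   = %p, computed = %#lx\n",
	       (void *)&values[3], (unsigned long)elem3);

	/* for an array of pointers the element size is sizeof(void *) */
	base  = (uintptr_t)pointers;
	elem3 = base + 3 * sizeof(void *);
	printf("&pointers[3] = %p, computed = %#lx\n",
	       (void *)&pointers[3], (unsigned long)elem3);
	return 0;
}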
-+ */ -+ if (tp->flag == POINTER_FLAG) { -+ return(pointer_to_element(n0, n1)); -+ } -+ -+ if (!(kltp = n0->type->t_kltp)) { -+ set_eval_error(E_BAD_INDEX); -+ error_token = n0->tok_ptr; -+ return; -+ } -+ if (!(rkltp = kl_realtype(kltp, KLT_ARRAY))) { -+ set_eval_error(E_BAD_INDEX); -+ error_token = n0->tok_ptr; -+ return; -+ } -+ ip = rkltp->kl_indextype; -+ ep = rkltp->kl_elementtype; -+ if (!ip || !ep) { -+ set_eval_error(E_BAD_INDEX); -+ error_token = n1->tok_ptr; -+ return; -+ } -+ /* Get the details on the array element -+ */ -+ n0->address = n0->address + (n1->value * ep->kl_size); -+ if (ep->kl_type == KLT_POINTER) { -+ n0->flags |= POINTER_FLAG; -+ n0->value = *((kaddr_t *)n0->address); -+ } else { -+ n0->value = 0; -+ } -+ n0->flags |= ADDRESS_FLAG; -+ kltp = ep; -+ while (kltp->kl_type == KLT_POINTER) { -+ if (troot) { -+ tp->t_next = (type_t*)kl_alloc_block(sizeof(type_t)); -+ tp = tp->t_next; -+ } else { -+ tp = (type_t*)kl_alloc_block(sizeof(type_t)); -+ troot = tp; -+ } -+ tp->flag = POINTER_FLAG; -+ kltp = kltp->kl_realtype; -+ } -+ if (troot) { -+ tp->t_next = (type_t*)kl_alloc_block(sizeof(type_t)); -+ tp = tp->t_next; -+ n0->type = troot; -+ } else { -+ tp = (type_t*)kl_alloc_block(sizeof(type_t)); -+ n0->type = tp; -+ } -+ tp->flag = KLTYPE_FLAG; -+ tp->t_kltp = ep; -+} -+ -+/* -+ * number_to_size() -+ */ -+int -+number_to_size(node_t *np) -+{ -+ int unsigned_flag = 0; -+ -+ if (np->node_type != NUMBER) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ if (np->flags & UNSIGNED_FLAG) { -+ unsigned_flag = 1; -+ } -+ if ((np->value >= 0) && (np->value <= 0xffffffff)) { -+ return(4); -+ } else if (((np->value >> 32) & 0xffffffff) == 0xffffffff) { -+ if (unsigned_flag) { -+ return(8); -+ } else if (sizeof(void *) == 4) { -+ return(4); -+ } else { -+ return(8); -+ } -+ } -+ return(8); -+} -+ -+/* -+ * number_to_type() -+ */ -+kltype_t * -+number_to_type(node_t *np) -+{ -+ int unsigned_flag = 0; -+ kltype_t *kltp, *rkltp = (kltype_t *)NULL; -+ -+ if (np->node_type != NUMBER) { -+ set_eval_error(E_BAD_TYPE); -+ error_token = np->tok_ptr; -+ return((kltype_t *)NULL); -+ } -+ if (np->flags & UNSIGNED_FLAG) { -+ unsigned_flag = 1; -+ } -+ if ((np->value >= 0) && (np->value <= 0xffffffff)) { -+ if (unsigned_flag) { -+ kltp = kl_find_type("uint32_t", KLT_TYPEDEF); -+ } else { -+ kltp = kl_find_type("int32_t", KLT_TYPEDEF); -+ } -+ } else if (((np->value >> 32) & 0xffffffff) == 0xffffffff) { -+ if (unsigned_flag) { -+ kltp = kl_find_type("uint64_t", KLT_TYPEDEF); -+ } else if (sizeof(void *) == 4) { -+ kltp = kl_find_type("int32_t", KLT_TYPEDEF); -+ } else { -+ kltp = kl_find_type("int64_t", KLT_TYPEDEF); -+ } -+ } else { -+ if (unsigned_flag) { -+ kltp = kl_find_type("uint64_t", KLT_TYPEDEF); -+ } else { -+ kltp = kl_find_type("int64_t", KLT_TYPEDEF); -+ } -+ } -+ if (kltp) { -+ if (!(rkltp = kl_realtype(kltp, 0))) { -+ rkltp = kltp; -+ } -+ } else { -+ set_eval_error(E_BAD_TYPE); -+ error_token = np->tok_ptr; -+ } -+ return(rkltp); -+} -+ -+/* -+ * type_to_number() -+ * -+ * Convert a base type to a numeric value. Return 1 on successful -+ * conversion, 0 if nothing was done. -+ */ -+static int -+type_to_number(node_t *np) -+{ -+ int byte_size, bit_offset, bit_size, encoding; -+ uint64_t value, value1; -+ kltype_t *kltp, *rkltp; -+ -+ /* Sanity check... 
-+ */ -+ if (np->node_type != TYPE_DEF) { -+ set_eval_error(E_NOTYPE); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ if (!np->type) { -+ set_eval_error(E_NOTYPE); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ if (np->type->flag == POINTER_FLAG) { -+ return(0); -+ } -+ -+ /* Get the real type record and make sure that it is -+ * for a base type. -+ */ -+ kltp = np->type->t_kltp; -+ rkltp = kl_realtype(kltp, 0); -+ if (rkltp->kl_type != KLT_BASE) { -+ set_eval_error(E_NOTYPE); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ -+ byte_size = rkltp->kl_size; -+ bit_offset = rkltp->kl_bit_offset; -+ if (!(bit_size = rkltp->kl_bit_size)) { -+ bit_size = byte_size * 8; -+ } -+ encoding = rkltp->kl_encoding; -+ if (np->flags & ADDRESS_FLAG) { -+ /* FIXME: untested */ -+ if (invalid_address(np->address, byte_size)) { -+ kdb_printf("ILLEGAL ADDRESS (%lx)", -+ (uaddr_t)np->address); -+ return (0); -+ } -+ kl_get_block(np->address, byte_size,(void *)&value1,(void *)0); -+ } else { -+ value1 = np->value; -+ } -+ value = kl_get_bit_value(&value1, byte_size, bit_size, bit_offset); -+ switch (byte_size) { -+ -+ case 1 : -+ if (encoding == ENC_UNSIGNED) { -+ np->value = (unsigned char)value; -+ np->flags |= UNSIGNED_FLAG; -+ } else if (encoding == ENC_SIGNED) { -+ np->value = (signed char)value; -+ } else { -+ np->value = (char)value; -+ } -+ break; -+ -+ case 2 : -+ if (encoding == ENC_UNSIGNED) { -+ np->value = (uint16_t)value; -+ np->flags |= UNSIGNED_FLAG; -+ } else { -+ np->value = (int16_t)value; -+ } -+ break; -+ -+ case 4 : -+ if (encoding == ENC_UNSIGNED) { -+ np->value = (uint32_t)value; -+ np->flags |= UNSIGNED_FLAG; -+ } else { -+ np->value = (int32_t)value; -+ } -+ break; -+ -+ case 8 : -+ if (encoding == ENC_UNSIGNED) { -+ np->value = (uint64_t)value; -+ np->flags |= UNSIGNED_FLAG; -+ } else { -+ np->value = (int64_t)value; -+ } -+ break; -+ -+ default : -+ set_eval_error(E_BAD_TYPE); -+ error_token = np->tok_ptr; -+ return(0); -+ } -+ np->byte_size = byte_size; -+ np->node_type = NUMBER; -+ return(1); -+} -+ -+/* -+ * eval_type() -+ */ -+static type_t * -+eval_type(node_t *n) -+{ -+ type_t *t; -+ -+ if (!(t = n->type)) { -+ return((type_t*)NULL); -+ } -+ while (t->flag == POINTER_FLAG) { -+ t = t->t_next; -+ -+ /* If for some reason, there is no type pointer (this shouldn't -+ * happen but...), we have to make sure that we don't try to -+ * reference a NULL pointer and get a SEGV. Return an error if -+ * 't' is NULL. -+ */ -+ if (!t) { -+ return((type_t*)NULL); -+ } -+ } -+ if (t->flag == KLTYPE_FLAG) { -+ return (t); -+ } -+ return((type_t*)NULL); -+} -+ -+/* -+ * expand_variables() -+ */ -+static char * -+expand_variables(char *exp, int flags) -+{ -+ return((char *)NULL); -+} -+ -+/* -+ * eval() -+ */ -+node_t * -+eval(char **exp, int flags) -+{ -+ token_t *tok; -+ node_t *n, *root; -+ char *e, *s; -+ -+ eval_error = 0; -+ logical_flag = 0; -+ -+ /* Make sure there is an expression to evaluate -+ */ -+ if (!(*exp)) { -+ return ((node_t*)NULL); -+ } -+ -+ /* Expand any variables that are in the expression string. If -+ * a new string is allocated by the expand_variables() function, -+ * we need to make sure the original expression string gets -+ * freed. In any event, point s at the current expression string -+ * so that it gets freed up when we are done. 
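A standalone sketch, separate from the patch, of the bit manipulation behind type_to_number() above: pull a field of a given bit size and bit offset out of a raw word, then sign- or zero-extend it according to the encoding. It is an assumption that the LSB-relative bit numbering used here matches what kl_get_bit_value() expects for a particular debug-info format; the word and field below are invented.

#include <stdint.h>
#include <stdio.h>

/* Extract "nbits" bits starting "off" bits above the LSB, optionally
 * sign-extending the result. */
static uint64_t extract_bits(uint64_t raw, unsigned off, unsigned nbits,
			     int is_signed)
{
	uint64_t mask = (nbits >= 64) ? ~0ULL : ((1ULL << nbits) - 1);
	uint64_t val = (raw >> off) & mask;

	if (is_signed && nbits && nbits < 64 && (val & (1ULL << (nbits - 1))))
		val |= ~mask;		/* sign-extend the top bit */
	return val;
}

int main(void)
{
	uint64_t word = 0x0000000000000f80ULL;	/* bits 7..11 set */

	printf("unsigned 5-bit field at 7: %llu\n",
	       (unsigned long long)extract_bits(word, 7, 5, 0));
	printf("signed   5-bit field at 7: %lld\n",
	       (long long)(int64_t)extract_bits(word, 7, 5, 1));
	return 0;
}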
-+ */ -+ if ((e = expand_variables(*exp, 0))) { -+ kl_free_block((void *)*exp); -+ *exp = e; -+ } else if (eval_error) { -+ eval_error |= E_BAD_EVAR; -+ error_token = *exp; -+ } -+ s = *exp; -+ tok = get_token_list(s); -+ if (eval_error) { -+ return((node_t*)NULL); -+ } -+ -+ /* Get the node_list and evaluate the expression. -+ */ -+ node_list = get_node_list(tok, flags); -+ if (eval_error) { -+ free_nodelist(node_list); -+ node_list = (node_t*)NULL; -+ free_tokens(tok); -+ return((node_t*)NULL); -+ } -+ if (!(n = do_eval(flags))) { -+ if (!eval_error) { -+ set_eval_error(E_SYNTAX_ERROR); -+ error_token = s + strlen(s) - 1; -+ } -+ free_nodes(n); -+ free_tokens(tok); -+ return((node_t*)NULL); -+ } -+ -+ if (!(root = replace(n, flags))) { -+ if (eval_error) { -+ free_nodes(n); -+ free_tokens(tok); -+ return((node_t*)NULL); -+ } -+ root = n; -+ } -+ -+ /* Check to see if the the result should -+ * be interpreted as 'true' or 'false' -+ */ -+ if (logical_flag && ((root->value == 0) || (root->value == 1))) { -+ root->flags |= BOOLIAN_FLAG; -+ } -+ free_tokens(tok); -+ return(root); -+} -+ -+/* -+ * print_number() -+ */ -+void -+print_number(node_t *np, int flags) -+{ -+ int size; -+ unsigned long long value; -+ -+ if ((size = number_to_size(np)) && (size != sizeof(uint64_t))) { -+ value = np->value & (((uint64_t)1 << (uint64_t)(size*8))-1); -+ } else { -+ value = np->value; -+ } -+ if (flags & C_HEX) { -+ kdb_printf("0x%llx", value); -+ } else if (flags & C_BINARY) { -+ kdb_printf("0b"); -+ kl_binary_print(value); -+ } else { -+ if (np->flags & UNSIGNED_FLAG) { -+ kdb_printf("%llu", value); -+ } else { -+ kdb_printf("%lld", np->value); -+ } -+ } -+} -+ -+/* -+ * print_string() -+ */ -+void -+print_string(kaddr_t addr, int size) -+{ -+ int i; -+ char *str; -+ -+ if (!size) { -+ size = 255; -+ } -+ /* FIXME: untested */ -+ if (invalid_address(addr, size)) { -+ klib_error = KLE_INVALID_PADDR; -+ return; -+ } -+ str = (char*)kl_alloc_block(size); -+ kl_get_block(addr, size, (void *)str, (void *)0); -+ kdb_printf("\"%s", str); -+ for (i = 0; i < size; i++) { -+ if (!str[i]) { -+ break; -+ } -+ } -+ if (KL_ERROR || (i == size)) { -+ kdb_printf("..."); -+ } -+ kdb_printf("\""); -+ kl_free_block(str); -+} -+ -+/* -+ * kl_print_error() -+ */ -+void -+kl_print_error(void) -+{ -+ int ecode; -+ -+ ecode = klib_error & 0xffffffff; -+ switch(ecode) { -+ -+ /** General klib error codes -+ **/ -+ case KLE_NO_MEMORY: -+ kdb_printf("insufficient memory"); -+ break; -+ case KLE_OPEN_ERROR: -+ kdb_printf("unable to open file"); -+ break; -+ case KLE_ZERO_BLOCK: -+ kdb_printf("tried to allocate a zero-sized block"); -+ break; -+ case KLE_INVALID_VALUE: -+ kdb_printf("invalid input value"); -+ break; -+ case KLE_NULL_BUFF: -+ kdb_printf( "NULL buffer pointer"); -+ break; -+ case KLE_ZERO_SIZE: -+ kdb_printf("zero sized block requested"); -+ break; -+ case KLE_ACTIVE: -+ kdb_printf("operation not supported on a live system"); -+ break; -+ case KLE_UNSUPPORTED_ARCH: -+ kdb_printf("unsupported architecture"); -+ break; -+ case KLE_MISC_ERROR: -+ kdb_printf("KLIB error"); -+ break; -+ case KLE_NOT_SUPPORTED: -+ kdb_printf("operation not supported"); -+ break; -+ case KLE_UNKNOWN_ERROR: -+ kdb_printf("unknown error"); -+ break; -+ -+ /** memory error codes -+ **/ -+ case KLE_BAD_MAP_FILE: -+ kdb_printf("bad map file"); -+ break; -+ case KLE_BAD_DUMP: -+ kdb_printf("bad dump file"); -+ break; -+ case KLE_BAD_DUMPTYPE: -+ kdb_printf("bad dumptype"); -+ break; -+ case KLE_INVALID_LSEEK: -+ kdb_printf("lseek error"); -+ 
break; -+ case KLE_INVALID_READ: -+ kdb_printf("not found in dump file"); -+ break; -+ case KLE_BAD_KERNINFO: -+ kdb_printf("bad kerninfo struct"); -+ break; -+ case KLE_INVALID_PADDR: -+ kdb_printf("invalid physical address"); -+ break; -+ case KLE_INVALID_VADDR: -+ kdb_printf("invalid virtual address"); -+ break; -+ case KLE_INVALID_VADDR_ALIGN: -+ kdb_printf("invalid vaddr alignment"); -+ break; -+ case KLE_INVALID_MAPPING: -+ kdb_printf("invalid address mapping"); -+ break; -+ case KLE_PAGE_NOT_PRESENT: -+ kdb_printf("page not present"); -+ break; -+ case KLE_BAD_ELF_FILE: -+ kdb_printf("bad elf file"); -+ break; -+ case KLE_ARCHIVE_FILE: -+ kdb_printf("archive file"); -+ break; -+ case KLE_MAP_FILE_PRESENT: -+ kdb_printf("map file present"); -+ break; -+ case KLE_BAD_MAP_FILENAME: -+ kdb_printf("bad map filename"); -+ break; -+ case KLE_BAD_DUMP_FILENAME: -+ kdb_printf("bad dump filename"); -+ break; -+ case KLE_BAD_NAMELIST_FILE: -+ kdb_printf("bad namelist file"); -+ break; -+ case KLE_BAD_NAMELIST_FILENAME: -+ kdb_printf("bad namelist filename"); -+ break; -+ -+ /** symbol error codes -+ **/ -+ case KLE_NO_SYMTAB: -+ kdb_printf("no symtab"); -+ break; -+ case KLE_NO_SYMBOLS: -+ kdb_printf("no symbol information"); -+ break; -+ case KLE_NO_MODULE_LIST: -+ kdb_printf("kernel without module support"); -+ break; -+ -+ /** kernel data error codes -+ **/ -+ case KLE_INVALID_KERNELSTACK: -+ kdb_printf("invalid kernel stack"); -+ break; -+ case KLE_INVALID_STRUCT_SIZE: -+ kdb_printf("invalid struct size"); -+ break; -+ case KLE_BEFORE_RAM_OFFSET: -+ kdb_printf("physical address precedes start of RAM"); -+ break; -+ case KLE_AFTER_MAXPFN: -+ kdb_printf("PFN exceeds maximum PFN"); -+ break; -+ case KLE_AFTER_PHYSMEM: -+ kdb_printf("address exceeds physical memory"); -+ break; -+ case KLE_AFTER_MAXMEM: -+ kdb_printf("address exceeds maximum physical address"); -+ break; -+ case KLE_PHYSMEM_NOT_INSTALLED: -+ kdb_printf("physical memory not installed"); -+ break; -+ case KLE_NO_DEFTASK: -+ kdb_printf("default task not set"); -+ break; -+ case KLE_PID_NOT_FOUND: -+ kdb_printf("PID not found"); -+ break; -+ case KLE_DEFTASK_NOT_ON_CPU: -+ kdb_printf("default task not running on a cpu"); -+ break; -+ case KLE_NO_CURCPU: -+ kdb_printf("current cpu could not be determined"); -+ break; -+ -+ case KLE_KERNEL_MAGIC_MISMATCH: -+ kdb_printf("kernel_magic mismatch " -+ "of map and memory image"); -+ break; -+ -+ case KLE_INVALID_DUMP_HEADER: -+ kdb_printf("invalid dump header in dump"); -+ break; -+ -+ case KLE_DUMP_INDEX_CREATION: -+ kdb_printf("cannot create index file"); -+ break; -+ -+ case KLE_DUMP_HEADER_ONLY: -+ kdb_printf("dump only has a dump header"); -+ break; -+ -+ case KLE_NO_END_SYMBOL: -+ kdb_printf("no _end symbol in kernel"); -+ break; -+ -+ case KLE_NO_CPU: -+ kdb_printf("CPU not installed"); -+ break; -+ -+ default: -+ break; -+ } -+ kdb_printf("\n"); -+} -+ -+/* -+ * kl_print_string() -+ * -+ * print out a string, translating all embedded control characters -+ * (e.g., '\n' for newline, '\t' for tab, etc.)
-+ */ -+void -+kl_print_string(char *s) -+{ -+ char *sp, *cp; -+ -+ kl_reset_error(); -+ -+ if (!(sp = s)) { -+ klib_error = KLE_BAD_STRING; -+ return; -+ } -+ /* FIXME: untested */ -+ if (invalid_address((kaddr_t)sp, 1)) { -+ klib_error = KLE_INVALID_PADDR; -+ return; -+ } -+ -+ while (sp) { -+ if ((cp = strchr(sp, '\\'))) { -+ switch (*(cp + 1)) { -+ -+ case 'n' : -+ *cp++ = '\n'; -+ *cp++ = 0; -+ break; -+ -+ case 't' : -+ *cp++ = '\t'; -+ *cp++ = 0; -+ break; -+ -+ default : -+ if (*(cp + 1) == 0) { -+ klib_error = KLE_BAD_STRING; -+ return; -+ } -+ /* Change the '\' character to a zero -+ * and then print the string (the rest -+ * of the string will be picked -+ * up on the next pass). -+ */ -+ *cp++ = 0; -+ break; -+ } -+ kdb_printf("%s", sp); -+ sp = cp; -+ } else { -+ kdb_printf("%s", sp); -+ sp = 0; -+ } -+ } -+} -+ -+/* -+ * print_eval_results() -+ */ -+int -+print_eval_results(node_t *np, int flags) -+{ -+ int size, i, count, ptr_cnt = 0; -+ kaddr_t addr; -+ char *typestr; -+ kltype_t *kltp, *rkltp = NULL, *nkltp; -+ type_t *tp; -+ -+ /* Print the results -+ */ -+ switch (np->node_type) { -+ -+ case NUMBER: -+ print_number(np, flags); -+ break; -+ -+ case TYPE_DEF: { -+ -+ /* First, determine the number of levels of indirection -+ * by determining the number of pointer type records. -+ */ -+ if ((tp = np->type)) { -+ while (tp && (tp->flag == POINTER_FLAG)) { -+ ptr_cnt++; -+ tp = tp->t_next; -+ } -+ if (tp) { -+ rkltp = tp->t_kltp; -+ } -+ } -+ if (!rkltp) { -+ kdb_printf("Type information not available\n"); -+ return(1); -+ } -+ -+ if (ptr_cnt) { -+ -+ /* If this is a member, we need to get the -+ * first type record. -+ */ -+ if (rkltp->kl_type == KLT_MEMBER) { -+ /* We need to get down to the first -+ * real type record... -+ */ -+ rkltp = rkltp->kl_realtype; -+ } -+ -+ /* step over any KLT_POINTER type records. -+ */ -+ while (rkltp && rkltp->kl_type == KLT_POINTER) { -+ rkltp = rkltp->kl_realtype; -+ } -+ if (!rkltp) { -+ kdb_printf("Bad type information\n"); -+ return(1); -+ } -+ typestr = rkltp->kl_typestr; -+ if (rkltp->kl_type == KLT_FUNCTION) { -+ kdb_printf("%s(", typestr); -+ } else if (rkltp->kl_type == KLT_ARRAY) { -+ kdb_printf("(%s(", typestr); -+ } else { -+ kdb_printf("(%s", typestr); -+ } -+ for (i = 0; i < ptr_cnt; i++) { -+ kdb_printf("*"); -+ } -+ if (rkltp->kl_type == KLT_FUNCTION) { -+ kdb_printf(")("); -+ } else if (rkltp->kl_type == KLT_ARRAY) { -+ kdb_printf(")"); -+ -+ nkltp = rkltp; -+ while (nkltp->kl_type == KLT_ARRAY) { -+ count = nkltp->kl_high_bounds - -+ nkltp->kl_low_bounds + 1; -+ kdb_printf("[%d]", count); -+ nkltp = nkltp->kl_elementtype; -+ } -+ } -+ kdb_printf(") "); -+ kdb_printf("0x%llx", np->value); -+ -+ if (ptr_cnt > 1) { -+ break; -+ } -+ -+ if ((rkltp->kl_type == KLT_BASE) && -+ rkltp->kl_encoding == ENC_CHAR) { -+ kdb_printf(" = "); -+ print_string(np->value, 0); -+ } -+ break; -+ } -+ if (np->flags & KLTYPE_FLAG) { -+ void * ptr; -+ -+ /* Get the type information. It's possible -+ * that the type is a member. In which case, -+ * the size may only be from this record -+ * (which would be the casse if this is an -+ * array). We must check the original type -+ * record first, and try the realtype record -+ * if the value is zero. -+ */ -+ kltp = np->type->t_kltp; -+ -+ if (kltp->kl_type == KLT_MEMBER) { -+ rkltp = kltp->kl_realtype; -+ } else { -+ rkltp = kltp; -+ } -+ -+ /* Check to see if this is a typedef. If -+ * it is, then it might be a typedef for -+ * a pointer type. Don't walk to the last -+ * type record. 
-+ */ -+ while (rkltp->kl_type == KLT_TYPEDEF) { -+ rkltp = rkltp->kl_realtype; -+ } -+ -+ if (rkltp->kl_type == KLT_POINTER) { -+ kdb_printf("0x%llx", np->value); -+ break; -+ } -+ if((rkltp->kl_name != 0) && -+ !(strcmp(rkltp->kl_name, "void"))) { -+ /* we are about to dereference -+ * a void pointer. -+ */ -+ kdb_printf("Can't dereference a " -+ "generic pointer.\n"); -+ return(1); -+ } -+ -+ size = rkltp->kl_size; -+ if (!size || (size < 0)) { -+ size = kltp->kl_size; -+ } -+ -+ if(rkltp->kl_type==KLT_ARRAY) { -+ size = rkltp->kl_high_bounds - -+ rkltp->kl_low_bounds + 1; -+ if(rkltp->kl_elementtype == NULL){ -+ kdb_printf("Incomplete array" -+ " type.\n"); -+ return(1); -+ } -+ if(rkltp->kl_elementtype->kl_type == -+ KLT_POINTER){ -+ size *= sizeof(void *); -+ } else { -+ size *= rkltp->kl_elementtype->kl_size; -+ } -+ } -+ if(size){ -+ ptr = kl_alloc_block(size); -+ } else { -+ ptr = NULL; -+ } -+ if ((rkltp->kl_type == KLT_BASE) && -+ !(np->flags & ADDRESS_FLAG)) { -+ switch (size) { -+ case 1: -+ *(unsigned char *)ptr = -+ np->value; -+ break; -+ -+ case 2: -+ *(unsigned short *)ptr = -+ np->value; -+ break; -+ -+ case 4: -+ *(unsigned int *)ptr = -+ np->value; -+ break; -+ -+ case 8: -+ *(unsigned long long *) -+ ptr = np->value; -+ break; -+ } -+ kl_print_type(ptr, rkltp, 0, -+ flags|SUPPRESS_NAME); -+ kl_free_block(ptr); -+ return(1); -+ } -+ -+ if(size){ -+ addr = np->address; -+ if (invalid_address(addr, size)) { -+ kdb_printf ( -+ "invalid address %#lx\n", -+ addr); -+ return 1; -+ } -+ kl_get_block(addr, size, (void *)ptr, -+ (void *)0); -+ if (KL_ERROR) { -+ kl_print_error(); -+ kl_free_block(ptr); -+ return(1); -+ } -+ } -+ /* Print out the actual type -+ */ -+ switch (rkltp->kl_type) { -+ case KLT_STRUCT: -+ case KLT_UNION: -+ kl_print_type(ptr, rkltp, 0, -+ flags); -+ break; -+ -+ case KLT_ARRAY: -+ kl_print_type(ptr, rkltp, 0, -+ flags| SUPPRESS_NAME); -+ break; -+ -+ default: -+ kl_print_type(ptr, rkltp, 0, -+ (flags| -+ SUPPRESS_NAME| -+ SUPPRESS_NL)); -+ break; -+ } -+ if(ptr){ -+ kl_free_block(ptr); -+ } -+ } -+ break; -+ } -+ -+ case VADDR: -+ /* If we get here, there was no type info available. -+ * The ADDRESS_FLAG should be set (otherwise we -+ * would have returned an error). So, print out -+ * the address. 
-+ */ -+ kdb_printf("0x%lx", np->address); -+ break; -+ -+ default: -+ if (np->node_type == TEXT) { -+ kl_print_string(np->name); -+ if (KL_ERROR) { -+ kl_print_error(); -+ return(1); -+ } -+ } else if (np->node_type == CHARACTER) { -+ kdb_printf("\'%c\'", (char)np->value); -+ } -+ break; -+ } -+ return(0); -+} -+ -+/* -+ * print_eval_error() -+ */ -+void -+print_eval_error( -+ char *cmdname, -+ char *s, -+ char *bad_ptr, -+ uint64_t error, -+ int flags) -+{ -+ int i, cmd_len; -+ -+ kdb_printf("%s %s\n", cmdname, s); -+ cmd_len = strlen(cmdname); -+ -+ if (!bad_ptr) { -+ for (i = 0; i < (strlen(s) + cmd_len); i++) { -+ kdb_printf(" "); -+ } -+ } else { -+ for (i = 0; i < (bad_ptr - s + 1 + cmd_len); i++) { -+ kdb_printf(" "); -+ } -+ } -+ kdb_printf("^ "); -+ switch (error) { -+ case E_OPEN_PAREN : -+ kdb_printf("Too many open parentheses\n"); -+ break; -+ -+ case E_CLOSE_PAREN : -+ kdb_printf("Too many close parentheses\n"); -+ break; -+ -+ case E_BAD_STRUCTURE : -+ kdb_printf("Invalid structure\n"); -+ break; -+ -+ case E_MISSING_STRUCTURE : -+ kdb_printf("Missing structure\n"); -+ break; -+ -+ case E_BAD_MEMBER : -+ kdb_printf("No such member\n"); -+ break; -+ -+ case E_BAD_OPERATOR : -+ kdb_printf("Invalid operator\n"); -+ break; -+ -+ case E_MISSING_OPERAND : -+ kdb_printf("Missing operand\n"); -+ break; -+ -+ case E_BAD_OPERAND : -+ kdb_printf("Invalid operand\n"); -+ break; -+ -+ case E_BAD_TYPE : -+ kdb_printf("Invalid type\n"); -+ if (!have_debug_file) { -+ kdb_printf("no debuginfo file\n"); -+ return; -+ } -+ break; -+ -+ case E_NOTYPE : -+ kdb_printf("Could not find type information\n"); -+ break; -+ -+ case E_BAD_POINTER : -+ kdb_printf("Invalid pointer\n"); -+ break; -+ -+ case E_BAD_INDEX : -+ kdb_printf("Invalid array index\n"); -+ break; -+ -+ case E_BAD_CHAR : -+ kdb_printf("Invalid character value\n"); -+ break; -+ -+ case E_BAD_STRING : -+ kdb_printf("Non-terminating string\n"); -+ break; -+ -+ case E_END_EXPECTED : -+ kdb_printf( -+ "Expected end of print statement\n"); -+ break; -+ -+ case E_BAD_EVAR : -+ kdb_printf("Invalid eval variable\n"); -+ break; -+ -+ case E_BAD_VALUE : -+ kdb_printf("Invalid value\n"); -+ break; -+ -+ case E_NO_VALUE : -+ kdb_printf("No value supplied\n"); -+ break; -+ -+ case E_DIVIDE_BY_ZERO : -+ kdb_printf("Divide by zero\n"); -+ break; -+ -+ case E_BAD_CAST : -+ kdb_printf("Invalid cast\n"); -+ break; -+ -+ case E_NO_ADDRESS : -+ kdb_printf("Not an address\n"); -+ break; -+ -+ case E_SINGLE_QUOTE : -+ kdb_printf("Missing single quote\n"); -+ break; -+ -+ case E_BAD_WHATIS : -+ kdb_printf("Invalid whatis operation\n"); -+ break; -+ -+ case E_NOT_IMPLEMENTED : -+ kdb_printf("Not implemented\n"); -+ break; -+ -+ default : -+ kdb_printf("Syntax error\n"); -+ break; -+ } -+} -+ -+/* -+ * single_type() -+ */ -+void -+single_type(char *str) -+{ -+ char buffer[256], *type_name; -+ kltype_t *kltp; -+ syment_t *sp; -+ -+ type_name = buffer; -+ strcpy(type_name, str); -+ -+ if (have_debug_file) { -+ if ((kltp = kl_find_type(type_name, KLT_TYPE))) { -+ kl_print_type((void *)NULL, kltp, 0, C_SHOWOFFSET); -+ return; -+ } -+ if ((kltp = kl_find_type(type_name, KLT_TYPEDEF))) { -+ kdb_printf ("typedef %s:\n", type_name); -+ kl_print_type((void *)NULL, kltp, 0, C_SHOWOFFSET); -+ return; -+ } -+ } -+ if ((sp = kl_lkup_symname(type_name))) { -+ kdb_printf ("symbol %s value: %#lx\n", str, sp->s_addr); -+ kl_free_block((void *)sp); -+ return; -+ } -+ kdb_printf("could not find type or symbol information for %s\n", -+ type_name); -+ return; -+} -+ -+/* -+ *
sizeof_type() -+ */ -+void -+sizeof_type(char *str) -+{ -+ char buffer[256], *type_name; -+ kltype_t *kltp; -+ -+ type_name = buffer; -+ strcpy(type_name, str); -+ -+ if ((kltp = kl_find_type(type_name, KLT_TYPE))) { -+ kdb_printf ("%s %d %#x\n", kltp->kl_typestr, -+ kltp->kl_size, kltp->kl_size); -+ return; -+ } -+ if ((kltp = kl_find_type(type_name, KLT_TYPEDEF))) { -+ kdb_printf ("%s %d %#x\n", kltp->kl_typestr, -+ kltp->kl_size, kltp->kl_size); -+ return; -+ } -+ kdb_printf("could not find type information for %s\n", type_name); -+} -+ -+EXPORT_SYMBOL(have_debug_file); -+EXPORT_SYMBOL(type_tree); -+EXPORT_SYMBOL(typedef_tree); -+ -+#if defined(CONFIG_X86_32) -+/* needed for i386: */ -+#include -+#include -+/* -+ * Generic C version of full 64 bit by 64 bit division -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * Code generated for this function might be very inefficient -+ * for some CPUs, can be overridden by linking arch-specific -+ * assembly versions such as arch/sparc/lib/udivdi.S -+ */ -+uint64_t -+__udivdi3(uint64_t dividend, uint64_t divisor) -+{ -+ uint32_t d = divisor; -+ /* Scale divisor to 32 bits */ -+ if (divisor > 0xffffffffULL) { -+ unsigned int shift = fls(divisor >> 32); -+ d = divisor >> shift; -+ dividend >>= shift; -+ } -+ /* avoid 64 bit division if possible */ -+ if (dividend >> 32) -+ do_div(dividend, d); -+ else -+ dividend = (uint32_t) dividend / d; -+ return dividend; -+} -+ -+int64_t -+__divdi3(int64_t dividend, int64_t divisor) -+{ -+ int32_t d = divisor; -+ /* Scale divisor to 32 bits */ -+ if (divisor > 0xffffffffLL) { -+ unsigned int shift = fls(divisor >> 32); -+ d = divisor >> shift; -+ dividend >>= shift; -+ } -+ /* avoid 64 bit division if possible */ -+ if (dividend >> 32) -+ do_div(dividend, d); -+ else -+ dividend = (int32_t) dividend / d; -+ return dividend; -+} -+ -+uint64_t -+__umoddi3(uint64_t dividend, uint64_t divisor) -+{ -+ return dividend - (__udivdi3(dividend, divisor) * divisor); -+} -+ -+int64_t -+__moddi3(int64_t dividend, int64_t divisor) -+{ -+ return dividend - (__divdi3(dividend, divisor) * divisor); -+} -+#endif /* CONFIG_x86_32 */ ---- /dev/null -+++ b/kdb/kdbmain.c -@@ -0,0 +1,4333 @@ -+/* -+ * Kernel Debugger Architecture Independent Main Code -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ * Copyright (C) 2000 Stephane Eranian -+ * Xscale (R) modifications copyright (C) 2003 Intel Corporation. 
-+ */ -+ -+/* -+ * Updated for Xscale (R) architecture support -+ * Eddie Dong 8 Jan 03 -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#if defined(CONFIG_LKCD_DUMP) || defined(CONFIG_LKCD_DUMP_MODULE) -+#include -+#endif -+#include -+#include -+#ifdef CONFIG_KDB_KDUMP -+#include -+#endif -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+char kdb_debug_info_filename[256] = {""}; -+EXPORT_SYMBOL(kdb_debug_info_filename); -+#define GREP_LEN 256 -+char kdb_grep_string[GREP_LEN]; -+int kdb_grepping_flag; -+EXPORT_SYMBOL(kdb_grepping_flag); -+int kdb_grep_leading; -+int kdb_grep_trailing; -+ -+/* -+ * Kernel debugger state flags -+ */ -+volatile int kdb_flags; -+atomic_t kdb_event; -+atomic_t kdb_8250; -+ -+/* -+ * kdb_lock protects updates to kdb_initial_cpu. Used to -+ * single thread processors through the kernel debugger. -+ */ -+static DEFINE_SPINLOCK(kdb_lock); -+volatile int kdb_initial_cpu = -1; /* cpu number that owns kdb */ -+int kdb_seqno = 2; /* how many times kdb has been entered */ -+ -+volatile int kdb_nextline = 1; -+static volatile int kdb_new_cpu; /* Which cpu to switch to */ -+ -+volatile int kdb_state[NR_CPUS]; /* Per cpu state */ -+ -+const struct task_struct *kdb_current_task; -+EXPORT_SYMBOL(kdb_current_task); -+struct pt_regs *kdb_current_regs; -+ -+#ifdef CONFIG_KDB_OFF -+int kdb_on = 0; /* Default is off */ -+#else -+int kdb_on = 1; /* Default is on */ -+#endif /* CONFIG_KDB_OFF */ -+ -+const char *kdb_diemsg; -+static int kdb_go_count; -+#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC -+static unsigned int kdb_continue_catastrophic = CONFIG_KDB_CONTINUE_CATASTROPHIC; -+#else -+static unsigned int kdb_continue_catastrophic = 0; -+#endif -+ -+#ifdef kdba_setjmp -+ /* -+ * Must have a setjmp buffer per CPU. Switching cpus will -+ * cause the jump buffer to be setup for the new cpu, and -+ * subsequent switches (and pager aborts) will use the -+ * appropriate per-processor values. -+ */ -+kdb_jmp_buf *kdbjmpbuf; -+#endif /* kdba_setjmp */ -+ -+ /* -+ * kdb_commands describes the available commands. 
-+ */ -+static kdbtab_t *kdb_commands; -+static int kdb_max_commands; -+ -+typedef struct _kdbmsg { -+ int km_diag; /* kdb diagnostic */ -+ char *km_msg; /* Corresponding message text */ -+} kdbmsg_t; -+ -+#define KDBMSG(msgnum, text) \ -+ { KDB_##msgnum, text } -+ -+static kdbmsg_t kdbmsgs[] = { -+ KDBMSG(NOTFOUND,"Command Not Found"), -+ KDBMSG(ARGCOUNT, "Improper argument count, see usage."), -+ KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, 8 is only allowed on 64 bit systems"), -+ KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"), -+ KDBMSG(NOTENV, "Cannot find environment variable"), -+ KDBMSG(NOENVVALUE, "Environment variable should have value"), -+ KDBMSG(NOTIMP, "Command not implemented"), -+ KDBMSG(ENVFULL, "Environment full"), -+ KDBMSG(ENVBUFFULL, "Environment buffer full"), -+ KDBMSG(TOOMANYBPT, "Too many breakpoints defined"), -+#ifdef CONFIG_CPU_XSCALE -+ KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"), -+#else -+ KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"), -+#endif -+ KDBMSG(DUPBPT, "Duplicate breakpoint address"), -+ KDBMSG(BPTNOTFOUND, "Breakpoint not found"), -+ KDBMSG(BADMODE, "Invalid IDMODE"), -+ KDBMSG(BADINT, "Illegal numeric value"), -+ KDBMSG(INVADDRFMT, "Invalid symbolic address format"), -+ KDBMSG(BADREG, "Invalid register name"), -+ KDBMSG(BADCPUNUM, "Invalid cpu number"), -+ KDBMSG(BADLENGTH, "Invalid length field"), -+ KDBMSG(NOBP, "No Breakpoint exists"), -+ KDBMSG(BADADDR, "Invalid address"), -+}; -+#undef KDBMSG -+ -+static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t); -+ -+ -+/* -+ * Initial environment. This is all kept static and local to -+ * this file. We don't want to rely on the memory allocation -+ * mechanisms in the kernel, so we use a very limited allocate-only -+ * heap for new and altered environment variables. The entire -+ * environment is limited to a fixed number of entries (add more -+ * to __env[] if required) and a fixed amount of heap (add more to -+ * KDB_ENVBUFSIZE if required). -+ */ -+ -+static char *__env[] = { -+#if defined(CONFIG_SMP) -+ "PROMPT=[%d]kdb> ", -+ "MOREPROMPT=[%d]more> ", -+#else -+ "PROMPT=kdb> ", -+ "MOREPROMPT=more> ", -+#endif -+ "RADIX=16", -+ "LINES=24", -+ "COLUMNS=80", -+ "MDCOUNT=8", /* lines of md output */ -+ "BTARGS=9", /* 9 possible args in bt */ -+ KDB_PLATFORM_ENV, -+ "DTABCOUNT=30", -+ "NOSECT=1", -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+ (char *)0, -+}; -+ -+static const int __nenv = (sizeof(__env) / sizeof(char *)); -+ -+/* external commands: */ -+int kdb_debuginfo_print(int argc, const char **argv); -+int kdb_pxhelp(int argc, const char **argv); -+int kdb_walkhelp(int argc, const char **argv); -+int kdb_walk(int argc, const char **argv); -+ -+/* -+ * kdb_serial_str is the sequence that the user must enter on a serial -+ * console to invoke kdb. It can be a single character such as "\001" -+ * (control-A) or multiple characters such as "\eKDB". NOTE: All except the -+ * last character are passed through to the application reading from the serial -+ * console. -+ * -+ * I tried to make the sequence a CONFIG_ option but most of CML1 cannot cope -+ * with '\' in strings. CML2 would have been able to do it but we lost CML2. -+ * KAO. 
-+ */ -+const char kdb_serial_str[] = "\eKDB"; -+EXPORT_SYMBOL(kdb_serial_str); -+ -+struct task_struct * -+kdb_curr_task(int cpu) -+{ -+ struct task_struct *p = curr_task(cpu); -+#ifdef _TIF_MCA_INIT -+ struct kdb_running_process *krp = kdb_running_process + cpu; -+ if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && krp->p) -+ p = krp->p; -+#endif -+ return p; -+} -+ -+/* -+ * kdbgetenv -+ * -+ * This function will return the character string value of -+ * an environment variable. -+ * -+ * Parameters: -+ * match A character string representing an environment variable. -+ * Outputs: -+ * None. -+ * Returns: -+ * NULL No environment variable matches 'match' -+ * char* Pointer to string value of environment variable. -+ * Locking: -+ * No locking considerations required. -+ * Remarks: -+ */ -+char * -+kdbgetenv(const char *match) -+{ -+ char **ep = __env; -+ int matchlen = strlen(match); -+ int i; -+ -+ for(i=0; i<__nenv; i++) { -+ char *e = *ep++; -+ -+ if (!e) continue; -+ -+ if ((strncmp(match, e, matchlen) == 0) -+ && ((e[matchlen] == '\0') -+ ||(e[matchlen] == '='))) { -+ char *cp = strchr(e, '='); -+ return (cp ? ++cp :""); -+ } -+ } -+ return NULL; -+} -+ -+/* -+ * kdballocenv -+ * -+ * This function is used to allocate bytes for environment entries. -+ * -+ * Parameters: -+ * match A character string representing a numeric value -+ * Outputs: -+ * *value the unsigned long represntation of the env variable 'match' -+ * Returns: -+ * Zero on success, a kdb diagnostic on failure. -+ * Locking: -+ * No locking considerations required. Must be called with all -+ * processors halted. -+ * Remarks: -+ * We use a static environment buffer (envbuffer) to hold the values -+ * of dynamically generated environment variables (see kdb_set). Buffer -+ * space once allocated is never free'd, so over time, the amount of space -+ * (currently 512 bytes) will be exhausted if env variables are changed -+ * frequently. -+ */ -+static char * -+kdballocenv(size_t bytes) -+{ -+#define KDB_ENVBUFSIZE 512 -+ static char envbuffer[KDB_ENVBUFSIZE]; -+ static int envbufsize; -+ char *ep = NULL; -+ -+ if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) { -+ ep = &envbuffer[envbufsize]; -+ envbufsize += bytes; -+ } -+ return ep; -+} -+ -+/* -+ * kdbgetulenv -+ * -+ * This function will return the value of an unsigned long-valued -+ * environment variable. -+ * -+ * Parameters: -+ * match A character string representing a numeric value -+ * Outputs: -+ * *value the unsigned long represntation of the env variable 'match' -+ * Returns: -+ * Zero on success, a kdb diagnostic on failure. -+ * Locking: -+ * No locking considerations required. -+ * Remarks: -+ */ -+ -+static int -+kdbgetulenv(const char *match, unsigned long *value) -+{ -+ char *ep; -+ -+ ep = kdbgetenv(match); -+ if (!ep) return KDB_NOTENV; -+ if (strlen(ep) == 0) return KDB_NOENVVALUE; -+ -+ *value = simple_strtoul(ep, NULL, 0); -+ -+ return 0; -+} -+ -+/* -+ * kdbgetintenv -+ * -+ * This function will return the value of an integer-valued -+ * environment variable. -+ * -+ * Parameters: -+ * match A character string representing an integer-valued env variable -+ * Outputs: -+ * *value the integer representation of the environment variable 'match' -+ * Returns: -+ * Zero on success, a kdb diagnostic on failure. -+ * Locking: -+ * No locking considerations required. 
-+ * Remarks: -+ */ -+ -+int -+kdbgetintenv(const char *match, int *value) { -+ unsigned long val; -+ int diag; -+ -+ diag = kdbgetulenv(match, &val); -+ if (!diag) { -+ *value = (int) val; -+ } -+ return diag; -+} -+ -+/* -+ * kdbgetularg -+ * -+ * This function will convert a numeric string -+ * into an unsigned long value. -+ * -+ * Parameters: -+ * arg A character string representing a numeric value -+ * Outputs: -+ * *value the unsigned long represntation of arg. -+ * Returns: -+ * Zero on success, a kdb diagnostic on failure. -+ * Locking: -+ * No locking considerations required. -+ * Remarks: -+ */ -+ -+int -+kdbgetularg(const char *arg, unsigned long *value) -+{ -+ char *endp; -+ unsigned long val; -+ -+ val = simple_strtoul(arg, &endp, 0); -+ -+ if (endp == arg) { -+ /* -+ * Try base 16, for us folks too lazy to type the -+ * leading 0x... -+ */ -+ val = simple_strtoul(arg, &endp, 16); -+ if (endp == arg) -+ return KDB_BADINT; -+ } -+ -+ *value = val; -+ -+ return 0; -+} -+ -+/* -+ * kdb_set -+ * -+ * This function implements the 'set' command. Alter an existing -+ * environment variable or create a new one. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_set(int argc, const char **argv) -+{ -+ int i; -+ char *ep; -+ size_t varlen, vallen; -+ -+ /* -+ * we can be invoked two ways: -+ * set var=value argv[1]="var", argv[2]="value" -+ * set var = value argv[1]="var", argv[2]="=", argv[3]="value" -+ * - if the latter, shift 'em down. -+ */ -+ if (argc == 3) { -+ argv[2] = argv[3]; -+ argc--; -+ } -+ -+ if (argc != 2) -+ return KDB_ARGCOUNT; -+ -+ /* -+ * Check for internal variables -+ */ -+ if (strcmp(argv[1], "KDBDEBUG") == 0) { -+ unsigned int debugflags; -+ char *cp; -+ -+ debugflags = simple_strtoul(argv[2], &cp, 0); -+ if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) { -+ kdb_printf("kdb: illegal debug flags '%s'\n", -+ argv[2]); -+ return 0; -+ } -+ kdb_flags = (kdb_flags & ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT)) -+ | (debugflags << KDB_DEBUG_FLAG_SHIFT); -+ -+ return 0; -+ } -+ -+ /* -+ * Tokenizer squashed the '=' sign. argv[1] is variable -+ * name, argv[2] = value. -+ */ -+ varlen = strlen(argv[1]); -+ vallen = strlen(argv[2]); -+ ep = kdballocenv(varlen + vallen + 2); -+ if (ep == (char *)0) -+ return KDB_ENVBUFFULL; -+ -+ sprintf(ep, "%s=%s", argv[1], argv[2]); -+ -+ ep[varlen+vallen+1]='\0'; -+ -+ for(i=0; i<__nenv; i++) { -+ if (__env[i] -+ && ((strncmp(__env[i], argv[1], varlen)==0) -+ && ((__env[i][varlen] == '\0') -+ || (__env[i][varlen] == '=')))) { -+ __env[i] = ep; -+ return 0; -+ } -+ } -+ -+ /* -+ * Wasn't existing variable. Fit into slot. -+ */ -+ for(i=0; i<__nenv-1; i++) { -+ if (__env[i] == (char *)0) { -+ __env[i] = ep; -+ return 0; -+ } -+ } -+ -+ return KDB_ENVFULL; -+} -+ -+static int -+kdb_check_regs(void) -+{ -+ if (!kdb_current_regs) { -+ kdb_printf("No current kdb registers." -+ " You may need to select another task\n"); -+ return KDB_BADREG; -+ } -+ return 0; -+} -+ -+/* -+ * kdbgetaddrarg -+ * -+ * This function is responsible for parsing an -+ * address-expression and returning the value of -+ * the expression, symbol name, and offset to the caller. 
-+ * -+ * The argument may consist of a numeric value (decimal or -+ * hexadecimal), a symbol name, a register name (preceded -+ * by the percent sign), an environment variable with a numeric -+ * value (preceded by a dollar sign) or a simple arithmetic -+ * expression consisting of a symbol name, +/-, and a numeric -+ * constant value (offset). -+ * -+ * Parameters: -+ * argc - count of arguments in argv -+ * argv - argument vector -+ * *nextarg - index to next unparsed argument in argv[] -+ * regs - Register state at time of KDB entry -+ * Outputs: -+ * *value - receives the value of the address-expression -+ * *offset - receives the offset specified, if any -+ * *name - receives the symbol name, if any -+ * *nextarg - index to next unparsed argument in argv[] -+ * -+ * Returns: -+ * zero is returned on success, a kdb diagnostic code is -+ * returned on error. -+ * -+ * Locking: -+ * No locking requirements. -+ * -+ * Remarks: -+ * -+ */ -+ -+int -+kdbgetaddrarg(int argc, const char **argv, int *nextarg, -+ kdb_machreg_t *value, long *offset, -+ char **name) -+{ -+ kdb_machreg_t addr; -+ unsigned long off = 0; -+ int positive; -+ int diag; -+ int found = 0; -+ char *symname; -+ char symbol = '\0'; -+ char *cp; -+ kdb_symtab_t symtab; -+ -+ /* -+ * Process arguments which follow the following syntax: -+ * -+ * symbol | numeric-address [+/- numeric-offset] -+ * %register -+ * $environment-variable -+ */ -+ -+ if (*nextarg > argc) { -+ return KDB_ARGCOUNT; -+ } -+ -+ symname = (char *)argv[*nextarg]; -+ -+ /* -+ * If there is no whitespace between the symbol -+ * or address and the '+' or '-' symbols, we -+ * remember the character and replace it with a -+ * null so the symbol/value can be properly parsed -+ */ -+ if ((cp = strpbrk(symname, "+-")) != NULL) { -+ symbol = *cp; -+ *cp++ = '\0'; -+ } -+ -+ if (symname[0] == '$') { -+ diag = kdbgetulenv(&symname[1], &addr); -+ if (diag) -+ return diag; -+ } else if (symname[0] == '%') { -+ if ((diag = kdb_check_regs())) -+ return diag; -+ diag = kdba_getregcontents(&symname[1], kdb_current_regs, &addr); -+ if (diag) -+ return diag; -+ } else { -+ found = kdbgetsymval(symname, &symtab); -+ if (found) { -+ addr = symtab.sym_start; -+ } else { -+ diag = kdbgetularg(argv[*nextarg], &addr); -+ if (diag) -+ return diag; -+ } -+ } -+ -+ if (!found) -+ found = kdbnearsym(addr, &symtab); -+ -+ (*nextarg)++; -+ -+ if (name) -+ *name = symname; -+ if (value) -+ *value = addr; -+ if (offset && name && *name) -+ *offset = addr - symtab.sym_start; -+ -+ if ((*nextarg > argc) -+ && (symbol == '\0')) -+ return 0; -+ -+ /* -+ * check for +/- and offset -+ */ -+ -+ if (symbol == '\0') { -+ if ((argv[*nextarg][0] != '+') -+ && (argv[*nextarg][0] != '-')) { -+ /* -+ * Not our argument. Return. -+ */ -+ return 0; -+ } else { -+ positive = (argv[*nextarg][0] == '+'); -+ (*nextarg)++; -+ } -+ } else -+ positive = (symbol == '+'); -+ -+ /* -+ * Now there must be an offset!
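A standalone sketch, not taken from the patch, of the "symbol+offset" / "symbol-offset" splitting that kdbgetaddrarg() performs with strpbrk() when there is no whitespace around the sign: cut the string at the first '+' or '-', parse the remainder as a number, and keep the sign. The symbol name below is invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "name+0x18" or "name-4" into a name part and a signed offset. */
static long split_offset(char *arg, char **name)
{
	char *cp = strpbrk(arg, "+-");
	long off = 0;

	*name = arg;
	if (cp) {
		int negative = (*cp == '-');

		*cp++ = '\0';		/* terminate the symbol name */
		off = strtol(cp, NULL, 0);
		if (negative)
			off = -off;
	}
	return off;
}

int main(void)
{
	char buf[] = "task_struct+0x18";
	char *name;
	long off = split_offset(buf, &name);

	printf("name=%s offset=%ld\n", name, off);
	return 0;
}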
-+ */ -+ if ((*nextarg > argc) -+ && (symbol == '\0')) { -+ return KDB_INVADDRFMT; -+ } -+ -+ if (!symbol) { -+ cp = (char *)argv[*nextarg]; -+ (*nextarg)++; -+ } -+ -+ diag = kdbgetularg(cp, &off); -+ if (diag) -+ return diag; -+ -+ if (!positive) -+ off = -off; -+ -+ if (offset) -+ *offset += off; -+ -+ if (value) -+ *value += off; -+ -+ return 0; -+} -+ -+static void -+kdb_cmderror(int diag) -+{ -+ int i; -+ -+ if (diag >= 0) { -+ kdb_printf("no error detected (diagnostic is %d)\n", diag); -+ return; -+ } -+ -+ for(i=0; i<__nkdb_err; i++) { -+ if (kdbmsgs[i].km_diag == diag) { -+ kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg); -+ return; -+ } -+ } -+ -+ kdb_printf("Unknown diag %d\n", -diag); -+} -+ -+/* -+ * kdb_defcmd, kdb_defcmd2 -+ * -+ * This function implements the 'defcmd' command which defines one -+ * command as a set of other commands, terminated by endefcmd. -+ * kdb_defcmd processes the initial 'defcmd' command, kdb_defcmd2 -+ * is invoked from kdb_parse for the following commands until -+ * 'endefcmd'. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+struct defcmd_set { -+ int count; -+ int usable; -+ char *name; -+ char *usage; -+ char *help; -+ char **command; -+}; -+static struct defcmd_set *defcmd_set; -+static int defcmd_set_count; -+static int defcmd_in_progress; -+ -+/* Forward references */ -+static int kdb_exec_defcmd(int argc, const char **argv); -+ -+static int -+kdb_defcmd2(const char *cmdstr, const char *argv0) -+{ -+ struct defcmd_set *s = defcmd_set + defcmd_set_count - 1; -+ char **save_command = s->command; -+ if (strcmp(argv0, "endefcmd") == 0) { -+ defcmd_in_progress = 0; -+ if (!s->count) -+ s->usable = 0; -+ if (s->usable) -+ kdb_register(s->name, kdb_exec_defcmd, s->usage, s->help, 0); -+ return 0; -+ } -+ if (!s->usable) -+ return KDB_NOTIMP; -+ s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB); -+ if (!s->command) { -+ kdb_printf("Could not allocate new kdb_defcmd table for %s\n", cmdstr); -+ s->usable = 0; -+ return KDB_NOTIMP; -+ } -+ memcpy(s->command, save_command, s->count * sizeof(*(s->command))); -+ s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB); -+ kfree(save_command); -+ return 0; -+} -+ -+static int -+kdb_defcmd(int argc, const char **argv) -+{ -+ struct defcmd_set *save_defcmd_set = defcmd_set, *s; -+ if (defcmd_in_progress) { -+ kdb_printf("kdb: nested defcmd detected, assuming missing endefcmd\n"); -+ kdb_defcmd2("endefcmd", "endefcmd"); -+ } -+ if (argc == 0) { -+ int i; -+ for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) { -+ kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name, s->usage, s->help); -+ for (i = 0; i < s->count; ++i) -+ kdb_printf("%s", s->command[i]); -+ kdb_printf("endefcmd\n"); -+ } -+ return 0; -+ } -+ if (argc != 3) -+ return KDB_ARGCOUNT; -+ defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set), GFP_KDB); -+ if (!defcmd_set) { -+ kdb_printf("Could not allocate new defcmd_set entry for %s\n", argv[1]); -+ defcmd_set = save_defcmd_set; -+ return KDB_NOTIMP; -+ } -+ memcpy(defcmd_set, save_defcmd_set, defcmd_set_count * sizeof(*defcmd_set)); -+ kfree(save_defcmd_set); -+ s = defcmd_set + defcmd_set_count; -+ memset(s, 0, sizeof(*s)); -+ s->usable = 1; -+ s->name = kdb_strdup(argv[1], GFP_KDB); -+ s->usage = kdb_strdup(argv[2], GFP_KDB); -+ s->help = kdb_strdup(argv[3], GFP_KDB); -+ if (s->usage[0] == '"') { -+ 
strcpy(s->usage, s->usage+1); -+ s->usage[strlen(s->usage)-1] = '\0'; -+ } -+ if (s->help[0] == '"') { -+ strcpy(s->help, s->help+1); -+ s->help[strlen(s->help)-1] = '\0'; -+ } -+ ++defcmd_set_count; -+ defcmd_in_progress = 1; -+ return 0; -+} -+ -+/* -+ * kdb_exec_defcmd -+ * -+ * Execute the set of commands associated with this defcmd name. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_exec_defcmd(int argc, const char **argv) -+{ -+ int i, ret; -+ struct defcmd_set *s; -+ if (argc != 0) -+ return KDB_ARGCOUNT; -+ for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) { -+ if (strcmp(s->name, argv[0]) == 0) -+ break; -+ } -+ if (i == defcmd_set_count) { -+ kdb_printf("kdb_exec_defcmd: could not find commands for %s\n", argv[0]); -+ return KDB_NOTIMP; -+ } -+ for (i = 0; i < s->count; ++i) { -+ /* Recursive use of kdb_parse, do not use argv after this point */ -+ argv = NULL; -+ kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]); -+ if ((ret = kdb_parse(s->command[i]))) -+ return ret; -+ } -+ return 0; -+} -+ -+/* Command history */ -+#define KDB_CMD_HISTORY_COUNT 32 -+#define CMD_BUFLEN 200 /* kdb_printf: max printline size == 256 */ -+static unsigned int cmd_head=0, cmd_tail=0; -+static unsigned int cmdptr; -+static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN]; -+static char cmd_cur[CMD_BUFLEN]; -+ -+/* -+ * The "str" argument may point to something like | grep xyz -+ * -+ */ -+static void -+parse_grep(const char *str) -+{ -+ int len; -+ char *cp = (char *)str, *cp2; -+ -+ /* sanity check: we should have been called with the \ first */ -+ if (*cp != '|') -+ return; -+ cp++; -+ while (isspace(*cp)) cp++; -+ if (strncmp(cp,"grep ",5)) { -+ kdb_printf ("invalid 'pipe', see grephelp\n"); -+ return; -+ } -+ cp += 5; -+ while (isspace(*cp)) cp++; -+ cp2 = strchr(cp, '\n'); -+ if (cp2) -+ *cp2 = '\0'; /* remove the trailing newline */ -+ len = strlen(cp); -+ if (len == 0) { -+ kdb_printf ("invalid 'pipe', see grephelp\n"); -+ return; -+ } -+ /* now cp points to a nonzero length search string */ -+ if (*cp == '"') { -+ /* allow it be "x y z" by removing the "'s - there must -+ be two of them */ -+ cp++; -+ cp2 = strchr(cp, '"'); -+ if (!cp2) { -+ kdb_printf ("invalid quoted string, see grephelp\n"); -+ return; -+ } -+ *cp2 = '\0'; /* end the string where the 2nd " was */ -+ } -+ kdb_grep_leading = 0; -+ if (*cp == '^') { -+ kdb_grep_leading = 1; -+ cp++; -+ } -+ len = strlen(cp); -+ kdb_grep_trailing = 0; -+ if (*(cp+len-1) == '$') { -+ kdb_grep_trailing = 1; -+ *(cp+len-1) = '\0'; -+ } -+ len = strlen(cp); -+ if (!len) return; -+ if (len >= GREP_LEN) { -+ kdb_printf ("search string too long\n"); -+ return; -+ } -+ strcpy(kdb_grep_string, cp); -+ kdb_grepping_flag++; -+ return; -+} -+ -+/* -+ * kdb_parse -+ * -+ * Parse the command line, search the command table for a -+ * matching command and invoke the command function. -+ * This function may be called recursively, if it is, the second call -+ * will overwrite argv and cbuf. It is the caller's responsibility to -+ * save their argv if they recursively call kdb_parse(). -+ * -+ * Parameters: -+ * cmdstr The input command line to be parsed. -+ * regs The registers at the time kdb was entered. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic if failure. -+ * Locking: -+ * None. -+ * Remarks: -+ * Limited to 20 tokens. 
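The pipe handling in parse_grep() above reduces a "| grep pattern" suffix to a plain substring plus two anchor flags. A simplified, standalone sketch of that reduction follows; the names are illustrative rather than the kdb globals, and the quoted-pattern case is omitted to keep it short.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int grep_leading, grep_trailing;
static char grep_string[256];

/* parse_pipe() is a made-up stand-in for parse_grep(). */
static int parse_pipe(char *cp)
{
	size_t len;

	if (*cp++ != '|')
		return -1;
	while (isspace((unsigned char)*cp))
		cp++;
	if (strncmp(cp, "grep ", 5))
		return -1;
	cp += 5;
	while (isspace((unsigned char)*cp))
		cp++;
	grep_leading = (*cp == '^');		/* ^ anchors at line start */
	if (grep_leading)
		cp++;
	len = strlen(cp);
	grep_trailing = (len && cp[len - 1] == '$');	/* $ anchors at line end */
	if (grep_trailing)
		cp[--len] = '\0';
	if (!len || len >= sizeof(grep_string))
		return -1;
	strcpy(grep_string, cp);
	return 0;
}

int main(void)
{
	char suffix[] = "| grep ^kdb_main$";

	if (!parse_pipe(suffix))
		printf("match '%s' leading=%d trailing=%d\n",
		       grep_string, grep_leading, grep_trailing);
	return 0;
}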
-+ * -+ * Real rudimentary tokenization. Basically only whitespace -+ * is considered a token delimeter (but special consideration -+ * is taken of the '=' sign as used by the 'set' command). -+ * -+ * The algorithm used to tokenize the input string relies on -+ * there being at least one whitespace (or otherwise useless) -+ * character between tokens as the character immediately following -+ * the token is altered in-place to a null-byte to terminate the -+ * token string. -+ */ -+ -+#define MAXARGC 20 -+ -+int -+kdb_parse(const char *cmdstr) -+{ -+ static char *argv[MAXARGC]; -+ static int argc = 0; -+ static char cbuf[CMD_BUFLEN+2]; -+ char *cp; -+ char *cpp, quoted; -+ kdbtab_t *tp; -+ int i, escaped, ignore_errors = 0, check_grep; -+ -+ /* -+ * First tokenize the command string. -+ */ -+ cp = (char *)cmdstr; -+ kdb_grepping_flag = check_grep = 0; -+ -+ if (KDB_FLAG(CMD_INTERRUPT)) { -+ /* Previous command was interrupted, newline must not repeat the command */ -+ KDB_FLAG_CLEAR(CMD_INTERRUPT); -+ argc = 0; /* no repeat */ -+ } -+ -+ if (*cp != '\n' && *cp != '\0') { -+ argc = 0; -+ cpp = cbuf; -+ while (*cp) { -+ /* skip whitespace */ -+ while (isspace(*cp)) cp++; -+ if ((*cp == '\0') || (*cp == '\n') || (*cp == '#' && !defcmd_in_progress)) -+ break; -+ /* special case: check for | grep pattern */ -+ if (*cp == '|') { -+ check_grep++; -+ break; -+ } -+ if (cpp >= cbuf + CMD_BUFLEN) { -+ kdb_printf("kdb_parse: command buffer overflow, command ignored\n%s\n", cmdstr); -+ return KDB_NOTFOUND; -+ } -+ if (argc >= MAXARGC - 1) { -+ kdb_printf("kdb_parse: too many arguments, command ignored\n%s\n", cmdstr); -+ return KDB_NOTFOUND; -+ } -+ argv[argc++] = cpp; -+ escaped = 0; -+ quoted = '\0'; -+ /* Copy to next unquoted and unescaped whitespace or '=' */ -+ while (*cp && *cp != '\n' && (escaped || quoted || !isspace(*cp))) { -+ if (cpp >= cbuf + CMD_BUFLEN) -+ break; -+ if (escaped) { -+ escaped = 0; -+ *cpp++ = *cp++; -+ continue; -+ } -+ if (*cp == '\\') { -+ escaped = 1; -+ ++cp; -+ continue; -+ } -+ if (*cp == quoted) { -+ quoted = '\0'; -+ } else if (*cp == '\'' || *cp == '"') { -+ quoted = *cp; -+ } -+ if ((*cpp = *cp++) == '=' && !quoted) -+ break; -+ ++cpp; -+ } -+ *cpp++ = '\0'; /* Squash a ws or '=' character */ -+ } -+ } -+ if (!argc) -+ return 0; -+ if (check_grep) -+ parse_grep(cp); -+ if (defcmd_in_progress) { -+ int result = kdb_defcmd2(cmdstr, argv[0]); -+ if (!defcmd_in_progress) { -+ argc = 0; /* avoid repeat on endefcmd */ -+ *(argv[0]) = '\0'; -+ } -+ return result; -+ } -+ if (argv[0][0] == '-' && argv[0][1] && (argv[0][1] < '0' || argv[0][1] > '9')) { -+ ignore_errors = 1; -+ ++argv[0]; -+ } -+ -+ for(tp=kdb_commands, i=0; i < kdb_max_commands; i++,tp++) { -+ if (tp->cmd_name) { -+ /* -+ * If this command is allowed to be abbreviated, -+ * check to see if this is it. -+ */ -+ -+ if (tp->cmd_minlen -+ && (strlen(argv[0]) <= tp->cmd_minlen)) { -+ if (strncmp(argv[0], -+ tp->cmd_name, -+ tp->cmd_minlen) == 0) { -+ break; -+ } -+ } -+ -+ if (strcmp(argv[0], tp->cmd_name)==0) { -+ break; -+ } -+ } -+ } -+ -+ /* -+ * If we don't find a command by this name, see if the first -+ * few characters of this match any of the known commands. -+ * e.g., md1c20 should match md. 
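The tokenizing rules used in the loop above (whitespace ends a token, an unquoted '=' is squashed like whitespace, backslash escapes and quoting keep characters together) can be shown with a small standalone sketch. This is a simplified illustration, not the kdb_parse() code itself; unlike kdb it tokenizes in place and leaves backslashes and quote characters where they are.

#include <ctype.h>
#include <stdio.h>

#define MAXTOK 20

static int tokenize(char *cp, char *argv[])
{
	int argc = 0, escaped;
	char quoted;

	while (*cp) {
		while (isspace((unsigned char)*cp))
			cp++;
		if (!*cp || *cp == '#' || argc >= MAXTOK)
			break;				/* '#' starts a comment */
		argv[argc++] = cp;
		escaped = 0;
		quoted = '\0';
		while (*cp && (escaped || quoted || !isspace((unsigned char)*cp))) {
			if (escaped)
				escaped = 0;
			else if (*cp == '\\')
				escaped = 1;
			else if (*cp == quoted)
				quoted = '\0';
			else if (*cp == '\'' || *cp == '"')
				quoted = *cp;
			else if (*cp == '=' && !quoted)
				break;		/* '=' ends the token, as for 'set' */
			cp++;
		}
		if (*cp)
			*cp++ = '\0';		/* squash the ws or '=' delimiter */
	}
	return argc;
}

int main(void)
{
	char line[] = "set PROMPT=\"kdb %d> \"";
	char *argv[MAXTOK];
	int i, argc = tokenize(line, argv);

	for (i = 0; i < argc; i++)
		printf("argv[%d] = '%s'\n", i, argv[i]);	/* set / PROMPT / "kdb %d> " */
	return 0;
}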
-+ */ -+ if (i == kdb_max_commands) { -+ for(tp=kdb_commands, i=0; i < kdb_max_commands; i++,tp++) { -+ if (tp->cmd_name) { -+ if (strncmp(argv[0], -+ tp->cmd_name, -+ strlen(tp->cmd_name))==0) { -+ break; -+ } -+ } -+ } -+ } -+ -+ if (i < kdb_max_commands) { -+ int result; -+ KDB_STATE_SET(CMD); -+ result = (*tp->cmd_func)(argc-1, -+ (const char**)argv); -+ if (result && ignore_errors && result > KDB_CMD_GO) -+ result = 0; -+ KDB_STATE_CLEAR(CMD); -+ switch (tp->cmd_repeat) { -+ case KDB_REPEAT_NONE: -+ argc = 0; -+ if (argv[0]) -+ *(argv[0]) = '\0'; -+ break; -+ case KDB_REPEAT_NO_ARGS: -+ argc = 1; -+ if (argv[1]) -+ *(argv[1]) = '\0'; -+ break; -+ case KDB_REPEAT_WITH_ARGS: -+ break; -+ } -+ return result; -+ } -+ -+ /* -+ * If the input with which we were presented does not -+ * map to an existing command, attempt to parse it as an -+ * address argument and display the result. Useful for -+ * obtaining the address of a variable, or the nearest symbol -+ * to an address contained in a register. -+ */ -+ { -+ kdb_machreg_t value; -+ char *name = NULL; -+ long offset; -+ int nextarg = 0; -+ -+ if (kdbgetaddrarg(0, (const char **)argv, &nextarg, -+ &value, &offset, &name)) { -+ return KDB_NOTFOUND; -+ } -+ -+ kdb_printf("%s = ", argv[0]); -+ kdb_symbol_print(value, NULL, KDB_SP_DEFAULT); -+ kdb_printf("\n"); -+ return 0; -+ } -+} -+ -+ -+static int -+handle_ctrl_cmd(char *cmd) -+{ -+#define CTRL_P 16 -+#define CTRL_N 14 -+ -+ /* initial situation */ -+ if (cmd_head == cmd_tail) return 0; -+ -+ switch(*cmd) { -+ case CTRL_P: -+ if (cmdptr != cmd_tail) -+ cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT; -+ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN); -+ return 1; -+ case CTRL_N: -+ if (cmdptr != cmd_head) -+ cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT; -+ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN); -+ return 1; -+ } -+ return 0; -+} -+ -+/* -+ * kdb_do_dump -+ * -+ * Call the dump() function if the kernel is configured for LKCD. -+ * Inputs: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. dump() may or may not return. -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static void -+kdb_do_dump(void) -+{ -+#if defined(CONFIG_LKCD_DUMP) || defined(CONFIG_LKCD_DUMP_MODULE) -+ kdb_printf("Forcing dump (if configured)\n"); -+ console_loglevel = 8; /* to see the dump messages */ -+ dump("kdb_do_dump"); -+#endif -+} -+ -+/* -+ * kdb_reboot -+ * -+ * This function implements the 'reboot' command. Reboot the system -+ * immediately. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * Shouldn't return from this function. -+ */ -+ -+static int -+kdb_reboot(int argc, const char **argv) -+{ -+ emergency_restart(); -+ kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n"); -+ while (1) {}; -+ /* NOTREACHED */ -+ return 0; -+} -+ -+#ifdef CONFIG_KDB_KDUMP -+ -+int kdb_kdump_state = KDB_KDUMP_RESET; /* KDB kdump state */ -+ -+static int kdb_cpu(int argc, const char **argv); -+ -+/* -+ * kdb_kdump_check -+ * -+ * This is where the kdump on monarch cpu is handled. 
-+ * -+ */ -+void kdb_kdump_check(struct pt_regs *regs) -+{ -+ if (kdb_kdump_state != KDB_KDUMP_RESET) { -+ crash_kexec(regs); -+ -+ /* If the call above returned then something -+ didn't work */ -+ kdb_printf("kdb_kdump_check: crash_kexec failed!\n"); -+ kdb_printf(" Please check if the kdump kernel has been properly loaded\n"); -+ kdb_kdump_state = KDB_KDUMP_RESET; -+ } -+} -+ -+ -+/* -+ * kdb_kdump -+ * -+ * This function implements the 'kdump' command. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * envp environment vector -+ * regs registers at time kdb was entered. -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * Shouldn't return from this function. -+ */ -+ -+static int -+kdb_kdump(int argc, const char **argv) -+{ -+ char cpu_id[6]; /* up to 99,999 cpus */ -+ const char *cpu_argv[] = {NULL, cpu_id, NULL}; -+ int ret; -+ -+ kdb_kdump_state = KDB_KDUMP_KDUMP; -+ /* Switch back to the initial cpu before process kdump command */ -+ if (smp_processor_id() != kdb_initial_cpu) { -+ sprintf(cpu_id, "%d", kdb_initial_cpu); -+ ret = kdb_cpu(1, cpu_argv); -+ if (ret != KDB_CMD_CPU) { -+ kdb_printf("kdump: Failed to switch to initial cpu %d;" -+ " aborted\n", kdb_initial_cpu); -+ kdb_kdump_state = KDB_KDUMP_RESET; -+ } -+ } else -+ ret = KDB_CMD_CPU; -+ -+ return ret; -+} -+ -+#endif /* CONFIG_KDB_KDUMP */ -+ -+static int -+kdb_quiet(int reason) -+{ -+ return (reason == KDB_REASON_CPU_UP || reason == KDB_REASON_SILENT); -+} -+ -+/* -+ * kdb_local -+ * -+ * The main code for kdb. This routine is invoked on a specific -+ * processor, it is not global. The main kdb() routine ensures -+ * that only one processor at a time is in this routine. This -+ * code is called with the real reason code on the first entry -+ * to a kdb session, thereafter it is called with reason SWITCH, -+ * even if the user goes back to the original cpu. -+ * -+ * Inputs: -+ * reason The reason KDB was invoked -+ * error The hardware-defined error code -+ * regs The exception frame at time of fault/breakpoint. NULL -+ * for reason SILENT or CPU_UP, otherwise valid. -+ * db_result Result code from the break or debug point. -+ * Returns: -+ * 0 KDB was invoked for an event which it wasn't responsible -+ * 1 KDB handled the event for which it was invoked. -+ * KDB_CMD_GO User typed 'go'. -+ * KDB_CMD_CPU User switched to another cpu. -+ * KDB_CMD_SS Single step. -+ * KDB_CMD_SSB Single step until branch. -+ * Locking: -+ * none -+ * Remarks: -+ * none -+ */ -+ -+static int -+kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_dbtrap_t db_result) -+{ -+ char *cmdbuf; -+ int diag; -+ struct task_struct *kdb_current = kdb_curr_task(smp_processor_id()); -+ -+#ifdef CONFIG_KDB_KDUMP -+ kdb_kdump_check(regs); -+#endif -+ -+ /* If kdb has been entered for an event which has been/will be -+ * recovered then silently return. We have to get this far into kdb in -+ * order to synchronize all the cpus, typically only one cpu (monarch) -+ * knows that the event is recoverable but the other cpus (slaves) may -+ * also be driven into kdb before that decision is made by the monarch. 
-+ * -+ * To pause in kdb even for recoverable events, 'set RECOVERY_PAUSE 1' -+ */ -+ KDB_DEBUG_STATE("kdb_local 1", reason); -+ if (reason == KDB_REASON_ENTER -+ && KDB_FLAG(RECOVERY) -+ && !KDB_FLAG(CATASTROPHIC)) { -+ int recovery_pause = 0; -+ kdbgetintenv("RECOVERY_PAUSE", &recovery_pause); -+ if (recovery_pause == 0) -+ reason = KDB_REASON_SILENT; -+ else -+ kdb_printf("%s: Recoverable error detected but" -+ " RECOVERY_PAUSE is set, staying in KDB\n", -+ __FUNCTION__); -+ } -+ -+ KDB_DEBUG_STATE("kdb_local 2", reason); -+ kdb_go_count = 0; -+ if (kdb_quiet(reason)) { -+ /* no message */ -+ } else if (reason == KDB_REASON_DEBUG) { -+ /* special case below */ -+ } else { -+ kdb_printf("\nEntering kdb (current=0x%p, pid %d) ", kdb_current, kdb_current->pid); -+#if defined(CONFIG_SMP) -+ kdb_printf("on processor %d ", smp_processor_id()); -+#endif -+ } -+ -+ switch (reason) { -+ case KDB_REASON_DEBUG: -+ { -+ /* -+ * If re-entering kdb after a single step -+ * command, don't print the message. -+ */ -+ switch(db_result) { -+ case KDB_DB_BPT: -+ kdb_printf("\nEntering kdb (0x%p, pid %d) ", kdb_current, kdb_current->pid); -+#if defined(CONFIG_SMP) -+ kdb_printf("on processor %d ", smp_processor_id()); -+#endif -+ kdb_printf("due to Debug @ " kdb_machreg_fmt "\n", kdba_getpc(regs)); -+ break; -+ case KDB_DB_SSB: -+ /* -+ * In the midst of ssb command. Just return. -+ */ -+ KDB_DEBUG_STATE("kdb_local 3", reason); -+ return KDB_CMD_SSB; /* Continue with SSB command */ -+ -+ break; -+ case KDB_DB_SS: -+ break; -+ case KDB_DB_SSBPT: -+ KDB_DEBUG_STATE("kdb_local 4", reason); -+ return 1; /* kdba_db_trap did the work */ -+ default: -+ kdb_printf("kdb: Bad result from kdba_db_trap: %d\n", -+ db_result); -+ break; -+ } -+ -+ } -+ break; -+ case KDB_REASON_ENTER: -+ if (KDB_STATE(KEYBOARD)) -+ kdb_printf("due to Keyboard Entry\n"); -+ else { -+ kdb_printf("due to KDB_ENTER()\n"); -+ } -+ break; -+ case KDB_REASON_KEYBOARD: -+ KDB_STATE_SET(KEYBOARD); -+ kdb_printf("due to Keyboard Entry\n"); -+ break; -+ case KDB_REASON_ENTER_SLAVE: /* drop through, slaves only get released via cpu switch */ -+ case KDB_REASON_SWITCH: -+ kdb_printf("due to cpu switch\n"); -+ if (KDB_STATE(GO_SWITCH)) { -+ KDB_STATE_CLEAR(GO_SWITCH); -+ KDB_DEBUG_STATE("kdb_local 5", reason); -+ return KDB_CMD_GO; -+ } -+ break; -+ case KDB_REASON_OOPS: -+ kdb_printf("Oops: %s\n", kdb_diemsg); -+ kdb_printf("due to oops @ " kdb_machreg_fmt "\n", kdba_getpc(regs)); -+ kdba_dumpregs(regs, NULL, NULL); -+ break; -+ case KDB_REASON_NMI: -+ kdb_printf("due to NonMaskable Interrupt @ " kdb_machreg_fmt "\n", -+ kdba_getpc(regs)); -+ kdba_dumpregs(regs, NULL, NULL); -+ break; -+ case KDB_REASON_BREAK: -+ kdb_printf("due to Breakpoint @ " kdb_machreg_fmt "\n", kdba_getpc(regs)); -+ /* -+ * Determine if this breakpoint is one that we -+ * are interested in. 
-+ */ -+ if (db_result != KDB_DB_BPT) { -+ kdb_printf("kdb: error return from kdba_bp_trap: %d\n", db_result); -+ KDB_DEBUG_STATE("kdb_local 6", reason); -+ return 0; /* Not for us, dismiss it */ -+ } -+ break; -+ case KDB_REASON_RECURSE: -+ kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n", kdba_getpc(regs)); -+ break; -+ case KDB_REASON_CPU_UP: -+ case KDB_REASON_SILENT: -+ KDB_DEBUG_STATE("kdb_local 7", reason); -+ if (reason == KDB_REASON_CPU_UP) -+ kdba_cpu_up(); -+ return KDB_CMD_GO; /* Silent entry, silent exit */ -+ break; -+ default: -+ kdb_printf("kdb: unexpected reason code: %d\n", reason); -+ KDB_DEBUG_STATE("kdb_local 8", reason); -+ return 0; /* Not for us, dismiss it */ -+ } -+ -+ kdba_local_arch_setup(); -+ -+ kdba_set_current_task(kdb_current); -+ -+ while (1) { -+ /* -+ * Initialize pager context. -+ */ -+ kdb_nextline = 1; -+ KDB_STATE_CLEAR(SUPPRESS); -+#ifdef kdba_setjmp -+ /* -+ * Use kdba_setjmp/kdba_longjmp to break out of -+ * the pager early and to attempt to recover from kdb errors. -+ */ -+ KDB_STATE_CLEAR(LONGJMP); -+ if (kdbjmpbuf) { -+ if (kdba_setjmp(&kdbjmpbuf[smp_processor_id()])) { -+ /* Command aborted (usually in pager) */ -+ continue; -+ } -+ else -+ KDB_STATE_SET(LONGJMP); -+ } -+#endif /* kdba_setjmp */ -+ -+ cmdbuf = cmd_cur; -+ *cmdbuf = '\0'; -+ *(cmd_hist[cmd_head])='\0'; -+ -+ if (KDB_FLAG(ONLY_DO_DUMP)) { -+ /* kdb is off but a catastrophic error requires a dump. -+ * Take the dump and reboot. -+ * Turn on logging so the kdb output appears in the log -+ * buffer in the dump. -+ */ -+ const char *setargs[] = { "set", "LOGGING", "1" }; -+ kdb_set(2, setargs); -+ kdb_do_dump(); -+ kdb_reboot(0, NULL); -+ /*NOTREACHED*/ -+ } -+ -+do_full_getstr: -+#if defined(CONFIG_SMP) -+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"), smp_processor_id()); -+#else -+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT")); -+#endif -+ if (defcmd_in_progress) -+ strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN); -+ -+ /* -+ * Fetch command from keyboard -+ */ -+ cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str); -+ if (*cmdbuf != '\n') { -+ if (*cmdbuf < 32) { -+ if(cmdptr == cmd_head) { -+ strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN); -+ *(cmd_hist[cmd_head]+strlen(cmd_hist[cmd_head])-1) = '\0'; -+ } -+ if(!handle_ctrl_cmd(cmdbuf)) -+ *(cmd_cur+strlen(cmd_cur)-1) = '\0'; -+ cmdbuf = cmd_cur; -+ goto do_full_getstr; -+ } -+ else -+ strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN); -+ -+ cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT; -+ if (cmd_head == cmd_tail) cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT; -+ -+ } -+ -+ cmdptr = cmd_head; -+ diag = kdb_parse(cmdbuf); -+ if (diag == KDB_NOTFOUND) { -+ kdb_printf("Unknown kdb command: '%s'\n", cmdbuf); -+ diag = 0; -+ } -+ if (diag == KDB_CMD_GO -+ || diag == KDB_CMD_CPU -+ || diag == KDB_CMD_SS -+ || diag == KDB_CMD_SSB) -+ break; -+ -+ if (diag) -+ kdb_cmderror(diag); -+ } -+ -+ kdba_local_arch_cleanup(); -+ -+ KDB_DEBUG_STATE("kdb_local 9", diag); -+ return diag; -+} -+ -+ -+/* -+ * kdb_print_state -+ * -+ * Print the state data for the current processor for debugging. -+ * -+ * Inputs: -+ * text Identifies the debug point -+ * value Any integer value to be printed, e.g. reason code. -+ * Returns: -+ * None. 
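The interactive loop above cycles through cmd_hist with ctrl-p and ctrl-n. A standalone sketch of that ring-buffer behaviour follows; the sizes and names are illustrative, and the wrap-around decrement is written out explicitly rather than relying on unsigned modulo arithmetic.

#include <stdio.h>

#define HIST_COUNT 8			/* kdb uses KDB_CMD_HISTORY_COUNT (32) */
#define BUFLEN     200

static char hist[HIST_COUNT][BUFLEN];
static unsigned int head, tail, cur;	/* cur plays the role of cmdptr */

static void hist_add(const char *line)
{
	snprintf(hist[head], BUFLEN, "%s", line);
	head = (head + 1) % HIST_COUNT;
	if (head == tail)		/* ring full: drop the oldest entry */
		tail = (tail + 1) % HIST_COUNT;
	cur = head;			/* browsing restarts at the newest slot */
}

static const char *hist_prev(void)	/* what ctrl-p does; ctrl-n is symmetric */
{
	if (cur != tail)
		cur = (cur + HIST_COUNT - 1) % HIST_COUNT;
	return hist[cur];
}

int main(void)
{
	hist_add("md schedule");
	hist_add("bt");
	printf("%s\n", hist_prev());	/* bt          */
	printf("%s\n", hist_prev());	/* md schedule */
	return 0;
}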
-+ * Locking: -+ * none -+ * Remarks: -+ * none -+ */ -+ -+void kdb_print_state(const char *text, int value) -+{ -+ kdb_printf("state: %s cpu %d value %d initial %d state %x\n", -+ text, smp_processor_id(), value, kdb_initial_cpu, kdb_state[smp_processor_id()]); -+} -+ -+/* -+ * kdb_previous_event -+ * -+ * Return a count of cpus that are leaving kdb, i.e. the number -+ * of processors that are still handling the previous kdb event. -+ * -+ * Inputs: -+ * None. -+ * Returns: -+ * Count of cpus in previous event. -+ * Locking: -+ * none -+ * Remarks: -+ * none -+ */ -+ -+static int -+kdb_previous_event(void) -+{ -+ int i, leaving = 0; -+ for (i = 0; i < NR_CPUS; ++i) { -+ if (KDB_STATE_CPU(LEAVING, i)) -+ ++leaving; -+ } -+ return leaving; -+} -+ -+/* -+ * kdb_wait_for_cpus -+ * -+ * Invoked once at the start of a kdb event, from the controlling cpu. Wait a -+ * short period for the other cpus to enter kdb state. -+ * -+ * Inputs: -+ * none -+ * Returns: -+ * none -+ * Locking: -+ * none -+ * Remarks: -+ * none -+ */ -+ -+int kdb_wait_for_cpus_secs; -+ -+static void -+kdb_wait_for_cpus(void) -+{ -+#ifdef CONFIG_SMP -+ int online = 0, kdb_data = 0, prev_kdb_data = 0, c, time; -+ mdelay(100); -+ for (time = 0; time < kdb_wait_for_cpus_secs; ++time) { -+ online = 0; -+ kdb_data = 0; -+ for_each_online_cpu(c) { -+ ++online; -+ if (kdb_running_process[c].seqno >= kdb_seqno - 1) -+ ++kdb_data; -+ } -+ if (online == kdb_data) -+ break; -+ if (prev_kdb_data != kdb_data) { -+ kdb_nextline = 0; /* no prompt yet */ -+ kdb_printf(" %d out of %d cpus in kdb, waiting for the rest, timeout in %d second(s)\n", -+ kdb_data, online, kdb_wait_for_cpus_secs - time); -+ prev_kdb_data = kdb_data; -+ } -+ touch_nmi_watchdog(); -+ mdelay(1000); -+ /* Architectures may want to send a more forceful interrupt */ -+ if (time == min(kdb_wait_for_cpus_secs / 2, 5)) -+ kdba_wait_for_cpus(); -+ if (time % 4 == 0) -+ kdb_printf("."); -+ } -+ if (time) { -+ int wait = online - kdb_data; -+ if (wait == 0) -+ kdb_printf("All cpus are now in kdb\n"); -+ else -+ kdb_printf("%d cpu%s not in kdb, %s state is unknown\n", -+ wait, -+ wait == 1 ? " is" : "s are", -+ wait == 1 ? "its" : "their"); -+ } -+ /* give back the vector we took over in smp_kdb_stop */ -+ kdba_giveback_vector(KDB_VECTOR); -+#endif /* CONFIG_SMP */ -+} -+ -+/* -+ * kdb_main_loop -+ * -+ * The main kdb loop. After initial setup and assignment of the controlling -+ * cpu, all cpus are in this loop. One cpu is in control and will issue the kdb -+ * prompt, the others will spin until 'go' or cpu switch. -+ * -+ * To get a consistent view of the kernel stacks for all processes, this routine -+ * is invoked from the main kdb code via an architecture specific routine. -+ * kdba_main_loop is responsible for making the kernel stacks consistent for all -+ * processes, there should be no difference between a blocked process and a -+ * running process as far as kdb is concerned. -+ * -+ * Inputs: -+ * reason The reason KDB was invoked -+ * error The hardware-defined error code -+ * reason2 kdb's current reason code. Initially error but can change -+ * acording to kdb state. -+ * db_result Result code from break or debug point. -+ * regs The exception frame at time of fault/breakpoint. If reason -+ * is SILENT or CPU_UP then regs is NULL, otherwise it -+ * should always be valid. -+ * Returns: -+ * 0 KDB was invoked for an event which it wasn't responsible -+ * 1 KDB handled the event for which it was invoked. 
-+ * Locking: -+ * none -+ * Remarks: -+ * none -+ */ -+ -+int -+kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error, -+ kdb_dbtrap_t db_result, struct pt_regs *regs) -+{ -+ int result = 1; -+ /* Stay in kdb() until 'go', 'ss[b]' or an error */ -+ while (1) { -+ /* -+ * All processors except the one that is in control -+ * will spin here. -+ */ -+ KDB_DEBUG_STATE("kdb_main_loop 1", reason); -+ while (KDB_STATE(HOLD_CPU)) { -+ /* state KDB is turned off by kdb_cpu to see if the -+ * other cpus are still live, each cpu in this loop -+ * turns it back on. -+ */ -+ if (!KDB_STATE(KDB)) { -+ KDB_STATE_SET(KDB); -+ } -+ -+#if defined(CONFIG_KDB_KDUMP) -+ if (KDB_STATE(KEXEC)) { -+ struct pt_regs r; -+ if (regs == NULL) -+ regs = &r; -+ -+ kdba_kdump_shutdown_slave(regs); -+ return 0; -+ } -+#endif -+ } -+ -+ KDB_STATE_CLEAR(SUPPRESS); -+ KDB_DEBUG_STATE("kdb_main_loop 2", reason); -+ if (KDB_STATE(LEAVING)) -+ break; /* Another cpu said 'go' */ -+ -+ if (!kdb_quiet(reason)) -+ kdb_wait_for_cpus(); -+ /* Still using kdb, this processor is in control */ -+ result = kdb_local(reason2, error, regs, db_result); -+ KDB_DEBUG_STATE("kdb_main_loop 3", result); -+ -+ if (result == KDB_CMD_CPU) { -+ /* Cpu switch, hold the current cpu, release the target one. */ -+ reason2 = KDB_REASON_SWITCH; -+ KDB_STATE_SET(HOLD_CPU); -+ KDB_STATE_CLEAR_CPU(HOLD_CPU, kdb_new_cpu); -+ continue; -+ } -+ -+ if (result == KDB_CMD_SS) { -+ KDB_STATE_SET(DOING_SS); -+ break; -+ } -+ -+ if (result == KDB_CMD_SSB) { -+ KDB_STATE_SET(DOING_SS); -+ KDB_STATE_SET(DOING_SSB); -+ break; -+ } -+ -+ if (result && result != 1 && result != KDB_CMD_GO) -+ kdb_printf("\nUnexpected kdb_local return code %d\n", result); -+ -+ KDB_DEBUG_STATE("kdb_main_loop 4", reason); -+ break; -+ } -+ if (KDB_STATE(DOING_SS)) -+ KDB_STATE_CLEAR(SSBPT); -+ return result; -+} -+ -+/* iapc_boot_arch was defined in ACPI 2.0, FADT revision 3 onwards. For any -+ * FADT prior to revision 3, we have to assume that we have an i8042 I/O -+ * device. ACPI initialises after KDB initialises but before using KDB, so -+ * check iapc_boot_arch on each entry to KDB. -+ */ -+static void -+kdb_check_i8042(void) -+{ -+ KDB_FLAG_CLEAR(NO_I8042); -+#ifdef CONFIG_ACPI -+ if (acpi_gbl_FADT.header.revision >= 3 && -+ (acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) == 0) -+ KDB_FLAG_SET(NO_I8042); -+#endif /* CONFIG_ACPI */ -+} -+ -+/* -+ * kdb -+ * -+ * This function is the entry point for the kernel debugger. It -+ * provides a command parser and associated support functions to -+ * allow examination and control of an active kernel. -+ * -+ * The breakpoint trap code should invoke this function with -+ * one of KDB_REASON_BREAK (int 03) or KDB_REASON_DEBUG (debug register) -+ * -+ * the die_if_kernel function should invoke this function with -+ * KDB_REASON_OOPS. -+ * -+ * In single step mode, one cpu is released to run without -+ * breakpoints. Interrupts and NMI are reset to their original values, -+ * the cpu is allowed to do one instruction which causes a trap -+ * into kdb with KDB_REASON_DEBUG. -+ * -+ * Inputs: -+ * reason The reason KDB was invoked -+ * error The hardware-defined error code -+ * regs The exception frame at time of fault/breakpoint. If reason -+ * is SILENT or CPU_UP then regs is NULL, otherwise it -+ * should always be valid. -+ * Returns: -+ * 0 KDB was invoked for an event which it wasn't responsible -+ * 1 KDB handled the event for which it was invoked. -+ * Locking: -+ * none -+ * Remarks: -+ * No assumptions of system state. 
This function may be invoked
-+ * with arbitrary locks held. It will stop all other processors
-+ * in an SMP environment, disable all interrupts, and it does not use
-+ * the operating system's keyboard driver.
-+ *
-+ * This code is reentrant but only for cpu switch. Any other
-+ * reentrancy is an error, although kdb will attempt to recover.
-+ *
-+ * At the start of a kdb session the initial processor is running
-+ * kdb() and the other processors can be doing anything. When the
-+ * initial processor calls smp_kdb_stop() the other processors are
-+ * driven through kdb_ipi which calls kdb() with reason SWITCH.
-+ * That brings all processors into this routine, one with a "real"
-+ * reason code, the other with SWITCH.
-+ *
-+ * Because the other processors are driven via smp_kdb_stop(),
-+ * they enter here from the NMI handler. Until the other
-+ * processors exit from here and exit from kdb_ipi, they will not
-+ * take any more NMI requests. The initial cpu will still take NMI.
-+ *
-+ * Multiple race and reentrancy conditions, each with different
-+ * avoidance mechanisms.
-+ *
-+ * Two cpus hit debug points at the same time.
-+ *
-+ * kdb_lock and kdb_initial_cpu ensure that only one cpu gets
-+ * control of kdb. The others spin on kdb_initial_cpu until
-+ * they are driven through NMI into kdb_ipi. When the initial
-+ * cpu releases the others from NMI, they resume trying to get
-+ * kdb_initial_cpu to start a new event.
-+ *
-+ * A cpu is released from kdb and starts a new event before the
-+ * original event has completely ended.
-+ *
-+ * kdb_previous_event() prevents any cpu from entering
-+ * kdb_initial_cpu state until the previous event has completely
-+ * ended on all cpus.
-+ *
-+ * An exception occurs inside kdb.
-+ *
-+ * kdb_initial_cpu detects recursive entry to kdb and attempts
-+ * to recover. The recovery uses longjmp() which means that
-+ * recursive calls to kdb never return. Beware of assumptions
-+ * like
-+ *
-+ * ++depth;
-+ * kdb();
-+ * --depth;
-+ *
-+ * If the kdb call is recursive then longjmp takes over and
-+ * --depth is never executed.
-+ *
-+ * NMI handling.
-+ *
-+ * NMI handling is tricky. The initial cpu is invoked by some kdb event,
-+ * this event could be NMI driven but usually is not. The other cpus are
-+ * driven into kdb() via kdb_ipi which uses NMI so at the start the other
-+ * cpus will not accept NMI. Some operations such as SS release one cpu
-+ * but hold all the others. Releasing a cpu means it drops back to
-+ * whatever it was doing before the kdb event, this means it drops out of
-+ * kdb_ipi and hence out of NMI status. But the software watchdog uses
-+ * NMI and we do not want spurious watchdog calls into kdb. kdba_read()
-+ * resets the watchdog counters in its input polling loop, when a kdb
-+ * command is running it is subject to NMI watchdog events.
-+ *
-+ * Another problem with NMI handling is the NMI used to drive the other
-+ * cpus into kdb cannot be distinguished from the watchdog NMI. State
-+ * flag WAIT_IPI indicates that a cpu is waiting for NMI via kdb_ipi,
-+ * if not set then software NMI is ignored by kdb_ipi.
-+ *
-+ * Cpu switching.
-+ *
-+ * All cpus are in kdb (or they should be), all but one are
-+ * spinning on KDB_STATE(HOLD_CPU). Only one cpu is not in
-+ * HOLD_CPU state, only that cpu can handle commands.
-+ *
-+ * Go command entered.
-+ *
-+ * If necessary, go will switch to the initial cpu first.
If the event -+ * was caused by a software breakpoint (assumed to be global) that -+ * requires single-step to get over the breakpoint then only release the -+ * initial cpu, after the initial cpu has single-stepped the breakpoint -+ * then release the rest of the cpus. If SSBPT is not required then -+ * release all the cpus at once. -+ */ -+ -+int -+kdb(kdb_reason_t reason, int error, struct pt_regs *regs) -+{ -+ kdb_intstate_t int_state; /* Interrupt state */ -+ kdb_reason_t reason2 = reason; -+ int result = 0; /* Default is kdb did not handle it */ -+ int ss_event, old_regs_saved = 0; -+ struct pt_regs *old_regs = NULL; -+ kdb_dbtrap_t db_result=KDB_DB_NOBPT; -+ preempt_disable(); -+ atomic_inc(&kdb_event); -+ -+ switch(reason) { -+ case KDB_REASON_OOPS: -+ case KDB_REASON_NMI: -+ KDB_FLAG_SET(CATASTROPHIC); /* kernel state is dubious now */ -+ break; -+ default: -+ break; -+ } -+ switch(reason) { -+ case KDB_REASON_ENTER: -+ case KDB_REASON_ENTER_SLAVE: -+ case KDB_REASON_BREAK: -+ case KDB_REASON_DEBUG: -+ case KDB_REASON_OOPS: -+ case KDB_REASON_SWITCH: -+ case KDB_REASON_KEYBOARD: -+ case KDB_REASON_NMI: -+ if (regs && regs != get_irq_regs()) { -+ old_regs = set_irq_regs(regs); -+ old_regs_saved = 1; -+ } -+ break; -+ default: -+ break; -+ } -+ if (kdb_continue_catastrophic > 2) { -+ kdb_printf("kdb_continue_catastrophic is out of range, setting to 2\n"); -+ kdb_continue_catastrophic = 2; -+ } -+ if (!kdb_on && KDB_FLAG(CATASTROPHIC) && kdb_continue_catastrophic == 2) { -+ KDB_FLAG_SET(ONLY_DO_DUMP); -+ } -+ if (!kdb_on && !KDB_FLAG(ONLY_DO_DUMP)) -+ goto out; -+ -+ KDB_DEBUG_STATE("kdb 1", reason); -+ KDB_STATE_CLEAR(SUPPRESS); -+ -+ /* Filter out userspace breakpoints first, no point in doing all -+ * the kdb smp fiddling when it is really a gdb trap. -+ * Save the single step status first, kdba_db_trap clears ss status. -+ * kdba_b[dp]_trap sets SSBPT if required. -+ */ -+ ss_event = KDB_STATE(DOING_SS) || KDB_STATE(SSBPT); -+#ifdef CONFIG_CPU_XSCALE -+ if ( KDB_STATE(A_XSC_ICH) ) { -+ /* restore changed I_BIT */ -+ KDB_STATE_CLEAR(A_XSC_ICH); -+ kdba_restore_retirq(regs, KDB_STATE(A_XSC_IRQ)); -+ if ( !ss_event ) { -+ kdb_printf("Stranger!!! Why IRQ bit is changed====\n"); -+ } -+ } -+#endif -+ if (reason == KDB_REASON_BREAK) { -+ db_result = kdba_bp_trap(regs, error); /* Only call this once */ -+ } -+ if (reason == KDB_REASON_DEBUG) { -+ db_result = kdba_db_trap(regs, error); /* Only call this once */ -+ } -+ -+ if ((reason == KDB_REASON_BREAK || reason == KDB_REASON_DEBUG) -+ && db_result == KDB_DB_NOBPT) { -+ KDB_DEBUG_STATE("kdb 2", reason); -+ goto out; /* Not one of mine */ -+ } -+ -+ /* Turn off single step if it was being used */ -+ if (ss_event) { -+ kdba_clearsinglestep(regs); -+ /* Single step after a breakpoint removes the need for a delayed reinstall */ -+ if (reason == KDB_REASON_BREAK || reason == KDB_REASON_DEBUG) -+ KDB_STATE_CLEAR(SSBPT); -+ } -+ -+ /* kdb can validly reenter but only for certain well defined conditions */ -+ if (reason == KDB_REASON_DEBUG -+ && !KDB_STATE(HOLD_CPU) -+ && ss_event) -+ KDB_STATE_SET(REENTRY); -+ else -+ KDB_STATE_CLEAR(REENTRY); -+ -+ /* Wait for previous kdb event to completely exit before starting -+ * a new event. -+ */ -+ while (kdb_previous_event()) -+ ; -+ KDB_DEBUG_STATE("kdb 3", reason); -+ -+ /* -+ * If kdb is already active, print a message and try to recover. -+ * If recovery is not possible and recursion is allowed or -+ * forced recursion without recovery is set then try to recurse -+ * in kdb. 
Not guaranteed to work but it makes an attempt at -+ * debugging the debugger. -+ */ -+ if (reason != KDB_REASON_SWITCH && -+ reason != KDB_REASON_ENTER_SLAVE) { -+ if (KDB_IS_RUNNING() && !KDB_STATE(REENTRY)) { -+ int recover = 1; -+ unsigned long recurse = 0; -+ kdb_printf("kdb: Debugger re-entered on cpu %d, new reason = %d\n", -+ smp_processor_id(), reason); -+ /* Should only re-enter from released cpu */ -+ -+ if (KDB_STATE(HOLD_CPU)) { -+ kdb_printf(" Strange, cpu %d should not be running\n", smp_processor_id()); -+ recover = 0; -+ } -+ if (!KDB_STATE(CMD)) { -+ kdb_printf(" Not executing a kdb command\n"); -+ recover = 0; -+ } -+ if (!KDB_STATE(LONGJMP)) { -+ kdb_printf(" No longjmp available for recovery\n"); -+ recover = 0; -+ } -+ kdbgetulenv("RECURSE", &recurse); -+ if (recurse > 1) { -+ kdb_printf(" Forced recursion is set\n"); -+ recover = 0; -+ } -+ if (recover) { -+ kdb_printf(" Attempting to abort command and recover\n"); -+#ifdef kdba_setjmp -+ kdba_longjmp(&kdbjmpbuf[smp_processor_id()], 0); -+#endif /* kdba_setjmp */ -+ } -+ if (recurse) { -+ if (KDB_STATE(RECURSE)) { -+ kdb_printf(" Already in recursive mode\n"); -+ } else { -+ kdb_printf(" Attempting recursive mode\n"); -+ KDB_STATE_SET(RECURSE); -+ KDB_STATE_SET(REENTRY); -+ reason2 = KDB_REASON_RECURSE; -+ recover = 1; -+ } -+ } -+ if (!recover) { -+ kdb_printf(" Cannot recover, allowing event to proceed\n"); -+ /*temp*/ -+ while (KDB_IS_RUNNING()) -+ cpu_relax(); -+ goto out; -+ } -+ } -+ } else if (reason == KDB_REASON_SWITCH && !KDB_IS_RUNNING()) { -+ kdb_printf("kdb: CPU switch without kdb running, I'm confused\n"); -+ goto out; -+ } -+ -+ /* -+ * Disable interrupts, breakpoints etc. on this processor -+ * during kdb command processing -+ */ -+ KDB_STATE_SET(KDB); -+ kdba_disableint(&int_state); -+ if (!KDB_STATE(KDB_CONTROL)) { -+ kdb_bp_remove_local(); -+ KDB_STATE_SET(KDB_CONTROL); -+ } -+ -+ /* -+ * If not entering the debugger due to CPU switch or single step -+ * reentry, serialize access here. -+ * The processors may race getting to this point - if, -+ * for example, more than one processor hits a breakpoint -+ * at the same time. We'll serialize access to kdb here - -+ * other processors will loop here, and the NMI from the stop -+ * IPI will take them into kdb as switch candidates. Once -+ * the initial processor releases the debugger, the rest of -+ * the processors will race for it. -+ * -+ * The above describes the normal state of affairs, where two or more -+ * cpus that are entering kdb at the "same" time are assumed to be for -+ * separate events. However some processes such as ia64 MCA/INIT will -+ * drive all the cpus into error processing at the same time. For that -+ * case, all of the cpus entering kdb at the "same" time are really a -+ * single event. -+ * -+ * That case is handled by the use of KDB_ENTER by one cpu (the -+ * monarch) and KDB_ENTER_SLAVE on the other cpus (the slaves). -+ * KDB_ENTER_SLAVE maps to KDB_REASON_ENTER_SLAVE. The slave events -+ * will be treated as if they had just responded to the kdb IPI, i.e. -+ * as if they were KDB_REASON_SWITCH. -+ * -+ * Because of races across multiple cpus, ENTER_SLAVE can occur before -+ * the main ENTER. Hold up ENTER_SLAVE here until the main ENTER -+ * arrives. 
-+ */ -+ -+ if (reason == KDB_REASON_ENTER_SLAVE) { -+ spin_lock(&kdb_lock); -+ while (!KDB_IS_RUNNING()) { -+ spin_unlock(&kdb_lock); -+ while (!KDB_IS_RUNNING()) -+ cpu_relax(); -+ spin_lock(&kdb_lock); -+ } -+ reason = KDB_REASON_SWITCH; -+ KDB_STATE_SET(HOLD_CPU); -+ spin_unlock(&kdb_lock); -+ } -+ -+ if (reason == KDB_REASON_SWITCH || KDB_STATE(REENTRY)) -+ ; /* drop through */ -+ else { -+ KDB_DEBUG_STATE("kdb 4", reason); -+ spin_lock(&kdb_lock); -+ while (KDB_IS_RUNNING() || kdb_previous_event()) { -+ spin_unlock(&kdb_lock); -+ while (KDB_IS_RUNNING() || kdb_previous_event()) -+ cpu_relax(); -+ spin_lock(&kdb_lock); -+ } -+ KDB_DEBUG_STATE("kdb 5", reason); -+ -+ kdb_initial_cpu = smp_processor_id(); -+ ++kdb_seqno; -+ spin_unlock(&kdb_lock); -+ if (!kdb_quiet(reason)) -+ notify_die(DIE_KDEBUG_ENTER, "KDEBUG ENTER", regs, error, 0, 0); -+ } -+ -+ if (smp_processor_id() == kdb_initial_cpu -+ && !KDB_STATE(REENTRY)) { -+ KDB_STATE_CLEAR(HOLD_CPU); -+ KDB_STATE_CLEAR(WAIT_IPI); -+ kdb_check_i8042(); -+ /* -+ * Remove the global breakpoints. This is only done -+ * once from the initial processor on initial entry. -+ */ -+ if (!kdb_quiet(reason) || smp_processor_id() == 0) -+ kdb_bp_remove_global(); -+ -+ /* -+ * If SMP, stop other processors. The other processors -+ * will enter kdb() with KDB_REASON_SWITCH and spin in -+ * kdb_main_loop(). -+ */ -+ KDB_DEBUG_STATE("kdb 6", reason); -+ if (NR_CPUS > 1 && !kdb_quiet(reason)) { -+ int i; -+ for (i = 0; i < NR_CPUS; ++i) { -+ if (!cpu_online(i)) -+ continue; -+ if (i != kdb_initial_cpu) { -+ KDB_STATE_SET_CPU(HOLD_CPU, i); -+ KDB_STATE_SET_CPU(WAIT_IPI, i); -+ } -+ } -+ KDB_DEBUG_STATE("kdb 7", reason); -+ smp_kdb_stop(); -+ KDB_DEBUG_STATE("kdb 8", reason); -+ } -+ } -+ -+ if (KDB_STATE(GO1)) { -+ kdb_bp_remove_global(); /* They were set for single-step purposes */ -+ KDB_STATE_CLEAR(GO1); -+ reason = KDB_REASON_SILENT; /* Now silently go */ -+ } -+ -+ /* Set up a consistent set of process stacks before talking to the user */ -+ KDB_DEBUG_STATE("kdb 9", result); -+ result = kdba_main_loop(reason, reason2, error, db_result, regs); -+ reason = reason2; /* back to original event type */ -+ -+ KDB_DEBUG_STATE("kdb 10", result); -+ kdba_adjust_ip(reason, error, regs); -+ KDB_STATE_CLEAR(LONGJMP); -+ KDB_DEBUG_STATE("kdb 11", result); -+ /* go which requires single-step over a breakpoint must only release -+ * one cpu. -+ */ -+ if (result == KDB_CMD_GO && KDB_STATE(SSBPT)) -+ KDB_STATE_SET(GO1); -+ -+ if (smp_processor_id() == kdb_initial_cpu && -+ !KDB_STATE(DOING_SS) && -+ !KDB_STATE(RECURSE)) { -+ /* -+ * (Re)install the global breakpoints and cleanup the cached -+ * symbol table. This is only done once from the initial -+ * processor on go. -+ */ -+ KDB_DEBUG_STATE("kdb 12", reason); -+ if (!kdb_quiet(reason) || smp_processor_id() == 0) { -+ kdb_bp_install_global(regs); -+ kdbnearsym_cleanup(); -+ debug_kusage(); -+ } -+ if (!KDB_STATE(GO1)) { -+ /* -+ * Release all other cpus which will see KDB_STATE(LEAVING) is set. 
-+ */ -+ int i; -+ for (i = 0; i < NR_CPUS; ++i) { -+ if (KDB_STATE_CPU(KDB, i)) -+ KDB_STATE_SET_CPU(LEAVING, i); -+ KDB_STATE_CLEAR_CPU(WAIT_IPI, i); -+ KDB_STATE_CLEAR_CPU(HOLD_CPU, i); -+ } -+ /* Wait until all the other processors leave kdb */ -+ while (kdb_previous_event() != 1) -+ ; -+ if (!kdb_quiet(reason)) -+ notify_die(DIE_KDEBUG_LEAVE, "KDEBUG LEAVE", regs, error, 0, 0); -+ kdb_initial_cpu = -1; /* release kdb control */ -+ KDB_DEBUG_STATE("kdb 13", reason); -+ } -+ } -+ -+ KDB_DEBUG_STATE("kdb 14", result); -+ kdba_restoreint(&int_state); -+#ifdef CONFIG_CPU_XSCALE -+ if ( smp_processor_id() == kdb_initial_cpu && -+ ( KDB_STATE(SSBPT) | KDB_STATE(DOING_SS) ) -+ ) { -+ kdba_setsinglestep(regs); -+ // disable IRQ in stack frame -+ KDB_STATE_SET(A_XSC_ICH); -+ if ( kdba_disable_retirq(regs) ) { -+ KDB_STATE_SET(A_XSC_IRQ); -+ } -+ else { -+ KDB_STATE_CLEAR(A_XSC_IRQ); -+ } -+ } -+#endif -+ -+ /* Only do this work if we are really leaving kdb */ -+ if (!(KDB_STATE(DOING_SS) || KDB_STATE(SSBPT) || KDB_STATE(RECURSE))) { -+ KDB_DEBUG_STATE("kdb 15", result); -+ kdb_bp_install_local(regs); -+ if (old_regs_saved) -+ set_irq_regs(old_regs); -+ KDB_STATE_CLEAR(KDB_CONTROL); -+ } -+ -+ KDB_DEBUG_STATE("kdb 16", result); -+ KDB_FLAG_CLEAR(CATASTROPHIC); -+ KDB_STATE_CLEAR(IP_ADJUSTED); /* Re-adjust ip next time in */ -+ KDB_STATE_CLEAR(KEYBOARD); -+ KDB_STATE_CLEAR(KDB); /* Main kdb state has been cleared */ -+ KDB_STATE_CLEAR(RECURSE); -+ KDB_STATE_CLEAR(LEAVING); /* No more kdb work after this */ -+ KDB_DEBUG_STATE("kdb 17", reason); -+out: -+ atomic_dec(&kdb_event); -+ preempt_enable(); -+ return result != 0; -+} -+ -+/* -+ * kdb_mdr -+ * -+ * This function implements the guts of the 'mdr' command. -+ * -+ * mdr , -+ * -+ * Inputs: -+ * addr Start address -+ * count Number of bytes -+ * Outputs: -+ * None. -+ * Returns: -+ * Always 0. Any errors are detected and printed by kdb_getarea. -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_mdr(kdb_machreg_t addr, unsigned int count) -+{ -+ unsigned char c; -+ while (count--) { -+ if (kdb_getarea(c, addr)) -+ return 0; -+ kdb_printf("%02x", c); -+ addr++; -+ } -+ kdb_printf("\n"); -+ return 0; -+} -+ -+/* -+ * kdb_md -+ * -+ * This function implements the 'md', 'md1', 'md2', 'md4', 'md8' -+ * 'mdr' and 'mds' commands. -+ * -+ * md|mds [ [ []]] -+ * mdWcN [ [ []]] -+ * where W = is the width (1, 2, 4 or 8) and N is the count. -+ * for eg., md1c20 reads 20 bytes, 1 at a time. -+ * mdr , -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. 
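The mdWcN form described above packs the word width and the item count into the command name itself. A quick standalone illustration of how "md1c20" decomposes; userspace strtoul() stands in for kdb's own argument helpers.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *cmd = "md1c20";
	int bytesperword = cmd[2] - '0';		   /* W: 1, 2, 4 or 8      */
	unsigned long repeat = strtoul(cmd + 4, NULL, 10); /* N: number of items  */
	unsigned long mdcount = (repeat * bytesperword + 15) / 16; /* 16-byte rows */

	printf("width %d, %lu items, %lu row(s) of output\n",
	       bytesperword, repeat, mdcount);	/* width 1, 20 items, 2 row(s) */
	return 0;
}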
-+ * Remarks: -+ */ -+ -+static void -+kdb_md_line(const char *fmtstr, kdb_machreg_t addr, -+ int symbolic, int nosect, int bytesperword, -+ int num, int repeat, int phys) -+{ -+ /* print just one line of data */ -+ kdb_symtab_t symtab; -+ char cbuf[32]; -+ char *c = cbuf; -+ int i; -+ unsigned long word; -+ -+ memset(cbuf, '\0', sizeof(cbuf)); -+ if (phys) -+ kdb_printf("phys " kdb_machreg_fmt0 " ", addr); -+ else -+ kdb_printf(kdb_machreg_fmt0 " ", addr); -+ -+ for (i = 0; i < num && repeat--; i++) { -+ if (phys) { -+ if (kdb_getphysword(&word, addr, bytesperword)) -+ break; -+ } else if (kdb_getword(&word, addr, bytesperword)) -+ break; -+ kdb_printf(fmtstr, word); -+ if (symbolic) -+ kdbnearsym(word, &symtab); -+ else -+ memset(&symtab, 0, sizeof(symtab)); -+ if (symtab.sym_name) { -+ kdb_symbol_print(word, &symtab, 0); -+ if (!nosect) { -+ kdb_printf("\n"); -+ kdb_printf(" %s %s " -+ kdb_machreg_fmt " " kdb_machreg_fmt " " kdb_machreg_fmt, -+ symtab.mod_name, -+ symtab.sec_name, -+ symtab.sec_start, -+ symtab.sym_start, -+ symtab.sym_end); -+ } -+ addr += bytesperword; -+ } else { -+ union { -+ u64 word; -+ unsigned char c[8]; -+ } wc; -+ unsigned char *cp; -+#ifdef __BIG_ENDIAN -+ cp = wc.c + 8 - bytesperword; -+#else -+ cp = wc.c; -+#endif -+ wc.word = word; -+#define printable_char(c) ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.';}) -+ switch (bytesperword) { -+ case 8: -+ *c++ = printable_char(*cp++); -+ *c++ = printable_char(*cp++); -+ *c++ = printable_char(*cp++); -+ *c++ = printable_char(*cp++); -+ addr += 4; -+ case 4: -+ *c++ = printable_char(*cp++); -+ *c++ = printable_char(*cp++); -+ addr += 2; -+ case 2: -+ *c++ = printable_char(*cp++); -+ addr++; -+ case 1: -+ *c++ = printable_char(*cp++); -+ addr++; -+ break; -+ } -+#undef printable_char -+ } -+ } -+ kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1), " ", cbuf); -+} -+ -+static int -+kdb_md(int argc, const char **argv) -+{ -+ static kdb_machreg_t last_addr; -+ static int last_radix, last_bytesperword, last_repeat; -+ int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat; -+ int nosect = 0; -+ char fmtchar, fmtstr[64]; -+ kdb_machreg_t addr; -+ unsigned long word; -+ long offset = 0; -+ int symbolic = 0; -+ int valid = 0; -+ int phys = 0; -+ -+ kdbgetintenv("MDCOUNT", &mdcount); -+ kdbgetintenv("RADIX", &radix); -+ kdbgetintenv("BYTESPERWORD", &bytesperword); -+ -+ /* Assume 'md ' and start with environment values */ -+ repeat = mdcount * 16 / bytesperword; -+ -+ if (strcmp(argv[0], "mdr") == 0) { -+ if (argc != 2) -+ return KDB_ARGCOUNT; -+ valid = 1; -+ } else if (isdigit(argv[0][2])) { -+ bytesperword = (int)(argv[0][2] - '0'); -+ if (bytesperword == 0) { -+ bytesperword = last_bytesperword; -+ if (bytesperword == 0) { -+ bytesperword = 4; -+ } -+ } -+ last_bytesperword = bytesperword; -+ repeat = mdcount * 16 / bytesperword; -+ if (!argv[0][3]) -+ valid = 1; -+ else if (argv[0][3] == 'c' && argv[0][4]) { -+ char *p; -+ repeat = simple_strtoul(argv[0]+4, &p, 10); -+ mdcount = ((repeat * bytesperword) + 15) / 16; -+ valid = !*p; -+ } -+ last_repeat = repeat; -+ } else if (strcmp(argv[0], "md") == 0) -+ valid = 1; -+ else if (strcmp(argv[0], "mds") == 0) -+ valid = 1; -+ else if (strcmp(argv[0], "mdp") == 0) { -+ phys = valid = 1; -+ } -+ if (!valid) -+ return KDB_NOTFOUND; -+ -+ if (argc == 0) { -+ if (last_addr == 0) -+ return KDB_ARGCOUNT; -+ addr = last_addr; -+ radix = last_radix; -+ bytesperword = last_bytesperword; -+ repeat = last_repeat; -+ mdcount = ((repeat * 
bytesperword) + 15) / 16; -+ } -+ -+ if (argc) { -+ kdb_machreg_t val; -+ int diag, nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ if (argc > nextarg+2) -+ return KDB_ARGCOUNT; -+ -+ if (argc >= nextarg) { -+ diag = kdbgetularg(argv[nextarg], &val); -+ if (!diag) { -+ mdcount = (int) val; -+ repeat = mdcount * 16 / bytesperword; -+ } -+ } -+ if (argc >= nextarg+1) { -+ diag = kdbgetularg(argv[nextarg+1], &val); -+ if (!diag) -+ radix = (int) val; -+ } -+ } -+ -+ if (strcmp(argv[0], "mdr") == 0) { -+ return kdb_mdr(addr, mdcount); -+ } -+ -+ switch (radix) { -+ case 10: -+ fmtchar = 'd'; -+ break; -+ case 16: -+ fmtchar = 'x'; -+ break; -+ case 8: -+ fmtchar = 'o'; -+ break; -+ default: -+ return KDB_BADRADIX; -+ } -+ -+ last_radix = radix; -+ -+ if (bytesperword > KDB_WORD_SIZE) -+ return KDB_BADWIDTH; -+ -+ switch (bytesperword) { -+ case 8: -+ sprintf(fmtstr, "%%16.16l%c ", fmtchar); -+ break; -+ case 4: -+ sprintf(fmtstr, "%%8.8l%c ", fmtchar); -+ break; -+ case 2: -+ sprintf(fmtstr, "%%4.4l%c ", fmtchar); -+ break; -+ case 1: -+ sprintf(fmtstr, "%%2.2l%c ", fmtchar); -+ break; -+ default: -+ return KDB_BADWIDTH; -+ } -+ -+ last_repeat = repeat; -+ last_bytesperword = bytesperword; -+ -+ if (strcmp(argv[0], "mds") == 0) { -+ symbolic = 1; -+ /* Do not save these changes as last_*, they are temporary mds -+ * overrides. -+ */ -+ bytesperword = KDB_WORD_SIZE; -+ repeat = mdcount; -+ kdbgetintenv("NOSECT", &nosect); -+ } -+ -+ /* Round address down modulo BYTESPERWORD */ -+ -+ addr &= ~(bytesperword-1); -+ -+ while (repeat > 0) { -+ unsigned long a; -+ int n, z, num = (symbolic ? 1 : (16 / bytesperword)); -+ -+ for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) { -+ if (phys) { -+ if (kdb_getphysword(&word, a, bytesperword) -+ || word) -+ break; -+ } else if (kdb_getword(&word, a, bytesperword) || word) -+ break; -+ } -+ n = min(num, repeat); -+ kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword, num, repeat, phys); -+ addr += bytesperword * n; -+ repeat -= n; -+ z = (z + num - 1) / num; -+ if (z > 2) { -+ int s = num * (z-2); -+ kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0 " zero suppressed\n", -+ addr, addr + bytesperword * s - 1); -+ addr += bytesperword * s; -+ repeat -= s; -+ } -+ } -+ last_addr = addr; -+ -+ return 0; -+} -+ -+/* -+ * kdb_mm -+ * -+ * This function implements the 'mm' command. -+ * -+ * mm address-expression new-value -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * mm works on machine words, mmW works on bytes. -+ */ -+ -+static int -+kdb_mm(int argc, const char **argv) -+{ -+ int diag; -+ kdb_machreg_t addr; -+ long offset = 0; -+ unsigned long contents; -+ int nextarg; -+ int width; -+ -+ if (argv[0][2] && !isdigit(argv[0][2])) -+ return KDB_NOTFOUND; -+ -+ if (argc < 2) { -+ return KDB_ARGCOUNT; -+ } -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL))) -+ return diag; -+ -+ if (nextarg > argc) -+ return KDB_ARGCOUNT; -+ -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL))) -+ return diag; -+ -+ if (nextarg != argc + 1) -+ return KDB_ARGCOUNT; -+ -+ width = argv[0][2] ? 
(argv[0][2] - '0') : (KDB_WORD_SIZE); -+ if ((diag = kdb_putword(addr, contents, width))) -+ return diag; -+ -+ kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents); -+ -+ return 0; -+} -+ -+/* -+ * kdb_go -+ * -+ * This function implements the 'go' command. -+ * -+ * go [address-expression] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * KDB_CMD_GO for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_go(int argc, const char **argv) -+{ -+ kdb_machreg_t addr; -+ int diag; -+ int nextarg; -+ long offset; -+ struct pt_regs *regs = get_irq_regs(); -+ -+ if (argc == 1) { -+ if (smp_processor_id() != kdb_initial_cpu) { -+ kdb_printf("go
must be issued from the initial cpu, do cpu %d first\n", kdb_initial_cpu); -+ return KDB_ARGCOUNT; -+ } -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, -+ &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ -+ kdba_setpc(regs, addr); -+ } else if (argc) -+ return KDB_ARGCOUNT; -+ -+ diag = KDB_CMD_GO; -+ if (KDB_FLAG(CATASTROPHIC)) { -+ kdb_printf("Catastrophic error detected\n"); -+ kdb_printf("kdb_continue_catastrophic=%d, ", -+ kdb_continue_catastrophic); -+ if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) { -+ kdb_printf("type go a second time if you really want to continue\n"); -+ return 0; -+ } -+ if (kdb_continue_catastrophic == 2) { -+ kdb_do_dump(); -+ kdb_printf("forcing reboot\n"); -+ kdb_reboot(0, NULL); -+ } -+ kdb_printf("attempting to continue\n"); -+ } -+ if (smp_processor_id() != kdb_initial_cpu) { -+ char buf[80]; -+ kdb_printf("go was not issued from initial cpu, switching back to cpu %d\n", kdb_initial_cpu); -+ sprintf(buf, "cpu %d\n", kdb_initial_cpu); -+ /* Recursive use of kdb_parse, do not use argv after this point */ -+ argv = NULL; -+ diag = kdb_parse(buf); -+ if (diag == KDB_CMD_CPU) -+ KDB_STATE_SET_CPU(GO_SWITCH, kdb_initial_cpu); -+ } -+ return diag; -+} -+ -+/* -+ * kdb_rd -+ * -+ * This function implements the 'rd' command. -+ * -+ * rd display all general registers. -+ * rd c display all control registers. -+ * rd d display all debug registers. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_rd(int argc, const char **argv) -+{ -+ int diag; -+ if (argc == 0) { -+ if ((diag = kdb_check_regs())) -+ return diag; -+ return kdba_dumpregs(kdb_current_regs, NULL, NULL); -+ } -+ -+ if (argc > 2) { -+ return KDB_ARGCOUNT; -+ } -+ -+ if ((diag = kdb_check_regs())) -+ return diag; -+ return kdba_dumpregs(kdb_current_regs, argv[1], argc==2 ? argv[2]: NULL); -+} -+ -+/* -+ * kdb_rm -+ * -+ * This function implements the 'rm' (register modify) command. -+ * -+ * rm register-name new-contents -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * Currently doesn't allow modification of control or -+ * debug registers. -+ */ -+ -+static int -+kdb_rm(int argc, const char **argv) -+{ -+ int diag; -+ int ind = 0; -+ kdb_machreg_t contents; -+ -+ if (argc != 2) { -+ return KDB_ARGCOUNT; -+ } -+ -+ /* -+ * Allow presence or absence of leading '%' symbol. -+ */ -+ -+ if (argv[1][0] == '%') -+ ind = 1; -+ -+ diag = kdbgetularg(argv[2], &contents); -+ if (diag) -+ return diag; -+ -+ if ((diag = kdb_check_regs())) -+ return diag; -+ diag = kdba_setregcontents(&argv[1][ind], kdb_current_regs, contents); -+ if (diag) -+ return diag; -+ -+ return 0; -+} -+ -+#if defined(CONFIG_MAGIC_SYSRQ) -+/* -+ * kdb_sr -+ * -+ * This function implements the 'sr' (SYSRQ key) command which -+ * interfaces to the soi-disant MAGIC SYSRQ functionality. -+ * -+ * sr -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * None. 
-+ */ -+static int -+kdb_sr(int argc, const char **argv) -+{ -+ extern int __sysrq_enabled; -+ if (argc != 1) { -+ return KDB_ARGCOUNT; -+ } -+ if (!__sysrq_enabled) { -+ kdb_printf("Auto activating sysrq\n"); -+ __sysrq_enabled = 1; -+ } -+ -+ handle_sysrq(*argv[1], NULL); -+ -+ return 0; -+} -+#endif /* CONFIG_MAGIC_SYSRQ */ -+ -+/* -+ * kdb_ef -+ * -+ * This function implements the 'regs' (display exception frame) -+ * command. This command takes an address and expects to find -+ * an exception frame at that address, formats and prints it. -+ * -+ * regs address-expression -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * Not done yet. -+ */ -+ -+static int -+kdb_ef(int argc, const char **argv) -+{ -+ int diag; -+ kdb_machreg_t addr; -+ long offset; -+ int nextarg; -+ -+ if (argc == 1) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ -+ return kdba_dumpregs((struct pt_regs *)addr, NULL, NULL); -+ } -+ -+ return KDB_ARGCOUNT; -+} -+ -+#if defined(CONFIG_MODULES) -+extern struct list_head *kdb_modules; -+extern void free_module(struct module *); -+ -+/* modules using other modules */ -+struct module_use -+{ -+ struct list_head list; -+ struct module *module_which_uses; -+}; -+ -+/* -+ * kdb_lsmod -+ * -+ * This function implements the 'lsmod' command. Lists currently -+ * loaded kernel modules. -+ * -+ * Mostly taken from userland lsmod. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * -+ */ -+ -+static int -+kdb_lsmod(int argc, const char **argv) -+{ -+ struct module *mod; -+ -+ if (argc != 0) -+ return KDB_ARGCOUNT; -+ -+ kdb_printf("Module Size modstruct Used by\n"); -+ list_for_each_entry(mod, kdb_modules, list) { -+ -+ kdb_printf("%-20s%8u 0x%p ", mod->name, -+ mod->core_size, (void *)mod); -+#ifdef CONFIG_MODULE_UNLOAD -+ kdb_printf("%4d ", module_refcount(mod)); -+#endif -+ if (mod->state == MODULE_STATE_GOING) -+ kdb_printf(" (Unloading)"); -+ else if (mod->state == MODULE_STATE_COMING) -+ kdb_printf(" (Loading)"); -+ else -+ kdb_printf(" (Live)"); -+ -+#ifdef CONFIG_MODULE_UNLOAD -+ { -+ struct module_use *use; -+ kdb_printf(" [ "); -+ list_for_each_entry(use, &mod->modules_which_use_me, list) -+ kdb_printf("%s ", use->module_which_uses->name); -+ kdb_printf("]\n"); -+ } -+#endif -+ } -+ -+ return 0; -+} -+ -+#endif /* CONFIG_MODULES */ -+ -+/* -+ * kdb_env -+ * -+ * This function implements the 'env' command. Display the current -+ * environment variables. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_env(int argc, const char **argv) -+{ -+ int i; -+ -+ for(i=0; i<__nenv; i++) { -+ if (__env[i]) { -+ kdb_printf("%s\n", __env[i]); -+ } -+ } -+ -+ if (KDB_DEBUG(MASK)) -+ kdb_printf("KDBFLAGS=0x%x\n", kdb_flags); -+ -+ return 0; -+} -+ -+/* -+ * kdb_dmesg -+ * -+ * This function implements the 'dmesg' command to display the contents -+ * of the syslog buffer. -+ * -+ * dmesg [lines] [adjust] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. 
-+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * None. -+ */ -+ -+static int -+kdb_dmesg(int argc, const char **argv) -+{ -+ char *syslog_data[4], *start, *end, c = '\0', *p; -+ int diag, logging, logsize, lines = 0, adjust = 0, n; -+ -+ if (argc > 2) -+ return KDB_ARGCOUNT; -+ if (argc) { -+ char *cp; -+ lines = simple_strtol(argv[1], &cp, 0); -+ if (*cp) -+ lines = 0; -+ if (argc > 1) { -+ adjust = simple_strtoul(argv[2], &cp, 0); -+ if (*cp || adjust < 0) -+ adjust = 0; -+ } -+ } -+ -+ /* disable LOGGING if set */ -+ diag = kdbgetintenv("LOGGING", &logging); -+ if (!diag && logging) { -+ const char *setargs[] = { "set", "LOGGING", "0" }; -+ kdb_set(2, setargs); -+ } -+ -+ /* syslog_data[0,1] physical start, end+1. syslog_data[2,3] logical start, end+1. */ -+ debugger_syslog_data(syslog_data); -+ if (syslog_data[2] == syslog_data[3]) -+ return 0; -+ logsize = syslog_data[1] - syslog_data[0]; -+ start = syslog_data[2]; -+ end = syslog_data[3]; -+#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0]) -+ for (n = 0, p = start; p < end; ++p) { -+ if ((c = *KDB_WRAP(p)) == '\n') -+ ++n; -+ } -+ if (c != '\n') -+ ++n; -+ if (lines < 0) { -+ if (adjust >= n) -+ kdb_printf("buffer only contains %d lines, nothing printed\n", n); -+ else if (adjust - lines >= n) -+ kdb_printf("buffer only contains %d lines, last %d lines printed\n", -+ n, n - adjust); -+ if (adjust) { -+ for (; start < end && adjust; ++start) { -+ if (*KDB_WRAP(start) == '\n') -+ --adjust; -+ } -+ if (start < end) -+ ++start; -+ } -+ for (p = start; p < end && lines; ++p) { -+ if (*KDB_WRAP(p) == '\n') -+ ++lines; -+ } -+ end = p; -+ } else if (lines > 0) { -+ int skip = n - (adjust + lines); -+ if (adjust >= n) { -+ kdb_printf("buffer only contains %d lines, nothing printed\n", n); -+ skip = n; -+ } else if (skip < 0) { -+ lines += skip; -+ skip = 0; -+ kdb_printf("buffer only contains %d lines, first %d lines printed\n", -+ n, lines); -+ } -+ for (; start < end && skip; ++start) { -+ if (*KDB_WRAP(start) == '\n') -+ --skip; -+ } -+ for (p = start; p < end && lines; ++p) { -+ if (*KDB_WRAP(p) == '\n') -+ --lines; -+ } -+ end = p; -+ } -+ /* Do a line at a time (max 200 chars) to reduce protocol overhead */ -+ c = '\n'; -+ while (start != end) { -+ char buf[201]; -+ p = buf; -+ while (start < end && (c = *KDB_WRAP(start)) && (p - buf) < sizeof(buf)-1) { -+ ++start; -+ *p++ = c; -+ if (c == '\n') -+ break; -+ } -+ *p = '\0'; -+ kdb_printf("%s", buf); -+ } -+ if (c != '\n') -+ kdb_printf("\n"); -+ -+ return 0; -+} -+ -+/* -+ * kdb_cpu -+ * -+ * This function implements the 'cpu' command. -+ * -+ * cpu [] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * KDB_CMD_CPU for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * All cpu's should be spinning in kdb(). However just in case -+ * a cpu did not take the smp_kdb_stop NMI, check that a cpu -+ * entered kdb() before passing control to it. 
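The dmesg walk above never copies the syslog buffer; it works with logical positions that can run past the physical buffer and maps every access back into range the way KDB_WRAP() does. A minimal standalone sketch of that mapping, index-based rather than pointer-based, with made-up data.

#include <stdio.h>

int main(void)
{
	/* physical ring: the oldest byte sits at index 4, the newest at 3 */
	char buf[8] = { 'e', 'f', 'g', 'h', 'a', 'b', 'c', 'd' };
	int size = 8;
	int log_start = 4, log_end = 4 + 8;	/* logical window, one full wrap */
	int p;

	for (p = log_start; p < log_end; p++)
		putchar(buf[p % size]);		/* the KDB_WRAP() idea */
	putchar('\n');				/* prints "abcdefgh" */
	return 0;
}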
-+ */ -+ -+static void -+kdb_cpu_status(void) -+{ -+ int i, start_cpu, first_print = 1; -+ char state, prev_state = '?'; -+ -+ kdb_printf("Currently on cpu %d\n", smp_processor_id()); -+ kdb_printf("Available cpus: "); -+ for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { -+ if (!cpu_online(i)) -+ state = 'F'; /* cpu is offline */ -+ else { -+ struct kdb_running_process *krp = kdb_running_process+i; -+ if (KDB_STATE_CPU(KDB, i)) { -+ state = ' '; /* cpu is responding to kdb */ -+ if (kdb_task_state_char(krp->p) == 'I') -+ state = 'I'; /* running the idle task */ -+ } else if (krp->seqno && krp->p && krp->seqno >= kdb_seqno - 1) -+ state = '+'; /* some kdb data, but not responding */ -+ else -+ state = '*'; /* no kdb data */ -+ } -+ if (state != prev_state) { -+ if (prev_state != '?') { -+ if (!first_print) -+ kdb_printf(", "); -+ first_print = 0; -+ kdb_printf("%d", start_cpu); -+ if (start_cpu < i-1) -+ kdb_printf("-%d", i-1); -+ if (prev_state != ' ') -+ kdb_printf("(%c)", prev_state); -+ } -+ prev_state = state; -+ start_cpu = i; -+ } -+ } -+ /* print the trailing cpus, ignoring them if they are all offline */ -+ if (prev_state != 'F') { -+ if (!first_print) -+ kdb_printf(", "); -+ kdb_printf("%d", start_cpu); -+ if (start_cpu < i-1) -+ kdb_printf("-%d", i-1); -+ if (prev_state != ' ') -+ kdb_printf("(%c)", prev_state); -+ } -+ kdb_printf("\n"); -+} -+ -+static int -+kdb_cpu(int argc, const char **argv) -+{ -+ unsigned long cpunum; -+ int diag, i; -+ -+ /* ask the other cpus if they are still active */ -+ for (i=0; i NR_CPUS) -+ || !cpu_online(cpunum) -+ || !KDB_STATE_CPU(KDB, cpunum)) -+ return KDB_BADCPUNUM; -+ -+ kdb_new_cpu = cpunum; -+ -+ /* -+ * Switch to other cpu -+ */ -+ return KDB_CMD_CPU; -+} -+ -+/* The user may not realize that ps/bta with no parameters does not print idle -+ * or sleeping system daemon processes, so tell them how many were suppressed. -+ */ -+void -+kdb_ps_suppressed(void) -+{ -+ int idle = 0, daemon = 0; -+ unsigned long mask_I = kdb_task_state_string("I"), -+ mask_M = kdb_task_state_string("M"); -+ unsigned long cpu; -+ const struct task_struct *p, *g; -+ for (cpu = 0; cpu < NR_CPUS; ++cpu) { -+ if (!cpu_online(cpu)) -+ continue; -+ p = kdb_curr_task(cpu); -+ if (kdb_task_state(p, mask_I)) -+ ++idle; -+ } -+ kdb_do_each_thread(g, p) { -+ if (kdb_task_state(p, mask_M)) -+ ++daemon; -+ } kdb_while_each_thread(g, p); -+ if (idle || daemon) { -+ if (idle) -+ kdb_printf("%d idle process%s (state I)%s\n", -+ idle, idle == 1 ? "" : "es", -+ daemon ? " and " : ""); -+ if (daemon) -+ kdb_printf("%d sleeping system daemon (state M) process%s", -+ daemon, daemon == 1 ? "" : "es"); -+ kdb_printf(" suppressed,\nuse 'ps A' to see all.\n"); -+ } -+} -+ -+/* -+ * kdb_ps -+ * -+ * This function implements the 'ps' command which shows -+ * a list of the active processes. -+ * -+ * ps [DRSTCZEUIMA] All processes, optionally filtered by state -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+void -+kdb_ps1(const struct task_struct *p) -+{ -+ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p); -+ kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n", -+ (void *)p, p->pid, p->parent->pid, -+ kdb_task_has_cpu(p), kdb_process_cpu(p), -+ kdb_task_state_char(p), -+ (void *)(&p->thread), -+ p == kdb_curr_task(smp_processor_id()) ? 
'*': ' ', -+ p->comm); -+ if (kdb_task_has_cpu(p)) { -+ if (!krp->seqno || !krp->p) -+ kdb_printf(" Error: no saved data for this cpu\n"); -+ else { -+ if (krp->seqno < kdb_seqno - 1) -+ kdb_printf(" Warning: process state is stale\n"); -+ if (krp->p != p) -+ kdb_printf(" Error: does not match running process table (0x%p)\n", krp->p); -+ } -+ } -+} -+ -+static int -+kdb_ps(int argc, const char **argv) -+{ -+ struct task_struct *g, *p; -+ unsigned long mask, cpu; -+ -+ if (argc == 0) -+ kdb_ps_suppressed(); -+ kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n", -+ (int)(2*sizeof(void *))+2, "Task Addr", -+ (int)(2*sizeof(void *))+2, "Thread"); -+ mask = kdb_task_state_string(argc ? argv[1] : NULL); -+ /* Run the active tasks first */ -+ for (cpu = 0; cpu < NR_CPUS; ++cpu) { -+ if (!cpu_online(cpu)) -+ continue; -+ p = kdb_curr_task(cpu); -+ if (kdb_task_state(p, mask)) -+ kdb_ps1(p); -+ } -+ kdb_printf("\n"); -+ /* Now the real tasks */ -+ kdb_do_each_thread(g, p) { -+ if (kdb_task_state(p, mask)) -+ kdb_ps1(p); -+ } kdb_while_each_thread(g, p); -+ -+ return 0; -+} -+ -+/* -+ * kdb_pid -+ * -+ * This function implements the 'pid' command which switches -+ * the currently active process. -+ * -+ * pid [ | R] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+ -+static int -+kdb_pid(int argc, const char **argv) -+{ -+ struct task_struct *p; -+ unsigned long val; -+ int diag; -+ -+ if (argc > 1) -+ return KDB_ARGCOUNT; -+ -+ if (argc) { -+ if (strcmp(argv[1], "R") == 0) { -+ p = KDB_RUNNING_PROCESS_ORIGINAL[kdb_initial_cpu].p; -+ } else { -+ diag = kdbgetularg(argv[1], &val); -+ if (diag) -+ return KDB_BADINT; -+ -+ p = find_task_by_pid_ns((pid_t)val, &init_pid_ns); -+ if (!p) { -+ kdb_printf("No task with pid=%d\n", (pid_t)val); -+ return 0; -+ } -+ } -+ -+ kdba_set_current_task(p); -+ } -+ -+ kdb_printf("KDB current process is %s(pid=%d)\n", kdb_current_task->comm, -+ kdb_current_task->pid); -+ -+ return 0; -+} -+ -+/* -+ * kdb_ll -+ * -+ * This function implements the 'll' command which follows a linked -+ * list and executes an arbitrary command for each element. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_ll(int argc, const char **argv) -+{ -+ int diag; -+ kdb_machreg_t addr; -+ long offset = 0; -+ kdb_machreg_t va; -+ unsigned long linkoffset; -+ int nextarg; -+ const char *command; -+ -+ if (argc != 3) { -+ return KDB_ARGCOUNT; -+ } -+ -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ -+ diag = kdbgetularg(argv[2], &linkoffset); -+ if (diag) -+ return diag; -+ -+ /* -+ * Using the starting address as -+ * the first element in the list, and assuming that -+ * the list ends with a null pointer. 
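The traversal implemented just below reduces to: take a starting address, run a command against it, then read the pointer-sized word at address + linkoffset to reach the next element, and stop at a NULL link or on a read failure. A self-contained userspace sketch of that walk (memcpy stands in for kdb_getword and a printf stands in for the kdb_parse call; the struct and helper names are made up for the example):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* A list element; the 'll' command only needs the byte offset of 'next'. */
struct elem {
	int val;
	struct elem *next;
};

/* Stand-in for kdb_getword(): copy one pointer-sized word from 'addr'. */
static int get_word(unsigned long *word, const void *addr)
{
	memcpy(word, addr, sizeof(*word));
	return 0;
}

int main(void)
{
	struct elem c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	unsigned long va = (unsigned long)&a;
	size_t linkoffset = offsetof(struct elem, next);

	while (va) {
		/* the real command would run "cmd <va>" through kdb_parse() here */
		printf("element at %#lx val=%d\n", va, ((struct elem *)va)->val);
		if (get_word(&va, (void *)(va + linkoffset)))
			break;	/* stop on a read failure, as kdb_ll does */
	}
	return 0;
}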
-+ */ -+ -+ va = addr; -+ if (!(command = kdb_strdup(argv[3], GFP_KDB))) { -+ kdb_printf("%s: cannot duplicate command\n", __FUNCTION__); -+ return 0; -+ } -+ /* Recursive use of kdb_parse, do not use argv after this point */ -+ argv = NULL; -+ -+ while (va) { -+ char buf[80]; -+ -+ sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va); -+ diag = kdb_parse(buf); -+ if (diag) -+ return diag; -+ -+ addr = va + linkoffset; -+ if (kdb_getword(&va, addr, sizeof(va))) -+ return 0; -+ } -+ kfree(command); -+ -+ return 0; -+} -+ -+/* -+ * kdb_help -+ * -+ * This function implements the 'help' and '?' commands. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_help(int argc, const char **argv) -+{ -+ kdbtab_t *kt; -+ int i; -+ -+ kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description"); -+ kdb_printf("----------------------------------------------------------\n"); -+ for(i=0, kt=kdb_commands; i < kdb_max_commands; i++, kt++) { -+ if (kt->cmd_name) -+ kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name, -+ kt->cmd_usage, kt->cmd_help); -+ } -+ return 0; -+} -+ -+extern int kdb_wake_up_process(struct task_struct * p); -+ -+/* -+ * kdb_kill -+ * -+ * This function implements the 'kill' command. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_kill(int argc, const char **argv) -+{ -+ long sig, pid; -+ char *endp; -+ struct task_struct *p; -+ struct siginfo info; -+ -+ if (argc!=2) -+ return KDB_ARGCOUNT; -+ -+ sig = simple_strtol(argv[1], &endp, 0); -+ if (*endp) -+ return KDB_BADINT; -+ if (sig >= 0 ) { -+ kdb_printf("Invalid signal parameter.<-signal>\n"); -+ return 0; -+ } -+ sig=-sig; -+ -+ pid = simple_strtol(argv[2], &endp, 0); -+ if (*endp) -+ return KDB_BADINT; -+ if (pid <=0 ) { -+ kdb_printf("Process ID must be larger than 0.\n"); -+ return 0; -+ } -+ -+ /* Find the process.
*/ -+ if (!(p = find_task_by_pid_ns(pid, &init_pid_ns))) { -+ kdb_printf("The specified process isn't found.\n"); -+ return 0; -+ } -+ p = p->group_leader; -+ info.si_signo = sig; -+ info.si_errno = 0; -+ info.si_code = SI_USER; -+ info.si_pid = pid; /* use same capabilities as process being signalled */ -+ info.si_uid = 0; /* kdb has root authority */ -+ kdb_send_sig_info(p, &info, kdb_seqno); -+ return 0; -+} -+ -+struct kdb_tm { -+ int tm_sec; /* seconds */ -+ int tm_min; /* minutes */ -+ int tm_hour; /* hours */ -+ int tm_mday; /* day of the month */ -+ int tm_mon; /* month */ -+ int tm_year; /* year */ -+}; -+ -+static void -+kdb_gmtime(struct timespec *tv, struct kdb_tm *tm) -+{ -+ /* This will work from 1970-2099, 2100 is not a leap year */ -+ static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; -+ memset(tm, 0, sizeof(*tm)); -+ tm->tm_sec = tv->tv_sec % (24 * 60 * 60); -+ tm->tm_mday = tv->tv_sec / (24 * 60 * 60) + (2 * 365 + 1); /* shift base from 1970 to 1968 */ -+ tm->tm_min = tm->tm_sec / 60 % 60; -+ tm->tm_hour = tm->tm_sec / 60 / 60; -+ tm->tm_sec = tm->tm_sec % 60; -+ tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1)); -+ tm->tm_mday %= (4*365+1); -+ mon_day[1] = 29; -+ while (tm->tm_mday >= mon_day[tm->tm_mon]) { -+ tm->tm_mday -= mon_day[tm->tm_mon]; -+ if (++tm->tm_mon == 12) { -+ tm->tm_mon = 0; -+ ++tm->tm_year; -+ mon_day[1] = 28; -+ } -+ } -+ ++tm->tm_mday; -+} -+ -+/* -+ * Most of this code has been lifted from kernel/timer.c::sys_sysinfo(). -+ * I cannot call that code directly from kdb, it has an unconditional -+ * cli()/sti() and calls routines that take locks which can stop the debugger. -+ */ -+ -+static void -+kdb_sysinfo(struct sysinfo *val) -+{ -+ struct timespec uptime; -+ do_posix_clock_monotonic_gettime(&uptime); -+ memset(val, 0, sizeof(*val)); -+ val->uptime = uptime.tv_sec; -+ val->loads[0] = avenrun[0]; -+ val->loads[1] = avenrun[1]; -+ val->loads[2] = avenrun[2]; -+ val->procs = nr_threads-1; -+ si_meminfo(val); -+ kdb_si_swapinfo(val); -+ -+ return; -+} -+ -+/* -+ * kdb_summary -+ * -+ * This function implements the 'summary' command. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_summary(int argc, const char **argv) -+{ -+ extern struct timespec xtime; -+ extern struct timezone sys_tz; -+ struct kdb_tm tm; -+ struct sysinfo val; -+ -+ if (argc) -+ return KDB_ARGCOUNT; -+ -+ kdb_printf("sysname %s\n", init_uts_ns.name.sysname); -+ kdb_printf("release %s\n", init_uts_ns.name.release); -+ kdb_printf("version %s\n", init_uts_ns.name.version); -+ kdb_printf("machine %s\n", init_uts_ns.name.machine); -+ kdb_printf("nodename %s\n", init_uts_ns.name.nodename); -+ kdb_printf("domainname %s\n", init_uts_ns.name.domainname); -+ kdb_printf("ccversion %s\n", __stringify(CCVERSION)); -+ -+ kdb_gmtime(&xtime, &tm); -+ kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d tz_minuteswest %d\n", -+ 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday, -+ tm.tm_hour, tm.tm_min, tm.tm_sec, -+ sys_tz.tz_minuteswest); -+ -+ kdb_sysinfo(&val); -+ kdb_printf("uptime "); -+ if (val.uptime > (24*60*60)) { -+ int days = val.uptime / (24*60*60); -+ val.uptime %= (24*60*60); -+ kdb_printf("%d day%s ", days, days == 1 ? 
"" : "s"); -+ } -+ kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60); -+ -+ /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */ -+ -+#define LOAD_INT(x) ((x) >> FSHIFT) -+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) -+ kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n", -+ LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), -+ LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), -+ LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2])); -+ kdb_printf("\n"); -+#undef LOAD_INT -+#undef LOAD_FRAC -+ -+ kdb_meminfo_proc_show(); /* in fs/proc/meminfo.c */ -+ -+ return 0; -+} -+ -+/* -+ * kdb_per_cpu -+ * -+ * This function implements the 'per_cpu' command. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdb_per_cpu(int argc, const char **argv) -+{ -+ char buf[256], fmtstr[64]; -+ kdb_symtab_t symtab; -+ cpumask_t suppress; -+ int cpu, diag; -+ unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL; -+ -+ if (argc < 1 || argc > 3) -+ return KDB_ARGCOUNT; -+ -+ cpus_clear(suppress); -+ snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]); -+ if (!kdbgetsymval(buf, &symtab)) { -+ kdb_printf("%s is not a per_cpu variable\n", argv[1]); -+ return KDB_BADADDR; -+ } -+ if (argc >=2 && (diag = kdbgetularg(argv[2], &bytesperword))) -+ return diag; -+ if (!bytesperword) -+ bytesperword = KDB_WORD_SIZE; -+ else if (bytesperword > KDB_WORD_SIZE) -+ return KDB_BADWIDTH; -+ sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword)); -+ if (argc >= 3) { -+ if ((diag = kdbgetularg(argv[3], &whichcpu))) -+ return diag; -+ if (!cpu_online(whichcpu)) { -+ kdb_printf("cpu %ld is not online\n", whichcpu); -+ return KDB_BADCPUNUM; -+ } -+ } -+ -+ /* Most architectures use __per_cpu_offset[cpu], some use -+ * __per_cpu_offset(cpu), smp has no __per_cpu_offset. 
-+ */ -+#ifdef __per_cpu_offset -+#define KDB_PCU(cpu) __per_cpu_offset(cpu) -+#else -+#ifdef CONFIG_SMP -+#define KDB_PCU(cpu) __per_cpu_offset[cpu] -+#else -+#define KDB_PCU(cpu) 0 -+#endif -+#endif -+ -+ for_each_online_cpu(cpu) { -+ if (whichcpu != ~0UL && whichcpu != cpu) -+ continue; -+ addr = symtab.sym_start + KDB_PCU(cpu); -+ if ((diag = kdb_getword(&val, addr, bytesperword))) { -+ kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to read, diag=%d\n", -+ cpu, addr, diag); -+ continue; -+ } -+#ifdef CONFIG_SMP -+ if (!val) { -+ cpu_set(cpu, suppress); -+ continue; -+ } -+#endif /* CONFIG_SMP */ -+ kdb_printf("%5d ", cpu); -+ kdb_md_line(fmtstr, addr, -+ bytesperword == KDB_WORD_SIZE, -+ 1, bytesperword, 1, 1, 0); -+ } -+ if (cpus_weight(suppress) == 0) -+ return 0; -+ kdb_printf("Zero suppressed cpu(s):"); -+ for_each_cpu_mask(cpu, suppress) { -+ kdb_printf(" %d", cpu); -+ if (cpu == NR_CPUS-1 || next_cpu(cpu, suppress) != cpu + 1) -+ continue; -+ while (cpu < NR_CPUS && next_cpu(cpu, suppress) == cpu + 1) -+ ++cpu; -+ kdb_printf("-%d", cpu); -+ } -+ kdb_printf("\n"); -+ -+#undef KDB_PCU -+ -+ return 0; -+} -+ -+/* -+ * display help for the use of cmd | grep pattern -+ */ -+static int -+kdb_grep_help(int argc, const char **argv) -+{ -+ kdb_printf ("Usage of cmd args | grep pattern:\n"); -+ kdb_printf (" Any command's output may be filtered through an "); -+ kdb_printf ("emulated 'pipe'.\n"); -+ kdb_printf (" 'grep' is just a key word.\n"); -+ kdb_printf -+ (" The pattern may include a very limited set of metacharacters:\n"); -+ kdb_printf (" pattern or ^pattern or pattern$ or ^pattern$\n"); -+ kdb_printf -+ (" And if there are spaces in the pattern, you may quote it:\n"); -+ kdb_printf -+ (" \"pat tern\" or \"^pat tern\" or \"pat tern$\" or \"^pat tern$\"\n"); -+ return 0; -+} -+ -+/* -+ * kdb_register_repeat -+ * -+ * This function is used to register a kernel debugger command. -+ * -+ * Inputs: -+ * cmd Command name -+ * func Function to execute the command -+ * usage A simple usage string showing arguments -+ * help A simple help string describing command -+ * repeat Does the command auto repeat on enter? -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, one if a duplicate command. -+ * Locking: -+ * none. 
-+ * Remarks: -+ * -+ */ -+ -+#define kdb_command_extend 50 /* arbitrary */ -+int -+kdb_register_repeat(char *cmd, -+ kdb_func_t func, -+ char *usage, -+ char *help, -+ short minlen, -+ kdb_repeat_t repeat) -+{ -+ int i; -+ kdbtab_t *kp; -+ -+ /* -+ * Brute force method to determine duplicates -+ */ -+ for (i=0, kp=kdb_commands; i < kdb_max_commands; i++, kp++) { -+ if (kp->cmd_name && (strcmp(kp->cmd_name, cmd)==0)) { -+ kdb_printf("Duplicate kdb command registered: " -+ "%s, func %p help %s\n", cmd, func, help); -+ return 1; -+ } -+ } -+ -+ /* -+ * Insert command into first available location in table -+ */ -+ for (i=0, kp=kdb_commands; i < kdb_max_commands; i++, kp++) { -+ if (kp->cmd_name == NULL) { -+ break; -+ } -+ } -+ -+ if (i >= kdb_max_commands) { -+ kdbtab_t *new = kmalloc((kdb_max_commands + kdb_command_extend) * sizeof(*new), GFP_KDB); -+ if (!new) { -+ kdb_printf("Could not allocate new kdb_command table\n"); -+ return 1; -+ } -+ if (kdb_commands) { -+ memcpy(new, kdb_commands, kdb_max_commands * sizeof(*new)); -+ kfree(kdb_commands); -+ } -+ memset(new + kdb_max_commands, 0, kdb_command_extend * sizeof(*new)); -+ kdb_commands = new; -+ kp = kdb_commands + kdb_max_commands; -+ kdb_max_commands += kdb_command_extend; -+ } -+ -+ kp->cmd_name = cmd; -+ kp->cmd_func = func; -+ kp->cmd_usage = usage; -+ kp->cmd_help = help; -+ kp->cmd_flags = 0; -+ kp->cmd_minlen = minlen; -+ kp->cmd_repeat = repeat; -+ -+ return 0; -+} -+ -+/* -+ * kdb_register -+ * -+ * Compatibility register function for commands that do not need to -+ * specify a repeat state. Equivalent to kdb_register_repeat with -+ * KDB_REPEAT_NONE. -+ * -+ * Inputs: -+ * cmd Command name -+ * func Function to execute the command -+ * usage A simple usage string showing arguments -+ * help A simple help string describing command -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, one if a duplicate command. -+ * Locking: -+ * none. -+ * Remarks: -+ * -+ */ -+ -+int -+kdb_register(char *cmd, -+ kdb_func_t func, -+ char *usage, -+ char *help, -+ short minlen) -+{ -+ return kdb_register_repeat(cmd, func, usage, help, minlen, KDB_REPEAT_NONE); -+} -+ -+/* -+ * kdb_unregister -+ * -+ * This function is used to unregister a kernel debugger command. -+ * It is generally called when a module which implements kdb -+ * commands is unloaded. -+ * -+ * Inputs: -+ * cmd Command name -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, one if the command was not registered. -+ * Locking: -+ * none. -+ * Remarks: -+ * -+ */ -+ -+int -+kdb_unregister(char *cmd) -+{ -+ int i; -+ kdbtab_t *kp; -+ -+ /* -+ * find the command. -+ */ -+ for (i=0, kp=kdb_commands; i < kdb_max_commands; i++, kp++) { -+ if (kp->cmd_name && (strcmp(kp->cmd_name, cmd)==0)) { -+ kp->cmd_name = NULL; -+ return 0; -+ } -+ } -+ -+ /* -+ * Couldn't find it. -+ */ -+ return 1; -+} -+ -+/* -+ * kdb_inittab -+ * -+ * This function is called by the kdb_init function to initialize -+ * the kdb command table. It must be called prior to any other -+ * call to kdb_register_repeat. -+ * -+ * Inputs: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ */ -+ -+static void __init -+kdb_inittab(void) -+{ -+ int i; -+ kdbtab_t *kp; -+ -+ for(i=0, kp=kdb_commands; i < kdb_max_commands; i++,kp++) { -+ kp->cmd_name = NULL; -+ } -+ -+ kdb_register_repeat("md", kdb_md, "", "Display Memory Contents, also mdWcN, e.g.
md8c1", 1, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("mdr", kdb_md, " ", "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("mdp", kdb_md, " ", "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("mds", kdb_md, "", "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("mm", kdb_mm, " ", "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("id", kdb_id, "", "Display Instructions", 1, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("go", kdb_go, "[]", "Continue Execution", 1, KDB_REPEAT_NONE); -+ kdb_register_repeat("rd", kdb_rd, "", "Display Registers", 1, KDB_REPEAT_NONE); -+ kdb_register_repeat("rm", kdb_rm, " ", "Modify Registers", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("ef", kdb_ef, "", "Display exception frame", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("bt", kdb_bt, "[]", "Stack traceback", 1, KDB_REPEAT_NONE); -+ kdb_register_repeat("btp", kdb_bt, "", "Display stack for process ", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]", "Display stack all processes", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("btc", kdb_bt, "", "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("btt", kdb_bt, "", "Backtrace process given its struct task address", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("ll", kdb_ll, " ", "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("env", kdb_env, "", "Show environment variables", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("set", kdb_set, "", "Set environment variables", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("help", kdb_help, "", "Display Help Message", 1, KDB_REPEAT_NONE); -+ kdb_register_repeat("?", kdb_help, "", "Display Help Message", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("cpu", kdb_cpu, "","Switch to new cpu", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("ps", kdb_ps, "[|A]", "Display active task list", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("pid", kdb_pid, "", "Switch to another task", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("reboot", kdb_reboot, "", "Reboot the machine immediately", 0, KDB_REPEAT_NONE); -+#if defined(CONFIG_KDB_KDUMP) -+ kdb_register_repeat("kdump", kdb_kdump, "", "Calls kdump mode", 0, KDB_REPEAT_NONE); -+#endif -+#if defined(CONFIG_MODULES) -+ kdb_register_repeat("lsmod", kdb_lsmod, "", "List loaded kernel modules", 0, KDB_REPEAT_NONE); -+#endif -+#if defined(CONFIG_MAGIC_SYSRQ) -+ kdb_register_repeat("sr", kdb_sr, "", "Magic SysRq key", 0, KDB_REPEAT_NONE); -+#endif -+ kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", "Display syslog buffer", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("kill", kdb_kill, "<-signal> ", "Send a signal to a process", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("summary", kdb_summary, "", "Summarize the system", 4, KDB_REPEAT_NONE); -+ kdb_register_repeat("per_cpu", kdb_per_cpu, "", "Display per_cpu variables", 3, KDB_REPEAT_NONE); -+ kdb_register_repeat("grephelp", kdb_grep_help, "", -+ "Display help on | grep", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("print", kdb_debuginfo_print, "", -+ "Type casting, as in lcrash", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("px", kdb_debuginfo_print, "", -+ "Print in hex (type casting) (see 'pxhelp')", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("pxhelp", kdb_pxhelp, "", -+ "Display help for the px command", 0, 
KDB_REPEAT_NONE); -+ kdb_register_repeat("pd", kdb_debuginfo_print, "", -+ "Print in decimal (type casting)", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("whatis", kdb_debuginfo_print,"", -+ "Display the type, or the address for a symbol", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("sizeof", kdb_debuginfo_print, "", -+ "Display the size of a structure, typedef, etc.", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("walk", kdb_walk, "", -+ "Walk a linked list (see 'walkhelp')", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("walkhelp", kdb_walkhelp, "", -+ "Display help for the walk command", 0, KDB_REPEAT_NONE); -+} -+ -+/* -+ * The user has written to our "file" -+ * file: the /proc file -+ * buffer: user address of the data he is writing -+ * count: number of bytes in the user's buffer -+ */ -+static int -+kdb_write_proc_filename(struct file *file, const char __user *buffer, -+ unsigned long count, void *data) -+{ -+ int ret_count; -+ -+ /* our buffer is kdb_debug_info_filename[256] */ -+ if (count > 256) { -+ return 0; -+ } -+ if (copy_from_user(kdb_debug_info_filename, buffer, count)) { -+ return 0; -+ } -+ ret_count = count; /* actual count */ -+ /* remove any newline from the end of the file name */ -+ if (kdb_debug_info_filename[count-1] == '\n') count--; -+ kdb_debug_info_filename[count] = '\0'; -+ -+ return ret_count; -+} -+ -+/* -+ * The user is reading from our "file" -+ * page: the beginning of the user's buffer -+ * start: pointer to the user's pointer (tells him where we put the data) -+ * off: offset into the resource to be read -+ * count: length of the read -+ */ -+static int -+kdb_read_proc_filename(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ /* give him kdb_debug_info_filename[]; */ -+ return snprintf(page, count, "%s\n", kdb_debug_info_filename); -+} -+ -+/* -+ * kdb_proc_filename -+ * -+ * create /proc/kdb/debug_info_name -+ */ -+static void -+kdb_proc_filename(void) -+{ -+ struct proc_dir_entry *kdb_dir_entry, *kdb_file_entry; -+ -+ /* create /proc/kdb */ -+ kdb_dir_entry = proc_mkdir("kdb", NULL); -+ if (!kdb_dir_entry) { -+ printk ("kdb could not create /proc/kdb\n"); -+ return; -+ } -+ -+ /* read/write by owner (root) only */ -+ kdb_file_entry = create_proc_entry("debug_info_name", -+ S_IRUSR | S_IWUSR, kdb_dir_entry); -+ if (!kdb_file_entry) { -+ printk ("kdb could not create /proc/kdb/kdb_dir_entry\n"); -+ return; -+ } -+ kdb_file_entry->nlink = 1; -+ kdb_file_entry->data = (void *)NULL; -+ kdb_file_entry->read_proc = kdb_read_proc_filename; -+ kdb_file_entry->write_proc = kdb_write_proc_filename; -+ return; -+} -+ -+/* -+ * kdb_cmd_init -+ * -+ * This function is called by the kdb_init function to execute any -+ * commands defined in kdb_cmds. -+ * -+ * Inputs: -+ * Commands in *kdb_cmds[]; -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ */ -+ -+static void __init -+kdb_cmd_init(void) -+{ -+ int i, diag; -+ for (i = 0; kdb_cmds[i]; ++i) { -+ if (!defcmd_in_progress) -+ if (console_loglevel >= 6 /* KERN_INFO */) -+ kdb_printf("kdb_cmd[%d]: %s", i, kdb_cmds[i]); -+ diag = kdb_parse(kdb_cmds[i]); -+ if (diag) -+ kdb_printf("kdb command %s failed, kdb diag %d\n", -+ kdb_cmds[i], diag); -+ } -+ if (defcmd_in_progress) { -+ kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n"); -+ kdb_parse("endefcmd"); -+ } -+} -+ -+/* -+ * kdb_panic -+ * -+ * Invoked via the panic_notifier_list. -+ * -+ * Inputs: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero. -+ * Locking: -+ * None. 
-+ * Remarks: -+ * When this function is called from panic(), the other cpus have already -+ * been stopped. -+ * -+ */ -+ -+static int -+kdb_panic(struct notifier_block *self, unsigned long command, void *ptr) -+{ -+ KDB_FLAG_SET(CATASTROPHIC); /* kernel state is dubious now */ -+ KDB_ENTER(); -+ return 0; -+} -+ -+static struct notifier_block kdb_block = { kdb_panic, NULL, 0 }; -+ -+#ifdef CONFIG_SYSCTL -+static int proc_do_kdb(ctl_table *table, int write, void __user *buffer, -+ size_t *lenp, loff_t *ppos) -+{ -+ if (KDB_FLAG(NO_CONSOLE) && write) { -+ printk(KERN_ERR "kdb has no working console and has switched itself off\n"); -+ return -EINVAL; -+ } -+ return proc_dointvec(table, write, buffer, lenp, ppos); -+} -+ -+static ctl_table kdb_kern_table[] = { -+ { -+ .procname = "kdb", -+ .data = &kdb_on, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_do_kdb, -+ }, -+ {} -+}; -+ -+static ctl_table kdb_root_table[] = { -+ { -+ .procname = "kernel", -+ .mode = 0555, -+ .child = kdb_kern_table, -+ }, -+ {} -+}; -+#endif /* CONFIG_SYSCTL */ -+ -+static int -+kdb_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) -+{ -+ if (action == CPU_ONLINE) { -+ int cpu =(unsigned long)hcpu; -+ cpumask_t save_cpus_allowed = current->cpus_allowed; -+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); -+ kdb(KDB_REASON_CPU_UP, 0, NULL); /* do kdb setup on this cpu */ -+ set_cpus_allowed_ptr(current, &save_cpus_allowed); -+ } -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block kdb_cpu_nfb = { -+ .notifier_call = kdb_cpu_callback -+}; -+ -+/* -+ * kdb_init -+ * -+ * Initialize the kernel debugger environment. -+ * -+ * Parameters: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * None. -+ */ -+ -+void __init -+kdb_init(void) -+{ -+ kdb_initial_cpu = smp_processor_id(); -+ /* -+ * This must be called before any calls to kdb_printf. -+ */ -+ kdb_io_init(); -+ -+ kdb_inittab(); /* Initialize Command Table */ -+ kdb_initbptab(); /* Initialize Breakpoint Table */ -+ kdb_id_init(); /* Initialize Disassembler */ -+ kdba_init(); /* Architecture Dependent Initialization */ -+ -+ /* -+ * Use printk() to get message in log_buf[]; -+ */ -+ printk("kdb version %d.%d%s by Keith Owens, Scott Lurndal. 
"\ -+ "Copyright SGI, All Rights Reserved\n", -+ KDB_MAJOR_VERSION, KDB_MINOR_VERSION, KDB_TEST_VERSION); -+ -+ kdb_cmd_init(); /* Preset commands from kdb_cmds */ -+ kdb_initial_cpu = -1; /* Avoid recursion problems */ -+ kdb(KDB_REASON_CPU_UP, 0, NULL); /* do kdb setup on boot cpu */ -+ kdb_initial_cpu = smp_processor_id(); -+ atomic_notifier_chain_register(&panic_notifier_list, &kdb_block); -+ register_cpu_notifier(&kdb_cpu_nfb); -+ -+#ifdef kdba_setjmp -+ kdbjmpbuf = vmalloc(NR_CPUS * sizeof(*kdbjmpbuf)); -+ if (!kdbjmpbuf) -+ printk(KERN_ERR "Cannot allocate kdbjmpbuf, no kdb recovery will be possible\n"); -+#endif /* kdba_setjmp */ -+ -+ kdb_initial_cpu = -1; -+ kdb_wait_for_cpus_secs = 2*num_online_cpus(); -+ kdb_wait_for_cpus_secs = max(kdb_wait_for_cpus_secs, 10); -+} -+ -+#ifdef CONFIG_SYSCTL -+static int __init -+kdb_late_init(void) -+{ -+ register_sysctl_table(kdb_root_table); -+ /* seems that we cannot allocate with kmalloc until now */ -+ kdb_proc_filename(); -+ return 0; -+} -+ -+__initcall(kdb_late_init); -+#endif -+ -+EXPORT_SYMBOL(kdb_register); -+EXPORT_SYMBOL(kdb_register_repeat); -+EXPORT_SYMBOL(kdb_unregister); -+EXPORT_SYMBOL(kdb_getarea_size); -+EXPORT_SYMBOL(kdb_putarea_size); -+EXPORT_SYMBOL(kdb_getuserarea_size); -+EXPORT_SYMBOL(kdb_putuserarea_size); -+EXPORT_SYMBOL(kdbgetularg); -+EXPORT_SYMBOL(kdbgetenv); -+EXPORT_SYMBOL(kdbgetintenv); -+EXPORT_SYMBOL(kdbgetaddrarg); -+EXPORT_SYMBOL(kdb); -+EXPORT_SYMBOL(kdb_on); -+EXPORT_SYMBOL(kdb_seqno); -+EXPORT_SYMBOL(kdb_initial_cpu); -+EXPORT_SYMBOL(kdbnearsym); -+EXPORT_SYMBOL(kdb_printf); -+EXPORT_SYMBOL(kdb_symbol_print); -+EXPORT_SYMBOL(kdb_running_process); ---- /dev/null -+++ b/kdb/kdbsupport.c -@@ -0,0 +1,1155 @@ -+/* -+ * Kernel Debugger Architecture Independent Support Functions -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ * 03/02/13 added new 2.5 kallsyms -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+ -+/* -+ * Symbol table functions. -+ */ -+ -+/* -+ * kdbgetsymval -+ * -+ * Return the address of the given symbol. -+ * -+ * Parameters: -+ * symname Character string containing symbol name -+ * symtab Structure to receive results -+ * Outputs: -+ * Returns: -+ * 0 Symbol not found, symtab zero filled -+ * 1 Symbol mapped to module/symbol/section, data in symtab -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+int -+kdbgetsymval(const char *symname, kdb_symtab_t *symtab) -+{ -+ if (KDB_DEBUG(AR)) -+ kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname, symtab); -+ memset(symtab, 0, sizeof(*symtab)); -+ -+ if ((symtab->sym_start = kallsyms_lookup_name(symname))) { -+ if (KDB_DEBUG(AR)) -+ kdb_printf("kdbgetsymval: returns 1, symtab->sym_start=0x%lx\n", symtab->sym_start); -+ return 1; -+ } -+ if (KDB_DEBUG(AR)) -+ kdb_printf("kdbgetsymval: returns 0\n"); -+ return 0; -+} -+EXPORT_SYMBOL(kdbgetsymval); -+ -+/* -+ * kdbnearsym -+ * -+ * Return the name of the symbol with the nearest address -+ * less than 'addr'. 
-+ * -+ * Parameters: -+ * addr Address to check for symbol near -+ * symtab Structure to receive results -+ * Outputs: -+ * Returns: -+ * 0 No sections contain this address, symtab zero filled -+ * 1 Address mapped to module/symbol/section, data in symtab -+ * Locking: -+ * None. -+ * Remarks: -+ * 2.6 kallsyms has a "feature" where it unpacks the name into a string. -+ * If that string is reused before the caller expects it then the caller -+ * sees its string change without warning. To avoid cluttering up the -+ * main kdb code with lots of kdb_strdup, tests and kfree calls, kdbnearsym -+ * maintains an LRU list of the last few unique strings. The list is sized -+ * large enough to hold active strings, no kdb caller of kdbnearsym makes -+ * more than ~20 later calls before using a saved value. -+ */ -+ -+static char *kdb_name_table[100]; /* arbitrary size */ -+ -+int -+kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) -+{ -+ int ret = 0; -+ unsigned long symbolsize; -+ unsigned long offset; -+#define knt1_size 128 /* must be >= kallsyms table size */ -+ char *knt1 = NULL; -+ -+ if (KDB_DEBUG(AR)) -+ kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab); -+ memset(symtab, 0, sizeof(*symtab)); -+ -+ if (addr < 4096) -+ goto out; -+ knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC); -+ if (!knt1) { -+ kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n", addr); -+ goto out; -+ } -+ symtab->sym_name = kallsyms_lookup(addr, &symbolsize , &offset, (char **)(&symtab->mod_name), knt1); -+ if (offset > 8*1024*1024) { -+ symtab->sym_name = NULL; -+ addr = offset = symbolsize = 0; -+ } -+ symtab->sym_start = addr - offset; -+ symtab->sym_end = symtab->sym_start + symbolsize; -+ ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0'; -+ -+ if (ret) { -+ int i; -+ /* Another 2.6 kallsyms "feature". Sometimes the sym_name is -+ * set but the buffer passed into kallsyms_lookup is not used, -+ * so it contains garbage. The caller has to work out which -+ * buffer needs to be saved. -+ * -+ * What was Rusty smoking when he wrote that code? 
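The code below implements the LRU list described in the remarks: a fixed array of saved name strings where a miss evicts slot 0 and a hit moves the entry to the last slot. A cut-down userspace sketch of the same bookkeeping, with a smaller table and malloc/free in place of debug_kmalloc/debug_kfree (function and table names are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TABLE_SIZE 4	/* the kdb code keeps 100 slots */
static char *name_table[TABLE_SIZE];

static char *dup_name(const char *s)
{
	char *d = malloc(strlen(s) + 1);
	return d ? strcpy(d, s) : NULL;
}

/* Most recently used name lives in the last slot; a hit is pulled out and
 * re-appended, a miss drops the oldest entry (slot 0). */
static const char *remember(const char *name)
{
	char *saved;
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		if (name_table[i] && strcmp(name_table[i], name) == 0)
			break;
	if (i >= TABLE_SIZE) {			/* miss: evict the oldest */
		free(name_table[0]);
		memmove(name_table, name_table + 1,
			sizeof(name_table[0]) * (TABLE_SIZE - 1));
		saved = dup_name(name);
	} else {				/* hit: reuse the saved copy */
		saved = name_table[i];
		memmove(name_table + i, name_table + i + 1,
			sizeof(name_table[0]) * (TABLE_SIZE - i - 1));
	}
	name_table[TABLE_SIZE - 1] = saved;
	return saved;
}

int main(void)
{
	printf("%s\n", remember("schedule"));
	printf("%s\n", remember("printk"));
	printf("%s\n", remember("schedule"));	/* returns the cached copy */
	return 0;
}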
-+ */ -+ if (symtab->sym_name != knt1) { -+ strncpy(knt1, symtab->sym_name, knt1_size); -+ knt1[knt1_size-1] = '\0'; -+ } -+ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) { -+ if (kdb_name_table[i] && strcmp(kdb_name_table[i], knt1) == 0) -+ break; -+ } -+ if (i >= ARRAY_SIZE(kdb_name_table)) { -+ debug_kfree(kdb_name_table[0]); -+ memcpy(kdb_name_table, kdb_name_table+1, -+ sizeof(kdb_name_table[0])*(ARRAY_SIZE(kdb_name_table)-1)); -+ } else { -+ debug_kfree(knt1); -+ knt1 = kdb_name_table[i]; -+ memcpy(kdb_name_table+i, kdb_name_table+i+1, -+ sizeof(kdb_name_table[0])*(ARRAY_SIZE(kdb_name_table)-i-1)); -+ } -+ i = ARRAY_SIZE(kdb_name_table) - 1; -+ kdb_name_table[i] = knt1; -+ symtab->sym_name = kdb_name_table[i]; -+ knt1 = NULL; -+ } -+ -+ if (symtab->mod_name == NULL) -+ symtab->mod_name = "kernel"; -+ if (KDB_DEBUG(AR)) -+ kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret, symtab->sym_start, symtab->mod_name, symtab->sym_name, symtab->sym_name); -+ -+out: -+ debug_kfree(knt1); -+ return ret; -+} -+ -+void -+kdbnearsym_cleanup(void) -+{ -+ int i; -+ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) { -+ if (kdb_name_table[i]) { -+ debug_kfree(kdb_name_table[i]); -+ kdb_name_table[i] = NULL; -+ } -+ } -+} -+ -+/* -+ * kallsyms_symbol_complete -+ * -+ * Parameters: -+ * prefix_name prefix of a symbol name to lookup -+ * max_len maximum length that can be returned -+ * Returns: -+ * Number of symbols which match the given prefix. -+ * Notes: -+ * prefix_name is changed to contain the longest unique prefix that -+ * starts with this prefix (tab completion). -+ */ -+ -+static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1]; -+ -+int kallsyms_symbol_complete(char *prefix_name, int max_len) -+{ -+ loff_t pos = 0; -+ int prefix_len = strlen(prefix_name), prev_len = 0; -+ int i, number = 0; -+ const char *name; -+ -+ while ((name = kdb_walk_kallsyms(&pos))) { -+ if (strncmp(name, prefix_name, prefix_len) == 0) { -+ strcpy(ks_namebuf, name); -+ /* Work out the longest name that matches the prefix */ -+ if (++number == 1) { -+ prev_len = min_t(int, max_len-1, strlen(ks_namebuf)); -+ memcpy(ks_namebuf_prev, ks_namebuf, prev_len); -+ ks_namebuf_prev[prev_len] = '\0'; -+ } else for (i = 0; i < prev_len; ++i) { -+ if (ks_namebuf[i] != ks_namebuf_prev[i]) { -+ prev_len = i; -+ ks_namebuf_prev[i] = '\0'; -+ break; -+ } -+ } -+ } -+ } -+ if (prev_len > prefix_len) -+ memcpy(prefix_name, ks_namebuf_prev, prev_len+1); -+ return number; -+} -+ -+/* -+ * kallsyms_symbol_next -+ * -+ * Parameters: -+ * prefix_name prefix of a symbol name to lookup -+ * flag 0 means search from the head, 1 means continue search. -+ * Returns: -+ * 1 if a symbol matches the given prefix. -+ * 0 if no string found -+ */ -+ -+int kallsyms_symbol_next(char *prefix_name, int flag) -+{ -+ int prefix_len = strlen(prefix_name); -+ static loff_t pos; -+ const char *name; -+ -+ if (!flag) -+ pos = 0; -+ -+ while ((name = kdb_walk_kallsyms(&pos))) { -+ if (strncmp(name, prefix_name, prefix_len) == 0) { -+ strncpy(prefix_name, name, strlen(name)+1); -+ return 1; -+ } -+ } -+ return 0; -+} -+ -+#if defined(CONFIG_SMP) -+/* -+ * kdb_ipi -+ * -+ * This function is called from the non-maskable interrupt -+ * handler to handle a kdb IPI instruction. -+ * -+ * Inputs: -+ * regs = Exception frame pointer -+ * Outputs: -+ * None. -+ * Returns: -+ * 0 - Did not handle NMI -+ * 1 - Handled NMI -+ * Locking: -+ * None. 
-+ * Remarks: -+ * Initially one processor is invoked in the kdb() code. That -+ * processor sends an ipi which drives this routine on the other -+ * processors. All this does is call kdb() with reason SWITCH. -+ * This puts all processors into the kdb() routine and all the -+ * code for breakpoints etc. is in one place. -+ * One problem with the way the kdb NMI is sent, the NMI has no -+ * identification that says it came from kdb. If the cpu's kdb state is -+ * marked as "waiting for kdb_ipi" then the NMI is treated as coming from -+ * kdb, otherwise it is assumed to be for another reason and is ignored. -+ */ -+ -+int -+kdb_ipi(struct pt_regs *regs, void (*ack_interrupt)(void)) -+{ -+ /* Do not print before checking and clearing WAIT_IPI, IPIs are -+ * going all the time. -+ */ -+ if (KDB_STATE(WAIT_IPI)) { -+ /* -+ * Stopping other processors via smp_kdb_stop(). -+ */ -+ if (ack_interrupt) -+ (*ack_interrupt)(); /* Acknowledge the interrupt */ -+ KDB_STATE_CLEAR(WAIT_IPI); -+ KDB_DEBUG_STATE("kdb_ipi 1", 0); -+ kdb(KDB_REASON_SWITCH, 0, regs); /* Spin in kdb() */ -+ KDB_DEBUG_STATE("kdb_ipi 2", 0); -+ return 1; -+ } -+ return 0; -+} -+#endif /* CONFIG_SMP */ -+ -+/* -+ * kdb_symbol_print -+ * -+ * Standard method for printing a symbol name and offset. -+ * Inputs: -+ * addr Address to be printed. -+ * symtab Address of symbol data, if NULL this routine does its -+ * own lookup. -+ * punc Punctuation for string, bit field. -+ * Outputs: -+ * None. -+ * Returns: -+ * Always 0. -+ * Locking: -+ * none. -+ * Remarks: -+ * The string and its punctuation is only printed if the address -+ * is inside the kernel, except that the value is always printed -+ * when requested. -+ */ -+ -+void -+kdb_symbol_print(kdb_machreg_t addr, const kdb_symtab_t *symtab_p, unsigned int punc) -+{ -+ kdb_symtab_t symtab, *symtab_p2; -+ if (symtab_p) { -+ symtab_p2 = (kdb_symtab_t *)symtab_p; -+ } -+ else { -+ symtab_p2 = &symtab; -+ kdbnearsym(addr, symtab_p2); -+ } -+ if (symtab_p2->sym_name || (punc & KDB_SP_VALUE)) { -+ ; /* drop through */ -+ } -+ else { -+ return; -+ } -+ if (punc & KDB_SP_SPACEB) { -+ kdb_printf(" "); -+ } -+ if (punc & KDB_SP_VALUE) { -+ kdb_printf(kdb_machreg_fmt0, addr); -+ } -+ if (symtab_p2->sym_name) { -+ if (punc & KDB_SP_VALUE) { -+ kdb_printf(" "); -+ } -+ if (punc & KDB_SP_PAREN) { -+ kdb_printf("("); -+ } -+ if (strcmp(symtab_p2->mod_name, "kernel")) { -+ kdb_printf("[%s]", symtab_p2->mod_name); -+ } -+ kdb_printf("%s", symtab_p2->sym_name); -+ if (addr != symtab_p2->sym_start) { -+ kdb_printf("+0x%lx", addr - symtab_p2->sym_start); -+ } -+ if (punc & KDB_SP_SYMSIZE) { -+ kdb_printf("/0x%lx", symtab_p2->sym_end - symtab_p2->sym_start); -+ } -+ if (punc & KDB_SP_PAREN) { -+ kdb_printf(")"); -+ } -+ } -+ if (punc & KDB_SP_SPACEA) { -+ kdb_printf(" "); -+ } -+ if (punc & KDB_SP_NEWLINE) { -+ kdb_printf("\n"); -+ } -+} -+ -+/* -+ * kdb_strdup -+ * -+ * kdb equivalent of strdup, for disasm code. -+ * Inputs: -+ * str The string to duplicate. -+ * type Flags to kmalloc for the new string. -+ * Outputs: -+ * None. -+ * Returns: -+ * Address of the new string, NULL if storage could not be allocated. -+ * Locking: -+ * none. -+ * Remarks: -+ * This is not in lib/string.c because it uses kmalloc which is not -+ * available when string.o is used in boot loaders. 
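kdb_symbol_print() above composes its output from independent punctuation bits. A compressed userspace sketch of that flag-driven formatting, using invented SP_* values rather than the real KDB_SP_* constants and a fabricated symbol/address pair:

#include <stdio.h>

#define SP_VALUE   0x1	/* print the raw address */
#define SP_PAREN   0x2	/* wrap the symbol in parentheses */
#define SP_NEWLINE 0x4	/* finish with a newline */

static void symbol_print(unsigned long addr, const char *sym,
			 unsigned long start, unsigned int punc)
{
	if (punc & SP_VALUE)
		printf("%#010lx ", addr);
	if (punc & SP_PAREN)
		printf("(");
	printf("%s", sym);
	if (addr != start)
		printf("+%#lx", addr - start);	/* offset into the symbol */
	if (punc & SP_PAREN)
		printf(")");
	if (punc & SP_NEWLINE)
		printf("\n");
}

int main(void)
{
	/* prints: 0x00401010 (my_function+0x10) */
	symbol_print(0x401010UL, "my_function", 0x401000UL,
		     SP_VALUE | SP_PAREN | SP_NEWLINE);
	return 0;
}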
-+ */ -+ -+char *kdb_strdup(const char *str, gfp_t type) -+{ -+ int n = strlen(str)+1; -+ char *s = kmalloc(n, type); -+ if (!s) return NULL; -+ return strcpy(s, str); -+} -+ -+/* -+ * kdb_getarea_size -+ * -+ * Read an area of data. The kdb equivalent of copy_from_user, with -+ * kdb messages for invalid addresses. -+ * Inputs: -+ * res Pointer to the area to receive the result. -+ * addr Address of the area to copy. -+ * size Size of the area. -+ * Outputs: -+ * none. -+ * Returns: -+ * 0 for success, < 0 for error. -+ * Locking: -+ * none. -+ */ -+ -+int kdb_getarea_size(void *res, unsigned long addr, size_t size) -+{ -+ int ret = kdba_getarea_size(res, addr, size); -+ if (ret) { -+ if (!KDB_STATE(SUPPRESS)) { -+ kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr); -+ KDB_STATE_SET(SUPPRESS); -+ } -+ ret = KDB_BADADDR; -+ } -+ else { -+ KDB_STATE_CLEAR(SUPPRESS); -+ } -+ return(ret); -+} -+ -+/* -+ * kdb_putarea_size -+ * -+ * Write an area of data. The kdb equivalent of copy_to_user, with -+ * kdb messages for invalid addresses. -+ * Inputs: -+ * addr Address of the area to write to. -+ * res Pointer to the area holding the data. -+ * size Size of the area. -+ * Outputs: -+ * none. -+ * Returns: -+ * 0 for success, < 0 for error. -+ * Locking: -+ * none. -+ */ -+ -+int kdb_putarea_size(unsigned long addr, void *res, size_t size) -+{ -+ int ret = kdba_putarea_size(addr, res, size); -+ if (ret) { -+ if (!KDB_STATE(SUPPRESS)) { -+ kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr); -+ KDB_STATE_SET(SUPPRESS); -+ } -+ ret = KDB_BADADDR; -+ } -+ else { -+ KDB_STATE_CLEAR(SUPPRESS); -+ } -+ return(ret); -+} -+ -+/* -+ * kdb_getphys -+ * -+ * Read data from a physical address. Validate the address is in range, -+ * use kmap_atomic() to get data -+ * -+ * Similar to kdb_getarea() - but for phys addresses -+ * -+ * Inputs: -+ * res Pointer to the word to receive the result -+ * addr Physical address of the area to copy -+ * size Size of the area -+ * Outputs: -+ * none. -+ * Returns: -+ * 0 for success, < 0 for error. -+ * Locking: -+ * none. -+ */ -+static int kdb_getphys(void *res, unsigned long addr, size_t size) -+{ -+ unsigned long pfn; -+ void *vaddr; -+ struct page *page; -+ -+ pfn = (addr >> PAGE_SHIFT); -+ if (!pfn_valid(pfn)) -+ return 1; -+ page = pfn_to_page(pfn); -+ vaddr = kmap_atomic(page, KM_KDB); -+ memcpy(res, vaddr + (addr & (PAGE_SIZE -1)), size); -+ kunmap_atomic(vaddr, KM_KDB); -+ -+ return 0; -+} -+ -+/* -+ * kdb_getphysword -+ * -+ * Inputs: -+ * word Pointer to the word to receive the result. -+ * addr Address of the area to copy. -+ * size Size of the area. -+ * Outputs: -+ * none. -+ * Returns: -+ * 0 for success, < 0 for error. -+ * Locking: -+ * none. 
-+ */ -+int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size) -+{ -+ int diag; -+ __u8 w1; -+ __u16 w2; -+ __u32 w4; -+ __u64 w8; -+ *word = 0; /* Default value if addr or size is invalid */ -+ -+ switch (size) { -+ case 1: -+ if (!(diag = kdb_getphys(&w1, addr, sizeof(w1)))) -+ *word = w1; -+ break; -+ case 2: -+ if (!(diag = kdb_getphys(&w2, addr, sizeof(w2)))) -+ *word = w2; -+ break; -+ case 4: -+ if (!(diag = kdb_getphys(&w4, addr, sizeof(w4)))) -+ *word = w4; -+ break; -+ case 8: -+ if (size <= sizeof(*word)) { -+ if (!(diag = kdb_getphys(&w8, addr, sizeof(w8)))) -+ *word = w8; -+ break; -+ } -+ /* drop through */ -+ default: -+ diag = KDB_BADWIDTH; -+ kdb_printf("kdb_getphysword: bad width %ld\n", (long) size); -+ } -+ return(diag); -+} -+ -+/* -+ * kdb_getword -+ * -+ * Read a binary value. Unlike kdb_getarea, this treats data as numbers. -+ * Inputs: -+ * word Pointer to the word to receive the result. -+ * addr Address of the area to copy. -+ * size Size of the area. -+ * Outputs: -+ * none. -+ * Returns: -+ * 0 for success, < 0 for error. -+ * Locking: -+ * none. -+ */ -+ -+int kdb_getword(unsigned long *word, unsigned long addr, size_t size) -+{ -+ int diag; -+ __u8 w1; -+ __u16 w2; -+ __u32 w4; -+ __u64 w8; -+ *word = 0; /* Default value if addr or size is invalid */ -+ switch (size) { -+ case 1: -+ if (!(diag = kdb_getarea(w1, addr))) -+ *word = w1; -+ break; -+ case 2: -+ if (!(diag = kdb_getarea(w2, addr))) -+ *word = w2; -+ break; -+ case 4: -+ if (!(diag = kdb_getarea(w4, addr))) -+ *word = w4; -+ break; -+ case 8: -+ if (size <= sizeof(*word)) { -+ if (!(diag = kdb_getarea(w8, addr))) -+ *word = w8; -+ break; -+ } -+ /* drop through */ -+ default: -+ diag = KDB_BADWIDTH; -+ kdb_printf("kdb_getword: bad width %ld\n", (long) size); -+ } -+ return(diag); -+} -+ -+/* -+ * kdb_putword -+ * -+ * Write a binary value. Unlike kdb_putarea, this treats data as numbers. -+ * Inputs: -+ * addr Address of the area to write to.. -+ * word The value to set. -+ * size Size of the area. -+ * Outputs: -+ * none. -+ * Returns: -+ * 0 for success, < 0 for error. -+ * Locking: -+ * none. -+ */ -+ -+int kdb_putword(unsigned long addr, unsigned long word, size_t size) -+{ -+ int diag; -+ __u8 w1; -+ __u16 w2; -+ __u32 w4; -+ __u64 w8; -+ switch (size) { -+ case 1: -+ w1 = word; -+ diag = kdb_putarea(addr, w1); -+ break; -+ case 2: -+ w2 = word; -+ diag = kdb_putarea(addr, w2); -+ break; -+ case 4: -+ w4 = word; -+ diag = kdb_putarea(addr, w4); -+ break; -+ case 8: -+ if (size <= sizeof(word)) { -+ w8 = word; -+ diag = kdb_putarea(addr, w8); -+ break; -+ } -+ /* drop through */ -+ default: -+ diag = KDB_BADWIDTH; -+ kdb_printf("kdb_putword: bad width %ld\n", (long) size); -+ } -+ return(diag); -+} -+ -+/* -+ * kdb_task_state_string -+ * -+ * Convert a string containing any of the letters DRSTCZEUIMA to a mask -+ * for the process state field and return the value. If no argument is -+ * supplied, return the mask that corresponds to environment variable PS, -+ * DRSTCZEU by default. -+ * Inputs: -+ * s String to convert -+ * Outputs: -+ * none. -+ * Returns: -+ * Mask for process state. -+ * Locking: -+ * none. -+ * Notes: -+ * The mask folds data from several sources into a single long value, so -+ * be carefull not to overlap the bits. TASK_* bits are in the LSB, -+ * special cases like UNRUNNABLE are in the MSB. 
As of 2.6.10-rc1 there -+ * is no overlap between TASK_* and EXIT_* but that may not always be -+ * true, so EXIT_* bits are shifted left 16 bits before being stored in -+ * the mask. -+ */ -+ -+#define UNRUNNABLE (1UL << (8*sizeof(unsigned long) - 1)) /* unrunnable is < 0 */ -+#define RUNNING (1UL << (8*sizeof(unsigned long) - 2)) -+#define IDLE (1UL << (8*sizeof(unsigned long) - 3)) -+#define DAEMON (1UL << (8*sizeof(unsigned long) - 4)) -+ -+unsigned long -+kdb_task_state_string(const char *s) -+{ -+ long res = 0; -+ if (!s && !(s = kdbgetenv("PS"))) { -+ s = "DRSTCZEU"; /* default value for ps */ -+ } -+ while (*s) { -+ switch (*s) { -+ case 'D': res |= TASK_UNINTERRUPTIBLE; break; -+ case 'R': res |= RUNNING; break; -+ case 'S': res |= TASK_INTERRUPTIBLE; break; -+ case 'T': res |= TASK_STOPPED; break; -+ case 'C': res |= TASK_TRACED; break; -+ case 'Z': res |= EXIT_ZOMBIE << 16; break; -+ case 'E': res |= EXIT_DEAD << 16; break; -+ case 'U': res |= UNRUNNABLE; break; -+ case 'I': res |= IDLE; break; -+ case 'M': res |= DAEMON; break; -+ case 'A': res = ~0UL; break; -+ default: -+ kdb_printf("%s: unknown flag '%c' ignored\n", __FUNCTION__, *s); -+ break; -+ } -+ ++s; -+ } -+ return res; -+} -+ -+/* -+ * kdb_task_state_char -+ * -+ * Return the character that represents the task state. -+ * Inputs: -+ * p struct task for the process -+ * Outputs: -+ * none. -+ * Returns: -+ * One character to represent the task state. -+ * Locking: -+ * none. -+ */ -+ -+char -+kdb_task_state_char (const struct task_struct *p) -+{ -+ int cpu = kdb_process_cpu(p); -+ struct kdb_running_process *krp = kdb_running_process + cpu; -+ char state = (p->state == 0) ? 'R' : -+ (p->state < 0) ? 'U' : -+ (p->state & TASK_UNINTERRUPTIBLE) ? 'D' : -+ (p->state & TASK_STOPPED) ? 'T' : -+ (p->state & TASK_TRACED) ? 'C' : -+ (p->exit_state & EXIT_ZOMBIE) ? 'Z' : -+ (p->exit_state & EXIT_DEAD) ? 'E' : -+ (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?'; -+ if (p->pid == 0) { -+ /* Idle task. Is it really idle, apart from the kdb interrupt? */ -+ if (!kdb_task_has_cpu(p) || krp->irq_depth == 1) { -+ /* There is a corner case when the idle task takes an -+ * interrupt and dies in the interrupt code. It has an -+ * interrupt count of 1 but that did not come from kdb. -+ * This corner case can only occur on the initial cpu, -+ * all the others were entered via the kdb IPI. -+ */ -+ if (cpu != kdb_initial_cpu || KDB_STATE_CPU(KEYBOARD, cpu)) -+ state = 'I'; /* idle task */ -+ } -+ } -+ else if (!p->mm && state == 'S') { -+ state = 'M'; /* sleeping system daemon */ -+ } -+ return state; -+} -+ -+/* -+ * kdb_task_state -+ * -+ * Return true if a process has the desired state given by the mask. -+ * Inputs: -+ * p struct task for the process -+ * mask mask from kdb_task_state_string to select processes -+ * Outputs: -+ * none. -+ * Returns: -+ * True if the process matches at least one criteria defined by the mask. -+ * Locking: -+ * none. -+ */ -+ -+unsigned long -+kdb_task_state(const struct task_struct *p, unsigned long mask) -+{ -+ char state[] = { kdb_task_state_char(p), '\0' }; -+ return (mask & kdb_task_state_string(state)) != 0; -+} -+ -+struct kdb_running_process kdb_running_process[NR_CPUS]; -+ -+/* Save the state of a running process and invoke kdb_main_loop. This is -+ * invoked on the current process on each cpu (assuming the cpu is responding). 
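The filtering described above happens in two steps: kdb_task_state_string() turns a letter string into a bit mask, and kdb_task_state() converts a task's single state letter through the same routine and tests for intersection. A toy userspace version with invented bit assignments (the real mask mixes TASK_ and EXIT_ bits plus the special UNRUNNABLE, IDLE and DAEMON bits defined above):

#include <stdio.h>

#define ST_RUNNING (1UL << 0)	/* 'R' */
#define ST_SLEEP   (1UL << 1)	/* 'S' */
#define ST_UNINT   (1UL << 2)	/* 'D' */
#define ST_IDLE    (1UL << 3)	/* 'I' */

static unsigned long state_string(const char *s)
{
	unsigned long mask = 0;

	for (; *s; s++) {
		switch (*s) {
		case 'R': mask |= ST_RUNNING; break;
		case 'S': mask |= ST_SLEEP;   break;
		case 'D': mask |= ST_UNINT;   break;
		case 'I': mask |= ST_IDLE;    break;
		case 'A': mask = ~0UL;        break;	/* all states */
		}
	}
	return mask;
}

/* A task matches when the mask built from its one state letter
 * intersects the filter mask, which is what kdb_task_state() does. */
static int task_matches(char state, unsigned long mask)
{
	char s[2] = { state, '\0' };

	return (state_string(s) & mask) != 0;
}

int main(void)
{
	unsigned long mask = state_string("RD");	/* like "ps RD" */

	printf("R matches: %d\n", task_matches('R', mask));	/* 1 */
	printf("S matches: %d\n", task_matches('S', mask));	/* 0 */
	return 0;
}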
-+ */ -+ -+int -+kdb_save_running(struct pt_regs *regs, kdb_reason_t reason, -+ kdb_reason_t reason2, int error, kdb_dbtrap_t db_result) -+{ -+ struct kdb_running_process *krp = kdb_running_process + smp_processor_id(); -+ krp->p = current; -+ krp->regs = regs; -+ krp->seqno = kdb_seqno; -+ krp->irq_depth = hardirq_count() >> HARDIRQ_SHIFT; -+ kdba_save_running(&(krp->arch), regs); -+ return kdb_main_loop(reason, reason2, error, db_result, regs); -+} -+ -+/* -+ * kdb_unsave_running -+ * -+ * Reverse the effect of kdb_save_running. -+ * Inputs: -+ * regs struct pt_regs for the process -+ * Outputs: -+ * Updates kdb_running_process[] for this cpu. -+ * Returns: -+ * none. -+ * Locking: -+ * none. -+ */ -+ -+void -+kdb_unsave_running(struct pt_regs *regs) -+{ -+ struct kdb_running_process *krp = kdb_running_process + smp_processor_id(); -+ kdba_unsave_running(&(krp->arch), regs); -+ krp->seqno = 0; -+} -+ -+ -+/* -+ * kdb_print_nameval -+ * -+ * Print a name and its value, converting the value to a symbol lookup -+ * if possible. -+ * Inputs: -+ * name field name to print -+ * val value of field -+ * Outputs: -+ * none. -+ * Returns: -+ * none. -+ * Locking: -+ * none. -+ */ -+ -+void -+kdb_print_nameval(const char *name, unsigned long val) -+{ -+ kdb_symtab_t symtab; -+ kdb_printf(" %-11.11s ", name); -+ if (kdbnearsym(val, &symtab)) -+ kdb_symbol_print(val, &symtab, KDB_SP_VALUE|KDB_SP_SYMSIZE|KDB_SP_NEWLINE); -+ else -+ kdb_printf("0x%lx\n", val); -+} -+ -+static struct page * kdb_get_one_user_page(const struct task_struct *tsk, unsigned long start, -+ int len, int write) -+{ -+ struct mm_struct *mm = tsk->mm; -+ unsigned int flags; -+ struct vm_area_struct * vma; -+ -+ /* shouldn't cross a page boundary. */ -+ if ((start & PAGE_MASK) != ((start+len) & PAGE_MASK)) -+ return NULL; -+ -+ /* we need to align start address to the current page boundy, PAGE_ALIGN -+ * aligns to next page boundry. -+ * FIXME: What about hugetlb? -+ */ -+ start = start & PAGE_MASK; -+ flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); -+ -+ vma = find_extend_vma(mm, start); -+ -+ /* may be we can allow access to VM_IO pages inside KDB? */ -+ if (!vma || (vma->vm_flags & VM_IO) || !(flags & vma->vm_flags)) -+ return NULL; -+ -+ return follow_page(vma, start, write ? FOLL_WRITE : 0); -+} -+ -+int kdb_getuserarea_size(void *to, unsigned long from, size_t size) -+{ -+ struct page *page; -+ void *vaddr; -+ -+ page = kdb_get_one_user_page(kdb_current_task, from, size, 0); -+ if (!page) -+ return size; -+ -+ vaddr = kmap_atomic(page, KM_KDB); -+ memcpy(to, vaddr+ (from & (PAGE_SIZE - 1)), size); -+ kunmap_atomic(vaddr, KM_KDB); -+ -+ return 0; -+} -+ -+int kdb_putuserarea_size(unsigned long to, void *from, size_t size) -+{ -+ struct page *page; -+ void *vaddr; -+ -+ page = kdb_get_one_user_page(kdb_current_task, to, size, 1); -+ if (!page) -+ return size; -+ -+ vaddr = kmap_atomic(page, KM_KDB); -+ memcpy(vaddr+ (to & (PAGE_SIZE - 1)), from, size); -+ kunmap_atomic(vaddr, KM_KDB); -+ -+ return 0; -+} -+ -+/* Last ditch allocator for debugging, so we can still debug even when the -+ * GFP_ATOMIC pool has been exhausted. The algorithms are tuned for space -+ * usage, not for speed. One smallish memory pool, the free chain is always in -+ * ascending address order to allow coalescing, allocations are done in brute -+ * force best fit. 
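The "brute force best fit" policy mentioned above scans the entire free chain and keeps the smallest block that still satisfies the request. Reduced to a pointer-linked userspace toy (the real pool links blocks by byte offsets within one static array, and also splits and coalesces them), the selection loop looks like:

#include <stdio.h>

struct block {
	unsigned int size;
	struct block *next;
};

/* Scan the whole free list and remember the smallest block that fits. */
static struct block *best_fit(struct block *head, unsigned int size)
{
	struct block *best = NULL, *b;

	for (b = head; b; b = b->next) {
		if (b->size >= size && (!best || b->size < best->size)) {
			best = b;
			if (b->size == size)	/* exact fit, cannot do better */
				break;
		}
	}
	return best;
}

int main(void)
{
	struct block c = { 64, NULL }, b = { 24, &c }, a = { 200, &b };
	struct block *hit = best_fit(&a, 20);

	printf("chose block of size %u\n", hit ? hit->size : 0);	/* 24 */
	return 0;
}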
-+ */ -+ -+struct debug_alloc_header { -+ u32 next; /* offset of next header from start of pool */ -+ u32 size; -+ void *caller; -+}; -+ -+/* The memory returned by this allocator must be aligned, which means so must -+ * the header size. Do not assume that sizeof(struct debug_alloc_header) is a -+ * multiple of the alignment, explicitly calculate the overhead of this header, -+ * including the alignment. The rest of this code must not use sizeof() on any -+ * header or pointer to a header. -+ */ -+#define dah_align 8 -+#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align) -+ -+static u64 debug_alloc_pool_aligned[256*1024/dah_align]; /* 256K pool */ -+static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned; -+static u32 dah_first, dah_first_call = 1, dah_used = 0, dah_used_max = 0; -+ -+/* Locking is awkward. The debug code is called from all contexts, including -+ * non maskable interrupts. A normal spinlock is not safe in NMI context. Try -+ * to get the debug allocator lock, if it cannot be obtained after a second -+ * then give up. If the lock could not be previously obtained on this cpu then -+ * only try once. -+ * -+ * sparse has no annotation for "this function _sometimes_ acquires a lock", so -+ * fudge the acquire/release notation. -+ */ -+static DEFINE_SPINLOCK(dap_lock); -+static int -+get_dap_lock(void) -+ __acquires(dap_lock) -+{ -+ static int dap_locked = -1; -+ int count; -+ if (dap_locked == smp_processor_id()) -+ count = 1; -+ else -+ count = 1000; -+ while (1) { -+ if (spin_trylock(&dap_lock)) { -+ dap_locked = -1; -+ return 1; -+ } -+ if (!count--) -+ break; -+ udelay(1000); -+ } -+ dap_locked = smp_processor_id(); -+ __acquire(dap_lock); -+ return 0; -+} -+ -+void -+*debug_kmalloc(size_t size, gfp_t flags) -+{ -+ unsigned int rem, h_offset; -+ struct debug_alloc_header *best, *bestprev, *prev, *h; -+ void *p = NULL; -+ if (!get_dap_lock()) { -+ __release(dap_lock); /* we never actually got it */ -+ return NULL; -+ } -+ h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first); -+ if (dah_first_call) { -+ h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead; -+ dah_first_call = 0; -+ } -+ size = ALIGN(size, dah_align); -+ prev = best = bestprev = NULL; -+ while (1) { -+ if (h->size >= size && (!best || h->size < best->size)) { -+ best = h; -+ bestprev = prev; -+ if (h->size == size) -+ break; -+ } -+ if (!h->next) -+ break; -+ prev = h; -+ h = (struct debug_alloc_header *)(debug_alloc_pool + h->next); -+ } -+ if (!best) -+ goto out; -+ rem = best->size - size; -+ /* The pool must always contain at least one header */ -+ if (best->next == 0 && bestprev == NULL && rem < dah_overhead) -+ goto out; -+ if (rem >= dah_overhead) { -+ best->size = size; -+ h_offset = ((char *)best - debug_alloc_pool) + -+ dah_overhead + best->size; -+ h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset); -+ h->size = rem - dah_overhead; -+ h->next = best->next; -+ } else -+ h_offset = best->next; -+ best->caller = __builtin_return_address(0); -+ dah_used += best->size; -+ dah_used_max = max(dah_used, dah_used_max); -+ if (bestprev) -+ bestprev->next = h_offset; -+ else -+ dah_first = h_offset; -+ p = (char *)best + dah_overhead; -+ memset(p, POISON_INUSE, best->size - 1); -+ *((char *)p + best->size - 1) = POISON_END; -+out: -+ spin_unlock(&dap_lock); -+ return p; -+} -+ -+void -+debug_kfree(void *p) -+{ -+ struct debug_alloc_header *h; -+ unsigned int h_offset; -+ if (!p) -+ return; -+ if ((char *)p < debug_alloc_pool || -+ (char *)p >= 
debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) { -+ kfree(p); -+ return; -+ } -+ if (!get_dap_lock()) { -+ __release(dap_lock); /* we never actually got it */ -+ return; /* memory leak, cannot be helped */ -+ } -+ h = (struct debug_alloc_header *)((char *)p - dah_overhead); -+ memset(p, POISON_FREE, h->size - 1); -+ *((char *)p + h->size - 1) = POISON_END; -+ h->caller = NULL; -+ dah_used -= h->size; -+ h_offset = (char *)h - debug_alloc_pool; -+ if (h_offset < dah_first) { -+ h->next = dah_first; -+ dah_first = h_offset; -+ } else { -+ struct debug_alloc_header *prev; -+ unsigned int prev_offset; -+ prev = (struct debug_alloc_header *)(debug_alloc_pool + dah_first); -+ while (1) { -+ if (!prev->next || prev->next > h_offset) -+ break; -+ prev = (struct debug_alloc_header *) -+ (debug_alloc_pool + prev->next); -+ } -+ prev_offset = (char *)prev - debug_alloc_pool; -+ if (prev_offset + dah_overhead + prev->size == h_offset) { -+ prev->size += dah_overhead + h->size; -+ memset(h, POISON_FREE, dah_overhead - 1); -+ *((char *)h + dah_overhead - 1) = POISON_END; -+ h = prev; -+ h_offset = prev_offset; -+ } else { -+ h->next = prev->next; -+ prev->next = h_offset; -+ } -+ } -+ if (h_offset + dah_overhead + h->size == h->next) { -+ struct debug_alloc_header *next; -+ next = (struct debug_alloc_header *) -+ (debug_alloc_pool + h->next); -+ h->size += dah_overhead + next->size; -+ h->next = next->next; -+ memset(next, POISON_FREE, dah_overhead - 1); -+ *((char *)next + dah_overhead - 1) = POISON_END; -+ } -+ spin_unlock(&dap_lock); -+} -+ -+void -+debug_kusage(void) -+{ -+ struct debug_alloc_header *h_free, *h_used; -+#ifdef CONFIG_IA64 -+ /* FIXME: using dah for ia64 unwind always results in a memory leak. -+ * Fix that memory leak first, then set debug_kusage_one_time = 1 for -+ * all architectures. -+ */ -+ static int debug_kusage_one_time = 0; -+#else -+ static int debug_kusage_one_time = 1; -+#endif -+ if (!get_dap_lock()) { -+ __release(dap_lock); /* we never actually got it */ -+ return; -+ } -+ h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first); -+ if (dah_first == 0 && -+ (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead || -+ dah_first_call)) -+ goto out; -+ if (!debug_kusage_one_time) -+ goto out; -+ debug_kusage_one_time = 0; -+ kdb_printf("%s: debug_kmalloc memory leak dah_first %d\n", -+ __FUNCTION__, dah_first); -+ if (dah_first) { -+ h_used = (struct debug_alloc_header *)debug_alloc_pool; -+ kdb_printf("%s: h_used %p size %d\n", __FUNCTION__, h_used, h_used->size); -+ } -+ do { -+ h_used = (struct debug_alloc_header *) -+ ((char *)h_free + dah_overhead + h_free->size); -+ kdb_printf("%s: h_used %p size %d caller %p\n", -+ __FUNCTION__, h_used, h_used->size, h_used->caller); -+ h_free = (struct debug_alloc_header *) -+ (debug_alloc_pool + h_free->next); -+ } while (h_free->next); -+ h_used = (struct debug_alloc_header *) -+ ((char *)h_free + dah_overhead + h_free->size); -+ if ((char *)h_used - debug_alloc_pool != -+ sizeof(debug_alloc_pool_aligned)) -+ kdb_printf("%s: h_used %p size %d caller %p\n", -+ __FUNCTION__, h_used, h_used->size, h_used->caller); -+out: -+ spin_unlock(&dap_lock); -+} -+ -+/* Maintain a small stack of kdb_flags to allow recursion without disturbing -+ * the global kdb state. 
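The comment above describes a small fixed-depth stack used to save and restore the global kdb_flags word around recursive entry. A standalone userspace sketch of the same pattern (the names here are illustrative, not the kdb symbols that follow):

#include <assert.h>
#include <stdio.h>

#define FLAG_STACK_DEPTH 4

static int global_flags;                     /* stand-in for kdb_flags */
static int flag_stack[FLAG_STACK_DEPTH], flag_index;

static void flags_save(void)
{
    assert(flag_index < FLAG_STACK_DEPTH);   /* overflow would be a bug */
    flag_stack[flag_index++] = global_flags;
}

static void flags_restore(void)
{
    assert(flag_index > 0);                  /* underflow would be a bug */
    global_flags = flag_stack[--flag_index];
}

int main(void)
{
    global_flags = 0x1;
    flags_save();
    global_flags = 0x2;      /* nested use changes the flags temporarily */
    flags_restore();
    printf("flags restored to 0x%x\n", global_flags);
    return 0;
}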
-+ */ -+ -+static int kdb_flags_stack[4], kdb_flags_index; -+ -+void -+kdb_save_flags(void) -+{ -+ BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack)); -+ kdb_flags_stack[kdb_flags_index++] = kdb_flags; -+} -+ -+void -+kdb_restore_flags(void) -+{ -+ BUG_ON(kdb_flags_index <= 0); -+ kdb_flags = kdb_flags_stack[--kdb_flags_index]; -+} ---- /dev/null -+++ b/kdb/modules/Makefile -@@ -0,0 +1,14 @@ -+# -+# This file is subject to the terms and conditions of the GNU General Public -+# License. See the file "COPYING" in the main directory of this archive -+# for more details. -+# -+# Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. -+# -+ -+obj-$(CONFIG_KDB_MODULES) += kdbm_pg.o kdbm_task.o kdbm_vm.o kdbm_sched.o -+obj-m += kdbm_debugtypes.o -+ifdef CONFIG_X86 -+obj-$(CONFIG_KDB_MODULES) += kdbm_x86.o -+endif -+CFLAGS_kdbm_vm.o += -I $(srctree)/drivers/scsi ---- /dev/null -+++ b/kdb/modules/kdbm_debugtypes.c -@@ -0,0 +1,388 @@ -+/* this one has some additional address validation - untested */ -+/* -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+/* -+ * -+ * Most of this code is borrowed and adapted from the lkcd command "lcrash" -+ * and its supporting libarary. -+ * -+ * This module provides kdb commands for casting memory structures. -+ * It loads symbolic debugging info (provided from lcrash -o), and provides -+ * "print" "px", "pd" -+ * (this information originally comes from the lcrash "kerntypes" file) -+ * -+ * A key here is tacking a file of debug info onto this module, for -+ * load with it at insmod time. -+ * -+ * Careful of porting the klib KL_XXX functions (they call thru a jump table -+ * that we don't use here) -+ * -+ * Usage: -+ * in order for the insmod kdbm_debugtypes.ko to succeed in loading types -+ * you must first use lcrash -t kerntypes.xxxx -o debug_info -+ * and echo debug_info > /proc/kdb/debug_info_name -+ */ -+ -+#define VMALLOC_START_IA64 0xa000000200000000 -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "lcrash/klib.h" -+#include "lcrash/kl_stringtab.h" -+#include "lcrash/kl_btnode.h" -+#include "lcrash/lc_eval.h" -+ -+MODULE_AUTHOR("SGI"); -+MODULE_DESCRIPTION("Load symbolic debugging information"); -+MODULE_LICENSE("GPL"); -+ -+#undef next_node /* collision with nodemask.h */ -+static char *stringstorage, **stringp_array; -+static void *filestorage; -+static long num_strings, num_kltypes, num_dsyms, stringstorage_size; -+extern int have_debug_file; -+extern dbg_sym_t *types_tree_head; -+extern dbg_sym_t *typedefs_tree_head; -+extern kltype_t *kltype_array; -+extern dbg_sym_t *dsym_types_array; -+extern dbg_sym_t *type_tree; -+extern dbg_sym_t *typedef_tree; -+ -+/* -+ * use a pointer's value as an index in the stringp_array (num_strings) and -+ * translate it to string address -+ * -+ * Return 0 for success, 1 for failure -+ */ -+static int -+index_to_char_ptr(char **ptrp) -+{ -+ long i; -+ -+ i = (long)*ptrp; -+ /* we use a value of -1 to mean this was a null pointer */ -+ if (i == -1) { -+ *ptrp = NULL; -+ return 0; -+ } -+ if (i > num_strings-1) { -+ printk("Could not translate character string index %#lx\n", i); -+ return 1; -+ } -+ *ptrp = *(stringp_array+i); -+ return 0; -+} -+ -+/* -+ * use a pointer's value as an index in the 
kltype_array (num_kltypes) and -+ * translate it to the kltype_t address -+ * -+ * return 0 for success, 1 for failure -+ */ -+static int -+index_to_kltype_ptr(kltype_t **ptrp) -+{ -+ long i; -+ -+ i = (long)*ptrp; -+ /* we use a value of -1 to mean this was a null pointer */ -+ if (i == -1) { -+ *ptrp = NULL; -+ return 0; -+ } -+ if (i > num_kltypes-1) { -+ printk("Could not translate kl_type string index %#lx\n", i); -+ return 1; -+ } -+ *ptrp = kltype_array+i; -+ return 0; -+} -+ -+/* -+ * look up a pointer in the dsym_types_array (num_dsyms) and -+ * translate it to the index in the array -+ * -+ * return 0 for success, 1 for failure -+ */ -+static int -+index_to_dbg_ptr(dbg_sym_t **ptrp) -+{ -+ long i; -+ -+ i = (long)*ptrp; -+ /* we use a value of -1 to mean this was a null pointer */ -+ if (i == -1) { -+ *ptrp = NULL; -+ return 0; -+ } -+ if (i > num_dsyms-1) { -+ printk("Could not translate dbg_sym_t index %#lx\n", i); -+ return 1; -+ } -+ *ptrp = dsym_types_array+i; -+ return 0; -+} -+ -+ -+/* -+ * Work on the image of the file built by lcrash. -+ * Unpack the strings, and resolve the pointers in the arrays of kltype_t's -+ * and dbg_sym_t's to pointers. -+ * -+ * see lcrash's lib/libklib/kl_debug.c, which generates this file -+ * -+ * Return the pointers to the heads of the two binary trees by means of -+ * pointer arguments. -+ * -+ * Return 0 for sucess, 1 for any error. -+ */ -+static int -+trans_file_image(void *file_storage, long file_size, dbg_sym_t **type_treepp, -+ dbg_sym_t **typedef_treepp) -+{ -+ int len; -+ long i, section_size, *lp, element_size; -+ long head_types_tree, head_typedefs_tree; -+ char *ptr, *stringsection, *kltypesection, *dbgsection; -+ void *kltypestorage, *dbgstorage; -+ kltype_t *klp; -+ dbg_sym_t *dbgp; -+ -+ /* 1) the strings */ -+ lp = (long *)file_storage; -+ stringsection = (char *)lp; -+ section_size = *lp++; -+ num_strings = *lp++; -+ lp++; /* element size does not apply the strings section */ -+ -+ stringstorage_size = section_size - (3*sizeof(long)); -+ stringstorage = (char *)lp; -+ -+ stringp_array = (char **)vmalloc(num_strings * sizeof(char *)); -+ if (! 
stringp_array) { -+ printk("vmalloc of %ld string pointers failed\n", num_strings); -+ return 1; -+ } -+ ptr = stringstorage; -+ for (i=0; ikl_name)) -+ goto bad; -+ if (index_to_char_ptr(&klp->kl_typestr)) -+ goto bad; -+ if (index_to_kltype_ptr(&klp->kl_member)) -+ goto bad; -+ if (index_to_kltype_ptr(&klp->kl_next)) -+ goto bad; -+ if (index_to_kltype_ptr(&klp->kl_realtype)) -+ goto bad; -+ if (index_to_kltype_ptr(&klp->kl_indextype)) -+ goto bad; -+ if (index_to_kltype_ptr(&klp->kl_elementtype)) -+ goto bad; -+ if (index_to_dbg_ptr((dbg_sym_t **)&klp->kl_ptr)) -+ goto bad; -+ } -+ -+ /* translate the indices in our our array of dbg_sym_t's to pointers */ -+ /* (see write_dbgtype() for the fields that can be translated) */ -+ dbgp = dsym_types_array; -+ for (i=0; isym_bt.bt_key)) -+ goto bad; -+ if (index_to_dbg_ptr((dbg_sym_t **)&dbgp->sym_bt.bt_left)) -+ goto bad; -+ if (index_to_dbg_ptr((dbg_sym_t **)&dbgp->sym_bt.bt_right)) -+ goto bad; -+ if (index_to_dbg_ptr((dbg_sym_t **)&dbgp->sym_bt.bt_parent)) -+ goto bad; -+ if (index_to_dbg_ptr((dbg_sym_t **)&dbgp->sym_next)) -+ goto bad; -+ if (index_to_dbg_ptr((dbg_sym_t **)&dbgp->sym_link)) -+ goto bad; -+ if (index_to_kltype_ptr(&dbgp->sym_kltype)) -+ goto bad; -+ } -+ -+ vfree(stringp_array); -+ return 0; -+bad: -+ printk("trans_file_image() returning an error\n"); -+ vfree(stringp_array); -+ return 1; -+} -+ -+/* there is /proc interface to this string */ -+extern char kdb_debug_info_filename[]; -+/* -+ * This is the module initialization function. -+ */ -+static int __init -+kdbm_debuginfo_init(void) -+{ -+ int len; -+ long ret, file_size; -+ ssize_t sizeread; -+ mm_segment_t fs; -+ struct file *file; -+ loff_t inode_size, pos; -+ -+ len = strlen(kdb_debug_info_filename); -+ if (!len) { -+ printk("kdb: no file name in /proc/kdb/debug_info_name\n"); -+ return -ENODEV; -+ } -+ -+ fs = get_fs(); /* save previous value of address limits */ -+ set_fs (get_ds()); /* use kernel limit */ -+ -+ file = filp_open(kdb_debug_info_filename, O_RDONLY, 0); -+ if (IS_ERR(file)) { -+ set_fs(fs); -+ printk ( -+ "kdb: open of %s (from /proc/kdb/debug_info_name) failed\n", -+ kdb_debug_info_filename); -+ return -ENODEV; -+ } -+ if (!file->f_op || (!file->f_op->read && !file->f_op->llseek)) { -+ printk ("file has no operation for read or seek\n"); -+ set_fs(fs); -+ return -ENODEV; -+ } -+ inode_size = file->f_dentry->d_inode->i_size; -+ -+ /* -+ * File has a header word on it that contains the size of the -+ * file. We don't need it, but can use it as a sanity check. 
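The init code above trusts the debug-info image only after checking that the size recorded in its leading long matches the size reported by the filesystem. A userspace sketch of the same sanity check (the file name and layout are assumptions for illustration):

#include <stdio.h>
#include <sys/stat.h>

static int check_debug_image(const char *path)
{
    struct stat st;
    long recorded = 0;
    FILE *f = fopen(path, "rb");

    if (!f || stat(path, &st) != 0)
        goto bad;
    if (fread(&recorded, sizeof(recorded), 1, f) != 1)
        goto bad;
    fclose(f);
    /* the header word must match what the filesystem reports */
    return recorded == (long)st.st_size;
bad:
    if (f)
        fclose(f);
    return 0;
}

int main(void)
{
    printf("image %s\n", check_debug_image("debug_info") ? "ok" : "rejected");
    return 0;
}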
-+ */ -+ pos = 0; -+ sizeread = file->f_op->read(file, (char *)&file_size, -+ sizeof(file_size), &pos); -+ if (sizeread != sizeof(file_size)) { -+ printk("could not read %d bytes from %s\n", -+ (int)sizeof(file_size), kdb_debug_info_filename); -+ ret = filp_close(file, NULL); -+ set_fs(fs); -+ return -ENODEV; -+ } -+ if (inode_size != file_size) { -+ printk("file says %ld, inode says %lld\n", -+ file_size, inode_size); -+ ret = filp_close(file, NULL); -+ set_fs(fs); -+ return -ENODEV; -+ } -+ -+ /* space for the rest of the file: */ -+ file_size -= sizeof(long); -+ filestorage = (void *)vmalloc(file_size); -+ -+ pos = sizeof(file_size); /* position after the header word */ -+ sizeread = file->f_op->read(file, (char *)filestorage, -+ file_size, &pos); -+ if (sizeread != file_size) { -+ printk("could not read %ld bytes from %s\n", -+ file_size, kdb_debug_info_filename); -+ ret = filp_close(file, NULL); -+ set_fs(fs); -+ vfree (filestorage); -+ return -ENODEV; -+ } -+ -+ ret = filp_close(file, NULL); -+ set_fs(fs); /* restore address limits before returning to user space */ -+ -+ if (trans_file_image(filestorage, file_size, &types_tree_head, -+ &typedefs_tree_head)){ -+ vfree (filestorage); -+ return -ENODEV; -+ } -+ printk("kdbm_debuginfo loaded %s\n", kdb_debug_info_filename); -+ /* set the lcrash code's binary tree head nodes */ -+ type_tree = types_tree_head; -+ typedef_tree = typedefs_tree_head; -+ -+ have_debug_file = 1; -+ -+ return 0; -+} -+ -+/* -+ * This is the module exit function. -+ */ -+static void __exit -+kdbm_debuginfo_exit(void) -+{ -+ printk("kdbm_debuginfo unloaded %s\n", kdb_debug_info_filename); -+ vfree (filestorage); -+ have_debug_file = 0; -+ return; -+} -+ -+module_init(kdbm_debuginfo_init); -+module_exit(kdbm_debuginfo_exit); ---- /dev/null -+++ b/kdb/modules/kdbm_pg.c -@@ -0,0 +1,684 @@ -+/* -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_AUTHOR("SGI"); -+MODULE_DESCRIPTION("Debug page information"); -+MODULE_LICENSE("GPL"); -+ -+/* Standard Linux page stuff */ -+ -+#if !defined(CONFIG_DISCONTIGMEM) && !defined(CONFIG_NUMA) -+/* From include/linux/page-flags.h */ -+static char *pg_flag_vals[] = { -+ "PG_locked", "PG_error", "PG_referenced", "PG_uptodate", -+ "PG_dirty", "PG_lru", "PG_active", "PG_slab", -+ "PG_owner_priv_1", "PG_arch_1", "PG_reserved", "PG_private", -+ "PG_writeback", -+#ifdef CONFIG_PAGEFLAGS_EXTENDED -+ "PG_head", "PG_tail", -+#else -+ "PG_compound", -+#endif -+ "PG_swapcache", "PG_mappedtodisk", "PG_reclaim", "PG_buddy", -+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR -+ "PG_uncached", -+#endif -+ NULL }; -+#endif -+ -+/* From include/linux/buffer_head.h */ -+static char *bh_state_vals[] = { -+ "Uptodate", "Dirty", "Lock", "Req", -+ "Uptodate_Lock", "Mapped", "New", "Async_read", -+ "Async_write", "Delay", "Boundary", "Write_EIO", -+ "Ordered", "Eopnotsupp", "Unwritten", "PriavateStart", -+ NULL }; -+ -+/* From include/linux/bio.h */ -+static char *bio_flag_vals[] = { -+ "Uptodate", "RW_block", "EOF", "Seg_valid", -+ "Cloned", "Bounced", "User_mapped", "Eopnotsupp", -+ NULL }; -+ -+/* From include/linux/fs.h */ -+static char *inode_flag_vals[] = { -+ "I_DIRTY_SYNC", "I_DIRTY_DATASYNC", "I_DIRTY_PAGES", "I_NEW", -+ "I_WILL_FREE", "I_FREEING", "I_CLEAR", "I_LOCK", -+ "I_SYNC", NULL }; -+ -+static char *map_flags(unsigned long flags, char *mapping[]) -+{ -+ static char buffer[256]; -+ int index; -+ int offset = 12; -+ -+ buffer[0] = '\0'; -+ -+ for (index = 0; flags && mapping[index]; flags >>= 1, index++) { -+ if (flags & 1) { -+ if ((offset + strlen(mapping[index]) + 1) >= 80) { -+ strcat(buffer, "\n "); -+ offset = 12; -+ } else if (offset > 12) { -+ strcat(buffer, " "); -+ offset++; -+ } -+ strcat(buffer, mapping[index]); -+ offset += strlen(mapping[index]); -+ } -+ } -+ -+ return (buffer); -+} -+ -+static int -+kdbm_buffers(int argc, const char **argv) -+{ -+ struct buffer_head bh; -+ unsigned long addr; -+ long offset = 0; -+ int nextarg; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) || -+ (diag = kdb_getarea(bh, addr))) -+ return(diag); -+ -+ kdb_printf("buffer_head at 0x%lx\n", addr); -+ kdb_printf(" bno %llu size %llu dev 0x%x\n", -+ (unsigned long long)bh.b_blocknr, -+ (unsigned long long)bh.b_size, -+ bh.b_bdev ? 
bh.b_bdev->bd_dev : 0); -+ kdb_printf(" count %d state 0x%lx [%s]\n", -+ bh.b_count.counter, bh.b_state, -+ map_flags(bh.b_state, bh_state_vals)); -+ kdb_printf(" b_data 0x%p\n", -+ bh.b_data); -+ kdb_printf(" b_page 0x%p b_this_page 0x%p b_private 0x%p\n", -+ bh.b_page, bh.b_this_page, bh.b_private); -+ kdb_printf(" b_end_io "); -+ if (bh.b_end_io) -+ kdb_symbol_print(kdba_funcptr_value(bh.b_end_io), NULL, KDB_SP_VALUE); -+ else -+ kdb_printf("(NULL)"); -+ kdb_printf("\n"); -+ -+ return 0; -+} -+ -+static int -+print_biovec(struct bio_vec *vec, int vcount) -+{ -+ struct bio_vec bvec; -+ unsigned long addr; -+ int diag; -+ int i; -+ -+ if (vcount < 1 || vcount > BIO_MAX_PAGES) { -+ kdb_printf(" [skipped iovecs, vcnt is %d]\n", vcount); -+ return 0; -+ } -+ -+ addr = (unsigned long)vec; -+ for (i = 0; i < vcount; i++) { -+ if ((diag = kdb_getarea(bvec, addr))) -+ return(diag); -+ addr += sizeof(bvec); -+ kdb_printf(" [%d] page 0x%p length=%u offset=%u\n", -+ i, bvec.bv_page, bvec.bv_len, bvec.bv_offset); -+ } -+ return 0; -+} -+ -+static int -+kdbm_bio(int argc, const char **argv) -+{ -+ struct bio bio; -+ unsigned long addr; -+ long offset = 0; -+ int nextarg; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) || -+ (diag = kdb_getarea(bio, addr))) -+ return(diag); -+ -+ kdb_printf("bio at 0x%lx\n", addr); -+ kdb_printf(" bno %llu next 0x%p dev 0x%x\n", -+ (unsigned long long)bio.bi_sector, -+ bio.bi_next, bio.bi_bdev ? bio.bi_bdev->bd_dev : 0); -+ kdb_printf(" vcnt %u vec 0x%p rw 0x%lx flags 0x%lx [%s]\n", -+ bio.bi_vcnt, bio.bi_io_vec, bio.bi_rw, bio.bi_flags, -+ map_flags(bio.bi_flags, bio_flag_vals)); -+ print_biovec(bio.bi_io_vec, bio.bi_vcnt); -+ kdb_printf(" count %d private 0x%p\n", -+ atomic_read(&bio.bi_cnt), bio.bi_private); -+ kdb_printf(" bi_end_io "); -+ if (bio.bi_end_io) -+ kdb_symbol_print(kdba_funcptr_value(bio.bi_end_io), NULL, KDB_SP_VALUE); -+ else -+ kdb_printf("(NULL)"); -+ kdb_printf("\n"); -+ -+ return 0; -+} -+ -+#if !defined(CONFIG_DISCONTIGMEM) && !defined(CONFIG_NUMA) -+static char *page_flags(unsigned long flags) -+{ -+ return(map_flags(flags, pg_flag_vals)); -+} -+ -+static int -+kdbm_page(int argc, const char **argv) -+{ -+ struct page page; -+ unsigned long addr; -+ long offset = 0; -+ int nextarg; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ -+#ifdef __ia64__ -+ if (rgn_index(addr) == 0) -+ addr = (unsigned long) &mem_map[addr]; /* assume region 0 is a page index, not an address */ -+#else -+ if (addr < PAGE_OFFSET) -+ addr = (unsigned long) &mem_map[addr]; -+#endif -+ -+ if ((diag = kdb_getarea(page, addr))) -+ return(diag); -+ -+ kdb_printf("struct page at 0x%lx\n", addr); -+ kdb_printf(" addr space 0x%p index %lu (offset 0x%llx)\n", -+ page.mapping, page.index, -+ (unsigned long long)page.index << PAGE_CACHE_SHIFT); -+ kdb_printf(" count %d flags %s\n", -+ page._count.counter, page_flags(page.flags)); -+ kdb_printf(" virtual 0x%p\n", page_address((struct page *)addr)); -+ if (page_has_buffers(&page)) -+ kdb_printf(" buffers 0x%p\n", page_buffers(&page)); -+ else -+ kdb_printf(" private 0x%lx\n", page_private(&page)); -+ -+ return 0; -+} -+#endif /* !CONFIG_DISCONTIGMEM && !NUMA */ -+ -+static unsigned long -+print_request(unsigned long addr) -+{ -+ struct request rq; -+ -+ if (kdb_getarea(rq, addr)) -+ return(0); -+ -+ 
kdb_printf("struct request at 0x%lx\n", addr); -+ kdb_printf(" errors %d sector %llu nr_sectors %llu\n", -+ rq.errors, -+ (unsigned long long)blk_rq_pos(&rq), -+ (unsigned long long)blk_rq_sectors(&rq)); -+ -+ return (unsigned long) rq.queuelist.next; -+} -+ -+static int -+kdbm_request(int argc, const char **argv) -+{ -+ long offset = 0; -+ unsigned long addr; -+ int nextarg; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ -+ print_request(addr); -+ return 0; -+} -+ -+ -+static int -+kdbm_rqueue(int argc, const char **argv) -+{ -+ struct request_queue rq; -+ unsigned long addr, head_addr, next; -+ long offset = 0; -+ int nextarg; -+ int i, diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) || -+ (diag = kdb_getarea(rq, addr))) -+ return(diag); -+ -+ kdb_printf("struct request_queue at 0x%lx\n", addr); -+ i = 0; -+ next = (unsigned long)rq.queue_head.next; -+ head_addr = addr + offsetof(struct request_queue, queue_head); -+ kdb_printf(" request queue: %s\n", next == head_addr ? -+ "empty" : ""); -+ while (next != head_addr) { -+ i++; -+ next = print_request(next); -+ } -+ -+ if (i) -+ kdb_printf("%d requests found\n", i); -+ -+ return 0; -+} -+ -+ -+static void -+do_buffer(unsigned long addr) -+{ -+ struct buffer_head bh; -+ -+ if (kdb_getarea(bh, addr)) -+ return; -+ -+ kdb_printf("\tbh 0x%lx bno %8llu [%s]\n", addr, -+ (unsigned long long)bh.b_blocknr, -+ map_flags(bh.b_state, bh_state_vals)); -+} -+ -+static void -+kdbm_show_page(struct page *page, int first) -+{ -+ if (first) -+ kdb_printf("page_struct index cnt zone nid flags\n"); -+ kdb_printf("%p%s %6lu %5d %3d %3d 0x%lx", -+ page_address(page), sizeof(void *) == 4 ? " " : "", -+ page->index, atomic_read(&(page->_count)), -+ page_zonenum(page), page_to_nid(page), -+ page->flags & (~0UL >> ZONES_SHIFT)); -+#define kdb_page_flags(page, type) if (Page ## type(page)) kdb_printf(" " #type); -+ kdb_page_flags(page, Locked); -+ kdb_page_flags(page, Error); -+ kdb_page_flags(page, Referenced); -+ kdb_page_flags(page, Uptodate); -+ kdb_page_flags(page, Dirty); -+ kdb_page_flags(page, LRU); -+ kdb_page_flags(page, Active); -+ kdb_page_flags(page, Slab); -+ kdb_page_flags(page, Checked); -+ if (page->flags & (1UL << PG_arch_1)) -+ kdb_printf(" arch_1"); -+ kdb_page_flags(page, Reserved); -+ kdb_page_flags(page, Private); -+ kdb_page_flags(page, Writeback); -+ kdb_page_flags(page, Compound); -+ kdb_page_flags(page, SwapCache); -+ kdb_page_flags(page, MappedToDisk); -+ kdb_page_flags(page, Reclaim); -+ kdb_page_flags(page, Buddy); -+ -+ /* PageHighMem is not a flag any more, but treat it as one */ -+ kdb_page_flags(page, HighMem); -+ -+ if (page_has_buffers(page)) { -+ struct buffer_head *head, *bh; -+ kdb_printf("\n"); -+ head = bh = page_buffers(page); -+ do { -+ do_buffer((unsigned long) bh); -+ } while ((bh = bh->b_this_page) != head); -+ } else if (page_private(page)) { -+ kdb_printf(" private= 0x%lx", page_private(page)); -+ } -+ /* Cannot use page_mapping(page) here, it needs swapper_space which is -+ * not exported. 
-+ */ -+ if (page->mapping) -+ kdb_printf(" mapping= %p", page->mapping); -+ kdb_printf("\n"); -+#undef kdb_page_flags -+} -+ -+static int -+kdbm_inode_pages(int argc, const char **argv) -+{ -+ struct inode *inode = NULL; -+ struct address_space *ap = NULL; -+ unsigned long addr, addr1 = 0; -+ long offset = 0; -+ int nextarg; -+ int diag; -+ pgoff_t next = 0; -+ struct page *page; -+ int first; -+ -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ goto out; -+ -+ if (argc == 2) { -+ nextarg = 2; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr1, -+ &offset, NULL); -+ if (diag) -+ goto out; -+ kdb_printf("Looking for page index 0x%lx ... \n", addr1); -+ next = addr1; -+ } -+ -+ if (!(inode = kmalloc(sizeof(*inode), GFP_ATOMIC))) { -+ kdb_printf("kdbm_inode_pages: cannot kmalloc inode\n"); -+ goto out; -+ } -+ if (!(ap = kmalloc(sizeof(*ap), GFP_ATOMIC))) { -+ kdb_printf("kdbm_inode_pages: cannot kmalloc ap\n"); -+ goto out; -+ } -+ if ((diag = kdb_getarea(*inode, addr))) -+ goto out; -+ if (!inode->i_mapping) { -+ kdb_printf("inode has no mapping\n"); -+ goto out; -+ } -+ if ((diag = kdb_getarea(*ap, (unsigned long) inode->i_mapping))) -+ goto out; -+ -+ /* Run the pages in the radix tree, printing the state of each page */ -+ first = 1; -+ while (radix_tree_gang_lookup(&ap->page_tree, (void **)&page, next, 1)) { -+ kdbm_show_page(page, first); -+ if (addr1) -+ break; -+ first = 0; -+ next = page->index + 1; -+ } -+ -+out: -+ if (inode) -+ kfree(inode); -+ if (ap) -+ kfree(ap); -+ return diag; -+} -+ -+static int -+kdbm_inode(int argc, const char **argv) -+{ -+ struct inode *inode = NULL; -+ unsigned long addr; -+ unsigned char *iaddr; -+ long offset = 0; -+ int nextarg; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL))) -+ goto out; -+ if (!(inode = kmalloc(sizeof(*inode), GFP_ATOMIC))) { -+ kdb_printf("kdbm_inode: cannot kmalloc inode\n"); -+ goto out; -+ } -+ if ((diag = kdb_getarea(*inode, addr))) -+ goto out; -+ -+ kdb_printf("struct inode at 0x%lx\n", addr); -+ -+ kdb_printf(" i_ino = %lu i_count = %u i_size %Ld\n", -+ inode->i_ino, atomic_read(&inode->i_count), -+ inode->i_size); -+ -+ kdb_printf(" i_mode = 0%o i_nlink = %d i_rdev = 0x%x\n", -+ inode->i_mode, inode->i_nlink, -+ inode->i_rdev); -+ -+ kdb_printf(" i_hash.nxt = 0x%p i_hash.pprev = 0x%p\n", -+ inode->i_hash.next, -+ inode->i_hash.pprev); -+ -+ kdb_printf(" i_list.nxt = 0x%p i_list.prv = 0x%p\n", -+ list_entry(inode->i_list.next, struct inode, i_list), -+ list_entry(inode->i_list.prev, struct inode, i_list)); -+ -+ kdb_printf(" i_dentry.nxt = 0x%p i_dentry.prv = 0x%p\n", -+ list_entry(inode->i_dentry.next, struct dentry, d_alias), -+ list_entry(inode->i_dentry.prev, struct dentry, d_alias)); -+ -+ kdb_printf(" i_sb = 0x%p i_op = 0x%p i_data = 0x%lx nrpages = %lu\n", -+ inode->i_sb, inode->i_op, -+ addr + offsetof(struct inode, i_data), -+ inode->i_data.nrpages); -+ kdb_printf(" i_fop= 0x%p i_flock = 0x%p i_mapping = 0x%p\n", -+ inode->i_fop, inode->i_flock, inode->i_mapping); -+ -+ kdb_printf(" i_flags 0x%x i_state 0x%lx [%s]", -+ inode->i_flags, inode->i_state, -+ map_flags(inode->i_state, inode_flag_vals)); -+ -+ iaddr = (char *)addr; -+ iaddr += offsetof(struct inode, i_private); -+ -+ kdb_printf(" fs specific info @ 0x%p\n", iaddr); -+out: -+ if (inode) -+ kfree(inode); -+ return diag; -+} -+ -+static int -+kdbm_sb(int argc, const char **argv) -+{ -+ struct 
super_block *sb = NULL; -+ unsigned long addr; -+ long offset = 0; -+ int nextarg; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL))) -+ goto out; -+ if (!(sb = kmalloc(sizeof(*sb), GFP_ATOMIC))) { -+ kdb_printf("kdbm_sb: cannot kmalloc sb\n"); -+ goto out; -+ } -+ if ((diag = kdb_getarea(*sb, addr))) -+ goto out; -+ -+ kdb_printf("struct super_block at 0x%lx\n", addr); -+ kdb_printf(" s_dev 0x%x blocksize 0x%lx\n", sb->s_dev, sb->s_blocksize); -+ kdb_printf(" s_flags 0x%lx s_root 0x%p\n", sb->s_flags, sb->s_root); -+ kdb_printf(" s_frozen %d s_id [%s]\n", sb->s_frozen, sb->s_id); -+out: -+ if (sb) -+ kfree(sb); -+ return diag; -+} -+ -+ -+#if !defined(CONFIG_DISCONTIGMEM) && !defined(CONFIG_NUMA) -+/* According to Steve Lord, this code is ix86 specific. Patches to extend it to -+ * other architectures will be greatefully accepted. -+ */ -+static int -+kdbm_memmap(int argc, const char **argv) -+{ -+ struct page page; -+ int i, page_count; -+ int slab_count = 0; -+ int dirty_count = 0; -+ int locked_count = 0; -+ int page_counts[10]; /* [8] = large counts, [9] = -1 counts */ -+ int buffered_count = 0; -+#ifdef buffer_delay -+ int delay_count = 0; -+#endif -+ int diag; -+ unsigned long addr; -+#ifdef CONFIG_DISCONTIGMEM -+ int node_id = -1, found_node = 0; -+ int tot_page_count = 0; -+ unsigned long unode_id; -+ pg_data_t *pgdat; -+ -+ if (argc == 1) { /* node_id was specified */ -+ diag = kdbgetularg(argv[argc], &unode_id); -+ if (diag) -+ return diag; -+ node_id = (int)unode_id; -+ } -+ else if (argc) -+ return KDB_ARGCOUNT; -+ -+ tot_page_count = 0; -+ memset(page_counts, 0, sizeof(page_counts)); -+ -+ for_each_online_pgdat(pgdat) { -+ if ((node_id != -1) && (pgdat->node_id != node_id)) -+ continue; -+ found_node = 1; -+ addr = (unsigned long)pgdat->node_mem_map; -+ page_count = pgdat->node_spanned_pages; -+ tot_page_count += page_count; -+#else -+ addr = (unsigned long)mem_map; -+ page_count = max_mapnr; -+ memset(page_counts, 0, sizeof(page_counts)); -+#endif -+ for (i = 0; i < page_count; i++) { -+ if ((diag = kdb_getarea(page, addr))) -+ return(diag); -+ addr += sizeof(page); -+ -+ if (PageSlab(&page)) -+ slab_count++; -+ if (PageDirty(&page)) -+ dirty_count++; -+ if (PageLocked(&page)) -+ locked_count++; -+ if (page._count.counter == -1) -+ page_counts[9]++; -+ else if (page._count.counter < 8) -+ page_counts[page._count.counter]++; -+ else -+ page_counts[8]++; -+ if (page_has_buffers(&page)) { -+ buffered_count++; -+#ifdef buffer_delay -+ if (buffer_delay(page.buffers)) -+ delay_count++; -+#endif -+ } -+ } -+#ifdef CONFIG_DISCONTIGMEM -+ } -+ page_count = tot_page_count; -+ if (node_id != -1) { -+ if (!found_node) { -+ kdb_printf("Node %d does not exist.\n", node_id); -+ return 0; -+ } -+ kdb_printf("Node %d pages:\n", node_id); -+ } -+#endif -+ kdb_printf(" Total pages: %6d\n", page_count); -+ kdb_printf(" Slab pages: %6d\n", slab_count); -+ kdb_printf(" Dirty pages: %6d\n", dirty_count); -+ kdb_printf(" Locked pages: %6d\n", locked_count); -+ kdb_printf(" Buffer pages: %6d\n", buffered_count); -+#ifdef buffer_delay -+ kdb_printf(" Delalloc pages: %6d\n", delay_count); -+#endif -+ kdb_printf(" -1 page count: %6d\n", page_counts[9]); -+ for (i = 0; i < 8; i++) { -+ kdb_printf(" %d page count: %6d\n", -+ i, page_counts[i]); -+ } -+ kdb_printf(" high page count: %6d\n", page_counts[8]); -+ return 0; -+} -+#endif /* !CONFIG_DISCONTIGMEM && !NUMA */ -+ -+static int __init 
kdbm_pg_init(void) -+{ -+#if !defined(CONFIG_DISCONTIGMEM) && !defined(CONFIG_NUMA) -+ kdb_register("page", kdbm_page, "", "Display page", 0); -+#endif -+ kdb_register("inode", kdbm_inode, "", "Display inode", 0); -+ kdb_register("sb", kdbm_sb, "", "Display super_block", 0); -+ kdb_register("bh", kdbm_buffers, "", "Display buffer", 0); -+ kdb_register("bio", kdbm_bio, "", "Display bio", 0); -+ kdb_register("inode_pages", kdbm_inode_pages, "", "Display pages in an inode", 0); -+ kdb_register("req", kdbm_request, "", "dump request struct", 0); -+ kdb_register("rqueue", kdbm_rqueue, "", "dump request queue", 0); -+#if !defined(CONFIG_DISCONTIGMEM) && !defined(CONFIG_NUMA) -+ kdb_register("memmap", kdbm_memmap, "", "page table summary", 0); -+#endif -+ -+ return 0; -+} -+ -+ -+static void __exit kdbm_pg_exit(void) -+{ -+#if !defined(CONFIG_DISCONTIGMEM) && !defined(CONFIG_NUMA) -+ kdb_unregister("page"); -+#endif -+ kdb_unregister("inode"); -+ kdb_unregister("sb"); -+ kdb_unregister("bh"); -+ kdb_unregister("bio"); -+ kdb_unregister("inode_pages"); -+ kdb_unregister("req"); -+ kdb_unregister("rqueue"); -+#if !defined(CONFIG_DISCONTIGMEM) && !defined(CONFIG_NUMA) -+ kdb_unregister("memmap"); -+#endif -+} -+ -+module_init(kdbm_pg_init) -+module_exit(kdbm_pg_exit) ---- /dev/null -+++ b/kdb/modules/kdbm_sched.c -@@ -0,0 +1,57 @@ -+/* -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_AUTHOR("SGI"); -+MODULE_DESCRIPTION("Debug scheduler information"); -+MODULE_LICENSE("GPL"); -+ -+static int -+kdbm_runqueues(int argc, const char **argv) -+{ -+ unsigned long cpu; -+ int ret = 0; -+ -+ if (argc == 1) { -+ ret = kdbgetularg((char *)argv[1], &cpu); -+ if (!ret) { -+ if (!cpu_online(cpu)) { -+ kdb_printf("Invalid cpu number\n"); -+ } else -+ kdb_runqueue(cpu, kdb_printf); -+ } -+ } else if (argc == 0) { -+ for_each_online_cpu(cpu) -+ kdb_runqueue(cpu, kdb_printf); -+ } else { -+ /* More than one arg */ -+ kdb_printf("Specify one cpu number\n"); -+ } -+ return ret; -+} -+ -+static int __init kdbm_sched_init(void) -+{ -+ kdb_register("rq", kdbm_runqueues, "", "Display runqueue for ", 0); -+ kdb_register("rqa", kdbm_runqueues, "", "Display all runqueues", 0); -+ return 0; -+} -+ -+static void __exit kdbm_sched_exit(void) -+{ -+ kdb_unregister("rq"); -+ kdb_unregister("rqa"); -+} -+ -+module_init(kdbm_sched_init) -+module_exit(kdbm_sched_exit) ---- /dev/null -+++ b/kdb/modules/kdbm_task.c -@@ -0,0 +1,196 @@ -+/* -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. 
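The module init/exit pairs above and below register and unregister every command by hand, which makes it easy for the two lists to drift apart. One possible alternative, shown here only as a sketch and not as what this patch does, is to drive both paths from a single table. The kdb_register()/kdb_unregister() calls mirror the ones used elsewhere in this patch; kdb_func_t is assumed to be the handler type from kdb.h, and the table entries stand in for whichever handlers the module actually provides.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kdb.h>

struct kdbm_cmd {                /* hypothetical helper, not in the patch */
    char *name;
    kdb_func_t func;
    char *usage;
    char *help;
};

static struct kdbm_cmd kdbm_cmds[] = {
    { "rq",  kdbm_runqueues, "<cpu>", "Display runqueue for <cpu>" },
    { "rqa", kdbm_runqueues, "",      "Display all runqueues" },
};

static int __init kdbm_table_init(void)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(kdbm_cmds); i++)
        kdb_register(kdbm_cmds[i].name, kdbm_cmds[i].func,
                     kdbm_cmds[i].usage, kdbm_cmds[i].help, 0);
    return 0;
}

static void __exit kdbm_table_exit(void)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(kdbm_cmds); i++)
        kdb_unregister(kdbm_cmds[i].name);
}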
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_AUTHOR("SGI"); -+MODULE_DESCRIPTION("Debug struct task and sigset information"); -+MODULE_LICENSE("GPL"); -+ -+static char * -+kdb_cpus_allowed_string(struct task_struct *tp) -+{ -+ static char maskbuf[NR_CPUS * 8]; -+ if (cpus_equal(tp->cpus_allowed, cpu_online_map)) -+ strcpy(maskbuf, "ALL"); -+ else if (cpus_empty(tp->cpus_allowed)) -+ strcpy(maskbuf, "NONE"); -+ else if (cpus_weight(tp->cpus_allowed) == 1) -+ snprintf(maskbuf, sizeof(maskbuf), "ONLY(%d)", first_cpu(tp->cpus_allowed)); -+ else -+ cpulist_scnprintf(maskbuf, sizeof(maskbuf), &tp->cpus_allowed); -+ return maskbuf; -+} -+ -+static int -+kdbm_task(int argc, const char **argv) -+{ -+ unsigned long addr; -+ long offset=0; -+ int nextarg; -+ int e = 0; -+ struct task_struct *tp = NULL, *tp1; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((e = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) != 0) -+ return(e); -+ -+ if (!(tp = kmalloc(sizeof(*tp), GFP_ATOMIC))) { -+ kdb_printf("%s: cannot kmalloc tp\n", __FUNCTION__); -+ goto out; -+ } -+ if ((e = kdb_getarea(*tp, addr))) { -+ kdb_printf("%s: invalid task address\n", __FUNCTION__); -+ goto out; -+ } -+ -+ tp1 = (struct task_struct *)addr; -+ kdb_printf( -+ "struct task at 0x%lx, pid=%d flags=0x%x state=%ld comm=\"%s\"\n", -+ addr, tp->pid, tp->flags, tp->state, tp->comm); -+ -+ kdb_printf(" cpu=%d policy=%u ", kdb_process_cpu(tp), tp->policy); -+ kdb_printf( -+ "prio=%d static_prio=%d cpus_allowed=", -+ tp->prio, tp->static_prio); -+ { -+ /* The cpus allowed string may be longer than kdb_printf() can -+ * handle. Print it in chunks. -+ */ -+ char c, *p; -+ p = kdb_cpus_allowed_string(tp); -+ while (1) { -+ if (strlen(p) < 100) { -+ kdb_printf("%s", p); -+ break; -+ } -+ c = p[100]; -+ p[100] = '\0'; -+ kdb_printf("%s", p); -+ p[100] = c; -+ p += 100; -+ } -+ } -+ kdb_printf(" &thread=0x%p\n", &tp1->thread); -+ -+ kdb_printf(" need_resched=%d ", -+ test_tsk_thread_flag(tp, TIF_NEED_RESCHED)); -+ kdb_printf( -+ "time_slice=%u", -+ tp->rt.time_slice); -+ kdb_printf(" lock_depth=%d\n", tp->lock_depth); -+ -+ kdb_printf( -+ " fs=0x%p files=0x%p mm=0x%p\n", -+ tp->fs, tp->files, tp->mm); -+ -+ if (tp->sysvsem.undo_list) -+ kdb_printf( -+ " sysvsem.sem_undo refcnt %d list_proc=0x%p\n", -+ atomic_read(&tp->sysvsem.undo_list->refcnt), -+ &tp->sysvsem.undo_list->list_proc); -+ -+ kdb_printf( -+ " signal=0x%p &blocked=0x%p &pending=0x%p\n", -+ tp->signal, &tp1->blocked, &tp1->pending); -+ -+ kdb_printf( -+ " utime=%ld stime=%ld cutime=%ld cstime=%ld\n", -+ tp->utime, tp->stime, -+ tp->signal ? tp->signal->cutime : 0L, -+ tp->signal ? 
tp->signal->cstime : 0L); -+ -+ kdb_printf(" thread_info=0x%p\n", task_thread_info(tp)); -+ kdb_printf(" ti flags=0x%lx\n", (unsigned long)task_thread_info(tp)->flags); -+ -+#ifdef CONFIG_NUMA -+ kdb_printf( -+ " mempolicy=0x%p il_next=%d\n", -+ tp->mempolicy, tp->il_next); -+#endif -+ -+out: -+ if (tp) -+ kfree(tp); -+ return e; -+} -+ -+static int -+kdbm_sigset(int argc, const char **argv) -+{ -+ sigset_t *sp = NULL; -+ unsigned long addr; -+ long offset=0; -+ int nextarg; -+ int e = 0; -+ int i; -+ char fmt[32]; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+#ifndef _NSIG_WORDS -+ kdb_printf("unavailable on this platform, _NSIG_WORDS not defined.\n"); -+#else -+ nextarg = 1; -+ if ((e = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) != 0) -+ return(e); -+ -+ if (!(sp = kmalloc(sizeof(*sp), GFP_ATOMIC))) { -+ kdb_printf("%s: cannot kmalloc sp\n", __FUNCTION__); -+ goto out; -+ } -+ if ((e = kdb_getarea(*sp, addr))) { -+ kdb_printf("%s: invalid sigset address\n", __FUNCTION__); -+ goto out; -+ } -+ -+ sprintf(fmt, "[%%d]=0x%%0%dlx ", (int)sizeof(sp->sig[0])*2); -+ kdb_printf("sigset at 0x%p : ", sp); -+ for (i=_NSIG_WORDS-1; i >= 0; i--) { -+ if (i == 0 || sp->sig[i]) { -+ kdb_printf(fmt, i, sp->sig[i]); -+ } -+ } -+ kdb_printf("\n"); -+#endif /* _NSIG_WORDS */ -+ -+out: -+ if (sp) -+ kfree(sp); -+ return e; -+} -+ -+static int __init kdbm_task_init(void) -+{ -+ kdb_register("task", kdbm_task, "", "Display task_struct", 0); -+ kdb_register("sigset", kdbm_sigset, "", "Display sigset_t", 0); -+ -+ return 0; -+} -+ -+static void __exit kdbm_task_exit(void) -+{ -+ kdb_unregister("task"); -+ kdb_unregister("sigset"); -+} -+ -+module_init(kdbm_task_init) -+module_exit(kdbm_task_exit) ---- /dev/null -+++ b/kdb/modules/kdbm_vm.c -@@ -0,0 +1,1041 @@ -+/* -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. 
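kdbm_task above prints the cpus_allowed string in 100-character pieces, temporarily planting a NUL in the buffer so each kdb_printf() call only ever sees a bounded argument. A userspace sketch of the same chunking, using printf's precision specifier instead of modifying the string (the names and the 100-byte limit are just the example's assumptions):

#include <stdio.h>
#include <string.h>

#define CHUNK 100

static void print_chunked(const char *s)
{
    size_t len = strlen(s);
    size_t off;

    /* emit at most CHUNK bytes per call to the bounded output routine */
    for (off = 0; off < len; off += CHUNK)
        printf("%.*s", (int)(len - off < CHUNK ? len - off : CHUNK),
               s + off);
    printf("\n");
}

int main(void)
{
    char big[512];
    memset(big, 'x', sizeof(big) - 1);
    big[sizeof(big) - 1] = '\0';
    print_chunked(big);
    return 0;
}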
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+MODULE_AUTHOR("SGI"); -+MODULE_DESCRIPTION("Debug VM information"); -+MODULE_LICENSE("GPL"); -+ -+struct __vmflags { -+ unsigned long mask; -+ char *name; -+}; -+ -+static struct __vmflags vmflags[] = { -+ { VM_READ, "VM_READ " }, -+ { VM_WRITE, "VM_WRITE " }, -+ { VM_EXEC, "VM_EXEC " }, -+ { VM_SHARED, "VM_SHARED " }, -+ { VM_MAYREAD, "VM_MAYREAD " }, -+ { VM_MAYWRITE, "VM_MAYWRITE " }, -+ { VM_MAYEXEC, "VM_MAYEXEC " }, -+ { VM_MAYSHARE, "VM_MAYSHARE " }, -+ { VM_GROWSDOWN, "VM_GROWSDOWN " }, -+ { VM_GROWSUP, "VM_GROWSUP " }, -+ { VM_PFNMAP, "VM_PFNMAP " }, -+ { VM_DENYWRITE, "VM_DENYWRITE " }, -+ { VM_EXECUTABLE, "VM_EXECUTABLE " }, -+ { VM_LOCKED, "VM_LOCKED " }, -+ { VM_IO, "VM_IO " }, -+ { VM_SEQ_READ, "VM_SEQ_READ " }, -+ { VM_RAND_READ, "VM_RAND_READ " }, -+ { VM_DONTCOPY, "VM_DONTCOPY " }, -+ { VM_DONTEXPAND, "VM_DONTEXPAND " }, -+ { VM_RESERVED, "VM_RESERVED " }, -+ { VM_ACCOUNT, "VM_ACCOUNT " }, -+ { VM_HUGETLB, "VM_HUGETLB " }, -+ { VM_NONLINEAR, "VM_NONLINEAR " }, -+ { VM_MAPPED_COPY, "VM_MAPPED_COPY " }, -+ { VM_INSERTPAGE, "VM_INSERTPAGE " }, -+ { 0, "" } -+}; -+ -+static int -+kdbm_print_vm(struct vm_area_struct *vp, unsigned long addr, int verbose_flg) -+{ -+ struct __vmflags *tp; -+ -+ kdb_printf("struct vm_area_struct at 0x%lx for %d bytes\n", -+ addr, (int) sizeof (struct vm_area_struct)); -+ -+ kdb_printf("vm_start = 0x%p vm_end = 0x%p\n", (void *) vp->vm_start, -+ (void *) vp->vm_end); -+ kdb_printf("vm_page_prot = 0x%llx\n", -+ (unsigned long long)pgprot_val(vp->vm_page_prot)); -+ -+ kdb_printf("vm_flags: "); -+ for (tp = vmflags; tp->mask; tp++) { -+ if (vp->vm_flags & tp->mask) { -+ kdb_printf(" %s", tp->name); -+ } -+ } -+ kdb_printf("\n"); -+ -+ if (!verbose_flg) -+ return 0; -+ -+ kdb_printf("vm_mm = 0x%p\n", (void *) vp->vm_mm); -+ kdb_printf("vm_next = 0x%p\n", (void *) vp->vm_next); -+ kdb_printf("shared.vm_set.list.next = 0x%p\n", (void *) vp->shared.vm_set.list.next); -+ kdb_printf("shared.vm_set.list.prev = 0x%p\n", (void *) vp->shared.vm_set.list.prev); -+ kdb_printf("shared.vm_set.parent = 0x%p\n", (void *) vp->shared.vm_set.parent); -+ kdb_printf("shared.vm_set.head = 0x%p\n", (void *) vp->shared.vm_set.head); -+ kdb_printf("anon_vma_node.next = 0x%p\n", (void *) vp->anon_vma_node.next); -+ kdb_printf("anon_vma_node.prev = 0x%p\n", (void *) vp->anon_vma_node.prev); -+ kdb_printf("vm_ops = 0x%p\n", (void *) vp->vm_ops); -+ if (vp->vm_ops != NULL) { -+ kdb_printf("vm_ops->open = 0x%p\n", vp->vm_ops->open); -+ kdb_printf("vm_ops->close = 0x%p\n", vp->vm_ops->close); -+ kdb_printf("vm_ops->fault = 0x%p\n", vp->vm_ops->fault); -+#ifdef HAVE_VMOP_MPROTECT -+ kdb_printf("vm_ops->mprotect = 0x%p\n", vp->vm_ops->mprotect); -+#endif -+#ifdef CONFIG_NUMA -+ kdb_printf("vm_ops->set_policy = 0x%p\n", vp->vm_ops->set_policy); -+ kdb_printf("vm_ops->get_policy = 0x%p\n", vp->vm_ops->get_policy); -+#endif -+ } -+ kdb_printf("vm_pgoff = 0x%lx\n", vp->vm_pgoff); -+ kdb_printf("vm_file = 0x%p\n", (void *) vp->vm_file); -+ kdb_printf("vm_private_data = 0x%p\n", vp->vm_private_data); -+#ifdef CONFIG_NUMA -+ kdb_printf("vm_policy = 0x%p\n", vp->vm_policy); -+#endif -+ -+ return 0; -+} -+ -+static int -+kdbm_print_vmp(struct vm_area_struct *vp, int verbose_flg) -+{ -+ struct __vmflags *tp; -+ -+ if (verbose_flg) { -+ kdb_printf("0x%lx: ", (unsigned long) vp); -+ } -+ -+ kdb_printf("0x%p 0x%p ", (void *) vp->vm_start, 
(void *) vp->vm_end); -+ -+ for (tp = vmflags; tp->mask; tp++) { -+ if (vp->vm_flags & tp->mask) { -+ kdb_printf(" %s", tp->name); -+ } -+ } -+ kdb_printf("\n"); -+ -+ return 0; -+} -+ -+ -+#ifdef CONFIG_NUMA -+#include -+ -+/* -+ * kdbm_mpol -+ * -+ * This function implements the 'mempolicy' command. -+ * Print a struct mempolicy. -+ * -+ * mempolicy
<address>	Print struct mempolicy at <address>
-+ */ -+static int -+kdbm_mpol(int argc, const char **argv) -+{ -+ unsigned long addr; -+ long offset = 0; -+ int nextarg; -+ int err = 0; -+ struct mempolicy *mp = NULL; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((err = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, -+ NULL)) != 0) -+ return(err); -+ -+ if (!(mp = kmalloc(sizeof(*mp), GFP_ATOMIC))) { -+ kdb_printf("%s: cannot kmalloc mp\n", __FUNCTION__); -+ goto out; -+ } -+ -+ if ((err = kdb_getarea(*mp, addr))) { -+ kdb_printf("%s: invalid mempolicy address\n", __FUNCTION__); -+ goto out; -+ } -+ -+ kdb_printf("struct mempolicy at 0x%p\n", (struct mempolicy *)addr); -+ kdb_printf(" refcnt %d\n", atomic_read(&mp->refcnt)); -+ -+ switch (mp->mode) { -+ case MPOL_DEFAULT: -+ kdb_printf(" mode %d (MPOL_DEFAULT)\n", mp->mode); -+ break; -+ -+ case MPOL_PREFERRED: -+ kdb_printf(" mode %d (MPOL_PREFERRED)\n", mp->mode); -+ if (mp->flags & MPOL_F_LOCAL) -+ kdb_printf(" preferred_node local\n"); -+ else -+ kdb_printf(" preferred_node %d\n", mp->v.preferred_node); -+ break; -+ -+ case MPOL_BIND: -+ case MPOL_INTERLEAVE: -+ { -+ int i, nlongs; -+ unsigned long *longp; -+ -+ kdb_printf(" mode %d (%s)\n", mp->mode, -+ mp->mode == MPOL_INTERLEAVE -+ ? "MPOL_INTERLEAVE" -+ : "MPOL_BIND"); -+ nlongs = (int)BITS_TO_LONGS(MAX_NUMNODES); -+ kdb_printf(" nodes:"); -+ longp = mp->v.nodes.bits; -+ for (i = 0; i < nlongs; i++, longp++) -+ kdb_printf(" 0x%lx ", *longp); -+ kdb_printf("\n"); -+ break; -+ } -+ -+ default: -+ kdb_printf(" mode %d (unknown)\n", mp->mode); -+ break; -+ } -+out: -+ if (mp) -+ kfree(mp); -+ return err; -+} -+ -+#endif /* CONFIG_NUMA */ -+ -+/* -+ * kdbm_pgdat -+ * -+ * This function implements the 'pgdat' command. -+ * Print a struct pglist_data (pg_dat_t). -+ * -+ * pgdat Print struct pglist_data for node . -+ * -+ * Print pglist_data for node 0 if node_id not specified, -+ * or print the one pglist_data structure if !CONFIG_NUMA. 
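For MPOL_BIND and MPOL_INTERLEAVE, kdbm_mpol above dumps the policy's nodemask as BITS_TO_LONGS(MAX_NUMNODES) hex words. A userspace sketch of that representation (MAX_NODES and the sample bits are arbitrary illustrative values):

#include <stdio.h>
#include <limits.h>

#define MAX_NODES 128
#define BITS_PER_LONG_UL (sizeof(unsigned long) * CHAR_BIT)
#define LONGS_FOR_BITS(n) (((n) + BITS_PER_LONG_UL - 1) / BITS_PER_LONG_UL)

int main(void)
{
    unsigned long nodes[LONGS_FOR_BITS(MAX_NODES)] = { 0 };
    int i;

    nodes[0] = 0x5;                     /* nodes 0 and 2 set */
    printf("nodes:");
    for (i = 0; i < (int)LONGS_FOR_BITS(MAX_NODES); i++)
        printf(" 0x%lx", nodes[i]);
    printf("\n");
    return 0;
}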
-+ */ -+static int -+kdbm_pgdat(int argc, const char **argv) -+{ -+ int err = 0, node_id = 0, i; -+ pg_data_t *pgdatp = NULL; -+ -+#ifdef CONFIG_NUMA -+ if (argc > 1) -+ return KDB_ARGCOUNT; -+ if (argc == 1) { -+ int nextarg; -+ long offset = 0; -+ unsigned long node_id_ul; -+ -+ nextarg = 1; -+ if ((err = kdbgetaddrarg(argc, argv, &nextarg, &node_id_ul, -+ &offset, NULL)) != 0) { -+ return(err); -+ } -+ node_id = (int)node_id_ul; -+ } -+#endif -+ for_each_online_pgdat(pgdatp) { -+ if (pgdatp->node_id == node_id) -+ break; -+ } -+ if (!pgdatp) { -+ kdb_printf("%s: specified node not found\n", __FUNCTION__); -+ return 0; -+ } -+ kdb_printf("struct pglist_data at 0x%p node_id = %d\n", -+ pgdatp, pgdatp->node_id); -+ -+ for (i = 0; i < MAX_ZONELISTS; i++) { -+ int zr; -+ struct zoneref *zonerefp; -+ struct zone *zonep; -+ -+ zonerefp = pgdatp->node_zonelists[i]._zonerefs; -+ kdb_printf(" _zonerefs[%d] at 0x%p\n", i, zonerefp); -+ -+ for (zr = 0; zr <= MAX_ZONES_PER_ZONELIST; zr++, zonerefp++) { -+ int z; -+ pg_data_t *tmp_pgdatp; -+ -+ zonep = zonelist_zone(zonerefp); -+ if (!zonep) -+ break; -+ -+ kdb_printf(" 0x%p", zonep); -+ -+ for_each_online_pgdat(tmp_pgdatp) { -+ for (z = 0; z < MAX_NR_ZONES; z++) { -+ if (zonep == &tmp_pgdatp->node_zones[z]) { -+ kdb_printf (" (node %d node_zones[%d])", -+ tmp_pgdatp->node_id, z); -+ break; -+ } -+ } -+ if (z != MAX_NR_ZONES) -+ break; /* found it */ -+ } -+ kdb_printf("\n"); -+ } -+ } -+ -+ kdb_printf(" nr_zones = %d", pgdatp->nr_zones); -+#ifdef CONFIG_FLAT_NODE_MEM_MAP -+ kdb_printf(" node_mem_map = 0x%p\n", pgdatp->node_mem_map); -+#endif -+ kdb_printf(" bdata = 0x%p", pgdatp->bdata); -+ kdb_printf(" node_start_pfn = 0x%lx\n", pgdatp->node_start_pfn); -+ kdb_printf(" node_present_pages = %ld (0x%lx)\n", -+ pgdatp->node_present_pages, pgdatp->node_present_pages); -+ kdb_printf(" node_spanned_pages = %ld (0x%lx)\n", -+ pgdatp->node_spanned_pages, pgdatp->node_spanned_pages); -+ kdb_printf(" kswapd = 0x%p\n", pgdatp->kswapd); -+ -+ return err; -+} -+ -+/* -+ * kdbm_vm -+ * -+ * This function implements the 'vm' command. Print a vm_area_struct. -+ * -+ * vm [-v]
<vaddr>	Print vm_area_struct at <vaddr>
-+ * vmp [-v] Print all vm_area_structs for -+ */ -+ -+static int -+kdbm_vm(int argc, const char **argv) -+{ -+ unsigned long addr; -+ long offset = 0; -+ int nextarg; -+ int diag; -+ int verbose_flg = 0; -+ -+ if (argc == 2) { -+ if (strcmp(argv[1], "-v") != 0) { -+ return KDB_ARGCOUNT; -+ } -+ verbose_flg = 1; -+ } else if (argc != 1) { -+ return KDB_ARGCOUNT; -+ } -+ -+ if (strcmp(argv[0], "vmp") == 0) { -+ struct task_struct *g, *tp; -+ struct vm_area_struct *vp; -+ pid_t pid; -+ -+ if ((diag = kdbgetularg(argv[argc], (unsigned long *) &pid))) -+ return diag; -+ -+ kdb_do_each_thread(g, tp) { -+ if (tp->pid == pid) { -+ if (tp->mm != NULL) { -+ if (verbose_flg) -+ kdb_printf -+ ("vm_area_struct "); -+ kdb_printf -+ ("vm_start vm_end vm_flags\n"); -+ vp = tp->mm->mmap; -+ while (vp != NULL) { -+ kdbm_print_vmp(vp, verbose_flg); -+ vp = vp->vm_next; -+ } -+ } -+ return 0; -+ } -+ } kdb_while_each_thread(g, tp); -+ -+ kdb_printf("No process with pid == %d found\n", pid); -+ -+ } else { -+ struct vm_area_struct v; -+ -+ nextarg = argc; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, -+ NULL)) -+ || (diag = kdb_getarea(v, addr))) -+ return (diag); -+ -+ kdbm_print_vm(&v, addr, verbose_flg); -+ } -+ -+ return 0; -+} -+ -+static int -+kdbm_print_pte(pte_t * pte) -+{ -+ kdb_printf("0x%lx (", (unsigned long) pte_val(*pte)); -+ -+ if (pte_present(*pte)) { -+#ifdef pte_exec -+ if (pte_exec(*pte)) -+ kdb_printf("X"); -+#endif -+ if (pte_write(*pte)) -+ kdb_printf("W"); -+#ifdef pte_read -+ if (pte_read(*pte)) -+ kdb_printf("R"); -+#endif -+ if (pte_young(*pte)) -+ kdb_printf("A"); -+ if (pte_dirty(*pte)) -+ kdb_printf("D"); -+ -+ } else { -+ kdb_printf("OFFSET=0x%lx ", swp_offset(pte_to_swp_entry(*pte))); -+ kdb_printf("TYPE=0x%ulx", swp_type(pte_to_swp_entry(*pte))); -+ } -+ -+ kdb_printf(")"); -+ -+ /* final newline is output by caller of kdbm_print_pte() */ -+ -+ return 0; -+} -+ -+/* -+ * kdbm_pte -+ * -+ * This function implements the 'pte' command. Print all pte_t structures -+ * that map to the given virtual address range (
<vaddr> through <vaddr>
-+ * plus ) for the given process. The default value for nbytes is -+ * one. -+ * -+ * pte -m
<mm> <vaddr> [<nbytes>]	Print all pte_t structures for -+ * virtual <vaddr>
in address space -+ * of <mm> which is a pointer to a -+ * mm_struct -+ * pte -p <pid>
<vaddr> [<nbytes>]	Print all pte_t structures for -+ * virtual <vaddr>
in address space -+ * of -+ */ -+ -+static int -+kdbm_pte(int argc, const char **argv) -+{ -+ unsigned long addr; -+ long offset = 0; -+ int nextarg; -+ unsigned long nbytes = 1; -+ long npgs; -+ int diag; -+ int found; -+ pid_t pid; -+ struct task_struct *tp; -+ struct mm_struct *mm, copy_of_mm; -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ pte_t *pte; -+ -+ if (argc < 3 || argc > 4) { -+ return KDB_ARGCOUNT; -+ } -+ -+ if (strcmp(argv[1], "-p") == 0) { -+ if ((diag = kdbgetularg(argv[2], (unsigned long *) &pid))) { -+ return diag; -+ } -+ -+ found = 0; -+ for_each_process(tp) { -+ if (tp->pid == pid) { -+ if (tp->mm != NULL) { -+ found = 1; -+ break; -+ } -+ kdb_printf("task structure's mm field is NULL\n"); -+ return 0; -+ } -+ } -+ -+ if (!found) { -+ kdb_printf("No process with pid == %d found\n", pid); -+ return 0; -+ } -+ mm = tp->mm; -+ } else if (strcmp(argv[1], "-m") == 0) { -+ -+ -+ nextarg = 2; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, -+ NULL)) -+ || (diag = kdb_getarea(copy_of_mm, addr))) -+ return (diag); -+ mm = ©_of_mm; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ -+ if ((diag = kdbgetularg(argv[3], &addr))) { -+ return diag; -+ } -+ -+ if (argc == 4) { -+ if ((diag = kdbgetularg(argv[4], &nbytes))) { -+ return diag; -+ } -+ } -+ -+ kdb_printf("vaddr pte\n"); -+ -+ npgs = ((((addr & ~PAGE_MASK) + nbytes) + ~PAGE_MASK) >> PAGE_SHIFT); -+ while (npgs-- > 0) { -+ -+ kdb_printf("0x%p ", (void *) (addr & PAGE_MASK)); -+ -+ pgd = pgd_offset(mm, addr); -+ if (pgd_present(*pgd)) { -+ pud = pud_offset(pgd, addr); -+ if (pud_present(*pud)) { -+ pmd = pmd_offset(pud, addr); -+ if (pmd_present(*pmd)) { -+ pte = pte_offset_map(pmd, addr); -+ if (pte_present(*pte)) { -+ kdbm_print_pte(pte); -+ } -+ } -+ } -+ } -+ -+ kdb_printf("\n"); -+ addr += PAGE_SIZE; -+ } -+ -+ return 0; -+} -+ -+/* -+ * kdbm_rpte -+ * -+ * This function implements the 'rpte' command. Print all pte_t structures -+ * that contain the given physical page range ( through -+ * plus ) for the given process. The default value for npages is -+ * one. 
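kdbm_pte above computes npgs, the number of pages spanned by the requested range, from the in-page offset plus nbytes rounded up to a whole page. A small userspace check of that arithmetic (the 4K page size is assumed only for the example):

#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

static unsigned long pages_spanned(unsigned long addr, unsigned long nbytes)
{
    /* same shape as the npgs expression in kdbm_pte */
    return (((addr & ~EX_PAGE_MASK) + nbytes) + ~EX_PAGE_MASK) >> EX_PAGE_SHIFT;
}

int main(void)
{
    /* one byte inside a page -> 1 page; two bytes straddling a page
     * boundary -> 2 pages */
    printf("%lu %lu\n", pages_spanned(0x1000, 1), pages_spanned(0x1fff, 2));
    return 0;
}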
-+ * -+ * rpte -m [] Print all pte_t structures for -+ * physical page in address space -+ * of which is a pointer to a -+ * mm_struct -+ * rpte -p [] Print all pte_t structures for -+ * physical page in address space -+ * of -+ */ -+ -+static int -+kdbm_rpte(int argc, const char **argv) -+{ -+ unsigned long addr; -+ unsigned long pfn; -+ long offset = 0; -+ int nextarg; -+ unsigned long npages = 1; -+ int diag; -+ int found; -+ pid_t pid; -+ struct task_struct *tp; -+ struct mm_struct *mm, copy_of_mm; -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ pte_t *pte; -+ unsigned long g, u, m, t; -+ -+ if (argc < 3 || argc > 4) { -+ return KDB_ARGCOUNT; -+ } -+ -+ if (strcmp(argv[1], "-p") == 0) { -+ if ((diag = kdbgetularg(argv[2], (unsigned long *) &pid))) { -+ return diag; -+ } -+ -+ found = 0; -+ for_each_process(tp) { -+ if (tp->pid == pid) { -+ if (tp->mm != NULL) { -+ found = 1; -+ break; -+ } -+ kdb_printf("task structure's mm field is NULL\n"); -+ return 0; -+ } -+ } -+ -+ if (!found) { -+ kdb_printf("No process with pid == %d found\n", pid); -+ return 0; -+ } -+ mm = tp->mm; -+ } else if (strcmp(argv[1], "-m") == 0) { -+ -+ -+ nextarg = 2; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, -+ NULL)) -+ || (diag = kdb_getarea(copy_of_mm, addr))) -+ return (diag); -+ mm = ©_of_mm; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ -+ if ((diag = kdbgetularg(argv[3], &pfn))) { -+ return diag; -+ } -+ -+ if (argc == 4) { -+ if ((diag = kdbgetularg(argv[4], &npages))) { -+ return diag; -+ } -+ } -+ -+ /* spaces after vaddr depends on sizeof(unsigned long) */ -+ kdb_printf("pfn vaddr%*s pte\n", -+ (int)(2*sizeof(unsigned long) + 2 - 5), " "); -+ -+ for (g = 0, pgd = pgd_offset(mm, 0UL); g < PTRS_PER_PGD; ++g, ++pgd) { -+ if (pgd_none(*pgd) || pgd_bad(*pgd)) -+ continue; -+ for (u = 0, pud = pud_offset(pgd, 0UL); u < PTRS_PER_PUD; ++u, ++pud) { -+ if (pud_none(*pud) || pud_bad(*pud)) -+ continue; -+ for (m = 0, pmd = pmd_offset(pud, 0UL); m < PTRS_PER_PMD; ++m, ++pmd) { -+ if (pmd_none(*pmd) || pmd_bad(*pmd)) -+ continue; -+ for (t = 0, pte = pte_offset_map(pmd, 0UL); t < PTRS_PER_PTE; ++t, ++pte) { -+ if (pte_none(*pte)) -+ continue; -+ if (pte_pfn(*pte) < pfn || pte_pfn(*pte) >= (pfn + npages)) -+ continue; -+ addr = g << PGDIR_SHIFT; -+#ifdef __ia64__ -+ /* IA64 plays tricks with the pgd mapping to save space. -+ * This reverses pgd_index(). 
-+ */ -+ { -+ unsigned long region = g >> (PAGE_SHIFT - 6); -+ unsigned long l1index = g - (region << (PAGE_SHIFT - 6)); -+ addr = (region << 61) + (l1index << PGDIR_SHIFT); -+ } -+#endif -+ addr += (m << PMD_SHIFT) + (t << PAGE_SHIFT); -+ kdb_printf("0x%-14lx " kdb_bfd_vma_fmt0 " ", -+ pte_pfn(*pte), addr); -+ kdbm_print_pte(pte); -+ kdb_printf("\n"); -+ } -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+static int -+kdbm_print_dentry(unsigned long daddr) -+{ -+ struct dentry d; -+ int diag; -+ char buf[256]; -+ -+ kdb_printf("Dentry at 0x%lx\n", daddr); -+ if ((diag = kdb_getarea(d, (unsigned long)daddr))) -+ return diag; -+ -+ if ((d.d_name.len > sizeof(buf)) || (diag = kdb_getarea_size(buf, (unsigned long)(d.d_name.name), d.d_name.len))) -+ kdb_printf(" d_name.len = %d d_name.name = 0x%p\n", -+ d.d_name.len, d.d_name.name); -+ else -+ kdb_printf(" d_name.len = %d d_name.name = 0x%p <%.*s>\n", -+ d.d_name.len, d.d_name.name, -+ (int)(d.d_name.len), d.d_name.name); -+ -+ kdb_printf(" d_count = %d d_flags = 0x%x d_inode = 0x%p\n", -+ atomic_read(&d.d_count), d.d_flags, d.d_inode); -+ -+ kdb_printf(" d_parent = 0x%p\n", d.d_parent); -+ -+ kdb_printf(" d_hash.nxt = 0x%p d_hash.prv = 0x%p\n", -+ d.d_hash.next, d.d_hash.pprev); -+ -+ kdb_printf(" d_lru.nxt = 0x%p d_lru.prv = 0x%p\n", -+ d.d_lru.next, d.d_lru.prev); -+ -+ kdb_printf(" d_child.nxt = 0x%p d_child.prv = 0x%p\n", -+ d.d_u.d_child.next, d.d_u.d_child.prev); -+ -+ kdb_printf(" d_subdirs.nxt = 0x%p d_subdirs.prv = 0x%p\n", -+ d.d_subdirs.next, d.d_subdirs.prev); -+ -+ kdb_printf(" d_alias.nxt = 0x%p d_alias.prv = 0x%p\n", -+ d.d_alias.next, d.d_alias.prev); -+ -+ kdb_printf(" d_op = 0x%p d_sb = 0x%p d_fsdata = 0x%p\n", -+ d.d_op, d.d_sb, d.d_fsdata); -+ -+ kdb_printf(" d_iname = %s\n", -+ d.d_iname); -+ -+ if (d.d_inode) { -+ struct inode i; -+ kdb_printf("\nInode Entry at 0x%p\n", d.d_inode); -+ if ((diag = kdb_getarea(i, (unsigned long)d.d_inode))) -+ return diag; -+ kdb_printf(" i_mode = 0%o i_nlink = %d i_rdev = 0x%x\n", -+ i.i_mode, i.i_nlink, i.i_rdev); -+ -+ kdb_printf(" i_ino = %ld i_count = %d\n", -+ i.i_ino, atomic_read(&i.i_count)); -+ -+ kdb_printf(" i_hash.nxt = 0x%p i_hash.prv = 0x%p\n", -+ i.i_hash.next, i.i_hash.pprev); -+ -+ kdb_printf(" i_list.nxt = 0x%p i_list.prv = 0x%p\n", -+ i.i_list.next, i.i_list.prev); -+ -+ kdb_printf(" i_dentry.nxt = 0x%p i_dentry.prv = 0x%p\n", -+ i.i_dentry.next, i.i_dentry.prev); -+ -+ } -+ kdb_printf("\n"); -+ return 0; -+} -+ -+static int -+kdbm_filp(int argc, const char **argv) -+{ -+ struct file f; -+ int nextarg; -+ unsigned long addr; -+ long offset; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) || -+ (diag = kdb_getarea(f, addr))) -+ return diag; -+ -+ kdb_printf("File Pointer at 0x%lx\n", addr); -+ -+ kdb_printf(" fu_list.nxt = 0x%p fu_list.prv = 0x%p\n", -+ f.f_u.fu_list.next, f.f_u.fu_list.prev); -+ -+ kdb_printf(" f_dentry = 0x%p f_vfsmnt = 0x%p f_op = 0x%p\n", -+ f.f_dentry, f.f_vfsmnt, f.f_op); -+ -+ kdb_printf(" f_count = " kdb_f_count_fmt -+ " f_flags = 0x%x f_mode = 0x%x\n", -+ atomic_long_read(&f.f_count), f.f_flags, f.f_mode); -+ -+ kdb_printf(" f_pos = %Ld\n", f.f_pos); -+#ifdef CONFIG_SECURITY -+ kdb_printf(" security = 0x%p\n", f.f_security); -+#endif -+ -+ kdb_printf(" private_data = 0x%p f_mapping = 0x%p\n\n", -+ f.private_data, f.f_mapping); -+ -+ return kdbm_print_dentry((unsigned long)f.f_dentry); -+} -+ -+static int -+kdbm_fl(int argc, const char **argv) -+{ -+ 
struct file_lock fl; -+ int nextarg; -+ unsigned long addr; -+ long offset; -+ int diag; -+ -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) || -+ (diag = kdb_getarea(fl, addr))) -+ return diag; -+ -+ kdb_printf("File_lock at 0x%lx\n", addr); -+ -+ kdb_printf(" fl_next = 0x%p fl_link.nxt = 0x%p fl_link.prv = 0x%p\n", -+ fl.fl_next, fl.fl_link.next, fl.fl_link.prev); -+ kdb_printf(" fl_block.nxt = 0x%p fl_block.prv = 0x%p\n", -+ fl.fl_block.next, fl.fl_block.prev); -+ kdb_printf(" fl_owner = 0x%p fl_pid = %d fl_wait = 0x%p\n", -+ fl.fl_owner, fl.fl_pid, &fl.fl_wait); -+ kdb_printf(" fl_file = 0x%p fl_flags = 0x%x\n", -+ fl.fl_file, fl.fl_flags); -+ kdb_printf(" fl_type = %d fl_start = 0x%llx fl_end = 0x%llx\n", -+ fl.fl_type, fl.fl_start, fl.fl_end); -+ -+ kdb_printf(" file_lock_operations"); -+ if (fl.fl_ops) -+ kdb_printf("\n fl_copy_lock = 0x%p fl_release_private = 0x%p\n", -+ fl.fl_ops->fl_copy_lock, fl.fl_ops->fl_release_private); -+ else -+ kdb_printf(" empty\n"); -+ -+ kdb_printf(" lock_manager_operations"); -+ if (fl.fl_lmops) -+ kdb_printf("\n fl_compare_owner = 0x%p fl_notify = 0x%p\n", -+ fl.fl_lmops->fl_compare_owner, fl.fl_lmops->fl_notify); -+ else -+ kdb_printf(" empty\n"); -+ -+ kdb_printf(" fl_fasync = 0x%p fl_break 0x%lx\n", -+ fl.fl_fasync, fl.fl_break_time); -+ -+ return 0; -+} -+ -+ -+static int -+kdbm_dentry(int argc, const char **argv) -+{ -+ int nextarg; -+ unsigned long addr; -+ long offset; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL))) -+ return diag; -+ -+ return kdbm_print_dentry(addr); -+} -+ -+static int -+kdbm_kobject(int argc, const char **argv) -+{ -+ struct kobject k; -+ int nextarg; -+ unsigned long addr; -+ long offset; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) || -+ (diag = kdb_getarea(k, addr))) -+ return diag; -+ -+ -+ kdb_printf("kobject at 0x%lx\n", addr); -+ -+ if (k.name) { -+ char c; -+ kdb_printf(" name 0x%p", k.name); -+ if (kdb_getarea(c, (unsigned long)k.name) == 0) -+ kdb_printf(" '%s'", k.name); -+ kdb_printf("\n"); -+ } -+ -+ if (k.name != kobject_name((struct kobject *)addr)) -+ kdb_printf(" name '%.20s'\n", k.name); -+ -+ kdb_printf(" kref.refcount %d'\n", atomic_read(&k.kref.refcount)); -+ -+ kdb_printf(" entry.next = 0x%p entry.prev = 0x%p\n", -+ k.entry.next, k.entry.prev); -+ -+ kdb_printf(" parent = 0x%p kset = 0x%p ktype = 0x%p sd = 0x%p\n", -+ k.parent, k.kset, k.ktype, k.sd); -+ -+ return 0; -+} -+ -+static int -+kdbm_sh(int argc, const char **argv) -+{ -+ int diag; -+ int nextarg; -+ unsigned long addr; -+ long offset = 0L; -+ struct Scsi_Host sh; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) || -+ (diag = kdb_getarea(sh, addr))) -+ return diag; -+ -+ kdb_printf("Scsi_Host at 0x%lx\n", addr); -+ kdb_printf("host_queue = 0x%p\n", sh.__devices.next); -+ kdb_printf("ehandler = 0x%p eh_action = 0x%p\n", -+ sh.ehandler, sh.eh_action); -+ kdb_printf("host_wait = 0x%p hostt = 0x%p\n", -+ &sh.host_wait, sh.hostt); -+ kdb_printf("host_failed = %d host_no = %d resetting = %d\n", -+ sh.host_failed, sh.host_no, sh.resetting); -+ kdb_printf("max id/lun/channel = [%d/%d/%d] this_id = %d\n", -+ sh.max_id, sh.max_lun, sh.max_channel, sh.this_id); -+ 
kdb_printf("can_queue = %d cmd_per_lun = %d sg_tablesize = %d u_isa_dma = %d\n", -+ sh.can_queue, sh.cmd_per_lun, sh.sg_tablesize, sh.unchecked_isa_dma); -+ kdb_printf("host_blocked = %d reverse_ordering = %d \n", -+ sh.host_blocked, sh.reverse_ordering); -+ -+ return 0; -+} -+ -+static int -+kdbm_sd(int argc, const char **argv) -+{ -+ int diag; -+ int nextarg; -+ unsigned long addr; -+ long offset = 0L; -+ struct scsi_device *sd = NULL; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL))) -+ goto out; -+ if (!(sd = kmalloc(sizeof(*sd), GFP_ATOMIC))) { -+ kdb_printf("kdbm_sd: cannot kmalloc sd\n"); -+ goto out; -+ } -+ if ((diag = kdb_getarea(*sd, addr))) -+ goto out; -+ -+ kdb_printf("scsi_device at 0x%lx\n", addr); -+ kdb_printf("next = 0x%p prev = 0x%p host = 0x%p\n", -+ sd->siblings.next, sd->siblings.prev, sd->host); -+ kdb_printf("device_busy = %d current_cmnd 0x%p\n", -+ sd->device_busy, sd->current_cmnd); -+ kdb_printf("id/lun/chan = [%d/%d/%d] single_lun = %d device_blocked = %d\n", -+ sd->id, sd->lun, sd->channel, sd->sdev_target->single_lun, sd->device_blocked); -+ kdb_printf("queue_depth = %d current_tag = %d scsi_level = %d\n", -+ sd->queue_depth, sd->current_tag, sd->scsi_level); -+ kdb_printf("%8.8s %16.16s %4.4s\n", sd->vendor, sd->model, sd->rev); -+out: -+ if (sd) -+ kfree(sd); -+ return diag; -+} -+ -+static int -+kdbm_sc(int argc, const char **argv) -+{ -+ int diag; -+ int nextarg; -+ unsigned long addr; -+ long offset = 0L; -+ struct scsi_cmnd *sc = NULL; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ nextarg = 1; -+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL))) -+ goto out; -+ if (!(sc = kmalloc(sizeof(*sc), GFP_ATOMIC))) { -+ kdb_printf("kdbm_sc: cannot kmalloc sc\n"); -+ goto out; -+ } -+ if ((diag = kdb_getarea(*sc, addr))) -+ goto out; -+ -+ kdb_printf("scsi_cmnd at 0x%lx\n", addr); -+ kdb_printf("device = 0x%p next = 0x%p\n", -+ sc->device, sc->list.next); -+ kdb_printf("serial_number = %ld retries = %d\n", -+ sc->serial_number, sc->retries); -+ kdb_printf("cmd_len = %d\n", sc->cmd_len); -+ kdb_printf("cmnd = [%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x]\n", -+ sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], sc->cmnd[4], -+ sc->cmnd[5], sc->cmnd[6], sc->cmnd[7], sc->cmnd[8], sc->cmnd[9], -+ sc->cmnd[10], sc->cmnd[11]); -+ kdb_printf("request_buffer = 0x%p request_bufflen = %d\n", -+ scsi_sglist(sc), scsi_bufflen(sc)); -+ kdb_printf("use_sg = %d\n", scsi_sg_count(sc)); -+ kdb_printf("underflow = %d transfersize = %d\n", -+ sc->underflow, sc->transfersize); -+ kdb_printf("tag = %d\n", sc->tag); -+ -+out: -+ if (sc) -+ kfree(sc); -+ return diag; -+} -+ -+static int __init kdbm_vm_init(void) -+{ -+ kdb_register("vm", kdbm_vm, "[-v] ", "Display vm_area_struct", 0); -+ kdb_register("vmp", kdbm_vm, "[-v] ", "Display all vm_area_struct for ", 0); -+#ifdef CONFIG_NUMA -+ kdb_register("mempolicy", kdbm_mpol, "", "Display mempolicy structure", 0); -+ kdb_register("pgdat", kdbm_pgdat, "", "Display pglist_data node structure", 0); -+#else -+ kdb_register("pgdat", kdbm_pgdat, "", "Display pglist_data node structure", 0); -+#endif -+ kdb_register("pte", kdbm_pte, "( -m | -p ) []", "Display pte_t for mm_struct or pid", 0); -+ kdb_register("rpte", kdbm_rpte, "( -m | -p ) []", "Find pte_t containing pfn for mm_struct or pid", 0); -+ kdb_register("dentry", kdbm_dentry, "", "Display interesting dentry stuff", 0); -+ 
kdb_register("kobject", kdbm_kobject, "", "Display interesting kobject stuff", 0); -+ kdb_register("filp", kdbm_filp, "", "Display interesting filp stuff", 0); -+ kdb_register("fl", kdbm_fl, "", "Display interesting file_lock stuff", 0); -+ kdb_register("sh", kdbm_sh, "", "Show scsi_host", 0); -+ kdb_register("sd", kdbm_sd, "", "Show scsi_device", 0); -+ kdb_register("sc", kdbm_sc, "", "Show scsi_cmnd", 0); -+ -+ return 0; -+} -+ -+static void __exit kdbm_vm_exit(void) -+{ -+ kdb_unregister("vm"); -+ kdb_unregister("vmp"); -+#ifdef CONFIG_NUMA -+ kdb_unregister("mempolicy"); -+#endif -+ kdb_unregister("pgdat"); -+ kdb_unregister("pte"); -+ kdb_unregister("rpte"); -+ kdb_unregister("dentry"); -+ kdb_unregister("kobject"); -+ kdb_unregister("filp"); -+ kdb_unregister("fl"); -+ kdb_unregister("sh"); -+ kdb_unregister("sd"); -+ kdb_unregister("sc"); -+} -+ -+module_init(kdbm_vm_init) -+module_exit(kdbm_vm_exit) ---- /dev/null -+++ b/kdb/modules/kdbm_x86.c -@@ -0,0 +1,1093 @@ -+/* -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Author: Vamsi Krishna S. -+ * (C) 2003 IBM Corporation. -+ * 2006-10-10 Keith Owens -+ * Reworked to include x86_64 support -+ * Copyright (c) 2006 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#if 0 -+#include -+#endif -+ -+MODULE_AUTHOR("Vamsi Krishna S./IBM"); -+MODULE_DESCRIPTION("x86 specific information (gdt/idt/ldt/page tables)"); -+MODULE_LICENSE("GPL"); -+ -+/* Isolate as many of the i386/x86_64 differences as possible in one spot */ -+ -+#ifdef CONFIG_X86_64 -+ -+#define KDB_X86_64 1 -+#define MOVLQ "movq" -+ -+typedef struct desc_struct kdb_desc_t; -+typedef struct gate_struct64 kdb_gate_desc_t; -+ -+#define KDB_SYS_DESC_OFFSET(d) ((unsigned long)d->offset_high << 32 | d->offset_middle << 16 | d->offset_low) -+#define KDB_SYS_DESC_CALLG_COUNT(d) 0 -+ -+#else /* !CONFIG_X86_64 */ -+ -+#define KDB_X86_64 0 -+#define MOVLQ "movl" -+ -+/* i386 has no detailed mapping for the 8 byte segment descriptor, copy the -+ * x86_64 one and merge the l and avl bits. -+ */ -+struct kdb_desc { -+ u16 limit0; -+ u16 base0; -+ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; -+ unsigned limit : 4, avl : 2, d : 1, g : 1, base2 : 8; -+} __attribute__((packed)); -+typedef struct kdb_desc kdb_desc_t; -+ -+/* i386 has no detailed mapping for the 8 byte gate descriptor, base it on the -+ * x86_64 one. 
-+ */ -+struct kdb_gate_desc { -+ u16 offset_low; -+ u16 segment; -+ unsigned res : 8, type : 4, s : 1, dpl : 2, p : 1; -+ u16 offset_middle; -+} __attribute__((packed)); -+typedef struct kdb_gate_desc kdb_gate_desc_t; -+ -+#define KDB_SYS_DESC_OFFSET(d) ((unsigned long)(d->offset_middle << 16 | d->offset_low)) -+#define KDB_SYS_DESC_CALLG_COUNT(d) ((unsigned int)(d->res & 0x0F)) -+ -+#endif /* CONFIG_X86_64 */ -+ -+#define KDB_SEL_MAX 0x2000 -+#define KDB_IDT_MAX 0x100 -+#define KDB_SYS_DESC_TYPE_TSS16 0x01 -+#define KDB_SYS_DESC_TYPE_LDT 0x02 -+#define KDB_SYS_DESC_TYPE_TSSB16 0x03 -+#define KDB_SYS_DESC_TYPE_CALLG16 0x04 -+#define KDB_SYS_DESC_TYPE_TASKG 0x05 -+#define KDB_SYS_DESC_TYPE_INTG16 0x06 -+#define KDB_SYS_DESC_TYPE_TRAP16 0x07 -+ -+#define KDB_SYS_DESC_TYPE_TSS 0x09 -+#define KDB_SYS_DESC_TYPE_TSSB 0x0b -+#define KDB_SYS_DESC_TYPE_CALLG 0x0c -+#define KDB_SYS_DESC_TYPE_INTG 0x0e -+#define KDB_SYS_DESC_TYPE_TRAPG 0x0f -+ -+#define KDB_SEG_DESC_TYPE_CODE 0x08 -+#define KDB_SEG_DESC_TYPE_CODE_R 0x02 -+#define KDB_SEG_DESC_TYPE_DATA_W 0x02 -+#define KDB_SEG_DESC_TYPE_CODE_C 0x02 /* conforming */ -+#define KDB_SEG_DESC_TYPE_DATA_D 0x02 /* expand-down */ -+#define KDB_SEG_DESC_TYPE_A 0x01 /* accessed */ -+ -+#define _LIMIT(d) ((unsigned long)((d)->limit << 16 | (d)->limit0)) -+#define KDB_SEG_DESC_LIMIT(d) ((d)->g ? ((_LIMIT(d)+1) << 12) -1 : _LIMIT(d)) -+ -+static unsigned long kdb_seg_desc_base(kdb_desc_t *d) -+{ -+ unsigned long base = d->base2 << 24 | d->base1 << 16 | d->base0; -+#ifdef CONFIG_X86_64 -+ switch (d->type) { -+ case KDB_SYS_DESC_TYPE_TSS: -+ case KDB_SYS_DESC_TYPE_TSSB: -+ case KDB_SYS_DESC_TYPE_LDT: -+ base += (unsigned long)(((struct ldttss_desc64 *)d)->base3) << 32; -+ break; -+ } -+#endif -+ return base; -+} -+ -+/* helper functions to display system registers in verbose mode */ -+static void display_gdtr(void) -+{ -+ struct desc_ptr gdtr; -+ -+ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr)); -+ kdb_printf("gdtr.address = " kdb_machreg_fmt0 ", gdtr.size = 0x%x\n", -+ gdtr.address, gdtr.size); -+ -+ return; -+} -+ -+static void display_ldtr(void) -+{ -+ struct desc_ptr gdtr; -+ unsigned long ldtr; -+ -+ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr)); -+ __asm__ __volatile__ ("sldt %0\n\t" : "=m"(ldtr)); -+ ldtr &= 0xfff8; /* extract the index */ -+ -+ kdb_printf("ldtr = " kdb_machreg_fmt0 " ", ldtr); -+ -+ if (ldtr < gdtr.size) { -+ kdb_desc_t *ldt_desc = -+ (kdb_desc_t *)(gdtr.address + ldtr); -+ kdb_printf("base=" kdb_machreg_fmt0 -+ ", limit=" kdb_machreg_fmt "\n", -+ kdb_seg_desc_base(ldt_desc), -+ KDB_SEG_DESC_LIMIT(ldt_desc)); -+ } else { -+ kdb_printf("invalid\n"); -+ } -+ -+ return; -+} -+ -+static void display_idtr(void) -+{ -+ struct desc_ptr idtr; -+ __asm__ __volatile__ ("sidt %0\n\t" : "=m"(idtr)); -+ kdb_printf("idtr.address = " kdb_machreg_fmt0 ", idtr.size = 0x%x\n", -+ idtr.address, idtr.size); -+ return; -+} -+ -+static const char *cr0_flags[] = { -+ "pe", "mp", "em", "ts", "et", "ne", NULL, NULL, -+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, -+ "wp", NULL, "am", NULL, NULL, NULL, NULL, NULL, -+ NULL, NULL, NULL, NULL, NULL, "nw", "cd", "pg"}; -+ -+static void display_cr0(void) -+{ -+ kdb_machreg_t cr0; -+ int i; -+ __asm__ (MOVLQ " %%cr0,%0\n\t":"=r"(cr0)); -+ kdb_printf("cr0 = " kdb_machreg_fmt0, cr0); -+ for (i = 0; i < ARRAY_SIZE(cr0_flags); i++) { -+ if (test_bit(i, &cr0) && cr0_flags[i]) -+ kdb_printf(" %s", cr0_flags[i]); -+ } -+ kdb_printf("\n"); -+ return; -+} -+ -+static void display_cr3(void) -+{ -+ kdb_machreg_t cr3; -+ 
__asm__ (MOVLQ " %%cr3,%0\n\t":"=r"(cr3)); -+ kdb_printf("cr3 = " kdb_machreg_fmt0 " ", cr3); -+ if (cr3 & 0x08) -+ kdb_printf("pwt "); -+ if (cr3 & 0x10) -+ kdb_printf("pcd "); -+ kdb_printf("%s=" kdb_machreg_fmt0 "\n", -+ KDB_X86_64 ? "pml4" : "pgdir", cr3 & PAGE_MASK); -+ return; -+} -+ -+static const char *cr4_flags[] = { -+ "vme", "pvi", "tsd", "de", -+ "pse", "pae", "mce", "pge", -+ "pce", "osfxsr" "osxmmexcpt"}; -+ -+static void display_cr4(void) -+{ -+ kdb_machreg_t cr4; -+ int i; -+ __asm__ (MOVLQ " %%cr4,%0\n\t":"=r"(cr4)); -+ kdb_printf("cr4 = " kdb_machreg_fmt0, cr4); -+ for (i = 0; i < ARRAY_SIZE(cr4_flags); i++) { -+ if (test_bit(i, &cr4)) -+ kdb_printf(" %s", cr4_flags[i]); -+ } -+ kdb_printf("\n"); -+ return; -+} -+ -+static void display_cr8(void) -+{ -+#ifdef CONFIG_X86_64 -+ kdb_machreg_t cr8; -+ __asm__ (MOVLQ " %%cr8,%0\n\t":"=r"(cr8)); -+ kdb_printf("cr8 = " kdb_machreg_fmt0 "\n", cr8); -+ return; -+#endif /* CONFIG_X86_64 */ -+} -+ -+static char *dr_type_name[] = { "exec", "write", "io", "rw" }; -+ -+static void display_dr_status(int nr, int enabled, int local, int len, int type) -+{ -+ if (!enabled) { -+ kdb_printf("\tdebug register %d: not enabled\n", nr); -+ return; -+ } -+ -+ kdb_printf(" debug register %d: %s, len = %d, type = %s\n", -+ nr, -+ local? " local":"global", -+ len, -+ dr_type_name[type]); -+} -+ -+static void display_dr(void) -+{ -+ kdb_machreg_t dr0, dr1, dr2, dr3, dr6, dr7; -+ int dbnr, set; -+ -+ __asm__ (MOVLQ " %%db0,%0\n\t":"=r"(dr0)); -+ __asm__ (MOVLQ " %%db1,%0\n\t":"=r"(dr1)); -+ __asm__ (MOVLQ " %%db2,%0\n\t":"=r"(dr2)); -+ __asm__ (MOVLQ " %%db3,%0\n\t":"=r"(dr3)); -+ __asm__ (MOVLQ " %%db6,%0\n\t":"=r"(dr6)); -+ __asm__ (MOVLQ " %%db7,%0\n\t":"=r"(dr7)); -+ -+ kdb_printf("dr0 = " kdb_machreg_fmt0 " dr1 = " kdb_machreg_fmt0 -+ " dr2 = " kdb_machreg_fmt0 " dr3 = " kdb_machreg_fmt0 "\n", -+ dr0, dr1, dr2, dr3); -+ kdb_printf("dr6 = " kdb_machreg_fmt0 " ", dr6); -+ dbnr = dr6 & DR6_DR_MASK; -+ if (dbnr) { -+ int nr; -+ switch(dbnr) { -+ case 1: -+ nr = 0; break; -+ case 2: -+ nr = 1; break; -+ case 4: -+ nr = 2; break; -+ default: -+ nr = 3; break; -+ } -+ kdb_printf("debug register hit = %d", nr); -+ } else if (dr6 & DR_STEP) { -+ kdb_printf("single step"); -+ } else if (dr6 & DR_SWITCH) { -+ kdb_printf("task switch"); -+ } -+ kdb_printf("\n"); -+ -+ kdb_printf("dr7 = " kdb_machreg_fmt0 "\n", dr7); -+ set = DR7_L0(dr7) || DR7_G0(dr7); -+ display_dr_status(0, set, DR7_L0(dr7), DR7_LEN0(dr7), DR7_RW0(dr7)); -+ set = DR7_L1(dr7) || DR7_G1(dr7); -+ display_dr_status(1, set, DR7_L1(dr7), DR7_LEN1(dr7), DR7_RW1(dr7)); -+ set = DR7_L2(dr7) || DR7_G2(dr7); -+ display_dr_status(2, set, DR7_L2(dr7), DR7_LEN2(dr7), DR7_RW2(dr7)); -+ set = DR7_L3(dr7) || DR7_G3(dr7); -+ display_dr_status(3, set, DR7_L3(dr7), DR7_LEN3(dr7), DR7_RW3(dr7)); -+} -+ -+static char *set_eflags[] = { -+ "carry", NULL, "parity", NULL, "adjust", NULL, "zero", "sign", -+ "trace", "intr-on", "dir", "overflow", NULL, NULL, "nestedtask", NULL, -+ "resume", "vm", "align", "vif", "vip", "id"}; -+ -+static void display_eflags(unsigned long ef) -+{ -+ int i, iopl; -+ kdb_printf("eflags = " kdb_machreg_fmt0 " ", ef); -+ for (i = 0; i < ARRAY_SIZE(set_eflags); i++) { -+ if (test_bit(i, &ef) && set_eflags[i]) -+ kdb_printf("%s ", set_eflags[i]); -+ } -+ -+ iopl = (ef & 0x00003000) >> 12; -+ kdb_printf("iopl=%d\n", iopl); -+ return; -+} -+ -+static void display_tss(struct tss_struct *t) -+{ -+#ifdef CONFIG_X86_64 -+ int i; -+ kdb_printf(" sp0 = 0x%016Lx, sp1 = 0x%016Lx\n", -+ t->x86_tss.sp0, 
t->x86_tss.sp1); -+ kdb_printf(" sp2 = 0x%016Lx\n", t->x86_tss.sp2); -+ for (i = 0; i < ARRAY_SIZE(t->x86_tss.ist); ++i) -+ kdb_printf(" ist[%d] = 0x%016Lx\n", -+ i, t->x86_tss.ist[i]); -+ kdb_printf(" iomap = 0x%04x\n", t->x86_tss.io_bitmap_base); -+#else /* !CONFIG_X86_64 */ -+ kdb_printf(" cs = %04x, ip = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.es, t->x86_tss.ip); -+ kdb_printf(" ss = %04x, sp = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.ss, t->x86_tss.sp); -+ kdb_printf(" ss0 = %04x, sp0 = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.ss0, t->x86_tss.sp0); -+ kdb_printf(" ss1 = %04x, sp1 = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.ss1, t->x86_tss.sp1); -+ kdb_printf(" ss2 = %04x, sp2 = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.ss2, t->x86_tss.sp2); -+ kdb_printf(" ldt = %04x, cr3 = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.ldt, t->x86_tss.__cr3); -+ kdb_printf(" ds = %04x, es = %04x fs = %04x gs = %04x\n", -+ t->x86_tss.ds, t->x86_tss.es, t->x86_tss.fs, t->x86_tss.gs); -+ kdb_printf(" ax = " kdb_machreg_fmt0 ", bx = " kdb_machreg_fmt0 -+ " cx = " kdb_machreg_fmt0 " dx = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.ax, t->x86_tss.bx, t->x86_tss.cx, t->x86_tss.dx); -+ kdb_printf(" si = " kdb_machreg_fmt0 ", di = " kdb_machreg_fmt0 -+ " bp = " kdb_machreg_fmt0 "\n", -+ t->x86_tss.si, t->x86_tss.di, t->x86_tss.bp); -+ kdb_printf(" trace = %d, iomap = 0x%04x\n", t->x86_tss.trace, t->x86_tss.io_bitmap_base); -+#endif /* CONFIG_X86_64 */ -+} -+ -+static char *gate_desc_types[] = { -+#ifdef CONFIG_X86_64 -+ "reserved-0", "reserved-1", "ldt", "reserved-3", -+ "reserved-4", "reserved-5", "reserved-6", "reserved-7", -+ "reserved-8", "tss-avlb", "reserved-10", "tss-busy", -+ "callgate", "reserved-13", "intgate", "trapgate", -+#else /* CONFIG_X86_64 */ -+ "reserved-0", "tss16-avlb", "ldt", "tss16-busy", -+ "callgate16", "taskgate", "intgate16", "trapgate16", -+ "reserved-8", "tss-avlb", "reserved-10", "tss-busy", -+ "callgate", "reserved-13", "intgate", "trapgate", -+#endif /* CONFIG_X86_64 */ -+}; -+ -+static void -+display_gate_desc(kdb_gate_desc_t *d) -+{ -+ kdb_printf("%-11s ", gate_desc_types[d->type]); -+ -+ switch(d->type) { -+ case KDB_SYS_DESC_TYPE_LDT: -+ kdb_printf("base="); -+ kdb_symbol_print(kdb_seg_desc_base((kdb_desc_t *)d), NULL, -+ KDB_SP_DEFAULT); -+ kdb_printf(" limit=" kdb_machreg_fmt " dpl=%d\n", -+ KDB_SEG_DESC_LIMIT((kdb_desc_t *)d), d->dpl); -+ break; -+ case KDB_SYS_DESC_TYPE_TSS: -+ case KDB_SYS_DESC_TYPE_TSS16: -+ case KDB_SYS_DESC_TYPE_TSSB: -+ case KDB_SYS_DESC_TYPE_TSSB16: -+ { -+ struct tss_struct *tss = -+ (struct tss_struct *) -+ kdb_seg_desc_base((kdb_desc_t *)d); -+ kdb_printf("base="); -+ kdb_symbol_print((unsigned long)tss, NULL, KDB_SP_DEFAULT); -+ kdb_printf(" limit=" kdb_machreg_fmt " dpl=%d\n", -+ KDB_SEG_DESC_LIMIT((kdb_desc_t *)d), d->dpl); -+ display_tss(tss); -+ break; -+ } -+ case KDB_SYS_DESC_TYPE_CALLG16: -+ kdb_printf("segment=0x%4.4x off=", d->segment); -+ kdb_symbol_print(KDB_SYS_DESC_OFFSET(d), NULL, KDB_SP_DEFAULT); -+ kdb_printf(" dpl=%d wc=%d\n", -+ d->dpl, KDB_SYS_DESC_CALLG_COUNT(d)); -+ break; -+ case KDB_SYS_DESC_TYPE_CALLG: -+ kdb_printf("segment=0x%4.4x off=", d->segment); -+ kdb_symbol_print(KDB_SYS_DESC_OFFSET(d), NULL, KDB_SP_DEFAULT); -+ kdb_printf(" dpl=%d\n", d->dpl); -+ break; -+ default: -+ kdb_printf("segment=0x%4.4x off=", d->segment); -+ if (KDB_SYS_DESC_OFFSET(d)) -+ kdb_symbol_print(KDB_SYS_DESC_OFFSET(d), NULL, -+ KDB_SP_DEFAULT); -+ else -+ kdb_printf(kdb_machreg_fmt0, KDB_SYS_DESC_OFFSET(d)); -+ kdb_printf(" dpl=%d", d->dpl); -+#ifdef CONFIG_X86_64 
-+ if (d->ist) -+ kdb_printf(" ist=%d", d->ist); -+#endif /* CONFIG_X86_64 */ -+ kdb_printf("\n"); -+ break; -+ } -+} -+ -+static void -+display_seg_desc(kdb_desc_t *d) -+{ -+ unsigned char type = d->type; -+ -+ if (type & KDB_SEG_DESC_TYPE_CODE) { -+ kdb_printf("%-11s base=" kdb_machreg_fmt0 " limit=" -+ kdb_machreg_fmt " dpl=%d %c%c%c %s %s %s \n", -+ "code", -+ kdb_seg_desc_base(d), KDB_SEG_DESC_LIMIT(d), -+ d->dpl, -+ (type & KDB_SEG_DESC_TYPE_CODE_R)?'r':'-', -+ '-', 'x', -+#ifdef CONFIG_X86_64 -+ d->l ? "64b" : d->d ? "32b" : "16b", -+#else /* !CONFIG_X86_64 */ -+ d->d ? "32b" : "16b", -+#endif /* CONFIG_X86_64 */ -+ (type & KDB_SEG_DESC_TYPE_A)?"ac":"", -+ (type & KDB_SEG_DESC_TYPE_CODE_C)?"conf":""); -+ } else { -+ kdb_printf("%-11s base=" kdb_machreg_fmt0 " limit=" -+ kdb_machreg_fmt " dpl=%d %c%c%c %s %s %s \n", -+ "data", -+ kdb_seg_desc_base(d), KDB_SEG_DESC_LIMIT(d), -+ d->dpl, -+ 'r', -+ (type & KDB_SEG_DESC_TYPE_DATA_W)?'w':'-', -+ '-', -+ d->d ? "32b" : "16b", -+ (type & KDB_SEG_DESC_TYPE_A)?"ac":"", -+ (type & KDB_SEG_DESC_TYPE_DATA_D)?"down":""); -+ } -+} -+ -+static int -+kdb_parse_two_numbers(int argc, const char **argv, int *sel, int *count, -+ int *last_sel, int *last_count) -+{ -+ int diag; -+ -+ if (argc > 2) -+ return KDB_ARGCOUNT; -+ -+ kdbgetintenv("MDCOUNT", count); -+ -+ if (argc == 0) { -+ *sel = *last_sel; -+ if (*last_count) -+ *count = *last_count; -+ } else { -+ unsigned long val; -+ -+ if (argc >= 1) { -+ diag = kdbgetularg(argv[1], &val); -+ if (diag) -+ return diag; -+ *sel = val; -+ } -+ if (argc >= 2) { -+ diag = kdbgetularg(argv[2], &val); -+ if (diag) -+ return diag; -+ *count = (int) val; -+ *last_count = (int) val; -+ } else if (*last_count) { -+ *count = *last_count; -+ } -+ } -+ return 0; -+} -+ -+/* -+ * kdb_gdt -+ * -+ * This function implements the 'gdt' command. -+ * -+ * gdt [ []] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+static int -+kdb_gdt(int argc, const char **argv) -+{ -+ int sel = 0; -+ struct desc_ptr gdtr; -+ int diag, count = 8; -+ kdb_desc_t *gdt; -+ unsigned int max_sel; -+ static int last_sel = 0, last_count = 0; -+ -+ diag = kdb_parse_two_numbers(argc, argv, &sel, &count, -+ &last_sel, &last_count); -+ if (diag) -+ return diag; -+ -+ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr)); -+ gdt = (kdb_desc_t *) gdtr.address; -+ -+ max_sel = (gdtr.size + 1) / sizeof(kdb_desc_t); -+ if (sel >= max_sel) { -+ kdb_printf("Maximum selector (%d) reached\n", max_sel); -+ return 0; -+ } -+ -+ if (sel + count > max_sel) -+ count = max_sel - sel; -+ -+ while (count--) { -+ kdb_desc_t *d = &gdt[sel]; -+ kdb_printf("0x%4.4x ", sel++); -+ -+ if (!d->p) { -+ kdb_printf("not present\n"); -+ continue; -+ } -+ if (d->s) { -+ display_seg_desc(d); -+ } else { -+ display_gate_desc((kdb_gate_desc_t *)d); -+ if (KDB_X86_64 && count) { -+ ++sel; /* this descriptor occupies two slots */ -+ --count; -+ } -+ } -+ } -+ -+ last_sel = sel; -+ return 0; -+} -+ -+/* -+ * kdb_ldt -+ * -+ * This function implements the 'ldt' command. -+ * -+ * ldt [ []] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. 
-+ * Remarks: -+ */ -+static int -+kdb_ldt(int argc, const char **argv) -+{ -+ int sel = 0; -+ struct desc_ptr gdtr; -+ unsigned long ldtr = 0; -+ int diag, count = 8; -+ kdb_desc_t *ldt, *ldt_desc; -+ unsigned int max_sel; -+ static int last_sel = 0, last_count = 0; -+ -+ diag = kdb_parse_two_numbers(argc, argv, &sel, &count, -+ &last_sel, &last_count); -+ if (diag) -+ return diag; -+ -+ if (strcmp(argv[0], "ldtp") == 0) { -+ kdb_printf("pid=%d, process=%s\n", -+ kdb_current_task->pid, kdb_current_task->comm); -+ if (!kdb_current_task->mm || -+ !kdb_current_task->mm->context.ldt) { -+ kdb_printf("no special LDT for this process\n"); -+ return 0; -+ } -+ ldt = kdb_current_task->mm->context.ldt; -+ max_sel = kdb_current_task->mm->context.size; -+ } else { -+ -+ /* sldt gives the GDT selector for the segment containing LDT */ -+ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr)); -+ __asm__ __volatile__ ("sldt %0\n\t" : "=m"(ldtr)); -+ ldtr &= 0xfff8; /* extract the index */ -+ -+ if (ldtr > gdtr.size+1) { -+ kdb_printf("invalid ldtr\n"); -+ return 0; -+ } -+ -+ ldt_desc = (kdb_desc_t *)(gdtr.address + ldtr); -+ ldt = (kdb_desc_t *)kdb_seg_desc_base(ldt_desc); -+ max_sel = (KDB_SEG_DESC_LIMIT(ldt_desc)+1) / sizeof(kdb_desc_t); -+ } -+ -+ if (sel >= max_sel) { -+ kdb_printf("Maximum selector (%d) reached\n", max_sel); -+ return 0; -+ } -+ -+ if (sel + count > max_sel) -+ count = max_sel - sel; -+ -+ while (count--) { -+ kdb_desc_t *d = &ldt[sel]; -+ kdb_printf("0x%4.4x ", sel++); -+ -+ if (!d->p) { -+ kdb_printf("not present\n"); -+ continue; -+ } -+ if (d->s) { -+ display_seg_desc(d); -+ } else { -+ display_gate_desc((kdb_gate_desc_t *)d); -+ if (KDB_X86_64 && count) { -+ ++sel; /* this descriptor occupies two slots */ -+ --count; -+ } -+ } -+ } -+ -+ last_sel = sel; -+ return 0; -+} -+ -+/* -+ * kdb_idt -+ * -+ * This function implements the 'idt' command. -+ * -+ * idt [ []] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. 
-+ * Remarks: -+ */ -+static int -+kdb_idt(int argc, const char **argv) -+{ -+ int vec = 0; -+ struct desc_ptr idtr; -+ int diag, count = 8; -+ kdb_gate_desc_t *idt; -+ unsigned int max_entries; -+ static int last_vec = 0, last_count = 0; -+ -+ diag = kdb_parse_two_numbers(argc, argv, &vec, &count, -+ &last_vec, &last_count); -+ if (diag) -+ return diag; -+ -+ __asm__ __volatile__ ("sidt %0\n\t" : "=m"(idtr)); -+ idt = (kdb_gate_desc_t *)idtr.address; -+ -+ max_entries = (idtr.size+1) / sizeof(kdb_gate_desc_t); -+ if (vec >= max_entries) { -+ kdb_printf("Maximum vector (%d) reached\n", max_entries); -+ return 0; -+ } -+ -+ if (vec + count > max_entries) -+ count = max_entries - vec; -+ -+ while (count--) { -+ kdb_gate_desc_t *d = &idt[vec]; -+ kdb_printf("0x%4.4x ", vec++); -+ if (!d->p) { -+ kdb_printf("not present\n"); -+ continue; -+ } -+#ifndef CONFIG_X86_64 -+ if (d->s) { -+ kdb_printf("invalid\n"); -+ continue; -+ } -+#endif /* CONFIG_X86_64 */ -+ display_gate_desc(d); -+ } -+ -+ last_vec = vec; -+ -+ return 0; -+} -+ -+#if 0 -+static int -+get_pagetables(unsigned long addr, pgd_t **pgdir, pmd_t **pgmiddle, pte_t **pte) -+{ -+ pgd_t *d; -+ pmd_t *m; -+ pte_t *t; -+ -+ if (addr > PAGE_OFFSET) { -+ d = pgd_offset_k(addr); -+ } else { -+ kdb_printf("pid=%d, process=%s\n", kdb_current_task->pid, kdb_current_task->comm); -+ d = pgd_offset(kdb_current_task->mm, addr); -+ } -+ -+ if (pgd_none(*d) || pgd_bad(*d)) { -+ *pgdir = NULL; -+ *pgmiddle = NULL; -+ *pte = NULL; -+ return 0; -+ } else { -+ *pgdir = d; -+ } -+ -+ /* if _PAGE_PSE is set, pgdir points directly to the page. */ -+ if (pgd_val(*d) & _PAGE_PSE) { -+ *pgmiddle = NULL; -+ *pte = NULL; -+ return 0; -+ } -+ -+ m = pmd_offset(d, addr); -+ if (pmd_none(*m) || pmd_bad(*m)) { -+ *pgmiddle = NULL; -+ *pte = NULL; -+ return 0; -+ } else { -+ *pgmiddle = m; -+ } -+ -+ t = pte_offset(m, addr); -+ if (pte_none(*t)) { -+ *pte = NULL; -+ return 0; -+ } else { -+ *pte = t; -+ } -+ kdb_printf("\naddr=%08lx, pgd=%08lx, pmd=%08lx, pte=%08lx\n", -+ addr, -+ (unsigned long) pgd_val(*d), -+ (unsigned long) pmd_val(*m), -+ (unsigned long) pte_val(*t)); -+ return 0; -+} -+#endif -+ -+#define FORMAT_PGDIR(entry) \ -+ kdb_printf("frame=%05lx %c %s %c %c %c %s %c %s %s \n",\ -+ (entry >> PAGE_SHIFT), \ -+ (entry & _PAGE_PRESENT)?'p':'n', \ -+ (entry & _PAGE_RW)?"rw":"ro", \ -+ (entry & _PAGE_USER)?'u':'s', \ -+ (entry & _PAGE_ACCESSED)?'a':' ', \ -+ ' ', \ -+ (entry & _PAGE_PSE)?"4M":"4K", \ -+ (entry & _PAGE_GLOBAL)?'g':' ', \ -+ (entry & _PAGE_PWT)?"wt":"wb", \ -+ (entry & _PAGE_PCD)?"cd":" "); -+ -+#define FORMAT_PTE(p, entry) \ -+ kdb_printf("frame=%05lx %c%c%c %c %c %c %s %c %s %s\n", \ -+ (entry >> PAGE_SHIFT), \ -+ (pte_read(p))? 'r':'-', \ -+ (pte_write(p))? 'w':'-', \ -+ (pte_exec(p))? 'x':'-', \ -+ (pte_dirty(p))? 'd':' ', \ -+ (pte_young(p))? 'a':' ', \ -+ (entry & _PAGE_USER)? 'u':'s', \ -+ " ", \ -+ (entry & _PAGE_GLOBAL)? 'g':' ', \ -+ (entry & _PAGE_PWT)? "wt":"wb", \ -+ (entry & _PAGE_PCD)? 
"cd":" "); -+#if 0 -+static int -+display_pgdir(unsigned long addr, pgd_t *pgdir, int count) -+{ -+ unsigned long entry; -+ int i; -+ int index = pgdir - ((pgd_t *)(((unsigned long)pgdir) & PAGE_MASK)); -+ -+ count = min(count, PTRS_PER_PGD - index); -+ addr &= ~(PGDIR_SIZE-1); -+ -+ for (i = 0; i < count; i++, pgdir++) { -+ entry = pgd_val(*pgdir); -+ kdb_printf("pgd: addr=%08lx ", addr); -+ if (pgd_none(*pgdir)) { -+ kdb_printf("pgdir not present\n"); -+ } else { -+ FORMAT_PGDIR(entry); -+ } -+ addr += PGDIR_SIZE; -+ } -+ return i; -+} -+#endif -+ -+#if 0 /* for now, let's not print pgmiddle. */ -+static int -+display_pgmiddle(unsigned long addr, pmd_t *pgmiddle, int count) -+{ -+ unsigned long entry; -+ int i; -+ int index = pgmiddle - ((pmd_t *)(((unsigned long)pgmiddle) & PAGE_MASK)); -+ -+ count = min(count, PTRS_PER_PMD - index); -+ addr &= ~(PMD_SIZE-1); -+ -+ for (i = 0; i < count; i++, pgmiddle++) { -+ entry = pmd_val(*pgmiddle); -+ kdb_printf("pmd: addr=%08lx ", addr); -+ if (pmd_none(*pgmiddle)) { -+ kdb_printf("pgmiddle not present\n"); -+ } else { -+ FORMAT_PGDIR(entry); -+ } -+ addr += PMD_SIZE; -+ } -+ return i; -+} -+#endif -+ -+#if 0 -+static int -+display_pte(unsigned long addr, pte_t *pte, int count) -+{ -+ unsigned long entry; -+ int i; -+ int index = pte - ((pte_t *)(((unsigned long)pte) & PAGE_MASK)); -+ -+ count = min(count, PTRS_PER_PTE - index); -+ addr &= PAGE_MASK; -+ -+ for (i = 0; i < count; i++, pte++) { -+ entry = pte_val(*pte); -+ kdb_printf("pte: addr=%08lx ", addr); -+ if (pte_none(*pte)) { -+ kdb_printf("pte not present\n"); -+ } else if (!pte_present(*pte)) { -+ kdb_printf("page swapped out. swp_offset=%08lx ", SWP_OFFSET(pte_to_swp_entry(*pte))); -+ kdb_printf("swp_type=%8lx", SWP_TYPE(pte_to_swp_entry(*pte))); -+ } else { -+ FORMAT_PTE(*pte, entry); -+ } -+ addr += PAGE_SIZE; -+ } -+ return i; -+} -+ -+ -+/* -+ * kdb_pte -+ * -+ * This function implements the 'pte' command. -+ * -+ * pte [] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+static int -+kdb_pte(int argc, const char **argv) -+{ -+ static unsigned long last_addr = 0, last_count = 0; -+ int count = 8; -+ unsigned long addr; -+ long offset = 0; -+ pgd_t *pgdir; -+ pmd_t *pgmiddle; -+ pte_t *pte; -+ -+#ifdef CONFIG_X86_PAE -+ kdb_printf("This kernel is compiled with PAE support."); -+ return KDB_NOTIMP; -+#endif -+ kdbgetintenv("MDCOUNT", &count); -+ -+ if (argc == 0) { -+ if (last_addr == 0) -+ return KDB_ARGCOUNT; -+ addr = last_addr; -+ if (last_count) -+ count = last_count; -+ } else { -+ kdb_machreg_t val; -+ int diag, nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ if (argc > nextarg+1) -+ return KDB_ARGCOUNT; -+ -+ if (argc >= nextarg) { -+ diag = kdbgetularg(argv[nextarg], &val); -+ if (!diag) { -+ count = (int) val; -+ last_count = count; -+ } else if (last_count) { -+ count = last_count; -+ } -+ } -+ } -+ -+ /* -+ * round off the addr to a page boundary. -+ */ -+ addr &= PAGE_MASK; -+ -+ get_pagetables(addr, &pgdir, &pgmiddle, &pte); -+ -+ if (pgdir) -+ display_pgdir(addr, pgdir, 1); -+#if 0 /* for now, let's not print pgmiddle. 
*/ -+ if (pgmiddle) -+ display_pgmiddle(addr, pgmiddle, 1); -+#endif -+ if (pte) { -+ int displayed; -+ displayed = display_pte(addr, pte, count); -+ addr += (displayed << PAGE_SHIFT); -+ } -+ last_addr = addr; -+ return 0; -+} -+#else -+/* -+ * Todo - In 2.5 the pte_offset macro in asm/pgtable.h seems to be -+ * renamed to pte_offset_kernel. -+ */ -+static int -+kdb_pte(int argc, const char **argv) -+{ -+ kdb_printf("not supported."); -+ return KDB_NOTIMP; -+} -+#endif -+ -+/* -+ * kdb_rdv -+ * -+ * This function implements the 'rdv' command. -+ * It displays all registers of the current processor -+ * included control registers in verbose mode. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * This should have been an option to rd command say "rd v", -+ * but it is here as it is a non-essential x86-only command, -+ * that need not clutter arch/i386/kdb/kdbasupport.c. -+ */ -+static int -+kdb_rdv(int argc, const char **argv) -+{ -+ struct pt_regs *regs = get_irq_regs(); -+ kdba_dumpregs(regs, NULL, NULL); -+ kdb_printf("\n"); -+ display_eflags(regs->flags); -+ kdb_printf("\n"); -+ display_gdtr(); -+ display_idtr(); -+ display_ldtr(); -+ kdb_printf("\n"); -+ display_cr0(); -+ display_cr3(); -+ display_cr4(); -+ display_cr8(); -+ kdb_printf("\n"); -+ display_dr(); -+ return 0; -+} -+ -+static int -+kdb_rdmsr(int argc, const char **argv) -+{ -+ unsigned long addr; -+ uint32_t l, h; -+ int diag; -+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ if ((diag = kdbgetularg(argv[1], &addr))) -+ return diag; -+ -+ if (!cpu_has(c, X86_FEATURE_MSR)) -+ return KDB_NOTIMP; -+ -+ kdb_printf("msr(0x%lx) = ", addr); -+ if ((diag = rdmsr_safe(addr, &l, &h))) { -+ kdb_printf("error %d\n", diag); -+ return KDB_BADINT; -+ } else { -+ kdb_printf("0x%08x_%08x\n", h, l); -+ } -+ -+ return 0; -+} -+ -+static int -+kdb_wrmsr(int argc, const char **argv) -+{ -+ unsigned long addr; -+ unsigned long l, h; -+ int diag; -+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); -+ -+ if (argc != 3) -+ return KDB_ARGCOUNT; -+ -+ if ((diag = kdbgetularg(argv[1], &addr)) -+ || (diag = kdbgetularg(argv[2], &h)) -+ || (diag = kdbgetularg(argv[3], &l))) -+ return diag; -+ -+ if (!cpu_has(c, X86_FEATURE_MSR)) -+ return KDB_NOTIMP; -+ -+ if ((diag = wrmsr_safe(addr, l, h))) { -+ kdb_printf("error %d\n", diag); -+ return KDB_BADINT; -+ } -+ -+ return 0; -+} -+ -+static int __init kdbm_x86_init(void) -+{ -+ kdb_register("rdv", kdb_rdv, NULL, "Display registers in verbose mode", 0); -+ kdb_register_repeat("gdt", kdb_gdt, " []", "Display GDT", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("idt", kdb_idt, " []", "Display IDT", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("ldt", kdb_ldt, " []", "Display LDT", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("ptex", kdb_pte, " []", "Display pagetables", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register_repeat("ldtp", kdb_ldt, " []", "Display Process LDT", 0, KDB_REPEAT_NO_ARGS); -+ kdb_register("rdmsr", kdb_rdmsr, "", "Display Model Specific Register", 0); -+ kdb_register("wrmsr", kdb_wrmsr, " ", "Modify Model Specific Register", 0); -+ return 0; -+} -+ -+static void __exit kdbm_x86_exit(void) -+{ -+ kdb_unregister("rdv"); -+ kdb_unregister("gdt"); -+ kdb_unregister("ldt"); -+ kdb_unregister("idt"); -+ kdb_unregister("ptex"); -+ kdb_unregister("ldtp"); -+ kdb_unregister("rdmsr"); -+ 
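Editor's note, an illustrative aside on the rdmsr/wrmsr commands defined above: both lean on rdmsr_safe()/wrmsr_safe() and treat the 64-bit MSR value as separate 32-bit halves, and kdb_wrmsr() reads the high word before the low word from the command line. A minimal sketch of the same calls, assuming <asm/msr.h> in a kernel context; the helper names are hypothetical.

/*
 * Illustrative sketch, not part of the patch: combining the 32-bit
 * halves used by rdmsr_safe()/wrmsr_safe() into a single 64-bit value.
 */
#include <linux/types.h>
#include <asm/msr.h>

static int example_read_msr(unsigned int msr, u64 *val)
{
	u32 lo, hi;
	int err = rdmsr_safe(msr, &lo, &hi);	/* non-zero if the read faults */

	if (!err)
		*val = ((u64)hi << 32) | lo;	/* kdb prints this as high_low */
	return err;
}

static int example_write_msr(unsigned int msr, u64 val)
{
	/* wrmsr_safe() takes low then high; kdb's wrmsr command line takes high then low */
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}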
kdb_unregister("wrmsr"); -+} -+ -+module_init(kdbm_x86_init) -+module_exit(kdbm_x86_exit) ---- /dev/null -+++ b/kdb/modules/lcrash/README -@@ -0,0 +1,3 @@ -+ -+ These files are copied from lcrash. -+ The only changes are flagged with "cpw". ---- /dev/null -+++ b/kdb/modules/lcrash/asm/README -@@ -0,0 +1 @@ -+This kl_types.h is asm-ia64 version. ---- /dev/null -+++ b/kdb/modules/lcrash/asm/kl_dump_ia64.h -@@ -0,0 +1,199 @@ -+/* -+ * $Id: kl_dump_ia64.h 1151 2005-02-23 01:09:12Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+/* This header file holds the architecture specific crash dump header */ -+#ifndef __KL_DUMP_IA64_H -+#define __KL_DUMP_IA64_H -+ -+/* definitions */ -+#ifndef KL_NR_CPUS -+# define KL_NR_CPUS 128 /* max number CPUs */ -+#endif -+ -+#define KL_DUMP_MAGIC_NUMBER_IA64 0xdeaddeadULL /* magic number */ -+#define KL_DUMP_VERSION_NUMBER_IA64 0x4 /* version number */ -+ -+ -+/* -+ * mkswap.c calls getpagesize() to get the system page size, -+ * which is not necessarily the same as the hardware page size. -+ * -+ * For ia64 the kernel PAGE_SIZE can be configured from 4KB ... 16KB. -+ * -+ * The physical memory is layed out out in the hardware/minimal pages. -+ * This is the size we need to use for dumping physical pages. -+ * -+ * Note ths hardware/minimal page size being use in; -+ * arch/ia64/kernel/efi.c`efi_memmap_walk(): -+ * curr.end = curr.start + (md->num_pages << 12); -+ * -+ * Since the system page size could change between the kernel we boot -+ * on the the kernel that cause the core dume we may want to have something -+ * more constant like the maximum system page size (See include/asm-ia64/page.h). -+ */ -+#define DUMP_MIN_PAGE_SHIFT 12 -+#define DUMP_MIN_PAGE_SIZE (1UL << DUMP_MIN_PAGE_SHIFT) -+#define DUMP_MIN_PAGE_MASK (~(DUMP_MIN_PAGE_SIZE - 1)) -+#define DUMP_MIN_PAGE_ALIGN(addr) (((addr) + DUMP_MIN_PAGE_SIZE - 1) & DUMP_MIN_PAGE_MASK) -+ -+#define DUMP_MAX_PAGE_SHIFT 16 -+#define DUMP_MAX_PAGE_SIZE (1UL << DUMP_MAX_PAGE_SHIFT) -+#define DUMP_MAX_PAGE_MASK (~(DUMP_MAX_PAGE_SIZE - 1)) -+#define DUMP_MAX_PAGE_ALIGN(addr) (((addr) + DUMP_MAX_PAGE_SIZE - 1) & DUMP_MAX_PAGE_MASK) -+ -+#define DUMP_HEADER_OFFSET DUMP_MAX_PAGE_SIZE -+ -+#define DUMP_EF_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT -+ -+#define DUMP_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT -+#define DUMP_PAGE_SIZE DUMP_MIN_PAGE_SIZE -+#define DUMP_PAGE_MASK DUMP_MIN_PAGE_MASK -+#define DUMP_PAGE_ALIGN(addr) DUMP_MIN_PAGE_ALIGN(addr) -+ -+struct kl_ia64_fpreg { -+ union { -+ unsigned long bits[2]; -+ long double __dummy; /* force 16-byte alignment */ -+ } u; -+}; -+ -+struct kl_pt_regs_ia64 { -+ /* for 2.6 kernels only. 
This structure was totally different in 2.4 kernels */ -+ unsigned long b6; /* scratch */ -+ unsigned long b7; /* scratch */ -+ -+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ -+ unsigned long ar_ssd; /* reserved for future use (scratch) */ -+ -+ unsigned long r8; /* scratch (return value register 0) */ -+ unsigned long r9; /* scratch (return value register 1) */ -+ unsigned long r10; /* scratch (return value register 2) */ -+ unsigned long r11; /* scratch (return value register 3) */ -+ -+ unsigned long cr_ipsr; /* interrupted task's psr */ -+ unsigned long cr_iip; /* interrupted task's instruction pointer */ -+ unsigned long cr_ifs; /* interrupted task's function state */ -+ -+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ -+ unsigned long ar_pfs; /* prev function state */ -+ unsigned long ar_rsc; /* RSE configuration */ -+ /* The following two are valid only if cr_ipsr.cpl > 0: */ -+ unsigned long ar_rnat; /* RSE NaT */ -+ unsigned long ar_bspstore; /* RSE bspstore */ -+ -+ unsigned long pr; /* 64 predicate registers (1 bit each) */ -+ unsigned long b0; /* return pointer (bp) */ -+ unsigned long loadrs; /* size of dirty partition << 16 */ -+ -+ unsigned long r1; /* the gp pointer */ -+ unsigned long r12; /* interrupted task's memory stack pointer */ -+ unsigned long r13; /* thread pointer */ -+ -+ unsigned long ar_fpsr; /* floating point status (preserved) */ -+ unsigned long r15; /* scratch */ -+ -+ /* The remaining registers are NOT saved for system calls. */ -+ -+ unsigned long r14; /* scratch */ -+ unsigned long r2; /* scratch */ -+ unsigned long r3; /* scratch */ -+ -+ /* The following registers are saved by SAVE_REST: */ -+ unsigned long r16; /* scratch */ -+ unsigned long r17; /* scratch */ -+ unsigned long r18; /* scratch */ -+ unsigned long r19; /* scratch */ -+ unsigned long r20; /* scratch */ -+ unsigned long r21; /* scratch */ -+ unsigned long r22; /* scratch */ -+ unsigned long r23; /* scratch */ -+ unsigned long r24; /* scratch */ -+ unsigned long r25; /* scratch */ -+ unsigned long r26; /* scratch */ -+ unsigned long r27; /* scratch */ -+ unsigned long r28; /* scratch */ -+ unsigned long r29; /* scratch */ -+ unsigned long r30; /* scratch */ -+ unsigned long r31; /* scratch */ -+ -+ unsigned long ar_ccv; /* compare/exchange value (scratch) */ -+ -+ /* -+ * * Floating point registers that the kernel considers scratch: -+ * */ -+ struct kl_ia64_fpreg f6; /* scratch */ -+ struct kl_ia64_fpreg f7; /* scratch */ -+ struct kl_ia64_fpreg f8; /* scratch */ -+ struct kl_ia64_fpreg f9; /* scratch */ -+ struct kl_ia64_fpreg f10; /* scratch */ -+ struct kl_ia64_fpreg f11; /* scratch */ -+} __attribute__((packed)); -+ -+/* -+ * Structure: dump_header_asm_t -+ * Function: This is the header for architecture-specific stuff. It -+ * follows right after the dump header. 
-+ */ -+typedef struct kl_dump_header_ia64_s { -+ /* the dump magic number -- unique to verify dump is valid */ -+ uint64_t magic_number; -+ /* the version number of this dump */ -+ uint32_t version; -+ /* the size of this header (in case we can't read it) */ -+ uint32_t header_size; -+ /* pointer to pt_regs */ -+ uint64_t pt_regs; -+ /* the dump registers */ -+ struct kl_pt_regs_ia64 regs; -+ /* the rnat register saved after flushrs */ -+ uint64_t rnat; -+ /* the pfs register saved after flushrs */ -+ uint64_t pfs; -+ /* the bspstore register saved after flushrs */ -+ uint64_t bspstore; -+ -+ /* smp specific */ -+ uint32_t smp_num_cpus; -+ uint32_t dumping_cpu; -+ struct kl_pt_regs_ia64 smp_regs[KL_NR_CPUS]; -+ uint64_t smp_current_task[KL_NR_CPUS]; -+ uint64_t stack[KL_NR_CPUS]; -+} __attribute__((packed)) kl_dump_header_ia64_t; -+ -+/* The following struct is used just to calculate the size needed -+ * to store per CPU info. (Make sure it is sync with the above struct) -+ */ -+struct kl_dump_CPU_info_ia64 { -+ struct kl_pt_regs_ia64 smp_regs; -+ uint64_t smp_current_task; -+ uint64_t stack; -+} __attribute__((packed)); -+ -+/* function declarations -+ */ -+int kl_set_dumparch_ia64(void); -+uint32_t dha_num_cpus_ia64(void); -+kaddr_t dha_current_task_ia64(int cpuid); -+int dha_cpuid_ia64(kaddr_t); -+kaddr_t dha_stack_ia64(int); -+kaddr_t dha_stack_ptr_ia64(int); -+int kl_read_dump_header_ia64(void); -+ -+#endif /* __KL_DUMP_IA64_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/asm/kl_types.h -@@ -0,0 +1,48 @@ -+/* -+ * $Id: kl_types.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __ASMIA64_KL_TYPES_H -+#define __ASMIA64_KL_TYPES_H -+ -+/* cpw */ -+/* was #include */ -+#include "kl_dump_ia64.h" -+ -+#define HOST_ARCH_IA64 -+/* cpw: add this, as otherwise comes from makefile */ -+#define DUMP_ARCH_IA64 -+ -+/* Format string that allows a single fprintf() call to work for both -+ * 32-bit and 64-bit pointer values (architecture specific). -+ */ -+#ifdef CONFIG_X86_32 -+#define FMT64 "ll" -+#else -+#define FMT64 "l" -+#endif -+#define FMTPTR "l" -+ -+/* for usage in common code where host architecture -+ * specific type/macro is needed -+ */ -+typedef kl_dump_header_ia64_t kl_dump_header_asm_t; -+#define KL_DUMP_ASM_MAGIC_NUMBER KL_DUMP_MAGIC_NUMBER_IA64 -+ -+#endif /* __ASMIA64_KL_TYPES_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_alloc.h -@@ -0,0 +1,124 @@ -+/* -+ * $Id: kl_alloc.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. 
-+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_ALLOC_H -+#define __KL_ALLOC_H -+ -+/** -+ ** Header file for kl_alloc.c module -+ ** -+ **/ -+ -+#define K_TEMP 1 -+#define K_PERM 2 -+ -+/** function prototypes for register functions -+ **/ -+ -+/* Memory block allocator. Returns a pointer to an allocated block -+ * of size bytes. In case of error, a NULL pointer will be returned -+ * and errno will be set to indicate exactly what error occurred. -+ * Note that the flag value will determine if the block allocated is -+ * temporary (can be freed via a call to kl_free_temp_blks()) or -+ * permenant (must be freed with a call to kl_free_block()).. -+ */ -+typedef void * (*klib_block_alloc_func) ( -+ int /* size of block required */, -+ int /* flag value */, -+ void * /* return address */); -+ -+/* Memory block reallocator. Returns a pointer to a block of new_size -+ * bytes. In case of error, a NULL pointer will be returned and -+ * errno will be set to indicate exactly what error occurred. -+ * Note that the flag value will determine if the block allocated is -+ * temporary (can be free via a call to kl_free_temp_blks()) or -+ * permenant. -+ */ -+typedef void * (*klib_block_realloc_func) ( -+ void * /* pointer to block to realloc */, -+ int /* size of new block required */, -+ int /* flag value */, -+ void * /* return address */); -+ -+/* Memory block duplicator. Returns a pointer to a block that is -+ * a copy of the block passed in via pointer. In case of error, a -+ * NULL pointer will be returned and errno will be set to indicate -+ * exactly what error occurred. Note that the flag value will -+ * determine if the block allocated is temporary (will be freed -+ * via a call to kl_free_temp_blks()) or permenant. Note that this -+ * function is only supported when liballoc is used (there is no -+ * way to tell the size of a malloced block. -+ */ -+typedef void * (*klib_block_dup_func) ( -+ void * /* pointer to block to dup */, -+ int /* flag value */, -+ void * /* return address */); -+ -+/* Allocates a block large enough to hold a string (plus the terminating -+ * NULL character). -+ */ -+typedef void * (*klib_str_to_block_func) ( -+ char * /* pointer to character string */, -+ int /* flag value */, -+ void * /* return address */); -+ -+/* Frees blocks that were previously allocated. -+ */ -+typedef void (*klib_block_free_func) ( -+ void * /* pointer to block */); -+ -+/* alloc block wrapper function table structure -+ */ -+typedef struct alloc_functions_s { -+ int flag; /* Functions initialized? 
*/ -+ klib_block_alloc_func block_alloc; /* Returns ptr to block */ -+ klib_block_realloc_func block_realloc; /* Returns ptr to new blk */ -+ klib_block_dup_func block_dup; /* Returns ptr to new blk */ -+ klib_str_to_block_func str_to_block; /* Returns ptr to new blk */ -+ klib_block_free_func block_free; /* Frees memory block */ -+} alloc_functions_t; -+ -+extern alloc_functions_t alloc_functions; -+ -+/* Macros for accessing functions in alloc_functions table -+ */ -+#define KL_BLOCK_ALLOC() (alloc_functions.block_alloc) -+#define KL_BLOCK_REALLOC() (alloc_functions.block_realloc) -+#define KL_BLOCK_DUP() (alloc_functions.block_dup) -+#define KL_STR_TO_BLOCK() (alloc_functions.str_to_block) -+#define KL_BLOCK_FREE() (alloc_functions.block_free) -+ -+void *_kl_alloc_block(int, int, void *); -+void *_kl_realloc_block(void *, int, int, void *); -+void *_kl_dup_block(void *, int, void *); -+void *_kl_str_to_block(char *, int, void *); -+#if 0 -+cpw: we create a new wrappers for these: -+void kl_free_block(void *); -+ -+#define kl_alloc_block(size, flags) _kl_alloc_block(size, flags, kl_get_ra()) -+#endif -+#define kl_realloc_block(b, new_size, flags) \ -+ _kl_realloc_block(b, new_size, flags, kl_get_ra()) -+#define kl_dup_block(b, flags) _kl_dup_block(b, flags, kl_get_ra()) -+#define kl_str_to_block(s, flags) _kl_str_to_block(s, flags, kl_get_ra()) -+ -+#endif /* __KL_ALLOC_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_bfd.h -@@ -0,0 +1,31 @@ -+/* -+ * $Id: kl_bfd.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_BFD_H -+#define __KL_BFD_H -+ -+/* cpw: " " form: */ -+#include "klib.h" -+ -+int kl_check_bfd_error(bfd_error_type); -+int kl_open_elf(char*, bfd**, bfd**); -+int kl_read_bfd_syminfo(maplist_t*); -+ -+#endif /* __KL_BFD_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_btnode.h -@@ -0,0 +1,95 @@ -+/* -+ * $Id: kl_btnode.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. 
-+ */ -+ -+#ifndef __KL_BTNODE_H -+#define __KL_BTNODE_H -+ -+/* -+ * Node header struct for use in binary search tree routines -+ */ -+typedef struct btnode_s { -+ struct btnode_s *bt_left; -+ struct btnode_s *bt_right; -+ struct btnode_s *bt_parent; -+ char *bt_key; -+ int bt_height; -+} btnode_t; -+ -+#define DUPLICATES_OK 1 -+ -+/** -+ ** btnode operation function prototypes -+ **/ -+ -+/* Return the hight of a given btnode_s struct in a tree. In the -+ * event of an error (a NULL btnode_s pointer was passed in), a -+ * value of -1 will be returned. -+ */ -+int kl_btnode_height( -+ btnode_t* /* pointer to btnode_s struct */); -+ -+/* Insert a btnode_s struct into a tree. After the insertion, the -+ * tree will be left in a reasonibly ballanced state. Note that, if -+ * the DUPLICATES_OK flag is set, duplicate keys will be inserted -+ * into the tree (otherwise return an error). In the event of an -+ * error, a value of -1 will be returned. -+ */ -+int kl_insert_btnode( -+ btnode_t** /* pointer to root of tree */, -+ btnode_t* /* pointer to btnode_s struct to insert */, -+ int /* flags (DUPLICATES_OK) */); -+ -+/* Finds a btnode in a tree and removes it, making sure to keep -+ * the tree in a reasonably balanced state. As part of the -+ * delete_btnode() operation, a call will be made to the free -+ * function (passed in as a parameter) to free any application -+ * specific data. -+ */ -+int kl_delete_btnode( -+ btnode_t** /* pointer to the root of the btree */, -+ btnode_t* /* pointer to btnode_s struct to delete */, -+ void(*)(void*) /* pointer to function to actually free the node */, -+ int /* flags */); -+ -+/* Traverse a tree looking for a particular key. In the event that -+ * duplicate keys are allowed in the tree, returns the first occurance -+ * of the search key found. A pointer to an int should be passed in -+ * to hold the maximum depth reached in the search. Upon success, -+ * returns a pointer to a btnode_s struct. Otherwise, a NULL pointer -+ * will be returned. -+ */ -+btnode_t *_kl_find_btnode( -+ btnode_t* /* pointer to btnode_s struct to start search with */, -+ char* /* key we are looking for */, -+ int* /* pointer to where max depth vlaue will be placed */, -+ size_t /* if nonzero compare only first n chars of key */); -+#define kl_find_btnode(A, B, C) _kl_find_btnode(A, B, C, 0) -+ -+btnode_t *kl_first_btnode( -+ btnode_t * /* pointer to any btnode in a btree */); -+ -+btnode_t *kl_next_btnode( -+ btnode_t * /* pointer to current btnode */); -+ -+btnode_t *kl_prev_btnode( -+ btnode_t * /* Pointer to current btnode */); -+ -+#endif /* __KL_BTNODE_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_cmp.h -@@ -0,0 +1,102 @@ -+/* -+ * $Id: kl_cmp.h 1216 2005-07-06 10:03:13Z holzheu $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. 
-+ */ -+ -+#ifndef __KL_CMP_H -+#define __KL_CMP_H -+ -+#define DUMP_INDEX_MAGIC 0xdeadbeef -+#define DUMP_INDEX_VERSION 31900 -+#define NUM_BUCKETS 65535 -+ -+/* -+ * Definitions for compressed cached reads. I've recently lowered -+ * these ... If they need to be increased later, I'll do so. -+ */ -+#define CMP_HIGH_WATER_MARK 25 -+#define CMP_LOW_WATER_MARK 10 -+ -+#define CMP_VM_CACHED 0x01 -+#define CMP_VM_UNCACHED 0x02 -+ -+ -+/* -+ * This structure defines a page table entry, what each value will -+ * contain. Since these can be cached or uncached, we have a flags -+ * variable to specify this. -+ */ -+typedef struct _ptableentry { -+ int flags; /* flags for page in cache */ -+ int length; /* length of page */ -+ int cached; /* cached (1 = yes, cached) */ -+ kaddr_t addr; /* addr of page */ -+ char *data; /* data in page */ -+ struct _ptableentry *next; /* ptr to next dump page */ -+ struct _ptableentry *prev; /* ptr to prev dump page */ -+ struct _ptableentry *nextcache; /* ptr to next cached page */ -+ struct _ptableentry *prevcache; /* ptr to prev cached page */ -+} ptableentry; -+ -+/* -+ * This is for the page table index from the compressed core dump. -+ * This is separate from the page table entries because these are -+ * simply addresses off of the compressed core dump, and not the -+ * actual data from the core dump. If we hash these values, we gain -+ * a lot of performance because we only have 1 to search for the -+ * page data, 1 to search for the index, and return if both searches -+ * failed. -+ */ -+typedef struct _ptableindex { -+ kl_dump_page_t dir; /* directory entry of page */ -+ kaddr_t addr; /* address of page offset */ -+ kaddr_t coreaddr; /* address of page in core */ -+ unsigned int hash; /* hash value for this index item */ -+ struct _ptableindex *next; /* next pointer */ -+} ptableindex; -+ -+typedef struct dump_index_s { -+ unsigned int magic_number; /* dump index magic number */ -+ unsigned int version_number; /* dump index version number */ -+ /* struct timeval depends on machine, use two long values here */ -+ struct {uint64_t tv_sec; -+ uint64_t tv_usec; -+ } timebuf; /* the time of the dump */ -+} __attribute__((packed)) dump_index_t; -+ -+/* Compression function */ -+typedef int (*kl_compress_fn_t)(const unsigned char *old, uint32_t old_size, unsigned char *new, uint32_t size); -+ -+/* function declarations -+ */ -+int kl_cmpreadmem(int, kaddr_t, char*, unsigned int, unsigned int); -+int kl_cmpinit( -+ int /* fd */, -+ char * /* indexname */, -+ int /* flags */); -+ -+/* Compression routine: No compression */ -+int kl_compress_none(const char *old, uint32_t old_size, char *new, uint32_t new_size); -+ -+/* Compression routine: Run length encoding */ -+int kl_compress_rle(const char *old, uint32_t old_size, char *new, uint32_t new_size); -+ -+/* Compression routine: GZIP */ -+int kl_compress_gzip(const unsigned char *old, uint32_t old_size, unsigned char *new, uint32_t new_size); -+ -+#endif /* __KL_CMP_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_copt.h -@@ -0,0 +1,29 @@ -+/* -+ * $Id: kl_copt.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * -+ * Copyright (C) 2003, 2004 Silicon Graphics, Inc. All rights reserved. 
-+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+#ifndef __KL_COPT_H -+#define __KL_COPT_H -+ -+extern int copt_ind; -+extern char *copt_arg; -+extern int copt_error; -+ -+void reset_copt(void); -+int is_copt(char *); -+int get_copt(int, char **, const char *, char **); -+ -+#endif /* __KL_COPT_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_debug.h -@@ -0,0 +1,168 @@ -+/* -+ * $Id: kl_debug.h 1196 2005-05-17 18:34:12Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_DEBUG_H -+#define __KL_DEBUG_H -+ -+/* generic functions for reading kerntypes in stabs and dwarf2 formats */ -+ -+#define DBG_NONE 0 -+#define DBG_STABS 1 -+#define DBG_DWARF2 2 -+ -+extern int debug_format; -+ -+#define TYPE_NUM(X) ((uint64_t)(X) & 0xffffffff) -+#define SRC_FILE(X) (((uint64_t)(X) >> 48) & 0xfff) -+#define TYPE_NUM_SLOTS (255) -+#define TYPE_NUM_HASH(X) \ -+ (((SRC_FILE(X)<<1)+TYPE_NUM(X)) % (TYPE_NUM_SLOTS - 1)) -+ -+typedef struct dbg_type_s { -+ kltype_t st_klt; /* must be first */ -+ -+ int st_bit_offset; /* from start of struct/union */ -+ uint64_t st_type_num; /* DBG type_num */ -+ uint64_t st_real_type; /* real type type_num */ -+ uint64_t st_index_type; /* type_num of array index */ -+ uint64_t st_element_type; /* type_num of array element */ -+} dbg_type_t; -+ -+#define st_name st_klt.kl_name -+#define st_type st_klt.kl_type -+#define st_ptr st_klt.kl_ptr -+#define st_flags st_klt.kl_flags -+#define st_typestr st_klt.kl_typestr -+#define st_size st_klt.kl_size -+#define st_offset st_klt.kl_offset -+#define st_low_bounds st_klt.kl_low_bounds -+#define st_high_bounds st_klt.kl_high_bounds -+#define st_value st_klt.kl_value -+#define st_bit_size st_klt.kl_bit_size -+#define st_next st_klt.kl_next -+#define st_member st_klt.kl_member -+#define st_realtype st_klt.kl_realtype -+#define st_indextype st_klt.kl_indextype -+#define st_elementtype st_klt.kl_elementtype -+#define st_encoding st_klt.kl_encoding -+ -+/* Structure containing information about a symbol entry -+ */ -+/* this must match the definition in lkcd's libklib/include/kl_debug.h */ -+typedef struct dbg_sym_s { -+ btnode_t sym_bt; /* must be first */ -+ short sym_dbgtyp; /* STABS, DWARF2, ... 
*/ -+ short sym_state; /* current state */ -+ short sym_flag; /* current flag value */ -+ short sym_type; /* symbol type */ -+ short sym_pvttype; /* private type */ -+ short sym_nmlist; /* namelist index */ -+ short sym_srcfile; /* source file index */ -+ short sym_incfile; /* include file index */ -+ int sym_num; /* symbol number */ -+ int sym_off; /* symbol table offset */ -+ int sym_stroff; /* symbol offset in string table */ -+ uint64_t sym_typenum; /* arbitrary type number */ -+ kltype_t *sym_kltype; /* Full type information */ -+ struct dbg_sym_s *sym_next; /* next pointer for chaining */ -+ struct dbg_sym_s *sym_link; /* another pointer for chaining */ -+ int sym_dup; /* duplicate symbol */ -+} dbg_sym_t; -+#define sym_name sym_bt.bt_key -+ -+extern dbg_sym_t *type_tree; -+extern dbg_sym_t *typedef_tree; -+extern dbg_sym_t *func_tree; -+extern dbg_sym_t *srcfile_tree; -+extern dbg_sym_t *var_tree; -+extern dbg_sym_t *xtype_tree; -+extern dbg_sym_t *symlist; -+extern dbg_sym_t *symlist_end; -+ -+/* State flags -+ */ -+#define DBG_SETUP 0x1 -+#define DBG_SETUP_DONE 0x2 -+#define DBG_SETUP_FAILED 0x4 -+ -+/* Flags for identifying individual symbol types -+ */ -+#define DBG_SRCFILE 0x0001 -+#define DBG_TYPE 0x0002 -+#define DBG_TYPEDEF 0x0004 -+#define DBG_FUNC 0x0008 -+#define DBG_PARAM 0x0010 -+#define DBG_LINE 0x0020 -+#define DBG_VAR 0x0040 -+#define DBG_XTYPE 0x0100 -+#define DBG_ALL 0xffff -+ -+/* Structure for cross referencing one type number to another -+ */ -+typedef struct dbg_hashrec_s { -+ uint64_t h_typenum; /* type number */ -+ dbg_sym_t *h_ptr; /* pointer to actual type */ -+ struct dbg_hashrec_s *h_next; /* next pointer (for hashing) */ -+} dbg_hashrec_t; -+ -+extern dbg_hashrec_t *dbg_hash[]; -+ -+#define HASH_SYM 1 -+#define HASH_XREF 2 -+ -+/* DBG function prototypes -+ */ -+dbg_sym_t *dbg_alloc_sym( -+ int /* format */); -+ -+void dbg_free_sym( -+ dbg_sym_t * /* dbg_sym_s pointer */); -+ -+int dbg_setup_typeinfo( -+ dbg_sym_t * /* dbg_sym_s pointer */); -+ -+int dbg_insert_sym( -+ dbg_sym_t * /* dbg_sym_s pointer */); -+ -+void dbg_hash_sym( -+ uint64_t /* typenum */, -+ dbg_sym_t * /* dbg_sym_s pointer */); -+ -+dbg_type_t *dbg_walk_hash( -+ int * /* pointer to hash index */, -+ void ** /* pointer to hash record pointer */); -+ -+dbg_sym_t *dbg_find_sym( -+ char * /* name */, -+ int /* type number */, -+ uint64_t /* typenum */); -+ -+dbg_sym_t *dbg_first_sym( -+ int /* type number */); -+ -+dbg_sym_t *dbg_next_sym( -+ dbg_sym_t * /* dbg_sym_s pointer */); -+ -+dbg_sym_t *dbg_prev_sym( -+ dbg_sym_t * /* dbg_sym_s pointer */); -+ -+dbg_type_t *dbg_find_typenum( -+ uint64_t /* typenum */); -+ -+#endif /* __KL_DEBUG_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_dump.h -@@ -0,0 +1,511 @@ -+/* -+ * $Id: kl_dump.h 1336 2006-10-23 23:27:06Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. 
-+ */ -+ -+#ifndef __KL_DUMP_H -+#define __KL_DUMP_H -+ -+#if 0 -+cpw: don't need: -+#include -+#include -+#endif -+ -+/* -+ * DUMP_DEBUG: a debug level for the kernel dump code and -+ * the supporting lkcd libraries in user space. -+ * -+ * 0: FALSE: No Debug Added -+ * 1: TRUE: Break Points -+ * . -+ * . -+ * . -+ * 6: Add Debug Data to Structures -+ * . -+ * . -+ * 9: Max -+ */ -+#define DUMP_DEBUG FALSE -+ -+#if DUMP_DEBUG -+void dump_bp(void); /* Called when something exceptional occurs */ -+# define DUMP_BP() dump_bp() /* BreakPoint */ -+#else -+# define DUMP_BP() -+#endif -+ -+ -+#define KL_UTS_LEN 65 /* do not change ... */ -+ -+extern int SN2_24X; -+ -+/* -+ * Size of the buffer that's used to hold: -+ * -+ * 1. the dump header (padded to fill the complete buffer) -+ * 2. the possibly compressed page headers and data -+ */ -+extern uint64_t KL_DUMP_BUFFER_SIZE; -+extern uint64_t KL_DUMP_HEADER_SIZE; -+ -+#if 0 -+/* Variables that contain page size, mask etc. used in dump format -+ * (this is not the system page size stored in the dump header) -+ */ -+uint64_t KL_DUMP_PAGE_SIZE; -+uint64_t KL_DUMP_PAGE_MASK; -+uint64_t KL_DUMP_PAGE_SHIFT; -+#endif -+ -+/* Dump header offset changed from 4k to 64k to support multiple page sizes */ -+#define KL_DUMP_HEADER_OFFSET (1ULL << 16) -+ -+ -+/* header definitions for dumps from s390 standalone dump tools */ -+#define KL_DUMP_MAGIC_S390SA 0xa8190173618f23fdULL /* s390sa magic number */ -+#define KL_DUMP_HEADER_SZ_S390SA 4096 -+ -+/* standard header definitions */ -+#define KL_DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ -+#define KL_DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ -+#define KL_DUMP_MAGIC_ASM 0xdeaddeadULL /* generic arch magic number */ -+#define KL_DUMP_VERSION_NUMBER 0x8 /* dump version number */ -+#define KL_DUMP_PANIC_LEN 0x100 /* dump panic string length */ -+ -+/* dump levels - type specific stuff added later -- add as necessary */ -+#define KL_DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ -+#define KL_DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ -+#define KL_DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ -+#define KL_DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ -+#define KL_DUMP_LEVEL_ALL_RAM 0x8 /* dump header, all RAM pages */ -+#define KL_DUMP_LEVEL_ALL 0x10 /* dump all memory RAM and firmware */ -+ -+/* dump compression options -- add as necessary */ -+#define KL_DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */ -+#define KL_DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ -+#define KL_DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ -+ -+/* dump flags - any dump-type specific flags -- add as necessary */ -+#define KL_DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ -+#define KL_DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ -+#define KL_DUMP_FLAGS_DISKDUMP 0x80000000 /* dump to local disk */ -+#define KL_DUMP_FLAGS_NETDUMP 0x40000000 /* dump to network device */ -+ -+/* dump header flags -- add as necessary */ -+#define KL_DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!)
*/ -+#define KL_DUMP_DH_RAW 0x1 /* raw page (no compression) */ -+#define KL_DUMP_DH_COMPRESSED 0x2 /* page is compressed */ -+#define KL_DUMP_DH_END 0x4 /* end marker on a full dump */ -+#define KL_DUMP_DH_TRUNCATED 0x8 /* dump is incomplete */ -+#define KL_DUMP_DH_TEST_PATTERN 0x10 /* dump page is a test pattern */ -+#define KL_DUMP_DH_NOT_USED 0x20 /* 1st bit not used in flags */ -+ -+/* dump ioctl() control options */ -+#ifdef IOCTL26 -+#define DIOSDUMPDEV _IOW('p', 0xA0, unsigned int) /* set the dump device */ -+#define DIOGDUMPDEV _IOR('p', 0xA1, unsigned int) /* get the dump device */ -+#define DIOSDUMPLEVEL _IOW('p', 0xA2, unsigned int) /* set the dump level */ -+#define DIOGDUMPLEVEL _IOR('p', 0xA3, unsigned int) /* get the dump level */ -+#define DIOSDUMPFLAGS _IOW('p', 0xA4, unsigned int) /* set the dump flag parameters */ -+#define DIOGDUMPFLAGS _IOR('p', 0xA5, unsigned int) /* get the dump flag parameters */ -+#define DIOSDUMPCOMPRESS _IOW('p', 0xA6, unsigned int) /* set the dump compress level */ -+#define DIOGDUMPCOMPRESS _IOR('p', 0xA7, unsigned int) /* get the dump compress level */ -+ -+/* these ioctls are used only by netdump module */ -+#define DIOSTARGETIP _IOW('p', 0xA8, unsigned int) /* set the target m/c's ip */ -+#define DIOGTARGETIP _IOR('p', 0xA9, unsigned int) /* get the target m/c's ip */ -+#define DIOSTARGETPORT _IOW('p', 0xAA, unsigned int) /* set the target m/c's port */ -+#define DIOGTARGETPORT _IOR('p', 0xAB, unsigned int) /* get the target m/c's port */ -+#define DIOSSOURCEPORT _IOW('p', 0xAC, unsigned int) /* set the source m/c's port */ -+#define DIOGSOURCEPORT _IOR('p', 0xAD, unsigned int) /* get the source m/c's port */ -+#define DIOSETHADDR _IOW('p', 0xAE, unsigned int) /* set ethernet address */ -+#define DIOGETHADDR _IOR('p', 0xAF, unsigned int) /* get ethernet address */ -+#define DIOGDUMPOKAY _IOR('p', 0xB0, unsigned int) /* check if dump is configured */ -+#define DIOSDUMPTAKE _IOW('p', 0xB1, unsigned int) /* take a manual dump */ -+#else -+#define DIOSDUMPDEV 1 /* set the dump device */ -+#define DIOGDUMPDEV 2 /* get the dump device */ -+#define DIOSDUMPLEVEL 3 /* set the dump level */ -+#define DIOGDUMPLEVEL 4 /* get the dump level */ -+#define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ -+#define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ -+#define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ -+#define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ -+#define DIOSTARGETIP 9 /* set the target m/c's ip */ -+#define DIOGTARGETIP 10 /* get the target m/c's ip */ -+#define DIOSTARGETPORT 11 /* set the target m/c's port */ -+#define DIOGTARGETPORT 12 /* get the target m/c's port */ -+#define DIOSSOURCEPORT 13 /* set the source m/c's port */ -+#define DIOGSOURCEPORT 14 /* get the source m/c's port */ -+#define DIOSETHADDR 15 /* set ethernet address */ -+#define DIOGETHADDR 16 /* get ethernet address */ -+#define DIOGDUMPOKAY 17 /* check if dump is configured */ -+#define DIOSDUMPTAKE 18 /* take a manual dump */ -+#endif -+ -+/* -+ * structures -+ */ -+ -+/* This is the header dumped at the top of every valid crash dump. -+ */ -+typedef struct kl_dump_header_s { -+ uint64_t magic_number; /* dump magic number, unique to verify dump */ -+ uint32_t version; /* version number of this dump */ -+ uint32_t header_size; /* size of this header */ -+ uint32_t dump_level; /* level of this dump */ -+ /* FIXME: rename page_size to dump_page_size -+ * The size of a hardware/physical memory page (DUMP_PAGE_SIZE). 
-+ * NB: Not the configurable system page (PAGE_SIZE) (4K, 8K, 16K, etc.) -+ */ -+/* uint32_t dh_dump_page_size; */ -+ uint32_t page_size; /* page size (e.g. 4K, 8K, 16K, etc.) */ -+ uint64_t memory_size; /* size of entire physical memory */ -+ uint64_t memory_start; /* start of physical memory */ -+ uint64_t memory_end; /* end of physical memory */ -+#if DUMP_DEBUG >= 6 -+ uint64_t num_bytes; /* number of bytes in this dump */ -+#endif -+ /* the number of dump pages in this dump specifically */ -+ uint32_t num_dump_pages; -+ char panic_string[KL_DUMP_PANIC_LEN]; /* panic string, if available*/ -+ -+ /* timeval depends on machine, two long values */ -+ struct {uint64_t tv_sec; -+ uint64_t tv_usec; -+ } time; /* the time of the system crash */ -+ -+ /* the NEW utsname (uname) information -- in character form */ -+ /* we do this so we don't have to include utsname.h */ -+ /* plus it helps us be more architecture independent */ -+ char utsname_sysname[KL_UTS_LEN]; -+ char utsname_nodename[KL_UTS_LEN]; -+ char utsname_release[KL_UTS_LEN]; -+ char utsname_version[KL_UTS_LEN]; -+ char utsname_machine[KL_UTS_LEN]; -+ char utsname_domainname[KL_UTS_LEN]; -+ -+ uint64_t current_task; /* fixme: better use uint64_t here */ -+ uint32_t dump_compress; /* compression type used in this dump */ -+ uint32_t dump_flags; /* any additional flags */ -+ uint32_t dump_device; /* any additional flags */ -+ uint64_t dump_buffer_size; /* version >= 9 */ -+} __attribute__((packed)) kl_dump_header_t; -+ -+/* This is the header used by the s390 standalone dump tools -+ */ -+typedef struct kl_dump_header_s390sa_s { -+ uint64_t magic_number; /* magic number for this dump (unique)*/ -+ uint32_t version; /* version number of this dump */ -+ uint32_t header_size; /* size of this header */ -+ uint32_t dump_level; /* the level of this dump (just a header?) */ -+ uint32_t page_size; /* page size of dumped Linux (4K,8K,16K etc.) */ -+ uint64_t memory_size; /* the size of all physical memory */ -+ uint64_t memory_start; /* the start of physical memory */ -+ uint64_t memory_end; /* the end of physical memory */ -+ uint32_t num_pages; /* number of pages in this dump */ -+ uint32_t pad; /* ensure 8 byte alignment for tod and cpu_id */ -+ uint64_t tod; /* the time of the dump generation */ -+ uint64_t cpu_id; /* cpu id */ -+ uint32_t arch_id; -+ uint32_t build_arch_id; -+#define KL_DH_ARCH_ID_S390X 2 -+#define KL_DH_ARCH_ID_S390 1 -+} __attribute__((packed)) kl_dump_header_s390sa_t; -+ -+/* Header associated to each physical page of memory saved in the system -+ * crash dump. 
-+ */ -+typedef struct kl_dump_page_s { -+#if DUMP_DEBUG >= 6 -+ uint64_t byte_offset; /* byte offset */ -+ uint64_t page_index; /* page index */ -+#endif -+ uint64_t address; /* the address of this dump page */ -+ uint32_t size; /* the size of this dump page */ -+ uint32_t flags; /* flags (DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ -+} __attribute__((packed)) kl_dump_page_t; -+ -+/* CORE_TYPE indicating type of dump -+ */ -+typedef enum { -+ dev_kmem, /* image of /dev/kmem, a running kernel */ -+ reg_core, /* Regular (uncompressed) core file */ -+ s390_core, /* s390 core file */ -+ cmp_core, /* compressed core file */ -+ unk_core /* unknown core type */ -+} CORE_TYPE; -+ -+/* function to determine kernel stack for task */ -+typedef kaddr_t(*kl_kernelstack_t) (kaddr_t); -+/* map virtual address to physical one */ -+typedef int(*kl_virtop_t)(kaddr_t, void*, kaddr_t*); -+/* function to perform page-table traversal */ -+typedef kaddr_t(*kl_mmap_virtop_t)(kaddr_t, void*); -+/* XXX description */ -+typedef int(*kl_valid_physmem_t)(kaddr_t, int); -+/* XXX description */ -+typedef kaddr_t(*kl_next_valid_physaddr_t)(kaddr_t); -+/* write a dump-header-asm, if analyzing a live system */ -+typedef int(*kl_write_dump_header_asm_t)(void*); -+/* redirect addresses pointing into task_union areas for running tasks */ -+typedef kaddr_t(*kl_fix_vaddr_t)(kaddr_t, size_t); -+/* initialize mapping of virtual to physical addresses */ -+typedef int (*kl_init_virtop_t)(void); -+ -+/* struct storing dump architecture specific values -+ */ -+typedef struct kl_dumparch_s { -+ int arch; /* KL_ARCH_ */ -+ int ptrsz; /* 32 or 64 bit */ -+ int byteorder; /* KL_LITTLE_ENDIAN or KL_BIG_ENDIAN */ -+ uint64_t pageoffset; /* PAGE_OFFSET */ -+ uint64_t kstacksize; /* size of kernel stack */ -+ uint64_t pgdshift; /* PGDIR_SHIFT */ -+ uint64_t pgdsize; /* PGDIR_SIZE */ -+ uint64_t pgdmask; /* PGDIR_MASK */ -+ uint64_t pmdshift; /* PMD_SHIFT */ -+ uint64_t pmdsize; /* PMD_SIZE */ -+ uint64_t pmdmask; /* PMD_MASK */ -+ uint64_t pageshift; /* PAGE_SHIFT */ -+ uint64_t pagesize; /* PAGE_SIZE */ -+ uint64_t pagemask; /* PAGE_MASK */ -+ uint32_t ptrsperpgd; /* PTRS_PER_PGD */ -+ uint32_t ptrsperpmd; /* PTRS_PER_PMD */ -+ uint32_t ptrsperpte; /* PTRS_PER_PTE */ -+ kl_kernelstack_t kernelstack; /* determine kernel stack for task */ -+ kl_virtop_t virtop; /* map virtual address to physical */ -+ kl_mmap_virtop_t mmap_virtop; /* traverse page table */ -+ kl_valid_physmem_t valid_physmem; /* XXX description */ -+ kl_next_valid_physaddr_t next_valid_physaddr; /* XXX description */ -+ kl_fix_vaddr_t fix_vaddr; /* XXX description */ -+ uint32_t dha_size; /* size of kl_dump_header_xxx_t */ -+ kl_write_dump_header_asm_t write_dha; /* XXX description */ -+ kl_init_virtop_t init_virtop; /* init address translation */ -+} kl_dumparch_t; -+ -+/* function types for dumpaccess */ -+typedef kaddr_t (*kl_get_ptr_t) (void*); -+typedef uint8_t (*kl_get_uint8_t) (void*); -+typedef uint16_t(*kl_get_uint16_t)(void*); -+typedef uint32_t(*kl_get_uint32_t)(void*); -+typedef uint64_t(*kl_get_uint64_t)(void*); -+/* function types for dumpaccess */ -+typedef kaddr_t (*kl_read_ptr_t) (kaddr_t); -+typedef uint8_t (*kl_read_uint8_t) (kaddr_t); -+typedef uint16_t (*kl_read_uint16_t)(kaddr_t); -+typedef uint32_t (*kl_read_uint32_t)(kaddr_t); -+typedef uint64_t (*kl_read_uint64_t)(kaddr_t); -+ -+/* struct to store dump architecture specific functions -+ */ -+typedef struct kl_dumpaccess_s { -+ /* get integer value from memory, previously read from dump */ -+ 
kl_get_ptr_t get_ptr; -+ kl_get_uint8_t get_uint8; -+ kl_get_uint16_t get_uint16; -+ kl_get_uint32_t get_uint32; -+ kl_get_uint64_t get_uint64; -+ /* read integer value from dump (from physical address) */ -+ kl_read_ptr_t read_ptr; -+ kl_read_uint8_t read_uint8; -+ kl_read_uint16_t read_uint16; -+ kl_read_uint32_t read_uint32; -+ kl_read_uint64_t read_uint64; -+ /* read integer value from dump (from virtual address) */ -+ kl_read_ptr_t vread_ptr; -+ kl_read_uint8_t vread_uint8; -+ kl_read_uint16_t vread_uint16; -+ kl_read_uint32_t vread_uint32; -+ kl_read_uint64_t vread_uint64; -+} kl_dumpaccess_t; -+ -+/* Struct containing sizes of frequently used kernel structures. -+ */ -+typedef struct struct_sizes_s { -+ int task_struct_sz; -+ int mm_struct_sz; -+ int page_sz; -+ int module_sz; -+ int new_utsname_sz; -+ int switch_stack_sz; -+ int pt_regs_sz; -+ int pglist_data_sz; -+ int runqueue_sz; -+} struct_sizes_t; -+ -+/* struct storing memory specifc values of the dumped Linux system -+ */ -+typedef struct kl_kerninfo_s{ -+ kaddr_t num_physpages; /* number of physical pages */ -+ kaddr_t mem_map; /* XXX description */ -+ kaddr_t high_memory; /* physical memory size */ -+ kaddr_t init_mm; /* address of mm_struct init_mm */ -+ uint64_t kernel_flags; /* to indicate kernel features -+ * e.g. KL_IS_PAE_I386 on i386 */ -+ int num_cpus; /* number of cpus */ -+ kaddr_t pgdat_list; /* pgdat_list value. used as MEM_MAP */ -+ /* not defined for DISCONTIG memory */ -+ int linux_release; /* kernel release of dump */ -+ struct_sizes_t struct_sizes; /* frequently needed struct sizes */ -+} kl_kerninfo_t; -+ -+/* various flags to indicate Linux kernel compile switches */ -+#define KL_IS_PAE_I386 0x0020 /* i386 kernel with PAE support */ -+ -+/* struct where to keep whole information about the dump -+ */ -+typedef struct kl_dumpinfo_s { -+ CORE_TYPE core_type; /* type of core file */ -+ char *dump; /* pathname for dump */ -+ char *map; /* pathname for map file */ -+ int core_fd; /* file descriptor for dump file */ -+ int rw_flag; /* O_RDONLY/O_RDWR (/dev/kmem only) */ -+ kl_dumparch_t arch; /* dump arch info */ -+ kl_dumpaccess_t func; /* dump access functions */ -+ kl_kerninfo_t mem; /* mem info for dump */ -+} kl_dumpinfo_t; -+ -+/* External declarations -+ */ -+extern char *dh_typename; -+extern char *dha_typename; -+extern void *G_dump_header; -+extern void *G_dump_header_asm; -+extern kl_dump_header_t *KL_DUMP_HEADER; -+extern void *KL_DUMP_HEADER_ASM; -+ -+/* function declarations -+ */ -+ -+/* open dump */ -+int kl_open_dump(void); -+ -+/* init sizes for some structures */ -+void kl_init_struct_sizes(void); -+ -+/* init host architecture information */ -+int kl_setup_hostinfo(void); -+ -+/* init dumpinfo structure */ -+int kl_setup_dumpinfo(char * /* map file */, -+ char * /* dump */, -+ int /* rwflag */); -+ -+ -+/* init dumpinfo structure */ -+int kl_set_dumpinfo(char * /* map file */, -+ char * /* dump */, -+ int /* arch of dump */, -+ int /* rwflag */); -+ -+/* free dumpinfo structure */ -+void kl_free_dumpinfo(kl_dumpinfo_t *); -+ -+/* set memory related characteristics of dump */ -+int kl_set_kerninfo(void); -+ -+/* set function pointers for dump access (depends on host and dump arch) */ -+int kl_set_dumpaccess(void); -+ -+/* print contents of kl_dumpinfo_t etc. 
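The kl_dumpaccess_t table above is a function-pointer dispatch layer: each integer accessor is a slot in a struct, and the library wires in whichever variant matches the dump's pointer size and byte order, so callers never test for endianness themselves. Below is a minimal standalone sketch of that pattern, not libklib code: the access_table struct and the two get_uint32 readers are invented stand-ins for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t (*get_uint32_fn)(void *);

/* Native-order read from a buffer previously copied out of the dump. */
static uint32_t get_uint32_native(void *buf)
{
	uint32_t v;
	memcpy(&v, buf, sizeof(v));
	return v;
}

/* Byte-swapping read, for a dump written on a machine of the opposite
 * byte order. */
static uint32_t get_uint32_swap(void *buf)
{
	uint32_t v = get_uint32_native(buf);
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

/* Stand-in for kl_dumpaccess_t: one slot per accessor. */
struct access_table {
	get_uint32_fn get_uint32;
};

int main(void)
{
	unsigned char raw[4] = { 0x12, 0x34, 0x56, 0x78 };
	struct access_table same_order  = { get_uint32_native };
	struct access_table cross_order = { get_uint32_swap };

	/* Callers go through the table and never check byte order themselves. */
	printf("native read:  0x%08x\n", (unsigned)same_order.get_uint32(raw));
	printf("swapped read: 0x%08x\n", (unsigned)cross_order.get_uint32(raw));
	return 0;
}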
*/ -+int kl_print_dumpinfo(int); -+#define KL_INFO_ALL 0 -+#define KL_INFO_ENDIAN 1 -+#define KL_INFO_ARCH 2 -+#define KL_INFO_PTRSZ 3 -+#define KL_INFO_KRELEASE 4 -+#define KL_INFO_MEMSIZE 5 -+#define KL_INFO_NUMCPUS 6 -+ -+/* Functions that read data from generic dump_header */ -+int kl_valid_dump_magic(uint64_t); -+int kl_header_swap(void *); -+uint64_t kl_header_magic(void *); -+int kl_valid_header(void *); -+uint32_t kl_header_version(void *); -+int kl_header_size(void *); -+void *kl_read_header(int fd, void *); -+ -+/* init common lkcd dump header from dump */ -+void kl_init_dump_header(int); -+ -+/* try to evalutate arch from lkcd 4.1 (version <= 7) dump header */ -+int kl_dump_arch_4_1(void *); -+ -+/* swap dump header values if necessary */ -+void kl_swap_dump_header_reg(kl_dump_header_t* dh); -+void kl_swap_dump_header_s390sa(kl_dump_header_s390sa_t* dh); -+ -+/* Read dump header in from dump */ -+int kl_read_dump_header(void); -+int kl_read_dump_header_asm(void); -+ -+/* Determine the architecure of dump */ -+int kl_set_dumparch(int); -+ -+/* Finish setting up for access to dump */ -+int kl_setup_dumpaccess(int); -+ -+/* get the raw dump header */ -+int kl_get_raw_dh(int); -+int kl_get_raw_asm_dh(int); -+ -+/* get common lkcd dump header */ -+int kl_get_dump_header(kl_dump_header_t*); -+ -+/* get older style dump headers */ -+kl_dump_header_t *get_dump_header_4_1(void *); -+kl_dump_header_t *get_dump_header_SN2_24X(void *); -+ -+/* get task that was running when dump was started */ -+kaddr_t kl_dumptask(void); -+ -+/* Print dump header */ -+int kl_print_dump_header(const char* dump); -+ -+/* Print dump regular header */ -+void kl_print_dump_header_reg(kl_dump_header_t *); -+ -+/* Print s390 dump header */ -+void kl_print_dump_header_s390(char*); -+ -+/* Convert s390 to reg header */ -+void kl_s390sa_to_reg_header(kl_dump_header_s390sa_t*, kl_dump_header_t*); -+ -+/* Byte swapping functions needed for Xclrash */ -+/* get integer value from buffer and swap bytes */ -+kaddr_t kl_get_swap_ptr(void*); -+uint16_t kl_get_swap_uint16(void*); -+uint32_t kl_get_swap_uint32(void*); -+uint64_t kl_get_swap_uint64(void*); -+ -+/* read integer value from dump (physical address) and swap bytes */ -+kaddr_t kl_read_swap_ptr(kaddr_t); -+uint16_t kl_read_swap_uint16(kaddr_t); -+uint32_t kl_read_swap_uint32(kaddr_t); -+uint64_t kl_read_swap_uint64(kaddr_t); -+ -+/* read integer value from dump (virtual address) and swap bytes */ -+kaddr_t kl_vread_swap_ptr(kaddr_t); -+uint16_t kl_vread_swap_uint16(kaddr_t); -+uint32_t kl_vread_swap_uint32(kaddr_t); -+uint64_t kl_vread_swap_uint64(kaddr_t); -+ -+#endif /* __KL_DUMP_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_dump_arch.h -@@ -0,0 +1,124 @@ -+/* -+ * $Id: kl_dump_arch.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. 
-+ */ -+ -+#ifndef __KL_DUMP_ARCH_H -+#define __KL_DUMP_ARCH_H -+ -+/* check for valid configuration -+ */ -+#if !(defined(HOST_ARCH_ALPHA) || defined(HOST_ARCH_I386) || \ -+ defined(HOST_ARCH_IA64) || defined(HOST_ARCH_S390) || \ -+ defined(HOST_ARCH_S390X) || defined(HOST_ARCH_ARM) || \ -+ defined(HOST_ARCH_PPC64) || defined(HOST_ARCH_X86_64)) -+# error "No valid host architecture defined." -+#endif -+#if ((defined(HOST_ARCH_ALPHA) && \ -+ (defined(HOST_ARCH_I386) || defined(HOST_ARCH_IA64) || \ -+ defined(HOST_ARCH_S390) || defined(HOST_ARCH_S390X) || \ -+ defined(HOST_ARCH_ARM) || defined(HOST_ARCH_PPC64) || \ -+ defined(HOST_ARCH_X86_64))) || \ -+ (defined(HOST_ARCH_I386) && \ -+ (defined(HOST_ARCH_IA64) || defined(HOST_ARCH_S390) || \ -+ defined(HOST_ARCH_S390X)|| defined(HOST_ARCH_ARM) || \ -+ defined(HOST_ARCH_PPC64)|| defined(HOST_ARCH_X86_64))) || \ -+ (defined(HOST_ARCH_IA64) && \ -+ (defined(HOST_ARCH_S390)|| defined(HOST_ARCH_S390X) || \ -+ defined(HOST_ARCH_ARM) || defined(HOST_ARCH_PPC64) || \ -+ defined(HOST_ARCH_X86_64))) || \ -+ (defined(HOST_ARCH_S390) && \ -+ (defined(HOST_ARCH_S390X) || defined(HOST_ARCH_ARM) || \ -+ defined(HOST_ARCH_PPC64) || defined(HOST_ARCH_X86_64))) || \ -+ (defined(HOST_ARCH_S390X) && \ -+ (defined(HOST_ARCH_ARM) || defined(HOST_ARCH_PPC64) || \ -+ defined(HOST_ARCH_X86_64))) || \ -+ (defined(HOST_ARCH_ARM) && \ -+ (defined(HOST_ARCH_PPC64) || defined(HOST_ARCH_X86_64))) || \ -+ (defined(HOST_ARCH_PPC64) && defined(HOST_ARCH_X86_64))) -+# error "More than one valid host architectures defined." -+#endif -+#if !(defined(DUMP_ARCH_ALPHA) || defined(DUMP_ARCH_I386) || \ -+ defined(DUMP_ARCH_IA64) || defined(DUMP_ARCH_S390) || \ -+ defined(DUMP_ARCH_S390X) || defined(DUMP_ARCH_ARM) || \ -+ defined(DUMP_ARCH_PPC64) || defined(DUMP_ARCH_X86_64)) -+# error "No valid dump architecture defined." -+#endif -+ -+/* optional: check that host arch equals one supported dump arch -+ */ -+#ifdef SUPPORT_HOST_ARCH -+# if (defined(HOST_ARCH_ALPHA) && !defined(DUMP_ARCH_ALPHA)) || \ -+ (defined(HOST_ARCH_I386) && !defined(DUMP_ARCH_I386)) || \ -+ (defined(HOST_ARCH_IA64) && !defined(DUMP_ARCH_IA64)) || \ -+ (defined(HOST_ARCH_S390) && !defined(DUMP_ARCH_S390)) || \ -+ (defined(HOST_ARCH_S390X) && !defined(DUMP_ARCH_S390X)) || \ -+ (defined(HOST_ARCH_ARM) && !defined(DUMP_ARCH_ARM)) || \ -+ (defined(HOST_ARCH_PPC64) && !defined(DUMP_ARCH_PPC64)) || \ -+ (defined(HOST_ARCH_X86_64) && !defined(DUMP_ARCH_X86_64)) -+# error "Host architecture not supported as dump architecture." 
-+# endif -+#endif -+ -+/* include dump architecture specific stuff -+ */ -+#ifdef DUMP_ARCH_ALPHA -+# include -+# include -+#endif -+/* cpw: use the " " form: */ -+#ifdef DUMP_ARCH_IA64 -+# include "kl_mem_ia64.h" -+# include "kl_dump_ia64.h" -+#endif -+#ifdef DUMP_ARCH_I386 -+# include -+# include -+#endif -+#ifdef DUMP_ARCH_S390 -+# include -+# include -+#endif -+#ifdef DUMP_ARCH_S390X -+# include -+# include -+#endif -+#ifdef DUMP_ARCH_ARM -+# include -+# include -+#endif -+#ifdef DUMP_ARCH_PPC64 -+#include -+#include -+#endif -+#ifdef DUMP_ARCH_X86_64 -+#include -+#include -+#endif -+ -+/** Function prototypes -+ **/ -+int kl_init_kern_info(void); -+ -+int kl_get_struct( -+ kaddr_t /* address */, -+ int /* size of struct */, -+ void * /* ptr to buffer */, -+ char * /* name of struct */); -+ -+#endif /* __KL_DUMP_ARCH_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_dump_ia64.h -@@ -0,0 +1,199 @@ -+/* -+ * $Id: kl_dump_ia64.h 1151 2005-02-23 01:09:12Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+/* This header file holds the architecture specific crash dump header */ -+#ifndef __KL_DUMP_IA64_H -+#define __KL_DUMP_IA64_H -+ -+/* definitions */ -+#ifndef KL_NR_CPUS -+# define KL_NR_CPUS 128 /* max number CPUs */ -+#endif -+ -+#define KL_DUMP_MAGIC_NUMBER_IA64 0xdeaddeadULL /* magic number */ -+#define KL_DUMP_VERSION_NUMBER_IA64 0x4 /* version number */ -+ -+ -+/* -+ * mkswap.c calls getpagesize() to get the system page size, -+ * which is not necessarily the same as the hardware page size. -+ * -+ * For ia64 the kernel PAGE_SIZE can be configured from 4KB ... 16KB. -+ * -+ * The physical memory is laid out in the hardware/minimal pages. -+ * This is the size we need to use for dumping physical pages. -+ * -+ * Note the hardware/minimal page size being used in: -+ * arch/ia64/kernel/efi.c`efi_memmap_walk(): -+ * curr.end = curr.start + (md->num_pages << 12); -+ * -+ * Since the system page size could change between the kernel we boot -+ * on and the kernel that caused the core dump, we may want to have something -+ * more constant like the maximum system page size (See include/asm-ia64/page.h).
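The DUMP_MIN_PAGE_* and DUMP_MAX_PAGE_* macros defined just below this comment implement the usual power-of-two align-up arithmetic for the 4KB minimal page and the 16KB maximum page. Here is a short self-contained check of that arithmetic; the shift value and the macro bodies mirror the header, while the macro names are shortened and the sample addresses are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Mirrors DUMP_MIN_PAGE_SHIFT/SIZE/MASK/ALIGN from kl_dump_ia64.h. */
#define MIN_PAGE_SHIFT 12
#define MIN_PAGE_SIZE  (1UL << MIN_PAGE_SHIFT)
#define MIN_PAGE_MASK  (~(MIN_PAGE_SIZE - 1))
#define MIN_PAGE_ALIGN(addr) (((addr) + MIN_PAGE_SIZE - 1) & MIN_PAGE_MASK)

int main(void)
{
	/* 0x1001 is one byte past a 4 KiB boundary, so it rounds up to 0x2000;
	 * 0x3000 is already aligned and comes back unchanged; 0 stays 0. */
	unsigned long samples[] = { 0x1001UL, 0x3000UL, 0x0UL };
	for (unsigned i = 0; i < 3; i++)
		printf("0x%lx -> 0x%lx\n", samples[i],
		       (unsigned long)MIN_PAGE_ALIGN(samples[i]));
	return 0;
}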
-+ */ -+#define DUMP_MIN_PAGE_SHIFT 12 -+#define DUMP_MIN_PAGE_SIZE (1UL << DUMP_MIN_PAGE_SHIFT) -+#define DUMP_MIN_PAGE_MASK (~(DUMP_MIN_PAGE_SIZE - 1)) -+#define DUMP_MIN_PAGE_ALIGN(addr) (((addr) + DUMP_MIN_PAGE_SIZE - 1) & DUMP_MIN_PAGE_MASK) -+ -+#define DUMP_MAX_PAGE_SHIFT 16 -+#define DUMP_MAX_PAGE_SIZE (1UL << DUMP_MAX_PAGE_SHIFT) -+#define DUMP_MAX_PAGE_MASK (~(DUMP_MAX_PAGE_SIZE - 1)) -+#define DUMP_MAX_PAGE_ALIGN(addr) (((addr) + DUMP_MAX_PAGE_SIZE - 1) & DUMP_MAX_PAGE_MASK) -+ -+#define DUMP_HEADER_OFFSET DUMP_MAX_PAGE_SIZE -+ -+#define DUMP_EF_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT -+ -+#define DUMP_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT -+#define DUMP_PAGE_SIZE DUMP_MIN_PAGE_SIZE -+#define DUMP_PAGE_MASK DUMP_MIN_PAGE_MASK -+#define DUMP_PAGE_ALIGN(addr) DUMP_MIN_PAGE_ALIGN(addr) -+ -+struct kl_ia64_fpreg { -+ union { -+ unsigned long bits[2]; -+ long double __dummy; /* force 16-byte alignment */ -+ } u; -+}; -+ -+struct kl_pt_regs_ia64 { -+ /* for 2.6 kernels only. This structure was totally different in 2.4 kernels */ -+ unsigned long b6; /* scratch */ -+ unsigned long b7; /* scratch */ -+ -+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ -+ unsigned long ar_ssd; /* reserved for future use (scratch) */ -+ -+ unsigned long r8; /* scratch (return value register 0) */ -+ unsigned long r9; /* scratch (return value register 1) */ -+ unsigned long r10; /* scratch (return value register 2) */ -+ unsigned long r11; /* scratch (return value register 3) */ -+ -+ unsigned long cr_ipsr; /* interrupted task's psr */ -+ unsigned long cr_iip; /* interrupted task's instruction pointer */ -+ unsigned long cr_ifs; /* interrupted task's function state */ -+ -+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ -+ unsigned long ar_pfs; /* prev function state */ -+ unsigned long ar_rsc; /* RSE configuration */ -+ /* The following two are valid only if cr_ipsr.cpl > 0: */ -+ unsigned long ar_rnat; /* RSE NaT */ -+ unsigned long ar_bspstore; /* RSE bspstore */ -+ -+ unsigned long pr; /* 64 predicate registers (1 bit each) */ -+ unsigned long b0; /* return pointer (bp) */ -+ unsigned long loadrs; /* size of dirty partition << 16 */ -+ -+ unsigned long r1; /* the gp pointer */ -+ unsigned long r12; /* interrupted task's memory stack pointer */ -+ unsigned long r13; /* thread pointer */ -+ -+ unsigned long ar_fpsr; /* floating point status (preserved) */ -+ unsigned long r15; /* scratch */ -+ -+ /* The remaining registers are NOT saved for system calls. 
*/ -+ -+ unsigned long r14; /* scratch */ -+ unsigned long r2; /* scratch */ -+ unsigned long r3; /* scratch */ -+ -+ /* The following registers are saved by SAVE_REST: */ -+ unsigned long r16; /* scratch */ -+ unsigned long r17; /* scratch */ -+ unsigned long r18; /* scratch */ -+ unsigned long r19; /* scratch */ -+ unsigned long r20; /* scratch */ -+ unsigned long r21; /* scratch */ -+ unsigned long r22; /* scratch */ -+ unsigned long r23; /* scratch */ -+ unsigned long r24; /* scratch */ -+ unsigned long r25; /* scratch */ -+ unsigned long r26; /* scratch */ -+ unsigned long r27; /* scratch */ -+ unsigned long r28; /* scratch */ -+ unsigned long r29; /* scratch */ -+ unsigned long r30; /* scratch */ -+ unsigned long r31; /* scratch */ -+ -+ unsigned long ar_ccv; /* compare/exchange value (scratch) */ -+ -+ /* -+ * * Floating point registers that the kernel considers scratch: -+ * */ -+ struct kl_ia64_fpreg f6; /* scratch */ -+ struct kl_ia64_fpreg f7; /* scratch */ -+ struct kl_ia64_fpreg f8; /* scratch */ -+ struct kl_ia64_fpreg f9; /* scratch */ -+ struct kl_ia64_fpreg f10; /* scratch */ -+ struct kl_ia64_fpreg f11; /* scratch */ -+} __attribute__((packed)); -+ -+/* -+ * Structure: dump_header_asm_t -+ * Function: This is the header for architecture-specific stuff. It -+ * follows right after the dump header. -+ */ -+typedef struct kl_dump_header_ia64_s { -+ /* the dump magic number -- unique to verify dump is valid */ -+ uint64_t magic_number; -+ /* the version number of this dump */ -+ uint32_t version; -+ /* the size of this header (in case we can't read it) */ -+ uint32_t header_size; -+ /* pointer to pt_regs */ -+ uint64_t pt_regs; -+ /* the dump registers */ -+ struct kl_pt_regs_ia64 regs; -+ /* the rnat register saved after flushrs */ -+ uint64_t rnat; -+ /* the pfs register saved after flushrs */ -+ uint64_t pfs; -+ /* the bspstore register saved after flushrs */ -+ uint64_t bspstore; -+ -+ /* smp specific */ -+ uint32_t smp_num_cpus; -+ uint32_t dumping_cpu; -+ struct kl_pt_regs_ia64 smp_regs[KL_NR_CPUS]; -+ uint64_t smp_current_task[KL_NR_CPUS]; -+ uint64_t stack[KL_NR_CPUS]; -+} __attribute__((packed)) kl_dump_header_ia64_t; -+ -+/* The following struct is used just to calculate the size needed -+ * to store per CPU info. (Make sure it is sync with the above struct) -+ */ -+struct kl_dump_CPU_info_ia64 { -+ struct kl_pt_regs_ia64 smp_regs; -+ uint64_t smp_current_task; -+ uint64_t stack; -+} __attribute__((packed)); -+ -+/* function declarations -+ */ -+int kl_set_dumparch_ia64(void); -+uint32_t dha_num_cpus_ia64(void); -+kaddr_t dha_current_task_ia64(int cpuid); -+int dha_cpuid_ia64(kaddr_t); -+kaddr_t dha_stack_ia64(int); -+kaddr_t dha_stack_ptr_ia64(int); -+int kl_read_dump_header_ia64(void); -+ -+#endif /* __KL_DUMP_IA64_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_dwarfs.h -@@ -0,0 +1,27 @@ -+/* -+ * $Id: kl_dwarfs.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by: Prashanth Tamraparni (prasht@in.ibm.com) -+ * Contributions by SGI -+ * -+ * Copyright (C) 2004 International Business Machines Corp. -+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. 
See the file COPYING for more -+ * information. -+ */ -+#ifndef __KL_DWARFS_H -+#define __KL_DWARFS_H -+ -+/* Dwarf function declarations */ -+ -+int dw_open_namelist(char*, int); -+int dw_setup_typeinfo(void); -+ -+#endif /* __KL_DWARFS_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_error.h -@@ -0,0 +1,266 @@ -+/* -+ * $Id: kl_error.h 1169 2005-03-02 21:38:01Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_ERROR_H -+#define __KL_ERROR_H -+ -+extern uint64_t klib_error; -+extern FILE *kl_stdout; -+extern FILE *kl_stderr; -+ -+/* Error Classes -+ */ -+#define KLEC_APP 0 -+#define KLEC_KLIB 1 -+#define KLEC_MEM 2 -+#define KLEC_SYM 3 -+#define KLEC_KERN 4 -+ -+#define KLEC_CLASS_MASK 0x00000000ff000000ULL -+#define KLEC_CLASS_SHIFT 24 -+#define KLEC_ECODE_MASK 0x0000000000ffffffULL -+#define KLEC_TYPE_MASK 0xffffffff00000000ULL -+#define KLEC_TYPE_SHIFT 32 -+#define KLEC_CLASS(e) ((e & KLEC_CLASS_MASK) >> KLEC_CLASS_SHIFT) -+#define KLEC_ECODE(e) (e & KLEC_ECODE_MASK) -+#define KLEC_TYPE(e) ((e & KLEC_TYPE_MASK) >> KLEC_TYPE_SHIFT) -+ -+void kl_reset_error(void); /* reset klib_error */ -+void kl_print_error(void); /* print warning/error messages */ -+void kl_check_error(char*); /* check for/handle errors, generate messages */ -+ -+/* FIXME: not used yet -- for changes in future, improve error handling -+ */ -+typedef struct klib_error_s{ -+ uint32_t code; /* error code */ -+ uint16_t class; /* error class */ -+ uint16_t severity; /* severity of error: e.g. warning or fatal error */ -+ uint32_t datadesc; /* description of data which caused the error */ -+ FILE *fp; /* fp where to place warning and error messages */ -+} klib_error_t; -+ -+/* -+ * Some macros for accessing data in klib_error -+ */ -+#define KL_ERROR klib_error -+#define KL_ERRORFP kl_stderr -+ -+/* Error codes -+ * -+ * There are basically two types of error codes -- with each type -+ * residing in a single word in a two word error code value. The lower -+ * 32-bits contains an error class and code that represents exactly -+ * WHAT error occurred (e.g., non-numeric text in a numeric value -+ * entered by a user, bad virtual address, etc.). -+ * -+ * The upper 32-bits represents what type of data was being referenced -+ * when the error occurred (e.g., bad proc struct). Having two tiers of -+ * error codes makes it easier to generate useful and specific error -+ * messages. Note that it is possible to have situations where one or the -+ * other type of error codes is not set. This is OK as long as at least -+ * one type is set.
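A minimal standalone sketch of the two-tier encoding described above, in which the lower 32 bits say what went wrong and the upper 32 bits say which kind of data was involved. The masks, shifts and the two example codes mirror this header (with parentheses added around the macro arguments); the main() routine and its printout are purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define KLEC_CLASS_MASK   0x00000000ff000000ULL
#define KLEC_CLASS_SHIFT  24
#define KLEC_ECODE_MASK   0x0000000000ffffffULL
#define KLEC_TYPE_MASK    0xffffffff00000000ULL
#define KLEC_TYPE_SHIFT   32

#define KLEC_CLASS(e) (((e) & KLEC_CLASS_MASK) >> KLEC_CLASS_SHIFT)
#define KLEC_ECODE(e) ((e) & KLEC_ECODE_MASK)
#define KLEC_TYPE(e)  (((e) & KLEC_TYPE_MASK) >> KLEC_TYPE_SHIFT)

#define KLEC_MEM 2
#define KLE_MEM             ((uint64_t)KLEC_MEM << KLEC_CLASS_SHIFT)
#define KLE_INVALID_VADDR   (KLE_MEM | 8)          /* lower 32 bits: what went wrong */
#define KLE_BAD_TASK_STRUCT (((uint64_t)1) << 32)  /* upper 32 bits: which data was bad */

int main(void)
{
	/* A routine that failed to read a task_struct at a bad virtual
	 * address would set both halves at once: */
	uint64_t klib_error = KLE_BAD_TASK_STRUCT | KLE_INVALID_VADDR;

	printf("class = %llu, ecode = %llu, type = %llu\n",
	       (unsigned long long)KLEC_CLASS(klib_error),
	       (unsigned long long)KLEC_ECODE(klib_error),
	       (unsigned long long)KLEC_TYPE(klib_error));
	/* prints: class = 2, ecode = 8, type = 1 */
	return 0;
}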
-+ */ -+ -+/* General klib error codes -+ */ -+#define KLE_KLIB (KLEC_KLIB << KLEC_CLASS_SHIFT) -+#define KLE_NO_MEMORY (KLE_KLIB|1) -+#define KLE_OPEN_ERROR (KLE_KLIB|2) -+#define KLE_ZERO_BLOCK (KLE_KLIB|3) -+#define KLE_INVALID_VALUE (KLE_KLIB|4) -+#define KLE_NULL_BUFF (KLE_KLIB|5) -+#define KLE_ZERO_SIZE (KLE_KLIB|6) -+#define KLE_ACTIVE (KLE_KLIB|7) -+#define KLE_NULL_POINTER (KLE_KLIB|8) -+#define KLE_UNSUPPORTED_ARCH (KLE_KLIB|9) -+ -+#define KLE_MISC_ERROR (KLE_KLIB|97) -+#define KLE_NOT_SUPPORTED (KLE_KLIB|98) -+#define KLE_UNKNOWN_ERROR (KLE_KLIB|99) -+ -+/* memory error codes -+ */ -+#define KLE_MEM (KLEC_MEM << KLEC_CLASS_SHIFT) -+#define KLE_BAD_MAP_FILE (KLE_MEM|1) -+#define KLE_BAD_DUMP (KLE_MEM|2) -+#define KLE_BAD_DUMPTYPE (KLE_MEM|3) -+#define KLE_INVALID_LSEEK (KLE_MEM|4) -+#define KLE_INVALID_READ (KLE_MEM|5) -+#define KLE_BAD_KERNINFO (KLE_MEM|6) -+#define KLE_INVALID_PADDR (KLE_MEM|7) -+#define KLE_INVALID_VADDR (KLE_MEM|8) -+#define KLE_INVALID_VADDR_ALIGN (KLE_MEM|9) -+#define KLE_INVALID_MAPPING (KLE_MEM|10) -+#define KLE_CMP_ERROR (KLE_MEM|11) -+#define KLE_INVALID_DUMP_MAGIC (KLE_MEM|12) -+#define KLE_KERNEL_MAGIC_MISMATCH (KLE_MEM|13) -+#define KLE_NO_END_SYMBOL (KLE_MEM|14) -+#define KLE_INVALID_DUMP_HEADER (KLE_MEM|15) -+#define KLE_DUMP_INDEX_CREATION (KLE_MEM|16) -+#define KLE_DUMP_HEADER_ONLY (KLE_MEM|17) -+#define KLE_PAGE_NOT_PRESENT (KLE_MEM|18) -+#define KLE_BAD_ELF_FILE (KLE_MEM|19) -+#define KLE_ARCHIVE_FILE (KLE_MEM|20) -+#define KLE_MAP_FILE_PRESENT (KLE_MEM|21) -+#define KLE_BAD_MAP_FILENAME (KLE_MEM|22) -+#define KLE_BAD_DUMP_FILENAME (KLE_MEM|23) -+#define KLE_BAD_NAMELIST_FILE (KLE_MEM|24) -+#define KLE_BAD_NAMELIST_FILENAME (KLE_MEM|25) -+#define KLE_LIVE_SYSTEM (KLE_MEM|26) -+#define KLE_NOT_INITIALIZED (KLE_MEM|27) -+ -+/* symbol error codes -+ */ -+#define KLE_SYM (KLEC_SYM << KLEC_CLASS_SHIFT) -+#define KLE_NO_SYMTAB (KLE_SYM|1) -+#define KLE_NO_SYMBOLS (KLE_SYM|2) -+#define KLE_INVALID_TYPE (KLE_SYM|3) -+#define KLE_NO_MODULE_LIST (KLE_SYM|4) -+ -+/* kernel data error codes -+ */ -+#define KLE_KERN (KLEC_KERN << KLEC_CLASS_SHIFT) -+#define KLE_INVALID_KERNELSTACK (KLE_KERN|1) -+#define KLE_INVALID_STRUCT_SIZE (KLE_KERN|2) -+#define KLE_BEFORE_RAM_OFFSET (KLE_KERN|3) -+#define KLE_AFTER_MAXPFN (KLE_KERN|4) -+#define KLE_AFTER_PHYSMEM (KLE_KERN|5) -+#define KLE_AFTER_MAXMEM (KLE_KERN|6) -+#define KLE_PHYSMEM_NOT_INSTALLED (KLE_KERN|7) -+#define KLE_NO_DEFTASK (KLE_KERN|8) -+#define KLE_PID_NOT_FOUND (KLE_KERN|9) -+#define KLE_DEFTASK_NOT_ON_CPU (KLE_KERN|10) -+#define KLE_NO_CURCPU (KLE_KERN|11) -+#define KLE_NO_CPU (KLE_KERN|12) -+#define KLE_SIG_ERROR (KLE_KERN|13) -+#define KLE_TASK_RUNNING (KLE_KERN|14) -+#define KLE_NO_SWITCH_STACK (KLE_KERN|15) -+ -+/* Error codes that indicate what type of data was bad. These are -+ * placed in the upper 32-bits of klib_error. 
-+ */ -+#define KLE_BAD_TASK_STRUCT (((uint64_t)1)<<32) -+#define KLE_BAD_SYMNAME (((uint64_t)2)<<32) -+#define KLE_BAD_SYMADDR (((uint64_t)3)<<32) -+#define KLE_BAD_FUNCADDR (((uint64_t)4)<<32) -+#define KLE_BAD_STRUCT (((uint64_t)5)<<32) -+#define KLE_BAD_FIELD (((uint64_t)6)<<32) -+#define KLE_BAD_PC (((uint64_t)7)<<32) -+#define KLE_BAD_RA (((uint64_t)8)<<32) -+#define KLE_BAD_SP (((uint64_t)9)<<32) -+#define KLE_BAD_EP (((uint64_t)10)<<32) -+#define KLE_BAD_SADDR (((uint64_t)11)<<32) -+#define KLE_BAD_KERNELSTACK (((uint64_t)12)<<32) -+#define KLE_BAD_LINENO (((uint64_t)13)<<32) -+#define KLE_MAP_FILE (((uint64_t)14)<<32) -+#define KLE_DUMP (((uint64_t)15)<<32) -+#define KLE_BAD_STRING (((uint64_t)16)<<32) -+#define KLE_ELF_FILE (((uint64_t)17)<<32) -+ -+/* flags for function kl_msg() -+ * First 3 bits define trace levels. Minimum trace threshold is trace level 1. -+ * So maximal 7 trace levels are possible. We are using only KLE_TRACELEVEL_MAX. -+ * If no trace level bits are set, it is normal output. -+ */ -+#define _KLE_TRACEBIT1 0x00000001 /* trace bit 1 */ -+#define _KLE_TRACEBIT2 0x00000002 /* trace bit 2 */ -+#define _KLE_TRACEBIT3 0x00000004 /* trace bit 3 */ -+#define _KLE_TRACENUM 8 /* used in _KLE_TRACENUM */ -+#define _KLE_TRACEMASK (_KLE_TRACENUM-1) /* mask for trace bits */ -+/* further flags */ -+#define KLE_F_NOORIGIN 0x00001000 /* do not print origin for this msg */ -+#define KLE_F_ERRORMSG 0x00002000 /* treat message as error message */ -+/* trace levels := predefined combinations of trace bits */ -+#define KLE_F_TRACELEVEL1 (_KLE_TRACEBIT1) -+#define KLE_F_TRACELEVEL2 (_KLE_TRACEBIT2) -+#define KLE_F_TRACELEVEL3 (_KLE_TRACEBIT1|_KLE_TRACEBIT2) -+#define KLE_F_TRACELEVEL4 (_KLE_TRACEBIT3) -+#define KLE_TRACELEVELMAX 4 -+#define KLE_TRACELEVEL(flg) (flg & _KLE_TRACEMASK) -+#define KLE_GETTRACELEVEL(flg) \ -+ ((KLE_TRACELEVEL(flg) > KLE_TRACELEVELMAX) ? KLE_TRACELEVELMAX : \ -+ KLE_TRACELEVEL(flg)) -+ -+/* define debug components of libklib (64 components possible) -+ * used by kl_msg() -+ */ -+#define KL_DBGCOMP_ALLOC 0x0000000001 /* liballoc */ -+#define KL_DBGCOMP_BFD 0x0000000002 /* general bfd support */ -+#define KL_DBGCOMP_BTREE 0x0000000004 /* btree implementation */ -+#define KL_DBGCOMP_COMPRESS 0x0000000008 /* gzip/rle (de)compression */ -+#define KL_DBGCOMP_INIT 0x0000000010 /* klib initialization */ -+#define KL_DBGCOMP_MEMMAP 0x0000000020 /* memory mapping */ -+#define KL_DBGCOMP_MODULE 0x0000000040 /* kernel module handling */ -+#define KL_DBGCOMP_SIGNAL 0x0000000080 /* signal handling */ -+#define KL_DBGCOMP_STABS 0x0000000100 /* stabs format support */ -+#define KL_DBGCOMP_SYMBOL 0x0000000200 /* symbol handling */ -+#define KL_DBGCOMP_TYPE 0x0000000400 /* type information handling */ -+#define KL_DBGCOMP_ALL ((uint64_t) -1) /* all components */ -+ -+/* central output routine, shouldn't be used directly, but -+ * by following macros -+ */ -+void kl_msg(uint64_t, uint32_t, const char*, const char*, int, -+ const char*, ...); -+ -+/* vararg macros that should be used instead of kl_msg() -+ */ -+/* used within libklib to print non-error messages (e.g. progress indication) -+ */ -+#define KL_MSG(fmt, args...) \ -+kl_msg(0, 0, NULL, NULL, 0, fmt, ## args) -+/* Can be used by application to print error messages; -+ * not used by libklib itself. -+ */ -+#define kl_error(fmt, args...) \ -+kl_msg(0, KLE_F_ERRORMSG, __FUNCTION__, __FILE__, __LINE__, fmt, ## args) -+/* Generate trace messages. Used for libklib debugging. Might be used -+ * by an application, too. 
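The trace level of a message is carried in the low three flag bits defined above, and KLE_GETTRACELEVEL() clamps anything beyond KLE_TRACELEVELMAX. A brief self-contained illustration follows; the bit and level definitions mirror the header, while the sample flag words are made up.

#include <stdint.h>
#include <stdio.h>

#define _KLE_TRACEBIT1   0x00000001
#define _KLE_TRACEBIT2   0x00000002
#define _KLE_TRACEBIT3   0x00000004
#define _KLE_TRACENUM    8
#define _KLE_TRACEMASK   (_KLE_TRACENUM - 1)
#define KLE_F_NOORIGIN   0x00001000

#define KLE_F_TRACELEVEL3 (_KLE_TRACEBIT1 | _KLE_TRACEBIT2)
#define KLE_TRACELEVELMAX 4
#define KLE_TRACELEVEL(flg)    ((flg) & _KLE_TRACEMASK)
#define KLE_GETTRACELEVEL(flg) \
	((KLE_TRACELEVEL(flg) > KLE_TRACELEVELMAX) ? KLE_TRACELEVELMAX : \
	  KLE_TRACELEVEL(flg))

int main(void)
{
	/* A level-3 message that also suppresses the origin prefix. */
	uint32_t flags = KLE_F_TRACELEVEL3 | KLE_F_NOORIGIN;
	printf("level = %u\n", (unsigned)KLE_GETTRACELEVEL(flags));   /* 3 */

	/* All three trace bits set would be "level 7"; it clamps to 4. */
	printf("clamped = %u\n", (unsigned)KLE_GETTRACELEVEL(0x7u));  /* 4 */
	return 0;
}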
-+ * A macro _DBG_COMPONENT has to be defined locally in the module where -+ * any trace macro is used. See above debug components. -+ * Trace messages are only printed iff _DBG_COMPONENT was set before with a -+ * call to kl_set_dbg_component(). -+ */ -+#define kl_trace1(flg, fmt, args...) \ -+kl_msg(_DBG_COMPONENT, KLE_F_TRACELEVEL1|(flg), \ -+ __FUNCTION__, __FILE__, __LINE__, fmt, ## args) -+#define kl_trace2(flg, fmt, args...) \ -+kl_msg(_DBG_COMPONENT, KLE_F_TRACELEVEL2|(flg), \ -+ __FUNCTION__, __FILE__, __LINE__, fmt, ## args) -+#define kl_trace3(flg, fmt, args...) \ -+kl_msg(_DBG_COMPONENT, KLE_F_TRACELEVEL3|(flg), \ -+ __FUNCTION__, __FILE__, __LINE__, fmt, ## args) -+#define kl_trace4(flg, fmt, args...) \ -+kl_msg(_DBG_COMPONENT, KLE_F_TRACELEVEL4|(flg), \ -+ __FUNCTION__, __FILE__, __LINE__, fmt, ## args) -+ -+/* functions to set some global variables for libklib debugging -+ */ -+int kl_set_trace_threshold(uint32_t); -+void kl_set_dbg_component(uint64_t); -+void kl_set_stdout(FILE *); -+void kl_set_stderr(FILE *); -+ -+/* functions to get contents of global variables for libklib debugging -+ */ -+uint32_t kl_get_trace_threshold(void); -+uint64_t kl_get_dbg_component(void); -+ -+#endif /* __KL_ERROR_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_htnode.h -@@ -0,0 +1,71 @@ -+/* -+ * $Id: kl_htnode.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_HTNODE_H -+#define __KL_HTNODE_H -+ -+/* Node structure for use in hierarchical trees (htrees). -+ */ -+typedef struct htnode_s { -+ struct htnode_s *next; -+ struct htnode_s *prev; -+ struct htnode_s *parent; -+ struct htnode_s *children; -+ int seq; -+ int level; -+ int key; -+} htnode_t; -+ -+/* Flag values -+ */ -+#define HT_BEFORE 0x1 -+#define HT_AFTER 0x2 -+#define HT_CHILD 0x4 -+#define HT_PEER 0x8 -+ -+/* Function prototypes -+ */ -+htnode_t *kl_next_htnode( -+ htnode_t * /* htnode pointer */); -+ -+htnode_t *kl_prev_htnode( -+ htnode_t * /* htnode pointer */); -+ -+void ht_insert_peer( -+ htnode_t * /* htnode pointer */, -+ htnode_t * /* new htnode pointer*/, -+ int /* flags */); -+ -+void ht_insert_child( -+ htnode_t * /* htnode pointer */, -+ htnode_t * /* new htnode pointer*/, -+ int /* flags */); -+ -+int ht_insert( -+ htnode_t * /* htnode pointer */, -+ htnode_t * /* new htnode pointer*/, -+ int /* flags */); -+ -+void ht_insert_next_htnode( -+ htnode_t * /* htnode pointer */, -+ htnode_t * /* new htnode pointer*/); -+ -+#endif /* __KL_HTNODE_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_lib.h -@@ -0,0 +1,65 @@ -+/* -+ * $Id: kl_lib.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. 
-+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_LIB_H -+#define __KL_LIB_H -+ -+/* Include system header files -+ */ -+ -+#if 0 -+/* cpw: we don't need this userland stuff: */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#endif -+ -+/* Include lkcd library header files -+ */ -+/* cpw: change these from the < > form to the " " form: */ -+#include "kl_types.h" -+#include "kl_alloc.h" -+#include "kl_libutil.h" -+#include "kl_btnode.h" -+#include "kl_htnode.h" -+#include "kl_queue.h" -+#include "kl_stringtab.h" -+ -+#endif /* __KL_LIB_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_libutil.h -@@ -0,0 +1,40 @@ -+/* -+ * $Id: kl_libutil.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2004 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_LIBUTIL_H -+#define __KL_LIBUTIL_H -+ -+/* cpw: change all these from the < > form to the " " form: */ -+#include "kl_alloc.h" -+#include "kl_btnode.h" -+#include "kl_copt.h" -+#include "kl_htnode.h" -+#include "kl_queue.h" -+#include "kl_stringtab.h" -+ -+int kl_shift_value(uint64_t ); -+int kl_string_compare(char *, char *); -+int kl_string_match(char *, char *); -+uint64_t kl_strtoull(char *, char **, int); -+time_t kl_str_to_ctime(char *); -+void *kl_get_ra(void); -+ -+#endif /* __KL_LIBUTIL_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_mem.h -@@ -0,0 +1,104 @@ -+/* -+ * $Id: kl_mem.h 1157 2005-02-25 22:04:05Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. 
See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_MEM_H -+#define __KL_MEM_H -+ -+#if 0 -+cpw: skip: -+extern kaddr_t VMALLOC_START; -+extern kaddr_t VMALLOC_END; -+#endif -+ -+/* -+ * Function prototypes -+ */ -+ -+int kl_linux_release(void); -+ -+k_error_t kl_readmem( -+ kaddr_t /* physical address to start reading from */, -+ unsigned /* number of bytes to read */, -+ void * /* pointer to buffer */); -+ -+k_error_t kl_readkmem( -+ kaddr_t /* virtual address to start reading from */, -+ unsigned /* number of bytes to read */, -+ void * /* pointer to buffer */); -+ -+int kl_virtop( -+ kaddr_t /* virtual address to translate */, -+ void * /* pointer to mem_map for address translation */, -+ kaddr_t * /* pointer to physical address to return */); -+ -+k_error_t kl_get_block( -+ kaddr_t /* virtual address */, -+ unsigned /* size of block to read in */, -+ void * /* pointer to buffer */, -+ void * /* pointer to mmap */); -+ -+/* Wrapper that eliminates the mmap parameter -+ */ -+#define GET_BLOCK(a, s, b) kl_get_block(a, s, (void *)b, (void *)0) -+ -+uint64_t kl_uint( -+ void * /* pointer to buffer containing struct */, -+ char * /* name of struct */, -+ char * /* name of member */, -+ unsigned /* offset */); -+ -+int64_t kl_int( -+ void * /* pointer to buffer containing struct */, -+ char * /* name of struct */, -+ char * /* name of member */, -+ unsigned /* offset */); -+ -+kaddr_t kl_kaddr( -+ void * /* pointer to buffer containing struct */, -+ char * /* name of struct */, -+ char * /* name of member */); -+ -+/* XXX deprecated use KL_READ_PTR() instead */ -+kaddr_t kl_kaddr_to_ptr( -+ kaddr_t /* Address to dereference */); -+ -+int kl_is_valid_kaddr( -+ kaddr_t /* Address to test */, -+ void * /* pointer to mmap */, -+ int /* flags */); -+ -+/* REMIND: -+ * Likely not right for ia64 -+ */ -+#define KL_KADDR_IS_PHYSICAL(vaddr) ((vaddr >= KL_PAGE_OFFSET) && \ -+ (vaddr <= KL_HIGH_MEMORY)) -+ -+#define PGNO_TO_PADDR(pgno) (pgno << KL_PAGE_SHIFT) -+ -+/* -+ * declaration of some defaults that are used in kl_set_dumparch() -+ */ -+int kl_valid_physaddr(kaddr_t); -+int kl_valid_physmem(kaddr_t, int); -+kaddr_t kl_next_valid_physaddr(kaddr_t); -+kaddr_t kl_fix_vaddr(kaddr_t, size_t); -+int kl_init_virtop(void); -+ -+#endif /* __KL_MEM_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_mem_ia64.h -@@ -0,0 +1,149 @@ -+/* -+ * $Id: kl_mem_ia64.h 1250 2006-04-18 18:23:44Z cliffpwickman $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_MEM_IA64_H -+#define __KL_MEM_IA64_H -+ -+/* XXX - the following macros are used by functions in kl_page.c and in */ -+/* function kl_virtop, they still have to be defined properly, */ -+/* all the following macros have first to be set with correct values. */ -+/* I don't have a clue what values to use for ia64 architecture!!! 
*/ -+ -+/* KSTACK_SIZE depends on page size (see kernel headers ptrace.h and page.h) -+ * #define IA64_STK_OFFSET ((1 << IA64_TASK_STRUCT_LOG_NUM_PAGES)*PAGE_SIZE) -+ * and -+ * #define PAGE_SIZE 1UL<= KL_HIGH_MEMORY))) -+ -+uint32_t dha_num_cpus_ia64(void); -+kaddr_t dha_current_task_ia64(int); -+int dha_cpuid_ia64(kaddr_t); -+kaddr_t dha_stack_ia64(int); -+kaddr_t dha_stack_ptr_ia64(int); -+kaddr_t kl_kernelstack_ia64(kaddr_t); -+kaddr_t kl_mmap_virtop_ia64(kaddr_t, void*); -+int kl_init_virtop_ia64(void); -+int kl_virtop_ia64(kaddr_t, void*, kaddr_t*); -+int kl_vtop_ia64(kaddr_t, kaddr_t*); -+int kl_valid_physmem_ia64(kaddr_t, int); -+kaddr_t kl_next_valid_physaddr_ia64(kaddr_t); -+kaddr_t kl_fix_vaddr_ia64(kaddr_t, size_t); -+ -+/* Structure containing key data for ia64 virtual memory mapping. -+ * Note that a number of fields are SN system specific. -+ */ -+typedef struct ia64_vminfo_s { -+ int flags; -+ kaddr_t vpernode_base; -+ kaddr_t vglobal_base; -+ kaddr_t to_phys_mask; -+ kaddr_t kernphysbase; -+ int nasid_shift; /* SN specific */ -+ int nasid_mask; /* SN specific */ -+} ia64_vminfo_t; -+ -+extern ia64_vminfo_t ia64_vminfo; -+ -+/* Some vminfo flags -+ */ -+#define MAPPED_KERN_FLAG 0x1 -+#define SN2_FLAG 0x2 -+ -+/* Some vminfo macros -+ */ -+#define IS_MAPPED_KERN (ia64_vminfo.flags & MAPPED_KERN_FLAG) -+#define IS_SN2 (ia64_vminfo.flags & SN2_FLAG) -+#define KL_VPERNODE_BASE ia64_vminfo.vpernode_base -+#define KL_VGLOBAL_BASE ia64_vminfo.vglobal_base -+#define KL_TO_PHYS_MASK ia64_vminfo.to_phys_mask -+#define KL_KERNPHYSBASE ia64_vminfo.kernphysbase -+#define KL_NASID_SHIFT ia64_vminfo.nasid_shift -+#define KL_NASID_MASK ia64_vminfo.nasid_mask -+ -+#define ADDR_TO_NASID(A) (((A) >> (long)(KL_NASID_SHIFT)) & KL_NASID_MASK) -+ -+#endif /* __KL_MEM_IA64_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_module.h -@@ -0,0 +1,69 @@ -+/* -+ * $Id: kl_module.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_MODULE_H -+#define __KL_MODULE_H -+ -+/* -+ * insmod generates ksymoops -+ * -+ */ -+ -+typedef struct kl_modinfo_s { -+ char *modname; /* name of module as loaded in dump */ -+ /* store ksym info for all modules in a linked list */ -+ struct kl_modinfo_s *next; -+ char *object_file; /* name of file that module was loaded from*/ -+ /* ? possibly store modtime and version here ? 
*/ -+ uint64_t header; /* address of module header */ -+ uint64_t mtime; /* time of last modification of object_file */ -+ uint32_t version; /* kernel version that module was compiled for */ -+ uint64_t text_sec; /* address of text section */ -+ uint64_t text_len; /* length of text section */ -+ uint64_t data_sec; /* address of data section */ -+ uint64_t data_len; /* length of data section */ -+ uint64_t rodata_sec; /* address of rodata section */ -+ uint64_t rodata_len; /* length of rodata section */ -+ uint64_t bss_sec; /* address of rodata section */ -+ uint64_t bss_len; /* length of rodata section */ -+ char *ksym_object; /* ksym for object */ -+ char *ksym_text_sec; /* ksym for its text section */ -+ char *ksym_data_sec; /* ksym for its data section */ -+ char *ksym_rodata_sec; /* ksym for its rodata section */ -+ char *ksym_bss_sec; /* ksym for its bss sectio */ -+} kl_modinfo_t; -+ -+int kl_get_module(char*, kaddr_t*, void**); -+int kl_get_module_2_6(char*, kaddr_t*, void**); -+int kl_get_modname(char**, void*); -+int kl_new_get_modname(char**, void*); -+void kl_free_modinfo(kl_modinfo_t**); -+int kl_new_modinfo(kl_modinfo_t**, void*); -+int kl_set_modinfo(kaddr_t, char*, kl_modinfo_t*); -+int kl_complete_modinfo(kl_modinfo_t*); -+int kl_load_ksyms(int); -+int kl_load_ksyms_2_6(int); -+int kl_unload_ksyms(void); -+int kl_load_module_sym(char*, char*, char*); -+int kl_unload_module_sym(char*); -+int kl_autoload_module_info(char*); -+kl_modinfo_t * kl_lkup_modinfo(char*); -+ -+#endif /* __KL_MODULE_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_queue.h -@@ -0,0 +1,89 @@ -+/* -+ * $Id: kl_queue.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_QUEUE_H -+#define __KL_QUEUE_H -+ -+/* List element header -+ */ -+typedef struct element_s { -+ struct element_s *next; -+ struct element_s *prev; -+} element_t; -+ -+/* Some useful macros -+ */ -+#define ENQUEUE(list, elem) \ -+ kl_enqueue((element_t **)list, (element_t *)elem) -+#define DEQUEUE(list) kl_dequeue((element_t **)list) -+#define FINDQUEUE(list, elem) \ -+ kl_findqueue((element_t **)list, (element_t *)elem) -+#define REMQUEUE(list, elem) kl_remqueue((element_t **)list, (element_t *)elem) -+ -+typedef struct list_of_ptrs { -+ element_t elem; -+ unsigned long long val64; -+} list_of_ptrs_t; -+ -+#define FINDLIST_QUEUE(list, elem, compare) \ -+ kl_findlist_queue((list_of_ptrs_t **)list, \ -+ (list_of_ptrs_t *)elem, compare) -+ -+/** -+ ** Function prototypes -+ **/ -+ -+/* Add a new element to the tail of a doubly linked list. -+ */ -+void kl_enqueue( -+ element_t** /* ptr to head of list */, -+ element_t* /* ptr to element to add to the list */); -+ -+/* Remove an element from the head of a doubly linked list. A pointer -+ * to the element will be returned. 
In the event that the list is -+ * empty, a NULL pointer will be returned. -+ */ -+element_t *kl_dequeue( -+ element_t** /* ptr to list head (first item removed) */); -+ -+/* Checks to see if a particular element is in a list. If it is, a -+ * value of one (1) will be returned. Otherwise, a value of zero (0) -+ * will be returned. -+ */ -+int kl_findqueue( -+ element_t** /* ptr to head of list */, -+ element_t* /* ptr to element to find on list */); -+ -+/* Walks through a list of pointers to queues and looks for a -+ * particular list. -+ */ -+int kl_findlist_queue( -+ list_of_ptrs_t** /* ptr to list of lists */, -+ list_of_ptrs_t* /* ptr to list to look for */, -+ int(*)(void *, void *) /* ptr to compare function */); -+ -+/* Remove specified element from doubly linked list. -+ */ -+void kl_remqueue( -+ element_t** /* ptr to head of list */, -+ element_t* /* ptr to element to remove from list */); -+ -+#endif /* __KL_QUEUE_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_stabs.h -@@ -0,0 +1,122 @@ -+/* -+ * $Id: kl_stabs.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2004 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_STABS_H -+#define __KL_STABS_H -+ -+/* STABS specific types -+ */ -+#define STAB_XSTRUCT 100 /* Cross referense to STAB_STRUCT */ -+#define STAB_XUNION 101 /* Cross referense to STAB_UNIONB */ -+#define STAB_XENUM 102 /* Cross referense to STAB_ENUM */ -+ -+/* Structure allocated for every namelist. A namelist can be either an -+ * object file (.o or executible), or it can be an archive (.a). -+ */ -+typedef struct st_nmlist_s { -+ char *sts_filename; /* disk file name */ -+ short sts_type; /* ST_OBJ or ST_AR */ -+ short sts_nfiles; /* number of source/object files */ -+} st_nmlist_t; -+ -+/* Values for type field -+ */ -+#define ST_OBJ 1 /* object file (.o or executible) */ -+#define ST_AR 2 /* archive */ -+ -+/* Stab entry type Flags. For determining which stab entries to -+ * capture from the symbol table. -+ */ -+#define ST_UNDF 0x0001 -+#define ST_SO 0x0002 -+#define ST_LSYM 0x0004 -+#define ST_GSYM 0x0008 -+#define ST_PSYM 0x0010 -+#define ST_STSYM 0x0020 -+#define ST_LCSYM 0x0040 -+#define ST_FUN 0x0080 -+#define ST_BINCL 0x0100 -+#define ST_EINCL 0x0200 -+#define ST_EXCL 0x0400 -+#define ST_SLINE 0x0800 -+#define ST_RSYM 0x2000 -+#define ST_ALL 0xffff -+#define ST_DEFAULT (ST_LSYM|ST_GSYM|ST_FUN) -+ -+#define N_UNDF 0 -+ -+/* Structures that allow us to selectively cycle through only those BFD -+ * sections containing STAB data. -+ */ -+typedef struct stab_sect_s { -+ char *stabsect_name; -+ char *strsect_name; -+} stab_sect_t; -+ -+/* Local structure that contains the current type string (which may be -+ * just a part of the complete type defenition string) and the character -+ * index (current) pointer. 
-+ */ -+typedef struct stab_str_s { -+ char *str; -+ char *ptr; -+} stab_str_t; -+ -+/* Local structure containing global values that allow us to cycle -+ * through multiple object files without reinitializing. -+ */ -+typedef struct st_global_s { -+ bfd *abfd; /* current bfd pointer */ -+ int type; /* symbol entry type */ -+ int flags; /* want flags */ -+ int flag; /* current ST flag */ -+ int nmlist; /* current namelist index */ -+ int srcfile; /* current source file number */ -+ int incfile; /* current include file */ -+ int symnum; /* symbol entry number */ -+ bfd_byte *stabp; /* beg of current string table */ -+ bfd_byte *stabs_end; /* end of current string table */ -+ int staboff; /* current stab table offset */ -+ unsigned int value; /* value (e.g., function addr) */ -+ int stroffset; /* offset in stab string table */ -+ short desc; /* desc value (e.g, line number) */ -+ stab_str_t stab_str; /* current stab string */ -+} st_global_t; -+ -+/* Macros for accessing the current global values -+ */ -+#define G_abfd G_values.abfd -+#define G_type G_values.type -+#define G_flags G_values.flags -+#define G_flag G_values.flag -+#define G_nmlist G_values.nmlist -+#define G_srcfile G_values.srcfile -+#define G_incfile G_values.incfile -+#define G_symnum G_values.symnum -+#define G_stabp G_values.stabp -+#define G_stabs_end G_values.stabs_end -+#define G_staboff G_values.staboff -+#define G_value G_values.value -+#define G_stroffset G_values.stroffset -+#define G_desc G_values.desc -+#define G_stab_str G_values.stab_str -+#define CUR_CHAR G_stab_str.ptr -+ -+#endif /* __KL_STABS_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_stringtab.h -@@ -0,0 +1,68 @@ -+/* -+ * $Id: kl_stringtab.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libutil. -+ * A library which provides auxiliary functions. -+ * libutil is part of lkcdutils -- utilities for Linux kernel crash dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_STRINGTAB_H -+#define __KL_STRINGTAB_H -+ -+/* The string table structure -+ * -+ * String space is allocated from 4K blocks which are allocated -+ * as needed. The first four bytes of each block are reserved so -+ * that the blocks can be chained together (to make it easy to free -+ * them when the string table is no longer necessary). -+ */ -+typedef struct string_table_s { -+ int num_strings; -+ void *block_list; -+} string_table_t; -+ -+#define NO_STRINGTAB 0 -+#define USE_STRINGTAB 1 -+ -+/** -+ ** Function prototypes -+ **/ -+ -+/* Initialize a string table. Depending on the value of the flag -+ * parameter, either temporary or permenent blocks will be used. -+ * Upon success, a pointer to a string table will be returned. -+ * Otherwise, a NULL pointer will be returned. -+ */ -+string_table_t *kl_init_string_table( -+ int /* flag (K_TEMP/K_PERM)*/); -+ -+/* Free all memory blocks allocated for a particular string table -+ * and then free the table itself. 
-+ */ -+void kl_free_string_table( -+ string_table_t* /* pointer to string table */); -+ -+/* Search for a string in a string table. If the string does not -+ * exist, allocate space from the string table and add the string. -+ * In either event, a pointer to the string (from the table) will -+ * be returned. -+ */ -+char *kl_get_string( -+ string_table_t* /* pointer to string table */, -+ char* /* string to get/add from/to string table */, -+ int /* flag (K_TEMP/K_PERM)*/); -+ -+#endif /* __KL_STRINGTAB_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_sym.h -@@ -0,0 +1,131 @@ -+/* -+ * $Id: kl_sym.h 1233 2005-09-10 08:01:11Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_SYM_H -+#define __KL_SYM_H -+ -+/* The syment struct contains information about kernel symbols (text, -+ * data, etc.). The first field in syment_t is a btnode_s sruct. This -+ * allows the generic binary search tree routines, insert_tnode() and -+ * find_tnode(), to be used. -+ */ -+typedef struct syment_s { -+ btnode_t s_bt; /* Must be first */ -+ struct syment_s *s_next; /* For linked lists */ -+ struct syment_s *s_prev; /* For linked lists */ -+ kaddr_t s_addr; /* vaddr of symbol */ -+ kaddr_t s_end; /* end address of symbol */ -+ int s_type; /* text, data */ -+ struct syment_s *s_forward; /* For linked lists */ -+} syment_t; -+ -+#define s_name s_bt.bt_key -+ -+#define SYM_GLOBAL_TEXT 1 -+#define SYM_LOCAL_TEXT 2 -+#define SYM_LOCORE_TEXT 3 -+#define SYM_GLOBAL_DATA 4 -+#define SYM_LOCAL_DATA 5 -+#define SYM_ABS 6 -+#define SYM_UNK 9 -+#define SYM_KSYM 10 -+#define SYM_KSYM_TEXT 11 -+#define SYM_KALLSYMS 12 -+ -+#define SYM_MAP_ANY 0 -+#define SYM_MAP_FILE 1 -+#define SYM_MAP_KSYM 2 -+#define SYM_MAP_MODULE 3 -+#define SYM_MAP_KALLSYMS 4 -+ -+#define KL_KERNEL_MODULE "kernel_module" -+#define KL_S_BSS ".bss.start" -+#define KL_E_BSS ".bss.end" -+#define KL_S_DATA ".data.start" -+#define KL_E_DATA ".data.end" -+#define KL_S_RODATA ".rodata.start" -+#define KL_E_RODATA ".rodata.end" -+#define KL_S_TEXT ".text.start" -+#define KL_E_TEXT ".text.end" -+#define KL_SYM_END "__end__" -+ -+ -+#define KL_SYMBOL_NAME_LEN 256 -+ -+/* -+ * Struct containing symbol table information -+ */ -+typedef struct symtab_s { -+ int symcnt; /* Number of symbols */ -+ int symaddrcnt; /* Number of symbol addrs to track */ -+ syment_t **symaddrs; /* Table of symbols by address */ -+ btnode_t *symnames; /* tree of symbols by name */ -+ syment_t *text_list; /* Linked list of text symbols */ -+ syment_t *data_list; /* Linked list of data symbols */ -+} symtab_t; -+ -+ -+/* support of further mapfiles besides System.map */ -+typedef struct maplist_s { -+ struct maplist_s *next; -+ int maplist_type; /* type of maplist */ -+ char *mapfile; /* name of mapfile */ -+ char *modname; /* set if map belongs to a module */ -+ symtab_t *syminfo; -+} maplist_t; -+ -+ -+/* 
API Function prototypes -+ */ -+int kl_read_syminfo(maplist_t*); -+int kl_free_syminfo(char*); -+void kl_free_symtab(symtab_t*); -+void kl_free_syment_list(syment_t*); -+void kl_free_maplist(maplist_t*); -+syment_t *kl_get_similar_name(char*, char*, int*, int*); -+syment_t *kl_lkup_symname(char*); -+syment_t *_kl_lkup_symname(char*, int, size_t len); -+#define KL_LKUP_SYMNAME(NAME, TYPE, LEN) _kl_lkup_symname(NAME, TYPE, LEN) -+syment_t *kl_lkup_funcaddr(kaddr_t); -+syment_t *kl_lkup_symaddr(kaddr_t); -+syment_t *kl_lkup_symaddr_text(kaddr_t); -+syment_t *_kl_lkup_symaddr(kaddr_t, int); -+#define KL_LKUP_SYMADDR(KADDR, TYPE) _kl_lkup_symaddr(KADDR, TYPE) -+kaddr_t kl_symaddr(char * /* symbol name */); -+kaddr_t kl_symptr(char * /* symbol name */); -+kaddr_t kl_funcaddr(kaddr_t /* pc value */); -+char *kl_funcname(kaddr_t /* pc value */); -+int kl_funcsize(kaddr_t /* pc value */); -+int kl_symsize(syment_t*); -+syment_t *kl_alloc_syment(kaddr_t, kaddr_t, int, const char*); -+void kl_insert_symbols(symtab_t*, syment_t*); -+int kl_insert_artificial_symbols(symtab_t*, syment_t**, kl_modinfo_t*); -+int kl_convert_symbol(kaddr_t*, int*, char, kl_modinfo_t*); -+int kl_load_sym(char*); -+int kl_print_symtables(char*, char*, int, int); -+void kl_print_symbol(kaddr_t, syment_t*, int); -+ -+/* flag for use by kl_print_symbol() and kl_print_syminfo() -+ */ -+#define KL_SYMWOFFSET (0x01) /* with offset field */ -+#define KL_SYMFULL (0x02) /* print detailed syminfo */ -+#define KL_SYMBYNAME (0x04) /* print symbol sorted by name */ -+ -+#endif /* __KL_SYM_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_task.h -@@ -0,0 +1,39 @@ -+/* -+ * $Id: kl_task.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002, 2004 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_TASK_H -+#define __KL_TASK_H -+ -+extern kaddr_t deftask; -+ -+/* Function prototypes -+ */ -+k_error_t kl_set_deftask(kaddr_t); -+int kl_parent_pid(void *); -+kaddr_t kl_pid_to_task(kaddr_t); -+k_error_t kl_get_task_struct(kaddr_t, int, void *); -+kaddr_t kl_kernelstack(kaddr_t); -+kaddr_t kl_first_task(void); -+kaddr_t kl_next_task(void *); -+kaddr_t kl_prev_task(void *); -+kaddr_t kl_pid_to_task(kaddr_t); -+int kl_task_size(kaddr_t); -+ -+#endif /* __KL_TASK_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_typeinfo.h -@@ -0,0 +1,199 @@ -+/* -+ * $Id: kl_typeinfo.h 1259 2006-04-25 18:33:20Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2006 Silicon Graphics, Inc. All rights reserved. 
-+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_TYPEINFO_H -+#define __KL_TYPEINFO_H -+ -+#define KLT_BASE 0x001 -+#define KLT_STRUCT 0x002 -+#define KLT_UNION 0x004 -+#define KLT_ENUMERATION 0x008 -+#define KLT_MEMBER 0x010 -+#define KLT_ARRAY 0x020 -+#define KLT_POINTER 0x040 -+#define KLT_TYPEDEF 0x080 -+#define KLT_FUNCTION 0x100 -+#define KLT_VARIABLE 0x200 -+#define KLT_SRCFILE 0x400 -+#define KLT_SUBRANGE 0x800 -+#define KLT_INCOMPLETE 0x4000 -+#define KLT_UNKNOWN 0x8000 -+#define KLT_TYPE (KLT_BASE|KLT_STRUCT|KLT_UNION|KLT_ENUMERATION) -+#define KLT_TYPES (KLT_BASE|KLT_STRUCT|KLT_UNION|KLT_ENUMERATION|KLT_TYPEDEF) -+ -+#define IS_TYPE(T) ((T) & KLT_TYPE) -+#define IS_STRUCT(T) ((T) & KLT_STRUCT) -+#define IS_UNION(T) ((T) & KLT_UNION) -+#define IS_ENUM(T) ((T) & KLT_ENUM) -+#define IS_MEMBER(T) ((T) & KLT_MEMBER) -+#define IS_POINTER(T) ((T) & KLT_POINTER) -+#define IS_TYPEDEF(T) ((T) & KLT_TYPEDEF) -+ -+#define TYP_SETUP_FLG 0x01 -+#define TYP_TYPESTRING_FLG 0x02 -+#define TYP_INCOMPLETE_FLG 0x04 -+#define TYP_XREFERENCE_FLG 0x08 -+#define TYP_ANONYMOUS_FLG 0x10 /* Denotes anonymous union or struct */ -+ -+#define NO_INDENT 0x01000000 -+#define SUPPRESS_NAME 0x02000000 -+#define SUPPRESS_NL 0x04000000 -+#define SUPPRESS_SEMI_COLON 0x08000000 -+#define NO_REALTYPE 0x10000000 -+ -+extern int numnmlist; -+ -+#define KL_TYPEINFO() (numnmlist) -+ -+typedef struct kltype_s { -+ char *kl_name; /* type name */ -+ char *kl_typestr; /* 'typecast' string */ -+ void *kl_ptr; /* ptr to arch typeinfo */ -+ int kl_flags; /* (e.g., STAB_FLG) */ -+ int kl_type; /* (e.g., KLT_TYPEDEF) */ -+ int kl_offset; /* offset to 1st byte */ -+ int kl_size; /* number of bytes */ -+ int kl_bit_offset; /* offset to 1st data bit */ -+ int kl_bit_size; /* total num of data bits */ -+ int kl_encoding; /* for base value types */ -+ int kl_low_bounds; /* for arrays */ -+ int kl_high_bounds; /* for arrays */ -+ unsigned int kl_value; /* enum value, etc. */ -+ struct kltype_s *kl_member; /* struct/union member list */ -+ struct kltype_s *kl_next; /* hash lists, etc. 
*/ -+ struct kltype_s *kl_realtype; /* pointer to real type */ -+ struct kltype_s *kl_indextype; /* pointer to index_type */ -+ struct kltype_s *kl_elementtype; /* pointer to element_type */ -+} kltype_t; -+ -+/* Flag values -+ */ -+#define K_HEX 0x1 -+#define K_OCTAL 0x2 -+#define K_BINARY 0x4 -+#define K_NO_SWAP 0x8 -+ -+/* Base type encoding values -+ */ -+#define ENC_CHAR 0x01 -+#define ENC_SIGNED 0x02 -+#define ENC_UNSIGNED 0x04 -+#define ENC_FLOAT 0x08 -+#define ENC_ADDRESS 0x10 -+#define ENC_UNDEFINED 0x20 -+ -+/* Maximum number of open namelists -+ */ -+#define MAXNMLIST 10 -+ -+typedef struct nmlist_s { -+ int index; -+ char *namelist; -+ void *private; /* pointer to private control struct */ -+ string_table_t *stringtab; -+} nmlist_t; -+ -+extern nmlist_t nmlist[]; -+extern int numnmlist; -+extern int curnmlist; -+ -+#define KL_TYPESTR_STRUCT "struct" -+#define KL_TYPESTR_UNION "union" -+#define KL_TYPESTR_ENUM "enum" -+#define KL_TYPESTR_VOID "void" -+ -+/* Function prototypes -+ */ -+kltype_t *kl_find_type( -+ char * /* type name */, -+ int /* type number */); -+ -+kltype_t *kl_find_next_type( -+ kltype_t * /* kltype_t pointer */, -+ int /* type number */); -+ -+kltype_t *kl_first_type( -+ int /* type number */); -+ -+kltype_t *kl_next_type( -+ kltype_t * /* kltype_t pointer */); -+ -+kltype_t *kl_prev_type( -+ kltype_t * /* kltype_t pointer */); -+ -+kltype_t *kl_realtype( -+ kltype_t * /* kltype_t pointer */, -+ int /* type number */); -+ -+kltype_t *kl_find_typenum( -+ uint64_t /* private typenumber */); -+ -+int kl_get_first_similar_typedef( -+ char * /* type name */, -+ char * /* fullname */); -+ -+int kl_type_size( -+ kltype_t * /* kltype_t pointer */); -+ -+int kl_struct_len( -+ char * /* struct name */); -+ -+kltype_t *kl_get_member( -+ kltype_t * /* kltype_t pointer */, -+ char * /* member name */); -+ -+int kl_get_member_offset( -+ kltype_t * /* kltype_t pointer */, -+ char * /* member name */); -+ -+int kl_is_member( -+ char * /* struct name */, -+ char * /* member name */); -+ -+kltype_t *kl_member( -+ char * /* struct name */, -+ char * /* member name */); -+ -+int kl_member_offset( -+ char * /* struct name */, -+ char * /* member name */); -+ -+int kl_member_size( -+ char * /* struct name */, -+ char * /* member name */); -+ -+/* cpw: get rid of last arguent FILE * */ -+void kl_print_member(void *, kltype_t *, int, int); -+void kl_print_pointer_type(void *, kltype_t *, int, int); -+void kl_print_function_type(void *, kltype_t *, int, int); -+void kl_print_array_type(void *, kltype_t *, int, int); -+void kl_print_enumeration_type(void *, kltype_t *, int, int); -+void kl_print_base_type(void *, kltype_t *, int, int); -+void kl_print_type(void *, kltype_t *, int, int); -+void kl_print_struct_type(void *, kltype_t *, int, int); -+void kl_print_base_value(void *, kltype_t *, int); -+ -+void kl_print_type( -+ void * /* pointer to data */, -+ kltype_t * /* pointer to type information */, -+ int /* indent level */, -+ int /* flags */); -+ -+#endif /* __KL_TYPEINFO_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/kl_types.h -@@ -0,0 +1,54 @@ -+/* -+ * $Id: kl_types.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. 
-+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __KL_TYPES_H -+#define __KL_TYPES_H -+ -+/* The following typedef should be used for variables or return values -+ * that contain kernel virtual or physical addresses. It should be sized -+ * such that it can hold both pointers of 64 bit architectures as well as -+ * pointers from 32 bit architectures. -+ */ -+typedef unsigned long kaddr_t; -+ -+/* The following typedef should be used when converting a pointer value -+ * (either kernel or application) to an unsigned value for pointer -+ * calculations. -+ */ -+typedef unsigned long uaddr_t; -+ -+/* KLIB error type -+ */ -+typedef uint64_t k_error_t; -+ -+/* Typedef that allows a single fprintf() call to work for both -+ * 32-bit and 64-bit pointer values. -+ */ -+#define UADDR(X) ((kaddr_t)X) -+#define UADDR64(X) ((kaddr_t)X)) -+/* #define UADDR(X) ((uaddr_t)X) */ -+/* #define UADDR64(X) ((uint64_t)((uaddr_t)X)) */ -+ -+ -+/* cpw */ -+/* was: #include */ -+#include "asm/kl_types.h" -+ -+#endif /* __KL_TYPES_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/klib.h -@@ -0,0 +1,480 @@ -+/* -+ * $Id: klib.h 1336 2006-10-23 23:27:06Z tjm $ -+ * -+ * This file is part of libklib. -+ * A library which provides access to Linux system kernel dumps. -+ * -+ * Created by Silicon Graphics, Inc. -+ * Contributions by IBM, NEC, and others -+ * -+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * Copyright 2000 Junichi Nomura, NEC Solutions -+ * -+ * This code is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU Lesser Public License as published by -+ * the Free Software Foundation; either version 2.1 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+/* -+ * klib.h -- Interface of the klib library, a library for access to -+ * Linux system memory dumps. -+ */ -+ -+#ifndef __KLIB_H -+#define __KLIB_H -+ -+/* Include header files -+ */ -+#if 0 -+ /* cpw: don't include all this: */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#endif -+ -+/* cpw: change all the below includes form the < > form to " " */ -+ -+/* Include libutil header -+ */ -+#include "kl_lib.h" -+ -+/* Include libklib header files -+ */ -+#include "kl_types.h" -+#include "kl_error.h" -+#include "kl_dump.h" -+#include "kl_mem.h" -+#include "kl_cmp.h" -+#include "kl_typeinfo.h" -+#include "kl_module.h" -+#include "kl_sym.h" -+#include "kl_bfd.h" -+#include "kl_debug.h" -+#include "kl_stabs.h" -+#include "kl_dwarfs.h" -+#include "kl_task.h" -+#include "kl_dump_arch.h" -+ -+ -+#ifndef TRUE -+# define TRUE 1 -+#endif -+#ifndef FALSE -+# define FALSE 0 -+#endif -+ -+#ifndef MIN -+#define MIN(x,y) (((x)<(y))?(x):(y)) -+#endif -+#ifndef MAX -+#define MAX(x,y) (((x)>(y))?(x):(y)) -+#endif -+ -+#define KL_NR_CPUS 128 -+ -+/* We have to distinc between HOST_ARCH_* and DUMP_ARCH_*. 
These two classes of -+ * macros are used througout the code for conditional compilation. -+ * Additional we have following macros for comparison and switch statements. -+ */ -+#define KL_ARCH_UNKNOWN 0 -+#define KL_ARCH_ALPHA 1 -+#define KL_ARCH_ARM 2 -+#define KL_ARCH_I386 3 -+#define KL_ARCH_IA64 4 -+#define KL_ARCH_M68K 5 -+#define KL_ARCH_MIPS 6 -+#define KL_ARCH_MIPS64 7 -+#define KL_ARCH_PPC 8 -+#define KL_ARCH_S390 9 -+#define KL_ARCH_SH 10 -+#define KL_ARCH_SPARK 11 -+#define KL_ARCH_SPARK64 12 -+#define KL_ARCH_S390X 13 -+#define KL_ARCH_PPC64 14 -+#define KL_ARCH_X86_64 15 -+#define KL_ARCH_IA64_SN2 16 -+#define KL_ARCH_IA64_DIG 17 -+#define KL_ARCH_IA64_HPSIM 18 -+#define KL_ARCH_IA64_HPZX1 19 -+#define KL_ARCH_S390SA 20 -+ -+#define KL_LIVE_SYSTEM 1000 -+ -+#define ARCH_IS_IA64(A) \ -+ ((A==KL_ARCH_IA64)|| \ -+ (A==KL_ARCH_IA64_SN2)|| \ -+ (A==KL_ARCH_IA64_DIG)|| \ -+ (A==KL_ARCH_IA64_HPSIM)|| \ -+ (A==KL_ARCH_IA64_HPZX1)) -+ -+#ifdef HOST_ARCH_ALPHA -+# define KL_HOST_ARCH KL_ARCH_ALPHA -+#endif -+#ifdef HOST_ARCH_ARM -+# define KL_HOST_ARCH KL_ARCH_ARM -+#endif -+#ifdef HOST_ARCH_I386 -+# define KL_HOST_ARCH KL_ARCH_I386 -+#endif -+#ifdef HOST_ARCH_IA64 -+# define KL_HOST_ARCH KL_ARCH_IA64 -+#endif -+#ifdef HOST_ARCH_S390 -+# define KL_HOST_ARCH KL_ARCH_S390 -+#endif -+#ifdef HOST_ARCH_S390X -+# define KL_HOST_ARCH KL_ARCH_S390X -+#endif -+#ifdef HOST_ARCH_PPC64 -+#define KL_HOST_ARCH KL_ARCH_PPC64 -+#endif -+#ifdef HOST_ARCH_X86_64 -+#define KL_HOST_ARCH KL_ARCH_X86_64 -+#endif -+ -+#define KL_ARCH_STR_ALPHA "alpha" -+#define KL_ARCH_STR_ARM "arm" -+#define KL_ARCH_STR_I386 "i386" -+#define KL_ARCH_STR_IA64 "ia64" -+#define KL_ARCH_STR_S390 "s390" -+#define KL_ARCH_STR_S390X "s390x" -+#define KL_ARCH_STR_PPC64 "ppc64" -+#define KL_ARCH_STR_X86_64 "x86_64" -+#define KL_ARCH_STR_IA64_SN2 "sn2" -+#define KL_ARCH_STR_UNKNOWN "unknown" -+ -+/* for endianess of dump and host arch -+ */ -+#define KL_UNKNOWN_ENDIAN 0x00 -+#define KL_LITTLE_ENDIAN 0x01 -+#define KL_BIG_ENDIAN 0x02 -+ -+/* macros for handling of different Kernel versions -+ */ -+#define LINUX_2_2_X(R) (((R) & 0xffff00) == 0x020200) -+#define LINUX_2_2_16 0x020210 -+#define LINUX_2_2_17 0x020211 -+#define LINUX_2_4_X(R) (((R) & 0xffff00) == 0x020400) -+#define LINUX_2_4_0 0x020400 -+#define LINUX_2_4_4 0x020404 -+#define LINUX_2_4_15 0x02040f -+#define LINUX_2_6_X(R) (((R) & 0xffff00) == 0x020600) -+#define LINUX_2_6_0 0x020600 -+ -+/* libklib flags -+ */ -+#define KL_FAILSAFE_FLG 0x0001 -+#define KL_NOVERIFY_FLG 0x0002 -+#define KL_SILENT_FLG 0x0004 -+#define KL_SAVETYPES_FLG 0x0008 -+#define KL_USETYPES_FLG 0x0010 -+ -+/* macros for backward compatibility -+ */ -+#define NUM_PHYSPAGES KLP->dump->mem.num_physpages -+#define MEM_MAP KLP->dump->mem.mem_map -+#define KL_HIGH_MEMORY KLP->dump->mem.high_memory -+#define KL_INIT_MM KLP->dump->mem.init_mm -+#define KL_NUM_CPUS KLP->dump->mem.num_cpus -+#define KL_PGDAT_LIST KLP->dump->mem.pgdat_list -+ -+/* macros for better use of dump architecture dependent functions -+ */ -+ -+/* read integer value from buffer */ -+#define KL_GET_PTR(ptr) (*KLP->dump->func.get_ptr)(ptr) -+#define KL_GET_LONG(ptr) ((int64_t) KL_GET_PTR(ptr)) -+#define KL_GET_ULONG(ptr) KL_GET_PTR(ptr) -+#define KL_GET_UINT8(ptr) (*KLP->dump->func.get_uint8)(ptr) -+#define KL_GET_UINT16(ptr) (*KLP->dump->func.get_uint16)(ptr) -+#define KL_GET_UINT32(ptr) (*KLP->dump->func.get_uint32)(ptr) -+#define KL_GET_UINT64(ptr) (*KLP->dump->func.get_uint64)(ptr) -+#define KL_GET_INT8(ptr) ((int8_t) KL_GET_UINT8(ptr)) 
-+#define KL_GET_INT16(ptr) ((int16_t) KL_GET_UINT16(ptr)) -+#define KL_GET_INT32(ptr) ((int32_t) KL_GET_UINT32(ptr)) -+#define KL_GET_INT64(ptr) ((int64_t) KL_GET_UINT64(ptr)) -+ -+/* read integer value from dump (without address mapping) -+ * Use these functions sparsely, e.g. before address translation -+ * is properly set up. -+ */ -+#define KL_READ_PTR(addr) (*KLP->dump->func.read_ptr)(addr) -+#define KL_READ_LONG(addr) ((int64_t) KL_READ_PTR(addr)) -+#define KL_READ_ULONG(addr) KL_READ_PTR(addr) -+#define KL_READ_UINT8(addr) (*KLP->dump->func.read_uint8)(addr) -+#define KL_READ_UINT16(addr) (*KLP->dump->func.read_uint16)(addr) -+#define KL_READ_UINT32(addr) (*KLP->dump->func.read_uint32)(addr) -+#define KL_READ_UINT64(addr) (*KLP->dump->func.read_uint64)(addr) -+#define KL_READ_INT8(addr) ((int8_t) KL_READ_UINT8(addr)) -+#define KL_READ_INT16(addr) ((int16_t) KL_READ_UINT16(addr)) -+#define KL_READ_INT32(addr) ((int32_t) KL_READ_UINT32(addr)) -+#define KL_READ_INT64(addr) ((int64_t) KL_READ_UINT64(addr)) -+ -+/* read integer value from dump (from virtual address) doing address mapping */ -+#define KL_VREAD_PTR(addr) (*KLP->dump->func.vread_ptr)(addr) -+#define KL_VREAD_LONG(addr) ((int64_t) KL_VREAD_PTR(addr)) -+#define KL_VREAD_ULONG(addr) KL_VREAD_PTR(addr) -+#define KL_VREAD_UINT8(addr) (*KLP->dump->func.vread_uint8)(addr) -+#define KL_VREAD_UINT16(addr) (*KLP->dump->func.vread_uint16)(addr) -+#define KL_VREAD_UINT32(addr) (*KLP->dump->func.vread_uint32)(addr) -+#define KL_VREAD_UINT64(addr) (*KLP->dump->func.vread_uint64)(addr) -+#define KL_VREAD_INT8(addr) ((int8_t) KL_VREAD_UINT8(addr)) -+#define KL_VREAD_INT16(addr) ((int16_t) KL_VREAD_UINT16(addr)) -+#define KL_VREAD_INT32(addr) ((int32_t) KL_VREAD_UINT32(addr)) -+#define KL_VREAD_INT64(addr) ((int64_t) KL_VREAD_UINT64(addr)) -+ -+/* determine start of stack */ -+#define KL_KERNELSTACK_UINT64 (*KLP->dump->arch.kernelstack) -+/* map virtual adress to physical one */ -+#define KL_VIRTOP (*KLP->dump->arch.virtop) -+/* travers page table */ -+#define KL_MMAP_VIRTOP (*KLP->dump->arch.mmap_virtop) -+/* check whether address points to valid physical memory */ -+#define KL_VALID_PHYSMEM (*KLP->dump->arch.valid_physmem) -+/* determine next valid physical address */ -+#define KL_NEXT_VALID_PHYSADDR (*KLP->dump->arch.next_valid_physaddr) -+/* XXX */ -+#define KL_FIX_VADDR (*KLP->dump->arch.fix_vaddr) -+/* write dump_header_asm_t */ -+#define KL_WRITE_DHA (*KLP->dump->arch.write_dha) -+/* size of dump_header_asm_t */ -+#define KL_DHA_SIZE (KLP->dump->arch.dha_size) -+/* init virtual to physical address mapping */ -+#define KL_INIT_VIRTOP (KLP->dump->arch.init_virtop) -+ -+ -+/* macros for easier access to dump specific values */ -+#define KL_CORE_TYPE KLP->dump->core_type -+#define KL_CORE_FD KLP->dump->core_fd -+#define KL_ARCH KLP->dump->arch.arch -+#define KL_PTRSZ KLP->dump->arch.ptrsz -+#define KL_NBPW (KL_PTRSZ/8) -+#define KL_BYTE_ORDER KLP->dump->arch.byteorder -+#define KL_PAGE_SHIFT KLP->dump->arch.pageshift -+#define KL_PAGE_SIZE KLP->dump->arch.pagesize -+#define KL_PAGE_MASK KLP->dump->arch.pagemask -+#define KL_PAGE_OFFSET KLP->dump->arch.pageoffset -+#define KL_STACK_OFFSET KLP->dump->arch.kstacksize -+#define IS_BIG_ENDIAN() (KL_BYTE_ORDER == KL_BIG_ENDIAN) -+#define IS_LITTLE_ENDIAN() (KL_BYTE_ORDER == KL_LITTLE_ENDIAN) -+#define KL_LINUX_RELEASE KLP->dump->mem.linux_release -+#define KL_KERNEL_FLAGS KLP->dump->mem.kernel_flags -+ -+#if 0 -+/* cpw: don't need all this dump file stuff: */ -+/* macros to access input files 
*/ -+#define KL_MAP_FILE KLP->dump->map -+#define KL_DUMP_FILE KLP->dump->dump -+#define KL_KERNTYPES_FILE KLP->kerntypes -+ -+#define CORE_IS_KMEM (KL_CORE_TYPE == dev_kmem) -+#define CORE_IS_DUMP ((KL_CORE_TYPE > dev_kmem) && (KL_CORE_TYPE <= unk_core)) -+ -+ -+/* Generic dump header structure (the first three members of -+ * dump_header and dump_header_asm are the same). -+ */ -+typedef struct generic_dump_header_s { -+ uint64_t magic_number; -+ uint32_t version; -+ uint32_t header_size; -+} generic_dump_header_t; -+ -+/* Some macros for making it easier to access the generic header -+ * information in a dump_header or dump_header_asm stuct. -+ */ -+#define DHP(dh) ((generic_dump_header_t*)(dh)) -+#define DH_MAGIC(dh) DHP(dh)->magic_number -+#define DH_VERSION(dh) DHP(dh)->version -+#define DH_HEADER_SIZE(dh) DHP(dh)->header_size -+ -+extern kl_dump_header_t *DUMP_HEADER; -+extern void *DUMP_HEADER_ASM; -+#endif -+ -+/* Struct to store some host architecture specific values -+ */ -+typedef struct kl_hostarch_s { -+ int arch; /* KL_ARCH_ */ -+ int ptrsz; /* 32 or 64 bit */ -+ int byteorder; /* KL_LITTLE_ENDIAN or KL_BIG_ENDIAN */ -+} kl_hostarch_t; -+ -+/* Struct klib_s, contains all the information necessary for accessing -+ * information in the kernel. A pointer to a klib_t struct will be -+ * returned from libkern_init() if core dump analysis (or live system -+ * analysis) is possible. -+ * -+ */ -+typedef struct klib_s { -+ int k_flags; /* Flags pertaining to klib_s struct */ -+ kl_hostarch_t *host; /* host arch info */ -+ kl_dumpinfo_t *dump; /* dump information */ -+ maplist_t *k_symmap; /* symbol information */ -+ kltype_t *k_typeinfo; /* type information */ -+ char *kerntypes; /* pathname for kerntypes file */ -+} klib_t; -+ -+/* Structure to accomodate all debug formats */ -+struct namelist_format_opns { -+ /* to open/setup the namelist file */ -+ int (*open_namelist) (char *filename , int flags); -+ int (*setup_typeinfo)(void); -+}; -+ -+/* -+ * global variables -+ */ -+ -+/* Here we store almost everything, we need to know about a dump. */ -+extern klib_t *KLP; -+ -+/* macros to make live easier */ -+#define MIP KLP->dump -+#define STP KLP->k_symmap -+#define TASK_STRUCT_SZ (KLP->dump->mem.struct_sizes.task_struct_sz) -+#define MM_STRUCT_SZ (KLP->dump->mem.struct_sizes.mm_struct_sz) -+#define PAGE_SZ (KLP->dump->mem.struct_sizes.page_sz) -+#define MODULE_SZ (KLP->dump->mem.struct_sizes.module_sz) -+#define NEW_UTSNAME_SZ (KLP->dump->mem.struct_sizes.new_utsname_sz) -+#define SWITCH_STACK_SZ (KLP->dump->mem.struct_sizes.switch_stack_sz) -+#define PT_REGS_SZ (KLP->dump->mem.struct_sizes.pt_regs_sz) -+#define PGLIST_DATA_SZ (KLP->dump->mem.struct_sizes.pglist_data_sz) -+#define RUNQUEUE_SZ (KLP->dump->mem.struct_sizes.runqueue_sz) -+ -+#if 0 -+cpw: used for sial? -+/* klib_jbuf has to be defined outside libklib. -+ * Make sure to call setjmp(klib_jbuf) BEFORE kl_sig_setup() is called! */ -+extern jmp_buf klib_jbuf; -+#endif -+ -+/* Macros that eliminate the offset paramaters to the kl_uint() and kl_int() -+ * functions (just makes things cleaner looking) -+ */ -+#define KL_UINT(p, s, m) kl_uint(p, s, m, 0) -+#define KL_INT(p, s, m) kl_int(p, s, m, 0) -+ -+/* Macros for translating strings into long numeric values depending -+ * on the base of 's'. 
-+ */ -+#define GET_VALUE(s, value) kl_get_value(s, NULL, 0, value) -+#define GET_HEX_VALUE(s) (kaddr_t)strtoull(s, (char**)NULL, 16) -+#define GET_DEC_VALUE(s) (unsigned)strtoull(s, (char**)NULL, 10) -+#define GET_OCT_VALUE(s) (unsigned)strtoull(s, (char**)NULL, 8) -+ -+#define KL_SIGFLG_CORE 0x1 -+#define KL_SIGFLG_SILENT 0x2 -+#define KL_SIGFLG_LNGJMP 0x4 -+ -+/* Flag that tells kl_is_valid_kaddr() to perform a word aligned check -+ */ -+#define WORD_ALIGN_FLAG 1 -+ -+#define ADDR_TO_PGNO(addr) ((addr - KL_PAGE_OFFSET) >> KL_PAGE_SHIFT); -+ -+/* Generalized macros for pointing at different data types at particular -+ * offsets in kernel structs. -+ */ -+/* #define K_ADDR(p, s, f) ((uaddr_t)(p) + kl_member_offset(s, f)) */ -+#define K_ADDR(p, s, f) ((p) + kl_member_offset(s, f)) -+#define K_PTR(p, s, f) (K_ADDR((void*)p, s, f)) -+#define CHAR(p, s, f) (K_ADDR((char*)p, s, f)) -+ -+#define PTRSZ32 ((KL_PTRSZ == 32) ? 1 : 0) -+#define PTRSZ64 ((KL_PTRSZ == 64) ? 1 : 0) -+ -+/* Function prototypes -+ */ -+/* cpw: remove the last argument FILE * */ -+void kl_binary_print(uint64_t); -+void kl_print_bit_value(void *, int, int, int, int); -+void kl_print_char(void *, int); -+void kl_print_uchar(void *, int); -+void kl_print_int2(void *, int); -+void kl_print_uint2(void *, int); -+void kl_print_int4(void *, int); -+void kl_print_uint4(void *, int); -+void kl_print_float4(void *, int); -+void kl_print_int8(void *, int); -+void kl_print_uint8(void *, int); -+void kl_print_float8(void *, int); -+void kl_print_base(void *, int, int, int); -+void kl_print_string(char *); -+ -+int kl_get_live_filenames( -+ char * /* pointer to buffer for map filename */, -+ char * /* pointer to buffer for dump filename */, -+ char * /* pointer to buffer for namelist filename */); -+ -+int kl_init_klib( -+ char * /* map file name */, -+ char * /* dump file name */, -+ char * /* namelist file name */, -+ int /* system arch of memory in dump */, -+ int /* rwflag flag (/dev/mem only) */, -+ int /* Linux release */); -+ -+void kl_free_klib( -+ klib_t * /* Pointer to klib_s struct */); -+ -+ -+int kl_dump_retrieve( -+ char * /* dumpdev name */, -+ char * /* dumpdir name */, -+ int /* progress flag (zero or non-zero) */, -+ int /* debug flag (zero or non-zero) */); -+ -+int kl_dump_erase( -+ char * /* dumpdev name */); -+ -+uint64_t kl_strtoull( -+ char * /* string containing numeric value */, -+ char ** /* pointer to pointer to bad char */, -+ int /* base */); -+ -+int kl_get_value( -+ char * /* param */, -+ int * /* mode pointer */, -+ int /* number of elements */, -+ uint64_t * /* pointer to value */); -+ -+/* Functions for working with list_head structs -+ */ -+kaddr_t kl_list_entry(kaddr_t, char *, char *); -+kaddr_t kl_list_next(kaddr_t); -+kaddr_t kl_list_prev(kaddr_t); -+ -+int kl_sig_setup(int); -+ -+void kl_set_curnmlist( -+ int /* index of namelist */); -+ -+int kl_open_namelist( -+ char * /* name of namelist */, -+ int /* flags */, -+ int /* kl_flags */); -+ -+int kl_get_structure(kaddr_t, char*, size_t*, void**); -+uint64_t kl_get_bit_value(void*, unsigned int, unsigned int, unsigned int); -+void kl_s390tod_to_timeval(uint64_t, struct timeval*); -+ -+#endif /* __KLIB_H */ ---- /dev/null -+++ b/kdb/modules/lcrash/lc_eval.h -@@ -0,0 +1,225 @@ -+/* -+ * $Id: lc_eval.h 1122 2004-12-21 23:26:23Z tjm $ -+ * -+ * This file is part of lcrash, an analysis tool for Linux memory dumps. -+ * -+ * Created by Silicon Graphics, Inc. 
-+ * Contributions by IBM, and others -+ * -+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. -+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. See the file COPYING for more -+ * information. -+ */ -+ -+#ifndef __LC_EVAL_H -+#define __LC_EVAL_H -+ -+typedef struct type_s { -+ int flag; -+ union { -+ struct type_s *next; -+ kltype_t *kltp; -+ } un; -+} type_t; -+ -+#define t_next un.next -+#define t_kltp un.kltp -+ -+/* Structure to hold info on "tokens" extracted from eval and print -+ * command input strings. -+ */ -+typedef struct token_s { -+ short type; -+ short operator; /* if token is an operator */ -+ char *string; /* string holding value or identifier */ -+ char *ptr; /* pointer to start of token */ -+ struct token_s *next; /* next token in the chain */ -+} token_t; -+ -+/* Structure returned by the eval() function containing the result -+ * of an expression evaluation. This struct is also used to build the -+ * parse tree for the expression. -+ */ -+typedef struct node_s { -+ struct node_s *next; /* linked list pointer */ -+ unsigned char node_type; /* type of node */ -+ unsigned short flags; /* see below */ -+ unsigned char operator; /* operator if node is type OPERATOR */ -+ unsigned char byte_size; /* byte_size of base_type values */ -+ char *name; /* name of variable or struct member */ -+ /* value and address are uint64_t in lcrash, but for ia32 ... */ -+ unsigned long long value; /* numeric value or pointer */ -+ unsigned long address; /* address (could be same as pointer) */ -+ type_t *type; /* pointer to type related info */ -+ char *tok_ptr; /* pointer to token in cmd string */ -+ struct node_s *left; /* pointer to left child */ -+ struct node_s *right; /* pointer to right child */ -+} node_t; -+ -+/* Token and Node types -+ */ -+#define OPERATOR 1 -+#define NUMBER 2 -+#define INDEX 3 -+#define TYPE_DEF 4 -+#define VADDR 5 -+#define MEMBER 6 -+#define STRING 7 -+#define TEXT 8 -+#define CHARACTER 9 -+#define EVAL_VAR 10 -+ -+/* Flag values -+ */ -+#define STRING_FLAG 0x001 -+#define ADDRESS_FLAG 0x002 -+#define INDIRECTION_FLAG 0x004 -+#define POINTER_FLAG 0x008 -+#define MEMBER_FLAG 0x010 -+#define BOOLIAN_FLAG 0x020 -+#define KLTYPE_FLAG 0x040 -+#define NOTYPE_FLAG 0x080 -+#define UNSIGNED_FLAG 0x100 -+#define VOID_FLAG 0x200 -+ -+/* Flag value for print_eval_error() function -+ */ -+#define CMD_NAME_FLG 1 /* cmdname is not name of a command */ -+#define CMD_STRING_FLG 2 /* cmdname is not name of a command */ -+ -+/* Expression operators in order of precedence. 
-+ */ -+#define CONDITIONAL 1 -+#define CONDITIONAL_ELSE 2 -+#define LOGICAL_OR 3 -+#define LOGICAL_AND 4 -+#define BITWISE_OR 5 -+#define BITWISE_EXCLUSIVE_OR 6 -+#define BITWISE_AND 7 -+#define EQUAL 8 -+#define NOT_EQUAL 9 -+#define LESS_THAN 10 -+#define GREATER_THAN 11 -+#define LESS_THAN_OR_EQUAL 12 -+#define GREATER_THAN_OR_EQUAL 13 -+#define RIGHT_SHIFT 14 -+#define LEFT_SHIFT 15 -+#define ADD 16 -+#define SUBTRACT 17 -+#define MULTIPLY 18 -+#define DIVIDE 19 -+#define MODULUS 20 -+#define LOGICAL_NEGATION 21 -+#define ONES_COMPLEMENT 22 -+#define PREFIX_INCREMENT 23 -+#define PREFIX_DECREMENT 24 -+#define POSTFIX_INCREMENT 25 -+#define POSTFIX_DECREMENT 26 -+#define CAST 27 -+#define UNARY_MINUS 28 -+#define UNARY_PLUS 29 -+#define INDIRECTION 30 -+#define ADDRESS 31 -+#define SIZEOF 32 -+#define RIGHT_ARROW 33 -+#define DOT 34 -+#define OPEN_PAREN 100 -+#define CLOSE_PAREN 101 -+#define OPEN_SQUARE_BRACKET 102 -+#define CLOSE_SQUARE_BRACKET 103 -+#define SEMI_COLON 104 -+#define NOT_YET -1 -+ -+/* Errors codes primarily for use with eval (print) functions -+ */ -+#define E_OPEN_PAREN 1100 -+#define E_CLOSE_PAREN 1101 -+#define E_BAD_STRUCTURE 1102 -+#define E_MISSING_STRUCTURE 1103 -+#define E_BAD_MEMBER 1104 -+#define E_BAD_OPERATOR 1105 -+#define E_BAD_OPERAND 1106 -+#define E_MISSING_OPERAND 1107 -+#define E_BAD_TYPE 1108 -+#define E_NOTYPE 1109 -+#define E_BAD_POINTER 1110 -+#define E_BAD_INDEX 1111 -+#define E_BAD_CHAR 1112 -+#define E_BAD_STRING 1113 -+#define E_END_EXPECTED 1114 -+#define E_BAD_EVAR 1115 /* Bad eval variable */ -+#define E_BAD_VALUE 1116 -+#define E_NO_VALUE 1117 -+#define E_DIVIDE_BY_ZERO 1118 -+#define E_BAD_CAST 1119 -+#define E_NO_ADDRESS 1120 -+#define E_SINGLE_QUOTE 1121 -+ -+#define E_BAD_WHATIS 1197 -+#define E_NOT_IMPLEMENTED 1198 -+#define E_SYNTAX_ERROR 1199 -+ -+extern uint64_t eval_error; -+extern char *error_token; -+ -+/* Function prototypes -+ */ -+node_t *eval(char **, int); -+void print_eval_error(char *, char *, char *, uint64_t, int); -+void free_nodes(node_t *); -+ -+/* Struct to hold information about eval variables -+ */ -+typedef struct variable_s { -+ btnode_t v_bt; /* Must be first */ -+ int v_flags; -+ char *v_exp; /* What was entered on command line */ -+ char *v_typestr; /* Actual type string after eval() call */ -+ node_t *v_node; -+} variable_t; -+ -+#define v_left v_bt.bt_left -+#define v_right v_bt.bt_right -+#define v_name v_bt.bt_key -+ -+/* Flag values -+ */ -+#define V_PERM 0x001 /* can't be unset - can be modified */ -+#define V_DEFAULT 0x002 /* set at startup */ -+#define V_NOMOD 0x004 /* cannot be modified */ -+#define V_TYPEDEF 0x008 /* contains typed data */ -+#define V_REC_STRUCT 0x010 /* direct ref to struct/member (not pointer) */ -+#define V_STRING 0x020 /* contains ASCII string (no type) */ -+#define V_COMMAND 0x040 /* contains command string (no type) */ -+#define V_OPTION 0x080 /* contains option flag (e.g., $hexints) */ -+#define V_PERM_NODE 0x100 /* Don't free node after setting variable */ -+ -+/* Variable table struct -+ */ -+typedef struct vtab_s { -+ variable_t *vt_root; -+ int vt_count; -+} vtab_t; -+ -+extern vtab_t *vtab; /* Pointer to table of eval variable info */ -+ -+/* Function Prototypes -+ */ -+variable_t *make_variable(char *, char *, node_t *, int); -+void clean_variable(variable_t *); -+void free_variable(variable_t *); -+void init_variables(vtab_t *); -+int set_variable(vtab_t *, char *, char *, node_t *, int); -+int unset_variable(vtab_t *, variable_t *); -+variable_t 
*find_variable(vtab_t *, char *, int); -+kltype_t *number_to_type(node_t *); -+void free_eval_memory(void); -+/* cpw: was int print_eval_results(node_t *, FILE *, int); */ -+int print_eval_results(node_t *, int); -+ -+#endif /* __LC_EVAL_H */ ---- a/kernel/exit.c -+++ b/kernel/exit.c -@@ -4,6 +4,9 @@ - * Copyright (C) 1991, 1992 Linus Torvalds - */ - -+#ifdef CONFIG_KDB -+#include -+#endif - #include - #include - #include ---- a/kernel/kallsyms.c -+++ b/kernel/kallsyms.c -@@ -529,3 +529,26 @@ static int __init kallsyms_init(void) - return 0; - } - device_initcall(kallsyms_init); -+ -+ -+#ifdef CONFIG_KDB -+#include -+#include -+ -+const char *kdb_walk_kallsyms(loff_t *pos) -+{ -+ static struct kallsym_iter kdb_walk_kallsyms_iter; -+ if (*pos == 0) { -+ memset(&kdb_walk_kallsyms_iter, 0, sizeof(kdb_walk_kallsyms_iter)); -+ reset_iter(&kdb_walk_kallsyms_iter, 0); -+ } -+ while (1) { -+ if (!update_iter(&kdb_walk_kallsyms_iter, *pos)) -+ return NULL; -+ ++*pos; -+ /* Some debugging symbols have no name. Ignore them. */ -+ if (kdb_walk_kallsyms_iter.name[0]) -+ return kdb_walk_kallsyms_iter.name; -+ } -+} -+#endif /* CONFIG_KDB */ ---- a/kernel/kexec.c -+++ b/kernel/kexec.c -@@ -40,6 +40,12 @@ - #include - #include - -+#ifdef CONFIG_KDB_KDUMP -+#include -+#include -+#include -+#endif -+ - /* Per cpu memory for storing cpu states in case of system crash. */ - note_buf_t __percpu *crash_notes; - -@@ -1080,7 +1086,16 @@ void crash_kexec(struct pt_regs *regs) - - crash_setup_regs(&fixed_regs, regs); - crash_save_vmcoreinfo(); -+ /* -+ * If we enabled KDB, we don't want to automatically -+ * perform a kdump since KDB will be responsible for -+ * executing kdb through a special 'kdump' command. -+ */ -+#ifdef CONFIG_KDB_KDUMP -+ kdba_kdump_prepare(&fixed_regs); -+#else - machine_crash_shutdown(&fixed_regs); -+#endif - machine_kexec(kexec_crash_image); - } - mutex_unlock(&kexec_mutex); ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -2786,12 +2786,23 @@ out: - return -ERANGE; - } - -+#ifdef CONFIG_KDB -+#include -+struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ -+#endif /* CONFIG_KDB */ -+ - int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, - char *name, char *module_name, int *exported) - { - struct module *mod; -+#ifdef CONFIG_KDB -+ int get_lock = !KDB_IS_RUNNING(); -+#else -+#define get_lock 1 -+#endif - -- preempt_disable(); -+ if (get_lock) -+ preempt_disable(); - list_for_each_entry_rcu(mod, &modules, list) { - if (symnum < mod->num_symtab) { - *value = mod->symtab[symnum].st_value; -@@ -2800,12 +2811,14 @@ int module_get_kallsym(unsigned int symn - KSYM_NAME_LEN); - strlcpy(module_name, mod->name, MODULE_NAME_LEN); - *exported = is_exported(name, *value, mod); -- preempt_enable(); -+ if (get_lock) -+ preempt_enable(); - return 0; - } - symnum -= mod->num_symtab; - } -- preempt_enable(); -+ if (get_lock) -+ preempt_enable(); - return -ERANGE; - } - ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -23,6 +23,9 @@ - #include - #include - #include -+#ifdef CONFIG_KDB_KDUMP -+#include -+#endif - - int panic_on_oops; - static unsigned long tainted_mask; -@@ -96,6 +99,12 @@ NORET_TYPE void panic(const char * fmt, - dump_stack(); - #endif - -+ -+#ifdef CONFIG_KDB_KDUMP -+ if (kdb_kdump_state == KDB_KDUMP_RESET) { -+ (void)kdb(KDB_REASON_OOPS, 999, get_irq_regs()); -+ } -+#endif - /* - * If we have crashed and we have a crash kernel loaded let it handle - * everything else. 
---- a/kernel/sched.c -+++ b/kernel/sched.c -@@ -7948,7 +7948,7 @@ void normalize_rt_tasks(void) - - #endif /* CONFIG_MAGIC_SYSRQ */ - --#ifdef CONFIG_IA64 -+#if defined(CONFIG_IA64) || defined(CONFIG_KDB) - /* - * These functions are only useful for the IA64 MCA handling. - * -@@ -9196,3 +9196,110 @@ void synchronize_sched_expedited(void) - EXPORT_SYMBOL_GPL(synchronize_sched_expedited); - - #endif /* #else #ifndef CONFIG_SMP */ -+ -+ -+#ifdef CONFIG_KDB -+#include -+ -+static void -+kdb_prio(char *name, struct rt_prio_array *array, kdb_printf_t xxx_printf, -+ unsigned int cpu) -+{ -+ int pri, printed_header = 0; -+ struct task_struct *p; -+ -+ xxx_printf(" %s rt bitmap: 0x%lx 0x%lx 0x%lx\n", -+ name, -+ array->bitmap[0], array->bitmap[1], array->bitmap[2]); -+ -+ pri = sched_find_first_bit(array->bitmap); -+ if (pri < MAX_RT_PRIO) { -+ xxx_printf(" rt bitmap priorities:"); -+ while (pri < MAX_RT_PRIO) { -+ xxx_printf(" %d", pri); -+ pri++; -+ pri = find_next_bit(array->bitmap, MAX_RT_PRIO, pri); -+ } -+ xxx_printf("\n"); -+ } -+ -+ for (pri = 0; pri < MAX_RT_PRIO; pri++) { -+ int printed_hdr = 0; -+ struct list_head *head, *curr; -+ -+ head = array->queue + pri; -+ curr = head->next; -+ while(curr != head) { -+ struct task_struct *task; -+ if (!printed_hdr) { -+ xxx_printf(" queue at priority=%d\n", pri); -+ printed_hdr = 1; -+ } -+ task = list_entry(curr, struct task_struct, rt.run_list); -+ if (task) -+ xxx_printf(" 0x%p %d %s time_slice:%d\n", -+ task, task->pid, task->comm, -+ task->rt.time_slice); -+ curr = curr->next; -+ } -+ } -+ for_each_process(p) { -+ if (p->se.on_rq && (task_cpu(p) == cpu) && -+ (p->policy == SCHED_NORMAL)) { -+ if (!printed_header) { -+ xxx_printf(" sched_normal queue:\n"); -+ printed_header = 1; -+ } -+ xxx_printf(" 0x%p %d %s pri:%d spri:%d npri:%d\n", -+ p, p->pid, p->comm, p->prio, -+ p->static_prio, p->normal_prio); -+ } -+ } -+} -+ -+/* This code must be in sched.c because struct rq is only defined in this -+ * source. To allow most of kdb to be modular, this code cannot call any kdb -+ * functions directly, any external functions that it needs must be passed in -+ * as parameters. 
-+ */ -+ -+void -+kdb_runqueue(unsigned long cpu, kdb_printf_t xxx_printf) -+{ -+ int i; -+ struct rq *rq; -+ -+ rq = cpu_rq(cpu); -+ -+ xxx_printf("CPU%ld lock:%s curr:0x%p(%d)(%s)", -+ cpu, (raw_spin_is_locked(&rq->lock))?"LOCKED":"free", -+ rq->curr, rq->curr->pid, rq->curr->comm); -+ if (rq->curr == rq->idle) -+ xxx_printf(" is idle"); -+ xxx_printf("\n"); -+ -+ xxx_printf(" nr_running:%ld ", rq->nr_running); -+ xxx_printf(" nr_uninterruptible:%ld ", rq->nr_uninterruptible); -+ -+ xxx_printf(" nr_switches:%llu ", (long long)rq->nr_switches); -+ xxx_printf(" nr_iowait:%u ", atomic_read(&rq->nr_iowait)); -+ xxx_printf(" next_balance:%lu\n", rq->next_balance); -+ -+#ifdef CONFIG_SMP -+ xxx_printf(" active_balance:%u ", rq->active_balance); -+ xxx_printf(" idle_at_tick:%u\n", rq->idle_at_tick); -+ -+ xxx_printf(" push_cpu:%u ", rq->push_cpu); -+ xxx_printf(" cpu:%u ", rq->cpu); -+ xxx_printf(" online:%u\n", rq->online); -+#endif -+ -+ xxx_printf(" cpu_load:"); -+ for (i=0; icpu_load[i]); -+ xxx_printf("\n"); -+ kdb_prio("active", &rq->rt.active, xxx_printf, (unsigned int)cpu); -+} -+EXPORT_SYMBOL(kdb_runqueue); -+ -+#endif /* CONFIG_KDB */ ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -2735,3 +2735,52 @@ void __init signals_init(void) - { - sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); - } -+ -+#ifdef CONFIG_KDB -+#include -+/* -+ * kdb_send_sig_info -+ * -+ * Allows kdb to send signals without exposing signal internals. -+ * -+ * Inputs: -+ * t task -+ * siginfo signal information -+ * seqno current kdb sequence number (avoid including kdbprivate.h) -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * Checks if the required locks are available before calling the main -+ * signal code, to avoid kdb deadlocks. -+ * Remarks: -+ */ -+void -+kdb_send_sig_info(struct task_struct *t, struct siginfo *info, int seqno) -+{ -+ static struct task_struct *kdb_prev_t; -+ static int kdb_prev_seqno; -+ int sig, new_t; -+ if (!spin_trylock(&t->sighand->siglock)) { -+ kdb_printf("Can't do kill command now.\n" -+ "The sigmask lock is held somewhere else in kernel, try again later\n"); -+ return; -+ } -+ spin_unlock(&t->sighand->siglock); -+ new_t = kdb_prev_t != t || kdb_prev_seqno != seqno; -+ kdb_prev_t = t; -+ kdb_prev_seqno = seqno; -+ if (t->state != TASK_RUNNING && new_t) { -+ kdb_printf("Process is not RUNNING, sending a signal from kdb risks deadlock\n" -+ "on the run queue locks. 
The signal has _not_ been sent.\n" -+ "Reissue the kill command if you want to risk the deadlock.\n"); -+ return; -+ } -+ sig = info->si_signo; -+ if (send_sig_info(sig, info, t)) -+ kdb_printf("Fail to deliver Signal %d to process %d.\n", sig, t->pid); -+ else -+ kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); -+} -+#endif /* CONFIG_KDB */ ---- a/lib/bug.c -+++ b/lib/bug.c -@@ -43,6 +43,10 @@ - #include - #include - -+#ifdef CONFIG_KDB -+#include -+#endif -+ - extern const struct bug_entry __start___bug_table[], __stop___bug_table[]; - - static inline unsigned long bug_addr(const struct bug_entry *bug) -@@ -177,5 +181,9 @@ enum bug_trap_type report_bug(unsigned l - "[verbose debug info unavailable]\n", - (void *)bugaddr); - -+#ifdef CONFIG_KDB -+ kdb(KDB_REASON_ENTER, 0, regs); -+#endif -+ - return BUG_TRAP_TYPE_BUG; - } ---- a/mm/hugetlb.c -+++ b/mm/hugetlb.c -@@ -1929,6 +1929,28 @@ int hugetlb_overcommit_handler(struct ct - - #endif /* CONFIG_SYSCTL */ - -+#ifdef CONFIG_KDB -+#include -+#include -+/* Like hugetlb_report_meminfo() but using kdb_printf() */ -+void -+kdb_hugetlb_report_meminfo(void) -+{ -+ struct hstate *h = &default_hstate; -+ kdb_printf( -+ "HugePages_Total: %5lu\n" -+ "HugePages_Free: %5lu\n" -+ "HugePages_Rsvd: %5lu\n" -+ "HugePages_Surp: %5lu\n" -+ "Hugepagesize: %5lu kB\n", -+ h->nr_huge_pages, -+ h->free_huge_pages, -+ h->resv_huge_pages, -+ h->surplus_huge_pages, -+ 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); -+} -+#endif /* CONFIG_KDB */ -+ - void hugetlb_report_meminfo(struct seq_file *m) - { - struct hstate *h = &default_hstate; ---- a/mm/mmzone.c -+++ b/mm/mmzone.c -@@ -23,6 +23,10 @@ struct pglist_data *next_online_pgdat(st - return NULL; - return NODE_DATA(nid); - } -+#ifdef CONFIG_KDB -+EXPORT_SYMBOL(first_online_pgdat); -+EXPORT_SYMBOL(next_online_pgdat); -+#endif - - /* - * next_zone - helper magic for for_each_zone() ---- a/mm/swapfile.c -+++ b/mm/swapfile.c -@@ -13,6 +13,10 @@ - #include - #include - #include -+#ifdef CONFIG_KDB -+#include -+#include -+#endif /* CONFIG_KDB */ - #include - #include - #include -@@ -2129,6 +2133,24 @@ void si_swapinfo(struct sysinfo *val) - spin_unlock(&swap_lock); - } - -+#ifdef CONFIG_KDB -+/* Like si_swapinfo() but without the locks */ -+void kdb_si_swapinfo(struct sysinfo *val) -+{ -+ unsigned int i; -+ unsigned long nr_to_be_unused = 0; -+ -+ for (i = 0; i < nr_swapfiles; i++) { -+ if (!(swap_info[i]->flags & SWP_USED) || -+ (swap_info[i]->flags & SWP_WRITEOK)) -+ continue; -+ nr_to_be_unused += swap_info[i]->inuse_pages; -+ } -+ val->freeswap = nr_swap_pages + nr_to_be_unused; -+ val->totalswap = total_swap_pages + nr_to_be_unused; -+} -+#endif /* CONFIG_KDB */ -+ - /* - * Verify that a swap entry is valid and increment its swap map count. - * diff --git a/patches.suse/kdb-fix-assignment-from-incompatible-pointer-warnings b/patches.suse/kdb-fix-assignment-from-incompatible-pointer-warnings deleted file mode 100644 index 16ba925..0000000 --- a/patches.suse/kdb-fix-assignment-from-incompatible-pointer-warnings +++ /dev/null @@ -1,31 +0,0 @@ -From: Jeff Mahoney -Subject: kdb: Fix assignment from incompatible pointer warnings -Patch-mainline: not yet, whenever KDB is upstream - - info->pfs_loc is an unsigned long *, not a u64 *. 
- -Signed-off-by: Jeff Mahoney ---- - arch/ia64/kdb/kdba_bt.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/arch/ia64/kdb/kdba_bt.c -+++ b/arch/ia64/kdb/kdba_bt.c -@@ -112,7 +112,7 @@ bt_print_one(kdb_machreg_t ip, - } - if (btsp) - kdb_printf(" sp 0x%016lx bsp 0x%016lx cfm 0x%016lx info->pfs_loc 0x%016lx 0x%016lx\n", -- sp, bsp, cfm, (u64) info->pfs_loc, info->pfs_loc ? *(info->pfs_loc) : 0); -+ sp, bsp, cfm, info->pfs_loc, info->pfs_loc ? *(info->pfs_loc) : 0); - } - - /* -@@ -142,7 +142,7 @@ kdba_bt_stack(int argcount, const struct - struct pt_regs *regs = NULL; - int count = 0; - int btsp = 0; /* Backtrace the kdb code as well */ -- u64 *prev_pfs_loc = NULL; -+ unsigned long *prev_pfs_loc = NULL; - extern char __attribute__ ((weak)) ia64_spinlock_contention_pre3_4[]; - extern char __attribute__ ((weak)) ia64_spinlock_contention_pre3_4_end[]; - diff --git a/patches.suse/kdb-fix-kdb_cmds-to-include-the-arch-common-macro b/patches.suse/kdb-fix-kdb_cmds-to-include-the-arch-common-macro deleted file mode 100644 index b28c41e..0000000 --- a/patches.suse/kdb-fix-kdb_cmds-to-include-the-arch-common-macro +++ /dev/null @@ -1,26 +0,0 @@ -From 8290f9ee66352a04b2858db63e20229ccd9395fb Mon Sep 17 00:00:00 2001 -From: Martin Hicks -Date: Mon, 8 Feb 2010 13:48:48 -0600 -Subject: [PATCH] kdb: fix kdb_cmds to include the arch common macro -References: bnc#578421 -Patch-mainline: Whenever kdb is accepted - -kbuild must have changed at one point and nobody noticed that -the "archkdb" type macros, which use the archkdbcommon -macro, were not working - -Signed-off-by: Martin Hicks -Acked-by: Jeff Mahoney ---- - kdb/Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kdb/Makefile -+++ b/kdb/Makefile -@@ -39,5 +39,5 @@ quiet_cmd_gen-kdb = GENKDB $@ - END {print "extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {"; for (i = 0; i < cmds; ++i) {print " kdb_cmd" i ","}; print(" NULL\n};");}' \ - $(filter-out %/Makefile,$^) > $@ - --$(obj)/gen-kdb_cmds.c: $(src)/kdb_cmds $(wildcard $(TOPDIR)/arch/$(KDB_CMDS)) $(src)/Makefile -+$(obj)/gen-kdb_cmds.c: $(src)/kdb_cmds $(wildcard $(srctree)/arch/$(KDB_CMDS)) $(src)/Makefile - $(call cmd,gen-kdb) diff --git a/patches.suse/kdb-handle-nonexistance-keyboard-controller b/patches.suse/kdb-handle-nonexistance-keyboard-controller deleted file mode 100644 index 46cd3e8..0000000 --- a/patches.suse/kdb-handle-nonexistance-keyboard-controller +++ /dev/null @@ -1,87 +0,0 @@ -From: Martin Hicks -Subject: kdb: handle nonexistance keyboard controller -References: bnc#578051 -Patch-mainline: When kdb is accepted - - On UV, we have no keyboard controller and during the kdba_io polling - routines kdb attempts to disable the interrupts on the keyboard - controller to go into polling mode. - - These non-existant port addresses return 0xff all the time, which - appears to lock up KDB during entry. 
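The detection idea described above boils down to this: reads of a missing i8042's status and data ports float to 0xff, so probe once and cache the answer. A condensed, self-contained sketch of that pattern follows; read_status() and read_data() are hypothetical stand-ins for the real port accessors used in the deleted patch below:

/* Sketch of the probe-once-and-cache check; not the kernel code itself. */
#include <stdio.h>

static unsigned char read_status(void) { return 0xff; } /* absent controller floats high */
static unsigned char read_data(void)   { return 0xff; }

static int kbd_exists(void)
{
	static int cached = -1;          /* -1 means "not probed yet" */

	if (cached == -1)                /* probe exactly once */
		cached = !(read_status() == 0xff && read_data() == 0xff);
	return cached;
}

int main(void)
{
	printf("keyboard controller %s\n", kbd_exists() ? "present" : "absent");
	return 0;
}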
- -Acked-by: Jeff Mahoney ---- - arch/x86/kdb/kdba_io.c | 32 +++++++++++++++++++++++++------- - 1 file changed, 25 insertions(+), 7 deletions(-) - ---- a/arch/x86/kdb/kdba_io.c -+++ b/arch/x86/kdb/kdba_io.c -@@ -366,7 +366,23 @@ static int get_serial_char(void) - - #ifdef CONFIG_VT_CONSOLE - --static int kbd_exists; -+static int kdb_check_kbd_exists(void) -+{ -+ static int kbd_exists = -1; -+ -+ /* One time init */ -+ if (kbd_exists == -1) { -+ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) || -+ (kbd_read_status() == 0xff && -+ kbd_read_input() == 0xff)) -+ kbd_exists = 0; -+ else -+ kbd_exists = 1; -+ } -+ -+ return kbd_exists; -+} -+ - - /* - * Check if the keyboard controller has a keypress for us. -@@ -382,12 +398,8 @@ static int get_kbd_char(void) - u_short keychar; - extern u_short plain_map[], shift_map[], ctrl_map[]; - -- if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) || -- (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) { -- kbd_exists = 0; -+ if (!kdb_check_kbd_exists()) - return -1; -- } -- kbd_exists = 1; - - if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) - return -1; -@@ -571,7 +583,7 @@ static int blink_led(void) - { - static long delay; - -- if (kbd_exists == 0) -+ if (!kdb_check_kbd_exists()) - return -1; - - if (--delay < 0) { -@@ -621,6 +633,9 @@ void kdba_local_arch_setup(void) - int timeout; - unsigned char c; - -+ if (!kdb_check_kbd_exists()) -+ return; -+ - while (kbd_read_status() & KBD_STAT_IBF); - kbd_write_command(KBD_CCMD_READ_MODE); - mdelay(1); -@@ -646,6 +661,9 @@ void kdba_local_arch_cleanup(void) - int timeout; - unsigned char c; - -+ if (!kdb_check_kbd_exists()) -+ return; -+ - while (kbd_read_status() & KBD_STAT_IBF); - kbd_write_command(KBD_CCMD_READ_MODE); - mdelay(1); diff --git a/patches.suse/kdb-ia64 b/patches.suse/kdb-ia64 deleted file mode 100644 index f5c8ab2..0000000 --- a/patches.suse/kdb-ia64 +++ /dev/null @@ -1,22912 +0,0 @@ -From: Martin Hicks -Date: Mon, 07 Dec 2009 11:52:50 -0600 -Subject: kdb-v4.4-2.6.32-ia64-3 -References: FATE#303971 -X-URL: ftp://oss.sgi.com/www/projects/kdb/download/v4.4/ -Patch-mainline: not yet - -The KDB IA64 code. - -Acked-by: Jeff Mahoney ---- - - arch/ia64/Kconfig.debug | 97 - arch/ia64/Makefile | 1 - arch/ia64/include/asm/ansidecl.h | 383 + - arch/ia64/include/asm/bfd.h | 5089 +++++++++++++++++++++ - arch/ia64/include/asm/kdb.h | 50 - arch/ia64/include/asm/kdb_break.h | 24 - arch/ia64/include/asm/kdbprivate.h | 124 - arch/ia64/include/asm/kregs.h | 2 - arch/ia64/kdb/ChangeLog | 1111 ++++ - arch/ia64/kdb/Makefile | 21 - arch/ia64/kdb/cpu-ia64-opc.c | 598 ++ - arch/ia64/kdb/ia64-asmtab.c | 8585 +++++++++++++++++++++++++++++++++++++ - arch/ia64/kdb/ia64-asmtab.h | 158 - arch/ia64/kdb/ia64-dis.c | 312 + - arch/ia64/kdb/ia64-opc.c | 749 +++ - arch/ia64/kdb/ia64-opc.h | 141 - arch/ia64/kdb/ia64.h | 402 + - arch/ia64/kdb/kdb_cmds | 17 - arch/ia64/kdb/kdba_bp.c | 841 +++ - arch/ia64/kdb/kdba_bt.c | 285 + - arch/ia64/kdb/kdba_fru.c | 65 - arch/ia64/kdb/kdba_id.c | 529 ++ - arch/ia64/kdb/kdba_io.c | 661 ++ - arch/ia64/kdb/kdba_jmp.S | 394 + - arch/ia64/kdb/kdba_pod.c | 64 - arch/ia64/kdb/kdba_support.c | 1720 +++++++ - arch/ia64/kernel/head.S | 7 - arch/ia64/kernel/mca.c | 72 - arch/ia64/kernel/smp.c | 23 - arch/ia64/kernel/traps.c | 22 - arch/ia64/kernel/unwind.c | 33 - 31 files changed, 22568 insertions(+), 12 deletions(-) - ---- a/arch/ia64/Kconfig.debug -+++ b/arch/ia64/Kconfig.debug -@@ -56,9 +56,106 @@ config IA64_DEBUG_IRQ - and restore instructions. 
It's useful for tracking down spinlock - problems, but slow! If you're unsure, select N. - -+config KDB -+ bool "Built-in Kernel Debugger support" -+ depends on DEBUG_KERNEL -+ select KALLSYMS -+ select KALLSYMS_ALL -+ help -+ This option provides a built-in kernel debugger. The built-in -+ kernel debugger contains commands which allow memory to be examined, -+ instructions to be disassembled and breakpoints to be set. For details, -+ see Documentation/kdb/kdb.mm and the manual pages kdb_bt, kdb_ss, etc. -+ Kdb can also be used via the serial port. Set up the system to -+ have a serial console (see Documentation/serial-console.txt). -+ The key sequence KDB on the serial port will cause the -+ kernel debugger to be entered with input from the serial port and -+ output to the serial console. If unsure, say N. -+ -+config KDB_MODULES -+ tristate "KDB modules" -+ depends on KDB -+ help -+ KDB can be extended by adding your own modules, in directory -+ kdb/modules. This option selects the way that these modules should -+ be compiled, as free standing modules (select M) or built into the -+ kernel (select Y). If unsure say M. -+ -+config KDB_OFF -+ bool "KDB off by default" -+ depends on KDB -+ help -+ Normally kdb is activated by default, as long as CONFIG_KDB is set. -+ If you want to ship a kernel with kdb support but only have kdb -+ turned on when the user requests it then select this option. When -+ compiled with CONFIG_KDB_OFF, kdb ignores all events unless you boot -+ with kdb=on or you echo "1" > /proc/sys/kernel/kdb. This option also -+ works in reverse, if kdb is normally activated, you can boot with -+ kdb=off or echo "0" > /proc/sys/kernel/kdb to deactivate kdb. If -+ unsure, say N. -+ -+config KDB_CONTINUE_CATASTROPHIC -+ int "KDB continues after catastrophic errors" -+ depends on KDB -+ default "0" -+ help -+ This integer controls the behaviour of kdb when the kernel gets a -+ catastrophic error, i.e. for a panic, oops, NMI or other watchdog -+ tripping. CONFIG_KDB_CONTINUE_CATASTROPHIC interacts with -+ /proc/sys/kernel/kdb and CONFIG_LKCD_DUMP (if your kernel has the -+ LKCD patch). -+ When KDB is active (/proc/sys/kernel/kdb == 1) and a catastrophic -+ error occurs, nothing extra happens until you type 'go'. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time -+ you type 'go', kdb warns you. The second time you type 'go', KDB -+ tries to continue - no guarantees that the kernel is still usable. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue - no -+ guarantees that the kernel is still usable. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD -+ patch and LKCD is configured to take a dump then KDB forces a dump. -+ Whether or not a dump is taken, KDB forces a reboot. -+ When KDB is not active (/proc/sys/kernel/kdb == 0) and a catastrophic -+ error occurs, the following steps are automatic, no human -+ intervention is required. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default) or 1. KDB attempts -+ to continue - no guarantees that the kernel is still usable. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD -+ patch and LKCD is configured to take a dump then KDB automatically -+ forces a dump. Whether or not a dump is taken, KDB forces a -+ reboot. -+ If you are not sure, say 0. Read Documentation/kdb/dump.txt before -+ setting to 2. 
-+ -+config KDB_USB -+ bool "Support for USB Keyboard in KDB (OHCI and/or EHCI only)" -+ depends on KDB && (USB_OHCI_HCD || USB_UHCI_HCD) -+ help -+ If you want to use kdb from USB keyboards then say Y here. If you -+ say N then kdb can only be used from a PC (AT) keyboard or a serial -+ console. -+ -+config KDB_HARDWARE_BREAKPOINTS -+ bool "Enable hardware breakpoints in KDB" -+ depends on KDB -+ default y -+ help -+ If you say Y here, KDB will allow you to use the IA64 -+ hardware watchpoint feature (via the bph and bpha -+ commands). Currently, only data breakpoints are -+ implemented. -+ - config SYSVIPC_COMPAT - bool - depends on COMPAT && SYSVIPC - default y - -+config KDB_KDUMP -+ bool "Support for Kdump in KDB" -+ depends on KDB -+ select KEXEC -+ default N -+ help -+ If you want to take Kdump kernel vmcore from KDB then say Y here. -+ Of imsire. say N. -+ - endmenu ---- a/arch/ia64/Makefile -+++ b/arch/ia64/Makefile -@@ -57,6 +57,7 @@ core-$(CONFIG_IA64_SGI_UV) += arch/ia64/ - core-$(CONFIG_KVM) += arch/ia64/kvm/ - core-$(CONFIG_XEN) += arch/ia64/xen/ - -+drivers-$(CONFIG_KDB) += arch/$(ARCH)/kdb/ - drivers-$(CONFIG_PCI) += arch/ia64/pci/ - drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ - drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ ---- /dev/null -+++ b/arch/ia64/include/asm/ansidecl.h -@@ -0,0 +1,383 @@ -+/* ANSI and traditional C compatability macros -+ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 -+ Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+This program is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 2 of the License, or -+(at your option) any later version. -+ -+This program is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with this program; if not, write to the Free Software -+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+/* ANSI and traditional C compatibility macros -+ -+ ANSI C is assumed if __STDC__ is #defined. -+ -+ Macro ANSI C definition Traditional C definition -+ ----- ---- - ---------- ----------- - ---------- -+ ANSI_PROTOTYPES 1 not defined -+ PTR `void *' `char *' -+ PTRCONST `void *const' `char *' -+ LONG_DOUBLE `long double' `double' -+ const not defined `' -+ volatile not defined `' -+ signed not defined `' -+ VA_START(ap, var) va_start(ap, var) va_start(ap) -+ -+ Note that it is safe to write "void foo();" indicating a function -+ with no return value, in all K+R compilers we have been able to test. -+ -+ For declaring functions with prototypes, we also provide these: -+ -+ PARAMS ((prototype)) -+ -- for functions which take a fixed number of arguments. Use this -+ when declaring the function. When defining the function, write a -+ K+R style argument list. For example: -+ -+ char *strcpy PARAMS ((char *dest, char *source)); -+ ... -+ char * -+ strcpy (dest, source) -+ char *dest; -+ char *source; -+ { ... 
} -+ -+ -+ VPARAMS ((prototype, ...)) -+ -- for functions which take a variable number of arguments. Use -+ PARAMS to declare the function, VPARAMS to define it. For example: -+ -+ int printf PARAMS ((const char *format, ...)); -+ ... -+ int -+ printf VPARAMS ((const char *format, ...)) -+ { -+ ... -+ } -+ -+ For writing functions which take variable numbers of arguments, we -+ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These -+ hide the differences between K+R and C89 more -+ thoroughly than the simple VA_START() macro mentioned above. -+ -+ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end. -+ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls -+ corresponding to the list of fixed arguments. Then use va_arg -+ normally to get the variable arguments, or pass your va_list object -+ around. You do not declare the va_list yourself; VA_OPEN does it -+ for you. -+ -+ Here is a complete example: -+ -+ int -+ printf VPARAMS ((const char *format, ...)) -+ { -+ int result; -+ -+ VA_OPEN (ap, format); -+ VA_FIXEDARG (ap, const char *, format); -+ -+ result = vfprintf (stdout, format, ap); -+ VA_CLOSE (ap); -+ -+ return result; -+ } -+ -+ -+ You can declare variables either before or after the VA_OPEN, -+ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning -+ and end of a block. They must appear at the same nesting level, -+ and any variables declared after VA_OPEN go out of scope at -+ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the -+ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE -+ pairs in a single function in case you need to traverse the -+ argument list more than once. -+ -+ For ease of writing code which uses GCC extensions but needs to be -+ portable to other compilers, we provide the GCC_VERSION macro that -+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various -+ wrappers around __attribute__. Also, __extension__ will be #defined -+ to nothing if it doesn't work. See below. -+ -+ This header also defines a lot of obsolete macros: -+ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID, -+ AND, DOTS, NOARGS. Don't use them. */ -+ -+#ifndef _ANSIDECL_H -+#define _ANSIDECL_H 1 -+ -+/* Every source file includes this file, -+ so they will all get the switch for lint. */ -+/* LINTLIBRARY */ -+ -+/* Using MACRO(x,y) in cpp #if conditionals does not work with some -+ older preprocessors. Thus we can't define something like this: -+ -+#define HAVE_GCC_VERSION(MAJOR, MINOR) \ -+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) -+ -+and then test "#if HAVE_GCC_VERSION(2,7)". -+ -+So instead we use the macro below and test it against specific values. */ -+ -+/* This macro simplifies testing whether we are using gcc, and if it -+ is of a particular minimum version. (Both major & minor numbers are -+ significant.) This macro will evaluate to 0 if we are not using -+ gcc at all. */ -+#ifndef GCC_VERSION -+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) -+#endif /* GCC_VERSION */ -+ -+#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus)) -+/* All known AIX compilers implement these things (but don't always -+ define __STDC__). The RISC/OS MIPS compiler defines these things -+ in SVR4 mode, but does not define __STDC__. 
*/ -+/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other -+ C++ compilers, does not define __STDC__, though it acts as if this -+ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ -+ -+#define ANSI_PROTOTYPES 1 -+#define PTR void * -+#define PTRCONST void *const -+#define LONG_DOUBLE long double -+ -+/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in -+ a #ifndef. */ -+#ifndef PARAMS -+#define PARAMS(ARGS) ARGS -+#endif -+ -+#define VPARAMS(ARGS) ARGS -+#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR) -+ -+/* variadic function helper macros */ -+/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's -+ use without inhibiting further decls and without declaring an -+ actual variable. */ -+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy -+#define VA_CLOSE(AP) } va_end(AP); } -+#define VA_FIXEDARG(AP, T, N) struct Qdmy -+ -+#undef const -+#undef volatile -+#undef signed -+ -+#ifdef __KERNEL__ -+#ifndef __STDC_VERSION__ -+#define __STDC_VERSION__ 0 -+#endif -+#endif /* __KERNEL__ */ -+ -+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports -+ it too, but it's not in C89. */ -+#undef inline -+#if __STDC_VERSION__ > 199901L -+/* it's a keyword */ -+#else -+# if GCC_VERSION >= 2007 -+# define inline __inline__ /* __inline__ prevents -pedantic warnings */ -+# else -+# define inline /* nothing */ -+# endif -+#endif -+ -+/* These are obsolete. Do not use. */ -+#ifndef IN_GCC -+#define CONST const -+#define VOLATILE volatile -+#define SIGNED signed -+ -+#define PROTO(type, name, arglist) type name arglist -+#define EXFUN(name, proto) name proto -+#define DEFUN(name, arglist, args) name(args) -+#define DEFUN_VOID(name) name(void) -+#define AND , -+#define DOTS , ... -+#define NOARGS void -+#endif /* ! IN_GCC */ -+ -+#else /* Not ANSI C. */ -+ -+#undef ANSI_PROTOTYPES -+#define PTR char * -+#define PTRCONST PTR -+#define LONG_DOUBLE double -+ -+#define PARAMS(args) () -+#define VPARAMS(args) (va_alist) va_dcl -+#define VA_START(va_list, var) va_start(va_list) -+ -+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy -+#define VA_CLOSE(AP) } va_end(AP); } -+#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE) -+ -+/* some systems define these in header files for non-ansi mode */ -+#undef const -+#undef volatile -+#undef signed -+#undef inline -+#define const -+#define volatile -+#define signed -+#define inline -+ -+#ifndef IN_GCC -+#define CONST -+#define VOLATILE -+#define SIGNED -+ -+#define PROTO(type, name, arglist) type name () -+#define EXFUN(name, proto) name() -+#define DEFUN(name, arglist, args) name arglist args; -+#define DEFUN_VOID(name) name() -+#define AND ; -+#define DOTS -+#define NOARGS -+#endif /* ! IN_GCC */ -+ -+#endif /* ANSI C. */ -+ -+/* Define macros for some gcc attributes. This permits us to use the -+ macros freely, and know that they will come into play for the -+ version of gcc in which they are supported. */ -+ -+#if (GCC_VERSION < 2007) -+# define __attribute__(x) -+#endif -+ -+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */ -+#ifndef ATTRIBUTE_MALLOC -+# if (GCC_VERSION >= 2096) -+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) -+# else -+# define ATTRIBUTE_MALLOC -+# endif /* GNUC >= 2.96 */ -+#endif /* ATTRIBUTE_MALLOC */ -+ -+/* Attributes on labels were valid as of gcc 2.93. 
*/ -+#ifndef ATTRIBUTE_UNUSED_LABEL -+# if (!defined (__cplusplus) && GCC_VERSION >= 2093) -+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED -+# else -+# define ATTRIBUTE_UNUSED_LABEL -+# endif /* !__cplusplus && GNUC >= 2.93 */ -+#endif /* ATTRIBUTE_UNUSED_LABEL */ -+ -+#ifndef ATTRIBUTE_UNUSED -+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) -+#endif /* ATTRIBUTE_UNUSED */ -+ -+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the -+ identifier name. */ -+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004) -+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED -+#else /* !__cplusplus || GNUC >= 3.4 */ -+# define ARG_UNUSED(NAME) NAME -+#endif /* !__cplusplus || GNUC >= 3.4 */ -+ -+#ifndef ATTRIBUTE_NORETURN -+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) -+#endif /* ATTRIBUTE_NORETURN */ -+ -+/* Attribute `nonnull' was valid as of gcc 3.3. */ -+#ifndef ATTRIBUTE_NONNULL -+# if (GCC_VERSION >= 3003) -+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) -+# else -+# define ATTRIBUTE_NONNULL(m) -+# endif /* GNUC >= 3.3 */ -+#endif /* ATTRIBUTE_NONNULL */ -+ -+/* Attribute `pure' was valid as of gcc 3.0. */ -+#ifndef ATTRIBUTE_PURE -+# if (GCC_VERSION >= 3000) -+# define ATTRIBUTE_PURE __attribute__ ((__pure__)) -+# else -+# define ATTRIBUTE_PURE -+# endif /* GNUC >= 3.0 */ -+#endif /* ATTRIBUTE_PURE */ -+ -+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. -+ This was the case for the `printf' format attribute by itself -+ before GCC 3.3, but as of 3.3 we need to add the `nonnull' -+ attribute to retain this behavior. */ -+#ifndef ATTRIBUTE_PRINTF -+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) -+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) -+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) -+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) -+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) -+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) -+#endif /* ATTRIBUTE_PRINTF */ -+ -+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on -+ a function pointer. Format attributes were allowed on function -+ pointers as of gcc 3.1. */ -+#ifndef ATTRIBUTE_FPTR_PRINTF -+# if (GCC_VERSION >= 3001) -+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n) -+# else -+# define ATTRIBUTE_FPTR_PRINTF(m, n) -+# endif /* GNUC >= 3.1 */ -+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2) -+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3) -+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4) -+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5) -+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6) -+#endif /* ATTRIBUTE_FPTR_PRINTF */ -+ -+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A -+ NULL format specifier was allowed as of gcc 3.3. */ -+#ifndef ATTRIBUTE_NULL_PRINTF -+# if (GCC_VERSION >= 3003) -+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) -+# else -+# define ATTRIBUTE_NULL_PRINTF(m, n) -+# endif /* GNUC >= 3.3 */ -+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) -+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) -+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) -+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) -+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) -+#endif /* ATTRIBUTE_NULL_PRINTF */ -+ -+/* Attribute `sentinel' was valid as of gcc 3.5. 
*/ -+#ifndef ATTRIBUTE_SENTINEL -+# if (GCC_VERSION >= 3005) -+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__)) -+# else -+# define ATTRIBUTE_SENTINEL -+# endif /* GNUC >= 3.5 */ -+#endif /* ATTRIBUTE_SENTINEL */ -+ -+ -+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF -+# if (GCC_VERSION >= 3000) -+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m)))) -+# else -+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) -+# endif /* GNUC >= 3.0 */ -+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */ -+ -+/* We use __extension__ in some places to suppress -pedantic warnings -+ about GCC extensions. This feature didn't work properly before -+ gcc 2.8. */ -+#if GCC_VERSION < 2008 -+#define __extension__ -+#endif -+ -+#endif /* ansidecl.h */ ---- /dev/null -+++ b/arch/ia64/include/asm/bfd.h -@@ -0,0 +1,5089 @@ -+/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically -+ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c", -+ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c", -+ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c", -+ "linker.c" and "simple.c". -+ Run "make headers" in your build bfd/ to regenerate. */ -+ -+/* Main header file for the bfd library -- portable access to object files. -+ -+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, -+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. -+ -+ Contributed by Cygnus Support. -+ -+ This file is part of BFD, the Binary File Descriptor library. -+ -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 2 of the License, or -+ (at your option) any later version. -+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifndef __BFD_H_SEEN__ -+#define __BFD_H_SEEN__ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#ifdef __KERNEL__ -+#include -+#else /* __KERNEL__ */ -+#include "ansidecl.h" -+#include "symcat.h" -+#endif /* __KERNEL__ */ -+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) -+#ifndef SABER -+/* This hack is to avoid a problem with some strict ANSI C preprocessors. -+ The problem is, "32_" is not a valid preprocessing token, and we don't -+ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will -+ cause the inner CONCAT2 macros to be evaluated first, producing -+ still-valid pp-tokens. Then the final concatenation can be done. */ -+#undef CONCAT4 -+#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d)) -+#endif -+#endif -+ -+/* The word size used by BFD on the host. This may be 64 with a 32 -+ bit target if the host is 64 bit, or if other 64 bit targets have -+ been selected with --enable-targets, or if --enable-64-bit-bfd. */ -+#define BFD_ARCH_SIZE 64 -+ -+/* The word size of the default bfd target. 
*/ -+#define BFD_DEFAULT_TARGET_SIZE 64 -+ -+#define BFD_HOST_64BIT_LONG 1 -+#define BFD_HOST_LONG_LONG 1 -+#if 1 -+#define BFD_HOST_64_BIT long -+#define BFD_HOST_U_64_BIT unsigned long -+typedef BFD_HOST_64_BIT bfd_int64_t; -+typedef BFD_HOST_U_64_BIT bfd_uint64_t; -+#endif -+ -+#if BFD_ARCH_SIZE >= 64 -+#define BFD64 -+#endif -+ -+#ifndef INLINE -+#if __GNUC__ >= 2 -+#define INLINE __inline__ -+#else -+#define INLINE -+#endif -+#endif -+ -+/* Forward declaration. */ -+typedef struct bfd bfd; -+ -+/* Boolean type used in bfd. Too many systems define their own -+ versions of "boolean" for us to safely typedef a "boolean" of -+ our own. Using an enum for "bfd_boolean" has its own set of -+ problems, with strange looking casts required to avoid warnings -+ on some older compilers. Thus we just use an int. -+ -+ General rule: Functions which are bfd_boolean return TRUE on -+ success and FALSE on failure (unless they're a predicate). */ -+ -+typedef int bfd_boolean; -+#undef FALSE -+#undef TRUE -+#define FALSE 0 -+#define TRUE 1 -+ -+#ifdef BFD64 -+ -+#ifndef BFD_HOST_64_BIT -+ #error No 64 bit integer type available -+#endif /* ! defined (BFD_HOST_64_BIT) */ -+ -+typedef BFD_HOST_U_64_BIT bfd_vma; -+typedef BFD_HOST_64_BIT bfd_signed_vma; -+typedef BFD_HOST_U_64_BIT bfd_size_type; -+typedef BFD_HOST_U_64_BIT symvalue; -+ -+#ifndef fprintf_vma -+#if BFD_HOST_64BIT_LONG -+#define sprintf_vma(s,x) sprintf (s, "%016lx", x) -+#define fprintf_vma(f,x) fprintf (f, "%016lx", x) -+#else -+#define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff))) -+#define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff)) -+#define fprintf_vma(s,x) \ -+ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) -+#define sprintf_vma(s,x) \ -+ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) -+#endif -+#endif -+ -+#else /* not BFD64 */ -+ -+/* Represent a target address. Also used as a generic unsigned type -+ which is guaranteed to be big enough to hold any arithmetic types -+ we need to deal with. */ -+typedef unsigned long bfd_vma; -+ -+/* A generic signed type which is guaranteed to be big enough to hold any -+ arithmetic types we need to deal with. Can be assumed to be compatible -+ with bfd_vma in the same way that signed and unsigned ints are compatible -+ (as parameters, in assignment, etc). */ -+typedef long bfd_signed_vma; -+ -+typedef unsigned long symvalue; -+typedef unsigned long bfd_size_type; -+ -+/* Print a bfd_vma x on stream s. */ -+#define fprintf_vma(s,x) fprintf (s, "%08lx", x) -+#define sprintf_vma(s,x) sprintf (s, "%08lx", x) -+ -+#endif /* not BFD64 */ -+ -+#define HALF_BFD_SIZE_TYPE \ -+ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2)) -+ -+#ifndef BFD_HOST_64_BIT -+/* Fall back on a 32 bit type. The idea is to make these types always -+ available for function return types, but in the case that -+ BFD_HOST_64_BIT is undefined such a function should abort or -+ otherwise signal an error. */ -+typedef bfd_signed_vma bfd_int64_t; -+typedef bfd_vma bfd_uint64_t; -+#endif -+ -+/* An offset into a file. BFD always uses the largest possible offset -+ based on the build time availability of fseek, fseeko, or fseeko64. 
*/ -+typedef BFD_HOST_64_BIT file_ptr; -+typedef unsigned BFD_HOST_64_BIT ufile_ptr; -+ -+extern void bfd_sprintf_vma (bfd *, char *, bfd_vma); -+extern void bfd_fprintf_vma (bfd *, void *, bfd_vma); -+ -+#define printf_vma(x) fprintf_vma(stdout,x) -+#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x) -+ -+typedef unsigned int flagword; /* 32 bits of flags */ -+typedef unsigned char bfd_byte; -+ -+typedef int (*bfd_qsort_closure_func) (const void *, const void *, const void *); -+extern void bfd_qsort (void *base, bfd_size_type nmemb, bfd_size_type size, -+ bfd_qsort_closure_func cmp, void *closure); -+ -+/* File formats. */ -+ -+typedef enum bfd_format -+{ -+ bfd_unknown = 0, /* File format is unknown. */ -+ bfd_object, /* Linker/assembler/compiler output. */ -+ bfd_archive, /* Object archive file. */ -+ bfd_core, /* Core dump. */ -+ bfd_type_end /* Marks the end; don't use it! */ -+} -+bfd_format; -+ -+/* Values that may appear in the flags field of a BFD. These also -+ appear in the object_flags field of the bfd_target structure, where -+ they indicate the set of flags used by that backend (not all flags -+ are meaningful for all object file formats) (FIXME: at the moment, -+ the object_flags values have mostly just been copied from backend -+ to another, and are not necessarily correct). */ -+ -+/* No flags. */ -+#define BFD_NO_FLAGS 0x00 -+ -+/* BFD contains relocation entries. */ -+#define HAS_RELOC 0x01 -+ -+/* BFD is directly executable. */ -+#define EXEC_P 0x02 -+ -+/* BFD has line number information (basically used for F_LNNO in a -+ COFF header). */ -+#define HAS_LINENO 0x04 -+ -+/* BFD has debugging information. */ -+#define HAS_DEBUG 0x08 -+ -+/* BFD has symbols. */ -+#define HAS_SYMS 0x10 -+ -+/* BFD has local symbols (basically used for F_LSYMS in a COFF -+ header). */ -+#define HAS_LOCALS 0x20 -+ -+/* BFD is a dynamic object. */ -+#define DYNAMIC 0x40 -+ -+/* Text section is write protected (if D_PAGED is not set, this is -+ like an a.out NMAGIC file) (the linker sets this by default, but -+ clears it for -r or -N). */ -+#define WP_TEXT 0x80 -+ -+/* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the -+ linker sets this by default, but clears it for -r or -n or -N). */ -+#define D_PAGED 0x100 -+ -+/* BFD is relaxable (this means that bfd_relax_section may be able to -+ do something) (sometimes bfd_relax_section can do something even if -+ this is not set). */ -+#define BFD_IS_RELAXABLE 0x200 -+ -+/* This may be set before writing out a BFD to request using a -+ traditional format. For example, this is used to request that when -+ writing out an a.out object the symbols not be hashed to eliminate -+ duplicates. */ -+#define BFD_TRADITIONAL_FORMAT 0x400 -+ -+/* This flag indicates that the BFD contents are actually cached in -+ memory. If this is set, iostream points to a bfd_in_memory struct. */ -+#define BFD_IN_MEMORY 0x800 -+ -+/* The sections in this BFD specify a memory page. */ -+#define HAS_LOAD_PAGE 0x1000 -+ -+/* This BFD has been created by the linker and doesn't correspond -+ to any input file. */ -+#define BFD_LINKER_CREATED 0x2000 -+ -+/* Symbols and relocation. */ -+ -+/* A count of carsyms (canonical archive symbols). */ -+typedef unsigned long symindex; -+ -+/* How to perform a relocation. */ -+typedef const struct reloc_howto_struct reloc_howto_type; -+ -+#define BFD_NO_MORE_SYMBOLS ((symindex) ~0) -+ -+/* General purpose part of a symbol X; -+ target specific parts are in libcoff.h, libaout.h, etc. 
*/ -+ -+#define bfd_get_section(x) ((x)->section) -+#define bfd_get_output_section(x) ((x)->section->output_section) -+#define bfd_set_section(x,y) ((x)->section) = (y) -+#define bfd_asymbol_base(x) ((x)->section->vma) -+#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value) -+#define bfd_asymbol_name(x) ((x)->name) -+/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/ -+#define bfd_asymbol_bfd(x) ((x)->the_bfd) -+#define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour) -+ -+/* A canonical archive symbol. */ -+/* This is a type pun with struct ranlib on purpose! */ -+typedef struct carsym -+{ -+ char *name; -+ file_ptr file_offset; /* Look here to find the file. */ -+} -+carsym; /* To make these you call a carsymogen. */ -+ -+/* Used in generating armaps (archive tables of contents). -+ Perhaps just a forward definition would do? */ -+struct orl /* Output ranlib. */ -+{ -+ char **name; /* Symbol name. */ -+ union -+ { -+ file_ptr pos; -+ bfd *abfd; -+ } u; /* bfd* or file position. */ -+ int namidx; /* Index into string table. */ -+}; -+ -+/* Linenumber stuff. */ -+typedef struct lineno_cache_entry -+{ -+ unsigned int line_number; /* Linenumber from start of function. */ -+ union -+ { -+ struct bfd_symbol *sym; /* Function name. */ -+ bfd_vma offset; /* Offset into section. */ -+ } u; -+} -+alent; -+ -+/* Object and core file sections. */ -+ -+#define align_power(addr, align) \ -+ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align))) -+ -+typedef struct bfd_section *sec_ptr; -+ -+#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0) -+#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0) -+#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0) -+#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0) -+#define bfd_section_name(bfd, ptr) ((ptr)->name) -+#define bfd_section_size(bfd, ptr) ((ptr)->size) -+#define bfd_get_section_size(ptr) ((ptr)->size) -+#define bfd_section_vma(bfd, ptr) ((ptr)->vma) -+#define bfd_section_lma(bfd, ptr) ((ptr)->lma) -+#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power) -+#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0) -+#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata) -+ -+#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0) -+ -+#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE) -+#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE) -+#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE) -+/* Find the address one past the end of SEC. */ -+#define bfd_get_section_limit(bfd, sec) \ -+ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \ -+ / bfd_octets_per_byte (bfd)) -+ -+typedef struct stat stat_type; -+ -+typedef enum bfd_print_symbol -+{ -+ bfd_print_symbol_name, -+ bfd_print_symbol_more, -+ bfd_print_symbol_all -+} bfd_print_symbol_type; -+ -+/* Information about a symbol that nm needs. */ -+ -+typedef struct _symbol_info -+{ -+ symvalue value; -+ char type; -+ const char *name; /* Symbol name. */ -+ unsigned char stab_type; /* Stab type. */ -+ char stab_other; /* Stab other. */ -+ short stab_desc; /* Stab desc. */ -+ const char *stab_name; /* String for stab type. */ -+} symbol_info; -+ -+/* Get the name of a stabs type code. */ -+ -+extern const char *bfd_get_stab_name (int); -+ -+/* Hash table routines. There is no way to free up a hash table. */ -+ -+/* An element in the hash table. 
Most uses will actually use a larger -+ structure, and an instance of this will be the first field. */ -+ -+struct bfd_hash_entry -+{ -+ /* Next entry for this hash code. */ -+ struct bfd_hash_entry *next; -+ /* String being hashed. */ -+ const char *string; -+ /* Hash code. This is the full hash code, not the index into the -+ table. */ -+ unsigned long hash; -+}; -+ -+/* A hash table. */ -+ -+struct bfd_hash_table -+{ -+ /* The hash array. */ -+ struct bfd_hash_entry **table; -+ /* The number of slots in the hash table. */ -+ unsigned int size; -+ /* A function used to create new elements in the hash table. The -+ first entry is itself a pointer to an element. When this -+ function is first invoked, this pointer will be NULL. However, -+ having the pointer permits a hierarchy of method functions to be -+ built each of which calls the function in the superclass. Thus -+ each function should be written to allocate a new block of memory -+ only if the argument is NULL. */ -+ struct bfd_hash_entry *(*newfunc) -+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); -+ /* An objalloc for this hash table. This is a struct objalloc *, -+ but we use void * to avoid requiring the inclusion of objalloc.h. */ -+ void *memory; -+}; -+ -+/* Initialize a hash table. */ -+extern bfd_boolean bfd_hash_table_init -+ (struct bfd_hash_table *, -+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *, -+ struct bfd_hash_table *, -+ const char *)); -+ -+/* Initialize a hash table specifying a size. */ -+extern bfd_boolean bfd_hash_table_init_n -+ (struct bfd_hash_table *, -+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *, -+ struct bfd_hash_table *, -+ const char *), -+ unsigned int size); -+ -+/* Free up a hash table. */ -+extern void bfd_hash_table_free -+ (struct bfd_hash_table *); -+ -+/* Look up a string in a hash table. If CREATE is TRUE, a new entry -+ will be created for this string if one does not already exist. The -+ COPY argument must be TRUE if this routine should copy the string -+ into newly allocated memory when adding an entry. */ -+extern struct bfd_hash_entry *bfd_hash_lookup -+ (struct bfd_hash_table *, const char *, bfd_boolean create, -+ bfd_boolean copy); -+ -+/* Replace an entry in a hash table. */ -+extern void bfd_hash_replace -+ (struct bfd_hash_table *, struct bfd_hash_entry *old, -+ struct bfd_hash_entry *nw); -+ -+/* Base method for creating a hash table entry. */ -+extern struct bfd_hash_entry *bfd_hash_newfunc -+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); -+ -+/* Grab some space for a hash table entry. */ -+extern void *bfd_hash_allocate -+ (struct bfd_hash_table *, unsigned int); -+ -+/* Traverse a hash table in a random order, calling a function on each -+ element. If the function returns FALSE, the traversal stops. The -+ INFO argument is passed to the function. */ -+extern void bfd_hash_traverse -+ (struct bfd_hash_table *, -+ bfd_boolean (*) (struct bfd_hash_entry *, void *), -+ void *info); -+ -+/* Allows the default size of a hash table to be configured. New hash -+ tables allocated using bfd_hash_table_init will be created with -+ this size. */ -+extern void bfd_hash_set_default_size (bfd_size_type); -+ -+/* This structure is used to keep track of stabs in sections -+ information while linking. */ -+ -+struct stab_info -+{ -+ /* A hash table used to hold stabs strings. */ -+ struct bfd_strtab_hash *strings; -+ /* The header file hash table. */ -+ struct bfd_hash_table includes; -+ /* The first .stabstr section. 
*/ -+ struct bfd_section *stabstr; -+}; -+ -+#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table -+ -+/* User program access to BFD facilities. */ -+ -+/* Direct I/O routines, for programs which know more about the object -+ file than BFD does. Use higher level routines if possible. */ -+ -+extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *); -+extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *); -+extern int bfd_seek (bfd *, file_ptr, int); -+extern file_ptr bfd_tell (bfd *); -+extern int bfd_flush (bfd *); -+extern int bfd_stat (bfd *, struct stat *); -+ -+/* Deprecated old routines. */ -+#if __GNUC__ -+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \ -+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \ -+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#else -+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \ -+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\ -+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#endif -+extern void warn_deprecated (const char *, const char *, int, const char *); -+ -+/* Cast from const char * to char * so that caller can assign to -+ a char * without a warning. */ -+#define bfd_get_filename(abfd) ((char *) (abfd)->filename) -+#define bfd_get_cacheable(abfd) ((abfd)->cacheable) -+#define bfd_get_format(abfd) ((abfd)->format) -+#define bfd_get_target(abfd) ((abfd)->xvec->name) -+#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour) -+#define bfd_family_coff(abfd) \ -+ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \ -+ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour) -+#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG) -+#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE) -+#define bfd_header_big_endian(abfd) \ -+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG) -+#define bfd_header_little_endian(abfd) \ -+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE) -+#define bfd_get_file_flags(abfd) ((abfd)->flags) -+#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags) -+#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags) -+#define bfd_my_archive(abfd) ((abfd)->my_archive) -+#define bfd_has_map(abfd) ((abfd)->has_armap) -+ -+#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types) -+#define bfd_usrdata(abfd) ((abfd)->usrdata) -+ -+#define bfd_get_start_address(abfd) ((abfd)->start_address) -+#define bfd_get_symcount(abfd) ((abfd)->symcount) -+#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols) -+#define bfd_count_sections(abfd) ((abfd)->section_count) -+ -+#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount) -+ -+#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char) -+ -+#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE) -+ -+extern bfd_boolean bfd_cache_close -+ (bfd *abfd); -+/* NB: This declaration should match the autogenerated one in libbfd.h. 
*/ -+ -+extern bfd_boolean bfd_cache_close_all (void); -+ -+extern bfd_boolean bfd_record_phdr -+ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma, -+ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **); -+ -+/* Byte swapping routines. */ -+ -+bfd_uint64_t bfd_getb64 (const void *); -+bfd_uint64_t bfd_getl64 (const void *); -+bfd_int64_t bfd_getb_signed_64 (const void *); -+bfd_int64_t bfd_getl_signed_64 (const void *); -+bfd_vma bfd_getb32 (const void *); -+bfd_vma bfd_getl32 (const void *); -+bfd_signed_vma bfd_getb_signed_32 (const void *); -+bfd_signed_vma bfd_getl_signed_32 (const void *); -+bfd_vma bfd_getb16 (const void *); -+bfd_vma bfd_getl16 (const void *); -+bfd_signed_vma bfd_getb_signed_16 (const void *); -+bfd_signed_vma bfd_getl_signed_16 (const void *); -+void bfd_putb64 (bfd_uint64_t, void *); -+void bfd_putl64 (bfd_uint64_t, void *); -+void bfd_putb32 (bfd_vma, void *); -+void bfd_putl32 (bfd_vma, void *); -+void bfd_putb16 (bfd_vma, void *); -+void bfd_putl16 (bfd_vma, void *); -+ -+/* Byte swapping routines which take size and endiannes as arguments. */ -+ -+bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean); -+void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean); -+ -+extern bfd_boolean bfd_section_already_linked_table_init (void); -+extern void bfd_section_already_linked_table_free (void); -+ -+/* Externally visible ECOFF routines. */ -+ -+#if defined(__STDC__) || defined(ALMOST_STDC) -+struct ecoff_debug_info; -+struct ecoff_debug_swap; -+struct ecoff_extr; -+struct bfd_symbol; -+struct bfd_link_info; -+struct bfd_link_hash_entry; -+struct bfd_elf_version_tree; -+#endif -+extern bfd_vma bfd_ecoff_get_gp_value -+ (bfd * abfd); -+extern bfd_boolean bfd_ecoff_set_gp_value -+ (bfd *abfd, bfd_vma gp_value); -+extern bfd_boolean bfd_ecoff_set_regmasks -+ (bfd *abfd, unsigned long gprmask, unsigned long fprmask, -+ unsigned long *cprmask); -+extern void *bfd_ecoff_debug_init -+ (bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); -+extern void bfd_ecoff_debug_free -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_accumulate -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd, -+ struct ecoff_debug_info *input_debug, -+ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_accumulate_other -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd, -+ struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_externals -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, bfd_boolean relocatable, -+ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *), -+ void (*set_index) (struct bfd_symbol *, bfd_size_type)); -+extern bfd_boolean bfd_ecoff_debug_one_external -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, const char *name, -+ struct ecoff_extr *esym); -+extern bfd_size_type bfd_ecoff_debug_size -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap); -+extern bfd_boolean bfd_ecoff_write_debug -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, file_ptr where); 
-+extern bfd_boolean bfd_ecoff_write_accumulated_debug -+ (void *handle, bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, -+ struct bfd_link_info *info, file_ptr where); -+ -+/* Externally visible ELF routines. */ -+ -+struct bfd_link_needed_list -+{ -+ struct bfd_link_needed_list *next; -+ bfd *by; -+ const char *name; -+}; -+ -+enum dynamic_lib_link_class { -+ DYN_NORMAL = 0, -+ DYN_AS_NEEDED = 1, -+ DYN_DT_NEEDED = 2, -+ DYN_NO_ADD_NEEDED = 4, -+ DYN_NO_NEEDED = 8 -+}; -+ -+extern bfd_boolean bfd_elf_record_link_assignment -+ (bfd *, struct bfd_link_info *, const char *, bfd_boolean, -+ bfd_boolean); -+extern struct bfd_link_needed_list *bfd_elf_get_needed_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_elf_get_bfd_needed_list -+ (bfd *, struct bfd_link_needed_list **); -+extern bfd_boolean bfd_elf_size_dynamic_sections -+ (bfd *, const char *, const char *, const char *, const char * const *, -+ struct bfd_link_info *, struct bfd_section **, -+ struct bfd_elf_version_tree *); -+extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr -+ (bfd *, struct bfd_link_info *); -+extern void bfd_elf_set_dt_needed_name -+ (bfd *, const char *); -+extern const char *bfd_elf_get_dt_soname -+ (bfd *); -+extern void bfd_elf_set_dyn_lib_class -+ (bfd *, int); -+extern int bfd_elf_get_dyn_lib_class -+ (bfd *); -+extern struct bfd_link_needed_list *bfd_elf_get_runpath_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_elf_discard_info -+ (bfd *, struct bfd_link_info *); -+extern unsigned int _bfd_elf_default_action_discarded -+ (struct bfd_section *); -+ -+/* Return an upper bound on the number of bytes required to store a -+ copy of ABFD's program header table entries. Return -1 if an error -+ occurs; bfd_get_error will return an appropriate code. */ -+extern long bfd_get_elf_phdr_upper_bound -+ (bfd *abfd); -+ -+/* Copy ABFD's program header table entries to *PHDRS. The entries -+ will be stored as an array of Elf_Internal_Phdr structures, as -+ defined in include/elf/internal.h. To find out how large the -+ buffer needs to be, call bfd_get_elf_phdr_upper_bound. -+ -+ Return the number of program header table entries read, or -1 if an -+ error occurs; bfd_get_error will return an appropriate code. */ -+extern int bfd_get_elf_phdrs -+ (bfd *abfd, void *phdrs); -+ -+/* Create a new BFD as if by bfd_openr. Rather than opening a file, -+ reconstruct an ELF file by reading the segments out of remote memory -+ based on the ELF file header at EHDR_VMA and the ELF program headers it -+ points to. If not null, *LOADBASEP is filled in with the difference -+ between the VMAs from which the segments were read, and the VMAs the -+ file headers (and hence BFD's idea of each section's VMA) put them at. -+ -+ The function TARGET_READ_MEMORY is called to copy LEN bytes from the -+ remote memory at target address VMA into the local buffer at MYADDR; it -+ should return zero on success or an `errno' code on failure. TEMPL must -+ be a BFD for an ELF target with the word size and byte order found in -+ the remote memory. */ -+extern bfd *bfd_elf_bfd_from_remote_memory -+ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep, -+ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len)); -+ -+/* Return the arch_size field of an elf bfd, or -1 if not elf. */ -+extern int bfd_get_arch_size -+ (bfd *); -+ -+/* Return TRUE if address "naturally" sign extends, or -1 if not elf. 
*/ -+extern int bfd_get_sign_extend_vma -+ (bfd *); -+ -+extern struct bfd_section *_bfd_elf_tls_setup -+ (bfd *, struct bfd_link_info *); -+ -+extern void _bfd_fix_excluded_sec_syms -+ (bfd *, struct bfd_link_info *); -+ -+extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs -+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, -+ char **); -+ -+extern bfd_boolean bfd_bfin_elf32_create_embedded_relocs -+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, -+ char **); -+ -+/* SunOS shared library support routines for the linker. */ -+ -+extern struct bfd_link_needed_list *bfd_sunos_get_needed_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_sunos_record_link_assignment -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_sunos_size_dynamic_sections -+ (bfd *, struct bfd_link_info *, struct bfd_section **, -+ struct bfd_section **, struct bfd_section **); -+ -+/* Linux shared library support routines for the linker. */ -+ -+extern bfd_boolean bfd_i386linux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_m68klinux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_sparclinux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+ -+/* mmap hacks */ -+ -+struct _bfd_window_internal; -+typedef struct _bfd_window_internal bfd_window_internal; -+ -+typedef struct _bfd_window -+{ -+ /* What the user asked for. */ -+ void *data; -+ bfd_size_type size; -+ /* The actual window used by BFD. Small user-requested read-only -+ regions sharing a page may share a single window into the object -+ file. Read-write versions shouldn't until I've fixed things to -+ keep track of which portions have been claimed by the -+ application; don't want to give the same region back when the -+ application wants two writable copies! */ -+ struct _bfd_window_internal *i; -+} -+bfd_window; -+ -+extern void bfd_init_window -+ (bfd_window *); -+extern void bfd_free_window -+ (bfd_window *); -+extern bfd_boolean bfd_get_file_window -+ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean); -+ -+/* XCOFF support routines for the linker. */ -+ -+extern bfd_boolean bfd_xcoff_link_record_set -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type); -+extern bfd_boolean bfd_xcoff_import_symbol -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma, -+ const char *, const char *, const char *, unsigned int); -+extern bfd_boolean bfd_xcoff_export_symbol -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *); -+extern bfd_boolean bfd_xcoff_link_count_reloc -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_xcoff_record_link_assignment -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_xcoff_size_dynamic_sections -+ (bfd *, struct bfd_link_info *, const char *, const char *, -+ unsigned long, unsigned long, unsigned long, bfd_boolean, -+ int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean); -+extern bfd_boolean bfd_xcoff_link_generate_rtinit -+ (bfd *, const char *, const char *, bfd_boolean); -+ -+/* XCOFF support routines for ar. */ -+extern bfd_boolean bfd_xcoff_ar_archive_set_magic -+ (bfd *, char *); -+ -+/* Externally visible COFF routines. 
*/ -+ -+#if defined(__STDC__) || defined(ALMOST_STDC) -+struct internal_syment; -+union internal_auxent; -+#endif -+ -+extern bfd_boolean bfd_coff_get_syment -+ (bfd *, struct bfd_symbol *, struct internal_syment *); -+ -+extern bfd_boolean bfd_coff_get_auxent -+ (bfd *, struct bfd_symbol *, int, union internal_auxent *); -+ -+extern bfd_boolean bfd_coff_set_symbol_class -+ (bfd *, struct bfd_symbol *, unsigned int); -+ -+extern bfd_boolean bfd_m68k_coff_create_embedded_relocs -+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **); -+ -+/* ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_arm_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_arm_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+extern bfd_boolean bfd_arm_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+/* PE ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_arm_pe_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_arm_pe_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+/* ELF ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_elf32_arm_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+void bfd_elf32_arm_set_target_relocs -+ (struct bfd_link_info *, int, char *, int, int); -+ -+extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd -+ (bfd *, struct bfd_link_info *); -+ -+/* ELF ARM mapping symbol support */ -+extern bfd_boolean bfd_is_arm_mapping_symbol_name -+ (const char * name); -+ -+/* ARM Note section processing. */ -+extern bfd_boolean bfd_arm_merge_machines -+ (bfd *, bfd *); -+ -+extern bfd_boolean bfd_arm_update_notes -+ (bfd *, const char *); -+ -+extern unsigned int bfd_arm_get_mach_from_notes -+ (bfd *, const char *); -+ -+/* TI COFF load page support. */ -+extern void bfd_ticoff_set_section_load_page -+ (struct bfd_section *, int); -+ -+extern int bfd_ticoff_get_section_load_page -+ (struct bfd_section *); -+ -+/* H8/300 functions. */ -+extern bfd_vma bfd_h8300_pad_address -+ (bfd *, bfd_vma); -+ -+/* IA64 Itanium code generation. Called from linker. */ -+extern void bfd_elf32_ia64_after_parse -+ (int); -+ -+extern void bfd_elf64_ia64_after_parse -+ (int); -+ -+/* This structure is used for a comdat section, as in PE. A comdat -+ section is associated with a particular symbol. When the linker -+ sees a comdat section, it keeps only one of the sections with a -+ given name and associated with a given symbol. */ -+ -+struct coff_comdat_info -+{ -+ /* The name of the symbol associated with a comdat section. */ -+ const char *name; -+ -+ /* The local symbol table index of the symbol associated with a -+ comdat section. This is only meaningful to the object file format -+ specific code; it is not an index into the list returned by -+ bfd_canonicalize_symtab. */ -+ long symbol; -+}; -+ -+extern struct coff_comdat_info *bfd_coff_get_comdat_section -+ (bfd *, struct bfd_section *); -+ -+/* Extracted from init.c. */ -+void bfd_init (void); -+ -+/* Extracted from opncls.c. 
*/ -+bfd *bfd_fopen (const char *filename, const char *target, -+ const char *mode, int fd); -+ -+bfd *bfd_openr (const char *filename, const char *target); -+ -+bfd *bfd_fdopenr (const char *filename, const char *target, int fd); -+ -+bfd *bfd_openstreamr (const char *, const char *, void *); -+ -+bfd *bfd_openr_iovec (const char *filename, const char *target, -+ void *(*open) (struct bfd *nbfd, -+ void *open_closure), -+ void *open_closure, -+ file_ptr (*pread) (struct bfd *nbfd, -+ void *stream, -+ void *buf, -+ file_ptr nbytes, -+ file_ptr offset), -+ int (*close) (struct bfd *nbfd, -+ void *stream)); -+ -+bfd *bfd_openw (const char *filename, const char *target); -+ -+bfd_boolean bfd_close (bfd *abfd); -+ -+bfd_boolean bfd_close_all_done (bfd *); -+ -+bfd *bfd_create (const char *filename, bfd *templ); -+ -+bfd_boolean bfd_make_writable (bfd *abfd); -+ -+bfd_boolean bfd_make_readable (bfd *abfd); -+ -+unsigned long bfd_calc_gnu_debuglink_crc32 -+ (unsigned long crc, const unsigned char *buf, bfd_size_type len); -+ -+char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir); -+ -+struct bfd_section *bfd_create_gnu_debuglink_section -+ (bfd *abfd, const char *filename); -+ -+bfd_boolean bfd_fill_in_gnu_debuglink_section -+ (bfd *abfd, struct bfd_section *sect, const char *filename); -+ -+/* Extracted from libbfd.c. */ -+ -+/* Byte swapping macros for user section data. */ -+ -+#define bfd_put_8(abfd, val, ptr) \ -+ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff)) -+#define bfd_put_signed_8 \ -+ bfd_put_8 -+#define bfd_get_8(abfd, ptr) \ -+ (*(unsigned char *) (ptr) & 0xff) -+#define bfd_get_signed_8(abfd, ptr) \ -+ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80) -+ -+#define bfd_put_16(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx16, ((val),(ptr))) -+#define bfd_put_signed_16 \ -+ bfd_put_16 -+#define bfd_get_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx16, (ptr)) -+#define bfd_get_signed_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_16, (ptr)) -+ -+#define bfd_put_32(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx32, ((val),(ptr))) -+#define bfd_put_signed_32 \ -+ bfd_put_32 -+#define bfd_get_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx32, (ptr)) -+#define bfd_get_signed_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_32, (ptr)) -+ -+#define bfd_put_64(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx64, ((val), (ptr))) -+#define bfd_put_signed_64 \ -+ bfd_put_64 -+#define bfd_get_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx64, (ptr)) -+#define bfd_get_signed_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_64, (ptr)) -+ -+#define bfd_get(bits, abfd, ptr) \ -+ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \ -+ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \ -+ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \ -+ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \ -+ : (abort (), (bfd_vma) - 1)) -+ -+#define bfd_put(bits, abfd, val, ptr) \ -+ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \ -+ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \ -+ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \ -+ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \ -+ : (abort (), (void) 0)) -+ -+ -+/* Byte swapping macros for file header data. 
*/ -+ -+#define bfd_h_put_8(abfd, val, ptr) \ -+ bfd_put_8 (abfd, val, ptr) -+#define bfd_h_put_signed_8(abfd, val, ptr) \ -+ bfd_put_8 (abfd, val, ptr) -+#define bfd_h_get_8(abfd, ptr) \ -+ bfd_get_8 (abfd, ptr) -+#define bfd_h_get_signed_8(abfd, ptr) \ -+ bfd_get_signed_8 (abfd, ptr) -+ -+#define bfd_h_put_16(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx16, (val, ptr)) -+#define bfd_h_put_signed_16 \ -+ bfd_h_put_16 -+#define bfd_h_get_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx16, (ptr)) -+#define bfd_h_get_signed_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr)) -+ -+#define bfd_h_put_32(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx32, (val, ptr)) -+#define bfd_h_put_signed_32 \ -+ bfd_h_put_32 -+#define bfd_h_get_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx32, (ptr)) -+#define bfd_h_get_signed_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr)) -+ -+#define bfd_h_put_64(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx64, (val, ptr)) -+#define bfd_h_put_signed_64 \ -+ bfd_h_put_64 -+#define bfd_h_get_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx64, (ptr)) -+#define bfd_h_get_signed_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr)) -+ -+/* Aliases for the above, which should eventually go away. */ -+ -+#define H_PUT_64 bfd_h_put_64 -+#define H_PUT_32 bfd_h_put_32 -+#define H_PUT_16 bfd_h_put_16 -+#define H_PUT_8 bfd_h_put_8 -+#define H_PUT_S64 bfd_h_put_signed_64 -+#define H_PUT_S32 bfd_h_put_signed_32 -+#define H_PUT_S16 bfd_h_put_signed_16 -+#define H_PUT_S8 bfd_h_put_signed_8 -+#define H_GET_64 bfd_h_get_64 -+#define H_GET_32 bfd_h_get_32 -+#define H_GET_16 bfd_h_get_16 -+#define H_GET_8 bfd_h_get_8 -+#define H_GET_S64 bfd_h_get_signed_64 -+#define H_GET_S32 bfd_h_get_signed_32 -+#define H_GET_S16 bfd_h_get_signed_16 -+#define H_GET_S8 bfd_h_get_signed_8 -+ -+ -+/* Extracted from bfdio.c. */ -+long bfd_get_mtime (bfd *abfd); -+ -+long bfd_get_size (bfd *abfd); -+ -+/* Extracted from bfdwin.c. */ -+/* Extracted from section.c. */ -+typedef struct bfd_section -+{ -+ /* The name of the section; the name isn't a copy, the pointer is -+ the same as that passed to bfd_make_section. */ -+ const char *name; -+ -+ /* A unique sequence number. */ -+ int id; -+ -+ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */ -+ int index; -+ -+ /* The next section in the list belonging to the BFD, or NULL. */ -+ struct bfd_section *next; -+ -+ /* The previous section in the list belonging to the BFD, or NULL. */ -+ struct bfd_section *prev; -+ -+ /* The field flags contains attributes of the section. Some -+ flags are read in from the object file, and some are -+ synthesized from other information. */ -+ flagword flags; -+ -+#define SEC_NO_FLAGS 0x000 -+ -+ /* Tells the OS to allocate space for this section when loading. -+ This is clear for a section containing debug information only. */ -+#define SEC_ALLOC 0x001 -+ -+ /* Tells the OS to load the section from the file when loading. -+ This is clear for a .bss section. */ -+#define SEC_LOAD 0x002 -+ -+ /* The section contains data still to be relocated, so there is -+ some relocation information too. */ -+#define SEC_RELOC 0x004 -+ -+ /* A signal to the OS that the section contains read only data. */ -+#define SEC_READONLY 0x008 -+ -+ /* The section contains code only. */ -+#define SEC_CODE 0x010 -+ -+ /* The section contains data only. */ -+#define SEC_DATA 0x020 -+ -+ /* The section will reside in ROM. */ -+#define SEC_ROM 0x040 -+ -+ /* The section contains constructor information. 
This section
-+ type is used by the linker to create lists of constructors and
-+ destructors used by <<g++>>. When a back end sees a symbol
-+ which should be used in a constructor list, it creates a new
-+ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches
-+ the symbol to it, and builds a relocation. To build the lists
-+ of constructors, all the linker has to do is catenate all the
-+ sections called <<__CTOR_LIST__>> and relocate the data
-+ contained within - exactly the operations it would peform on
-+ standard data. */
-+#define SEC_CONSTRUCTOR 0x080
-+
-+ /* The section has contents - a data section could be
-+ <<SEC_ALLOC>> | <<SEC_HAS_CONTENTS>>; a debug section could be
-+ <<SEC_HAS_CONTENTS>> */
-+#define SEC_HAS_CONTENTS 0x100
-+
-+ /* An instruction to the linker to not output the section
-+ even if it has information which would normally be written. */
-+#define SEC_NEVER_LOAD 0x200
-+
-+ /* The section contains thread local data. */
-+#define SEC_THREAD_LOCAL 0x400
-+
-+ /* The section has GOT references. This flag is only for the
-+ linker, and is currently only used by the elf32-hppa back end.
-+ It will be set if global offset table references were detected
-+ in this section, which indicate to the linker that the section
-+ contains PIC code, and must be handled specially when doing a
-+ static link. */
-+#define SEC_HAS_GOT_REF 0x800
-+
-+ /* The section contains common symbols (symbols may be defined
-+ multiple times, the value of a symbol is the amount of
-+ space it requires, and the largest symbol value is the one
-+ used). Most targets have exactly one of these (which we
-+ translate to bfd_com_section_ptr), but ECOFF has two. */
-+#define SEC_IS_COMMON 0x1000
-+
-+ /* The section contains only debugging information. For
-+ example, this is set for ELF .debug and .stab sections.
-+ strip tests this flag to see if a section can be
-+ discarded. */
-+#define SEC_DEBUGGING 0x2000
-+
-+ /* The contents of this section are held in memory pointed to
-+ by the contents field. This is checked by bfd_get_section_contents,
-+ and the data is retrieved from memory if appropriate. */
-+#define SEC_IN_MEMORY 0x4000
-+
-+ /* The contents of this section are to be excluded by the
-+ linker for executable and shared objects unless those
-+ objects are to be further relocated. */
-+#define SEC_EXCLUDE 0x8000
-+
-+ /* The contents of this section are to be sorted based on the sum of
-+ the symbol and addend values specified by the associated relocation
-+ entries. Entries without associated relocation entries will be
-+ appended to the end of the section in an unspecified order. */
-+#define SEC_SORT_ENTRIES 0x10000
-+
-+ /* When linking, duplicate sections of the same name should be
-+ discarded, rather than being combined into a single section as
-+ is usually done. This is similar to how common symbols are
-+ handled. See SEC_LINK_DUPLICATES below. */
-+#define SEC_LINK_ONCE 0x20000
-+
-+ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker
-+ should handle duplicate sections. */
-+#define SEC_LINK_DUPLICATES 0x40000
-+
-+ /* This value for SEC_LINK_DUPLICATES means that duplicate
-+ sections with the same name should simply be discarded. */
-+#define SEC_LINK_DUPLICATES_DISCARD 0x0
-+
-+ /* This value for SEC_LINK_DUPLICATES means that the linker
-+ should warn if there are any duplicate sections, although
-+ it should still only link one copy.
*/ -+#define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if any duplicate sections are a different size. */ -+#define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if any duplicate sections contain different -+ contents. */ -+#define SEC_LINK_DUPLICATES_SAME_CONTENTS \ -+ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE) -+ -+ /* This section was created by the linker as part of dynamic -+ relocation or other arcane processing. It is skipped when -+ going through the first-pass output, trusting that someone -+ else up the line will take care of it later. */ -+#define SEC_LINKER_CREATED 0x200000 -+ -+ /* This section should not be subject to garbage collection. */ -+#define SEC_KEEP 0x400000 -+ -+ /* This section contains "short" data, and should be placed -+ "near" the GP. */ -+#define SEC_SMALL_DATA 0x800000 -+ -+ /* Attempt to merge identical entities in the section. -+ Entity size is given in the entsize field. */ -+#define SEC_MERGE 0x1000000 -+ -+ /* If given with SEC_MERGE, entities to merge are zero terminated -+ strings where entsize specifies character size instead of fixed -+ size entries. */ -+#define SEC_STRINGS 0x2000000 -+ -+ /* This section contains data about section groups. */ -+#define SEC_GROUP 0x4000000 -+ -+ /* The section is a COFF shared library section. This flag is -+ only for the linker. If this type of section appears in -+ the input file, the linker must copy it to the output file -+ without changing the vma or size. FIXME: Although this -+ was originally intended to be general, it really is COFF -+ specific (and the flag was renamed to indicate this). It -+ might be cleaner to have some more general mechanism to -+ allow the back end to control what the linker does with -+ sections. */ -+#define SEC_COFF_SHARED_LIBRARY 0x10000000 -+ -+ /* This section contains data which may be shared with other -+ executables or shared objects. This is for COFF only. */ -+#define SEC_COFF_SHARED 0x20000000 -+ -+ /* When a section with this flag is being linked, then if the size of -+ the input section is less than a page, it should not cross a page -+ boundary. If the size of the input section is one page or more, -+ it should be aligned on a page boundary. This is for TI -+ TMS320C54X only. */ -+#define SEC_TIC54X_BLOCK 0x40000000 -+ -+ /* Conditionally link this section; do not link if there are no -+ references found to any symbol in the section. This is for TI -+ TMS320C54X only. */ -+#define SEC_TIC54X_CLINK 0x80000000 -+ -+ /* End of section flags. */ -+ -+ /* Some internal packed boolean fields. */ -+ -+ /* See the vma field. */ -+ unsigned int user_set_vma : 1; -+ -+ /* A mark flag used by some of the linker backends. */ -+ unsigned int linker_mark : 1; -+ -+ /* Another mark flag used by some of the linker backends. Set for -+ output sections that have an input section. */ -+ unsigned int linker_has_input : 1; -+ -+ /* Mark flags used by some linker backends for garbage collection. */ -+ unsigned int gc_mark : 1; -+ unsigned int gc_mark_from_eh : 1; -+ -+ /* The following flags are used by the ELF linker. */ -+ -+ /* Mark sections which have been allocated to segments. */ -+ unsigned int segment_mark : 1; -+ -+ /* Type of sec_info information. 
*/
-+ unsigned int sec_info_type:3;
-+#define ELF_INFO_TYPE_NONE 0
-+#define ELF_INFO_TYPE_STABS 1
-+#define ELF_INFO_TYPE_MERGE 2
-+#define ELF_INFO_TYPE_EH_FRAME 3
-+#define ELF_INFO_TYPE_JUST_SYMS 4
-+
-+ /* Nonzero if this section uses RELA relocations, rather than REL. */
-+ unsigned int use_rela_p:1;
-+
-+ /* Bits used by various backends. The generic code doesn't touch
-+ these fields. */
-+
-+ /* Nonzero if this section has TLS related relocations. */
-+ unsigned int has_tls_reloc:1;
-+
-+ /* Nonzero if this section has a gp reloc. */
-+ unsigned int has_gp_reloc:1;
-+
-+ /* Nonzero if this section needs the relax finalize pass. */
-+ unsigned int need_finalize_relax:1;
-+
-+ /* Whether relocations have been processed. */
-+ unsigned int reloc_done : 1;
-+
-+ /* End of internal packed boolean fields. */
-+
-+ /* The virtual memory address of the section - where it will be
-+ at run time. The symbols are relocated against this. The
-+ user_set_vma flag is maintained by bfd; if it's not set, the
-+ backend can assign addresses (for example, in <<a.out>>, where
-+ the default address for <<.data>> is dependent on the specific
-+ target and various flags). */
-+ bfd_vma vma;
-+
-+ /* The load address of the section - where it would be in a
-+ rom image; really only used for writing section header
-+ information. */
-+ bfd_vma lma;
-+
-+ /* The size of the section in octets, as it will be output.
-+ Contains a value even if the section has no contents (e.g., the
-+ size of <<.bss>>). */
-+ bfd_size_type size;
-+
-+ /* For input sections, the original size on disk of the section, in
-+ octets. This field is used by the linker relaxation code. It is
-+ currently only set for sections where the linker relaxation scheme
-+ doesn't cache altered section and reloc contents (stabs, eh_frame,
-+ SEC_MERGE, some coff relaxing targets), and thus the original size
-+ needs to be kept to read the section multiple times.
-+ For output sections, rawsize holds the section size calculated on
-+ a previous linker relaxation pass. */
-+ bfd_size_type rawsize;
-+
-+ /* If this section is going to be output, then this value is the
-+ offset in *bytes* into the output section of the first byte in the
-+ input section (byte ==> smallest addressable unit on the
-+ target). In most cases, if this was going to start at the
-+ 100th octet (8-bit quantity) in the output section, this value
-+ would be 100. However, if the target byte size is 16 bits
-+ (bfd_octets_per_byte is "2"), this value would be 50. */
-+ bfd_vma output_offset;
-+
-+ /* The output section through which to map on output. */
-+ struct bfd_section *output_section;
-+
-+ /* The alignment requirement of the section, as an exponent of 2 -
-+ e.g., 3 aligns to 2^3 (or 8). */
-+ unsigned int alignment_power;
-+
-+ /* If an input section, a pointer to a vector of relocation
-+ records for the data in this section. */
-+ struct reloc_cache_entry *relocation;
-+
-+ /* If an output section, a pointer to a vector of pointers to
-+ relocation records for the data in this section. */
-+ struct reloc_cache_entry **orelocation;
-+
-+ /* The number of relocation records in one of the above. */
-+ unsigned reloc_count;
-+
-+ /* Information below is back end specific - and not always used
-+ or updated. */
-+
-+ /* File position of section data. */
-+ file_ptr filepos;
-+
-+ /* File position of relocation info. */
-+ file_ptr rel_filepos;
-+
-+ /* File position of line data. */
-+ file_ptr line_filepos;
-+
-+ /* Pointer to data for applications.
*/ -+ void *userdata; -+ -+ /* If the SEC_IN_MEMORY flag is set, this points to the actual -+ contents. */ -+ unsigned char *contents; -+ -+ /* Attached line number information. */ -+ alent *lineno; -+ -+ /* Number of line number records. */ -+ unsigned int lineno_count; -+ -+ /* Entity size for merging purposes. */ -+ unsigned int entsize; -+ -+ /* Points to the kept section if this section is a link-once section, -+ and is discarded. */ -+ struct bfd_section *kept_section; -+ -+ /* When a section is being output, this value changes as more -+ linenumbers are written out. */ -+ file_ptr moving_line_filepos; -+ -+ /* What the section number is in the target world. */ -+ int target_index; -+ -+ void *used_by_bfd; -+ -+ /* If this is a constructor section then here is a list of the -+ relocations created to relocate items within it. */ -+ struct relent_chain *constructor_chain; -+ -+ /* The BFD which owns the section. */ -+ bfd *owner; -+ -+ /* A symbol which points at this section only. */ -+ struct bfd_symbol *symbol; -+ struct bfd_symbol **symbol_ptr_ptr; -+ -+ /* Early in the link process, map_head and map_tail are used to build -+ a list of input sections attached to an output section. Later, -+ output sections use these fields for a list of bfd_link_order -+ structs. */ -+ union { -+ struct bfd_link_order *link_order; -+ struct bfd_section *s; -+ } map_head, map_tail; -+} asection; -+ -+/* These sections are global, and are managed by BFD. The application -+ and target back end are not permitted to change the values in -+ these sections. New code should use the section_ptr macros rather -+ than referring directly to the const sections. The const sections -+ may eventually vanish. */ -+#define BFD_ABS_SECTION_NAME "*ABS*" -+#define BFD_UND_SECTION_NAME "*UND*" -+#define BFD_COM_SECTION_NAME "*COM*" -+#define BFD_IND_SECTION_NAME "*IND*" -+ -+/* The absolute section. */ -+extern asection bfd_abs_section; -+#define bfd_abs_section_ptr ((asection *) &bfd_abs_section) -+#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr) -+/* Pointer to the undefined section. */ -+extern asection bfd_und_section; -+#define bfd_und_section_ptr ((asection *) &bfd_und_section) -+#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr) -+/* Pointer to the common section. */ -+extern asection bfd_com_section; -+#define bfd_com_section_ptr ((asection *) &bfd_com_section) -+/* Pointer to the indirect section. */ -+extern asection bfd_ind_section; -+#define bfd_ind_section_ptr ((asection *) &bfd_ind_section) -+#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr) -+ -+#define bfd_is_const_section(SEC) \ -+ ( ((SEC) == bfd_abs_section_ptr) \ -+ || ((SEC) == bfd_und_section_ptr) \ -+ || ((SEC) == bfd_com_section_ptr) \ -+ || ((SEC) == bfd_ind_section_ptr)) -+ -+extern const struct bfd_symbol * const bfd_abs_symbol; -+extern const struct bfd_symbol * const bfd_com_symbol; -+extern const struct bfd_symbol * const bfd_und_symbol; -+extern const struct bfd_symbol * const bfd_ind_symbol; -+ -+/* Macros to handle insertion and deletion of a bfd's sections. These -+ only handle the list pointers, ie. do not adjust section_count, -+ target_index etc. 
*/ -+#define bfd_section_list_remove(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ asection *_next = _s->next; \ -+ asection *_prev = _s->prev; \ -+ if (_prev) \ -+ _prev->next = _next; \ -+ else \ -+ (ABFD)->sections = _next; \ -+ if (_next) \ -+ _next->prev = _prev; \ -+ else \ -+ (ABFD)->section_last = _prev; \ -+ } \ -+ while (0) -+#define bfd_section_list_append(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ bfd *_abfd = ABFD; \ -+ _s->next = NULL; \ -+ if (_abfd->section_last) \ -+ { \ -+ _s->prev = _abfd->section_last; \ -+ _abfd->section_last->next = _s; \ -+ } \ -+ else \ -+ { \ -+ _s->prev = NULL; \ -+ _abfd->sections = _s; \ -+ } \ -+ _abfd->section_last = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_prepend(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ bfd *_abfd = ABFD; \ -+ _s->prev = NULL; \ -+ if (_abfd->sections) \ -+ { \ -+ _s->next = _abfd->sections; \ -+ _abfd->sections->prev = _s; \ -+ } \ -+ else \ -+ { \ -+ _s->next = NULL; \ -+ _abfd->section_last = _s; \ -+ } \ -+ _abfd->sections = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_insert_after(ABFD, A, S) \ -+ do \ -+ { \ -+ asection *_a = A; \ -+ asection *_s = S; \ -+ asection *_next = _a->next; \ -+ _s->next = _next; \ -+ _s->prev = _a; \ -+ _a->next = _s; \ -+ if (_next) \ -+ _next->prev = _s; \ -+ else \ -+ (ABFD)->section_last = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_insert_before(ABFD, B, S) \ -+ do \ -+ { \ -+ asection *_b = B; \ -+ asection *_s = S; \ -+ asection *_prev = _b->prev; \ -+ _s->prev = _prev; \ -+ _s->next = _b; \ -+ _b->prev = _s; \ -+ if (_prev) \ -+ _prev->next = _s; \ -+ else \ -+ (ABFD)->sections = _s; \ -+ } \ -+ while (0) -+#define bfd_section_removed_from_list(ABFD, S) \ -+ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S)) -+ -+#define BFD_FAKE_SECTION(SEC, FLAGS, SYM, SYM_PTR, NAME, IDX) \ -+ /* name, id, index, next, prev, flags, user_set_vma, */ \ -+ { NAME, IDX, 0, NULL, NULL, FLAGS, 0, \ -+ \ -+ /* linker_mark, linker_has_input, gc_mark, gc_mark_from_eh, */ \ -+ 0, 0, 1, 0, \ -+ \ -+ /* segment_mark, sec_info_type, use_rela_p, has_tls_reloc, */ \ -+ 0, 0, 0, 0, \ -+ \ -+ /* has_gp_reloc, need_finalize_relax, reloc_done, */ \ -+ 0, 0, 0, \ -+ \ -+ /* vma, lma, size, rawsize */ \ -+ 0, 0, 0, 0, \ -+ \ -+ /* output_offset, output_section, alignment_power, */ \ -+ 0, (struct bfd_section *) &SEC, 0, \ -+ \ -+ /* relocation, orelocation, reloc_count, filepos, rel_filepos, */ \ -+ NULL, NULL, 0, 0, 0, \ -+ \ -+ /* line_filepos, userdata, contents, lineno, lineno_count, */ \ -+ 0, NULL, NULL, NULL, 0, \ -+ \ -+ /* entsize, kept_section, moving_line_filepos, */ \ -+ 0, NULL, 0, \ -+ \ -+ /* target_index, used_by_bfd, constructor_chain, owner, */ \ -+ 0, NULL, NULL, NULL, \ -+ \ -+ /* symbol, */ \ -+ (struct bfd_symbol *) SYM, \ -+ \ -+ /* symbol_ptr_ptr, */ \ -+ (struct bfd_symbol **) SYM_PTR, \ -+ \ -+ /* map_head, map_tail */ \ -+ { NULL }, { NULL } \ -+ } -+ -+void bfd_section_list_clear (bfd *); -+ -+asection *bfd_get_section_by_name (bfd *abfd, const char *name); -+ -+asection *bfd_get_section_by_name_if -+ (bfd *abfd, -+ const char *name, -+ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+char *bfd_get_unique_section_name -+ (bfd *abfd, const char *templat, int *count); -+ -+asection *bfd_make_section_old_way (bfd *abfd, const char *name); -+ -+asection *bfd_make_section_anyway_with_flags -+ (bfd *abfd, const char *name, flagword flags); -+ -+asection *bfd_make_section_anyway (bfd *abfd, 
const char *name); -+ -+asection *bfd_make_section_with_flags -+ (bfd *, const char *name, flagword flags); -+ -+asection *bfd_make_section (bfd *, const char *name); -+ -+bfd_boolean bfd_set_section_flags -+ (bfd *abfd, asection *sec, flagword flags); -+ -+void bfd_map_over_sections -+ (bfd *abfd, -+ void (*func) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+asection *bfd_sections_find_if -+ (bfd *abfd, -+ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+bfd_boolean bfd_set_section_size -+ (bfd *abfd, asection *sec, bfd_size_type val); -+ -+bfd_boolean bfd_set_section_contents -+ (bfd *abfd, asection *section, const void *data, -+ file_ptr offset, bfd_size_type count); -+ -+bfd_boolean bfd_get_section_contents -+ (bfd *abfd, asection *section, void *location, file_ptr offset, -+ bfd_size_type count); -+ -+bfd_boolean bfd_malloc_and_get_section -+ (bfd *abfd, asection *section, bfd_byte **buf); -+ -+bfd_boolean bfd_copy_private_section_data -+ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec); -+ -+#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \ -+ BFD_SEND (obfd, _bfd_copy_private_section_data, \ -+ (ibfd, isection, obfd, osection)) -+bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec); -+ -+bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group); -+ -+/* Extracted from archures.c. */ -+enum bfd_architecture -+{ -+ bfd_arch_unknown, /* File arch not known. */ -+ bfd_arch_obscure, /* Arch known, not one of these. */ -+ bfd_arch_m68k, /* Motorola 68xxx */ -+#define bfd_mach_m68000 1 -+#define bfd_mach_m68008 2 -+#define bfd_mach_m68010 3 -+#define bfd_mach_m68020 4 -+#define bfd_mach_m68030 5 -+#define bfd_mach_m68040 6 -+#define bfd_mach_m68060 7 -+#define bfd_mach_cpu32 8 -+#define bfd_mach_mcf5200 9 -+#define bfd_mach_mcf5206e 10 -+#define bfd_mach_mcf5307 11 -+#define bfd_mach_mcf5407 12 -+#define bfd_mach_mcf528x 13 -+#define bfd_mach_mcfv4e 14 -+#define bfd_mach_mcf521x 15 -+#define bfd_mach_mcf5249 16 -+#define bfd_mach_mcf547x 17 -+#define bfd_mach_mcf548x 18 -+ bfd_arch_vax, /* DEC Vax */ -+ bfd_arch_i960, /* Intel 960 */ -+ /* The order of the following is important. -+ lower number indicates a machine type that -+ only accepts a subset of the instructions -+ available to machines with higher numbers. -+ The exception is the "ca", which is -+ incompatible with all other machines except -+ "core". */ -+ -+#define bfd_mach_i960_core 1 -+#define bfd_mach_i960_ka_sa 2 -+#define bfd_mach_i960_kb_sb 3 -+#define bfd_mach_i960_mc 4 -+#define bfd_mach_i960_xa 5 -+#define bfd_mach_i960_ca 6 -+#define bfd_mach_i960_jx 7 -+#define bfd_mach_i960_hx 8 -+ -+ bfd_arch_or32, /* OpenRISC 32 */ -+ -+ bfd_arch_sparc, /* SPARC */ -+#define bfd_mach_sparc 1 -+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */ -+#define bfd_mach_sparc_sparclet 2 -+#define bfd_mach_sparc_sparclite 3 -+#define bfd_mach_sparc_v8plus 4 -+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */ -+#define bfd_mach_sparc_sparclite_le 6 -+#define bfd_mach_sparc_v9 7 -+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */ -+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */ -+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */ -+/* Nonzero if MACH has the v9 instruction set. 
*/ -+#define bfd_mach_sparc_v9_p(mach) \ -+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \ -+ && (mach) != bfd_mach_sparc_sparclite_le) -+/* Nonzero if MACH is a 64 bit sparc architecture. */ -+#define bfd_mach_sparc_64bit_p(mach) \ -+ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb) -+ bfd_arch_mips, /* MIPS Rxxxx */ -+#define bfd_mach_mips3000 3000 -+#define bfd_mach_mips3900 3900 -+#define bfd_mach_mips4000 4000 -+#define bfd_mach_mips4010 4010 -+#define bfd_mach_mips4100 4100 -+#define bfd_mach_mips4111 4111 -+#define bfd_mach_mips4120 4120 -+#define bfd_mach_mips4300 4300 -+#define bfd_mach_mips4400 4400 -+#define bfd_mach_mips4600 4600 -+#define bfd_mach_mips4650 4650 -+#define bfd_mach_mips5000 5000 -+#define bfd_mach_mips5400 5400 -+#define bfd_mach_mips5500 5500 -+#define bfd_mach_mips6000 6000 -+#define bfd_mach_mips7000 7000 -+#define bfd_mach_mips8000 8000 -+#define bfd_mach_mips9000 9000 -+#define bfd_mach_mips10000 10000 -+#define bfd_mach_mips12000 12000 -+#define bfd_mach_mips16 16 -+#define bfd_mach_mips5 5 -+#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */ -+#define bfd_mach_mipsisa32 32 -+#define bfd_mach_mipsisa32r2 33 -+#define bfd_mach_mipsisa64 64 -+#define bfd_mach_mipsisa64r2 65 -+ bfd_arch_i386, /* Intel 386 */ -+#define bfd_mach_i386_i386 1 -+#define bfd_mach_i386_i8086 2 -+#define bfd_mach_i386_i386_intel_syntax 3 -+#define bfd_mach_x86_64 64 -+#define bfd_mach_x86_64_intel_syntax 65 -+ bfd_arch_we32k, /* AT&T WE32xxx */ -+ bfd_arch_tahoe, /* CCI/Harris Tahoe */ -+ bfd_arch_i860, /* Intel 860 */ -+ bfd_arch_i370, /* IBM 360/370 Mainframes */ -+ bfd_arch_romp, /* IBM ROMP PC/RT */ -+ bfd_arch_convex, /* Convex */ -+ bfd_arch_m88k, /* Motorola 88xxx */ -+ bfd_arch_m98k, /* Motorola 98xxx */ -+ bfd_arch_pyramid, /* Pyramid Technology */ -+ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */ -+#define bfd_mach_h8300 1 -+#define bfd_mach_h8300h 2 -+#define bfd_mach_h8300s 3 -+#define bfd_mach_h8300hn 4 -+#define bfd_mach_h8300sn 5 -+#define bfd_mach_h8300sx 6 -+#define bfd_mach_h8300sxn 7 -+ bfd_arch_pdp11, /* DEC PDP-11 */ -+ bfd_arch_powerpc, /* PowerPC */ -+#define bfd_mach_ppc 32 -+#define bfd_mach_ppc64 64 -+#define bfd_mach_ppc_403 403 -+#define bfd_mach_ppc_403gc 4030 -+#define bfd_mach_ppc_505 505 -+#define bfd_mach_ppc_601 601 -+#define bfd_mach_ppc_602 602 -+#define bfd_mach_ppc_603 603 -+#define bfd_mach_ppc_ec603e 6031 -+#define bfd_mach_ppc_604 604 -+#define bfd_mach_ppc_620 620 -+#define bfd_mach_ppc_630 630 -+#define bfd_mach_ppc_750 750 -+#define bfd_mach_ppc_860 860 -+#define bfd_mach_ppc_a35 35 -+#define bfd_mach_ppc_rs64ii 642 -+#define bfd_mach_ppc_rs64iii 643 -+#define bfd_mach_ppc_7400 7400 -+#define bfd_mach_ppc_e500 500 -+ bfd_arch_rs6000, /* IBM RS/6000 */ -+#define bfd_mach_rs6k 6000 -+#define bfd_mach_rs6k_rs1 6001 -+#define bfd_mach_rs6k_rsc 6003 -+#define bfd_mach_rs6k_rs2 6002 -+ bfd_arch_hppa, /* HP PA RISC */ -+#define bfd_mach_hppa10 10 -+#define bfd_mach_hppa11 11 -+#define bfd_mach_hppa20 20 -+#define bfd_mach_hppa20w 25 -+ bfd_arch_d10v, /* Mitsubishi D10V */ -+#define bfd_mach_d10v 1 -+#define bfd_mach_d10v_ts2 2 -+#define bfd_mach_d10v_ts3 3 -+ bfd_arch_d30v, /* Mitsubishi D30V */ -+ bfd_arch_dlx, /* DLX */ -+ bfd_arch_m68hc11, /* Motorola 68HC11 */ -+ bfd_arch_m68hc12, /* Motorola 68HC12 */ -+#define bfd_mach_m6812_default 0 -+#define bfd_mach_m6812 1 -+#define bfd_mach_m6812s 2 -+ bfd_arch_z8k, /* Zilog Z8000 */ -+#define bfd_mach_z8001 1 -+#define bfd_mach_z8002 2 -+ 
bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */ -+ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */ -+#define bfd_mach_sh 1 -+#define bfd_mach_sh2 0x20 -+#define bfd_mach_sh_dsp 0x2d -+#define bfd_mach_sh2a 0x2a -+#define bfd_mach_sh2a_nofpu 0x2b -+#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1 -+#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2 -+#define bfd_mach_sh2a_or_sh4 0x2a3 -+#define bfd_mach_sh2a_or_sh3e 0x2a4 -+#define bfd_mach_sh2e 0x2e -+#define bfd_mach_sh3 0x30 -+#define bfd_mach_sh3_nommu 0x31 -+#define bfd_mach_sh3_dsp 0x3d -+#define bfd_mach_sh3e 0x3e -+#define bfd_mach_sh4 0x40 -+#define bfd_mach_sh4_nofpu 0x41 -+#define bfd_mach_sh4_nommu_nofpu 0x42 -+#define bfd_mach_sh4a 0x4a -+#define bfd_mach_sh4a_nofpu 0x4b -+#define bfd_mach_sh4al_dsp 0x4d -+#define bfd_mach_sh5 0x50 -+ bfd_arch_alpha, /* Dec Alpha */ -+#define bfd_mach_alpha_ev4 0x10 -+#define bfd_mach_alpha_ev5 0x20 -+#define bfd_mach_alpha_ev6 0x30 -+ bfd_arch_arm, /* Advanced Risc Machines ARM. */ -+#define bfd_mach_arm_unknown 0 -+#define bfd_mach_arm_2 1 -+#define bfd_mach_arm_2a 2 -+#define bfd_mach_arm_3 3 -+#define bfd_mach_arm_3M 4 -+#define bfd_mach_arm_4 5 -+#define bfd_mach_arm_4T 6 -+#define bfd_mach_arm_5 7 -+#define bfd_mach_arm_5T 8 -+#define bfd_mach_arm_5TE 9 -+#define bfd_mach_arm_XScale 10 -+#define bfd_mach_arm_ep9312 11 -+#define bfd_mach_arm_iWMMXt 12 -+ bfd_arch_ns32k, /* National Semiconductors ns32000 */ -+ bfd_arch_w65, /* WDC 65816 */ -+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */ -+ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */ -+#define bfd_mach_tic3x 30 -+#define bfd_mach_tic4x 40 -+ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */ -+ bfd_arch_tic80, /* TI TMS320c80 (MVP) */ -+ bfd_arch_v850, /* NEC V850 */ -+#define bfd_mach_v850 1 -+#define bfd_mach_v850e 'E' -+#define bfd_mach_v850e1 '1' -+ bfd_arch_arc, /* ARC Cores */ -+#define bfd_mach_arc_5 5 -+#define bfd_mach_arc_6 6 -+#define bfd_mach_arc_7 7 -+#define bfd_mach_arc_8 8 -+ bfd_arch_m32c, /* Renesas M16C/M32C. */ -+#define bfd_mach_m16c 0x75 -+#define bfd_mach_m32c 0x78 -+ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */ -+#define bfd_mach_m32r 1 /* For backwards compatibility. */ -+#define bfd_mach_m32rx 'x' -+#define bfd_mach_m32r2 '2' -+ bfd_arch_mn10200, /* Matsushita MN10200 */ -+ bfd_arch_mn10300, /* Matsushita MN10300 */ -+#define bfd_mach_mn10300 300 -+#define bfd_mach_am33 330 -+#define bfd_mach_am33_2 332 -+ bfd_arch_fr30, -+#define bfd_mach_fr30 0x46523330 -+ bfd_arch_frv, -+#define bfd_mach_frv 1 -+#define bfd_mach_frvsimple 2 -+#define bfd_mach_fr300 300 -+#define bfd_mach_fr400 400 -+#define bfd_mach_fr450 450 -+#define bfd_mach_frvtomcat 499 /* fr500 prototype */ -+#define bfd_mach_fr500 500 -+#define bfd_mach_fr550 550 -+ bfd_arch_mcore, -+ bfd_arch_ia64, /* HP/Intel ia64 */ -+#define bfd_mach_ia64_elf64 64 -+#define bfd_mach_ia64_elf32 32 -+ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */ -+#define bfd_mach_ip2022 1 -+#define bfd_mach_ip2022ext 2 -+ bfd_arch_iq2000, /* Vitesse IQ2000. */ -+#define bfd_mach_iq2000 1 -+#define bfd_mach_iq10 2 -+ bfd_arch_mt, -+#define bfd_mach_ms1 1 -+#define bfd_mach_mrisc2 2 -+#define bfd_mach_ms2 3 -+ bfd_arch_pj, -+ bfd_arch_avr, /* Atmel AVR microcontrollers. */ -+#define bfd_mach_avr1 1 -+#define bfd_mach_avr2 2 -+#define bfd_mach_avr3 3 -+#define bfd_mach_avr4 4 -+#define bfd_mach_avr5 5 -+ bfd_arch_bfin, /* ADI Blackfin */ -+#define bfd_mach_bfin 1 -+ bfd_arch_cr16c, /* National Semiconductor CompactRISC. 
*/
-+#define bfd_mach_cr16c 1
-+ bfd_arch_crx, /* National Semiconductor CRX. */
-+#define bfd_mach_crx 1
-+ bfd_arch_cris, /* Axis CRIS */
-+#define bfd_mach_cris_v0_v10 255
-+#define bfd_mach_cris_v32 32
-+#define bfd_mach_cris_v10_v32 1032
-+ bfd_arch_s390, /* IBM s390 */
-+#define bfd_mach_s390_31 31
-+#define bfd_mach_s390_64 64
-+ bfd_arch_openrisc, /* OpenRISC */
-+ bfd_arch_mmix, /* Donald Knuth's educational processor. */
-+ bfd_arch_xstormy16,
-+#define bfd_mach_xstormy16 1
-+ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */
-+#define bfd_mach_msp11 11
-+#define bfd_mach_msp110 110
-+#define bfd_mach_msp12 12
-+#define bfd_mach_msp13 13
-+#define bfd_mach_msp14 14
-+#define bfd_mach_msp15 15
-+#define bfd_mach_msp16 16
-+#define bfd_mach_msp21 21
-+#define bfd_mach_msp31 31
-+#define bfd_mach_msp32 32
-+#define bfd_mach_msp33 33
-+#define bfd_mach_msp41 41
-+#define bfd_mach_msp42 42
-+#define bfd_mach_msp43 43
-+#define bfd_mach_msp44 44
-+ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */
-+#define bfd_mach_xtensa 1
-+ bfd_arch_maxq, /* Dallas MAXQ 10/20 */
-+#define bfd_mach_maxq10 10
-+#define bfd_mach_maxq20 20
-+ bfd_arch_z80,
-+#define bfd_mach_z80strict 1 /* No undocumented opcodes. */
-+#define bfd_mach_z80 3 /* With ixl, ixh, iyl, and iyh. */
-+#define bfd_mach_z80full 7 /* All undocumented instructions. */
-+#define bfd_mach_r800 11 /* R800: successor with multiplication. */
-+ bfd_arch_last
-+ };
-+
-+typedef struct bfd_arch_info
-+{
-+ int bits_per_word;
-+ int bits_per_address;
-+ int bits_per_byte;
-+ enum bfd_architecture arch;
-+ unsigned long mach;
-+ const char *arch_name;
-+ const char *printable_name;
-+ unsigned int section_align_power;
-+ /* TRUE if this is the default machine for the architecture.
-+ The default arch should be the first entry for an arch so that
-+ all the entries for that arch can be accessed via <<next>>. */
-+ bfd_boolean the_default;
-+ const struct bfd_arch_info * (*compatible)
-+ (const struct bfd_arch_info *a, const struct bfd_arch_info *b);
-+
-+ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *);
-+
-+ const struct bfd_arch_info *next;
-+}
-+bfd_arch_info_type;
-+
-+const char *bfd_printable_name (bfd *abfd);
-+
-+const bfd_arch_info_type *bfd_scan_arch (const char *string);
-+
-+const char **bfd_arch_list (void);
-+
-+const bfd_arch_info_type *bfd_arch_get_compatible
-+ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns);
-+
-+void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg);
-+
-+enum bfd_architecture bfd_get_arch (bfd *abfd);
-+
-+unsigned long bfd_get_mach (bfd *abfd);
-+
-+unsigned int bfd_arch_bits_per_byte (bfd *abfd);
-+
-+unsigned int bfd_arch_bits_per_address (bfd *abfd);
-+
-+const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd);
-+
-+const bfd_arch_info_type *bfd_lookup_arch
-+ (enum bfd_architecture arch, unsigned long machine);
-+
-+const char *bfd_printable_arch_mach
-+ (enum bfd_architecture arch, unsigned long machine);
-+
-+unsigned int bfd_octets_per_byte (bfd *abfd);
-+
-+unsigned int bfd_arch_mach_octets_per_byte
-+ (enum bfd_architecture arch, unsigned long machine);
-+
-+/* Extracted from reloc.c. */
-+typedef enum bfd_reloc_status
-+{
-+ /* No errors detected. */
-+ bfd_reloc_ok,
-+
-+ /* The relocation was performed, but there was an overflow. */
-+ bfd_reloc_overflow,
-+
-+ /* The address to relocate was not within the section supplied. */
-+ bfd_reloc_outofrange,
-+
-+ /* Used by special functions.
*/ -+ bfd_reloc_continue, -+ -+ /* Unsupported relocation size requested. */ -+ bfd_reloc_notsupported, -+ -+ /* Unused. */ -+ bfd_reloc_other, -+ -+ /* The symbol to relocate against was undefined. */ -+ bfd_reloc_undefined, -+ -+ /* The relocation was performed, but may not be ok - presently -+ generated only when linking i960 coff files with i960 b.out -+ symbols. If this type is returned, the error_message argument -+ to bfd_perform_relocation will be set. */ -+ bfd_reloc_dangerous -+ } -+ bfd_reloc_status_type; -+ -+ -+typedef struct reloc_cache_entry -+{ -+ /* A pointer into the canonical table of pointers. */ -+ struct bfd_symbol **sym_ptr_ptr; -+ -+ /* offset in section. */ -+ bfd_size_type address; -+ -+ /* addend for relocation value. */ -+ bfd_vma addend; -+ -+ /* Pointer to how to perform the required relocation. */ -+ reloc_howto_type *howto; -+ -+} -+arelent; -+ -+enum complain_overflow -+{ -+ /* Do not complain on overflow. */ -+ complain_overflow_dont, -+ -+ /* Complain if the value overflows when considered as a signed -+ number one bit larger than the field. ie. A bitfield of N bits -+ is allowed to represent -2**n to 2**n-1. */ -+ complain_overflow_bitfield, -+ -+ /* Complain if the value overflows when considered as a signed -+ number. */ -+ complain_overflow_signed, -+ -+ /* Complain if the value overflows when considered as an -+ unsigned number. */ -+ complain_overflow_unsigned -+}; -+ -+struct reloc_howto_struct -+{ -+ /* The type field has mainly a documentary use - the back end can -+ do what it wants with it, though normally the back end's -+ external idea of what a reloc number is stored -+ in this field. For example, a PC relative word relocation -+ in a coff environment has the type 023 - because that's -+ what the outside world calls a R_PCRWORD reloc. */ -+ unsigned int type; -+ -+ /* The value the final relocation is shifted right by. This drops -+ unwanted data from the relocation. */ -+ unsigned int rightshift; -+ -+ /* The size of the item to be relocated. This is *not* a -+ power-of-two measure. To get the number of bytes operated -+ on by a type of relocation, use bfd_get_reloc_size. */ -+ int size; -+ -+ /* The number of bits in the item to be relocated. This is used -+ when doing overflow checking. */ -+ unsigned int bitsize; -+ -+ /* Notes that the relocation is relative to the location in the -+ data section of the addend. The relocation function will -+ subtract from the relocation value the address of the location -+ being relocated. */ -+ bfd_boolean pc_relative; -+ -+ /* The bit position of the reloc value in the destination. -+ The relocated value is left shifted by this amount. */ -+ unsigned int bitpos; -+ -+ /* What type of overflow error should be checked for when -+ relocating. */ -+ enum complain_overflow complain_on_overflow; -+ -+ /* If this field is non null, then the supplied function is -+ called rather than the normal function. This allows really -+ strange relocation methods to be accommodated (e.g., i960 callj -+ instructions). */ -+ bfd_reloc_status_type (*special_function) -+ (bfd *, arelent *, struct bfd_symbol *, void *, asection *, -+ bfd *, char **); -+ -+ /* The textual name of the relocation type. */ -+ char *name; -+ -+ /* Some formats record a relocation addend in the section contents -+ rather than with the relocation. For ELF formats this is the -+ distinction between USE_REL and USE_RELA (though the code checks -+ for USE_REL == 1/0). 
The value of this field is TRUE if the -+ addend is recorded with the section contents; when performing a -+ partial link (ld -r) the section contents (the data) will be -+ modified. The value of this field is FALSE if addends are -+ recorded with the relocation (in arelent.addend); when performing -+ a partial link the relocation will be modified. -+ All relocations for all ELF USE_RELA targets should set this field -+ to FALSE (values of TRUE should be looked on with suspicion). -+ However, the converse is not true: not all relocations of all ELF -+ USE_REL targets set this field to TRUE. Why this is so is peculiar -+ to each particular target. For relocs that aren't used in partial -+ links (e.g. GOT stuff) it doesn't matter what this is set to. */ -+ bfd_boolean partial_inplace; -+ -+ /* src_mask selects the part of the instruction (or data) to be used -+ in the relocation sum. If the target relocations don't have an -+ addend in the reloc, eg. ELF USE_REL, src_mask will normally equal -+ dst_mask to extract the addend from the section contents. If -+ relocations do have an addend in the reloc, eg. ELF USE_RELA, this -+ field should be zero. Non-zero values for ELF USE_RELA targets are -+ bogus as in those cases the value in the dst_mask part of the -+ section contents should be treated as garbage. */ -+ bfd_vma src_mask; -+ -+ /* dst_mask selects which parts of the instruction (or data) are -+ replaced with a relocated value. */ -+ bfd_vma dst_mask; -+ -+ /* When some formats create PC relative instructions, they leave -+ the value of the pc of the place being relocated in the offset -+ slot of the instruction, so that a PC relative relocation can -+ be made just by adding in an ordinary offset (e.g., sun3 a.out). -+ Some formats leave the displacement part of an instruction -+ empty (e.g., m88k bcs); this flag signals the fact. */ -+ bfd_boolean pcrel_offset; -+}; -+ -+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \ -+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC } -+#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \ -+ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \ -+ NAME, FALSE, 0, 0, IN) -+ -+#define EMPTY_HOWTO(C) \ -+ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \ -+ NULL, FALSE, 0, 0, FALSE) -+ -+#define HOWTO_PREPARE(relocation, symbol) \ -+ { \ -+ if (symbol != NULL) \ -+ { \ -+ if (bfd_is_com_section (symbol->section)) \ -+ { \ -+ relocation = 0; \ -+ } \ -+ else \ -+ { \ -+ relocation = symbol->value; \ -+ } \ -+ } \ -+ } -+ -+unsigned int bfd_get_reloc_size (reloc_howto_type *); -+ -+typedef struct relent_chain -+{ -+ arelent relent; -+ struct relent_chain *next; -+} -+arelent_chain; -+ -+bfd_reloc_status_type bfd_check_overflow -+ (enum complain_overflow how, -+ unsigned int bitsize, -+ unsigned int rightshift, -+ unsigned int addrsize, -+ bfd_vma relocation); -+ -+bfd_reloc_status_type bfd_perform_relocation -+ (bfd *abfd, -+ arelent *reloc_entry, -+ void *data, -+ asection *input_section, -+ bfd *output_bfd, -+ char **error_message); -+ -+bfd_reloc_status_type bfd_install_relocation -+ (bfd *abfd, -+ arelent *reloc_entry, -+ void *data, bfd_vma data_start, -+ asection *input_section, -+ char **error_message); -+ -+enum bfd_reloc_code_real { -+ _dummy_first_bfd_reloc_code_real, -+ -+ -+/* Basic absolute relocations of N bits. */ -+ BFD_RELOC_64, -+ BFD_RELOC_32, -+ BFD_RELOC_26, -+ BFD_RELOC_24, -+ BFD_RELOC_16, -+ BFD_RELOC_14, -+ BFD_RELOC_8, -+ -+/* PC-relative relocations. 
Sometimes these are relative to the address
-+of the relocation itself; sometimes they are relative to the start of
-+the section containing the relocation. It depends on the specific target.
-+
-+The 24-bit relocation is used in some Intel 960 configurations. */
-+ BFD_RELOC_64_PCREL,
-+ BFD_RELOC_32_PCREL,
-+ BFD_RELOC_24_PCREL,
-+ BFD_RELOC_16_PCREL,
-+ BFD_RELOC_12_PCREL,
-+ BFD_RELOC_8_PCREL,
-+
-+/* Section relative relocations. Some targets need this for DWARF2. */
-+ BFD_RELOC_32_SECREL,
-+
-+/* For ELF. */
-+ BFD_RELOC_32_GOT_PCREL,
-+ BFD_RELOC_16_GOT_PCREL,
-+ BFD_RELOC_8_GOT_PCREL,
-+ BFD_RELOC_32_GOTOFF,
-+ BFD_RELOC_16_GOTOFF,
-+ BFD_RELOC_LO16_GOTOFF,
-+ BFD_RELOC_HI16_GOTOFF,
-+ BFD_RELOC_HI16_S_GOTOFF,
-+ BFD_RELOC_8_GOTOFF,
-+ BFD_RELOC_64_PLT_PCREL,
-+ BFD_RELOC_32_PLT_PCREL,
-+ BFD_RELOC_24_PLT_PCREL,
-+ BFD_RELOC_16_PLT_PCREL,
-+ BFD_RELOC_8_PLT_PCREL,
-+ BFD_RELOC_64_PLTOFF,
-+ BFD_RELOC_32_PLTOFF,
-+ BFD_RELOC_16_PLTOFF,
-+ BFD_RELOC_LO16_PLTOFF,
-+ BFD_RELOC_HI16_PLTOFF,
-+ BFD_RELOC_HI16_S_PLTOFF,
-+ BFD_RELOC_8_PLTOFF,
-+
-+/* Relocations used by 68K ELF. */
-+ BFD_RELOC_68K_GLOB_DAT,
-+ BFD_RELOC_68K_JMP_SLOT,
-+ BFD_RELOC_68K_RELATIVE,
-+
-+/* Linkage-table relative. */
-+ BFD_RELOC_32_BASEREL,
-+ BFD_RELOC_16_BASEREL,
-+ BFD_RELOC_LO16_BASEREL,
-+ BFD_RELOC_HI16_BASEREL,
-+ BFD_RELOC_HI16_S_BASEREL,
-+ BFD_RELOC_8_BASEREL,
-+ BFD_RELOC_RVA,
-+
-+/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */
-+ BFD_RELOC_8_FFnn,
-+
-+/* These PC-relative relocations are stored as word displacements --
-+i.e., byte displacements shifted right two bits. The 30-bit word
-+displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the
-+SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The
-+signed 16-bit displacement is used on the MIPS, and the 23-bit
-+displacement is used on the Alpha. */
-+ BFD_RELOC_32_PCREL_S2,
-+ BFD_RELOC_16_PCREL_S2,
-+ BFD_RELOC_23_PCREL_S2,
-+
-+/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of
-+the target word. These are used on the SPARC. */
-+ BFD_RELOC_HI22,
-+ BFD_RELOC_LO10,
-+
-+/* For systems that allocate a Global Pointer register, these are
-+displacements off that register. These relocation types are
-+handled specially, because the value the register will have is
-+decided relatively late. */
-+ BFD_RELOC_GPREL16,
-+ BFD_RELOC_GPREL32,
-+
-+/* Reloc types used for i960/b.out. */
-+ BFD_RELOC_I960_CALLJ,
-+
-+/* SPARC ELF relocations. There is probably some overlap with other
-+relocation types already defined. */
-+ BFD_RELOC_NONE,
-+ BFD_RELOC_SPARC_WDISP22,
-+ BFD_RELOC_SPARC22,
-+ BFD_RELOC_SPARC13,
-+ BFD_RELOC_SPARC_GOT10,
-+ BFD_RELOC_SPARC_GOT13,
-+ BFD_RELOC_SPARC_GOT22,
-+ BFD_RELOC_SPARC_PC10,
-+ BFD_RELOC_SPARC_PC22,
-+ BFD_RELOC_SPARC_WPLT30,
-+ BFD_RELOC_SPARC_COPY,
-+ BFD_RELOC_SPARC_GLOB_DAT,
-+ BFD_RELOC_SPARC_JMP_SLOT,
-+ BFD_RELOC_SPARC_RELATIVE,
-+ BFD_RELOC_SPARC_UA16,
-+ BFD_RELOC_SPARC_UA32,
-+ BFD_RELOC_SPARC_UA64,
-+
-+/* I think these are specific to SPARC a.out (e.g., Sun 4).
*/ -+ BFD_RELOC_SPARC_BASE13, -+ BFD_RELOC_SPARC_BASE22, -+ -+/* SPARC64 relocations */ -+#define BFD_RELOC_SPARC_64 BFD_RELOC_64 -+ BFD_RELOC_SPARC_10, -+ BFD_RELOC_SPARC_11, -+ BFD_RELOC_SPARC_OLO10, -+ BFD_RELOC_SPARC_HH22, -+ BFD_RELOC_SPARC_HM10, -+ BFD_RELOC_SPARC_LM22, -+ BFD_RELOC_SPARC_PC_HH22, -+ BFD_RELOC_SPARC_PC_HM10, -+ BFD_RELOC_SPARC_PC_LM22, -+ BFD_RELOC_SPARC_WDISP16, -+ BFD_RELOC_SPARC_WDISP19, -+ BFD_RELOC_SPARC_7, -+ BFD_RELOC_SPARC_6, -+ BFD_RELOC_SPARC_5, -+#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL -+ BFD_RELOC_SPARC_PLT32, -+ BFD_RELOC_SPARC_PLT64, -+ BFD_RELOC_SPARC_HIX22, -+ BFD_RELOC_SPARC_LOX10, -+ BFD_RELOC_SPARC_H44, -+ BFD_RELOC_SPARC_M44, -+ BFD_RELOC_SPARC_L44, -+ BFD_RELOC_SPARC_REGISTER, -+ -+/* SPARC little endian relocation */ -+ BFD_RELOC_SPARC_REV32, -+ -+/* SPARC TLS relocations */ -+ BFD_RELOC_SPARC_TLS_GD_HI22, -+ BFD_RELOC_SPARC_TLS_GD_LO10, -+ BFD_RELOC_SPARC_TLS_GD_ADD, -+ BFD_RELOC_SPARC_TLS_GD_CALL, -+ BFD_RELOC_SPARC_TLS_LDM_HI22, -+ BFD_RELOC_SPARC_TLS_LDM_LO10, -+ BFD_RELOC_SPARC_TLS_LDM_ADD, -+ BFD_RELOC_SPARC_TLS_LDM_CALL, -+ BFD_RELOC_SPARC_TLS_LDO_HIX22, -+ BFD_RELOC_SPARC_TLS_LDO_LOX10, -+ BFD_RELOC_SPARC_TLS_LDO_ADD, -+ BFD_RELOC_SPARC_TLS_IE_HI22, -+ BFD_RELOC_SPARC_TLS_IE_LO10, -+ BFD_RELOC_SPARC_TLS_IE_LD, -+ BFD_RELOC_SPARC_TLS_IE_LDX, -+ BFD_RELOC_SPARC_TLS_IE_ADD, -+ BFD_RELOC_SPARC_TLS_LE_HIX22, -+ BFD_RELOC_SPARC_TLS_LE_LOX10, -+ BFD_RELOC_SPARC_TLS_DTPMOD32, -+ BFD_RELOC_SPARC_TLS_DTPMOD64, -+ BFD_RELOC_SPARC_TLS_DTPOFF32, -+ BFD_RELOC_SPARC_TLS_DTPOFF64, -+ BFD_RELOC_SPARC_TLS_TPOFF32, -+ BFD_RELOC_SPARC_TLS_TPOFF64, -+ -+/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or -+"addend" in some special way. -+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when -+writing; when reading, it will be the absolute section symbol. The -+addend is the displacement in bytes of the "lda" instruction from -+the "ldah" instruction (which is at the address of this reloc). */ -+ BFD_RELOC_ALPHA_GPDISP_HI16, -+ -+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as -+with GPDISP_HI16 relocs. The addend is ignored when writing the -+relocations out, and is filled in with the file's GP value on -+reading, for convenience. */ -+ BFD_RELOC_ALPHA_GPDISP_LO16, -+ -+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16 -+relocation except that there is no accompanying GPDISP_LO16 -+relocation. */ -+ BFD_RELOC_ALPHA_GPDISP, -+ -+/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference; -+the assembler turns it into a LDQ instruction to load the address of -+the symbol, and then fills in a register in the real instruction. -+ -+The LITERAL reloc, at the LDQ instruction, refers to the .lita -+section symbol. The addend is ignored when writing, but is filled -+in with the file's GP value on reading, for convenience, as with the -+GPDISP_LO16 reloc. -+ -+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16. -+It should refer to the symbol to be referenced, as with 16_GOTOFF, -+but it generates output not based on the position within the .got -+section, but relative to the GP value chosen for the file during the -+final link stage. -+ -+The LITUSE reloc, on the instruction using the loaded address, gives -+information to the linker that it might be able to use to optimize -+away some literal section references. 
The symbol is ignored (read -+as the absolute section symbol), and the "addend" indicates the type -+of instruction using the register: -+1 - "memory" fmt insn -+2 - byte-manipulation (byte offset reg) -+3 - jsr (target of branch) */ -+ BFD_RELOC_ALPHA_LITERAL, -+ BFD_RELOC_ALPHA_ELF_LITERAL, -+ BFD_RELOC_ALPHA_LITUSE, -+ -+/* The HINT relocation indicates a value that should be filled into the -+"hint" field of a jmp/jsr/ret instruction, for possible branch- -+prediction logic which may be provided on some processors. */ -+ BFD_RELOC_ALPHA_HINT, -+ -+/* The LINKAGE relocation outputs a linkage pair in the object file, -+which is filled by the linker. */ -+ BFD_RELOC_ALPHA_LINKAGE, -+ -+/* The CODEADDR relocation outputs a STO_CA in the object file, -+which is filled by the linker. */ -+ BFD_RELOC_ALPHA_CODEADDR, -+ -+/* The GPREL_HI/LO relocations together form a 32-bit offset from the -+GP register. */ -+ BFD_RELOC_ALPHA_GPREL_HI16, -+ BFD_RELOC_ALPHA_GPREL_LO16, -+ -+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must -+share a common GP, and the target address is adjusted for -+STO_ALPHA_STD_GPLOAD. */ -+ BFD_RELOC_ALPHA_BRSGP, -+ -+/* Alpha thread-local storage relocations. */ -+ BFD_RELOC_ALPHA_TLSGD, -+ BFD_RELOC_ALPHA_TLSLDM, -+ BFD_RELOC_ALPHA_DTPMOD64, -+ BFD_RELOC_ALPHA_GOTDTPREL16, -+ BFD_RELOC_ALPHA_DTPREL64, -+ BFD_RELOC_ALPHA_DTPREL_HI16, -+ BFD_RELOC_ALPHA_DTPREL_LO16, -+ BFD_RELOC_ALPHA_DTPREL16, -+ BFD_RELOC_ALPHA_GOTTPREL16, -+ BFD_RELOC_ALPHA_TPREL64, -+ BFD_RELOC_ALPHA_TPREL_HI16, -+ BFD_RELOC_ALPHA_TPREL_LO16, -+ BFD_RELOC_ALPHA_TPREL16, -+ -+/* Bits 27..2 of the relocation address shifted right 2 bits; -+simple reloc otherwise. */ -+ BFD_RELOC_MIPS_JMP, -+ -+/* The MIPS16 jump instruction. */ -+ BFD_RELOC_MIPS16_JMP, -+ -+/* MIPS16 GP relative reloc. */ -+ BFD_RELOC_MIPS16_GPREL, -+ -+/* High 16 bits of 32-bit value; simple reloc. */ -+ BFD_RELOC_HI16, -+ -+/* High 16 bits of 32-bit value but the low 16 bits will be sign -+extended and added to form the final result. If the low 16 -+bits form a negative number, we need to add one to the high value -+to compensate for the borrow when the low bits are added. */ -+ BFD_RELOC_HI16_S, -+ -+/* Low 16 bits. */ -+ BFD_RELOC_LO16, -+ -+/* High 16 bits of 32-bit pc-relative value */ -+ BFD_RELOC_HI16_PCREL, -+ -+/* High 16 bits of 32-bit pc-relative value, adjusted */ -+ BFD_RELOC_HI16_S_PCREL, -+ -+/* Low 16 bits of pc-relative value */ -+ BFD_RELOC_LO16_PCREL, -+ -+/* MIPS16 high 16 bits of 32-bit value. */ -+ BFD_RELOC_MIPS16_HI16, -+ -+/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign -+extended and added to form the final result. If the low 16 -+bits form a negative number, we need to add one to the high value -+to compensate for the borrow when the low bits are added. */ -+ BFD_RELOC_MIPS16_HI16_S, -+ -+/* MIPS16 low 16 bits. */ -+ BFD_RELOC_MIPS16_LO16, -+ -+/* Relocation against a MIPS literal section. */ -+ BFD_RELOC_MIPS_LITERAL, -+ -+/* MIPS ELF relocations. 
*/ -+ BFD_RELOC_MIPS_GOT16, -+ BFD_RELOC_MIPS_CALL16, -+ BFD_RELOC_MIPS_GOT_HI16, -+ BFD_RELOC_MIPS_GOT_LO16, -+ BFD_RELOC_MIPS_CALL_HI16, -+ BFD_RELOC_MIPS_CALL_LO16, -+ BFD_RELOC_MIPS_SUB, -+ BFD_RELOC_MIPS_GOT_PAGE, -+ BFD_RELOC_MIPS_GOT_OFST, -+ BFD_RELOC_MIPS_GOT_DISP, -+ BFD_RELOC_MIPS_SHIFT5, -+ BFD_RELOC_MIPS_SHIFT6, -+ BFD_RELOC_MIPS_INSERT_A, -+ BFD_RELOC_MIPS_INSERT_B, -+ BFD_RELOC_MIPS_DELETE, -+ BFD_RELOC_MIPS_HIGHEST, -+ BFD_RELOC_MIPS_HIGHER, -+ BFD_RELOC_MIPS_SCN_DISP, -+ BFD_RELOC_MIPS_REL16, -+ BFD_RELOC_MIPS_RELGOT, -+ BFD_RELOC_MIPS_JALR, -+ BFD_RELOC_MIPS_TLS_DTPMOD32, -+ BFD_RELOC_MIPS_TLS_DTPREL32, -+ BFD_RELOC_MIPS_TLS_DTPMOD64, -+ BFD_RELOC_MIPS_TLS_DTPREL64, -+ BFD_RELOC_MIPS_TLS_GD, -+ BFD_RELOC_MIPS_TLS_LDM, -+ BFD_RELOC_MIPS_TLS_DTPREL_HI16, -+ BFD_RELOC_MIPS_TLS_DTPREL_LO16, -+ BFD_RELOC_MIPS_TLS_GOTTPREL, -+ BFD_RELOC_MIPS_TLS_TPREL32, -+ BFD_RELOC_MIPS_TLS_TPREL64, -+ BFD_RELOC_MIPS_TLS_TPREL_HI16, -+ BFD_RELOC_MIPS_TLS_TPREL_LO16, -+ -+ -+/* Fujitsu Frv Relocations. */ -+ BFD_RELOC_FRV_LABEL16, -+ BFD_RELOC_FRV_LABEL24, -+ BFD_RELOC_FRV_LO16, -+ BFD_RELOC_FRV_HI16, -+ BFD_RELOC_FRV_GPREL12, -+ BFD_RELOC_FRV_GPRELU12, -+ BFD_RELOC_FRV_GPREL32, -+ BFD_RELOC_FRV_GPRELHI, -+ BFD_RELOC_FRV_GPRELLO, -+ BFD_RELOC_FRV_GOT12, -+ BFD_RELOC_FRV_GOTHI, -+ BFD_RELOC_FRV_GOTLO, -+ BFD_RELOC_FRV_FUNCDESC, -+ BFD_RELOC_FRV_FUNCDESC_GOT12, -+ BFD_RELOC_FRV_FUNCDESC_GOTHI, -+ BFD_RELOC_FRV_FUNCDESC_GOTLO, -+ BFD_RELOC_FRV_FUNCDESC_VALUE, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFF12, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO, -+ BFD_RELOC_FRV_GOTOFF12, -+ BFD_RELOC_FRV_GOTOFFHI, -+ BFD_RELOC_FRV_GOTOFFLO, -+ BFD_RELOC_FRV_GETTLSOFF, -+ BFD_RELOC_FRV_TLSDESC_VALUE, -+ BFD_RELOC_FRV_GOTTLSDESC12, -+ BFD_RELOC_FRV_GOTTLSDESCHI, -+ BFD_RELOC_FRV_GOTTLSDESCLO, -+ BFD_RELOC_FRV_TLSMOFF12, -+ BFD_RELOC_FRV_TLSMOFFHI, -+ BFD_RELOC_FRV_TLSMOFFLO, -+ BFD_RELOC_FRV_GOTTLSOFF12, -+ BFD_RELOC_FRV_GOTTLSOFFHI, -+ BFD_RELOC_FRV_GOTTLSOFFLO, -+ BFD_RELOC_FRV_TLSOFF, -+ BFD_RELOC_FRV_TLSDESC_RELAX, -+ BFD_RELOC_FRV_GETTLSOFF_RELAX, -+ BFD_RELOC_FRV_TLSOFF_RELAX, -+ BFD_RELOC_FRV_TLSMOFF, -+ -+ -+/* This is a 24bit GOT-relative reloc for the mn10300. */ -+ BFD_RELOC_MN10300_GOTOFF24, -+ -+/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT32, -+ -+/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT24, -+ -+/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT16, -+ -+/* Copy symbol at runtime. */ -+ BFD_RELOC_MN10300_COPY, -+ -+/* Create GOT entry. */ -+ BFD_RELOC_MN10300_GLOB_DAT, -+ -+/* Create PLT entry. */ -+ BFD_RELOC_MN10300_JMP_SLOT, -+ -+/* Adjust by program base. 
*/ -+ BFD_RELOC_MN10300_RELATIVE, -+ -+ -+/* i386/elf relocations */ -+ BFD_RELOC_386_GOT32, -+ BFD_RELOC_386_PLT32, -+ BFD_RELOC_386_COPY, -+ BFD_RELOC_386_GLOB_DAT, -+ BFD_RELOC_386_JUMP_SLOT, -+ BFD_RELOC_386_RELATIVE, -+ BFD_RELOC_386_GOTOFF, -+ BFD_RELOC_386_GOTPC, -+ BFD_RELOC_386_TLS_TPOFF, -+ BFD_RELOC_386_TLS_IE, -+ BFD_RELOC_386_TLS_GOTIE, -+ BFD_RELOC_386_TLS_LE, -+ BFD_RELOC_386_TLS_GD, -+ BFD_RELOC_386_TLS_LDM, -+ BFD_RELOC_386_TLS_LDO_32, -+ BFD_RELOC_386_TLS_IE_32, -+ BFD_RELOC_386_TLS_LE_32, -+ BFD_RELOC_386_TLS_DTPMOD32, -+ BFD_RELOC_386_TLS_DTPOFF32, -+ BFD_RELOC_386_TLS_TPOFF32, -+ -+/* x86-64/elf relocations */ -+ BFD_RELOC_X86_64_GOT32, -+ BFD_RELOC_X86_64_PLT32, -+ BFD_RELOC_X86_64_COPY, -+ BFD_RELOC_X86_64_GLOB_DAT, -+ BFD_RELOC_X86_64_JUMP_SLOT, -+ BFD_RELOC_X86_64_RELATIVE, -+ BFD_RELOC_X86_64_GOTPCREL, -+ BFD_RELOC_X86_64_32S, -+ BFD_RELOC_X86_64_DTPMOD64, -+ BFD_RELOC_X86_64_DTPOFF64, -+ BFD_RELOC_X86_64_TPOFF64, -+ BFD_RELOC_X86_64_TLSGD, -+ BFD_RELOC_X86_64_TLSLD, -+ BFD_RELOC_X86_64_DTPOFF32, -+ BFD_RELOC_X86_64_GOTTPOFF, -+ BFD_RELOC_X86_64_TPOFF32, -+ BFD_RELOC_X86_64_GOTOFF64, -+ BFD_RELOC_X86_64_GOTPC32, -+ BFD_RELOC_X86_64_GOT64, -+ BFD_RELOC_X86_64_GOTPCREL64, -+ BFD_RELOC_X86_64_GOTPC64, -+ BFD_RELOC_X86_64_GOTPLT64, -+ BFD_RELOC_X86_64_PLTOFF64, -+ -+/* ns32k relocations */ -+ BFD_RELOC_NS32K_IMM_8, -+ BFD_RELOC_NS32K_IMM_16, -+ BFD_RELOC_NS32K_IMM_32, -+ BFD_RELOC_NS32K_IMM_8_PCREL, -+ BFD_RELOC_NS32K_IMM_16_PCREL, -+ BFD_RELOC_NS32K_IMM_32_PCREL, -+ BFD_RELOC_NS32K_DISP_8, -+ BFD_RELOC_NS32K_DISP_16, -+ BFD_RELOC_NS32K_DISP_32, -+ BFD_RELOC_NS32K_DISP_8_PCREL, -+ BFD_RELOC_NS32K_DISP_16_PCREL, -+ BFD_RELOC_NS32K_DISP_32_PCREL, -+ -+/* PDP11 relocations */ -+ BFD_RELOC_PDP11_DISP_8_PCREL, -+ BFD_RELOC_PDP11_DISP_6_PCREL, -+ -+/* Picojava relocs. Not all of these appear in object files. */ -+ BFD_RELOC_PJ_CODE_HI16, -+ BFD_RELOC_PJ_CODE_LO16, -+ BFD_RELOC_PJ_CODE_DIR16, -+ BFD_RELOC_PJ_CODE_DIR32, -+ BFD_RELOC_PJ_CODE_REL16, -+ BFD_RELOC_PJ_CODE_REL32, -+ -+/* Power(rs6000) and PowerPC relocations. 
*/ -+ BFD_RELOC_PPC_B26, -+ BFD_RELOC_PPC_BA26, -+ BFD_RELOC_PPC_TOC16, -+ BFD_RELOC_PPC_B16, -+ BFD_RELOC_PPC_B16_BRTAKEN, -+ BFD_RELOC_PPC_B16_BRNTAKEN, -+ BFD_RELOC_PPC_BA16, -+ BFD_RELOC_PPC_BA16_BRTAKEN, -+ BFD_RELOC_PPC_BA16_BRNTAKEN, -+ BFD_RELOC_PPC_COPY, -+ BFD_RELOC_PPC_GLOB_DAT, -+ BFD_RELOC_PPC_JMP_SLOT, -+ BFD_RELOC_PPC_RELATIVE, -+ BFD_RELOC_PPC_LOCAL24PC, -+ BFD_RELOC_PPC_EMB_NADDR32, -+ BFD_RELOC_PPC_EMB_NADDR16, -+ BFD_RELOC_PPC_EMB_NADDR16_LO, -+ BFD_RELOC_PPC_EMB_NADDR16_HI, -+ BFD_RELOC_PPC_EMB_NADDR16_HA, -+ BFD_RELOC_PPC_EMB_SDAI16, -+ BFD_RELOC_PPC_EMB_SDA2I16, -+ BFD_RELOC_PPC_EMB_SDA2REL, -+ BFD_RELOC_PPC_EMB_SDA21, -+ BFD_RELOC_PPC_EMB_MRKREF, -+ BFD_RELOC_PPC_EMB_RELSEC16, -+ BFD_RELOC_PPC_EMB_RELST_LO, -+ BFD_RELOC_PPC_EMB_RELST_HI, -+ BFD_RELOC_PPC_EMB_RELST_HA, -+ BFD_RELOC_PPC_EMB_BIT_FLD, -+ BFD_RELOC_PPC_EMB_RELSDA, -+ BFD_RELOC_PPC64_HIGHER, -+ BFD_RELOC_PPC64_HIGHER_S, -+ BFD_RELOC_PPC64_HIGHEST, -+ BFD_RELOC_PPC64_HIGHEST_S, -+ BFD_RELOC_PPC64_TOC16_LO, -+ BFD_RELOC_PPC64_TOC16_HI, -+ BFD_RELOC_PPC64_TOC16_HA, -+ BFD_RELOC_PPC64_TOC, -+ BFD_RELOC_PPC64_PLTGOT16, -+ BFD_RELOC_PPC64_PLTGOT16_LO, -+ BFD_RELOC_PPC64_PLTGOT16_HI, -+ BFD_RELOC_PPC64_PLTGOT16_HA, -+ BFD_RELOC_PPC64_ADDR16_DS, -+ BFD_RELOC_PPC64_ADDR16_LO_DS, -+ BFD_RELOC_PPC64_GOT16_DS, -+ BFD_RELOC_PPC64_GOT16_LO_DS, -+ BFD_RELOC_PPC64_PLT16_LO_DS, -+ BFD_RELOC_PPC64_SECTOFF_DS, -+ BFD_RELOC_PPC64_SECTOFF_LO_DS, -+ BFD_RELOC_PPC64_TOC16_DS, -+ BFD_RELOC_PPC64_TOC16_LO_DS, -+ BFD_RELOC_PPC64_PLTGOT16_DS, -+ BFD_RELOC_PPC64_PLTGOT16_LO_DS, -+ -+/* PowerPC and PowerPC64 thread-local storage relocations. */ -+ BFD_RELOC_PPC_TLS, -+ BFD_RELOC_PPC_DTPMOD, -+ BFD_RELOC_PPC_TPREL16, -+ BFD_RELOC_PPC_TPREL16_LO, -+ BFD_RELOC_PPC_TPREL16_HI, -+ BFD_RELOC_PPC_TPREL16_HA, -+ BFD_RELOC_PPC_TPREL, -+ BFD_RELOC_PPC_DTPREL16, -+ BFD_RELOC_PPC_DTPREL16_LO, -+ BFD_RELOC_PPC_DTPREL16_HI, -+ BFD_RELOC_PPC_DTPREL16_HA, -+ BFD_RELOC_PPC_DTPREL, -+ BFD_RELOC_PPC_GOT_TLSGD16, -+ BFD_RELOC_PPC_GOT_TLSGD16_LO, -+ BFD_RELOC_PPC_GOT_TLSGD16_HI, -+ BFD_RELOC_PPC_GOT_TLSGD16_HA, -+ BFD_RELOC_PPC_GOT_TLSLD16, -+ BFD_RELOC_PPC_GOT_TLSLD16_LO, -+ BFD_RELOC_PPC_GOT_TLSLD16_HI, -+ BFD_RELOC_PPC_GOT_TLSLD16_HA, -+ BFD_RELOC_PPC_GOT_TPREL16, -+ BFD_RELOC_PPC_GOT_TPREL16_LO, -+ BFD_RELOC_PPC_GOT_TPREL16_HI, -+ BFD_RELOC_PPC_GOT_TPREL16_HA, -+ BFD_RELOC_PPC_GOT_DTPREL16, -+ BFD_RELOC_PPC_GOT_DTPREL16_LO, -+ BFD_RELOC_PPC_GOT_DTPREL16_HI, -+ BFD_RELOC_PPC_GOT_DTPREL16_HA, -+ BFD_RELOC_PPC64_TPREL16_DS, -+ BFD_RELOC_PPC64_TPREL16_LO_DS, -+ BFD_RELOC_PPC64_TPREL16_HIGHER, -+ BFD_RELOC_PPC64_TPREL16_HIGHERA, -+ BFD_RELOC_PPC64_TPREL16_HIGHEST, -+ BFD_RELOC_PPC64_TPREL16_HIGHESTA, -+ BFD_RELOC_PPC64_DTPREL16_DS, -+ BFD_RELOC_PPC64_DTPREL16_LO_DS, -+ BFD_RELOC_PPC64_DTPREL16_HIGHER, -+ BFD_RELOC_PPC64_DTPREL16_HIGHERA, -+ BFD_RELOC_PPC64_DTPREL16_HIGHEST, -+ BFD_RELOC_PPC64_DTPREL16_HIGHESTA, -+ -+/* IBM 370/390 relocations */ -+ BFD_RELOC_I370_D12, -+ -+/* The type of reloc used to build a constructor table - at the moment -+probably a 32 bit wide absolute relocation, but the target can choose. -+It generally does map to one of the other relocation types. */ -+ BFD_RELOC_CTOR, -+ -+/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are -+not stored in the instruction. */ -+ BFD_RELOC_ARM_PCREL_BRANCH, -+ -+/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is -+not stored in the instruction. The 2nd lowest bit comes from a 1 bit -+field in the instruction. 
*/ -+ BFD_RELOC_ARM_PCREL_BLX, -+ -+/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is -+not stored in the instruction. The 2nd lowest bit comes from a 1 bit -+field in the instruction. */ -+ BFD_RELOC_THUMB_PCREL_BLX, -+ -+/* ARM 26-bit pc-relative branch for an unconditional BL or BLX instruction. */ -+ BFD_RELOC_ARM_PCREL_CALL, -+ -+/* ARM 26-bit pc-relative branch for B or conditional BL instruction. */ -+ BFD_RELOC_ARM_PCREL_JUMP, -+ -+/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches. -+The lowest bit must be zero and is not stored in the instruction. -+Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an -+"nn" one smaller in all cases. Note further that BRANCH23 -+corresponds to R_ARM_THM_CALL. */ -+ BFD_RELOC_THUMB_PCREL_BRANCH7, -+ BFD_RELOC_THUMB_PCREL_BRANCH9, -+ BFD_RELOC_THUMB_PCREL_BRANCH12, -+ BFD_RELOC_THUMB_PCREL_BRANCH20, -+ BFD_RELOC_THUMB_PCREL_BRANCH23, -+ BFD_RELOC_THUMB_PCREL_BRANCH25, -+ -+/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */ -+ BFD_RELOC_ARM_OFFSET_IMM, -+ -+/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */ -+ BFD_RELOC_ARM_THUMB_OFFSET, -+ -+/* Pc-relative or absolute relocation depending on target. Used for -+entries in .init_array sections. */ -+ BFD_RELOC_ARM_TARGET1, -+ -+/* Read-only segment base relative address. */ -+ BFD_RELOC_ARM_ROSEGREL32, -+ -+/* Data segment base relative address. */ -+ BFD_RELOC_ARM_SBREL32, -+ -+/* This reloc is used for references to RTTI data from exception handling -+tables. The actual definition depends on the target. It may be a -+pc-relative or some form of GOT-indirect relocation. */ -+ BFD_RELOC_ARM_TARGET2, -+ -+/* 31-bit PC relative address. */ -+ BFD_RELOC_ARM_PREL31, -+ -+/* Relocations for setting up GOTs and PLTs for shared libraries. */ -+ BFD_RELOC_ARM_JUMP_SLOT, -+ BFD_RELOC_ARM_GLOB_DAT, -+ BFD_RELOC_ARM_GOT32, -+ BFD_RELOC_ARM_PLT32, -+ BFD_RELOC_ARM_RELATIVE, -+ BFD_RELOC_ARM_GOTOFF, -+ BFD_RELOC_ARM_GOTPC, -+ -+/* ARM thread-local storage relocations. */ -+ BFD_RELOC_ARM_TLS_GD32, -+ BFD_RELOC_ARM_TLS_LDO32, -+ BFD_RELOC_ARM_TLS_LDM32, -+ BFD_RELOC_ARM_TLS_DTPOFF32, -+ BFD_RELOC_ARM_TLS_DTPMOD32, -+ BFD_RELOC_ARM_TLS_TPOFF32, -+ BFD_RELOC_ARM_TLS_IE32, -+ BFD_RELOC_ARM_TLS_LE32, -+ -+/* These relocs are only used within the ARM assembler. They are not -+(at present) written to any object files. */ -+ BFD_RELOC_ARM_IMMEDIATE, -+ BFD_RELOC_ARM_ADRL_IMMEDIATE, -+ BFD_RELOC_ARM_T32_IMMEDIATE, -+ BFD_RELOC_ARM_T32_IMM12, -+ BFD_RELOC_ARM_T32_ADD_PC12, -+ BFD_RELOC_ARM_SHIFT_IMM, -+ BFD_RELOC_ARM_SMC, -+ BFD_RELOC_ARM_SWI, -+ BFD_RELOC_ARM_MULTI, -+ BFD_RELOC_ARM_CP_OFF_IMM, -+ BFD_RELOC_ARM_CP_OFF_IMM_S2, -+ BFD_RELOC_ARM_T32_CP_OFF_IMM, -+ BFD_RELOC_ARM_T32_CP_OFF_IMM_S2, -+ BFD_RELOC_ARM_ADR_IMM, -+ BFD_RELOC_ARM_LDR_IMM, -+ BFD_RELOC_ARM_LITERAL, -+ BFD_RELOC_ARM_IN_POOL, -+ BFD_RELOC_ARM_OFFSET_IMM8, -+ BFD_RELOC_ARM_T32_OFFSET_U8, -+ BFD_RELOC_ARM_T32_OFFSET_IMM, -+ BFD_RELOC_ARM_HWLITERAL, -+ BFD_RELOC_ARM_THUMB_ADD, -+ BFD_RELOC_ARM_THUMB_IMM, -+ BFD_RELOC_ARM_THUMB_SHIFT, -+ -+/* Renesas / SuperH SH relocs. Not all of these appear in object files. 
*/ -+ BFD_RELOC_SH_PCDISP8BY2, -+ BFD_RELOC_SH_PCDISP12BY2, -+ BFD_RELOC_SH_IMM3, -+ BFD_RELOC_SH_IMM3U, -+ BFD_RELOC_SH_DISP12, -+ BFD_RELOC_SH_DISP12BY2, -+ BFD_RELOC_SH_DISP12BY4, -+ BFD_RELOC_SH_DISP12BY8, -+ BFD_RELOC_SH_DISP20, -+ BFD_RELOC_SH_DISP20BY8, -+ BFD_RELOC_SH_IMM4, -+ BFD_RELOC_SH_IMM4BY2, -+ BFD_RELOC_SH_IMM4BY4, -+ BFD_RELOC_SH_IMM8, -+ BFD_RELOC_SH_IMM8BY2, -+ BFD_RELOC_SH_IMM8BY4, -+ BFD_RELOC_SH_PCRELIMM8BY2, -+ BFD_RELOC_SH_PCRELIMM8BY4, -+ BFD_RELOC_SH_SWITCH16, -+ BFD_RELOC_SH_SWITCH32, -+ BFD_RELOC_SH_USES, -+ BFD_RELOC_SH_COUNT, -+ BFD_RELOC_SH_ALIGN, -+ BFD_RELOC_SH_CODE, -+ BFD_RELOC_SH_DATA, -+ BFD_RELOC_SH_LABEL, -+ BFD_RELOC_SH_LOOP_START, -+ BFD_RELOC_SH_LOOP_END, -+ BFD_RELOC_SH_COPY, -+ BFD_RELOC_SH_GLOB_DAT, -+ BFD_RELOC_SH_JMP_SLOT, -+ BFD_RELOC_SH_RELATIVE, -+ BFD_RELOC_SH_GOTPC, -+ BFD_RELOC_SH_GOT_LOW16, -+ BFD_RELOC_SH_GOT_MEDLOW16, -+ BFD_RELOC_SH_GOT_MEDHI16, -+ BFD_RELOC_SH_GOT_HI16, -+ BFD_RELOC_SH_GOTPLT_LOW16, -+ BFD_RELOC_SH_GOTPLT_MEDLOW16, -+ BFD_RELOC_SH_GOTPLT_MEDHI16, -+ BFD_RELOC_SH_GOTPLT_HI16, -+ BFD_RELOC_SH_PLT_LOW16, -+ BFD_RELOC_SH_PLT_MEDLOW16, -+ BFD_RELOC_SH_PLT_MEDHI16, -+ BFD_RELOC_SH_PLT_HI16, -+ BFD_RELOC_SH_GOTOFF_LOW16, -+ BFD_RELOC_SH_GOTOFF_MEDLOW16, -+ BFD_RELOC_SH_GOTOFF_MEDHI16, -+ BFD_RELOC_SH_GOTOFF_HI16, -+ BFD_RELOC_SH_GOTPC_LOW16, -+ BFD_RELOC_SH_GOTPC_MEDLOW16, -+ BFD_RELOC_SH_GOTPC_MEDHI16, -+ BFD_RELOC_SH_GOTPC_HI16, -+ BFD_RELOC_SH_COPY64, -+ BFD_RELOC_SH_GLOB_DAT64, -+ BFD_RELOC_SH_JMP_SLOT64, -+ BFD_RELOC_SH_RELATIVE64, -+ BFD_RELOC_SH_GOT10BY4, -+ BFD_RELOC_SH_GOT10BY8, -+ BFD_RELOC_SH_GOTPLT10BY4, -+ BFD_RELOC_SH_GOTPLT10BY8, -+ BFD_RELOC_SH_GOTPLT32, -+ BFD_RELOC_SH_SHMEDIA_CODE, -+ BFD_RELOC_SH_IMMU5, -+ BFD_RELOC_SH_IMMS6, -+ BFD_RELOC_SH_IMMS6BY32, -+ BFD_RELOC_SH_IMMU6, -+ BFD_RELOC_SH_IMMS10, -+ BFD_RELOC_SH_IMMS10BY2, -+ BFD_RELOC_SH_IMMS10BY4, -+ BFD_RELOC_SH_IMMS10BY8, -+ BFD_RELOC_SH_IMMS16, -+ BFD_RELOC_SH_IMMU16, -+ BFD_RELOC_SH_IMM_LOW16, -+ BFD_RELOC_SH_IMM_LOW16_PCREL, -+ BFD_RELOC_SH_IMM_MEDLOW16, -+ BFD_RELOC_SH_IMM_MEDLOW16_PCREL, -+ BFD_RELOC_SH_IMM_MEDHI16, -+ BFD_RELOC_SH_IMM_MEDHI16_PCREL, -+ BFD_RELOC_SH_IMM_HI16, -+ BFD_RELOC_SH_IMM_HI16_PCREL, -+ BFD_RELOC_SH_PT_16, -+ BFD_RELOC_SH_TLS_GD_32, -+ BFD_RELOC_SH_TLS_LD_32, -+ BFD_RELOC_SH_TLS_LDO_32, -+ BFD_RELOC_SH_TLS_IE_32, -+ BFD_RELOC_SH_TLS_LE_32, -+ BFD_RELOC_SH_TLS_DTPMOD32, -+ BFD_RELOC_SH_TLS_DTPOFF32, -+ BFD_RELOC_SH_TLS_TPOFF32, -+ -+/* ARC Cores relocs. -+ARC 22 bit pc-relative branch. The lowest two bits must be zero and are -+not stored in the instruction. The high 20 bits are installed in bits 26 -+through 7 of the instruction. */ -+ BFD_RELOC_ARC_B22_PCREL, -+ -+/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not -+stored in the instruction. The high 24 bits are installed in bits 23 -+through 0. */ -+ BFD_RELOC_ARC_B26, -+ -+/* ADI Blackfin 16 bit immediate absolute reloc. */ -+ BFD_RELOC_BFIN_16_IMM, -+ -+/* ADI Blackfin 16 bit immediate absolute reloc higher 16 bits. */ -+ BFD_RELOC_BFIN_16_HIGH, -+ -+/* ADI Blackfin 'a' part of LSETUP. */ -+ BFD_RELOC_BFIN_4_PCREL, -+ -+/* ADI Blackfin. */ -+ BFD_RELOC_BFIN_5_PCREL, -+ -+/* ADI Blackfin 16 bit immediate absolute reloc lower 16 bits. */ -+ BFD_RELOC_BFIN_16_LOW, -+ -+/* ADI Blackfin. */ -+ BFD_RELOC_BFIN_10_PCREL, -+ -+/* ADI Blackfin 'b' part of LSETUP. */ -+ BFD_RELOC_BFIN_11_PCREL, -+ -+/* ADI Blackfin. */ -+ BFD_RELOC_BFIN_12_PCREL_JUMP, -+ -+/* ADI Blackfin Short jump, pcrel. 
*/ -+ BFD_RELOC_BFIN_12_PCREL_JUMP_S, -+ -+/* ADI Blackfin Call.x not implemented. */ -+ BFD_RELOC_BFIN_24_PCREL_CALL_X, -+ -+/* ADI Blackfin Long Jump pcrel. */ -+ BFD_RELOC_BFIN_24_PCREL_JUMP_L, -+ -+/* ADI Blackfin GOT relocation. */ -+ BFD_RELOC_BFIN_GOT, -+ -+/* ADI Blackfin PLTPC relocation. */ -+ BFD_RELOC_BFIN_PLTPC, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_PUSH, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_CONST, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_ADD, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_SUB, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_MULT, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_DIV, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_MOD, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_LSHIFT, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_RSHIFT, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_AND, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_OR, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_XOR, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_LAND, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_LOR, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_LEN, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_NEG, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_COMP, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_PAGE, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_HWPAGE, -+ -+/* ADI Blackfin arithmetic relocation. */ -+ BFD_ARELOC_BFIN_ADDR, -+ -+/* Mitsubishi D10V relocs. -+This is a 10-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_10_PCREL_R, -+ -+/* Mitsubishi D10V relocs. -+This is a 10-bit reloc with the right 2 bits -+assumed to be 0. This is the same as the previous reloc -+except it is in the left container, i.e., -+shifted left 15 bits. */ -+ BFD_RELOC_D10V_10_PCREL_L, -+ -+/* This is an 18-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_18, -+ -+/* This is an 18-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_18_PCREL, -+ -+/* Mitsubishi D30V relocs. -+This is a 6-bit absolute reloc. */ -+ BFD_RELOC_D30V_6, -+ -+/* This is a 6-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_9_PCREL, -+ -+/* This is a 6-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_9_PCREL_R, -+ -+/* This is a 12-bit absolute reloc with the -+right 3 bitsassumed to be 0. */ -+ BFD_RELOC_D30V_15, -+ -+/* This is a 12-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_15_PCREL, -+ -+/* This is a 12-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_15_PCREL_R, -+ -+/* This is an 18-bit absolute reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_21, -+ -+/* This is an 18-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_21_PCREL, -+ -+/* This is an 18-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. 
*/ -+ BFD_RELOC_D30V_21_PCREL_R, -+ -+/* This is a 32-bit absolute reloc. */ -+ BFD_RELOC_D30V_32, -+ -+/* This is a 32-bit pc-relative reloc. */ -+ BFD_RELOC_D30V_32_PCREL, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_HI16_S, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_LO16, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_JMP26, -+ -+/* Renesas M16C/M32C Relocations. */ -+ BFD_RELOC_M32C_HI8, -+ -+/* Renesas M32R (formerly Mitsubishi M32R) relocs. -+This is a 24 bit absolute address. */ -+ BFD_RELOC_M32R_24, -+ -+/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */ -+ BFD_RELOC_M32R_10_PCREL, -+ -+/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ -+ BFD_RELOC_M32R_18_PCREL, -+ -+/* This is a 26-bit reloc with the right 2 bits assumed to be 0. */ -+ BFD_RELOC_M32R_26_PCREL, -+ -+/* This is a 16-bit reloc containing the high 16 bits of an address -+used when the lower 16 bits are treated as unsigned. */ -+ BFD_RELOC_M32R_HI16_ULO, -+ -+/* This is a 16-bit reloc containing the high 16 bits of an address -+used when the lower 16 bits are treated as signed. */ -+ BFD_RELOC_M32R_HI16_SLO, -+ -+/* This is a 16-bit reloc containing the lower 16 bits of an address. */ -+ BFD_RELOC_M32R_LO16, -+ -+/* This is a 16-bit reloc containing the small data area offset for use in -+add3, load, and store instructions. */ -+ BFD_RELOC_M32R_SDA16, -+ -+/* For PIC. */ -+ BFD_RELOC_M32R_GOT24, -+ BFD_RELOC_M32R_26_PLTREL, -+ BFD_RELOC_M32R_COPY, -+ BFD_RELOC_M32R_GLOB_DAT, -+ BFD_RELOC_M32R_JMP_SLOT, -+ BFD_RELOC_M32R_RELATIVE, -+ BFD_RELOC_M32R_GOTOFF, -+ BFD_RELOC_M32R_GOTOFF_HI_ULO, -+ BFD_RELOC_M32R_GOTOFF_HI_SLO, -+ BFD_RELOC_M32R_GOTOFF_LO, -+ BFD_RELOC_M32R_GOTPC24, -+ BFD_RELOC_M32R_GOT16_HI_ULO, -+ BFD_RELOC_M32R_GOT16_HI_SLO, -+ BFD_RELOC_M32R_GOT16_LO, -+ BFD_RELOC_M32R_GOTPC_HI_ULO, -+ BFD_RELOC_M32R_GOTPC_HI_SLO, -+ BFD_RELOC_M32R_GOTPC_LO, -+ -+/* This is a 9-bit reloc */ -+ BFD_RELOC_V850_9_PCREL, -+ -+/* This is a 22-bit reloc */ -+ BFD_RELOC_V850_22_PCREL, -+ -+/* This is a 16 bit offset from the short data area pointer. */ -+ BFD_RELOC_V850_SDA_16_16_OFFSET, -+ -+/* This is a 16 bit offset (of which only 15 bits are used) from the -+short data area pointer. */ -+ BFD_RELOC_V850_SDA_15_16_OFFSET, -+ -+/* This is a 16 bit offset from the zero data area pointer. */ -+ BFD_RELOC_V850_ZDA_16_16_OFFSET, -+ -+/* This is a 16 bit offset (of which only 15 bits are used) from the -+zero data area pointer. */ -+ BFD_RELOC_V850_ZDA_15_16_OFFSET, -+ -+/* This is an 8 bit offset (of which only 6 bits are used) from the -+tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_6_8_OFFSET, -+ -+/* This is an 8bit offset (of which only 7 bits are used) from the tiny -+data area pointer. */ -+ BFD_RELOC_V850_TDA_7_8_OFFSET, -+ -+/* This is a 7 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_7_7_OFFSET, -+ -+/* This is a 16 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_16_16_OFFSET, -+ -+/* This is a 5 bit offset (of which only 4 bits are used) from the tiny -+data area pointer. */ -+ BFD_RELOC_V850_TDA_4_5_OFFSET, -+ -+/* This is a 4 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_4_4_OFFSET, -+ -+/* This is a 16 bit offset from the short data area pointer, with the -+bits placed non-contiguously in the instruction. */ -+ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET, -+ -+/* This is a 16 bit offset from the zero data area pointer, with the -+bits placed non-contiguously in the instruction. 
*/ -+ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET, -+ -+/* This is a 6 bit offset from the call table base pointer. */ -+ BFD_RELOC_V850_CALLT_6_7_OFFSET, -+ -+/* This is a 16 bit offset from the call table base pointer. */ -+ BFD_RELOC_V850_CALLT_16_16_OFFSET, -+ -+/* Used for relaxing indirect function calls. */ -+ BFD_RELOC_V850_LONGCALL, -+ -+/* Used for relaxing indirect jumps. */ -+ BFD_RELOC_V850_LONGJUMP, -+ -+/* Used to maintain alignment whilst relaxing. */ -+ BFD_RELOC_V850_ALIGN, -+ -+/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu -+instructions. */ -+ BFD_RELOC_V850_LO16_SPLIT_OFFSET, -+ -+/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the -+instruction. */ -+ BFD_RELOC_MN10300_32_PCREL, -+ -+/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the -+instruction. */ -+ BFD_RELOC_MN10300_16_PCREL, -+ -+/* This is a 8bit DP reloc for the tms320c30, where the most -+significant 8 bits of a 24 bit word are placed into the least -+significant 8 bits of the opcode. */ -+ BFD_RELOC_TIC30_LDP, -+ -+/* This is a 7bit reloc for the tms320c54x, where the least -+significant 7 bits of a 16 bit word are placed into the least -+significant 7 bits of the opcode. */ -+ BFD_RELOC_TIC54X_PARTLS7, -+ -+/* This is a 9bit DP reloc for the tms320c54x, where the most -+significant 9 bits of a 16 bit word are placed into the least -+significant 9 bits of the opcode. */ -+ BFD_RELOC_TIC54X_PARTMS9, -+ -+/* This is an extended address 23-bit reloc for the tms320c54x. */ -+ BFD_RELOC_TIC54X_23, -+ -+/* This is a 16-bit reloc for the tms320c54x, where the least -+significant 16 bits of a 23-bit extended address are placed into -+the opcode. */ -+ BFD_RELOC_TIC54X_16_OF_23, -+ -+/* This is a reloc for the tms320c54x, where the most -+significant 7 bits of a 23-bit extended address are placed into -+the opcode. */ -+ BFD_RELOC_TIC54X_MS7_OF_23, -+ -+/* This is a 48 bit reloc for the FR30 that stores 32 bits. */ -+ BFD_RELOC_FR30_48, -+ -+/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into -+two sections. */ -+ BFD_RELOC_FR30_20, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in -+4 bits. */ -+ BFD_RELOC_FR30_6_IN_4, -+ -+/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset -+into 8 bits. */ -+ BFD_RELOC_FR30_8_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset -+into 8 bits. */ -+ BFD_RELOC_FR30_9_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset -+into 8 bits. */ -+ BFD_RELOC_FR30_10_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative -+short offset into 8 bits. */ -+ BFD_RELOC_FR30_9_PCREL, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative -+short offset into 11 bits. */ -+ BFD_RELOC_FR30_12_PCREL, -+ -+/* Motorola Mcore relocations. */ -+ BFD_RELOC_MCORE_PCREL_IMM8BY4, -+ BFD_RELOC_MCORE_PCREL_IMM11BY2, -+ BFD_RELOC_MCORE_PCREL_IMM4BY2, -+ BFD_RELOC_MCORE_PCREL_32, -+ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2, -+ BFD_RELOC_MCORE_RVA, -+ -+/* These are relocations for the GETA instruction. */ -+ BFD_RELOC_MMIX_GETA, -+ BFD_RELOC_MMIX_GETA_1, -+ BFD_RELOC_MMIX_GETA_2, -+ BFD_RELOC_MMIX_GETA_3, -+ -+/* These are relocations for a conditional branch instruction. */ -+ BFD_RELOC_MMIX_CBRANCH, -+ BFD_RELOC_MMIX_CBRANCH_J, -+ BFD_RELOC_MMIX_CBRANCH_1, -+ BFD_RELOC_MMIX_CBRANCH_2, -+ BFD_RELOC_MMIX_CBRANCH_3, -+ -+/* These are relocations for the PUSHJ instruction. 
*/ -+ BFD_RELOC_MMIX_PUSHJ, -+ BFD_RELOC_MMIX_PUSHJ_1, -+ BFD_RELOC_MMIX_PUSHJ_2, -+ BFD_RELOC_MMIX_PUSHJ_3, -+ BFD_RELOC_MMIX_PUSHJ_STUBBABLE, -+ -+/* These are relocations for the JMP instruction. */ -+ BFD_RELOC_MMIX_JMP, -+ BFD_RELOC_MMIX_JMP_1, -+ BFD_RELOC_MMIX_JMP_2, -+ BFD_RELOC_MMIX_JMP_3, -+ -+/* This is a relocation for a relative address as in a GETA instruction or -+a branch. */ -+ BFD_RELOC_MMIX_ADDR19, -+ -+/* This is a relocation for a relative address as in a JMP instruction. */ -+ BFD_RELOC_MMIX_ADDR27, -+ -+/* This is a relocation for an instruction field that may be a general -+register or a value 0..255. */ -+ BFD_RELOC_MMIX_REG_OR_BYTE, -+ -+/* This is a relocation for an instruction field that may be a general -+register. */ -+ BFD_RELOC_MMIX_REG, -+ -+/* This is a relocation for two instruction fields holding a register and -+an offset, the equivalent of the relocation. */ -+ BFD_RELOC_MMIX_BASE_PLUS_OFFSET, -+ -+/* This relocation is an assertion that the expression is not allocated as -+a global register. It does not modify contents. */ -+ BFD_RELOC_MMIX_LOCAL, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative -+short offset into 7 bits. */ -+ BFD_RELOC_AVR_7_PCREL, -+ -+/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative -+short offset into 12 bits. */ -+ BFD_RELOC_AVR_13_PCREL, -+ -+/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually -+program memory address) into 16 bits. */ -+ BFD_RELOC_AVR_16_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually -+data memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_LO8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit -+of data memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HI8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit -+of program memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HH8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(usually data memory address) into 8 bit immediate value of SUBI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 8 bit of data memory address) into 8 bit immediate value of -+SUBI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(most high 8 bit of program memory address) into 8 bit immediate value -+of LDI or SUBI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually -+command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit -+of command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit -+of command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(usually command address) into 8 bit immediate value of SUBI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_PM_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 8 bit of 16 bit command address) into 8 bit immediate value -+of SUBI insn. 
*/ -+ BFD_RELOC_AVR_HI8_LDI_PM_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 6 bit of 22 bit command address) into 8 bit immediate -+value of SUBI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_PM_NEG, -+ -+/* This is a 32 bit reloc for the AVR that stores 23 bit value -+into 22 bits. */ -+ BFD_RELOC_AVR_CALL, -+ -+/* This is a 16 bit reloc for the AVR that stores all needed bits -+for absolute addressing with ldi with overflow check to linktime */ -+ BFD_RELOC_AVR_LDI, -+ -+/* This is a 6 bit reloc for the AVR that stores offset for ldd/std -+instructions */ -+ BFD_RELOC_AVR_6, -+ -+/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw -+instructions */ -+ BFD_RELOC_AVR_6_ADIW, -+ -+/* Direct 12 bit. */ -+ BFD_RELOC_390_12, -+ -+/* 12 bit GOT offset. */ -+ BFD_RELOC_390_GOT12, -+ -+/* 32 bit PC relative PLT address. */ -+ BFD_RELOC_390_PLT32, -+ -+/* Copy symbol at runtime. */ -+ BFD_RELOC_390_COPY, -+ -+/* Create GOT entry. */ -+ BFD_RELOC_390_GLOB_DAT, -+ -+/* Create PLT entry. */ -+ BFD_RELOC_390_JMP_SLOT, -+ -+/* Adjust by program base. */ -+ BFD_RELOC_390_RELATIVE, -+ -+/* 32 bit PC relative offset to GOT. */ -+ BFD_RELOC_390_GOTPC, -+ -+/* 16 bit GOT offset. */ -+ BFD_RELOC_390_GOT16, -+ -+/* PC relative 16 bit shifted by 1. */ -+ BFD_RELOC_390_PC16DBL, -+ -+/* 16 bit PC rel. PLT shifted by 1. */ -+ BFD_RELOC_390_PLT16DBL, -+ -+/* PC relative 32 bit shifted by 1. */ -+ BFD_RELOC_390_PC32DBL, -+ -+/* 32 bit PC rel. PLT shifted by 1. */ -+ BFD_RELOC_390_PLT32DBL, -+ -+/* 32 bit PC rel. GOT shifted by 1. */ -+ BFD_RELOC_390_GOTPCDBL, -+ -+/* 64 bit GOT offset. */ -+ BFD_RELOC_390_GOT64, -+ -+/* 64 bit PC relative PLT address. */ -+ BFD_RELOC_390_PLT64, -+ -+/* 32 bit rel. offset to GOT entry. */ -+ BFD_RELOC_390_GOTENT, -+ -+/* 64 bit offset to GOT. */ -+ BFD_RELOC_390_GOTOFF64, -+ -+/* 12-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT12, -+ -+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT16, -+ -+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT32, -+ -+/* 64-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT64, -+ -+/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLTENT, -+ -+/* 16-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF16, -+ -+/* 32-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF32, -+ -+/* 64-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF64, -+ -+/* s390 tls relocations. */ -+ BFD_RELOC_390_TLS_LOAD, -+ BFD_RELOC_390_TLS_GDCALL, -+ BFD_RELOC_390_TLS_LDCALL, -+ BFD_RELOC_390_TLS_GD32, -+ BFD_RELOC_390_TLS_GD64, -+ BFD_RELOC_390_TLS_GOTIE12, -+ BFD_RELOC_390_TLS_GOTIE32, -+ BFD_RELOC_390_TLS_GOTIE64, -+ BFD_RELOC_390_TLS_LDM32, -+ BFD_RELOC_390_TLS_LDM64, -+ BFD_RELOC_390_TLS_IE32, -+ BFD_RELOC_390_TLS_IE64, -+ BFD_RELOC_390_TLS_IEENT, -+ BFD_RELOC_390_TLS_LE32, -+ BFD_RELOC_390_TLS_LE64, -+ BFD_RELOC_390_TLS_LDO32, -+ BFD_RELOC_390_TLS_LDO64, -+ BFD_RELOC_390_TLS_DTPMOD, -+ BFD_RELOC_390_TLS_DTPOFF, -+ BFD_RELOC_390_TLS_TPOFF, -+ -+/* Long displacement extension. 
*/ -+ BFD_RELOC_390_20, -+ BFD_RELOC_390_GOT20, -+ BFD_RELOC_390_GOTPLT20, -+ BFD_RELOC_390_TLS_GOTIE20, -+ -+/* Scenix IP2K - 9-bit register number / data address */ -+ BFD_RELOC_IP2K_FR9, -+ -+/* Scenix IP2K - 4-bit register/data bank number */ -+ BFD_RELOC_IP2K_BANK, -+ -+/* Scenix IP2K - low 13 bits of instruction word address */ -+ BFD_RELOC_IP2K_ADDR16CJP, -+ -+/* Scenix IP2K - high 3 bits of instruction word address */ -+ BFD_RELOC_IP2K_PAGE3, -+ -+/* Scenix IP2K - ext/low/high 8 bits of data address */ -+ BFD_RELOC_IP2K_LO8DATA, -+ BFD_RELOC_IP2K_HI8DATA, -+ BFD_RELOC_IP2K_EX8DATA, -+ -+/* Scenix IP2K - low/high 8 bits of instruction word address */ -+ BFD_RELOC_IP2K_LO8INSN, -+ BFD_RELOC_IP2K_HI8INSN, -+ -+/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */ -+ BFD_RELOC_IP2K_PC_SKIP, -+ -+/* Scenix IP2K - 16 bit word address in text section. */ -+ BFD_RELOC_IP2K_TEXT, -+ -+/* Scenix IP2K - 7-bit sp or dp offset */ -+ BFD_RELOC_IP2K_FR_OFFSET, -+ -+/* Scenix VPE4K coprocessor - data/insn-space addressing */ -+ BFD_RELOC_VPE4KMATH_DATA, -+ BFD_RELOC_VPE4KMATH_INSN, -+ -+/* These two relocations are used by the linker to determine which of -+the entries in a C++ virtual function table are actually used. When -+the --gc-sections option is given, the linker will zero out the entries -+that are not used, so that the code for those functions need not be -+included in the output. -+ -+VTABLE_INHERIT is a zero-space relocation used to describe to the -+linker the inheritance tree of a C++ virtual function table. The -+relocation's symbol should be the parent class' vtable, and the -+relocation should be located at the child vtable. -+ -+VTABLE_ENTRY is a zero-space relocation that describes the use of a -+virtual function table entry. The reloc's symbol should refer to the -+table of the class mentioned in the code. Off of that base, an offset -+describes the entry that is being used. For Rela hosts, this offset -+is stored in the reloc's addend. For Rel hosts, we are forced to put -+this offset in the reloc's section offset. */ -+ BFD_RELOC_VTABLE_INHERIT, -+ BFD_RELOC_VTABLE_ENTRY, -+ -+/* Intel IA64 Relocations. 
*/ -+ BFD_RELOC_IA64_IMM14, -+ BFD_RELOC_IA64_IMM22, -+ BFD_RELOC_IA64_IMM64, -+ BFD_RELOC_IA64_DIR32MSB, -+ BFD_RELOC_IA64_DIR32LSB, -+ BFD_RELOC_IA64_DIR64MSB, -+ BFD_RELOC_IA64_DIR64LSB, -+ BFD_RELOC_IA64_GPREL22, -+ BFD_RELOC_IA64_GPREL64I, -+ BFD_RELOC_IA64_GPREL32MSB, -+ BFD_RELOC_IA64_GPREL32LSB, -+ BFD_RELOC_IA64_GPREL64MSB, -+ BFD_RELOC_IA64_GPREL64LSB, -+ BFD_RELOC_IA64_LTOFF22, -+ BFD_RELOC_IA64_LTOFF64I, -+ BFD_RELOC_IA64_PLTOFF22, -+ BFD_RELOC_IA64_PLTOFF64I, -+ BFD_RELOC_IA64_PLTOFF64MSB, -+ BFD_RELOC_IA64_PLTOFF64LSB, -+ BFD_RELOC_IA64_FPTR64I, -+ BFD_RELOC_IA64_FPTR32MSB, -+ BFD_RELOC_IA64_FPTR32LSB, -+ BFD_RELOC_IA64_FPTR64MSB, -+ BFD_RELOC_IA64_FPTR64LSB, -+ BFD_RELOC_IA64_PCREL21B, -+ BFD_RELOC_IA64_PCREL21BI, -+ BFD_RELOC_IA64_PCREL21M, -+ BFD_RELOC_IA64_PCREL21F, -+ BFD_RELOC_IA64_PCREL22, -+ BFD_RELOC_IA64_PCREL60B, -+ BFD_RELOC_IA64_PCREL64I, -+ BFD_RELOC_IA64_PCREL32MSB, -+ BFD_RELOC_IA64_PCREL32LSB, -+ BFD_RELOC_IA64_PCREL64MSB, -+ BFD_RELOC_IA64_PCREL64LSB, -+ BFD_RELOC_IA64_LTOFF_FPTR22, -+ BFD_RELOC_IA64_LTOFF_FPTR64I, -+ BFD_RELOC_IA64_LTOFF_FPTR32MSB, -+ BFD_RELOC_IA64_LTOFF_FPTR32LSB, -+ BFD_RELOC_IA64_LTOFF_FPTR64MSB, -+ BFD_RELOC_IA64_LTOFF_FPTR64LSB, -+ BFD_RELOC_IA64_SEGREL32MSB, -+ BFD_RELOC_IA64_SEGREL32LSB, -+ BFD_RELOC_IA64_SEGREL64MSB, -+ BFD_RELOC_IA64_SEGREL64LSB, -+ BFD_RELOC_IA64_SECREL32MSB, -+ BFD_RELOC_IA64_SECREL32LSB, -+ BFD_RELOC_IA64_SECREL64MSB, -+ BFD_RELOC_IA64_SECREL64LSB, -+ BFD_RELOC_IA64_REL32MSB, -+ BFD_RELOC_IA64_REL32LSB, -+ BFD_RELOC_IA64_REL64MSB, -+ BFD_RELOC_IA64_REL64LSB, -+ BFD_RELOC_IA64_LTV32MSB, -+ BFD_RELOC_IA64_LTV32LSB, -+ BFD_RELOC_IA64_LTV64MSB, -+ BFD_RELOC_IA64_LTV64LSB, -+ BFD_RELOC_IA64_IPLTMSB, -+ BFD_RELOC_IA64_IPLTLSB, -+ BFD_RELOC_IA64_COPY, -+ BFD_RELOC_IA64_LTOFF22X, -+ BFD_RELOC_IA64_LDXMOV, -+ BFD_RELOC_IA64_TPREL14, -+ BFD_RELOC_IA64_TPREL22, -+ BFD_RELOC_IA64_TPREL64I, -+ BFD_RELOC_IA64_TPREL64MSB, -+ BFD_RELOC_IA64_TPREL64LSB, -+ BFD_RELOC_IA64_LTOFF_TPREL22, -+ BFD_RELOC_IA64_DTPMOD64MSB, -+ BFD_RELOC_IA64_DTPMOD64LSB, -+ BFD_RELOC_IA64_LTOFF_DTPMOD22, -+ BFD_RELOC_IA64_DTPREL14, -+ BFD_RELOC_IA64_DTPREL22, -+ BFD_RELOC_IA64_DTPREL64I, -+ BFD_RELOC_IA64_DTPREL32MSB, -+ BFD_RELOC_IA64_DTPREL32LSB, -+ BFD_RELOC_IA64_DTPREL64MSB, -+ BFD_RELOC_IA64_DTPREL64LSB, -+ BFD_RELOC_IA64_LTOFF_DTPREL22, -+ -+/* Motorola 68HC11 reloc. -+This is the 8 bit high part of an absolute address. */ -+ BFD_RELOC_M68HC11_HI8, -+ -+/* Motorola 68HC11 reloc. -+This is the 8 bit low part of an absolute address. */ -+ BFD_RELOC_M68HC11_LO8, -+ -+/* Motorola 68HC11 reloc. -+This is the 3 bit of a value. */ -+ BFD_RELOC_M68HC11_3B, -+ -+/* Motorola 68HC11 reloc. -+This reloc marks the beginning of a jump/call instruction. -+It is used for linker relaxation to correctly identify beginning -+of instruction and change some branches to use PC-relative -+addressing mode. */ -+ BFD_RELOC_M68HC11_RL_JUMP, -+ -+/* Motorola 68HC11 reloc. -+This reloc marks a group of several instructions that gcc generates -+and for which the linker relaxation pass can modify and/or remove -+some of them. */ -+ BFD_RELOC_M68HC11_RL_GROUP, -+ -+/* Motorola 68HC11 reloc. -+This is the 16-bit lower part of an address. It is used for 'call' -+instruction to specify the symbol address without any special -+transformation (due to memory bank window). */ -+ BFD_RELOC_M68HC11_LO16, -+ -+/* Motorola 68HC11 reloc. -+This is a 8-bit reloc that specifies the page number of an address. -+It is used by 'call' instruction to specify the page number of -+the symbol. 
*/ -+ BFD_RELOC_M68HC11_PAGE, -+ -+/* Motorola 68HC11 reloc. -+This is a 24-bit reloc that represents the address with a 16-bit -+value and a 8-bit page number. The symbol address is transformed -+to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */ -+ BFD_RELOC_M68HC11_24, -+ -+/* Motorola 68HC12 reloc. -+This is the 5 bits of a value. */ -+ BFD_RELOC_M68HC12_5B, -+ -+/* NS CR16C Relocations. */ -+ BFD_RELOC_16C_NUM08, -+ BFD_RELOC_16C_NUM08_C, -+ BFD_RELOC_16C_NUM16, -+ BFD_RELOC_16C_NUM16_C, -+ BFD_RELOC_16C_NUM32, -+ BFD_RELOC_16C_NUM32_C, -+ BFD_RELOC_16C_DISP04, -+ BFD_RELOC_16C_DISP04_C, -+ BFD_RELOC_16C_DISP08, -+ BFD_RELOC_16C_DISP08_C, -+ BFD_RELOC_16C_DISP16, -+ BFD_RELOC_16C_DISP16_C, -+ BFD_RELOC_16C_DISP24, -+ BFD_RELOC_16C_DISP24_C, -+ BFD_RELOC_16C_DISP24a, -+ BFD_RELOC_16C_DISP24a_C, -+ BFD_RELOC_16C_REG04, -+ BFD_RELOC_16C_REG04_C, -+ BFD_RELOC_16C_REG04a, -+ BFD_RELOC_16C_REG04a_C, -+ BFD_RELOC_16C_REG14, -+ BFD_RELOC_16C_REG14_C, -+ BFD_RELOC_16C_REG16, -+ BFD_RELOC_16C_REG16_C, -+ BFD_RELOC_16C_REG20, -+ BFD_RELOC_16C_REG20_C, -+ BFD_RELOC_16C_ABS20, -+ BFD_RELOC_16C_ABS20_C, -+ BFD_RELOC_16C_ABS24, -+ BFD_RELOC_16C_ABS24_C, -+ BFD_RELOC_16C_IMM04, -+ BFD_RELOC_16C_IMM04_C, -+ BFD_RELOC_16C_IMM16, -+ BFD_RELOC_16C_IMM16_C, -+ BFD_RELOC_16C_IMM20, -+ BFD_RELOC_16C_IMM20_C, -+ BFD_RELOC_16C_IMM24, -+ BFD_RELOC_16C_IMM24_C, -+ BFD_RELOC_16C_IMM32, -+ BFD_RELOC_16C_IMM32_C, -+ -+/* NS CRX Relocations. */ -+ BFD_RELOC_CRX_REL4, -+ BFD_RELOC_CRX_REL8, -+ BFD_RELOC_CRX_REL8_CMP, -+ BFD_RELOC_CRX_REL16, -+ BFD_RELOC_CRX_REL24, -+ BFD_RELOC_CRX_REL32, -+ BFD_RELOC_CRX_REGREL12, -+ BFD_RELOC_CRX_REGREL22, -+ BFD_RELOC_CRX_REGREL28, -+ BFD_RELOC_CRX_REGREL32, -+ BFD_RELOC_CRX_ABS16, -+ BFD_RELOC_CRX_ABS32, -+ BFD_RELOC_CRX_NUM8, -+ BFD_RELOC_CRX_NUM16, -+ BFD_RELOC_CRX_NUM32, -+ BFD_RELOC_CRX_IMM16, -+ BFD_RELOC_CRX_IMM32, -+ BFD_RELOC_CRX_SWITCH8, -+ BFD_RELOC_CRX_SWITCH16, -+ BFD_RELOC_CRX_SWITCH32, -+ -+/* These relocs are only used within the CRIS assembler. They are not -+(at present) written to any object files. */ -+ BFD_RELOC_CRIS_BDISP8, -+ BFD_RELOC_CRIS_UNSIGNED_5, -+ BFD_RELOC_CRIS_SIGNED_6, -+ BFD_RELOC_CRIS_UNSIGNED_6, -+ BFD_RELOC_CRIS_SIGNED_8, -+ BFD_RELOC_CRIS_UNSIGNED_8, -+ BFD_RELOC_CRIS_SIGNED_16, -+ BFD_RELOC_CRIS_UNSIGNED_16, -+ BFD_RELOC_CRIS_LAPCQ_OFFSET, -+ BFD_RELOC_CRIS_UNSIGNED_4, -+ -+/* Relocs used in ELF shared libraries for CRIS. */ -+ BFD_RELOC_CRIS_COPY, -+ BFD_RELOC_CRIS_GLOB_DAT, -+ BFD_RELOC_CRIS_JUMP_SLOT, -+ BFD_RELOC_CRIS_RELATIVE, -+ -+/* 32-bit offset to symbol-entry within GOT. */ -+ BFD_RELOC_CRIS_32_GOT, -+ -+/* 16-bit offset to symbol-entry within GOT. */ -+ BFD_RELOC_CRIS_16_GOT, -+ -+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_CRIS_32_GOTPLT, -+ -+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_CRIS_16_GOTPLT, -+ -+/* 32-bit offset to symbol, relative to GOT. */ -+ BFD_RELOC_CRIS_32_GOTREL, -+ -+/* 32-bit offset to symbol with PLT entry, relative to GOT. */ -+ BFD_RELOC_CRIS_32_PLT_GOTREL, -+ -+/* 32-bit offset to symbol with PLT entry, relative to this relocation. */ -+ BFD_RELOC_CRIS_32_PLT_PCREL, -+ -+/* Intel i860 Relocations. 
*/ -+ BFD_RELOC_860_COPY, -+ BFD_RELOC_860_GLOB_DAT, -+ BFD_RELOC_860_JUMP_SLOT, -+ BFD_RELOC_860_RELATIVE, -+ BFD_RELOC_860_PC26, -+ BFD_RELOC_860_PLT26, -+ BFD_RELOC_860_PC16, -+ BFD_RELOC_860_LOW0, -+ BFD_RELOC_860_SPLIT0, -+ BFD_RELOC_860_LOW1, -+ BFD_RELOC_860_SPLIT1, -+ BFD_RELOC_860_LOW2, -+ BFD_RELOC_860_SPLIT2, -+ BFD_RELOC_860_LOW3, -+ BFD_RELOC_860_LOGOT0, -+ BFD_RELOC_860_SPGOT0, -+ BFD_RELOC_860_LOGOT1, -+ BFD_RELOC_860_SPGOT1, -+ BFD_RELOC_860_LOGOTOFF0, -+ BFD_RELOC_860_SPGOTOFF0, -+ BFD_RELOC_860_LOGOTOFF1, -+ BFD_RELOC_860_SPGOTOFF1, -+ BFD_RELOC_860_LOGOTOFF2, -+ BFD_RELOC_860_LOGOTOFF3, -+ BFD_RELOC_860_LOPC, -+ BFD_RELOC_860_HIGHADJ, -+ BFD_RELOC_860_HAGOT, -+ BFD_RELOC_860_HAGOTOFF, -+ BFD_RELOC_860_HAPC, -+ BFD_RELOC_860_HIGH, -+ BFD_RELOC_860_HIGOT, -+ BFD_RELOC_860_HIGOTOFF, -+ -+/* OpenRISC Relocations. */ -+ BFD_RELOC_OPENRISC_ABS_26, -+ BFD_RELOC_OPENRISC_REL_26, -+ -+/* H8 elf Relocations. */ -+ BFD_RELOC_H8_DIR16A8, -+ BFD_RELOC_H8_DIR16R8, -+ BFD_RELOC_H8_DIR24A8, -+ BFD_RELOC_H8_DIR24R8, -+ BFD_RELOC_H8_DIR32A16, -+ -+/* Sony Xstormy16 Relocations. */ -+ BFD_RELOC_XSTORMY16_REL_12, -+ BFD_RELOC_XSTORMY16_12, -+ BFD_RELOC_XSTORMY16_24, -+ BFD_RELOC_XSTORMY16_FPTR16, -+ -+/* Relocations used by VAX ELF. */ -+ BFD_RELOC_VAX_GLOB_DAT, -+ BFD_RELOC_VAX_JMP_SLOT, -+ BFD_RELOC_VAX_RELATIVE, -+ -+/* Morpho MT - 16 bit immediate relocation. */ -+ BFD_RELOC_MT_PC16, -+ -+/* Morpho MT - Hi 16 bits of an address. */ -+ BFD_RELOC_MT_HI16, -+ -+/* Morpho MT - Low 16 bits of an address. */ -+ BFD_RELOC_MT_LO16, -+ -+/* Morpho MT - Used to tell the linker which vtable entries are used. */ -+ BFD_RELOC_MT_GNU_VTINHERIT, -+ -+/* Morpho MT - Used to tell the linker which vtable entries are used. */ -+ BFD_RELOC_MT_GNU_VTENTRY, -+ -+/* Morpho MT - 8 bit immediate relocation. */ -+ BFD_RELOC_MT_PCINSN8, -+ -+/* msp430 specific relocation codes */ -+ BFD_RELOC_MSP430_10_PCREL, -+ BFD_RELOC_MSP430_16_PCREL, -+ BFD_RELOC_MSP430_16, -+ BFD_RELOC_MSP430_16_PCREL_BYTE, -+ BFD_RELOC_MSP430_16_BYTE, -+ BFD_RELOC_MSP430_2X_PCREL, -+ BFD_RELOC_MSP430_RL_PCREL, -+ -+/* IQ2000 Relocations. */ -+ BFD_RELOC_IQ2000_OFFSET_16, -+ BFD_RELOC_IQ2000_OFFSET_21, -+ BFD_RELOC_IQ2000_UHI16, -+ -+/* Special Xtensa relocation used only by PLT entries in ELF shared -+objects to indicate that the runtime linker should set the value -+to one of its own internal functions or data structures. */ -+ BFD_RELOC_XTENSA_RTLD, -+ -+/* Xtensa relocations for ELF shared objects. */ -+ BFD_RELOC_XTENSA_GLOB_DAT, -+ BFD_RELOC_XTENSA_JMP_SLOT, -+ BFD_RELOC_XTENSA_RELATIVE, -+ -+/* Xtensa relocation used in ELF object files for symbols that may require -+PLT entries. Otherwise, this is just a generic 32-bit relocation. */ -+ BFD_RELOC_XTENSA_PLT, -+ -+/* Xtensa relocations to mark the difference of two local symbols. -+These are only needed to support linker relaxation and can be ignored -+when not relaxing. The field is set to the value of the difference -+assuming no relaxation. The relocation encodes the position of the -+first symbol so the linker can determine whether to adjust the field -+value. */ -+ BFD_RELOC_XTENSA_DIFF8, -+ BFD_RELOC_XTENSA_DIFF16, -+ BFD_RELOC_XTENSA_DIFF32, -+ -+/* Generic Xtensa relocations for instruction operands. Only the slot -+number is encoded in the relocation. The relocation applies to the -+last PC-relative immediate operand, or if there are no PC-relative -+immediates, to the last immediate operand. 
*/ -+ BFD_RELOC_XTENSA_SLOT0_OP, -+ BFD_RELOC_XTENSA_SLOT1_OP, -+ BFD_RELOC_XTENSA_SLOT2_OP, -+ BFD_RELOC_XTENSA_SLOT3_OP, -+ BFD_RELOC_XTENSA_SLOT4_OP, -+ BFD_RELOC_XTENSA_SLOT5_OP, -+ BFD_RELOC_XTENSA_SLOT6_OP, -+ BFD_RELOC_XTENSA_SLOT7_OP, -+ BFD_RELOC_XTENSA_SLOT8_OP, -+ BFD_RELOC_XTENSA_SLOT9_OP, -+ BFD_RELOC_XTENSA_SLOT10_OP, -+ BFD_RELOC_XTENSA_SLOT11_OP, -+ BFD_RELOC_XTENSA_SLOT12_OP, -+ BFD_RELOC_XTENSA_SLOT13_OP, -+ BFD_RELOC_XTENSA_SLOT14_OP, -+ -+/* Alternate Xtensa relocations. Only the slot is encoded in the -+relocation. The meaning of these relocations is opcode-specific. */ -+ BFD_RELOC_XTENSA_SLOT0_ALT, -+ BFD_RELOC_XTENSA_SLOT1_ALT, -+ BFD_RELOC_XTENSA_SLOT2_ALT, -+ BFD_RELOC_XTENSA_SLOT3_ALT, -+ BFD_RELOC_XTENSA_SLOT4_ALT, -+ BFD_RELOC_XTENSA_SLOT5_ALT, -+ BFD_RELOC_XTENSA_SLOT6_ALT, -+ BFD_RELOC_XTENSA_SLOT7_ALT, -+ BFD_RELOC_XTENSA_SLOT8_ALT, -+ BFD_RELOC_XTENSA_SLOT9_ALT, -+ BFD_RELOC_XTENSA_SLOT10_ALT, -+ BFD_RELOC_XTENSA_SLOT11_ALT, -+ BFD_RELOC_XTENSA_SLOT12_ALT, -+ BFD_RELOC_XTENSA_SLOT13_ALT, -+ BFD_RELOC_XTENSA_SLOT14_ALT, -+ -+/* Xtensa relocations for backward compatibility. These have all been -+replaced by BFD_RELOC_XTENSA_SLOT0_OP. */ -+ BFD_RELOC_XTENSA_OP0, -+ BFD_RELOC_XTENSA_OP1, -+ BFD_RELOC_XTENSA_OP2, -+ -+/* Xtensa relocation to mark that the assembler expanded the -+instructions from an original target. The expansion size is -+encoded in the reloc size. */ -+ BFD_RELOC_XTENSA_ASM_EXPAND, -+ -+/* Xtensa relocation to mark that the linker should simplify -+assembler-expanded instructions. This is commonly used -+internally by the linker after analysis of a -+BFD_RELOC_XTENSA_ASM_EXPAND. */ -+ BFD_RELOC_XTENSA_ASM_SIMPLIFY, -+ -+/* 8 bit signed offset in (ix+d) or (iy+d). */ -+ BFD_RELOC_Z80_DISP8, -+ -+/* DJNZ offset. */ -+ BFD_RELOC_Z8K_DISP7, -+ -+/* CALR offset. */ -+ BFD_RELOC_Z8K_CALLR, -+ -+/* 4 bit value. */ -+ BFD_RELOC_Z8K_IMM4L, -+ BFD_RELOC_UNUSED }; -+typedef enum bfd_reloc_code_real bfd_reloc_code_real_type; -+reloc_howto_type *bfd_reloc_type_lookup -+ (bfd *abfd, bfd_reloc_code_real_type code); -+ -+const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code); -+ -+/* Extracted from syms.c. */ -+ -+typedef struct bfd_symbol -+{ -+ /* A pointer to the BFD which owns the symbol. This information -+ is necessary so that a back end can work out what additional -+ information (invisible to the application writer) is carried -+ with the symbol. -+ -+ This field is *almost* redundant, since you can use section->owner -+ instead, except that some symbols point to the global sections -+ bfd_{abs,com,und}_section. This could be fixed by making -+ these globals be per-bfd (or per-target-flavor). FIXME. */ -+ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */ -+ -+ /* The text of the symbol. The name is left alone, and not copied; the -+ application may not alter it. */ -+ const char *name; -+ -+ /* The value of the symbol. This really should be a union of a -+ numeric value with a pointer, since some flags indicate that -+ a pointer to another symbol is stored here. */ -+ symvalue value; -+ -+ /* Attributes of a symbol. */ -+#define BSF_NO_FLAGS 0x00 -+ -+ /* The symbol has local scope; <> in <>. The value -+ is the offset into the section of the data. */ -+#define BSF_LOCAL 0x01 -+ -+ /* The symbol has global scope; initialized data in <>. The -+ value is the offset into the section of the data. */ -+#define BSF_GLOBAL 0x02 -+ -+ /* The symbol has global scope and is exported. 
The value is -+ the offset into the section of the data. */ -+#define BSF_EXPORT BSF_GLOBAL /* No real difference. */ -+ -+ /* A normal C symbol would be one of: -+ <>, <>, <> or -+ <>. */ -+ -+ /* The symbol is a debugging record. The value has an arbitrary -+ meaning, unless BSF_DEBUGGING_RELOC is also set. */ -+#define BSF_DEBUGGING 0x08 -+ -+ /* The symbol denotes a function entry point. Used in ELF, -+ perhaps others someday. */ -+#define BSF_FUNCTION 0x10 -+ -+ /* Used by the linker. */ -+#define BSF_KEEP 0x20 -+#define BSF_KEEP_G 0x40 -+ -+ /* A weak global symbol, overridable without warnings by -+ a regular global symbol of the same name. */ -+#define BSF_WEAK 0x80 -+ -+ /* This symbol was created to point to a section, e.g. ELF's -+ STT_SECTION symbols. */ -+#define BSF_SECTION_SYM 0x100 -+ -+ /* The symbol used to be a common symbol, but now it is -+ allocated. */ -+#define BSF_OLD_COMMON 0x200 -+ -+ /* The default value for common data. */ -+#define BFD_FORT_COMM_DEFAULT_VALUE 0 -+ -+ /* In some files the type of a symbol sometimes alters its -+ location in an output file - ie in coff a <> symbol -+ which is also <> symbol appears where it was -+ declared and not at the end of a section. This bit is set -+ by the target BFD part to convey this information. */ -+#define BSF_NOT_AT_END 0x400 -+ -+ /* Signal that the symbol is the label of constructor section. */ -+#define BSF_CONSTRUCTOR 0x800 -+ -+ /* Signal that the symbol is a warning symbol. The name is a -+ warning. The name of the next symbol is the one to warn about; -+ if a reference is made to a symbol with the same name as the next -+ symbol, a warning is issued by the linker. */ -+#define BSF_WARNING 0x1000 -+ -+ /* Signal that the symbol is indirect. This symbol is an indirect -+ pointer to the symbol with the same name as the next symbol. */ -+#define BSF_INDIRECT 0x2000 -+ -+ /* BSF_FILE marks symbols that contain a file name. This is used -+ for ELF STT_FILE symbols. */ -+#define BSF_FILE 0x4000 -+ -+ /* Symbol is from dynamic linking information. */ -+#define BSF_DYNAMIC 0x8000 -+ -+ /* The symbol denotes a data object. Used in ELF, and perhaps -+ others someday. */ -+#define BSF_OBJECT 0x10000 -+ -+ /* This symbol is a debugging symbol. The value is the offset -+ into the section of the data. BSF_DEBUGGING should be set -+ as well. */ -+#define BSF_DEBUGGING_RELOC 0x20000 -+ -+ /* This symbol is thread local. Used in ELF. */ -+#define BSF_THREAD_LOCAL 0x40000 -+ -+ flagword flags; -+ -+ /* A pointer to the section to which this symbol is -+ relative. This will always be non NULL, there are special -+ sections for undefined and absolute symbols. */ -+ struct bfd_section *section; -+ -+ /* Back end special data. 
*/ -+ union -+ { -+ void *p; -+ bfd_vma i; -+ } -+ udata; -+} -+asymbol; -+ -+#define bfd_get_symtab_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd)) -+ -+bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym); -+ -+bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name); -+ -+#define bfd_is_local_label_name(abfd, name) \ -+ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name)) -+ -+bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym); -+ -+#define bfd_is_target_special_symbol(abfd, sym) \ -+ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym)) -+ -+#define bfd_canonicalize_symtab(abfd, location) \ -+ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location)) -+ -+bfd_boolean bfd_set_symtab -+ (bfd *abfd, asymbol **location, unsigned int count); -+ -+void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol); -+ -+#define bfd_make_empty_symbol(abfd) \ -+ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd)) -+ -+asymbol *_bfd_generic_make_empty_symbol (bfd *); -+ -+#define bfd_make_debug_symbol(abfd,ptr,size) \ -+ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size)) -+ -+int bfd_decode_symclass (asymbol *symbol); -+ -+bfd_boolean bfd_is_undefined_symclass (int symclass); -+ -+void bfd_symbol_info (asymbol *symbol, symbol_info *ret); -+ -+bfd_boolean bfd_copy_private_symbol_data -+ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym); -+ -+#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \ -+ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \ -+ (ibfd, isymbol, obfd, osymbol)) -+ -+/* Extracted from bfd.c. */ -+struct bfd -+{ -+ /* A unique identifier of the BFD */ -+ unsigned int id; -+ -+ /* The filename the application opened the BFD with. */ -+ const char *filename; -+ -+ /* A pointer to the target jump table. */ -+ const struct bfd_target *xvec; -+ -+ /* The IOSTREAM, and corresponding IO vector that provide access -+ to the file backing the BFD. */ -+ void *iostream; -+ const struct bfd_iovec *iovec; -+ -+ /* Is the file descriptor being cached? That is, can it be closed as -+ needed, and re-opened when accessed later? */ -+ bfd_boolean cacheable; -+ -+ /* Marks whether there was a default target specified when the -+ BFD was opened. This is used to select which matching algorithm -+ to use to choose the back end. */ -+ bfd_boolean target_defaulted; -+ -+ /* The caching routines use these to maintain a -+ least-recently-used list of BFDs. */ -+ struct bfd *lru_prev, *lru_next; -+ -+ /* When a file is closed by the caching routines, BFD retains -+ state information on the file here... */ -+ ufile_ptr where; -+ -+ /* ... and here: (``once'' means at least once). */ -+ bfd_boolean opened_once; -+ -+ /* Set if we have a locally maintained mtime value, rather than -+ getting it from the file each time. */ -+ bfd_boolean mtime_set; -+ -+ /* File modified time, if mtime_set is TRUE. */ -+ long mtime; -+ -+ /* Reserved for an unimplemented file locking extension. */ -+ int ifd; -+ -+ /* The format which belongs to the BFD. (object, core, etc.) */ -+ bfd_format format; -+ -+ /* The direction with which the BFD was opened. */ -+ enum bfd_direction -+ { -+ no_direction = 0, -+ read_direction = 1, -+ write_direction = 2, -+ both_direction = 3 -+ } -+ direction; -+ -+ /* Format_specific flags. */ -+ flagword flags; -+ -+ /* Currently my_archive is tested before adding origin to -+ anything. I believe that this can become always an add of -+ origin, with origin set to 0 for non archive files. 
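/* A minimal usage sketch of the symbol interface declared above:
   bfd_get_symtab_upper_bound() sizes a buffer, bfd_canonicalize_symtab()
   fills it with asymbol pointers, and the BSF_* flags classify each
   entry.  The helper name report_global_functions, the stdio output and
   the bare-bones error handling are illustrative assumptions, not part
   of bfd.h; abfd is assumed to be an object file already opened for
   reading after bfd_init().  */

#include <stdio.h>
#include <stdlib.h>
#include "bfd.h"

static void
report_global_functions (bfd *abfd)
{
  long storage = bfd_get_symtab_upper_bound (abfd);
  if (storage <= 0)
    return;                        /* No symbols, or an error occurred.  */

  asymbol **syms = malloc (storage);
  if (syms == NULL)
    return;

  long count = bfd_canonicalize_symtab (abfd, syms);
  for (long i = 0; i < count; i++)
    {
      /* Keep only global function symbols, per the BSF_* comments above.  */
      if ((syms[i]->flags & BSF_FUNCTION) && (syms[i]->flags & BSF_GLOBAL))
        printf ("%s = 0x%lx in %s\n",
                syms[i]->name,
                (unsigned long) syms[i]->value,
                syms[i]->section->name);
    }
  free (syms);
}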
*/ -+ ufile_ptr origin; -+ -+ /* Remember when output has begun, to stop strange things -+ from happening. */ -+ bfd_boolean output_has_begun; -+ -+ /* A hash table for section names. */ -+ struct bfd_hash_table section_htab; -+ -+ /* Pointer to linked list of sections. */ -+ struct bfd_section *sections; -+ -+ /* The last section on the section list. */ -+ struct bfd_section *section_last; -+ -+ /* The number of sections. */ -+ unsigned int section_count; -+ -+ /* Stuff only useful for object files: -+ The start address. */ -+ bfd_vma start_address; -+ -+ /* Used for input and output. */ -+ unsigned int symcount; -+ -+ /* Symbol table for output BFD (with symcount entries). */ -+ struct bfd_symbol **outsymbols; -+ -+ /* Used for slurped dynamic symbol tables. */ -+ unsigned int dynsymcount; -+ -+ /* Pointer to structure which contains architecture information. */ -+ const struct bfd_arch_info *arch_info; -+ -+ /* Flag set if symbols from this BFD should not be exported. */ -+ bfd_boolean no_export; -+ -+ /* Stuff only useful for archives. */ -+ void *arelt_data; -+ struct bfd *my_archive; /* The containing archive BFD. */ -+ struct bfd *next; /* The next BFD in the archive. */ -+ struct bfd *archive_head; /* The first BFD in the archive. */ -+ bfd_boolean has_armap; -+ -+ /* A chain of BFD structures involved in a link. */ -+ struct bfd *link_next; -+ -+ /* A field used by _bfd_generic_link_add_archive_symbols. This will -+ be used only for archive elements. */ -+ int archive_pass; -+ -+ /* Used by the back end to hold private data. */ -+ union -+ { -+ struct aout_data_struct *aout_data; -+ struct artdata *aout_ar_data; -+ struct _oasys_data *oasys_obj_data; -+ struct _oasys_ar_data *oasys_ar_data; -+ struct coff_tdata *coff_obj_data; -+ struct pe_tdata *pe_obj_data; -+ struct xcoff_tdata *xcoff_obj_data; -+ struct ecoff_tdata *ecoff_obj_data; -+ struct ieee_data_struct *ieee_data; -+ struct ieee_ar_data_struct *ieee_ar_data; -+ struct srec_data_struct *srec_data; -+ struct ihex_data_struct *ihex_data; -+ struct tekhex_data_struct *tekhex_data; -+ struct elf_obj_tdata *elf_obj_data; -+ struct nlm_obj_tdata *nlm_obj_data; -+ struct bout_data_struct *bout_data; -+ struct mmo_data_struct *mmo_data; -+ struct sun_core_struct *sun_core_data; -+ struct sco5_core_struct *sco5_core_data; -+ struct trad_core_struct *trad_core_data; -+ struct som_data_struct *som_data; -+ struct hpux_core_struct *hpux_core_data; -+ struct hppabsd_core_struct *hppabsd_core_data; -+ struct sgi_core_struct *sgi_core_data; -+ struct lynx_core_struct *lynx_core_data; -+ struct osf_core_struct *osf_core_data; -+ struct cisco_core_struct *cisco_core_data; -+ struct versados_data_struct *versados_data; -+ struct netbsd_core_struct *netbsd_core_data; -+ struct mach_o_data_struct *mach_o_data; -+ struct mach_o_fat_data_struct *mach_o_fat_data; -+ struct bfd_pef_data_struct *pef_data; -+ struct bfd_pef_xlib_data_struct *pef_xlib_data; -+ struct bfd_sym_data_struct *sym_data; -+ void *any; -+ } -+ tdata; -+ -+ /* Used by the application to hold private data. */ -+ void *usrdata; -+ -+ /* Where all the allocated stuff under this BFD goes. This is a -+ struct objalloc *, but we use void * to avoid requiring the inclusion -+ of objalloc.h. 
*/ -+ void *memory; -+}; -+ -+typedef enum bfd_error -+{ -+ bfd_error_no_error = 0, -+ bfd_error_system_call, -+ bfd_error_invalid_target, -+ bfd_error_wrong_format, -+ bfd_error_wrong_object_format, -+ bfd_error_invalid_operation, -+ bfd_error_no_memory, -+ bfd_error_no_symbols, -+ bfd_error_no_armap, -+ bfd_error_no_more_archived_files, -+ bfd_error_malformed_archive, -+ bfd_error_file_not_recognized, -+ bfd_error_file_ambiguously_recognized, -+ bfd_error_no_contents, -+ bfd_error_nonrepresentable_section, -+ bfd_error_no_debug_section, -+ bfd_error_bad_value, -+ bfd_error_file_truncated, -+ bfd_error_file_too_big, -+ bfd_error_invalid_error_code -+} -+bfd_error_type; -+ -+bfd_error_type bfd_get_error (void); -+ -+void bfd_set_error (bfd_error_type error_tag); -+ -+const char *bfd_errmsg (bfd_error_type error_tag); -+ -+void bfd_perror (const char *message); -+ -+typedef void (*bfd_error_handler_type) (const char *, ...); -+ -+bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type); -+ -+void bfd_set_error_program_name (const char *); -+ -+bfd_error_handler_type bfd_get_error_handler (void); -+ -+long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect); -+ -+long bfd_canonicalize_reloc -+ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms); -+ -+void bfd_set_reloc -+ (bfd *abfd, asection *sec, arelent **rel, unsigned int count); -+ -+bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags); -+ -+int bfd_get_arch_size (bfd *abfd); -+ -+int bfd_get_sign_extend_vma (bfd *abfd); -+ -+bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma); -+ -+unsigned int bfd_get_gp_size (bfd *abfd); -+ -+void bfd_set_gp_size (bfd *abfd, unsigned int i); -+ -+bfd_vma bfd_scan_vma (const char *string, const char **end, int base); -+ -+bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_copy_private_header_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_copy_private_header_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_copy_private_bfd_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_merge_private_bfd_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags); -+ -+#define bfd_set_private_flags(abfd, flags) \ -+ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags)) -+#define bfd_sizeof_headers(abfd, reloc) \ -+ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc)) -+ -+#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \ -+ BFD_SEND (abfd, _bfd_find_nearest_line, \ -+ (abfd, sec, syms, off, file, func, line)) -+ -+#define bfd_find_line(abfd, syms, sym, file, line) \ -+ BFD_SEND (abfd, _bfd_find_line, \ -+ (abfd, syms, sym, file, line)) -+ -+#define bfd_find_inliner_info(abfd, file, func, line) \ -+ BFD_SEND (abfd, _bfd_find_inliner_info, \ -+ (abfd, file, func, line)) -+ -+#define bfd_debug_info_start(abfd) \ -+ BFD_SEND (abfd, _bfd_debug_info_start, (abfd)) -+ -+#define bfd_debug_info_end(abfd) \ -+ BFD_SEND (abfd, _bfd_debug_info_end, (abfd)) -+ -+#define bfd_debug_info_accumulate(abfd, section) \ -+ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section)) -+ -+#define bfd_stat_arch_elt(abfd, stat) \ -+ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat)) -+ -+#define bfd_update_armap_timestamp(abfd) \ -+ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd)) -+ -+#define 
bfd_set_arch_mach(abfd, arch, mach)\ -+ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach)) -+ -+#define bfd_relax_section(abfd, section, link_info, again) \ -+ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again)) -+ -+#define bfd_gc_sections(abfd, link_info) \ -+ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info)) -+ -+#define bfd_merge_sections(abfd, link_info) \ -+ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info)) -+ -+#define bfd_is_group_section(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec)) -+ -+#define bfd_discard_group(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec)) -+ -+#define bfd_link_hash_table_create(abfd) \ -+ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd)) -+ -+#define bfd_link_hash_table_free(abfd, hash) \ -+ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash)) -+ -+#define bfd_link_add_symbols(abfd, info) \ -+ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info)) -+ -+#define bfd_link_just_syms(abfd, sec, info) \ -+ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info)) -+ -+#define bfd_final_link(abfd, info) \ -+ BFD_SEND (abfd, _bfd_final_link, (abfd, info)) -+ -+#define bfd_free_cached_info(abfd) \ -+ BFD_SEND (abfd, _bfd_free_cached_info, (abfd)) -+ -+#define bfd_get_dynamic_symtab_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd)) -+ -+#define bfd_print_private_bfd_data(abfd, file)\ -+ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file)) -+ -+#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \ -+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols)) -+ -+#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \ -+ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \ -+ dyncount, dynsyms, ret)) -+ -+#define bfd_get_dynamic_reloc_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd)) -+ -+#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \ -+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms)) -+ -+extern bfd_byte *bfd_get_relocated_section_contents -+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *, -+ bfd_boolean, asymbol **); -+ -+bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative); -+ -+struct bfd_preserve -+{ -+ void *marker; -+ void *tdata; -+ flagword flags; -+ const struct bfd_arch_info *arch_info; -+ struct bfd_section *sections; -+ struct bfd_section *section_last; -+ unsigned int section_count; -+ struct bfd_hash_table section_htab; -+}; -+ -+bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *); -+ -+void bfd_preserve_restore (bfd *, struct bfd_preserve *); -+ -+void bfd_preserve_finish (bfd *, struct bfd_preserve *); -+ -+/* Extracted from archive.c. */ -+symindex bfd_get_next_mapent -+ (bfd *abfd, symindex previous, carsym **sym); -+ -+bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head); -+ -+bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous); -+ -+/* Extracted from corefile.c. */ -+const char *bfd_core_file_failing_command (bfd *abfd); -+ -+int bfd_core_file_failing_signal (bfd *abfd); -+ -+bfd_boolean core_file_matches_executable_p -+ (bfd *core_bfd, bfd *exec_bfd); -+ -+/* Extracted from targets.c. */ -+#define BFD_SEND(bfd, message, arglist) \ -+ ((*((bfd)->xvec->message)) arglist) -+ -+#ifdef DEBUG_BFD_SEND -+#undef BFD_SEND -+#define BFD_SEND(bfd, message, arglist) \ -+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? 
\ -+ ((*((bfd)->xvec->message)) arglist) : \ -+ (bfd_assert (__FILE__,__LINE__), NULL)) -+#endif -+#define BFD_SEND_FMT(bfd, message, arglist) \ -+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) -+ -+#ifdef DEBUG_BFD_SEND -+#undef BFD_SEND_FMT -+#define BFD_SEND_FMT(bfd, message, arglist) \ -+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \ -+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \ -+ (bfd_assert (__FILE__,__LINE__), NULL)) -+#endif -+ -+enum bfd_flavour -+{ -+ bfd_target_unknown_flavour, -+ bfd_target_aout_flavour, -+ bfd_target_coff_flavour, -+ bfd_target_ecoff_flavour, -+ bfd_target_xcoff_flavour, -+ bfd_target_elf_flavour, -+ bfd_target_ieee_flavour, -+ bfd_target_nlm_flavour, -+ bfd_target_oasys_flavour, -+ bfd_target_tekhex_flavour, -+ bfd_target_srec_flavour, -+ bfd_target_ihex_flavour, -+ bfd_target_som_flavour, -+ bfd_target_os9k_flavour, -+ bfd_target_versados_flavour, -+ bfd_target_msdos_flavour, -+ bfd_target_ovax_flavour, -+ bfd_target_evax_flavour, -+ bfd_target_mmo_flavour, -+ bfd_target_mach_o_flavour, -+ bfd_target_pef_flavour, -+ bfd_target_pef_xlib_flavour, -+ bfd_target_sym_flavour -+}; -+ -+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN }; -+ -+/* Forward declaration. */ -+typedef struct bfd_link_info _bfd_link_info; -+ -+typedef struct bfd_target -+{ -+ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */ -+ char *name; -+ -+ /* The "flavour" of a back end is a general indication about -+ the contents of a file. */ -+ enum bfd_flavour flavour; -+ -+ /* The order of bytes within the data area of a file. */ -+ enum bfd_endian byteorder; -+ -+ /* The order of bytes within the header parts of a file. */ -+ enum bfd_endian header_byteorder; -+ -+ /* A mask of all the flags which an executable may have set - -+ from the set <>, <>, ...<>. */ -+ flagword object_flags; -+ -+ /* A mask of all the flags which a section may have set - from -+ the set <>, <>, ...<>. */ -+ flagword section_flags; -+ -+ /* The character normally found at the front of a symbol. -+ (if any), perhaps `_'. */ -+ char symbol_leading_char; -+ -+ /* The pad character for file names within an archive header. */ -+ char ar_pad_char; -+ -+ /* The maximum number of characters in an archive header. */ -+ unsigned short ar_max_namelen; -+ -+ /* Entries for byte swapping for data. These are different from the -+ other entry points, since they don't take a BFD as the first argument. -+ Certain other handlers could do the same. */ -+ bfd_uint64_t (*bfd_getx64) (const void *); -+ bfd_int64_t (*bfd_getx_signed_64) (const void *); -+ void (*bfd_putx64) (bfd_uint64_t, void *); -+ bfd_vma (*bfd_getx32) (const void *); -+ bfd_signed_vma (*bfd_getx_signed_32) (const void *); -+ void (*bfd_putx32) (bfd_vma, void *); -+ bfd_vma (*bfd_getx16) (const void *); -+ bfd_signed_vma (*bfd_getx_signed_16) (const void *); -+ void (*bfd_putx16) (bfd_vma, void *); -+ -+ /* Byte swapping for the headers. 
*/ -+ bfd_uint64_t (*bfd_h_getx64) (const void *); -+ bfd_int64_t (*bfd_h_getx_signed_64) (const void *); -+ void (*bfd_h_putx64) (bfd_uint64_t, void *); -+ bfd_vma (*bfd_h_getx32) (const void *); -+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *); -+ void (*bfd_h_putx32) (bfd_vma, void *); -+ bfd_vma (*bfd_h_getx16) (const void *); -+ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *); -+ void (*bfd_h_putx16) (bfd_vma, void *); -+ -+ /* Format dependent routines: these are vectors of entry points -+ within the target vector structure, one for each format to check. */ -+ -+ /* Check the format of a file being read. Return a <> or zero. */ -+ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *); -+ -+ /* Set the format of a file being written. */ -+ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *); -+ -+ /* Write cached information into a file being written, at <>. */ -+ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *); -+ -+ -+ /* Generic entry points. */ -+#define BFD_JUMP_TABLE_GENERIC(NAME) \ -+ NAME##_close_and_cleanup, \ -+ NAME##_bfd_free_cached_info, \ -+ NAME##_new_section_hook, \ -+ NAME##_get_section_contents, \ -+ NAME##_get_section_contents_in_window -+ -+ /* Called when the BFD is being closed to do any necessary cleanup. */ -+ bfd_boolean (*_close_and_cleanup) (bfd *); -+ /* Ask the BFD to free all cached information. */ -+ bfd_boolean (*_bfd_free_cached_info) (bfd *); -+ /* Called when a new section is created. */ -+ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr); -+ /* Read the contents of a section. */ -+ bfd_boolean (*_bfd_get_section_contents) -+ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type); -+ bfd_boolean (*_bfd_get_section_contents_in_window) -+ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type); -+ -+ /* Entry points to copy private data. */ -+#define BFD_JUMP_TABLE_COPY(NAME) \ -+ NAME##_bfd_copy_private_bfd_data, \ -+ NAME##_bfd_merge_private_bfd_data, \ -+ _bfd_generic_init_private_section_data, \ -+ NAME##_bfd_copy_private_section_data, \ -+ NAME##_bfd_copy_private_symbol_data, \ -+ NAME##_bfd_copy_private_header_data, \ -+ NAME##_bfd_set_private_flags, \ -+ NAME##_bfd_print_private_bfd_data -+ -+ /* Called to copy BFD general private data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *); -+ /* Called to merge BFD general private data from one object file -+ to a common output file when linking. */ -+ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *); -+ /* Called to initialize BFD private section data from one object file -+ to another. */ -+#define bfd_init_private_section_data(ibfd, isec, obfd, osec, link_info) \ -+ BFD_SEND (obfd, _bfd_init_private_section_data, (ibfd, isec, obfd, osec, link_info)) -+ bfd_boolean (*_bfd_init_private_section_data) -+ (bfd *, sec_ptr, bfd *, sec_ptr, struct bfd_link_info *); -+ /* Called to copy BFD private section data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_section_data) -+ (bfd *, sec_ptr, bfd *, sec_ptr); -+ /* Called to copy BFD private symbol data from one symbol -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_symbol_data) -+ (bfd *, asymbol *, bfd *, asymbol *); -+ /* Called to copy BFD private header data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_header_data) -+ (bfd *, bfd *); -+ /* Called to set private backend flags. */ -+ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword); -+ -+ /* Called to print private BFD data. 
*/ -+ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *); -+ -+ /* Core file entry points. */ -+#define BFD_JUMP_TABLE_CORE(NAME) \ -+ NAME##_core_file_failing_command, \ -+ NAME##_core_file_failing_signal, \ -+ NAME##_core_file_matches_executable_p -+ -+ char * (*_core_file_failing_command) (bfd *); -+ int (*_core_file_failing_signal) (bfd *); -+ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *); -+ -+ /* Archive entry points. */ -+#define BFD_JUMP_TABLE_ARCHIVE(NAME) \ -+ NAME##_slurp_armap, \ -+ NAME##_slurp_extended_name_table, \ -+ NAME##_construct_extended_name_table, \ -+ NAME##_truncate_arname, \ -+ NAME##_write_armap, \ -+ NAME##_read_ar_hdr, \ -+ NAME##_openr_next_archived_file, \ -+ NAME##_get_elt_at_index, \ -+ NAME##_generic_stat_arch_elt, \ -+ NAME##_update_armap_timestamp -+ -+ bfd_boolean (*_bfd_slurp_armap) (bfd *); -+ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *); -+ bfd_boolean (*_bfd_construct_extended_name_table) -+ (bfd *, char **, bfd_size_type *, const char **); -+ void (*_bfd_truncate_arname) (bfd *, const char *, char *); -+ bfd_boolean (*write_armap) -+ (bfd *, unsigned int, struct orl *, unsigned int, int); -+ void * (*_bfd_read_ar_hdr_fn) (bfd *); -+ bfd * (*openr_next_archived_file) (bfd *, bfd *); -+#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i)) -+ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex); -+ int (*_bfd_stat_arch_elt) (bfd *, struct stat *); -+ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *); -+ -+ /* Entry points used for symbols. */ -+#define BFD_JUMP_TABLE_SYMBOLS(NAME) \ -+ NAME##_get_symtab_upper_bound, \ -+ NAME##_canonicalize_symtab, \ -+ NAME##_make_empty_symbol, \ -+ NAME##_print_symbol, \ -+ NAME##_get_symbol_info, \ -+ NAME##_bfd_is_local_label_name, \ -+ NAME##_bfd_is_target_special_symbol, \ -+ NAME##_get_lineno, \ -+ NAME##_find_nearest_line, \ -+ _bfd_generic_find_line, \ -+ NAME##_find_inliner_info, \ -+ NAME##_bfd_make_debug_symbol, \ -+ NAME##_read_minisymbols, \ -+ NAME##_minisymbol_to_symbol -+ -+ long (*_bfd_get_symtab_upper_bound) (bfd *); -+ long (*_bfd_canonicalize_symtab) -+ (bfd *, struct bfd_symbol **); -+ struct bfd_symbol * -+ (*_bfd_make_empty_symbol) (bfd *); -+ void (*_bfd_print_symbol) -+ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type); -+#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e)) -+ void (*_bfd_get_symbol_info) -+ (bfd *, struct bfd_symbol *, symbol_info *); -+#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e)) -+ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *); -+ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *); -+ alent * (*_get_lineno) (bfd *, struct bfd_symbol *); -+ bfd_boolean (*_bfd_find_nearest_line) -+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma, -+ const char **, const char **, unsigned int *); -+ bfd_boolean (*_bfd_find_line) -+ (bfd *, struct bfd_symbol **, struct bfd_symbol *, -+ const char **, unsigned int *); -+ bfd_boolean (*_bfd_find_inliner_info) -+ (bfd *, const char **, const char **, unsigned int *); -+ /* Back-door to allow format-aware applications to create debug symbols -+ while using BFD for everything else. Currently used by the assembler -+ when creating COFF files. 
*/ -+ asymbol * (*_bfd_make_debug_symbol) -+ (bfd *, void *, unsigned long size); -+#define bfd_read_minisymbols(b, d, m, s) \ -+ BFD_SEND (b, _read_minisymbols, (b, d, m, s)) -+ long (*_read_minisymbols) -+ (bfd *, bfd_boolean, void **, unsigned int *); -+#define bfd_minisymbol_to_symbol(b, d, m, f) \ -+ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f)) -+ asymbol * (*_minisymbol_to_symbol) -+ (bfd *, bfd_boolean, const void *, asymbol *); -+ -+ /* Routines for relocs. */ -+#define BFD_JUMP_TABLE_RELOCS(NAME) \ -+ NAME##_get_reloc_upper_bound, \ -+ NAME##_canonicalize_reloc, \ -+ NAME##_bfd_reloc_type_lookup -+ -+ long (*_get_reloc_upper_bound) (bfd *, sec_ptr); -+ long (*_bfd_canonicalize_reloc) -+ (bfd *, sec_ptr, arelent **, struct bfd_symbol **); -+ /* See documentation on reloc types. */ -+ reloc_howto_type * -+ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type); -+ -+ /* Routines used when writing an object file. */ -+#define BFD_JUMP_TABLE_WRITE(NAME) \ -+ NAME##_set_arch_mach, \ -+ NAME##_set_section_contents -+ -+ bfd_boolean (*_bfd_set_arch_mach) -+ (bfd *, enum bfd_architecture, unsigned long); -+ bfd_boolean (*_bfd_set_section_contents) -+ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type); -+ -+ /* Routines used by the linker. */ -+#define BFD_JUMP_TABLE_LINK(NAME) \ -+ NAME##_sizeof_headers, \ -+ NAME##_bfd_get_relocated_section_contents, \ -+ NAME##_bfd_relax_section, \ -+ NAME##_bfd_link_hash_table_create, \ -+ NAME##_bfd_link_hash_table_free, \ -+ NAME##_bfd_link_add_symbols, \ -+ NAME##_bfd_link_just_syms, \ -+ NAME##_bfd_final_link, \ -+ NAME##_bfd_link_split_section, \ -+ NAME##_bfd_gc_sections, \ -+ NAME##_bfd_merge_sections, \ -+ NAME##_bfd_is_group_section, \ -+ NAME##_bfd_discard_group, \ -+ NAME##_section_already_linked \ -+ -+ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean); -+ bfd_byte * (*_bfd_get_relocated_section_contents) -+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, -+ bfd_byte *, bfd_boolean, struct bfd_symbol **); -+ -+ bfd_boolean (*_bfd_relax_section) -+ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *); -+ -+ /* Create a hash table for the linker. Different backends store -+ different information in this table. */ -+ struct bfd_link_hash_table * -+ (*_bfd_link_hash_table_create) (bfd *); -+ -+ /* Release the memory associated with the linker hash table. */ -+ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *); -+ -+ /* Add symbols from this object file into the hash table. */ -+ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *); -+ -+ /* Indicate that we are only retrieving symbol values from this section. */ -+ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *); -+ -+ /* Do a link based on the link_order structures attached to each -+ section of the BFD. */ -+ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *); -+ -+ /* Should this section be split up into smaller pieces during linking. */ -+ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *); -+ -+ /* Remove sections that are not referenced from the output. */ -+ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *); -+ -+ /* Attempt to merge SEC_MERGE sections. */ -+ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *); -+ -+ /* Is this section a member of a group? */ -+ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *); -+ -+ /* Discard members of a group. 
*/ -+ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *); -+ -+ /* Check if SEC has been already linked during a reloceatable or -+ final link. */ -+ void (*_section_already_linked) (bfd *, struct bfd_section *); -+ -+ /* Routines to handle dynamic symbols and relocs. */ -+#define BFD_JUMP_TABLE_DYNAMIC(NAME) \ -+ NAME##_get_dynamic_symtab_upper_bound, \ -+ NAME##_canonicalize_dynamic_symtab, \ -+ NAME##_get_synthetic_symtab, \ -+ NAME##_get_dynamic_reloc_upper_bound, \ -+ NAME##_canonicalize_dynamic_reloc -+ -+ /* Get the amount of memory required to hold the dynamic symbols. */ -+ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *); -+ /* Read in the dynamic symbols. */ -+ long (*_bfd_canonicalize_dynamic_symtab) -+ (bfd *, struct bfd_symbol **); -+ /* Create synthetized symbols. */ -+ long (*_bfd_get_synthetic_symtab) -+ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **, -+ struct bfd_symbol **); -+ /* Get the amount of memory required to hold the dynamic relocs. */ -+ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *); -+ /* Read in the dynamic relocs. */ -+ long (*_bfd_canonicalize_dynamic_reloc) -+ (bfd *, arelent **, struct bfd_symbol **); -+ -+ /* Opposite endian version of this target. */ -+ const struct bfd_target * alternative_target; -+ -+ /* Data for use by back-end routines, which isn't -+ generic enough to belong in this structure. */ -+ const void *backend_data; -+ -+} bfd_target; -+ -+bfd_boolean bfd_set_default_target (const char *name); -+ -+const bfd_target *bfd_find_target (const char *target_name, bfd *abfd); -+ -+const char ** bfd_target_list (void); -+ -+const bfd_target *bfd_search_for_target -+ (int (*search_func) (const bfd_target *, void *), -+ void *); -+ -+/* Extracted from format.c. */ -+bfd_boolean bfd_check_format (bfd *abfd, bfd_format format); -+ -+bfd_boolean bfd_check_format_matches -+ (bfd *abfd, bfd_format format, char ***matching); -+ -+bfd_boolean bfd_set_format (bfd *abfd, bfd_format format); -+ -+const char *bfd_format_string (bfd_format format); -+ -+/* Extracted from linker.c. */ -+bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec); -+ -+#define bfd_link_split_section(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec)) -+ -+void bfd_section_already_linked (bfd *abfd, asection *sec); -+ -+#define bfd_section_already_linked(abfd, sec) \ -+ BFD_SEND (abfd, _section_already_linked, (abfd, sec)) -+ -+/* Extracted from simple.c. */ -+bfd_byte *bfd_simple_get_relocated_section_contents -+ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table); -+ -+#ifdef __cplusplus -+} -+#endif -+#endif ---- /dev/null -+++ b/arch/ia64/include/asm/kdb.h -@@ -0,0 +1,50 @@ -+#ifndef _ASM_KDB_H -+#define _ASM_KDB_H -+ -+/* -+ * Kernel Debugger Architecture Dependent Global Headers -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2008 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+/* -+ * KDB_ENTER() is a macro which causes entry into the kernel -+ * debugger from any point in the kernel code stream. If it -+ * is intended to be used from interrupt level, it must use -+ * a non-maskable entry method. 
-+ */ -+#include <asm/kdb_break.h> /* break numbers are separated for CONFIG_KDB_LOCK */ -+#define __KDB_ENTER2(b) asm("\tbreak.m "#b"\n") -+#define __KDB_ENTER1(b) __KDB_ENTER2(b) -+#define KDB_ENTER() do {if (kdb_on && !KDB_IS_RUNNING()) { __KDB_ENTER1(KDB_BREAK_ENTER); }} while(0) -+#define KDB_ENTER_SLAVE() do {if (kdb_on) { __KDB_ENTER1(KDB_BREAK_ENTER_SLAVE); }} while(0) -+ -+ /* -+ * Needed for exported symbols. -+ */ -+typedef unsigned long kdb_machreg_t; -+ -+#define kdb_machreg_fmt "0x%lx" -+#define kdb_machreg_fmt0 "0x%016lx" -+#define kdb_bfd_vma_fmt "0x%lx" -+#define kdb_bfd_vma_fmt0 "0x%016lx" -+#define kdb_elfw_addr_fmt "0x%lx" -+#define kdb_elfw_addr_fmt0 "0x%016lx" -+#define kdb_f_count_fmt "%ld" -+ -+static inline unsigned long -+kdba_funcptr_value(void *fp) -+{ -+ /* ia64 function descriptor, first word is address of code */ -+ return *(unsigned long *)fp; -+} -+ -+#ifdef CONFIG_SMP -+#define kdba_giveback_vector(vector) (0) -+#endif -+ -+#endif /* !_ASM_KDB_H */ ---- /dev/null -+++ b/arch/ia64/include/asm/kdb_break.h -@@ -0,0 +1,24 @@ -+#ifndef _ASM_KDB_BREAK_H -+#define _ASM_KDB_BREAK_H -+ -+/* -+ * Kernel Debugger Architecture Dependent Global Headers -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+/* -+ * Break numbers are used by CONFIG_KDB_LOCK code. They need to be separated -+ * from asm/kdb.h to let spinlock code build without pulling in all of the kdb -+ * headers. -+ */ -+ -+#define KDB_BREAK_BREAK 0x80100 /* kdb breakpoint in kernel */ -+#define KDB_BREAK_ENTER 0x80101 /* KDB_ENTER(), single event or monarch */ -+#define KDB_BREAK_ENTER_SLAVE 0x80102 /* KDB_ENTER_SLAVE(), concurrent slave events */ -+ -+#endif /* !_ASM_KDB_BREAK_H */ ---- /dev/null -+++ b/arch/ia64/include/asm/kdbprivate.h -@@ -0,0 +1,124 @@ -+#ifndef _ASM_KDBPRIVATE_H -+#define _ASM_KDBPRIVATE_H -+ -+/* -+ * Kernel Debugger Architecture Dependent Private Headers -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+/* Definition of a machine instruction. -+ * Takes care of VLIW processors like Itanium -+ */ -+ -+typedef struct { -+ unsigned long inst[2]; -+} kdb_machinst_t; -+ -+/* -+ * KDB_MAXBPT describes the total number of breakpoints -+ * supported by this architecture. -+ */ -+#define KDB_MAXBPT 16 -+ -+/* -+ * KDB_MAXHARDBPT describes the total number of hardware -+ * breakpoint registers that exist.
-+ */ -+#define KDB_MAXHARDBPT 4 -+ -+/* -+ * Platform specific environment entries -+ */ -+#define KDB_PLATFORM_ENV "IDMODE=ia64", "BYTESPERWORD=4", "IDCOUNT=8" -+ -+/* -+ * Support for IA64 debug registers -+ */ -+typedef struct _kdbhard_bp { -+ kdb_machreg_t bph_reg; /* Register this breakpoint uses */ -+ -+ unsigned int bph_free:1; /* Register available for use */ -+ unsigned int bph_data:1; /* Data Access breakpoint */ -+ -+ unsigned int bph_write:1; /* Write Data breakpoint */ -+ unsigned int bph_mode:2; /* 0=inst, 1=write, 2=io, 3=read */ -+ unsigned int bph_length:2; /* 0=1, 1=2, 2=BAD, 3=4 (bytes) */ -+} kdbhard_bp_t; -+ -+#define getprsregs(regs) ((struct switch_stack *)regs -1) -+ -+/* bkpt support using break inst instead of IBP reg */ -+ -+/* -+ * Define certain specific instructions -+ */ -+#define BREAK_INSTR (long)(KDB_BREAK_BREAK << (5+6)) -+#define INST_SLOT0_MASK (0x1ffffffffffL << 5) -+ -+#define BKPTMODE_DATAR 3 -+#define BKPTMODE_IO 2 -+#define BKPTMODE_DATAW 1 -+#define BKPTMODE_INST 0 -+ -+/* Some of the fault registers needed by kdb but not passed with -+ * regs or switch stack. -+ */ -+typedef struct fault_regs { -+ unsigned long isr ; -+ unsigned long ifa ; -+ unsigned long iim ; -+ unsigned long itir ; -+} fault_regs_t ; -+ -+/* -+ * Support for setjmp/longjmp -+ */ -+ -+/* __jmp_buf definition copied from libc/sysdeps/unix/sysv/linux/ia64/bits/setjmp.h */ -+ -+#define _JBLEN 70 -+ -+typedef struct __kdb_jmp_buf { -+ unsigned long __jmp_buf[_JBLEN]; -+} kdb_jmp_buf __attribute__ ((aligned (16))); -+ -+extern int kdba_setjmp(kdb_jmp_buf *); -+extern void kdba_longjmp(kdb_jmp_buf *, int); -+#define kdba_setjmp kdba_setjmp -+ -+extern kdb_jmp_buf *kdbjmpbuf; -+ -+/* Arch specific data saved for running processes */ -+ -+struct kdba_running_process { -+ struct switch_stack *sw; -+}; -+ -+extern void kdba_save_running(struct kdba_running_process *, struct pt_regs *); -+extern void kdba_unsave_running(struct kdba_running_process *, struct pt_regs *); -+ -+/* kdba wrappers which want to save switch stack will call unw_init_running(). -+ * That routine only takes a void* so pack the interrupt data into a structure. -+ */ -+ -+#include /* for irqreturn_t */ -+ -+enum kdba_serial_console { -+ KDBA_SC_NONE = 0, -+ KDBA_SC_STANDARD, -+ KDBA_SC_SGI_L1, -+}; -+ -+extern enum kdba_serial_console kdba_serial_console; -+ -+#define KDB_RUNNING_PROCESS_ORIGINAL kdb_running_process_save -+extern struct kdb_running_process *kdb_running_process_save; /* [NR_CPUS] */ -+ -+extern void kdba_wait_for_cpus(void); -+ -+#endif /* !_ASM_KDBPRIVATE_H */ ---- a/arch/ia64/include/asm/kregs.h -+++ b/arch/ia64/include/asm/kregs.h -@@ -72,7 +72,7 @@ - /* A mask of PSR bits that we generally don't want to inherit across a clone2() or an - execve(). Only list flags here that need to be cleared/set for BOTH clone2() and - execve(). */ --#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \ -+#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_LP | \ - IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \ - IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA) - #define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP) ---- /dev/null -+++ b/arch/ia64/kdb/ChangeLog -@@ -0,0 +1,1111 @@ -+2008-11-26 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc6-ia64-1. -+ -+2008-11-12 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc4-ia64-1. -+ -+2008-11-04 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc3-ia64-1. -+ -+2008-10-29 Jay Lan -+ -+ * "Commandeer vector 0xfe for KDB_VECTOR", version 2. 
-+ Cliff Wickman -+ * kdb-v4.4-2.6.28-rc2-ia64-2. -+ -+2008-10-27 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc2-ia64-1. -+ -+2008-10-20 Jay Lan -+ -+ * kdb-v4.4-2.6.27-ia64-1. -+ -+2008-09-30 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc8-ia64-1. -+ -+2008-09-22 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc7-ia64-1. -+ -+2008-09-03 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc5-ia64-1. -+ -+2008-08-19 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc3-ia64-1. -+ -+2008-08-15 Jay Lan -+ -+ * Fix a problem that slave cpus panic'ed during NMI. -+ Jay Lan -+ * kdb-v4.4-2.6.27-rc2-ia64-2.1. -+ -+2008-08-14 Jay Lan -+ -+ * Support 'kdump' command to take a kdump vmcore from KDB, -+ Dan Aloni (da-x@monatomic.org), -+ Jason Xiao (jidong.xiao@gmail.com), -+ Jay Lan (jlan@sgi.com) -+ * kdb-v4.4-2.6.27-rc2-ia64-2. -+ -+2008-08-06 Jay Lan -+ -+ * Fix up the NULL pointer dereference issue in ohci_kdb_poll_char, -+ Jason Xiao -+ * kdb-v4.4-2.6.27-rc2-ia64-1. -+ -+2008-07-18 Jay Lan -+ -+ * support Hardware Breakpoint (bph/bpha) commands -+ IA64: Greg Banks -+ X86: Konstantin Baydarov -+ * kdb-v4.4-2.6.26-ia64-2. -+ -+2008-07-14 Jay Lan -+ -+ * kdb-v4.4-2.6.26-ia64-1. -+ -+2008-07-11 Jay Lan -+ -+ * New commands and some fixups and enhancements, -+ Joe Korty -+ John Blackwood -+ Jim Houston -+ - Use the non-sleeping copy_from_user_atomic. -+ - Enhance kdb_cmderror diagnostic output. -+ - Expand the KDB 'duplicate command' error message. -+ - Touch NMI watchdog in various KDB busy-loops. -+ - Support IBM HS20 Blade 8843 platform. -+ - Display exactly which cpus needed an NMI to get them into kdb. -+ - Better document that kdb's 'ps A' command can be used to show -+ _all_ processes and threads -+ - Suppress KDB boottime INFO messages if quiet boot. -+ - Add a KDB breakpoint to the OOPs path. -+ - Add CONFIG_DISCONTIGMEM support to kdbm_memmap. -+ - Extend the KDB task command to handle CONFIG_NUMA fields. -+ - Extend the KDB vm command to support NUMA stuff. -+ - Create the KDB mempolicy command. -+ - Create a pgdat command for KDB. -+ - Fix a hang on boot on some i386 systems. -+ * kdb-v4.4-2.6.26-rc9-ia64-1. -+ -+2008-06-30 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc8-ia64-1. -+ -+2008-06-25 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc7-ia64-1. -+ -+2008-06-06 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc5-ia64-1. -+ -+2008-05-30 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc4-ia64-1. -+ -+2008-05-20 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc3-ia64-1. -+ -+2008-05-13 Jay Lan -+ -+ * XPC support removed from KDB due to XPC changes to 2.6.26-rc1. -+ * kdb-v4.4-2.6.26-rc1-ia64-1. -+ -+2008-04-17 Jay Lan -+ -+ * kdb-v4.4-2.6.25-ia64-1. -+ -+2008-03-16 Jay Lan -+ -+ * kdb-v4.4-2.6.25-rc6-ia64-1. -+ -+2008-03-03 Jay Lan -+ -+ * kdb-v4.4-2.6.25-rc3-ia64-1. -+ -+2008-02-26 Jay Lan -+ -+ * kdb-v4.4-2.6.25-rc2-ia64-1. -+ -+2008-02-19 Jay Lan -+ -+ * kdb-v4.4-2.6.25-rc1-ia64-1. -+ -+2008-02-01 Jay Lan -+ -+ * Backed out USB UHCI support since it caused dropped characters and -+ broke OHCI. -+ * Restored "archkdbcommon" commands for x86. It was lost at the x86 -+ merge. -+ * Detecting if the HC was "busy", Aaron Young -+ * kdb-v4.4-2.6.24-ia64-2. -+ -+2008-01-29 Jay Lan -+ -+ * kdb-v4.4-2.6.24-ia64-1. -+ -+2008-01-22 Jay Lan -+ -+ * USB UHCI kdb support, Konstantin Baydarov -+ * kdb-v4.4-2.6.24-rc8-ia64-3. -+ -+2008-01-18 Jay Lan -+ -+ * USB EHCI kdb support, Aaron Young -+ * kdb-v4.4-2.6.24-rc8-ia64-2. -+ -+2008-01-18 Jay Lan -+ -+ * kdb-v4.4-2.6.24-rc8-ia64-1. -+ -+2008-01-07 Jay Lan -+ -+ * kdb-v4.4-2.6.24-rc7-ia64-1. -+ -+2007-12-21 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc6-ia64-1.
-+ -+2007-12-12 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc5-ia64-1. -+ -+2007-12-05 Jay Lan -+ -+ * Fixed a 'sysctl table check failed' problem. -+ * kdb v4.4-2.6.24-rc4-ia64-1. -+ -+2007-11-26 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc3-ia64-1. -+ -+2007-11-13 Jay Lan -+ -+ * Back ported "New KDB USB interface" from Aaron Young in -+ v4.4-2.6.23-ia64-2 to 2.6.24 kdb patchset. -+ * kdb v4.4-2.6.24-rc2-ia64-2. -+ -+2007-11-12 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc2-ia64-1. -+ -+2007-11-09 Jay Lan -+ -+ * Rebase to 2.6.24-rc1 kernel -+ * - merged kdb-v4.4-2.6.23-i386-1 and kdb-v4.4-2.6.23-x86_64-1 -+ * into kdb-v4.4-2.6.24-rc1-x86-1 -+ * - Fields "done", "sglist_len", and "pid" are removed from -+ * struct scsi_cmnd. Thus, these fields are no longer displayed -+ * on "sc" command. -+ * kdb v4.4-2.6.24-rc1-ia64-1. -+ -+2007-11-08 Jay Lan -+ -+ * New KDB USB interface, Aaron Young -+ * 1. This patch allows KDB to work with any Host Controller driver -+ * and call the correct HC driver poll routine (as long as the -+ * HC driver provides a .kdb_poll_char routine via its -+ * associated hc_driver struct). -+ * 2. Hotplugged keyboards are now recognized by KDB. -+ * 3. Currently KDB can only make use of 1 USB type keyboard. -+ * New code can handle up to 8 attached keyboards - input is -+ * multiplexed from all of them while in kdb. -+ * kdb v4.4-2.6.23-ia64-2. -+ -+2007-10-24 Jay Lan -+ -+ * kdb v4.4-2.6.23-ia64-1. -+ -+2007-09-26 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc8-ia64-1. -+ -+2007-09-21 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc7-ia64-1. -+ -+2007-09-19 Jay Lan -+ -+ * Get into KDB successfully if multiple cpus are in MCA. -+ * kdb v4.4-2.6.23-rc6-ia64-2. -+ -+2007-09-12 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc6-ia64-1. -+ -+2007-09-06 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc5-ia64-1. -+ -+2007-08-30 Keith Owens -+ -+ * New i386/x86_64 backtrace requires that kdb_save_running() does not -+ exit until after kdb_main_loop() has completed. -+ * kdb v4.4-2.6.23-rc4-ia64-2. -+ -+2007-08-30 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc4-ia64-1. -+ -+2007-08-24 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc3-ia64-1. -+ -+2007-08-07 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc2-ia64-1. -+ -+2007-07-30 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc1-ia64-1. -+ -+2007-07-09 Keith Owens -+ -+ * kdb v4.4-2.6.22-ia64-1. -+ -+2007-07-02 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc7-ia64-1. -+ -+2007-06-20 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc5-ia64-1. -+ -+2007-06-08 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc4-ia64-1. -+ -+2007-05-28 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc3-ia64-1. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc2-ia64-1. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc1-ia64-1. -+ -+2007-04-29 Keith Owens -+ -+ * kdb v4.4-2.6.21-ia64-1. -+ -+2007-04-16 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc7-ia64-1. -+ -+2007-04-10 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc6-ia64-1. -+ -+2007-04-02 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc5-ia64-1. -+ -+2007-03-19 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc4-ia64-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc3-ia64-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc2-ia64-1. -+ -+2007-03-01 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc1-ia64-1. -+ -+2007-03-01 Keith Owens -+ -+ * Remove sparse warnings. -+ * kdb v4.4-2.6.20-ia64-3. -+ -+2007-02-16 Keith Owens -+ -+ * Initialise variable bits of struct disassemble_info each time. -+ * kdb v4.4-2.6.20-ia64-2. -+ -+2007-02-06 Keith Owens -+ -+ * kdb v4.4-2.6.20-ia64-1. -+ -+2007-02-01 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc7-ia64-1.
-+ -+2007-01-08 Keith Owens -+ -+ * Detect calls via PLT and decode the target address. -+ * kdb v4.4-2.6.20-rc4-ia64-2. -+ -+2007-01-08 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc4-ia64-1. -+ -+2007-01-02 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc3-ia64-1. -+ -+2006-12-20 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc1-ia64-1. -+ -+2006-12-07 Keith Owens -+ -+ * Export kdba_dumpregs. -+ * kdb v4.4-2.6.19-ia64-2. -+ -+2006-11-30 Keith Owens -+ -+ * kdb v4.4-2.6.19-ia64-1. -+ -+2006-11-27 Keith Owens -+ -+ * Only use VT keyboard if the command line allows it and ACPI indicates -+ that there is an i8042. -+ * kdb v4.4-2.6.19-rc6-ia64-2. -+ -+2006-11-20 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc6-ia64-1. -+ -+2006-11-09 Keith Owens -+ -+ * Only use VT console if the command line allows it. -+ * kdb v4.4-2.6.19-rc5-ia64-2. -+ -+2006-11-08 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc5-ia64-1. -+ -+2006-11-01 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc4-ia64-1. -+ -+2006-10-24 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc3-ia64-1. -+ -+2006-10-24 Keith Owens -+ -+ * Remove redundant regs and envp parameters. -+ * kdb v4.4-2.6.19-rc2-ia64-2. -+ -+2006-10-18 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc2-ia64-1. -+ -+2006-10-09 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc1-ia64-1. -+ -+2006-10-06 Keith Owens -+ -+ * Remove #include -+ * kdb v4.4-2.6.18-ia64-2. -+ -+2006-09-20 Keith Owens -+ -+ * kdb v4.4-2.6.18-ia64-1. -+ -+2006-09-15 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc7-ia64-1. -+ -+2006-08-29 Keith Owens -+ -+ * Rewrite all backtrace code. -+ * kdb v4.4-2.6.18-rc5-ia64-2. -+ -+2006-08-28 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc5-ia64-1. -+ -+2006-08-08 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc4-ia64-1. -+ -+2006-08-04 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc3-ia64-1. -+ -+2006-07-18 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc2-ia64-1. -+ -+2006-07-12 Keith Owens -+ -+ * Remove dead KDB_REASON codes. -+ * sparse cleanups. -+ * kdb v4.4-2.6.18-rc1-ia64-2. -+ -+2006-07-07 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc1-ia64-1. -+ -+2006-07-04 Keith Owens -+ -+ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr, -+ page_fault_mca. Only ever implemented on x86, difficult to maintain -+ and rarely used in the field. -+ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp. -+ * kdb v4.4-2.6.17-ia64-2. -+ -+2006-06-19 Keith Owens -+ -+ * kdb v4.4-2.6.17-ia64-1. -+ -+2006-05-25 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc5-ia64-1. -+ -+2006-05-15 Keith Owens -+ -+ * Refresh bfd related files from binutils 2.16.91.0.2. -+ * kdb v4.4-2.6.17-rc4-ia64-2. -+ -+2006-05-12 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc4-ia64-1. -+ -+2006-04-28 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc3-ia64-1. -+ -+2006-04-22 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc2-ia64-1. -+ -+2006-04-11 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc1-ia64-1. -+ -+2006-03-30 Keith Owens -+ -+ * Change CONFIG_LKCD to CONFIG_LKCD_DUMP. -+ * kdb v4.4-2.6.16-ia64-3. -+ -+2006-03-24 Keith Owens -+ -+ * Use INIT to interrupt cpus that do not respond to a normal kdb IPI. -+ * Remove KDBA_MCA_TRACE from arch/ia64/kernel/mca.c. -+ * kdb v4.4-2.6.16-ia64-2. -+ -+2006-03-21 Keith Owens -+ -+ * kdb v4.4-2.6.16-ia64-1. -+ -+2006-03-14 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc6-ia64-1. -+ -+2006-02-28 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc5-ia64-1. -+ -+2006-02-20 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc4-ia64-1. -+ -+2006-02-07 Keith Owens -+ -+ * Change kdb_running_process_save from a static array to a pointer. -+ gcc 4.0 objects to forward declarations for arrays with an incomplete -+ type. -+ * kdb v4.4-2.6.16-rc2-ia64-3. 
-+ -+2006-02-06 Keith Owens -+ -+ * Change CONFIG_CRASH_DUMP to CONFIG_LKCD. -+ * kdb v4.4-2.6.16-rc2-ia64-2. -+ -+2006-02-06 Keith Owens -+ -+ * kdb v4.4-2.6.16-rc2-ia64-1. -+ -+2006-02-01 Keith Owens -+ -+ * Handlers: check that the task is in kernel space before looking at -+ the thread_info bits. -+ * Expose kdb_running_process_save[] so 'pid R' can get the original -+ process, even when the MCA/INIT handlers are being used. -+ * kdb v4.4-2.6.16-rc1-ia64-3. -+ -+2006-01-19 Keith Owens -+ -+ * Add back some kdb changes to xpc_main that were lost due to a patch -+ conflict. -+ * kdb v4.4-2.6.16-rc1-ia64-2. -+ -+2006-01-18 Keith Owens -+ -+ * kdb v4.4-2.6.16-rc1-ia64-1. -+ -+2006-01-10 Keith Owens -+ -+ * Build kdba_pod for generic as well as sn2 kernels and test at run -+ time if the platform is sn2. -+ * kdb v4.4-2.6.15-ia64-3. -+ -+2006-01-08 Keith Owens -+ -+ * Convert xpc to use DIE_KDEBUG_ENTER and DIE_KDEBUG_LEAVE. -+ * Add debug option for xpc. -+ * break.b always sets a debug trap number of 0 , so pass that to kdb as -+ well as the normal kdb traaps. -+ * kdb v4.4-2.6.15-ia64-2. -+ -+2006-01-04 Keith Owens -+ -+ * Remove some inlines and the last vestige of CONFIG_NUMA_REPLICATE. -+ * Read the keyboard acknowledgment after sending a character. SuSE -+ Bugzilla 60240. -+ * kdb v4.4-2.6.15-ia64-1. -+ -+2005-12-25 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc7-ia64-1. -+ -+2005-12-20 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc6-ia64-1. -+ -+2005-12-06 Keith Owens -+ -+ * Use RECOVERY flag in MCA handler. -+ * kdb v4.4-2.6.15-rc5-ia64-2. -+ -+2005-12-05 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc5-ia64-1. -+ -+2005-12-02 Keith Owens -+ -+ * Reinstate hook for debug trap, the patch chunk was accidentally -+ dropped in 2.6.15-rc1. -+ * kdb v4.4-2.6.15-rc4-ia64-1. -+ -+2005-11-30 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc3-ia64-1. -+ -+2005-11-21 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc2-ia64-1. -+ -+2005-11-15 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc1-ia64-1. -+ -+2005-10-28 Keith Owens -+ -+ * kdb v4.4-2.6.14-ia64-1. -+ -+2005-10-21 Keith Owens -+ -+ * kdb v4.4-2.6.14-rc5-ia64-1. -+ -+2005-10-11 Keith Owens -+ -+ * Handle removal of USB keyboard. Aaron Young, SGI -+ * kdb v4.4-2.6.14-rc4-ia64-1. -+ -+2005-10-04 Keith Owens -+ -+ * kdb v4.4-2.6.14-rc3-ia64-1. -+ -+2005-09-21 Keith Owens -+ -+ * Support kdb_current_task in register display and modify commands. -+ * kdb v4.4-2.6.14-rc2-ia64-1. -+ -+2005-09-20 Keith Owens -+ -+ * Coexist with kprobes. -+ * Coexist with MCA/INIT rewrite. -+ * Add KDB_ENTER_SLAVE to handle concurrent entry to kdb from multiple -+ cpus. -+ * Add handlers command to control whether the MCA/INIT task or the -+ original task is displayed. -+ * Namespace clean up, remove unused kdba_sw_interrupt. -+ * kdb v4.4-2.6.14-rc1-ia64-1. -+ -+2005-08-29 Keith Owens -+ -+ * kdb v4.4-2.6.13-ia64-1. -+ -+2005-08-24 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc7-ia64-1. -+ -+2005-08-08 Keith Owens -+ -+ * Add minstate command. -+ * kdb v4.4-2.6.13-rc6-ia64-1. -+ -+2005-08-02 Keith Owens -+ -+ * Replace hard coded kdb declarations with #include . -+ * kdb v4.4-2.6.13-rc5-ia64-1. -+ -+2005-07-30 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc4-ia64-1. -+ -+2005-07-22 Keith Owens -+ -+ * Handle INIT delivered while in physical mode. -+ * kdb v4.4-2.6.13-rc3-ia64-2. -+ -+2005-07-19 Keith Owens -+ -+ * Add support for USB keyboard (OHCI only). Aaron Young, SGI. -+ * kdb v4.4-2.6.13-rc3-ia64-1. -+ -+2005-07-08 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc2-ia64-1. -+ -+2005-07-01 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc1-ia64-1. 
-+ -+2005-06-18 Keith Owens -+ -+ * Standard IA64 code now works around break.b setting cr.iim to 0 -+ instead of the break number. Remove the kdb workaround. -+ * kdb v4.4-2.6.12-ia64-1. -+ -+2005-06-08 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc6-ia64-1. -+ -+2005-05-25 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc5-ia64-1. -+ -+2005-05-24 Keith Owens -+ -+ * break.b sets cr.iim to 0 instead of the break number. Deal with it. -+ * kdb v4.4-2.6.12-rc4-ia64-3. -+ -+2005-05-14 Keith Owens -+ -+ * Correct MCA path after calling kdba_mca_bspstore_fixup(). -+ Mark Larson, SGI. -+ * Tell the user that MCA/INIT is recoverable so kdb is not entered. -+ * kdb v4.4-2.6.12-rc4-ia64-2. -+ -+2005-05-08 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc4-ia64-1. -+ -+2005-04-21 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc3-ia64-1. -+ -+2005-04-06 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc2-ia64-1. -+ -+2005-04-04 Keith Owens -+ -+ * More tweaks to cope with invalid old bspstore in MCA handler. -+ * kdb v4.4-2.6.12-rc1-ia64-2. -+ -+2005-03-29 Keith Owens -+ -+ * Replace __copy_to_user with __copy_to_user_inatomic. -+ * MCA handler, do not use old_bspstore if it is in region 4 or below. -+ * kdb v4.4-2.6.12-rc1-ia64-1. -+ -+2005-03-08 Keith Owens -+ -+ * Coexistence patches for lkcd. Jason Uhlenkott, SGI. -+ * kdb v4.4-2.6.11-ia64-2. -+ -+2005-03-03 Keith Owens -+ -+ * kdb-v4.4-2.6.11-ia64-1. -+ -+2005-02-14 Keith Owens -+ -+ * kdb-v4.4-2.6.11-rc4-ia64-1. -+ -+2005-02-08 Keith Owens -+ -+ * kdb-v4.4-2.6.11-rc3-bk4-ia64-1. -+ -+2005-02-03 Keith Owens -+ -+ * kdb-v4.4-2.6.11-rc3-ia64-1. -+ -+2005-01-27 Keith Owens -+ -+ * kdb-v4.4-2.6.11-rc2-ia64-1. -+ -+2005-01-20 Keith Owens -+ -+ * MCA and INIT stacks moved to per-cpu area. -+ * kdb-v4.4-2.6.11-rc1-bk7-ia64-1. -+ -+2005-01-12 Keith Owens -+ -+ * ia64_spinlock_contention_pre3_4_end is in base kernel, remove from kdb. -+ * Use last ditch allocator if unwind cannot allocate memory. -+ * kdb-v4.4-2.6.11-rc1-ia64-1. -+ -+2004-12-25 Keith Owens -+ -+ * Add cpuinfo command. -+ * kdb-v4.4-2.6.10-ia64-1. -+ -+2004-12-07 Keith Owens -+ -+ * Clean up error path in kdba_mca_init. -+ * kdb-v4.4-2.6.10-rc3-ia64-1. -+ -+2004-11-15 Keith Owens -+ -+ * kdb-v4.4-2.6.10-rc2-ia64-1. -+ -+2004-10-29 Keith Owens -+ -+ * kdb-v4.4-2.6.10-rc1-ia64-1. -+ -+2004-10-19 Keith Owens -+ -+ * kdb-v4.4-2.6.9-ia64-1. -+ -+2004-10-12 Keith Owens -+ -+ * kdb-v4.4-2.6.9-rc4-ia64-1. -+ -+2004-10-01 Keith Owens -+ -+ * kdb-v4.4-2.6.9-rc3-ia64-1. -+ -+2004-09-30 Keith Owens -+ -+ * Add stackdepth command. -+ * kdb-v4.4-2.6.9-rc2-ia64-3. -+ -+2004-09-16 Keith Owens -+ -+ * Fixes for current in region 5 instead of 7 (idle task on cpu 0). -+ * kdb-v4.4-2.6.9-rc2-ia64-2. -+ -+2004-09-14 Keith Owens -+ -+ * kdb-v4.4-2.6.9-rc2-ia64-1. -+ -+2004-08-27 Keith Owens -+ -+ * kdb-v4.4-2.6.9-rc1-ia64-1. -+ -+2004-08-14 Keith Owens -+ -+ * kdb-v4.4-2.6.8-ia64-1. -+ -+2004-08-12 Keith Owens -+ -+ * kdb-v4.4-2.6.8-rc4-ia64-1. -+ -+2004-08-04 Keith Owens -+ -+ * kdb-v4.4-2.6.8-rc3-ia64-1. -+ -+2004-07-18 Keith Owens -+ -+ * New config name for SN serial console. -+ * kdb-v4.4-2.6.8-rc2-ia64-1. -+ -+2004-07-12 Keith Owens -+ -+ * kdb-v4.4-2.6.8-rc1-ia64-1. -+ -+2004-06-30 Keith Owens -+ -+ * kdb-v4.4-2.6.7-ia64-040629-1. -+ -+2004-06-16 Keith Owens -+ -+ * Coexist with 2.6.7-ia64-040619. -+ * kdb-v4.4-2.6.7-ia64-040619-1. -+ -+2004-06-16 Keith Owens -+ -+ * kdb v4.4-2.6.7-ia64-1. -+ -+2004-06-10 Keith Owens -+ -+ * kdb v4.4-2.6.7-rc3-ia64-1. -+ -+2004-06-09 Keith Owens -+ -+ * Namespace clean up. 
Mark code/variables as static when it is only -+ used in one file, delete dead code/variables. -+ * Saved interrupt state requires long, not int. -+ * kdb v4.4-2.6.7-rc2-ia64-3. -+ -+2004-06-08 Keith Owens -+ -+ * Whitespace clean up, no code changes. -+ * kdb v4.4-2.6.7-rc2-2. -+ -+2004-06-07 Keith Owens -+ -+ * Force KALLSYMS and KALLSYMS_ALL for CONFIG_KDB. -+ * kdb v4.4-2.6.7-rc2-1. -+ -+2004-06-06 Keith Owens -+ -+ * Add standard archkdb commands. -+ * Move kdb_{get,put}userarea_size definitions to linux/kdb.h. -+ * kdb v4.4-2.6.6-ia64-040521-2. -+ -+2004-05-25 Keith Owens -+ -+ * Update Kconfig text. -+ * kdb v4.4-2.6.6-ia64-040521-1. -+ -+2004-05-23 Keith Owens -+ -+ * Move bfd.h and ansidecl.h from arch/$(ARCH)/kdb to include/asm-$(ARCH). -+ * ia64-opc.c needs kdbprivate.h after common reorganisation. -+ * Update copyright notices. -+ * kdb v4.4-2.6.6-ia64-1. -+ -+2004-05-60 Keith Owens -+ -+ * kdb v4.3-2.6.6-rc3-ia64-1. -+ -+2004-05-60 Keith Owens -+ -+ * Tweak WAR for backtrace through contended spinlocks. -+ * kdb v4.3-2.6.6-rc2-ia64-1. -+ -+2004-04-30 Keith Owens -+ -+ * kdb v4.3-2.6.6-rc1-ia64-1. -+ -+2004-04-15 Keith Owens -+ -+ * kdb v4.3-2.6.5-ia64-040413-1. -+ -+2004-03-06 Keith Owens -+ -+ * Use kdb_print for unwind debugging. -+ * kdb v4.3-2.6.4-rc2-ia64-1. -+ -+2004-02-29 Keith Owens -+ -+ * kdb v4.3-2.6.4-rc1-ia64-1. -+ -+2004-02-18 Keith Owens -+ -+ * kdb v4.3-2.6.3-ia64-1. -+ -+2004-02-17 Keith Owens -+ -+ * Reconcile 2.6-test versions from Xavier Bru (Bull), Greg Banks (SGI), -+ Jim Houston (Concurrent Computer Corp). -+ * Reconcile with kdb v4.3-2.4.23-ia64-0312??-1. -+ * Reconcile with salinfo changes. -+ * Port WAR for backtrace from spinlock contention from 2.4 to 2.6. -+ * Merge PGS FIFO tweak with SERIAL_IO_MEM and concurrent support for -+ multiple consoles (no USB consoles yet). -+ * Update pt_regs output to match the order of struct pt_regs. -+ * KDB wrappers for interrupts handlers now return the handler's return code. -+ * tpa and tpav commands from Anonymous. -+ * Reconcile with mca changes. -+ * Upgrade to 2.6.3-rc3. -+ * kdb v4.3-2.6.3-rc3-ia64-1. -+ -+2003-10-22 Xavier Bru -+ * Merge to 2.6.0-test7 -+2003-10-20 Philippe Garrigues -+ * Enable FIFO in UART -+2003-09-08 Xavier Bru -+ * Merge to 2.6.0-test4 -+2003-03-21 Xavier Bru -+ * Merge kdb v4.0 on 2.5.64 ia64 -+ * new kernel parameters support -+ * new kallsyms support -+ -+2003-10-24 Keith Owens -+ -+ * kdb v4.3-2.4.23-pre8-cset-1.1069.1.143-to-1.1108-ia64-1. -+ -+2003-10-03 Keith Owens -+ -+ * After MCA, copy the saved RSE registers from ia64_mca_bspstore to the -+ stack of the failing process. -+ * Abort backtrace when we hit IVT, no unwind data which confuses -+ unw_unwind(). -+ * Workaround for backtrace through spinlock contention called from leaf -+ functions. -+ * kdb v4.3-2.4.22-ia64-030909-1. -+ -+2003-07-20 Keith Owens -+ -+ * MCA rendezvous timeout affects kdb_wait_for_cpus_secs. -+ * Support SGI L1 console. -+ * kdb v4.3-2.4.21-ia64-030702-2. -+ -+2003-07-08 Keith Owens -+ -+ * print_symbol() in mca.c does something useful when kdb is installed. -+ * Unwind and SAL changes removed from kdb, they are in the base kernel. -+ * kdb v4.3-2.4.21-ia64-030702-1. -+ -+2003-06-20 Keith Owens -+ -+ * Add CONFIG_KDB_CONTINUE_CATASTROPHIC. -+ * Do not send IPI if the machine state does not require them. -+ * Correct definition of KDB_ENTER(). -+ * Workaround for broken init monarch handler. -+ * Monarch cpu must get to kdb, even if it was interrupted in user space. -+ * Unwind fixes. 
-+ * Generalize ia64_spinlock_contention name. -+ * Add kdba_fru for SN machines. -+ * Correct test for cpu number. -+ * kdb v4.3-2.4.20-ia64-020821-1. -+ -+2003-05-02 Keith Owens -+ -+ * Add kdba_fp_value(). -+ * Limit backtrace size to catch loops. -+ * Print spinlock name in ia64_spinlock_contention. -+ * Tweak INIT slave stack lock and handler. -+ * Add read/write access to user pages. Vamsi Krishna S., IBM -+ * Rename cpu_is_online to cpu_online, as in 2.5. -+ * Clean up USB keyboard support. -+ * Clean up serial console support. -+ * kdb v4.2-2.4.20-ia64-020821-1. -+ -+2003-04-04 Keith Owens -+ -+ * Add support for INIT slave interrupts. -+ * Tell SAL to always rendezvous on MCA. -+ * No lock on SAL rendezvous call. -+ * Include unwind.c from 2.4.21-pre5. -+ * Rename cpu_online to cpu_is_online. -+ * Workarounds for scheduler bugs. -+ * kdb v4.1-2.4.20-ia64-020821-1. -+ -+2003-03-16 Keith Owens -+ -+ * Each cpu saves its state as it enters kdb or before it enters code -+ which cannot call kdb, converting kdb from a pull to a push model. -+ * Clean up kdb interaction with CONFIG_SERIAL_CONSOLE. -+ * Removal of special cases for i386 backtrace from common code -+ simplifies the architecture code. -+ * Add support for MCA events (both main and rendezvous) plus INIT -+ monarch event. -+ * Correct decode of brl. -+ * Move kdba_print_nameval to common code. -+ * Generalize kdba unwind handlers. -+ * Fix decode of sal records (fix included in later ia64 kernels). -+ * Handle multiple pt_regs in stack (fix included in later ia64 kernels). -+ * Clean up debug code in unwind (fix included in later ia64 kernels). -+ * Move kdb break numbers to their own file so it can be used in asm. -+ * kdb v4.0-2.4.20-ia64-021210-1. -+ -+2003-02-03 Keith Owens -+ -+ * Register kdb commands early. -+ * Handle KDB_ENTER() when kdb=off. -+ * Optimize __kdba_getarea_size when width is a constant. -+ * Decode oops via kallsyms if it is available. -+ * Update copyright notices to 2003. -+ * Add commands to dump struct pt_regs and switch_stack. -+ * Handle padding from unw_init_running for switch_stack. -+ * Add dummy kdba_local_arch_setup/kdba_local_arch_cleanup. -+ * Warning for pod mode. -+ * Add command history and editing. Sonic Zhang. -+ * kdb_toggleled is conditional on KDB_BLINK_LED. Bernhard Fischer. -+ * Allow tab on serial line for symbol completion. -+ * Ignore KDB_ENTER() when kdb is already running. -+ * kdb v3.0-2.4.20-ia64-021210-1. -+ -+2003-01-23 Keith Owens -+ -+ * Upgrade to 2.4.20-ia64-021210. -+ * kdb v2.5-2.4.20-ia64-021210-1. -+ -+2002-11-14 Keith Owens -+ -+ * General clean up of handling for breakpoints and single stepping over -+ software breakpoints. -+ * kdb v2.5-2.4.19-ia64-020821-1. -+ -+2002-10-31 Keith Owens -+ -+ * Remove kdb_eframe_t. -+ * Sanity check if we have pt_regs. -+ * Remove kdba_getcurrentframe(). -+ * Comments for coexistence with O(1) scheduler. -+ * kdb v2.4-2.4.19-ia64-020821-1. -+ -+2002-10-15 Keith Owens -+ -+ * Minimize differences between patches for 2.4 and 2.5 kernels. -+ * kdb v2.3-2.4.19-ia64-020821-2. -+ -+2002-08-10 Keith Owens -+ -+ * Verify rw address for instruction breakpoint. -+ * Replace kdb_port with kdb_serial to support memory mapped I/O. -+ David Mosberger. -+ Note: This needs kdb v2.3-2.4.18-common-2 or later. -+ * kdb v2.3-2.4.18-ia64-020722-2. -+ -+2002-08-07 Keith Owens -+ -+ * Upgrade to 2.4.18-ia64-020722. -+ * Remove individual SGI copyrights, the general SGI copyright applies. -+ * Clean up disassembly layout. Hugh Dickins, Keith Owens. 
-+ * Remove fixed KDB_MAX_COMMANDS size. -+ * Add set_fs() around __copy_to_user on kernel addresses. -+ Randolph Chung. -+ * Position ia64 for CONFIG_NUMA_REPLICATE. -+ * Stacked registers modification support. Sebastien Lelarge. -+ * USB keyboard support. Sebastien Lelarge. -+ * kdb v2.3-2.4.18-ia64-020722-1. -+ -+2002-03-20 Keith Owens -+ -+ * Sync with 2.4.17-sn2. -+ * Add pod command. -+ -+2002-02-20 Keith Owens -+ -+ * Call kdb from mca handler. Jenna S. Hall, Intel. -+ * kdb v2.1-2.4.17-ia64-011226-2. -+ -+2002-01-18 Keith Owens -+ -+ * Replace kdb_get/putword with kdb_get/putarea functions. -+ * Wrap kdb references in #ifdef CONFIG_KDB. -+ * Delete sample i386 code. -+ * Refuse to update kernel text on NUMA systems. -+ * Reject hardware breakpoints, not supported yet. -+ * kdb v2.1-2.4.17-ia64-011226-1. -+ -+2002-01-07 Keith Owens -+ -+ * Split kdb for ia64 as kdb v2.0-2.4.17-ia64-011226-1. ---- /dev/null -+++ b/arch/ia64/kdb/Makefile -@@ -0,0 +1,21 @@ -+# -+# This file is subject to the terms and conditions of the GNU General Public -+# License. See the file "COPYING" in the main directory of this archive -+# for more details. -+# -+# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+# -+ -+obj-y := kdba_bt.o kdba_bp.o kdba_io.o kdba_support.o \ -+ cpu-ia64-opc.o ia64-dis.o ia64-opc.o kdba_id.o kdba_jmp.o -+ -+# fru does not compile on 2.6. -+# obj-$(CONFIG_IA64_SGI_SN2) += kdba_fru.o -+obj-$(CONFIG_IA64_SGI_SN2) += kdba_pod.o -+obj-$(CONFIG_IA64_GENERIC) += kdba_pod.o -+ -+override CFLAGS := $(CFLAGS:%-pg=% ) -+ -+AFLAGS_kdba_jmp.o += $(AFLAGS_KERNEL) -+ -+USE_STANDARD_AS_RULE := true ---- /dev/null -+++ b/arch/ia64/kdb/cpu-ia64-opc.c -@@ -0,0 +1,598 @@ -+/* Copyright 1998, 1999, 2000, 2001, 2002, 2003 -+ Free Software Foundation, Inc. -+ Contributed by David Mosberger-Tang -+ -+This file is part of BFD, the Binary File Descriptor library. -+ -+This program is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 2 of the License, or -+(at your option) any later version. -+ -+This program is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with this program; if not, write to the Free Software -+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+/* Logically, this code should be part of libopcode but since some of -+ the operand insertion/extraction functions help bfd to implement -+ relocations, this code is included as part of cpu-ia64.c. This -+ avoids circular dependencies between libopcode and libbfd and also -+ obviates the need for applications to link in libopcode when all -+ they really want is libbfd. 
-+ -+ --davidm Mon Apr 13 22:14:02 1998 */ -+ -+#ifdef __KERNEL__ -+#include "ia64-opc.h" -+#else /* __KERNEL__ */ -+#include "../opcodes/ia64-opc.h" -+#endif /* __KERNEL__ */ -+ -+#define NELEMS(a) ((int) (sizeof (a) / sizeof ((a)[0]))) -+ -+static const char* -+ins_rsvd (const struct ia64_operand *self ATTRIBUTE_UNUSED, -+ ia64_insn value ATTRIBUTE_UNUSED, ia64_insn *code ATTRIBUTE_UNUSED) -+{ -+ return "internal error---this shouldn't happen"; -+} -+ -+static const char* -+ext_rsvd (const struct ia64_operand *self ATTRIBUTE_UNUSED, -+ ia64_insn code ATTRIBUTE_UNUSED, ia64_insn *valuep ATTRIBUTE_UNUSED) -+{ -+ return "internal error---this shouldn't happen"; -+} -+ -+static const char* -+ins_const (const struct ia64_operand *self ATTRIBUTE_UNUSED, -+ ia64_insn value ATTRIBUTE_UNUSED, ia64_insn *code ATTRIBUTE_UNUSED) -+{ -+ return 0; -+} -+ -+static const char* -+ext_const (const struct ia64_operand *self ATTRIBUTE_UNUSED, -+ ia64_insn code ATTRIBUTE_UNUSED, ia64_insn *valuep ATTRIBUTE_UNUSED) -+{ -+ return 0; -+} -+ -+static const char* -+ins_reg (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ if (value >= 1u << self->field[0].bits) -+ return "register number out of range"; -+ -+ *code |= value << self->field[0].shift; -+ return 0; -+} -+ -+static const char* -+ext_reg (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ *valuep = ((code >> self->field[0].shift) -+ & ((1u << self->field[0].bits) - 1)); -+ return 0; -+} -+ -+static const char* -+ins_immu (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ ia64_insn new = 0; -+ int i; -+ -+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i) -+ { -+ new |= ((value & ((((ia64_insn) 1) << self->field[i].bits) - 1)) -+ << self->field[i].shift); -+ value >>= self->field[i].bits; -+ } -+ if (value) -+ return "integer operand out of range"; -+ -+ *code |= new; -+ return 0; -+} -+ -+static const char* -+ext_immu (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ BFD_HOST_U_64_BIT value = 0; -+ int i, bits = 0, total = 0; -+ -+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i) -+ { -+ bits = self->field[i].bits; -+ value |= ((code >> self->field[i].shift) -+ & ((((BFD_HOST_U_64_BIT) 1) << bits) - 1)) << total; -+ total += bits; -+ } -+ *valuep = value; -+ return 0; -+} -+ -+static const char* -+ins_immus8 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ if (value & 0x7) -+ return "value not an integer multiple of 8"; -+ return ins_immu (self, value >> 3, code); -+} -+ -+static const char* -+ext_immus8 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ const char *result; -+ -+ result = ext_immu (self, code, valuep); -+ if (result) -+ return result; -+ -+ *valuep = *valuep << 3; -+ return 0; -+} -+ -+static const char* -+ins_imms_scaled (const struct ia64_operand *self, ia64_insn value, -+ ia64_insn *code, int scale) -+{ -+ BFD_HOST_64_BIT svalue = value, sign_bit = 0; -+ ia64_insn new = 0; -+ int i; -+ -+ svalue >>= scale; -+ -+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i) -+ { -+ new |= ((svalue & ((((ia64_insn) 1) << self->field[i].bits) - 1)) -+ << self->field[i].shift); -+ sign_bit = (svalue >> (self->field[i].bits - 1)) & 1; -+ svalue >>= self->field[i].bits; -+ } -+ if ((!sign_bit && svalue != 0) || (sign_bit && svalue != -1)) -+ return "integer operand out of range"; -+ -+ *code |= new; -+ return 0; -+} -+ -+static const char* 
-+ext_imms_scaled (const struct ia64_operand *self, ia64_insn code, -+ ia64_insn *valuep, int scale) -+{ -+ int i, bits = 0, total = 0; -+ BFD_HOST_64_BIT val = 0, sign; -+ -+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i) -+ { -+ bits = self->field[i].bits; -+ val |= ((code >> self->field[i].shift) -+ & ((((BFD_HOST_U_64_BIT) 1) << bits) - 1)) << total; -+ total += bits; -+ } -+ /* sign extend: */ -+ sign = (BFD_HOST_64_BIT) 1 << (total - 1); -+ val = (val ^ sign) - sign; -+ -+ *valuep = (val << scale); -+ return 0; -+} -+ -+static const char* -+ins_imms (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ return ins_imms_scaled (self, value, code, 0); -+} -+ -+static const char* -+ins_immsu4 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ value = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000; -+ -+ return ins_imms_scaled (self, value, code, 0); -+} -+ -+static const char* -+ext_imms (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ return ext_imms_scaled (self, code, valuep, 0); -+} -+ -+static const char* -+ins_immsm1 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ --value; -+ return ins_imms_scaled (self, value, code, 0); -+} -+ -+static const char* -+ins_immsm1u4 (const struct ia64_operand *self, ia64_insn value, -+ ia64_insn *code) -+{ -+ value = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000; -+ -+ --value; -+ return ins_imms_scaled (self, value, code, 0); -+} -+ -+static const char* -+ext_immsm1 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ const char *res = ext_imms_scaled (self, code, valuep, 0); -+ -+ ++*valuep; -+ return res; -+} -+ -+static const char* -+ins_imms1 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ return ins_imms_scaled (self, value, code, 1); -+} -+ -+static const char* -+ext_imms1 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ return ext_imms_scaled (self, code, valuep, 1); -+} -+ -+static const char* -+ins_imms4 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ return ins_imms_scaled (self, value, code, 4); -+} -+ -+static const char* -+ext_imms4 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ return ext_imms_scaled (self, code, valuep, 4); -+} -+ -+static const char* -+ins_imms16 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ return ins_imms_scaled (self, value, code, 16); -+} -+ -+static const char* -+ext_imms16 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ return ext_imms_scaled (self, code, valuep, 16); -+} -+ -+static const char* -+ins_cimmu (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ ia64_insn mask = (((ia64_insn) 1) << self->field[0].bits) - 1; -+ return ins_immu (self, value ^ mask, code); -+} -+ -+static const char* -+ext_cimmu (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ const char *result; -+ ia64_insn mask; -+ -+ mask = (((ia64_insn) 1) << self->field[0].bits) - 1; -+ result = ext_immu (self, code, valuep); -+ if (!result) -+ { -+ mask = (((ia64_insn) 1) << self->field[0].bits) - 1; -+ *valuep ^= mask; -+ } -+ return result; -+} -+ -+static const char* -+ins_cnt (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ --value; -+ if (value >= ((BFD_HOST_U_64_BIT) 1) << self->field[0].bits) -+ return "count out of range"; -+ -+ *code |= value << 
self->field[0].shift; -+ return 0; -+} -+ -+static const char* -+ext_cnt (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ *valuep = ((code >> self->field[0].shift) -+ & ((((BFD_HOST_U_64_BIT) 1) << self->field[0].bits) - 1)) + 1; -+ return 0; -+} -+ -+static const char* -+ins_cnt2b (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ --value; -+ -+ if (value > 2) -+ return "count must be in range 1..3"; -+ -+ *code |= value << self->field[0].shift; -+ return 0; -+} -+ -+static const char* -+ext_cnt2b (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ *valuep = ((code >> self->field[0].shift) & 0x3) + 1; -+ return 0; -+} -+ -+static const char* -+ins_cnt2c (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ switch (value) -+ { -+ case 0: value = 0; break; -+ case 7: value = 1; break; -+ case 15: value = 2; break; -+ case 16: value = 3; break; -+ default: return "count must be 0, 7, 15, or 16"; -+ } -+ *code |= value << self->field[0].shift; -+ return 0; -+} -+ -+static const char* -+ext_cnt2c (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ ia64_insn value; -+ -+ value = (code >> self->field[0].shift) & 0x3; -+ switch (value) -+ { -+ case 0: value = 0; break; -+ case 1: value = 7; break; -+ case 2: value = 15; break; -+ case 3: value = 16; break; -+ } -+ *valuep = value; -+ return 0; -+} -+ -+static const char* -+ins_inc3 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code) -+{ -+ BFD_HOST_64_BIT val = value; -+ BFD_HOST_U_64_BIT sign = 0; -+ -+ if (val < 0) -+ { -+ sign = 0x4; -+ value = -value; -+ } -+ switch (value) -+ { -+ case 1: value = 3; break; -+ case 4: value = 2; break; -+ case 8: value = 1; break; -+ case 16: value = 0; break; -+ default: return "count must be +/- 1, 4, 8, or 16"; -+ } -+ *code |= (sign | value) << self->field[0].shift; -+ return 0; -+} -+ -+static const char* -+ext_inc3 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep) -+{ -+ BFD_HOST_64_BIT val; -+ int negate; -+ -+ val = (code >> self->field[0].shift) & 0x7; -+ negate = val & 0x4; -+ switch (val & 0x3) -+ { -+ case 0: val = 16; break; -+ case 1: val = 8; break; -+ case 2: val = 4; break; -+ case 3: val = 1; break; -+ } -+ if (negate) -+ val = -val; -+ -+ *valuep = val; -+ return 0; -+} -+ -+#define CST IA64_OPND_CLASS_CST -+#define REG IA64_OPND_CLASS_REG -+#define IND IA64_OPND_CLASS_IND -+#define ABS IA64_OPND_CLASS_ABS -+#define REL IA64_OPND_CLASS_REL -+ -+#define SDEC IA64_OPND_FLAG_DECIMAL_SIGNED -+#define UDEC IA64_OPND_FLAG_DECIMAL_UNSIGNED -+ -+const struct ia64_operand elf64_ia64_operands[IA64_OPND_COUNT] = -+ { -+ /* constants: */ -+ { CST, ins_const, ext_const, "NIL", {{ 0, 0}}, 0, "" }, -+ { CST, ins_const, ext_const, "ar.csd", {{ 0, 0}}, 0, "ar.csd" }, -+ { CST, ins_const, ext_const, "ar.ccv", {{ 0, 0}}, 0, "ar.ccv" }, -+ { CST, ins_const, ext_const, "ar.pfs", {{ 0, 0}}, 0, "ar.pfs" }, -+ { CST, ins_const, ext_const, "1", {{ 0, 0}}, 0, "1" }, -+ { CST, ins_const, ext_const, "8", {{ 0, 0}}, 0, "8" }, -+ { CST, ins_const, ext_const, "16", {{ 0, 0}}, 0, "16" }, -+ { CST, ins_const, ext_const, "r0", {{ 0, 0}}, 0, "r0" }, -+ { CST, ins_const, ext_const, "ip", {{ 0, 0}}, 0, "ip" }, -+ { CST, ins_const, ext_const, "pr", {{ 0, 0}}, 0, "pr" }, -+ { CST, ins_const, ext_const, "pr.rot", {{ 0, 0}}, 0, "pr.rot" }, -+ { CST, ins_const, ext_const, "psr", {{ 0, 0}}, 0, "psr" }, -+ { CST, ins_const, ext_const, "psr.l", {{ 0, 0}}, 0, "psr.l" }, -+ { CST, 
ins_const, ext_const, "psr.um", {{ 0, 0}}, 0, "psr.um" }, -+ -+ /* register operands: */ -+ { REG, ins_reg, ext_reg, "ar", {{ 7, 20}}, 0, /* AR3 */ -+ "an application register" }, -+ { REG, ins_reg, ext_reg, "b", {{ 3, 6}}, 0, /* B1 */ -+ "a branch register" }, -+ { REG, ins_reg, ext_reg, "b", {{ 3, 13}}, 0, /* B2 */ -+ "a branch register"}, -+ { REG, ins_reg, ext_reg, "cr", {{ 7, 20}}, 0, /* CR */ -+ "a control register"}, -+ { REG, ins_reg, ext_reg, "f", {{ 7, 6}}, 0, /* F1 */ -+ "a floating-point register" }, -+ { REG, ins_reg, ext_reg, "f", {{ 7, 13}}, 0, /* F2 */ -+ "a floating-point register" }, -+ { REG, ins_reg, ext_reg, "f", {{ 7, 20}}, 0, /* F3 */ -+ "a floating-point register" }, -+ { REG, ins_reg, ext_reg, "f", {{ 7, 27}}, 0, /* F4 */ -+ "a floating-point register" }, -+ { REG, ins_reg, ext_reg, "p", {{ 6, 6}}, 0, /* P1 */ -+ "a predicate register" }, -+ { REG, ins_reg, ext_reg, "p", {{ 6, 27}}, 0, /* P2 */ -+ "a predicate register" }, -+ { REG, ins_reg, ext_reg, "r", {{ 7, 6}}, 0, /* R1 */ -+ "a general register" }, -+ { REG, ins_reg, ext_reg, "r", {{ 7, 13}}, 0, /* R2 */ -+ "a general register" }, -+ { REG, ins_reg, ext_reg, "r", {{ 7, 20}}, 0, /* R3 */ -+ "a general register" }, -+ { REG, ins_reg, ext_reg, "r", {{ 2, 20}}, 0, /* R3_2 */ -+ "a general register r0-r3" }, -+ -+ /* indirect operands: */ -+ { IND, ins_reg, ext_reg, "cpuid", {{7, 20}}, 0, /* CPUID_R3 */ -+ "a cpuid register" }, -+ { IND, ins_reg, ext_reg, "dbr", {{7, 20}}, 0, /* DBR_R3 */ -+ "a dbr register" }, -+ { IND, ins_reg, ext_reg, "dtr", {{7, 20}}, 0, /* DTR_R3 */ -+ "a dtr register" }, -+ { IND, ins_reg, ext_reg, "itr", {{7, 20}}, 0, /* ITR_R3 */ -+ "an itr register" }, -+ { IND, ins_reg, ext_reg, "ibr", {{7, 20}}, 0, /* IBR_R3 */ -+ "an ibr register" }, -+ { IND, ins_reg, ext_reg, "", {{7, 20}}, 0, /* MR3 */ -+ "an indirect memory address" }, -+ { IND, ins_reg, ext_reg, "msr", {{7, 20}}, 0, /* MSR_R3 */ -+ "an msr register" }, -+ { IND, ins_reg, ext_reg, "pkr", {{7, 20}}, 0, /* PKR_R3 */ -+ "a pkr register" }, -+ { IND, ins_reg, ext_reg, "pmc", {{7, 20}}, 0, /* PMC_R3 */ -+ "a pmc register" }, -+ { IND, ins_reg, ext_reg, "pmd", {{7, 20}}, 0, /* PMD_R3 */ -+ "a pmd register" }, -+ { IND, ins_reg, ext_reg, "rr", {{7, 20}}, 0, /* RR_R3 */ -+ "an rr register" }, -+ -+ /* immediate operands: */ -+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 5, 20 }}, UDEC, /* CCNT5 */ -+ "a 5-bit count (0-31)" }, -+ { ABS, ins_cnt, ext_cnt, 0, {{ 2, 27 }}, UDEC, /* CNT2a */ -+ "a 2-bit count (1-4)" }, -+ { ABS, ins_cnt2b, ext_cnt2b, 0, {{ 2, 27 }}, UDEC, /* CNT2b */ -+ "a 2-bit count (1-3)" }, -+ { ABS, ins_cnt2c, ext_cnt2c, 0, {{ 2, 30 }}, UDEC, /* CNT2c */ -+ "a count (0, 7, 15, or 16)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 5, 14}}, UDEC, /* CNT5 */ -+ "a 5-bit count (0-31)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 6, 27}}, UDEC, /* CNT6 */ -+ "a 6-bit count (0-63)" }, -+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 6, 20}}, UDEC, /* CPOS6a */ -+ "a 6-bit bit pos (0-63)" }, -+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 6, 14}}, UDEC, /* CPOS6b */ -+ "a 6-bit bit pos (0-63)" }, -+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 6, 31}}, UDEC, /* CPOS6c */ -+ "a 6-bit bit pos (0-63)" }, -+ { ABS, ins_imms, ext_imms, 0, {{ 1, 36}}, SDEC, /* IMM1 */ -+ "a 1-bit integer (-1, 0)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 2, 13}}, UDEC, /* IMMU2 */ -+ "a 2-bit unsigned (0-3)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 7, 13}}, 0, /* IMMU7a */ -+ "a 7-bit unsigned (0-127)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 7, 20}}, 0, /* IMMU7b */ -+ "a 7-bit unsigned (0-127)" }, -+ { ABS, 
ins_immu, ext_immu, 0, {{ 7, 13}}, UDEC, /* SOF */ -+ "a frame size (register count)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 7, 20}}, UDEC, /* SOL */ -+ "a local register count" }, -+ { ABS, ins_immus8,ext_immus8,0, {{ 4, 27}}, UDEC, /* SOR */ -+ "a rotating register count (integer multiple of 8)" }, -+ { ABS, ins_imms, ext_imms, 0, /* IMM8 */ -+ {{ 7, 13}, { 1, 36}}, SDEC, -+ "an 8-bit integer (-128-127)" }, -+ { ABS, ins_immsu4, ext_imms, 0, /* IMM8U4 */ -+ {{ 7, 13}, { 1, 36}}, SDEC, -+ "an 8-bit signed integer for 32-bit unsigned compare (-128-127)" }, -+ { ABS, ins_immsm1, ext_immsm1, 0, /* IMM8M1 */ -+ {{ 7, 13}, { 1, 36}}, SDEC, -+ "an 8-bit integer (-127-128)" }, -+ { ABS, ins_immsm1u4, ext_immsm1, 0, /* IMM8M1U4 */ -+ {{ 7, 13}, { 1, 36}}, SDEC, -+ "an 8-bit integer for 32-bit unsigned compare (-127-(-1),1-128,0x100000000)" }, -+ { ABS, ins_immsm1, ext_immsm1, 0, /* IMM8M1U8 */ -+ {{ 7, 13}, { 1, 36}}, SDEC, -+ "an 8-bit integer for 64-bit unsigned compare (-127-(-1),1-128,0x10000000000000000)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 2, 33}, { 7, 20}}, 0, /* IMMU9 */ -+ "a 9-bit unsigned (0-511)" }, -+ { ABS, ins_imms, ext_imms, 0, /* IMM9a */ -+ {{ 7, 6}, { 1, 27}, { 1, 36}}, SDEC, -+ "a 9-bit integer (-256-255)" }, -+ { ABS, ins_imms, ext_imms, 0, /* IMM9b */ -+ {{ 7, 13}, { 1, 27}, { 1, 36}}, SDEC, -+ "a 9-bit integer (-256-255)" }, -+ { ABS, ins_imms, ext_imms, 0, /* IMM14 */ -+ {{ 7, 13}, { 6, 27}, { 1, 36}}, SDEC, -+ "a 14-bit integer (-8192-8191)" }, -+ { ABS, ins_imms1, ext_imms1, 0, /* IMM17 */ -+ {{ 7, 6}, { 8, 24}, { 1, 36}}, 0, -+ "a 17-bit integer (-65536-65535)" }, -+ { ABS, ins_immu, ext_immu, 0, {{20, 6}, { 1, 36}}, 0, /* IMMU21 */ -+ "a 21-bit unsigned" }, -+ { ABS, ins_imms, ext_imms, 0, /* IMM22 */ -+ {{ 7, 13}, { 9, 27}, { 5, 22}, { 1, 36}}, SDEC, -+ "a 22-bit signed integer" }, -+ { ABS, ins_immu, ext_immu, 0, /* IMMU24 */ -+ {{21, 6}, { 2, 31}, { 1, 36}}, 0, -+ "a 24-bit unsigned" }, -+ { ABS, ins_imms16,ext_imms16,0, {{27, 6}, { 1, 36}}, 0, /* IMM44 */ -+ "a 44-bit unsigned (least 16 bits ignored/zeroes)" }, -+ { ABS, ins_rsvd, ext_rsvd, 0, {{0, 0}}, 0, /* IMMU62 */ -+ "a 62-bit unsigned" }, -+ { ABS, ins_rsvd, ext_rsvd, 0, {{0, 0}}, 0, /* IMMU64 */ -+ "a 64-bit unsigned" }, -+ { ABS, ins_inc3, ext_inc3, 0, {{ 3, 13}}, SDEC, /* INC3 */ -+ "an increment (+/- 1, 4, 8, or 16)" }, -+ { ABS, ins_cnt, ext_cnt, 0, {{ 4, 27}}, UDEC, /* LEN4 */ -+ "a 4-bit length (1-16)" }, -+ { ABS, ins_cnt, ext_cnt, 0, {{ 6, 27}}, UDEC, /* LEN6 */ -+ "a 6-bit length (1-64)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 4, 20}}, 0, /* MBTYPE4 */ -+ "a mix type (@rev, @mix, @shuf, @alt, or @brcst)" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 8, 20}}, 0, /* MBTYPE8 */ -+ "an 8-bit mix type" }, -+ { ABS, ins_immu, ext_immu, 0, {{ 6, 14}}, UDEC, /* POS6 */ -+ "a 6-bit bit pos (0-63)" }, -+ { REL, ins_imms4, ext_imms4, 0, {{ 7, 6}, { 2, 33}}, 0, /* TAG13 */ -+ "a branch tag" }, -+ { REL, ins_imms4, ext_imms4, 0, {{ 9, 24}}, 0, /* TAG13b */ -+ "a branch tag" }, -+ { REL, ins_imms4, ext_imms4, 0, {{20, 6}, { 1, 36}}, 0, /* TGT25 */ -+ "a branch target" }, -+ { REL, ins_imms4, ext_imms4, 0, /* TGT25b */ -+ {{ 7, 6}, {13, 20}, { 1, 36}}, 0, -+ "a branch target" }, -+ { REL, ins_imms4, ext_imms4, 0, {{20, 13}, { 1, 36}}, 0, /* TGT25c */ -+ "a branch target" }, -+ { REL, ins_rsvd, ext_rsvd, 0, {{0, 0}}, 0, /* TGT64 */ -+ "a branch target" }, -+ -+ { ABS, ins_const, ext_const, 0, {{0, 0}}, 0, /* LDXMOV */ -+ "ldxmov target" }, -+ }; ---- /dev/null -+++ b/arch/ia64/kdb/ia64-asmtab.c -@@ -0,0 +1,8585 @@ -+/* 
This file is automatically generated by ia64-gen. Do not edit! */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+static const char * const ia64_strings[] = { -+ "", "0", "1", "a", "acq", "add", "addl", "addp4", "adds", "alloc", "and", -+ "andcm", "b", "bias", "br", "break", "brl", "brp", "bsw", "c", "call", -+ "cexit", "chk", "cloop", "clr", "clrrrb", "cmp", "cmp4", "cmp8xchg16", -+ "cmpxchg1", "cmpxchg2", "cmpxchg4", "cmpxchg8", "cond", "cover", "ctop", -+ "czx1", "czx2", "d", "dep", "dpnt", "dptk", "e", "epc", "eq", "excl", -+ "exit", "exp", "extr", "f", "fabs", "fadd", "famax", "famin", "fand", -+ "fandcm", "fault", "fc", "fchkf", "fclass", "fclrf", "fcmp", "fcvt", -+ "fetchadd4", "fetchadd8", "few", "fill", "flushrs", "fma", "fmax", -+ "fmerge", "fmin", "fmix", "fmpy", "fms", "fneg", "fnegabs", "fnma", -+ "fnmpy", "fnorm", "for", "fpabs", "fpack", "fpamax", "fpamin", "fpcmp", -+ "fpcvt", "fpma", "fpmax", "fpmerge", "fpmin", "fpmpy", "fpms", "fpneg", -+ "fpnegabs", "fpnma", "fpnmpy", "fprcpa", "fprsqrta", "frcpa", "frsqrta", -+ "fselect", "fsetc", "fsub", "fswap", "fsxt", "fwb", "fx", "fxor", "fxu", -+ "g", "ga", "ge", "getf", "geu", "gt", "gtu", "h", "hint", "hu", "i", "ia", -+ "imp", "invala", "itc", "itr", "l", "ld1", "ld16", "ld2", "ld4", "ld8", -+ "ldf", "ldf8", "ldfd", "ldfe", "ldfp8", "ldfpd", "ldfps", "ldfs", "le", -+ "leu", "lfetch", "loadrs", "loop", "lr", "lt", "ltu", "lu", "m", "many", -+ "mf", "mix1", "mix2", "mix4", "mov", "movl", "mux1", "mux2", "nc", "ne", -+ "neq", "nge", "ngt", "nl", "nle", "nlt", "nm", "nop", "nr", "ns", "nt1", -+ "nt2", "nta", "nz", "or", "orcm", "ord", "pack2", "pack4", "padd1", -+ "padd2", "padd4", "pavg1", "pavg2", "pavgsub1", "pavgsub2", "pcmp1", -+ "pcmp2", "pcmp4", "pmax1", "pmax2", "pmin1", "pmin2", "pmpy2", "pmpyshr2", -+ "popcnt", "pr", "probe", "psad1", "pshl2", "pshl4", "pshladd2", "pshr2", -+ "pshr4", "pshradd2", "psub1", "psub2", "psub4", "ptc", "ptr", "r", "raz", -+ "rel", "ret", "rfi", "rsm", "rum", "rw", "s", "s0", "s1", "s2", "s3", -+ "sa", "se", "setf", "shl", "shladd", "shladdp4", "shr", "shrp", "sig", -+ "spill", "spnt", "sptk", "srlz", "ssm", "sss", "st1", "st16", "st2", -+ "st4", "st8", "stf", "stf8", "stfd", "stfe", "stfs", "sub", "sum", "sxt1", -+ "sxt2", "sxt4", "sync", "tak", "tbit", "thash", "tnat", "tpa", "trunc", -+ "ttag", "u", "unc", "unord", "unpack1", "unpack2", "unpack4", "uss", -+ "uus", "uuu", "w", "wexit", "wtop", "x", "xchg1", "xchg2", "xchg4", -+ "xchg8", "xf", "xma", "xmpy", "xor", "xuf", "z", "zxt1", "zxt2", "zxt4", -+}; -+ -+static const struct ia64_dependency -+dependencies[] = { -+ { "ALAT", 0, 0, 0, -1, NULL, }, -+ { "AR[BSP]", 26, 0, 2, 17, NULL, }, -+ { "AR[BSPSTORE]", 26, 0, 2, 18, NULL, }, -+ { "AR[CFLG]", 26, 0, 2, 27, NULL, }, -+ { "AR[CCV]", 26, 0, 2, 32, NULL, }, -+ { "AR[CSD]", 26, 0, 2, 25, NULL, }, -+ { "AR[EC]", 26, 0, 2, 66, NULL, }, -+ { "AR[EFLAG]", 26, 0, 2, 24, NULL, }, -+ { "AR[FCR]", 26, 0, 2, 21, NULL, }, -+ { "AR[FDR]", 26, 0, 2, 30, NULL, }, -+ { "AR[FIR]", 26, 0, 2, 29, NULL, }, -+ { "AR[FPSR].sf0.controls", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].sf1.controls", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].sf2.controls", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].sf3.controls", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].sf0.flags", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].sf1.flags", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].sf2.flags", 
30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].sf3.flags", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].traps", 30, 0, 2, -1, NULL, }, -+ { "AR[FPSR].rv", 30, 0, 2, -1, NULL, }, -+ { "AR[FSR]", 26, 0, 2, 28, NULL, }, -+ { "AR[ITC]", 26, 0, 2, 44, NULL, }, -+ { "AR[K%], % in 0 - 7", 1, 0, 2, -1, NULL, }, -+ { "AR[LC]", 26, 0, 2, 65, NULL, }, -+ { "AR[PFS]", 26, 0, 2, 64, NULL, }, -+ { "AR[PFS]", 26, 0, 2, 64, NULL, }, -+ { "AR[PFS]", 26, 0, 0, 64, NULL, }, -+ { "AR[RNAT]", 26, 0, 2, 19, NULL, }, -+ { "AR[RSC]", 26, 0, 2, 16, NULL, }, -+ { "AR[SSD]", 26, 0, 2, 26, NULL, }, -+ { "AR[UNAT]{%}, % in 0 - 63", 2, 0, 2, -1, NULL, }, -+ { "AR%, % in 8-15, 20, 22-23, 31, 33-35, 37-39, 41-43, 45-47, 67-111", 3, 0, 0, -1, NULL, }, -+ { "AR%, % in 48-63, 112-127", 4, 0, 2, -1, NULL, }, -+ { "BR%, % in 0 - 7", 5, 0, 2, -1, NULL, }, -+ { "BR%, % in 0 - 7", 5, 0, 0, -1, NULL, }, -+ { "BR%, % in 0 - 7", 5, 0, 2, -1, NULL, }, -+ { "CFM", 6, 0, 2, -1, NULL, }, -+ { "CFM", 6, 0, 2, -1, NULL, }, -+ { "CFM", 6, 0, 2, -1, NULL, }, -+ { "CFM", 6, 0, 2, -1, NULL, }, -+ { "CFM", 6, 0, 0, -1, NULL, }, -+ { "CPUID#", 7, 0, 5, -1, NULL, }, -+ { "CR[CMCV]", 27, 0, 3, 74, NULL, }, -+ { "CR[DCR]", 27, 0, 3, 0, NULL, }, -+ { "CR[EOI]", 27, 0, 7, 67, "SC Section 10.8.3.4", }, -+ { "CR[GPTA]", 27, 0, 3, 9, NULL, }, -+ { "CR[IFA]", 27, 0, 1, 20, NULL, }, -+ { "CR[IFA]", 27, 0, 3, 20, NULL, }, -+ { "CR[IFS]", 27, 0, 3, 23, NULL, }, -+ { "CR[IFS]", 27, 0, 1, 23, NULL, }, -+ { "CR[IFS]", 27, 0, 1, 23, NULL, }, -+ { "CR[IHA]", 27, 0, 3, 25, NULL, }, -+ { "CR[IIM]", 27, 0, 3, 24, NULL, }, -+ { "CR[IIP]", 27, 0, 3, 19, NULL, }, -+ { "CR[IIP]", 27, 0, 1, 19, NULL, }, -+ { "CR[IIPA]", 27, 0, 3, 22, NULL, }, -+ { "CR[IPSR]", 27, 0, 3, 16, NULL, }, -+ { "CR[IPSR]", 27, 0, 1, 16, NULL, }, -+ { "CR[IRR%], % in 0 - 3", 8, 0, 3, -1, NULL, }, -+ { "CR[ISR]", 27, 0, 3, 17, NULL, }, -+ { "CR[ITIR]", 27, 0, 3, 21, NULL, }, -+ { "CR[ITIR]", 27, 0, 1, 21, NULL, }, -+ { "CR[ITM]", 27, 0, 3, 1, NULL, }, -+ { "CR[ITV]", 27, 0, 3, 72, NULL, }, -+ { "CR[IVA]", 27, 0, 4, 2, NULL, }, -+ { "CR[IVR]", 27, 0, 7, 65, "SC Section 10.8.3.2", }, -+ { "CR[LID]", 27, 0, 7, 64, "SC Section 10.8.3.1", }, -+ { "CR[LRR%], % in 0 - 1", 9, 0, 3, -1, NULL, }, -+ { "CR[PMV]", 27, 0, 3, 73, NULL, }, -+ { "CR[PTA]", 27, 0, 3, 8, NULL, }, -+ { "CR[TPR]", 27, 0, 3, 66, NULL, }, -+ { "CR[TPR]", 27, 0, 7, 66, "SC Section 10.8.3.3", }, -+ { "CR%, % in 3-7, 10-15, 18, 26-63, 75-79, 82-127", 10, 0, 0, -1, NULL, }, -+ { "DBR#", 11, 0, 2, -1, NULL, }, -+ { "DBR#", 11, 0, 3, -1, NULL, }, -+ { "DTC", 0, 0, 3, -1, NULL, }, -+ { "DTC", 0, 0, 2, -1, NULL, }, -+ { "DTC", 0, 0, 0, -1, NULL, }, -+ { "DTC", 0, 0, 2, -1, NULL, }, -+ { "DTC_LIMIT*", 0, 0, 2, -1, NULL, }, -+ { "DTR", 0, 0, 3, -1, NULL, }, -+ { "DTR", 0, 0, 2, -1, NULL, }, -+ { "DTR", 0, 0, 3, -1, NULL, }, -+ { "DTR", 0, 0, 0, -1, NULL, }, -+ { "DTR", 0, 0, 2, -1, NULL, }, -+ { "FR%, % in 0 - 1", 12, 0, 0, -1, NULL, }, -+ { "FR%, % in 2 - 127", 13, 0, 2, -1, NULL, }, -+ { "FR%, % in 2 - 127", 13, 0, 0, -1, NULL, }, -+ { "GR0", 14, 0, 0, -1, NULL, }, -+ { "GR%, % in 1 - 127", 15, 0, 0, -1, NULL, }, -+ { "GR%, % in 1 - 127", 15, 0, 2, -1, NULL, }, -+ { "IBR#", 16, 0, 2, -1, NULL, }, -+ { "InService*", 17, 0, 3, -1, NULL, }, -+ { "InService*", 17, 0, 2, -1, NULL, }, -+ { "InService*", 17, 0, 2, -1, NULL, }, -+ { "IP", 0, 0, 0, -1, NULL, }, -+ { "ITC", 0, 0, 4, -1, NULL, }, -+ { "ITC", 0, 0, 2, -1, NULL, }, -+ { "ITC", 0, 0, 0, -1, NULL, }, -+ { "ITC", 0, 0, 4, -1, NULL, }, -+ { "ITC", 0, 0, 2, -1, NULL, }, -+ { "ITC_LIMIT*", 0, 0, 2, -1, NULL, 
}, -+ { "ITR", 0, 0, 2, -1, NULL, }, -+ { "ITR", 0, 0, 4, -1, NULL, }, -+ { "ITR", 0, 0, 2, -1, NULL, }, -+ { "ITR", 0, 0, 0, -1, NULL, }, -+ { "ITR", 0, 0, 4, -1, NULL, }, -+ { "memory", 0, 0, 0, -1, NULL, }, -+ { "MSR#", 18, 0, 5, -1, NULL, }, -+ { "PKR#", 19, 0, 3, -1, NULL, }, -+ { "PKR#", 19, 0, 0, -1, NULL, }, -+ { "PKR#", 19, 0, 2, -1, NULL, }, -+ { "PKR#", 19, 0, 2, -1, NULL, }, -+ { "PMC#", 20, 0, 2, -1, NULL, }, -+ { "PMC#", 20, 0, 7, -1, "SC+3 Section 12.1.1", }, -+ { "PMD#", 21, 0, 2, -1, NULL, }, -+ { "PR0", 0, 0, 0, -1, NULL, }, -+ { "PR%, % in 1 - 15", 22, 0, 2, -1, NULL, }, -+ { "PR%, % in 1 - 15", 22, 0, 2, -1, NULL, }, -+ { "PR%, % in 1 - 15", 22, 0, 0, -1, NULL, }, -+ { "PR%, % in 16 - 62", 23, 0, 2, -1, NULL, }, -+ { "PR%, % in 16 - 62", 23, 0, 2, -1, NULL, }, -+ { "PR%, % in 16 - 62", 23, 0, 0, -1, NULL, }, -+ { "PR63", 24, 0, 2, -1, NULL, }, -+ { "PR63", 24, 0, 2, -1, NULL, }, -+ { "PR63", 24, 0, 0, -1, NULL, }, -+ { "PSR.ac", 28, 0, 1, 3, NULL, }, -+ { "PSR.ac", 28, 0, 3, 3, NULL, }, -+ { "PSR.ac", 28, 0, 2, 3, NULL, }, -+ { "PSR.be", 28, 0, 1, 1, NULL, }, -+ { "PSR.be", 28, 0, 3, 1, NULL, }, -+ { "PSR.be", 28, 0, 2, 1, NULL, }, -+ { "PSR.bn", 28, 0, 2, 44, NULL, }, -+ { "PSR.cpl", 28, 0, 1, 32, NULL, }, -+ { "PSR.da", 28, 0, 3, 38, NULL, }, -+ { "PSR.db", 28, 0, 3, 24, NULL, }, -+ { "PSR.db", 28, 0, 2, 24, NULL, }, -+ { "PSR.db", 28, 0, 3, 24, NULL, }, -+ { "PSR.dd", 28, 0, 3, 39, NULL, }, -+ { "PSR.dfh", 28, 0, 3, 19, NULL, }, -+ { "PSR.dfh", 28, 0, 2, 19, NULL, }, -+ { "PSR.dfl", 28, 0, 3, 18, NULL, }, -+ { "PSR.dfl", 28, 0, 2, 18, NULL, }, -+ { "PSR.di", 28, 0, 3, 22, NULL, }, -+ { "PSR.di", 28, 0, 2, 22, NULL, }, -+ { "PSR.dt", 28, 0, 3, 17, NULL, }, -+ { "PSR.dt", 28, 0, 2, 17, NULL, }, -+ { "PSR.ed", 28, 0, 3, 43, NULL, }, -+ { "PSR.i", 28, 0, 2, 14, NULL, }, -+ { "PSR.i", 28, 0, 3, 14, NULL, }, -+ { "PSR.ia", 28, 0, 0, 14, NULL, }, -+ { "PSR.ic", 28, 0, 2, 13, NULL, }, -+ { "PSR.ic", 28, 0, 3, 13, NULL, }, -+ { "PSR.id", 28, 0, 0, 14, NULL, }, -+ { "PSR.is", 28, 0, 0, 14, NULL, }, -+ { "PSR.it", 28, 0, 3, 14, NULL, }, -+ { "PSR.lp", 28, 0, 2, 25, NULL, }, -+ { "PSR.lp", 28, 0, 3, 25, NULL, }, -+ { "PSR.lp", 28, 0, 3, 25, NULL, }, -+ { "PSR.mc", 28, 0, 0, 35, NULL, }, -+ { "PSR.mfh", 28, 0, 2, 5, NULL, }, -+ { "PSR.mfl", 28, 0, 2, 4, NULL, }, -+ { "PSR.pk", 28, 0, 3, 15, NULL, }, -+ { "PSR.pk", 28, 0, 2, 15, NULL, }, -+ { "PSR.pp", 28, 0, 2, 21, NULL, }, -+ { "PSR.ri", 28, 0, 0, 41, NULL, }, -+ { "PSR.rt", 28, 0, 2, 27, NULL, }, -+ { "PSR.rt", 28, 0, 3, 27, NULL, }, -+ { "PSR.rt", 28, 0, 3, 27, NULL, }, -+ { "PSR.si", 28, 0, 2, 23, NULL, }, -+ { "PSR.si", 28, 0, 3, 23, NULL, }, -+ { "PSR.sp", 28, 0, 2, 20, NULL, }, -+ { "PSR.sp", 28, 0, 3, 20, NULL, }, -+ { "PSR.ss", 28, 0, 3, 40, NULL, }, -+ { "PSR.tb", 28, 0, 3, 26, NULL, }, -+ { "PSR.tb", 28, 0, 2, 26, NULL, }, -+ { "PSR.up", 28, 0, 2, 2, NULL, }, -+ { "RR#", 25, 0, 3, -1, NULL, }, -+ { "RR#", 25, 0, 2, -1, NULL, }, -+ { "RSE", 29, 0, 2, -1, NULL, }, -+ { "ALAT", 0, 1, 0, -1, NULL, }, -+ { "AR[BSP]", 26, 1, 2, 17, NULL, }, -+ { "AR[BSPSTORE]", 26, 1, 2, 18, NULL, }, -+ { "AR[CCV]", 26, 1, 2, 32, NULL, }, -+ { "AR[CFLG]", 26, 1, 2, 27, NULL, }, -+ { "AR[CSD]", 26, 1, 2, 25, NULL, }, -+ { "AR[EC]", 26, 1, 2, 66, NULL, }, -+ { "AR[EFLAG]", 26, 1, 2, 24, NULL, }, -+ { "AR[FCR]", 26, 1, 2, 21, NULL, }, -+ { "AR[FDR]", 26, 1, 2, 30, NULL, }, -+ { "AR[FIR]", 26, 1, 2, 29, NULL, }, -+ { "AR[FPSR].sf0.controls", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf1.controls", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf2.controls", 
30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf3.controls", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf0.flags", 30, 1, 0, -1, NULL, }, -+ { "AR[FPSR].sf0.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf0.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf1.flags", 30, 1, 0, -1, NULL, }, -+ { "AR[FPSR].sf1.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf1.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf2.flags", 30, 1, 0, -1, NULL, }, -+ { "AR[FPSR].sf2.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf2.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf3.flags", 30, 1, 0, -1, NULL, }, -+ { "AR[FPSR].sf3.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].sf3.flags", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].rv", 30, 1, 2, -1, NULL, }, -+ { "AR[FPSR].traps", 30, 1, 2, -1, NULL, }, -+ { "AR[FSR]", 26, 1, 2, 28, NULL, }, -+ { "AR[ITC]", 26, 1, 2, 44, NULL, }, -+ { "AR[K%], % in 0 - 7", 1, 1, 2, -1, NULL, }, -+ { "AR[LC]", 26, 1, 2, 65, NULL, }, -+ { "AR[PFS]", 26, 1, 0, 64, NULL, }, -+ { "AR[PFS]", 26, 1, 2, 64, NULL, }, -+ { "AR[PFS]", 26, 1, 2, 64, NULL, }, -+ { "AR[RNAT]", 26, 1, 2, 19, NULL, }, -+ { "AR[RSC]", 26, 1, 2, 16, NULL, }, -+ { "AR[UNAT]{%}, % in 0 - 63", 2, 1, 2, -1, NULL, }, -+ { "AR%, % in 8-15, 20, 22-23, 31, 33-35, 37-39, 41-43, 45-47, 67-111", 3, 1, 0, -1, NULL, }, -+ { "AR%, % in 48 - 63, 112-127", 4, 1, 2, -1, NULL, }, -+ { "BR%, % in 0 - 7", 5, 1, 2, -1, NULL, }, -+ { "BR%, % in 0 - 7", 5, 1, 2, -1, NULL, }, -+ { "BR%, % in 0 - 7", 5, 1, 2, -1, NULL, }, -+ { "BR%, % in 0 - 7", 5, 1, 0, -1, NULL, }, -+ { "CFM", 6, 1, 2, -1, NULL, }, -+ { "CPUID#", 7, 1, 0, -1, NULL, }, -+ { "CR[CMCV]", 27, 1, 2, 74, NULL, }, -+ { "CR[DCR]", 27, 1, 2, 0, NULL, }, -+ { "CR[EOI]", 27, 1, 7, 67, "SC Section 10.8.3.4", }, -+ { "CR[GPTA]", 27, 1, 2, 9, NULL, }, -+ { "CR[IFA]", 27, 1, 2, 20, NULL, }, -+ { "CR[IFS]", 27, 1, 2, 23, NULL, }, -+ { "CR[IHA]", 27, 1, 2, 25, NULL, }, -+ { "CR[IIM]", 27, 1, 2, 24, NULL, }, -+ { "CR[IIP]", 27, 1, 2, 19, NULL, }, -+ { "CR[IIPA]", 27, 1, 2, 22, NULL, }, -+ { "CR[IPSR]", 27, 1, 2, 16, NULL, }, -+ { "CR[IRR%], % in 0 - 3", 8, 1, 2, -1, NULL, }, -+ { "CR[ISR]", 27, 1, 2, 17, NULL, }, -+ { "CR[ITIR]", 27, 1, 2, 21, NULL, }, -+ { "CR[ITM]", 27, 1, 2, 1, NULL, }, -+ { "CR[ITV]", 27, 1, 2, 72, NULL, }, -+ { "CR[IVA]", 27, 1, 2, 2, NULL, }, -+ { "CR[IVR]", 27, 1, 7, 65, "SC", }, -+ { "CR[LID]", 27, 1, 7, 64, "SC", }, -+ { "CR[LRR%], % in 0 - 1", 9, 1, 2, -1, NULL, }, -+ { "CR[PMV]", 27, 1, 2, 73, NULL, }, -+ { "CR[PTA]", 27, 1, 2, 8, NULL, }, -+ { "CR[TPR]", 27, 1, 2, 66, NULL, }, -+ { "CR%, % in 3-7, 10-15, 18, 26-63, 75-79, 82-127", 10, 1, 0, -1, NULL, }, -+ { "DBR#", 11, 1, 2, -1, NULL, }, -+ { "DTC", 0, 1, 0, -1, NULL, }, -+ { "DTC", 0, 1, 2, -1, NULL, }, -+ { "DTC", 0, 1, 2, -1, NULL, }, -+ { "DTC_LIMIT*", 0, 1, 2, -1, NULL, }, -+ { "DTR", 0, 1, 2, -1, NULL, }, -+ { "DTR", 0, 1, 2, -1, NULL, }, -+ { "DTR", 0, 1, 2, -1, NULL, }, -+ { "DTR", 0, 1, 0, -1, NULL, }, -+ { "FR%, % in 0 - 1", 12, 1, 0, -1, NULL, }, -+ { "FR%, % in 2 - 127", 13, 1, 2, -1, NULL, }, -+ { "GR0", 14, 1, 0, -1, NULL, }, -+ { "GR%, % in 1 - 127", 15, 1, 2, -1, NULL, }, -+ { "IBR#", 16, 1, 2, -1, NULL, }, -+ { "InService*", 17, 1, 7, -1, "SC", }, -+ { "IP", 0, 1, 0, -1, NULL, }, -+ { "ITC", 0, 1, 0, -1, NULL, }, -+ { "ITC", 0, 1, 2, -1, NULL, }, -+ { "ITC", 0, 1, 2, -1, NULL, }, -+ { "ITR", 0, 1, 2, -1, NULL, }, -+ { "ITR", 0, 1, 2, -1, NULL, }, -+ { "ITR", 0, 1, 0, -1, NULL, }, -+ { "memory", 0, 1, 0, -1, NULL, }, -+ { "MSR#", 18, 1, 7, -1, "SC", }, -+ { "PKR#", 19, 1, 0, -1, NULL, }, -+ { "PKR#", 19, 1, 0, -1, 
NULL, }, -+ { "PKR#", 19, 1, 2, -1, NULL, }, -+ { "PMC#", 20, 1, 2, -1, NULL, }, -+ { "PMD#", 21, 1, 2, -1, NULL, }, -+ { "PR0", 0, 1, 0, -1, NULL, }, -+ { "PR%, % in 1 - 15", 22, 1, 0, -1, NULL, }, -+ { "PR%, % in 1 - 15", 22, 1, 0, -1, NULL, }, -+ { "PR%, % in 1 - 15", 22, 1, 2, -1, NULL, }, -+ { "PR%, % in 1 - 15", 22, 1, 2, -1, NULL, }, -+ { "PR%, % in 16 - 62", 23, 1, 0, -1, NULL, }, -+ { "PR%, % in 16 - 62", 23, 1, 0, -1, NULL, }, -+ { "PR%, % in 16 - 62", 23, 1, 2, -1, NULL, }, -+ { "PR%, % in 16 - 62", 23, 1, 2, -1, NULL, }, -+ { "PR63", 24, 1, 0, -1, NULL, }, -+ { "PR63", 24, 1, 0, -1, NULL, }, -+ { "PR63", 24, 1, 2, -1, NULL, }, -+ { "PR63", 24, 1, 2, -1, NULL, }, -+ { "PSR.ac", 28, 1, 2, 3, NULL, }, -+ { "PSR.be", 28, 1, 2, 1, NULL, }, -+ { "PSR.bn", 28, 1, 2, 44, NULL, }, -+ { "PSR.cpl", 28, 1, 2, 32, NULL, }, -+ { "PSR.da", 28, 1, 2, 38, NULL, }, -+ { "PSR.db", 28, 1, 2, 24, NULL, }, -+ { "PSR.dd", 28, 1, 2, 39, NULL, }, -+ { "PSR.dfh", 28, 1, 2, 19, NULL, }, -+ { "PSR.dfl", 28, 1, 2, 18, NULL, }, -+ { "PSR.di", 28, 1, 2, 22, NULL, }, -+ { "PSR.dt", 28, 1, 2, 17, NULL, }, -+ { "PSR.ed", 28, 1, 2, 43, NULL, }, -+ { "PSR.i", 28, 1, 2, 14, NULL, }, -+ { "PSR.ia", 28, 1, 2, 14, NULL, }, -+ { "PSR.ic", 28, 1, 2, 13, NULL, }, -+ { "PSR.id", 28, 1, 2, 14, NULL, }, -+ { "PSR.is", 28, 1, 2, 14, NULL, }, -+ { "PSR.it", 28, 1, 2, 14, NULL, }, -+ { "PSR.lp", 28, 1, 2, 25, NULL, }, -+ { "PSR.mc", 28, 1, 2, 35, NULL, }, -+ { "PSR.mfh", 28, 1, 0, 5, NULL, }, -+ { "PSR.mfh", 28, 1, 2, 5, NULL, }, -+ { "PSR.mfh", 28, 1, 2, 5, NULL, }, -+ { "PSR.mfl", 28, 1, 0, 4, NULL, }, -+ { "PSR.mfl", 28, 1, 2, 4, NULL, }, -+ { "PSR.mfl", 28, 1, 2, 4, NULL, }, -+ { "PSR.pk", 28, 1, 2, 15, NULL, }, -+ { "PSR.pp", 28, 1, 2, 21, NULL, }, -+ { "PSR.ri", 28, 1, 2, 41, NULL, }, -+ { "PSR.rt", 28, 1, 2, 27, NULL, }, -+ { "PSR.si", 28, 1, 2, 23, NULL, }, -+ { "PSR.sp", 28, 1, 2, 20, NULL, }, -+ { "PSR.ss", 28, 1, 2, 40, NULL, }, -+ { "PSR.tb", 28, 1, 2, 26, NULL, }, -+ { "PSR.up", 28, 1, 2, 2, NULL, }, -+ { "RR#", 25, 1, 2, -1, NULL, }, -+ { "RSE", 29, 1, 2, -1, NULL, }, -+ { "PR63", 24, 2, 6, -1, NULL, }, -+}; -+ -+static const unsigned short dep0[] = { -+ 96, 267, 2139, 2312, -+}; -+ -+static const unsigned short dep1[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, 4135, -+ 20613, -+}; -+ -+static const unsigned short dep2[] = { -+ 96, 267, 2165, 2166, 2168, 2169, 2171, 2172, 2174, 2329, 2332, 2333, 2336, -+ 2337, 2340, 2341, -+}; -+ -+static const unsigned short dep3[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2329, 2332, -+ 2333, 2336, 2337, 2340, 2341, 4135, 20613, -+}; -+ -+static const unsigned short dep4[] = { -+ 96, 267, 22645, 22646, 22648, 22649, 22651, 22652, 22654, 22809, 22812, 22813, -+ 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep5[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+ 22809, 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep6[] = { -+ 96, 267, 2165, 2166, 2168, 2169, 2171, 2172, 2174, 2329, 2330, 2332, 2334, -+ 2336, 2338, 2340, -+}; -+ -+static const unsigned short dep7[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2329, 2330, -+ 2333, 2334, 2337, 2338, 2341, 4135, 20613, -+}; -+ -+static const unsigned short dep8[] = { -+ 96, 267, 2165, 2166, 2168, 2169, 2171, 2172, 2174, 2329, 2331, 2333, 2335, -+ 2337, 2339, 2341, -+}; -+ -+static const unsigned short dep9[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 
2139, 2165, 2166, 2169, 2172, 2329, 2331, -+ 2332, 2335, 2336, 2339, 2340, 4135, 20613, -+}; -+ -+static const unsigned short dep10[] = { -+ 96, 267, 2165, 2166, 2168, 2169, 2171, 2172, 2174, 2329, 2330, 2331, 2332, -+ 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, -+}; -+ -+static const unsigned short dep11[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2329, 2330, -+ 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 4135, 20613, -+ -+}; -+ -+static const unsigned short dep12[] = { -+ 96, 267, 2379, -+}; -+ -+static const unsigned short dep13[] = { -+ 40, 41, 96, 156, 174, 175, 267, 2082, 2083, 2165, 2167, 2168, 2170, 2171, -+ 2173, 2174, 4135, -+}; -+ -+static const unsigned short dep14[] = { -+ 96, 155, 267, 310, 2379, 28852, 29002, -+}; -+ -+static const unsigned short dep15[] = { -+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -+ 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 40, 41, 96, 144, 156, 174, 175, -+ 267, 310, 2082, 2083, 2165, 2167, 2168, 2170, 2171, 2173, 2174, 4135, 28852, -+ 29002, -+}; -+ -+static const unsigned short dep16[] = { -+ 1, 6, 40, 96, 134, 182, 187, 226, 267, 297, 2379, 28852, 29002, -+}; -+ -+static const unsigned short dep17[] = { -+ 1, 25, 27, 38, 40, 41, 96, 156, 158, 159, 174, 175, 182, 187, 226, 267, 297, -+ 2082, 2083, 2165, 2167, 2168, 2170, 2171, 2173, 2174, 4135, 28852, 29002, -+ -+}; -+ -+static const unsigned short dep18[] = { -+ 1, 40, 51, 96, 182, 226, 233, 267, 28852, 29002, -+}; -+ -+static const unsigned short dep19[] = { -+ 1, 38, 40, 41, 96, 153, 174, 182, 226, 233, 267, 4135, 28852, 29002, -+}; -+ -+static const unsigned short dep20[] = { -+ 40, 96, 226, 267, -+}; -+ -+static const unsigned short dep21[] = { -+ 96, 174, 226, 267, -+}; -+ -+static const unsigned short dep22[] = { -+ 1, 40, 96, 128, 129, 131, 132, 133, 134, 135, 138, 139, 140, 141, 142, 143, -+ 144, 145, 146, 147, 148, 150, 151, 152, 153, 154, 155, 156, 159, 160, 161, -+ 162, 163, 164, 165, 166, 169, 170, 171, 172, 173, 174, 175, 176, 177, 182, -+ 226, 267, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, -+ 307, 308, 309, 310, 311, 312, 313, 315, 316, 318, 319, 320, 321, 322, 323, -+ 324, 325, 326, 327, 328, 28852, 29002, -+}; -+ -+static const unsigned short dep23[] = { -+ 1, 38, 40, 41, 50, 51, 55, 58, 72, 96, 134, 174, 182, 226, 267, 294, 295, -+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, -+ 311, 312, 313, 315, 316, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, -+ 328, 4135, 28852, 29002, -+}; -+ -+static const unsigned short dep24[] = { -+ 96, 133, 267, 296, -+}; -+ -+static const unsigned short dep25[] = { -+ 96, 134, 174, 267, 296, -+}; -+ -+static const unsigned short dep26[] = { -+ 96, 134, 267, 297, -+}; -+ -+static const unsigned short dep27[] = { -+ 25, 26, 96, 97, 100, 104, 107, 134, 156, 174, 267, 297, -+}; -+ -+static const unsigned short dep28[] = { -+ 40, 41, 96, 174, 267, 2165, 2167, 2168, 2170, 2171, 2173, 2174, 4135, -+}; -+ -+static const unsigned short dep29[] = { -+ 1, 25, 40, 96, 182, 214, 215, 226, 267, 2082, 2270, 2273, 2379, 28852, 29002, -+ -+}; -+ -+static const unsigned short dep30[] = { -+ 1, 6, 38, 40, 41, 96, 134, 156, 174, 175, 182, 214, 216, 226, 267, 2082, 2083, -+ 2165, 2167, 2168, 2170, 2171, 2173, 2174, 2271, 2273, 4135, 28852, 29002, -+ -+}; -+ -+static const unsigned short dep31[] = { -+ 96, 267, -+}; -+ -+static const unsigned short dep32[] = { -+ 96, 174, 267, 2082, 2084, -+}; -+ -+static const unsigned 
short dep33[] = { -+ 40, 41, 96, 156, 174, 175, 267, 2165, 2167, 2168, 2170, 2171, 2173, 2174, -+ 4135, -+}; -+ -+static const unsigned short dep34[] = { -+ 6, 37, 38, 39, 96, 124, 125, 187, 226, 267, 292, 293, 2379, -+}; -+ -+static const unsigned short dep35[] = { -+ 6, 37, 40, 41, 96, 156, 174, 175, 187, 226, 267, 292, 293, 331, 2165, 2167, -+ 2168, 2170, 2171, 2173, 2174, 4135, -+}; -+ -+static const unsigned short dep36[] = { -+ 24, 96, 213, 267, 2379, -+}; -+ -+static const unsigned short dep37[] = { -+ 24, 40, 41, 96, 156, 174, 175, 213, 267, 2165, 2167, 2168, 2170, 2171, 2173, -+ 2174, 4135, -+}; -+ -+static const unsigned short dep38[] = { -+ 6, 24, 37, 38, 39, 96, 124, 125, 187, 213, 226, 267, 292, 293, 2379, -+}; -+ -+static const unsigned short dep39[] = { -+ 6, 24, 37, 40, 41, 96, 156, 174, 175, 187, 213, 226, 267, 292, 293, 331, 2165, -+ 2167, 2168, 2170, 2171, 2173, 2174, 4135, -+}; -+ -+static const unsigned short dep40[] = { -+ 1, 6, 38, 40, 41, 96, 134, 156, 174, 175, 182, 214, 216, 226, 267, 2165, 2167, -+ 2168, 2170, 2171, 2173, 2174, 2271, 2273, 4135, 28852, 29002, -+}; -+ -+static const unsigned short dep41[] = { -+ 96, 174, 267, -+}; -+ -+static const unsigned short dep42[] = { -+ 15, 96, 196, 197, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, 22812, 22813, 22816, -+ 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep43[] = { -+ 11, 19, 20, 40, 41, 96, 174, 196, 198, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 22809, 22812, -+ 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep44[] = { -+ 15, 16, 17, 18, 96, 196, 197, 199, 200, 202, 203, 205, 206, 267, 2135, 2310, -+ 18593, 18594, 18746, 18747, 18749, 18750, 22645, 22646, 22647, 22649, 22650, -+ 22652, 22653, 22809, 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep45[] = { -+ 11, 12, 13, 14, 19, 20, 40, 41, 96, 174, 196, 198, 199, 201, 202, 204, 205, -+ 207, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 2310, 4135, 16524, 16526, -+ 18746, 18748, 18749, 18751, 22809, 22812, 22813, 22816, 22817, 22820, 22821, -+ -+}; -+ -+static const unsigned short dep46[] = { -+ 16, 96, 199, 200, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, 22812, 22813, 22816, -+ 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep47[] = { -+ 12, 19, 20, 40, 41, 96, 174, 199, 201, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 22809, 22812, -+ 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep48[] = { -+ 17, 96, 202, 203, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, 22812, 22813, 22816, -+ 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep49[] = { -+ 13, 19, 20, 40, 41, 96, 174, 202, 204, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 22809, 22812, -+ 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep50[] = { -+ 18, 96, 205, 206, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, 22812, 22813, 22816, -+ 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep51[] = { -+ 14, 19, 20, 40, 41, 96, 174, 205, 207, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 
2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 22809, 22812, -+ 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep52[] = { -+ 15, 96, 196, 197, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ -+}; -+ -+static const unsigned short dep53[] = { -+ 11, 19, 20, 40, 41, 96, 174, 196, 198, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, -+}; -+ -+static const unsigned short dep54[] = { -+ 15, 16, 17, 18, 96, 196, 197, 199, 200, 202, 203, 205, 206, 267, 2135, 2310, -+ 18593, 18594, 18746, 18747, 18749, 18750, -+}; -+ -+static const unsigned short dep55[] = { -+ 11, 12, 13, 14, 19, 20, 40, 41, 96, 174, 196, 198, 199, 201, 202, 204, 205, -+ 207, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 2310, 4135, 16524, 16526, -+ 18746, 18748, 18749, 18751, -+}; -+ -+static const unsigned short dep56[] = { -+ 16, 96, 199, 200, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ -+}; -+ -+static const unsigned short dep57[] = { -+ 12, 19, 20, 40, 41, 96, 174, 199, 201, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, -+}; -+ -+static const unsigned short dep58[] = { -+ 17, 96, 202, 203, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ -+}; -+ -+static const unsigned short dep59[] = { -+ 13, 19, 20, 40, 41, 96, 174, 202, 204, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, -+}; -+ -+static const unsigned short dep60[] = { -+ 18, 96, 205, 206, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+ -+}; -+ -+static const unsigned short dep61[] = { -+ 14, 19, 20, 40, 41, 96, 174, 205, 207, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, -+}; -+ -+static const unsigned short dep62[] = { -+ 96, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+}; -+ -+static const unsigned short dep63[] = { -+ 40, 41, 96, 174, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 2310, 4135, -+ 16524, 16526, 18746, 18748, 18749, 18751, -+}; -+ -+static const unsigned short dep64[] = { -+ 11, 96, 192, 267, -+}; -+ -+static const unsigned short dep65[] = { -+ 11, 40, 41, 96, 174, 192, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep66[] = { -+ 11, 40, 41, 96, 174, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep67[] = { -+ 12, 96, 193, 267, -+}; -+ -+static const unsigned short dep68[] = { -+ 11, 40, 41, 96, 174, 193, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep69[] = { -+ 13, 96, 194, 267, -+}; -+ -+static const unsigned short dep70[] = { -+ 11, 40, 41, 96, 174, 194, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep71[] = { -+ 14, 96, 195, 267, -+}; -+ -+static const unsigned short dep72[] = { -+ 11, 40, 41, 96, 174, 195, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep73[] = { -+ 15, 96, 197, 198, 267, -+}; -+ -+static const unsigned short dep74[] = { -+ 40, 41, 96, 174, 197, 198, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep75[] = { -+ 40, 41, 96, 174, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep76[] = { -+ 16, 96, 200, 201, 267, -+}; -+ -+static const unsigned short dep77[] = { -+ 40, 41, 96, 174, 200, 201, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep78[] = { -+ 17, 96, 203, 204, 267, -+}; -+ 
-+static const unsigned short dep79[] = { -+ 40, 41, 96, 174, 203, 204, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep80[] = { -+ 18, 96, 206, 207, 267, -+}; -+ -+static const unsigned short dep81[] = { -+ 40, 41, 96, 174, 206, 207, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep82[] = { -+ 15, 19, 20, 40, 41, 96, 156, 174, 175, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep83[] = { -+ 15, 16, 19, 20, 40, 41, 96, 156, 174, 175, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep84[] = { -+ 15, 17, 19, 20, 40, 41, 96, 156, 174, 175, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep85[] = { -+ 15, 18, 19, 20, 40, 41, 96, 156, 174, 175, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep86[] = { -+ 15, 96, 196, 197, 267, -+}; -+ -+static const unsigned short dep87[] = { -+ 11, 19, 20, 40, 41, 96, 174, 196, 198, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep88[] = { -+ 15, 16, 17, 18, 96, 196, 197, 199, 200, 202, 203, 205, 206, 267, -+}; -+ -+static const unsigned short dep89[] = { -+ 11, 12, 13, 14, 19, 20, 40, 41, 96, 174, 196, 198, 199, 201, 202, 204, 205, -+ 207, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep90[] = { -+ 16, 96, 199, 200, 267, -+}; -+ -+static const unsigned short dep91[] = { -+ 12, 19, 20, 40, 41, 96, 174, 199, 201, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep92[] = { -+ 17, 96, 202, 203, 267, -+}; -+ -+static const unsigned short dep93[] = { -+ 13, 19, 20, 40, 41, 96, 174, 202, 204, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep94[] = { -+ 18, 96, 205, 206, 267, -+}; -+ -+static const unsigned short dep95[] = { -+ 14, 19, 20, 40, 41, 96, 174, 205, 207, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep96[] = { -+ 15, 96, 196, 197, 267, 2165, 2166, 2167, 2169, 2170, 2172, 2173, 2329, 2332, -+ 2333, 2336, 2337, 2340, 2341, -+}; -+ -+static const unsigned short dep97[] = { -+ 11, 19, 20, 40, 41, 96, 174, 196, 198, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2329, 2332, 2333, 2336, 2337, 2340, 2341, 4135, 16524, 16526, -+ -+}; -+ -+static const unsigned short dep98[] = { -+ 15, 16, 17, 18, 96, 196, 197, 199, 200, 202, 203, 205, 206, 267, 2165, 2166, -+ 2167, 2169, 2170, 2172, 2173, 2329, 2332, 2333, 2336, 2337, 2340, 2341, -+}; -+ -+static const unsigned short dep99[] = { -+ 11, 12, 13, 14, 19, 20, 40, 41, 96, 174, 196, 198, 199, 201, 202, 204, 205, -+ 207, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 2329, 2332, 2333, 2336, -+ 2337, 2340, 2341, 4135, 16524, 16526, -+}; -+ -+static const unsigned short dep100[] = { -+ 16, 96, 199, 200, 267, 2165, 2166, 2167, 2169, 2170, 2172, 2173, 2329, 2332, -+ 2333, 2336, 2337, 2340, 2341, -+}; -+ -+static const unsigned short dep101[] = { -+ 12, 19, 20, 40, 41, 96, 174, 199, 201, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2329, 2332, 2333, 2336, 2337, 2340, 2341, 4135, 16524, 16526, -+ -+}; -+ -+static const unsigned short dep102[] = { -+ 17, 96, 202, 203, 267, 2165, 2166, 2167, 2169, 2170, 2172, 2173, 2329, 2332, -+ 2333, 2336, 2337, 2340, 2341, -+}; -+ -+static const unsigned short dep103[] = { -+ 13, 19, 20, 40, 41, 96, 174, 202, 204, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2329, 2332, 2333, 2336, 2337, 2340, 2341, 4135, 16524, 16526, -+ -+}; -+ -+static const unsigned short 
dep104[] = { -+ 18, 96, 205, 206, 267, 2165, 2166, 2167, 2169, 2170, 2172, 2173, 2329, 2332, -+ 2333, 2336, 2337, 2340, 2341, -+}; -+ -+static const unsigned short dep105[] = { -+ 14, 19, 20, 40, 41, 96, 174, 205, 207, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 2329, 2332, 2333, 2336, 2337, 2340, 2341, 4135, 16524, 16526, -+ -+}; -+ -+static const unsigned short dep106[] = { -+ 15, 96, 196, 197, 267, 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, -+ 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep107[] = { -+ 11, 19, 20, 40, 41, 96, 174, 196, 198, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 4135, 16524, 16526, 22809, 22812, 22813, 22816, 22817, 22820, -+ 22821, -+}; -+ -+static const unsigned short dep108[] = { -+ 15, 16, 17, 18, 96, 196, 197, 199, 200, 202, 203, 205, 206, 267, 22645, 22646, -+ 22647, 22649, 22650, 22652, 22653, 22809, 22812, 22813, 22816, 22817, 22820, -+ 22821, -+}; -+ -+static const unsigned short dep109[] = { -+ 11, 12, 13, 14, 19, 20, 40, 41, 96, 174, 196, 198, 199, 201, 202, 204, 205, -+ 207, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 4135, 16524, 16526, 22809, -+ 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep110[] = { -+ 16, 96, 199, 200, 267, 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, -+ 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep111[] = { -+ 12, 19, 20, 40, 41, 96, 174, 199, 201, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 4135, 16524, 16526, 22809, 22812, 22813, 22816, 22817, 22820, -+ 22821, -+}; -+ -+static const unsigned short dep112[] = { -+ 17, 96, 202, 203, 267, 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, -+ 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep113[] = { -+ 13, 19, 20, 40, 41, 96, 174, 202, 204, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 4135, 16524, 16526, 22809, 22812, 22813, 22816, 22817, 22820, -+ 22821, -+}; -+ -+static const unsigned short dep114[] = { -+ 18, 96, 205, 206, 267, 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, -+ 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep115[] = { -+ 14, 19, 20, 40, 41, 96, 174, 205, 207, 267, 2134, 2135, 2136, 2165, 2166, -+ 2169, 2172, 4135, 16524, 16526, 22809, 22812, 22813, 22816, 22817, 22820, -+ 22821, -+}; -+ -+static const unsigned short dep116[] = { -+ 96, 267, 2165, 2166, 2167, 2169, 2170, 2172, 2173, 2329, 2332, 2333, 2336, -+ 2337, 2340, 2341, -+}; -+ -+static const unsigned short dep117[] = { -+ 40, 41, 96, 174, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 2329, 2332, -+ 2333, 2336, 2337, 2340, 2341, 4135, 16524, 16526, -+}; -+ -+static const unsigned short dep118[] = { -+ 96, 267, 22645, 22646, 22647, 22649, 22650, 22652, 22653, 22809, 22812, 22813, -+ 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep119[] = { -+ 40, 41, 96, 174, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 4135, 16524, -+ 16526, 22809, 22812, 22813, 22816, 22817, 22820, 22821, -+}; -+ -+static const unsigned short dep120[] = { -+ 19, 20, 40, 41, 96, 174, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 2310, -+ 4135, 16524, 16526, 18746, 18748, 18749, 18751, -+}; -+ -+static const unsigned short dep121[] = { -+ 40, 41, 96, 156, 174, 175, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 4135, 20613, -+}; -+ -+static const unsigned short dep122[] = { -+ 96, 267, 2083, 2084, 2271, 2272, -+}; -+ -+static const unsigned short dep123[] = { -+ 40, 41, 96, 
174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2270, 2272, -+ 4135, 20613, -+}; -+ -+static const unsigned short dep124[] = { -+ 40, 41, 96, 174, 267, 2082, 2084, 2165, 2166, 2169, 2172, 2312, 4135, 20613, -+ -+}; -+ -+static const unsigned short dep125[] = { -+ 96, 267, 14454, 14456, 14457, 14459, 14460, 14462, 14620, 14621, 14624, 14625, -+ 14628, 14629, -+}; -+ -+static const unsigned short dep126[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 4135, 14620, 14621, 14624, 14625, -+ 14628, 14629, 20613, 24693, 24694, 24697, 24700, -+}; -+ -+static const unsigned short dep127[] = { -+ 96, 121, 123, 124, 126, 267, 288, 289, 292, 293, -+}; -+ -+static const unsigned short dep128[] = { -+ 40, 41, 96, 174, 267, 288, 289, 292, 293, 4135, 24693, 24694, 24697, 24700, -+ -+}; -+ -+static const unsigned short dep129[] = { -+ 40, 41, 96, 174, 267, 2165, 2166, 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep130[] = { -+ 40, 41, 96, 118, 121, 124, 174, 267, 2312, 4135, 20613, 24693, -+}; -+ -+static const unsigned short dep131[] = { -+ 6, 24, 26, 27, 96, 187, 213, 216, 267, 2081, 2269, -+}; -+ -+static const unsigned short dep132[] = { -+ 40, 41, 96, 174, 187, 213, 215, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 2269, 4135, 20613, -+}; -+ -+static const unsigned short dep133[] = { -+ 6, 24, 25, 26, 40, 41, 96, 174, 267, 2081, 2165, 2166, 2169, 2172, 2312, 4135, -+ 20613, -+}; -+ -+static const unsigned short dep134[] = { -+ 0, 40, 41, 96, 156, 174, 175, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep135[] = { -+ 0, 96, 181, 267, -+}; -+ -+static const unsigned short dep136[] = { -+ 0, 40, 41, 96, 156, 174, 175, 181, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep137[] = { -+ 40, 41, 96, 174, 181, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep138[] = { -+ 2, 28, 96, 183, 217, 267, 28852, 29002, -+}; -+ -+static const unsigned short dep139[] = { -+ 1, 2, 28, 29, 96, 168, 169, 174, 183, 217, 267, 28852, 29002, -+}; -+ -+static const unsigned short dep140[] = { -+ 1, 28, 29, 38, 40, 41, 96, 168, 169, 174, 183, 217, 267, 4135, 28852, 29002, -+ -+}; -+ -+static const unsigned short dep141[] = { -+ 0, 40, 41, 96, 174, 181, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep142[] = { -+ 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, -+ 28, 29, 30, 31, 96, 182, 183, 184, 185, 186, 188, 189, 190, 191, 192, 193, -+ 194, 195, 197, 198, 200, 201, 203, 204, 206, 207, 208, 209, 210, 211, 217, -+ 218, 219, 267, 2071, 2081, 2260, 2269, 28852, 29002, -+}; -+ -+static const unsigned short dep143[] = { -+ 29, 40, 41, 96, 134, 174, 182, 183, 184, 185, 186, 188, 189, 190, 191, 192, -+ 193, 194, 195, 197, 198, 200, 201, 203, 204, 206, 207, 208, 209, 210, 211, -+ 217, 218, 219, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2260, 2269, -+ 4135, 20613, 28852, 29002, -+}; -+ -+static const unsigned short dep144[] = { -+ 96, 267, 14463, 14465, 14466, 14468, 14497, 14498, 14513, 14630, 14631, 14651, -+ 14652, 14654, 14655, 14664, -+}; -+ -+static const unsigned short dep145[] = { -+ 40, 41, 96, 173, 174, 267, 2165, 2166, 2169, 2172, 4135, 14630, 14631, 14651, -+ 14652, 14654, 14655, 14664, -+}; -+ -+static const unsigned short dep146[] = { -+ 14463, 14465, 14466, 14468, 14497, 14498, 14513, 14630, 14631, 14651, 14652, -+ 14654, 14655, 14664, -+}; -+ -+static const unsigned short dep147[] = { -+ 173, 14630, 14631, 14651, 14652, 14654, 14655, 14664, -+}; 
-+ -+static const unsigned short dep148[] = { -+ 96, 267, 14464, 14465, 14467, 14468, 14476, 14477, 14478, 14479, 14480, 14481, -+ 14482, 14483, 14485, 14488, 14489, 14497, 14498, 14499, 14500, 14501, 14506, -+ 14507, 14508, 14509, 14513, 14630, 14631, 14637, 14638, 14639, 14640, 14642, -+ 14644, 14651, 14652, 14654, 14655, 14656, 14657, 14660, 14661, 14664, -+}; -+ -+static const unsigned short dep149[] = { -+ 40, 41, 72, 96, 134, 174, 267, 2165, 2166, 2169, 2172, 4135, 14630, 14631, -+ 14637, 14638, 14639, 14640, 14642, 14644, 14651, 14652, 14654, 14655, 14656, -+ 14657, 14660, 14661, 14664, -+}; -+ -+static const unsigned short dep150[] = { -+ 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, -+ 28, 29, 30, 31, 40, 41, 96, 134, 171, 174, 267, 2071, 2081, 2165, 2166, 2169, -+ 2172, 2312, 4135, 20613, 28852, -+}; -+ -+static const unsigned short dep151[] = { -+ 43, 44, 45, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56, 57, 58, 60, 61, 62, 63, -+ 64, 65, 67, 69, 70, 71, 72, 93, 95, 96, 228, 229, 230, 231, 232, 233, 234, -+ 235, 236, 237, 238, 240, 241, 242, 243, 244, 246, 248, 249, 250, 266, 267, -+ 2116, 2295, -+}; -+ -+static const unsigned short dep152[] = { -+ 40, 41, 95, 96, 134, 153, 174, 228, 229, 230, 231, 232, 233, 234, 235, 236, -+ 237, 238, 240, 241, 242, 243, 244, 246, 248, 249, 250, 266, 267, 2137, 2138, -+ 2139, 2165, 2166, 2169, 2172, 2295, 4135, 20613, -+}; -+ -+static const unsigned short dep153[] = { -+ 59, 94, 96, 239, 266, 267, 2139, 2312, -+}; -+ -+static const unsigned short dep154[] = { -+ 40, 41, 43, 44, 46, 48, 49, 51, 52, 53, 54, 56, 57, 60, 61, 63, 64, 65, 66, -+ 67, 69, 70, 71, 93, 94, 96, 134, 153, 174, 239, 266, 267, 2107, 2116, 2165, -+ 2166, 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep155[] = { -+ 2, 28, 41, 96, 183, 217, 226, 267, 2139, 2312, 28852, 29002, -+}; -+ -+static const unsigned short dep156[] = { -+ 2, 25, 26, 28, 29, 38, 40, 41, 96, 168, 169, 174, 183, 217, 226, 267, 2312, -+ 4135, 20613, 28852, 29002, -+}; -+ -+static const unsigned short dep157[] = { -+ 96, 128, 129, 131, 132, 136, 137, 140, 141, 142, 143, 144, 145, 146, 147, -+ 149, 152, 153, 157, 158, 161, 162, 163, 164, 165, 167, 168, 170, 171, 172, -+ 173, 175, 176, 177, 267, 294, 295, 299, 301, 302, 303, 304, 306, 308, 312, -+ 315, 316, 318, 319, 320, 321, 323, 324, 325, 327, 328, -+}; -+ -+static const unsigned short dep158[] = { -+ 40, 41, 72, 96, 134, 174, 267, 294, 295, 299, 301, 302, 303, 304, 306, 308, -+ 312, 315, 316, 318, 319, 320, 321, 323, 324, 325, 327, 328, 2137, 2138, 2139, -+ 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep159[] = { -+ 96, 127, 129, 130, 132, 161, 162, 177, 267, 294, 295, 315, 316, 318, 319, -+ 328, -+}; -+ -+static const unsigned short dep160[] = { -+ 40, 41, 96, 173, 174, 267, 294, 295, 315, 316, 318, 319, 328, 2137, 2138, -+ 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep161[] = { -+ 40, 41, 96, 129, 132, 134, 137, 138, 141, 143, 145, 147, 149, 150, 152, 156, -+ 157, 159, 160, 161, 162, 164, 165, 167, 169, 170, 172, 174, 176, 177, 267, -+ 2165, 2166, 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep162[] = { -+ 40, 41, 96, 129, 132, 161, 162, 174, 177, 267, 2165, 2166, 2169, 2172, 2312, -+ 4135, 20613, -+}; -+ -+static const unsigned short dep163[] = { -+ 40, 41, 75, 76, 81, 83, 96, 110, 134, 163, 174, 178, 267, 2137, 2138, 2139, -+ 2165, 2166, 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep164[] 
= { -+ 40, 41, 75, 76, 81, 83, 96, 110, 134, 135, 136, 138, 139, 163, 174, 178, 267, -+ 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep165[] = { -+ 76, 77, 96, 100, 101, 254, 255, 267, 269, 270, -+}; -+ -+static const unsigned short dep166[] = { -+ 40, 41, 47, 62, 77, 79, 85, 96, 98, 101, 134, 153, 174, 178, 254, 255, 267, -+ 269, 270, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep167[] = { -+ 40, 41, 47, 62, 77, 79, 96, 98, 101, 103, 105, 134, 153, 174, 178, 254, 255, -+ 267, 269, 270, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep168[] = { -+ 96, 267, 12466, 12467, 12617, -+}; -+ -+static const unsigned short dep169[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, -+ 12617, 20613, -+}; -+ -+static const unsigned short dep170[] = { -+ 96, 267, 6218, 6219, 6396, -+}; -+ -+static const unsigned short dep171[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, -+ 6396, 20613, -+}; -+ -+static const unsigned short dep172[] = { -+ 96, 267, 6236, 6409, -+}; -+ -+static const unsigned short dep173[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, -+ 6409, 20613, -+}; -+ -+static const unsigned short dep174[] = { -+ 96, 267, 6254, 6255, 6256, 6257, 6420, 6422, 8469, -+}; -+ -+static const unsigned short dep175[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, -+ 6257, 6421, 6422, 8303, 8468, 20613, -+}; -+ -+static const unsigned short dep176[] = { -+ 96, 267, 6258, 6259, 6423, -+}; -+ -+static const unsigned short dep177[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, -+ 6423, 20613, -+}; -+ -+static const unsigned short dep178[] = { -+ 96, 267, 6260, 6424, -+}; -+ -+static const unsigned short dep179[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, -+ 6424, 20613, -+}; -+ -+static const unsigned short dep180[] = { -+ 96, 267, 10349, 10515, -+}; -+ -+static const unsigned short dep181[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, -+ 10515, 20613, -+}; -+ -+static const unsigned short dep182[] = { -+ 76, 77, 81, 82, 96, 100, 101, 254, 255, 257, 258, 267, 269, 270, -+}; -+ -+static const unsigned short dep183[] = { -+ 40, 41, 47, 62, 77, 79, 82, 85, 96, 98, 101, 134, 153, 174, 178, 254, 255, -+ 257, 259, 267, 269, 270, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+ -+}; -+ -+static const unsigned short dep184[] = { -+ 76, 77, 96, 100, 101, 103, 104, 254, 255, 267, 269, 270, 271, 272, -+}; -+ -+static const unsigned short dep185[] = { -+ 40, 41, 47, 62, 77, 79, 96, 98, 101, 103, 105, 134, 153, 174, 178, 254, 255, -+ 267, 269, 270, 271, 272, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+ -+}; -+ -+static const unsigned short dep186[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 12467, 20613, -+}; -+ -+static const unsigned short dep187[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 6218, 20613, -+}; -+ -+static const unsigned short dep188[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 6236, 20613, -+}; -+ -+static const unsigned short dep189[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 6256, 8302, 20613, -+}; 
-+ -+static const unsigned short dep190[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 6258, 20613, -+}; -+ -+static const unsigned short dep191[] = { -+ 40, 41, 96, 134, 173, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 2312, 4135, 6259, 6260, 20613, -+}; -+ -+static const unsigned short dep192[] = { -+ 40, 41, 96, 134, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 10349, 20613, -+}; -+ -+static const unsigned short dep193[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, 4135, -+ 6186, 20613, -+}; -+ -+static const unsigned short dep194[] = { -+ 76, 78, 79, 96, 97, 98, 99, 253, 254, 267, 268, 269, -+}; -+ -+static const unsigned short dep195[] = { -+ 40, 41, 77, 78, 82, 84, 96, 99, 101, 103, 106, 134, 174, 178, 253, 255, 267, -+ 268, 270, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep196[] = { -+ 76, 78, 79, 80, 96, 97, 98, 99, 102, 253, 254, 256, 267, 268, 269, -+}; -+ -+static const unsigned short dep197[] = { -+ 40, 41, 77, 78, 80, 82, 84, 96, 99, 101, 102, 103, 106, 134, 174, 178, 253, -+ 255, 256, 267, 268, 270, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+ -+}; -+ -+static const unsigned short dep198[] = { -+ 76, 78, 79, 83, 84, 85, 96, 97, 98, 99, 253, 254, 259, 260, 267, 268, 269, -+ -+}; -+ -+static const unsigned short dep199[] = { -+ 40, 41, 77, 78, 82, 84, 96, 99, 101, 134, 174, 178, 253, 255, 258, 260, 267, -+ 268, 270, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep200[] = { -+ 76, 78, 79, 96, 97, 98, 99, 105, 106, 107, 253, 254, 267, 268, 269, 272, 273, -+ -+}; -+ -+static const unsigned short dep201[] = { -+ 40, 41, 77, 78, 96, 99, 101, 103, 106, 134, 174, 178, 253, 255, 267, 268, -+ 270, 271, 273, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep202[] = { -+ 40, 41, 46, 70, 96, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep203[] = { -+ 40, 41, 96, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 20613, -+}; -+ -+static const unsigned short dep204[] = { -+ 40, 41, 76, 81, 83, 96, 134, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, -+ 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep205[] = { -+ 40, 41, 96, 156, 174, 175, 267, 2134, 2135, 2136, 2137, 2138, 2139, 2165, -+ 2166, 2169, 2172, 4135, 16524, 16526, 20613, -+}; -+ -+static const unsigned short dep206[] = { -+ 40, 41, 76, 81, 83, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 4135, 20613, -+}; -+ -+static const unsigned short dep207[] = { -+ 40, 41, 77, 78, 96, 99, 134, 174, 253, 255, 267, 268, 270, 2137, 2138, 2139, -+ 2165, 2166, 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep208[] = { -+ 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2312, -+ 4135, 20613, -+}; -+ -+static const unsigned short dep209[] = { -+ 5, 96, 186, 267, 2139, 2312, -+}; -+ -+static const unsigned short dep210[] = { -+ 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 186, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep211[] = { -+ 40, 41, 44, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 
139, 146, 148, 163, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep212[] = { -+ 0, 96, 181, 267, 2139, 2312, -+}; -+ -+static const unsigned short dep213[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 163, 174, 178, 181, 267, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep214[] = { -+ 0, 40, 41, 44, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, -+ 136, 138, 139, 146, 148, 163, 174, 178, 181, 267, 2137, 2138, 2139, 2165, -+ 2166, 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep215[] = { -+ 31, 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 163, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep216[] = { -+ 0, 96, 181, 267, 2312, 26714, -+}; -+ -+static const unsigned short dep217[] = { -+ 0, 96, 108, 181, 267, 274, -+}; -+ -+static const unsigned short dep218[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 181, 267, 274, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep219[] = { -+ 0, 5, 40, 41, 75, 76, 81, 83, 96, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 163, 174, 178, 181, 267, 274, 2137, 2138, 2139, 2165, 2166, -+ 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep220[] = { -+ 0, 31, 96, 108, 181, 219, 267, 274, -+}; -+ -+static const unsigned short dep221[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 181, 219, 267, 274, 2137, 2138, 2139, 2165, 2166, -+ 2169, 2172, 4135, 20613, -+}; -+ -+static const unsigned short dep222[] = { -+ 0, 96, 108, 181, 267, 274, 2139, 2312, -+}; -+ -+static const unsigned short dep223[] = { -+ 0, 4, 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, -+ 136, 138, 139, 146, 163, 174, 178, 181, 267, 274, 2137, 2138, 2139, 2165, -+ 2166, 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep224[] = { -+ 0, 4, 5, 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, -+ 136, 138, 139, 146, 163, 174, 178, 181, 267, 274, 2137, 2138, 2139, 2165, -+ 2166, 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep225[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 163, 174, 178, 181, 267, 274, 2137, 2138, 2139, 2165, 2166, -+ 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep226[] = { -+ 40, 41, 96, 174, 267, 2134, 2135, 2136, 2165, 2166, 2169, 2172, 2312, 4135, -+ 16524, 16526, 20613, -+}; -+ -+static const unsigned short dep227[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 181, 267, 274, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep228[] = { -+ 0, 31, 96, 108, 181, 219, 267, 274, 2139, 2312, -+}; -+ -+static const unsigned short dep229[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 181, 219, 267, 274, 2137, 2138, 2139, 2165, 2166, -+ 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep230[] = { -+ 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 
139, 146, 163, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2310, -+ 4135, 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+}; -+ -+static const unsigned short dep231[] = { -+ 40, 41, 44, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 148, 163, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+}; -+ -+static const unsigned short dep232[] = { -+ 0, 96, 181, 267, 2135, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+}; -+ -+static const unsigned short dep233[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 163, 174, 178, 181, 267, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+}; -+ -+static const unsigned short dep234[] = { -+ 0, 40, 41, 44, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, -+ 136, 138, 139, 146, 148, 163, 174, 178, 181, 267, 2137, 2138, 2139, 2165, -+ 2166, 2169, 2172, 2310, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+ -+}; -+ -+static const unsigned short dep235[] = { -+ 0, 96, 181, 267, 2136, 2310, 18593, 18594, 18746, 18747, 18749, 18750, -+}; -+ -+static const unsigned short dep236[] = { -+ 96, 267, 2135, 2139, 2310, 2312, 18593, 18594, 18746, 18747, 18749, 18750, -+ -+}; -+ -+static const unsigned short dep237[] = { -+ 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2310, -+ 2312, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+}; -+ -+static const unsigned short dep238[] = { -+ 40, 41, 44, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 148, 163, 174, 178, 267, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 2310, 2312, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+}; -+ -+static const unsigned short dep239[] = { -+ 0, 96, 181, 267, 2135, 2139, 2310, 2312, 18593, 18594, 18746, 18747, 18749, -+ 18750, -+}; -+ -+static const unsigned short dep240[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, 136, -+ 138, 139, 146, 163, 174, 178, 181, 267, 2137, 2138, 2139, 2165, 2166, 2169, -+ 2172, 2310, 2312, 4135, 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+}; -+ -+static const unsigned short dep241[] = { -+ 0, 40, 41, 44, 75, 76, 81, 83, 96, 108, 110, 127, 128, 130, 131, 134, 135, -+ 136, 138, 139, 146, 148, 163, 174, 178, 181, 267, 2137, 2138, 2139, 2165, -+ 2166, 2169, 2172, 2310, 2312, 4135, 16524, 16526, 18746, 18748, 18749, 18751, -+ 20613, -+}; -+ -+static const unsigned short dep242[] = { -+ 0, 96, 181, 267, 2136, 2139, 2310, 2312, 18593, 18594, 18746, 18747, 18749, -+ 18750, -+}; -+ -+static const unsigned short dep243[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 181, 267, 274, 2134, 2135, 2136, 2137, 2138, 2139, -+ 2165, 2166, 2169, 2172, 4135, 16524, 16526, 20613, -+}; -+ -+static const unsigned short dep244[] = { -+ 40, 41, 75, 96, 134, 148, 174, 267, 2165, 2166, 2169, 2172, 4135, -+}; -+ -+static const unsigned short dep245[] = { -+ 40, 41, 75, 96, 134, 135, 139, 148, 174, 267, 2165, 2166, 2169, 2172, 4135, -+ -+}; -+ -+static const unsigned short dep246[] = { -+ 40, 41, 75, 96, 134, 148, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, -+ 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep247[] = { -+ 40, 41, 75, 96, 134, 135, 139, 148, 
174, 267, 2137, 2138, 2139, 2165, 2166, -+ 2169, 2172, 2312, 4135, 20613, -+}; -+ -+static const unsigned short dep248[] = { -+ 40, 41, 96, 174, 267, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2310, 4135, -+ 16524, 16526, 18746, 18748, 18749, 18751, 20613, -+}; -+ -+static const unsigned short dep249[] = { -+ 0, 40, 41, 75, 76, 81, 83, 96, 110, 127, 128, 130, 131, 134, 135, 136, 138, -+ 139, 146, 163, 174, 178, 181, 267, 274, 2134, 2135, 2136, 2137, 2138, 2139, -+ 2165, 2166, 2169, 2172, 2312, 4135, 16524, 16526, 20613, -+}; -+ -+static const unsigned short dep250[] = { -+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -+ 22, 24, 26, 27, 28, 29, 30, 31, 96, 182, 183, 184, 185, 186, 187, 188, 189, -+ 190, 191, 192, 193, 194, 195, 197, 198, 200, 201, 203, 204, 206, 207, 208, -+ 209, 210, 211, 213, 216, 217, 218, 219, 267, 2071, 2081, 2139, 2260, 2269, -+ 2312, 28852, 29002, -+}; -+ -+static const unsigned short dep251[] = { -+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -+ 22, 24, 25, 26, 28, 29, 30, 31, 40, 41, 96, 134, 171, 174, 182, 183, 184, -+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 197, 198, 200, 201, -+ 203, 204, 206, 207, 208, 209, 210, 211, 213, 215, 217, 218, 219, 267, 2071, -+ 2081, 2137, 2138, 2139, 2165, 2166, 2169, 2172, 2260, 2269, 2312, 4135, 20613, -+ 28852, 29002, -+}; -+ -+#define NELS(X) (sizeof(X)/sizeof(X[0])) -+static const struct ia64_opcode_dependency -+op_dependencies[] = { -+ { NELS(dep1), dep1, NELS(dep0), dep0, }, -+ { NELS(dep3), dep3, NELS(dep2), dep2, }, -+ { NELS(dep5), dep5, NELS(dep4), dep4, }, -+ { NELS(dep7), dep7, NELS(dep6), dep6, }, -+ { NELS(dep9), dep9, NELS(dep8), dep8, }, -+ { NELS(dep11), dep11, NELS(dep10), dep10, }, -+ { NELS(dep13), dep13, NELS(dep12), dep12, }, -+ { NELS(dep15), dep15, NELS(dep14), dep14, }, -+ { NELS(dep17), dep17, NELS(dep16), dep16, }, -+ { NELS(dep19), dep19, NELS(dep18), dep18, }, -+ { NELS(dep21), dep21, NELS(dep20), dep20, }, -+ { NELS(dep23), dep23, NELS(dep22), dep22, }, -+ { NELS(dep25), dep25, NELS(dep24), dep24, }, -+ { NELS(dep27), dep27, NELS(dep26), dep26, }, -+ { NELS(dep28), dep28, NELS(dep12), dep12, }, -+ { NELS(dep30), dep30, NELS(dep29), dep29, }, -+ { NELS(dep32), dep32, NELS(dep31), dep31, }, -+ { NELS(dep33), dep33, NELS(dep12), dep12, }, -+ { NELS(dep35), dep35, NELS(dep34), dep34, }, -+ { NELS(dep37), dep37, NELS(dep36), dep36, }, -+ { NELS(dep39), dep39, NELS(dep38), dep38, }, -+ { NELS(dep40), dep40, NELS(dep29), dep29, }, -+ { NELS(dep41), dep41, NELS(dep31), dep31, }, -+ { NELS(dep43), dep43, NELS(dep42), dep42, }, -+ { NELS(dep45), dep45, NELS(dep44), dep44, }, -+ { NELS(dep47), dep47, NELS(dep46), dep46, }, -+ { NELS(dep49), dep49, NELS(dep48), dep48, }, -+ { NELS(dep51), dep51, NELS(dep50), dep50, }, -+ { NELS(dep53), dep53, NELS(dep52), dep52, }, -+ { NELS(dep55), dep55, NELS(dep54), dep54, }, -+ { NELS(dep57), dep57, NELS(dep56), dep56, }, -+ { NELS(dep59), dep59, NELS(dep58), dep58, }, -+ { NELS(dep61), dep61, NELS(dep60), dep60, }, -+ { NELS(dep63), dep63, NELS(dep62), dep62, }, -+ { NELS(dep65), dep65, NELS(dep64), dep64, }, -+ { NELS(dep66), dep66, NELS(dep31), dep31, }, -+ { NELS(dep68), dep68, NELS(dep67), dep67, }, -+ { NELS(dep70), dep70, NELS(dep69), dep69, }, -+ { NELS(dep72), dep72, NELS(dep71), dep71, }, -+ { NELS(dep74), dep74, NELS(dep73), dep73, }, -+ { NELS(dep75), dep75, NELS(dep31), dep31, }, -+ { NELS(dep77), dep77, NELS(dep76), dep76, }, -+ { NELS(dep79), dep79, NELS(dep78), dep78, }, -+ { 
NELS(dep81), dep81, NELS(dep80), dep80, }, -+ { NELS(dep82), dep82, NELS(dep31), dep31, }, -+ { NELS(dep83), dep83, NELS(dep31), dep31, }, -+ { NELS(dep84), dep84, NELS(dep31), dep31, }, -+ { NELS(dep85), dep85, NELS(dep31), dep31, }, -+ { NELS(dep87), dep87, NELS(dep86), dep86, }, -+ { NELS(dep89), dep89, NELS(dep88), dep88, }, -+ { NELS(dep91), dep91, NELS(dep90), dep90, }, -+ { NELS(dep93), dep93, NELS(dep92), dep92, }, -+ { NELS(dep95), dep95, NELS(dep94), dep94, }, -+ { NELS(dep97), dep97, NELS(dep96), dep96, }, -+ { NELS(dep99), dep99, NELS(dep98), dep98, }, -+ { NELS(dep101), dep101, NELS(dep100), dep100, }, -+ { NELS(dep103), dep103, NELS(dep102), dep102, }, -+ { NELS(dep105), dep105, NELS(dep104), dep104, }, -+ { NELS(dep107), dep107, NELS(dep106), dep106, }, -+ { NELS(dep109), dep109, NELS(dep108), dep108, }, -+ { NELS(dep111), dep111, NELS(dep110), dep110, }, -+ { NELS(dep113), dep113, NELS(dep112), dep112, }, -+ { NELS(dep115), dep115, NELS(dep114), dep114, }, -+ { NELS(dep117), dep117, NELS(dep116), dep116, }, -+ { NELS(dep119), dep119, NELS(dep118), dep118, }, -+ { NELS(dep120), dep120, NELS(dep62), dep62, }, -+ { NELS(dep121), dep121, NELS(dep31), dep31, }, -+ { NELS(dep123), dep123, NELS(dep122), dep122, }, -+ { NELS(dep124), dep124, NELS(dep0), dep0, }, -+ { NELS(dep126), dep126, NELS(dep125), dep125, }, -+ { NELS(dep128), dep128, NELS(dep127), dep127, }, -+ { NELS(dep129), dep129, NELS(dep0), dep0, }, -+ { NELS(dep130), dep130, NELS(dep0), dep0, }, -+ { NELS(dep132), dep132, NELS(dep131), dep131, }, -+ { NELS(dep133), dep133, NELS(dep0), dep0, }, -+ { NELS(dep134), dep134, NELS(dep31), dep31, }, -+ { NELS(dep136), dep136, NELS(dep135), dep135, }, -+ { NELS(dep137), dep137, NELS(dep135), dep135, }, -+ { NELS(dep139), dep139, NELS(dep138), dep138, }, -+ { NELS(dep140), dep140, NELS(dep138), dep138, }, -+ { NELS(dep141), dep141, NELS(dep135), dep135, }, -+ { NELS(dep143), dep143, NELS(dep142), dep142, }, -+ { NELS(dep145), dep145, NELS(dep144), dep144, }, -+ { NELS(dep147), dep147, NELS(dep146), dep146, }, -+ { NELS(dep149), dep149, NELS(dep148), dep148, }, -+ { NELS(dep150), dep150, NELS(dep0), dep0, }, -+ { NELS(dep152), dep152, NELS(dep151), dep151, }, -+ { NELS(dep154), dep154, NELS(dep153), dep153, }, -+ { NELS(dep156), dep156, NELS(dep155), dep155, }, -+ { NELS(dep158), dep158, NELS(dep157), dep157, }, -+ { NELS(dep160), dep160, NELS(dep159), dep159, }, -+ { NELS(dep161), dep161, NELS(dep0), dep0, }, -+ { NELS(dep162), dep162, NELS(dep0), dep0, }, -+ { NELS(dep163), dep163, NELS(dep0), dep0, }, -+ { NELS(dep164), dep164, NELS(dep31), dep31, }, -+ { NELS(dep166), dep166, NELS(dep165), dep165, }, -+ { NELS(dep167), dep167, NELS(dep165), dep165, }, -+ { NELS(dep169), dep169, NELS(dep168), dep168, }, -+ { NELS(dep171), dep171, NELS(dep170), dep170, }, -+ { NELS(dep173), dep173, NELS(dep172), dep172, }, -+ { NELS(dep175), dep175, NELS(dep174), dep174, }, -+ { NELS(dep177), dep177, NELS(dep176), dep176, }, -+ { NELS(dep179), dep179, NELS(dep178), dep178, }, -+ { NELS(dep181), dep181, NELS(dep180), dep180, }, -+ { NELS(dep183), dep183, NELS(dep182), dep182, }, -+ { NELS(dep185), dep185, NELS(dep184), dep184, }, -+ { NELS(dep186), dep186, NELS(dep0), dep0, }, -+ { NELS(dep187), dep187, NELS(dep0), dep0, }, -+ { NELS(dep188), dep188, NELS(dep0), dep0, }, -+ { NELS(dep189), dep189, NELS(dep0), dep0, }, -+ { NELS(dep190), dep190, NELS(dep0), dep0, }, -+ { NELS(dep191), dep191, NELS(dep0), dep0, }, -+ { NELS(dep192), dep192, NELS(dep0), dep0, }, -+ { NELS(dep193), dep193, 
NELS(dep0), dep0, }, -+ { NELS(dep195), dep195, NELS(dep194), dep194, }, -+ { NELS(dep197), dep197, NELS(dep196), dep196, }, -+ { NELS(dep199), dep199, NELS(dep198), dep198, }, -+ { NELS(dep201), dep201, NELS(dep200), dep200, }, -+ { NELS(dep202), dep202, NELS(dep0), dep0, }, -+ { NELS(dep203), dep203, NELS(dep0), dep0, }, -+ { NELS(dep204), dep204, NELS(dep0), dep0, }, -+ { NELS(dep205), dep205, NELS(dep31), dep31, }, -+ { NELS(dep206), dep206, NELS(dep31), dep31, }, -+ { NELS(dep207), dep207, NELS(dep194), dep194, }, -+ { NELS(dep208), dep208, NELS(dep0), dep0, }, -+ { NELS(dep210), dep210, NELS(dep209), dep209, }, -+ { NELS(dep211), dep211, NELS(dep0), dep0, }, -+ { NELS(dep213), dep213, NELS(dep212), dep212, }, -+ { NELS(dep214), dep214, NELS(dep212), dep212, }, -+ { NELS(dep215), dep215, NELS(dep0), dep0, }, -+ { NELS(dep213), dep213, NELS(dep216), dep216, }, -+ { NELS(dep218), dep218, NELS(dep217), dep217, }, -+ { NELS(dep219), dep219, NELS(dep217), dep217, }, -+ { NELS(dep221), dep221, NELS(dep220), dep220, }, -+ { NELS(dep223), dep223, NELS(dep222), dep222, }, -+ { NELS(dep224), dep224, NELS(dep222), dep222, }, -+ { NELS(dep225), dep225, NELS(dep222), dep222, }, -+ { NELS(dep226), dep226, NELS(dep0), dep0, }, -+ { NELS(dep227), dep227, NELS(dep222), dep222, }, -+ { NELS(dep229), dep229, NELS(dep228), dep228, }, -+ { NELS(dep230), dep230, NELS(dep62), dep62, }, -+ { NELS(dep231), dep231, NELS(dep62), dep62, }, -+ { NELS(dep233), dep233, NELS(dep232), dep232, }, -+ { NELS(dep234), dep234, NELS(dep232), dep232, }, -+ { NELS(dep233), dep233, NELS(dep235), dep235, }, -+ { NELS(dep237), dep237, NELS(dep236), dep236, }, -+ { NELS(dep238), dep238, NELS(dep236), dep236, }, -+ { NELS(dep240), dep240, NELS(dep239), dep239, }, -+ { NELS(dep241), dep241, NELS(dep239), dep239, }, -+ { NELS(dep240), dep240, NELS(dep242), dep242, }, -+ { NELS(dep243), dep243, NELS(dep217), dep217, }, -+ { NELS(dep244), dep244, NELS(dep31), dep31, }, -+ { NELS(dep245), dep245, NELS(dep31), dep31, }, -+ { NELS(dep246), dep246, NELS(dep0), dep0, }, -+ { NELS(dep247), dep247, NELS(dep0), dep0, }, -+ { NELS(dep248), dep248, NELS(dep62), dep62, }, -+ { NELS(dep249), dep249, NELS(dep222), dep222, }, -+ { 0, NULL, 0, NULL, }, -+ { NELS(dep251), dep251, NELS(dep250), dep250, }, -+}; -+ -+static const struct ia64_completer_table -+completer_table[] = { -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 88 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 88 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 576, -1, 0, 1, 6 }, -+ { 0x0, 0x0, 0, 639, -1, 0, 1, 17 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 157 }, -+ { 0x0, 0x0, 0, 738, -1, 0, 1, 17 }, -+ { 0x0, 0x0, 0, 2164, -1, 0, 1, 10 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 9 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 13 }, -+ { 0x1, 0x1, 0, -1, -1, 13, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, 2372, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, 1122, -1, 0, 1, 122 }, -+ { 0x0, 0x0, 0, -1, 
-1, 0, 1, 44 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 78 }, -+ { 0x0, 0x0, 0, 2212, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, 2439, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, 2216, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, 2218, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, 2448, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, 2451, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, 2473, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, 2476, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 24 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 24 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 24 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 24 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 35 }, -+ { 0x0, 0x0, 0, 2484, -1, 0, 1, 29 }, -+ { 0x0, 0x0, 0, 1391, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 157 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 77 }, -+ { 0x0, 0x0, 0, 1439, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1448, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1457, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1459, -1, 0, 1, 125 }, -+ { 0x0, 0x0, 0, 1461, -1, 0, 1, 125 }, -+ { 0x0, 0x0, 0, 1470, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1479, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1488, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1497, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1506, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1515, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1525, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1535, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1545, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 0, 1554, -1, 0, 1, 140 }, -+ { 0x0, 0x0, 0, 1560, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1566, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1572, -1, 0, 1, 140 }, -+ { 0x0, 0x0, 0, 1578, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1584, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1590, -1, 0, 1, 140 }, -+ { 0x0, 0x0, 0, 1596, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1602, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1608, -1, 0, 1, 140 }, -+ { 0x0, 0x0, 0, 1614, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1620, -1, 0, 1, 140 }, -+ { 0x0, 0x0, 0, 1626, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1632, -1, 0, 1, 140 }, -+ { 0x0, 0x0, 0, 1638, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1644, -1, 0, 1, 140 }, -+ { 0x0, 0x0, 0, 1650, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1656, -1, 0, 1, 145 }, -+ { 0x0, 0x0, 0, 1660, -1, 0, 1, 151 }, -+ { 0x0, 0x0, 0, 1664, -1, 0, 1, 153 }, -+ { 0x0, 0x0, 0, 1668, -1, 0, 1, 153 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 79 }, -+ { 0x0, 0x0, 0, 256, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 67 }, -+ { 0x1, 0x1, 0, 1148, -1, 20, 1, 67 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 68 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 69 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 69 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 70 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 71 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 72 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 86 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 87 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 89 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 
1, 90 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 91 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 92 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 97 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 98 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 99 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 100 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 101 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 102 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 103 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 106 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 107 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 108 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 109 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 110 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 111 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 112 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 113 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 158 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 158 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 158 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 71 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 157 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2824, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2825, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2176, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2177, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2839, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2840, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2841, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2842, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2843, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2826, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, 2827, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 11 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 84 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 83 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x1, 0x1, 0, -1, -1, 13, 1, 0 }, -+ { 0x0, 0x0, 0, 2845, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 84 }, -+ { 0x0, 0x0, 0, 1948, -1, 0, 1, 131 }, -+ { 0x0, 0x0, 0, 1950, -1, 0, 1, 138 }, -+ { 0x0, 0x0, 0, 1952, -1, 0, 1, 132 }, -+ { 0x0, 0x0, 0, 1954, -1, 0, 1, 132 }, -+ { 0x0, 0x0, 0, 1956, -1, 0, 1, 131 }, -+ { 0x0, 0x0, 0, 1958, -1, 0, 1, 138 }, -+ { 0x0, 0x0, 0, 1960, -1, 0, 1, 131 }, -+ { 0x0, 0x0, 0, 1962, -1, 0, 1, 138 }, -+ { 0x0, 0x0, 0, 1965, -1, 0, 1, 131 }, -+ { 0x0, 0x0, 0, 1968, -1, 0, 1, 138 }, -+ { 0x0, 0x0, 0, 1971, -1, 0, 1, 150 }, -+ { 0x0, 0x0, 0, 1972, -1, 0, 1, 156 }, -+ { 0x0, 0x0, 0, 1973, -1, 0, 1, 150 }, -+ { 0x0, 0x0, 0, 1974, -1, 0, 1, 156 }, -+ { 0x0, 0x0, 0, 1975, -1, 0, 1, 150 }, -+ { 0x0, 0x0, 0, 1976, -1, 0, 1, 156 }, -+ { 0x0, 0x0, 0, 1977, -1, 0, 1, 150 }, -+ { 0x0, 0x0, 0, 1978, -1, 0, 1, 156 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 82 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 120 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 118 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 120 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 119 }, -+ { 0x0, 0x0, 0, 1669, -1, 0, 
1, 136 }, -+ { 0x0, 0x0, 0, 1670, -1, 0, 1, 136 }, -+ { 0x0, 0x0, 0, 1671, -1, 0, 1, 136 }, -+ { 0x0, 0x0, 0, 1672, -1, 0, 1, 136 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 0, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 1, 223, -1, 0, 1, 12 }, -+ { 0x1, 0x1, 2, -1, -1, 27, 1, 12 }, -+ { 0x0, 0x0, 3, -1, 1322, 0, 0, -1 }, -+ { 0x0, 0x0, 3, -1, 1323, 0, 0, -1 }, -+ { 0x1, 0x1, 3, 2715, 1432, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2716, 1441, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2717, 1450, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2718, 1463, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2719, 1472, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2720, 1481, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2721, 1490, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2722, 1499, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2723, 1508, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2724, 1517, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2725, 1527, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2726, 1537, 33, 1, 127 }, -+ { 0x1, 0x1, 3, 2727, 1550, 33, 1, 142 }, -+ { 0x1, 0x1, 3, 2728, 1556, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2729, 1562, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2730, 1568, 33, 1, 142 }, -+ { 0x1, 0x1, 3, 2731, 1574, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2732, 1580, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2733, 1586, 33, 1, 142 }, -+ { 0x1, 0x1, 3, 2734, 1592, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2735, 1598, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2736, 1604, 33, 1, 142 }, -+ { 0x1, 0x1, 3, 2737, 1610, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2738, 1616, 33, 1, 142 }, -+ { 0x1, 0x1, 3, 2739, 1622, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2740, 1628, 33, 1, 142 }, -+ { 0x1, 0x1, 3, 2741, 1634, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2742, 1640, 33, 1, 142 }, -+ { 0x1, 0x1, 3, 2743, 1646, 33, 1, 147 }, -+ { 0x1, 0x1, 3, 2744, 1652, 33, 1, 147 }, -+ { 0x1, 0x1, 3, -1, -1, 27, 1, 40 }, -+ { 0x0, 0x0, 4, 2178, 1407, 0, 1, 135 }, -+ { 0x0, 0x0, 4, 2179, 1409, 0, 1, 135 }, -+ { 0x0, 0x0, 4, 2180, 1411, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2181, 1413, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2182, 1415, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2183, 1417, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2184, 1419, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2185, 1421, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2186, 1423, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2187, 1425, 0, 1, 134 }, -+ { 0x0, 0x0, 4, 2188, 1427, 0, 1, 136 }, -+ { 0x0, 0x0, 4, 2189, 1429, 0, 1, 136 }, -+ { 0x1, 0x1, 4, -1, 1436, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 534, 1435, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1445, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 535, 1444, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1454, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 536, 1453, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1458, 32, 1, 125 }, -+ { 0x1, 0x1, 4, -1, 1460, 32, 1, 125 }, -+ { 0x1, 0x1, 4, -1, 1467, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 537, 1466, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1476, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 538, 1475, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1485, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 539, 1484, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1494, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 540, 1493, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1503, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 541, 1502, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1512, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 542, 1511, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1522, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 1018, 1520, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1532, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 1019, 1530, 32, 1, 124 }, -+ { 0x1, 0x1, 4, -1, 1542, 33, 1, 130 }, -+ { 0x5, 0x5, 4, 1020, 1540, 32, 1, 124 }, -+ { 0x1, 0x21, 10, 1991, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 1992, -1, 12, 1, 3 }, -+ 
{ 0x1, 0x21, 10, 410, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2048, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, -1, 2049, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2050, 0, 0, -1 }, -+ { 0x0, 0x0, 10, 1995, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 1996, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 1997, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 1998, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, 420, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 2054, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 424, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2056, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, 428, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 2058, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 432, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2060, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, 436, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 2062, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 440, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2064, -1, 12, 1, 3 }, -+ { 0x1, 0x21, 10, 2011, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2012, -1, 12, 1, 3 }, -+ { 0x1, 0x21, 10, 450, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2070, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, -1, 2071, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2072, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2075, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2076, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2077, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2078, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2079, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2080, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2081, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2082, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2083, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2084, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2085, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2086, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2087, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2088, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2089, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2090, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2091, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2092, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2093, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2094, 0, 0, -1 }, -+ { 0x1, 0x21, 10, 2015, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2016, -1, 12, 1, 3 }, -+ { 0x1, 0x21, 10, 458, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2096, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, -1, 2097, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2098, 0, 0, -1 }, -+ { 0x0, 0x0, 10, 2019, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 2020, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 2021, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2022, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, 468, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 2102, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 472, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2104, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, 476, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 2106, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 480, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2108, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, 484, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 10, 2110, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 488, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2112, -1, 12, 1, 3 }, -+ { 0x1, 0x21, 10, 2035, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2036, -1, 12, 1, 3 }, -+ { 0x1, 0x21, 10, 498, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 10, 2118, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, -1, 2119, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2120, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2123, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2124, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2125, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2126, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2127, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2128, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2129, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2130, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2131, 0, 0, -1 }, -+ { 0x0, 0x0, 10, 
-1, 2132, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2133, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2134, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2135, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2136, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2137, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2138, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2139, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2140, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2141, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2142, 0, 0, -1 }, -+ { 0x1, 0x1, 10, 2039, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 10, 2040, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 2041, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 10, 2042, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, -1, 2143, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2145, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2147, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2149, 0, 0, -1 }, -+ { 0x1, 0x1, 10, 2043, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 10, 2044, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 10, 2045, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 10, 2046, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 10, -1, 2151, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2153, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2155, 0, 0, -1 }, -+ { 0x0, 0x0, 10, -1, 2157, 0, 0, -1 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x4200001, 11, 1993, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 298, -1, 33, 1, 3 }, -+ { 0x0, 0x0, 11, 2051, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 11, 2052, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 1999, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x0, 0x0, 11, 306, -1, 0, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x200001, 11, 2001, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 308, -1, 33, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 2003, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x0, 0x0, 11, 310, -1, 0, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x200001, 11, 2005, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 312, -1, 33, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 2007, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x0, 0x0, 11, 314, -1, 0, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x200001, 11, 2009, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 316, -1, 33, 1, 3 }, -+ { 0x0, 0x0, 11, 2065, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 11, 2066, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 11, 2067, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 11, 2068, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x4200001, 11, 2013, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 320, -1, 33, 1, 3 }, -+ { 0x0, 0x0, 11, 2073, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 11, 2074, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x4200001, 11, 2017, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 346, -1, 33, 1, 3 }, -+ { 0x0, 0x0, 11, 2099, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 11, 2100, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 
0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 2023, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x0, 0x0, 11, 354, -1, 0, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x200001, 11, 2025, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 356, -1, 33, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 2027, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x0, 0x0, 11, 358, -1, 0, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x200001, 11, 2029, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 360, -1, 33, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 2031, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x0, 0x0, 11, 362, -1, 0, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x200001, 11, 2033, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 364, -1, 33, 1, 3 }, -+ { 0x0, 0x0, 11, 2113, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 11, 2114, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 11, 2115, -1, 33, 1, 3 }, -+ { 0x200001, 0x200001, 11, 2116, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x200001, 0x4200001, 11, 2037, -1, 12, 1, 3 }, -+ { 0x2, 0x3, 11, -1, -1, 37, 1, 5 }, -+ { 0x1, 0x1, 11, 368, -1, 33, 1, 3 }, -+ { 0x0, 0x0, 11, 2121, -1, 0, 1, 3 }, -+ { 0x1, 0x1, 11, 2122, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, 2144, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 11, 2146, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 11, 2148, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 11, 2150, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, -1, -1, 36, 1, 5 }, -+ { 0x1, 0x1, 11, 2152, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 11, 2154, -1, 12, 1, 3 }, -+ { 0x1, 0x1, 11, 2156, -1, 36, 1, 3 }, -+ { 0x1000001, 0x1000001, 11, 2158, -1, 12, 1, 3 }, -+ { 0x0, 0x0, 12, -1, -1, 0, 1, 14 }, -+ { 0x0, 0x0, 12, -1, -1, 0, 1, 14 }, -+ { 0x0, 0x0, 12, -1, -1, 0, 1, 14 }, -+ { 0x1, 0x1, 13, 270, 1434, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 272, 1443, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 274, 1452, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 278, 1465, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 280, 1474, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 282, 1483, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 284, 1492, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 286, 1501, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 288, 1510, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 290, 1519, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 292, 1529, 34, 1, 124 }, -+ { 0x1, 0x1, 13, 294, 1539, 34, 1, 124 }, -+ { 0x0, 0x0, 19, -1, 777, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 778, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 779, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 780, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 781, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 782, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 783, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 784, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 785, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 786, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 787, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 788, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 789, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 790, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 791, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 792, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 793, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 
794, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 795, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 796, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 797, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 798, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 799, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 800, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 801, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 802, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 803, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 804, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 805, 0, 0, -1 }, -+ { 0x0, 0x0, 19, -1, 806, 0, 0, -1 }, -+ { 0x0, 0x0, 20, -1, 2793, 0, 0, -1 }, -+ { 0x0, 0x0, 20, -1, 2794, 0, 0, -1 }, -+ { 0x0, 0x0, 20, -1, 2809, 0, 0, -1 }, -+ { 0x0, 0x0, 20, -1, 2810, 0, 0, -1 }, -+ { 0x0, 0x0, 20, -1, 2815, 0, 0, -1 }, -+ { 0x0, 0x0, 20, -1, 2816, 0, 0, -1 }, -+ { 0x0, 0x0, 21, 813, 2805, 0, 0, -1 }, -+ { 0x0, 0x0, 21, 814, 2807, 0, 0, -1 }, -+ { 0x0, 0x0, 23, -1, 2803, 0, 0, -1 }, -+ { 0x0, 0x0, 23, -1, 2804, 0, 0, -1 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, 1254, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 6 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 7 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 8 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 15 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, 1275, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 
0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 18 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 19 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 20 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 
0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, 1308, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 17 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 35, 1, 21 }, -+ { 0x1, 0x1, 24, -1, -1, 33, 1, 76 }, -+ { 0x1, 0x1, 24, -1, -1, 33, 1, 76 }, -+ { 0x1, 0x1, 24, 1324, 1437, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1325, 1446, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1326, 1455, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1327, 1468, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1328, 1477, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1329, 1486, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1330, 1495, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1331, 1504, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1332, 1513, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1333, 1523, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1334, 1533, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1335, 1543, 35, 1, 130 }, -+ { 0x1, 0x1, 24, 1336, 1552, 35, 1, 144 }, -+ { 0x1, 0x1, 24, 1337, 1558, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1338, 1564, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1339, 1570, 35, 1, 144 }, -+ { 0x1, 0x1, 24, 1340, 1576, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1341, 1582, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1342, 1588, 35, 1, 144 }, -+ { 0x1, 0x1, 24, 1343, 1594, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1344, 1600, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1345, 1606, 35, 1, 144 }, -+ { 0x1, 0x1, 24, 1346, 1612, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1347, 1618, 35, 1, 144 }, -+ { 0x1, 0x1, 24, 1348, 1624, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1349, 1630, 35, 1, 144 }, -+ { 0x1, 0x1, 24, 1350, 1636, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1351, 1642, 35, 1, 144 }, -+ { 0x1, 0x1, 24, 1352, 1648, 35, 1, 149 }, -+ { 0x1, 0x1, 24, 1353, 1654, 35, 1, 149 }, -+ { 0x0, 0x0, 33, 2787, 2785, 0, 0, -1 }, -+ { 0x0, 0x0, 33, 2790, 2788, 0, 0, -1 }, -+ { 0x0, 0x0, 33, 2796, 2795, 0, 0, -1 }, -+ { 0x0, 0x0, 33, 2798, 2797, 0, 0, -1 }, -+ { 0x0, 0x0, 33, 2812, 2811, 0, 0, -1 }, -+ { 0x0, 0x0, 33, 2814, 2813, 0, 0, -1 }, -+ { 0x0, 0x0, 35, -1, 2806, 0, 0, -1 }, -+ { 0x0, 0x0, 35, -1, 2808, 
0, 0, -1 }, -+ { 0x1, 0x1, 38, -1, 2256, 37, 1, 29 }, -+ { 0x1, 0x1, 38, -1, 2315, 37, 1, 29 }, -+ { 0x0, 0x0, 38, -1, 2318, 0, 0, -1 }, -+ { 0x1, 0x1, 38, -1, -1, 37, 1, 29 }, -+ { 0x1, 0x1, 38, -1, 2323, 37, 1, 29 }, -+ { 0x0, 0x0, 38, -1, 2326, 0, 0, -1 }, -+ { 0x1, 0x1, 38, -1, -1, 37, 1, 29 }, -+ { 0x0, 0x0, 38, -1, 2329, 0, 0, -1 }, -+ { 0x1, 0x1, 38, -1, -1, 37, 1, 29 }, -+ { 0x1, 0x1, 38, -1, 2332, 37, 1, 29 }, -+ { 0x1, 0x1, 38, -1, 2335, 37, 1, 29 }, -+ { 0x1, 0x1, 38, -1, 2368, 37, 1, 29 }, -+ { 0x3, 0x3, 38, -1, -1, 30, 1, 137 }, -+ { 0x0, 0x0, 38, 1124, -1, 0, 1, 95 }, -+ { 0x0, 0x0, 38, -1, -1, 0, 1, 104 }, -+ { 0x0, 0x0, 38, 1130, -1, 0, 1, 116 }, -+ { 0x3, 0x3, 38, -1, -1, 30, 1, 155 }, -+ { 0x0, 0x0, 38, 1131, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 40, -1, 955, 0, 0, -1 }, -+ { 0x0, 0x0, 40, -1, 963, 0, 0, -1 }, -+ { 0x0, 0x0, 40, 1133, 959, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 604, 33, 1, 6 }, -+ { 0x18000001, 0x18000001, 40, -1, 612, 6, 1, 7 }, -+ { 0x3, 0x3, 40, 1134, 608, 33, 1, 6 }, -+ { 0x0, 0x0, 40, -1, 967, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 624, 33, 1, 8 }, -+ { 0x0, 0x0, 40, -1, 971, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 636, 33, 1, 15 }, -+ { 0x0, 0x0, 40, -1, 976, 0, 0, -1 }, -+ { 0x0, 0x0, 40, -1, 980, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 659, 33, 1, 17 }, -+ { 0x3, 0x3, 40, -1, 663, 33, 1, 17 }, -+ { 0x0, 0x0, 40, -1, 984, 0, 0, -1 }, -+ { 0x0, 0x0, 40, -1, 988, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 683, 33, 1, 18 }, -+ { 0x18000001, 0x18000001, 40, -1, 687, 6, 1, 18 }, -+ { 0x0, 0x0, 40, -1, 992, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 699, 33, 1, 19 }, -+ { 0x0, 0x0, 40, -1, 996, 0, 0, -1 }, -+ { 0x0, 0x0, 40, -1, 1000, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 719, 33, 1, 20 }, -+ { 0x18000001, 0x18000001, 40, -1, 723, 6, 1, 20 }, -+ { 0x0, 0x0, 40, -1, 1004, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 735, 33, 1, 21 }, -+ { 0x0, 0x0, 40, -1, 1009, 0, 0, -1 }, -+ { 0x0, 0x0, 40, -1, 1013, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 758, 33, 1, 17 }, -+ { 0x3, 0x3, 40, -1, 762, 33, 1, 17 }, -+ { 0x0, 0x0, 40, -1, 1017, 0, 0, -1 }, -+ { 0x3, 0x3, 40, -1, 774, 33, 1, 21 }, -+ { 0x0, 0x0, 41, 833, 954, 0, 0, -1 }, -+ { 0x0, 0x0, 41, 834, 962, 0, 0, -1 }, -+ { 0x0, 0x0, 41, 835, 958, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 836, 603, 34, 1, 6 }, -+ { 0x10000001, 0x10000001, 41, 837, 611, 6, 1, 7 }, -+ { 0x1, 0x1, 41, 838, 607, 34, 1, 6 }, -+ { 0x0, 0x0, 41, 839, 966, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 840, 623, 34, 1, 8 }, -+ { 0x0, 0x0, 41, 841, 970, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 842, 635, 34, 1, 15 }, -+ { 0x0, 0x0, 41, 843, 975, 0, 0, -1 }, -+ { 0x0, 0x0, 41, 844, 979, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 845, 658, 34, 1, 17 }, -+ { 0x1, 0x1, 41, 846, 662, 34, 1, 17 }, -+ { 0x0, 0x0, 41, 847, 983, 0, 0, -1 }, -+ { 0x0, 0x0, 41, 848, 987, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 849, 682, 34, 1, 18 }, -+ { 0x10000001, 0x10000001, 41, 850, 686, 6, 1, 18 }, -+ { 0x0, 0x0, 41, 851, 991, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 852, 698, 34, 1, 19 }, -+ { 0x0, 0x0, 41, 853, 995, 0, 0, -1 }, -+ { 0x0, 0x0, 41, 854, 999, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 855, 718, 34, 1, 20 }, -+ { 0x10000001, 0x10000001, 41, 856, 722, 6, 1, 20 }, -+ { 0x0, 0x0, 41, 857, 1003, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 858, 734, 34, 1, 21 }, -+ { 0x0, 0x0, 41, 859, 1008, 0, 0, -1 }, -+ { 0x0, 0x0, 41, 860, 1012, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 861, 757, 34, 1, 17 }, -+ { 0x1, 0x1, 41, 862, 761, 34, 1, 17 }, -+ { 0x0, 0x0, 41, 863, 1016, 0, 0, -1 }, -+ { 0x1, 0x1, 41, 864, 773, 34, 1, 21 }, -+ { 0x800001, 0x800001, 41, -1, 1138, 4, 1, 16 }, -+ { 0x1, 0x1, 41, 2202, 1136, 4, 1, 16 }, 
-+ { 0x1, 0x1, 41, 939, 1141, 4, 1, 22 }, -+ { 0x2, 0x3, 41, -1, 1146, 20, 1, 67 }, -+ { 0x1, 0x1, 41, 2203, 1144, 21, 1, 67 }, -+ { 0x0, 0x0, 42, -1, -1, 0, 1, 80 }, -+ { 0x0, 0x0, 42, -1, -1, 0, 1, 80 }, -+ { 0x0, 0x0, 42, -1, -1, 0, 1, 123 }, -+ { 0x1, 0x1, 44, 1354, 295, 38, 1, 1 }, -+ { 0x1, 0x1, 44, 1355, 297, 38, 1, 1 }, -+ { 0x0, 0x0, 44, -1, 300, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 414, 0, 0, -1 }, -+ { 0x1, 0x1, 44, 1359, 317, 38, 1, 1 }, -+ { 0x1, 0x1, 44, 1360, 319, 38, 1, 1 }, -+ { 0x0, 0x0, 44, -1, 322, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 454, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 324, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 342, 0, 0, -1 }, -+ { 0x1, 0x1, 44, 1366, 343, 38, 1, 1 }, -+ { 0x1, 0x1, 44, 1367, 345, 38, 1, 1 }, -+ { 0x0, 0x0, 44, -1, 348, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 462, 0, 0, -1 }, -+ { 0x1, 0x1, 44, 1371, 365, 38, 1, 1 }, -+ { 0x1, 0x1, 44, 1372, 367, 38, 1, 1 }, -+ { 0x0, 0x0, 44, -1, 370, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 502, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 372, 0, 0, -1 }, -+ { 0x0, 0x0, 44, -1, 390, 0, 0, -1 }, -+ { 0x0, 0x0, 44, 1230, 2263, 0, 0, -1 }, -+ { 0x0, 0x0, 44, 1231, 2271, 0, 1, 54 }, -+ { 0x0, 0x0, 44, 1232, 2938, 0, 1, 54 }, -+ { 0x0, 0x0, 44, 1233, 2339, 0, 0, -1 }, -+ { 0x0, 0x0, 44, 1234, -1, 0, 1, 49 }, -+ { 0x0, 0x0, 44, 1102, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 44, 1103, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 44, 1104, -1, 0, 1, 0 }, -+ { 0x1, 0x1, 45, -1, 1658, 30, 1, 152 }, -+ { 0x1, 0x1, 45, 945, 1657, 30, 1, 151 }, -+ { 0x1, 0x1, 45, -1, 1662, 30, 1, 154 }, -+ { 0x1, 0x1, 45, 946, 1661, 30, 1, 153 }, -+ { 0x1, 0x1, 45, -1, 1666, 30, 1, 154 }, -+ { 0x1, 0x1, 45, 947, 1665, 30, 1, 153 }, -+ { 0x3, 0x3, 46, -1, 1142, 3, 1, 22 }, -+ { 0x1, 0x1, 47, 2223, -1, 30, 1, 137 }, -+ { 0x1, 0x1, 47, 2254, -1, 30, 1, 155 }, -+ { 0x0, 0x0, 49, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 49, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 49, -1, -1, 0, 1, 40 }, -+ { 0x1, 0x1, 56, -1, 1659, 31, 1, 152 }, -+ { 0x1, 0x1, 56, -1, 1663, 31, 1, 154 }, -+ { 0x1, 0x1, 56, -1, 1667, 31, 1, 154 }, -+ { 0x0, 0x0, 56, -1, -1, 0, 1, 94 }, -+ { 0x2, 0x3, 56, -1, -1, 27, 1, 94 }, -+ { 0x1, 0x1, 56, -1, -1, 28, 1, 94 }, -+ { 0x0, 0x0, 65, 14, 574, 0, 1, 6 }, -+ { 0x0, 0x0, 65, 1255, 577, 0, 1, 6 }, -+ { 0x1, 0x1, 65, 1256, 579, 33, 1, 6 }, -+ { 0x1, 0x1, 65, 1257, 581, 34, 1, 6 }, -+ { 0x3, 0x3, 65, 1258, 583, 33, 1, 6 }, -+ { 0x0, 0x0, 65, 1259, 585, 0, 1, 6 }, -+ { 0x1, 0x1, 65, 1260, 587, 33, 1, 6 }, -+ { 0x1, 0x1, 65, 1261, 589, 34, 1, 6 }, -+ { 0x3, 0x3, 65, 1262, 591, 33, 1, 6 }, -+ { 0x1, 0x1, 65, 1263, 593, 6, 1, 7 }, -+ { 0x8000001, 0x8000001, 65, 1264, 595, 6, 1, 7 }, -+ { 0x10000001, 0x10000001, 65, 1265, 597, 6, 1, 7 }, -+ { 0x18000001, 0x18000001, 65, 1266, 599, 6, 1, 7 }, -+ { 0x0, 0x0, 65, 1267, 613, 0, 1, 8 }, -+ { 0x1, 0x1, 65, 1268, 615, 33, 1, 8 }, -+ { 0x1, 0x1, 65, 1269, 617, 34, 1, 8 }, -+ { 0x3, 0x3, 65, 1270, 619, 33, 1, 8 }, -+ { 0x0, 0x0, 65, 1271, 625, 0, 1, 15 }, -+ { 0x1, 0x1, 65, 1272, 627, 33, 1, 15 }, -+ { 0x1, 0x1, 65, 1273, 629, 34, 1, 15 }, -+ { 0x3, 0x3, 65, 1274, 631, 33, 1, 15 }, -+ { 0x0, 0x0, 65, 15, 637, 0, 1, 17 }, -+ { 0x0, 0x0, 65, 1276, 640, 0, 1, 17 }, -+ { 0x1, 0x1, 65, 1277, 642, 33, 1, 17 }, -+ { 0x1, 0x1, 65, 1278, 644, 34, 1, 17 }, -+ { 0x3, 0x3, 65, 1279, 646, 33, 1, 17 }, -+ { 0x0, 0x0, 65, 1280, 648, 0, 1, 17 }, -+ { 0x1, 0x1, 65, 1281, 650, 33, 1, 17 }, -+ { 0x1, 0x1, 65, 1282, 652, 34, 1, 17 }, -+ { 0x3, 0x3, 65, 1283, 654, 33, 1, 17 }, -+ { 0x0, 0x0, 65, 1284, 664, 0, 1, 18 }, -+ { 0x1, 0x1, 65, 1285, 666, 33, 1, 18 }, -+ { 0x1, 0x1, 65, 1286, 
668, 34, 1, 18 }, -+ { 0x3, 0x3, 65, 1287, 670, 33, 1, 18 }, -+ { 0x1, 0x1, 65, 1288, 672, 6, 1, 18 }, -+ { 0x8000001, 0x8000001, 65, 1289, 674, 6, 1, 18 }, -+ { 0x10000001, 0x10000001, 65, 1290, 676, 6, 1, 18 }, -+ { 0x18000001, 0x18000001, 65, 1291, 678, 6, 1, 18 }, -+ { 0x0, 0x0, 65, 1292, 688, 0, 1, 19 }, -+ { 0x1, 0x1, 65, 1293, 690, 33, 1, 19 }, -+ { 0x1, 0x1, 65, 1294, 692, 34, 1, 19 }, -+ { 0x3, 0x3, 65, 1295, 694, 33, 1, 19 }, -+ { 0x0, 0x0, 65, 1296, 700, 0, 1, 20 }, -+ { 0x1, 0x1, 65, 1297, 702, 33, 1, 20 }, -+ { 0x1, 0x1, 65, 1298, 704, 34, 1, 20 }, -+ { 0x3, 0x3, 65, 1299, 706, 33, 1, 20 }, -+ { 0x1, 0x1, 65, 1300, 708, 6, 1, 20 }, -+ { 0x8000001, 0x8000001, 65, 1301, 710, 6, 1, 20 }, -+ { 0x10000001, 0x10000001, 65, 1302, 712, 6, 1, 20 }, -+ { 0x18000001, 0x18000001, 65, 1303, 714, 6, 1, 20 }, -+ { 0x0, 0x0, 65, 1304, 724, 0, 1, 21 }, -+ { 0x1, 0x1, 65, 1305, 726, 33, 1, 21 }, -+ { 0x1, 0x1, 65, 1306, 728, 34, 1, 21 }, -+ { 0x3, 0x3, 65, 1307, 730, 33, 1, 21 }, -+ { 0x0, 0x0, 65, 17, 736, 0, 1, 17 }, -+ { 0x0, 0x0, 65, 1309, 739, 0, 1, 17 }, -+ { 0x1, 0x1, 65, 1310, 741, 33, 1, 17 }, -+ { 0x1, 0x1, 65, 1311, 743, 34, 1, 17 }, -+ { 0x3, 0x3, 65, 1312, 745, 33, 1, 17 }, -+ { 0x0, 0x0, 65, 1313, 747, 0, 1, 17 }, -+ { 0x1, 0x1, 65, 1314, 749, 33, 1, 17 }, -+ { 0x1, 0x1, 65, 1315, 751, 34, 1, 17 }, -+ { 0x3, 0x3, 65, 1316, 753, 33, 1, 17 }, -+ { 0x0, 0x0, 65, 1317, 763, 0, 1, 21 }, -+ { 0x1, 0x1, 65, 1318, 765, 33, 1, 21 }, -+ { 0x1, 0x1, 65, 1319, 767, 34, 1, 21 }, -+ { 0x3, 0x3, 65, 1320, 769, 33, 1, 21 }, -+ { 0x3, 0x3, 66, 543, 1521, 33, 1, 129 }, -+ { 0x3, 0x3, 66, 544, 1531, 33, 1, 129 }, -+ { 0x3, 0x3, 66, 545, 1541, 33, 1, 129 }, -+ { 0x0, 0x0, 66, -1, 1546, 0, 1, 140 }, -+ { 0x0, 0x0, 66, -1, 1547, 0, 1, 145 }, -+ { 0x0, 0x0, 66, -1, 1548, 0, 1, 145 }, -+ { 0x0, 0x0, 107, 1028, 2311, 0, 0, -1 }, -+ { 0x0, 0x0, 107, 1029, 2830, 0, 1, 29 }, -+ { 0x0, 0x0, 107, 1030, 2352, 0, 0, -1 }, -+ { 0x0, 0x0, 107, 1031, 2834, 0, 1, 29 }, -+ { 0x0, 0x0, 109, -1, 2313, 0, 0, -1 }, -+ { 0x1, 0x1, 109, -1, 2831, 27, 1, 29 }, -+ { 0x0, 0x0, 109, -1, 2354, 0, 0, -1 }, -+ { 0x1, 0x1, 109, -1, 2835, 27, 1, 29 }, -+ { 0x0, 0x0, 110, 1033, -1, 0, 1, 115 }, -+ { 0x1, 0x1, 111, -1, -1, 27, 1, 115 }, -+ { 0x0, 0x0, 112, 1064, 2860, 0, 1, 1 }, -+ { 0x0, 0x0, 112, 1065, 2863, 0, 1, 1 }, -+ { 0x0, 0x0, 112, 1206, 303, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1207, 307, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1167, 430, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1168, 438, 0, 0, -1 }, -+ { 0x0, 0x0, 112, -1, 446, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1066, 2876, 0, 1, 1 }, -+ { 0x0, 0x0, 112, 1067, 2879, 0, 1, 1 }, -+ { 0x0, 0x0, 112, -1, 328, 0, 0, -1 }, -+ { 0x0, 0x0, 112, -1, 332, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1215, 333, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1216, 337, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1068, 2900, 0, 1, 1 }, -+ { 0x0, 0x0, 112, 1069, 2903, 0, 1, 1 }, -+ { 0x0, 0x0, 112, 1219, 351, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1220, 355, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1180, 478, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1181, 486, 0, 0, -1 }, -+ { 0x0, 0x0, 112, -1, 494, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1373, 2914, 0, 1, 1 }, -+ { 0x0, 0x0, 112, 1374, 2916, 0, 1, 1 }, -+ { 0x0, 0x0, 112, -1, 376, 0, 0, -1 }, -+ { 0x0, 0x0, 112, -1, 380, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1228, 381, 0, 0, -1 }, -+ { 0x0, 0x0, 112, 1229, 385, 0, 0, -1 }, -+ { 0x0, 0x0, 112, -1, 2281, 0, 0, -1 }, -+ { 0x1, 0x9, 112, -1, 2285, 33, 1, 54 }, -+ { 0x1, 0x9, 112, -1, 2947, 33, 1, 54 }, -+ { 0x2, 0x3, 112, 1390, 2348, 27, 1, 49 }, -+ { 0x1, 0x1, 114, 1356, 2861, 37, 1, 1 }, 
-+ { 0x1, 0x1, 114, 1357, 2864, 37, 1, 1 }, -+ { 0x1, 0x1, 114, 1361, 2877, 37, 1, 1 }, -+ { 0x1, 0x1, 114, 1362, 2880, 37, 1, 1 }, -+ { 0x1, 0x1, 114, 1368, 2901, 37, 1, 1 }, -+ { 0x1, 0x1, 114, 1369, 2904, 37, 1, 1 }, -+ { 0x0, 0x0, 114, -1, 2924, 0, 1, 1 }, -+ { 0x0, 0x0, 114, -1, 2925, 0, 1, 1 }, -+ { 0x0, 0x0, 115, 1105, 2856, 0, 1, 1 }, -+ { 0x0, 0x0, 115, 1106, 2858, 0, 1, 1 }, -+ { 0x0, 0x0, 115, 1165, 301, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1166, 305, 0, 0, -1 }, -+ { 0x0, 0x0, 115, -1, 434, 0, 0, -1 }, -+ { 0x0, 0x0, 115, -1, 442, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1210, 444, 0, 0, -1 }, -+ { 0x0, 0x0, 115, -1, 2874, 0, 1, 1 }, -+ { 0x0, 0x0, 115, -1, 2875, 0, 1, 1 }, -+ { 0x0, 0x0, 115, 1213, 326, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1214, 330, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1174, 335, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1175, 339, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1109, 2896, 0, 1, 1 }, -+ { 0x0, 0x0, 115, 1110, 2898, 0, 1, 1 }, -+ { 0x0, 0x0, 115, 1178, 349, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1179, 353, 0, 0, -1 }, -+ { 0x0, 0x0, 115, -1, 482, 0, 0, -1 }, -+ { 0x0, 0x0, 115, -1, 490, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1223, 492, 0, 0, -1 }, -+ { 0x0, 0x0, 115, -1, 2912, 0, 1, 1 }, -+ { 0x0, 0x0, 115, -1, 2913, 0, 1, 1 }, -+ { 0x0, 0x0, 115, 1226, 374, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1227, 378, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1187, 383, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1188, 387, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1060, 2279, 0, 0, -1 }, -+ { 0x0, 0x0, 115, 1061, 2283, 0, 1, 54 }, -+ { 0x0, 0x0, 115, 1062, 2946, 0, 1, 54 }, -+ { 0x0, 0x0, 115, 1063, 2347, 0, 1, 49 }, -+ { 0x1, 0x1, 115, -1, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 115, -1, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 115, -1, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 116, -1, 2857, 37, 1, 1 }, -+ { 0x1, 0x1, 116, -1, 2859, 37, 1, 1 }, -+ { 0x0, 0x0, 116, -1, 2884, 0, 1, 1 }, -+ { 0x0, 0x0, 116, -1, 2885, 0, 1, 1 }, -+ { 0x1, 0x1, 116, -1, 2897, 37, 1, 1 }, -+ { 0x1, 0x1, 116, -1, 2899, 37, 1, 1 }, -+ { 0x0, 0x0, 116, -1, 2922, 0, 1, 1 }, -+ { 0x0, 0x0, 116, -1, 2923, 0, 1, 1 }, -+ { 0x0, 0x0, 117, 1158, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 117, 1159, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 117, 1160, -1, 0, 1, 0 }, -+ { 0x3, 0x3, 117, 1118, -1, 34, 1, 33 }, -+ { 0x3, 0x3, 117, 1119, -1, 34, 1, 40 }, -+ { 0x1, 0x1, 119, -1, -1, 35, 1, 33 }, -+ { 0x1, 0x1, 119, -1, -1, 35, 1, 40 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 66 }, -+ { 0x1, 0x1, 120, -1, -1, 36, 1, 122 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 40 }, -+ { 0x1, 0x1, 120, -1, -1, 27, 1, 96 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 105 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 73 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 73 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 74 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 40 }, -+ { 0x1, 0x1, 120, -1, -1, 27, 1, 117 }, -+ { 0x1, 0x1, 120, -1, -1, 27, 1, 40 }, -+ { 0x0, 0x0, 120, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 121, -1, 2786, 0, 0, -1 }, -+ { 0x0, 0x0, 121, -1, 2789, 0, 0, -1 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 16 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 16 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 16 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 16 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 22 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 22 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 22 }, -+ { 0x1, 0x1, 122, -1, -1, 35, 1, 22 }, -+ { 0x1, 0x1, 122, -1, -1, 23, 1, 67 }, -+ { 0x1, 0x1, 122, -1, -1, 23, 1, 67 }, -+ { 0x1, 0x1, 122, -1, -1, 23, 1, 67 }, -+ { 0x1, 0x1, 122, -1, -1, 23, 1, 67 }, -+ { 0x1, 0x1, 122, 900, -1, 23, 1, 67 }, -+ { 0x9, 0x9, 122, 901, -1, 20, 1, 67 }, -+ { 0x0, 0x0, 126, 2165, -1, 0, 1, 0 }, -+ 
{ 0x0, 0x0, 126, 2166, -1, 0, 1, 0 }, -+ { 0x1, 0x1, 126, -1, -1, 28, 1, 33 }, -+ { 0x1, 0x1, 126, -1, -1, 27, 1, 33 }, -+ { 0x1, 0x1, 126, -1, -1, 29, 1, 0 }, -+ { 0x1, 0x1, 126, -1, -1, 29, 1, 0 }, -+ { 0x1, 0x1, 126, -1, -1, 29, 1, 0 }, -+ { 0x1, 0x1, 126, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x0, 126, -1, -1, 0, 1, 114 }, -+ { 0x1, 0x1, 126, -1, -1, 29, 1, 0 }, -+ { 0x1, 0x1, 126, -1, -1, 29, 1, 0 }, -+ { 0x1, 0x1, 126, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x0, 126, 1116, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 126, 1244, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 140, 1194, 2852, 0, 1, 1 }, -+ { 0x0, 0x0, 140, 1195, 2854, 0, 1, 1 }, -+ { 0x0, 0x0, 140, 1036, 302, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1037, 422, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1076, 311, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1077, 315, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1078, 443, 0, 0, -1 }, -+ { 0x0, 0x0, 140, -1, 2872, 0, 1, 1 }, -+ { 0x0, 0x0, 140, -1, 2873, 0, 1, 1 }, -+ { 0x0, 0x0, 140, 1081, 325, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1082, 329, 0, 0, -1 }, -+ { 0x0, 0x0, 140, -1, 336, 0, 0, -1 }, -+ { 0x0, 0x0, 140, -1, 340, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1198, 2892, 0, 1, 1 }, -+ { 0x0, 0x0, 140, 1199, 2894, 0, 1, 1 }, -+ { 0x0, 0x0, 140, 1049, 350, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1050, 470, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1089, 359, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1090, 363, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1091, 491, 0, 0, -1 }, -+ { 0x0, 0x0, 140, -1, 2910, 0, 1, 1 }, -+ { 0x0, 0x0, 140, -1, 2911, 0, 1, 1 }, -+ { 0x0, 0x0, 140, 1094, 373, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 1095, 377, 0, 0, -1 }, -+ { 0x0, 0x0, 140, -1, 384, 0, 0, -1 }, -+ { 0x0, 0x0, 140, -1, 388, 0, 0, -1 }, -+ { 0x0, 0x0, 140, 2974, 2267, 0, 0, -1 }, -+ { 0x1, 0x1, 140, 2975, 2275, 33, 1, 54 }, -+ { 0x1, 0x1, 140, 2976, 2940, 33, 1, 54 }, -+ { 0x0, 0x0, 140, 2977, 2341, 0, 0, -1 }, -+ { 0x1, 0x1, 140, 2978, -1, 28, 1, 49 }, -+ { 0x1, 0x1, 141, -1, 2853, 37, 1, 1 }, -+ { 0x1, 0x1, 141, -1, 2855, 37, 1, 1 }, -+ { 0x0, 0x0, 141, -1, 2882, 0, 1, 1 }, -+ { 0x0, 0x0, 141, -1, 2883, 0, 1, 1 }, -+ { 0x1, 0x1, 141, -1, 2893, 37, 1, 1 }, -+ { 0x1, 0x1, 141, -1, 2895, 37, 1, 1 }, -+ { 0x0, 0x0, 141, -1, 2920, 0, 1, 1 }, -+ { 0x0, 0x0, 141, -1, 2921, 0, 1, 1 }, -+ { 0x1, 0x1, 144, 899, 1140, 3, 1, 22 }, -+ { 0x0, 0x0, 145, 2167, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 146, 905, 2846, 0, 1, 1 }, -+ { 0x0, 0x0, 146, 906, 2849, 0, 1, 1 }, -+ { 0x0, 0x0, 146, -1, 304, 0, 0, -1 }, -+ { 0x0, 0x0, 146, -1, 426, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1038, 309, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1039, 313, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1040, 445, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 909, 2866, 0, 1, 1 }, -+ { 0x0, 0x0, 146, 910, 2869, 0, 1, 1 }, -+ { 0x0, 0x0, 146, 1043, 327, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1044, 331, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1083, 334, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1084, 338, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 915, 2886, 0, 1, 1 }, -+ { 0x0, 0x0, 146, 916, 2889, 0, 1, 1 }, -+ { 0x0, 0x0, 146, -1, 352, 0, 0, -1 }, -+ { 0x0, 0x0, 146, -1, 474, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1051, 357, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1052, 361, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1053, 493, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 919, 2906, 0, 1, 1 }, -+ { 0x0, 0x0, 146, 920, 2908, 0, 1, 1 }, -+ { 0x0, 0x0, 146, 1056, 375, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1057, 379, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1096, 382, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1097, 386, 0, 0, -1 }, -+ { 0x0, 0x0, 146, 1189, 2265, 0, 0, -1 }, -+ { 0x1, 0x1, 146, 1190, 2273, 36, 1, 54 }, -+ { 0x1, 0x1, 146, 1191, 2939, 36, 1, 54 }, -+ { 0x0, 0x0, 146, 1192, 2340, 0, 0, -1 }, -+ { 0x1, 
0x1, 146, 1193, -1, 27, 1, 49 }, -+ { 0x1, 0x1, 147, -1, 2848, 37, 1, 1 }, -+ { 0x1, 0x1, 147, -1, 2851, 37, 1, 1 }, -+ { 0x1, 0x1, 147, -1, 2868, 37, 1, 1 }, -+ { 0x1, 0x1, 147, -1, 2871, 37, 1, 1 }, -+ { 0x1, 0x1, 147, -1, 2888, 37, 1, 1 }, -+ { 0x1, 0x1, 147, -1, 2891, 37, 1, 1 }, -+ { 0x0, 0x0, 147, -1, 2918, 0, 1, 1 }, -+ { 0x0, 0x0, 147, -1, 2919, 0, 1, 1 }, -+ { 0x0, 0x0, 148, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 148, 1117, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 149, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 149, -1, -1, 0, 1, 66 }, -+ { 0x0, 0x0, 149, -1, 2926, 0, 1, 63 }, -+ { 0x0, 0x0, 149, -1, 2927, 0, 1, 63 }, -+ { 0x0, 0x0, 149, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 149, -1, -1, 0, 1, 81 }, -+ { 0x0, 0x0, 149, -1, -1, 0, 1, 81 }, -+ { 0x0, 0x0, 149, -1, -1, 0, 1, 85 }, -+ { 0x0, 0x0, 149, -1, -1, 0, 1, 40 }, -+ { 0x1, 0x1, 150, -1, 575, 12, 1, 6 }, -+ { 0x1, 0x1, 150, -1, 578, 12, 1, 6 }, -+ { 0x200001, 0x200001, 150, -1, 580, 12, 1, 6 }, -+ { 0x400001, 0x400001, 150, -1, 582, 12, 1, 6 }, -+ { 0x600001, 0x600001, 150, -1, 584, 12, 1, 6 }, -+ { 0x1, 0x1, 150, -1, 586, 12, 1, 6 }, -+ { 0x200001, 0x200001, 150, -1, 588, 12, 1, 6 }, -+ { 0x400001, 0x400001, 150, -1, 590, 12, 1, 6 }, -+ { 0x600001, 0x600001, 150, -1, 592, 12, 1, 6 }, -+ { 0x41, 0x41, 150, -1, 594, 6, 1, 7 }, -+ { 0x8000041, 0x8000041, 150, -1, 596, 6, 1, 7 }, -+ { 0x10000041, 0x10000041, 150, -1, 598, 6, 1, 7 }, -+ { 0x18000041, 0x18000041, 150, -1, 600, 6, 1, 7 }, -+ { 0x1, 0x1, 150, -1, 614, 12, 1, 8 }, -+ { 0x200001, 0x200001, 150, -1, 616, 12, 1, 8 }, -+ { 0x400001, 0x400001, 150, -1, 618, 12, 1, 8 }, -+ { 0x600001, 0x600001, 150, -1, 620, 12, 1, 8 }, -+ { 0x1, 0x1, 150, -1, 626, 12, 1, 15 }, -+ { 0x200001, 0x200001, 150, -1, 628, 12, 1, 15 }, -+ { 0x400001, 0x400001, 150, -1, 630, 12, 1, 15 }, -+ { 0x600001, 0x600001, 150, -1, 632, 12, 1, 15 }, -+ { 0x1, 0x1, 150, -1, 638, 12, 1, 17 }, -+ { 0x1, 0x1, 150, -1, 641, 12, 1, 17 }, -+ { 0x200001, 0x200001, 150, -1, 643, 12, 1, 17 }, -+ { 0x400001, 0x400001, 150, -1, 645, 12, 1, 17 }, -+ { 0x600001, 0x600001, 150, -1, 647, 12, 1, 17 }, -+ { 0x1, 0x1, 150, -1, 649, 12, 1, 17 }, -+ { 0x200001, 0x200001, 150, -1, 651, 12, 1, 17 }, -+ { 0x400001, 0x400001, 150, -1, 653, 12, 1, 17 }, -+ { 0x600001, 0x600001, 150, -1, 655, 12, 1, 17 }, -+ { 0x1, 0x1, 150, -1, 665, 12, 1, 18 }, -+ { 0x200001, 0x200001, 150, -1, 667, 12, 1, 18 }, -+ { 0x400001, 0x400001, 150, -1, 669, 12, 1, 18 }, -+ { 0x600001, 0x600001, 150, -1, 671, 12, 1, 18 }, -+ { 0x41, 0x41, 150, -1, 673, 6, 1, 18 }, -+ { 0x8000041, 0x8000041, 150, -1, 675, 6, 1, 18 }, -+ { 0x10000041, 0x10000041, 150, -1, 677, 6, 1, 18 }, -+ { 0x18000041, 0x18000041, 150, -1, 679, 6, 1, 18 }, -+ { 0x1, 0x1, 150, -1, 689, 12, 1, 19 }, -+ { 0x200001, 0x200001, 150, -1, 691, 12, 1, 19 }, -+ { 0x400001, 0x400001, 150, -1, 693, 12, 1, 19 }, -+ { 0x600001, 0x600001, 150, -1, 695, 12, 1, 19 }, -+ { 0x1, 0x1, 150, -1, 701, 12, 1, 20 }, -+ { 0x200001, 0x200001, 150, -1, 703, 12, 1, 20 }, -+ { 0x400001, 0x400001, 150, -1, 705, 12, 1, 20 }, -+ { 0x600001, 0x600001, 150, -1, 707, 12, 1, 20 }, -+ { 0x41, 0x41, 150, -1, 709, 6, 1, 20 }, -+ { 0x8000041, 0x8000041, 150, -1, 711, 6, 1, 20 }, -+ { 0x10000041, 0x10000041, 150, -1, 713, 6, 1, 20 }, -+ { 0x18000041, 0x18000041, 150, -1, 715, 6, 1, 20 }, -+ { 0x1, 0x1, 150, -1, 725, 12, 1, 21 }, -+ { 0x200001, 0x200001, 150, -1, 727, 12, 1, 21 }, -+ { 0x400001, 0x400001, 150, -1, 729, 12, 1, 21 }, -+ { 0x600001, 0x600001, 150, -1, 731, 12, 1, 21 }, -+ { 0x1, 0x1, 150, -1, 737, 12, 1, 17 }, -+ { 0x1, 0x1, 150, -1, 740, 12, 
1, 17 }, -+ { 0x200001, 0x200001, 150, -1, 742, 12, 1, 17 }, -+ { 0x400001, 0x400001, 150, -1, 744, 12, 1, 17 }, -+ { 0x600001, 0x600001, 150, -1, 746, 12, 1, 17 }, -+ { 0x1, 0x1, 150, -1, 748, 12, 1, 17 }, -+ { 0x200001, 0x200001, 150, -1, 750, 12, 1, 17 }, -+ { 0x400001, 0x400001, 150, -1, 752, 12, 1, 17 }, -+ { 0x600001, 0x600001, 150, -1, 754, 12, 1, 17 }, -+ { 0x1, 0x1, 150, -1, 764, 12, 1, 21 }, -+ { 0x200001, 0x200001, 150, -1, 766, 12, 1, 21 }, -+ { 0x400001, 0x400001, 150, -1, 768, 12, 1, 21 }, -+ { 0x600001, 0x600001, 150, -1, 770, 12, 1, 21 }, -+ { 0x0, 0x0, 155, -1, -1, 0, 1, 124 }, -+ { 0x0, 0x0, 159, 775, -1, 0, 1, 75 }, -+ { 0x0, 0x0, 159, 776, -1, 0, 1, 75 }, -+ { 0x9, 0x9, 159, -1, 1438, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1447, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1456, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1469, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1478, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1487, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1496, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1505, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1514, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1524, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1534, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1544, 32, 1, 130 }, -+ { 0x9, 0x9, 159, -1, 1553, 32, 1, 144 }, -+ { 0x9, 0x9, 159, -1, 1559, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1565, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1571, 32, 1, 144 }, -+ { 0x9, 0x9, 159, -1, 1577, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1583, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1589, 32, 1, 144 }, -+ { 0x9, 0x9, 159, -1, 1595, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1601, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1607, 32, 1, 144 }, -+ { 0x9, 0x9, 159, -1, 1613, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1619, 32, 1, 144 }, -+ { 0x9, 0x9, 159, -1, 1625, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1631, 32, 1, 144 }, -+ { 0x9, 0x9, 159, -1, 1637, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1643, 32, 1, 144 }, -+ { 0x9, 0x9, 159, -1, 1649, 32, 1, 149 }, -+ { 0x9, 0x9, 159, -1, 1655, 32, 1, 149 }, -+ { 0x0, 0x0, 160, 1235, 296, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 1236, 412, 0, 0, -1 }, -+ { 0x1, 0x1, 160, -1, 2862, 38, 1, 1 }, -+ { 0x1, 0x1, 160, 907, 2865, 38, 1, 1 }, -+ { 0x0, 0x0, 160, 908, 413, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 1237, 318, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 1238, 452, 0, 0, -1 }, -+ { 0x1, 0x1, 160, -1, 2878, 38, 1, 1 }, -+ { 0x1, 0x1, 160, 911, 2881, 38, 1, 1 }, -+ { 0x0, 0x0, 160, 912, 453, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 913, 323, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 914, 341, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 1239, 344, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 1240, 460, 0, 0, -1 }, -+ { 0x1, 0x1, 160, -1, 2902, 38, 1, 1 }, -+ { 0x1, 0x1, 160, 917, 2905, 38, 1, 1 }, -+ { 0x0, 0x0, 160, 918, 461, 0, 0, -1 }, -+ { 0x0, 0x0, 160, -1, 366, 0, 0, -1 }, -+ { 0x0, 0x0, 160, -1, 500, 0, 0, -1 }, -+ { 0x1, 0x1, 160, -1, 2915, 38, 1, 1 }, -+ { 0x1, 0x1, 160, 921, 2917, 38, 1, 1 }, -+ { 0x0, 0x0, 160, 922, 501, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 923, 371, 0, 0, -1 }, -+ { 0x0, 0x0, 160, 924, 389, 0, 0, -1 }, -+ { 0x0, 0x0, 161, 1397, 2287, 0, 0, -1 }, -+ { 0x0, 0x0, 161, 1398, 2295, 0, 1, 54 }, -+ { 0x0, 0x0, 161, 1399, 2956, 0, 1, 54 }, -+ { 0x0, 0x0, 161, 1400, 2343, 0, 0, -1 }, -+ { 0x1, 0x1, 161, 1401, -1, 29, 1, 49 }, -+ { 0x0, 0x0, 162, -1, 2305, 0, 0, -1 }, -+ { 0x1, 0x9, 162, -1, 2309, 33, 1, 54 }, -+ { 0x1, 0x9, 162, -1, 2965, 33, 1, 54 }, -+ { 0x6, 0x7, 162, -1, 2350, 27, 1, 49 }, -+ { 0x0, 0x0, 163, 1383, 2303, 0, 0, -1 }, -+ { 0x0, 0x0, 163, 1384, 2307, 0, 1, 54 }, -+ { 0x0, 0x0, 163, 1385, 2964, 0, 1, 54 }, -+ { 0x1, 
0x1, 163, 1386, 2349, 29, 1, 49 }, -+ { 0x1, 0x1, 164, 1404, -1, 27, 1, 33 }, -+ { 0x0, 0x0, 165, 2159, 2291, 0, 0, -1 }, -+ { 0x1, 0x1, 165, 2160, 2299, 33, 1, 54 }, -+ { 0x1, 0x1, 165, 2161, 2958, 33, 1, 54 }, -+ { 0x0, 0x0, 165, 2162, 2345, 0, 0, -1 }, -+ { 0x3, 0x3, 165, 2163, -1, 28, 1, 49 }, -+ { 0x0, 0x0, 166, 1392, 2289, 0, 0, -1 }, -+ { 0x1, 0x1, 166, 1393, 2297, 36, 1, 54 }, -+ { 0x1, 0x1, 166, 1394, 2957, 36, 1, 54 }, -+ { 0x0, 0x0, 166, 1395, 2344, 0, 0, -1 }, -+ { 0x5, 0x5, 166, 1396, -1, 27, 1, 49 }, -+ { 0x0, 0x0, 167, -1, 2928, 0, 1, 63 }, -+ { 0x0, 0x0, 167, -1, 2929, 0, 1, 63 }, -+ { 0x1, 0x1, 169, -1, -1, 28, 1, 33 }, -+ { 0x1, 0x1, 170, 2745, -1, 27, 1, 33 }, -+ { 0x1, 0x1, 170, 2746, -1, 27, 1, 33 }, -+ { 0x1, 0x1, 171, 1685, -1, 28, 1, 135 }, -+ { 0x1, 0x1, 171, 1686, -1, 28, 1, 135 }, -+ { 0x1, 0x1, 171, 1687, -1, 28, 1, 135 }, -+ { 0x1, 0x1, 171, 1688, -1, 28, 1, 135 }, -+ { 0x1, 0x1, 171, 1689, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1690, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1691, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1692, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1693, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1694, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1695, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1696, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1697, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1698, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1699, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1700, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1701, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1702, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1703, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1704, -1, 28, 1, 134 }, -+ { 0x1, 0x1, 171, 1705, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 171, 1706, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 171, 1707, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 171, 1708, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 171, 1709, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1710, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1711, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1712, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1713, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1714, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1715, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1716, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1717, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1718, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1719, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1720, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1721, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1722, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1723, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1724, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1725, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1726, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1727, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1728, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1729, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1730, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1731, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1732, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1733, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1734, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1735, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1736, -1, 28, 1, 125 }, -+ { 0x1, 0x1, 171, 1737, -1, 28, 1, 125 }, -+ { 0x1, 0x1, 171, 1738, -1, 28, 1, 125 }, -+ { 0x1, 0x1, 171, 1739, -1, 28, 1, 125 }, -+ { 0x1, 0x1, 171, 1740, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1741, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1742, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1743, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1744, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1745, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1746, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1747, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1748, -1, 28, 1, 124 }, -+ { 
0x1, 0x1, 171, 1749, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1750, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1751, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1752, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1753, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1754, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1755, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1756, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1757, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1758, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1759, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1760, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1761, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1762, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1763, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1764, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1765, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1766, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1767, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1768, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1769, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1770, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1771, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1772, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1773, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1774, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1775, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1776, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1777, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1778, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1779, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1780, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1781, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1782, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1783, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1784, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1785, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1786, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1787, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1788, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1789, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1790, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1791, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1792, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1793, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1794, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1795, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1796, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1797, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1798, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1799, -1, 28, 1, 129 }, -+ { 0x1, 0x1, 171, 1800, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1801, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1802, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1803, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1804, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1805, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1806, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1807, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1808, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1809, -1, 28, 1, 129 }, -+ { 0x1, 0x1, 171, 1810, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1811, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1812, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1813, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1814, -1, 28, 1, 126 }, -+ { 0x1, 0x1, 171, 1815, -1, 28, 1, 127 }, -+ { 0x1, 0x1, 171, 1816, -1, 28, 1, 128 }, -+ { 0x1, 0x1, 171, 1817, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1818, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1819, -1, 28, 1, 129 }, -+ { 0x1, 0x1, 171, 1820, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1821, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1822, -1, 28, 1, 130 }, -+ { 0x1, 0x1, 171, 1823, -1, 28, 1, 124 }, -+ { 0x1, 0x1, 171, 1824, -1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1825, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1826, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1827, -1, 28, 1, 141 }, -+ { 0x1, 0x1, 171, 1828, -1, 28, 1, 142 }, -+ { 0x1, 0x1, 171, 1829, -1, 28, 1, 
143 }, -+ { 0x1, 0x1, 171, 1830, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1831, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1832, -1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1833, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1834, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1835, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1836, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1837, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1838, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1839, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1840, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1841, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1842, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1843, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1844, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1845, -1, 28, 1, 141 }, -+ { 0x1, 0x1, 171, 1846, -1, 28, 1, 142 }, -+ { 0x1, 0x1, 171, 1847, -1, 28, 1, 143 }, -+ { 0x1, 0x1, 171, 1848, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1849, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1850, -1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1851, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1852, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1853, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1854, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1855, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1856, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1857, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1858, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1859, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1860, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1861, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1862, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1863, -1, 28, 1, 141 }, -+ { 0x1, 0x1, 171, 1864, -1, 28, 1, 142 }, -+ { 0x1, 0x1, 171, 1865, -1, 28, 1, 143 }, -+ { 0x1, 0x1, 171, 1866, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1867, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1868, -1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1869, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1870, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1871, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1872, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1873, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1874, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1875, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1876, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1877, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1878, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1879, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1880, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1881, -1, 28, 1, 141 }, -+ { 0x1, 0x1, 171, 1882, -1, 28, 1, 142 }, -+ { 0x1, 0x1, 171, 1883, -1, 28, 1, 143 }, -+ { 0x1, 0x1, 171, 1884, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1885, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1886, -1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1887, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1888, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1889, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1890, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1891, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1892, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1893, -1, 28, 1, 141 }, -+ { 0x1, 0x1, 171, 1894, -1, 28, 1, 142 }, -+ { 0x1, 0x1, 171, 1895, -1, 28, 1, 143 }, -+ { 0x1, 0x1, 171, 1896, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1897, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1898, -1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1899, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1900, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1901, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1902, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1903, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1904, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1905, -1, 28, 1, 141 }, -+ { 0x1, 0x1, 171, 1906, -1, 28, 1, 142 }, -+ { 0x1, 0x1, 171, 1907, -1, 28, 1, 143 }, -+ { 0x1, 0x1, 171, 1908, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1909, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1910, 
-1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1911, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1912, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1913, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1914, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1915, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1916, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1917, -1, 28, 1, 141 }, -+ { 0x1, 0x1, 171, 1918, -1, 28, 1, 142 }, -+ { 0x1, 0x1, 171, 1919, -1, 28, 1, 143 }, -+ { 0x1, 0x1, 171, 1920, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1921, -1, 28, 1, 144 }, -+ { 0x1, 0x1, 171, 1922, -1, 28, 1, 140 }, -+ { 0x1, 0x1, 171, 1923, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1924, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1925, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1926, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1927, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1928, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1929, -1, 28, 1, 146 }, -+ { 0x1, 0x1, 171, 1930, -1, 28, 1, 147 }, -+ { 0x1, 0x1, 171, 1931, -1, 28, 1, 148 }, -+ { 0x1, 0x1, 171, 1932, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1933, -1, 28, 1, 149 }, -+ { 0x1, 0x1, 171, 1934, -1, 28, 1, 145 }, -+ { 0x1, 0x1, 171, 1673, -1, 28, 1, 151 }, -+ { 0x1, 0x1, 171, 1674, -1, 28, 1, 152 }, -+ { 0x1, 0x1, 171, 1675, -1, 28, 1, 152 }, -+ { 0x1, 0x1, 171, 1676, -1, 28, 1, 151 }, -+ { 0x1, 0x1, 171, 1677, -1, 28, 1, 153 }, -+ { 0x1, 0x1, 171, 1678, -1, 28, 1, 154 }, -+ { 0x1, 0x1, 171, 1679, -1, 28, 1, 154 }, -+ { 0x1, 0x1, 171, 1680, -1, 28, 1, 153 }, -+ { 0x1, 0x1, 171, 1681, -1, 28, 1, 153 }, -+ { 0x1, 0x1, 171, 1682, -1, 28, 1, 154 }, -+ { 0x1, 0x1, 171, 1683, -1, 28, 1, 154 }, -+ { 0x1, 0x1, 171, 1684, -1, 28, 1, 153 }, -+ { 0x1, 0x1, 171, 1979, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 171, 1980, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 171, 1981, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 171, 1982, -1, 28, 1, 136 }, -+ { 0x1, 0x1, 172, 1935, -1, 29, 1, 151 }, -+ { 0x1, 0x1, 172, 1936, -1, 29, 1, 152 }, -+ { 0x1, 0x1, 172, 1937, -1, 29, 1, 152 }, -+ { 0x1, 0x1, 172, 1938, -1, 29, 1, 151 }, -+ { 0x1, 0x1, 172, 1939, -1, 29, 1, 153 }, -+ { 0x1, 0x1, 172, 1940, -1, 29, 1, 154 }, -+ { 0x1, 0x1, 172, 1941, -1, 29, 1, 154 }, -+ { 0x1, 0x1, 172, 1942, -1, 29, 1, 153 }, -+ { 0x1, 0x1, 172, 1943, -1, 29, 1, 153 }, -+ { 0x1, 0x1, 172, 1944, -1, 29, 1, 154 }, -+ { 0x1, 0x1, 172, 1945, -1, 29, 1, 154 }, -+ { 0x1, 0x1, 172, 1946, -1, 29, 1, 153 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 135 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 135 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 135 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 135 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 134 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 
28, 1, 130 }, -+ { 0x3, 0x3, 173, 269, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2224, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 271, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2225, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 273, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2226, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 125 }, -+ { 0x3, 0x3, 173, 275, -1, 28, 1, 125 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 125 }, -+ { 0x3, 0x3, 173, 276, -1, 28, 1, 125 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 277, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2227, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 279, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2228, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 281, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2229, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 283, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2230, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 285, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2231, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 287, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2232, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, 
-1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 129 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 289, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2233, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 129 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 291, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2234, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 126 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 127 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 128 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 129 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 293, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 130 }, -+ { 0x3, 0x3, 173, 2235, -1, 28, 1, 124 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 141 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 142 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 143 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, 2236, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2237, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2238, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 141 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 142 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 143 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, 2239, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2240, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2241, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 141 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 142 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 143 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, 2242, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2243, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2244, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 141 }, -+ { 0x3, 0x3, 173, -1, 
-1, 28, 1, 142 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 143 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, 2245, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2246, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 141 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 142 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 143 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, 2247, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2248, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 141 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 142 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 143 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, 2249, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2250, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 141 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 142 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 143 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 144 }, -+ { 0x3, 0x3, 173, 2251, -1, 28, 1, 140 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2252, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 146 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 147 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 148 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 149 }, -+ { 0x3, 0x3, 173, 2253, -1, 28, 1, 145 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 151 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 152 }, -+ { 0x3, 0x3, 173, 933, -1, 28, 1, 152 }, -+ { 0x3, 0x3, 173, 934, -1, 28, 1, 151 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 153 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 154 }, -+ { 0x3, 0x3, 173, 935, -1, 28, 1, 154 }, -+ { 0x3, 0x3, 173, 936, -1, 28, 1, 153 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 153 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 154 }, -+ { 0x3, 0x3, 173, 937, -1, 28, 1, 154 }, -+ { 0x3, 0x3, 173, 938, -1, 28, 1, 153 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 131 }, -+ { 0x3, 0x3, 173, 2190, -1, 28, 1, 131 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, 2191, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 132 }, -+ { 0x3, 0x3, 173, 2192, -1, 28, 1, 132 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 132 }, -+ { 0x3, 0x3, 173, 2193, -1, 28, 1, 132 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 131 }, -+ { 0x3, 0x3, 173, 2194, -1, 28, 1, 131 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, 2195, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 131 }, -+ { 0x3, 0x3, 173, 2196, -1, 28, 1, 131 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, 2197, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 131 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 133 }, -+ { 0x3, 0x3, 173, 2198, -1, 28, 1, 131 }, -+ { 
0x3, 0x3, 173, -1, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 139 }, -+ { 0x3, 0x3, 173, 2199, -1, 28, 1, 138 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 150 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 156 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 150 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 156 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 150 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 156 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 150 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 156 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 150 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 156 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x3, 0x3, 173, -1, -1, 28, 1, 136 }, -+ { 0x0, 0x0, 174, -1, 392, 0, 0, -1 }, -+ { 0x0, 0x0, 174, -1, 394, 0, 0, -1 }, -+ { 0x0, 0x0, 174, 3004, 2968, 0, 1, 1 }, -+ { 0x0, 0x0, 174, 3005, 2969, 0, 1, 1 }, -+ { 0x0, 0x0, 174, -1, 400, 0, 0, -1 }, -+ { 0x0, 0x0, 174, -1, 402, 0, 0, -1 }, -+ { 0x0, 0x0, 174, 3008, 2972, 0, 1, 1 }, -+ { 0x0, 0x0, 174, 3009, 2973, 0, 1, 1 }, -+ { 0x11, 0x31, 175, 2847, 407, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 408, 12, 1, 4 }, -+ { 0x11, 0x31, 175, 2047, 409, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 411, 12, 1, 4 }, -+ { 0x1, 0x1, 175, -1, 415, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 416, 12, 1, 4 }, -+ { 0x11, 0x11, 175, -1, 417, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 418, 12, 1, 4 }, -+ { 0x1, 0x1, 175, 2053, 419, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 421, 12, 1, 4 }, -+ { 0x11, 0x11, 175, 2055, 423, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 425, 12, 1, 4 }, -+ { 0x1, 0x1, 175, 2057, 427, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 429, 12, 1, 4 }, -+ { 0x11, 0x11, 175, 2059, 431, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 433, 12, 1, 4 }, -+ { 0x1, 0x1, 175, 2061, 435, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 437, 12, 1, 4 }, -+ { 0x11, 0x11, 175, 2063, 439, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 441, 12, 1, 4 }, -+ { 0x11, 0x31, 175, 2867, 447, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 448, 12, 1, 4 }, -+ { 0x11, 0x31, 175, 2069, 449, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 451, 12, 1, 4 }, -+ { 0x11, 0x31, 175, 2887, 455, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 456, 12, 1, 4 }, -+ { 0x11, 0x31, 175, 2095, 457, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 459, 12, 1, 4 }, -+ { 0x1, 0x1, 175, -1, 463, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 464, 12, 1, 4 }, -+ { 0x11, 0x11, 175, -1, 465, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 466, 12, 1, 4 }, -+ { 0x1, 0x1, 175, 2101, 467, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 469, 12, 1, 4 }, -+ { 0x11, 0x11, 175, 2103, 471, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 473, 12, 1, 4 }, -+ { 0x1, 0x1, 175, 2105, 475, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 477, 12, 1, 4 }, -+ { 0x11, 0x11, 175, 2107, 479, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 481, 12, 1, 4 }, -+ { 0x1, 0x1, 175, 2109, 483, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 175, -1, 485, 12, 1, 4 }, -+ { 0x11, 0x11, 175, 2111, 487, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 489, 12, 1, 4 }, -+ { 0x11, 0x31, 175, 2907, 495, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 496, 12, 1, 4 }, -+ { 0x11, 0x31, 175, 2117, 497, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 175, -1, 499, 12, 1, 4 }, -+ { 0x1, 0x1, 175, -1, 503, 33, 1, 4 }, -+ { 0x200001, 0x200001, 175, -1, 504, 12, 1, 4 }, -+ { 0x1, 0x1, 175, -1, 505, 33, 1, 4 }, -+ { 0x200001, 0x200001, 175, -1, 506, 12, 1, 
4 }, -+ { 0x1, 0x1, 175, -1, 511, 33, 1, 4 }, -+ { 0x200001, 0x200001, 175, -1, 512, 12, 1, 4 }, -+ { 0x1, 0x1, 175, -1, 513, 33, 1, 4 }, -+ { 0x200001, 0x200001, 175, -1, 514, 12, 1, 4 }, -+ { 0x2200001, 0x6200001, 176, 2850, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 1994, -1, 33, 1, 4 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x4200001, 0x4200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x1, 0x1, 176, 2000, -1, 37, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2002, -1, 33, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x1, 0x1, 176, 2004, -1, 37, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2006, -1, 33, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x1, 0x1, 176, 2008, -1, 37, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2010, -1, 33, 1, 4 }, -+ { 0x1, 0x1, 176, -1, -1, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, -1, -1, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x2200001, 0x6200001, 176, 2870, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2014, -1, 33, 1, 4 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x4200001, 0x4200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x2200001, 0x6200001, 176, 2890, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2018, -1, 33, 1, 4 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x4200001, 0x4200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x1, 0x1, 176, 2024, -1, 37, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2026, -1, 33, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x1, 0x1, 176, 2028, -1, 37, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2030, -1, 33, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x1, 0x1, 176, 2032, -1, 37, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2034, -1, 33, 1, 4 }, -+ { 0x1, 0x1, 176, -1, -1, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, -1, -1, 33, 1, 4 }, -+ { 0x2200001, 0x2200001, 176, -1, -1, 12, 1, 4 }, -+ { 0x2200001, 0x6200001, 176, 2909, -1, 12, 1, 4 }, -+ { 0x11, 0x11, 176, 2038, -1, 33, 1, 4 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x4200001, 0x4200001, 176, -1, 
-1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 37, 1, 4 }, -+ { 0x2000001, 0x2000001, 176, -1, -1, 12, 1, 4 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 33, 1, 5 }, -+ { 0x200001, 0x200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x0, 0x0, 176, -1, -1, 0, 1, 5 }, -+ { 0x1, 0x1, 176, -1, -1, 12, 1, 5 }, -+ { 0x9, 0x9, 176, -1, -1, 33, 1, 5 }, -+ { 0x1, 0x1, 176, 395, -1, 33, 1, 4 }, -+ { 0x1200001, 0x1200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x200001, 0x200001, 176, 396, -1, 12, 1, 4 }, -+ { 0x9, 0x9, 176, -1, -1, 33, 1, 5 }, -+ { 0x1, 0x1, 176, 397, -1, 33, 1, 4 }, -+ { 0x1200001, 0x1200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x200001, 0x200001, 176, 398, -1, 12, 1, 4 }, -+ { 0x9, 0x9, 176, -1, -1, 33, 1, 5 }, -+ { 0x1, 0x1, 176, 403, -1, 33, 1, 4 }, -+ { 0x1200001, 0x1200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x200001, 0x200001, 176, 404, -1, 12, 1, 4 }, -+ { 0x9, 0x9, 176, -1, -1, 33, 1, 5 }, -+ { 0x1, 0x1, 176, 405, -1, 33, 1, 4 }, -+ { 0x1200001, 0x1200001, 176, -1, -1, 12, 1, 5 }, -+ { 0x200001, 0x200001, 176, 406, -1, 12, 1, 4 }, -+ { 0x0, 0x0, 177, -1, 2293, 0, 0, -1 }, -+ { 0x9, 0x9, 177, -1, 2301, 33, 1, 49 }, -+ { 0x9, 0x9, 177, -1, 2959, 33, 1, 49 }, -+ { 0x0, 0x0, 177, -1, 2346, 0, 0, -1 }, -+ { 0x7, 0x7, 177, -1, -1, 27, 1, 49 }, -+ { 0x1, 0x1, 197, -1, -1, 27, 1, 10 }, -+ { 0x1, 0x1, 211, -1, -1, 29, 1, 0 }, -+ { 0x1, 0x1, 211, -1, -1, 29, 1, 0 }, -+ { 0x2, 0x3, 211, 1151, -1, 27, 1, 33 }, -+ { 0x0, 0x0, 211, 1152, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 211, 1153, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 211, 1154, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 211, 1155, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 211, 1156, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 211, 2988, -1, 0, 1, 93 }, -+ { 0x0, 0x0, 211, 2989, -1, 0, 1, 93 }, -+ { 0x0, 0x0, 211, 2990, 949, 0, 0, -1 }, -+ { 0x1, 0x1, 212, -1, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 212, -1, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 213, -1, 1408, 32, 1, 135 }, -+ { 0x1, 0x1, 213, -1, 1410, 32, 1, 135 }, -+ { 0x1, 0x1, 213, -1, 1412, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1414, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1416, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1418, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1420, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1422, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1424, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1426, 32, 1, 134 }, -+ { 0x1, 0x1, 213, -1, 1428, 32, 1, 136 }, -+ { 0x1, 0x1, 213, -1, 1430, 32, 1, 136 }, -+ { 0x1, 0x1, 213, -1, 1947, 32, 1, 131 }, -+ { 0x1, 0x1, 213, -1, 1949, 32, 1, 138 }, -+ { 0x1, 0x1, 213, -1, 1951, 32, 1, 132 }, -+ { 0x1, 0x1, 213, -1, 1953, 32, 1, 132 }, -+ { 0x1, 0x1, 213, -1, 1955, 32, 1, 131 }, -+ { 0x1, 0x1, 213, -1, 1957, 32, 1, 138 }, -+ { 0x1, 0x1, 213, -1, 1959, 32, 1, 131 }, -+ { 0x1, 0x1, 213, -1, 1961, 32, 1, 138 }, -+ { 0x1, 0x1, 213, 2749, 1963, 32, 1, 131 }, -+ { 0x1, 0x1, 213, 2750, 1966, 32, 1, 138 }, -+ { 0x0, 0x0, 214, -1, 2791, 0, 0, -1 }, -+ { 0x0, 0x0, 214, -1, 2792, 0, 0, -1 }, -+ { 0x0, 0x0, 214, -1, 2817, 0, 
0, -1 }, -+ { 0x5, 0x5, 214, -1, 2820, 20, 1, 67 }, -+ { 0x0, 0x0, 218, 2175, 948, 0, 0, -1 }, -+ { 0x0, 0x0, 219, -1, 1121, 0, 0, -1 }, -+ { 0x0, 0x0, 219, -1, 1246, 0, 0, -1 }, -+ { 0x0, 0x0, 219, -1, -1, 0, 1, 121 }, -+ { 0x0, 0x0, 219, -1, -1, 0, 1, 66 }, -+ { 0x1, 0x1, 219, 815, 2255, 36, 1, 65 }, -+ { 0x1, 0x1, 219, 816, 2314, 36, 1, 65 }, -+ { 0x0, 0x0, 219, 817, 2317, 0, 0, -1 }, -+ { 0x1, 0x1, 219, 818, -1, 36, 1, 65 }, -+ { 0x0, 0x0, 219, 1405, -1, 0, 1, 33 }, -+ { 0x1, 0x1, 219, 819, 2322, 36, 1, 65 }, -+ { 0x0, 0x0, 219, 820, 2325, 0, 0, -1 }, -+ { 0x1, 0x1, 219, 821, -1, 36, 1, 65 }, -+ { 0x0, 0x0, 219, 822, 2328, 0, 0, -1 }, -+ { 0x1, 0x1, 219, 823, -1, 36, 1, 65 }, -+ { 0x1, 0x1, 219, 824, 2331, 36, 1, 65 }, -+ { 0x1, 0x1, 219, 825, 2334, 36, 1, 65 }, -+ { 0x0, 0x0, 219, 1406, -1, 0, 1, 33 }, -+ { 0x1, 0x1, 219, 826, 2367, 36, 1, 65 }, -+ { 0x1, 0x1, 219, 827, -1, 31, 1, 137 }, -+ { 0x1, 0x1, 219, 226, 1431, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 227, 1440, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 228, 1449, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 229, 1462, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 230, 1471, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 231, 1480, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 232, 1489, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 233, 1498, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 234, 1507, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 235, 1516, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 236, 1526, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 237, 1536, 32, 1, 126 }, -+ { 0x1, 0x1, 219, 238, 1549, 32, 1, 141 }, -+ { 0x1, 0x1, 219, 239, 1555, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 240, 1561, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 241, 1567, 32, 1, 141 }, -+ { 0x1, 0x1, 219, 242, 1573, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 243, 1579, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 244, 1585, 32, 1, 141 }, -+ { 0x1, 0x1, 219, 245, 1591, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 246, 1597, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 247, 1603, 32, 1, 141 }, -+ { 0x1, 0x1, 219, 248, 1609, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 249, 1615, 32, 1, 141 }, -+ { 0x1, 0x1, 219, 250, 1621, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 251, 1627, 32, 1, 141 }, -+ { 0x1, 0x1, 219, 252, 1633, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 253, 1639, 32, 1, 141 }, -+ { 0x1, 0x1, 219, 254, 1645, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 255, 1651, 32, 1, 146 }, -+ { 0x1, 0x1, 219, 831, -1, 31, 1, 155 }, -+ { 0x0, 0x0, 220, 2370, -1, 0, 1, 65 }, -+ { 0x0, 0x0, 220, 2371, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 25, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2373, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2374, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2375, -1, 0, 1, 44 }, -+ { 0x0, 0x0, 220, 2376, -1, 0, 1, 39 }, -+ { 0x1, 0x1, 220, 2377, -1, 12, 1, 58 }, -+ { 0x0, 0x0, 220, 2378, -1, 0, 1, 53 }, -+ { 0x1000001, 0x1000001, 220, 2379, -1, 12, 1, 58 }, -+ { 0x1, 0x1, 220, 2380, -1, 36, 1, 53 }, -+ { 0x200001, 0x200001, 220, 2381, -1, 12, 1, 58 }, -+ { 0x1, 0x1, 220, 2382, -1, 33, 1, 53 }, -+ { 0x1200001, 0x1200001, 220, 2383, -1, 12, 1, 48 }, -+ { 0x9, 0x9, 220, 2384, -1, 33, 1, 48 }, -+ { 0x0, 0x0, 220, 2385, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2386, -1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2387, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2388, -1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2389, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2390, -1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2391, -1, 0, 1, 48 }, -+ { 0x0, 0x0, 220, 2392, -1, 0, 1, 48 }, -+ { 0x1, 0x1, 220, 2393, -1, 12, 1, 58 }, -+ { 0x0, 0x0, 220, 2394, -1, 0, 1, 53 }, -+ { 0x200001, 0x1200001, 220, 2395, -1, 12, 1, 58 }, -+ { 0x1, 0x9, 220, 2396, -1, 33, 1, 53 }, -+ { 0x0, 0x0, 220, 2397, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2398, 
-1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2399, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2400, -1, 0, 1, 53 }, -+ { 0x1, 0x1, 220, 2401, -1, 12, 1, 58 }, -+ { 0x0, 0x0, 220, 2402, -1, 0, 1, 53 }, -+ { 0x1000001, 0x1000001, 220, 2403, -1, 12, 1, 58 }, -+ { 0x1, 0x1, 220, 2404, -1, 36, 1, 53 }, -+ { 0x200001, 0x200001, 220, 2405, -1, 12, 1, 58 }, -+ { 0x1, 0x1, 220, 2406, -1, 33, 1, 53 }, -+ { 0x1200001, 0x1200001, 220, 2407, -1, 12, 1, 48 }, -+ { 0x9, 0x9, 220, 2408, -1, 33, 1, 48 }, -+ { 0x0, 0x0, 220, 2409, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2410, -1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2411, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2412, -1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2413, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2414, -1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2415, -1, 0, 1, 48 }, -+ { 0x0, 0x0, 220, 2416, -1, 0, 1, 48 }, -+ { 0x1, 0x1, 220, 2417, -1, 12, 1, 58 }, -+ { 0x0, 0x0, 220, 2418, -1, 0, 1, 53 }, -+ { 0x200001, 0x1200001, 220, 2419, -1, 12, 1, 58 }, -+ { 0x1, 0x9, 220, 2420, -1, 33, 1, 53 }, -+ { 0x0, 0x0, 220, 2421, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2422, -1, 0, 1, 53 }, -+ { 0x0, 0x0, 220, 2423, -1, 0, 1, 58 }, -+ { 0x0, 0x0, 220, 2424, -1, 0, 1, 53 }, -+ { 0x1, 0x1, 220, 2425, -1, 28, 1, 28 }, -+ { 0x0, 0x0, 220, 2426, -1, 0, 1, 28 }, -+ { 0x3, 0x3, 220, 2427, -1, 27, 1, 28 }, -+ { 0x1, 0x1, 220, 2428, -1, 27, 1, 28 }, -+ { 0x0, 0x0, 220, 2429, -1, 0, 1, 65 }, -+ { 0x0, 0x0, 220, 2430, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2431, -1, 0, 1, 28 }, -+ { 0x1, 0x1, 220, 2432, -1, 36, 1, 65 }, -+ { 0x1, 0x1, 220, 2433, -1, 37, 1, 28 }, -+ { 0x0, 0x0, 220, 2434, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2435, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2436, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2437, -1, 0, 1, 65 }, -+ { 0x0, 0x0, 220, 2438, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 37, -1, 0, 1, 28 }, -+ { 0x1, 0x1, 220, 2440, -1, 36, 1, 65 }, -+ { 0x1, 0x1, 220, 2441, -1, 37, 1, 28 }, -+ { 0x0, 0x0, 220, 2442, -1, 0, 1, 28 }, -+ { 0x1, 0x1, 220, 2443, -1, 36, 1, 65 }, -+ { 0x1, 0x1, 220, 2444, -1, 37, 1, 28 }, -+ { 0x0, 0x0, 220, 2445, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2446, -1, 0, 1, 65 }, -+ { 0x0, 0x0, 220, 2447, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 42, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2449, -1, 0, 1, 65 }, -+ { 0x0, 0x0, 220, 2450, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 43, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2452, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2453, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2454, -1, 0, 1, 48 }, -+ { 0x1, 0x1, 220, 2455, -1, 27, 1, 48 }, -+ { 0x1, 0x1, 220, 2456, -1, 28, 1, 48 }, -+ { 0x3, 0x3, 220, 2457, -1, 27, 1, 48 }, -+ { 0x1, 0x1, 220, 2458, -1, 29, 1, 48 }, -+ { 0x5, 0x5, 220, 2459, -1, 27, 1, 48 }, -+ { 0x3, 0x3, 220, 2460, -1, 28, 1, 48 }, -+ { 0x7, 0x7, 220, 2461, -1, 27, 1, 48 }, -+ { 0x0, 0x0, 220, 2462, -1, 0, 1, 48 }, -+ { 0x0, 0x0, 220, 2463, -1, 0, 1, 48 }, -+ { 0x0, 0x0, 220, 2464, -1, 0, 1, 48 }, -+ { 0x0, 0x0, 220, 2465, -1, 0, 1, 48 }, -+ { 0x1, 0x1, 220, 2466, -1, 28, 1, 28 }, -+ { 0x0, 0x0, 220, 2467, -1, 0, 1, 28 }, -+ { 0x3, 0x3, 220, 2468, -1, 27, 1, 28 }, -+ { 0x1, 0x1, 220, 2469, -1, 27, 1, 28 }, -+ { 0x0, 0x0, 220, 2470, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2471, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2472, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 52, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2474, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2475, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 57, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 2477, -1, 0, 1, 23 }, -+ { 0x0, 0x0, 220, 2478, -1, 0, 1, 23 }, -+ { 0x0, 0x0, 220, 2479, -1, 0, 1, 23 }, -+ { 0x0, 0x0, 220, 2480, -1, 0, 1, 23 }, -+ { 0x0, 0x0, 220, 2481, -1, 
0, 1, 34 }, -+ { 0x0, 0x0, 220, 2482, -1, 0, 1, 65 }, -+ { 0x0, 0x0, 220, 2483, -1, 0, 1, 28 }, -+ { 0x0, 0x0, 220, 64, -1, 0, 1, 28 }, -+ { 0x1, 0x1, 221, 2485, -1, 34, 1, 65 }, -+ { 0x1, 0x1, 221, 2486, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2487, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2488, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2489, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2490, -1, 34, 1, 45 }, -+ { 0x1, 0x1, 221, 2491, -1, 34, 1, 41 }, -+ { 0x400001, 0x400001, 221, 2492, -1, 12, 1, 60 }, -+ { 0x1, 0x1, 221, 2493, -1, 34, 1, 55 }, -+ { 0x1400001, 0x1400001, 221, 2494, -1, 12, 1, 60 }, -+ { 0x5, 0x5, 221, 2495, -1, 34, 1, 55 }, -+ { 0x600001, 0x600001, 221, 2496, -1, 12, 1, 60 }, -+ { 0x3, 0x3, 221, 2497, -1, 33, 1, 55 }, -+ { 0x1600001, 0x1600001, 221, 2498, -1, 12, 1, 50 }, -+ { 0xb, 0xb, 221, 2499, -1, 33, 1, 50 }, -+ { 0x1, 0x1, 221, 2500, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2501, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2502, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2503, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2504, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2505, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2506, -1, 34, 1, 50 }, -+ { 0x1, 0x1, 221, 2507, -1, 34, 1, 50 }, -+ { 0x400001, 0x400001, 221, 2508, -1, 12, 1, 60 }, -+ { 0x1, 0x1, 221, 2509, -1, 34, 1, 55 }, -+ { 0x600001, 0x1600001, 221, 2510, -1, 12, 1, 60 }, -+ { 0x3, 0xb, 221, 2511, -1, 33, 1, 55 }, -+ { 0x1, 0x1, 221, 2512, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2513, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2514, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2515, -1, 34, 1, 55 }, -+ { 0x400001, 0x400001, 221, 2516, -1, 12, 1, 60 }, -+ { 0x1, 0x1, 221, 2517, -1, 34, 1, 55 }, -+ { 0x1400001, 0x1400001, 221, 2518, -1, 12, 1, 60 }, -+ { 0x5, 0x5, 221, 2519, -1, 34, 1, 55 }, -+ { 0x600001, 0x600001, 221, 2520, -1, 12, 1, 60 }, -+ { 0x3, 0x3, 221, 2521, -1, 33, 1, 55 }, -+ { 0x1600001, 0x1600001, 221, 2522, -1, 12, 1, 50 }, -+ { 0xb, 0xb, 221, 2523, -1, 33, 1, 50 }, -+ { 0x1, 0x1, 221, 2524, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2525, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2526, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2527, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2528, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2529, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2530, -1, 34, 1, 50 }, -+ { 0x1, 0x1, 221, 2531, -1, 34, 1, 50 }, -+ { 0x400001, 0x400001, 221, 2532, -1, 12, 1, 60 }, -+ { 0x1, 0x1, 221, 2533, -1, 34, 1, 55 }, -+ { 0x600001, 0x1600001, 221, 2534, -1, 12, 1, 60 }, -+ { 0x3, 0xb, 221, 2535, -1, 33, 1, 55 }, -+ { 0x1, 0x1, 221, 2536, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2537, -1, 34, 1, 55 }, -+ { 0x1, 0x1, 221, 2538, -1, 34, 1, 60 }, -+ { 0x1, 0x1, 221, 2539, -1, 34, 1, 55 }, -+ { 0x41, 0x41, 221, 2540, -1, 28, 1, 30 }, -+ { 0x1, 0x1, 221, 2541, -1, 34, 1, 30 }, -+ { 0x83, 0x83, 221, 2542, -1, 27, 1, 30 }, -+ { 0x81, 0x81, 221, 2543, -1, 27, 1, 30 }, -+ { 0x1, 0x1, 221, 2544, -1, 34, 1, 65 }, -+ { 0x1, 0x1, 221, 2545, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2546, -1, 34, 1, 30 }, -+ { 0x5, 0x5, 221, 2547, -1, 34, 1, 65 }, -+ { 0x9, 0x9, 221, 2548, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2549, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2550, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2551, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2552, -1, 34, 1, 65 }, -+ { 0x1, 0x1, 221, 2553, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2554, -1, 34, 1, 30 }, -+ { 0x5, 0x5, 221, 2555, -1, 34, 1, 65 }, -+ { 0x9, 0x9, 221, 2556, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2557, -1, 34, 1, 30 }, -+ { 0x5, 0x5, 221, 2558, -1, 34, 1, 65 }, -+ { 0x9, 0x9, 221, 2559, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2560, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 
221, 2561, -1, 34, 1, 65 }, -+ { 0x1, 0x1, 221, 2562, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2563, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2564, -1, 34, 1, 65 }, -+ { 0x1, 0x1, 221, 2565, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2566, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2567, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2568, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2569, -1, 34, 1, 50 }, -+ { 0x81, 0x81, 221, 2570, -1, 27, 1, 50 }, -+ { 0x41, 0x41, 221, 2571, -1, 28, 1, 50 }, -+ { 0x83, 0x83, 221, 2572, -1, 27, 1, 50 }, -+ { 0x21, 0x21, 221, 2573, -1, 29, 1, 50 }, -+ { 0x85, 0x85, 221, 2574, -1, 27, 1, 50 }, -+ { 0x43, 0x43, 221, 2575, -1, 28, 1, 50 }, -+ { 0x87, 0x87, 221, 2576, -1, 27, 1, 50 }, -+ { 0x1, 0x1, 221, 2577, -1, 34, 1, 50 }, -+ { 0x1, 0x1, 221, 2578, -1, 34, 1, 50 }, -+ { 0x1, 0x1, 221, 2579, -1, 34, 1, 50 }, -+ { 0x1, 0x1, 221, 2580, -1, 34, 1, 50 }, -+ { 0x41, 0x41, 221, 2581, -1, 28, 1, 30 }, -+ { 0x1, 0x1, 221, 2582, -1, 34, 1, 30 }, -+ { 0x83, 0x83, 221, 2583, -1, 27, 1, 30 }, -+ { 0x81, 0x81, 221, 2584, -1, 27, 1, 30 }, -+ { 0x1, 0x1, 221, 2585, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2586, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2587, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2588, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2589, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2590, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2591, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2592, -1, 34, 1, 25 }, -+ { 0x1, 0x1, 221, 2593, -1, 34, 1, 25 }, -+ { 0x1, 0x1, 221, 2594, -1, 34, 1, 25 }, -+ { 0x1, 0x1, 221, 2595, -1, 34, 1, 25 }, -+ { 0x1, 0x1, 221, 2596, -1, 34, 1, 36 }, -+ { 0x1, 0x1, 221, 2597, -1, 34, 1, 65 }, -+ { 0x1, 0x1, 221, 2598, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 221, 2599, -1, 34, 1, 30 }, -+ { 0x1, 0x1, 222, 2600, -1, 35, 1, 65 }, -+ { 0x1, 0x1, 222, 2601, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2602, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2603, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2604, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2605, -1, 35, 1, 46 }, -+ { 0x1, 0x1, 222, 2606, -1, 35, 1, 42 }, -+ { 0x800001, 0x800001, 222, 2607, -1, 12, 1, 61 }, -+ { 0x1, 0x1, 222, 2608, -1, 35, 1, 56 }, -+ { 0x1800001, 0x1800001, 222, 2609, -1, 12, 1, 61 }, -+ { 0x3, 0x3, 222, 2610, -1, 35, 1, 56 }, -+ { 0xa00001, 0xa00001, 222, 2611, -1, 12, 1, 61 }, -+ { 0x5, 0x5, 222, 2612, -1, 33, 1, 56 }, -+ { 0x1a00001, 0x1a00001, 222, 2613, -1, 12, 1, 51 }, -+ { 0xd, 0xd, 222, 2614, -1, 33, 1, 51 }, -+ { 0x1, 0x1, 222, 2615, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2616, -1, 35, 1, 56 }, -+ { 0x1, 0x1, 222, 2617, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2618, -1, 35, 1, 56 }, -+ { 0x1, 0x1, 222, 2619, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2620, -1, 35, 1, 56 }, -+ { 0x1, 0x1, 222, 2621, -1, 35, 1, 51 }, -+ { 0x1, 0x1, 222, 2622, -1, 35, 1, 51 }, -+ { 0x800001, 0x800001, 222, 2623, -1, 12, 1, 61 }, -+ { 0x1, 0x1, 222, 2624, -1, 35, 1, 56 }, -+ { 0xa00001, 0x1a00001, 222, 2625, -1, 12, 1, 61 }, -+ { 0x5, 0xd, 222, 2626, -1, 33, 1, 56 }, -+ { 0x1, 0x1, 222, 2627, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2628, -1, 35, 1, 56 }, -+ { 0x1, 0x1, 222, 2629, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2630, -1, 35, 1, 56 }, -+ { 0x800001, 0x800001, 222, 2631, -1, 12, 1, 61 }, -+ { 0x1, 0x1, 222, 2632, -1, 35, 1, 56 }, -+ { 0x1800001, 0x1800001, 222, 2633, -1, 12, 1, 61 }, -+ { 0x3, 0x3, 222, 2634, -1, 35, 1, 56 }, -+ { 0xa00001, 0xa00001, 222, 2635, -1, 12, 1, 61 }, -+ { 0x5, 0x5, 222, 2636, -1, 33, 1, 56 }, -+ { 0x1a00001, 0x1a00001, 222, 2637, -1, 12, 1, 51 }, -+ { 0xd, 0xd, 222, 2638, -1, 33, 1, 51 }, -+ { 0x1, 0x1, 222, 2639, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2640, -1, 35, 1, 56 }, 
-+ { 0x1, 0x1, 222, 2641, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2642, -1, 35, 1, 56 }, -+ { 0x1, 0x1, 222, 2643, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2644, -1, 35, 1, 56 }, -+ { 0x1, 0x1, 222, 2645, -1, 35, 1, 51 }, -+ { 0x1, 0x1, 222, 2646, -1, 35, 1, 51 }, -+ { 0x800001, 0x800001, 222, 2647, -1, 12, 1, 61 }, -+ { 0x1, 0x1, 222, 2648, -1, 35, 1, 56 }, -+ { 0xa00001, 0x1a00001, 222, 2649, -1, 12, 1, 61 }, -+ { 0x5, 0xd, 222, 2650, -1, 33, 1, 56 }, -+ { 0x1, 0x1, 222, 2651, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2652, -1, 35, 1, 56 }, -+ { 0x1, 0x1, 222, 2653, -1, 35, 1, 61 }, -+ { 0x1, 0x1, 222, 2654, -1, 35, 1, 56 }, -+ { 0x81, 0x81, 222, 2655, -1, 28, 1, 31 }, -+ { 0x1, 0x1, 222, 2656, -1, 35, 1, 31 }, -+ { 0x103, 0x103, 222, 2657, -1, 27, 1, 31 }, -+ { 0x101, 0x101, 222, 2658, -1, 27, 1, 31 }, -+ { 0x1, 0x1, 222, 2659, -1, 35, 1, 65 }, -+ { 0x1, 0x1, 222, 2660, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2661, -1, 35, 1, 31 }, -+ { 0x3, 0x3, 222, 2662, -1, 35, 1, 65 }, -+ { 0x5, 0x5, 222, 2663, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2664, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2665, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2666, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2667, -1, 35, 1, 65 }, -+ { 0x1, 0x1, 222, 2668, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2669, -1, 35, 1, 31 }, -+ { 0x3, 0x3, 222, 2670, -1, 35, 1, 65 }, -+ { 0x5, 0x5, 222, 2671, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2672, -1, 35, 1, 31 }, -+ { 0x3, 0x3, 222, 2673, -1, 35, 1, 65 }, -+ { 0x5, 0x5, 222, 2674, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2675, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2676, -1, 35, 1, 65 }, -+ { 0x1, 0x1, 222, 2677, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2678, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2679, -1, 35, 1, 65 }, -+ { 0x1, 0x1, 222, 2680, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2681, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2682, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2683, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2684, -1, 35, 1, 51 }, -+ { 0x101, 0x101, 222, 2685, -1, 27, 1, 51 }, -+ { 0x81, 0x81, 222, 2686, -1, 28, 1, 51 }, -+ { 0x103, 0x103, 222, 2687, -1, 27, 1, 51 }, -+ { 0x41, 0x41, 222, 2688, -1, 29, 1, 51 }, -+ { 0x105, 0x105, 222, 2689, -1, 27, 1, 51 }, -+ { 0x83, 0x83, 222, 2690, -1, 28, 1, 51 }, -+ { 0x107, 0x107, 222, 2691, -1, 27, 1, 51 }, -+ { 0x1, 0x1, 222, 2692, -1, 35, 1, 51 }, -+ { 0x1, 0x1, 222, 2693, -1, 35, 1, 51 }, -+ { 0x1, 0x1, 222, 2694, -1, 35, 1, 51 }, -+ { 0x1, 0x1, 222, 2695, -1, 35, 1, 51 }, -+ { 0x81, 0x81, 222, 2696, -1, 28, 1, 31 }, -+ { 0x1, 0x1, 222, 2697, -1, 35, 1, 31 }, -+ { 0x103, 0x103, 222, 2698, -1, 27, 1, 31 }, -+ { 0x101, 0x101, 222, 2699, -1, 27, 1, 31 }, -+ { 0x1, 0x1, 222, 2700, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2701, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2702, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2703, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2704, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2705, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2706, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2707, -1, 35, 1, 26 }, -+ { 0x1, 0x1, 222, 2708, -1, 35, 1, 26 }, -+ { 0x1, 0x1, 222, 2709, -1, 35, 1, 26 }, -+ { 0x1, 0x1, 222, 2710, -1, 35, 1, 26 }, -+ { 0x1, 0x1, 222, 2711, -1, 35, 1, 37 }, -+ { 0x1, 0x1, 222, 2712, -1, 35, 1, 65 }, -+ { 0x1, 0x1, 222, 2713, -1, 35, 1, 31 }, -+ { 0x1, 0x1, 222, 2714, -1, 35, 1, 31 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 65 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2209, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 47 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 43 }, -+ { 0xc00001, 
0xc00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x3, 0x3, 223, 2930, -1, 34, 1, 57 }, -+ { 0x1c00001, 0x1c00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x7, 0x7, 223, 2931, -1, 34, 1, 57 }, -+ { 0xe00001, 0xe00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x7, 0x7, 223, 2932, -1, 33, 1, 57 }, -+ { 0x1e00001, 0x1e00001, 223, -1, -1, 12, 1, 52 }, -+ { 0xf, 0xf, 223, 2933, -1, 33, 1, 52 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2934, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2935, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2936, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 52 }, -+ { 0x3, 0x3, 223, 2937, -1, 34, 1, 52 }, -+ { 0xc00001, 0xc00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x3, 0x3, 223, 2942, -1, 34, 1, 57 }, -+ { 0xe00001, 0x1e00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x7, 0xf, 223, 2943, -1, 33, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2944, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2945, -1, 34, 1, 57 }, -+ { 0xc00001, 0xc00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x3, 0x3, 223, 2948, -1, 34, 1, 57 }, -+ { 0x1c00001, 0x1c00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x7, 0x7, 223, 2949, -1, 34, 1, 57 }, -+ { 0xe00001, 0xe00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x7, 0x7, 223, 2950, -1, 33, 1, 57 }, -+ { 0x1e00001, 0x1e00001, 223, -1, -1, 12, 1, 52 }, -+ { 0xf, 0xf, 223, 2951, -1, 33, 1, 52 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2952, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2953, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2954, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 52 }, -+ { 0x3, 0x3, 223, 2955, -1, 34, 1, 52 }, -+ { 0xc00001, 0xc00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x3, 0x3, 223, 2960, -1, 34, 1, 57 }, -+ { 0xe00001, 0x1e00001, 223, -1, -1, 12, 1, 62 }, -+ { 0x7, 0xf, 223, 2961, -1, 33, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2962, -1, 34, 1, 57 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 62 }, -+ { 0x3, 0x3, 223, 2963, -1, 34, 1, 57 }, -+ { 0xc1, 0xc1, 223, -1, -1, 28, 1, 32 }, -+ { 0x3, 0x3, 223, 2828, -1, 34, 1, 32 }, -+ { 0x183, 0x183, 223, -1, -1, 27, 1, 32 }, -+ { 0x181, 0x181, 223, 2829, -1, 27, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 65 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2210, -1, 34, 1, 32 }, -+ { 0x7, 0x7, 223, -1, -1, 34, 1, 65 }, -+ { 0xb, 0xb, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2211, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 65 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2214, -1, 34, 1, 32 }, -+ { 0x7, 0x7, 223, -1, -1, 34, 1, 65 }, -+ { 0xb, 0xb, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2215, -1, 34, 1, 32 }, -+ { 0x7, 0x7, 223, -1, -1, 34, 1, 65 }, -+ { 0xb, 0xb, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2217, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 65 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2219, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 65 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2220, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 52 }, -+ { 0x181, 0x181, 223, -1, -1, 27, 1, 52 }, -+ { 0xc1, 0xc1, 223, -1, -1, 28, 1, 52 }, -+ { 0x183, 0x183, 223, -1, -1, 27, 1, 52 }, -+ { 0x61, 0x61, 223, -1, -1, 
29, 1, 52 }, -+ { 0x185, 0x185, 223, -1, -1, 27, 1, 52 }, -+ { 0xc3, 0xc3, 223, -1, -1, 28, 1, 52 }, -+ { 0x187, 0x187, 223, -1, -1, 27, 1, 52 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 52 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 52 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 52 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 52 }, -+ { 0xc1, 0xc1, 223, -1, -1, 28, 1, 32 }, -+ { 0x3, 0x3, 223, 2832, -1, 34, 1, 32 }, -+ { 0x183, 0x183, 223, -1, -1, 27, 1, 32 }, -+ { 0x181, 0x181, 223, 2833, -1, 27, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 27 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 27 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 27 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 27 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 38 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 65 }, -+ { 0x3, 0x3, 223, -1, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 223, 2222, -1, 34, 1, 32 }, -+ { 0x3, 0x3, 224, 522, 1433, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 523, 1442, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 524, 1451, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 525, 1464, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 526, 1473, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 527, 1482, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 528, 1491, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 529, 1500, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 530, 1509, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 531, 1518, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 532, 1528, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 533, 1538, 32, 1, 128 }, -+ { 0x3, 0x3, 224, 546, 1551, 32, 1, 143 }, -+ { 0x3, 0x3, 224, 547, 1557, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 548, 1563, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 549, 1569, 32, 1, 143 }, -+ { 0x3, 0x3, 224, 550, 1575, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 551, 1581, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 552, 1587, 32, 1, 143 }, -+ { 0x3, 0x3, 224, 553, 1593, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 554, 1599, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 555, 1605, 32, 1, 143 }, -+ { 0x3, 0x3, 224, 556, 1611, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 557, 1617, 32, 1, 143 }, -+ { 0x3, 0x3, 224, 558, 1623, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 559, 1629, 32, 1, 143 }, -+ { 0x3, 0x3, 224, 560, 1635, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 561, 1641, 32, 1, 143 }, -+ { 0x3, 0x3, 224, 562, 1647, 32, 1, 148 }, -+ { 0x3, 0x3, 224, 563, 1653, 32, 1, 148 }, -+ { 0x1, 0x1, 225, -1, -1, 28, 1, 33 }, -+ { 0x1, 0x1, 225, -1, -1, 28, 1, 33 }, -+ { 0x0, 0x0, 232, 940, -1, 0, 1, 137 }, -+ { 0x0, 0x0, 232, 941, -1, 0, 1, 155 }, -+ { 0x1, 0x1, 233, -1, 1964, 33, 1, 133 }, -+ { 0x1, 0x1, 233, -1, 1967, 33, 1, 139 }, -+ { 0x0, 0x0, 233, -1, 1969, 0, 1, 150 }, -+ { 0x0, 0x0, 233, -1, 1970, 0, 1, 156 }, -+ { 0x0, 0x0, 234, 865, 953, 0, 0, -1 }, -+ { 0x0, 0x0, 234, 866, 961, 0, 0, -1 }, -+ { 0x0, 0x0, 234, 867, 957, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 868, 602, 33, 1, 6 }, -+ { 0x8000001, 0x8000001, 234, 869, 610, 6, 1, 7 }, -+ { 0x1, 0x1, 234, 870, 606, 33, 1, 6 }, -+ { 0x0, 0x0, 234, 871, 965, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 872, 622, 33, 1, 8 }, -+ { 0x0, 0x0, 234, 873, 969, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 874, 634, 33, 1, 15 }, -+ { 0x0, 0x0, 234, 875, 974, 0, 0, -1 }, -+ { 0x0, 0x0, 234, 876, 978, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 877, 657, 33, 1, 17 }, -+ { 0x1, 0x1, 234, 878, 661, 33, 1, 17 }, -+ { 0x0, 0x0, 234, 879, 982, 0, 0, -1 }, -+ { 0x0, 0x0, 234, 880, 986, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 881, 681, 33, 1, 18 }, -+ { 0x8000001, 0x8000001, 
234, 882, 685, 6, 1, 18 }, -+ { 0x0, 0x0, 234, 883, 990, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 884, 697, 33, 1, 19 }, -+ { 0x0, 0x0, 234, 885, 994, 0, 0, -1 }, -+ { 0x0, 0x0, 234, 886, 998, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 887, 717, 33, 1, 20 }, -+ { 0x8000001, 0x8000001, 234, 888, 721, 6, 1, 20 }, -+ { 0x0, 0x0, 234, 889, 1002, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 890, 733, 33, 1, 21 }, -+ { 0x0, 0x0, 234, 891, 1007, 0, 0, -1 }, -+ { 0x0, 0x0, 234, 892, 1011, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 893, 756, 33, 1, 17 }, -+ { 0x1, 0x1, 234, 894, 760, 33, 1, 17 }, -+ { 0x0, 0x0, 234, 895, 1015, 0, 0, -1 }, -+ { 0x1, 0x1, 234, 896, 772, 33, 1, 21 }, -+ { 0x0, 0x0, 235, 2753, 952, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2754, 960, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2755, 956, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2756, 601, 0, 1, 6 }, -+ { 0x1, 0x1, 235, 2757, 609, 6, 1, 7 }, -+ { 0x0, 0x0, 235, 2758, 605, 0, 1, 6 }, -+ { 0x0, 0x0, 235, 2759, 964, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2760, 621, 0, 1, 8 }, -+ { 0x0, 0x0, 235, 2761, 968, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2762, 633, 0, 1, 15 }, -+ { 0x0, 0x0, 235, 2763, 973, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2764, 977, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2765, 656, 0, 1, 17 }, -+ { 0x0, 0x0, 235, 2766, 660, 0, 1, 17 }, -+ { 0x0, 0x0, 235, 2767, 981, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2768, 985, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2769, 680, 0, 1, 18 }, -+ { 0x1, 0x1, 235, 2770, 684, 6, 1, 18 }, -+ { 0x0, 0x0, 235, 2771, 989, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2772, 696, 0, 1, 19 }, -+ { 0x0, 0x0, 235, 2773, 993, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2774, 997, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2775, 716, 0, 1, 20 }, -+ { 0x1, 0x1, 235, 2776, 720, 6, 1, 20 }, -+ { 0x0, 0x0, 235, 2777, 1001, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2778, 732, 0, 1, 21 }, -+ { 0x0, 0x0, 235, 2779, 1006, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2780, 1010, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2781, 755, 0, 1, 17 }, -+ { 0x0, 0x0, 235, 2782, 759, 0, 1, 17 }, -+ { 0x0, 0x0, 235, 2783, 1014, 0, 0, -1 }, -+ { 0x0, 0x0, 235, 2784, 771, 0, 1, 21 }, -+ { 0x1, 0x1, 235, 897, 1137, 27, 1, 16 }, -+ { 0x0, 0x0, 235, 898, 1135, 0, 1, 16 }, -+ { 0x0, 0x0, 235, 1202, 1139, 0, 1, 22 }, -+ { 0x0, 0x1, 235, 1147, 1145, 20, 1, 67 }, -+ { 0x0, 0x0, 235, 111, 1143, 0, 1, 67 }, -+ { 0x1, 0x1, 238, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x0, 238, -1, -1, 0, 1, 0 }, -+ { 0x1, 0x1, 238, 2984, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 238, 2985, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 238, 2986, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 238, 2987, -1, 27, 1, 0 }, -+ { 0x0, 0x0, 260, -1, 2310, 0, 0, -1 }, -+ { 0x0, 0x0, 260, -1, 2312, 0, 0, -1 }, -+ { 0x1, 0x1, 260, -1, -1, 28, 1, 29 }, -+ { 0x1, 0x1, 260, -1, -1, 28, 1, 29 }, -+ { 0x0, 0x0, 260, -1, 2351, 0, 0, -1 }, -+ { 0x0, 0x0, 260, -1, 2353, 0, 0, -1 }, -+ { 0x1, 0x1, 260, -1, -1, 28, 1, 29 }, -+ { 0x1, 0x1, 260, -1, -1, 28, 1, 29 }, -+ { 0x0, 0x0, 262, 23, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 262, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 262, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x1, 262, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x1, 262, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x1, 262, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x1, 262, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x1, 262, -1, -1, 29, 1, 0 }, -+ { 0x0, 0x0, 262, 180, -1, 0, 1, 0 }, -+ { 0x0, 0x1, 262, -1, -1, 29, 1, 0 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, 
-1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 299, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 321, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 347, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 369, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 64 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 64 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 64 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 64 }, -+ { 0x0, 0x0, 263, -1, 2262, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2264, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2266, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2268, 0, 0, -1 }, -+ { 0x1, 0x1, 263, -1, 2270, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2272, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2274, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2276, 12, 1, 49 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 49 }, -+ { 0x0, 0x0, 263, -1, 
2278, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2280, 0, 0, -1 }, -+ { 0x1, 0x1, 263, -1, 2282, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2284, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x0, 0x0, 263, -1, 2286, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2288, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2290, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2292, 0, 0, -1 }, -+ { 0x1, 0x1, 263, -1, 2294, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2296, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2298, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2300, 12, 1, 49 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 49 }, -+ { 0x0, 0x0, 263, -1, 2302, 0, 0, -1 }, -+ { 0x0, 0x0, 263, -1, 2304, 0, 0, -1 }, -+ { 0x1, 0x1, 263, -1, 2306, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, 2308, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, -1, -1, 12, 1, 59 }, -+ { 0x1, 0x1, 263, 391, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 393, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 507, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 509, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 399, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 401, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 515, -1, 12, 1, 2 }, -+ { 0x1, 0x1, 263, 517, -1, 12, 1, 2 }, -+ { 0x0, 0x0, 264, -1, 2269, 0, 0, -1 }, -+ { 0x9, 0x9, 264, -1, 2277, 33, 1, 49 }, -+ { 0x9, 0x9, 264, -1, 2941, 33, 1, 49 }, -+ { 0x0, 0x0, 264, 1381, 2342, 0, 0, -1 }, -+ { 0x3, 0x3, 264, 1382, -1, 27, 1, 49 }, -+ { 0x0, 0x0, 268, 2822, -1, 0, 1, 0 }, -+ { 0x3, 0x3, 269, -1, -1, 27, 1, 0 }, -+ { 0x3, 0x3, 269, -1, -1, 27, 1, 0 }, -+ { 0x3, 0x3, 269, -1, -1, 27, 1, 0 }, -+ { 0x3, 0x3, 269, -1, -1, 27, 1, 0 }, -+ { 0x1, 0x1, 270, 2980, -1, 28, 1, 0 }, -+ { 0x1, 0x1, 270, 2981, -1, 28, 1, 0 }, -+ { 0x1, 0x1, 270, 2982, -1, 28, 1, 0 }, -+ { 0x1, 0x1, 270, 2983, -1, 28, 1, 0 }, -+ { 0x1, 0x1, 271, -1, -1, 27, 1, 93 }, -+ { 0x1, 0x1, 271, -1, -1, 27, 1, 93 }, -+ { 0x0, 0x0, 271, -1, 950, 0, 0, -1 }, -+ { 0x0, 0x0, 272, 2993, 2799, 0, 0, -1 }, -+ { 0x0, 0x0, 272, 2994, 2801, 0, 0, -1 }, -+ { 0x0, 0x0, 273, -1, 2800, 0, 0, -1 }, -+ { 0x0, 0x0, 273, -1, 2802, 0, 0, -1 }, -+ { 0x0, 0x0, 274, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 274, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 274, -1, -1, 0, 1, 40 }, -+ { 0x0, 0x0, 279, -1, -1, 0, 1, 33 }, -+ { 0x0, 0x0, 283, -1, 2316, 0, 1, 29 }, -+ { 0x0, 0x0, 284, -1, -1, 0, 1, 0 }, -+ { 0x0, 0x0, 284, -1, -1, 0, 1, 71 }, -+ { 0x0, 0x0, 284, 1983, 2966, 0, 1, 1 }, -+ { 0x0, 0x0, 284, 1984, 2967, 0, 1, 1 }, -+ { 0x0, 0x0, 284, -1, 508, 0, 0, -1 }, -+ { 0x0, 0x0, 284, -1, 510, 0, 0, -1 }, -+ { 0x0, 0x0, 284, 1987, 2970, 0, 1, 1 }, -+ { 0x0, 0x0, 284, 1988, 2971, 0, 1, 1 }, -+ { 0x0, 0x0, 284, -1, 516, 0, 0, -1 }, -+ { 0x0, 0x0, 284, -1, 518, 0, 0, -1 }, -+}; -+ -+static const struct ia64_main_table -+main_table[] = { -+ { 5, 1, 1, 0x0000010000000000ull, 0x000001eff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 0, }, -+ { 5, 1, 1, 0x0000010008000000ull, 0x000001eff8000000ull, { 24, 25, 26, 4, 0 }, 0x0, 1, }, -+ { 5, 7, 1, 0x0000000000000000ull, 0x0000000000000000ull, { 24, 66, 27, 0, 0 }, 0x0, 2, }, -+ { 5, 7, 1, 0x0000000000000000ull, 0x0000000000000000ull, { 24, 63, 26, 0, 0 }, 0x0, 3, }, -+ { 6, 1, 1, 0x0000012000000000ull, 0x000001e000000000ull, { 24, 66, 27, 0, 0 }, 0x0, 4, }, -+ { 7, 1, 1, 0x0000010040000000ull, 0x000001eff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 5, }, -+ { 7, 1, 1, 0x0000010c00000000ull, 0x000001ee00000000ull, { 24, 63, 26, 0, 0 }, 0x0, 6, }, -+ { 8, 1, 1, 
0x0000010800000000ull, 0x000001ee00000000ull, { 24, 63, 26, 0, 0 }, 0x0, 7, }, -+ { 9, 3, 1, 0x0000002c00000000ull, 0x000001ee00000000ull, { 24, 3, 52, 53, 54 }, 0x221, 8, }, -+ { 9, 3, 1, 0x0000002c00000000ull, 0x000001ee00000000ull, { 24, 52, 53, 54, 0 }, 0x261, 9, }, -+ { 10, 1, 1, 0x0000010060000000ull, 0x000001eff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 10, }, -+ { 10, 1, 1, 0x0000010160000000ull, 0x000001eff8000000ull, { 24, 55, 26, 0, 0 }, 0x0, 11, }, -+ { 11, 1, 1, 0x0000010068000000ull, 0x000001eff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 12, }, -+ { 11, 1, 1, 0x0000010168000000ull, 0x000001eff8000000ull, { 24, 55, 26, 0, 0 }, 0x0, 13, }, -+ { 14, 4, 0, 0x0000000100000000ull, 0x000001eff80011ffull, { 16, 0, 0, 0, 0 }, 0x40, 951, }, -+ { 14, 4, 0, 0x0000000100000000ull, 0x000001eff80011c0ull, { 16, 0, 0, 0, 0 }, 0x0, 807, }, -+ { 14, 4, 0, 0x0000000100000000ull, 0x000001eff80011c0ull, { 16, 0, 0, 0, 0 }, 0x40, 808, }, -+ { 14, 4, 0, 0x0000000108000100ull, 0x000001eff80011c0ull, { 16, 0, 0, 0, 0 }, 0x200, 2200, }, -+ { 14, 4, 0, 0x0000000108000100ull, 0x000001eff80011c0ull, { 16, 0, 0, 0, 0 }, 0x240, 2201, }, -+ { 14, 4, 1, 0x0000002100000000ull, 0x000001ef00001000ull, { 15, 16, 0, 0, 0 }, 0x0, 564, }, -+ { 14, 4, 1, 0x0000002100000000ull, 0x000001ef00001000ull, { 15, 16, 0, 0, 0 }, 0x40, 565, }, -+ { 14, 4, 0, 0x0000008000000000ull, 0x000001ee000011ffull, { 81, 0, 0, 0, 0 }, 0x40, 972, }, -+ { 14, 4, 0, 0x0000008000000000ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x0, 809, }, -+ { 14, 4, 0, 0x0000008000000000ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x40, 810, }, -+ { 14, 4, 0, 0x0000008000000080ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x210, 2991, }, -+ { 14, 4, 0, 0x0000008000000080ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x250, 2992, }, -+ { 14, 4, 0, 0x0000008000000140ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x30, 572, }, -+ { 14, 4, 0, 0x0000008000000140ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x70, 573, }, -+ { 14, 4, 0, 0x0000008000000180ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x230, 570, }, -+ { 14, 4, 0, 0x0000008000000180ull, 0x000001ee000011c0ull, { 81, 0, 0, 0, 0 }, 0x270, 571, }, -+ { 14, 4, 1, 0x000000a000000000ull, 0x000001ee00001000ull, { 15, 81, 0, 0, 0 }, 0x0, 566, }, -+ { 14, 4, 1, 0x000000a000000000ull, 0x000001ee00001000ull, { 15, 81, 0, 0, 0 }, 0x40, 567, }, -+ { 15, 4, 0, 0x0000000000000000ull, 0x000001e1f8000000ull, { 65, 0, 0, 0, 0 }, 0x0, 519, }, -+ { 15, 5, 0, 0x0000000000000000ull, 0x000001e3f8000000ull, { 65, 0, 0, 0, 0 }, 0x0, 942, }, -+ { 15, 2, 0, 0x0000000000000000ull, 0x000001eff8000000ull, { 65, 0, 0, 0, 0 }, 0x2, 1120, }, -+ { 15, 3, 0, 0x0000000000000000ull, 0x000001eff8000000ull, { 65, 0, 0, 0, 0 }, 0x0, 1245, }, -+ { 15, 6, 0, 0x0000000000000000ull, 0x000001eff8000000ull, { 69, 0, 0, 0, 0 }, 0x0, 2995, }, -+ { 15, 7, 0, 0x0000000000000000ull, 0x0000000000000000ull, { 65, 0, 0, 0, 0 }, 0x0, 16, }, -+ { 16, 6, 0, 0x0000018000000000ull, 0x000001ee000011ffull, { 82, 0, 0, 0, 0 }, 0x40, 1005, }, -+ { 16, 6, 0, 0x0000018000000000ull, 0x000001ee000011c0ull, { 82, 0, 0, 0, 0 }, 0x0, 811, }, -+ { 16, 6, 0, 0x0000018000000000ull, 0x000001ee000011c0ull, { 82, 0, 0, 0, 0 }, 0x40, 812, }, -+ { 16, 6, 1, 0x000001a000000000ull, 0x000001ee00001000ull, { 15, 82, 0, 0, 0 }, 0x0, 568, }, -+ { 16, 6, 1, 0x000001a000000000ull, 0x000001ee00001000ull, { 15, 82, 0, 0, 0 }, 0x40, 569, }, -+ { 17, 4, 0, 0x0000004080000000ull, 0x000001e9f8000018ull, { 16, 77, 0, 0, 0 }, 0x20, 2818, }, -+ { 17, 4, 0, 0x000000e000000000ull, 
0x000001e800000018ull, { 81, 77, 0, 0, 0 }, 0x20, 2819, }, -+ { 18, 4, 0, 0x0000000060000000ull, 0x000001e1f8000000ull, { 0, 0, 0, 0, 0 }, 0x2c, 222, }, -+ { 22, 2, 0, 0x0000000200000000ull, 0x000001ee00000000ull, { 25, 80, 0, 0, 0 }, 0x0, 2205, }, -+ { 22, 3, 0, 0x0000000800000000ull, 0x000001ee00000000ull, { 24, 81, 0, 0, 0 }, 0x0, 224, }, -+ { 22, 3, 0, 0x0000000c00000000ull, 0x000001ee00000000ull, { 18, 81, 0, 0, 0 }, 0x0, 225, }, -+ { 22, 3, 0, 0x0000002200000000ull, 0x000001ee00000000ull, { 25, 80, 0, 0, 0 }, 0x0, 2206, }, -+ { 22, 3, 0, 0x0000002600000000ull, 0x000001ee00000000ull, { 19, 80, 0, 0, 0 }, 0x0, 2207, }, -+ { 22, 7, 0, 0x0000000000000000ull, 0x0000000000000000ull, { 25, 80, 0, 0, 0 }, 0x0, 2208, }, -+ { 25, 4, 0, 0x0000000020000000ull, 0x000001e1f8000000ull, { 0, 0, 0, 0, 0 }, 0x224, 18, }, -+ { 26, 1, 2, 0x0000018000000000ull, 0x000001fe00001000ull, { 22, 23, 25, 26, 0 }, 0x0, 1204, }, -+ { 26, 1, 1, 0x0000018000000000ull, 0x000001fe00001000ull, { 22, 25, 26, 0, 0 }, 0x40, 1205, }, -+ { 26, 1, 2, 0x0000018000000000ull, 0x000001fe00001000ull, { 23, 22, 26, 25, 0 }, 0x0, 1163, }, -+ { 26, 1, 1, 0x0000018000000000ull, 0x000001fe00001000ull, { 23, 26, 25, 0, 0 }, 0x40, 1164, }, -+ { 26, 1, 2, 0x0000018000000000ull, 0x000001fe00001000ull, { 22, 23, 26, 25, 0 }, 0x0, 1072, }, -+ { 26, 1, 1, 0x0000018000000000ull, 0x000001fe00001000ull, { 22, 26, 25, 0, 0 }, 0x40, 1073, }, -+ { 26, 1, 2, 0x0000018000000000ull, 0x000001fe00001000ull, { 23, 22, 25, 26, 0 }, 0x0, 1034, }, -+ { 26, 1, 1, 0x0000018000000000ull, 0x000001fe00001000ull, { 23, 25, 26, 0, 0 }, 0x40, 1035, }, -+ { 26, 1, 2, 0x0000018200000000ull, 0x000001fe00001000ull, { 22, 23, 25, 26, 0 }, 0x40, 1358, }, -+ { 26, 1, 2, 0x0000019000000000ull, 0x000001fe00001000ull, { 22, 23, 7, 26, 0 }, 0x0, 1074, }, -+ { 26, 1, 1, 0x0000019000000000ull, 0x000001fe00001000ull, { 22, 7, 26, 0, 0 }, 0x40, 1075, }, -+ { 26, 1, 2, 0x0000019000000000ull, 0x000001fe00001000ull, { 22, 23, 26, 7, 0 }, 0x40, 1208, }, -+ { 26, 1, 1, 0x0000019000000000ull, 0x000001fe00001000ull, { 22, 26, 7, 0, 0 }, 0x40, 1209, }, -+ { 26, 1, 2, 0x0000019000000000ull, 0x000001fe00001000ull, { 22, 23, 7, 26, 0 }, 0x40, 1169, }, -+ { 26, 1, 2, 0x0000018800000000ull, 0x000001ee00001000ull, { 22, 23, 55, 26, 0 }, 0x0, 1211, }, -+ { 26, 1, 1, 0x0000018800000000ull, 0x000001ee00001000ull, { 22, 55, 26, 0, 0 }, 0x40, 1212, }, -+ { 26, 1, 2, 0x0000018800000000ull, 0x000001ee00001000ull, { 22, 23, 57, 26, 0 }, 0x0, 1170, }, -+ { 26, 1, 1, 0x0000018800000000ull, 0x000001ee00001000ull, { 22, 57, 26, 0, 0 }, 0x40, 1171, }, -+ { 26, 1, 2, 0x0000018800000000ull, 0x000001ee00001000ull, { 23, 22, 57, 26, 0 }, 0x0, 1079, }, -+ { 26, 1, 1, 0x0000018800000000ull, 0x000001ee00001000ull, { 23, 57, 26, 0, 0 }, 0x40, 1080, }, -+ { 26, 1, 2, 0x0000018800000000ull, 0x000001ee00001000ull, { 23, 22, 55, 26, 0 }, 0x0, 1041, }, -+ { 26, 1, 1, 0x0000018800000000ull, 0x000001ee00001000ull, { 23, 55, 26, 0, 0 }, 0x40, 1042, }, -+ { 26, 1, 2, 0x0000018a00000000ull, 0x000001ee00001000ull, { 22, 23, 55, 26, 0 }, 0x40, 1363, }, -+ { 26, 1, 2, 0x000001a800000000ull, 0x000001ee00001000ull, { 22, 23, 59, 26, 0 }, 0x0, 1196, }, -+ { 26, 1, 1, 0x000001a800000000ull, 0x000001ee00001000ull, { 22, 59, 26, 0, 0 }, 0x40, 1197, }, -+ { 26, 1, 2, 0x000001a800000000ull, 0x000001ee00001000ull, { 23, 22, 59, 26, 0 }, 0x0, 1107, }, -+ { 26, 1, 1, 0x000001a800000000ull, 0x000001ee00001000ull, { 23, 59, 26, 0, 0 }, 0x40, 1108, }, -+ { 26, 1, 2, 0x000001c200000000ull, 0x000001fe00001000ull, { 23, 22, 25, 26, 0 }, 
0x40, 1364, }, -+ { 26, 1, 2, 0x000001d000000000ull, 0x000001fe00001000ull, { 23, 22, 7, 26, 0 }, 0x40, 1172, }, -+ { 26, 1, 1, 0x000001d000000000ull, 0x000001fe00001000ull, { 23, 7, 26, 0, 0 }, 0x40, 1173, }, -+ { 26, 1, 2, 0x000001d000000000ull, 0x000001fe00001000ull, { 23, 22, 26, 7, 0 }, 0x40, 1045, }, -+ { 26, 1, 1, 0x000001d000000000ull, 0x000001fe00001000ull, { 23, 26, 7, 0, 0 }, 0x40, 1046, }, -+ { 26, 1, 2, 0x000001ca00000000ull, 0x000001ee00001000ull, { 23, 22, 55, 26, 0 }, 0x40, 1365, }, -+ { 27, 1, 2, 0x0000018400000000ull, 0x000001fe00001000ull, { 22, 23, 25, 26, 0 }, 0x0, 1217, }, -+ { 27, 1, 1, 0x0000018400000000ull, 0x000001fe00001000ull, { 22, 25, 26, 0, 0 }, 0x40, 1218, }, -+ { 27, 1, 2, 0x0000018400000000ull, 0x000001fe00001000ull, { 23, 22, 26, 25, 0 }, 0x0, 1176, }, -+ { 27, 1, 1, 0x0000018400000000ull, 0x000001fe00001000ull, { 23, 26, 25, 0, 0 }, 0x40, 1177, }, -+ { 27, 1, 2, 0x0000018400000000ull, 0x000001fe00001000ull, { 22, 23, 26, 25, 0 }, 0x0, 1085, }, -+ { 27, 1, 1, 0x0000018400000000ull, 0x000001fe00001000ull, { 22, 26, 25, 0, 0 }, 0x40, 1086, }, -+ { 27, 1, 2, 0x0000018400000000ull, 0x000001fe00001000ull, { 23, 22, 25, 26, 0 }, 0x0, 1047, }, -+ { 27, 1, 1, 0x0000018400000000ull, 0x000001fe00001000ull, { 23, 25, 26, 0, 0 }, 0x40, 1048, }, -+ { 27, 1, 2, 0x0000018600000000ull, 0x000001fe00001000ull, { 22, 23, 25, 26, 0 }, 0x40, 1370, }, -+ { 27, 1, 2, 0x0000019400000000ull, 0x000001fe00001000ull, { 22, 23, 7, 26, 0 }, 0x0, 1087, }, -+ { 27, 1, 1, 0x0000019400000000ull, 0x000001fe00001000ull, { 22, 7, 26, 0, 0 }, 0x40, 1088, }, -+ { 27, 1, 2, 0x0000019400000000ull, 0x000001fe00001000ull, { 22, 23, 26, 7, 0 }, 0x40, 1221, }, -+ { 27, 1, 1, 0x0000019400000000ull, 0x000001fe00001000ull, { 22, 26, 7, 0, 0 }, 0x40, 1222, }, -+ { 27, 1, 2, 0x0000019400000000ull, 0x000001fe00001000ull, { 22, 23, 7, 26, 0 }, 0x40, 1182, }, -+ { 27, 1, 2, 0x0000018c00000000ull, 0x000001ee00001000ull, { 22, 23, 55, 26, 0 }, 0x0, 1224, }, -+ { 27, 1, 1, 0x0000018c00000000ull, 0x000001ee00001000ull, { 22, 55, 26, 0, 0 }, 0x40, 1225, }, -+ { 27, 1, 2, 0x0000018c00000000ull, 0x000001ee00001000ull, { 22, 23, 57, 26, 0 }, 0x0, 1183, }, -+ { 27, 1, 1, 0x0000018c00000000ull, 0x000001ee00001000ull, { 22, 57, 26, 0, 0 }, 0x40, 1184, }, -+ { 27, 1, 2, 0x0000018c00000000ull, 0x000001ee00001000ull, { 23, 22, 57, 26, 0 }, 0x0, 1092, }, -+ { 27, 1, 1, 0x0000018c00000000ull, 0x000001ee00001000ull, { 23, 57, 26, 0, 0 }, 0x40, 1093, }, -+ { 27, 1, 2, 0x0000018c00000000ull, 0x000001ee00001000ull, { 23, 22, 55, 26, 0 }, 0x0, 1054, }, -+ { 27, 1, 1, 0x0000018c00000000ull, 0x000001ee00001000ull, { 23, 55, 26, 0, 0 }, 0x40, 1055, }, -+ { 27, 1, 2, 0x0000018e00000000ull, 0x000001ee00001000ull, { 22, 23, 55, 26, 0 }, 0x40, 1375, }, -+ { 27, 1, 2, 0x000001ac00000000ull, 0x000001ee00001000ull, { 22, 23, 56, 26, 0 }, 0x0, 1241, }, -+ { 27, 1, 1, 0x000001ac00000000ull, 0x000001ee00001000ull, { 22, 56, 26, 0, 0 }, 0x40, 1242, }, -+ { 27, 1, 2, 0x000001ac00000000ull, 0x000001ee00001000ull, { 22, 23, 58, 26, 0 }, 0x0, 1200, }, -+ { 27, 1, 1, 0x000001ac00000000ull, 0x000001ee00001000ull, { 22, 58, 26, 0, 0 }, 0x40, 1201, }, -+ { 27, 1, 2, 0x000001ac00000000ull, 0x000001ee00001000ull, { 23, 22, 58, 26, 0 }, 0x0, 1111, }, -+ { 27, 1, 1, 0x000001ac00000000ull, 0x000001ee00001000ull, { 23, 58, 26, 0, 0 }, 0x40, 1112, }, -+ { 27, 1, 2, 0x000001ac00000000ull, 0x000001ee00001000ull, { 23, 22, 56, 26, 0 }, 0x0, 1070, }, -+ { 27, 1, 1, 0x000001ac00000000ull, 0x000001ee00001000ull, { 23, 56, 26, 0, 0 }, 0x40, 1071, }, -+ { 27, 1, 
2, 0x000001c600000000ull, 0x000001fe00001000ull, { 23, 22, 25, 26, 0 }, 0x40, 1376, }, -+ { 27, 1, 2, 0x000001d400000000ull, 0x000001fe00001000ull, { 23, 22, 7, 26, 0 }, 0x40, 1185, }, -+ { 27, 1, 1, 0x000001d400000000ull, 0x000001fe00001000ull, { 23, 7, 26, 0, 0 }, 0x40, 1186, }, -+ { 27, 1, 2, 0x000001d400000000ull, 0x000001fe00001000ull, { 23, 22, 26, 7, 0 }, 0x40, 1058, }, -+ { 27, 1, 1, 0x000001d400000000ull, 0x000001fe00001000ull, { 23, 26, 7, 0, 0 }, 0x40, 1059, }, -+ { 27, 1, 2, 0x000001ce00000000ull, 0x000001ee00001000ull, { 23, 22, 55, 26, 0 }, 0x40, 1377, }, -+ { 28, 3, 1, 0x0000008808000000ull, 0x000001fff8000000ull, { 24, 33, 25, 1, 2 }, 0x0, 257, }, -+ { 28, 3, 1, 0x0000008808000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x40, 258, }, -+ { 29, 3, 1, 0x0000008008000000ull, 0x000001fff8000000ull, { 24, 33, 25, 2, 0 }, 0x0, 259, }, -+ { 29, 3, 1, 0x0000008008000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x40, 260, }, -+ { 30, 3, 1, 0x0000008048000000ull, 0x000001fff8000000ull, { 24, 33, 25, 2, 0 }, 0x0, 261, }, -+ { 30, 3, 1, 0x0000008048000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x40, 262, }, -+ { 31, 3, 1, 0x0000008088000000ull, 0x000001fff8000000ull, { 24, 33, 25, 2, 0 }, 0x0, 263, }, -+ { 31, 3, 1, 0x0000008088000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x40, 264, }, -+ { 32, 3, 1, 0x00000080c8000000ull, 0x000001fff8000000ull, { 24, 33, 25, 2, 0 }, 0x0, 265, }, -+ { 32, 3, 1, 0x00000080c8000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x40, 266, }, -+ { 34, 4, 0, 0x0000000010000000ull, 0x000001e1f8000000ull, { 0, 0, 0, 0, 0 }, 0x224, 19, }, -+ { 36, 2, 1, 0x00000000c0000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 1149, }, -+ { 37, 2, 1, 0x00000000c8000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 1150, }, -+ { 39, 2, 1, 0x0000008000000000ull, 0x000001e000000000ull, { 24, 25, 26, 47, 72 }, 0x0, 20, }, -+ { 39, 2, 1, 0x000000a600000000ull, 0x000001ee04000000ull, { 24, 25, 45, 73, 0 }, 0x0, 3000, }, -+ { 39, 2, 1, 0x000000a604000000ull, 0x000001ee04000000ull, { 24, 55, 45, 73, 0 }, 0x0, 3001, }, -+ { 39, 2, 1, 0x000000ae00000000ull, 0x000001ee00000000ull, { 24, 48, 26, 46, 73 }, 0x0, 21, }, -+ { 43, 4, 0, 0x0000000080000000ull, 0x000001e1f8000000ull, { 0, 0, 0, 0, 0 }, 0x20, 22, }, -+ { 48, 2, 1, 0x000000a400000000ull, 0x000001ee00002000ull, { 24, 26, 76, 73, 0 }, 0x0, 2836, }, -+ { 50, 5, 1, 0x0000000080000000ull, 0x000001e3f80fe000ull, { 18, 20, 0, 0, 0 }, 0x40, 24, }, -+ { 51, 5, 1, 0x0000010008000000ull, 0x000001fff8000000ull, { 18, 20, 19, 0, 0 }, 0x40, 2257, }, -+ { 52, 5, 1, 0x00000000b8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2258, }, -+ { 52, 5, 1, 0x00000000b8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 26, }, -+ { 53, 5, 1, 0x00000000b0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2259, }, -+ { 53, 5, 1, 0x00000000b0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 27, }, -+ { 54, 5, 1, 0x0000000160000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 28, }, -+ { 55, 5, 1, 0x0000000168000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 29, }, -+ { 57, 3, 0, 0x0000002180000000ull, 0x000001fff8000000ull, { 26, 0, 0, 0, 0 }, 0x0, 30, }, -+ { 58, 5, 0, 0x0000000040000000ull, 0x000001eff8000000ull, { 79, 0, 0, 0, 0 }, 0x0, 2260, }, -+ { 58, 5, 0, 0x0000000040000000ull, 0x000001eff8000000ull, { 79, 0, 0, 0, 0 }, 0x40, 31, }, -+ { 59, 5, 2, 0x000000a000000000ull, 0x000001e000001000ull, { 22, 23, 19, 60, 0 }, 0x0, 1247, 
}, -+ { 59, 5, 1, 0x000000a000000000ull, 0x000001e000001000ull, { 22, 19, 60, 0, 0 }, 0x40, 1248, }, -+ { 59, 5, 2, 0x000000a000000000ull, 0x000001e000001000ull, { 23, 22, 19, 60, 0 }, 0x40, 1402, }, -+ { 59, 5, 1, 0x000000a000000000ull, 0x000001e000001000ull, { 23, 19, 60, 0, 0 }, 0x40, 1403, }, -+ { 60, 5, 0, 0x0000000028000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x0, 2261, }, -+ { 60, 5, 0, 0x0000000028000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x40, 32, }, -+ { 61, 5, 2, 0x0000008000000000ull, 0x000001fe00001000ull, { 22, 23, 19, 20, 0 }, 0x0, 925, }, -+ { 61, 5, 1, 0x0000008000000000ull, 0x000001fe00001000ull, { 22, 19, 20, 0, 0 }, 0x40, 926, }, -+ { 61, 5, 2, 0x0000008000000000ull, 0x000001fe00001000ull, { 22, 23, 19, 20, 0 }, 0x40, 927, }, -+ { 61, 5, 2, 0x0000009000000000ull, 0x000001fe00001000ull, { 22, 23, 20, 19, 0 }, 0x0, 1098, }, -+ { 61, 5, 1, 0x0000009000000000ull, 0x000001fe00001000ull, { 22, 20, 19, 0, 0 }, 0x40, 1099, }, -+ { 61, 5, 2, 0x0000009000000000ull, 0x000001fe00001000ull, { 22, 23, 20, 19, 0 }, 0x40, 1100, }, -+ { 61, 5, 2, 0x0000008000000000ull, 0x000001fe00001000ull, { 23, 22, 19, 20, 0 }, 0x0, 1378, }, -+ { 61, 5, 1, 0x0000008000000000ull, 0x000001fe00001000ull, { 23, 19, 20, 0, 0 }, 0x40, 1379, }, -+ { 61, 5, 2, 0x0000008000000000ull, 0x000001fe00001000ull, { 23, 22, 19, 20, 0 }, 0x40, 1380, }, -+ { 61, 5, 2, 0x0000009000000000ull, 0x000001fe00001000ull, { 23, 22, 20, 19, 0 }, 0x0, 1387, }, -+ { 61, 5, 1, 0x0000009000000000ull, 0x000001fe00001000ull, { 23, 20, 19, 0, 0 }, 0x40, 1388, }, -+ { 61, 5, 2, 0x0000009000000000ull, 0x000001fe00001000ull, { 23, 22, 20, 19, 0 }, 0x40, 1389, }, -+ { 62, 5, 1, 0x00000000c0000000ull, 0x000001eff8000000ull, { 18, 19, 0, 0, 0 }, 0x0, 1024, }, -+ { 62, 5, 1, 0x00000000c0000000ull, 0x000001eff8000000ull, { 18, 19, 0, 0, 0 }, 0x40, 1025, }, -+ { 62, 5, 1, 0x00000000e0000000ull, 0x000001e3f8000000ull, { 18, 19, 0, 0, 0 }, 0x0, 2998, }, -+ { 62, 5, 1, 0x0000010008000000ull, 0x000001fff80fe000ull, { 18, 20, 0, 0, 0 }, 0x40, 2999, }, -+ { 63, 3, 1, 0x0000008488000000ull, 0x000001fff8000000ull, { 24, 33, 71, 0, 0 }, 0x0, 267, }, -+ { 64, 3, 1, 0x00000084c8000000ull, 0x000001fff8000000ull, { 24, 33, 71, 0, 0 }, 0x0, 268, }, -+ { 67, 3, 0, 0x0000000060000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x21, 33, }, -+ { 68, 5, 1, 0x0000010000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x0, 2319, }, -+ { 68, 5, 1, 0x0000010000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x40, 34, }, -+ { 69, 5, 1, 0x00000000a8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2320, }, -+ { 69, 5, 1, 0x00000000a8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 35, }, -+ { 70, 5, 1, 0x0000000080000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2213, }, -+ { 71, 5, 1, 0x00000000a0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2321, }, -+ { 71, 5, 1, 0x00000000a0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 36, }, -+ { 72, 5, 1, 0x00000001c8000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 1203, }, -+ { 73, 5, 1, 0x0000010000000000ull, 0x000001fc000fe000ull, { 18, 20, 21, 0, 0 }, 0x40, 2324, }, -+ { 74, 5, 1, 0x0000014000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x0, 2327, }, -+ { 74, 5, 1, 0x0000014000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x40, 38, }, -+ { 75, 5, 1, 0x0000000088000000ull, 0x000001e3f8000000ull, { 18, 20, 0, 0, 0 }, 0xc0, 39, }, -+ { 76, 5, 1, 0x0000000088000000ull, 
0x000001e3f80fe000ull, { 18, 20, 0, 0, 0 }, 0x40, 40, }, -+ { 77, 5, 1, 0x0000018000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x0, 2330, }, -+ { 77, 5, 1, 0x0000018000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x40, 41, }, -+ { 78, 5, 1, 0x0000018000000000ull, 0x000001fc000fe000ull, { 18, 20, 21, 0, 0 }, 0x40, 2333, }, -+ { 79, 5, 1, 0x0000010008000000ull, 0x000001fff80fe000ull, { 18, 20, 0, 0, 0 }, 0x40, 2336, }, -+ { 80, 5, 1, 0x0000000170000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 44, }, -+ { 81, 5, 1, 0x0000002080000000ull, 0x000001e3f80fe000ull, { 18, 20, 0, 0, 0 }, 0x40, 45, }, -+ { 82, 5, 1, 0x0000000140000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 46, }, -+ { 83, 5, 1, 0x00000020b8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2337, }, -+ { 83, 5, 1, 0x00000020b8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 47, }, -+ { 84, 5, 1, 0x00000020b0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2338, }, -+ { 84, 5, 1, 0x00000020b0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 48, }, -+ { 85, 5, 1, 0x0000002180000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 928, }, -+ { 85, 5, 1, 0x0000002180000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 929, }, -+ { 85, 5, 1, 0x0000002188000000ull, 0x000001eff8000000ull, { 18, 20, 19, 0, 0 }, 0x40, 1101, }, -+ { 86, 5, 1, 0x00000020c0000000ull, 0x000001eff8000000ull, { 18, 19, 0, 0, 0 }, 0x0, 1026, }, -+ { 86, 5, 1, 0x00000020c0000000ull, 0x000001eff8000000ull, { 18, 19, 0, 0, 0 }, 0x40, 1027, }, -+ { 87, 5, 1, 0x0000013000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x0, 2355, }, -+ { 87, 5, 1, 0x0000013000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x40, 49, }, -+ { 88, 5, 1, 0x00000020a8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2356, }, -+ { 88, 5, 1, 0x00000020a8000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 50, }, -+ { 89, 5, 1, 0x0000002080000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2221, }, -+ { 90, 5, 1, 0x00000020a0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2357, }, -+ { 90, 5, 1, 0x00000020a0000000ull, 0x000001eff8000000ull, { 18, 19, 20, 0, 0 }, 0x40, 51, }, -+ { 91, 5, 1, 0x0000013000000000ull, 0x000001fc000fe000ull, { 18, 20, 21, 0, 0 }, 0x40, 2358, }, -+ { 92, 5, 1, 0x0000017000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x0, 2359, }, -+ { 92, 5, 1, 0x0000017000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x40, 53, }, -+ { 93, 5, 1, 0x0000002088000000ull, 0x000001e3f8000000ull, { 18, 20, 0, 0, 0 }, 0xc0, 54, }, -+ { 94, 5, 1, 0x0000002088000000ull, 0x000001e3f80fe000ull, { 18, 20, 0, 0, 0 }, 0x40, 55, }, -+ { 95, 5, 1, 0x000001b000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x0, 2360, }, -+ { 95, 5, 1, 0x000001b000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x40, 56, }, -+ { 96, 5, 1, 0x000001b000000000ull, 0x000001fc000fe000ull, { 18, 20, 21, 0, 0 }, 0x40, 2361, }, -+ { 97, 5, 2, 0x0000002200000000ull, 0x000001fe00000000ull, { 18, 23, 19, 20, 0 }, 0x0, 2362, }, -+ { 97, 5, 2, 0x0000002200000000ull, 0x000001fe00000000ull, { 18, 23, 19, 20, 0 }, 0x40, 58, }, -+ { 98, 5, 2, 0x0000003200000000ull, 0x000001fe00000000ull, { 18, 23, 20, 0, 0 }, 0x0, 2363, }, -+ { 98, 5, 2, 0x0000003200000000ull, 0x000001fe00000000ull, { 18, 23, 20, 0, 0 }, 0x40, 59, }, -+ { 99, 5, 2, 0x0000000200000000ull, 0x000001fe00000000ull, { 18, 23, 19, 20, 0 }, 0x0, 2364, }, -+ { 99, 5, 
2, 0x0000000200000000ull, 0x000001fe00000000ull, { 18, 23, 19, 20, 0 }, 0x40, 60, }, -+ { 100, 5, 2, 0x0000001200000000ull, 0x000001fe00000000ull, { 18, 23, 20, 0, 0 }, 0x0, 2365, }, -+ { 100, 5, 2, 0x0000001200000000ull, 0x000001fe00000000ull, { 18, 23, 20, 0, 0 }, 0x40, 61, }, -+ { 101, 5, 1, 0x000001c000000000ull, 0x000001f000000000ull, { 18, 20, 21, 19, 0 }, 0x0, 62, }, -+ { 102, 5, 0, 0x0000000020000000ull, 0x000001eff8000000ull, { 50, 51, 0, 0, 0 }, 0x0, 2366, }, -+ { 102, 5, 0, 0x0000000020000000ull, 0x000001eff8000000ull, { 50, 51, 0, 0, 0 }, 0x40, 63, }, -+ { 103, 5, 1, 0x0000014008000000ull, 0x000001fff8000000ull, { 18, 20, 19, 0, 0 }, 0x40, 2369, }, -+ { 104, 5, 1, 0x00000001a0000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 65, }, -+ { 105, 5, 1, 0x00000001e0000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 2168, }, -+ { 106, 3, 0, 0x0000000100000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x0, 66, }, -+ { 108, 5, 1, 0x0000000178000000ull, 0x000001e3f8000000ull, { 18, 19, 20, 0, 0 }, 0x0, 67, }, -+ { 113, 3, 1, 0x0000008708000000ull, 0x000001ffc8000000ull, { 24, 19, 0, 0, 0 }, 0x0, 2747, }, -+ { 118, 4, 0, 0x0000004008000000ull, 0x000001e1f8000000ull, { 65, 0, 0, 0, 0 }, 0x0, 520, }, -+ { 118, 5, 0, 0x000000000c000000ull, 0x000001e3fc000000ull, { 65, 0, 0, 0, 0 }, 0x0, 943, }, -+ { 118, 2, 0, 0x000000000c000000ull, 0x000001effc000000ull, { 65, 0, 0, 0, 0 }, 0x2, 1123, }, -+ { 118, 3, 0, 0x000000000c000000ull, 0x000001effc000000ull, { 65, 0, 0, 0, 0 }, 0x0, 1249, }, -+ { 118, 6, 0, 0x000000000c000000ull, 0x000001effc000000ull, { 69, 0, 0, 0, 0 }, 0x0, 2996, }, -+ { 118, 7, 0, 0x0000000000000000ull, 0x0000000000000000ull, { 65, 0, 0, 0, 0 }, 0x0, 68, }, -+ { 123, 3, 0, 0x0000000080000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x0, 69, }, -+ { 123, 3, 0, 0x0000000090000000ull, 0x000001eff8000000ull, { 24, 0, 0, 0, 0 }, 0x0, 902, }, -+ { 123, 3, 0, 0x0000000098000000ull, 0x000001eff8000000ull, { 18, 0, 0, 0, 0 }, 0x0, 903, }, -+ { 124, 3, 0, 0x0000002170000000ull, 0x000001eff8000000ull, { 25, 0, 0, 0, 0 }, 0xc, 828, }, -+ { 125, 3, 1, 0x0000002070000000ull, 0x000001eff8000000ull, { 30, 25, 0, 0, 0 }, 0x8, 829, }, -+ { 125, 3, 1, 0x0000002078000000ull, 0x000001eff8000000ull, { 31, 25, 0, 0, 0 }, 0x8, 1125, }, -+ { 127, 3, 1, 0x0000008000000000ull, 0x000001fff8000000ull, { 24, 33, 0, 0, 0 }, 0x0, 70, }, -+ { 127, 3, 1, 0x0000009000000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x400, 71, }, -+ { 127, 3, 1, 0x000000a000000000ull, 0x000001eff0000000ull, { 24, 33, 62, 0, 0 }, 0x400, 72, }, -+ { 128, 3, 2, 0x0000008a08000000ull, 0x000001fff8000000ull, { 24, 1, 33, 0, 0 }, 0x0, 73, }, -+ { 128, 3, 1, 0x0000008a08000000ull, 0x000001fff8000000ull, { 24, 33, 0, 0, 0 }, 0x40, 74, }, -+ { 129, 3, 1, 0x0000008040000000ull, 0x000001fff8000000ull, { 24, 33, 0, 0, 0 }, 0x0, 75, }, -+ { 129, 3, 1, 0x0000009040000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x400, 76, }, -+ { 129, 3, 1, 0x000000a040000000ull, 0x000001eff0000000ull, { 24, 33, 62, 0, 0 }, 0x400, 77, }, -+ { 130, 3, 1, 0x0000008080000000ull, 0x000001fff8000000ull, { 24, 33, 0, 0, 0 }, 0x0, 78, }, -+ { 130, 3, 1, 0x0000009080000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x400, 79, }, -+ { 130, 3, 1, 0x000000a080000000ull, 0x000001eff0000000ull, { 24, 33, 62, 0, 0 }, 0x400, 80, }, -+ { 131, 3, 1, 0x00000080c0000000ull, 0x000001fff8000000ull, { 24, 33, 0, 0, 0 }, 0x0, 81, }, -+ { 131, 3, 1, 0x00000080c0000000ull, 0x000001fff8000000ull, { 24, 33, 83, 0, 0 }, 0x0, 1321, }, -+ { 
131, 3, 1, 0x00000090c0000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x400, 82, }, -+ { 131, 3, 1, 0x000000a0c0000000ull, 0x000001eff0000000ull, { 24, 33, 62, 0, 0 }, 0x400, 83, }, -+ { 132, 3, 1, 0x000000c6c0000000ull, 0x000001fff8000000ull, { 18, 33, 0, 0, 0 }, 0x0, 1021, }, -+ { 132, 3, 1, 0x000000d6c0000000ull, 0x000001fff8000000ull, { 18, 33, 25, 0, 0 }, 0x400, 1022, }, -+ { 132, 3, 1, 0x000000e6c0000000ull, 0x000001eff0000000ull, { 18, 33, 62, 0, 0 }, 0x400, 1023, }, -+ { 133, 3, 1, 0x000000c040000000ull, 0x000001fff8000000ull, { 18, 33, 0, 0, 0 }, 0x0, 84, }, -+ { 133, 3, 1, 0x000000d040000000ull, 0x000001fff8000000ull, { 18, 33, 25, 0, 0 }, 0x400, 85, }, -+ { 133, 3, 1, 0x000000e040000000ull, 0x000001eff0000000ull, { 18, 33, 62, 0, 0 }, 0x400, 86, }, -+ { 134, 3, 1, 0x000000c0c0000000ull, 0x000001fff8000000ull, { 18, 33, 0, 0, 0 }, 0x0, 87, }, -+ { 134, 3, 1, 0x000000d0c0000000ull, 0x000001fff8000000ull, { 18, 33, 25, 0, 0 }, 0x400, 88, }, -+ { 134, 3, 1, 0x000000e0c0000000ull, 0x000001eff0000000ull, { 18, 33, 62, 0, 0 }, 0x400, 89, }, -+ { 135, 3, 1, 0x000000c000000000ull, 0x000001fff8000000ull, { 18, 33, 0, 0, 0 }, 0x0, 90, }, -+ { 135, 3, 1, 0x000000d000000000ull, 0x000001fff8000000ull, { 18, 33, 25, 0, 0 }, 0x400, 91, }, -+ { 135, 3, 1, 0x000000e000000000ull, 0x000001eff0000000ull, { 18, 33, 62, 0, 0 }, 0x400, 92, }, -+ { 136, 3, 2, 0x000000c048000000ull, 0x000001fff8000000ull, { 18, 19, 33, 0, 0 }, 0x0, 93, }, -+ { 136, 3, 2, 0x000000d048000000ull, 0x000001fff8000000ull, { 18, 19, 33, 6, 0 }, 0x400, 94, }, -+ { 137, 3, 2, 0x000000c0c8000000ull, 0x000001fff8000000ull, { 18, 19, 33, 0, 0 }, 0x0, 95, }, -+ { 137, 3, 2, 0x000000d0c8000000ull, 0x000001fff8000000ull, { 18, 19, 33, 6, 0 }, 0x400, 96, }, -+ { 138, 3, 2, 0x000000c088000000ull, 0x000001fff8000000ull, { 18, 19, 33, 0, 0 }, 0x0, 97, }, -+ { 138, 3, 2, 0x000000d088000000ull, 0x000001fff8000000ull, { 18, 19, 33, 5, 0 }, 0x400, 98, }, -+ { 139, 3, 1, 0x000000c080000000ull, 0x000001fff8000000ull, { 18, 33, 0, 0, 0 }, 0x0, 99, }, -+ { 139, 3, 1, 0x000000d080000000ull, 0x000001fff8000000ull, { 18, 33, 25, 0, 0 }, 0x400, 100, }, -+ { 139, 3, 1, 0x000000e080000000ull, 0x000001eff0000000ull, { 18, 33, 62, 0, 0 }, 0x400, 101, }, -+ { 142, 3, 0, 0x000000cb00000000ull, 0x000001fff8000000ull, { 33, 0, 0, 0, 0 }, 0x0, 102, }, -+ { 142, 3, 0, 0x000000db00000000ull, 0x000001fff8000000ull, { 33, 25, 0, 0, 0 }, 0x400, 103, }, -+ { 142, 3, 0, 0x000000eb00000000ull, 0x000001eff0000000ull, { 33, 62, 0, 0, 0 }, 0x400, 104, }, -+ { 143, 3, 0, 0x0000000050000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x21, 105, }, -+ { 151, 3, 0, 0x0000000110000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x0, 106, }, -+ { 152, 2, 1, 0x000000e880000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2169, }, -+ { 153, 2, 1, 0x000000ea80000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2170, }, -+ { 154, 2, 1, 0x000000f880000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2171, }, -+ { 155, 1, 1, 0x0000010800000000ull, 0x000001fff80fe000ull, { 24, 26, 0, 0, 0 }, 0x0, 107, }, -+ { 155, 1, 1, 0x0000012000000000ull, 0x000001e000300000ull, { 24, 66, 0, 0, 0 }, 0x40, 108, }, -+ { 155, 5, 1, 0x0000000080000000ull, 0x000001e3f8000000ull, { 18, 20, 0, 0, 0 }, 0xc0, 109, }, -+ { 155, 2, 1, 0x0000000e00100000ull, 0x000001ee00f00000ull, { 15, 25, 0, 0, 0 }, 0x40, 110, }, -+ { 155, 2, 1, 0x0000000e00000000ull, 0x000001ee00f00000ull, { 15, 25, 78, 0, 0 }, 0x0, 2821, }, -+ { 155, 2, 1, 0x0000000188000000ull, 
0x000001eff8000000ull, { 24, 16, 0, 0, 0 }, 0x0, 112, }, -+ { 155, 2, 1, 0x0000000600000000ull, 0x000001ee00000000ull, { 9, 25, 64, 0, 0 }, 0x0, 113, }, -+ { 155, 2, 1, 0x00000016ff001fc0ull, 0x000001feff001fc0ull, { 9, 25, 0, 0, 0 }, 0x40, 114, }, -+ { 155, 2, 1, 0x0000000400000000ull, 0x000001ee00000000ull, { 10, 68, 0, 0, 0 }, 0x0, 115, }, -+ { 155, 2, 1, 0x0000000180000000ull, 0x000001eff8000000ull, { 24, 8, 0, 0, 0 }, 0x0, 116, }, -+ { 155, 2, 1, 0x0000000198000000ull, 0x000001eff8000000ull, { 24, 9, 0, 0, 0 }, 0x0, 117, }, -+ { 155, 2, 1, 0x0000000150000000ull, 0x000001eff8000000ull, { 14, 25, 0, 0, 0 }, 0x0, 1126, }, -+ { 155, 2, 1, 0x0000000050000000ull, 0x000001eff8000000ull, { 14, 55, 0, 0, 0 }, 0x0, 1127, }, -+ { 155, 2, 1, 0x0000000190000000ull, 0x000001eff8000000ull, { 24, 14, 0, 0, 0 }, 0x0, 1128, }, -+ { 155, 3, 1, 0x0000000140000000ull, 0x000001eff8000000ull, { 14, 55, 0, 0, 0 }, 0x0, 1250, }, -+ { 155, 3, 1, 0x0000002150000000ull, 0x000001eff8000000ull, { 14, 25, 0, 0, 0 }, 0x0, 1251, }, -+ { 155, 3, 1, 0x0000002110000000ull, 0x000001eff8000000ull, { 24, 14, 0, 0, 0 }, 0x0, 1252, }, -+ { 155, 3, 1, 0x0000002160000000ull, 0x000001eff8000000ull, { 17, 25, 0, 0, 0 }, 0x8, 118, }, -+ { 155, 3, 1, 0x0000002120000000ull, 0x000001eff8000000ull, { 24, 17, 0, 0, 0 }, 0x8, 119, }, -+ { 155, 3, 1, 0x0000002168000000ull, 0x000001eff8000000ull, { 12, 25, 0, 0, 0 }, 0x8, 120, }, -+ { 155, 3, 1, 0x0000002148000000ull, 0x000001eff8000000ull, { 13, 25, 0, 0, 0 }, 0x0, 121, }, -+ { 155, 3, 1, 0x0000002128000000ull, 0x000001eff8000000ull, { 24, 11, 0, 0, 0 }, 0x8, 122, }, -+ { 155, 3, 1, 0x0000002108000000ull, 0x000001eff8000000ull, { 24, 13, 0, 0, 0 }, 0x0, 123, }, -+ { 155, 3, 1, 0x0000002000000000ull, 0x000001eff8000000ull, { 38, 25, 0, 0, 0 }, 0x8, 124, }, -+ { 155, 3, 1, 0x0000002008000000ull, 0x000001eff8000000ull, { 29, 25, 0, 0, 0 }, 0x8, 125, }, -+ { 155, 3, 1, 0x0000002010000000ull, 0x000001eff8000000ull, { 32, 25, 0, 0, 0 }, 0x8, 126, }, -+ { 155, 3, 1, 0x0000002018000000ull, 0x000001eff8000000ull, { 35, 25, 0, 0, 0 }, 0x8, 127, }, -+ { 155, 3, 1, 0x0000002020000000ull, 0x000001eff8000000ull, { 36, 25, 0, 0, 0 }, 0x8, 128, }, -+ { 155, 3, 1, 0x0000002028000000ull, 0x000001eff8000000ull, { 37, 25, 0, 0, 0 }, 0x8, 129, }, -+ { 155, 3, 1, 0x0000002030000000ull, 0x000001eff8000000ull, { 34, 25, 0, 0, 0 }, 0x8, 130, }, -+ { 155, 3, 1, 0x0000002080000000ull, 0x000001eff8000000ull, { 24, 38, 0, 0, 0 }, 0x8, 131, }, -+ { 155, 3, 1, 0x0000002088000000ull, 0x000001eff8000000ull, { 24, 29, 0, 0, 0 }, 0x8, 132, }, -+ { 155, 3, 1, 0x0000002090000000ull, 0x000001eff8000000ull, { 24, 32, 0, 0, 0 }, 0x8, 133, }, -+ { 155, 3, 1, 0x0000002098000000ull, 0x000001eff8000000ull, { 24, 35, 0, 0, 0 }, 0x8, 134, }, -+ { 155, 3, 1, 0x00000020a0000000ull, 0x000001eff8000000ull, { 24, 36, 0, 0, 0 }, 0x8, 135, }, -+ { 155, 3, 1, 0x00000020a8000000ull, 0x000001eff8000000ull, { 24, 37, 0, 0, 0 }, 0x0, 136, }, -+ { 155, 3, 1, 0x00000020b0000000ull, 0x000001eff8000000ull, { 24, 34, 0, 0, 0 }, 0x8, 137, }, -+ { 155, 3, 1, 0x00000020b8000000ull, 0x000001eff8000000ull, { 24, 28, 0, 0, 0 }, 0x0, 138, }, -+ { 155, 7, 1, 0x0000000000000000ull, 0x0000000000000000ull, { 24, 14, 0, 0, 0 }, 0x0, 139, }, -+ { 155, 7, 1, 0x0000000000000000ull, 0x0000000000000000ull, { 14, 55, 0, 0, 0 }, 0x0, 140, }, -+ { 155, 7, 1, 0x0000000000000000ull, 0x0000000000000000ull, { 14, 25, 0, 0, 0 }, 0x0, 141, }, -+ { 156, 6, 1, 0x000000c000000000ull, 0x000001e000100000ull, { 24, 70, 0, 0, 0 }, 0x0, 142, }, -+ { 157, 2, 1, 
0x000000eca0000000ull, 0x000001fff0000000ull, { 24, 25, 74, 0, 0 }, 0x0, 143, }, -+ { 158, 2, 1, 0x000000eea0000000ull, 0x000001fff0000000ull, { 24, 25, 75, 0, 0 }, 0x0, 144, }, -+ { 168, 4, 0, 0x0000004000000000ull, 0x000001e1f8000000ull, { 65, 0, 0, 0, 0 }, 0x0, 521, }, -+ { 168, 5, 0, 0x0000000008000000ull, 0x000001e3fc000000ull, { 65, 0, 0, 0, 0 }, 0x0, 944, }, -+ { 168, 2, 0, 0x0000000008000000ull, 0x000001effc000000ull, { 65, 0, 0, 0, 0 }, 0x2, 1129, }, -+ { 168, 3, 0, 0x0000000008000000ull, 0x000001effc000000ull, { 65, 0, 0, 0, 0 }, 0x0, 1253, }, -+ { 168, 6, 0, 0x0000000008000000ull, 0x000001effc000000ull, { 69, 0, 0, 0, 0 }, 0x0, 2997, }, -+ { 168, 7, 0, 0x0000000000000000ull, 0x0000000000000000ull, { 65, 0, 0, 0, 0 }, 0x0, 145, }, -+ { 175, 1, 1, 0x0000010070000000ull, 0x000001eff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 146, }, -+ { 175, 1, 1, 0x0000010170000000ull, 0x000001eff8000000ull, { 24, 55, 26, 0, 0 }, 0x0, 147, }, -+ { 178, 2, 1, 0x000000ea00000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2979, }, -+ { 179, 2, 1, 0x000000f820000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2823, }, -+ { 180, 1, 1, 0x0000010400000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 148, }, -+ { 181, 1, 1, 0x0000010600000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 149, }, -+ { 182, 1, 1, 0x0000011400000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 150, }, -+ { 183, 1, 1, 0x0000010450000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 151, }, -+ { 184, 1, 1, 0x0000010650000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 152, }, -+ { 185, 1, 1, 0x0000010470000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 153, }, -+ { 186, 1, 1, 0x0000010670000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 154, }, -+ { 187, 1, 1, 0x0000010520000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 930, }, -+ { 188, 1, 1, 0x0000010720000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 931, }, -+ { 189, 1, 1, 0x0000011520000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 932, }, -+ { 190, 2, 1, 0x000000e850000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2837, }, -+ { 191, 2, 1, 0x000000ea70000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 155, }, -+ { 192, 2, 1, 0x000000e810000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2838, }, -+ { 193, 2, 1, 0x000000ea30000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 156, }, -+ { 194, 2, 1, 0x000000ead0000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 2172, }, -+ { 195, 2, 1, 0x000000e230000000ull, 0x000001ff30000000ull, { 24, 25, 26, 42, 0 }, 0x0, 157, }, -+ { 196, 2, 1, 0x000000e690000000ull, 0x000001fff0000000ull, { 24, 26, 0, 0, 0 }, 0x0, 158, }, -+ { 198, 3, 1, 0x00000021c0000000ull, 0x000001eff8000000ull, { 24, 26, 25, 0, 0 }, 0x0, 2173, }, -+ { 198, 3, 1, 0x00000020c0000000ull, 0x000001eff8000000ull, { 24, 26, 49, 0, 0 }, 0x0, 2174, }, -+ { 198, 3, 0, 0x0000002188000000ull, 0x000001eff8000000ull, { 26, 49, 0, 0, 0 }, 0x0, 2204, }, -+ { 199, 2, 1, 0x000000e8b0000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 159, }, -+ { 200, 2, 1, 0x000000e240000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 160, }, -+ { 200, 2, 1, 0x000000ee50000000ull, 0x000001fff0000000ull, { 24, 25, 39, 0, 0 }, 0x0, 161, }, -+ { 201, 2, 1, 0x000000f040000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 162, }, -+ { 201, 2, 1, 0x000000fc50000000ull, 0x000001fff0000000ull, { 24, 25, 39, 0, 0 
}, 0x0, 163, }, -+ { 202, 1, 1, 0x0000010680000000ull, 0x000001ffe0000000ull, { 24, 25, 41, 26, 0 }, 0x0, 164, }, -+ { 203, 2, 1, 0x000000e220000000ull, 0x000001fff0000000ull, { 24, 26, 25, 0, 0 }, 0x0, 165, }, -+ { 203, 2, 1, 0x000000e630000000ull, 0x000001fff0000000ull, { 24, 26, 43, 0, 0 }, 0x0, 166, }, -+ { 204, 2, 1, 0x000000f020000000ull, 0x000001fff0000000ull, { 24, 26, 25, 0, 0 }, 0x0, 167, }, -+ { 204, 2, 1, 0x000000f430000000ull, 0x000001fff0000000ull, { 24, 26, 43, 0, 0 }, 0x0, 168, }, -+ { 205, 1, 1, 0x00000106c0000000ull, 0x000001ffe0000000ull, { 24, 25, 41, 26, 0 }, 0x0, 169, }, -+ { 206, 1, 1, 0x0000010420000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 170, }, -+ { 207, 1, 1, 0x0000010620000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 171, }, -+ { 208, 1, 1, 0x0000011420000000ull, 0x000001fff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 172, }, -+ { 209, 3, 0, 0x0000002048000000ull, 0x000001eff8000000ull, { 26, 25, 0, 0, 0 }, 0x8, 1157, }, -+ { 209, 3, 0, 0x0000002050000000ull, 0x000001eff8000000ull, { 26, 25, 0, 0, 0 }, 0xc, 1032, }, -+ { 209, 3, 0, 0x00000021a0000000ull, 0x000001eff8000000ull, { 26, 0, 0, 0, 0 }, 0x8, 904, }, -+ { 210, 3, 0, 0x0000002060000000ull, 0x000001eff8000000ull, { 26, 25, 0, 0, 0 }, 0x8, 830, }, -+ { 215, 4, 0, 0x0000000040000000ull, 0x000001e1f8000000ull, { 0, 0, 0, 0, 0 }, 0x22c, 173, }, -+ { 216, 3, 0, 0x0000000038000000ull, 0x000001ee78000000ull, { 67, 0, 0, 0, 0 }, 0x8, 174, }, -+ { 217, 3, 0, 0x0000000028000000ull, 0x000001ee78000000ull, { 67, 0, 0, 0, 0 }, 0x0, 175, }, -+ { 226, 3, 1, 0x000000c708000000ull, 0x000001ffc8000000ull, { 18, 25, 0, 0, 0 }, 0x0, 2748, }, -+ { 227, 2, 1, 0x000000a600000000ull, 0x000001ee04000000ull, { 24, 25, 45, 0, 0 }, 0x140, 176, }, -+ { 227, 2, 1, 0x000000f240000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 177, }, -+ { 228, 1, 1, 0x0000010080000000ull, 0x000001efe0000000ull, { 24, 25, 40, 26, 0 }, 0x0, 178, }, -+ { 229, 1, 1, 0x00000100c0000000ull, 0x000001efe0000000ull, { 24, 25, 40, 26, 0 }, 0x0, 179, }, -+ { 230, 2, 1, 0x000000a400000000ull, 0x000001ee00002000ull, { 24, 26, 76, 0, 0 }, 0x140, 2844, }, -+ { 230, 2, 1, 0x000000f220000000ull, 0x000001fff0000000ull, { 24, 26, 25, 0, 0 }, 0x0, 181, }, -+ { 231, 2, 1, 0x000000ac00000000ull, 0x000001ee00000000ull, { 24, 25, 26, 44, 0 }, 0x0, 182, }, -+ { 236, 3, 0, 0x0000000180000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x0, 832, }, -+ { 237, 3, 0, 0x0000000030000000ull, 0x000001ee78000000ull, { 67, 0, 0, 0, 0 }, 0x8, 183, }, -+ { 239, 3, 1, 0x0000008c00000000ull, 0x000001fff8000000ull, { 33, 25, 0, 0, 0 }, 0x0, 184, }, -+ { 239, 3, 1, 0x000000ac00000000ull, 0x000001eff0000000ull, { 33, 25, 61, 0, 0 }, 0x400, 185, }, -+ { 240, 3, 1, 0x0000008c08000000ull, 0x000001fff8000000ull, { 33, 25, 1, 0, 0 }, 0x0, 186, }, -+ { 240, 3, 1, 0x0000008c08000000ull, 0x000001fff8000000ull, { 33, 25, 0, 0, 0 }, 0x40, 187, }, -+ { 241, 3, 1, 0x0000008c40000000ull, 0x000001fff8000000ull, { 33, 25, 0, 0, 0 }, 0x0, 188, }, -+ { 241, 3, 1, 0x000000ac40000000ull, 0x000001eff0000000ull, { 33, 25, 61, 0, 0 }, 0x400, 189, }, -+ { 242, 3, 1, 0x0000008c80000000ull, 0x000001fff8000000ull, { 33, 25, 0, 0, 0 }, 0x0, 190, }, -+ { 242, 3, 1, 0x000000ac80000000ull, 0x000001eff0000000ull, { 33, 25, 61, 0, 0 }, 0x400, 191, }, -+ { 243, 3, 1, 0x0000008cc0000000ull, 0x000001fff8000000ull, { 33, 25, 0, 0, 0 }, 0x0, 192, }, -+ { 243, 3, 1, 0x000000acc0000000ull, 0x000001eff0000000ull, { 33, 25, 61, 0, 0 }, 0x400, 193, }, -+ { 244, 3, 1, 0x000000cec0000000ull, 
0x000001fff8000000ull, { 33, 19, 0, 0, 0 }, 0x0, 2751, }, -+ { 244, 3, 1, 0x000000eec0000000ull, 0x000001eff0000000ull, { 33, 19, 61, 0, 0 }, 0x400, 2752, }, -+ { 245, 3, 1, 0x000000cc40000000ull, 0x000001fff8000000ull, { 33, 19, 0, 0, 0 }, 0x0, 194, }, -+ { 245, 3, 1, 0x000000ec40000000ull, 0x000001eff0000000ull, { 33, 19, 61, 0, 0 }, 0x400, 195, }, -+ { 246, 3, 1, 0x000000ccc0000000ull, 0x000001fff8000000ull, { 33, 19, 0, 0, 0 }, 0x0, 196, }, -+ { 246, 3, 1, 0x000000ecc0000000ull, 0x000001eff0000000ull, { 33, 19, 61, 0, 0 }, 0x400, 197, }, -+ { 247, 3, 1, 0x000000cc00000000ull, 0x000001fff8000000ull, { 33, 19, 0, 0, 0 }, 0x0, 198, }, -+ { 247, 3, 1, 0x000000ec00000000ull, 0x000001eff0000000ull, { 33, 19, 61, 0, 0 }, 0x400, 199, }, -+ { 248, 3, 1, 0x000000cc80000000ull, 0x000001fff8000000ull, { 33, 19, 0, 0, 0 }, 0x0, 200, }, -+ { 248, 3, 1, 0x000000ec80000000ull, 0x000001eff0000000ull, { 33, 19, 61, 0, 0 }, 0x400, 201, }, -+ { 249, 1, 1, 0x0000010028000000ull, 0x000001eff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 202, }, -+ { 249, 1, 1, 0x0000010020000000ull, 0x000001eff8000000ull, { 24, 25, 26, 4, 0 }, 0x0, 203, }, -+ { 249, 1, 1, 0x0000010128000000ull, 0x000001eff8000000ull, { 24, 55, 26, 0, 0 }, 0x0, 204, }, -+ { 250, 3, 0, 0x0000000020000000ull, 0x000001ee78000000ull, { 67, 0, 0, 0, 0 }, 0x0, 205, }, -+ { 251, 2, 1, 0x00000000a0000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 206, }, -+ { 252, 2, 1, 0x00000000a8000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 207, }, -+ { 253, 2, 1, 0x00000000b0000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 208, }, -+ { 254, 3, 0, 0x0000000198000000ull, 0x000001eff8000000ull, { 0, 0, 0, 0, 0 }, 0x0, 1132, }, -+ { 255, 3, 1, 0x00000020f8000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x8, 209, }, -+ { 256, 2, 2, 0x000000a000000000ull, 0x000001fe00003000ull, { 22, 23, 26, 76, 0 }, 0x0, 3002, }, -+ { 256, 2, 1, 0x000000a000000000ull, 0x000001fe00003000ull, { 22, 26, 76, 0, 0 }, 0x40, 3003, }, -+ { 256, 2, 2, 0x000000a000000000ull, 0x000001fe00003000ull, { 23, 22, 26, 76, 0 }, 0x40, 1985, }, -+ { 256, 2, 1, 0x000000a000000000ull, 0x000001fe00003000ull, { 23, 26, 76, 0, 0 }, 0x40, 1986, }, -+ { 257, 3, 1, 0x00000020d0000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 210, }, -+ { 258, 2, 2, 0x000000a000002000ull, 0x000001fe00003000ull, { 22, 23, 26, 0, 0 }, 0x0, 3006, }, -+ { 258, 2, 1, 0x000000a000002000ull, 0x000001fe00003000ull, { 22, 26, 0, 0, 0 }, 0x40, 3007, }, -+ { 258, 2, 2, 0x000000a000002000ull, 0x000001fe00003000ull, { 23, 22, 26, 0, 0 }, 0x40, 1989, }, -+ { 258, 2, 1, 0x000000a000002000ull, 0x000001fe00003000ull, { 23, 26, 0, 0, 0 }, 0x40, 1990, }, -+ { 259, 3, 1, 0x00000020f0000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x8, 211, }, -+ { 261, 3, 1, 0x00000020d8000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 212, }, -+ { 265, 2, 1, 0x000000e840000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 1113, }, -+ { 266, 2, 1, 0x000000ea40000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 1114, }, -+ { 267, 2, 1, 0x000000f840000000ull, 0x000001fff0000000ull, { 24, 25, 26, 0, 0 }, 0x0, 1115, }, -+ { 275, 3, 1, 0x0000008208000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x0, 213, }, -+ { 276, 3, 1, 0x0000008248000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x0, 214, }, -+ { 277, 3, 1, 0x0000008288000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x0, 215, }, -+ { 278, 3, 1, 0x00000082c8000000ull, 0x000001fff8000000ull, { 24, 33, 25, 0, 0 }, 0x0, 
216, }, -+ { 280, 5, 1, 0x000001d000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x0, 1161, }, -+ { 280, 5, 1, 0x000001d000000000ull, 0x000001fc00000000ull, { 18, 20, 21, 19, 0 }, 0x40, 1243, }, -+ { 281, 5, 1, 0x000001d000000000ull, 0x000001fc000fe000ull, { 18, 20, 21, 0, 0 }, 0x40, 1162, }, -+ { 282, 1, 1, 0x0000010078000000ull, 0x000001eff8000000ull, { 24, 25, 26, 0, 0 }, 0x0, 217, }, -+ { 282, 1, 1, 0x0000010178000000ull, 0x000001eff8000000ull, { 24, 55, 26, 0, 0 }, 0x0, 218, }, -+ { 285, 2, 1, 0x0000000080000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 219, }, -+ { 286, 2, 1, 0x0000000088000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 220, }, -+ { 287, 2, 1, 0x0000000090000000ull, 0x000001eff8000000ull, { 24, 26, 0, 0, 0 }, 0x0, 221, }, -+}; -+ -+static const char dis_table[] = { -+0xa0, 0xc5, 0xe8, 0xa0, 0x2e, 0x98, 0xa0, 0x2c, 0x80, 0xa0, 0x1b, 0xc0, -+0x98, 0xb0, 0x02, 0x50, 0x90, 0x50, 0x90, 0x28, 0x24, 0x38, 0x28, 0x24, -+0x38, 0x20, 0x90, 0x28, 0x24, 0x38, 0x18, 0x24, 0x38, 0x10, 0x91, 0x60, -+0x90, 0x28, 0x24, 0x38, 0x00, 0x10, 0x10, 0x58, 0x41, 0x61, 0xbf, 0xc0, -+0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, -+0x10, 0x10, 0x52, 0xc0, 0xc0, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, -+0x10, 0x10, 0x10, 0x24, 0x23, 0x70, 0x90, 0x28, 0x24, 0x37, 0xf0, 0x24, -+0x37, 0xe8, 0xa8, 0x0b, 0x48, 0x15, 0x20, 0x97, 0x20, 0x95, 0xc8, 0x9a, -+0xb8, 0x05, 0x38, 0x91, 0x18, 0x90, 0xa0, 0x90, 0x60, 0x80, 0x90, 0x20, -+0x34, 0x86, 0xa4, 0x24, 0x00, 0x34, 0x83, 0x80, 0xa4, 0x35, 0xa0, 0x36, -+0xb9, 0x90, 0x50, 0x90, 0x28, 0x80, 0x36, 0xaf, 0x80, 0x34, 0x66, 0x81, -+0x33, 0xe2, 0x90, 0xe0, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x23, 0x10, 0x34, -+0x63, 0xa4, 0x1f, 0x08, 0x34, 0x60, 0x90, 0x38, 0xa4, 0x37, 0xa0, 0x36, -+0xfa, 0xa4, 0x37, 0x48, 0x36, 0xee, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x36, -+0x20, 0x36, 0xcf, 0xa4, 0x35, 0xf8, 0x36, 0xca, 0x80, 0xa4, 0x22, 0xf0, -+0x34, 0x5f, 0x92, 0x18, 0x91, 0xc0, 0x80, 0x91, 0x80, 0x90, 0xf8, 0xdb, -+0x84, 0x60, 0xf9, 0x40, 0xc0, 0xc0, 0x80, 0xa4, 0x41, 0x58, 0x8c, 0x42, -+0xb8, 0x84, 0x38, 0x61, 0xc0, 0xc0, 0x80, 0xa4, 0x41, 0x48, 0x8c, 0x42, -+0x98, 0x84, 0x38, 0x5f, 0xd3, 0x82, 0x40, 0x50, 0xc0, 0xc0, 0x81, 0x38, -+0x13, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x11, 0xa4, 0x1f, 0x18, 0x33, 0xe4, -+0x80, 0x90, 0x28, 0x80, 0x33, 0xe0, 0x80, 0x34, 0x68, 0x81, 0x90, 0x38, -+0xa4, 0x23, 0x80, 0x34, 0x6b, 0xa4, 0x23, 0x48, 0x34, 0x65, 0xc0, 0x40, -+0x10, 0x10, 0x90, 0x38, 0xa4, 0x1e, 0xf0, 0x33, 0xdf, 0xa4, 0x1e, 0xe0, -+0x33, 0xdd, 0x18, 0x24, 0x23, 0xf8, 0x83, 0x90, 0xa8, 0xd3, 0x82, 0xc0, -+0xc0, 0xc0, 0x80, 0xa4, 0x41, 0x28, 0x38, 0x4b, 0xc0, 0xc0, 0x80, 0xa4, -+0x41, 0x18, 0x38, 0x47, 0xd3, 0x82, 0x40, 0x50, 0xc0, 0xc0, 0x81, 0x38, -+0x0d, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x0b, 0x92, 0xb8, 0x99, 0x84, 0x23, -+0x68, 0x90, 0x78, 0x90, 0x50, 0x10, 0x10, 0x80, 0xa4, 0x35, 0x98, 0x36, -+0xb8, 0x82, 0x36, 0xae, 0x90, 0x80, 0x10, 0x10, 0x90, 0x38, 0xa4, 0x37, -+0x98, 0x36, 0xf9, 0xa4, 0x37, 0x40, 0x36, 0xed, 0x80, 0x90, 0x38, 0xa4, -+0x36, 0x18, 0x36, 0xce, 0xa4, 0x35, 0xf0, 0x36, 0xc9, 0x83, 0x90, 0xa8, -+0xd3, 0x82, 0xc0, 0xc0, 0xc0, 0x80, 0xa4, 0x40, 0xf8, 0x38, 0x3f, 0xc0, -+0xc0, 0x80, 0xa4, 0x40, 0xe8, 0x38, 0x3b, 0xd3, 0x82, 0x40, 0x50, 0xc0, -+0xc0, 0x81, 0x38, 0x07, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x05, 0x18, 0x24, -+0x23, 0x78, 0x83, 0x90, 0xa8, 0xd3, 0x82, 0xc0, 0xc0, 0xc0, 0x80, 0xa4, -+0x40, 0xc8, 0x38, 0x33, 0xc0, 0xc0, 0x80, 0xa4, 0x40, 0xb8, 0x38, 0x2f, -+0xd3, 0x82, 0x40, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x01, 
0x50, 0xc0, 0xc0, -+0x81, 0x37, 0xff, 0x94, 0x50, 0x92, 0xf8, 0x99, 0x84, 0x1f, 0x48, 0x90, -+0x78, 0x90, 0x50, 0x10, 0x10, 0x80, 0xa4, 0x35, 0x90, 0x36, 0xb7, 0x82, -+0x36, 0xad, 0x90, 0x80, 0x10, 0x10, 0x90, 0x38, 0xa4, 0x37, 0x90, 0x36, -+0xf8, 0xa4, 0x37, 0x38, 0x36, 0xec, 0x80, 0x90, 0x38, 0xa4, 0x36, 0x10, -+0x36, 0xcd, 0xa4, 0x35, 0xe8, 0x36, 0xc8, 0x83, 0x90, 0xe8, 0xd3, 0x83, -+0xc0, 0xc0, 0xc0, 0x80, 0xa4, 0x41, 0x68, 0x8c, 0x42, 0xd8, 0x84, 0x38, -+0x63, 0xc0, 0xc0, 0x80, 0xa4, 0x41, 0x50, 0x8c, 0x42, 0xa8, 0x84, 0x38, -+0x60, 0xd3, 0x82, 0x40, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x15, 0x50, 0xc0, -+0xc0, 0x81, 0x38, 0x12, 0x18, 0x24, 0x1f, 0x40, 0x83, 0x90, 0xa8, 0xd3, -+0x82, 0xc0, 0xc0, 0xc0, 0x80, 0xa4, 0x41, 0x38, 0x38, 0x4f, 0xc0, 0xc0, -+0x80, 0xa4, 0x41, 0x20, 0x38, 0x49, 0xd3, 0x82, 0x40, 0x50, 0xc0, 0xc0, -+0x81, 0x38, 0x0f, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x0c, 0x92, 0xb8, 0x99, -+0x84, 0x1f, 0x38, 0x90, 0x78, 0x90, 0x50, 0x10, 0x10, 0x80, 0xa4, 0x35, -+0x88, 0x36, 0xb6, 0x82, 0x36, 0xac, 0x90, 0x80, 0x10, 0x10, 0x90, 0x38, -+0xa4, 0x37, 0x88, 0x36, 0xf7, 0xa4, 0x37, 0x30, 0x36, 0xeb, 0x80, 0x90, -+0x38, 0xa4, 0x36, 0x08, 0x36, 0xcc, 0xa4, 0x35, 0xe0, 0x36, 0xc7, 0x83, -+0x90, 0xa8, 0xd3, 0x82, 0xc0, 0xc0, 0xc0, 0x80, 0xa4, 0x41, 0x08, 0x38, -+0x43, 0xc0, 0xc0, 0x80, 0xa4, 0x40, 0xf0, 0x38, 0x3d, 0xd3, 0x82, 0x40, -+0x50, 0xc0, 0xc0, 0x81, 0x38, 0x09, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x06, -+0x18, 0x20, 0x01, 0x48, 0x83, 0x90, 0xa8, 0xd3, 0x82, 0xc0, 0xc0, 0xc0, -+0x80, 0xa4, 0x40, 0xd8, 0x38, 0x37, 0xc0, 0xc0, 0x80, 0xa4, 0x40, 0xc0, -+0x38, 0x31, 0xd3, 0x82, 0x40, 0x50, 0xc0, 0xc0, 0x81, 0x38, 0x03, 0x50, -+0xc0, 0xc0, 0x81, 0x38, 0x00, 0xda, 0x06, 0xe0, 0xf9, 0x80, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x23, 0xe8, 0x34, 0x7b, 0x80, 0x34, 0x78, 0x90, 0x38, -+0xa4, 0x23, 0x90, 0x34, 0x76, 0x80, 0x34, 0x73, 0x90, 0x60, 0x90, 0x38, -+0xa4, 0x23, 0xd0, 0x34, 0x7c, 0x80, 0x34, 0x79, 0x90, 0x38, 0xa4, 0x23, -+0xa8, 0x34, 0x77, 0x80, 0x34, 0x74, 0xc8, 0x40, 0x19, 0x00, 0x91, 0x58, -+0x90, 0x60, 0x82, 0x90, 0x20, 0x36, 0xab, 0xa4, 0x35, 0x48, 0x36, 0xaa, -+0x90, 0xc0, 0x80, 0x90, 0x90, 0x90, 0x48, 0xc9, 0xe1, 0xb9, 0x00, 0x85, -+0x36, 0xe3, 0xc9, 0xe1, 0xb8, 0x40, 0x85, 0x36, 0xe0, 0x80, 0x36, 0xdf, -+0x10, 0x10, 0x81, 0x36, 0xbb, 0x90, 0xa8, 0x10, 0x10, 0x90, 0x28, 0x81, -+0x36, 0xd9, 0x90, 0x38, 0xa4, 0x36, 0xa0, 0x36, 0xd5, 0xa4, 0x36, 0x90, -+0x36, 0xd3, 0x90, 0x70, 0x10, 0x10, 0x90, 0x38, 0xa4, 0x36, 0xb8, 0x36, -+0xd8, 0x80, 0x36, 0xd6, 0x90, 0x60, 0x90, 0x28, 0x24, 0x36, 0xf0, 0xa4, -+0x36, 0xe0, 0x36, 0xdd, 0x80, 0xa4, 0x36, 0xd0, 0x36, 0xdb, 0x80, 0x90, -+0xf8, 0x90, 0x90, 0x90, 0x50, 0x90, 0x28, 0x80, 0x37, 0xf7, 0x80, 0x37, -+0xfe, 0x80, 0xa4, 0x3f, 0xe0, 0x37, 0xfd, 0x90, 0x28, 0x81, 0x37, 0xfb, -+0x80, 0xa4, 0x3f, 0xc8, 0x37, 0xfa, 0x83, 0x37, 0xf8, 0x98, 0xe8, 0x01, -+0xb0, 0x90, 0x88, 0x90, 0x60, 0xa4, 0x35, 0x38, 0x10, 0x10, 0x10, 0x10, -+0x83, 0x33, 0xb7, 0x24, 0x35, 0x30, 0x90, 0x28, 0x24, 0x35, 0x28, 0x24, -+0x35, 0x20, 0x90, 0x88, 0x90, 0x60, 0xa4, 0x35, 0x10, 0x10, 0x10, 0x10, -+0x10, 0x83, 0x33, 0xb6, 0x24, 0x35, 0x08, 0x90, 0x28, 0x24, 0x35, 0x00, -+0x24, 0x34, 0xf8, 0xa8, 0x09, 0x00, 0x0e, 0x20, 0x96, 0x48, 0x95, 0xe8, -+0x93, 0x38, 0x91, 0xa0, 0x90, 0xd0, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x1e, -+0x60, 0x33, 0xcd, 0xa4, 0x1e, 0x50, 0x33, 0xcb, 0x90, 0x38, 0xa4, 0x1e, -+0x40, 0x33, 0xc9, 0x80, 0x33, 0xc7, 0x90, 0x60, 0x90, 0x28, 0x24, 0x1e, -+0x00, 0xa4, 0x1d, 0xf0, 0x33, 0xbf, 0x90, 0x38, 0xa4, 0x1d, 0xe0, 0x33, -+0xbd, 0xa4, 0x1e, 0x28, 0x33, 0xc6, 0x90, 0xe0, 0x90, 
0x70, 0x90, 0x38, -+0xa4, 0x1e, 0x18, 0x33, 0xc4, 0xa4, 0x1e, 0x08, 0x33, 0xc2, 0x90, 0x38, -+0xa4, 0x34, 0xb0, 0x36, 0x9c, 0xa4, 0x34, 0x50, 0x36, 0x90, 0x90, 0x70, -+0x90, 0x38, 0xa4, 0x31, 0x90, 0x36, 0x3e, 0xa4, 0x31, 0x60, 0x36, 0x38, -+0x10, 0x10, 0xa4, 0x1d, 0xd0, 0x33, 0xbb, 0x99, 0x60, 0x02, 0x70, 0x90, -+0x90, 0x90, 0x50, 0x90, 0x28, 0x24, 0x1e, 0x90, 0x80, 0x33, 0xda, 0x80, -+0xa4, 0x1e, 0x98, 0x33, 0xd8, 0x90, 0x50, 0x90, 0x28, 0x24, 0x1e, 0xa0, -+0x80, 0x33, 0xdb, 0x90, 0x38, 0xa4, 0x1e, 0xa8, 0x33, 0xd9, 0xa4, 0x1e, -+0x70, 0x33, 0xcf, 0x90, 0xe0, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x33, 0xe8, -+0x36, 0x85, 0xa4, 0x33, 0x48, 0x36, 0x72, 0x90, 0x38, 0xa4, 0x32, 0xe0, -+0x36, 0x63, 0xa4, 0x32, 0x50, 0x36, 0x52, 0x81, 0xa4, 0x1e, 0x80, 0x33, -+0xd1, 0xe4, 0xa1, 0xfc, 0x40, 0x37, 0xf3, 0x18, 0x24, 0x1d, 0xc8, 0xe4, -+0xe1, 0xfa, 0xc0, 0x37, 0xed, 0x92, 0x40, 0x91, 0x08, 0x10, 0x10, 0x90, -+0x80, 0x10, 0x10, 0x90, 0x38, 0xa4, 0x34, 0xa8, 0x36, 0x9b, 0xa4, 0x34, -+0x48, 0x36, 0x8f, 0x80, 0x90, 0x38, 0xa4, 0x31, 0x88, 0x36, 0x3d, 0xa4, -+0x31, 0x58, 0x36, 0x37, 0x18, 0x20, 0x00, 0xf8, 0x80, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x33, 0xd8, 0x36, 0x84, 0xa4, 0x33, 0x40, 0x36, 0x70, 0x90, -+0x38, 0xa4, 0x32, 0xd0, 0x36, 0x62, 0xa4, 0x32, 0x48, 0x36, 0x50, 0xe4, -+0xa1, 0xf9, 0x40, 0x37, 0xe7, 0x18, 0x24, 0x1d, 0xc0, 0xe4, 0xe1, 0xf7, -+0xc0, 0x37, 0xe1, 0x92, 0x90, 0x92, 0x40, 0x91, 0x08, 0x10, 0x10, 0x90, -+0x80, 0x10, 0x10, 0x90, 0x38, 0xa4, 0x34, 0xa0, 0x36, 0x9a, 0xa4, 0x34, -+0x40, 0x36, 0x8e, 0x80, 0x90, 0x38, 0xa4, 0x31, 0x80, 0x36, 0x3c, 0xa4, -+0x31, 0x50, 0x36, 0x36, 0x18, 0x20, 0x00, 0xf8, 0x80, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x33, 0xc8, 0x36, 0x83, 0xa4, 0x33, 0x38, 0x36, 0x6e, 0x90, -+0x38, 0xa4, 0x32, 0xc0, 0x36, 0x61, 0xa4, 0x32, 0x40, 0x36, 0x4e, 0xe4, -+0xa1, 0xfc, 0x80, 0x37, 0xf5, 0x10, 0x10, 0xe4, 0xe1, 0xfb, 0x00, 0x37, -+0xef, 0x92, 0x50, 0x99, 0x1c, 0x1e, 0xb0, 0x10, 0x10, 0x90, 0x80, 0x10, -+0x10, 0x90, 0x38, 0xa4, 0x34, 0x98, 0x36, 0x99, 0xa4, 0x34, 0x38, 0x36, -+0x8d, 0x80, 0x90, 0x38, 0xa4, 0x31, 0x78, 0x36, 0x3b, 0xa4, 0x31, 0x48, -+0x36, 0x35, 0x18, 0x20, 0x00, 0xf8, 0x80, 0x90, 0x70, 0x90, 0x38, 0xa4, -+0x33, 0xb8, 0x36, 0x82, 0xa4, 0x33, 0x30, 0x36, 0x6c, 0x90, 0x38, 0xa4, -+0x32, 0xb0, 0x36, 0x60, 0xa4, 0x32, 0x38, 0x36, 0x4c, 0xe4, 0xa1, 0xf9, -+0x80, 0x37, 0xe9, 0x10, 0x10, 0xe4, 0xe1, 0xf8, 0x00, 0x37, 0xe3, 0xc0, -+0x40, 0x80, 0x10, 0x10, 0x81, 0x90, 0x90, 0x90, 0x48, 0xc9, 0xe1, 0x90, -+0x80, 0x85, 0x36, 0x46, 0xc9, 0xe1, 0x91, 0x00, 0x85, 0x36, 0x43, 0x80, -+0x36, 0x41, 0x80, 0xd8, 0x47, 0x80, 0x0d, 0xc0, 0xc0, 0x80, 0x10, 0x10, -+0x82, 0x90, 0x58, 0xd5, 0x81, 0x80, 0x80, 0x37, 0xdd, 0x80, 0x37, 0xdb, -+0xd5, 0x81, 0x80, 0x80, 0x37, 0xd9, 0x80, 0x37, 0xd7, 0xc0, 0x80, 0x10, -+0x10, 0x82, 0x90, 0x58, 0xd5, 0x81, 0x80, 0x80, 0x37, 0xde, 0x80, 0x37, -+0xdc, 0xd5, 0x81, 0x80, 0x80, 0x37, 0xda, 0x80, 0x37, 0xd8, 0xc0, 0x80, -+0x83, 0xa4, 0x3e, 0xa8, 0x37, 0xd6, 0xa0, 0x57, 0xc0, 0xa0, 0x41, 0xe0, -+0xa8, 0x1e, 0xb0, 0x34, 0x88, 0xa0, 0x12, 0x38, 0xa0, 0x0b, 0x48, 0x96, -+0x00, 0x9a, 0xf0, 0x05, 0xc0, 0x91, 0x70, 0x90, 0xb8, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x15, 0x58, 0x33, 0xb5, 0xa4, 0x15, 0x78, 0x33, 0xb4, 0x10, -+0x10, 0xa4, 0x15, 0x68, 0x33, 0xb3, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x14, -+0xf8, 0x33, 0x9a, 0xa4, 0x15, 0x18, 0x33, 0x99, 0x10, 0x10, 0xa4, 0x15, -+0x08, 0x33, 0x98, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x14, 0x98, -+0x33, 0x7f, 0xa4, 0x14, 0xb8, 0x33, 0x7e, 0x10, 0x10, 0xa4, 0x14, 0xa8, -+0x33, 0x7d, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x14, 0x38, 
0x33, 0x63, 0xa4, -+0x14, 0x58, 0x33, 0x62, 0x10, 0x10, 0xa4, 0x14, 0x48, 0x33, 0x61, 0x91, -+0x70, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x15, 0x28, 0x33, 0xb0, -+0xa4, 0x15, 0x48, 0x33, 0xb2, 0x10, 0x10, 0xa4, 0x15, 0x38, 0x33, 0xb1, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x14, 0xc8, 0x33, 0x95, 0xa4, 0x14, 0xe8, -+0x33, 0x97, 0x10, 0x10, 0xa4, 0x14, 0xd8, 0x33, 0x96, 0x90, 0xb8, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x14, 0x68, 0x33, 0x7a, 0xa4, 0x14, 0x88, 0x33, -+0x7c, 0x10, 0x10, 0xa4, 0x14, 0x78, 0x33, 0x7b, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x14, 0x08, 0x33, 0x5e, 0xa4, 0x14, 0x28, 0x33, 0x60, 0x10, 0x10, -+0xa4, 0x14, 0x18, 0x33, 0x5f, 0xe4, 0xe1, 0x83, 0x40, 0x36, 0x21, 0x9a, -+0xf0, 0x05, 0x00, 0x91, 0x70, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, -+0x13, 0xa0, 0x33, 0xad, 0xa4, 0x13, 0x98, 0x33, 0xaf, 0x10, 0x10, 0xa4, -+0x13, 0x90, 0x33, 0xae, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x13, 0x88, 0x33, -+0x92, 0xa4, 0x13, 0x80, 0x33, 0x94, 0x10, 0x10, 0xa4, 0x13, 0x78, 0x33, -+0x93, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x13, 0x70, 0x33, 0x77, -+0xa4, 0x13, 0x68, 0x33, 0x79, 0x10, 0x10, 0xa4, 0x13, 0x60, 0x33, 0x78, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x13, 0x58, 0x33, 0x5b, 0xa4, 0x13, 0x50, -+0x33, 0x5d, 0x10, 0x10, 0xa4, 0x13, 0x48, 0x33, 0x5c, 0x91, 0x10, 0x90, -+0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0xaa, 0x80, 0x33, 0xac, 0x10, -+0x10, 0x80, 0x33, 0xab, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x8f, 0x80, -+0x33, 0x91, 0x10, 0x10, 0x80, 0x33, 0x90, 0x90, 0x88, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x33, 0x74, 0x80, 0x33, 0x76, 0x10, 0x10, 0x80, 0x33, 0x75, -+0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x58, 0x80, 0x33, 0x5a, 0x10, 0x10, -+0x80, 0x33, 0x59, 0xe4, 0xe1, 0x5e, 0x40, 0x35, 0xa1, 0x95, 0x40, 0x9a, -+0x90, 0x05, 0x00, 0x91, 0x10, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, -+0x33, 0xa7, 0x80, 0x33, 0xa9, 0x10, 0x10, 0x80, 0x33, 0xa8, 0x90, 0x50, -+0x90, 0x28, 0x80, 0x33, 0x8c, 0x80, 0x33, 0x8e, 0x10, 0x10, 0x80, 0x33, -+0x8d, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x13, 0x30, 0x33, 0x71, -+0xa4, 0x13, 0x40, 0x33, 0x73, 0x10, 0x10, 0xa4, 0x13, 0x38, 0x33, 0x72, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x13, 0x00, 0x33, 0x55, 0xa4, 0x13, 0x10, -+0x33, 0x57, 0x10, 0x10, 0xa4, 0x13, 0x08, 0x33, 0x56, 0x91, 0x10, 0x90, -+0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0xa4, 0x80, 0x33, 0xa6, 0x10, -+0x10, 0x80, 0x33, 0xa5, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x89, 0x80, -+0x33, 0x8b, 0x10, 0x10, 0x80, 0x33, 0x8a, 0x90, 0xb8, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x13, 0x18, 0x33, 0x6e, 0xa4, 0x13, 0x28, 0x33, 0x70, 0x10, -+0x10, 0xa4, 0x13, 0x20, 0x33, 0x6f, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x12, -+0xe8, 0x33, 0x52, 0xa4, 0x12, 0xf8, 0x33, 0x54, 0x10, 0x10, 0xa4, 0x12, -+0xf0, 0x33, 0x53, 0xe4, 0xe1, 0x82, 0x40, 0x36, 0x1d, 0x98, 0xb8, 0x01, -+0x68, 0x10, 0x10, 0x10, 0x10, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x4f, -+0x80, 0x33, 0x51, 0x10, 0x10, 0x80, 0x33, 0x50, 0x90, 0x60, 0x90, 0x30, -+0x60, 0xa0, 0x97, 0x00, 0x60, 0xa0, 0x96, 0xc0, 0x90, 0x30, 0x60, 0xa0, -+0x96, 0x80, 0x60, 0xa0, 0x96, 0x40, 0xe4, 0xe1, 0x5c, 0x40, 0x35, 0x99, -+0xa0, 0x08, 0x08, 0x94, 0xe0, 0x9a, 0x60, 0x04, 0xa0, 0x91, 0x40, 0x90, -+0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x13, 0xd8, 0x33, 0x9e, 0xa4, 0x13, -+0xf8, 0x33, 0xa3, 0x10, 0x10, 0xa4, 0x13, 0xe8, 0x33, 0xa2, 0x90, 0x50, -+0x90, 0x28, 0x80, 0x33, 0x83, 0x80, 0x33, 0x88, 0x10, 0x10, 0x80, 0x33, -+0x87, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x68, 0x80, 0x33, -+0x6d, 0x10, 0x10, 0x80, 0x33, 0x6c, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, -+0x49, 0x80, 0x33, 0x4e, 0x10, 0x10, 0x80, 0x33, 0x4d, 
0x91, 0x40, 0x90, -+0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x13, 0xa8, 0x33, 0x9b, 0xa4, 0x13, -+0xc8, 0x33, 0x9d, 0x10, 0x10, 0xa4, 0x13, 0xb8, 0x33, 0x9c, 0x90, 0x50, -+0x90, 0x28, 0x80, 0x33, 0x80, 0x80, 0x33, 0x82, 0x10, 0x10, 0x80, 0x33, -+0x81, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x65, 0x80, 0x33, -+0x67, 0x10, 0x10, 0x80, 0x33, 0x66, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, -+0x46, 0x80, 0x33, 0x48, 0x10, 0x10, 0x80, 0x33, 0x47, 0xe4, 0xe1, 0x81, -+0x40, 0x36, 0x19, 0x9a, 0x60, 0x02, 0xe0, 0x91, 0x40, 0x90, 0xb8, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x1a, 0x20, 0x33, 0x9f, 0xa4, 0x1a, 0x10, 0x33, -+0xa1, 0x10, 0x10, 0xa4, 0x1a, 0x00, 0x33, 0xa0, 0x90, 0x50, 0x90, 0x28, -+0x80, 0x33, 0x84, 0x80, 0x33, 0x86, 0x10, 0x10, 0x80, 0x33, 0x85, 0x90, -+0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x69, 0x80, 0x33, 0x6b, 0x10, -+0x10, 0x80, 0x33, 0x6a, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x4a, 0x80, -+0x33, 0x4c, 0x10, 0x10, 0x80, 0x33, 0x4b, 0x81, 0x90, 0x50, 0x90, 0x28, -+0x24, 0x19, 0xd0, 0x24, 0x19, 0xf0, 0x10, 0x10, 0x24, 0x19, 0xe0, 0xe4, -+0xe1, 0x5a, 0x40, 0x35, 0x91, 0x93, 0x90, 0x99, 0xb8, 0x03, 0x50, 0x90, -+0xe8, 0x90, 0x88, 0x90, 0x40, 0x80, 0xa4, 0x15, 0xb8, 0x32, 0xca, 0x10, -+0x10, 0xa4, 0x15, 0xa8, 0x32, 0xc9, 0x90, 0x28, 0x81, 0x32, 0xc6, 0x10, -+0x10, 0x80, 0x32, 0xc5, 0x90, 0x60, 0x90, 0x28, 0x81, 0x32, 0xc2, 0x10, -+0x10, 0x80, 0x32, 0xc1, 0x90, 0x28, 0x81, 0x32, 0xbe, 0x10, 0x10, 0x80, -+0x32, 0xbd, 0x90, 0xe8, 0x90, 0x88, 0x90, 0x40, 0x80, 0xa4, 0x15, 0x88, -+0x32, 0xc7, 0x10, 0x10, 0xa4, 0x15, 0x98, 0x32, 0xc8, 0x90, 0x28, 0x81, -+0x32, 0xc3, 0x10, 0x10, 0x80, 0x32, 0xc4, 0x90, 0x60, 0x90, 0x28, 0x81, -+0x32, 0xbf, 0x10, 0x10, 0x80, 0x32, 0xc0, 0x90, 0x28, 0x81, 0x32, 0xbb, -+0x10, 0x10, 0x80, 0x32, 0xbc, 0xe4, 0xe1, 0x80, 0x40, 0x36, 0x15, 0x88, -+0x00, 0x88, 0x10, 0x10, 0x10, 0x10, 0x90, 0x28, 0x81, 0x32, 0xb9, 0x10, -+0x10, 0x80, 0x32, 0xba, 0xe4, 0xe1, 0x58, 0x40, 0x35, 0x89, 0xa0, 0x0e, -+0x80, 0xa0, 0x09, 0x08, 0x94, 0x80, 0x9a, 0x30, 0x04, 0x40, 0x91, 0x10, -+0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x39, 0x80, 0x33, 0x38, -+0x10, 0x10, 0x80, 0x33, 0x37, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x1e, -+0x80, 0x33, 0x1d, 0x10, 0x10, 0x80, 0x33, 0x1c, 0x90, 0x88, 0x90, 0x50, -+0x90, 0x28, 0x80, 0x33, 0x03, 0x80, 0x33, 0x02, 0x10, 0x10, 0x80, 0x33, -+0x01, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xe8, 0x80, 0x32, 0xe7, 0x10, -+0x10, 0x80, 0x32, 0xe6, 0x91, 0x10, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, -+0x80, 0x33, 0x34, 0x80, 0x33, 0x36, 0x10, 0x10, 0x80, 0x33, 0x35, 0x90, -+0x50, 0x90, 0x28, 0x80, 0x33, 0x19, 0x80, 0x33, 0x1b, 0x10, 0x10, 0x80, -+0x33, 0x1a, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xfe, 0x80, -+0x33, 0x00, 0x10, 0x10, 0x80, 0x32, 0xff, 0x90, 0x50, 0x90, 0x28, 0x80, -+0x32, 0xe3, 0x80, 0x32, 0xe5, 0x10, 0x10, 0x80, 0x32, 0xe4, 0xe4, 0xe1, -+0x72, 0x40, 0x35, 0xf1, 0x9a, 0x30, 0x04, 0x40, 0x91, 0x10, 0x90, 0x88, -+0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x31, 0x80, 0x33, 0x33, 0x10, 0x10, -+0x80, 0x33, 0x32, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x16, 0x80, 0x33, -+0x18, 0x10, 0x10, 0x80, 0x33, 0x17, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, -+0x80, 0x32, 0xfb, 0x80, 0x32, 0xfd, 0x10, 0x10, 0x80, 0x32, 0xfc, 0x90, -+0x50, 0x90, 0x28, 0x80, 0x32, 0xe0, 0x80, 0x32, 0xe2, 0x10, 0x10, 0x80, -+0x32, 0xe1, 0x91, 0x10, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, -+0x2e, 0x80, 0x33, 0x30, 0x10, 0x10, 0x80, 0x33, 0x2f, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x33, 0x13, 0x80, 0x33, 0x15, 0x10, 0x10, 0x80, 0x33, 0x14, -+0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xf8, 
0x80, 0x32, 0xfa, -+0x10, 0x10, 0x80, 0x32, 0xf9, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xdd, -+0x80, 0x32, 0xdf, 0x10, 0x10, 0x80, 0x32, 0xde, 0xe4, 0xe1, 0x51, 0x40, -+0x35, 0x59, 0x94, 0x80, 0x9a, 0x30, 0x04, 0x40, 0x91, 0x10, 0x90, 0x88, -+0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x2b, 0x80, 0x33, 0x2d, 0x10, 0x10, -+0x80, 0x33, 0x2c, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x10, 0x80, 0x33, -+0x12, 0x10, 0x10, 0x80, 0x33, 0x11, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, -+0x80, 0x32, 0xf5, 0x80, 0x32, 0xf7, 0x10, 0x10, 0x80, 0x32, 0xf6, 0x90, -+0x50, 0x90, 0x28, 0x80, 0x32, 0xda, 0x80, 0x32, 0xdc, 0x10, 0x10, 0x80, -+0x32, 0xdb, 0x91, 0x10, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, -+0x28, 0x80, 0x33, 0x2a, 0x10, 0x10, 0x80, 0x33, 0x29, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x33, 0x0d, 0x80, 0x33, 0x0f, 0x10, 0x10, 0x80, 0x33, 0x0e, -+0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xf2, 0x80, 0x32, 0xf4, -+0x10, 0x10, 0x80, 0x32, 0xf3, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xd7, -+0x80, 0x32, 0xd9, 0x10, 0x10, 0x80, 0x32, 0xd8, 0xe4, 0xe1, 0x70, 0x40, -+0x35, 0xe9, 0x88, 0x00, 0xb0, 0x10, 0x10, 0x10, 0x10, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x32, 0xd4, 0x80, 0x32, 0xd6, 0x10, 0x10, 0x80, 0x32, 0xd5, -+0xe4, 0xe1, 0x50, 0x40, 0x35, 0x55, 0x96, 0xe8, 0x94, 0x80, 0x9a, 0x30, -+0x04, 0x40, 0x91, 0x10, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, -+0x22, 0x80, 0x33, 0x27, 0x10, 0x10, 0x80, 0x33, 0x26, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x33, 0x07, 0x80, 0x33, 0x0c, 0x10, 0x10, 0x80, 0x33, 0x0b, -+0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xec, 0x80, 0x32, 0xf1, -+0x10, 0x10, 0x80, 0x32, 0xf0, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xce, -+0x80, 0x32, 0xd3, 0x10, 0x10, 0x80, 0x32, 0xd2, 0x91, 0x10, 0x90, 0x88, -+0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x1f, 0x80, 0x33, 0x21, 0x10, 0x10, -+0x80, 0x33, 0x20, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x04, 0x80, 0x33, -+0x06, 0x10, 0x10, 0x80, 0x33, 0x05, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, -+0x80, 0x32, 0xe9, 0x80, 0x32, 0xeb, 0x10, 0x10, 0x80, 0x32, 0xea, 0x90, -+0x50, 0x90, 0x28, 0x80, 0x32, 0xcb, 0x80, 0x32, 0xcd, 0x10, 0x10, 0x80, -+0x32, 0xcc, 0xe4, 0xe1, 0x6e, 0x40, 0x35, 0xe1, 0x88, 0x02, 0x28, 0x91, -+0x10, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0x23, 0x80, 0x33, -+0x25, 0x10, 0x10, 0x80, 0x33, 0x24, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, -+0x08, 0x80, 0x33, 0x0a, 0x10, 0x10, 0x80, 0x33, 0x09, 0x90, 0x88, 0x90, -+0x50, 0x90, 0x28, 0x80, 0x32, 0xed, 0x80, 0x32, 0xef, 0x10, 0x10, 0x80, -+0x32, 0xee, 0x90, 0x50, 0x90, 0x28, 0x80, 0x32, 0xcf, 0x80, 0x32, 0xd1, -+0x10, 0x10, 0x80, 0x32, 0xd0, 0xe4, 0xe1, 0x4f, 0x40, 0x35, 0x51, 0x90, -+0x40, 0xe5, 0x21, 0x6c, 0x40, 0x35, 0xd9, 0xe5, 0x21, 0x4e, 0x40, 0x35, -+0x4d, 0x9e, 0xb4, 0x22, 0xe8, 0x93, 0x70, 0x91, 0xd8, 0xd5, 0x07, 0x80, -+0xd0, 0xc4, 0x40, 0x90, 0x48, 0x80, 0x8c, 0x3e, 0x38, 0x84, 0x37, 0xd1, -+0xa4, 0x3c, 0x18, 0x37, 0x9b, 0x90, 0x28, 0x24, 0x3b, 0x58, 0xa4, 0x39, -+0xd8, 0x37, 0x53, 0xd0, 0xc4, 0x40, 0x90, 0x48, 0x80, 0x8c, 0x3e, 0x18, -+0x84, 0x37, 0xcf, 0xa4, 0x3c, 0x08, 0x37, 0x99, 0x90, 0x28, 0x24, 0x3b, -+0x48, 0xa4, 0x39, 0xc8, 0x37, 0x51, 0xd5, 0x06, 0x80, 0xd0, 0xc3, 0x40, -+0x90, 0x28, 0x80, 0x37, 0xbb, 0xa4, 0x3b, 0xe8, 0x37, 0x95, 0x90, 0x28, -+0x24, 0x3b, 0x28, 0xa4, 0x39, 0xa8, 0x37, 0x4d, 0xd0, 0xc3, 0x40, 0x90, -+0x28, 0x80, 0x37, 0xb7, 0xa4, 0x3b, 0xd8, 0x37, 0x93, 0x90, 0x28, 0x24, -+0x3b, 0x18, 0xa4, 0x39, 0x98, 0x37, 0x4b, 0x91, 0x98, 0xd5, 0x06, 0x80, -+0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, 0x37, 0xaf, 0xa4, 0x3b, 0xb8, 0x37, -+0x8f, 0x90, 0x28, 0x24, 0x3a, 0xf8, 0xa4, 0x39, 0x78, 
0x37, 0x47, 0xd0, -+0xc3, 0x40, 0x90, 0x28, 0x80, 0x37, 0xab, 0xa4, 0x3b, 0xa8, 0x37, 0x8d, -+0x90, 0x28, 0x24, 0x3a, 0xe8, 0xa4, 0x39, 0x68, 0x37, 0x45, 0xd5, 0x06, -+0x80, 0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, 0x37, 0xa3, 0xa4, 0x3b, 0x88, -+0x37, 0x89, 0x90, 0x28, 0x24, 0x3a, 0xc8, 0xa4, 0x39, 0x48, 0x37, 0x41, -+0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, 0x37, 0x9f, 0xa4, 0x3b, 0x78, 0x37, -+0x87, 0x90, 0x28, 0x24, 0x3a, 0xb8, 0xa4, 0x39, 0x38, 0x37, 0x3f, 0x93, -+0x70, 0x91, 0xd8, 0xd5, 0x07, 0x80, 0xd0, 0xc4, 0x40, 0x90, 0x48, 0x80, -+0x8c, 0x3e, 0x58, 0x84, 0x37, 0xd3, 0xa4, 0x3c, 0x28, 0x37, 0x9d, 0x90, -+0x28, 0x24, 0x3b, 0x68, 0xa4, 0x39, 0xe8, 0x37, 0x55, 0xd0, 0xc4, 0x40, -+0x90, 0x48, 0x80, 0x8c, 0x3e, 0x28, 0x84, 0x37, 0xd0, 0xa4, 0x3c, 0x10, -+0x37, 0x9a, 0x90, 0x28, 0x24, 0x3b, 0x50, 0xa4, 0x39, 0xd0, 0x37, 0x52, -+0xd5, 0x06, 0x80, 0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, 0x37, 0xbf, 0xa4, -+0x3b, 0xf8, 0x37, 0x97, 0x90, 0x28, 0x24, 0x3b, 0x38, 0xa4, 0x39, 0xb8, -+0x37, 0x4f, 0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, 0x37, 0xb9, 0xa4, 0x3b, -+0xe0, 0x37, 0x94, 0x90, 0x28, 0x24, 0x3b, 0x20, 0xa4, 0x39, 0xa0, 0x37, -+0x4c, 0x91, 0x98, 0xd5, 0x06, 0x80, 0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, -+0x37, 0xb3, 0xa4, 0x3b, 0xc8, 0x37, 0x91, 0x90, 0x28, 0x24, 0x3b, 0x08, -+0xa4, 0x39, 0x88, 0x37, 0x49, 0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, 0x37, -+0xad, 0xa4, 0x3b, 0xb0, 0x37, 0x8e, 0x90, 0x28, 0x24, 0x3a, 0xf0, 0xa4, -+0x39, 0x70, 0x37, 0x46, 0xd5, 0x06, 0x80, 0xd0, 0xc3, 0x40, 0x90, 0x28, -+0x80, 0x37, 0xa7, 0xa4, 0x3b, 0x98, 0x37, 0x8b, 0x90, 0x28, 0x24, 0x3a, -+0xd8, 0xa4, 0x39, 0x58, 0x37, 0x43, 0xd0, 0xc3, 0x40, 0x90, 0x28, 0x80, -+0x37, 0xa1, 0xa4, 0x3b, 0x80, 0x37, 0x88, 0x90, 0x28, 0x24, 0x3a, 0xc0, -+0xa4, 0x39, 0x40, 0x37, 0x40, 0x99, 0x08, 0x01, 0xf0, 0x81, 0x90, 0x78, -+0xd4, 0xc2, 0x00, 0xa4, 0x22, 0x80, 0x34, 0x40, 0xa4, 0x21, 0x80, 0x34, -+0x20, 0xd4, 0xc2, 0x00, 0xa4, 0x21, 0xa0, 0x34, 0x44, 0xa4, 0x20, 0xa0, -+0x34, 0x24, 0x81, 0x90, 0x78, 0xd4, 0xc2, 0x00, 0xa4, 0x21, 0xe0, 0x34, -+0x4c, 0xa4, 0x20, 0xe0, 0x34, 0x2c, 0xd4, 0xc2, 0x00, 0xa4, 0x21, 0xc0, -+0x34, 0x48, 0xa4, 0x20, 0xc0, 0x34, 0x28, 0xa8, 0x0b, 0x18, 0x13, 0xa8, -+0x96, 0x80, 0x93, 0x40, 0x99, 0x90, 0x03, 0x00, 0x90, 0xc0, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x12, 0xb8, 0x32, 0x58, 0x24, 0x12, 0xb0, 0x90, 0x38, -+0xa4, 0x11, 0xe0, 0x32, 0x3d, 0x24, 0x11, 0xd8, 0x90, 0x60, 0x90, 0x38, -+0xa4, 0x11, 0x08, 0x32, 0x22, 0x24, 0x11, 0x00, 0x90, 0x38, 0xa4, 0x10, -+0x30, 0x32, 0x07, 0x24, 0x10, 0x28, 0x90, 0xc0, 0x90, 0x60, 0x90, 0x38, -+0xa4, 0x12, 0xa8, 0x32, 0x53, 0x24, 0x12, 0xa0, 0x90, 0x38, 0xa4, 0x11, -+0xd0, 0x32, 0x38, 0x24, 0x11, 0xc8, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x10, -+0xf8, 0x32, 0x1d, 0x24, 0x10, 0xf0, 0x90, 0x38, 0xa4, 0x10, 0x20, 0x32, -+0x02, 0x24, 0x10, 0x18, 0xe4, 0xe1, 0xc8, 0x40, 0x37, 0x23, 0x99, 0x90, -+0x03, 0x00, 0x90, 0xc0, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x12, 0x90, 0x32, -+0x50, 0x24, 0x12, 0x88, 0x90, 0x38, 0xa4, 0x11, 0xb8, 0x32, 0x35, 0x24, -+0x11, 0xb0, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x10, 0xe0, 0x32, 0x1a, 0x24, -+0x10, 0xd8, 0x90, 0x38, 0xa4, 0x10, 0x08, 0x31, 0xff, 0x24, 0x10, 0x00, -+0x90, 0xc0, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x12, 0x78, 0x32, 0x4d, 0x24, -+0x12, 0x70, 0x90, 0x38, 0xa4, 0x11, 0xa0, 0x32, 0x32, 0x24, 0x11, 0x98, -+0x90, 0x60, 0x90, 0x38, 0xa4, 0x10, 0xc8, 0x32, 0x17, 0x24, 0x10, 0xc0, -+0x90, 0x38, 0xa4, 0x0f, 0xf0, 0x31, 0xfc, 0x24, 0x0f, 0xe8, 0xe4, 0xe1, -+0xc6, 0xc0, 0x37, 0x1d, 0x93, 0x78, 0x99, 0x90, 0x03, 0x00, 0x90, 0xc0, -+0x90, 0x60, 0x90, 0x38, 0xa4, 0x12, 0x60, 0x32, 0x4a, 
0x24, 0x12, 0x58, -+0x90, 0x38, 0xa4, 0x11, 0x88, 0x32, 0x2f, 0x24, 0x11, 0x80, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x10, 0xb0, 0x32, 0x14, 0x24, 0x10, 0xa8, 0x90, 0x38, -+0xa4, 0x0f, 0xd8, 0x31, 0xf9, 0x24, 0x0f, 0xd0, 0x90, 0xc0, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x12, 0x48, 0x32, 0x47, 0x24, 0x12, 0x40, 0x90, 0x38, -+0xa4, 0x11, 0x70, 0x32, 0x2c, 0x24, 0x11, 0x68, 0x90, 0x60, 0x90, 0x38, -+0xa4, 0x10, 0x98, 0x32, 0x11, 0x24, 0x10, 0x90, 0x90, 0x38, 0xa4, 0x0f, -+0xc0, 0x31, 0xf6, 0x24, 0x0f, 0xb8, 0xec, 0xa1, 0x16, 0x00, 0x02, 0x00, -+0x34, 0x5a, 0xa4, 0x38, 0xa8, 0x37, 0x17, 0x88, 0x00, 0x88, 0x10, 0x10, -+0x10, 0x10, 0x90, 0x38, 0xa4, 0x0f, 0xa8, 0x31, 0xf3, 0x24, 0x0f, 0xa0, -+0xe9, 0x61, 0x15, 0x40, 0x02, 0x00, 0x34, 0x56, 0xe3, 0x61, 0xc3, 0xc0, -+0x37, 0x11, 0x95, 0x08, 0x93, 0x40, 0x99, 0x90, 0x03, 0x00, 0x90, 0xc0, -+0x90, 0x60, 0x90, 0x38, 0xa4, 0x12, 0x30, 0x32, 0x41, 0x24, 0x12, 0x28, -+0x90, 0x38, 0xa4, 0x11, 0x58, 0x32, 0x26, 0x24, 0x11, 0x50, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x10, 0x80, 0x32, 0x0b, 0x24, 0x10, 0x78, 0x90, 0x38, -+0xa4, 0x0f, 0x90, 0x31, 0xed, 0x24, 0x0f, 0x88, 0x90, 0xc0, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x12, 0x00, 0x32, 0x3e, 0x24, 0x11, 0xf8, 0x90, 0x38, -+0xa4, 0x11, 0x28, 0x32, 0x23, 0x24, 0x11, 0x20, 0x90, 0x60, 0x90, 0x38, -+0xa4, 0x10, 0x50, 0x32, 0x08, 0x24, 0x10, 0x48, 0x90, 0x38, 0xa4, 0x0f, -+0x60, 0x31, 0xea, 0x24, 0x0f, 0x58, 0xe4, 0xe1, 0xc8, 0x80, 0x37, 0x25, -+0x88, 0x01, 0x88, 0x90, 0xc0, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x12, 0x20, -+0x32, 0x42, 0x24, 0x12, 0x18, 0x90, 0x38, 0xa4, 0x11, 0x48, 0x32, 0x27, -+0x24, 0x11, 0x40, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x10, 0x70, 0x32, 0x0c, -+0x24, 0x10, 0x68, 0x90, 0x38, 0xa4, 0x0f, 0x80, 0x31, 0xee, 0x24, 0x0f, -+0x78, 0xe4, 0xe1, 0xc7, 0x00, 0x37, 0x1f, 0x92, 0xd0, 0x99, 0x50, 0x02, -+0x80, 0x90, 0xa0, 0x90, 0x50, 0x90, 0x28, 0x80, 0x31, 0xe9, 0x24, 0x0f, -+0x40, 0x90, 0x28, 0x80, 0x31, 0xe5, 0x24, 0x0f, 0x20, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x31, 0xe1, 0x24, 0x0f, 0x00, 0x90, 0x28, 0x80, 0x31, 0xdd, -+0x24, 0x0e, 0xe0, 0x90, 0xa0, 0x90, 0x50, 0x90, 0x28, 0x80, 0x31, 0xe6, -+0x24, 0x0f, 0x38, 0x90, 0x28, 0x80, 0x31, 0xe2, 0x24, 0x0f, 0x18, 0x90, -+0x50, 0x90, 0x28, 0x80, 0x31, 0xde, 0x24, 0x0e, 0xf8, 0x90, 0x28, 0x80, -+0x31, 0xda, 0x24, 0x0e, 0xd8, 0xec, 0xe1, 0xc5, 0xa1, 0x17, 0x00, 0x37, -+0x19, 0x88, 0x00, 0x78, 0x10, 0x10, 0x10, 0x10, 0x90, 0x28, 0x80, 0x31, -+0xd8, 0x24, 0x0e, 0xc8, 0xec, 0xe1, 0xc4, 0x21, 0x15, 0x00, 0x37, 0x13, -+0xe5, 0xa1, 0x4d, 0x40, 0x35, 0x31, 0xa0, 0x2a, 0x10, 0xa8, 0x16, 0x60, -+0x29, 0xd8, 0xa0, 0x0c, 0x48, 0xa0, 0x0a, 0xc8, 0x95, 0x60, 0x92, 0xb0, -+0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x31, 0xa1, 0x80, -+0x31, 0xa0, 0x10, 0x10, 0x80, 0x31, 0x9f, 0x90, 0x70, 0x90, 0x38, 0xa4, -+0x08, 0x98, 0x31, 0xb3, 0xa4, 0x08, 0x90, 0x31, 0xb2, 0x10, 0x10, 0xa4, -+0x08, 0x88, 0x31, 0xb1, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x09, -+0xb8, 0x31, 0xd7, 0xa4, 0x09, 0xb0, 0x31, 0xd6, 0x10, 0x10, 0xa4, 0x09, -+0xa8, 0x31, 0xd5, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x09, 0x28, 0x31, 0xc5, -+0xa4, 0x09, 0x20, 0x31, 0xc4, 0x10, 0x10, 0xa4, 0x09, 0x18, 0x31, 0xc3, -+0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x31, 0x9c, 0x80, -+0x31, 0x9e, 0x10, 0x10, 0x80, 0x31, 0x9d, 0x90, 0x70, 0x90, 0x38, 0xa4, -+0x08, 0x70, 0x31, 0xae, 0xa4, 0x08, 0x80, 0x31, 0xb0, 0x10, 0x10, 0xa4, -+0x08, 0x78, 0x31, 0xaf, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x09, -+0x90, 0x31, 0xd2, 0xa4, 0x09, 0xa0, 0x31, 0xd4, 0x10, 0x10, 0xa4, 0x09, -+0x98, 0x31, 0xd3, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x09, 
0x00, 0x31, 0xc0, -+0xa4, 0x09, 0x10, 0x31, 0xc2, 0x10, 0x10, 0xa4, 0x09, 0x08, 0x31, 0xc1, -+0x92, 0xb0, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x31, -+0x99, 0x80, 0x31, 0x9b, 0x10, 0x10, 0x80, 0x31, 0x9a, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x08, 0x58, 0x31, 0xab, 0xa4, 0x08, 0x68, 0x31, 0xad, 0x10, -+0x10, 0xa4, 0x08, 0x60, 0x31, 0xac, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x09, 0x78, 0x31, 0xcf, 0xa4, 0x09, 0x88, 0x31, 0xd1, 0x10, 0x10, -+0xa4, 0x09, 0x80, 0x31, 0xd0, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x08, 0xe8, -+0x31, 0xbd, 0xa4, 0x08, 0xf8, 0x31, 0xbf, 0x10, 0x10, 0xa4, 0x08, 0xf0, -+0x31, 0xbe, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, 0x31, -+0x96, 0x80, 0x31, 0x98, 0x10, 0x10, 0x80, 0x31, 0x97, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x08, 0x40, 0x31, 0xa8, 0xa4, 0x08, 0x50, 0x31, 0xaa, 0x10, -+0x10, 0xa4, 0x08, 0x48, 0x31, 0xa9, 0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x09, 0x60, 0x31, 0xcc, 0xa4, 0x09, 0x70, 0x31, 0xce, 0x10, 0x10, -+0xa4, 0x09, 0x68, 0x31, 0xcd, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x08, 0xd0, -+0x31, 0xba, 0xa4, 0x08, 0xe0, 0x31, 0xbc, 0x10, 0x10, 0xa4, 0x08, 0xd8, -+0x31, 0xbb, 0x10, 0x10, 0x90, 0xa8, 0x10, 0x10, 0x10, 0x10, 0x90, 0x50, -+0x90, 0x28, 0x80, 0x31, 0x8d, 0x80, 0x31, 0x8f, 0x10, 0x10, 0x80, 0x31, -+0x8e, 0x90, 0x60, 0x90, 0x30, 0x60, 0xa0, 0x2a, 0xc0, 0x60, 0xa0, 0x2a, -+0x80, 0x90, 0x30, 0x60, 0xa0, 0x2a, 0x40, 0x60, 0xa0, 0x2a, 0x00, 0x97, -+0xf0, 0x95, 0x60, 0x92, 0xb0, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x31, 0x93, 0x80, 0x31, 0x95, 0x10, 0x10, 0x80, 0x31, 0x94, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x08, 0x28, 0x31, 0xa5, 0xa4, 0x08, 0x38, -+0x31, 0xa7, 0x10, 0x10, 0xa4, 0x08, 0x30, 0x31, 0xa6, 0x90, 0xb8, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x09, 0x48, 0x31, 0xc9, 0xa4, 0x09, 0x58, 0x31, -+0xcb, 0x10, 0x10, 0xa4, 0x09, 0x50, 0x31, 0xca, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x08, 0xb8, 0x31, 0xb7, 0xa4, 0x08, 0xc8, 0x31, 0xb9, 0x10, 0x10, -+0xa4, 0x08, 0xc0, 0x31, 0xb8, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x31, 0x90, 0x80, 0x31, 0x92, 0x10, 0x10, 0x80, 0x31, 0x91, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x08, 0x10, 0x31, 0xa2, 0xa4, 0x08, 0x20, -+0x31, 0xa4, 0x10, 0x10, 0xa4, 0x08, 0x18, 0x31, 0xa3, 0x90, 0xb8, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x09, 0x30, 0x31, 0xc6, 0xa4, 0x09, 0x40, 0x31, -+0xc8, 0x10, 0x10, 0xa4, 0x09, 0x38, 0x31, 0xc7, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x08, 0xa0, 0x31, 0xb4, 0xa4, 0x08, 0xb0, 0x31, 0xb6, 0x10, 0x10, -+0xa4, 0x08, 0xa8, 0x31, 0xb5, 0x10, 0x10, 0x91, 0x40, 0x90, 0xa0, 0x90, -+0x50, 0x90, 0x28, 0x80, 0x30, 0xcb, 0x80, 0x30, 0xca, 0x90, 0x28, 0x80, -+0x30, 0xc9, 0x80, 0x30, 0xc8, 0x90, 0x50, 0x90, 0x28, 0x80, 0x30, 0xc4, -+0x80, 0x30, 0xc7, 0x90, 0x28, 0x80, 0x30, 0xc6, 0x80, 0x30, 0xc5, 0x90, -+0xa0, 0x90, 0x50, 0x90, 0x28, 0x80, 0x30, 0xbc, 0x80, 0x30, 0xc3, 0x90, -+0x28, 0x80, 0x30, 0xc2, 0x80, 0x30, 0xc1, 0x90, 0x50, 0x90, 0x28, 0x80, -+0x30, 0xbd, 0x80, 0x30, 0xc0, 0x90, 0x28, 0x80, 0x30, 0xbf, 0x80, 0x30, -+0xbe, 0x91, 0x88, 0x80, 0x90, 0xc0, 0x90, 0x60, 0x90, 0x28, 0x81, 0x31, -+0x3b, 0x10, 0x10, 0x80, 0x31, 0x3a, 0x90, 0x28, 0x81, 0x31, 0x3d, 0x10, -+0x10, 0x80, 0x31, 0x3c, 0x90, 0x60, 0x90, 0x28, 0x81, 0x31, 0x41, 0x10, -+0x10, 0x80, 0x31, 0x40, 0x90, 0x28, 0x81, 0x31, 0x3f, 0x10, 0x10, 0x80, -+0x31, 0x3e, 0x80, 0x10, 0x10, 0x10, 0x10, 0x90, 0x28, 0x81, 0x31, 0x38, -+0x10, 0x10, 0x80, 0x31, 0x39, 0xa0, 0x0b, 0x90, 0xa0, 0x0a, 0xc8, 0x95, -+0x60, 0x92, 0xb0, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, -+0x31, 0x56, 0x80, 0x31, 0x55, 0x10, 0x10, 0x80, 0x31, 
0x54, 0x90, 0x70, -+0x90, 0x38, 0xa4, 0x06, 0xe8, 0x31, 0x68, 0xa4, 0x06, 0xe0, 0x31, 0x67, -+0x10, 0x10, 0xa4, 0x06, 0xd8, 0x31, 0x66, 0x90, 0xb8, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x08, 0x08, 0x31, 0x8c, 0xa4, 0x08, 0x00, 0x31, 0x8b, 0x10, -+0x10, 0xa4, 0x07, 0xf8, 0x31, 0x8a, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x07, -+0x78, 0x31, 0x7a, 0xa4, 0x07, 0x70, 0x31, 0x79, 0x10, 0x10, 0xa4, 0x07, -+0x68, 0x31, 0x78, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, 0x28, 0x80, -+0x31, 0x51, 0x80, 0x31, 0x53, 0x10, 0x10, 0x80, 0x31, 0x52, 0x90, 0x70, -+0x90, 0x38, 0xa4, 0x06, 0xc0, 0x31, 0x63, 0xa4, 0x06, 0xd0, 0x31, 0x65, -+0x10, 0x10, 0xa4, 0x06, 0xc8, 0x31, 0x64, 0x90, 0xb8, 0x90, 0x70, 0x90, -+0x38, 0xa4, 0x07, 0xe0, 0x31, 0x87, 0xa4, 0x07, 0xf0, 0x31, 0x89, 0x10, -+0x10, 0xa4, 0x07, 0xe8, 0x31, 0x88, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x07, -+0x50, 0x31, 0x75, 0xa4, 0x07, 0x60, 0x31, 0x77, 0x10, 0x10, 0xa4, 0x07, -+0x58, 0x31, 0x76, 0x92, 0xb0, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x31, 0x4e, 0x80, 0x31, 0x50, 0x10, 0x10, 0x80, 0x31, 0x4f, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x06, 0xa8, 0x31, 0x60, 0xa4, 0x06, 0xb8, -+0x31, 0x62, 0x10, 0x10, 0xa4, 0x06, 0xb0, 0x31, 0x61, 0x90, 0xb8, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x07, 0xc8, 0x31, 0x84, 0xa4, 0x07, 0xd8, 0x31, -+0x86, 0x10, 0x10, 0xa4, 0x07, 0xd0, 0x31, 0x85, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x07, 0x38, 0x31, 0x72, 0xa4, 0x07, 0x48, 0x31, 0x74, 0x10, 0x10, -+0xa4, 0x07, 0x40, 0x31, 0x73, 0x91, 0x40, 0x90, 0x88, 0x90, 0x50, 0x90, -+0x28, 0x80, 0x31, 0x4b, 0x80, 0x31, 0x4d, 0x10, 0x10, 0x80, 0x31, 0x4c, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x06, 0x90, 0x31, 0x5d, 0xa4, 0x06, 0xa0, -+0x31, 0x5f, 0x10, 0x10, 0xa4, 0x06, 0x98, 0x31, 0x5e, 0x90, 0xb8, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x07, 0xb0, 0x31, 0x81, 0xa4, 0x07, 0xc0, 0x31, -+0x83, 0x10, 0x10, 0xa4, 0x07, 0xb8, 0x31, 0x82, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x07, 0x20, 0x31, 0x6f, 0xa4, 0x07, 0x30, 0x31, 0x71, 0x10, 0x10, -+0xa4, 0x07, 0x28, 0x31, 0x70, 0x10, 0x10, 0x80, 0x10, 0x10, 0x10, 0x10, -+0x90, 0x50, 0x90, 0x28, 0x80, 0x31, 0x42, 0x80, 0x31, 0x44, 0x10, 0x10, -+0x80, 0x31, 0x43, 0x80, 0x95, 0x60, 0x92, 0xb0, 0x91, 0x40, 0x90, 0x88, -+0x90, 0x50, 0x90, 0x28, 0x80, 0x31, 0x48, 0x80, 0x31, 0x4a, 0x10, 0x10, -+0x80, 0x31, 0x49, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x06, 0x78, 0x31, 0x5a, -+0xa4, 0x06, 0x88, 0x31, 0x5c, 0x10, 0x10, 0xa4, 0x06, 0x80, 0x31, 0x5b, -+0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x07, 0x98, 0x31, 0x7e, 0xa4, -+0x07, 0xa8, 0x31, 0x80, 0x10, 0x10, 0xa4, 0x07, 0xa0, 0x31, 0x7f, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x07, 0x08, 0x31, 0x6c, 0xa4, 0x07, 0x18, 0x31, -+0x6e, 0x10, 0x10, 0xa4, 0x07, 0x10, 0x31, 0x6d, 0x91, 0x40, 0x90, 0x88, -+0x90, 0x50, 0x90, 0x28, 0x80, 0x31, 0x45, 0x80, 0x31, 0x47, 0x10, 0x10, -+0x80, 0x31, 0x46, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x06, 0x60, 0x31, 0x57, -+0xa4, 0x06, 0x70, 0x31, 0x59, 0x10, 0x10, 0xa4, 0x06, 0x68, 0x31, 0x58, -+0x90, 0xb8, 0x90, 0x70, 0x90, 0x38, 0xa4, 0x07, 0x80, 0x31, 0x7b, 0xa4, -+0x07, 0x90, 0x31, 0x7d, 0x10, 0x10, 0xa4, 0x07, 0x88, 0x31, 0x7c, 0x90, -+0x70, 0x90, 0x38, 0xa4, 0x06, 0xf0, 0x31, 0x69, 0xa4, 0x07, 0x00, 0x31, -+0x6b, 0x10, 0x10, 0xa4, 0x06, 0xf8, 0x31, 0x6a, 0x10, 0x10, 0x91, 0x40, -+0x90, 0xa0, 0x90, 0x50, 0x90, 0x28, 0x80, 0x30, 0xbb, 0x80, 0x30, 0xba, -+0x90, 0x28, 0x80, 0x30, 0xb9, 0x80, 0x30, 0xb8, 0x90, 0x50, 0x90, 0x28, -+0x80, 0x30, 0xb4, 0x80, 0x30, 0xb7, 0x90, 0x28, 0x80, 0x30, 0xb6, 0x80, -+0x30, 0xb5, 0x90, 0xa0, 0x90, 0x50, 0x90, 0x28, 0x80, 0x30, 0xac, 0x80, -+0x30, 0xb3, 0x90, 0x28, 0x80, 0x30, 0xb2, 0x80, 0x30, 
0xb1, 0x90, 0x50, -+0x90, 0x28, 0x80, 0x30, 0xad, 0x80, 0x30, 0xb0, 0x90, 0x28, 0x80, 0x30, -+0xaf, 0x80, 0x30, 0xae, 0xc3, 0xc0, 0x30, 0x42, 0x9c, 0xe8, 0x07, 0x60, -+0x91, 0x90, 0x90, 0xf0, 0x10, 0x10, 0x80, 0x88, 0x00, 0x80, 0x90, 0x50, -+0x90, 0x28, 0x80, 0x33, 0xf8, 0x80, 0x33, 0xf9, 0x81, 0x33, 0xef, 0xd0, -+0x41, 0x80, 0x24, 0x20, 0x90, 0x24, 0x20, 0x98, 0x10, 0x10, 0x80, 0x90, -+0x58, 0x80, 0x90, 0x28, 0x24, 0x1f, 0x90, 0x24, 0x1f, 0x98, 0x81, 0x24, -+0x1f, 0x50, 0x92, 0x68, 0x91, 0x00, 0x80, 0x90, 0x90, 0x90, 0x30, 0x80, -+0x24, 0x20, 0x00, 0x90, 0x38, 0xa4, 0x1f, 0xf8, 0x34, 0x06, 0x80, 0x34, -+0x05, 0x80, 0x90, 0x28, 0x80, 0x34, 0x0f, 0xa4, 0x1f, 0xe0, 0x34, 0x0e, -+0x80, 0x90, 0xc0, 0x90, 0x60, 0x90, 0x28, 0x80, 0x34, 0x09, 0xa4, 0x1f, -+0xf0, 0x34, 0x08, 0x90, 0x28, 0x80, 0x34, 0x04, 0xa4, 0x1f, 0xe8, 0x34, -+0x03, 0x90, 0x50, 0x90, 0x28, 0x80, 0x34, 0x0d, 0x80, 0x34, 0x0c, 0x90, -+0x28, 0x24, 0x20, 0x88, 0x24, 0x20, 0x80, 0x90, 0x58, 0x80, 0x10, 0x10, -+0x80, 0x10, 0x10, 0x80, 0x33, 0xfb, 0x80, 0x90, 0x40, 0x10, 0x10, 0x80, -+0x24, 0x1f, 0x60, 0x80, 0x10, 0x10, 0x80, 0x33, 0xfa, 0x91, 0x58, 0x91, -+0x00, 0x90, 0x80, 0x81, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, 0xf6, 0x80, -+0x33, 0xf7, 0x81, 0x33, 0xee, 0x81, 0x90, 0x50, 0x90, 0x28, 0x80, 0x33, -+0xf4, 0x80, 0x33, 0xf5, 0x81, 0x33, 0xed, 0x83, 0x90, 0x28, 0x24, 0x1f, -+0x80, 0x24, 0x1f, 0x88, 0x90, 0xe8, 0x81, 0x90, 0x88, 0x90, 0x38, 0x10, -+0x10, 0x80, 0x34, 0x07, 0x90, 0x28, 0x80, 0x34, 0x02, 0x80, 0x34, 0x01, -+0x80, 0x90, 0x28, 0x80, 0x34, 0x0b, 0x80, 0x34, 0x0a, 0x82, 0x10, 0x10, -+0x80, 0x24, 0x1f, 0x58, 0x97, 0x10, 0x9e, 0x10, 0x06, 0x98, 0x93, 0x00, -+0x91, 0x80, 0x90, 0xc0, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x03, 0x80, 0x30, -+0x71, 0x24, 0x03, 0x78, 0x90, 0x38, 0xa4, 0x04, 0x10, 0x30, 0x83, 0x24, -+0x04, 0x08, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x05, 0x30, 0x30, 0xa7, 0x24, -+0x05, 0x28, 0x90, 0x38, 0xa4, 0x04, 0xa0, 0x30, 0x95, 0x24, 0x04, 0x98, -+0x90, 0xc0, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x03, 0x70, 0x30, 0x6c, 0x24, -+0x03, 0x68, 0x90, 0x38, 0xa4, 0x04, 0x00, 0x30, 0x7e, 0x24, 0x03, 0xf8, -+0x90, 0x60, 0x90, 0x38, 0xa4, 0x05, 0x20, 0x30, 0xa2, 0x24, 0x05, 0x18, -+0x90, 0x38, 0xa4, 0x04, 0x90, 0x30, 0x90, 0x24, 0x04, 0x88, 0x91, 0x80, -+0x90, 0xc0, 0x90, 0x60, 0x90, 0x38, 0xa4, 0x03, 0x58, 0x30, 0x69, 0x24, -+0x03, 0x50, 0x90, 0x38, 0xa4, 0x03, 0xe8, 0x30, 0x7b, 0x24, 0x03, 0xe0, -+0x90, 0x60, 0x90, 0x38, 0xa4, 0x05, 0x08, 0x30, 0x9f, 0x24, 0x05, 0x00, -+0x90, 0x38, 0xa4, 0x04, 0x78, 0x30, 0x8d, 0x24, 0x04, 0x70, 0x90, 0xc0, -+0x90, 0x60, 0x90, 0x38, 0xa4, 0x03, 0x40, 0x30, 0x66, 0x24, 0x03, 0x38, -+0x90, 0x38, 0xa4, 0x03, 0xd0, 0x30, 0x78, 0x24, 0x03, 0xc8, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x04, 0xf0, 0x30, 0x9c, 0x24, 0x04, 0xe8, 0x90, 0x38, -+0xa4, 0x04, 0x60, 0x30, 0x8a, 0x24, 0x04, 0x58, 0x10, 0x10, 0x80, 0x10, -+0x10, 0x10, 0x10, 0x90, 0x38, 0xa4, 0x02, 0xf8, 0x30, 0x5d, 0x24, 0x02, -+0xf0, 0xd7, 0x42, 0x00, 0xa4, 0x38, 0x58, 0x37, 0x0d, 0xa4, 0x38, 0x38, -+0x37, 0x09, 0x9c, 0xe0, 0x06, 0x90, 0x93, 0x00, 0x91, 0x80, 0x90, 0xc0, -+0x90, 0x60, 0x90, 0x38, 0xa4, 0x03, 0x28, 0x30, 0x63, 0x24, 0x03, 0x20, -+0x90, 0x38, 0xa4, 0x03, 0xb8, 0x30, 0x75, 0x24, 0x03, 0xb0, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x04, 0xd8, 0x30, 0x99, 0x24, 0x04, 0xd0, 0x90, 0x38, -+0xa4, 0x04, 0x48, 0x30, 0x87, 0x24, 0x04, 0x40, 0x90, 0xc0, 0x90, 0x60, -+0x90, 0x38, 0xa4, 0x03, 0x10, 0x30, 0x60, 0x24, 0x03, 0x08, 0x90, 0x38, -+0xa4, 0x03, 0xa0, 0x30, 0x72, 0x24, 0x03, 0x98, 0x90, 0x60, 0x90, 0x38, -+0xa4, 0x04, 0xc0, 0x30, 0x96, 0x24, 0x04, 0xb8, 0x90, 
0x38, 0xa4, 0x04, -+0x30, 0x30, 0x84, 0x24, 0x04, 0x28, 0x10, 0x10, 0x90, 0xe0, 0x90, 0x70, -+0x90, 0x38, 0xa4, 0x02, 0x88, 0x30, 0x52, 0xa4, 0x02, 0x78, 0x30, 0x50, -+0x90, 0x38, 0xa4, 0x02, 0x70, 0x30, 0x4b, 0xa4, 0x02, 0x60, 0x30, 0x4d, -+0x90, 0x70, 0x90, 0x38, 0xa4, 0x02, 0x50, 0x30, 0x43, 0xa4, 0x02, 0x40, -+0x30, 0x49, 0x90, 0x38, 0xa4, 0x02, 0x38, 0x30, 0x44, 0xa4, 0x02, 0x28, -+0x30, 0x46, 0x91, 0x48, 0x80, 0x90, 0xa0, 0x90, 0x50, 0x90, 0x28, 0x80, -+0x30, 0x56, 0x24, 0x02, 0xa8, 0x90, 0x28, 0x80, 0x30, 0x58, 0x24, 0x02, -+0xb8, 0x90, 0x50, 0x90, 0x28, 0x80, 0x30, 0x5c, 0x24, 0x02, 0xd8, 0x90, -+0x28, 0x80, 0x30, 0x5a, 0x24, 0x02, 0xc8, 0x80, 0x10, 0x10, 0x10, 0x10, -+0x90, 0x28, 0x80, 0x30, 0x53, 0x24, 0x02, 0xa0, 0xd7, 0x42, 0x00, 0xa4, -+0x38, 0x60, 0x37, 0x0e, 0xa4, 0x38, 0x40, 0x37, 0x0a, 0xa0, 0x14, 0x68, -+0xa0, 0x10, 0x90, 0xa0, 0x0c, 0x60, 0x9e, 0x88, 0x09, 0xd0, 0x94, 0xf0, -+0x90, 0xb0, 0x88, 0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, 0xe1, 0x44, 0x40, -+0x85, 0x35, 0x2d, 0xcb, 0x61, 0x3d, 0x00, 0x85, 0x35, 0x03, 0x9a, 0x00, -+0x03, 0xf8, 0x91, 0x98, 0x80, 0x91, 0x10, 0x90, 0xa0, 0x90, 0x68, 0x90, -+0x20, 0x3a, 0x53, 0xc9, 0xe2, 0x94, 0x40, 0x85, 0x35, 0x2b, 0xa4, 0x52, -+0x78, 0x3a, 0x50, 0x90, 0x38, 0xa4, 0x52, 0x40, 0x3a, 0x49, 0xa4, 0x52, -+0x30, 0x3a, 0x47, 0x90, 0x48, 0x10, 0x10, 0xa4, 0x51, 0xf8, 0x3a, 0x40, -+0x10, 0x10, 0x80, 0x3a, 0x3c, 0x81, 0x10, 0x10, 0x80, 0xa4, 0x51, 0xc8, -+0x3a, 0x3a, 0x91, 0xb0, 0x91, 0x60, 0x90, 0xe0, 0x90, 0x70, 0x90, 0x38, -+0xa4, 0x52, 0x68, 0x3a, 0x4e, 0xa4, 0x52, 0x58, 0x3a, 0x4c, 0x90, 0x38, -+0xa4, 0x52, 0x20, 0x3a, 0x45, 0xa4, 0x52, 0x10, 0x3a, 0x43, 0x90, 0x48, -+0x10, 0x10, 0xa4, 0x51, 0xe8, 0x3a, 0x3e, 0x10, 0x10, 0x80, 0x3a, 0x3b, -+0x90, 0x28, 0x80, 0x3a, 0x34, 0x80, 0x3a, 0x33, 0x81, 0x10, 0x10, 0x80, -+0xa4, 0x51, 0xb8, 0x3a, 0x38, 0xcb, 0x61, 0x3c, 0xc0, 0x85, 0x35, 0x02, -+0x90, 0xd8, 0x88, 0x00, 0x90, 0x84, 0x90, 0x38, 0xc1, 0xc0, 0x85, 0x3a, -+0x56, 0xc9, 0xe1, 0x44, 0x00, 0x85, 0x35, 0x29, 0xcb, 0x61, 0x3c, 0x80, -+0x85, 0x35, 0x01, 0x88, 0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, 0xe1, 0x43, -+0xc0, 0x85, 0x35, 0x27, 0xcb, 0x61, 0x3c, 0x40, 0x85, 0x35, 0x00, 0x91, -+0xf8, 0x90, 0xb0, 0x88, 0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, 0xe1, 0x43, -+0x40, 0x85, 0x35, 0x23, 0xcb, 0x61, 0x3b, 0xc0, 0x85, 0x34, 0xfe, 0x88, -+0x01, 0x00, 0x90, 0xa0, 0x81, 0x90, 0x70, 0x80, 0x90, 0x20, 0x3a, 0x4a, -+0xc9, 0xe1, 0x43, 0x00, 0x85, 0x35, 0x21, 0x81, 0x3a, 0x41, 0x81, 0x10, -+0x10, 0x80, 0xa4, 0x51, 0xa8, 0x3a, 0x36, 0xcb, 0x61, 0x3b, 0x80, 0x85, -+0x34, 0xfd, 0x90, 0xb0, 0x88, 0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, 0xe1, -+0x42, 0xc0, 0x85, 0x35, 0x1f, 0xcb, 0x61, 0x3b, 0x40, 0x85, 0x34, 0xfc, -+0x88, 0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, 0xe1, 0x42, 0x80, 0x85, 0x35, -+0x1d, 0xcb, 0x61, 0x3b, 0x00, 0x85, 0x34, 0xfb, 0x92, 0x38, 0x81, 0x91, -+0x68, 0x91, 0x18, 0x90, 0x80, 0x90, 0x40, 0x80, 0xa4, 0x53, 0x28, 0x3a, -+0x66, 0x80, 0xa4, 0x53, 0x20, 0x3a, 0x63, 0x90, 0x28, 0x81, 0x3a, 0x62, -+0x90, 0x38, 0xa4, 0x53, 0x00, 0x3a, 0x61, 0xa4, 0x52, 0xf0, 0x3a, 0x5f, -+0x90, 0x28, 0x80, 0x3a, 0x5d, 0x80, 0x3a, 0x5c, 0x80, 0x90, 0x40, 0x10, -+0x10, 0x80, 0x24, 0x52, 0xd8, 0x10, 0x10, 0x90, 0x38, 0xa4, 0x52, 0xc8, -+0x3a, 0x5a, 0xa4, 0x52, 0xb8, 0x3a, 0x58, 0x90, 0x28, 0x80, 0x3a, 0x55, -+0x80, 0x3a, 0x54, 0x9a, 0xd0, 0x03, 0xe0, 0x91, 0x60, 0x90, 0xb0, 0x88, -+0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, 0xe1, 0x42, 0x00, 0x85, 0x35, 0x19, -+0xcb, 0x61, 0x3a, 0x80, 0x85, 0x34, 0xf9, 0x88, 0x00, 0x68, 0x84, 0x10, -+0x10, 0xc9, 0xe1, 0x41, 0xc0, 0x85, 0x35, 0x17, 0xcb, 
0x61, 0x3a, 0x40, -+0x85, 0x34, 0xf8, 0x90, 0xb0, 0x88, 0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, -+0xe1, 0x41, 0x80, 0x85, 0x35, 0x15, 0xcb, 0x61, 0x3a, 0x00, 0x85, 0x34, -+0xf7, 0x88, 0x00, 0x68, 0x84, 0x10, 0x10, 0xc9, 0xe1, 0x41, 0x40, 0x85, -+0x35, 0x13, 0xcb, 0x61, 0x39, 0xc0, 0x85, 0x34, 0xf6, 0x90, 0x90, 0x90, -+0x48, 0xcb, 0xa1, 0x38, 0x00, 0x85, 0x34, 0xe5, 0xcb, 0xa1, 0x37, 0xc0, -+0x85, 0x34, 0xe4, 0x90, 0x48, 0xcb, 0xa1, 0x37, 0x80, 0x85, 0x34, 0xe3, -+0xcb, 0xa1, 0x37, 0x40, 0x85, 0x34, 0xe2, 0xcb, 0xa2, 0x8c, 0x40, 0x80, -+0x3a, 0x32, 0x92, 0x40, 0x91, 0x20, 0x90, 0x90, 0x90, 0x48, 0x8c, 0x26, -+0x60, 0x84, 0x24, 0x26, 0xd8, 0x8c, 0x26, 0x58, 0x84, 0x24, 0x26, 0xd0, -+0x90, 0x48, 0x8c, 0x26, 0x50, 0x84, 0x24, 0x26, 0xc8, 0x8c, 0x26, 0x48, -+0x84, 0x24, 0x26, 0xc0, 0x90, 0x90, 0x90, 0x48, 0x8c, 0x26, 0x38, 0x84, -+0x24, 0x26, 0xb0, 0x8c, 0x26, 0x30, 0x84, 0x24, 0x26, 0xa8, 0x90, 0x48, -+0x8c, 0x26, 0x28, 0x84, 0x24, 0x26, 0xa0, 0x8c, 0x26, 0x20, 0x84, 0x24, -+0x26, 0x98, 0x91, 0x20, 0x90, 0x90, 0x90, 0x48, 0x8c, 0x26, 0x10, 0x84, -+0x24, 0x26, 0x88, 0x8c, 0x26, 0x08, 0x84, 0x24, 0x26, 0x80, 0x90, 0x48, -+0x8c, 0x26, 0x00, 0x84, 0x24, 0x26, 0x78, 0x8c, 0x25, 0xf8, 0x84, 0x24, -+0x26, 0x70, 0x90, 0x38, 0xa4, 0x25, 0xe0, 0x34, 0xbd, 0xa4, 0x25, 0xd0, -+0x34, 0xbb, 0xa0, 0x0f, 0x50, 0xa0, 0x09, 0x08, 0x9a, 0x30, 0x04, 0x40, -+0x91, 0x90, 0x90, 0xc8, 0x98, 0x50, 0x00, 0x80, 0xe5, 0x22, 0x8a, 0x40, -+0x3a, 0x21, 0xe5, 0x22, 0x82, 0x40, 0x3a, 0x1d, 0xcb, 0x61, 0x2a, 0x40, -+0x85, 0x34, 0xb8, 0x98, 0x50, 0x00, 0x80, 0xe5, 0x22, 0x7a, 0x40, 0x39, -+0xe1, 0xe5, 0x22, 0x72, 0x40, 0x39, 0xdd, 0xcb, 0x61, 0x2a, 0x00, 0x85, -+0x34, 0xb7, 0x90, 0x48, 0xcb, 0xa1, 0x29, 0xc0, 0x85, 0x34, 0xb6, 0xcb, -+0xa1, 0x29, 0x80, 0x85, 0x34, 0xb5, 0x91, 0x90, 0x90, 0xc8, 0x98, 0x50, -+0x00, 0x80, 0xe5, 0x22, 0x64, 0x40, 0x39, 0xa9, 0xe5, 0x22, 0x58, 0x40, -+0x39, 0x79, 0xcb, 0x61, 0x29, 0x00, 0x85, 0x34, 0xb3, 0x98, 0x50, 0x00, -+0x80, 0xe5, 0x22, 0x4c, 0x40, 0x39, 0x49, 0xe5, 0x22, 0x40, 0x40, 0x39, -+0x19, 0xcb, 0x61, 0x28, 0xc0, 0x85, 0x34, 0xb2, 0x90, 0x48, 0xcb, 0xa1, -+0x28, 0x80, 0x85, 0x34, 0xb1, 0xcb, 0xa1, 0x28, 0x40, 0x85, 0x34, 0xb0, -+0x92, 0x20, 0x91, 0x30, 0x90, 0xb8, 0xd5, 0x03, 0x00, 0xc0, 0xc0, 0x81, -+0x8c, 0x01, 0xa0, 0x84, 0x30, 0x3e, 0xc0, 0xc0, 0x81, 0x8c, 0x01, 0x80, -+0x84, 0x30, 0x3c, 0xd5, 0x02, 0x00, 0xc0, 0xc0, 0x81, 0x30, 0x28, 0xc0, -+0xc0, 0x81, 0x30, 0x24, 0x90, 0x78, 0xd5, 0x02, 0x00, 0xc0, 0xc0, 0x81, -+0x30, 0x1c, 0xc0, 0xc0, 0x81, 0x30, 0x18, 0xd5, 0x02, 0x00, 0xc0, 0xc0, -+0x81, 0x30, 0x10, 0xc0, 0xc0, 0x81, 0x30, 0x0c, 0x91, 0x70, 0x90, 0xd8, -+0xd5, 0x03, 0x80, 0xc8, 0xe2, 0x38, 0x40, 0x81, 0x8c, 0x01, 0xc0, 0x84, -+0x30, 0x40, 0xc8, 0xe2, 0x3a, 0x40, 0x81, 0x8c, 0x01, 0x90, 0x84, 0x30, -+0x3d, 0xd5, 0x02, 0x80, 0xc8, 0xe2, 0x37, 0x40, 0x81, 0x30, 0x2c, 0xc8, -+0xe2, 0x31, 0xc0, 0x81, 0x30, 0x26, 0x90, 0x98, 0xd5, 0x02, 0x80, 0xc8, -+0xe2, 0x26, 0xc0, 0x81, 0x30, 0x20, 0xc8, 0xe2, 0x28, 0xc0, 0x81, 0x30, -+0x1a, 0xd5, 0x02, 0x80, 0xc8, 0xe2, 0x25, 0xc0, 0x81, 0x30, 0x14, 0xc8, -+0xe2, 0x20, 0x40, 0x81, 0x30, 0x0e, 0x9a, 0x30, 0x04, 0x40, 0x91, 0x90, -+0x90, 0xc8, 0x98, 0x50, 0x00, 0x80, 0xe5, 0x22, 0x7e, 0x40, 0x39, 0xf1, -+0xe5, 0x22, 0x80, 0x40, 0x3a, 0x15, 0xcb, 0x61, 0x27, 0xc0, 0x85, 0x34, -+0xae, 0x98, 0x50, 0x00, 0x80, 0xe5, 0x22, 0x6e, 0x40, 0x39, 0xb1, 0xe5, -+0x22, 0x70, 0x40, 0x39, 0xd5, 0xcb, 0x61, 0x27, 0x80, 0x85, 0x34, 0xad, -+0x90, 0x48, 0xcb, 0xa1, 0x27, 0x40, 0x85, 0x34, 0xac, 0xcb, 0xa1, 0x27, -+0x00, 0x85, 0x34, 0xab, 0x91, 0x90, 0x90, 0xc8, 0x98, 
0x50, 0x00, 0x80, -+0xe5, 0x22, 0x60, 0x40, 0x39, 0x99, 0xe5, 0x22, 0x54, 0x40, 0x39, 0x69, -+0xcb, 0x61, 0x25, 0x40, 0x85, 0x34, 0x9a, 0x98, 0x50, 0x00, 0x80, 0xe5, -+0x22, 0x48, 0x40, 0x39, 0x39, 0xe5, 0x22, 0x3c, 0x40, 0x39, 0x09, 0xcb, -+0x61, 0x25, 0x00, 0x85, 0x34, 0x99, 0x90, 0x48, 0xcb, 0xa1, 0x24, 0xc0, -+0x85, 0x34, 0x98, 0xcb, 0xa1, 0x24, 0x80, 0x85, 0x34, 0x97, 0x91, 0x00, -+0x90, 0x80, 0x90, 0x40, 0xe5, 0x20, 0x02, 0x40, 0x30, 0x0a, 0xe5, 0x20, -+0x01, 0x80, 0x30, 0x07, 0x90, 0x40, 0xe5, 0x20, 0x00, 0xc0, 0x30, 0x04, -+0xe5, 0x20, 0x00, 0x00, 0x30, 0x01, 0x90, 0x80, 0x90, 0x40, 0xe5, 0x22, -+0x2d, 0x40, 0x38, 0xab, 0xe5, 0x22, 0x2f, 0x80, 0x38, 0xd3, 0x90, 0x40, -+0xe5, 0x22, 0x1b, 0xc0, 0x38, 0x65, 0xe5, 0x22, 0x1e, 0x00, 0x38, 0x8d, -+0x80, 0x99, 0x28, 0x02, 0xf0, 0x8c, 0x24, 0x48, 0x90, 0x80, 0x90, 0x40, -+0xe5, 0x22, 0x84, 0x40, 0x3a, 0x0d, 0xe5, 0x22, 0x81, 0x40, 0x3a, 0x19, -+0x90, 0x40, 0xe5, 0x22, 0x74, 0x40, 0x39, 0xcd, 0xe5, 0x22, 0x71, 0x40, -+0x39, 0xd9, 0x91, 0x48, 0x90, 0xc8, 0x98, 0x50, 0x00, 0x80, 0xe5, 0x22, -+0x62, 0x40, 0x39, 0xa1, 0xe5, 0x22, 0x56, 0x40, 0x39, 0x71, 0xcb, 0x61, -+0x23, 0x00, 0x85, 0x34, 0x90, 0x90, 0x40, 0xe5, 0x22, 0x4a, 0x40, 0x39, -+0x41, 0xe5, 0x22, 0x3e, 0x40, 0x39, 0x11, 0x90, 0x48, 0xcb, 0xa1, 0x22, -+0x80, 0x85, 0x34, 0x8e, 0xcb, 0xa1, 0x22, 0xc0, 0x85, 0x34, 0x8f, 0x10, -+0x10, 0x90, 0x80, 0x90, 0x40, 0xe5, 0x22, 0x33, 0xc0, 0x38, 0xcb, 0xe5, -+0x22, 0x30, 0xc0, 0x38, 0xd9, 0x90, 0x40, 0xe5, 0x22, 0x22, 0x40, 0x38, -+0x85, 0xe5, 0x22, 0x1f, 0x40, 0x38, 0x93, -+}; -+ -+static const struct ia64_dis_names ia64_dis_names[] = { -+{ 0x51, 41, 0, 10 }, -+{ 0x31, 41, 1, 20 }, -+{ 0x11, 42, 0, 19 }, -+{ 0x29, 41, 0, 12 }, -+{ 0x19, 41, 1, 24 }, -+{ 0x9, 42, 0, 23 }, -+{ 0x15, 41, 0, 14 }, -+{ 0xd, 41, 1, 28 }, -+{ 0x5, 42, 0, 27 }, -+{ 0xb, 41, 0, 16 }, -+{ 0x7, 41, 1, 32 }, -+{ 0x3, 42, 0, 31 }, -+{ 0x51, 39, 1, 58 }, -+{ 0x50, 39, 0, 34 }, -+{ 0xd1, 39, 1, 57 }, -+{ 0xd0, 39, 0, 33 }, -+{ 0x31, 39, 1, 68 }, -+{ 0x30, 39, 1, 44 }, -+{ 0x11, 40, 1, 67 }, -+{ 0x10, 40, 0, 43 }, -+{ 0x71, 39, 1, 66 }, -+{ 0x70, 39, 1, 42 }, -+{ 0x31, 40, 1, 65 }, -+{ 0x30, 40, 0, 41 }, -+{ 0x29, 39, 1, 60 }, -+{ 0x28, 39, 0, 36 }, -+{ 0x69, 39, 1, 59 }, -+{ 0x68, 39, 0, 35 }, -+{ 0x19, 39, 1, 72 }, -+{ 0x18, 39, 1, 48 }, -+{ 0x9, 40, 1, 71 }, -+{ 0x8, 40, 0, 47 }, -+{ 0x39, 39, 1, 70 }, -+{ 0x38, 39, 1, 46 }, -+{ 0x19, 40, 1, 69 }, -+{ 0x18, 40, 0, 45 }, -+{ 0x15, 39, 1, 62 }, -+{ 0x14, 39, 0, 38 }, -+{ 0x35, 39, 1, 61 }, -+{ 0x34, 39, 0, 37 }, -+{ 0xd, 39, 1, 76 }, -+{ 0xc, 39, 1, 52 }, -+{ 0x5, 40, 1, 75 }, -+{ 0x4, 40, 0, 51 }, -+{ 0x1d, 39, 1, 74 }, -+{ 0x1c, 39, 1, 50 }, -+{ 0xd, 40, 1, 73 }, -+{ 0xc, 40, 0, 49 }, -+{ 0xb, 39, 1, 64 }, -+{ 0xa, 39, 0, 40 }, -+{ 0x1b, 39, 1, 63 }, -+{ 0x1a, 39, 0, 39 }, -+{ 0x7, 39, 1, 80 }, -+{ 0x6, 39, 1, 56 }, -+{ 0x3, 40, 1, 79 }, -+{ 0x2, 40, 0, 55 }, -+{ 0xf, 39, 1, 78 }, -+{ 0xe, 39, 1, 54 }, -+{ 0x7, 40, 1, 77 }, -+{ 0x6, 40, 0, 53 }, -+{ 0x8, 38, 0, 82 }, -+{ 0x18, 38, 0, 81 }, -+{ 0x1, 38, 1, 86 }, -+{ 0x2, 38, 0, 85 }, -+{ 0x3, 38, 1, 84 }, -+{ 0x4, 38, 0, 83 }, -+{ 0x1, 336, 0, 87 }, -+{ 0x20, 289, 0, 98 }, -+{ 0x220, 289, 0, 94 }, -+{ 0x1220, 289, 0, 91 }, -+{ 0xa20, 289, 0, 92 }, -+{ 0x620, 289, 0, 93 }, -+{ 0x120, 289, 0, 95 }, -+{ 0xa0, 289, 0, 96 }, -+{ 0x60, 289, 0, 97 }, -+{ 0x10, 289, 0, 102 }, -+{ 0x90, 289, 0, 99 }, -+{ 0x50, 289, 0, 100 }, -+{ 0x30, 289, 0, 101 }, -+{ 0x8, 289, 0, 103 }, -+{ 0x4, 289, 0, 104 }, -+{ 0x2, 289, 0, 105 }, -+{ 0x1, 289, 0, 106 }, -+{ 0x1, 411, 0, 108 }, -+{ 0x3, 411, 0, 
107 }, -+{ 0x2, 417, 0, 109 }, -+{ 0x1, 417, 0, 110 }, -+{ 0x2, 413, 0, 111 }, -+{ 0x1, 413, 0, 112 }, -+{ 0x2, 415, 0, 113 }, -+{ 0x1, 415, 0, 114 }, -+{ 0x2, 419, 0, 115 }, -+{ 0x1, 419, 0, 116 }, -+{ 0x1, 268, 0, 143 }, -+{ 0x5, 268, 0, 141 }, -+{ 0x3, 268, 0, 142 }, -+{ 0x140, 277, 0, 119 }, -+{ 0x540, 277, 0, 117 }, -+{ 0x340, 277, 0, 118 }, -+{ 0xc0, 277, 0, 131 }, -+{ 0x2c0, 277, 0, 129 }, -+{ 0x1c0, 277, 0, 130 }, -+{ 0x20, 277, 0, 146 }, -+{ 0xa0, 277, 0, 144 }, -+{ 0x60, 277, 0, 145 }, -+{ 0x10, 277, 0, 158 }, -+{ 0x50, 277, 0, 156 }, -+{ 0x30, 277, 0, 157 }, -+{ 0x8, 277, 0, 170 }, -+{ 0x28, 277, 0, 168 }, -+{ 0x18, 277, 0, 169 }, -+{ 0x4, 277, 0, 180 }, -+{ 0x2, 277, 0, 181 }, -+{ 0x1, 277, 0, 182 }, -+{ 0x140, 271, 0, 122 }, -+{ 0x540, 271, 0, 120 }, -+{ 0x340, 271, 0, 121 }, -+{ 0xc0, 271, 0, 134 }, -+{ 0x2c0, 271, 0, 132 }, -+{ 0x1c0, 271, 0, 133 }, -+{ 0x20, 271, 0, 149 }, -+{ 0xa0, 271, 0, 147 }, -+{ 0x60, 271, 0, 148 }, -+{ 0x10, 271, 0, 161 }, -+{ 0x50, 271, 0, 159 }, -+{ 0x30, 271, 0, 160 }, -+{ 0x8, 271, 0, 173 }, -+{ 0x28, 271, 0, 171 }, -+{ 0x18, 271, 0, 172 }, -+{ 0x4, 271, 0, 183 }, -+{ 0x2, 271, 0, 184 }, -+{ 0x1, 271, 0, 185 }, -+{ 0x140, 274, 0, 125 }, -+{ 0x540, 274, 0, 123 }, -+{ 0x340, 274, 0, 124 }, -+{ 0xc0, 274, 0, 137 }, -+{ 0x2c0, 274, 0, 135 }, -+{ 0x1c0, 274, 0, 136 }, -+{ 0x20, 274, 0, 152 }, -+{ 0xa0, 274, 0, 150 }, -+{ 0x60, 274, 0, 151 }, -+{ 0x10, 274, 0, 164 }, -+{ 0x50, 274, 0, 162 }, -+{ 0x30, 274, 0, 163 }, -+{ 0x8, 274, 0, 176 }, -+{ 0x28, 274, 0, 174 }, -+{ 0x18, 274, 0, 175 }, -+{ 0x4, 274, 0, 186 }, -+{ 0x2, 274, 0, 187 }, -+{ 0x1, 274, 0, 188 }, -+{ 0x140, 286, 0, 128 }, -+{ 0x540, 286, 0, 126 }, -+{ 0x340, 286, 0, 127 }, -+{ 0xc0, 286, 0, 140 }, -+{ 0x2c0, 286, 0, 138 }, -+{ 0x1c0, 286, 0, 139 }, -+{ 0x20, 286, 0, 155 }, -+{ 0xa0, 286, 0, 153 }, -+{ 0x60, 286, 0, 154 }, -+{ 0x10, 286, 0, 167 }, -+{ 0x50, 286, 0, 165 }, -+{ 0x30, 286, 0, 166 }, -+{ 0x8, 286, 0, 179 }, -+{ 0x28, 286, 0, 177 }, -+{ 0x18, 286, 0, 178 }, -+{ 0x4, 286, 0, 189 }, -+{ 0x2, 286, 0, 190 }, -+{ 0x1, 286, 0, 191 }, -+{ 0x8, 390, 0, 192 }, -+{ 0x4, 390, 0, 193 }, -+{ 0x2, 390, 0, 194 }, -+{ 0x1, 390, 0, 195 }, -+{ 0x20, 288, 0, 203 }, -+{ 0x220, 288, 0, 199 }, -+{ 0x1220, 288, 0, 196 }, -+{ 0xa20, 288, 0, 197 }, -+{ 0x620, 288, 0, 198 }, -+{ 0x120, 288, 0, 200 }, -+{ 0xa0, 288, 0, 201 }, -+{ 0x60, 288, 0, 202 }, -+{ 0x10, 288, 0, 207 }, -+{ 0x90, 288, 0, 204 }, -+{ 0x50, 288, 0, 205 }, -+{ 0x30, 288, 0, 206 }, -+{ 0x8, 288, 0, 208 }, -+{ 0x4, 288, 0, 209 }, -+{ 0x2, 288, 0, 210 }, -+{ 0x1, 288, 0, 211 }, -+{ 0x20, 287, 0, 219 }, -+{ 0x220, 287, 0, 215 }, -+{ 0x1220, 287, 0, 212 }, -+{ 0xa20, 287, 0, 213 }, -+{ 0x620, 287, 0, 214 }, -+{ 0x120, 287, 0, 216 }, -+{ 0xa0, 287, 0, 217 }, -+{ 0x60, 287, 0, 218 }, -+{ 0x10, 287, 0, 223 }, -+{ 0x90, 287, 0, 220 }, -+{ 0x50, 287, 0, 221 }, -+{ 0x30, 287, 0, 222 }, -+{ 0x8, 287, 0, 224 }, -+{ 0x4, 287, 0, 225 }, -+{ 0x2, 287, 0, 226 }, -+{ 0x1, 287, 0, 227 }, -+{ 0x140, 279, 0, 230 }, -+{ 0x540, 279, 0, 228 }, -+{ 0x340, 279, 0, 229 }, -+{ 0xc0, 279, 0, 239 }, -+{ 0x2c0, 279, 0, 237 }, -+{ 0x1c0, 279, 0, 238 }, -+{ 0x20, 279, 0, 248 }, -+{ 0xa0, 279, 0, 246 }, -+{ 0x60, 279, 0, 247 }, -+{ 0x10, 279, 0, 257 }, -+{ 0x50, 279, 0, 255 }, -+{ 0x30, 279, 0, 256 }, -+{ 0x8, 279, 0, 266 }, -+{ 0x28, 279, 0, 264 }, -+{ 0x18, 279, 0, 265 }, -+{ 0x4, 279, 0, 273 }, -+{ 0x2, 279, 0, 274 }, -+{ 0x1, 279, 0, 275 }, -+{ 0x140, 281, 0, 233 }, -+{ 0x540, 281, 0, 231 }, -+{ 0x340, 281, 0, 232 }, -+{ 0xc0, 281, 0, 242 }, -+{ 0x2c0, 281, 0, 240 }, 
-+{ 0x1c0, 281, 0, 241 }, -+{ 0x20, 281, 0, 251 }, -+{ 0xa0, 281, 0, 249 }, -+{ 0x60, 281, 0, 250 }, -+{ 0x10, 281, 0, 260 }, -+{ 0x50, 281, 0, 258 }, -+{ 0x30, 281, 0, 259 }, -+{ 0x8, 281, 0, 269 }, -+{ 0x28, 281, 0, 267 }, -+{ 0x18, 281, 0, 268 }, -+{ 0x4, 281, 0, 276 }, -+{ 0x2, 281, 0, 277 }, -+{ 0x1, 281, 0, 278 }, -+{ 0x140, 283, 0, 236 }, -+{ 0x540, 283, 0, 234 }, -+{ 0x340, 283, 0, 235 }, -+{ 0xc0, 283, 0, 245 }, -+{ 0x2c0, 283, 0, 243 }, -+{ 0x1c0, 283, 0, 244 }, -+{ 0x20, 283, 0, 254 }, -+{ 0xa0, 283, 0, 252 }, -+{ 0x60, 283, 0, 253 }, -+{ 0x10, 283, 0, 263 }, -+{ 0x50, 283, 0, 261 }, -+{ 0x30, 283, 0, 262 }, -+{ 0x8, 283, 0, 272 }, -+{ 0x28, 283, 0, 270 }, -+{ 0x18, 283, 0, 271 }, -+{ 0x4, 283, 0, 279 }, -+{ 0x2, 283, 0, 280 }, -+{ 0x1, 283, 0, 281 }, -+{ 0x140, 278, 0, 284 }, -+{ 0x540, 278, 0, 282 }, -+{ 0x340, 278, 0, 283 }, -+{ 0xc0, 278, 0, 293 }, -+{ 0x2c0, 278, 0, 291 }, -+{ 0x1c0, 278, 0, 292 }, -+{ 0x20, 278, 0, 302 }, -+{ 0xa0, 278, 0, 300 }, -+{ 0x60, 278, 0, 301 }, -+{ 0x10, 278, 0, 311 }, -+{ 0x50, 278, 0, 309 }, -+{ 0x30, 278, 0, 310 }, -+{ 0x8, 278, 0, 320 }, -+{ 0x28, 278, 0, 318 }, -+{ 0x18, 278, 0, 319 }, -+{ 0x4, 278, 0, 327 }, -+{ 0x2, 278, 0, 328 }, -+{ 0x1, 278, 0, 329 }, -+{ 0x140, 280, 0, 287 }, -+{ 0x540, 280, 0, 285 }, -+{ 0x340, 280, 0, 286 }, -+{ 0xc0, 280, 0, 296 }, -+{ 0x2c0, 280, 0, 294 }, -+{ 0x1c0, 280, 0, 295 }, -+{ 0x20, 280, 0, 305 }, -+{ 0xa0, 280, 0, 303 }, -+{ 0x60, 280, 0, 304 }, -+{ 0x10, 280, 0, 314 }, -+{ 0x50, 280, 0, 312 }, -+{ 0x30, 280, 0, 313 }, -+{ 0x8, 280, 0, 323 }, -+{ 0x28, 280, 0, 321 }, -+{ 0x18, 280, 0, 322 }, -+{ 0x4, 280, 0, 330 }, -+{ 0x2, 280, 0, 331 }, -+{ 0x1, 280, 0, 332 }, -+{ 0x140, 282, 0, 290 }, -+{ 0x540, 282, 0, 288 }, -+{ 0x340, 282, 0, 289 }, -+{ 0xc0, 282, 0, 299 }, -+{ 0x2c0, 282, 0, 297 }, -+{ 0x1c0, 282, 0, 298 }, -+{ 0x20, 282, 0, 308 }, -+{ 0xa0, 282, 0, 306 }, -+{ 0x60, 282, 0, 307 }, -+{ 0x10, 282, 0, 317 }, -+{ 0x50, 282, 0, 315 }, -+{ 0x30, 282, 0, 316 }, -+{ 0x8, 282, 0, 326 }, -+{ 0x28, 282, 0, 324 }, -+{ 0x18, 282, 0, 325 }, -+{ 0x4, 282, 0, 333 }, -+{ 0x2, 282, 0, 334 }, -+{ 0x1, 282, 0, 335 }, -+{ 0x1, 410, 0, 337 }, -+{ 0x3, 410, 0, 336 }, -+{ 0x2, 416, 0, 338 }, -+{ 0x1, 416, 0, 339 }, -+{ 0x2, 412, 0, 340 }, -+{ 0x1, 412, 0, 341 }, -+{ 0x2, 414, 0, 342 }, -+{ 0x1, 414, 0, 343 }, -+{ 0x2, 418, 0, 344 }, -+{ 0x1, 418, 0, 345 }, -+{ 0x1, 267, 0, 372 }, -+{ 0x5, 267, 0, 370 }, -+{ 0x3, 267, 0, 371 }, -+{ 0x140, 276, 0, 348 }, -+{ 0x540, 276, 0, 346 }, -+{ 0x340, 276, 0, 347 }, -+{ 0xc0, 276, 0, 360 }, -+{ 0x2c0, 276, 0, 358 }, -+{ 0x1c0, 276, 0, 359 }, -+{ 0x20, 276, 0, 375 }, -+{ 0xa0, 276, 0, 373 }, -+{ 0x60, 276, 0, 374 }, -+{ 0x10, 276, 0, 387 }, -+{ 0x50, 276, 0, 385 }, -+{ 0x30, 276, 0, 386 }, -+{ 0x8, 276, 0, 399 }, -+{ 0x28, 276, 0, 397 }, -+{ 0x18, 276, 0, 398 }, -+{ 0x4, 276, 0, 409 }, -+{ 0x2, 276, 0, 410 }, -+{ 0x1, 276, 0, 411 }, -+{ 0x140, 270, 0, 351 }, -+{ 0x540, 270, 0, 349 }, -+{ 0x340, 270, 0, 350 }, -+{ 0xc0, 270, 0, 363 }, -+{ 0x2c0, 270, 0, 361 }, -+{ 0x1c0, 270, 0, 362 }, -+{ 0x20, 270, 0, 378 }, -+{ 0xa0, 270, 0, 376 }, -+{ 0x60, 270, 0, 377 }, -+{ 0x10, 270, 0, 390 }, -+{ 0x50, 270, 0, 388 }, -+{ 0x30, 270, 0, 389 }, -+{ 0x8, 270, 0, 402 }, -+{ 0x28, 270, 0, 400 }, -+{ 0x18, 270, 0, 401 }, -+{ 0x4, 270, 0, 412 }, -+{ 0x2, 270, 0, 413 }, -+{ 0x1, 270, 0, 414 }, -+{ 0x140, 273, 0, 354 }, -+{ 0x540, 273, 0, 352 }, -+{ 0x340, 273, 0, 353 }, -+{ 0xc0, 273, 0, 366 }, -+{ 0x2c0, 273, 0, 364 }, -+{ 0x1c0, 273, 0, 365 }, -+{ 0x20, 273, 0, 381 }, -+{ 0xa0, 273, 0, 379 }, -+{ 0x60, 
273, 0, 380 }, -+{ 0x10, 273, 0, 393 }, -+{ 0x50, 273, 0, 391 }, -+{ 0x30, 273, 0, 392 }, -+{ 0x8, 273, 0, 405 }, -+{ 0x28, 273, 0, 403 }, -+{ 0x18, 273, 0, 404 }, -+{ 0x4, 273, 0, 415 }, -+{ 0x2, 273, 0, 416 }, -+{ 0x1, 273, 0, 417 }, -+{ 0x140, 285, 0, 357 }, -+{ 0x540, 285, 0, 355 }, -+{ 0x340, 285, 0, 356 }, -+{ 0xc0, 285, 0, 369 }, -+{ 0x2c0, 285, 0, 367 }, -+{ 0x1c0, 285, 0, 368 }, -+{ 0x20, 285, 0, 384 }, -+{ 0xa0, 285, 0, 382 }, -+{ 0x60, 285, 0, 383 }, -+{ 0x10, 285, 0, 396 }, -+{ 0x50, 285, 0, 394 }, -+{ 0x30, 285, 0, 395 }, -+{ 0x8, 285, 0, 408 }, -+{ 0x28, 285, 0, 406 }, -+{ 0x18, 285, 0, 407 }, -+{ 0x4, 285, 0, 418 }, -+{ 0x2, 285, 0, 419 }, -+{ 0x1, 285, 0, 420 }, -+{ 0x1, 266, 0, 447 }, -+{ 0x5, 266, 0, 445 }, -+{ 0x3, 266, 0, 446 }, -+{ 0x140, 275, 0, 423 }, -+{ 0x540, 275, 0, 421 }, -+{ 0x340, 275, 0, 422 }, -+{ 0xc0, 275, 0, 435 }, -+{ 0x2c0, 275, 0, 433 }, -+{ 0x1c0, 275, 0, 434 }, -+{ 0x20, 275, 0, 450 }, -+{ 0xa0, 275, 0, 448 }, -+{ 0x60, 275, 0, 449 }, -+{ 0x10, 275, 0, 462 }, -+{ 0x50, 275, 0, 460 }, -+{ 0x30, 275, 0, 461 }, -+{ 0x8, 275, 0, 474 }, -+{ 0x28, 275, 0, 472 }, -+{ 0x18, 275, 0, 473 }, -+{ 0x4, 275, 0, 484 }, -+{ 0x2, 275, 0, 485 }, -+{ 0x1, 275, 0, 486 }, -+{ 0x140, 269, 0, 426 }, -+{ 0x540, 269, 0, 424 }, -+{ 0x340, 269, 0, 425 }, -+{ 0xc0, 269, 0, 438 }, -+{ 0x2c0, 269, 0, 436 }, -+{ 0x1c0, 269, 0, 437 }, -+{ 0x20, 269, 0, 453 }, -+{ 0xa0, 269, 0, 451 }, -+{ 0x60, 269, 0, 452 }, -+{ 0x10, 269, 0, 465 }, -+{ 0x50, 269, 0, 463 }, -+{ 0x30, 269, 0, 464 }, -+{ 0x8, 269, 0, 477 }, -+{ 0x28, 269, 0, 475 }, -+{ 0x18, 269, 0, 476 }, -+{ 0x4, 269, 0, 487 }, -+{ 0x2, 269, 0, 488 }, -+{ 0x1, 269, 0, 489 }, -+{ 0x140, 272, 0, 429 }, -+{ 0x540, 272, 0, 427 }, -+{ 0x340, 272, 0, 428 }, -+{ 0xc0, 272, 0, 441 }, -+{ 0x2c0, 272, 0, 439 }, -+{ 0x1c0, 272, 0, 440 }, -+{ 0x20, 272, 0, 456 }, -+{ 0xa0, 272, 0, 454 }, -+{ 0x60, 272, 0, 455 }, -+{ 0x10, 272, 0, 468 }, -+{ 0x50, 272, 0, 466 }, -+{ 0x30, 272, 0, 467 }, -+{ 0x8, 272, 0, 480 }, -+{ 0x28, 272, 0, 478 }, -+{ 0x18, 272, 0, 479 }, -+{ 0x4, 272, 0, 490 }, -+{ 0x2, 272, 0, 491 }, -+{ 0x1, 272, 0, 492 }, -+{ 0x140, 284, 0, 432 }, -+{ 0x540, 284, 0, 430 }, -+{ 0x340, 284, 0, 431 }, -+{ 0xc0, 284, 0, 444 }, -+{ 0x2c0, 284, 0, 442 }, -+{ 0x1c0, 284, 0, 443 }, -+{ 0x20, 284, 0, 459 }, -+{ 0xa0, 284, 0, 457 }, -+{ 0x60, 284, 0, 458 }, -+{ 0x10, 284, 0, 471 }, -+{ 0x50, 284, 0, 469 }, -+{ 0x30, 284, 0, 470 }, -+{ 0x8, 284, 0, 483 }, -+{ 0x28, 284, 0, 481 }, -+{ 0x18, 284, 0, 482 }, -+{ 0x4, 284, 0, 493 }, -+{ 0x2, 284, 0, 494 }, -+{ 0x1, 284, 0, 495 }, -+{ 0x8, 409, 0, 497 }, -+{ 0x18, 409, 0, 496 }, -+{ 0x4, 409, 0, 499 }, -+{ 0xc, 409, 0, 498 }, -+{ 0x2, 409, 0, 506 }, -+{ 0x1, 409, 0, 507 }, -+{ 0x4, 407, 0, 501 }, -+{ 0xc, 407, 0, 500 }, -+{ 0x2, 407, 0, 508 }, -+{ 0x1, 407, 0, 509 }, -+{ 0x4, 405, 0, 503 }, -+{ 0xc, 405, 0, 502 }, -+{ 0x2, 405, 0, 510 }, -+{ 0x1, 405, 0, 511 }, -+{ 0x4, 401, 0, 505 }, -+{ 0xc, 401, 0, 504 }, -+{ 0x2, 401, 0, 512 }, -+{ 0x1, 401, 0, 513 }, -+{ 0xa00, 265, 0, 528 }, -+{ 0x2a00, 265, 0, 526 }, -+{ 0x1a00, 265, 0, 527 }, -+{ 0x600, 265, 0, 540 }, -+{ 0x2600, 265, 0, 516 }, -+{ 0xa600, 265, 0, 514 }, -+{ 0x6600, 265, 0, 515 }, -+{ 0x1600, 265, 0, 538 }, -+{ 0xe00, 265, 0, 539 }, -+{ 0x100, 265, 0, 552 }, -+{ 0x500, 265, 0, 550 }, -+{ 0x300, 265, 0, 551 }, -+{ 0x80, 265, 0, 555 }, -+{ 0x280, 265, 0, 553 }, -+{ 0x180, 265, 0, 554 }, -+{ 0x40, 265, 0, 567 }, -+{ 0x140, 265, 0, 565 }, -+{ 0xc0, 265, 0, 566 }, -+{ 0x20, 265, 0, 579 }, -+{ 0xa0, 265, 0, 577 }, -+{ 0x60, 265, 0, 578 }, -+{ 0x10, 
265, 0, 591 }, -+{ 0x50, 265, 0, 589 }, -+{ 0x30, 265, 0, 590 }, -+{ 0x8, 265, 0, 603 }, -+{ 0x28, 265, 0, 601 }, -+{ 0x18, 265, 0, 602 }, -+{ 0x4, 265, 0, 613 }, -+{ 0x2, 265, 0, 614 }, -+{ 0x1, 265, 0, 615 }, -+{ 0x500, 261, 0, 531 }, -+{ 0x1500, 261, 0, 529 }, -+{ 0xd00, 261, 0, 530 }, -+{ 0x300, 261, 0, 543 }, -+{ 0x1300, 261, 0, 519 }, -+{ 0x5300, 261, 0, 517 }, -+{ 0x3300, 261, 0, 518 }, -+{ 0xb00, 261, 0, 541 }, -+{ 0x700, 261, 0, 542 }, -+{ 0x80, 261, 0, 558 }, -+{ 0x280, 261, 0, 556 }, -+{ 0x180, 261, 0, 557 }, -+{ 0x40, 261, 0, 570 }, -+{ 0x140, 261, 0, 568 }, -+{ 0xc0, 261, 0, 569 }, -+{ 0x20, 261, 0, 582 }, -+{ 0xa0, 261, 0, 580 }, -+{ 0x60, 261, 0, 581 }, -+{ 0x10, 261, 0, 594 }, -+{ 0x50, 261, 0, 592 }, -+{ 0x30, 261, 0, 593 }, -+{ 0x8, 261, 0, 606 }, -+{ 0x28, 261, 0, 604 }, -+{ 0x18, 261, 0, 605 }, -+{ 0x4, 261, 0, 616 }, -+{ 0x2, 261, 0, 617 }, -+{ 0x1, 261, 0, 618 }, -+{ 0x500, 258, 0, 534 }, -+{ 0x1500, 258, 0, 532 }, -+{ 0xd00, 258, 0, 533 }, -+{ 0x300, 258, 0, 546 }, -+{ 0x1300, 258, 0, 522 }, -+{ 0x5300, 258, 0, 520 }, -+{ 0x3300, 258, 0, 521 }, -+{ 0xb00, 258, 0, 544 }, -+{ 0x700, 258, 0, 545 }, -+{ 0x80, 258, 0, 561 }, -+{ 0x280, 258, 0, 559 }, -+{ 0x180, 258, 0, 560 }, -+{ 0x40, 258, 0, 573 }, -+{ 0x140, 258, 0, 571 }, -+{ 0xc0, 258, 0, 572 }, -+{ 0x20, 258, 0, 585 }, -+{ 0xa0, 258, 0, 583 }, -+{ 0x60, 258, 0, 584 }, -+{ 0x10, 258, 0, 597 }, -+{ 0x50, 258, 0, 595 }, -+{ 0x30, 258, 0, 596 }, -+{ 0x8, 258, 0, 609 }, -+{ 0x28, 258, 0, 607 }, -+{ 0x18, 258, 0, 608 }, -+{ 0x4, 258, 0, 619 }, -+{ 0x2, 258, 0, 620 }, -+{ 0x1, 258, 0, 621 }, -+{ 0x500, 253, 0, 537 }, -+{ 0x1500, 253, 0, 535 }, -+{ 0xd00, 253, 0, 536 }, -+{ 0x300, 253, 0, 549 }, -+{ 0x1300, 253, 0, 525 }, -+{ 0x5300, 253, 0, 523 }, -+{ 0x3300, 253, 0, 524 }, -+{ 0xb00, 253, 0, 547 }, -+{ 0x700, 253, 0, 548 }, -+{ 0x80, 253, 0, 564 }, -+{ 0x280, 253, 0, 562 }, -+{ 0x180, 253, 0, 563 }, -+{ 0x40, 253, 0, 576 }, -+{ 0x140, 253, 0, 574 }, -+{ 0xc0, 253, 0, 575 }, -+{ 0x20, 253, 0, 588 }, -+{ 0xa0, 253, 0, 586 }, -+{ 0x60, 253, 0, 587 }, -+{ 0x10, 253, 0, 600 }, -+{ 0x50, 253, 0, 598 }, -+{ 0x30, 253, 0, 599 }, -+{ 0x8, 253, 0, 612 }, -+{ 0x28, 253, 0, 610 }, -+{ 0x18, 253, 0, 611 }, -+{ 0x4, 253, 0, 622 }, -+{ 0x2, 253, 0, 623 }, -+{ 0x1, 253, 0, 624 }, -+{ 0x8, 238, 0, 625 }, -+{ 0x4, 238, 0, 626 }, -+{ 0x2, 238, 0, 627 }, -+{ 0x1, 238, 0, 628 }, -+{ 0x2, 176, 0, 631 }, -+{ 0xa, 176, 0, 629 }, -+{ 0x6, 176, 0, 630 }, -+{ 0x1, 176, 0, 637 }, -+{ 0x5, 176, 0, 635 }, -+{ 0x3, 176, 0, 636 }, -+{ 0x2, 175, 0, 634 }, -+{ 0xa, 175, 0, 632 }, -+{ 0x6, 175, 0, 633 }, -+{ 0x1, 175, 0, 640 }, -+{ 0x5, 175, 0, 638 }, -+{ 0x3, 175, 0, 639 }, -+{ 0x4, 446, 0, 641 }, -+{ 0x2, 446, 0, 642 }, -+{ 0x1, 446, 0, 643 }, -+{ 0x4, 445, 0, 644 }, -+{ 0x2, 445, 0, 645 }, -+{ 0x1, 445, 0, 646 }, -+{ 0x4, 444, 0, 647 }, -+{ 0x2, 444, 0, 648 }, -+{ 0x1, 444, 0, 649 }, -+{ 0x4, 443, 0, 650 }, -+{ 0x2, 443, 0, 651 }, -+{ 0x1, 443, 0, 652 }, -+{ 0x2, 123, 1, 658 }, -+{ 0x2, 124, 0, 657 }, -+{ 0xa, 123, 1, 654 }, -+{ 0xa, 124, 0, 653 }, -+{ 0x6, 123, 1, 656 }, -+{ 0x6, 124, 0, 655 }, -+{ 0x1, 123, 1, 688 }, -+{ 0x1, 124, 0, 687 }, -+{ 0x5, 123, 1, 684 }, -+{ 0x5, 124, 0, 683 }, -+{ 0x3, 123, 1, 686 }, -+{ 0x3, 124, 0, 685 }, -+{ 0x2, 131, 1, 664 }, -+{ 0x2, 132, 0, 663 }, -+{ 0xa, 131, 1, 660 }, -+{ 0xa, 132, 0, 659 }, -+{ 0x6, 131, 1, 662 }, -+{ 0x6, 132, 0, 661 }, -+{ 0x1, 131, 1, 694 }, -+{ 0x1, 132, 0, 693 }, -+{ 0x5, 131, 1, 690 }, -+{ 0x5, 132, 0, 689 }, -+{ 0x3, 131, 1, 692 }, -+{ 0x3, 132, 0, 691 }, -+{ 0x2, 129, 1, 670 }, -+{ 0x2, 
130, 0, 669 }, -+{ 0xa, 129, 1, 666 }, -+{ 0xa, 130, 0, 665 }, -+{ 0x6, 129, 1, 668 }, -+{ 0x6, 130, 0, 667 }, -+{ 0x1, 129, 1, 700 }, -+{ 0x1, 130, 0, 699 }, -+{ 0x5, 129, 1, 696 }, -+{ 0x5, 130, 0, 695 }, -+{ 0x3, 129, 1, 698 }, -+{ 0x3, 130, 0, 697 }, -+{ 0x2, 127, 1, 676 }, -+{ 0x2, 128, 0, 675 }, -+{ 0xa, 127, 1, 672 }, -+{ 0xa, 128, 0, 671 }, -+{ 0x6, 127, 1, 674 }, -+{ 0x6, 128, 0, 673 }, -+{ 0x1, 127, 1, 706 }, -+{ 0x1, 128, 0, 705 }, -+{ 0x5, 127, 1, 702 }, -+{ 0x5, 128, 0, 701 }, -+{ 0x3, 127, 1, 704 }, -+{ 0x3, 128, 0, 703 }, -+{ 0x2, 125, 1, 682 }, -+{ 0x2, 126, 0, 681 }, -+{ 0xa, 125, 1, 678 }, -+{ 0xa, 126, 0, 677 }, -+{ 0x6, 125, 1, 680 }, -+{ 0x6, 126, 0, 679 }, -+{ 0x1, 125, 1, 712 }, -+{ 0x1, 126, 0, 711 }, -+{ 0x5, 125, 1, 708 }, -+{ 0x5, 126, 0, 707 }, -+{ 0x3, 125, 1, 710 }, -+{ 0x3, 126, 0, 709 }, -+{ 0x4, 402, 1, 718 }, -+{ 0x4, 403, 0, 717 }, -+{ 0xc, 402, 1, 716 }, -+{ 0xc, 403, 0, 715 }, -+{ 0x2, 402, 1, 728 }, -+{ 0x2, 403, 0, 727 }, -+{ 0x1, 402, 1, 730 }, -+{ 0x1, 403, 0, 729 }, -+{ 0x8, 408, 0, 714 }, -+{ 0x18, 408, 0, 713 }, -+{ 0x4, 408, 0, 720 }, -+{ 0xc, 408, 0, 719 }, -+{ 0x2, 408, 0, 731 }, -+{ 0x1, 408, 0, 732 }, -+{ 0x4, 406, 0, 722 }, -+{ 0xc, 406, 0, 721 }, -+{ 0x2, 406, 0, 733 }, -+{ 0x1, 406, 0, 734 }, -+{ 0x4, 404, 0, 724 }, -+{ 0xc, 404, 0, 723 }, -+{ 0x2, 404, 0, 735 }, -+{ 0x1, 404, 0, 736 }, -+{ 0x4, 400, 0, 726 }, -+{ 0xc, 400, 0, 725 }, -+{ 0x2, 400, 0, 737 }, -+{ 0x1, 400, 0, 738 }, -+{ 0xa00, 264, 0, 753 }, -+{ 0x2a00, 264, 0, 751 }, -+{ 0x1a00, 264, 0, 752 }, -+{ 0x600, 264, 0, 765 }, -+{ 0x2600, 264, 0, 741 }, -+{ 0xa600, 264, 0, 739 }, -+{ 0x6600, 264, 0, 740 }, -+{ 0x1600, 264, 0, 763 }, -+{ 0xe00, 264, 0, 764 }, -+{ 0x100, 264, 0, 777 }, -+{ 0x500, 264, 0, 775 }, -+{ 0x300, 264, 0, 776 }, -+{ 0x80, 264, 0, 780 }, -+{ 0x280, 264, 0, 778 }, -+{ 0x180, 264, 0, 779 }, -+{ 0x40, 264, 0, 792 }, -+{ 0x140, 264, 0, 790 }, -+{ 0xc0, 264, 0, 791 }, -+{ 0x20, 264, 0, 804 }, -+{ 0xa0, 264, 0, 802 }, -+{ 0x60, 264, 0, 803 }, -+{ 0x10, 264, 0, 816 }, -+{ 0x50, 264, 0, 814 }, -+{ 0x30, 264, 0, 815 }, -+{ 0x8, 264, 0, 828 }, -+{ 0x28, 264, 0, 826 }, -+{ 0x18, 264, 0, 827 }, -+{ 0x4, 264, 0, 838 }, -+{ 0x2, 264, 0, 839 }, -+{ 0x1, 264, 0, 840 }, -+{ 0x500, 260, 0, 756 }, -+{ 0x1500, 260, 0, 754 }, -+{ 0xd00, 260, 0, 755 }, -+{ 0x300, 260, 0, 768 }, -+{ 0x1300, 260, 0, 744 }, -+{ 0x5300, 260, 0, 742 }, -+{ 0x3300, 260, 0, 743 }, -+{ 0xb00, 260, 0, 766 }, -+{ 0x700, 260, 0, 767 }, -+{ 0x80, 260, 0, 783 }, -+{ 0x280, 260, 0, 781 }, -+{ 0x180, 260, 0, 782 }, -+{ 0x40, 260, 0, 795 }, -+{ 0x140, 260, 0, 793 }, -+{ 0xc0, 260, 0, 794 }, -+{ 0x20, 260, 0, 807 }, -+{ 0xa0, 260, 0, 805 }, -+{ 0x60, 260, 0, 806 }, -+{ 0x10, 260, 0, 819 }, -+{ 0x50, 260, 0, 817 }, -+{ 0x30, 260, 0, 818 }, -+{ 0x8, 260, 0, 831 }, -+{ 0x28, 260, 0, 829 }, -+{ 0x18, 260, 0, 830 }, -+{ 0x4, 260, 0, 841 }, -+{ 0x2, 260, 0, 842 }, -+{ 0x1, 260, 0, 843 }, -+{ 0x500, 257, 0, 759 }, -+{ 0x1500, 257, 0, 757 }, -+{ 0xd00, 257, 0, 758 }, -+{ 0x300, 257, 0, 771 }, -+{ 0x1300, 257, 0, 747 }, -+{ 0x5300, 257, 0, 745 }, -+{ 0x3300, 257, 0, 746 }, -+{ 0xb00, 257, 0, 769 }, -+{ 0x700, 257, 0, 770 }, -+{ 0x80, 257, 0, 786 }, -+{ 0x280, 257, 0, 784 }, -+{ 0x180, 257, 0, 785 }, -+{ 0x40, 257, 0, 798 }, -+{ 0x140, 257, 0, 796 }, -+{ 0xc0, 257, 0, 797 }, -+{ 0x20, 257, 0, 810 }, -+{ 0xa0, 257, 0, 808 }, -+{ 0x60, 257, 0, 809 }, -+{ 0x10, 257, 0, 822 }, -+{ 0x50, 257, 0, 820 }, -+{ 0x30, 257, 0, 821 }, -+{ 0x8, 257, 0, 834 }, -+{ 0x28, 257, 0, 832 }, -+{ 0x18, 257, 0, 833 }, -+{ 0x4, 257, 0, 844 }, -+{ 
0x2, 257, 0, 845 }, -+{ 0x1, 257, 0, 846 }, -+{ 0x500, 252, 0, 762 }, -+{ 0x1500, 252, 0, 760 }, -+{ 0xd00, 252, 0, 761 }, -+{ 0x300, 252, 0, 774 }, -+{ 0x1300, 252, 0, 750 }, -+{ 0x5300, 252, 0, 748 }, -+{ 0x3300, 252, 0, 749 }, -+{ 0xb00, 252, 0, 772 }, -+{ 0x700, 252, 0, 773 }, -+{ 0x80, 252, 0, 789 }, -+{ 0x280, 252, 0, 787 }, -+{ 0x180, 252, 0, 788 }, -+{ 0x40, 252, 0, 801 }, -+{ 0x140, 252, 0, 799 }, -+{ 0xc0, 252, 0, 800 }, -+{ 0x20, 252, 0, 813 }, -+{ 0xa0, 252, 0, 811 }, -+{ 0x60, 252, 0, 812 }, -+{ 0x10, 252, 0, 825 }, -+{ 0x50, 252, 0, 823 }, -+{ 0x30, 252, 0, 824 }, -+{ 0x8, 252, 0, 837 }, -+{ 0x28, 252, 0, 835 }, -+{ 0x18, 252, 0, 836 }, -+{ 0x4, 252, 0, 847 }, -+{ 0x2, 252, 0, 848 }, -+{ 0x1, 252, 0, 849 }, -+{ 0x8, 254, 1, 895 }, -+{ 0x8, 255, 0, 894 }, -+{ 0x28, 254, 1, 891 }, -+{ 0x28, 255, 0, 890 }, -+{ 0x18, 254, 1, 893 }, -+{ 0x18, 255, 0, 892 }, -+{ 0x4, 254, 1, 957 }, -+{ 0x4, 255, 0, 956 }, -+{ 0x2, 254, 1, 959 }, -+{ 0x2, 255, 0, 958 }, -+{ 0x1, 254, 1, 961 }, -+{ 0x1, 255, 0, 960 }, -+{ 0xa00, 262, 0, 865 }, -+{ 0x2a00, 262, 0, 863 }, -+{ 0x1a00, 262, 0, 864 }, -+{ 0x600, 262, 0, 877 }, -+{ 0x2600, 262, 0, 853 }, -+{ 0xa600, 262, 0, 851 }, -+{ 0x6600, 262, 0, 852 }, -+{ 0x1600, 262, 0, 875 }, -+{ 0xe00, 262, 0, 876 }, -+{ 0x100, 262, 0, 889 }, -+{ 0x500, 262, 0, 887 }, -+{ 0x300, 262, 0, 888 }, -+{ 0x80, 262, 0, 898 }, -+{ 0x280, 262, 0, 896 }, -+{ 0x180, 262, 0, 897 }, -+{ 0x40, 262, 0, 910 }, -+{ 0x140, 262, 0, 908 }, -+{ 0xc0, 262, 0, 909 }, -+{ 0x20, 262, 0, 922 }, -+{ 0xa0, 262, 0, 920 }, -+{ 0x60, 262, 0, 921 }, -+{ 0x10, 262, 0, 934 }, -+{ 0x50, 262, 0, 932 }, -+{ 0x30, 262, 0, 933 }, -+{ 0x8, 262, 0, 946 }, -+{ 0x28, 262, 0, 944 }, -+{ 0x18, 262, 0, 945 }, -+{ 0x4, 262, 0, 962 }, -+{ 0x2, 262, 0, 963 }, -+{ 0x1, 262, 1, 964 }, -+{ 0x1, 263, 0, 850 }, -+{ 0x500, 259, 0, 868 }, -+{ 0x1500, 259, 0, 866 }, -+{ 0xd00, 259, 0, 867 }, -+{ 0x300, 259, 0, 880 }, -+{ 0x1300, 259, 0, 856 }, -+{ 0x5300, 259, 0, 854 }, -+{ 0x3300, 259, 0, 855 }, -+{ 0xb00, 259, 0, 878 }, -+{ 0x700, 259, 0, 879 }, -+{ 0x80, 259, 0, 901 }, -+{ 0x280, 259, 0, 899 }, -+{ 0x180, 259, 0, 900 }, -+{ 0x40, 259, 0, 913 }, -+{ 0x140, 259, 0, 911 }, -+{ 0xc0, 259, 0, 912 }, -+{ 0x20, 259, 0, 925 }, -+{ 0xa0, 259, 0, 923 }, -+{ 0x60, 259, 0, 924 }, -+{ 0x10, 259, 0, 937 }, -+{ 0x50, 259, 0, 935 }, -+{ 0x30, 259, 0, 936 }, -+{ 0x8, 259, 0, 949 }, -+{ 0x28, 259, 0, 947 }, -+{ 0x18, 259, 0, 948 }, -+{ 0x4, 259, 0, 965 }, -+{ 0x2, 259, 0, 966 }, -+{ 0x1, 259, 0, 967 }, -+{ 0x500, 256, 0, 871 }, -+{ 0x1500, 256, 0, 869 }, -+{ 0xd00, 256, 0, 870 }, -+{ 0x300, 256, 0, 883 }, -+{ 0x1300, 256, 0, 859 }, -+{ 0x5300, 256, 0, 857 }, -+{ 0x3300, 256, 0, 858 }, -+{ 0xb00, 256, 0, 881 }, -+{ 0x700, 256, 0, 882 }, -+{ 0x80, 256, 0, 904 }, -+{ 0x280, 256, 0, 902 }, -+{ 0x180, 256, 0, 903 }, -+{ 0x40, 256, 0, 916 }, -+{ 0x140, 256, 0, 914 }, -+{ 0xc0, 256, 0, 915 }, -+{ 0x20, 256, 0, 928 }, -+{ 0xa0, 256, 0, 926 }, -+{ 0x60, 256, 0, 927 }, -+{ 0x10, 256, 0, 940 }, -+{ 0x50, 256, 0, 938 }, -+{ 0x30, 256, 0, 939 }, -+{ 0x8, 256, 0, 952 }, -+{ 0x28, 256, 0, 950 }, -+{ 0x18, 256, 0, 951 }, -+{ 0x4, 256, 0, 968 }, -+{ 0x2, 256, 0, 969 }, -+{ 0x1, 256, 0, 970 }, -+{ 0x500, 251, 0, 874 }, -+{ 0x1500, 251, 0, 872 }, -+{ 0xd00, 251, 0, 873 }, -+{ 0x300, 251, 0, 886 }, -+{ 0x1300, 251, 0, 862 }, -+{ 0x5300, 251, 0, 860 }, -+{ 0x3300, 251, 0, 861 }, -+{ 0xb00, 251, 0, 884 }, -+{ 0x700, 251, 0, 885 }, -+{ 0x80, 251, 0, 907 }, -+{ 0x280, 251, 0, 905 }, -+{ 0x180, 251, 0, 906 }, -+{ 0x40, 251, 0, 919 }, -+{ 0x140, 251, 0, 917 }, 
-+{ 0xc0, 251, 0, 918 }, -+{ 0x20, 251, 0, 931 }, -+{ 0xa0, 251, 0, 929 }, -+{ 0x60, 251, 0, 930 }, -+{ 0x10, 251, 0, 943 }, -+{ 0x50, 251, 0, 941 }, -+{ 0x30, 251, 0, 942 }, -+{ 0x8, 251, 0, 955 }, -+{ 0x28, 251, 0, 953 }, -+{ 0x18, 251, 0, 954 }, -+{ 0x4, 251, 0, 971 }, -+{ 0x2, 251, 0, 972 }, -+{ 0x1, 251, 0, 973 }, -+{ 0x2, 150, 0, 975 }, -+{ 0x1, 150, 0, 976 }, -+{ 0x1, 50, 0, 977 }, -+{ 0x3, 49, 0, 978 }, -+{ 0x1, 428, 0, 979 }, -+{ 0x1, 438, 0, 980 }, -+{ 0x2, 386, 0, 983 }, -+{ 0x1, 386, 0, 984 }, -+{ 0x2, 384, 0, 985 }, -+{ 0x1, 384, 0, 986 }, -+{ 0x1, 383, 0, 987 }, -+{ 0x1, 328, 0, 992 }, -+{ 0x1, 327, 0, 993 }, -+{ 0x1, 326, 0, 994 }, -+{ 0x1, 325, 0, 995 }, -+{ 0x1, 250, 0, 996 }, -+{ 0x1, 249, 0, 997 }, -+{ 0x1, 324, 0, 998 }, -+{ 0x1, 323, 0, 999 }, -+{ 0x1, 322, 0, 1000 }, -+{ 0x1, 321, 0, 1001 }, -+{ 0x1, 320, 0, 1002 }, -+{ 0x1, 319, 0, 1003 }, -+{ 0x1, 318, 0, 1004 }, -+{ 0x2, 248, 0, 1005 }, -+{ 0x1, 248, 0, 1006 }, -+{ 0x2, 366, 0, 1012 }, -+{ 0x1, 366, 0, 1013 }, -+{ 0x1, 317, 0, 1014 }, -+{ 0x1, 316, 0, 1015 }, -+{ 0x1, 315, 0, 1016 }, -+{ 0x1, 314, 0, 1017 }, -+{ 0x1, 8, 1, 1019 }, -+{ 0x1, 9, 0, 1018 }, -+{ 0x1, 313, 0, 1020 }, -+{ 0x1, 312, 0, 1021 }, -+{ 0x1, 311, 0, 1022 }, -+{ 0x1, 310, 0, 1023 }, -+{ 0x1, 388, 0, 1024 }, -+{ 0x1, 399, 0, 1025 }, -+{ 0x1, 389, 0, 1026 }, -+{ 0x1, 423, 0, 1027 }, -+{ 0x1, 309, 0, 1031 }, -+{ 0x1, 247, 0, 1032 }, -+{ 0x1, 177, 0, 1035 }, -+{ 0x2, 291, 0, 1039 }, -+{ 0x1, 291, 0, 1040 }, -+{ 0x1, 236, 0, 1041 }, -+{ 0x5, 48, 0, 1043 }, -+{ 0x3, 48, 0, 1044 }, -+{ 0x5, 47, 0, 1045 }, -+{ 0x3, 47, 0, 1046 }, -+{ 0x1, 365, 0, 1047 }, -+{ 0x1, 373, 0, 1048 }, -+{ 0x1, 371, 0, 1049 }, -+{ 0x1, 392, 0, 1050 }, -+{ 0x1, 372, 0, 1051 }, -+{ 0x1, 370, 0, 1052 }, -+{ 0x2, 378, 0, 1053 }, -+{ 0x1, 378, 0, 1055 }, -+{ 0x2, 376, 0, 1054 }, -+{ 0x1, 376, 0, 1056 }, -+{ 0x2, 396, 0, 1057 }, -+{ 0x1, 396, 0, 1060 }, -+{ 0x2, 377, 0, 1058 }, -+{ 0x1, 377, 0, 1061 }, -+{ 0x2, 375, 0, 1059 }, -+{ 0x1, 375, 0, 1062 }, -+{ 0x1, 338, 0, 1063 }, -+{ 0x1, 337, 0, 1064 }, -+{ 0x1, 369, 0, 1065 }, -+{ 0x1, 360, 0, 1066 }, -+{ 0x1, 362, 0, 1067 }, -+{ 0x1, 359, 0, 1068 }, -+{ 0x1, 361, 0, 1069 }, -+{ 0x2, 442, 0, 1070 }, -+{ 0x1, 442, 0, 1073 }, -+{ 0x2, 441, 0, 1071 }, -+{ 0x1, 441, 0, 1074 }, -+{ 0x2, 440, 0, 1072 }, -+{ 0x1, 440, 0, 1075 }, -+{ 0x1, 348, 0, 1076 }, -+{ 0x2, 347, 0, 1077 }, -+{ 0x1, 347, 0, 1078 }, -+{ 0x2, 294, 0, 1079 }, -+{ 0x1, 294, 0, 1082 }, -+{ 0x2, 293, 0, 1080 }, -+{ 0x1, 293, 0, 1083 }, -+{ 0x2, 292, 0, 1081 }, -+{ 0x1, 292, 0, 1084 }, -+{ 0x2, 363, 0, 1085 }, -+{ 0x1, 363, 0, 1086 }, -+{ 0x2, 364, 0, 1087 }, -+{ 0x1, 364, 0, 1088 }, -+{ 0xa, 434, 1, 1100 }, -+{ 0xa, 435, 1, 1099 }, -+{ 0xa, 436, 1, 1098 }, -+{ 0xa, 437, 0, 1097 }, -+{ 0x1a, 434, 1, 1092 }, -+{ 0x1a, 435, 1, 1091 }, -+{ 0x32, 436, 1, 1090 }, -+{ 0x32, 437, 0, 1089 }, -+{ 0x6, 434, 1, 1108 }, -+{ 0x6, 435, 1, 1107 }, -+{ 0x6, 436, 1, 1106 }, -+{ 0x6, 437, 0, 1105 }, -+{ 0x1, 434, 1, 1120 }, -+{ 0x1, 435, 1, 1119 }, -+{ 0x1, 436, 1, 1118 }, -+{ 0x1, 437, 0, 1117 }, -+{ 0x9, 434, 1, 1104 }, -+{ 0x9, 435, 1, 1103 }, -+{ 0x9, 436, 1, 1102 }, -+{ 0x9, 437, 0, 1101 }, -+{ 0x19, 434, 1, 1096 }, -+{ 0x19, 435, 1, 1095 }, -+{ 0x31, 436, 1, 1094 }, -+{ 0x31, 437, 0, 1093 }, -+{ 0x5, 434, 1, 1112 }, -+{ 0x5, 435, 1, 1111 }, -+{ 0x5, 436, 1, 1110 }, -+{ 0x5, 437, 0, 1109 }, -+{ 0x3, 434, 1, 1116 }, -+{ 0x3, 435, 1, 1115 }, -+{ 0x3, 436, 1, 1114 }, -+{ 0x3, 437, 0, 1113 }, -+{ 0xa, 429, 1, 1132 }, -+{ 0xa, 430, 1, 1131 }, -+{ 0xa, 431, 1, 1130 }, -+{ 0xa, 432, 0, 1129 }, -+{ 
0x1a, 429, 1, 1124 }, -+{ 0x1a, 430, 1, 1123 }, -+{ 0x32, 431, 1, 1122 }, -+{ 0x32, 432, 0, 1121 }, -+{ 0x6, 429, 1, 1140 }, -+{ 0x6, 430, 1, 1139 }, -+{ 0x6, 431, 1, 1138 }, -+{ 0x6, 432, 0, 1137 }, -+{ 0x1, 429, 1, 1152 }, -+{ 0x1, 430, 1, 1151 }, -+{ 0x1, 431, 1, 1150 }, -+{ 0x1, 432, 0, 1149 }, -+{ 0x9, 429, 1, 1136 }, -+{ 0x9, 430, 1, 1135 }, -+{ 0x9, 431, 1, 1134 }, -+{ 0x9, 432, 0, 1133 }, -+{ 0x19, 429, 1, 1128 }, -+{ 0x19, 430, 1, 1127 }, -+{ 0x31, 431, 1, 1126 }, -+{ 0x31, 432, 0, 1125 }, -+{ 0x5, 429, 1, 1144 }, -+{ 0x5, 430, 1, 1143 }, -+{ 0x5, 431, 1, 1142 }, -+{ 0x5, 432, 0, 1141 }, -+{ 0x3, 429, 1, 1148 }, -+{ 0x3, 430, 1, 1147 }, -+{ 0x3, 431, 1, 1146 }, -+{ 0x3, 432, 0, 1145 }, -+{ 0x1, 139, 0, 1153 }, -+{ 0x1, 138, 0, 1154 }, -+{ 0x1, 391, 1, 1156 }, -+{ 0x1, 137, 0, 1155 }, -+{ 0x2, 395, 1, 1158 }, -+{ 0x2, 141, 0, 1157 }, -+{ 0x1, 395, 1, 1160 }, -+{ 0x1, 141, 0, 1159 }, -+{ 0x1, 397, 0, 1161 }, -+{ 0x1, 136, 0, 1162 }, -+{ 0x2, 135, 0, 1163 }, -+{ 0x2, 134, 0, 1164 }, -+{ 0x1, 454, 1, 1170 }, -+{ 0x1, 246, 0, 1033 }, -+{ 0x1, 453, 0, 1171 }, -+{ 0x1, 452, 1, 1172 }, -+{ 0x1, 245, 0, 1042 }, -+{ 0x1, 308, 0, 1173 }, -+{ 0x1, 307, 1, 1174 }, -+{ 0x1, 290, 0, 1034 }, -+{ 0x1, 306, 0, 1175 }, -+{ 0x1, 305, 1, 1176 }, -+{ 0x1, 427, 0, 1036 }, -+{ 0x1, 304, 1, 1177 }, -+{ 0x1, 398, 0, 1038 }, -+{ 0x1, 303, 0, 1178 }, -+{ 0x1, 302, 0, 1179 }, -+{ 0x1, 301, 0, 1180 }, -+{ 0x1, 300, 1, 1181 }, -+{ 0x2, 398, 0, 1037 }, -+{ 0x10, 299, 0, 1185 }, -+{ 0x90, 299, 0, 1183 }, -+{ 0x190, 299, 0, 1182 }, -+{ 0x50, 299, 0, 1184 }, -+{ 0x30, 299, 0, 1187 }, -+{ 0x70, 299, 0, 1186 }, -+{ 0x8, 299, 0, 1189 }, -+{ 0x18, 299, 0, 1188 }, -+{ 0x4, 299, 0, 1190 }, -+{ 0x1, 299, 0, 1193 }, -+{ 0x3, 299, 0, 1192 }, -+{ 0x1, 298, 1, 1194 }, -+{ 0x2, 299, 0, 1191 }, -+{ 0x3, 46, 0, 1195 }, -+{ 0x1, 241, 1, 1196 }, -+{ 0x1, 242, 1, 1028 }, -+{ 0x1, 243, 0, 88 }, -+{ 0x1, 341, 1, 1197 }, -+{ 0x1, 342, 1, 1029 }, -+{ 0x1, 343, 0, 89 }, -+{ 0x1, 34, 1, 1198 }, -+{ 0x1, 35, 1, 1030 }, -+{ 0x1, 36, 0, 90 }, -+{ 0x1, 230, 0, 1199 }, -+{ 0x4, 447, 0, 1200 }, -+{ 0x2, 447, 0, 1201 }, -+{ 0x1, 447, 1, 1203 }, -+{ 0x1, 448, 0, 1202 }, -+{ 0x8, 449, 0, 1204 }, -+{ 0x4, 449, 0, 1205 }, -+{ 0x1, 449, 1, 1207 }, -+{ 0x2, 449, 0, 1206 }, -+{ 0x8, 219, 0, 1208 }, -+{ 0x4, 219, 0, 1209 }, -+{ 0x2, 219, 0, 1210 }, -+{ 0x1, 219, 1, 1212 }, -+{ 0x1, 220, 0, 1211 }, -+{ 0x10, 221, 0, 1213 }, -+{ 0x8, 221, 0, 1214 }, -+{ 0x4, 221, 0, 1215 }, -+{ 0x1, 221, 1, 1217 }, -+{ 0x2, 221, 0, 1216 }, -+{ 0x220, 191, 0, 1218 }, -+{ 0x120, 191, 0, 1219 }, -+{ 0xa0, 191, 0, 1220 }, -+{ 0x60, 191, 1, 1222 }, -+{ 0x4, 192, 0, 1221 }, -+{ 0x110, 191, 0, 1228 }, -+{ 0x90, 191, 0, 1229 }, -+{ 0x50, 191, 0, 1230 }, -+{ 0x30, 191, 1, 1232 }, -+{ 0x2, 192, 0, 1231 }, -+{ 0x8, 191, 0, 1233 }, -+{ 0x4, 191, 0, 1234 }, -+{ 0x2, 191, 0, 1235 }, -+{ 0x1, 191, 1, 1237 }, -+{ 0x1, 192, 0, 1236 }, -+{ 0x440, 193, 0, 1223 }, -+{ 0x240, 193, 0, 1224 }, -+{ 0x140, 193, 0, 1225 }, -+{ 0xc0, 193, 1, 1227 }, -+{ 0x40, 193, 0, 1226 }, -+{ 0x220, 193, 0, 1238 }, -+{ 0x120, 193, 0, 1239 }, -+{ 0xa0, 193, 0, 1240 }, -+{ 0x60, 193, 1, 1242 }, -+{ 0x20, 193, 0, 1241 }, -+{ 0x10, 193, 0, 1243 }, -+{ 0x8, 193, 0, 1244 }, -+{ 0x4, 193, 0, 1245 }, -+{ 0x1, 193, 1, 1247 }, -+{ 0x2, 193, 0, 1246 }, -+{ 0x8, 215, 0, 1248 }, -+{ 0x4, 215, 0, 1249 }, -+{ 0x2, 215, 0, 1250 }, -+{ 0x1, 215, 1, 1252 }, -+{ 0x1, 216, 0, 1251 }, -+{ 0x220, 187, 0, 1253 }, -+{ 0x120, 187, 0, 1254 }, -+{ 0xa0, 187, 0, 1255 }, -+{ 0x60, 187, 1, 1257 }, -+{ 0x4, 188, 0, 1256 }, -+{ 0x110, 187, 0, 
1263 }, -+{ 0x90, 187, 0, 1264 }, -+{ 0x50, 187, 0, 1265 }, -+{ 0x30, 187, 1, 1267 }, -+{ 0x2, 188, 0, 1266 }, -+{ 0x8, 187, 0, 1268 }, -+{ 0x4, 187, 0, 1269 }, -+{ 0x2, 187, 0, 1270 }, -+{ 0x1, 187, 1, 1272 }, -+{ 0x1, 188, 0, 1271 }, -+{ 0x440, 233, 0, 1258 }, -+{ 0x240, 233, 0, 1259 }, -+{ 0x140, 233, 0, 1260 }, -+{ 0xc0, 233, 1, 1262 }, -+{ 0x40, 233, 0, 1261 }, -+{ 0x220, 233, 0, 1273 }, -+{ 0x120, 233, 0, 1274 }, -+{ 0xa0, 233, 0, 1275 }, -+{ 0x60, 233, 1, 1277 }, -+{ 0x20, 233, 0, 1276 }, -+{ 0x10, 233, 0, 1278 }, -+{ 0x8, 233, 0, 1279 }, -+{ 0x4, 233, 0, 1280 }, -+{ 0x1, 233, 1, 1282 }, -+{ 0x2, 233, 0, 1281 }, -+{ 0x8, 207, 0, 1283 }, -+{ 0x4, 207, 0, 1284 }, -+{ 0x2, 207, 0, 1285 }, -+{ 0x1, 207, 1, 1287 }, -+{ 0x1, 208, 0, 1286 }, -+{ 0x10, 214, 0, 1288 }, -+{ 0x8, 214, 0, 1289 }, -+{ 0x4, 214, 0, 1290 }, -+{ 0x1, 214, 1, 1292 }, -+{ 0x2, 214, 0, 1291 }, -+{ 0x220, 178, 0, 1293 }, -+{ 0x120, 178, 0, 1294 }, -+{ 0xa0, 178, 0, 1295 }, -+{ 0x60, 178, 1, 1297 }, -+{ 0x4, 179, 0, 1296 }, -+{ 0x110, 178, 0, 1318 }, -+{ 0x90, 178, 0, 1319 }, -+{ 0x50, 178, 0, 1320 }, -+{ 0x30, 178, 1, 1322 }, -+{ 0x2, 179, 0, 1321 }, -+{ 0x8, 178, 0, 1323 }, -+{ 0x4, 178, 0, 1324 }, -+{ 0x2, 178, 0, 1325 }, -+{ 0x1, 178, 1, 1327 }, -+{ 0x1, 179, 0, 1326 }, -+{ 0x440, 186, 0, 1298 }, -+{ 0x240, 186, 0, 1299 }, -+{ 0x140, 186, 0, 1300 }, -+{ 0xc0, 186, 1, 1302 }, -+{ 0x40, 186, 0, 1301 }, -+{ 0x220, 186, 0, 1328 }, -+{ 0x120, 186, 0, 1329 }, -+{ 0xa0, 186, 0, 1330 }, -+{ 0x60, 186, 1, 1332 }, -+{ 0x20, 186, 0, 1331 }, -+{ 0x10, 186, 0, 1333 }, -+{ 0x8, 186, 0, 1334 }, -+{ 0x4, 186, 0, 1335 }, -+{ 0x1, 186, 1, 1337 }, -+{ 0x2, 186, 0, 1336 }, -+{ 0x440, 143, 0, 1303 }, -+{ 0x240, 143, 0, 1304 }, -+{ 0x140, 143, 0, 1305 }, -+{ 0xc0, 143, 1, 1307 }, -+{ 0x40, 143, 0, 1306 }, -+{ 0x220, 143, 0, 1338 }, -+{ 0x120, 143, 0, 1339 }, -+{ 0xa0, 143, 0, 1340 }, -+{ 0x60, 143, 1, 1342 }, -+{ 0x20, 143, 0, 1341 }, -+{ 0x10, 143, 0, 1343 }, -+{ 0x8, 143, 0, 1344 }, -+{ 0x1, 143, 1, 1347 }, -+{ 0x2, 143, 0, 1346 }, -+{ 0x440, 194, 1, 1313 }, -+{ 0x441, 174, 0, 1308 }, -+{ 0x240, 194, 1, 1314 }, -+{ 0x241, 174, 0, 1309 }, -+{ 0x140, 194, 1, 1315 }, -+{ 0x141, 174, 0, 1310 }, -+{ 0xc0, 194, 1, 1317 }, -+{ 0x40, 194, 1, 1316 }, -+{ 0xc1, 174, 1, 1312 }, -+{ 0x41, 174, 0, 1311 }, -+{ 0x220, 194, 1, 1358 }, -+{ 0x221, 174, 0, 1348 }, -+{ 0x120, 194, 1, 1359 }, -+{ 0x121, 174, 0, 1349 }, -+{ 0xa0, 194, 1, 1360 }, -+{ 0xa1, 174, 0, 1350 }, -+{ 0x60, 194, 1, 1362 }, -+{ 0x20, 194, 1, 1361 }, -+{ 0x61, 174, 1, 1352 }, -+{ 0x21, 174, 0, 1351 }, -+{ 0x10, 194, 1, 1363 }, -+{ 0x11, 174, 0, 1353 }, -+{ 0x8, 194, 1, 1364 }, -+{ 0x9, 174, 0, 1354 }, -+{ 0x4, 194, 1, 1365 }, -+{ 0x5, 174, 0, 1355 }, -+{ 0x1, 194, 1, 1367 }, -+{ 0x2, 194, 1, 1366 }, -+{ 0x3, 174, 1, 1357 }, -+{ 0x1, 174, 0, 1356 }, -+{ 0x1, 153, 1, 1375 }, -+{ 0x1, 154, 1, 1374 }, -+{ 0x1, 155, 1, 1373 }, -+{ 0x1, 156, 0, 1372 }, -+{ 0x3, 153, 1, 1371 }, -+{ 0x3, 154, 1, 1370 }, -+{ 0x3, 155, 1, 1369 }, -+{ 0x3, 156, 0, 1368 }, -+{ 0x1108, 159, 1, 1537 }, -+{ 0x1108, 160, 1, 1536 }, -+{ 0x1108, 165, 1, 1377 }, -+{ 0x1108, 166, 0, 1376 }, -+{ 0x908, 159, 1, 1539 }, -+{ 0x908, 160, 1, 1538 }, -+{ 0x908, 165, 1, 1379 }, -+{ 0x908, 166, 0, 1378 }, -+{ 0x508, 159, 1, 1541 }, -+{ 0x508, 160, 1, 1540 }, -+{ 0x508, 165, 1, 1381 }, -+{ 0x508, 166, 0, 1380 }, -+{ 0x308, 159, 1, 1545 }, -+{ 0x308, 160, 1, 1544 }, -+{ 0x108, 160, 1, 1542 }, -+{ 0x18, 161, 1, 1543 }, -+{ 0x308, 165, 1, 1385 }, -+{ 0x308, 166, 1, 1384 }, -+{ 0x108, 166, 1, 1382 }, -+{ 0x18, 167, 0, 1383 }, -+{ 0x88, 
159, 1, 1577 }, -+{ 0x88, 160, 1, 1576 }, -+{ 0x88, 165, 1, 1457 }, -+{ 0x88, 166, 0, 1456 }, -+{ 0x48, 159, 1, 1579 }, -+{ 0x48, 160, 1, 1578 }, -+{ 0x48, 165, 1, 1459 }, -+{ 0x48, 166, 0, 1458 }, -+{ 0x28, 159, 1, 1581 }, -+{ 0x28, 160, 1, 1580 }, -+{ 0x28, 165, 1, 1461 }, -+{ 0x28, 166, 0, 1460 }, -+{ 0x18, 159, 1, 1585 }, -+{ 0x18, 160, 1, 1584 }, -+{ 0x8, 160, 1, 1582 }, -+{ 0x8, 161, 1, 1583 }, -+{ 0x18, 165, 1, 1465 }, -+{ 0x18, 166, 1, 1464 }, -+{ 0x8, 166, 1, 1462 }, -+{ 0x8, 167, 0, 1463 }, -+{ 0x884, 159, 1, 1547 }, -+{ 0x884, 160, 1, 1546 }, -+{ 0x442, 162, 1, 1437 }, -+{ 0x442, 163, 1, 1436 }, -+{ 0x884, 165, 1, 1407 }, -+{ 0x884, 166, 1, 1406 }, -+{ 0x442, 168, 1, 1387 }, -+{ 0x442, 169, 0, 1386 }, -+{ 0x484, 159, 1, 1549 }, -+{ 0x484, 160, 1, 1548 }, -+{ 0x242, 162, 1, 1439 }, -+{ 0x242, 163, 1, 1438 }, -+{ 0x484, 165, 1, 1409 }, -+{ 0x484, 166, 1, 1408 }, -+{ 0x242, 168, 1, 1389 }, -+{ 0x242, 169, 0, 1388 }, -+{ 0x284, 159, 1, 1551 }, -+{ 0x284, 160, 1, 1550 }, -+{ 0x142, 162, 1, 1441 }, -+{ 0x142, 163, 1, 1440 }, -+{ 0x284, 165, 1, 1411 }, -+{ 0x284, 166, 1, 1410 }, -+{ 0x142, 168, 1, 1391 }, -+{ 0x142, 169, 0, 1390 }, -+{ 0x184, 159, 1, 1555 }, -+{ 0x184, 160, 1, 1554 }, -+{ 0x84, 160, 1, 1552 }, -+{ 0xc, 161, 1, 1553 }, -+{ 0xc2, 162, 1, 1445 }, -+{ 0xc2, 163, 1, 1444 }, -+{ 0x42, 163, 1, 1442 }, -+{ 0x6, 164, 1, 1443 }, -+{ 0x184, 165, 1, 1415 }, -+{ 0x184, 166, 1, 1414 }, -+{ 0x84, 166, 1, 1412 }, -+{ 0xc, 167, 1, 1413 }, -+{ 0xc2, 168, 1, 1395 }, -+{ 0xc2, 169, 1, 1394 }, -+{ 0x42, 169, 1, 1392 }, -+{ 0x6, 170, 0, 1393 }, -+{ 0x44, 159, 1, 1587 }, -+{ 0x44, 160, 1, 1586 }, -+{ 0x22, 162, 1, 1517 }, -+{ 0x22, 163, 1, 1516 }, -+{ 0x44, 165, 1, 1487 }, -+{ 0x44, 166, 1, 1486 }, -+{ 0x22, 168, 1, 1467 }, -+{ 0x22, 169, 0, 1466 }, -+{ 0x24, 159, 1, 1589 }, -+{ 0x24, 160, 1, 1588 }, -+{ 0x12, 162, 1, 1519 }, -+{ 0x12, 163, 1, 1518 }, -+{ 0x24, 165, 1, 1489 }, -+{ 0x24, 166, 1, 1488 }, -+{ 0x12, 168, 1, 1469 }, -+{ 0x12, 169, 0, 1468 }, -+{ 0x14, 159, 1, 1591 }, -+{ 0x14, 160, 1, 1590 }, -+{ 0xa, 162, 1, 1521 }, -+{ 0xa, 163, 1, 1520 }, -+{ 0x14, 165, 1, 1491 }, -+{ 0x14, 166, 1, 1490 }, -+{ 0xa, 168, 1, 1471 }, -+{ 0xa, 169, 0, 1470 }, -+{ 0xc, 159, 1, 1595 }, -+{ 0xc, 160, 1, 1594 }, -+{ 0x4, 160, 1, 1592 }, -+{ 0x4, 161, 1, 1593 }, -+{ 0x6, 162, 1, 1525 }, -+{ 0x6, 163, 1, 1524 }, -+{ 0x2, 163, 1, 1522 }, -+{ 0x2, 164, 1, 1523 }, -+{ 0xc, 165, 1, 1495 }, -+{ 0xc, 166, 1, 1494 }, -+{ 0x4, 166, 1, 1492 }, -+{ 0x4, 167, 1, 1493 }, -+{ 0x6, 168, 1, 1475 }, -+{ 0x6, 169, 1, 1474 }, -+{ 0x2, 169, 1, 1472 }, -+{ 0x2, 170, 0, 1473 }, -+{ 0x442, 159, 1, 1557 }, -+{ 0x442, 160, 1, 1556 }, -+{ 0x221, 162, 1, 1447 }, -+{ 0x221, 163, 1, 1446 }, -+{ 0x442, 165, 1, 1417 }, -+{ 0x442, 166, 1, 1416 }, -+{ 0x221, 168, 1, 1397 }, -+{ 0x221, 169, 0, 1396 }, -+{ 0x242, 159, 1, 1559 }, -+{ 0x242, 160, 1, 1558 }, -+{ 0x121, 162, 1, 1449 }, -+{ 0x121, 163, 1, 1448 }, -+{ 0x242, 165, 1, 1419 }, -+{ 0x242, 166, 1, 1418 }, -+{ 0x121, 168, 1, 1399 }, -+{ 0x121, 169, 0, 1398 }, -+{ 0x142, 159, 1, 1561 }, -+{ 0x142, 160, 1, 1560 }, -+{ 0xa1, 162, 1, 1451 }, -+{ 0xa1, 163, 1, 1450 }, -+{ 0x142, 165, 1, 1421 }, -+{ 0x142, 166, 1, 1420 }, -+{ 0xa1, 168, 1, 1401 }, -+{ 0xa1, 169, 0, 1400 }, -+{ 0xc2, 159, 1, 1565 }, -+{ 0xc2, 160, 1, 1564 }, -+{ 0x42, 160, 1, 1562 }, -+{ 0x6, 161, 1, 1563 }, -+{ 0x61, 162, 1, 1455 }, -+{ 0x61, 163, 1, 1454 }, -+{ 0x21, 163, 1, 1452 }, -+{ 0x3, 164, 1, 1453 }, -+{ 0xc2, 165, 1, 1425 }, -+{ 0xc2, 166, 1, 1424 }, -+{ 0x42, 166, 1, 1422 }, -+{ 0x6, 167, 1, 1423 }, -+{ 0x61, 
168, 1, 1405 }, -+{ 0x61, 169, 1, 1404 }, -+{ 0x21, 169, 1, 1402 }, -+{ 0x3, 170, 0, 1403 }, -+{ 0x22, 159, 1, 1597 }, -+{ 0x22, 160, 1, 1596 }, -+{ 0x11, 162, 1, 1527 }, -+{ 0x11, 163, 1, 1526 }, -+{ 0x22, 165, 1, 1497 }, -+{ 0x22, 166, 1, 1496 }, -+{ 0x11, 168, 1, 1477 }, -+{ 0x11, 169, 0, 1476 }, -+{ 0x12, 159, 1, 1599 }, -+{ 0x12, 160, 1, 1598 }, -+{ 0x9, 162, 1, 1529 }, -+{ 0x9, 163, 1, 1528 }, -+{ 0x12, 165, 1, 1499 }, -+{ 0x12, 166, 1, 1498 }, -+{ 0x9, 168, 1, 1479 }, -+{ 0x9, 169, 0, 1478 }, -+{ 0xa, 159, 1, 1601 }, -+{ 0xa, 160, 1, 1600 }, -+{ 0x5, 162, 1, 1531 }, -+{ 0x5, 163, 1, 1530 }, -+{ 0xa, 165, 1, 1501 }, -+{ 0xa, 166, 1, 1500 }, -+{ 0x5, 168, 1, 1481 }, -+{ 0x5, 169, 0, 1480 }, -+{ 0x6, 159, 1, 1605 }, -+{ 0x6, 160, 1, 1604 }, -+{ 0x2, 160, 1, 1602 }, -+{ 0x2, 161, 1, 1603 }, -+{ 0x3, 162, 1, 1535 }, -+{ 0x3, 163, 1, 1534 }, -+{ 0x1, 163, 1, 1532 }, -+{ 0x1, 164, 1, 1533 }, -+{ 0x6, 165, 1, 1505 }, -+{ 0x6, 166, 1, 1504 }, -+{ 0x2, 166, 1, 1502 }, -+{ 0x2, 167, 1, 1503 }, -+{ 0x3, 168, 1, 1485 }, -+{ 0x3, 169, 1, 1484 }, -+{ 0x1, 169, 1, 1482 }, -+{ 0x1, 170, 0, 1483 }, -+{ 0x221, 159, 1, 1567 }, -+{ 0x221, 160, 1, 1566 }, -+{ 0x221, 165, 1, 1427 }, -+{ 0x221, 166, 0, 1426 }, -+{ 0x121, 159, 1, 1569 }, -+{ 0x121, 160, 1, 1568 }, -+{ 0x121, 165, 1, 1429 }, -+{ 0x121, 166, 0, 1428 }, -+{ 0xa1, 159, 1, 1571 }, -+{ 0xa1, 160, 1, 1570 }, -+{ 0xa1, 165, 1, 1431 }, -+{ 0xa1, 166, 0, 1430 }, -+{ 0x61, 159, 1, 1575 }, -+{ 0x61, 160, 1, 1574 }, -+{ 0x21, 160, 1, 1572 }, -+{ 0x3, 161, 1, 1573 }, -+{ 0x61, 165, 1, 1435 }, -+{ 0x61, 166, 1, 1434 }, -+{ 0x21, 166, 1, 1432 }, -+{ 0x3, 167, 0, 1433 }, -+{ 0x11, 159, 1, 1607 }, -+{ 0x11, 160, 1, 1606 }, -+{ 0x11, 165, 1, 1507 }, -+{ 0x11, 166, 0, 1506 }, -+{ 0x9, 159, 1, 1609 }, -+{ 0x9, 160, 1, 1608 }, -+{ 0x9, 165, 1, 1509 }, -+{ 0x9, 166, 0, 1508 }, -+{ 0x5, 159, 1, 1611 }, -+{ 0x5, 160, 1, 1610 }, -+{ 0x5, 165, 1, 1511 }, -+{ 0x5, 166, 0, 1510 }, -+{ 0x3, 159, 1, 1615 }, -+{ 0x3, 160, 1, 1614 }, -+{ 0x1, 160, 1, 1612 }, -+{ 0x1, 161, 1, 1613 }, -+{ 0x3, 165, 1, 1515 }, -+{ 0x3, 166, 1, 1514 }, -+{ 0x1, 166, 1, 1512 }, -+{ 0x1, 167, 0, 1513 }, -+{ 0x442, 205, 0, 1616 }, -+{ 0x242, 205, 0, 1617 }, -+{ 0x142, 205, 0, 1618 }, -+{ 0xc2, 205, 1, 1620 }, -+{ 0x6, 206, 1, 1619 }, -+{ 0x1, 439, 0, 981 }, -+{ 0x22, 205, 0, 1626 }, -+{ 0x12, 205, 0, 1627 }, -+{ 0xa, 205, 0, 1628 }, -+{ 0x6, 205, 1, 1630 }, -+{ 0x2, 206, 1, 1629 }, -+{ 0x2, 367, 0, 1010 }, -+{ 0x221, 205, 0, 1621 }, -+{ 0x121, 205, 0, 1622 }, -+{ 0xa1, 205, 0, 1623 }, -+{ 0x61, 205, 1, 1625 }, -+{ 0x3, 206, 1, 1624 }, -+{ 0x1, 433, 0, 982 }, -+{ 0x11, 205, 0, 1631 }, -+{ 0x9, 205, 0, 1632 }, -+{ 0x5, 205, 0, 1633 }, -+{ 0x3, 205, 1, 1635 }, -+{ 0x1, 206, 1, 1634 }, -+{ 0x1, 367, 0, 1011 }, -+{ 0x4, 211, 0, 1636 }, -+{ 0x1, 211, 0, 1638 }, -+{ 0x1, 218, 0, 1639 }, -+{ 0x1, 217, 1, 1640 }, -+{ 0x2, 211, 0, 1637 }, -+{ 0x1, 196, 0, 1641 }, -+{ 0x880, 202, 0, 1642 }, -+{ 0x480, 202, 0, 1643 }, -+{ 0x280, 202, 0, 1644 }, -+{ 0x180, 202, 1, 1646 }, -+{ 0x80, 203, 0, 1645 }, -+{ 0x440, 202, 1, 1657 }, -+{ 0x88, 204, 0, 1647 }, -+{ 0x240, 202, 1, 1658 }, -+{ 0x48, 204, 0, 1648 }, -+{ 0x140, 202, 1, 1659 }, -+{ 0x28, 204, 0, 1649 }, -+{ 0xc0, 202, 1, 1661 }, -+{ 0x40, 203, 1, 1660 }, -+{ 0x18, 204, 1, 1651 }, -+{ 0x8, 204, 0, 1650 }, -+{ 0x220, 202, 1, 1662 }, -+{ 0x44, 204, 0, 1652 }, -+{ 0x120, 202, 1, 1663 }, -+{ 0x24, 204, 0, 1653 }, -+{ 0xa0, 202, 1, 1664 }, -+{ 0x14, 204, 0, 1654 }, -+{ 0x60, 202, 1, 1666 }, -+{ 0x20, 203, 1, 1665 }, -+{ 0xc, 204, 1, 1656 }, -+{ 0x4, 204, 0, 1655 }, 
-+{ 0x110, 202, 0, 1667 }, -+{ 0x90, 202, 0, 1668 }, -+{ 0x50, 202, 0, 1669 }, -+{ 0x30, 202, 1, 1671 }, -+{ 0x10, 203, 1, 1670 }, -+{ 0x1, 385, 0, 974 }, -+{ 0x88, 202, 0, 1672 }, -+{ 0x48, 202, 0, 1673 }, -+{ 0x28, 202, 0, 1674 }, -+{ 0x18, 202, 1, 1676 }, -+{ 0x8, 203, 1, 1675 }, -+{ 0xc, 368, 0, 1007 }, -+{ 0x44, 202, 1, 1687 }, -+{ 0x22, 204, 0, 1677 }, -+{ 0x24, 202, 1, 1688 }, -+{ 0x12, 204, 0, 1678 }, -+{ 0x14, 202, 1, 1689 }, -+{ 0xa, 204, 0, 1679 }, -+{ 0xc, 202, 1, 1691 }, -+{ 0x4, 203, 1, 1690 }, -+{ 0x6, 204, 1, 1681 }, -+{ 0x2, 204, 1, 1680 }, -+{ 0x6, 368, 0, 1008 }, -+{ 0x22, 202, 1, 1692 }, -+{ 0x11, 204, 0, 1682 }, -+{ 0x12, 202, 1, 1693 }, -+{ 0x9, 204, 0, 1683 }, -+{ 0xa, 202, 1, 1694 }, -+{ 0x5, 204, 0, 1684 }, -+{ 0x6, 202, 1, 1696 }, -+{ 0x2, 203, 1, 1695 }, -+{ 0x3, 204, 1, 1686 }, -+{ 0x1, 204, 1, 1685 }, -+{ 0x3, 368, 0, 1009 }, -+{ 0x11, 202, 0, 1697 }, -+{ 0x9, 202, 0, 1698 }, -+{ 0x5, 202, 0, 1699 }, -+{ 0x3, 202, 1, 1701 }, -+{ 0x1, 203, 0, 1700 }, -+{ 0x8, 198, 0, 1702 }, -+{ 0x4, 198, 0, 1703 }, -+{ 0x2, 198, 0, 1704 }, -+{ 0x1, 198, 1, 1706 }, -+{ 0x1, 199, 1, 1705 }, -+{ 0x1, 332, 0, 988 }, -+{ 0x8, 200, 0, 1707 }, -+{ 0x4, 200, 0, 1708 }, -+{ 0x2, 200, 0, 1709 }, -+{ 0x1, 200, 1, 1711 }, -+{ 0x1, 201, 1, 1710 }, -+{ 0x1, 331, 0, 989 }, -+{ 0x8, 209, 0, 1712 }, -+{ 0x4, 209, 0, 1713 }, -+{ 0x2, 209, 0, 1714 }, -+{ 0x1, 209, 1, 1716 }, -+{ 0x1, 210, 1, 1715 }, -+{ 0x1, 330, 0, 990 }, -+{ 0x8, 212, 0, 1717 }, -+{ 0x4, 212, 0, 1718 }, -+{ 0x2, 212, 0, 1719 }, -+{ 0x1, 212, 1, 1721 }, -+{ 0x1, 213, 1, 1720 }, -+{ 0x1, 329, 0, 991 }, -+{ 0x8, 224, 0, 1722 }, -+{ 0x4, 224, 0, 1723 }, -+{ 0x2, 224, 0, 1724 }, -+{ 0x1, 224, 1, 1726 }, -+{ 0x1, 225, 0, 1725 }, -+{ 0x8, 222, 0, 1727 }, -+{ 0x4, 222, 0, 1728 }, -+{ 0x2, 222, 0, 1729 }, -+{ 0x1, 222, 1, 1731 }, -+{ 0x1, 223, 0, 1730 }, -+{ 0x1, 240, 0, 1732 }, -+{ 0x1, 340, 0, 1733 }, -+{ 0x1, 33, 0, 1734 }, -+{ 0x8, 151, 0, 1735 }, -+{ 0x4, 151, 0, 1736 }, -+{ 0x2, 151, 0, 1737 }, -+{ 0x1, 151, 1, 1739 }, -+{ 0x1, 152, 0, 1738 }, -+{ 0x8, 157, 0, 1740 }, -+{ 0x4, 157, 0, 1741 }, -+{ 0x2, 157, 0, 1742 }, -+{ 0x1, 157, 1, 1744 }, -+{ 0x1, 158, 0, 1743 }, -+{ 0x8, 231, 0, 1745 }, -+{ 0x4, 231, 0, 1746 }, -+{ 0x2, 231, 0, 1747 }, -+{ 0x1, 231, 1, 1749 }, -+{ 0x1, 232, 0, 1748 }, -+{ 0x1, 173, 0, 1750 }, -+{ 0x442, 171, 0, 1751 }, -+{ 0x242, 171, 0, 1752 }, -+{ 0x142, 171, 0, 1753 }, -+{ 0xc2, 171, 1, 1755 }, -+{ 0x6, 172, 0, 1754 }, -+{ 0x22, 171, 0, 1761 }, -+{ 0x12, 171, 0, 1762 }, -+{ 0xa, 171, 0, 1763 }, -+{ 0x6, 171, 1, 1765 }, -+{ 0x2, 172, 1, 1764 }, -+{ 0x1, 135, 0, 1165 }, -+{ 0x221, 171, 0, 1756 }, -+{ 0x121, 171, 0, 1757 }, -+{ 0xa1, 171, 0, 1758 }, -+{ 0x61, 171, 1, 1760 }, -+{ 0x3, 172, 0, 1759 }, -+{ 0x11, 171, 0, 1766 }, -+{ 0x9, 171, 0, 1767 }, -+{ 0x5, 171, 0, 1768 }, -+{ 0x3, 171, 1, 1770 }, -+{ 0x1, 172, 1, 1769 }, -+{ 0x1, 134, 0, 1166 }, -+{ 0x1, 237, 0, 1771 }, -+{ 0x1, 195, 0, 1772 }, -+{ 0x1, 149, 0, 1773 }, -+{ 0x1, 148, 0, 1774 }, -+{ 0x4, 234, 0, 1775 }, -+{ 0x2, 234, 0, 1776 }, -+{ 0x1, 234, 0, 1777 }, -+{ 0x1, 197, 0, 1778 }, -+{ 0x2, 235, 0, 1779 }, -+{ 0x1, 235, 0, 1780 }, -+{ 0x4, 185, 0, 1781 }, -+{ 0x2, 185, 0, 1782 }, -+{ 0x1, 185, 0, 1783 }, -+{ 0x4, 182, 0, 1784 }, -+{ 0x1, 190, 0, 1787 }, -+{ 0x1, 189, 1, 1788 }, -+{ 0x2, 182, 0, 1785 }, -+{ 0x1, 142, 0, 1789 }, -+{ 0x1, 297, 1, 1790 }, -+{ 0x1, 182, 0, 1786 }, -+{ 0x8, 144, 0, 1791 }, -+{ 0x4, 144, 0, 1792 }, -+{ 0x2, 144, 0, 1793 }, -+{ 0x1, 144, 1, 1795 }, -+{ 0x1, 145, 0, 1794 }, -+{ 0x8, 146, 0, 1796 }, -+{ 0x4, 146, 0, 1797 }, 
-+{ 0x2, 146, 0, 1798 }, -+{ 0x1, 146, 1, 1800 }, -+{ 0x1, 147, 1, 1799 }, -+{ 0x1, 426, 0, 1167 }, -+{ 0x8, 180, 0, 1801 }, -+{ 0x4, 180, 0, 1802 }, -+{ 0x2, 180, 0, 1803 }, -+{ 0x1, 180, 1, 1805 }, -+{ 0x1, 181, 1, 1804 }, -+{ 0x1, 425, 0, 1168 }, -+{ 0x8, 183, 0, 1806 }, -+{ 0x4, 183, 0, 1807 }, -+{ 0x2, 183, 0, 1808 }, -+{ 0x1, 183, 1, 1810 }, -+{ 0x1, 184, 1, 1809 }, -+{ 0x1, 424, 0, 1169 }, -+{ 0x8, 228, 0, 1811 }, -+{ 0x4, 228, 0, 1812 }, -+{ 0x2, 228, 0, 1813 }, -+{ 0x1, 228, 1, 1815 }, -+{ 0x1, 229, 0, 1814 }, -+{ 0x8, 226, 0, 1816 }, -+{ 0x4, 226, 0, 1817 }, -+{ 0x2, 226, 0, 1818 }, -+{ 0x1, 226, 1, 1820 }, -+{ 0x1, 227, 0, 1819 }, -+{ 0x8, 44, 0, 1825 }, -+{ 0x18, 44, 0, 1821 }, -+{ 0x4, 44, 0, 1826 }, -+{ 0xc, 44, 0, 1822 }, -+{ 0x2, 44, 0, 1827 }, -+{ 0x6, 44, 0, 1823 }, -+{ 0x1, 44, 0, 1828 }, -+{ 0x3, 44, 0, 1824 }, -+{ 0x51, 30, 0, 1830 }, -+{ 0xd1, 30, 0, 1829 }, -+{ 0x31, 30, 1, 1840 }, -+{ 0x11, 31, 0, 1839 }, -+{ 0x71, 30, 1, 1838 }, -+{ 0x31, 31, 0, 1837 }, -+{ 0x29, 30, 0, 1832 }, -+{ 0x69, 30, 0, 1831 }, -+{ 0x19, 30, 1, 1844 }, -+{ 0x9, 31, 0, 1843 }, -+{ 0x39, 30, 1, 1842 }, -+{ 0x19, 31, 0, 1841 }, -+{ 0x15, 30, 0, 1834 }, -+{ 0x35, 30, 0, 1833 }, -+{ 0xd, 30, 1, 1848 }, -+{ 0x5, 31, 0, 1847 }, -+{ 0x1d, 30, 1, 1846 }, -+{ 0xd, 31, 0, 1845 }, -+{ 0xb, 30, 0, 1836 }, -+{ 0x1b, 30, 0, 1835 }, -+{ 0x7, 30, 1, 1852 }, -+{ 0x3, 31, 0, 1851 }, -+{ 0xf, 30, 1, 1850 }, -+{ 0x7, 31, 0, 1849 }, -+{ 0xa2, 28, 0, 1854 }, -+{ 0x1a2, 28, 0, 1853 }, -+{ 0x62, 28, 1, 1864 }, -+{ 0x22, 29, 0, 1863 }, -+{ 0xe2, 28, 1, 1862 }, -+{ 0x62, 29, 0, 1861 }, -+{ 0x52, 28, 0, 1856 }, -+{ 0xd2, 28, 0, 1855 }, -+{ 0x32, 28, 1, 1868 }, -+{ 0x12, 29, 0, 1867 }, -+{ 0x72, 28, 1, 1866 }, -+{ 0x32, 29, 0, 1865 }, -+{ 0x2a, 28, 0, 1858 }, -+{ 0x6a, 28, 0, 1857 }, -+{ 0x1a, 28, 1, 1872 }, -+{ 0xa, 29, 0, 1871 }, -+{ 0x3a, 28, 1, 1870 }, -+{ 0x1a, 29, 0, 1869 }, -+{ 0x16, 28, 0, 1860 }, -+{ 0x36, 28, 0, 1859 }, -+{ 0xe, 28, 1, 1876 }, -+{ 0x6, 29, 0, 1875 }, -+{ 0x1e, 28, 1, 1874 }, -+{ 0xe, 29, 0, 1873 }, -+{ 0x51, 28, 0, 1878 }, -+{ 0xd1, 28, 0, 1877 }, -+{ 0x31, 28, 1, 1888 }, -+{ 0x11, 29, 0, 1887 }, -+{ 0x71, 28, 1, 1886 }, -+{ 0x31, 29, 0, 1885 }, -+{ 0x29, 28, 0, 1880 }, -+{ 0x69, 28, 0, 1879 }, -+{ 0x19, 28, 1, 1892 }, -+{ 0x9, 29, 0, 1891 }, -+{ 0x39, 28, 1, 1890 }, -+{ 0x19, 29, 0, 1889 }, -+{ 0x15, 28, 0, 1882 }, -+{ 0x35, 28, 0, 1881 }, -+{ 0xd, 28, 1, 1896 }, -+{ 0x5, 29, 0, 1895 }, -+{ 0x1d, 28, 1, 1894 }, -+{ 0xd, 29, 0, 1893 }, -+{ 0xb, 28, 0, 1884 }, -+{ 0x1b, 28, 0, 1883 }, -+{ 0x7, 28, 1, 1900 }, -+{ 0x3, 29, 0, 1899 }, -+{ 0xf, 28, 1, 1898 }, -+{ 0x7, 29, 0, 1897 }, -+{ 0x51, 26, 0, 1902 }, -+{ 0xd1, 26, 0, 1901 }, -+{ 0x31, 26, 1, 1912 }, -+{ 0x11, 27, 0, 1911 }, -+{ 0x71, 26, 1, 1910 }, -+{ 0x31, 27, 0, 1909 }, -+{ 0x29, 26, 0, 1904 }, -+{ 0x69, 26, 0, 1903 }, -+{ 0x19, 26, 1, 1916 }, -+{ 0x9, 27, 0, 1915 }, -+{ 0x39, 26, 1, 1914 }, -+{ 0x19, 27, 0, 1913 }, -+{ 0x15, 26, 0, 1906 }, -+{ 0x35, 26, 0, 1905 }, -+{ 0xd, 26, 1, 1920 }, -+{ 0x5, 27, 0, 1919 }, -+{ 0x1d, 26, 1, 1918 }, -+{ 0xd, 27, 0, 1917 }, -+{ 0xb, 26, 0, 1908 }, -+{ 0x1b, 26, 0, 1907 }, -+{ 0x7, 26, 1, 1924 }, -+{ 0x3, 27, 0, 1923 }, -+{ 0xf, 26, 1, 1922 }, -+{ 0x7, 27, 0, 1921 }, -+{ 0xa2, 24, 0, 1926 }, -+{ 0x1a2, 24, 0, 1925 }, -+{ 0x62, 24, 1, 1936 }, -+{ 0x22, 25, 0, 1935 }, -+{ 0xe2, 24, 1, 1934 }, -+{ 0x62, 25, 0, 1933 }, -+{ 0x52, 24, 0, 1928 }, -+{ 0xd2, 24, 0, 1927 }, -+{ 0x32, 24, 1, 1940 }, -+{ 0x12, 25, 0, 1939 }, -+{ 0x72, 24, 1, 1938 }, -+{ 0x32, 25, 0, 1937 }, -+{ 0x2a, 24, 0, 1930 }, -+{ 0x6a, 24, 
0, 1929 }, -+{ 0x1a, 24, 1, 1944 }, -+{ 0xa, 25, 0, 1943 }, -+{ 0x3a, 24, 1, 1942 }, -+{ 0x1a, 25, 0, 1941 }, -+{ 0x16, 24, 0, 1932 }, -+{ 0x36, 24, 0, 1931 }, -+{ 0xe, 24, 1, 1948 }, -+{ 0x6, 25, 0, 1947 }, -+{ 0x1e, 24, 1, 1946 }, -+{ 0xe, 25, 0, 1945 }, -+{ 0x51, 24, 0, 1950 }, -+{ 0xd1, 24, 0, 1949 }, -+{ 0x31, 24, 1, 1960 }, -+{ 0x11, 25, 0, 1959 }, -+{ 0x71, 24, 1, 1958 }, -+{ 0x31, 25, 0, 1957 }, -+{ 0x29, 24, 0, 1952 }, -+{ 0x69, 24, 0, 1951 }, -+{ 0x19, 24, 1, 1964 }, -+{ 0x9, 25, 0, 1963 }, -+{ 0x39, 24, 1, 1962 }, -+{ 0x19, 25, 0, 1961 }, -+{ 0x15, 24, 0, 1954 }, -+{ 0x35, 24, 0, 1953 }, -+{ 0xd, 24, 1, 1968 }, -+{ 0x5, 25, 0, 1967 }, -+{ 0x1d, 24, 1, 1966 }, -+{ 0xd, 25, 0, 1965 }, -+{ 0xb, 24, 0, 1956 }, -+{ 0x1b, 24, 0, 1955 }, -+{ 0x7, 24, 1, 1972 }, -+{ 0x3, 25, 0, 1971 }, -+{ 0xf, 24, 1, 1970 }, -+{ 0x7, 25, 0, 1969 }, -+{ 0x51, 22, 1, 1998 }, -+{ 0x50, 22, 0, 1974 }, -+{ 0xd1, 22, 1, 1997 }, -+{ 0xd0, 22, 0, 1973 }, -+{ 0x31, 22, 1, 2008 }, -+{ 0x30, 22, 1, 1984 }, -+{ 0x11, 23, 1, 2007 }, -+{ 0x10, 23, 0, 1983 }, -+{ 0x71, 22, 1, 2006 }, -+{ 0x70, 22, 1, 1982 }, -+{ 0x31, 23, 1, 2005 }, -+{ 0x30, 23, 0, 1981 }, -+{ 0x29, 22, 1, 2000 }, -+{ 0x28, 22, 0, 1976 }, -+{ 0x69, 22, 1, 1999 }, -+{ 0x68, 22, 0, 1975 }, -+{ 0x19, 22, 1, 2012 }, -+{ 0x18, 22, 1, 1988 }, -+{ 0x9, 23, 1, 2011 }, -+{ 0x8, 23, 0, 1987 }, -+{ 0x39, 22, 1, 2010 }, -+{ 0x38, 22, 1, 1986 }, -+{ 0x19, 23, 1, 2009 }, -+{ 0x18, 23, 0, 1985 }, -+{ 0x15, 22, 1, 2002 }, -+{ 0x14, 22, 0, 1978 }, -+{ 0x35, 22, 1, 2001 }, -+{ 0x34, 22, 0, 1977 }, -+{ 0xd, 22, 1, 2016 }, -+{ 0xc, 22, 1, 1992 }, -+{ 0x5, 23, 1, 2015 }, -+{ 0x4, 23, 0, 1991 }, -+{ 0x1d, 22, 1, 2014 }, -+{ 0x1c, 22, 1, 1990 }, -+{ 0xd, 23, 1, 2013 }, -+{ 0xc, 23, 0, 1989 }, -+{ 0xb, 22, 1, 2004 }, -+{ 0xa, 22, 0, 1980 }, -+{ 0x1b, 22, 1, 2003 }, -+{ 0x1a, 22, 0, 1979 }, -+{ 0x7, 22, 1, 2020 }, -+{ 0x6, 22, 1, 1996 }, -+{ 0x3, 23, 1, 2019 }, -+{ 0x2, 23, 0, 1995 }, -+{ 0xf, 22, 1, 2018 }, -+{ 0xe, 22, 1, 1994 }, -+{ 0x7, 23, 1, 2017 }, -+{ 0x6, 23, 0, 1993 }, -+{ 0x8, 21, 0, 2022 }, -+{ 0x18, 21, 0, 2021 }, -+{ 0x1, 21, 1, 2026 }, -+{ 0x2, 21, 0, 2025 }, -+{ 0x3, 21, 1, 2024 }, -+{ 0x4, 21, 0, 2023 }, -+{ 0x1, 239, 0, 2027 }, -+{ 0x1, 339, 0, 2028 }, -+{ 0x14, 43, 0, 2031 }, -+{ 0x34, 43, 0, 2029 }, -+{ 0xc, 43, 0, 2032 }, -+{ 0x1c, 43, 0, 2030 }, -+{ 0x2, 43, 0, 2035 }, -+{ 0x6, 43, 0, 2033 }, -+{ 0x1, 43, 0, 2036 }, -+{ 0x3, 43, 0, 2034 }, -+{ 0x51, 19, 0, 2038 }, -+{ 0xd1, 19, 0, 2037 }, -+{ 0x31, 19, 1, 2048 }, -+{ 0x11, 20, 0, 2047 }, -+{ 0x71, 19, 1, 2046 }, -+{ 0x31, 20, 0, 2045 }, -+{ 0x29, 19, 0, 2040 }, -+{ 0x69, 19, 0, 2039 }, -+{ 0x19, 19, 1, 2052 }, -+{ 0x9, 20, 0, 2051 }, -+{ 0x39, 19, 1, 2050 }, -+{ 0x19, 20, 0, 2049 }, -+{ 0x15, 19, 0, 2042 }, -+{ 0x35, 19, 0, 2041 }, -+{ 0xd, 19, 1, 2056 }, -+{ 0x5, 20, 0, 2055 }, -+{ 0x1d, 19, 1, 2054 }, -+{ 0xd, 20, 0, 2053 }, -+{ 0xb, 19, 0, 2044 }, -+{ 0x1b, 19, 0, 2043 }, -+{ 0x7, 19, 1, 2060 }, -+{ 0x3, 20, 0, 2059 }, -+{ 0xf, 19, 1, 2058 }, -+{ 0x7, 20, 0, 2057 }, -+{ 0x1, 32, 0, 2061 }, -+{ 0x1, 140, 0, 2062 }, -+{ 0x2, 45, 0, 2063 }, -+{ 0x1, 45, 0, 2064 }, -+{ 0x1, 387, 0, 2065 }, -+{ 0x2, 52, 0, 2066 }, -+{ 0x1, 52, 0, 2067 }, -+{ 0x1, 133, 0, 2068 }, -+{ 0x51, 17, 0, 2070 }, -+{ 0xd1, 17, 0, 2069 }, -+{ 0x31, 17, 1, 2080 }, -+{ 0x11, 18, 0, 2079 }, -+{ 0x71, 17, 1, 2078 }, -+{ 0x31, 18, 0, 2077 }, -+{ 0x29, 17, 0, 2072 }, -+{ 0x69, 17, 0, 2071 }, -+{ 0x19, 17, 1, 2084 }, -+{ 0x9, 18, 0, 2083 }, -+{ 0x39, 17, 1, 2082 }, -+{ 0x19, 18, 0, 2081 }, -+{ 0x15, 17, 0, 2074 }, -+{ 0x35, 17, 0, 2073 }, 
-+{ 0xd, 17, 1, 2088 }, -+{ 0x5, 18, 0, 2087 }, -+{ 0x1d, 17, 1, 2086 }, -+{ 0xd, 18, 0, 2085 }, -+{ 0xb, 17, 0, 2076 }, -+{ 0x1b, 17, 0, 2075 }, -+{ 0x7, 17, 1, 2092 }, -+{ 0x3, 18, 0, 2091 }, -+{ 0xf, 17, 1, 2090 }, -+{ 0x7, 18, 0, 2089 }, -+{ 0xa20, 15, 0, 2094 }, -+{ 0x1a20, 15, 0, 2093 }, -+{ 0x620, 15, 1, 2104 }, -+{ 0x220, 16, 0, 2103 }, -+{ 0xe20, 15, 1, 2102 }, -+{ 0x620, 16, 0, 2101 }, -+{ 0x520, 15, 0, 2096 }, -+{ 0xd20, 15, 0, 2095 }, -+{ 0x320, 15, 1, 2108 }, -+{ 0x120, 16, 0, 2107 }, -+{ 0x720, 15, 1, 2106 }, -+{ 0x320, 16, 0, 2105 }, -+{ 0x2a0, 15, 0, 2098 }, -+{ 0x6a0, 15, 0, 2097 }, -+{ 0x1a0, 15, 1, 2112 }, -+{ 0xa0, 16, 0, 2111 }, -+{ 0x3a0, 15, 1, 2110 }, -+{ 0x1a0, 16, 0, 2109 }, -+{ 0x160, 15, 0, 2100 }, -+{ 0x360, 15, 0, 2099 }, -+{ 0xe0, 15, 1, 2116 }, -+{ 0x60, 16, 0, 2115 }, -+{ 0x1e0, 15, 1, 2114 }, -+{ 0xe0, 16, 0, 2113 }, -+{ 0x51, 15, 1, 2142 }, -+{ 0x50, 15, 0, 2118 }, -+{ 0xd1, 15, 1, 2141 }, -+{ 0xd0, 15, 0, 2117 }, -+{ 0x31, 15, 1, 2152 }, -+{ 0x30, 15, 1, 2128 }, -+{ 0x11, 16, 1, 2151 }, -+{ 0x10, 16, 0, 2127 }, -+{ 0x71, 15, 1, 2150 }, -+{ 0x70, 15, 1, 2126 }, -+{ 0x31, 16, 1, 2149 }, -+{ 0x30, 16, 0, 2125 }, -+{ 0x29, 15, 1, 2144 }, -+{ 0x28, 15, 0, 2120 }, -+{ 0x69, 15, 1, 2143 }, -+{ 0x68, 15, 0, 2119 }, -+{ 0x19, 15, 1, 2156 }, -+{ 0x18, 15, 1, 2132 }, -+{ 0x9, 16, 1, 2155 }, -+{ 0x8, 16, 0, 2131 }, -+{ 0x39, 15, 1, 2154 }, -+{ 0x38, 15, 1, 2130 }, -+{ 0x19, 16, 1, 2153 }, -+{ 0x18, 16, 0, 2129 }, -+{ 0x15, 15, 1, 2146 }, -+{ 0x14, 15, 0, 2122 }, -+{ 0x35, 15, 1, 2145 }, -+{ 0x34, 15, 0, 2121 }, -+{ 0xd, 15, 1, 2160 }, -+{ 0xc, 15, 1, 2136 }, -+{ 0x5, 16, 1, 2159 }, -+{ 0x4, 16, 0, 2135 }, -+{ 0x1d, 15, 1, 2158 }, -+{ 0x1c, 15, 1, 2134 }, -+{ 0xd, 16, 1, 2157 }, -+{ 0xc, 16, 0, 2133 }, -+{ 0xb, 15, 1, 2148 }, -+{ 0xa, 15, 0, 2124 }, -+{ 0x1b, 15, 1, 2147 }, -+{ 0x1a, 15, 0, 2123 }, -+{ 0x7, 15, 1, 2164 }, -+{ 0x6, 15, 1, 2140 }, -+{ 0x3, 16, 1, 2163 }, -+{ 0x2, 16, 0, 2139 }, -+{ 0xf, 15, 1, 2162 }, -+{ 0xe, 15, 1, 2138 }, -+{ 0x7, 16, 1, 2161 }, -+{ 0x6, 16, 0, 2137 }, -+{ 0x8, 14, 0, 2166 }, -+{ 0x18, 14, 0, 2165 }, -+{ 0x1, 14, 1, 2170 }, -+{ 0x2, 14, 0, 2169 }, -+{ 0x3, 14, 1, 2168 }, -+{ 0x4, 14, 0, 2167 }, -+{ 0x1, 109, 1, 2322 }, -+{ 0x1, 110, 1, 2321 }, -+{ 0x1, 111, 1, 2320 }, -+{ 0x1, 112, 1, 2319 }, -+{ 0x1, 113, 1, 2318 }, -+{ 0x1, 114, 1, 2317 }, -+{ 0x1, 115, 1, 2316 }, -+{ 0x1, 116, 1, 2315 }, -+{ 0x39, 41, 1, 22 }, -+{ 0x19, 42, 0, 21 }, -+{ 0x3, 109, 1, 2314 }, -+{ 0x3, 110, 1, 2313 }, -+{ 0x3, 111, 1, 2312 }, -+{ 0x3, 112, 1, 2311 }, -+{ 0x3, 113, 1, 2310 }, -+{ 0x3, 114, 1, 2309 }, -+{ 0x3, 115, 1, 2308 }, -+{ 0x3, 116, 1, 2307 }, -+{ 0x69, 41, 0, 11 }, -+{ 0x14, 100, 1, 2302 }, -+{ 0x22, 101, 1, 2299 }, -+{ 0x44, 101, 1, 2301 }, -+{ 0xa, 108, 1, 2300 }, -+{ 0xd1, 41, 0, 9 }, -+{ 0x34, 100, 1, 2174 }, -+{ 0xc4, 101, 1, 2173 }, -+{ 0x1c, 107, 1, 2171 }, -+{ 0xe, 122, 0, 2172 }, -+{ 0xc, 100, 1, 2462 }, -+{ 0xa, 101, 1, 2459 }, -+{ 0x14, 101, 1, 2461 }, -+{ 0x6, 108, 0, 2460 }, -+{ 0x2, 100, 1, 2186 }, -+{ 0x2, 101, 1, 2185 }, -+{ 0x2, 106, 1, 2184 }, -+{ 0x2, 107, 0, 2183 }, -+{ 0x12, 100, 1, 2182 }, -+{ 0x42, 101, 1, 2181 }, -+{ 0x6, 106, 1, 2180 }, -+{ 0x6, 107, 0, 2179 }, -+{ 0xa, 100, 1, 2306 }, -+{ 0x12, 101, 1, 2305 }, -+{ 0x24, 101, 1, 2303 }, -+{ 0x5, 108, 1, 2304 }, -+{ 0x71, 41, 1, 18 }, -+{ 0x31, 42, 0, 17 }, -+{ 0x1a, 100, 1, 2178 }, -+{ 0x32, 101, 1, 2177 }, -+{ 0x1a, 107, 1, 2175 }, -+{ 0x7, 122, 0, 2176 }, -+{ 0x6, 100, 1, 2466 }, -+{ 0x6, 101, 1, 2465 }, -+{ 0xc, 101, 1, 2463 }, -+{ 0x3, 108, 0, 2464 }, -+{ 0x1, 100, 
1, 2482 }, -+{ 0x1, 101, 1, 2481 }, -+{ 0x1, 102, 1, 2480 }, -+{ 0x1, 103, 1, 2479 }, -+{ 0x1, 104, 1, 2478 }, -+{ 0x1, 105, 1, 2477 }, -+{ 0x1, 106, 1, 2476 }, -+{ 0x1, 107, 0, 2475 }, -+{ 0x3, 100, 1, 2474 }, -+{ 0x3, 101, 1, 2473 }, -+{ 0x3, 102, 1, 2472 }, -+{ 0x3, 103, 1, 2471 }, -+{ 0x3, 104, 1, 2470 }, -+{ 0x3, 105, 1, 2469 }, -+{ 0x3, 106, 1, 2468 }, -+{ 0x3, 107, 0, 2467 }, -+{ 0x8, 67, 1, 2346 }, -+{ 0x8, 68, 1, 2345 }, -+{ 0x2, 73, 1, 2340 }, -+{ 0x2, 74, 1, 2339 }, -+{ 0x1, 76, 1, 2344 }, -+{ 0x1, 77, 1, 2343 }, -+{ 0x1, 78, 1, 2342 }, -+{ 0x1, 79, 1, 2341 }, -+{ 0xf, 41, 1, 30 }, -+{ 0x7, 42, 0, 29 }, -+{ 0x18, 67, 1, 2338 }, -+{ 0x18, 68, 1, 2337 }, -+{ 0x6, 73, 1, 2332 }, -+{ 0x6, 74, 1, 2331 }, -+{ 0x3, 76, 1, 2336 }, -+{ 0x3, 77, 1, 2335 }, -+{ 0x3, 78, 1, 2334 }, -+{ 0x3, 79, 1, 2333 }, -+{ 0x1b, 41, 0, 15 }, -+{ 0x14, 67, 1, 2326 }, -+{ 0x22, 68, 1, 2323 }, -+{ 0x44, 68, 1, 2325 }, -+{ 0xa, 75, 1, 2324 }, -+{ 0x35, 41, 0, 13 }, -+{ 0x34, 67, 1, 2190 }, -+{ 0xc4, 68, 1, 2189 }, -+{ 0x38, 74, 1, 2187 }, -+{ 0xe, 85, 0, 2188 }, -+{ 0xc, 67, 1, 2486 }, -+{ 0xa, 68, 1, 2483 }, -+{ 0x14, 68, 1, 2485 }, -+{ 0x6, 75, 0, 2484 }, -+{ 0x2, 67, 1, 2202 }, -+{ 0x2, 68, 1, 2201 }, -+{ 0x4, 73, 1, 2200 }, -+{ 0x4, 74, 0, 2199 }, -+{ 0x12, 67, 1, 2198 }, -+{ 0x42, 68, 1, 2197 }, -+{ 0xc, 73, 1, 2196 }, -+{ 0xc, 74, 0, 2195 }, -+{ 0xa, 67, 1, 2330 }, -+{ 0x12, 68, 1, 2329 }, -+{ 0x24, 68, 1, 2327 }, -+{ 0x5, 75, 1, 2328 }, -+{ 0x1d, 41, 1, 26 }, -+{ 0xd, 42, 0, 25 }, -+{ 0x1a, 67, 1, 2194 }, -+{ 0x32, 68, 1, 2193 }, -+{ 0x34, 74, 1, 2191 }, -+{ 0x7, 85, 0, 2192 }, -+{ 0x6, 67, 1, 2490 }, -+{ 0x6, 68, 1, 2489 }, -+{ 0xc, 68, 1, 2487 }, -+{ 0x3, 75, 0, 2488 }, -+{ 0x1, 67, 1, 2506 }, -+{ 0x1, 68, 1, 2505 }, -+{ 0x1, 69, 1, 2504 }, -+{ 0x1, 70, 1, 2503 }, -+{ 0x1, 71, 1, 2502 }, -+{ 0x1, 72, 1, 2501 }, -+{ 0x1, 73, 1, 2500 }, -+{ 0x1, 74, 0, 2499 }, -+{ 0x3, 67, 1, 2498 }, -+{ 0x3, 68, 1, 2497 }, -+{ 0x3, 69, 1, 2496 }, -+{ 0x3, 70, 1, 2495 }, -+{ 0x3, 71, 1, 2494 }, -+{ 0x3, 72, 1, 2493 }, -+{ 0x3, 73, 1, 2492 }, -+{ 0x3, 74, 0, 2491 }, -+{ 0x28, 95, 1, 2354 }, -+{ 0x44, 96, 1, 2349 }, -+{ 0x88, 96, 1, 2353 }, -+{ 0x44, 97, 1, 2348 }, -+{ 0x88, 97, 1, 2352 }, -+{ 0x44, 98, 1, 2347 }, -+{ 0x88, 98, 1, 2351 }, -+{ 0x28, 99, 0, 2350 }, -+{ 0x68, 95, 1, 2210 }, -+{ 0x188, 96, 1, 2209 }, -+{ 0x188, 97, 1, 2208 }, -+{ 0x188, 98, 1, 2207 }, -+{ 0x38, 118, 1, 2206 }, -+{ 0x38, 119, 1, 2205 }, -+{ 0x38, 120, 1, 2204 }, -+{ 0x38, 121, 0, 2203 }, -+{ 0x18, 95, 1, 2514 }, -+{ 0x14, 96, 1, 2509 }, -+{ 0x28, 96, 1, 2513 }, -+{ 0x14, 97, 1, 2508 }, -+{ 0x28, 97, 1, 2512 }, -+{ 0x14, 98, 1, 2507 }, -+{ 0x28, 98, 1, 2511 }, -+{ 0x18, 99, 0, 2510 }, -+{ 0x14, 95, 1, 2362 }, -+{ 0x24, 96, 1, 2361 }, -+{ 0x48, 96, 1, 2357 }, -+{ 0x24, 97, 1, 2360 }, -+{ 0x48, 97, 1, 2356 }, -+{ 0x24, 98, 1, 2359 }, -+{ 0x48, 98, 1, 2355 }, -+{ 0x14, 99, 0, 2358 }, -+{ 0x34, 95, 1, 2218 }, -+{ 0x64, 96, 1, 2217 }, -+{ 0x64, 97, 1, 2216 }, -+{ 0x64, 98, 1, 2215 }, -+{ 0x1c, 118, 1, 2214 }, -+{ 0x1c, 119, 1, 2213 }, -+{ 0x1c, 120, 1, 2212 }, -+{ 0x1c, 121, 0, 2211 }, -+{ 0xc, 95, 1, 2522 }, -+{ 0xc, 96, 1, 2521 }, -+{ 0x18, 96, 1, 2517 }, -+{ 0xc, 97, 1, 2520 }, -+{ 0x18, 97, 1, 2516 }, -+{ 0xc, 98, 1, 2519 }, -+{ 0x18, 98, 1, 2515 }, -+{ 0xc, 99, 0, 2518 }, -+{ 0xa, 95, 1, 2370 }, -+{ 0x11, 96, 1, 2365 }, -+{ 0x22, 96, 1, 2369 }, -+{ 0x11, 97, 1, 2364 }, -+{ 0x22, 97, 1, 2368 }, -+{ 0x11, 98, 1, 2363 }, -+{ 0x22, 98, 1, 2367 }, -+{ 0xa, 99, 0, 2366 }, -+{ 0x1a, 95, 1, 2226 }, -+{ 0x62, 96, 1, 2225 }, -+{ 0x62, 97, 1, 2224 }, 
-+{ 0x62, 98, 1, 2223 }, -+{ 0xe, 118, 1, 2222 }, -+{ 0xe, 119, 1, 2221 }, -+{ 0xe, 120, 1, 2220 }, -+{ 0xe, 121, 0, 2219 }, -+{ 0x6, 95, 1, 2530 }, -+{ 0x5, 96, 1, 2525 }, -+{ 0xa, 96, 1, 2529 }, -+{ 0x5, 97, 1, 2524 }, -+{ 0xa, 97, 1, 2528 }, -+{ 0x5, 98, 1, 2523 }, -+{ 0xa, 98, 1, 2527 }, -+{ 0x6, 99, 0, 2526 }, -+{ 0x5, 95, 1, 2378 }, -+{ 0x9, 96, 1, 2377 }, -+{ 0x12, 96, 1, 2373 }, -+{ 0x9, 97, 1, 2376 }, -+{ 0x12, 97, 1, 2372 }, -+{ 0x9, 98, 1, 2375 }, -+{ 0x12, 98, 1, 2371 }, -+{ 0x5, 99, 0, 2374 }, -+{ 0xd, 95, 1, 2234 }, -+{ 0x19, 96, 1, 2233 }, -+{ 0x19, 97, 1, 2232 }, -+{ 0x19, 98, 1, 2231 }, -+{ 0x7, 118, 1, 2230 }, -+{ 0x7, 119, 1, 2229 }, -+{ 0x7, 120, 1, 2228 }, -+{ 0x7, 121, 0, 2227 }, -+{ 0x3, 95, 1, 2538 }, -+{ 0x3, 96, 1, 2537 }, -+{ 0x6, 96, 1, 2533 }, -+{ 0x3, 97, 1, 2536 }, -+{ 0x6, 97, 1, 2532 }, -+{ 0x3, 98, 1, 2535 }, -+{ 0x6, 98, 1, 2531 }, -+{ 0x3, 99, 0, 2534 }, -+{ 0x28, 62, 1, 2386 }, -+{ 0x44, 63, 1, 2381 }, -+{ 0x88, 63, 1, 2385 }, -+{ 0x44, 64, 1, 2380 }, -+{ 0x88, 64, 1, 2384 }, -+{ 0x44, 65, 1, 2379 }, -+{ 0x88, 65, 1, 2383 }, -+{ 0x28, 66, 0, 2382 }, -+{ 0x68, 62, 1, 2242 }, -+{ 0x188, 63, 1, 2241 }, -+{ 0x188, 64, 1, 2240 }, -+{ 0x188, 65, 1, 2239 }, -+{ 0x38, 81, 1, 2238 }, -+{ 0x38, 82, 1, 2237 }, -+{ 0x38, 83, 1, 2236 }, -+{ 0x38, 84, 0, 2235 }, -+{ 0x18, 62, 1, 2546 }, -+{ 0x14, 63, 1, 2541 }, -+{ 0x28, 63, 1, 2545 }, -+{ 0x14, 64, 1, 2540 }, -+{ 0x28, 64, 1, 2544 }, -+{ 0x14, 65, 1, 2539 }, -+{ 0x28, 65, 1, 2543 }, -+{ 0x18, 66, 0, 2542 }, -+{ 0x14, 62, 1, 2394 }, -+{ 0x24, 63, 1, 2393 }, -+{ 0x48, 63, 1, 2389 }, -+{ 0x24, 64, 1, 2392 }, -+{ 0x48, 64, 1, 2388 }, -+{ 0x24, 65, 1, 2391 }, -+{ 0x48, 65, 1, 2387 }, -+{ 0x14, 66, 0, 2390 }, -+{ 0x34, 62, 1, 2250 }, -+{ 0x64, 63, 1, 2249 }, -+{ 0x64, 64, 1, 2248 }, -+{ 0x64, 65, 1, 2247 }, -+{ 0x1c, 81, 1, 2246 }, -+{ 0x1c, 82, 1, 2245 }, -+{ 0x1c, 83, 1, 2244 }, -+{ 0x1c, 84, 0, 2243 }, -+{ 0xc, 62, 1, 2554 }, -+{ 0xc, 63, 1, 2553 }, -+{ 0x18, 63, 1, 2549 }, -+{ 0xc, 64, 1, 2552 }, -+{ 0x18, 64, 1, 2548 }, -+{ 0xc, 65, 1, 2551 }, -+{ 0x18, 65, 1, 2547 }, -+{ 0xc, 66, 0, 2550 }, -+{ 0xa, 62, 1, 2402 }, -+{ 0x11, 63, 1, 2397 }, -+{ 0x22, 63, 1, 2401 }, -+{ 0x11, 64, 1, 2396 }, -+{ 0x22, 64, 1, 2400 }, -+{ 0x11, 65, 1, 2395 }, -+{ 0x22, 65, 1, 2399 }, -+{ 0xa, 66, 0, 2398 }, -+{ 0x1a, 62, 1, 2258 }, -+{ 0x62, 63, 1, 2257 }, -+{ 0x62, 64, 1, 2256 }, -+{ 0x62, 65, 1, 2255 }, -+{ 0xe, 81, 1, 2254 }, -+{ 0xe, 82, 1, 2253 }, -+{ 0xe, 83, 1, 2252 }, -+{ 0xe, 84, 0, 2251 }, -+{ 0x6, 62, 1, 2562 }, -+{ 0x5, 63, 1, 2557 }, -+{ 0xa, 63, 1, 2561 }, -+{ 0x5, 64, 1, 2556 }, -+{ 0xa, 64, 1, 2560 }, -+{ 0x5, 65, 1, 2555 }, -+{ 0xa, 65, 1, 2559 }, -+{ 0x6, 66, 0, 2558 }, -+{ 0x5, 62, 1, 2410 }, -+{ 0x9, 63, 1, 2409 }, -+{ 0x12, 63, 1, 2405 }, -+{ 0x9, 64, 1, 2408 }, -+{ 0x12, 64, 1, 2404 }, -+{ 0x9, 65, 1, 2407 }, -+{ 0x12, 65, 1, 2403 }, -+{ 0x5, 66, 0, 2406 }, -+{ 0xd, 62, 1, 2266 }, -+{ 0x19, 63, 1, 2265 }, -+{ 0x19, 64, 1, 2264 }, -+{ 0x19, 65, 1, 2263 }, -+{ 0x7, 81, 1, 2262 }, -+{ 0x7, 82, 1, 2261 }, -+{ 0x7, 83, 1, 2260 }, -+{ 0x7, 84, 0, 2259 }, -+{ 0x3, 62, 1, 2570 }, -+{ 0x3, 63, 1, 2569 }, -+{ 0x6, 63, 1, 2565 }, -+{ 0x3, 64, 1, 2568 }, -+{ 0x6, 64, 1, 2564 }, -+{ 0x3, 65, 1, 2567 }, -+{ 0x6, 65, 1, 2563 }, -+{ 0x3, 66, 0, 2566 }, -+{ 0x8, 86, 1, 2434 }, -+{ 0x8, 87, 1, 2433 }, -+{ 0x2, 88, 1, 2432 }, -+{ 0x2, 89, 1, 2431 }, -+{ 0x2, 90, 1, 2430 }, -+{ 0x2, 91, 1, 2429 }, -+{ 0x2, 92, 1, 2428 }, -+{ 0x2, 93, 0, 2427 }, -+{ 0x18, 86, 1, 2426 }, -+{ 0x18, 87, 1, 2425 }, -+{ 0x6, 88, 1, 2424 }, -+{ 0x6, 89, 1, 
2423 }, -+{ 0x6, 90, 1, 2422 }, -+{ 0x6, 91, 1, 2421 }, -+{ 0x6, 92, 1, 2420 }, -+{ 0x6, 93, 0, 2419 }, -+{ 0x14, 86, 1, 2414 }, -+{ 0x22, 87, 1, 2411 }, -+{ 0x44, 87, 1, 2413 }, -+{ 0xa, 94, 0, 2412 }, -+{ 0x34, 86, 1, 2270 }, -+{ 0xc4, 87, 1, 2269 }, -+{ 0x38, 93, 1, 2267 }, -+{ 0xe, 117, 0, 2268 }, -+{ 0xc, 86, 1, 2574 }, -+{ 0xa, 87, 1, 2571 }, -+{ 0x14, 87, 1, 2573 }, -+{ 0x6, 94, 0, 2572 }, -+{ 0x2, 86, 1, 2282 }, -+{ 0x2, 87, 1, 2281 }, -+{ 0x4, 92, 1, 2280 }, -+{ 0x4, 93, 0, 2279 }, -+{ 0x12, 86, 1, 2278 }, -+{ 0x42, 87, 1, 2277 }, -+{ 0xc, 92, 1, 2276 }, -+{ 0xc, 93, 0, 2275 }, -+{ 0xa, 86, 1, 2418 }, -+{ 0x12, 87, 1, 2417 }, -+{ 0x24, 87, 1, 2415 }, -+{ 0x5, 94, 0, 2416 }, -+{ 0x1a, 86, 1, 2274 }, -+{ 0x32, 87, 1, 2273 }, -+{ 0x34, 93, 1, 2271 }, -+{ 0x7, 117, 0, 2272 }, -+{ 0x6, 86, 1, 2578 }, -+{ 0x6, 87, 1, 2577 }, -+{ 0xc, 87, 1, 2575 }, -+{ 0x3, 94, 0, 2576 }, -+{ 0x1, 86, 1, 2594 }, -+{ 0x1, 87, 1, 2593 }, -+{ 0x1, 88, 1, 2592 }, -+{ 0x1, 89, 1, 2591 }, -+{ 0x1, 90, 1, 2590 }, -+{ 0x1, 91, 1, 2589 }, -+{ 0x1, 92, 1, 2588 }, -+{ 0x1, 93, 0, 2587 }, -+{ 0x3, 86, 1, 2586 }, -+{ 0x3, 87, 1, 2585 }, -+{ 0x3, 88, 1, 2584 }, -+{ 0x3, 89, 1, 2583 }, -+{ 0x3, 90, 1, 2582 }, -+{ 0x3, 91, 1, 2581 }, -+{ 0x3, 92, 1, 2580 }, -+{ 0x3, 93, 0, 2579 }, -+{ 0x8, 53, 1, 2458 }, -+{ 0x8, 54, 1, 2457 }, -+{ 0x2, 55, 1, 2456 }, -+{ 0x2, 56, 1, 2455 }, -+{ 0x2, 57, 1, 2454 }, -+{ 0x2, 58, 1, 2453 }, -+{ 0x2, 59, 1, 2452 }, -+{ 0x2, 60, 0, 2451 }, -+{ 0x18, 53, 1, 2450 }, -+{ 0x18, 54, 1, 2449 }, -+{ 0x6, 55, 1, 2448 }, -+{ 0x6, 56, 1, 2447 }, -+{ 0x6, 57, 1, 2446 }, -+{ 0x6, 58, 1, 2445 }, -+{ 0x6, 59, 1, 2444 }, -+{ 0x6, 60, 0, 2443 }, -+{ 0x14, 53, 1, 2438 }, -+{ 0x22, 54, 1, 2435 }, -+{ 0x44, 54, 1, 2437 }, -+{ 0xa, 61, 0, 2436 }, -+{ 0x34, 53, 1, 2286 }, -+{ 0xc4, 54, 1, 2285 }, -+{ 0x38, 60, 1, 2283 }, -+{ 0xe, 80, 0, 2284 }, -+{ 0xc, 53, 1, 2598 }, -+{ 0xa, 54, 1, 2595 }, -+{ 0x14, 54, 1, 2597 }, -+{ 0x6, 61, 0, 2596 }, -+{ 0x2, 53, 1, 2298 }, -+{ 0x2, 54, 1, 2297 }, -+{ 0x4, 59, 1, 2296 }, -+{ 0x4, 60, 0, 2295 }, -+{ 0x12, 53, 1, 2294 }, -+{ 0x42, 54, 1, 2293 }, -+{ 0xc, 59, 1, 2292 }, -+{ 0xc, 60, 0, 2291 }, -+{ 0xa, 53, 1, 2442 }, -+{ 0x12, 54, 1, 2441 }, -+{ 0x24, 54, 1, 2439 }, -+{ 0x5, 61, 0, 2440 }, -+{ 0x1a, 53, 1, 2290 }, -+{ 0x32, 54, 1, 2289 }, -+{ 0x34, 60, 1, 2287 }, -+{ 0x7, 80, 0, 2288 }, -+{ 0x6, 53, 1, 2602 }, -+{ 0x6, 54, 1, 2601 }, -+{ 0xc, 54, 1, 2599 }, -+{ 0x3, 61, 0, 2600 }, -+{ 0x1, 53, 1, 2618 }, -+{ 0x1, 54, 1, 2617 }, -+{ 0x1, 55, 1, 2616 }, -+{ 0x1, 56, 1, 2615 }, -+{ 0x1, 57, 1, 2614 }, -+{ 0x1, 58, 1, 2613 }, -+{ 0x1, 59, 1, 2612 }, -+{ 0x1, 60, 0, 2611 }, -+{ 0x3, 53, 1, 2610 }, -+{ 0x3, 54, 1, 2609 }, -+{ 0x3, 55, 1, 2608 }, -+{ 0x3, 56, 1, 2607 }, -+{ 0x3, 57, 1, 2606 }, -+{ 0x3, 58, 1, 2605 }, -+{ 0x3, 59, 1, 2604 }, -+{ 0x3, 60, 0, 2603 }, -+{ 0x1, 4, 0, 2619 }, -+{ 0x1, 296, 0, 2620 }, -+{ 0x1, 379, 0, 2621 }, -+{ 0x1, 374, 0, 2622 }, -+{ 0x2, 358, 0, 2623 }, -+{ 0x1, 358, 0, 2626 }, -+{ 0x2, 357, 0, 2624 }, -+{ 0x1, 357, 0, 2627 }, -+{ 0x2, 356, 0, 2625 }, -+{ 0x1, 356, 0, 2628 }, -+{ 0x1, 355, 0, 2629 }, -+{ 0x1, 354, 0, 2630 }, -+{ 0x2, 353, 0, 2631 }, -+{ 0x1, 353, 0, 2633 }, -+{ 0x2, 352, 0, 2632 }, -+{ 0x1, 352, 0, 2634 }, -+{ 0x1, 382, 0, 2641 }, -+{ 0x8, 381, 0, 2635 }, -+{ 0x4, 381, 0, 2637 }, -+{ 0x2, 381, 0, 2639 }, -+{ 0x1, 381, 0, 2642 }, -+{ 0x8, 380, 0, 2636 }, -+{ 0x4, 380, 0, 2638 }, -+{ 0x2, 380, 0, 2640 }, -+{ 0x1, 380, 0, 2643 }, -+{ 0x1, 351, 0, 2650 }, -+{ 0x8, 350, 0, 2644 }, -+{ 0x4, 350, 0, 2646 }, -+{ 0x2, 350, 0, 2648 }, -+{ 
0x1, 350, 0, 2651 }, -+{ 0x8, 349, 0, 2645 }, -+{ 0x4, 349, 0, 2647 }, -+{ 0x2, 349, 1, 2649 }, -+{ 0x4, 143, 0, 1345 }, -+{ 0x1, 349, 0, 2652 }, -+{ 0x1, 6, 0, 2653 }, -+{ 0x1, 7, 0, 2654 }, -+{ 0x1, 295, 0, 2655 }, -+{ 0x1, 451, 0, 2656 }, -+{ 0x1, 346, 0, 2657 }, -+{ 0x1, 13, 0, 2658 }, -+{ 0x1, 11, 0, 2659 }, -+{ 0x1, 422, 0, 2660 }, -+{ 0x1, 394, 0, 2661 }, -+{ 0x1, 393, 0, 2662 }, -+{ 0x1, 450, 0, 2663 }, -+{ 0x1, 345, 0, 2664 }, -+{ 0x1, 12, 0, 2665 }, -+{ 0x1, 10, 0, 2666 }, -+{ 0x1, 5, 0, 2667 }, -+{ 0x1, 421, 0, 2668 }, -+{ 0x1, 420, 0, 2669 }, -+{ 0x1, 1, 0, 2670 }, -+{ 0x1, 0, 0, 2671 }, -+}; -+ ---- /dev/null -+++ b/arch/ia64/kdb/ia64-asmtab.h -@@ -0,0 +1,158 @@ -+/* ia64-asmtab.h -- Header for compacted IA-64 opcode tables. -+ Copyright 1999, 2000 Free Software Foundation, Inc. -+ Contributed by Bob Manson of Cygnus Support -+ -+ This file is part of GDB, GAS, and the GNU binutils. -+ -+ GDB, GAS, and the GNU binutils are free software; you can redistribute -+ them and/or modify them under the terms of the GNU General Public -+ License as published by the Free Software Foundation; either version -+ 2, or (at your option) any later version. -+ -+ GDB, GAS, and the GNU binutils are distributed in the hope that they -+ will be useful, but WITHOUT ANY WARRANTY; without even the implied -+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See -+ the GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this file; see the file COPYING. If not, write to the -+ Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA -+ 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifndef IA64_ASMTAB_H -+#define IA64_ASMTAB_H -+ -+#ifdef __KERNEL__ -+#include "ia64.h" -+#else /* __KERNEL__ */ -+#include "opcode/ia64.h" -+#endif /* __KERNEL__ */ -+ -+/* The primary opcode table is made up of the following: */ -+struct ia64_main_table -+{ -+ /* The entry in the string table that corresponds to the name of this -+ opcode. */ -+ unsigned short name_index; -+ -+ /* The type of opcode; corresponds to the TYPE field in -+ struct ia64_opcode. */ -+ unsigned char opcode_type; -+ -+ /* The number of outputs for this opcode. */ -+ unsigned char num_outputs; -+ -+ /* The base insn value for this opcode. It may be modified by completers. */ -+ ia64_insn opcode; -+ -+ /* The mask of valid bits in OPCODE. Zeros indicate operand fields. */ -+ ia64_insn mask; -+ -+ /* The operands of this instruction. Corresponds to the OPERANDS field -+ in struct ia64_opcode. */ -+ unsigned char operands[5]; -+ -+ /* The flags for this instruction. Corresponds to the FLAGS field in -+ struct ia64_opcode. */ -+ short flags; -+ -+ /* The tree of completers for this instruction; this is an offset into -+ completer_table. */ -+ short completers; -+}; -+ -+/* Each instruction has a set of possible "completers", or additional -+ suffixes that can alter the instruction's behavior, and which has -+ potentially different dependencies. -+ -+ The completer entries modify certain bits in the instruction opcode. -+ Which bits are to be modified are marked by the BITS, MASK and -+ OFFSET fields. The completer entry may also note dependencies for the -+ opcode. 
-+ -+ These completers are arranged in a DAG; the pointers are indexes -+ into the completer_table array. The completer DAG is searched by -+ find_completer () and ia64_find_matching_opcode (). -+ -+ Note that each completer needs to be applied in turn, so that if we -+ have the instruction -+ cmp.lt.unc -+ the completer entries for both "lt" and "unc" would need to be applied -+ to the opcode's value. -+ -+ Some instructions do not require any completers; these contain an -+ empty completer entry. Instructions that require a completer do -+ not contain an empty entry. -+ -+ Terminal completers (those completers that validly complete an -+ instruction) are marked by having the TERMINAL_COMPLETER flag set. -+ -+ Only dependencies listed in the terminal completer for an opcode are -+ considered to apply to that opcode instance. */ -+ -+struct ia64_completer_table -+{ -+ /* The bit value that this completer sets. */ -+ unsigned int bits; -+ -+ /* And its mask. 1s are bits that are to be modified in the -+ instruction. */ -+ unsigned int mask; -+ -+ /* The entry in the string table that corresponds to the name of this -+ completer. */ -+ unsigned short name_index; -+ -+ /* An alternative completer, or -1 if this is the end of the chain. */ -+ short alternative; -+ -+ /* A pointer to the DAG of completers that can potentially follow -+ this one, or -1. */ -+ short subentries; -+ -+ /* The bit offset in the instruction where BITS and MASK should be -+ applied. */ -+ unsigned char offset : 7; -+ -+ unsigned char terminal_completer : 1; -+ -+ /* Index into the dependency list table */ -+ short dependencies; -+}; -+ -+/* This contains sufficient information for the disassembler to resolve -+ the complete name of the original instruction. */ -+struct ia64_dis_names -+{ -+ /* COMPLETER_INDEX represents the tree of completers that make up -+ the instruction. The LSB represents the top of the tree for the -+ specified instruction. -+ -+ A 0 bit indicates to go to the next alternate completer via the -+ alternative field; a 1 bit indicates that the current completer -+ is part of the instruction, and to go down the subentries index. -+ We know we've reached the final completer when we run out of 1 -+ bits. -+ -+ There is always at least one 1 bit. */ -+ unsigned int completer_index : 20; -+ -+ /* The index in the main_table[] array for the instruction. */ -+ unsigned short insn_index : 11; -+ -+ /* If set, the next entry in this table is an alternate possibility -+ for this instruction encoding. Which one to use is determined by -+ the instruction type and other factors (see opcode_verify ()). */ -+ unsigned int next_flag : 1; -+ -+ /* The disassembly priority of this entry among instructions. */ -+ unsigned short priority; -+}; -+ -+#endif ---- /dev/null -+++ b/arch/ia64/kdb/ia64-dis.c -@@ -0,0 +1,312 @@ -+/* ia64-dis.c -- Disassemble ia64 instructions -+ Copyright 1998, 1999, 2000, 2002 Free Software Foundation, Inc. -+ Contributed by David Mosberger-Tang -+ -+ This file is part of GDB, GAS, and the GNU binutils. -+ -+ GDB, GAS, and the GNU binutils are free software; you can redistribute -+ them and/or modify them under the terms of the GNU General Public -+ License as published by the Free Software Foundation; either version -+ 2, or (at your option) any later version. -+ -+ GDB, GAS, and the GNU binutils are distributed in the hope that they -+ will be useful, but WITHOUT ANY WARRANTY; without even the implied -+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See -+ the GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this file; see the file COPYING. If not, write to the -+ Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA -+ 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifdef __KERNEL__ -+#include -+#include -+#include -+#include "ia64.h" -+ -+/* imported from bfd/libbfd.c for kernel */ -+bfd_uint64_t -+bfd_getl64 (const void *p ATTRIBUTE_UNUSED) -+{ -+#ifdef BFD_HOST_64_BIT -+ const bfd_byte *addr = p; -+ bfd_uint64_t v; -+ -+ v = addr[7]; v <<= 8; -+ v |= addr[6]; v <<= 8; -+ v |= addr[5]; v <<= 8; -+ v |= addr[4]; v <<= 8; -+ v |= addr[3]; v <<= 8; -+ v |= addr[2]; v <<= 8; -+ v |= addr[1]; v <<= 8; -+ v |= addr[0]; -+ -+ return v; -+#else -+ BFD_FAIL(); -+ return 0; -+#endif -+ -+} -+ -+#else /* __KERNEL__ */ -+#include -+#include -+ -+#include "dis-asm.h" -+#include "opcode/ia64.h" -+#endif /* __KERNEL__ */ -+ -+#define NELEMS(a) ((int) (sizeof (a) / sizeof (a[0]))) -+ -+/* Disassemble ia64 instruction. */ -+ -+/* Return the instruction type for OPCODE found in unit UNIT. */ -+ -+static enum ia64_insn_type -+unit_to_type (ia64_insn opcode, enum ia64_unit unit) -+{ -+ enum ia64_insn_type type; -+ int op; -+ -+ op = IA64_OP (opcode); -+ -+ if (op >= 8 && (unit == IA64_UNIT_I || unit == IA64_UNIT_M)) -+ { -+ type = IA64_TYPE_A; -+ } -+ else -+ { -+ switch (unit) -+ { -+ case IA64_UNIT_I: -+ type = IA64_TYPE_I; break; -+ case IA64_UNIT_M: -+ type = IA64_TYPE_M; break; -+ case IA64_UNIT_B: -+ type = IA64_TYPE_B; break; -+ case IA64_UNIT_F: -+ type = IA64_TYPE_F; break; -+ case IA64_UNIT_L: -+ case IA64_UNIT_X: -+ type = IA64_TYPE_X; break; -+ default: -+ type = -1; -+ } -+ } -+ return type; -+} -+ -+int -+print_insn_ia64 (bfd_vma memaddr, struct disassemble_info *info) -+{ -+ ia64_insn t0, t1, slot[3], template, s_bit, insn; -+ int slotnum, j, status, need_comma, retval, slot_multiplier; -+ const struct ia64_operand *odesc; -+ const struct ia64_opcode *idesc; -+ const char *err, *str, *tname; -+ BFD_HOST_U_64_BIT value; -+ bfd_byte bundle[16]; -+ enum ia64_unit unit; -+ char regname[16]; -+ -+ if (info->bytes_per_line == 0) -+ info->bytes_per_line = 6; -+ info->display_endian = info->endian; -+ -+ slot_multiplier = info->bytes_per_line; -+ retval = slot_multiplier; -+ -+ slotnum = (((long) memaddr) & 0xf) / slot_multiplier; -+ if (slotnum > 2) -+ return -1; -+ -+ memaddr -= (memaddr & 0xf); -+ status = (*info->read_memory_func) (memaddr, bundle, sizeof (bundle), info); -+ if (status != 0) -+ { -+ (*info->memory_error_func) (status, memaddr, info); -+ return -1; -+ } -+ /* bundles are always in little-endian byte order */ -+ t0 = bfd_getl64 (bundle); -+ t1 = bfd_getl64 (bundle + 8); -+ s_bit = t0 & 1; -+ template = (t0 >> 1) & 0xf; -+ slot[0] = (t0 >> 5) & 0x1ffffffffffLL; -+ slot[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18); -+ slot[2] = (t1 >> 23) & 0x1ffffffffffLL; -+ -+ tname = ia64_templ_desc[template].name; -+ if (slotnum == 0) -+ (*info->fprintf_func) (info->stream, "[%s] ", tname); -+ else -+ (*info->fprintf_func) (info->stream, " "); -+ -+ unit = ia64_templ_desc[template].exec_unit[slotnum]; -+ -+ if (template == 2 && slotnum == 1) -+ { -+ /* skip L slot in MLI template: */ -+ slotnum = 2; -+ retval += slot_multiplier; -+ } -+ -+ insn 
= slot[slotnum]; -+ -+ if (unit == IA64_UNIT_NIL) -+ goto decoding_failed; -+ -+ idesc = ia64_dis_opcode (insn, unit_to_type (insn, unit)); -+ if (idesc == NULL) -+ goto decoding_failed; -+ -+ /* print predicate, if any: */ -+ -+ if ((idesc->flags & IA64_OPCODE_NO_PRED) -+ || (insn & 0x3f) == 0) -+ (*info->fprintf_func) (info->stream, " "); -+ else -+ (*info->fprintf_func) (info->stream, "(p%02d) ", (int)(insn & 0x3f)); -+ -+ /* now the actual instruction: */ -+ -+ (*info->fprintf_func) (info->stream, "%s", idesc->name); -+ if (idesc->operands[0]) -+ (*info->fprintf_func) (info->stream, " "); -+ -+ need_comma = 0; -+ for (j = 0; j < NELEMS (idesc->operands) && idesc->operands[j]; ++j) -+ { -+ odesc = elf64_ia64_operands + idesc->operands[j]; -+ -+ if (need_comma) -+ (*info->fprintf_func) (info->stream, ","); -+ -+ if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64) -+ { -+ /* special case of 64 bit immediate load: */ -+ value = ((insn >> 13) & 0x7f) | (((insn >> 27) & 0x1ff) << 7) -+ | (((insn >> 22) & 0x1f) << 16) | (((insn >> 21) & 0x1) << 21) -+ | (slot[1] << 22) | (((insn >> 36) & 0x1) << 63); -+ } -+ else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62) -+ { -+ /* 62-bit immediate for nop.x/break.x */ -+ value = ((slot[1] & 0x1ffffffffffLL) << 21) -+ | (((insn >> 36) & 0x1) << 20) -+ | ((insn >> 6) & 0xfffff); -+ } -+ else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64) -+ { -+ /* 60-bit immediate for long branches. */ -+ value = (((insn >> 13) & 0xfffff) -+ | (((insn >> 36) & 1) << 59) -+ | (((slot[1] >> 2) & 0x7fffffffffLL) << 20)) << 4; -+ } -+ else -+ { -+ err = (*odesc->extract) (odesc, insn, &value); -+ if (err) -+ { -+ (*info->fprintf_func) (info->stream, "%s", err); -+ goto done; -+ } -+ } -+ -+ switch (odesc->class) -+ { -+ case IA64_OPND_CLASS_CST: -+ (*info->fprintf_func) (info->stream, "%s", odesc->str); -+ break; -+ -+ case IA64_OPND_CLASS_REG: -+ if (odesc->str[0] == 'a' && odesc->str[1] == 'r') -+ { -+ switch (value) -+ { -+ case 0: case 1: case 2: case 3: -+ case 4: case 5: case 6: case 7: -+ sprintf (regname, "ar.k%u", (unsigned int) value); -+ break; -+ case 16: strcpy (regname, "ar.rsc"); break; -+ case 17: strcpy (regname, "ar.bsp"); break; -+ case 18: strcpy (regname, "ar.bspstore"); break; -+ case 19: strcpy (regname, "ar.rnat"); break; -+ case 32: strcpy (regname, "ar.ccv"); break; -+ case 36: strcpy (regname, "ar.unat"); break; -+ case 40: strcpy (regname, "ar.fpsr"); break; -+ case 44: strcpy (regname, "ar.itc"); break; -+ case 64: strcpy (regname, "ar.pfs"); break; -+ case 65: strcpy (regname, "ar.lc"); break; -+ case 66: strcpy (regname, "ar.ec"); break; -+ default: -+ sprintf (regname, "ar%u", (unsigned int) value); -+ break; -+ } -+ (*info->fprintf_func) (info->stream, "%s", regname); -+ } -+ else -+ (*info->fprintf_func) (info->stream, "%s%d", odesc->str, (int)value); -+ break; -+ -+ case IA64_OPND_CLASS_IND: -+ (*info->fprintf_func) (info->stream, "%s[r%d]", odesc->str, (int)value); -+ break; -+ -+ case IA64_OPND_CLASS_ABS: -+ str = 0; -+ if (odesc - elf64_ia64_operands == IA64_OPND_MBTYPE4) -+ switch (value) -+ { -+ case 0x0: str = "@brcst"; break; -+ case 0x8: str = "@mix"; break; -+ case 0x9: str = "@shuf"; break; -+ case 0xa: str = "@alt"; break; -+ case 0xb: str = "@rev"; break; -+ } -+ -+ if (str) -+ (*info->fprintf_func) (info->stream, "%s", str); -+ else if (odesc->flags & IA64_OPND_FLAG_DECIMAL_SIGNED) -+ (*info->fprintf_func) (info->stream, "%lld", (long long) value); -+ else if (odesc->flags & IA64_OPND_FLAG_DECIMAL_UNSIGNED) -+ 
(*info->fprintf_func) (info->stream, "%llu", (long long) value); -+ else -+ (*info->fprintf_func) (info->stream, "0x%llx", (long long) value); -+ break; -+ -+ case IA64_OPND_CLASS_REL: -+ (*info->print_address_func) (memaddr + value, info); -+ break; -+ } -+ -+ need_comma = 1; -+ if (j + 1 == idesc->num_outputs) -+ { -+ (*info->fprintf_func) (info->stream, "="); -+ need_comma = 0; -+ } -+ } -+ if (slotnum + 1 == ia64_templ_desc[template].group_boundary -+ || ((slotnum == 2) && s_bit)) -+ (*info->fprintf_func) (info->stream, ";;"); -+ -+ done: -+ ia64_free_opcode ((struct ia64_opcode *)idesc); -+ failed: -+ if (slotnum == 2) -+ retval += 16 - 3*slot_multiplier; -+ return retval; -+ -+ decoding_failed: -+ (*info->fprintf_func) (info->stream, " data8 %#011llx", (long long) insn); -+ goto failed; -+} ---- /dev/null -+++ b/arch/ia64/kdb/ia64-opc.c -@@ -0,0 +1,749 @@ -+/* ia64-opc.c -- Functions to access the compacted opcode table -+ Copyright 1999, 2000, 2001, 2003, 2005 Free Software Foundation, Inc. -+ Written by Bob Manson of Cygnus Solutions, -+ -+ This file is part of GDB, GAS, and the GNU binutils. -+ -+ GDB, GAS, and the GNU binutils are free software; you can redistribute -+ them and/or modify them under the terms of the GNU General Public -+ License as published by the Free Software Foundation; either version -+ 2, or (at your option) any later version. -+ -+ GDB, GAS, and the GNU binutils are distributed in the hope that they -+ will be useful, but WITHOUT ANY WARRANTY; without even the implied -+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See -+ the GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this file; see the file COPYING. If not, write to the -+ Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA -+ 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. 
-+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifdef __KERNEL__ -+#include -+#include -+#include -+#include -+ -+#define xstrdup(string) ({ char *res = kdb_strdup(string, GFP_ATOMIC); if (!res) BUG(); res; }) -+#define xmalloc(size) ({ void *res = debug_kmalloc(size, GFP_ATOMIC); if (!res) BUG(); res; }) -+#define free(address) debug_kfree(address) -+#define abort() BUG() -+ -+#else /* __KERNEL__ */ -+#include "ansidecl.h" -+#include "sysdep.h" -+#include "libiberty.h" -+#endif /* __KERNEL__ */ -+#include "ia64-asmtab.h" -+#include "ia64-asmtab.c" -+ -+static void get_opc_prefix (const char **, char *); -+static short int find_string_ent (const char *); -+static short int find_main_ent (short int); -+static short int find_completer (short int, short int, const char *); -+static ia64_insn apply_completer (ia64_insn, int); -+static int extract_op_bits (int, int, int); -+static int extract_op (int, int *, unsigned int *); -+static int opcode_verify (ia64_insn, int, enum ia64_insn_type); -+static int locate_opcode_ent (ia64_insn, enum ia64_insn_type); -+static struct ia64_opcode *make_ia64_opcode -+ (ia64_insn, const char *, int, int); -+static struct ia64_opcode *ia64_find_matching_opcode -+ (const char *, short int); -+ -+const struct ia64_templ_desc ia64_templ_desc[16] = -+ { -+ { 0, { IA64_UNIT_M, IA64_UNIT_I, IA64_UNIT_I }, "MII" }, /* 0 */ -+ { 2, { IA64_UNIT_M, IA64_UNIT_I, IA64_UNIT_I }, "MII" }, -+ { 0, { IA64_UNIT_M, IA64_UNIT_L, IA64_UNIT_X }, "MLX" }, -+ { 0, { 0, }, "-3-" }, -+ { 0, { IA64_UNIT_M, IA64_UNIT_M, IA64_UNIT_I }, "MMI" }, /* 4 */ -+ { 1, { IA64_UNIT_M, IA64_UNIT_M, IA64_UNIT_I }, "MMI" }, -+ { 0, { IA64_UNIT_M, IA64_UNIT_F, IA64_UNIT_I }, "MFI" }, -+ { 0, { IA64_UNIT_M, IA64_UNIT_M, IA64_UNIT_F }, "MMF" }, -+ { 0, { IA64_UNIT_M, IA64_UNIT_I, IA64_UNIT_B }, "MIB" }, /* 8 */ -+ { 0, { IA64_UNIT_M, IA64_UNIT_B, IA64_UNIT_B }, "MBB" }, -+ { 0, { 0, }, "-a-" }, -+ { 0, { IA64_UNIT_B, IA64_UNIT_B, IA64_UNIT_B }, "BBB" }, -+ { 0, { IA64_UNIT_M, IA64_UNIT_M, IA64_UNIT_B }, "MMB" }, /* c */ -+ { 0, { 0, }, "-d-" }, -+ { 0, { IA64_UNIT_M, IA64_UNIT_F, IA64_UNIT_B }, "MFB" }, -+ { 0, { 0, }, "-f-" }, -+ }; -+ -+ -+/* Copy the prefix contained in *PTR (up to a '.' or a NUL) to DEST. -+ PTR will be adjusted to point to the start of the next portion -+ of the opcode, or at the NUL character. */ -+ -+static void -+get_opc_prefix (const char **ptr, char *dest) -+{ -+ char *c = strchr (*ptr, '.'); -+ if (c != NULL) -+ { -+ memcpy (dest, *ptr, c - *ptr); -+ dest[c - *ptr] = '\0'; -+ *ptr = c + 1; -+ } -+ else -+ { -+ int l = strlen (*ptr); -+ memcpy (dest, *ptr, l); -+ dest[l] = '\0'; -+ *ptr += l; -+ } -+} -+ -+/* Find the index of the entry in the string table corresponding to -+ STR; return -1 if one does not exist. */ -+ -+static short -+find_string_ent (const char *str) -+{ -+ short start = 0; -+ short end = sizeof (ia64_strings) / sizeof (const char *); -+ short i = (start + end) / 2; -+ -+ if (strcmp (str, ia64_strings[end - 1]) > 0) -+ { -+ return -1; -+ } -+ while (start <= end) -+ { -+ int c = strcmp (str, ia64_strings[i]); -+ if (c < 0) -+ { -+ end = i - 1; -+ } -+ else if (c == 0) -+ { -+ return i; -+ } -+ else -+ { -+ start = i + 1; -+ } -+ i = (start + end) / 2; -+ } -+ return -1; -+} -+ -+/* Find the opcode in the main opcode table whose name is STRINGINDEX, or -+ return -1 if one does not exist. 
*/ -+ -+static short -+find_main_ent (short nameindex) -+{ -+ short start = 0; -+ short end = sizeof (main_table) / sizeof (struct ia64_main_table); -+ short i = (start + end) / 2; -+ -+ if (nameindex < main_table[0].name_index -+ || nameindex > main_table[end - 1].name_index) -+ { -+ return -1; -+ } -+ while (start <= end) -+ { -+ if (nameindex < main_table[i].name_index) -+ { -+ end = i - 1; -+ } -+ else if (nameindex == main_table[i].name_index) -+ { -+ while (i > 0 && main_table[i - 1].name_index == nameindex) -+ { -+ i--; -+ } -+ return i; -+ } -+ else -+ { -+ start = i + 1; -+ } -+ i = (start + end) / 2; -+ } -+ return -1; -+} -+ -+/* Find the index of the entry in the completer table that is part of -+ MAIN_ENT (starting from PREV_COMPLETER) that matches NAME, or -+ return -1 if one does not exist. */ -+ -+static short -+find_completer (short main_ent, short prev_completer, const char *name) -+{ -+ short name_index = find_string_ent (name); -+ -+ if (name_index < 0) -+ { -+ return -1; -+ } -+ -+ if (prev_completer == -1) -+ { -+ prev_completer = main_table[main_ent].completers; -+ } -+ else -+ { -+ prev_completer = completer_table[prev_completer].subentries; -+ } -+ -+ while (prev_completer != -1) -+ { -+ if (completer_table[prev_completer].name_index == name_index) -+ { -+ return prev_completer; -+ } -+ prev_completer = completer_table[prev_completer].alternative; -+ } -+ return -1; -+} -+ -+/* Apply the completer referred to by COMPLETER_INDEX to OPCODE, and -+ return the result. */ -+ -+static ia64_insn -+apply_completer (ia64_insn opcode, int completer_index) -+{ -+ ia64_insn mask = completer_table[completer_index].mask; -+ ia64_insn bits = completer_table[completer_index].bits; -+ int shiftamt = (completer_table[completer_index].offset & 63); -+ -+ mask = mask << shiftamt; -+ bits = bits << shiftamt; -+ opcode = (opcode & ~mask) | bits; -+ return opcode; -+} -+ -+/* Extract BITS number of bits starting from OP_POINTER + BITOFFSET in -+ the dis_table array, and return its value. (BITOFFSET is numbered -+ starting from MSB to LSB, so a BITOFFSET of 0 indicates the MSB of the -+ first byte in OP_POINTER.) */ -+ -+static int -+extract_op_bits (int op_pointer, int bitoffset, int bits) -+{ -+ int res = 0; -+ -+ op_pointer += (bitoffset / 8); -+ -+ if (bitoffset % 8) -+ { -+ unsigned int op = dis_table[op_pointer++]; -+ int numb = 8 - (bitoffset % 8); -+ int mask = (1 << numb) - 1; -+ int bata = (bits < numb) ? bits : numb; -+ int delta = numb - bata; -+ -+ res = (res << bata) | ((op & mask) >> delta); -+ bitoffset += bata; -+ bits -= bata; -+ } -+ while (bits >= 8) -+ { -+ res = (res << 8) | (dis_table[op_pointer++] & 255); -+ bits -= 8; -+ } -+ if (bits > 0) -+ { -+ unsigned int op = (dis_table[op_pointer++] & 255); -+ res = (res << bits) | (op >> (8 - bits)); -+ } -+ return res; -+} -+ -+/* Examine the state machine entry at OP_POINTER in the dis_table -+ array, and extract its values into OPVAL and OP. The length of the -+ state entry in bits is returned. */ -+ -+static int -+extract_op (int op_pointer, int *opval, unsigned int *op) -+{ -+ int oplen = 5; -+ -+ *op = dis_table[op_pointer]; -+ -+ if ((*op) & 0x40) -+ { -+ opval[0] = extract_op_bits (op_pointer, oplen, 5); -+ oplen += 5; -+ } -+ switch ((*op) & 0x30) -+ { -+ case 0x10: -+ { -+ opval[1] = extract_op_bits (op_pointer, oplen, 8); -+ oplen += 8; -+ opval[1] += op_pointer; -+ break; -+ } -+ case 0x20: -+ { -+ opval[1] = extract_op_bits (op_pointer, oplen, 16); -+ if (! 
(opval[1] & 32768)) -+ { -+ opval[1] += op_pointer; -+ } -+ oplen += 16; -+ break; -+ } -+ case 0x30: -+ { -+ oplen--; -+ opval[2] = extract_op_bits (op_pointer, oplen, 12); -+ oplen += 12; -+ opval[2] |= 32768; -+ break; -+ } -+ } -+ if (((*op) & 0x08) && (((*op) & 0x30) != 0x30)) -+ { -+ opval[2] = extract_op_bits (op_pointer, oplen, 16); -+ oplen += 16; -+ if (! (opval[2] & 32768)) -+ { -+ opval[2] += op_pointer; -+ } -+ } -+ return oplen; -+} -+ -+/* Returns a non-zero value if the opcode in the main_table list at -+ PLACE matches OPCODE and is of type TYPE. */ -+ -+static int -+opcode_verify (ia64_insn opcode, int place, enum ia64_insn_type type) -+{ -+ if (main_table[place].opcode_type != type) -+ { -+ return 0; -+ } -+ if (main_table[place].flags -+ & (IA64_OPCODE_F2_EQ_F3 | IA64_OPCODE_LEN_EQ_64MCNT)) -+ { -+ const struct ia64_operand *o1, *o2; -+ ia64_insn f2, f3; -+ -+ if (main_table[place].flags & IA64_OPCODE_F2_EQ_F3) -+ { -+ o1 = elf64_ia64_operands + IA64_OPND_F2; -+ o2 = elf64_ia64_operands + IA64_OPND_F3; -+ (*o1->extract) (o1, opcode, &f2); -+ (*o2->extract) (o2, opcode, &f3); -+ if (f2 != f3) -+ return 0; -+ } -+ else -+ { -+ ia64_insn len, count; -+ -+ /* length must equal 64-count: */ -+ o1 = elf64_ia64_operands + IA64_OPND_LEN6; -+ o2 = elf64_ia64_operands + main_table[place].operands[2]; -+ (*o1->extract) (o1, opcode, &len); -+ (*o2->extract) (o2, opcode, &count); -+ if (len != 64 - count) -+ return 0; -+ } -+ } -+ return 1; -+} -+ -+/* Find an instruction entry in the ia64_dis_names array that matches -+ opcode OPCODE and is of type TYPE. Returns either a positive index -+ into the array, or a negative value if an entry for OPCODE could -+ not be found. Checks all matches and returns the one with the highest -+ priority. */ -+ -+static int -+locate_opcode_ent (ia64_insn opcode, enum ia64_insn_type type) -+{ -+ int currtest[41]; -+ int bitpos[41]; -+ int op_ptr[41]; -+ int currstatenum = 0; -+ short found_disent = -1; -+ short found_priority = -1; -+ -+ currtest[currstatenum] = 0; -+ op_ptr[currstatenum] = 0; -+ bitpos[currstatenum] = 40; -+ -+ while (1) -+ { -+ int op_pointer = op_ptr[currstatenum]; -+ unsigned int op; -+ int currbitnum = bitpos[currstatenum]; -+ int oplen; -+ int opval[3] = {0}; -+ int next_op; -+ int currbit; -+ -+ oplen = extract_op (op_pointer, opval, &op); -+ -+ bitpos[currstatenum] = currbitnum; -+ -+ /* Skip opval[0] bits in the instruction. */ -+ if (op & 0x40) -+ { -+ currbitnum -= opval[0]; -+ } -+ -+ /* The value of the current bit being tested. */ -+ currbit = opcode & (((ia64_insn) 1) << currbitnum) ? 1 : 0; -+ next_op = -1; -+ -+ /* We always perform the tests specified in the current state in -+ a particular order, falling through to the next test if the -+ previous one failed. */ -+ switch (currtest[currstatenum]) -+ { -+ case 0: -+ currtest[currstatenum]++; -+ if (currbit == 0 && (op & 0x80)) -+ { -+ /* Check for a zero bit. If this test solely checks for -+ a zero bit, we can check for up to 8 consecutive zero -+ bits (the number to check is specified by the lower 3 -+ bits in the state code.) -+ -+ If the state instruction matches, we go to the very -+ next state instruction; otherwise, try the next test. */ -+ -+ if ((op & 0xf8) == 0x80) -+ { -+ int count = op & 0x7; -+ int x; -+ -+ for (x = 0; x <= count; x++) -+ { -+ int i = -+ opcode & (((ia64_insn) 1) << (currbitnum - x)) ? 
1 : 0; -+ if (i) -+ { -+ break; -+ } -+ } -+ if (x > count) -+ { -+ next_op = op_pointer + ((oplen + 7) / 8); -+ currbitnum -= count; -+ break; -+ } -+ } -+ else if (! currbit) -+ { -+ next_op = op_pointer + ((oplen + 7) / 8); -+ break; -+ } -+ } -+ /* FALLTHROUGH */ -+ case 1: -+ /* If the bit in the instruction is one, go to the state -+ instruction specified by opval[1]. */ -+ currtest[currstatenum]++; -+ if (currbit && (op & 0x30) != 0 && ((op & 0x30) != 0x30)) -+ { -+ next_op = opval[1]; -+ break; -+ } -+ /* FALLTHROUGH */ -+ case 2: -+ /* Don't care. Skip the current bit and go to the state -+ instruction specified by opval[2]. -+ -+ An encoding of 0x30 is special; this means that a 12-bit -+ offset into the ia64_dis_names[] array is specified. */ -+ currtest[currstatenum]++; -+ if ((op & 0x08) || ((op & 0x30) == 0x30)) -+ { -+ next_op = opval[2]; -+ break; -+ } -+ } -+ -+ /* If bit 15 is set in the address of the next state, an offset -+ in the ia64_dis_names array was specified instead. We then -+ check to see if an entry in the list of opcodes matches the -+ opcode we were given; if so, we have succeeded. */ -+ -+ if ((next_op >= 0) && (next_op & 32768)) -+ { -+ short disent = next_op & 32767; -+ short priority = -1; -+ -+ if (next_op > 65535) -+ { -+ abort (); -+ } -+ -+ /* Run through the list of opcodes to check, trying to find -+ one that matches. */ -+ while (disent >= 0) -+ { -+ int place = ia64_dis_names[disent].insn_index; -+ -+ priority = ia64_dis_names[disent].priority; -+ -+ if (opcode_verify (opcode, place, type) -+ && priority > found_priority) -+ { -+ break; -+ } -+ if (ia64_dis_names[disent].next_flag) -+ { -+ disent++; -+ } -+ else -+ { -+ disent = -1; -+ } -+ } -+ -+ if (disent >= 0) -+ { -+ found_disent = disent; -+ found_priority = priority; -+ } -+ /* Try the next test in this state, regardless of whether a match -+ was found. */ -+ next_op = -2; -+ } -+ -+ /* next_op == -1 is "back up to the previous state". -+ next_op == -2 is "stay in this state and try the next test". -+ Otherwise, transition to the state indicated by next_op. */ -+ -+ if (next_op == -1) -+ { -+ currstatenum--; -+ if (currstatenum < 0) -+ { -+ return found_disent; -+ } -+ } -+ else if (next_op >= 0) -+ { -+ currstatenum++; -+ bitpos[currstatenum] = currbitnum - 1; -+ op_ptr[currstatenum] = next_op; -+ currtest[currstatenum] = 0; -+ } -+ } -+} -+ -+/* Construct an ia64_opcode entry based on OPCODE, NAME and PLACE. */ -+ -+static struct ia64_opcode * -+make_ia64_opcode (ia64_insn opcode, const char *name, int place, int depind) -+{ -+ struct ia64_opcode *res = -+ (struct ia64_opcode *) xmalloc (sizeof (struct ia64_opcode)); -+ res->name = xstrdup (name); -+ res->type = main_table[place].opcode_type; -+ res->num_outputs = main_table[place].num_outputs; -+ res->opcode = opcode; -+ res->mask = main_table[place].mask; -+ res->operands[0] = main_table[place].operands[0]; -+ res->operands[1] = main_table[place].operands[1]; -+ res->operands[2] = main_table[place].operands[2]; -+ res->operands[3] = main_table[place].operands[3]; -+ res->operands[4] = main_table[place].operands[4]; -+ res->flags = main_table[place].flags; -+ res->ent_index = place; -+ res->dependencies = &op_dependencies[depind]; -+ return res; -+} -+ -+/* Determine the ia64_opcode entry for the opcode specified by INSN -+ and TYPE. If a valid entry is not found, return NULL. 
*/ -+struct ia64_opcode * -+ia64_dis_opcode (ia64_insn insn, enum ia64_insn_type type) -+{ -+ int disent = locate_opcode_ent (insn, type); -+ -+ if (disent < 0) -+ { -+ return NULL; -+ } -+ else -+ { -+ unsigned int cb = ia64_dis_names[disent].completer_index; -+ static char name[128]; -+ int place = ia64_dis_names[disent].insn_index; -+ int ci = main_table[place].completers; -+ ia64_insn tinsn = main_table[place].opcode; -+ -+ strcpy (name, ia64_strings [main_table[place].name_index]); -+ -+ while (cb) -+ { -+ if (cb & 1) -+ { -+ int cname = completer_table[ci].name_index; -+ -+ tinsn = apply_completer (tinsn, ci); -+ -+ if (ia64_strings[cname][0] != '\0') -+ { -+ strcat (name, "."); -+ strcat (name, ia64_strings[cname]); -+ } -+ if (cb != 1) -+ { -+ ci = completer_table[ci].subentries; -+ } -+ } -+ else -+ { -+ ci = completer_table[ci].alternative; -+ } -+ if (ci < 0) -+ { -+ abort (); -+ } -+ cb = cb >> 1; -+ } -+ if (tinsn != (insn & main_table[place].mask)) -+ { -+ abort (); -+ } -+ return make_ia64_opcode (insn, name, place, -+ completer_table[ci].dependencies); -+ } -+} -+ -+/* Search the main_opcode table starting from PLACE for an opcode that -+ matches NAME. Return NULL if one is not found. */ -+ -+static struct ia64_opcode * -+ia64_find_matching_opcode (const char *name, short place) -+{ -+ char op[129]; -+ const char *suffix; -+ short name_index; -+ -+ if (strlen (name) > 128) -+ { -+ return NULL; -+ } -+ suffix = name; -+ get_opc_prefix (&suffix, op); -+ name_index = find_string_ent (op); -+ if (name_index < 0) -+ { -+ return NULL; -+ } -+ -+ while (main_table[place].name_index == name_index) -+ { -+ const char *curr_suffix = suffix; -+ ia64_insn curr_insn = main_table[place].opcode; -+ short completer = -1; -+ -+ do { -+ if (suffix[0] == '\0') -+ { -+ completer = find_completer (place, completer, suffix); -+ } -+ else -+ { -+ get_opc_prefix (&curr_suffix, op); -+ completer = find_completer (place, completer, op); -+ } -+ if (completer != -1) -+ { -+ curr_insn = apply_completer (curr_insn, completer); -+ } -+ } while (completer != -1 && curr_suffix[0] != '\0'); -+ -+ if (completer != -1 && curr_suffix[0] == '\0' -+ && completer_table[completer].terminal_completer) -+ { -+ int depind = completer_table[completer].dependencies; -+ return make_ia64_opcode (curr_insn, name, place, depind); -+ } -+ else -+ { -+ place++; -+ } -+ } -+ return NULL; -+} -+ -+/* Find the next opcode after PREV_ENT that matches PREV_ENT, or return NULL -+ if one does not exist. -+ -+ It is the caller's responsibility to invoke ia64_free_opcode () to -+ release any resources used by the returned entry. */ -+ -+struct ia64_opcode * -+ia64_find_next_opcode (struct ia64_opcode *prev_ent) -+{ -+ return ia64_find_matching_opcode (prev_ent->name, -+ prev_ent->ent_index + 1); -+} -+ -+/* Find the first opcode that matches NAME, or return NULL if it does -+ not exist. -+ -+ It is the caller's responsibility to invoke ia64_free_opcode () to -+ release any resources used by the returned entry. */ -+ -+struct ia64_opcode * -+ia64_find_opcode (const char *name) -+{ -+ char op[129]; -+ const char *suffix; -+ short place; -+ short name_index; -+ -+ if (strlen (name) > 128) -+ { -+ return NULL; -+ } -+ suffix = name; -+ get_opc_prefix (&suffix, op); -+ name_index = find_string_ent (op); -+ if (name_index < 0) -+ { -+ return NULL; -+ } -+ -+ place = find_main_ent (name_index); -+ -+ if (place < 0) -+ { -+ return NULL; -+ } -+ return ia64_find_matching_opcode (name, place); -+} -+ -+/* Free any resources used by ENT. 
*/ -+void -+ia64_free_opcode (struct ia64_opcode *ent) -+{ -+ free ((void *)ent->name); -+ free (ent); -+} -+ -+const struct ia64_dependency * -+ia64_find_dependency (int index) -+{ -+ index = DEP(index); -+ -+ if (index < 0 -+ || index >= (int)(sizeof(dependencies) / sizeof(dependencies[0]))) -+ return NULL; -+ -+ return &dependencies[index]; -+} ---- /dev/null -+++ b/arch/ia64/kdb/ia64-opc.h -@@ -0,0 +1,141 @@ -+/* ia64-opc.h -- IA-64 opcode table. -+ Copyright 1998, 1999, 2000, 2002 Free Software Foundation, Inc. -+ Contributed by David Mosberger-Tang -+ -+ This file is part of GDB, GAS, and the GNU binutils. -+ -+ GDB, GAS, and the GNU binutils are free software; you can redistribute -+ them and/or modify them under the terms of the GNU General Public -+ License as published by the Free Software Foundation; either version -+ 2, or (at your option) any later version. -+ -+ GDB, GAS, and the GNU binutils are distributed in the hope that they -+ will be useful, but WITHOUT ANY WARRANTY; without even the implied -+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See -+ the GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this file; see the file COPYING. If not, write to the -+ Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA -+ 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifndef IA64_OPC_H -+#define IA64_OPC_H -+ -+#ifdef __KERNEL__ -+#include "ia64.h" -+#else /* __KERNEL__ */ -+#include "opcode/ia64.h" -+#endif /* __KERNEL__ */ -+ -+/* define a couple of abbreviations: */ -+ -+#define bOp(x) (((ia64_insn) ((x) & 0xf)) << 37) -+#define mOp bOp (-1) -+#define Op(x) bOp (x), mOp -+ -+#define FIRST IA64_OPCODE_FIRST -+#define X_IN_MLX IA64_OPCODE_X_IN_MLX -+#define LAST IA64_OPCODE_LAST -+#define PRIV IA64_OPCODE_PRIV -+#define NO_PRED IA64_OPCODE_NO_PRED -+#define SLOT2 IA64_OPCODE_SLOT2 -+#define PSEUDO IA64_OPCODE_PSEUDO -+#define F2_EQ_F3 IA64_OPCODE_F2_EQ_F3 -+#define LEN_EQ_64MCNT IA64_OPCODE_LEN_EQ_64MCNT -+#define MOD_RRBS IA64_OPCODE_MOD_RRBS -+#define POSTINC IA64_OPCODE_POSTINC -+ -+#define AR_CCV IA64_OPND_AR_CCV -+#define AR_PFS IA64_OPND_AR_PFS -+#define AR_CSD IA64_OPND_AR_CSD -+#define C1 IA64_OPND_C1 -+#define C8 IA64_OPND_C8 -+#define C16 IA64_OPND_C16 -+#define GR0 IA64_OPND_GR0 -+#define IP IA64_OPND_IP -+#define PR IA64_OPND_PR -+#define PR_ROT IA64_OPND_PR_ROT -+#define PSR IA64_OPND_PSR -+#define PSR_L IA64_OPND_PSR_L -+#define PSR_UM IA64_OPND_PSR_UM -+ -+#define AR3 IA64_OPND_AR3 -+#define B1 IA64_OPND_B1 -+#define B2 IA64_OPND_B2 -+#define CR3 IA64_OPND_CR3 -+#define F1 IA64_OPND_F1 -+#define F2 IA64_OPND_F2 -+#define F3 IA64_OPND_F3 -+#define F4 IA64_OPND_F4 -+#define P1 IA64_OPND_P1 -+#define P2 IA64_OPND_P2 -+#define R1 IA64_OPND_R1 -+#define R2 IA64_OPND_R2 -+#define R3 IA64_OPND_R3 -+#define R3_2 IA64_OPND_R3_2 -+ -+#define CPUID_R3 IA64_OPND_CPUID_R3 -+#define DBR_R3 IA64_OPND_DBR_R3 -+#define DTR_R3 IA64_OPND_DTR_R3 -+#define ITR_R3 IA64_OPND_ITR_R3 -+#define IBR_R3 IA64_OPND_IBR_R3 -+#define MR3 IA64_OPND_MR3 -+#define MSR_R3 IA64_OPND_MSR_R3 -+#define PKR_R3 IA64_OPND_PKR_R3 -+#define PMC_R3 IA64_OPND_PMC_R3 -+#define PMD_R3 IA64_OPND_PMD_R3 -+#define RR_R3 IA64_OPND_RR_R3 -+ -+#define CCNT5 IA64_OPND_CCNT5 -+#define CNT2a IA64_OPND_CNT2a 
-+#define CNT2b IA64_OPND_CNT2b -+#define CNT2c IA64_OPND_CNT2c -+#define CNT5 IA64_OPND_CNT5 -+#define CNT6 IA64_OPND_CNT6 -+#define CPOS6a IA64_OPND_CPOS6a -+#define CPOS6b IA64_OPND_CPOS6b -+#define CPOS6c IA64_OPND_CPOS6c -+#define IMM1 IA64_OPND_IMM1 -+#define IMM14 IA64_OPND_IMM14 -+#define IMM17 IA64_OPND_IMM17 -+#define IMM22 IA64_OPND_IMM22 -+#define IMM44 IA64_OPND_IMM44 -+#define SOF IA64_OPND_SOF -+#define SOL IA64_OPND_SOL -+#define SOR IA64_OPND_SOR -+#define IMM8 IA64_OPND_IMM8 -+#define IMM8U4 IA64_OPND_IMM8U4 -+#define IMM8M1 IA64_OPND_IMM8M1 -+#define IMM8M1U4 IA64_OPND_IMM8M1U4 -+#define IMM8M1U8 IA64_OPND_IMM8M1U8 -+#define IMM9a IA64_OPND_IMM9a -+#define IMM9b IA64_OPND_IMM9b -+#define IMMU2 IA64_OPND_IMMU2 -+#define IMMU21 IA64_OPND_IMMU21 -+#define IMMU24 IA64_OPND_IMMU24 -+#define IMMU62 IA64_OPND_IMMU62 -+#define IMMU64 IA64_OPND_IMMU64 -+#define IMMU7a IA64_OPND_IMMU7a -+#define IMMU7b IA64_OPND_IMMU7b -+#define IMMU9 IA64_OPND_IMMU9 -+#define INC3 IA64_OPND_INC3 -+#define LEN4 IA64_OPND_LEN4 -+#define LEN6 IA64_OPND_LEN6 -+#define MBTYPE4 IA64_OPND_MBTYPE4 -+#define MHTYPE8 IA64_OPND_MHTYPE8 -+#define POS6 IA64_OPND_POS6 -+#define TAG13 IA64_OPND_TAG13 -+#define TAG13b IA64_OPND_TAG13b -+#define TGT25 IA64_OPND_TGT25 -+#define TGT25b IA64_OPND_TGT25b -+#define TGT25c IA64_OPND_TGT25c -+#define TGT64 IA64_OPND_TGT64 -+ -+#endif ---- /dev/null -+++ b/arch/ia64/kdb/ia64.h -@@ -0,0 +1,402 @@ -+/* ia64.h -- Header file for ia64 opcode table -+ Copyright (C) 1998, 1999, 2000, 2002 Free Software Foundation, Inc. -+ Contributed by David Mosberger-Tang */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. 
-+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifndef opcode_ia64_h -+#define opcode_ia64_h -+ -+#ifdef __KERNEL__ -+#include -+#else /* __KERNEL__ */ -+#include -+ -+#include "bfd.h" -+#endif /* __KERNEL__ */ -+ -+ -+typedef BFD_HOST_U_64_BIT ia64_insn; -+ -+enum ia64_insn_type -+ { -+ IA64_TYPE_NIL = 0, /* illegal type */ -+ IA64_TYPE_A, /* integer alu (I- or M-unit) */ -+ IA64_TYPE_I, /* non-alu integer (I-unit) */ -+ IA64_TYPE_M, /* memory (M-unit) */ -+ IA64_TYPE_B, /* branch (B-unit) */ -+ IA64_TYPE_F, /* floating-point (F-unit) */ -+ IA64_TYPE_X, /* long encoding (X-unit) */ -+ IA64_TYPE_DYN, /* Dynamic opcode */ -+ IA64_NUM_TYPES -+ }; -+ -+enum ia64_unit -+ { -+ IA64_UNIT_NIL = 0, /* illegal unit */ -+ IA64_UNIT_I, /* integer unit */ -+ IA64_UNIT_M, /* memory unit */ -+ IA64_UNIT_B, /* branching unit */ -+ IA64_UNIT_F, /* floating-point unit */ -+ IA64_UNIT_L, /* long "unit" */ -+ IA64_UNIT_X, /* may be integer or branch unit */ -+ IA64_NUM_UNITS -+ }; -+ -+/* Changes to this enumeration must be propagated to the operand table in -+ bfd/cpu-ia64-opc.c -+ */ -+enum ia64_opnd -+ { -+ IA64_OPND_NIL, /* no operand---MUST BE FIRST!*/ -+ -+ /* constants */ -+ IA64_OPND_AR_CSD, /* application register csd (ar.csd) */ -+ IA64_OPND_AR_CCV, /* application register ccv (ar.ccv) */ -+ IA64_OPND_AR_PFS, /* application register pfs (ar.pfs) */ -+ IA64_OPND_C1, /* the constant 1 */ -+ IA64_OPND_C8, /* the constant 8 */ -+ IA64_OPND_C16, /* the constant 16 */ -+ IA64_OPND_GR0, /* gr0 */ -+ IA64_OPND_IP, /* instruction pointer (ip) */ -+ IA64_OPND_PR, /* predicate register (pr) */ -+ IA64_OPND_PR_ROT, /* rotating predicate register (pr.rot) */ -+ IA64_OPND_PSR, /* processor status register (psr) */ -+ IA64_OPND_PSR_L, /* processor status register L (psr.l) */ -+ IA64_OPND_PSR_UM, /* processor status register UM (psr.um) */ -+ -+ /* register operands: */ -+ IA64_OPND_AR3, /* third application register # (bits 20-26) */ -+ IA64_OPND_B1, /* branch register # (bits 6-8) */ -+ IA64_OPND_B2, /* branch register # (bits 13-15) */ -+ IA64_OPND_CR3, /* third control register # (bits 20-26) */ -+ IA64_OPND_F1, /* first floating-point register # */ -+ IA64_OPND_F2, /* second floating-point register # */ -+ IA64_OPND_F3, /* third floating-point register # */ -+ IA64_OPND_F4, /* fourth floating-point register # */ -+ IA64_OPND_P1, /* first predicate # */ -+ IA64_OPND_P2, /* second predicate # */ -+ IA64_OPND_R1, /* first register # */ -+ IA64_OPND_R2, /* second register # */ -+ IA64_OPND_R3, /* third register # */ -+ IA64_OPND_R3_2, /* third register # (limited to gr0-gr3) */ -+ -+ /* indirect operands: */ -+ IA64_OPND_CPUID_R3, /* cpuid[reg] */ -+ IA64_OPND_DBR_R3, /* dbr[reg] */ -+ IA64_OPND_DTR_R3, /* dtr[reg] */ -+ IA64_OPND_ITR_R3, /* itr[reg] */ -+ IA64_OPND_IBR_R3, /* ibr[reg] */ -+ IA64_OPND_MR3, /* memory at addr of third register # */ -+ IA64_OPND_MSR_R3, /* msr[reg] */ -+ IA64_OPND_PKR_R3, /* pkr[reg] */ -+ IA64_OPND_PMC_R3, /* pmc[reg] */ -+ IA64_OPND_PMD_R3, /* pmd[reg] */ -+ IA64_OPND_RR_R3, /* rr[reg] */ -+ -+ /* immediate operands: */ -+ IA64_OPND_CCNT5, /* 5-bit count (31 - bits 20-24) */ -+ IA64_OPND_CNT2a, /* 2-bit count (1 + bits 27-28) */ -+ IA64_OPND_CNT2b, /* 2-bit count (bits 27-28): 1, 2, 3 */ -+ IA64_OPND_CNT2c, /* 2-bit count (bits 30-31): 0, 7, 15, or 16 */ -+ IA64_OPND_CNT5, /* 5-bit count (bits 14-18) */ -+ IA64_OPND_CNT6, /* 6-bit count (bits 27-32) */ -+ IA64_OPND_CPOS6a, /* 6-bit count (63 - bits 20-25) */ -+ IA64_OPND_CPOS6b, /* 6-bit count (63 - bits 14-19) */ -+ IA64_OPND_CPOS6c, 
/* 6-bit count (63 - bits 31-36) */ -+ IA64_OPND_IMM1, /* signed 1-bit immediate (bit 36) */ -+ IA64_OPND_IMMU2, /* unsigned 2-bit immediate (bits 13-14) */ -+ IA64_OPND_IMMU7a, /* unsigned 7-bit immediate (bits 13-19) */ -+ IA64_OPND_IMMU7b, /* unsigned 7-bit immediate (bits 20-26) */ -+ IA64_OPND_SOF, /* 8-bit stack frame size */ -+ IA64_OPND_SOL, /* 8-bit size of locals */ -+ IA64_OPND_SOR, /* 6-bit number of rotating registers (scaled by 8) */ -+ IA64_OPND_IMM8, /* signed 8-bit immediate (bits 13-19 & 36) */ -+ IA64_OPND_IMM8U4, /* cmp4*u signed 8-bit immediate (bits 13-19 & 36) */ -+ IA64_OPND_IMM8M1, /* signed 8-bit immediate -1 (bits 13-19 & 36) */ -+ IA64_OPND_IMM8M1U4, /* cmp4*u signed 8-bit immediate -1 (bits 13-19 & 36)*/ -+ IA64_OPND_IMM8M1U8, /* cmp*u signed 8-bit immediate -1 (bits 13-19 & 36) */ -+ IA64_OPND_IMMU9, /* unsigned 9-bit immediate (bits 33-34, 20-26) */ -+ IA64_OPND_IMM9a, /* signed 9-bit immediate (bits 6-12, 27, 36) */ -+ IA64_OPND_IMM9b, /* signed 9-bit immediate (bits 13-19, 27, 36) */ -+ IA64_OPND_IMM14, /* signed 14-bit immediate (bits 13-19, 27-32, 36) */ -+ IA64_OPND_IMM17, /* signed 17-bit immediate (2*bits 6-12, 24-31, 36) */ -+ IA64_OPND_IMMU21, /* unsigned 21-bit immediate (bits 6-25, 36) */ -+ IA64_OPND_IMM22, /* signed 22-bit immediate (bits 13-19, 22-36) */ -+ IA64_OPND_IMMU24, /* unsigned 24-bit immediate (bits 6-26, 31-32, 36) */ -+ IA64_OPND_IMM44, /* signed 44-bit immediate (2^16*bits 6-32, 36) */ -+ IA64_OPND_IMMU62, /* unsigned 62-bit immediate */ -+ IA64_OPND_IMMU64, /* unsigned 64-bit immediate (lotsa bits...) */ -+ IA64_OPND_INC3, /* signed 3-bit (bits 13-15): +/-1, 4, 8, 16 */ -+ IA64_OPND_LEN4, /* 4-bit count (bits 27-30 + 1) */ -+ IA64_OPND_LEN6, /* 6-bit count (bits 27-32 + 1) */ -+ IA64_OPND_MBTYPE4, /* 4-bit mux type (bits 20-23) */ -+ IA64_OPND_MHTYPE8, /* 8-bit mux type (bits 20-27) */ -+ IA64_OPND_POS6, /* 6-bit count (bits 14-19) */ -+ IA64_OPND_TAG13, /* signed 13-bit tag (ip + 16*bits 6-12, 33-34) */ -+ IA64_OPND_TAG13b, /* signed 13-bit tag (ip + 16*bits 24-32) */ -+ IA64_OPND_TGT25, /* signed 25-bit (ip + 16*bits 6-25, 36) */ -+ IA64_OPND_TGT25b, /* signed 25-bit (ip + 16*bits 6-12, 20-32, 36) */ -+ IA64_OPND_TGT25c, /* signed 25-bit (ip + 16*bits 13-32, 36) */ -+ IA64_OPND_TGT64, /* 64-bit (ip + 16*bits 13-32, 36, 2-40(L)) */ -+ IA64_OPND_LDXMOV, /* any symbol, generates R_IA64_LDXMOV. */ -+ -+ IA64_OPND_COUNT /* # of operand types (MUST BE LAST!) 
*/ -+ }; -+ -+enum ia64_dependency_mode -+{ -+ IA64_DV_RAW, -+ IA64_DV_WAW, -+ IA64_DV_WAR, -+}; -+ -+enum ia64_dependency_semantics -+{ -+ IA64_DVS_NONE, -+ IA64_DVS_IMPLIED, -+ IA64_DVS_IMPLIEDF, -+ IA64_DVS_DATA, -+ IA64_DVS_INSTR, -+ IA64_DVS_SPECIFIC, -+ IA64_DVS_STOP, -+ IA64_DVS_OTHER, -+}; -+ -+enum ia64_resource_specifier -+{ -+ IA64_RS_ANY, -+ IA64_RS_AR_K, -+ IA64_RS_AR_UNAT, -+ IA64_RS_AR, /* 8-15, 20, 22-23, 31, 33-35, 37-39, 41-43, 45-47, 67-111 */ -+ IA64_RS_ARb, /* 48-63, 112-127 */ -+ IA64_RS_BR, -+ IA64_RS_CFM, -+ IA64_RS_CPUID, -+ IA64_RS_CR_IRR, -+ IA64_RS_CR_LRR, -+ IA64_RS_CR, /* 3-7,10-15,18,26-63,75-79,82-127 */ -+ IA64_RS_DBR, -+ IA64_RS_FR, -+ IA64_RS_FRb, -+ IA64_RS_GR0, -+ IA64_RS_GR, -+ IA64_RS_IBR, -+ IA64_RS_INSERVICE, /* CR[EOI] or CR[IVR] */ -+ IA64_RS_MSR, -+ IA64_RS_PKR, -+ IA64_RS_PMC, -+ IA64_RS_PMD, -+ IA64_RS_PR, /* non-rotating, 1-15 */ -+ IA64_RS_PRr, /* rotating, 16-62 */ -+ IA64_RS_PR63, -+ IA64_RS_RR, -+ -+ IA64_RS_ARX, /* ARs not in RS_AR or RS_ARb */ -+ IA64_RS_CRX, /* CRs not in RS_CR */ -+ IA64_RS_PSR, /* PSR bits */ -+ IA64_RS_RSE, /* implementation-specific RSE resources */ -+ IA64_RS_AR_FPSR, -+}; -+ -+enum ia64_rse_resource -+{ -+ IA64_RSE_N_STACKED_PHYS, -+ IA64_RSE_BOF, -+ IA64_RSE_STORE_REG, -+ IA64_RSE_LOAD_REG, -+ IA64_RSE_BSPLOAD, -+ IA64_RSE_RNATBITINDEX, -+ IA64_RSE_CFLE, -+ IA64_RSE_NDIRTY, -+}; -+ -+/* Information about a given resource dependency */ -+struct ia64_dependency -+{ -+ /* Name of the resource */ -+ const char *name; -+ /* Does this dependency need further specification? */ -+ enum ia64_resource_specifier specifier; -+ /* Mode of dependency */ -+ enum ia64_dependency_mode mode; -+ /* Dependency semantics */ -+ enum ia64_dependency_semantics semantics; -+ /* Register index, if applicable (distinguishes AR, CR, and PSR deps) */ -+#define REG_NONE (-1) -+ int regindex; -+ /* Special info on semantics */ -+ const char *info; -+}; -+ -+/* Two arrays of indexes into the ia64_dependency table. -+ chks are dependencies to check for conflicts when an opcode is -+ encountered; regs are dependencies to register (mark as used) when an -+ opcode is used. chks correspond to readers (RAW) or writers (WAW or -+ WAR) of a resource, while regs correspond to writers (RAW or WAW) and -+ readers (WAR) of a resource. */ -+struct ia64_opcode_dependency -+{ -+ int nchks; -+ const unsigned short *chks; -+ int nregs; -+ const unsigned short *regs; -+}; -+ -+/* encode/extract the note/index for a dependency */ -+#define RDEP(N,X) (((N)<<11)|(X)) -+#define NOTE(X) (((X)>>11)&0x1F) -+#define DEP(X) ((X)&0x7FF) -+ -+/* A template descriptor describes the execution units that are active -+ for each of the three slots. It also specifies the location of -+ instruction group boundaries that may be present between two slots. */ -+struct ia64_templ_desc -+ { -+ int group_boundary; /* 0=no boundary, 1=between slot 0 & 1, etc. */ -+ enum ia64_unit exec_unit[3]; -+ const char *name; -+ }; -+ -+/* The opcode table is an array of struct ia64_opcode. */ -+ -+struct ia64_opcode -+ { -+ /* The opcode name. */ -+ const char *name; -+ -+ /* The type of the instruction: */ -+ enum ia64_insn_type type; -+ -+ /* Number of output operands: */ -+ int num_outputs; -+ -+ /* The opcode itself. Those bits which will be filled in with -+ operands are zeroes. */ -+ ia64_insn opcode; -+ -+ /* The opcode mask. This is used by the disassembler. 
This is a -+ mask containing ones indicating those bits which must match the -+ opcode field, and zeroes indicating those bits which need not -+ match (and are presumably filled in by operands). */ -+ ia64_insn mask; -+ -+ /* An array of operand codes. Each code is an index into the -+ operand table. They appear in the order which the operands must -+ appear in assembly code, and are terminated by a zero. */ -+ enum ia64_opnd operands[5]; -+ -+ /* One bit flags for the opcode. These are primarily used to -+ indicate specific processors and environments support the -+ instructions. The defined values are listed below. */ -+ unsigned int flags; -+ -+ /* Used by ia64_find_next_opcode (). */ -+ short ent_index; -+ -+ /* Opcode dependencies. */ -+ const struct ia64_opcode_dependency *dependencies; -+ }; -+ -+/* Values defined for the flags field of a struct ia64_opcode. */ -+ -+#define IA64_OPCODE_FIRST (1<<0) /* must be first in an insn group */ -+#define IA64_OPCODE_X_IN_MLX (1<<1) /* insn is allowed in X slot of MLX */ -+#define IA64_OPCODE_LAST (1<<2) /* must be last in an insn group */ -+#define IA64_OPCODE_PRIV (1<<3) /* privileged instruct */ -+#define IA64_OPCODE_SLOT2 (1<<4) /* insn allowed in slot 2 only */ -+#define IA64_OPCODE_NO_PRED (1<<5) /* insn cannot be predicated */ -+#define IA64_OPCODE_PSEUDO (1<<6) /* insn is a pseudo-op */ -+#define IA64_OPCODE_F2_EQ_F3 (1<<7) /* constraint: F2 == F3 */ -+#define IA64_OPCODE_LEN_EQ_64MCNT (1<<8) /* constraint: LEN == 64-CNT */ -+#define IA64_OPCODE_MOD_RRBS (1<<9) /* modifies all rrbs in CFM */ -+#define IA64_OPCODE_POSTINC (1<<10) /* postincrement MR3 operand */ -+ -+/* A macro to extract the major opcode from an instruction. */ -+#define IA64_OP(i) (((i) >> 37) & 0xf) -+ -+enum ia64_operand_class -+ { -+ IA64_OPND_CLASS_CST, /* constant */ -+ IA64_OPND_CLASS_REG, /* register */ -+ IA64_OPND_CLASS_IND, /* indirect register */ -+ IA64_OPND_CLASS_ABS, /* absolute value */ -+ IA64_OPND_CLASS_REL, /* IP-relative value */ -+ }; -+ -+/* The operands table is an array of struct ia64_operand. */ -+ -+struct ia64_operand -+{ -+ enum ia64_operand_class class; -+ -+ /* Set VALUE as the operand bits for the operand of type SELF in the -+ instruction pointed to by CODE. If an error occurs, *CODE is not -+ modified and the returned string describes the cause of the -+ error. If no error occurs, NULL is returned. */ -+ const char *(*insert) (const struct ia64_operand *self, ia64_insn value, -+ ia64_insn *code); -+ -+ /* Extract the operand bits for an operand of type SELF from -+ instruction CODE store them in *VALUE. If an error occurs, the -+ cause of the error is described by the string returned. If no -+ error occurs, NULL is returned. */ -+ const char *(*extract) (const struct ia64_operand *self, ia64_insn code, -+ ia64_insn *value); -+ -+ /* A string whose meaning depends on the operand class. */ -+ -+ const char *str; -+ -+ struct bit_field -+ { -+ /* The number of bits in the operand. */ -+ int bits; -+ -+ /* How far the operand is left shifted in the instruction. */ -+ int shift; -+ } -+ field[4]; /* no operand has more than this many bit-fields */ -+ -+ unsigned int flags; -+ -+ const char *desc; /* brief description */ -+}; -+ -+/* Values defined for the flags field of a struct ia64_operand. 
*/ -+ -+/* Disassemble as signed decimal (instead of hex): */ -+#define IA64_OPND_FLAG_DECIMAL_SIGNED (1<<0) -+/* Disassemble as unsigned decimal (instead of hex): */ -+#define IA64_OPND_FLAG_DECIMAL_UNSIGNED (1<<1) -+ -+extern const struct ia64_templ_desc ia64_templ_desc[16]; -+ -+/* The tables are sorted by major opcode number and are otherwise in -+ the order in which the disassembler should consider instructions. */ -+extern struct ia64_opcode ia64_opcodes_a[]; -+extern struct ia64_opcode ia64_opcodes_i[]; -+extern struct ia64_opcode ia64_opcodes_m[]; -+extern struct ia64_opcode ia64_opcodes_b[]; -+extern struct ia64_opcode ia64_opcodes_f[]; -+extern struct ia64_opcode ia64_opcodes_d[]; -+ -+ -+extern struct ia64_opcode *ia64_find_opcode (const char *name); -+extern struct ia64_opcode *ia64_find_next_opcode (struct ia64_opcode *ent); -+ -+extern struct ia64_opcode *ia64_dis_opcode (ia64_insn insn, -+ enum ia64_insn_type type); -+ -+extern void ia64_free_opcode (struct ia64_opcode *ent); -+extern const struct ia64_dependency *ia64_find_dependency (int index); -+ -+/* To avoid circular library dependencies, this array is implemented -+ in bfd/cpu-ia64-opc.c: */ -+extern const struct ia64_operand elf64_ia64_operands[IA64_OPND_COUNT]; -+ -+#endif /* opcode_ia64_h */ ---- /dev/null -+++ b/arch/ia64/kdb/kdb_cmds -@@ -0,0 +1,17 @@ -+# Standard architecture specific commands for kdb. -+# These commands are appended to those in kdb/kdb_cmds, see that file for -+# restrictions. -+ -+# Standard debugging information for first level support, invoked from archkdb* -+# commands that are defined in kdb/kdb_cmds. -+ -+defcmd archkdbcommon "" "Common arch debugging" -+ set LINES 2000000 -+ set BTAPROMPT 0 -+ -summary -+ -id %ip-0x40 -+ -cpu -+ -ps -+ -dmesg 600 -+ -bt -+endefcmd ---- /dev/null -+++ b/arch/ia64/kdb/kdba_bp.c -@@ -0,0 +1,841 @@ -+/* -+ * Kernel Debugger Architecture Dependent Breakpoint Handling -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+static char *kdba_rwtypes[] = { "Instruction(Register)", "Data Write", -+ "I/O", "Data Access"}; -+ -+/* -+ * Table describing processor architecture hardware -+ * breakpoint registers. -+ */ -+ -+static kdbhard_bp_t kdb_hardbreaks[KDB_MAXHARDBPT]; -+ -+#ifdef CONFIG_KDB_HARDWARE_BREAKPOINTS -+/* -+ * Counters for number of debug registers used on each CPU. -+ * Used to detect when to enable and disable debug traps. -+ */ -+static unsigned char kdb_dbrs_used[NR_CPUS]; -+#endif /* CONFIG_KDB_HARDWARE_BREAKPOINTS */ -+ -+/* -+ * kdba_db_trap -+ * -+ * Perform breakpoint processing upon entry to the -+ * processor debugger fault. Determine and print -+ * the active breakpoint. -+ * -+ * Parameters: -+ * regs Exception frame containing machine register state -+ * error Error number passed to kdb. -+ * Outputs: -+ * None. -+ * Returns: -+ * KDB_DB_BPT Standard instruction or data breakpoint encountered -+ * KDB_DB_SS Single Step fault ('ss' command or end of 'ssb' command) -+ * KDB_DB_SSB Single Step fault, caller should continue ('ssb' command) -+ * KDB_DB_SSBPT Single step over breakpoint -+ * KDB_DB_NOBPT No existing kdb breakpoint matches this debug exception -+ * Locking: -+ * None. -+ * Remarks: -+ * Yup, there be goto's here. 
-+ * -+ * If multiple processors receive debug exceptions simultaneously, -+ * one may be waiting at the kdb fence in kdb() while the user -+ * issues a 'bc' command to clear the breakpoint the processor -+ * which is waiting has already encountered. If this is the case, -+ * the debug registers will no longer match any entry in the -+ * breakpoint table, and we'll return the value KDB_DB_NOBPT. -+ * This can cause a panic in die_if_kernel(). It is safer to -+ * disable the breakpoint (bd), go until all processors are past -+ * the breakpoint then clear the breakpoint (bc). This code -+ * recognises a breakpoint even when disabled but not when it has -+ * been cleared. -+ * -+ * WARNING: This routine clears the debug state. It should be called -+ * once per debug and the result cached. -+ */ -+ -+kdb_dbtrap_t -+kdba_db_trap(struct pt_regs *regs, int error) -+{ -+ int i; -+ kdb_dbtrap_t rv = KDB_DB_BPT; -+ kdb_bp_t *bp; -+ -+ if (KDB_NULL_REGS(regs)) -+ return KDB_DB_NOBPT; -+ -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_db_trap: error %d\n", error); -+ -+ if (error == 36) { -+ /* Single step */ -+ if (KDB_STATE(SSBPT)) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("ssbpt\n"); -+ KDB_STATE_CLEAR(SSBPT); -+ for(i=0,bp=kdb_breakpoints; -+ i < KDB_MAXBPT; -+ i++, bp++) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("bp 0x%p enabled %d delayed %d global %d cpu %d\n", -+ bp, bp->bp_enabled, bp->bp_delayed, bp->bp_global, bp->bp_cpu); -+ if (!bp->bp_enabled) -+ continue; -+ if (!bp->bp_global && bp->bp_cpu != smp_processor_id()) -+ continue; -+ if (KDB_DEBUG(BP)) -+ kdb_printf("bp for this cpu\n"); -+ if (bp->bp_delayed) { -+ bp->bp_delayed = 0; -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_installbp\n"); -+ kdba_installbp(regs, bp); -+ if (!KDB_STATE(DOING_SS)) { -+ kdba_clearsinglestep(regs); -+ return(KDB_DB_SSBPT); -+ } -+ break; -+ } -+ } -+ if (i == KDB_MAXBPT) { -+ kdb_printf("kdb: Unable to find delayed breakpoint\n"); -+ } -+ if (!KDB_STATE(DOING_SS)) { -+ kdba_clearsinglestep(regs); -+ return(KDB_DB_NOBPT); -+ } -+ /* FALLTHROUGH */ -+ } -+ -+ /* -+ * KDB_STATE_DOING_SS is set when the kernel debugger is using -+ * the processor trap flag to single-step a processor. If a -+ * single step trap occurs and this flag is clear, the SS trap -+ * will be ignored by KDB and the kernel will be allowed to deal -+ * with it as necessary (e.g. for ptrace). -+ */ -+ if (!KDB_STATE(DOING_SS)) -+ return(KDB_DB_NOBPT); -+ -+ /* single step */ -+ rv = KDB_DB_SS; /* Indicate single step */ -+ if (KDB_STATE(DOING_SSB)) /* No ia64 ssb support yet */ -+ KDB_STATE_CLEAR(DOING_SSB); /* No ia64 ssb support yet */ -+ if (KDB_STATE(DOING_SSB)) { -+ /* No IA64 ssb support yet */ -+ } else { -+ /* -+ * Print current insn -+ */ -+ kdb_machreg_t pc = regs->cr_iip + ia64_psr(regs)->ri * 6; -+ kdb_printf("SS trap at "); -+ kdb_symbol_print(pc, NULL, KDB_SP_DEFAULT|KDB_SP_NEWLINE); -+ kdb_id1(pc); -+ KDB_STATE_CLEAR(DOING_SS); -+ } -+ -+ if (rv != KDB_DB_SSB) -+ kdba_clearsinglestep(regs); -+ } -+ -+ return(rv); -+} -+ -+/* -+ * kdba_bp_trap -+ * -+ * Perform breakpoint processing upon entry to the -+ * processor breakpoint instruction fault. Determine and print -+ * the active breakpoint. -+ * -+ * Parameters: -+ * regs Exception frame containing machine register state -+ * error Error number passed to kdb. -+ * Outputs: -+ * None. 
-+ * Returns: -+ * 0 Standard instruction or data breakpoint encountered -+ * 1 Single Step fault ('ss' command) -+ * 2 Single Step fault, caller should continue ('ssb' command) -+ * 3 No existing kdb breakpoint matches this debug exception -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * If multiple processors receive debug exceptions simultaneously, -+ * one may be waiting at the kdb fence in kdb() while the user -+ * issues a 'bc' command to clear the breakpoint the processor which -+ * is waiting has already encountered. If this is the case, the -+ * debug registers will no longer match any entry in the breakpoint -+ * table, and we'll return the value '3'. This can cause a panic -+ * in die_if_kernel(). It is safer to disable the breakpoint (bd), -+ * 'go' until all processors are past the breakpoint then clear the -+ * breakpoint (bc). This code recognises a breakpoint even when -+ * disabled but not when it has been cleared. -+ * -+ * WARNING: This routine resets the ip. It should be called -+ * once per breakpoint and the result cached. -+ */ -+ -+kdb_dbtrap_t -+kdba_bp_trap(struct pt_regs *regs, int error) -+{ -+ int i; -+ kdb_dbtrap_t rv; -+ kdb_bp_t *bp; -+ -+ if (KDB_NULL_REGS(regs)) -+ return KDB_DB_NOBPT; -+ -+ /* -+ * Determine which breakpoint was encountered. -+ */ -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_bp_trap: ip=0x%lx " -+ "regs=0x%p sp=0x%lx\n", -+ regs->cr_iip, regs, regs->r12); -+ -+ rv = KDB_DB_NOBPT; /* Cause kdb() to return */ -+ -+ for(i=0, bp=kdb_breakpoints; ibp_free) -+ continue; -+ if (!bp->bp_global && bp->bp_cpu != smp_processor_id()) -+ continue; -+ if (bp->bp_addr == regs->cr_iip) { -+ /* Hit this breakpoint. */ -+ kdb_printf("Instruction(i) breakpoint #%d at 0x%lx\n", -+ i, regs->cr_iip); -+ kdb_id1(regs->cr_iip); -+ rv = KDB_DB_BPT; -+ bp->bp_delay = 1; -+ /* SSBPT is set when the kernel debugger must single -+ * step a task in order to re-establish an instruction -+ * breakpoint which uses the instruction replacement -+ * mechanism. It is cleared by any action that removes -+ * the need to single-step the breakpoint. -+ */ -+ KDB_STATE_SET(SSBPT); -+ break; -+ } -+ } -+ -+ return rv; -+} -+ -+/* -+ * kdba_handle_bp -+ * -+ * Handle an instruction-breakpoint trap. Called when re-installing -+ * an enabled breakpoint which has has the bp_delay bit set. -+ * -+ * Parameters: -+ * Returns: -+ * Locking: -+ * Remarks: -+ * -+ * Ok, we really need to: -+ * 1) Restore the original instruction byte(s) -+ * 2) Single Step -+ * 3) Restore breakpoint instruction -+ * 4) Continue. -+ * -+ * -+ */ -+ -+static void -+kdba_handle_bp(struct pt_regs *regs, kdb_bp_t *bp) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return; -+ -+ if (KDB_DEBUG(BP)) -+ kdb_printf("regs->cr_iip = 0x%lx\n", regs->cr_iip); -+ -+ /* -+ * Setup single step -+ */ -+ kdba_setsinglestep(regs); -+ -+ /* -+ * Reset delay attribute -+ */ -+ bp->bp_delay = 0; -+ bp->bp_delayed = 1; -+} -+ -+ -+/* -+ * kdba_bptype -+ * -+ * Return a string describing type of breakpoint. -+ * -+ * Parameters: -+ * bph Pointer to hardware breakpoint description -+ * Outputs: -+ * None. -+ * Returns: -+ * Character string. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+char * -+kdba_bptype(kdbhard_bp_t *bph) -+{ -+ char *mode; -+ -+ mode = kdba_rwtypes[bph->bph_mode]; -+ -+ return mode; -+} -+ -+/* -+ * kdba_printbpreg -+ * -+ * Print register name assigned to breakpoint -+ * -+ * Parameters: -+ * bph Pointer hardware breakpoint structure -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. 
-+ * Remarks: -+ */ -+ -+static void -+kdba_printbpreg(kdbhard_bp_t *bph) -+{ -+ kdb_printf(" in dr%ld", bph->bph_reg); -+} -+ -+/* -+ * kdba_printbp -+ * -+ * Print string describing hardware breakpoint. -+ * -+ * Parameters: -+ * bph Pointer to hardware breakpoint description -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+void -+kdba_printbp(kdb_bp_t *bp) -+{ -+ kdb_printf("\n is enabled"); -+ if (bp->bp_hardtype) { -+ /* Note that bp->bp_hard[NR_CPU] is for x86. -+ * The ia64 uses bp->bp_hard[0] only. -+ */ -+ kdba_printbpreg(bp->bp_hard[0]); -+ if (bp->bp_hard[0]->bph_mode != 0) { -+ kdb_printf(" for %d bytes", -+ bp->bp_hard[0]->bph_length+1); -+ } -+ } -+} -+ -+/* -+ * kdba_parsebp -+ * -+ * Parse architecture dependent portion of the -+ * breakpoint command. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic for failure -+ * Locking: -+ * None. -+ * Remarks: -+ * for IA64 architure, data access, data write and -+ * I/O breakpoints are supported in addition to instruction -+ * breakpoints. -+ * -+ * {datar|dataw|io|inst} [length] -+ */ -+ -+int -+kdba_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp) -+{ -+ int nextarg = *nextargp; -+ int diag; -+ kdbhard_bp_t *bph = &bp->bp_template; -+ -+ bph->bph_mode = 0; /* Default to instruction breakpoint */ -+ bph->bph_length = 0; /* Length must be zero for insn bp */ -+ if ((argc + 1) != nextarg) { -+ if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0) { -+ bph->bph_mode = 3; -+ } else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0) { -+ bph->bph_mode = 1; -+ } else if (strnicmp(argv[nextarg], "io", sizeof("io")) == 0) { -+ bph->bph_mode = 2; -+ } else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0) { -+ bph->bph_mode = 0; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ -+ if (bph->bph_mode == 0) -+ kdba_check_pc(&bp->bp_addr); -+ -+ bph->bph_length = 3; /* Default to 4 byte */ -+ -+ nextarg++; -+ -+ if ((argc + 1) != nextarg) { -+ unsigned long len; -+ -+ diag = kdbgetularg((char *)argv[nextarg], -+ &len); -+ if (diag) -+ return diag; -+ -+ -+ if ((len > 4) || (len == 3)) -+ return KDB_BADLENGTH; -+ -+ bph->bph_length = len; -+ bph->bph_length--; /* Normalize for debug register */ -+ nextarg++; -+ } -+ -+ if ((argc + 1) != nextarg) -+ return KDB_ARGCOUNT; -+ -+ /* -+ * Indicate to architecture independent level that -+ * a hardware register assignment is required to enable -+ * this breakpoint. -+ */ -+ -+ bph->bph_free = 0; -+ } else { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_bp: no args, forcehw is %d\n", bp->bp_forcehw); -+ if (bp->bp_forcehw) { -+ /* -+ * We are forced to use a hardware register for this -+ * breakpoint because either the bph or bpha -+ * commands were used to establish this breakpoint. -+ */ -+ bph->bph_free = 0; -+ } else { -+ /* -+ * Indicate to architecture dependent level that -+ * the instruction replacement breakpoint technique -+ * should be used for this breakpoint. 
-+ */ -+ bph->bph_free = 1; -+ bp->bp_adjust = 0; /* software, break is fault, not trap */ -+ } -+ } -+ -+ if (bph->bph_mode == 0 && kdba_verify_rw(bp->bp_addr, bph->bph_length+1)) { -+ kdb_printf("Invalid address for breakpoint, ignoring bp command\n"); -+ return KDB_BADADDR; -+ } -+ -+ *nextargp = nextarg; -+#ifndef CONFIG_KDB_HARDWARE_BREAKPOINTS -+ if (!bph->bph_free) { -+ kdb_printf("kdba_parsebp hardware breakpoints are not supported yet\n"); -+ return KDB_NOTIMP; -+ } -+#endif /* CONFIG_KDB_HARDWARE_BREAKPOINTS */ -+ return 0; -+} -+ -+/* -+ * kdba_allocbp -+ * -+ * Associate a hardware register with a breakpoint. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * A pointer to the allocated register kdbhard_bp_t structure for -+ * success, Null and a non-zero diagnostic for failure. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+static kdbhard_bp_t * -+kdba_allocbp(kdbhard_bp_t *bph, int *diagp) -+{ -+ int i; -+ kdbhard_bp_t *newbph; -+ -+ for(i=0,newbph=kdb_hardbreaks; i < KDB_MAXHARDBPT; i++, newbph++) { -+ if (newbph->bph_free) { -+ break; -+ } -+ } -+ -+ if (i == KDB_MAXHARDBPT) { -+ *diagp = KDB_TOOMANYDBREGS; -+ return NULL; -+ } -+ -+ *diagp = 0; -+ -+ /* -+ * Copy data from template. Can't just copy the entire template -+ * here because the register number in kdb_hardbreaks must be -+ * preserved. -+ */ -+ newbph->bph_data = bph->bph_data; -+ newbph->bph_write = bph->bph_write; -+ newbph->bph_mode = bph->bph_mode; -+ newbph->bph_length = bph->bph_length; -+ -+ /* -+ * Mark entry allocated. -+ */ -+ newbph->bph_free = 0; -+ -+ return newbph; -+} -+ -+/* -+ * kdba_alloc_hwbp -+ * -+ * Associate a hardware registers with a breakpoint. -+ * If hw bp is global hw registers descriptor will be allocated -+ * on every CPU. -+ * -+ * Parameters: -+ * bp - hardware bp -+ * diagp - pointer to variable that will store error when -+ * function complete -+ * Outputs: -+ * None. -+ * Returns: -+ * None -+ * Locking: -+ * None. -+ * Remarks: -+ * Should be called with correct bp->bp_template. -+ */ -+ -+void -+kdba_alloc_hwbp(kdb_bp_t *bp, int *diagp) -+{ -+ /* Note that bp->bp_hard[NR_CPU] is for x86. -+ * The ia64 uses bp->bp_hard[0] only. -+ */ -+ bp->bp_hard[0] = kdba_allocbp(&bp->bp_template, diagp); -+ bp->bp_hardtype = 1; -+} -+ -+ -+ -+/* -+ * kdba_freebp -+ * -+ * Deallocate a hardware breakpoint -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic for failure -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+static void -+kdba_freebp(kdbhard_bp_t *bph) -+{ -+ bph->bph_free = 1; -+} -+ -+/* -+ * kdba_free_hwbp -+ * -+ * Frees allocated hw registers descriptors for bp. -+ * If hw bp is global, hw registers descriptors will be freed -+ * on every CPU. -+ * -+ * Parameters: -+ * bp - hardware bp -+ * Outputs: -+ * None. -+ * Returns: -+ * None -+ * Locking: -+ * None. -+ * Remarks: -+ * Should be called with correct bp->bp_template -+ */ -+ -+void -+kdba_free_hwbp(kdb_bp_t *bp) -+{ -+ /* When kernel enters KDB, first, all local bps -+ * are removed, so here we don't need to clear -+ * debug registers. -+ */ -+ -+ kdba_freebp(bp->bp_hard[0]); -+ bp->bp_hard[0] = NULL; -+ bp->bp_hardtype = 0; -+} -+ -+ -+/* -+ * kdba_initbp -+ * -+ * Initialize the breakpoint table for the hardware breakpoint -+ * register. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic for failure -+ * Locking: -+ * None. 
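As a standalone illustration of the length handling in kdba_parsebp() above (which accepts data breakpoint lengths of 1, 2 or 4 bytes, rejects 3 and anything larger than 4, and stores the value minus one for the debug register), a minimal sketch with hypothetical names and return values:

/* Return the biased length to program into a data debug register,
 * or -1 when the requested span is not supported by the hardware. */
static int normalize_dbreg_length(unsigned long len)
{
        if (len == 0 || len == 3 || len > 4)
                return -1;              /* only 1, 2 and 4 byte spans */
        return (int)(len - 1);          /* hardware encodes length - 1 */
}

In command terms this corresponds to something like "bp <address> dataw 4", following the {datar|dataw|io|inst} [length] syntax documented above; the address is a placeholder.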
-+ * Remarks: -+ * -+ * There is one entry per register. On the ia64 architecture -+ * all the registers are interchangeable, so no special allocation -+ * criteria are required. -+ */ -+ -+void -+kdba_initbp(void) -+{ -+ int i; -+ kdbhard_bp_t *bph; -+ -+ /* -+ * Clear the hardware breakpoint table -+ */ -+ -+ memset(kdb_hardbreaks, '\0', sizeof(kdb_hardbreaks)); -+ -+ for(i=0,bph=kdb_hardbreaks; ibph_reg = i; -+ bph->bph_free = 1; -+ } -+} -+ -+#ifdef CONFIG_KDB_HARDWARE_BREAKPOINTS -+/* -+ * Enable Instruction Debug & Data Debug faults on this CPU now. -+ */ -+static inline void kdba_enable_debug_faults(void) -+{ -+ unsigned long tmp; -+ -+ tmp = ia64_getreg(_IA64_REG_PSR); -+ ia64_stop(); -+ tmp |= IA64_PSR_DB; -+ ia64_stop(); -+ ia64_setreg(_IA64_REG_PSR_L, tmp); -+ ia64_srlz_i(); -+ if (KDB_DEBUG(BP)) -+ kdb_printf("enabling debug faults: [%d]PSR.L=%08x\n", -+ smp_processor_id(), (unsigned int)tmp); -+} -+ -+/* -+ * Disable Instruction Debug & Data Debug faults on this CPU now. -+ */ -+static inline void kdba_disable_debug_faults(void) -+{ -+ unsigned long tmp; -+ -+ tmp = ia64_getreg(_IA64_REG_PSR); -+ ia64_stop(); -+ tmp &= ~IA64_PSR_DB; -+ ia64_stop(); -+ ia64_setreg(_IA64_REG_PSR_L, tmp); -+ ia64_srlz_i(); -+ if (KDB_DEBUG(BP)) -+ kdb_printf("disabling debug faults: [%d]PSR.L=%08x\n", -+ smp_processor_id(), (unsigned int)tmp); -+} -+#endif /* CONFIG_KDB_HARDWARE_BREAKPOINTS */ -+ -+/* -+ * kdba_installbp -+ * -+ * Install a breakpoint -+ * -+ * Parameters: -+ * regs Exception frame -+ * bp Breakpoint structure for the breakpoint to be installed -+ * Outputs: -+ * None. -+ * Returns: -+ * 0 if breakpoint set, otherwise error. -+ * Locking: -+ * None. -+ * Remarks: -+ * For hardware breakpoints, a debug register is allocated -+ * and assigned to the breakpoint. If no debug register is -+ * available, a warning message is printed and the breakpoint -+ * is disabled. -+ * -+ * For instruction replacement breakpoints, we must single-step -+ * over the replaced instruction at this point so we can re-install -+ * the breakpoint instruction after the single-step. SSBPT is set -+ * when the breakpoint is initially hit and is cleared by any action -+ * that removes the need for single-step over the breakpoint. -+ */ -+ -+int -+kdba_installbp(struct pt_regs *regs, kdb_bp_t *bp) -+{ -+ /* -+ * Install the breakpoint, if it is not already installed. -+ */ -+ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_installbp bp_installed %d\n", bp->bp_installed); -+ } -+ if (!KDB_STATE(SSBPT)) -+ bp->bp_delay = 0; -+ -+ if (bp->bp_hardtype) { -+#ifdef CONFIG_KDB_HARDWARE_BREAKPOINTS -+ /* -+ * Hardware breakpoints are always local for the -+ * purposes of installation (i.e. they use per-cpu -+ * registers), so we don't need to check bp_installed -+ */ -+ kdba_installdbreg(bp); -+ if (++kdb_dbrs_used[smp_processor_id()] == 1) -+ kdba_enable_debug_faults(); -+ bp->bp_installed = 1; -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_installbp hardware reg %ld at " kdb_bfd_vma_fmt0 "\n", -+ bp->bp_hard[0]->bph_reg, bp->bp_addr); -+ } -+#endif /* CONFIG_KDB_HARDWARE_BREAKPOINTS */ -+ -+ } else if (bp->bp_delay) { -+ if (!bp->bp_installed) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_installbp delayed bp\n"); -+ kdba_handle_bp(regs, bp); -+ } -+ } else { -+ if (!bp->bp_installed) { -+ /* Software breakpoints always use slot 0 in the 128 bit -+ * bundle. The template type does not matter, slot 0 -+ * can only be M or B and the encodings for break.m and -+ * break.b are the same. 
-+ */ -+ unsigned long break_inst; -+ if (kdb_getarea_size(bp->bp_inst.inst, bp->bp_addr, sizeof(bp->bp_inst.inst))) { -+ kdb_printf("kdba_installbp failed to read software breakpoint at 0x%lx\n", bp->bp_addr); -+ return(1); -+ } -+ break_inst = (bp->bp_inst.inst[0] & ~INST_SLOT0_MASK) | BREAK_INSTR; -+ if (kdb_putarea_size(bp->bp_addr, &break_inst, sizeof(break_inst))) { -+ kdb_printf("kdba_installbp failed to set software breakpoint at 0x%lx\n", bp->bp_addr); -+ return(1); -+ } -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_installbp instruction 0x%lx at " kdb_bfd_vma_fmt0 "\n", -+ BREAK_INSTR, bp->bp_addr); -+ bp->bp_installed = 1; -+ flush_icache_range(bp->bp_addr, bp->bp_addr+16); -+ } -+ } -+ return(0); -+} -+ -+/* -+ * kdba_removebp -+ * -+ * Make a breakpoint ineffective. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * 0 if breakpoint removed, otherwise error. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+int -+kdba_removebp(kdb_bp_t *bp) -+{ -+ /* -+ * For hardware breakpoints, remove it from the active register, -+ * for software breakpoints, restore the instruction stream. -+ */ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_removebp bp_installed %d\n", bp->bp_installed); -+ } -+ -+ if (bp->bp_hardtype) { -+#ifdef CONFIG_KDB_HARDWARE_BREAKPOINTS -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdb: removing hardware reg %ld at " kdb_bfd_vma_fmt0 "\n", -+ bp->bp_hard[0]->bph_reg, bp->bp_addr); -+ } -+ if (--kdb_dbrs_used[smp_processor_id()] == 0) -+ kdba_disable_debug_faults(); -+ kdba_removedbreg(bp); -+#endif /* CONFIG_KDB_HARDWARE_BREAKPOINTS */ -+ } else { -+ if (bp->bp_installed) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdb: restoring instruction 0x%016lx%016lx at " kdb_bfd_vma_fmt0 "\n", -+ bp->bp_inst.inst[0], bp->bp_inst.inst[1], bp->bp_addr); -+ if (kdba_putarea_size(bp->bp_addr, bp->bp_inst.inst, sizeof(bp->bp_inst.inst))) -+ return(1); -+ } -+ bp->bp_installed = 0; -+ flush_icache_range(bp->bp_addr, bp->bp_addr+16); -+ } -+ return(0); -+} ---- /dev/null -+++ b/arch/ia64/kdb/kdba_bt.c -@@ -0,0 +1,285 @@ -+/* -+ * Kernel Debugger Architecture Dependent Stack Traceback -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * bt_print_one -+ * -+ * Print one back trace entry. -+ * -+ * Inputs: -+ * ip Current program counter. -+ * symtab Information about symbol that ip falls within. -+ * argcount Maximum number of arguments to print. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * None. 
-+ */ -+ -+static void -+bt_print_one(kdb_machreg_t ip, -+ const kdb_symtab_t *symtab, int argcount, -+ struct unw_frame_info *info) -+{ -+ int btsymarg = 0; /* Convert arguments to symbols */ -+ int btsp = 0; /* Print stack and backing store pointers */ -+ int nosect = 0; /* Suppress section data */ -+ int args; -+ kdb_machreg_t sp, bsp, cfm; -+ -+ kdbgetintenv("BTSYMARG", &btsymarg); -+ kdbgetintenv("BTSP", &btsp); -+ kdbgetintenv("NOSECT", &nosect); -+ -+ unw_get_sp(info, &sp); -+ unw_get_bsp(info, &bsp); -+ unw_get_cfm(info, &cfm); -+ kdb_symbol_print(ip, symtab, KDB_SP_VALUE|KDB_SP_NEWLINE); -+ args = (cfm >> 7) & 0x7f; /* sol */ -+ if (!args) -+ args = cfm & 0x7f; /* no in/local, use sof instead */ -+ if (argcount && args) { -+ int i, argc = args; -+ -+ kdb_printf(" args ("); -+ if (argc > argcount) -+ argc = argcount; -+ -+ for(i = 0; i < argc; i++){ -+ kdb_machreg_t arg; -+ char nat; -+ if (unw_access_gr(info, i+32, &arg, &nat, 0)) -+ arg = 0; -+ -+ if (i) -+ kdb_printf(", "); -+ kdb_printf("0x%lx", arg); -+ } -+ kdb_printf(")\n"); -+ if (btsymarg) { -+ kdb_symtab_t arg_symtab; -+ kdb_machreg_t arg; -+ for(i = 0; i < argc; i++){ -+ char nat; -+ if (unw_access_gr(info, i+32, &arg, &nat, 0)) -+ arg = 0; -+ if (kdbnearsym(arg, &arg_symtab)) { -+ kdb_printf(" arg %d ", i); -+ kdb_symbol_print(arg, &arg_symtab, KDB_SP_DEFAULT|KDB_SP_NEWLINE); -+ } -+ } -+ } -+ } -+ if (symtab->sym_name) { -+ if (!nosect) { -+ kdb_printf(" %s", symtab->mod_name); -+ if (symtab->sec_name) -+ kdb_printf(" %s 0x%lx", symtab->sec_name, symtab->sec_start); -+ kdb_printf(" 0x%lx", symtab->sym_start); -+ if (symtab->sym_end) -+ kdb_printf(" 0x%lx", symtab->sym_end); -+ kdb_printf("\n"); -+ } -+ if (strncmp(symtab->sym_name, "ia64_spinlock_contention", 24) == 0) { -+ kdb_machreg_t r31; -+ char nat; -+ kdb_printf(" r31 (spinlock address) "); -+ if (unw_access_gr(info, 31, &r31, &nat, 0)) -+ r31 = 0; -+ kdb_symbol_print(r31, NULL, KDB_SP_VALUE|KDB_SP_NEWLINE); -+ } -+ } -+ if (btsp) -+ kdb_printf(" sp 0x%016lx bsp 0x%016lx cfm 0x%016lx info->pfs_loc 0x%016lx 0x%016lx\n", -+ sp, bsp, cfm, (u64) info->pfs_loc, info->pfs_loc ? *(info->pfs_loc) : 0); -+} -+ -+/* -+ * kdba_bt_stack -+ * -+ * Unwind the ia64 backtrace for a specified process. -+ * -+ * Inputs: -+ * argcount -+ * p Pointer to task structure to unwind. -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * none. -+ */ -+ -+static int -+kdba_bt_stack(int argcount, const struct task_struct *p) -+{ -+ kdb_symtab_t symtab; -+ struct unw_frame_info info; -+ struct switch_stack *sw; -+ struct pt_regs *regs = NULL; -+ int count = 0; -+ int btsp = 0; /* Backtrace the kdb code as well */ -+ u64 *prev_pfs_loc = NULL; -+ extern char __attribute__ ((weak)) ia64_spinlock_contention_pre3_4[]; -+ extern char __attribute__ ((weak)) ia64_spinlock_contention_pre3_4_end[]; -+ -+ /* -+ * Upon entering kdb_main_loop, the stack frame looks like this: -+ * -+ * +---------------------+ -+ * | struct pt_regs | -+ * +---------------------+ -+ * | | -+ * | kernel stack | -+ * | | -+ * +=====================+ <--- top of stack upon entering kdb -+ * | struct pt_regs | -+ * +---------------------+ -+ * | | -+ * | kdb stack | -+ * | | -+ * +---------------------+ -+ * | struct switch_stack | -+ * +=====================+ <--- kdb_running_process[cpu].arch.sw from do_kdba_main_loop -+ * -+ * When looking at another process, we do not have the address of the -+ * current pt_regs, it is NULL. 
If the process has saved its state, use -+ * that pt_regs instead. -+ */ -+ -+ kdbgetintenv("BTSP", &btsp); -+ -+ if (kdb_task_has_cpu(p)) { -+ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p); -+ if (krp->seqno) { -+ sw = krp->arch.sw; -+ regs = krp->regs; -+ } -+ else -+ sw = NULL; -+ } -+ else { -+ /* Not running, assume blocked */ -+ sw = (struct switch_stack *) (p->thread.ksp + 16); -+ } -+ if (!sw) { -+ kdb_printf("Process does not have a switch_stack, cannot backtrace\n"); -+ kdb_ps1(p); -+ return 0; -+ } -+ -+ unw_init_frame_info(&info, (struct task_struct *)p, sw); -+ -+ /* If we have the address of pt_regs, suppress backtrace on the frames below -+ * pt_regs. No point in displaying kdb itself, unless the user is debugging -+ * the unwinder using set BTSP=1. -+ */ -+ if (regs && !btsp) { -+ kdb_machreg_t sp; -+ if (user_mode(regs)) { -+ kdb_printf("Process was interrupted in user mode, no backtrace available\n"); -+ return 0; -+ } -+ do { -+ unw_get_sp(&info, &sp); -+ if (sp >= (kdb_machreg_t)regs) -+ break; -+ } while (unw_unwind(&info) >= 0 && count++ < 200); -+ } -+ -+ do { -+ kdb_machreg_t ip; -+ -+ /* Avoid unsightly console message from unw_unwind() when attempting -+ * to unwind through the Interrupt Vector Table which has no unwind -+ * information. dispatch_illegal_op_fault() is an exception, it sits -+ * in the 0x3c00 slot. -+ */ -+ if (info.ip >= (u64)__start_ivt_text && info.ip < (u64)__end_ivt_text) { -+ if (info.ip < (u64)__start_ivt_text + 0x3c00 || -+ info.ip >= (u64)__start_ivt_text + 0x4000) -+ return 0; -+ } -+ -+ /* WAR for spinlock contention from leaf functions. ia64_spinlock_contention_pre3_4 -+ * has ar.pfs == r0. Leaf functions do not modify ar.pfs so ar.pfs remains -+ * as 0, stopping the backtrace. Record the previous ar.pfs when the current -+ * IP is in ia64_spinlock_contention_pre3_4 then unwind, if pfs_loc has not changed -+ * after unwind then use pt_regs.ar_pfs which is where the real ar.pfs is for -+ * leaf functions. -+ */ -+ if (prev_pfs_loc && regs && info.pfs_loc == prev_pfs_loc) -+ info.pfs_loc = ®s->ar_pfs; -+ prev_pfs_loc = (info.ip >= (u64)ia64_spinlock_contention_pre3_4 && -+ info.ip < (u64)ia64_spinlock_contention_pre3_4_end) ? -+ info.pfs_loc : NULL; -+ -+ unw_get_ip(&info, &ip); -+ if (ip == 0) -+ break; -+ -+ kdbnearsym(ip, &symtab); -+ if (!symtab.sym_name) { -+ kdb_printf("0x%0*lx - No name. May be an area that has no unwind data\n", -+ (int)(2*sizeof(ip)), ip); -+ return 0; -+ } -+ bt_print_one(ip, &symtab, argcount, &info); -+ } while (unw_unwind(&info) >= 0 && count++ < 200); -+ if (count >= 200) -+ kdb_printf("bt truncated, count limit reached\n"); -+ -+ return 0; -+} -+ -+int -+kdba_bt_address(kdb_machreg_t addr, int argcount) -+{ -+ kdb_printf("Backtrace from a stack address is not supported on ia64\n"); -+ return KDB_NOTIMP; -+} -+ -+/* -+ * kdba_bt_process -+ * -+ * Do a backtrace for a specified process. -+ * -+ * Inputs: -+ * p Struct task pointer extracted by 'bt' command. -+ * argcount -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ */ -+ -+int -+kdba_bt_process(const struct task_struct *p, int argcount) -+{ -+ return kdba_bt_stack(argcount, p); -+} ---- /dev/null -+++ b/arch/ia64/kdb/kdba_fru.c -@@ -0,0 +1,65 @@ -+/* -+ * Kernel Debugger Architecture Dependent FRU functions. -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. 
See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_AUTHOR("Jesse Barnes"); -+MODULE_DESCRIPTION("Capture FRU data"); -+MODULE_LICENSE("GPL"); -+ -+/** -+ * kdba_fru - capture FRU data -+ * @argc: arg count -+ * @argv: arg values -+ * -+ * Tell the system contollers to capture FRU data -+ */ -+static int -+kdba_fru(int argc, const char **argv) -+{ -+ u64 ret; -+ -+ kdb_printf("Capturing FRU data..."); -+ ret = ia64_sn_fru_capture(); -+ kdb_printf("done.\n"); -+ return ret; -+} -+ -+/** -+ * kdba_fru_init - register 'fru' command with kdb -+ * -+ * Register the 'fru' command with kdb at load time. -+ */ -+static int __init -+kdba_fru_init(void) -+{ -+ kdb_register("fru", kdba_fru, 0, "Capture FRU data", 0); -+ -+ return 0; -+} -+ -+/** -+ * kdba_fru_exit - unregister the 'fru' command -+ * -+ * Tell kdb that the 'fru' command is no longer available. -+ */ -+static void __exit -+kdba_fru_exit(void) -+{ -+ kdb_unregister("fru"); -+} -+ -+module_init(kdba_fru_init) -+module_exit(kdba_fru_exit) ---- /dev/null -+++ b/arch/ia64/kdb/kdba_id.c -@@ -0,0 +1,529 @@ -+/* -+ * Kernel Debugger Architecture Dependent Instruction Disassembly -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define KDBA_PRINTBUF_LEN 64 /* buffer len to print a single instr */ -+#define KDBA_READBUFFER_LEN 256 /* buffer for BFD disassembler */ -+ -+#define BUNDLE_MULTIPLIER 3 /* how many instr/bundle */ -+#define BUNDLE_SIZE 16 /* how many bytes/bundle */ -+#define KDBA_DEFAULT_IDLEN 3 /* default number of bundles to disassemble */ -+ -+/* -+ * kdba_dis_getsym -+ * -+ * Get a symbol for the disassembler. -+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * dip Pointer to disassemble_info -+ * Returns: -+ * 0 -+ * Locking: -+ * Remarks: -+ * Not used for kdb. -+ */ -+ -+/* ARGSUSED */ -+static int -+kdba_dis_getsym(bfd_vma addr, disassemble_info *dip) -+{ -+ -+ return 0; -+} -+ -+/* -+ * kdba_printaddress -+ * -+ * Print (symbolically) an address. -+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * dip Pointer to disassemble_info -+ * flag True if a ":" sequence should follow the address -+ * Returns: -+ * 0 -+ * Locking: -+ * Remarks: -+ * -+ */ -+ -+/* ARGSUSED */ -+static void -+kdba_printaddress(kdb_machreg_t addr, disassemble_info *dip, int flag) -+{ -+ kdb_symtab_t symtab; -+ int spaces = 5; -+ unsigned int offset; -+ int slot; -+ -+ /* Some code prints slot number, some prints "byte" offset -+ * from start of bundle. Standardise on "byte" offset. -+ */ -+ slot = addr & 0x0f; -+ if (slot < 3) -+ slot *= 6; -+ addr = (addr & ~0x0f) + slot; -+ -+ /* -+ * Print a symbol name or address as necessary. -+ */ -+ dip->fprintf_func(dip->stream, "0x%0*lx ", (int)(2*sizeof(addr)), addr); -+ kdbnearsym(addr, &symtab); -+ if (symtab.sym_name) { -+ /* Do not use kdb_symbol_print here, it always does -+ * kdb_printf but we want dip->fprintf_func. 
-+ */ -+ dip->fprintf_func(dip->stream, "%s", symtab.sym_name); -+ if ((offset = addr - symtab.sym_start) == 0) { -+ spaces += 4; -+ } -+ else { -+ unsigned int o = offset; -+ while (o >>= 4) -+ --spaces; -+ dip->fprintf_func(dip->stream, "+0x%x", offset); -+ } -+ } -+ -+ if (flag) { -+ if (spaces < 1) { -+ spaces = 1; -+ } -+ dip->fprintf_func(dip->stream, ":%*s", spaces, " "); -+ } -+} -+ -+/* Calls outside the current kernel module use a PLT */ -+ -+static int addr_maybe_plt; -+ -+/* The templates below were extracted from arch/ia64/kernel/module.c. The -+ * masks were generated by this quick and dirty program: -+ */ -+ -+#if 0 /* mask program */ -+#include -+ -+#define u64 unsigned long -+ -+/* routines copied from arch/ia64/kernel/patch.c */ -+ -+static void -+ia64_patch (u64 insn_addr, u64 mask, u64 val) -+{ -+ u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16); -+# define insn_mask ((1UL << 41) - 1) -+ unsigned long shift; -+ -+ b0 = b[0]; b1 = b[1]; -+ shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */ -+ if (shift >= 64) { -+ m1 = mask << (shift - 64); -+ v1 = val << (shift - 64); -+ } else { -+ m0 = mask << shift; m1 = mask >> (64 - shift); -+ v0 = val << shift; v1 = val >> (64 - shift); -+ b[0] = (b0 & ~m0) | (v0 & m0); -+ } -+ b[1] = (b1 & ~m1) | (v1 & m1); -+} -+ -+static void -+ia64_patch_imm64 (u64 insn_addr, u64 val) -+{ -+ /* The assembler may generate offset pointing to either slot 1 -+ or slot 2 for a long (2-slot) instruction, occupying slots 1 -+ and 2. */ -+ insn_addr &= -16UL; -+ ia64_patch(insn_addr + 2, -+ 0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ -+ | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */ -+ | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */ -+ | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */ -+ | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)); -+ ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); -+} -+ -+static void -+ia64_patch_imm60 (u64 insn_addr, u64 val) -+{ -+ /* The assembler may generate offset pointing to either slot 1 -+ or slot 2 for a long (2-slot) instruction, occupying slots 1 -+ and 2. 
*/ -+ insn_addr &= -16UL; -+ ia64_patch(insn_addr + 2, -+ 0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ -+ | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); -+ ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18); -+} -+ -+struct plt_entry { -+ unsigned char bundle[3][16]; -+}; -+static struct plt_entry ia64_plt_mask; -+ -+int main(void) -+{ -+ int i, j; -+ printf("2 bundle\n"); -+ for (i = 0; i < 2; ++i) -+ for (j = 0; j < 16; ++j) -+ ia64_plt_mask.bundle[i][j] = 0xff; -+ ia64_patch_imm64((u64) (ia64_plt_mask.bundle + 0), 0); -+ ia64_patch_imm60((u64) (ia64_plt_mask.bundle + 1), 0); -+ for (i = 0; i < 2; ++i) { -+ for (j = 0; j < 16; ++j) { -+ printf("0x%02x", ia64_plt_mask.bundle[i][j]); -+ if (j != 15) -+ printf(", "); -+ if (j % 6 == 5 || j == 15) -+ printf("\n"); -+ } -+ } -+ printf("\n3 bundle\n"); -+ for (i = 0; i < 3; ++i) -+ for (j = 0; j < 16; ++j) -+ ia64_plt_mask.bundle[i][j] = 0xff; -+ ia64_patch_imm64((u64) (ia64_plt_mask.bundle + 0), 0); -+ ia64_patch_imm64((u64) (ia64_plt_mask.bundle + 1), 0); -+ for (i = 0; i < 3; ++i) { -+ for (j = 0; j < 16; ++j) { -+ printf("0x%02x", ia64_plt_mask.bundle[i][j]); -+ if (j != 15) -+ printf(", "); -+ if (j % 6 == 5 || j == 15) -+ printf("\n"); -+ } -+ } -+ return 0; -+} -+#endif /* mask program */ -+ -+#ifdef CONFIG_IA64_BRL_EMU -+#define PLT_BUNDLES 3 -+struct plt_entry { -+ unsigned char bundle[PLT_BUNDLES][16]; -+}; -+static const struct plt_entry ia64_plt_template = { -+ { -+ { -+ 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* movl r16=TARGET_IP */ -+ 0x02, 0x00, 0x00, 0x60 -+ }, -+ { -+ 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */ -+ 0x00, 0x00, 0x00, 0x60 -+ }, -+ { -+ 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */ -+ 0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /* mov b6=r16 */ -+ 0x60, 0x00, 0x80, 0x00 /* br.few b6 */ -+ } -+ } -+}; -+static const struct plt_entry ia64_plt_mask = { -+ { -+ { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, /* movl r16=TARGET_IP */ -+ 0x0f, 0x08, 0x00, 0xf0 -+ }, -+ { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, /* movl gp=TARGET_GP */ -+ 0x0f, 0x08, 0x00, 0xf0 -+ }, -+ { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* [MIB] nop.m 0 */ -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* mov b6=r16 */ -+ 0xff, 0xff, 0xff, 0xff /* br.few b6 */ -+ } -+}; -+ -+#else /* !CONFIG_IA64_BRL_EMU */ -+ -+#define PLT_BUNDLES 2 -+struct plt_entry { -+ unsigned char bundle[PLT_BUNDLES][16]; -+}; -+static const struct plt_entry ia64_plt_template = { -+ { -+ { -+ 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */ -+ 0x00, 0x00, 0x00, 0x60 -+ }, -+ { -+ 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* brl.many TARGET_IP */ -+ 0x08, 0x00, 0x00, 0xc0 -+ } -+ } -+}; -+static const struct plt_entry ia64_plt_mask = { -+ { -+ { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, /* movl gp=TARGET_GP */ -+ 0x0f, 0x08, 0x00, 0xf0 -+ }, -+ { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* [MLX] nop.m 0 */ -+ 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, /* brl.many TARGET_IP */ -+ 0x0f, 0x00, 0x00, 0xf7 -+ } -+ } -+}; -+#endif /* CONFIG_IA64_BRL_EMU */ -+ -+static inline u64 -+get_slot(u64 bundle, int slot) -+{ -+ switch (slot) { -+ 
case 0: -+ return (((u64 *)bundle)[0] >> 5) & 0x1ffffffffffLL; -+ case 1: -+ return ((((u64 *)bundle)[0] >> 46) & 0x3ffff) | -+ ((((u64 *)bundle)[1] & 0x7fffff) << 18); -+ default: -+ return (((u64 *)bundle)[1] >> 23) & 0x1ffffffffffLL; -+ } -+} -+ -+static inline u64 -+get_movl (u64 addr) -+{ -+ /* X2 format */ -+ u64 slot1 = get_slot(addr, 1), slot2 = get_slot(addr, 2); -+ u64 i, imm9d, imm5c, ic, imm7b, imm41; -+ i = (slot2 >> 36) & 0x1UL; -+ imm9d = (slot2 >> 27) & 0xfffUL; -+ imm5c = (slot2 >> 22) & 0x1fUL; -+ ic = (slot2 >> 21) & 0x1UL; -+ imm7b = (slot2 >> 13) & 0x7fUL; -+ imm41 = slot1; -+ return (i << 63) | (imm41 << 22) | (ic << 21) | (imm5c << 16) | -+ (imm9d << 7) | imm7b; -+} -+ -+static inline u64 -+get_brl (u64 addr) -+{ -+ /* X3 format */ -+ u64 slot1 = get_slot(addr, 1), slot2 = get_slot(addr, 2); -+ u64 i, imm20b, imm39; -+ i = (slot2 >> 36) & 0x1UL; -+ imm20b = (slot2 >> 13) & 0xfffffUL; -+ imm39 = slot1 >> 2; -+ return ((i << 59) | (imm39 << 20) | imm20b) << 4; -+} -+ -+static bfd_vma -+is_plt(bfd_vma addr) { -+ int i, j; -+ u64 target; -+ struct plt_entry plt; -+ if (kdb_getarea_size(&plt, addr, sizeof(plt))) -+ return 0; -+ for (i = 0; i < PLT_BUNDLES; ++i) { -+ for (j = 0; j < 16; ++j) { -+ if ((plt.bundle[i][j] & ia64_plt_mask.bundle[i][j]) != -+ ia64_plt_template.bundle[i][j]) -+ return 0; -+ } -+ } -+ if (PLT_BUNDLES == 2) { -+ /* brl is IP relative, in second bundle */ -+ target = get_brl(addr + 16) + addr + 16; -+ } else { -+ /* movl is absolute, in first bundle */ -+ target = get_movl(addr); -+ } -+ return target; -+} -+ -+/* -+ * kdba_dis_printaddr -+ * -+ * Print (symbolically) an address. Called by GNU disassembly -+ * code via disassemble_info structure. -+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * dip Pointer to disassemble_info -+ * Returns: -+ * 0 -+ * Locking: -+ * Remarks: -+ * This function will never append ":" to the printed -+ * symbolic address. If the address may be a PLT entry then try to decode -+ * the PLT information. -+ */ -+ -+static void -+kdba_dis_printaddr(bfd_vma addr, disassemble_info *dip) -+{ -+ bfd_vma target; -+ kdba_printaddress(addr, dip, 0); -+ if (!addr_maybe_plt) -+ return; -+ if (!(target = is_plt(addr))) -+ return; -+ kdb_printf(" PLT --> "); -+ kdba_printaddress(target, dip, 0); -+} -+ -+/* -+ * kdba_dis_getmem -+ * -+ * Fetch 'length' bytes from 'addr' into 'buf'. -+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * buf Address of buffer to fill with bytes from 'addr' -+ * length Number of bytes to fetch -+ * dip Pointer to disassemble_info -+ * Returns: -+ * 0 -+ * Locking: -+ * Remarks: -+ * -+ */ -+ -+/* ARGSUSED */ -+static int -+kdba_dis_getmem(bfd_vma addr, bfd_byte *buf, unsigned int length, disassemble_info *dip) -+{ -+ return kdb_getarea_size(buf, addr, length); -+} -+ -+/* -+ * kdba_id_parsemode -+ * -+ * Parse IDMODE environment variable string and -+ * set appropriate value into "disassemble_info" structure. -+ * -+ * Parameters: -+ * mode Mode string -+ * dip Disassemble_info structure pointer -+ * Returns: -+ * Locking: -+ * Remarks: -+ * No mode supported yet. -+ */ -+ -+int -+kdba_id_parsemode(const char *mode, disassemble_info *dip) -+{ -+ if (mode && strcmp(mode, "ia64")) -+ return KDB_BADMODE; -+ return 0; -+} -+ -+/* -+ * kdba_check_pc -+ * -+ * Check that the pc is satisfactory. -+ * -+ * Parameters: -+ * pc Program Counter Value. -+ * Returns: -+ * None -+ * Locking: -+ * None. -+ * Remarks: -+ * Can change pc. 
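kdba_printaddress() earlier in this file standardises instruction addresses: the low four bits of an ia64 IP may carry a slot number (0, 1 or 2) inside the 16-byte bundle, which it converts to the byte offsets 0, 6 and 12, and kdba_check_pc() below simply masks the low bits off so disassembly starts at the bundle boundary. A minimal sketch of those two conversions, with hypothetical helper names:

/* 16-byte-aligned bundle address, as kdba_check_pc() produces. */
static unsigned long bundle_base(unsigned long ip)
{
        return ip & ~0xfUL;
}

/* Map the low four bits of an IP to the 0/6/12 byte-offset convention
 * used when printing; values of 3 or more are already byte offsets. */
static unsigned int slot_byte_offset(unsigned long ip)
{
        unsigned int slot = ip & 0x0f;

        return (slot < 3) ? slot * 6 : slot;
}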
-+ */ -+ -+void -+kdba_check_pc(kdb_machreg_t *pc) -+{ -+ (*pc) &= ~0xf; /* pc must be 16 byte aligned */ -+} -+ -+/* -+ * kdba_id_printinsn -+ * -+ * Format and print a single bundle at 'pc'. Return the -+ * length of the bundle. -+ * -+ * Parameters: -+ * pc Program Counter Value. -+ * dip Disassemble_info structure pointer -+ * Returns: -+ * Length of instruction, -1 for error. -+ * Locking: -+ * None. -+ * Remarks: -+ * None. -+ */ -+ -+int -+kdba_id_printinsn(kdb_machreg_t pc, disassemble_info *dip) -+{ -+ int ret; -+ int byte = 0; -+ -+ kdba_check_pc(&pc); -+ while (byte < 16) { -+ kdba_dis_printaddr(pc+byte, dip); -+ addr_maybe_plt = 1; -+ ret = print_insn_ia64((kdb_machreg_t)(pc+byte), dip); -+ addr_maybe_plt = 0; -+ dip->fprintf_func(dip->stream, "\n"); -+ if (ret < 0) -+ break; -+ byte += ret; -+ } -+ return(byte); -+} -+ -+/* -+ * kdba_id_init -+ * -+ * Initialize the architecture dependent elements of -+ * the disassembly information structure -+ * for the GNU disassembler. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+void -+kdba_id_init(disassemble_info *dip) -+{ -+ dip->read_memory_func = kdba_dis_getmem; -+ dip->print_address_func = kdba_dis_printaddr; -+ dip->symbol_at_address_func = kdba_dis_getsym; -+ -+ dip->flavour = bfd_target_elf_flavour; -+ dip->arch = bfd_arch_ia64; -+ dip->endian = BFD_ENDIAN_LITTLE; -+ -+ dip->display_endian = BFD_ENDIAN_LITTLE; -+} ---- /dev/null -+++ b/arch/ia64/kdb/kdba_io.c -@@ -0,0 +1,661 @@ -+/* -+ * Kernel Debugger Architecture Dependent Console I/O handler -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_SGI_L1_CONSOLE) -+#define HAVE_KDBA_SERIAL_CONSOLE -+#endif -+ -+/* from include/linux/pc_keyb.h on 2.4 */ -+#define KBD_STATUS_REG 0x64 /* Status register (R) */ -+#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */ -+#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */ -+#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */ -+#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */ -+#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */ -+ -+#ifdef CONFIG_VT_CONSOLE -+#define KDB_BLINK_LED 1 -+#else -+#undef KDB_BLINK_LED -+#endif -+ -+#ifdef CONFIG_KDB_USB -+ -+/* support up to 8 USB keyboards (probably excessive, but...) 
*/ -+#define KDB_USB_NUM_KEYBOARDS 8 -+struct kdb_usb_kbd_info kdb_usb_kbds[KDB_USB_NUM_KEYBOARDS]; -+ -+extern int kdb_no_usb; -+ -+static unsigned char kdb_usb_keycode[256] = { -+ 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, -+ 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, -+ 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, -+ 27, 43, 84, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, -+ 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, -+ 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, -+ 72, 73, 82, 83, 86,127,116,117, 85, 89, 90, 91, 92, 93, 94, 95, -+ 120,121,122,123,134,138,130,132,128,129,131,137,133,135,136,113, -+ 115,114, 0, 0, 0,124, 0,181,182,183,184,185,186,187,188,189, -+ 190,191,192,193,194,195,196,197,198, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, -+ 150,158,159,128,136,177,178,176,142,152,173,140 -+}; -+ -+/* -+ * kdb_usb_keyboard_attach() -+ * Attach a USB keyboard to kdb. -+ */ -+int -+kdb_usb_keyboard_attach(struct urb *urb, unsigned char *buffer, void *poll_func) -+{ -+ int i; -+ int rc = -1; -+ -+ if (kdb_no_usb) -+ return 0; -+ -+ /* -+ * Search through the array of KDB USB keyboards (kdb_usb_kbds) -+ * looking for a free index. If found, assign the keyboard to -+ * the array index. -+ */ -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ if (kdb_usb_kbds[i].urb) /* index is already assigned */ -+ continue; -+ -+ /* found a free array index */ -+ kdb_usb_kbds[i].urb = urb; -+ kdb_usb_kbds[i].buffer = buffer; -+ kdb_usb_kbds[i].poll_func = poll_func; -+ -+ rc = 0; /* success */ -+ -+ break; -+ } -+ -+ return rc; -+} -+EXPORT_SYMBOL_GPL (kdb_usb_keyboard_attach); -+ -+/* -+ * kdb_usb_keyboard_detach() -+ * Detach a USB keyboard from kdb. -+ */ -+int -+kdb_usb_keyboard_detach(struct urb *urb) -+{ -+ int i; -+ int rc = -1; -+ -+ if (kdb_no_usb) -+ return 0; -+ -+ /* -+ * Search through the array of KDB USB keyboards (kdb_usb_kbds) -+ * looking for the index with the matching URB. If found, -+ * clear the array index. -+ */ -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ if (kdb_usb_kbds[i].urb != urb) -+ continue; -+ -+ /* found it, clear the index */ -+ kdb_usb_kbds[i].urb = NULL; -+ kdb_usb_kbds[i].buffer = NULL; -+ kdb_usb_kbds[i].poll_func = NULL; -+ kdb_usb_kbds[i].caps_lock = 0; -+ -+ rc = 0; /* success */ -+ -+ break; -+ } -+ -+ return rc; -+} -+EXPORT_SYMBOL_GPL (kdb_usb_keyboard_detach); -+ -+/* -+ * get_usb_char -+ * This function drives the USB attached keyboards. -+ * Fetch the USB scancode and decode it. -+ */ -+static int -+get_usb_char(void) -+{ -+ int i; -+ int ret; -+ unsigned char keycode, spec; -+ extern u_short plain_map[], shift_map[], ctrl_map[]; -+ -+ if (kdb_no_usb) -+ return -1; -+ -+ /* -+ * Loop through all the USB keyboard(s) and return -+ * the first character obtained from them. -+ */ -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ /* skip uninitialized keyboard array entries */ -+ if (!kdb_usb_kbds[i].urb || !kdb_usb_kbds[i].buffer || -+ !kdb_usb_kbds[i].poll_func) -+ continue; -+ -+ /* Transfer char */ -+ ret = (*kdb_usb_kbds[i].poll_func)(kdb_usb_kbds[i].urb); -+ -+ if (ret == -EBUSY && kdb_usb_kbds[i].poll_ret != -EBUSY) -+ kdb_printf("NOTICE: USB HC driver BUSY. 
" -+ "USB keyboard has been disabled.\n"); -+ -+ kdb_usb_kbds[i].poll_ret = ret; -+ -+ if (ret < 0) /* error or no characters, try the next kbd */ -+ continue; -+ -+ spec = kdb_usb_kbds[i].buffer[0]; -+ keycode = kdb_usb_kbds[i].buffer[2]; -+ kdb_usb_kbds[i].buffer[0] = (char)0; -+ kdb_usb_kbds[i].buffer[2] = (char)0; -+ -+ if(kdb_usb_kbds[i].buffer[3]) { -+ kdb_usb_kbds[i].buffer[3] = (char)0; -+ continue; -+ } -+ -+ /* A normal key is pressed, decode it */ -+ if(keycode) -+ keycode = kdb_usb_keycode[keycode]; -+ -+ /* 2 Keys pressed at one time ? */ -+ if (spec && keycode) { -+ switch(spec) -+ { -+ case 0x2: -+ case 0x20: /* Shift */ -+ return shift_map[keycode]; -+ case 0x1: -+ case 0x10: /* Ctrl */ -+ return ctrl_map[keycode]; -+ case 0x4: -+ case 0x40: /* Alt */ -+ break; -+ } -+ } else if (keycode) { /* If only one key pressed */ -+ switch(keycode) -+ { -+ case 0x1C: /* Enter */ -+ return 13; -+ -+ case 0x3A: /* Capslock */ -+ kdb_usb_kbds[i].caps_lock = !(kdb_usb_kbds[i].caps_lock); -+ break; -+ case 0x0E: /* Backspace */ -+ return 8; -+ case 0x0F: /* TAB */ -+ return 9; -+ case 0x77: /* Pause */ -+ break ; -+ default: -+ if(!kdb_usb_kbds[i].caps_lock) { -+ return plain_map[keycode]; -+ } -+ else { -+ return shift_map[keycode]; -+ } -+ } -+ } -+ } -+ -+ /* no chars were returned from any of the USB keyboards */ -+ -+ return -1; -+} -+#endif /* CONFIG_KDB_USB */ -+ -+/* -+ * This module contains code to read characters from the keyboard or a serial -+ * port. -+ * -+ * It is used by the kernel debugger, and is polled, not interrupt driven. -+ * -+ */ -+ -+#ifdef KDB_BLINK_LED -+/* -+ * send: Send a byte to the keyboard controller. Used primarily to -+ * alter LED settings. -+ */ -+ -+static void -+kdb_kbdsend(unsigned char byte) -+{ -+ int timeout; -+ for (timeout = 200 * 1000; timeout && (inb(KBD_STATUS_REG) & KBD_STAT_IBF); timeout--); -+ outb(byte, KBD_DATA_REG); -+ udelay(40); -+ for (timeout = 200 * 1000; timeout && (~inb(KBD_STATUS_REG) & KBD_STAT_OBF); timeout--); -+ inb(KBD_DATA_REG); -+ udelay(40); -+} -+ -+static void -+kdb_toggleled(int led) -+{ -+ static int leds; -+ -+ leds ^= led; -+ -+ kdb_kbdsend(KBD_CMD_SET_LEDS); -+ kdb_kbdsend((unsigned char)leds); -+} -+#endif /* KDB_BLINK_LED */ -+ -+#ifdef HAVE_KDBA_SERIAL_CONSOLE -+ -+struct kdb_serial kdb_serial; -+enum kdba_serial_console kdba_serial_console; -+static int get_serial_char(void); -+ -+/* There must be a serial_inp_xxx() and get_serial_char_xxx() for each type -+ * of console. See enum kdba_serial_console in include/asm-$(ARCH)/kdbprivate.h. 
-+ */ -+ -+#ifdef CONFIG_SERIAL_8250_CONSOLE -+ -+static unsigned int -+serial_inp_standard(const struct kdb_serial *kdb_serial, int offset) -+{ -+ offset <<= kdb_serial->ioreg_shift; -+ -+ switch (kdb_serial->io_type) { -+ case SERIAL_IO_MEM: -+ return readb((void __iomem *)(kdb_serial->iobase + offset)); -+ break; -+ default: -+ return inb(kdb_serial->iobase + offset); -+ break; -+ } -+} -+ -+/* Check if there is a byte ready at the serial port */ -+static int -+get_serial_char_standard(void) -+{ -+ unsigned char ch; -+ static unsigned long fifon; -+ if (fifon == 0) { -+ /* try to set the FIFO */ -+ fifon = kdb_serial.iobase + -+ (UART_FCR << kdb_serial.ioreg_shift); -+ switch (kdb_serial.io_type) { -+ case SERIAL_IO_MEM: -+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | -+ UART_FCR_CLEAR_XMIT), (void __iomem *)fifon); -+ break; -+ default: -+ outb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | -+ UART_FCR_CLEAR_XMIT), fifon); -+ break; -+ } -+ } -+ -+ if (kdb_serial.iobase == 0) -+ return -1; -+ -+ if (serial_inp_standard(&kdb_serial, UART_LSR) & UART_LSR_DR) { -+ ch = serial_inp_standard(&kdb_serial, UART_RX); -+ if (ch == 0x7f) -+ ch = 8; -+ return ch; -+ } -+ return -1; -+} -+ -+#else /* !CONFIG_SERIAL_8250_CONSOLE */ -+ -+#define get_serial_char_standard() -1 -+ -+#endif /* CONFIG_SERIAL_8250_CONSOLE */ -+ -+#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE -+ -+extern u64 master_node_bedrock_address; -+ -+/* UART registers on the Bedrock start at 0x80 */ -+ -+extern int l1_serial_in_polled(void); -+extern int l1_control_in_polled(int); -+ -+/* Read a byte from the L1 port. kdb_serial is ignored */ -+static unsigned int -+serial_inp_sgi_l1(const struct kdb_serial *kdb_serial, int offset) -+{ -+ if (offset & 0x80) { -+ int counter = 10000; -+ unsigned int value; -+ while ( counter-- ) { -+ value = l1_serial_in_polled(); -+ /* Gobble up the 0's */ -+ if ( value ) -+ return(value); -+ } -+ return(0); -+ } -+ else { -+ return l1_control_in_polled(offset); -+ } -+} -+ -+/* Check if there is a byte ready at the L1 port. */ -+static int -+get_serial_char_sgi_l1(void) -+{ -+ unsigned char ch; -+ int status; -+ -+ if ((status = serial_inp_sgi_l1(&kdb_serial, UART_LSR)) & UART_LSR_DR) { -+ ch = serial_inp_sgi_l1(&kdb_serial, UART_RX | 0x80); /* bedrock offset */ -+ if (ch == 0x7f) -+ ch = 8; -+ return ch; -+ } -+ return -1; -+} -+ -+#else /* !CONFIG_SERIAL_SGI_L1_CONSOLE */ -+ -+#define get_serial_char_sgi_l1() -1 -+ -+#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE */ -+ -+/* Select the serial console input at run time, to handle generic kernels */ -+ -+static int -+get_serial_char(void) -+{ -+ switch (kdba_serial_console) { -+ case KDBA_SC_NONE: -+ return -1; -+ case KDBA_SC_STANDARD: -+ return get_serial_char_standard(); -+ case KDBA_SC_SGI_L1: -+ return get_serial_char_sgi_l1(); -+ } -+ /* gcc is not smart enough to realize that all paths return before here :( */ -+ return -1; -+} -+ -+#endif /* HAVE_KDBA_SERIAL_CONSOLE */ -+ -+#ifdef CONFIG_VT_CONSOLE -+ -+static int kbd_exists; -+ -+/* -+ * Check if the keyboard controller has a keypress for us. -+ * Some parts (Enter Release, LED change) are still blocking polled here, -+ * but hopefully they are all short. 
-+ */ -+static int get_kbd_char(void) -+{ -+ int scancode, scanstatus; -+ static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */ -+ static int shift_key; /* Shift next keypress */ -+ static int ctrl_key; -+ u_short keychar; -+ extern u_short plain_map[], shift_map[], ctrl_map[]; -+ -+ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) || -+ (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) { -+ kbd_exists = 0; -+ return -1; -+ } -+ kbd_exists = 1; -+ -+ if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) -+ return -1; -+ -+ /* -+ * Fetch the scancode -+ */ -+ scancode = inb(KBD_DATA_REG); -+ scanstatus = inb(KBD_STATUS_REG); -+ -+ /* -+ * Ignore mouse events. -+ */ -+ if (scanstatus & KBD_STAT_MOUSE_OBF) -+ return -1; -+ -+ /* -+ * Ignore release, trigger on make -+ * (except for shift keys, where we want to -+ * keep the shift state so long as the key is -+ * held down). -+ */ -+ -+ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) { -+ /* -+ * Next key may use shift table -+ */ -+ if ((scancode & 0x80) == 0) { -+ shift_key=1; -+ } else { -+ shift_key=0; -+ } -+ return -1; -+ } -+ -+ if ((scancode&0x7f) == 0x1d) { -+ /* -+ * Left ctrl key -+ */ -+ if ((scancode & 0x80) == 0) { -+ ctrl_key = 1; -+ } else { -+ ctrl_key = 0; -+ } -+ return -1; -+ } -+ -+ if ((scancode & 0x80) != 0) -+ return -1; -+ -+ scancode &= 0x7f; -+ -+ /* -+ * Translate scancode -+ */ -+ -+ if (scancode == 0x3a) { -+ /* -+ * Toggle caps lock -+ */ -+ shift_lock ^= 1; -+ -+#ifdef KDB_BLINK_LED -+ kdb_toggleled(0x4); -+#endif -+ return -1; -+ } -+ -+ if (scancode == 0x0e) { -+ /* -+ * Backspace -+ */ -+ return 8; -+ } -+ -+ /* Special Key */ -+ switch (scancode) { -+ case 0xF: /* Tab */ -+ return 9; -+ case 0x53: /* Del */ -+ return 4; -+ case 0x47: /* Home */ -+ return 1; -+ case 0x4F: /* End */ -+ return 5; -+ case 0x4B: /* Left */ -+ return 2; -+ case 0x48: /* Up */ -+ return 16; -+ case 0x50: /* Down */ -+ return 14; -+ case 0x4D: /* Right */ -+ return 6; -+ } -+ -+ if (scancode == 0xe0) { -+ return -1; -+ } -+ -+ /* -+ * For Japanese 86/106 keyboards -+ * See comment in drivers/char/pc_keyb.c. -+ * - Masahiro Adegawa -+ */ -+ if (scancode == 0x73) { -+ scancode = 0x59; -+ } else if (scancode == 0x7d) { -+ scancode = 0x7c; -+ } -+ -+ if (!shift_lock && !shift_key && !ctrl_key) { -+ keychar = plain_map[scancode]; -+ } else if (shift_lock || shift_key) { -+ keychar = shift_map[scancode]; -+ } else if (ctrl_key) { -+ keychar = ctrl_map[scancode]; -+ } else { -+ keychar = 0x0020; -+ kdb_printf("Unknown state/scancode (%d)\n", scancode); -+ } -+ keychar &= 0x0fff; -+ switch (KTYP(keychar)) { -+ case KT_LETTER: -+ case KT_LATIN: -+ if (isprint(keychar)) -+ break; /* printable characters */ -+ /* drop through */ -+ case KT_SPEC: -+ if (keychar == K_ENTER) -+ break; -+ /* drop through */ -+ default: -+ return(-1); /* ignore unprintables */ -+ } -+ -+ if ((scancode & 0x7f) == 0x1c) { -+ /* -+ * enter key. All done. Absorb the release scancode. -+ */ -+ while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) -+ ; -+ -+ /* -+ * Fetch the scancode -+ */ -+ scancode = inb(KBD_DATA_REG); -+ scanstatus = inb(KBD_STATUS_REG); -+ -+ while (scanstatus & KBD_STAT_MOUSE_OBF) { -+ scancode = inb(KBD_DATA_REG); -+ scanstatus = inb(KBD_STATUS_REG); -+ } -+ -+ if (scancode != 0x9c) { -+ /* -+ * Wasn't an enter-release, why not? 
-+ */ -+ kdb_printf("kdb: expected enter got 0x%x status 0x%x\n", -+ scancode, scanstatus); -+ } -+ -+ kdb_printf("\n"); -+ return 13; -+ } -+ -+ return keychar & 0xff; -+} -+#endif /* CONFIG_VT_CONSOLE */ -+ -+#ifdef KDB_BLINK_LED -+ -+/* Leave numlock alone, setting it messes up laptop keyboards with the keypad -+ * mapped over normal keys. -+ */ -+static int kdba_blink_mask = 0x1 | 0x4; -+ -+#ifdef CONFIG_SMP -+#define BOGOMIPS (local_cpu_data->loops_per_jiffy/(500000/HZ)) -+#else -+#define BOGOMIPS (loops_per_jiffy/(500000/HZ)) -+#endif -+static int blink_led(void) -+{ -+ static long delay; -+ -+ if (kbd_exists == 0) -+ return -1; -+ -+ if (--delay < 0) { -+ if (BOGOMIPS == 0) /* early kdb */ -+ delay = 150000000/1000; /* arbitrary bogomips */ -+ else -+ delay = 150000000/BOGOMIPS; /* Roughly 1 second when polling */ -+ kdb_toggleled(kdba_blink_mask); -+ } -+ return -1; -+} -+#endif -+ -+get_char_func poll_funcs[] = { -+#if defined(CONFIG_VT_CONSOLE) -+ get_kbd_char, -+#endif -+#ifdef HAVE_KDBA_SERIAL_CONSOLE -+ get_serial_char, -+#endif -+#ifdef KDB_BLINK_LED -+ blink_led, -+#endif -+#ifdef CONFIG_KDB_USB -+ get_usb_char, -+#endif -+ NULL -+}; -+ -+/* Dummy versions of kdba_local_arch_setup, kdba_local_arch_cleanup. -+ * FIXME: ia64 with legacy keyboard might need the same code as i386. -+ */ -+ -+void kdba_local_arch_setup(void) {} -+void kdba_local_arch_cleanup(void) {} ---- /dev/null -+++ b/arch/ia64/kdb/kdba_jmp.S -@@ -0,0 +1,394 @@ -+/* -+ * Kernel Debugger Architecture Dependent Longjump Support. -+ */ -+ -+/* setjmp() and longjmp() assembler support for kdb on ia64. -+ -+ This code was copied from glibc CVS as of 2001-06-27 and modified where -+ necessary to fit the kernel. No glibc lines were changed or deleted, all -+ adjustments are wrapped in #ifdef __KERNEL__, except for the added -+ .mem.offset lines, they work in or out of the kenrel. The original code is -+ in sysdeps/unix/sysv/linux/ia64/{setjmp.S,__longjmp.S}. -+ -+ glibc has setjmp (save signals) and _setjmp (do not save signals). Kernel -+ code does not have signals, only kdba_setjmp_asm() is used. -+ -+ Keith Owens 2001-06-27 -+ */ -+ -+/* Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. -+ Contributed by David Mosberger-Tang . -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Library General Public License as -+ published by the Free Software Foundation; either version 2 of the -+ License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Library General Public License for more details. -+ -+ You should have received a copy of the GNU Library General Public -+ License along with the GNU C Library; see the file COPYING.LIB. If -+ not, write to the Free Software Foundation, Inc., -+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -+ -+ The layout of the jmp_buf is as follows. This is subject to change -+ and user-code should never depend on the particular layout of -+ jmp_buf! 
-+ -+ -+ offset: description: -+ ------- ------------ -+ 0x000 stack pointer (r12) ; unchangeable (see _JMPBUF_UNWINDS) -+ 0x008 r1 (gp) -+ 0x010 caller's unat -+ 0x018 fpsr -+ 0x020 r4 -+ 0x028 r5 -+ 0x030 r6 -+ 0x038 r7 -+ 0x040 rp (b0) -+ 0x048 b1 -+ 0x050 b2 -+ 0x058 b3 -+ 0x060 b4 -+ 0x068 b5 -+ 0x070 ar.pfs -+ 0x078 ar.lc -+ 0x080 pr -+ 0x088 ar.bsp ; unchangeable (see __longjmp.S) -+ 0x090 ar.unat -+ 0x098 &__jmp_buf ; address of the jmpbuf (needed to locate NaT bits in unat) -+ 0x0a0 f2 -+ 0x0b0 f3 -+ 0x0c0 f4 -+ 0x0d0 f5 -+ 0x0e0 f16 -+ 0x0f0 f17 -+ 0x100 f18 -+ 0x110 f19 -+ 0x120 f20 -+ 0x130 f21 -+ 0x130 f22 -+ 0x140 f23 -+ 0x150 f24 -+ 0x160 f25 -+ 0x170 f26 -+ 0x180 f27 -+ 0x190 f28 -+ 0x1a0 f29 -+ 0x1b0 f30 -+ 0x1c0 f31 */ -+ -+#ifndef __KERNEL__ -+ -+#include -+#include -+ -+ /* The following two entry points are the traditional entry points: */ -+ -+LEAF(setjmp) -+ alloc r8=ar.pfs,2,0,0,0 -+ mov in1=1 -+ br.cond.sptk.many __sigsetjmp -+END(setjmp) -+ -+LEAF(_setjmp) -+ alloc r8=ar.pfs,2,0,0,0 -+ mov in1=0 -+ br.cond.sptk.many __sigsetjmp -+END(_setjmp) -+ -+ /* __sigsetjmp(__jmp_buf buf, int savemask) */ -+ -+ENTRY(__sigsetjmp) -+ -+#else /* __KERNEL __ */ -+#include -+#define ret br.ret.sptk.few rp -+GLOBAL_ENTRY(kdba_setjmp) -+#endif /* !__KERNEL__ */ -+ -+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) -+ alloc loc1=ar.pfs,2,2,2,0 -+ mov r16=ar.unat -+ ;; -+ mov r17=ar.fpsr -+ mov r2=in0 -+ add r3=8,in0 -+ ;; -+.mem.offset 0,0; -+ st8.spill.nta [r2]=sp,16 // r12 (sp) -+.mem.offset 8,0; -+ st8.spill.nta [r3]=gp,16 // r1 (gp) -+ ;; -+ st8.nta [r2]=r16,16 // save caller's unat -+ st8.nta [r3]=r17,16 // save fpsr -+ add r8=0xa0,in0 -+ ;; -+.mem.offset 160,0; -+ st8.spill.nta [r2]=r4,16 // r4 -+.mem.offset 168,0; -+ st8.spill.nta [r3]=r5,16 // r5 -+ add r9=0xb0,in0 -+ ;; -+ stf.spill.nta [r8]=f2,32 -+ stf.spill.nta [r9]=f3,32 -+ mov loc0=rp -+ .body -+ ;; -+ stf.spill.nta [r8]=f4,32 -+ stf.spill.nta [r9]=f5,32 -+ mov r17=b1 -+ ;; -+ stf.spill.nta [r8]=f16,32 -+ stf.spill.nta [r9]=f17,32 -+ mov r18=b2 -+ ;; -+ stf.spill.nta [r8]=f18,32 -+ stf.spill.nta [r9]=f19,32 -+ mov r19=b3 -+ ;; -+ stf.spill.nta [r8]=f20,32 -+ stf.spill.nta [r9]=f21,32 -+ mov r20=b4 -+ ;; -+ stf.spill.nta [r8]=f22,32 -+ stf.spill.nta [r9]=f23,32 -+ mov r21=b5 -+ ;; -+ stf.spill.nta [r8]=f24,32 -+ stf.spill.nta [r9]=f25,32 -+ mov r22=ar.lc -+ ;; -+ stf.spill.nta [r8]=f26,32 -+ stf.spill.nta [r9]=f27,32 -+ mov r24=pr -+ ;; -+ stf.spill.nta [r8]=f28,32 -+ stf.spill.nta [r9]=f29,32 -+ ;; -+ stf.spill.nta [r8]=f30 -+ stf.spill.nta [r9]=f31 -+ -+.mem.offset 0,0; -+ st8.spill.nta [r2]=r6,16 // r6 -+.mem.offset 8,0; -+ st8.spill.nta [r3]=r7,16 // r7 -+ ;; -+ mov r23=ar.bsp -+ mov r25=ar.unat -+#ifndef __KERNEL__ -+ mov out0=in0 -+#endif /* !__KERNEL__ */ -+ -+ st8.nta [r2]=loc0,16 // b0 -+ st8.nta [r3]=r17,16 // b1 -+#ifndef __KERNEL__ -+ mov out1=in1 -+#endif /* !__KERNEL__ */ -+ ;; -+ st8.nta [r2]=r18,16 // b2 -+ st8.nta [r3]=r19,16 // b3 -+ ;; -+ st8.nta [r2]=r20,16 // b4 -+ st8.nta [r3]=r21,16 // b5 -+ ;; -+ st8.nta [r2]=loc1,16 // ar.pfs -+ st8.nta [r3]=r22,16 // ar.lc -+ ;; -+ st8.nta [r2]=r24,16 // pr -+ st8.nta [r3]=r23,16 // ar.bsp -+ ;; -+ st8.nta [r2]=r25 // ar.unat -+ st8.nta [r3]=in0 // &__jmp_buf -+#ifndef __KERNEL__ -+ br.call.dpnt.few rp=__sigjmp_save -+.ret0: // force a new bundle ::q -+#endif /* !_KERNEL__ */ -+ mov r8=0 -+ mov rp=loc0 -+ mov ar.pfs=loc1 -+ ret -+#ifndef __KERNEL__ -+END(__sigsetjmp) -+ -+weak_extern(_setjmp) -+weak_extern(setjmp) -+ -+#else /* __KERNEL__ */ 
-+END(kdba_setjmp) -+#endif /* !_KERNEL__ */ -+ -+/* Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. -+ Contributed by David Mosberger-Tang . -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Library General Public License as -+ published by the Free Software Foundation; either version 2 of the -+ License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Library General Public License for more details. -+ -+ You should have received a copy of the GNU Library General Public -+ License along with the GNU C Library; see the file COPYING.LIB. If -+ not, write to the Free Software Foundation, Inc., -+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -+ -+ Note that __sigsetjmp() did NOT flush the register stack. Instead, -+ we do it here since __longjmp() is usually much less frequently -+ invoked than __sigsetjmp(). The only difficulty is that __sigsetjmp() -+ didn't (and wouldn't be able to) save ar.rnat either. This is a problem -+ because if we're not careful, we could end up loading random NaT bits. -+ There are two cases: -+ -+ (i) ar.bsp < ia64_rse_rnat_addr(jmpbuf.ar_bsp) -+ ar.rnat contains the desired bits---preserve ar.rnat -+ across loadrs and write to ar.bspstore -+ -+ (ii) ar.bsp >= ia64_rse_rnat_addr(jmpbuf.ar_bsp) -+ The desired ar.rnat is stored in -+ ia64_rse_rnat_addr(jmpbuf.ar_bsp). Load those -+ bits into ar.rnat after setting ar.bspstore. */ -+ -+#ifndef __KERNEL__ -+#include -+#include -+#endif /* !__KERNEL__ */ -+ -+# define pPos p6 /* is rotate count positive? */ -+# define pNeg p7 /* is rotate count negative? 
*/ -+ -+ -+ /* __longjmp(__jmp_buf buf, int val) */ -+ -+#ifndef __KERNEL__ -+LEAF(__longjmp) -+#else /* __KERNEL__ */ -+GLOBAL_ENTRY(kdba_longjmp) -+#endif /* !__KERNEL__ */ -+ alloc r8=ar.pfs,2,1,0,0 -+ mov r27=ar.rsc -+ add r2=0x98,in0 // r2 <- &jmpbuf.orig_jmp_buf_addr -+ ;; -+ ld8 r8=[r2],-16 // r8 <- orig_jmp_buf_addr -+ mov r10=ar.bsp -+ and r11=~0x3,r27 // clear ar.rsc.mode -+ ;; -+ flushrs // flush dirty regs to backing store (must be first in insn grp) -+ ld8 r23=[r2],8 // r23 <- jmpbuf.ar_bsp -+ sub r8=r8,in0 // r8 <- &orig_jmpbuf - &jmpbuf -+ ;; -+ ld8 r25=[r2] // r25 <- jmpbuf.ar_unat -+ extr.u r8=r8,3,6 // r8 <- (&orig_jmpbuf - &jmpbuf)/8 & 0x3f -+ ;; -+ cmp.lt pNeg,pPos=r8,r0 -+ mov r2=in0 -+ ;; -+(pPos) mov r16=r8 -+(pNeg) add r16=64,r8 -+(pPos) sub r17=64,r8 -+(pNeg) sub r17=r0,r8 -+ ;; -+ mov ar.rsc=r11 // put RSE in enforced lazy mode -+ shr.u r8=r25,r16 -+ add r3=8,in0 // r3 <- &jmpbuf.r1 -+ shl r9=r25,r17 -+ ;; -+ or r25=r8,r9 -+ ;; -+ mov r26=ar.rnat -+ mov ar.unat=r25 // setup ar.unat (NaT bits for r1, r4-r7, and r12) -+ ;; -+ ld8.fill.nta sp=[r2],16 // r12 (sp) -+ ld8.fill.nta gp=[r3],16 // r1 (gp) -+ dep r11=-1,r23,3,6 // r11 <- ia64_rse_rnat_addr(jmpbuf.ar_bsp) -+ ;; -+ ld8.nta r16=[r2],16 // caller's unat -+ ld8.nta r17=[r3],16 // fpsr -+ ;; -+ ld8.fill.nta r4=[r2],16 // r4 -+ ld8.fill.nta r5=[r3],16 // r5 (gp) -+ cmp.geu p8,p0=r10,r11 // p8 <- (ar.bsp >= jmpbuf.ar_bsp) -+ ;; -+ ld8.fill.nta r6=[r2],16 // r6 -+ ld8.fill.nta r7=[r3],16 // r7 -+ ;; -+ mov ar.unat=r16 // restore caller's unat -+ mov ar.fpsr=r17 // restore fpsr -+ ;; -+ ld8.nta r16=[r2],16 // b0 -+ ld8.nta r17=[r3],16 // b1 -+ ;; -+(p8) ld8 r26=[r11] // r26 <- *ia64_rse_rnat_addr(jmpbuf.ar_bsp) -+ mov ar.bspstore=r23 // restore ar.bspstore -+ ;; -+ ld8.nta r18=[r2],16 // b2 -+ ld8.nta r19=[r3],16 // b3 -+ ;; -+ ld8.nta r20=[r2],16 // b4 -+ ld8.nta r21=[r3],16 // b5 -+ ;; -+ ld8.nta r11=[r2],16 // ar.pfs -+ ld8.nta r22=[r3],56 // ar.lc -+ ;; -+ ld8.nta r24=[r2],32 // pr -+ mov b0=r16 -+ ;; -+ ldf.fill.nta f2=[r2],32 -+ ldf.fill.nta f3=[r3],32 -+ mov b1=r17 -+ ;; -+ ldf.fill.nta f4=[r2],32 -+ ldf.fill.nta f5=[r3],32 -+ mov b2=r18 -+ ;; -+ ldf.fill.nta f16=[r2],32 -+ ldf.fill.nta f17=[r3],32 -+ mov b3=r19 -+ ;; -+ ldf.fill.nta f18=[r2],32 -+ ldf.fill.nta f19=[r3],32 -+ mov b4=r20 -+ ;; -+ ldf.fill.nta f20=[r2],32 -+ ldf.fill.nta f21=[r3],32 -+ mov b5=r21 -+ ;; -+ ldf.fill.nta f22=[r2],32 -+ ldf.fill.nta f23=[r3],32 -+ mov ar.lc=r22 -+ ;; -+ ldf.fill.nta f24=[r2],32 -+ ldf.fill.nta f25=[r3],32 -+ cmp.eq p8,p9=0,in1 -+ ;; -+ ldf.fill.nta f26=[r2],32 -+ ldf.fill.nta f27=[r3],32 -+ mov ar.pfs=r11 -+ ;; -+ ldf.fill.nta f28=[r2],32 -+ ldf.fill.nta f29=[r3],32 -+ ;; -+ ldf.fill.nta f30=[r2] -+ ldf.fill.nta f31=[r3] -+(p8) mov r8=1 -+ -+ mov ar.rnat=r26 // restore ar.rnat -+ ;; -+ mov ar.rsc=r27 // restore ar.rsc -+(p9) mov r8=in1 -+ -+ invala // virt. -> phys. regnum mapping may change -+ mov pr=r24,-1 -+ ret -+#ifndef __KERNEL__ -+END(__longjmp) -+#else /* __KERNEL__ */ -+END(kdba_longjmp) -+#endif /* !_KERNEL__ */ ---- /dev/null -+++ b/arch/ia64/kdb/kdba_pod.c -@@ -0,0 +1,64 @@ -+/* -+ * Kernel Debugger Architecture Dependent POD functions. -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_AUTHOR("Jesse Barnes"); -+MODULE_DESCRIPTION("Enter POD through KDB"); -+MODULE_LICENSE("GPL"); -+ -+/** -+ * kdba_pod - enter POD mode from kdb -+ * @argc: arg count -+ * @argv: arg values -+ * -+ * Enter POD mode from kdb using SGI SN specific SAL function call. -+ */ -+static int -+kdba_pod(int argc, const char **argv) -+{ -+ kdb_printf("WARNING: pod commands are dangerous unless you know exactly\n" -+ "what you are doing. If in doubt, type exit immediately.\n"); -+ return ia64_sn_pod_mode(); -+} -+ -+/** -+ * kdba_pod_init - register 'pod' command with kdb -+ * -+ * Register the 'pod' command with kdb at load time. -+ */ -+static int __init -+kdba_pod_init(void) -+{ -+ if (ia64_platform_is("sn2")) -+ kdb_register("pod", kdba_pod, NULL, "Enter POD", 0); -+ -+ return 0; -+} -+ -+/** -+ * kdba_pod_exit - unregister the 'pod' command -+ * -+ * Tell kdb that the 'pod' command is no longer available. -+ */ -+static void __exit -+kdba_pod_exit(void) -+{ -+ if (ia64_platform_is("sn2")) -+ kdb_unregister("pod"); -+} -+ -+module_init(kdba_pod_init) -+module_exit(kdba_pod_exit) ---- /dev/null -+++ b/arch/ia64/kdb/kdba_support.c -@@ -0,0 +1,1720 @@ -+/* -+ * Kernel Debugger Architecture Independent Support Functions -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. -+ * Copyright (C) David Mosberger-Tang -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#ifdef CONFIG_KDB_KDUMP -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#ifdef CONFIG_SMP -+#include -+#endif -+#include -+ -+struct kdb_running_process *kdb_running_process_save; /* [NR_CPUS] */ -+static int kdba_show_handlers = 0; -+ -+static int -+kdba_itm (int argc, const char **argv) -+{ -+ int diag; -+ unsigned long val; -+ -+ diag = kdbgetularg(argv[1], &val); -+ if (diag) -+ return diag; -+ kdb_printf("new itm=" kdb_machreg_fmt "\n", val); -+ -+ ia64_set_itm(val); -+ return 0; -+} -+ -+static void -+kdba_show_intregs(void) -+{ -+ u64 lid, tpr, lrr0, lrr1, itv, pmv, cmcv; -+ -+ asm ("mov %0=cr.lid" : "=r"(lid)); -+ asm ("mov %0=cr.tpr" : "=r"(tpr)); -+ asm ("mov %0=cr.lrr0" : "=r"(lrr0)); -+ asm ("mov %0=cr.lrr1" : "=r"(lrr1)); -+ kdb_printf("lid=" kdb_machreg_fmt ", tpr=" kdb_machreg_fmt ", lrr0=" kdb_machreg_fmt ", llr1=" kdb_machreg_fmt "\n", lid, tpr, lrr0, lrr1); -+ -+ asm ("mov %0=cr.itv" : "=r"(itv)); -+ asm ("mov %0=cr.pmv" : "=r"(pmv)); -+ asm ("mov %0=cr.cmcv" : "=r"(cmcv)); -+ kdb_printf("itv=" kdb_machreg_fmt ", pmv=" kdb_machreg_fmt ", cmcv=" kdb_machreg_fmt "\n", itv, pmv, cmcv); -+ -+ kdb_printf("irr=0x%016lx,0x%016lx,0x%016lx,0x%016lx\n", -+ ia64_getreg(_IA64_REG_CR_IRR0), ia64_getreg(_IA64_REG_CR_IRR1), ia64_getreg(_IA64_REG_CR_IRR2), ia64_getreg(_IA64_REG_CR_IRR3)); -+ -+ kdb_printf("itc=0x%016lx, itm=0x%016lx\n", ia64_get_itc(), ia64_get_itm()); -+} -+ -+static int -+kdba_sir (int argc, const char **argv) -+{ -+ kdba_show_intregs(); -+ -+ return 0; -+} -+ -+/* -+ * kdba_pt_regs -+ * -+ * Format a struct pt_regs -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. 
-+ * Remarks: -+ * If no address is supplied, it uses the current irq pt_regs. -+ */ -+ -+static int -+kdba_pt_regs(int argc, const char **argv) -+{ -+ int diag; -+ kdb_machreg_t addr; -+ long offset = 0; -+ int nextarg; -+ struct pt_regs *p; -+ -+ if (argc == 0) { -+ addr = (kdb_machreg_t) get_irq_regs(); -+ } else if (argc == 1) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ -+ p = (struct pt_regs *) addr; -+ kdb_printf("struct pt_regs %p-%p\n", p, (unsigned char *)p + sizeof(*p) - 1); -+ kdb_print_nameval("b6", p->b6); -+ kdb_print_nameval("b7", p->b7); -+ kdb_printf(" ar_csd 0x%lx\n", p->ar_csd); -+ kdb_printf(" ar_ssd 0x%lx\n", p->ar_ssd); -+ kdb_print_nameval("r8", p->r8); -+ kdb_print_nameval("r9", p->r9); -+ kdb_print_nameval("r10", p->r10); -+ kdb_print_nameval("r11", p->r11); -+ kdb_printf(" cr_ipsr 0x%lx\n", p->cr_ipsr); -+ kdb_print_nameval("cr_iip", p->cr_iip); -+ kdb_printf(" cr_ifs 0x%lx\n", p->cr_ifs); -+ kdb_printf(" ar_unat 0x%lx\n", p->ar_unat); -+ kdb_printf(" ar_pfs 0x%lx\n", p->ar_pfs); -+ kdb_printf(" ar_rsc 0x%lx\n", p->ar_rsc); -+ kdb_printf(" ar_rnat 0x%lx\n", p->ar_rnat); -+ kdb_printf(" ar_bspstore 0x%lx\n", p->ar_bspstore); -+ kdb_printf(" pr 0x%lx\n", p->pr); -+ kdb_print_nameval("b0", p->b0); -+ kdb_printf(" loadrs 0x%lx\n", p->loadrs); -+ kdb_print_nameval("r1", p->r1); -+ kdb_print_nameval("r12", p->r12); -+ kdb_print_nameval("r13", p->r13); -+ kdb_printf(" ar_fpsr 0x%lx\n", p->ar_fpsr); -+ kdb_print_nameval("r15", p->r15); -+ kdb_print_nameval("r14", p->r14); -+ kdb_print_nameval("r2", p->r2); -+ kdb_print_nameval("r3", p->r3); -+ kdb_print_nameval("r16", p->r16); -+ kdb_print_nameval("r17", p->r17); -+ kdb_print_nameval("r18", p->r18); -+ kdb_print_nameval("r19", p->r19); -+ kdb_print_nameval("r20", p->r20); -+ kdb_print_nameval("r21", p->r21); -+ kdb_print_nameval("r22", p->r22); -+ kdb_print_nameval("r23", p->r23); -+ kdb_print_nameval("r24", p->r24); -+ kdb_print_nameval("r25", p->r25); -+ kdb_print_nameval("r26", p->r26); -+ kdb_print_nameval("r27", p->r27); -+ kdb_print_nameval("r28", p->r28); -+ kdb_print_nameval("r29", p->r29); -+ kdb_print_nameval("r30", p->r30); -+ kdb_print_nameval("r31", p->r31); -+ kdb_printf(" ar_ccv 0x%lx\n", p->ar_ccv); -+ kdb_printf(" f6 0x%lx 0x%lx\n", p->f6.u.bits[0], p->f6.u.bits[1]); -+ kdb_printf(" f7 0x%lx 0x%lx\n", p->f7.u.bits[0], p->f7.u.bits[1]); -+ kdb_printf(" f8 0x%lx 0x%lx\n", p->f8.u.bits[0], p->f8.u.bits[1]); -+ kdb_printf(" f9 0x%lx 0x%lx\n", p->f9.u.bits[0], p->f9.u.bits[1]); -+ kdb_printf(" f10 0x%lx 0x%lx\n", p->f10.u.bits[0], p->f10.u.bits[1]); -+ kdb_printf(" f11 0x%lx 0x%lx\n", p->f11.u.bits[0], p->f11.u.bits[1]); -+ -+ return 0; -+} -+ -+/* -+ * kdba_stackdepth -+ * -+ * Print processes that are using more than a specific percentage of their -+ * stack. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * If no percentage is supplied, it uses 60. 
-+ */ -+ -+static int -+kdba_stackdepth(int argc, const char **argv) -+{ -+ int diag, threshold, used; -+ unsigned long percentage; -+ unsigned long esp; -+ long offset = 0; -+ int nextarg; -+ struct task_struct *p, *g; -+ struct switch_stack *sw; -+ -+ if (argc == 0) { -+ percentage = 60; -+ } else if (argc == 1) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &percentage, &offset, NULL); -+ if (diag) -+ return diag; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ percentage = max_t(int, percentage, 1); -+ percentage = min_t(int, percentage, 100); -+ threshold = ((2 * THREAD_SIZE * percentage) / 100 + 1) >> 1; -+ kdb_printf("stackdepth: processes using more than %ld%% (%d bytes) of stack\n", -+ percentage, threshold); -+ kdb_do_each_thread(g, p) { -+ if (kdb_task_has_cpu(p)) { -+ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p); -+ if (krp->seqno) -+ sw = krp->arch.sw; -+ else -+ sw = NULL; -+ } else -+ sw = (struct switch_stack *) (p->thread.ksp + 16); -+ if (!sw) -+ continue; -+ esp = (unsigned long) sw; -+ used = THREAD_SIZE - (esp - sw->ar_bspstore); -+ if (used >= threshold) { -+ kdb_ps1(p); -+ kdb_printf(" esp %lx bsp %lx used %d\n", esp, sw->ar_bspstore, used); -+ } -+ } kdb_while_each_thread(g, p); -+ -+ return 0; -+} -+ -+/* -+ * kdb_switch_stack -+ * -+ * Format a struct switch_stack -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * If no address is supplied, it uses kdb_running_process[smp_processor_id()].arch.sw. -+ */ -+ -+static int -+kdba_switch_stack(int argc, const char **argv) -+{ -+ int diag; -+ kdb_machreg_t addr; -+ long offset = 0; -+ int nextarg; -+ struct switch_stack *p; -+ -+ if (argc == 0) { -+ addr = (kdb_machreg_t) kdb_running_process[smp_processor_id()].arch.sw; -+ } else if (argc == 1) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ -+ p = (struct switch_stack *) addr; -+ kdb_printf("struct switch_stack %p-%p\n", p, (unsigned char *)p + sizeof(*p) - 1); -+ kdb_printf(" caller_unat 0x%lx\n", p->caller_unat); -+ kdb_printf(" ar_fpsr 0x%lx\n", p->ar_fpsr); -+ kdb_printf(" f2 0x%lx 0x%lx\n", p->f2.u.bits[0], p->f2.u.bits[1]); -+ kdb_printf(" f3 0x%lx 0x%lx\n", p->f3.u.bits[0], p->f3.u.bits[1]); -+ kdb_printf(" f4 0x%lx 0x%lx\n", p->f4.u.bits[0], p->f4.u.bits[1]); -+ kdb_printf(" f5 0x%lx 0x%lx\n", p->f5.u.bits[0], p->f5.u.bits[1]); -+ kdb_printf(" f12 0x%lx 0x%lx\n", p->f12.u.bits[0], p->f12.u.bits[1]); -+ kdb_printf(" f13 0x%lx 0x%lx\n", p->f13.u.bits[0], p->f13.u.bits[1]); -+ kdb_printf(" f14 0x%lx 0x%lx\n", p->f14.u.bits[0], p->f14.u.bits[1]); -+ kdb_printf(" f15 0x%lx 0x%lx\n", p->f15.u.bits[0], p->f15.u.bits[1]); -+ kdb_printf(" f16 0x%lx 0x%lx\n", p->f16.u.bits[0], p->f16.u.bits[1]); -+ kdb_printf(" f17 0x%lx 0x%lx\n", p->f17.u.bits[0], p->f17.u.bits[1]); -+ kdb_printf(" f18 0x%lx 0x%lx\n", p->f18.u.bits[0], p->f18.u.bits[1]); -+ kdb_printf(" f19 0x%lx 0x%lx\n", p->f19.u.bits[0], p->f19.u.bits[1]); -+ kdb_printf(" f20 0x%lx 0x%lx\n", p->f20.u.bits[0], p->f20.u.bits[1]); -+ kdb_printf(" f21 0x%lx 0x%lx\n", p->f21.u.bits[0], p->f21.u.bits[1]); -+ kdb_printf(" f22 0x%lx 0x%lx\n", p->f22.u.bits[0], p->f22.u.bits[1]); -+ kdb_printf(" f23 0x%lx 0x%lx\n", p->f23.u.bits[0], p->f23.u.bits[1]); -+ kdb_printf(" f24 0x%lx 0x%lx\n", p->f24.u.bits[0], p->f24.u.bits[1]); -+ 
kdb_printf(" f25 0x%lx 0x%lx\n", p->f25.u.bits[0], p->f25.u.bits[1]); -+ kdb_printf(" f26 0x%lx 0x%lx\n", p->f26.u.bits[0], p->f26.u.bits[1]); -+ kdb_printf(" f27 0x%lx 0x%lx\n", p->f27.u.bits[0], p->f27.u.bits[1]); -+ kdb_printf(" f28 0x%lx 0x%lx\n", p->f28.u.bits[0], p->f28.u.bits[1]); -+ kdb_printf(" f29 0x%lx 0x%lx\n", p->f29.u.bits[0], p->f29.u.bits[1]); -+ kdb_printf(" f30 0x%lx 0x%lx\n", p->f30.u.bits[0], p->f30.u.bits[1]); -+ kdb_printf(" f31 0x%lx 0x%lx\n", p->f31.u.bits[0], p->f31.u.bits[1]); -+ kdb_print_nameval("r4", p->r4); -+ kdb_print_nameval("r5", p->r5); -+ kdb_print_nameval("r6", p->r6); -+ kdb_print_nameval("r7", p->r7); -+ kdb_print_nameval("b0", p->b0); -+ kdb_print_nameval("b1", p->b1); -+ kdb_print_nameval("b2", p->b2); -+ kdb_print_nameval("b3", p->b3); -+ kdb_print_nameval("b4", p->b4); -+ kdb_print_nameval("b5", p->b5); -+ kdb_printf(" ar_pfs 0x%lx\n", p->ar_pfs); -+ kdb_printf(" ar_lc 0x%lx\n", p->ar_lc); -+ kdb_printf(" ar_unat 0x%lx\n", p->ar_unat); -+ kdb_printf(" ar_rnat 0x%lx\n", p->ar_rnat); -+ kdb_printf(" ar_bspstore 0x%lx\n", p->ar_bspstore); -+ kdb_printf(" pr 0x%lx\n", p->pr); -+ -+ return 0; -+} -+ -+/* -+ * kdb_minstate -+ * -+ * Format the PAL minstate area. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * None. -+ */ -+ -+static int -+kdba_minstate(int argc, const char **argv) -+{ -+ int diag; -+ kdb_machreg_t addr; -+ long offset = 0; -+ int nextarg; -+ pal_min_state_area_t *p; -+ -+ if (argc == 1) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ -+ p = (pal_min_state_area_t *) addr; -+ kdb_printf("PAL minstate %p-%p\n", p, (unsigned char *)p + sizeof(*p) - 1); -+ kdb_printf(" pmsa_nat_bits 0x%lx\n", p->pmsa_nat_bits); -+ kdb_print_nameval("r1", p->pmsa_gr[1-1]); -+ kdb_print_nameval("r2", p->pmsa_gr[2-1]); -+ kdb_print_nameval("r3", p->pmsa_gr[3-1]); -+ kdb_print_nameval("r4", p->pmsa_gr[4-1]); -+ kdb_print_nameval("r5", p->pmsa_gr[5-1]); -+ kdb_print_nameval("r6", p->pmsa_gr[6-1]); -+ kdb_print_nameval("r7", p->pmsa_gr[7-1]); -+ kdb_print_nameval("r8", p->pmsa_gr[8-1]); -+ kdb_print_nameval("r9", p->pmsa_gr[9-1]); -+ kdb_print_nameval("r10", p->pmsa_gr[10-1]); -+ kdb_print_nameval("r11", p->pmsa_gr[11-1]); -+ kdb_print_nameval("r12", p->pmsa_gr[12-1]); -+ kdb_print_nameval("r13", p->pmsa_gr[13-1]); -+ kdb_print_nameval("r14", p->pmsa_gr[14-1]); -+ kdb_print_nameval("r15", p->pmsa_gr[15-1]); -+ kdb_printf(" Bank 0\n"); -+ kdb_print_nameval("r16", p->pmsa_bank0_gr[16-16]); -+ kdb_print_nameval("r17", p->pmsa_bank0_gr[17-16]); -+ kdb_print_nameval("r18", p->pmsa_bank0_gr[18-16]); -+ kdb_print_nameval("r19", p->pmsa_bank0_gr[19-16]); -+ kdb_print_nameval("r20", p->pmsa_bank0_gr[20-16]); -+ kdb_print_nameval("r21", p->pmsa_bank0_gr[21-16]); -+ kdb_print_nameval("r22", p->pmsa_bank0_gr[22-16]); -+ kdb_print_nameval("r23", p->pmsa_bank0_gr[23-16]); -+ kdb_print_nameval("r24", p->pmsa_bank0_gr[24-16]); -+ kdb_print_nameval("r25", p->pmsa_bank0_gr[25-16]); -+ kdb_print_nameval("r26", p->pmsa_bank0_gr[26-16]); -+ kdb_print_nameval("r27", p->pmsa_bank0_gr[27-16]); -+ kdb_print_nameval("r28", p->pmsa_bank0_gr[28-16]); -+ kdb_print_nameval("r29", p->pmsa_bank0_gr[29-16]); -+ kdb_print_nameval("r30", p->pmsa_bank0_gr[30-16]); -+ kdb_print_nameval("r31", p->pmsa_bank0_gr[31-16]); -+ kdb_printf(" Bank 
1\n"); -+ kdb_print_nameval("r16", p->pmsa_bank1_gr[16-16]); -+ kdb_print_nameval("r17", p->pmsa_bank1_gr[17-16]); -+ kdb_print_nameval("r18", p->pmsa_bank1_gr[18-16]); -+ kdb_print_nameval("r19", p->pmsa_bank1_gr[19-16]); -+ kdb_print_nameval("r20", p->pmsa_bank1_gr[20-16]); -+ kdb_print_nameval("r21", p->pmsa_bank1_gr[21-16]); -+ kdb_print_nameval("r22", p->pmsa_bank1_gr[22-16]); -+ kdb_print_nameval("r23", p->pmsa_bank1_gr[23-16]); -+ kdb_print_nameval("r24", p->pmsa_bank1_gr[24-16]); -+ kdb_print_nameval("r25", p->pmsa_bank1_gr[25-16]); -+ kdb_print_nameval("r26", p->pmsa_bank1_gr[26-16]); -+ kdb_print_nameval("r27", p->pmsa_bank1_gr[27-16]); -+ kdb_print_nameval("r28", p->pmsa_bank1_gr[28-16]); -+ kdb_print_nameval("r29", p->pmsa_bank1_gr[29-16]); -+ kdb_print_nameval("r30", p->pmsa_bank1_gr[30-16]); -+ kdb_print_nameval("r31", p->pmsa_bank1_gr[31-16]); -+ kdb_printf(" pr 0x%lx\n", p->pmsa_pr); -+ kdb_print_nameval("b0", p->pmsa_br0); -+ kdb_printf(" ar.rsc 0x%lx\n", p->pmsa_rsc); -+ kdb_print_nameval("cr.iip", p->pmsa_iip); -+ kdb_printf(" cr.ipsr 0x%lx\n", p->pmsa_ipsr); -+ kdb_printf(" cr.ifs 0x%lx\n", p->pmsa_ifs); -+ kdb_print_nameval("cr.xip", p->pmsa_xip); -+ kdb_printf(" cr.xpsr 0x%lx\n", p->pmsa_xpsr); -+ kdb_printf(" cr.xfs 0x%lx\n", p->pmsa_xfs); -+ kdb_print_nameval("b1", p->pmsa_br1); -+ -+ return 0; -+} -+ -+/* -+ * kdba_cpuinfo -+ * -+ * Format struct cpuinfo_ia64. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * If no cpu is supplied, it prints cpuinfo for all online cpus. -+ */ -+ -+static int -+kdba_cpuinfo(int argc, const char **argv) -+{ -+ int diag; -+ unsigned long cpunum = -1; -+ long offset = 0; -+ int nextarg, c, i; -+ struct cpuinfo_ia64 *cpuinfo; -+ -+ if (argc == 1) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &cpunum, &offset, NULL); -+ if (diag) -+ return diag; -+ if (cpunum >= NR_CPUS || !cpu_online(cpunum)) -+ return KDB_BADCPUNUM; -+ } else if (argc > 1) { -+ return KDB_ARGCOUNT; -+ } -+ -+ for (c = (cpunum == -1 ? 0 : cpunum); -+ c < (cpunum == -1 ? 
NR_CPUS : cpunum+1); -+ ++c) { -+ if (!cpu_online(c)) -+ continue; -+ cpuinfo = cpu_data(c); -+ kdb_printf("struct cpuinfo_ia64 for cpu %d is at 0x%p\n", c, cpuinfo); -+ kdb_printf(" softirq_pending 0x%x\n", cpuinfo->softirq_pending); -+ kdb_printf(" itm_delta %ld\n", cpuinfo->itm_delta); -+ kdb_printf(" itm_next %ld\n", cpuinfo->itm_next); -+ kdb_printf(" nsec_per_cyc %ld\n", cpuinfo->nsec_per_cyc); -+ kdb_printf(" unimpl_va_mask 0x%lx\n", cpuinfo->unimpl_va_mask); -+ kdb_printf(" unimpl_pa_mask 0x%lx\n", cpuinfo->unimpl_pa_mask); -+ kdb_printf(" itc_freq %ld\n", cpuinfo->itc_freq); -+ kdb_printf(" proc_freq %ld\n", cpuinfo->proc_freq); -+ kdb_printf(" cyc_per_usec %ld\n", cpuinfo->cyc_per_usec); -+ kdb_printf(" cyc_per_usec %ld\n", cpuinfo->cyc_per_usec); -+#if 0 /* RJA per-cpu MCA */ -+ kdb_printf(" percpu_paddr 0x%lx\n", cpuinfo->percpu_paddr); -+#endif -+ kdb_printf(" ptce_base 0x%lx\n", cpuinfo->ptce_base); -+ kdb_printf(" ptce_count %d %d\n", cpuinfo->ptce_count[0], cpuinfo->ptce_count[1]); -+ kdb_printf(" ptce_stride %d %d\n", cpuinfo->ptce_stride[0], cpuinfo->ptce_stride[1]); -+#if 0 /* RJA per-cpu MCA */ -+ kdb_printf(" pal_paddr 0x%lx\n", cpuinfo->pal_paddr); -+ kdb_printf(" pal_base 0x%lx\n", cpuinfo->pal_base); -+#endif -+ kdb_printf(" ksoftirqd 0x%p\n", cpuinfo->ksoftirqd); -+#ifdef CONFIG_SMP -+ kdb_printf(" loops_per_jiffy %ld\n", cpuinfo->loops_per_jiffy); -+ kdb_printf(" cpu %d\n", cpuinfo->cpu); -+ kdb_printf(" socket_id %d\n", cpuinfo->socket_id); -+ kdb_printf(" core_id %d\n", cpuinfo->core_id); -+ kdb_printf(" thread_id %d\n", cpuinfo->thread_id); -+ kdb_printf(" num_log %d\n", cpuinfo->num_log); -+ kdb_printf(" cores_per_socket %d\n", cpuinfo->cores_per_socket); -+ kdb_printf(" threads_per_core %d\n", cpuinfo->threads_per_core); -+#endif -+ kdb_printf(" ppn 0x%lx\n", cpuinfo->ppn); -+ kdb_printf(" features 0x%lx\n", cpuinfo->features); -+ kdb_printf(" number %d\n", cpuinfo->number); -+ kdb_printf(" revision %d\n", cpuinfo->revision); -+ kdb_printf(" model %d\n", cpuinfo->model); -+ kdb_printf(" family %d\n", cpuinfo->family); -+ kdb_printf(" archrev %d\n", cpuinfo->archrev); -+ kdb_printf(" vendor "); -+ for (i = 0; i < sizeof(cpuinfo->vendor); ++i) -+ kdb_printf(" 0x%02x", cpuinfo->vendor[i]); -+ kdb_printf("\n"); -+#ifdef CONFIG_NUMA -+ kdb_printf(" node_data 0x%p\n", cpuinfo->node_data); -+#endif -+#if 0 /* RJA per-cpu MCA */ -+ kdb_printf(" ia64_pa_mca_data 0x%p\n", cpuinfo->ia64_pa_mca_data); -+#endif -+ } -+ return 0; -+} -+ -+#ifdef CONFIG_KDB_HARDWARE_BREAKPOINTS -+void -+kdba_installdbreg(kdb_bp_t *bp) -+{ -+ unsigned long mask; -+ unsigned int regbase; -+ static unsigned long masks[] = { -+ 0x00FFFFFFFFFFFFFFUL, // 1 byte long -+ 0x00FFFFFFFFFFFFFEUL, // 2 bytes long -+ 0x0000000000000000UL, // invalid -+ 0x00FFFFFFFFFFFFFCUL // 4 bytes long -+ }; -+ static unsigned char modes[] = { -+ 0x81, // instruction => x, plm=priv level 0 only -+ 0x41, // write => w, plm=priv level 0 only -+ 0x00, // io -+ 0x81 // read => r, plm=priv level 0 only -+ }; -+ -+ /* Note that bp->bp_hard[NR_CPU] is for x86. -+ * The ia64 uses bp->bp_hard[0] only. 
-+ */ -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_installdbreg:\n"); -+ mask = masks[bp->bp_hard[0]->bph_length] | -+ (((unsigned long)(modes[bp->bp_hard[0]->bph_mode])) << 56); -+ regbase = 2*bp->bp_hard[0]->bph_reg; -+ -+ switch (bp->bp_hard[0]->bph_mode) -+ { -+ case 1: -+ case 3: -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_installdbreg: dbr[%u]=%016lx\n", -+ regbase, bp->bp_addr); -+ kdb_printf("kdba_installdbreg: dbr[%u]=%016lx\n", -+ regbase+1, mask); -+ } -+ -+ ia64_set_dbr(regbase, bp->bp_addr); -+ ia64_set_dbr(regbase+1, mask); -+ ia64_srlz_d(); -+ break; -+ -+ case 0: /* instruction */ -+#if 0 -+ ia64_set_ibr(regbase, bp->bp_addr); -+ ia64_set_ibr(regbase+1, mask); -+ ia64_srlz_d(); -+#else -+ kdb_printf("\"instr\" mode not implemented\n"); -+#endif -+ break; -+ -+ case 2: /* io */ -+ kdb_printf("\"io\" mode not implemented\n"); -+ break; -+ } -+} -+ -+void -+kdba_removedbreg(kdb_bp_t *bp) -+{ -+ unsigned int regbase = 2*bp->bp_hard[0]->bph_reg; -+ -+ /* Note that bp->bp_hard[NR_CPU] is for x86. -+ * The ia64 uses bp->bp_hard[0] only. -+ */ -+ switch (bp->bp_hard[0]->bph_mode) -+ { -+ case 1: -+ case 3: -+ ia64_set_dbr(regbase, 0); -+ ia64_set_dbr(regbase+1, 0); -+ ia64_srlz_d(); -+ break; -+ -+ case 0: /* instruction */ -+#if 0 -+ ia64_set_ibr(regbase, 0); -+ ia64_set_ibr(regbase+1, 0); -+ ia64_srlz_d(); -+#else -+ kdb_printf("\"instr\" mode not implemented\n"); -+#endif -+ break; -+ -+ case 2: /* io */ -+ kdb_printf("\"io\" mode not implemented\n"); -+ break; -+ } -+} -+#endif /* CONFIG_KDB_HARDWARE_BREAKPOINTS */ -+ -+ -+static kdb_machreg_t -+kdba_getdr(int regnum) -+{ -+ kdb_machreg_t contents = 0; -+ unsigned long reg = (unsigned long)regnum; -+ -+ __asm__ ("mov %0=ibr[%1]"::"r"(contents),"r"(reg)); -+// __asm__ ("mov ibr[%0]=%1"::"r"(dbreg_cond),"r"(value)); -+ -+ return contents; -+} -+ -+ -+static void -+get_fault_regs(fault_regs_t *fr) -+{ -+ fr->ifa = 0 ; -+ fr->isr = 0 ; -+ -+ __asm__ ("rsm psr.ic;;") ; -+ ia64_srlz_d(); -+ __asm__ ("mov %0=cr.ifa" : "=r"(fr->ifa)); -+ __asm__ ("mov %0=cr.isr" : "=r"(fr->isr)); -+ __asm__ ("ssm psr.ic;;") ; -+ ia64_srlz_d(); -+} -+ -+static void -+show_kernel_regs (void) -+{ -+ unsigned long kr[8]; -+ int i; -+ -+ asm ("mov %0=ar.k0" : "=r"(kr[0])); asm ("mov %0=ar.k1" : "=r"(kr[1])); -+ asm ("mov %0=ar.k2" : "=r"(kr[2])); asm ("mov %0=ar.k3" : "=r"(kr[3])); -+ asm ("mov %0=ar.k4" : "=r"(kr[4])); asm ("mov %0=ar.k5" : "=r"(kr[5])); -+ asm ("mov %0=ar.k6" : "=r"(kr[6])); asm ("mov %0=ar.k7" : "=r"(kr[7])); -+ -+ for (i = 0; i < 4; ++i) -+ kdb_printf(" kr%d: %016lx kr%d: %016lx\n", 2*i, kr[2*i], 2*i+1, kr[2*i+1]); -+ kdb_printf("\n"); -+} -+ -+static int -+change_cur_stack_frame(int regno, unsigned long *contents) -+{ -+ unsigned long sof, i, cfm, sp, *bsp, __user *ubsp; -+ struct unw_frame_info info; -+ mm_segment_t old_fs; -+ int cpu = kdb_process_cpu(kdb_current_task); -+ struct kdb_running_process *krp = kdb_running_process + cpu; -+ -+ if (kdb_current_task != krp->p) { -+ kdb_printf("Stacked registers are not available for tasks that are not running.\n"); -+ kdb_printf("Use bt with a large BTARGS value instead\n"); -+ return 0; -+ } -+ unw_init_frame_info(&info, krp->p, krp->arch.sw); -+ do { -+ if (unw_unwind(&info) < 0) { -+ kdb_printf("Failed to unwind\n"); -+ return 0; -+ } -+ unw_get_sp(&info, &sp); -+ } while (sp <= (unsigned long) kdb_current_regs); -+ unw_get_bsp(&info, (unsigned long *) &bsp); -+ unw_get_cfm(&info, &cfm); -+ -+ if (!bsp) { -+ kdb_printf("Unable to get Current Stack Frame\n"); -+ return 0; -+ } -+ -+ sof = 
(cfm & 0x7f); -+ -+ if(((unsigned long)regno - 32) >= (sof - 2)) return 1; -+ -+ old_fs = set_fs(KERNEL_DS); -+ for (i = 0; i < (regno - 32); ++i) -+ bsp = ia64_rse_skip_regs(bsp, 1); -+ ubsp = (unsigned long __user *) bsp; -+ put_user(*contents, ubsp); -+ set_fs(old_fs); -+ -+ return 0 ; -+} -+ -+static int -+show_cur_stack_frame(int regno, unsigned long *contents) -+{ -+ unsigned long sof, i, cfm, val, sp, *bsp, __user *ubsp; -+ struct unw_frame_info info; -+ mm_segment_t old_fs; -+ int cpu = kdb_process_cpu(kdb_current_task); -+ struct kdb_running_process *krp = kdb_running_process + cpu; -+ -+ if (kdb_current_task != krp->p) { -+ kdb_printf("Stacked registers are not available for tasks that are not running.\n"); -+ kdb_printf("Use bt with a large BTARGS value instead\n"); -+ return 0; -+ } -+ unw_init_frame_info(&info, krp->p, krp->arch.sw); -+ do { -+ if (unw_unwind(&info) < 0) { -+ kdb_printf("Failed to unwind\n"); -+ return 0; -+ } -+ unw_get_sp(&info, &sp); -+ } while (sp <= (unsigned long) kdb_current_regs); -+ unw_get_bsp(&info, (unsigned long *) &bsp); -+ unw_get_cfm(&info, &cfm); -+ -+ if (!bsp) { -+ kdb_printf("Unable to display Current Stack Frame\n"); -+ return 0; -+ } -+ -+ sof = (cfm & 0x7f); -+ -+ if (regno) { -+ if ((unsigned) regno - 32 >= sof) -+ return 0; -+ bsp = ia64_rse_skip_regs(bsp, regno - 32); -+ old_fs = set_fs(KERNEL_DS); -+ ubsp = (unsigned long __user *) bsp; -+ get_user(val, ubsp); -+ set_fs(old_fs); -+ *contents = val; -+ return 1; -+ } -+ -+ old_fs = set_fs(KERNEL_DS); -+ for (i = 0; i < sof; ++i) { -+ ubsp = (unsigned long __user *) bsp; -+ get_user(val, ubsp); -+ kdb_printf(" r%lu: %016lx ", 32 + i, val); -+ if (!((i + 1) % 3)) -+ kdb_printf("\n"); -+ bsp = ia64_rse_skip_regs(bsp, 1); -+ } -+ kdb_printf("\n"); -+ set_fs(old_fs); -+ -+ return 0 ; -+} -+ -+/* -+ * kdba_getregcontents -+ * -+ * Return the contents of the register specified by the -+ * input string argument. Return an error if the string -+ * does not match a machine register. -+ * -+ * The following pseudo register names are supported: -+ * ®s - Prints address of exception frame -+ * kesp - Prints kernel stack pointer at time of fault -+ * sstk - Prints switch stack for ia64 -+ * % - Uses the value of the registers at the -+ * last time the user process entered kernel -+ * mode, instead of the registers at the time -+ * kdb was entered. -+ * -+ * Parameters: -+ * regname Pointer to string naming register -+ * regs Pointer to structure containing registers. -+ * Outputs: -+ * *contents Pointer to unsigned long to recieve register contents -+ * Returns: -+ * 0 Success -+ * KDB_BADREG Invalid register name -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * Note that this function is really machine independent. The kdb -+ * register list is not, however. 
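[Editorial note: in this copy of the patch the comparison operators inside the kdbreglist lookup loops that follow were lost ("for (i=0; i" is all that survives), so the search logic cannot be reconstructed verbatim here. The sketch below is a hypothetical, simplified illustration of the same offsetof-table pattern; the struct, field names and helper are invented for the example and are not the kernel's pt_regs layout or the KDB API.]

#include <stddef.h>
#include <string.h>

struct regs { unsigned long ip, sp, r8; };      /* stand-in for struct pt_regs */

static const struct regdesc {
	const char *name;
	size_t offset;
} regtab[] = {
	{ "ip", offsetof(struct regs, ip) },
	{ "sp", offsetof(struct regs, sp) },
	{ "r8", offsetof(struct regs, r8) },
};

/* Look a register up by name; returns 0 and fills *contents on success,
 * -1 for an unknown name (the real code would return KDB_BADREG).
 */
static int reg_lookup(const struct regs *regs, const char *name,
		      unsigned long *contents)
{
	size_t i;

	for (i = 0; i < sizeof(regtab) / sizeof(regtab[0]); i++) {
		if (strcmp(name, regtab[i].name) == 0) {
			*contents = *(const unsigned long *)
				((const char *)regs + regtab[i].offset);
			return 0;
		}
	}
	return -1;
}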
-+ */ -+ -+static struct kdbregs { -+ char *reg_name; -+ size_t reg_offset; -+} kdbreglist[] = { -+ { "psr", offsetof(struct pt_regs, cr_ipsr) }, -+ { "ifs", offsetof(struct pt_regs, cr_ifs) }, -+ { "ip", offsetof(struct pt_regs, cr_iip) }, -+ -+ { "unat", offsetof(struct pt_regs, ar_unat) }, -+ { "pfs", offsetof(struct pt_regs, ar_pfs) }, -+ { "rsc", offsetof(struct pt_regs, ar_rsc) }, -+ -+ { "rnat", offsetof(struct pt_regs, ar_rnat) }, -+ { "bsps", offsetof(struct pt_regs, ar_bspstore) }, -+ { "pr", offsetof(struct pt_regs, pr) }, -+ -+ { "ldrs", offsetof(struct pt_regs, loadrs) }, -+ { "ccv", offsetof(struct pt_regs, ar_ccv) }, -+ { "fpsr", offsetof(struct pt_regs, ar_fpsr) }, -+ -+ { "b0", offsetof(struct pt_regs, b0) }, -+ { "b6", offsetof(struct pt_regs, b6) }, -+ { "b7", offsetof(struct pt_regs, b7) }, -+ -+ { "r1",offsetof(struct pt_regs, r1) }, -+ { "r2",offsetof(struct pt_regs, r2) }, -+ { "r3",offsetof(struct pt_regs, r3) }, -+ -+ { "r8",offsetof(struct pt_regs, r8) }, -+ { "r9",offsetof(struct pt_regs, r9) }, -+ { "r10",offsetof(struct pt_regs, r10) }, -+ -+ { "r11",offsetof(struct pt_regs, r11) }, -+ { "r12",offsetof(struct pt_regs, r12) }, -+ { "r13",offsetof(struct pt_regs, r13) }, -+ -+ { "r14",offsetof(struct pt_regs, r14) }, -+ { "r15",offsetof(struct pt_regs, r15) }, -+ { "r16",offsetof(struct pt_regs, r16) }, -+ -+ { "r17",offsetof(struct pt_regs, r17) }, -+ { "r18",offsetof(struct pt_regs, r18) }, -+ { "r19",offsetof(struct pt_regs, r19) }, -+ -+ { "r20",offsetof(struct pt_regs, r20) }, -+ { "r21",offsetof(struct pt_regs, r21) }, -+ { "r22",offsetof(struct pt_regs, r22) }, -+ -+ { "r23",offsetof(struct pt_regs, r23) }, -+ { "r24",offsetof(struct pt_regs, r24) }, -+ { "r25",offsetof(struct pt_regs, r25) }, -+ -+ { "r26",offsetof(struct pt_regs, r26) }, -+ { "r27",offsetof(struct pt_regs, r27) }, -+ { "r28",offsetof(struct pt_regs, r28) }, -+ -+ { "r29",offsetof(struct pt_regs, r29) }, -+ { "r30",offsetof(struct pt_regs, r30) }, -+ { "r31",offsetof(struct pt_regs, r31) }, -+ -+}; -+ -+static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs); -+ -+int -+kdba_getregcontents(const char *regname, struct pt_regs *regs, unsigned long *contents) -+{ -+ int i; -+ -+ if (strcmp(regname, "isr") == 0) { -+ fault_regs_t fr ; -+ get_fault_regs(&fr) ; -+ *contents = fr.isr ; -+ return 0 ; -+ } -+ -+ if (!regs) { -+ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__); -+ return KDB_BADREG; -+ } -+ -+ if (strcmp(regname, "®s") == 0) { -+ *contents = (unsigned long)regs; -+ return 0; -+ } -+ -+ if (strcmp(regname, "sstk") == 0) { -+ *contents = (unsigned long)getprsregs(regs) ; -+ return 0; -+ } -+ -+ if (strcmp(regname, "ksp") == 0) { -+ *contents = (unsigned long) (regs + 1); -+ return 0; -+ } -+ -+ for (i=0; i -+ * -+ * Parameters: -+ * regname Pointer to string naming register -+ * regs Pointer to structure containing registers. -+ * contents Unsigned long containing new register contents -+ * Outputs: -+ * Returns: -+ * 0 Success -+ * KDB_BADREG Invalid register name -+ * Locking: -+ * None. 
-+ * Remarks: -+ */ -+ -+int -+kdba_setregcontents(const char *regname, -+ struct pt_regs *regs, -+ unsigned long contents) -+{ -+ int i, ret = 0, fixed = 0; -+ char *endp; -+ unsigned long regno; -+ -+ if (regname[0] == '%') { -+ regname++; -+ regs = (struct pt_regs *) -+ (kdb_current_task->thread.ksp - sizeof(struct pt_regs)); -+ } -+ -+ if (!regs) { -+ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__); -+ return KDB_BADREG; -+ } -+ -+ /* fixed registers */ -+ for (i=0; i (unsigned long)31) { -+ ret = change_cur_stack_frame(regno, &contents); -+ if(!ret) return 0; -+ } -+ } -+ -+ if ((i == nkdbreglist) -+ || (strlen(kdbreglist[i].reg_name) != strlen(regname)) -+ || ret) { -+ return KDB_BADREG; -+ } -+ -+ /* just in case of "standard" register */ -+ *(unsigned long *)((unsigned long)regs + kdbreglist[i].reg_offset) = -+ contents; -+ -+ return 0; -+} -+ -+/* -+ * kdba_dumpregs -+ * -+ * Dump the specified register set to the display. -+ * -+ * Parameters: -+ * regs Pointer to structure containing registers. -+ * type Character string identifying register set to dump -+ * extra string further identifying register (optional) -+ * Outputs: -+ * Returns: -+ * 0 Success -+ * Locking: -+ * None. -+ * Remarks: -+ * This function will dump the general register set if the type -+ * argument is NULL (struct pt_regs). The alternate register -+ * set types supported by this function: -+ * -+ * d Debug registers -+ * c Control registers -+ * u User registers at most recent entry to kernel -+ * i Interrupt registers -- same as "irr" command -+ * Following not yet implemented: -+ * m Model Specific Registers (extra defines register #) -+ * r Memory Type Range Registers (extra defines register) -+ * -+ * For now, all registers are covered as follows: -+ * -+ * rd - dumps all regs -+ * rd %isr - current interrupt status reg, read freshly -+ * rd s - valid stacked regs -+ * rd %sstk - gets switch stack addr. dump memory and search -+ * rd d - debug regs, may not be too useful -+ * rd k - dump kernel regs -+ * -+ * ARs TB Done -+ * OTHERS TB Decided ?? 
-+ * -+ * Intel wish list -+ * These will be implemented later - Srinivasa -+ * -+ * type action -+ * ---- ------ -+ * g dump all General static registers -+ * s dump all general Stacked registers -+ * f dump all Floating Point registers -+ * p dump all Predicate registers -+ * b dump all Branch registers -+ * a dump all Application registers -+ * c dump all Control registers -+ * -+ */ -+ -+int -+kdba_dumpregs(struct pt_regs *regs, -+ const char *type, -+ const char *extra) -+ -+{ -+ int i; -+ int count = 0; -+ -+ if (type -+ && (type[0] == 'u')) { -+ type = NULL; -+ regs = (struct pt_regs *) -+ (kdb_current_task->thread.ksp - sizeof(struct pt_regs)); -+ } -+ -+ if (type == NULL) { -+ if (!regs) { -+ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__); -+ return KDB_BADREG; -+ } -+ for (i=0; icr_iip + ia64_psr(regs)->ri : 0; -+} -+ -+int -+kdba_setpc(struct pt_regs *regs, kdb_machreg_t newpc) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return KDB_BADREG; -+ regs->cr_iip = newpc & ~0xf; -+ ia64_psr(regs)->ri = newpc & 0x3; -+ KDB_STATE_SET(IP_ADJUSTED); -+ return 0; -+} -+ -+struct kdba_main_loop_data { -+ kdb_reason_t reason; -+ kdb_reason_t reason2; -+ int error; -+ kdb_dbtrap_t db_result; -+ struct pt_regs *regs; -+ int ret; -+}; -+ -+/* -+ * do_kdba_main_loop -+ * -+ * Invoked from kdba_main_loop via unw_init_running() after that routine -+ * has pushed a struct switch_stack. -+ * -+ * Inputs: -+ * info Unwind information. -+ * data kdb data passed as void * to unw_init_running. -+ * Returns: -+ * none (unw_init_running requires void). vdata->ret is set to -+ * 0 KDB was invoked for an event which it wasn't responsible -+ * 1 KDB handled the event for which it was invoked. -+ * Outputs: -+ * none -+ * Locking: -+ * None. -+ * Remarks: -+ * unw_init_running() creates struct switch_stack then struct -+ * unw_frame_info. We get the address of the info so step over -+ * that to get switch_stack. Just hope that unw_init_running -+ * does not change its stack usage. unw_init_running adds padding -+ * to put switch_stack on a 16 byte boundary. -+ */ -+ -+static void -+do_kdba_main_loop(struct unw_frame_info *info, void *vdata) -+{ -+ struct kdba_main_loop_data *data = vdata; -+ struct switch_stack *sw, *prev_sw; -+ struct pt_regs *prev_regs; -+ struct kdb_running_process *krp = -+ kdb_running_process + smp_processor_id(); -+ KDB_DEBUG_STATE(__FUNCTION__, data->reason); -+ prev_sw = krp->arch.sw; -+ sw = (struct switch_stack *)(info+1); -+ /* padding from unw_init_running */ -+ sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15); -+ krp->arch.sw = sw; -+ prev_regs = krp->regs; -+ data->ret = kdb_save_running(data->regs, data->reason, data->reason2, -+ data->error, data->db_result); -+ kdb_unsave_running(data->regs); -+ krp->regs = prev_regs; -+ krp->arch.sw = prev_sw; -+} -+ -+/* -+ * kdba_main_loop -+ * -+ * Do any architecture specific set up before entering the main kdb loop. -+ * The primary function of this routine is to make all processes look the -+ * same to kdb, kdb must be able to list a process without worrying if the -+ * process is running or blocked, so make all processes look as though they -+ * are blocked. -+ * -+ * Inputs: -+ * reason The reason KDB was invoked -+ * error The hardware-defined error code -+ * error2 kdb's current reason code. Initially error but can change -+ * acording to kdb state. -+ * db_result Result from break or debug point. -+ * regs The exception frame at time of fault/breakpoint. 
If reason -+ * is SILENT or CPU_UP then regs is NULL, otherwise it should -+ * always be valid. -+ * Returns: -+ * 0 KDB was invoked for an event which it wasn't responsible -+ * 1 KDB handled the event for which it was invoked. -+ * Outputs: -+ * Builds a switch_stack structure before calling the main loop. -+ * Locking: -+ * None. -+ * Remarks: -+ * none. -+ */ -+ -+int -+kdba_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error, -+ kdb_dbtrap_t db_result, struct pt_regs *regs) -+{ -+ struct kdba_main_loop_data data; -+ KDB_DEBUG_STATE("kdba_main_loop", reason); -+ data.reason = reason; -+ data.reason2 = reason2; -+ data.error = error; -+ data.db_result = db_result; -+ data.regs = regs; -+ unw_init_running(do_kdba_main_loop, &data); -+ return(data.ret); -+} -+ -+void -+kdba_disableint(kdb_intstate_t *state) -+{ -+ unsigned long *fp = (unsigned long *)state; -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ *fp = flags; -+} -+ -+void -+kdba_restoreint(kdb_intstate_t *state) -+{ -+ unsigned long flags = *(unsigned long *)state; -+ local_irq_restore(flags); -+} -+ -+void -+kdba_setsinglestep(struct pt_regs *regs) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return; -+ ia64_psr(regs)->ss = 1; -+} -+ -+void -+kdba_clearsinglestep(struct pt_regs *regs) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return; -+ ia64_psr(regs)->ss = 0; -+} -+ -+/* -+ * kdb_tpa -+ * -+ * Virtual to Physical address translation command. -+ * -+ * tpa -+ * -+ * Parameters: -+ * argc Count of arguments in argv -+ * argv Space delimited command line arguments -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic if failure. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+#define __xtpa(x) ({ia64_va _v; asm("tpa %0=%1" : "=r"(_v.l) : "r"(x)); _v.l;}) -+static int -+kdba_tpa(int argc, const char **argv) -+{ -+ kdb_machreg_t addr; -+ int diag; -+ long offset = 0; -+ int nextarg; -+ char c; -+ -+ nextarg = 1; -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ if (kdb_getarea(c, addr)) -+ return(0); -+ kdb_printf("vaddr: 0x%lx , paddr: 0x%lx\n", addr, __xtpa(addr)); -+ return(0); -+} -+#if defined(CONFIG_NUMA) -+static int -+kdba_tpav(int argc, const char **argv) -+{ -+ kdb_machreg_t addr, end, paddr; -+ int diag; -+ long offset = 0; -+ int nextarg, nid, nid_old; -+ char c; -+ -+ nextarg = 1; -+ if (argc != 2) -+ return KDB_ARGCOUNT; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &end, &offset, NULL); -+ if (diag) -+ return diag; -+ if (kdb_getarea(c, addr)) -+ return(0); -+ if (kdb_getarea(c, end)) -+ return(0); -+ paddr=__xtpa(addr); -+ nid = paddr_to_nid(paddr); -+ kdb_printf("begin: 0x%lx , paddr: 0x%lx , nid: %d\n", addr, __xtpa(addr), nid); -+ for(;addr] -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ */ -+ -+static int -+kdba_sendinit(int argc, const char **argv) -+{ -+ unsigned long cpunum; -+ int diag; -+ -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ -+ diag = kdbgetularg(argv[1], &cpunum); -+ if (diag) -+ return diag; -+ -+ if (cpunum >= NR_CPUS || !cpu_online(cpunum)) -+ return KDB_BADCPUNUM; -+ -+ platform_send_ipi(cpunum, 0, IA64_IPI_DM_INIT, 0); -+ return 0; -+} -+ -+/* Invoked once from kdb_wait_for_cpus when waiting for cpus. 
For those cpus -+ * that have not responded to the normal KDB interrupt yet, hit them with an -+ * INIT event. -+ */ -+void -+kdba_wait_for_cpus(void) -+{ -+ int c; -+ if (KDB_FLAG(CATASTROPHIC)) -+ return; -+ kdb_printf(" Sending INIT to cpus that have not responded yet\n"); -+ for_each_online_cpu(c) -+ if (kdb_running_process[c].seqno < kdb_seqno - 1) -+ platform_send_ipi(c, 0, IA64_IPI_DM_INIT, 0); -+} -+ -+#endif /* CONFIG_SMP */ -+ -+/* This code is sensitive to the layout of the MCA/INIT stack (see mca_asm.h) -+ * and to the stack layout that ia64_mca_modify_original_stack() creates when -+ * it makes the original task look blocked. -+ */ -+static void -+kdba_handlers_modify(struct task_struct *task, int cpu) -+{ -+ struct kdb_running_process *work, *save; -+ work = kdb_running_process + cpu; -+ save = kdb_running_process_save + cpu; -+ *work = *save; -+ if (!kdba_show_handlers && REGION_NUMBER(task) >= RGN_GATE && -+ (task_thread_info(task)->flags & _TIF_MCA_INIT)) { -+ struct ia64_sal_os_state *sos = (struct ia64_sal_os_state *) -+ ((unsigned long)save->p + MCA_SOS_OFFSET); -+ char *p; -+ if (!sos->prev_task) -+ return; -+ work->p = sos->prev_task; -+ p = (char *)sos->prev_task->thread.ksp; -+ p += 16; -+ work->arch.sw = (struct switch_stack *)p; -+ p += sizeof(struct switch_stack); -+ work->regs = (struct pt_regs *)p; -+ work->irq_depth = 2; /* any value >1 will do */ -+ } -+} -+ -+/* Turn the display of the MCA/INIT handlers on or off, or display the status -+ * of the MCA/INIT handlers. -+ */ -+static int -+kdba_handlers(int argc, const char **argv) -+{ -+ int cpu; -+ struct kdb_running_process *krp; -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ if (strcmp(argv[1], "show") == 0) -+ kdba_show_handlers = 1; -+ else if (strcmp(argv[1], "hide") == 0) -+ kdba_show_handlers = 0; -+ else if (strcmp(argv[1], "status") != 0) { -+ kdb_printf("handlers \n"); -+ return 0; -+ } -+ for (cpu = 0, krp = kdb_running_process_save; cpu < NR_CPUS; ++cpu, ++krp) { -+ if (krp->p) -+ kdba_handlers_modify(krp->p, cpu); -+ } -+ if (strcmp(argv[1], "status") != 0) -+ return 0; -+ kdb_printf("handlers status is %s\n", kdba_show_handlers ? "'show'" : "'hide'"); -+ kdb_printf(" cpu handler task command original task command\n"); -+ for (cpu = 0, krp = kdb_running_process_save; cpu < NR_CPUS; ++cpu, ++krp) { -+ struct task_struct *p = krp->p; -+ if (!p) -+ continue; -+ kdb_printf("%4d", cpu); -+ if (task_thread_info(p)->flags & _TIF_MCA_INIT) { -+ struct ia64_sal_os_state *sos; -+ kdb_printf(" " kdb_machreg_fmt0 " %-*s ", -+ (unsigned long)p, (int)sizeof(p->comm), p->comm); -+ sos = (struct ia64_sal_os_state *)((unsigned long)p + MCA_SOS_OFFSET); -+ p = sos->prev_task; -+ } else -+ kdb_printf("%*s", (int)(1+2+16+1+sizeof(p->comm)+2), " "); -+ if (p) -+ kdb_printf(" " kdb_machreg_fmt0 " %-*s", -+ (unsigned long)p, (int)sizeof(p->comm), p->comm); -+ kdb_printf("\n"); -+ } -+ return 0; -+} -+ -+/* Executed once on each cpu at startup. */ -+void -+kdba_cpu_up(void) -+{ -+} -+ -+/* -+ * kdba_init -+ * -+ * Architecture specific initialization. -+ * -+ * Parameters: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * None. 
-+ */ -+ -+void -+kdba_init(void) -+{ -+ kdb_running_process_save = kzalloc( -+ sizeof(*kdb_running_process_save) * NR_CPUS, GFP_KERNEL); -+ BUG_ON(!kdb_running_process_save); -+ kdb_register("irr", kdba_sir, "", "Show interrupt registers", 0); -+ kdb_register("itm", kdba_itm, "", "Set new ITM value", 0); -+#if defined(CONFIG_SMP) -+ kdb_register("init", kdba_sendinit, "", "Send INIT to cpu", 0); -+#endif -+ kdb_register("pt_regs", kdba_pt_regs, "address", "Format struct pt_regs", 0); -+ kdb_register("switch_stack", kdba_switch_stack, "address", "Format struct switch_stack", 0); -+ kdb_register("minstate", kdba_minstate, "address", "Format PAL minstate", 0); -+ kdb_register("tpa", kdba_tpa, "", "Translate virtual to physical address", 0); -+#if defined(CONFIG_NUMA) -+ kdb_register("tpav", kdba_tpav, " ", "Verify that physical addresses corresponding to virtual addresses from to are in same node", 0); -+#endif -+ kdb_register("stackdepth", kdba_stackdepth, "[percentage]", "Print processes using >= stack percentage", 0); -+ kdb_register("cpuinfo", kdba_cpuinfo, "[cpu]", "Print struct cpuinfo_ia64", 0); -+ kdb_register("handlers", kdba_handlers, "", "Control the display of MCA/INIT handlers", 0); -+ -+#ifdef CONFIG_SERIAL_8250_CONSOLE -+ kdba_serial_console = KDBA_SC_STANDARD; -+#endif -+#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE -+ if (ia64_platform_is("sn2")) -+ kdba_serial_console = KDBA_SC_SGI_L1; -+#endif -+ return; -+} -+ -+/* -+ * kdba_adjust_ip -+ * -+ * Architecture specific adjustment of instruction pointer before leaving -+ * kdb. -+ * -+ * Parameters: -+ * reason The reason KDB was invoked -+ * error The hardware-defined error code -+ * regs The exception frame at time of fault/breakpoint. If reason -+ * is SILENT or CPU_UP then regs is NULL, otherwise it should -+ * always be valid. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * On IA64, KDB_ENTER() and KDB_ENTER_SLAVE() use break which is a fault, -+ * not a trap. The instruction pointer must be stepped before leaving -+ * kdb, otherwise we get a loop. -+ */ -+ -+void -+kdba_adjust_ip(kdb_reason_t reason, int error, struct pt_regs *regs) -+{ -+ if ((reason == KDB_REASON_ENTER || reason == KDB_REASON_ENTER_SLAVE) && -+ !KDB_STATE(IP_ADJUSTED)) { -+ if (KDB_NULL_REGS(regs)) -+ return; -+ if (ia64_psr(regs)->ri < 2) -+ kdba_setpc(regs, regs->cr_iip + ia64_psr(regs)->ri + 1); -+ else -+ kdba_setpc(regs, regs->cr_iip + 16); -+ } -+} -+ -+void -+kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs) -+{ -+ struct kdb_running_process *work, *save; -+ int cpu = smp_processor_id(); -+ work = kdb_running_process + cpu; -+ save = kdb_running_process_save + cpu; -+ *save = *work; -+ if (!regs) -+ return; -+ kdba_handlers_modify((struct task_struct *)regs->r13, cpu); -+} -+ -+void -+kdba_unsave_running(struct kdba_running_process *k, struct pt_regs *regs) -+{ -+ memset(kdb_running_process_save + smp_processor_id(), 0, -+ sizeof(*kdb_running_process_save)); -+} -+ -+void -+kdba_set_current_task(const struct task_struct *p) -+{ -+ int cpu = kdb_process_cpu(p); -+ struct kdb_running_process *work, *save; -+ work = kdb_running_process + cpu; -+ save = kdb_running_process_save + cpu; -+ kdb_current_task = p; -+ if (kdb_task_has_cpu(p)) { -+ kdb_current_regs = work->regs; -+ return; -+ } -+ kdb_current_regs = NULL; -+ /* For most blocked tasks we cannot get the pt_regs without doing an -+ * unwind, which is not worth doing. 
For tasks interrupted by -+ * MCA/INIT, when the user is not working on the handlers, we must use -+ * the registers at the time of interrupt. -+ */ -+ if (work->p == save->p || work->p != p) -+ return; -+ kdb_current_regs = (struct pt_regs *)(work->p->thread.ksp + 16 + -+ sizeof(struct switch_stack)); -+} -+ -+/* -+ * asm-ia64 uaccess.h supplies __copy_to_user which relies on MMU to -+ * trap invalid addresses in the _xxx fields. Verify the other address -+ * of the pair is valid by accessing the first and last byte ourselves, -+ * then any access violations should only be caused by the _xxx -+ * addresses, -+ */ -+ -+int -+kdba_putarea_size(unsigned long to_xxx, void *from, size_t size) -+{ -+ mm_segment_t oldfs = get_fs(); -+ int r; -+ char c; -+ c = *((volatile char *)from); -+ c = *((volatile char *)from + size - 1); -+ -+ if (to_xxx >> 61 <= 4) { -+ return kdb_putuserarea_size(to_xxx, from, size); -+ } -+ -+ set_fs(KERNEL_DS); -+ r = __copy_to_user_inatomic((void __user *)to_xxx, from, size); -+ set_fs(oldfs); -+ return r; -+} -+ -+int -+kdba_getarea_size(void *to, unsigned long from_xxx, size_t size) -+{ -+ mm_segment_t oldfs = get_fs(); -+ int r; -+ *((volatile char *)to) = '\0'; -+ *((volatile char *)to + size - 1) = '\0'; -+ -+ if (from_xxx >> 61 <= 4) -+ return kdb_getuserarea_size(to, from_xxx, size); -+ -+ set_fs(KERNEL_DS); -+ switch (size) { -+ case 1: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 1); -+ break; -+ case 2: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 2); -+ break; -+ case 4: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 4); -+ break; -+ case 8: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 8); -+ break; -+ default: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, size); -+ break; -+ } -+ set_fs(oldfs); -+ return r; -+} -+ -+int -+kdba_verify_rw(unsigned long addr, size_t size) -+{ -+ unsigned char data[(__force size_t) size]; -+ return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size)); -+} -+ -+#ifdef CONFIG_KDB_KDUMP -+void -+kdba_kdump_prepare(struct pt_regs *fixed_regs) -+{ -+ int i; -+ -+ /* Set on KEXEC bit on all onlinr cpus */ -+ for (i = 1; i < NR_CPUS; ++i) { -+ if (!cpu_online(i)) -+ continue; -+ -+ KDB_STATE_SET_CPU(KEXEC, i); -+ } -+ -+ machine_crash_shutdown(fixed_regs); -+} -+ -+void kdba_kdump_shutdown_slave(struct pt_regs *regs) -+{ -+ if (kdb_kdump_state != KDB_KDUMP_RESET) { -+ unw_init_running(kdump_cpu_freeze, NULL); -+ } -+} -+#endif ---- a/arch/ia64/kernel/head.S -+++ b/arch/ia64/kernel/head.S -@@ -259,8 +259,13 @@ start_ap: - /* - * Switch into virtual mode: - */ -+#ifdef CONFIG_KDB_HARDWARE_BREAKPOINTS -+#define IA64_PSR_KDB_FLAGS IA64_PSR_DB -+#else -+#define IA64_PSR_KDB_FLAGS 0 -+#endif - movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \ -- |IA64_PSR_DI|IA64_PSR_AC) -+ |IA64_PSR_DI|IA64_PSR_AC|IA64_PSR_KDB_FLAGS) - ;; - mov cr.ipsr=r16 - movl r17=1f ---- a/arch/ia64/kernel/mca.c -+++ b/arch/ia64/kernel/mca.c -@@ -88,6 +88,10 @@ - #include - #include - #include -+#ifdef CONFIG_KDB -+#include -+#include /* for switch state wrappers */ -+#endif /* CONFIG_KDB */ - - #include - #include -@@ -824,6 +828,14 @@ ia64_mca_rendez_int_handler(int rendez_i - */ - ia64_sal_mc_rendez(); - -+#ifdef CONFIG_KDB -+ /* We get here when the MCA monarch has entered and has woken up the -+ * slaves. Do a KDB rendezvous to meet the monarch cpu. 
-+ */ -+ if (monarch_cpu != -1) -+ KDB_ENTER_SLAVE(); -+#endif -+ - NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1); - - /* Wait for the monarch cpu to exit. */ -@@ -1379,6 +1391,19 @@ ia64_mca_handler(struct pt_regs *regs, s - mca_insert_tr(0x2); /*Reload dynamic itrs*/ - } - -+#ifdef CONFIG_KDB -+ kdb_save_flags(); -+ KDB_FLAG_CLEAR(CATASTROPHIC); -+ KDB_FLAG_CLEAR(RECOVERY); -+ if (recover) -+ KDB_FLAG_SET(RECOVERY); -+ else -+ KDB_FLAG_SET(CATASTROPHIC); -+ KDB_FLAG_SET(NOIPI); /* do not send IPI for MCA/INIT events */ -+ KDB_ENTER(); -+ kdb_restore_flags(); -+#endif /* CONFIG_KDB */ -+ - NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1); - - if (atomic_dec_return(&mca_count) > 0) { -@@ -1391,6 +1416,12 @@ ia64_mca_handler(struct pt_regs *regs, s - if (cpu_isset(i, mca_cpu)) { - monarch_cpu = i; - cpu_clear(i, mca_cpu); /* wake next cpu */ -+#ifdef CONFIG_KDB -+ /* -+ * No longer a monarch, report in as a slave. -+ */ -+ KDB_ENTER_SLAVE(); -+#endif - while (monarch_cpu != -1) - cpu_relax(); /* spin until last cpu leaves */ - set_curr_task(cpu, previous_current); -@@ -1400,6 +1431,7 @@ ia64_mca_handler(struct pt_regs *regs, s - } - } - } -+ - set_curr_task(cpu, previous_current); - ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; - monarch_cpu = -1; /* This frees the slaves and previous monarchs */ -@@ -1660,6 +1692,11 @@ default_monarch_init_process(struct noti - } - } - printk("\n\n"); -+#ifdef CONFIG_KDB -+ KDB_FLAG_SET(NOIPI); /* do not send IPI for MCA/INIT events */ -+ KDB_ENTER(); -+ KDB_FLAG_CLEAR(NOIPI); -+#else /* !CONFIG_KDB */ - if (read_trylock(&tasklist_lock)) { - do_each_thread (g, t) { - printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); -@@ -1667,6 +1704,7 @@ default_monarch_init_process(struct noti - } while_each_thread (g, t); - read_unlock(&tasklist_lock); - } -+#endif /* CONFIG_KDB */ - /* FIXME: This will not restore zapped printk locks. */ - RESTORE_LOGLEVEL(console_loglevel); - return NOTIFY_DONE; -@@ -1699,6 +1737,20 @@ ia64_init_handler(struct pt_regs *regs, - int cpu = smp_processor_id(); - struct ia64_mca_notify_die nd = - { .sos = sos, .monarch_cpu = &monarch_cpu }; -+#ifdef CONFIG_KDB -+ int kdba_recalcitrant = 0; -+ /* kdba_wait_for_cpus() sends INIT to recalcitrant cpus which ends up -+ * calling this routine. If KDB is waiting for the IPI to be processed -+ * then treat all INIT events as slaves, kdb_initial_cpu is the -+ * monarch. -+ */ -+ if (KDB_STATE(WAIT_IPI)) { -+ monarch_cpu = kdb_initial_cpu; -+ sos->monarch = 0; -+ KDB_STATE_CLEAR(WAIT_IPI); -+ kdba_recalcitrant = 1; -+ } -+#endif /* CONFIG_KDB */ - - NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0); - -@@ -1742,6 +1794,11 @@ ia64_init_handler(struct pt_regs *regs, - #else - while (monarch_cpu == -1) - cpu_relax(); /* spin until monarch enters */ -+#ifdef CONFIG_KDB -+ KDB_ENTER_SLAVE(); -+ if (kdba_recalcitrant) -+ monarch_cpu = -1; -+#endif /* CONFIG_KDB */ - #endif - - NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1); -@@ -1776,6 +1833,14 @@ ia64_init_handler(struct pt_regs *regs, - mprintk("Delaying for 5 seconds...\n"); - udelay(5*1000000); - ia64_wait_for_slaves(cpu, "INIT"); -+ -+#ifdef CONFIG_KDB -+ kdb_save_flags(); -+ KDB_FLAG_SET(NOIPI); /* do not send IPI for MCA/INIT events */ -+ KDB_ENTER(); -+ kdb_restore_flags(); -+#endif /* CONFIG_KDB */ -+ - /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through - * to default_monarch_init_process() above and just print all the - * tasks. 
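[Editorial note: the MCA/INIT hunks above all follow the same monarch/slave rendezvous discipline, in which one CPU calls KDB_ENTER() as the monarch while the others call KDB_ENTER_SLAVE() and spin until monarch_cpu is reset to -1. The fragment below is a deliberately simplified, userspace-style sketch of that flow, not kernel code; the function name and the atomic flag are illustrative assumptions.]

#include <stdatomic.h>

static atomic_int monarch_cpu = -1;     /* -1 means no monarch is active */

static void debugger_event(int this_cpu, int i_am_monarch)
{
	if (i_am_monarch) {
		atomic_store(&monarch_cpu, this_cpu);
		/* ... the monarch runs the interactive debugger here ... */
		atomic_store(&monarch_cpu, -1);   /* releases the slaves */
	} else {
		/* Slaves park here until the monarch leaves. */
		while (atomic_load(&monarch_cpu) != -1)
			;                         /* cpu_relax() in real code */
	}
}

int main(void)
{
	debugger_event(0, 1);   /* CPU 0 enters as the monarch and leaves again */
	return 0;
}

In the patch itself the role of the flag is played by the global monarch_cpu, and the slave spin corresponds to the "while (monarch_cpu != -1) cpu_relax();" loops visible in the mca.c hunks above.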
-@@ -2014,6 +2079,13 @@ ia64_mca_init(void) - printk(KERN_INFO "Increasing MCA rendezvous timeout from " - "%ld to %ld milliseconds\n", timeout, isrv.v0); - timeout = isrv.v0; -+#ifdef CONFIG_KDB -+ /* kdb must wait long enough for the MCA timeout to trip -+ * and process. The MCA timeout is in milliseconds. -+ */ -+ kdb_wait_for_cpus_secs = max(kdb_wait_for_cpus_secs, -+ (int)(timeout/1000) + 10); -+#endif /* CONFIG_KDB */ - NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0); - continue; - } ---- a/arch/ia64/kernel/smp.c -+++ b/arch/ia64/kernel/smp.c -@@ -36,6 +36,11 @@ - #include - #include - #include -+ -+#ifdef CONFIG_KDB -+#include -+#endif /* CONFIG_KDB */ -+ - #include - #include - #include -@@ -65,6 +70,9 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(uns - #define IPI_CPU_STOP 1 - #define IPI_CALL_FUNC_SINGLE 2 - #define IPI_KDUMP_CPU_STOP 3 -+#ifdef CONFIG_KDB -+#define IPI_KDB_INTERRUPT 4 -+#endif /* CONFIG_KDB */ - - /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ - static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation); -@@ -125,6 +133,12 @@ handle_IPI (int irq, void *dev_id) - unw_init_running(kdump_cpu_freeze, NULL); - break; - #endif -+#ifdef CONFIG_KDB -+ case IPI_KDB_INTERRUPT: -+ if (!kdb_ipi(get_irq_regs(), NULL)) -+ printk(KERN_ERR "kdb_ipi() rejected IPI_KDB_INTERRUPT\n"); -+ break; -+#endif - default: - printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", - this_cpu, which); -@@ -334,3 +348,12 @@ setup_profiling_timer (unsigned int mult - { - return -EINVAL; - } -+ -+#if defined(CONFIG_KDB) -+void -+smp_kdb_stop(void) -+{ -+ if (!KDB_FLAG(NOIPI)) -+ send_IPI_allbutself(IPI_KDB_INTERRUPT); -+} -+#endif /* CONFIG_KDB */ ---- a/arch/ia64/kernel/traps.c -+++ b/arch/ia64/kernel/traps.c -@@ -13,6 +13,9 @@ - #include - #include /* For unblank_screen() */ - #include /* for EXPORT_SYMBOL */ -+#ifdef CONFIG_KDB -+#include -+#endif /* CONFIG_KDB */ - #include - #include - #include /* for ssleep() */ -@@ -77,6 +80,10 @@ die (const char *str, struct pt_regs *re - if (!regs) - return 1; - -+#ifdef CONFIG_KDB -+ (void)kdb(KDB_REASON_OOPS, err, regs); -+#endif /* CONFIG_KDB */ -+ - if (panic_on_oops) - panic("Fatal exception"); - -@@ -170,6 +177,17 @@ __kprobes ia64_bad_break (unsigned long - if (break_num < 0x80000) { - sig = SIGILL; code = __ILL_BREAK; - } else { -+#ifdef CONFIG_KDB -+ if (break_num == KDB_BREAK_ENTER && -+ kdb(KDB_REASON_ENTER, break_num, regs)) -+ return; /* kdb handled it */ -+ if (break_num == KDB_BREAK_ENTER_SLAVE && -+ kdb(KDB_REASON_ENTER_SLAVE, break_num, regs)) -+ return; /* kdb handled it */ -+ if (break_num == KDB_BREAK_BREAK && -+ kdb(KDB_REASON_BREAK, break_num, regs)) -+ return; /* kdb handled it */ -+#endif /* CONFIG_KDB */ - if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP) - == NOTIFY_STOP) - return; -@@ -564,6 +582,10 @@ ia64_fault (unsigned long vector, unsign - if (notify_die(DIE_FAULT, "ia64_fault", ®s, vector, siginfo.si_code, SIGTRAP) - == NOTIFY_STOP) - return; -+#ifdef CONFIG_KDB -+ if (!user_mode(®s) && kdb(KDB_REASON_DEBUG, vector, ®s)) -+ return; /* kdb handled this */ -+#endif /* CONFIG_KDB */ - siginfo.si_signo = SIGTRAP; - siginfo.si_errno = 0; - siginfo.si_addr = (void __user *) ifa; ---- a/arch/ia64/kernel/unwind.c -+++ b/arch/ia64/kernel/unwind.c -@@ -57,14 +57,27 @@ - - #ifdef UNW_DEBUG - static unsigned int unw_debug_level = UNW_DEBUG; --# define UNW_DEBUG_ON(n) unw_debug_level >= n -- /* Do not code a printk level, not all debug lines end in newline */ --# define 
UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) -+# ifdef CONFIG_KDB -+# include -+# include -+# define UNW_KMALLOC(s, f) debug_kmalloc(s, f) -+# define UNW_KFREE(p) debug_kfree(p) -+# define UNW_DEBUG_ON(n) (unw_debug_level >= n && !KDB_IS_RUNNING()) -+# define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) kdb_printf(__VA_ARGS__) -+# else /* !CONFIG_KDB */ -+# define UNW_DEBUG_ON(n) unw_debug_level >= n -+ /* Do not code a printk level, not all debug lines end in newline */ -+# define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) -+# define UNW_KMALLOC(s, f) kmalloc(s, f) -+# define UNW_KFREE(p) kfree(p) -+# endif /* CONFIG_KDB */ - # undef inline - # define inline - #else /* !UNW_DEBUG */ - # define UNW_DEBUG_ON(n) 0 - # define UNW_DPRINT(n, ...) -+# define UNW_KMALLOC(s, f) kmalloc(s, f) -+# define UNW_KFREE(p) kfree(p) - #endif /* UNW_DEBUG */ - - #if UNW_STATS -@@ -73,10 +86,10 @@ - # define STAT(x...) - #endif - --#define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC) --#define free_reg_state(usr) kfree(usr) --#define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC) --#define free_labeled_state(usr) kfree(usr) -+#define alloc_reg_state() UNW_KMALLOC(sizeof(struct unw_reg_state), GFP_ATOMIC) -+#define free_reg_state(usr) UNW_KFREE(usr) -+#define alloc_labeled_state() UNW_KMALLOC(sizeof(struct unw_labeled_state), GFP_ATOMIC) -+#define free_labeled_state(usr) UNW_KFREE(usr) - - typedef unsigned long unw_word; - typedef unsigned char unw_hash_index_t; -@@ -2092,7 +2105,7 @@ unw_add_unwind_table (const char *name, - return NULL; - } - -- table = kmalloc(sizeof(*table), GFP_USER); -+ table = UNW_KMALLOC(sizeof(*table), GFP_USER); - if (!table) - return NULL; - -@@ -2165,7 +2178,7 @@ unw_remove_unwind_table (void *handle) - write_unlock(&tmp->lock); - } - -- kfree(table); -+ UNW_KFREE(table); - } - - static int __init -@@ -2199,7 +2212,7 @@ create_gate_table (void) - size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset)); - size += 8; /* reserve space for "end of table" marker */ - -- unw.gate_table = kmalloc(size, GFP_KERNEL); -+ unw.gate_table = UNW_KMALLOC(size, GFP_KERNEL); - if (!unw.gate_table) { - unw.gate_table_size = 0; - printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__); diff --git a/patches.suse/kdb-usb-rework b/patches.suse/kdb-usb-rework deleted file mode 100644 index 94b70b1..0000000 --- a/patches.suse/kdb-usb-rework +++ /dev/null @@ -1,398 +0,0 @@ -From: Jeff Mahoney -Subject: kdb: Cleanup KDB_USB -Patch-mainline: Who knows? - - kdb-common adds a KDB interface for USB keyboards. Unfortunately it - ends up duplicating some core USB functions wholesale. As these functions - change, the KDB implementation lags behind. - - This patch avoids the duplication of functionality and hooks into the - existing functions. - -Signed-off-by: Jeff Mahoney ---- - arch/ia64/kdb/kdba_io.c | 25 ++++ - drivers/usb/host/ehci-q.c | 276 +++++++++------------------------------------- - 2 files changed, 79 insertions(+), 222 deletions(-) - ---- a/arch/ia64/kdb/kdba_io.c -+++ b/arch/ia64/kdb/kdba_io.c -@@ -70,7 +70,11 @@ static unsigned char kdb_usb_keycode[256 - * Attach a USB keyboard to kdb. 
- */ - int --kdb_usb_keyboard_attach(struct urb *urb, unsigned char *buffer, void *poll_func) -+kdb_usb_keyboard_attach(struct urb *urb, unsigned char *buffer, -+ void *poll_func, void *compl_func, -+ kdb_hc_keyboard_attach_t kdb_hc_keyboard_attach, -+ kdb_hc_keyboard_detach_t kdb_hc_keyboard_detach, -+ unsigned int bufsize, struct urb *hid_urb) - { - int i; - int rc = -1; -@@ -93,6 +97,17 @@ kdb_usb_keyboard_attach(struct urb *urb, - kdb_usb_kbds[i].buffer = buffer; - kdb_usb_kbds[i].poll_func = poll_func; - -+ kdb_usb_kbds[i].kdb_hc_urb_complete = compl_func; -+ kdb_usb_kbds[i].kdb_hc_keyboard_attach = kdb_hc_keyboard_attach; -+ kdb_usb_kbds[i].kdb_hc_keyboard_detach = kdb_hc_keyboard_detach; -+ -+ /* USB Host Controller specific Keyboadr attach callback. -+ * Currently only UHCI has this callback. -+ */ -+ if (kdb_usb_kbds[i].kdb_hc_keyboard_attach) -+ kdb_usb_kbds[i].kdb_hc_keyboard_attach(i, bufsize); -+ -+ - rc = 0; /* success */ - - break; -@@ -125,11 +140,19 @@ kdb_usb_keyboard_detach(struct urb *urb) - if (kdb_usb_kbds[i].urb != urb) - continue; - -+ /* USB Host Controller specific Keyboard detach callback. -+ * Currently only UHCI has this callback. -+ */ -+ if (kdb_usb_kbds[i].kdb_hc_keyboard_detach) -+ kdb_usb_kbds[i].kdb_hc_keyboard_detach(urb, i); -+ -+ - /* found it, clear the index */ - kdb_usb_kbds[i].urb = NULL; - kdb_usb_kbds[i].buffer = NULL; - kdb_usb_kbds[i].poll_func = NULL; - kdb_usb_kbds[i].caps_lock = 0; -+ kdb_usb_kbds[i].hid_urb = NULL; - - rc = 0; /* success */ - ---- a/drivers/usb/host/ehci-q.c -+++ b/drivers/usb/host/ehci-q.c -@@ -296,6 +296,28 @@ __acquires(ehci->lock) - spin_lock (&ehci->lock); - } - -+/* -+ * Lock hackery here... -+ * ehci_urb_done() makes the assumption that it's called with ehci->lock held. -+ * So, lock it if it isn't already. -+ */ -+static void -+kdb_ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) -+__acquires(ehci->lock) -+__releases(ehci->lock) -+{ -+#ifdef CONFIG_KDB_USB -+ int locked; -+ if (!spin_is_locked(&ehci->lock)) { -+ spin_lock(&ehci->lock); -+ locked = 1; -+ } -+ ehci_urb_done(ehci, urb, status); -+ if (locked) -+ spin_unlock(&ehci->lock); -+#endif -+} -+ - static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); - static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); - -@@ -305,9 +327,16 @@ static int qh_schedule (struct ehci_hcd - * Process and free completed qtds for a qh, returning URBs to drivers. - * Chases up to qh->hw_current. Returns number of completions called, - * indicating how much "real" work we did. -+ * -+ * The KDB part is ugly but KDB wants its own copy and it keeps getting -+ * out of sync. The difference with kdb=1 is that we will only process -+ * qtds that are associated with kdburb. ehci_urb_done also releases -+ * and retakes ehci->lock. We may not have that lock while KDB is -+ * running. 
- */ - static unsigned --qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) -+__qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, int kdb, -+ struct urb *kdburb) - { - struct ehci_qtd *last, *end = qh->dummy; - struct list_head *entry, *tmp; -@@ -318,6 +347,9 @@ qh_completions (struct ehci_hcd *ehci, s - const __le32 halt = HALT_BIT(ehci); - struct ehci_qh_hw *hw = qh->hw; - -+ if (kdb && !kdburb) -+ return 0; -+ - if (unlikely (list_empty (&qh->qtd_list))) - return count; - -@@ -353,10 +385,18 @@ qh_completions (struct ehci_hcd *ehci, s - qtd = list_entry (entry, struct ehci_qtd, qtd_list); - urb = qtd->urb; - -+ if (kdburb && urb != kdburb) -+ continue; -+ - /* clean up any state from previous QTD ...*/ - if (last) { - if (likely (last->urb != urb)) { -- ehci_urb_done(ehci, last->urb, last_status); -+ if (kdb) -+ kdb_ehci_urb_done(ehci, last->urb, -+ last_status); -+ else -+ ehci_urb_done(ehci, last->urb, -+ last_status); - count++; - last_status = -EINPROGRESS; - } -@@ -522,7 +562,10 @@ halt: - - /* last urb's completion might still need calling */ - if (likely (last != NULL)) { -- ehci_urb_done(ehci, last->urb, last_status); -+ if (kdb) -+ kdb_ehci_urb_done(ehci, last->urb, last_status); -+ else -+ ehci_urb_done(ehci, last->urb, last_status); - count++; - ehci_qtd_free (ehci, last); - } -@@ -577,227 +620,18 @@ halt: - return count; - } - --#ifdef CONFIG_KDB_USB --/* -- * This routine is basically a copy of qh_completions() for use by KDB. -- * It is modified to only work on qtds which are associated -- * with 'kdburb'. Also, there are some fixups related to locking. -- */ --unsigned --qh_completions_kdb(struct ehci_hcd *ehci, struct ehci_qh *qh, struct urb *kdburb) -+static unsigned -+qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) - { -- struct ehci_qtd *last = NULL, *end = qh->dummy; -- struct list_head *entry, *tmp; -- int last_status = -EINPROGRESS; -- int stopped; -- unsigned count = 0; -- int do_status = 0; -- u8 state; -- u32 halt = HALT_BIT(ehci); -- -- /* verify params are valid */ -- if (!qh || !kdburb) -- return 0; -- -- if (unlikely (list_empty (&qh->qtd_list))) -- return count; -- -- /* completions (or tasks on other cpus) must never clobber HALT -- * till we've gone through and cleaned everything up, even when -- * they add urbs to this qh's queue or mark them for unlinking. -- * -- * NOTE: unlinking expects to be done in queue order. -- */ -- state = qh->qh_state; -- qh->qh_state = QH_STATE_COMPLETING; -- stopped = (state == QH_STATE_IDLE); -- -- /* remove de-activated QTDs from front of queue. -- * after faults (including short reads), cleanup this urb -- * then let the queue advance. -- * if queue is stopped, handles unlinks. -- */ -- list_for_each_safe (entry, tmp, &qh->qtd_list) { -- struct ehci_qtd *qtd; -- struct urb *urb; -- u32 token = 0; -- int qtd_status; -- -- qtd = list_entry (entry, struct ehci_qtd, qtd_list); -- urb = qtd->urb; -- -- if (urb != kdburb) -- continue; -- -- /* clean up any state from previous QTD ...*/ -- if (last) { -- if (likely (last->urb != urb)) { -- /* -- * Lock hackery here... -- * ehci_urb_done() makes the assumption -- * that it's called with ehci->lock held. -- * So, lock it if it isn't already. -- */ -- if (!spin_is_locked(&ehci->lock)) -- spin_lock(&ehci->lock); -- -- ehci_urb_done(ehci, last->urb, last_status); -- -- /* -- * ehci_urb_done() releases and reacquires -- * ehci->lock, so release it here. 
-- */ -- if (spin_is_locked(&ehci->lock)) -- spin_unlock (&ehci->lock); -- -- count++; -- } -- ehci_qtd_free (ehci, last); -- last = NULL; -- last_status = -EINPROGRESS; -- } -- -- /* ignore urbs submitted during completions we reported */ -- if (qtd == end) -- break; -- -- /* hardware copies qtd out of qh overlay */ -- rmb (); -- token = hc32_to_cpu(ehci, qtd->hw_token); -- -- /* always clean up qtds the hc de-activated */ -- if ((token & QTD_STS_ACTIVE) == 0) { -- -- if ((token & QTD_STS_HALT) != 0) { -- stopped = 1; -- -- /* magic dummy for some short reads; qh won't advance. -- * that silicon quirk can kick in with this dummy too. -- */ -- } else if (IS_SHORT_READ (token) -- && !(qtd->hw_alt_next -- & EHCI_LIST_END(ehci))) { -- stopped = 1; -- goto halt; -- } -- -- /* stop scanning when we reach qtds the hc is using */ -- } else if (likely (!stopped -- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) { -- break; -- -- } else { -- stopped = 1; -- -- if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) -- last_status = -ESHUTDOWN; -- -- /* ignore active urbs unless some previous qtd -- * for the urb faulted (including short read) or -- * its urb was canceled. we may patch qh or qtds. -- */ -- if (likely(last_status == -EINPROGRESS && -- !urb->unlinked)) -- continue; -- -- /* issue status after short control reads */ -- if (unlikely (do_status != 0) -- && QTD_PID (token) == 0 /* OUT */) { -- do_status = 0; -- continue; -- } -- -- /* token in overlay may be most current */ -- if (state == QH_STATE_IDLE -- && cpu_to_hc32(ehci, qtd->qtd_dma) -- == qh->hw_current) -- token = hc32_to_cpu(ehci, qh->hw_token); -- -- /* force halt for unlinked or blocked qh, so we'll -- * patch the qh later and so that completions can't -- * activate it while we "know" it's stopped. -- */ -- if ((halt & qh->hw_token) == 0) { --halt: -- qh->hw_token |= halt; -- wmb (); -- } -- } -- -- /* remove it from the queue */ -- qtd_status = qtd_copy_status(ehci, urb, qtd->length, token); -- if (unlikely(qtd_status == -EREMOTEIO)) { -- do_status = (!urb->unlinked && -- usb_pipecontrol(urb->pipe)); -- qtd_status = 0; -- } -- if (likely(last_status == -EINPROGRESS)) -- last_status = qtd_status; -- -- if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { -- last = list_entry (qtd->qtd_list.prev, -- struct ehci_qtd, qtd_list); -- last->hw_next = qtd->hw_next; -- } -- list_del (&qtd->qtd_list); -- last = qtd; -- } -- -- /* last urb's completion might still need calling */ -- if (likely (last != NULL)) { -- /* -- * Lock hackery here... -- * ehci_urb_done() makes the assumption -- * that it's called with ehci->lock held. -- * So, lock it if it isn't already. -- */ -- if (!spin_is_locked(&ehci->lock)) -- spin_lock(&ehci->lock); -- -- ehci_urb_done(ehci, last->urb, last_status); -- -- /* -- * ehci_urb_done() releases and reacquires -- * ehci->lock, so release it here. -- */ -- if (spin_is_locked(&ehci->lock)) -- spin_unlock (&ehci->lock); -- -- count++; -- ehci_qtd_free (ehci, last); -- } -- -- /* restore original state; caller must unlink or relink */ -- qh->qh_state = state; -- -- /* be sure the hardware's done with the qh before refreshing -- * it after fault cleanup, or recovering from silicon wrongly -- * overlaying the dummy qtd (which reduces DMA chatter). -- */ -- if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) { -- switch (state) { -- case QH_STATE_IDLE: -- qh_refresh(ehci, qh); -- break; -- case QH_STATE_LINKED: -- /* should be rare for periodic transfers, -- * except maybe high bandwidth ... 
-- */ -- if ((cpu_to_hc32(ehci, QH_SMASK) -- & qh->hw_info2) != 0) { -- intr_deschedule (ehci, qh); -- (void) qh_schedule (ehci, qh); -- } else -- unlink_async (ehci, qh); -- break; -- /* otherwise, unlink already started */ -- } -- } -- -- return count; -+ return __qh_completions(ehci, qh, 0, NULL); - } - --#endif /* CONFIG_KDB_USB */ -+unsigned -+qh_completions_kdb(struct ehci_hcd *ehci, struct ehci_qh *qh, -+ struct urb *kdburb) -+{ -+ return __qh_completions(ehci, qh, 1, kdburb); -+} - - /*-------------------------------------------------------------------------*/ - diff --git a/patches.suse/kdb-vm-api-changes-for-2-6-34 b/patches.suse/kdb-vm-api-changes-for-2-6-34 deleted file mode 100644 index b59489f..0000000 --- a/patches.suse/kdb-vm-api-changes-for-2-6-34 +++ /dev/null @@ -1,46 +0,0 @@ -From: Jeff Mahoney -Subject: kdb: VM API changes for 2.6.34 -Patch-mainline: Whenever KBB is - - vm_area_struct->anon_vma_node was renamed to anon_vma_chain - - Bootmem is now optional - -Signed-off-by: Jeff Mahoney -Acked-by: Jeff Mahoney ---- - kdb/modules/kdbm_vm.c | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - ---- a/kdb/modules/kdbm_vm.c -+++ b/kdb/modules/kdbm_vm.c -@@ -90,8 +90,8 @@ kdbm_print_vm(struct vm_area_struct *vp, - kdb_printf("shared.vm_set.list.prev = 0x%p\n", (void *) vp->shared.vm_set.list.prev); - kdb_printf("shared.vm_set.parent = 0x%p\n", (void *) vp->shared.vm_set.parent); - kdb_printf("shared.vm_set.head = 0x%p\n", (void *) vp->shared.vm_set.head); -- kdb_printf("anon_vma_node.next = 0x%p\n", (void *) vp->anon_vma_node.next); -- kdb_printf("anon_vma_node.prev = 0x%p\n", (void *) vp->anon_vma_node.prev); -+ kdb_printf("anon_vma_chain.next = 0x%p\n", (void *) vp->anon_vma_chain.next); -+ kdb_printf("anon_vma_chain.prev = 0x%p\n", (void *) vp->anon_vma_chain.prev); - kdb_printf("vm_ops = 0x%p\n", (void *) vp->vm_ops); - if (vp->vm_ops != NULL) { - kdb_printf("vm_ops->open = 0x%p\n", vp->vm_ops->open); -@@ -303,7 +303,9 @@ kdbm_pgdat(int argc, const char **argv) - #ifdef CONFIG_FLAT_NODE_MEM_MAP - kdb_printf(" node_mem_map = 0x%p\n", pgdatp->node_mem_map); - #endif -+#ifndef CONFIG_NO_BOOTMEM - kdb_printf(" bdata = 0x%p", pgdatp->bdata); -+#endif - kdb_printf(" node_start_pfn = 0x%lx\n", pgdatp->node_start_pfn); - kdb_printf(" node_present_pages = %ld (0x%lx)\n", - pgdatp->node_present_pages, pgdatp->node_present_pages); -@@ -752,7 +754,7 @@ kdbm_filp(int argc, const char **argv) - f.f_dentry, f.f_vfsmnt, f.f_op); - - kdb_printf(" f_count = %ld f_flags = 0x%x f_mode = 0x%x\n", -- f.f_count, f.f_flags, f.f_mode); -+ atomic_long_read(&f.f_count), f.f_flags, f.f_mode); - - kdb_printf(" f_pos = %Ld\n", f.f_pos); - #ifdef CONFIG_SECURITY diff --git a/patches.suse/kdb-x86 b/patches.suse/kdb-x86 deleted file mode 100644 index 72fc62e..0000000 --- a/patches.suse/kdb-x86 +++ /dev/null @@ -1,27127 +0,0 @@ -From: Martin Hicks -Date: Mon, 07 Dec 2009 11:52:50 -0600 -Subject: kdb-v4.4-2.6.32-x86-3 -References: FATE#303971 -X-URL: ftp://oss.sgi.com/www/projects/kdb/download/v4.4/ -Patch-mainline: Not yet - -The KDB x86 code. 
- -Acked-by: Jeff Mahoney ---- - - arch/x86/Kconfig.debug | 87 - arch/x86/Makefile | 3 - arch/x86/include/asm/ansidecl.h | 5 - arch/x86/include/asm/ansidecl_32.h | 383 ++ - arch/x86/include/asm/ansidecl_64.h | 383 ++ - arch/x86/include/asm/bfd.h | 5 - arch/x86/include/asm/bfd_32.h | 4921 +++++++++++++++++++++++++++++++ - arch/x86/include/asm/bfd_64.h | 4917 +++++++++++++++++++++++++++++++ - arch/x86/include/asm/irq_vectors.h | 7 - arch/x86/include/asm/kdb.h | 140 - arch/x86/include/asm/kdbprivate.h | 241 + - arch/x86/include/asm/kdebug.h | 2 - arch/x86/include/asm/ptrace.h | 23 - arch/x86/kdb/ChangeLog | 262 + - arch/x86/kdb/ChangeLog_32 | 865 +++++ - arch/x86/kdb/ChangeLog_64 | 447 ++ - arch/x86/kdb/Makefile | 29 - arch/x86/kdb/kdb_cmds_32 | 17 - arch/x86/kdb/kdb_cmds_64 | 18 - arch/x86/kdb/kdba_bp.c | 914 +++++ - arch/x86/kdb/kdba_bt.c | 5757 +++++++++++++++++++++++++++++++++++++ - arch/x86/kdb/kdba_id.c | 261 + - arch/x86/kdb/kdba_io.c | 666 ++++ - arch/x86/kdb/kdba_support.c | 1536 +++++++++ - arch/x86/kdb/pc_keyb.h | 137 - arch/x86/kdb/x86-dis.c | 4688 ++++++++++++++++++++++++++++++ - arch/x86/kernel/apic/io_apic.c | 8 - arch/x86/kernel/dumpstack.c | 12 - arch/x86/kernel/entry_32.S | 20 - arch/x86/kernel/entry_64.S | 27 - arch/x86/kernel/reboot.c | 35 - arch/x86/kernel/traps.c | 27 - 32 files changed, 26843 insertions(+) - ---- a/arch/x86/Kconfig.debug -+++ b/arch/x86/Kconfig.debug -@@ -310,4 +310,91 @@ config DEBUG_STRICT_USER_COPY_CHECKS - - If unsure, or if you run an older (pre 4.4) gcc, say N. - -+config KDB -+ bool "Built-in Kernel Debugger support" -+ depends on DEBUG_KERNEL -+ select KALLSYMS -+ select KALLSYMS_ALL -+ help -+ This option provides a built-in kernel debugger. The built-in -+ kernel debugger contains commands which allow memory to be examined, -+ instructions to be disassembled and breakpoints to be set. For details, -+ see Documentation/kdb/kdb.mm and the manual pages kdb_bt, kdb_ss, etc. -+ Kdb can also be used via the serial port. Set up the system to -+ have a serial console (see Documentation/serial-console.txt). -+ The key sequence KDB on the serial port will cause the -+ kernel debugger to be entered with input from the serial port and -+ output to the serial console. If unsure, say N. -+ -+config KDB_MODULES -+ tristate "KDB modules" -+ depends on KDB -+ help -+ KDB can be extended by adding your own modules, in directory -+ kdb/modules. This option selects the way that these modules should -+ be compiled, as free standing modules (select M) or built into the -+ kernel (select Y). If unsure say M. -+ -+config KDB_OFF -+ bool "KDB off by default" -+ depends on KDB -+ help -+ Normally kdb is activated by default, as long as CONFIG_KDB is set. -+ If you want to ship a kernel with kdb support but only have kdb -+ turned on when the user requests it then select this option. When -+ compiled with CONFIG_KDB_OFF, kdb ignores all events unless you boot -+ with kdb=on or you echo "1" > /proc/sys/kernel/kdb. This option also -+ works in reverse, if kdb is normally activated, you can boot with -+ kdb=off or echo "0" > /proc/sys/kernel/kdb to deactivate kdb. If -+ unsure, say N. -+ -+config KDB_CONTINUE_CATASTROPHIC -+ int "KDB continues after catastrophic errors" -+ depends on KDB -+ default "0" -+ help -+ This integer controls the behaviour of kdb when the kernel gets a -+ catastrophic error, i.e. for a panic, oops, NMI or other watchdog -+ tripping. 
CONFIG_KDB_CONTINUE_CATASTROPHIC interacts with -+ /proc/sys/kernel/kdb and CONFIG_LKCD_DUMP (if your kernel has the -+ LKCD patch). -+ When KDB is active (/proc/sys/kernel/kdb == 1) and a catastrophic -+ error occurs, nothing extra happens until you type 'go'. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time -+ you type 'go', kdb warns you. The second time you type 'go', KDB -+ tries to continue - no guarantees that the kernel is still usable. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue - no -+ guarantees that the kernel is still usable. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD -+ patch and LKCD is configured to take a dump then KDB forces a dump. -+ Whether or not a dump is taken, KDB forces a reboot. -+ When KDB is not active (/proc/sys/kernel/kdb == 0) and a catastrophic -+ error occurs, the following steps are automatic, no human -+ intervention is required. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default) or 1. KDB attempts -+ to continue - no guarantees that the kernel is still usable. -+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD -+ patch and LKCD is configured to take a dump then KDB automatically -+ forces a dump. Whether or not a dump is taken, KDB forces a -+ reboot. -+ If you are not sure, say 0. Read Documentation/kdb/dump.txt before -+ setting to 2. -+ -+config KDB_USB -+ bool "Support for USB Keyboard in KDB" -+ depends on KDB && (USB_OHCI_HCD || USB_EHCI_HCD || USB_UHCI_HCD) -+ help -+ If you want to use kdb from USB keyboards then say Y here. If you -+ say N then kdb can only be used from a PC (AT) keyboard or a serial -+ console. -+ -+config KDB_KDUMP -+ bool "Support for Kdump in KDB" -+ depends on KDB -+ select KEXEC -+ default N -+ help -+ If you want to take Kdump kernel vmcore from KDB then say Y here. -+ If unsure, say N. -+ - endmenu ---- a/arch/x86/Makefile -+++ b/arch/x86/Makefile -@@ -137,6 +137,9 @@ drivers-$(CONFIG_PM) += arch/x86/power/ - - drivers-$(CONFIG_FB) += arch/x86/video/ - -+# KDB support -+drivers-$(CONFIG_KDB) += arch/x86/kdb/ -+ - #### - # boot loader support. Several targets are kept for legacy purposes - ---- /dev/null -+++ b/arch/x86/include/asm/ansidecl.h -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "ansidecl_32.h" -+#else -+# include "ansidecl_64.h" -+#endif ---- /dev/null -+++ b/arch/x86/include/asm/ansidecl_32.h -@@ -0,0 +1,383 @@ -+/* ANSI and traditional C compatability macros -+ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 -+ Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+This program is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 2 of the License, or -+(at your option) any later version. -+ -+This program is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with this program; if not, write to the Free Software -+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. 
-+ * Keith Owens 15 May 2006 -+ */ -+ -+/* ANSI and traditional C compatibility macros -+ -+ ANSI C is assumed if __STDC__ is #defined. -+ -+ Macro ANSI C definition Traditional C definition -+ ----- ---- - ---------- ----------- - ---------- -+ ANSI_PROTOTYPES 1 not defined -+ PTR `void *' `char *' -+ PTRCONST `void *const' `char *' -+ LONG_DOUBLE `long double' `double' -+ const not defined `' -+ volatile not defined `' -+ signed not defined `' -+ VA_START(ap, var) va_start(ap, var) va_start(ap) -+ -+ Note that it is safe to write "void foo();" indicating a function -+ with no return value, in all K+R compilers we have been able to test. -+ -+ For declaring functions with prototypes, we also provide these: -+ -+ PARAMS ((prototype)) -+ -- for functions which take a fixed number of arguments. Use this -+ when declaring the function. When defining the function, write a -+ K+R style argument list. For example: -+ -+ char *strcpy PARAMS ((char *dest, char *source)); -+ ... -+ char * -+ strcpy (dest, source) -+ char *dest; -+ char *source; -+ { ... } -+ -+ -+ VPARAMS ((prototype, ...)) -+ -- for functions which take a variable number of arguments. Use -+ PARAMS to declare the function, VPARAMS to define it. For example: -+ -+ int printf PARAMS ((const char *format, ...)); -+ ... -+ int -+ printf VPARAMS ((const char *format, ...)) -+ { -+ ... -+ } -+ -+ For writing functions which take variable numbers of arguments, we -+ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These -+ hide the differences between K+R and C89 more -+ thoroughly than the simple VA_START() macro mentioned above. -+ -+ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end. -+ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls -+ corresponding to the list of fixed arguments. Then use va_arg -+ normally to get the variable arguments, or pass your va_list object -+ around. You do not declare the va_list yourself; VA_OPEN does it -+ for you. -+ -+ Here is a complete example: -+ -+ int -+ printf VPARAMS ((const char *format, ...)) -+ { -+ int result; -+ -+ VA_OPEN (ap, format); -+ VA_FIXEDARG (ap, const char *, format); -+ -+ result = vfprintf (stdout, format, ap); -+ VA_CLOSE (ap); -+ -+ return result; -+ } -+ -+ -+ You can declare variables either before or after the VA_OPEN, -+ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning -+ and end of a block. They must appear at the same nesting level, -+ and any variables declared after VA_OPEN go out of scope at -+ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the -+ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE -+ pairs in a single function in case you need to traverse the -+ argument list more than once. -+ -+ For ease of writing code which uses GCC extensions but needs to be -+ portable to other compilers, we provide the GCC_VERSION macro that -+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various -+ wrappers around __attribute__. Also, __extension__ will be #defined -+ to nothing if it doesn't work. See below. -+ -+ This header also defines a lot of obsolete macros: -+ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID, -+ AND, DOTS, NOARGS. Don't use them. */ -+ -+#ifndef _ANSIDECL_H -+#define _ANSIDECL_H 1 -+ -+/* Every source file includes this file, -+ so they will all get the switch for lint. */ -+/* LINTLIBRARY */ -+ -+/* Using MACRO(x,y) in cpp #if conditionals does not work with some -+ older preprocessors. 
Thus we can't define something like this: -+ -+#define HAVE_GCC_VERSION(MAJOR, MINOR) \ -+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) -+ -+and then test "#if HAVE_GCC_VERSION(2,7)". -+ -+So instead we use the macro below and test it against specific values. */ -+ -+/* This macro simplifies testing whether we are using gcc, and if it -+ is of a particular minimum version. (Both major & minor numbers are -+ significant.) This macro will evaluate to 0 if we are not using -+ gcc at all. */ -+#ifndef GCC_VERSION -+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) -+#endif /* GCC_VERSION */ -+ -+#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus)) -+/* All known AIX compilers implement these things (but don't always -+ define __STDC__). The RISC/OS MIPS compiler defines these things -+ in SVR4 mode, but does not define __STDC__. */ -+/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other -+ C++ compilers, does not define __STDC__, though it acts as if this -+ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ -+ -+#define ANSI_PROTOTYPES 1 -+#define PTR void * -+#define PTRCONST void *const -+#define LONG_DOUBLE long double -+ -+/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in -+ a #ifndef. */ -+//#ifndef PARAMS -+//#define PARAMS(ARGS) ARGS -+//#endif -+ -+#define VPARAMS(ARGS) ARGS -+#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR) -+ -+/* variadic function helper macros */ -+/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's -+ use without inhibiting further decls and without declaring an -+ actual variable. */ -+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy -+#define VA_CLOSE(AP) } va_end(AP); } -+#define VA_FIXEDARG(AP, T, N) struct Qdmy -+ -+#undef const -+#undef volatile -+#undef signed -+ -+#ifdef __KERNEL__ -+#ifndef __STDC_VERSION__ -+#define __STDC_VERSION__ 0 -+#endif -+#endif /* __KERNEL__ */ -+ -+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports -+ it too, but it's not in C89. */ -+#undef inline -+#if __STDC_VERSION__ > 199901L -+/* it's a keyword */ -+#else -+# if GCC_VERSION >= 2007 -+# define inline __inline__ /* __inline__ prevents -pedantic warnings */ -+# else -+# define inline /* nothing */ -+# endif -+#endif -+ -+/* These are obsolete. Do not use. */ -+#ifndef IN_GCC -+#define CONST const -+#define VOLATILE volatile -+#define SIGNED signed -+ -+#define PROTO(type, name, arglist) type name arglist -+#define EXFUN(name, proto) name proto -+#define DEFUN(name, arglist, args) name(args) -+#define DEFUN_VOID(name) name(void) -+#define AND , -+#define DOTS , ... -+#define NOARGS void -+#endif /* ! IN_GCC */ -+ -+#else /* Not ANSI C. 
*/ -+ -+#undef ANSI_PROTOTYPES -+#define PTR char * -+#define PTRCONST PTR -+#define LONG_DOUBLE double -+ -+//#define PARAMS(args) () -+#define VPARAMS(args) (va_alist) va_dcl -+#define VA_START(va_list, var) va_start(va_list) -+ -+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy -+#define VA_CLOSE(AP) } va_end(AP); } -+#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE) -+ -+/* some systems define these in header files for non-ansi mode */ -+#undef const -+#undef volatile -+#undef signed -+#undef inline -+#define const -+#define volatile -+#define signed -+#define inline -+ -+#ifndef IN_GCC -+#define CONST -+#define VOLATILE -+#define SIGNED -+ -+#define PROTO(type, name, arglist) type name () -+#define EXFUN(name, proto) name() -+#define DEFUN(name, arglist, args) name arglist args; -+#define DEFUN_VOID(name) name() -+#define AND ; -+#define DOTS -+#define NOARGS -+#endif /* ! IN_GCC */ -+ -+#endif /* ANSI C. */ -+ -+/* Define macros for some gcc attributes. This permits us to use the -+ macros freely, and know that they will come into play for the -+ version of gcc in which they are supported. */ -+ -+#if (GCC_VERSION < 2007) -+# define __attribute__(x) -+#endif -+ -+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */ -+#ifndef ATTRIBUTE_MALLOC -+# if (GCC_VERSION >= 2096) -+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) -+# else -+# define ATTRIBUTE_MALLOC -+# endif /* GNUC >= 2.96 */ -+#endif /* ATTRIBUTE_MALLOC */ -+ -+/* Attributes on labels were valid as of gcc 2.93. */ -+#ifndef ATTRIBUTE_UNUSED_LABEL -+# if (!defined (__cplusplus) && GCC_VERSION >= 2093) -+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED -+# else -+# define ATTRIBUTE_UNUSED_LABEL -+# endif /* !__cplusplus && GNUC >= 2.93 */ -+#endif /* ATTRIBUTE_UNUSED_LABEL */ -+ -+#ifndef ATTRIBUTE_UNUSED -+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) -+#endif /* ATTRIBUTE_UNUSED */ -+ -+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the -+ identifier name. */ -+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004) -+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED -+#else /* !__cplusplus || GNUC >= 3.4 */ -+# define ARG_UNUSED(NAME) NAME -+#endif /* !__cplusplus || GNUC >= 3.4 */ -+ -+#ifndef ATTRIBUTE_NORETURN -+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) -+#endif /* ATTRIBUTE_NORETURN */ -+ -+/* Attribute `nonnull' was valid as of gcc 3.3. */ -+#ifndef ATTRIBUTE_NONNULL -+# if (GCC_VERSION >= 3003) -+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) -+# else -+# define ATTRIBUTE_NONNULL(m) -+# endif /* GNUC >= 3.3 */ -+#endif /* ATTRIBUTE_NONNULL */ -+ -+/* Attribute `pure' was valid as of gcc 3.0. */ -+#ifndef ATTRIBUTE_PURE -+# if (GCC_VERSION >= 3000) -+# define ATTRIBUTE_PURE __attribute__ ((__pure__)) -+# else -+# define ATTRIBUTE_PURE -+# endif /* GNUC >= 3.0 */ -+#endif /* ATTRIBUTE_PURE */ -+ -+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. -+ This was the case for the `printf' format attribute by itself -+ before GCC 3.3, but as of 3.3 we need to add the `nonnull' -+ attribute to retain this behavior. 
*/ -+#ifndef ATTRIBUTE_PRINTF -+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) -+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) -+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) -+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) -+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) -+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) -+#endif /* ATTRIBUTE_PRINTF */ -+ -+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on -+ a function pointer. Format attributes were allowed on function -+ pointers as of gcc 3.1. */ -+#ifndef ATTRIBUTE_FPTR_PRINTF -+# if (GCC_VERSION >= 3001) -+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n) -+# else -+# define ATTRIBUTE_FPTR_PRINTF(m, n) -+# endif /* GNUC >= 3.1 */ -+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2) -+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3) -+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4) -+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5) -+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6) -+#endif /* ATTRIBUTE_FPTR_PRINTF */ -+ -+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A -+ NULL format specifier was allowed as of gcc 3.3. */ -+#ifndef ATTRIBUTE_NULL_PRINTF -+# if (GCC_VERSION >= 3003) -+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) -+# else -+# define ATTRIBUTE_NULL_PRINTF(m, n) -+# endif /* GNUC >= 3.3 */ -+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) -+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) -+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) -+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) -+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) -+#endif /* ATTRIBUTE_NULL_PRINTF */ -+ -+/* Attribute `sentinel' was valid as of gcc 3.5. */ -+#ifndef ATTRIBUTE_SENTINEL -+# if (GCC_VERSION >= 3005) -+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__)) -+# else -+# define ATTRIBUTE_SENTINEL -+# endif /* GNUC >= 3.5 */ -+#endif /* ATTRIBUTE_SENTINEL */ -+ -+ -+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF -+# if (GCC_VERSION >= 3000) -+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m)))) -+# else -+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) -+# endif /* GNUC >= 3.0 */ -+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */ -+ -+/* We use __extension__ in some places to suppress -pedantic warnings -+ about GCC extensions. This feature didn't work properly before -+ gcc 2.8. */ -+#if GCC_VERSION < 2008 -+#define __extension__ -+#endif -+ -+#endif /* ansidecl.h */ ---- /dev/null -+++ b/arch/x86/include/asm/ansidecl_64.h -@@ -0,0 +1,383 @@ -+/* ANSI and traditional C compatability macros -+ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 -+ Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+This program is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 2 of the License, or -+(at your option) any later version. -+ -+This program is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. 
-+ -+You should have received a copy of the GNU General Public License -+along with this program; if not, write to the Free Software -+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+/* ANSI and traditional C compatibility macros -+ -+ ANSI C is assumed if __STDC__ is #defined. -+ -+ Macro ANSI C definition Traditional C definition -+ ----- ---- - ---------- ----------- - ---------- -+ ANSI_PROTOTYPES 1 not defined -+ PTR `void *' `char *' -+ PTRCONST `void *const' `char *' -+ LONG_DOUBLE `long double' `double' -+ const not defined `' -+ volatile not defined `' -+ signed not defined `' -+ VA_START(ap, var) va_start(ap, var) va_start(ap) -+ -+ Note that it is safe to write "void foo();" indicating a function -+ with no return value, in all K+R compilers we have been able to test. -+ -+ For declaring functions with prototypes, we also provide these: -+ -+ PARAMS ((prototype)) -+ -- for functions which take a fixed number of arguments. Use this -+ when declaring the function. When defining the function, write a -+ K+R style argument list. For example: -+ -+ char *strcpy PARAMS ((char *dest, char *source)); -+ ... -+ char * -+ strcpy (dest, source) -+ char *dest; -+ char *source; -+ { ... } -+ -+ -+ VPARAMS ((prototype, ...)) -+ -- for functions which take a variable number of arguments. Use -+ PARAMS to declare the function, VPARAMS to define it. For example: -+ -+ int printf PARAMS ((const char *format, ...)); -+ ... -+ int -+ printf VPARAMS ((const char *format, ...)) -+ { -+ ... -+ } -+ -+ For writing functions which take variable numbers of arguments, we -+ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These -+ hide the differences between K+R and C89 more -+ thoroughly than the simple VA_START() macro mentioned above. -+ -+ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end. -+ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls -+ corresponding to the list of fixed arguments. Then use va_arg -+ normally to get the variable arguments, or pass your va_list object -+ around. You do not declare the va_list yourself; VA_OPEN does it -+ for you. -+ -+ Here is a complete example: -+ -+ int -+ printf VPARAMS ((const char *format, ...)) -+ { -+ int result; -+ -+ VA_OPEN (ap, format); -+ VA_FIXEDARG (ap, const char *, format); -+ -+ result = vfprintf (stdout, format, ap); -+ VA_CLOSE (ap); -+ -+ return result; -+ } -+ -+ -+ You can declare variables either before or after the VA_OPEN, -+ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning -+ and end of a block. They must appear at the same nesting level, -+ and any variables declared after VA_OPEN go out of scope at -+ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the -+ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE -+ pairs in a single function in case you need to traverse the -+ argument list more than once. -+ -+ For ease of writing code which uses GCC extensions but needs to be -+ portable to other compilers, we provide the GCC_VERSION macro that -+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various -+ wrappers around __attribute__. Also, __extension__ will be #defined -+ to nothing if it doesn't work. See below. 
-+ -+ This header also defines a lot of obsolete macros: -+ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID, -+ AND, DOTS, NOARGS. Don't use them. */ -+ -+#ifndef _ANSIDECL_H -+#define _ANSIDECL_H 1 -+ -+/* Every source file includes this file, -+ so they will all get the switch for lint. */ -+/* LINTLIBRARY */ -+ -+/* Using MACRO(x,y) in cpp #if conditionals does not work with some -+ older preprocessors. Thus we can't define something like this: -+ -+#define HAVE_GCC_VERSION(MAJOR, MINOR) \ -+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) -+ -+and then test "#if HAVE_GCC_VERSION(2,7)". -+ -+So instead we use the macro below and test it against specific values. */ -+ -+/* This macro simplifies testing whether we are using gcc, and if it -+ is of a particular minimum version. (Both major & minor numbers are -+ significant.) This macro will evaluate to 0 if we are not using -+ gcc at all. */ -+#ifndef GCC_VERSION -+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) -+#endif /* GCC_VERSION */ -+ -+#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus)) -+/* All known AIX compilers implement these things (but don't always -+ define __STDC__). The RISC/OS MIPS compiler defines these things -+ in SVR4 mode, but does not define __STDC__. */ -+/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other -+ C++ compilers, does not define __STDC__, though it acts as if this -+ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ -+ -+#define ANSI_PROTOTYPES 1 -+#define PTR void * -+#define PTRCONST void *const -+#define LONG_DOUBLE long double -+ -+/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in -+ a #ifndef. */ -+//#ifndef PARAMS -+//#define PARAMS(ARGS) ARGS -+//#endif -+ -+#define VPARAMS(ARGS) ARGS -+#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR) -+ -+/* variadic function helper macros */ -+/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's -+ use without inhibiting further decls and without declaring an -+ actual variable. */ -+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy -+#define VA_CLOSE(AP) } va_end(AP); } -+#define VA_FIXEDARG(AP, T, N) struct Qdmy -+ -+#undef const -+#undef volatile -+#undef signed -+ -+#ifdef __KERNEL__ -+#ifndef __STDC_VERSION__ -+#define __STDC_VERSION__ 0 -+#endif -+#endif /* __KERNEL__ */ -+ -+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports -+ it too, but it's not in C89. */ -+#undef inline -+#if __STDC_VERSION__ > 199901L -+/* it's a keyword */ -+#else -+# if GCC_VERSION >= 2007 -+# define inline __inline__ /* __inline__ prevents -pedantic warnings */ -+# else -+# define inline /* nothing */ -+# endif -+#endif -+ -+/* These are obsolete. Do not use. */ -+#ifndef IN_GCC -+#define CONST const -+#define VOLATILE volatile -+#define SIGNED signed -+ -+#define PROTO(type, name, arglist) type name arglist -+#define EXFUN(name, proto) name proto -+#define DEFUN(name, arglist, args) name(args) -+#define DEFUN_VOID(name) name(void) -+#define AND , -+#define DOTS , ... -+#define NOARGS void -+#endif /* ! IN_GCC */ -+ -+#else /* Not ANSI C. 
*/ -+ -+#undef ANSI_PROTOTYPES -+#define PTR char * -+#define PTRCONST PTR -+#define LONG_DOUBLE double -+ -+//#define PARAMS(args) () -+#define VPARAMS(args) (va_alist) va_dcl -+#define VA_START(va_list, var) va_start(va_list) -+ -+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy -+#define VA_CLOSE(AP) } va_end(AP); } -+#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE) -+ -+/* some systems define these in header files for non-ansi mode */ -+#undef const -+#undef volatile -+#undef signed -+#undef inline -+#define const -+#define volatile -+#define signed -+#define inline -+ -+#ifndef IN_GCC -+#define CONST -+#define VOLATILE -+#define SIGNED -+ -+#define PROTO(type, name, arglist) type name () -+#define EXFUN(name, proto) name() -+#define DEFUN(name, arglist, args) name arglist args; -+#define DEFUN_VOID(name) name() -+#define AND ; -+#define DOTS -+#define NOARGS -+#endif /* ! IN_GCC */ -+ -+#endif /* ANSI C. */ -+ -+/* Define macros for some gcc attributes. This permits us to use the -+ macros freely, and know that they will come into play for the -+ version of gcc in which they are supported. */ -+ -+#if (GCC_VERSION < 2007) -+# define __attribute__(x) -+#endif -+ -+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */ -+#ifndef ATTRIBUTE_MALLOC -+# if (GCC_VERSION >= 2096) -+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) -+# else -+# define ATTRIBUTE_MALLOC -+# endif /* GNUC >= 2.96 */ -+#endif /* ATTRIBUTE_MALLOC */ -+ -+/* Attributes on labels were valid as of gcc 2.93. */ -+#ifndef ATTRIBUTE_UNUSED_LABEL -+# if (!defined (__cplusplus) && GCC_VERSION >= 2093) -+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED -+# else -+# define ATTRIBUTE_UNUSED_LABEL -+# endif /* !__cplusplus && GNUC >= 2.93 */ -+#endif /* ATTRIBUTE_UNUSED_LABEL */ -+ -+#ifndef ATTRIBUTE_UNUSED -+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) -+#endif /* ATTRIBUTE_UNUSED */ -+ -+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the -+ identifier name. */ -+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004) -+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED -+#else /* !__cplusplus || GNUC >= 3.4 */ -+# define ARG_UNUSED(NAME) NAME -+#endif /* !__cplusplus || GNUC >= 3.4 */ -+ -+#ifndef ATTRIBUTE_NORETURN -+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) -+#endif /* ATTRIBUTE_NORETURN */ -+ -+/* Attribute `nonnull' was valid as of gcc 3.3. */ -+#ifndef ATTRIBUTE_NONNULL -+# if (GCC_VERSION >= 3003) -+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) -+# else -+# define ATTRIBUTE_NONNULL(m) -+# endif /* GNUC >= 3.3 */ -+#endif /* ATTRIBUTE_NONNULL */ -+ -+/* Attribute `pure' was valid as of gcc 3.0. */ -+#ifndef ATTRIBUTE_PURE -+# if (GCC_VERSION >= 3000) -+# define ATTRIBUTE_PURE __attribute__ ((__pure__)) -+# else -+# define ATTRIBUTE_PURE -+# endif /* GNUC >= 3.0 */ -+#endif /* ATTRIBUTE_PURE */ -+ -+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. -+ This was the case for the `printf' format attribute by itself -+ before GCC 3.3, but as of 3.3 we need to add the `nonnull' -+ attribute to retain this behavior. 
*/ -+#ifndef ATTRIBUTE_PRINTF -+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) -+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) -+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) -+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) -+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) -+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) -+#endif /* ATTRIBUTE_PRINTF */ -+ -+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on -+ a function pointer. Format attributes were allowed on function -+ pointers as of gcc 3.1. */ -+#ifndef ATTRIBUTE_FPTR_PRINTF -+# if (GCC_VERSION >= 3001) -+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n) -+# else -+# define ATTRIBUTE_FPTR_PRINTF(m, n) -+# endif /* GNUC >= 3.1 */ -+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2) -+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3) -+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4) -+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5) -+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6) -+#endif /* ATTRIBUTE_FPTR_PRINTF */ -+ -+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A -+ NULL format specifier was allowed as of gcc 3.3. */ -+#ifndef ATTRIBUTE_NULL_PRINTF -+# if (GCC_VERSION >= 3003) -+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) -+# else -+# define ATTRIBUTE_NULL_PRINTF(m, n) -+# endif /* GNUC >= 3.3 */ -+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) -+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) -+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) -+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) -+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) -+#endif /* ATTRIBUTE_NULL_PRINTF */ -+ -+/* Attribute `sentinel' was valid as of gcc 3.5. */ -+#ifndef ATTRIBUTE_SENTINEL -+# if (GCC_VERSION >= 3005) -+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__)) -+# else -+# define ATTRIBUTE_SENTINEL -+# endif /* GNUC >= 3.5 */ -+#endif /* ATTRIBUTE_SENTINEL */ -+ -+ -+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF -+# if (GCC_VERSION >= 3000) -+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m)))) -+# else -+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) -+# endif /* GNUC >= 3.0 */ -+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */ -+ -+/* We use __extension__ in some places to suppress -pedantic warnings -+ about GCC extensions. This feature didn't work properly before -+ gcc 2.8. */ -+#if GCC_VERSION < 2008 -+#define __extension__ -+#endif -+ -+#endif /* ansidecl.h */ ---- /dev/null -+++ b/arch/x86/include/asm/bfd.h -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "bfd_32.h" -+#else -+# include "bfd_64.h" -+#endif ---- /dev/null -+++ b/arch/x86/include/asm/bfd_32.h -@@ -0,0 +1,4921 @@ -+/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically -+ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c", -+ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c", -+ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c", -+ "linker.c" and "simple.c". -+ Run "make headers" in your build bfd/ to regenerate. */ -+ -+/* Main header file for the bfd library -- portable access to object files. -+ -+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, -+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. -+ -+ Contributed by Cygnus Support. 
-+ -+ This file is part of BFD, the Binary File Descriptor library. -+ -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 2 of the License, or -+ (at your option) any later version. -+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifndef __BFD_H_SEEN__ -+#define __BFD_H_SEEN__ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#ifdef __KERNEL__ -+#include -+#else /* __KERNEL__ */ -+#include "ansidecl.h" -+#include "symcat.h" -+#endif /* __KERNEL__ */ -+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) -+#ifndef SABER -+/* This hack is to avoid a problem with some strict ANSI C preprocessors. -+ The problem is, "32_" is not a valid preprocessing token, and we don't -+ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will -+ cause the inner CONCAT2 macros to be evaluated first, producing -+ still-valid pp-tokens. Then the final concatenation can be done. */ -+#undef CONCAT4 -+#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d)) -+#endif -+#endif -+ -+/* The word size used by BFD on the host. This may be 64 with a 32 -+ bit target if the host is 64 bit, or if other 64 bit targets have -+ been selected with --enable-targets, or if --enable-64-bit-bfd. */ -+#ifdef __KERNEL__ -+#define BFD_ARCH_SIZE 32 -+#else /* __KERNEL__ */ -+#define BFD_ARCH_SIZE 64 -+#endif /* __KERNEL__ */ -+ -+/* The word size of the default bfd target. */ -+#define BFD_DEFAULT_TARGET_SIZE 32 -+ -+#define BFD_HOST_64BIT_LONG 0 -+#define BFD_HOST_LONG_LONG 1 -+#if 1 -+#define BFD_HOST_64_BIT long long -+#define BFD_HOST_U_64_BIT unsigned long long -+typedef BFD_HOST_64_BIT bfd_int64_t; -+typedef BFD_HOST_U_64_BIT bfd_uint64_t; -+#endif -+ -+#if BFD_ARCH_SIZE >= 64 -+#define BFD64 -+#endif -+ -+#ifndef INLINE -+#if __GNUC__ >= 2 -+#define INLINE __inline__ -+#else -+#define INLINE -+#endif -+#endif -+ -+/* Forward declaration. */ -+typedef struct bfd bfd; -+ -+/* Boolean type used in bfd. Too many systems define their own -+ versions of "boolean" for us to safely typedef a "boolean" of -+ our own. Using an enum for "bfd_boolean" has its own set of -+ problems, with strange looking casts required to avoid warnings -+ on some older compilers. Thus we just use an int. -+ -+ General rule: Functions which are bfd_boolean return TRUE on -+ success and FALSE on failure (unless they're a predicate). */ -+ -+typedef int bfd_boolean; -+#undef FALSE -+#undef TRUE -+#define FALSE 0 -+#define TRUE 1 -+ -+#ifdef BFD64 -+ -+#ifndef BFD_HOST_64_BIT -+ #error No 64 bit integer type available -+#endif /* ! 
defined (BFD_HOST_64_BIT) */ -+ -+typedef BFD_HOST_U_64_BIT bfd_vma; -+typedef BFD_HOST_64_BIT bfd_signed_vma; -+typedef BFD_HOST_U_64_BIT bfd_size_type; -+typedef BFD_HOST_U_64_BIT symvalue; -+ -+#ifndef fprintf_vma -+#if BFD_HOST_64BIT_LONG -+#define sprintf_vma(s,x) sprintf (s, "%016lx", x) -+#define fprintf_vma(f,x) fprintf (f, "%016lx", x) -+#else -+#define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff))) -+#define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff)) -+#define fprintf_vma(s,x) \ -+ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) -+#define sprintf_vma(s,x) \ -+ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) -+#endif -+#endif -+ -+#else /* not BFD64 */ -+ -+/* Represent a target address. Also used as a generic unsigned type -+ which is guaranteed to be big enough to hold any arithmetic types -+ we need to deal with. */ -+typedef unsigned long bfd_vma; -+ -+/* A generic signed type which is guaranteed to be big enough to hold any -+ arithmetic types we need to deal with. Can be assumed to be compatible -+ with bfd_vma in the same way that signed and unsigned ints are compatible -+ (as parameters, in assignment, etc). */ -+typedef long bfd_signed_vma; -+ -+typedef unsigned long symvalue; -+typedef unsigned long bfd_size_type; -+ -+/* Print a bfd_vma x on stream s. */ -+#define fprintf_vma(s,x) fprintf (s, "%08lx", x) -+#define sprintf_vma(s,x) sprintf (s, "%08lx", x) -+ -+#endif /* not BFD64 */ -+ -+#define HALF_BFD_SIZE_TYPE \ -+ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2)) -+ -+#ifndef BFD_HOST_64_BIT -+/* Fall back on a 32 bit type. The idea is to make these types always -+ available for function return types, but in the case that -+ BFD_HOST_64_BIT is undefined such a function should abort or -+ otherwise signal an error. */ -+typedef bfd_signed_vma bfd_int64_t; -+typedef bfd_vma bfd_uint64_t; -+#endif -+ -+/* An offset into a file. BFD always uses the largest possible offset -+ based on the build time availability of fseek, fseeko, or fseeko64. */ -+typedef BFD_HOST_64_BIT file_ptr; -+typedef unsigned BFD_HOST_64_BIT ufile_ptr; -+ -+extern void bfd_sprintf_vma (bfd *, char *, bfd_vma); -+extern void bfd_fprintf_vma (bfd *, void *, bfd_vma); -+ -+#define printf_vma(x) fprintf_vma(stdout,x) -+#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x) -+ -+typedef unsigned int flagword; /* 32 bits of flags */ -+typedef unsigned char bfd_byte; -+ -+/* File formats. */ -+ -+typedef enum bfd_format -+{ -+ bfd_unknown = 0, /* File format is unknown. */ -+ bfd_object, /* Linker/assembler/compiler output. */ -+ bfd_archive, /* Object archive file. */ -+ bfd_core, /* Core dump. */ -+ bfd_type_end /* Marks the end; don't use it! */ -+} -+bfd_format; -+ -+/* Values that may appear in the flags field of a BFD. These also -+ appear in the object_flags field of the bfd_target structure, where -+ they indicate the set of flags used by that backend (not all flags -+ are meaningful for all object file formats) (FIXME: at the moment, -+ the object_flags values have mostly just been copied from backend -+ to another, and are not necessarily correct). */ -+ -+/* No flags. */ -+#define BFD_NO_FLAGS 0x00 -+ -+/* BFD contains relocation entries. */ -+#define HAS_RELOC 0x01 -+ -+/* BFD is directly executable. */ -+#define EXEC_P 0x02 -+ -+/* BFD has line number information (basically used for F_LNNO in a -+ COFF header). */ -+#define HAS_LINENO 0x04 -+ -+/* BFD has debugging information. 
*/ -+#define HAS_DEBUG 0x08 -+ -+/* BFD has symbols. */ -+#define HAS_SYMS 0x10 -+ -+/* BFD has local symbols (basically used for F_LSYMS in a COFF -+ header). */ -+#define HAS_LOCALS 0x20 -+ -+/* BFD is a dynamic object. */ -+#define DYNAMIC 0x40 -+ -+/* Text section is write protected (if D_PAGED is not set, this is -+ like an a.out NMAGIC file) (the linker sets this by default, but -+ clears it for -r or -N). */ -+#define WP_TEXT 0x80 -+ -+/* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the -+ linker sets this by default, but clears it for -r or -n or -N). */ -+#define D_PAGED 0x100 -+ -+/* BFD is relaxable (this means that bfd_relax_section may be able to -+ do something) (sometimes bfd_relax_section can do something even if -+ this is not set). */ -+#define BFD_IS_RELAXABLE 0x200 -+ -+/* This may be set before writing out a BFD to request using a -+ traditional format. For example, this is used to request that when -+ writing out an a.out object the symbols not be hashed to eliminate -+ duplicates. */ -+#define BFD_TRADITIONAL_FORMAT 0x400 -+ -+/* This flag indicates that the BFD contents are actually cached in -+ memory. If this is set, iostream points to a bfd_in_memory struct. */ -+#define BFD_IN_MEMORY 0x800 -+ -+/* The sections in this BFD specify a memory page. */ -+#define HAS_LOAD_PAGE 0x1000 -+ -+/* This BFD has been created by the linker and doesn't correspond -+ to any input file. */ -+#define BFD_LINKER_CREATED 0x2000 -+ -+/* Symbols and relocation. */ -+ -+/* A count of carsyms (canonical archive symbols). */ -+typedef unsigned long symindex; -+ -+/* How to perform a relocation. */ -+typedef const struct reloc_howto_struct reloc_howto_type; -+ -+#define BFD_NO_MORE_SYMBOLS ((symindex) ~0) -+ -+/* General purpose part of a symbol X; -+ target specific parts are in libcoff.h, libaout.h, etc. */ -+ -+#define bfd_get_section(x) ((x)->section) -+#define bfd_get_output_section(x) ((x)->section->output_section) -+#define bfd_set_section(x,y) ((x)->section) = (y) -+#define bfd_asymbol_base(x) ((x)->section->vma) -+#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value) -+#define bfd_asymbol_name(x) ((x)->name) -+/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/ -+#define bfd_asymbol_bfd(x) ((x)->the_bfd) -+#define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour) -+ -+/* A canonical archive symbol. */ -+/* This is a type pun with struct ranlib on purpose! */ -+typedef struct carsym -+{ -+ char *name; -+ file_ptr file_offset; /* Look here to find the file. */ -+} -+carsym; /* To make these you call a carsymogen. */ -+ -+/* Used in generating armaps (archive tables of contents). -+ Perhaps just a forward definition would do? */ -+struct orl /* Output ranlib. */ -+{ -+ char **name; /* Symbol name. */ -+ union -+ { -+ file_ptr pos; -+ bfd *abfd; -+ } u; /* bfd* or file position. */ -+ int namidx; /* Index into string table. */ -+}; -+ -+/* Linenumber stuff. */ -+typedef struct lineno_cache_entry -+{ -+ unsigned int line_number; /* Linenumber from start of function. */ -+ union -+ { -+ struct bfd_symbol *sym; /* Function name. */ -+ bfd_vma offset; /* Offset into section. */ -+ } u; -+} -+alent; -+ -+/* Object and core file sections. 
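A minimal sketch of the symbol accessor macros declared above (userspace, assuming the same declarations are visible through a matching <bfd.h>); the asymbol typedef for struct bfd_symbol is introduced later in this header, and print_symbol is a hypothetical helper name.

#include <stdio.h>
#include <bfd.h>

/* Print a symbol's name and resolved value using only the accessor
   macros: bfd_asymbol_value adds the owning section's vma to the
   symbol's section-relative value.  */
static void
print_symbol (asymbol *sym)
{
  printf ("%s = 0x%lx (in %s)\n",
          bfd_asymbol_name (sym),
          (unsigned long) bfd_asymbol_value (sym),
          bfd_get_section (sym)->name);
}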
*/ -+ -+#define align_power(addr, align) \ -+ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align))) -+ -+typedef struct bfd_section *sec_ptr; -+ -+#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0) -+#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0) -+#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0) -+#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0) -+#define bfd_section_name(bfd, ptr) ((ptr)->name) -+#define bfd_section_size(bfd, ptr) ((ptr)->size) -+#define bfd_get_section_size(ptr) ((ptr)->size) -+#define bfd_section_vma(bfd, ptr) ((ptr)->vma) -+#define bfd_section_lma(bfd, ptr) ((ptr)->lma) -+#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power) -+#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0) -+#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata) -+ -+#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0) -+ -+#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE) -+#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE) -+#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE) -+/* Find the address one past the end of SEC. */ -+#define bfd_get_section_limit(bfd, sec) \ -+ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \ -+ / bfd_octets_per_byte (bfd)) -+ -+typedef struct stat stat_type; -+ -+typedef enum bfd_print_symbol -+{ -+ bfd_print_symbol_name, -+ bfd_print_symbol_more, -+ bfd_print_symbol_all -+} bfd_print_symbol_type; -+ -+/* Information about a symbol that nm needs. */ -+ -+typedef struct _symbol_info -+{ -+ symvalue value; -+ char type; -+ const char *name; /* Symbol name. */ -+ unsigned char stab_type; /* Stab type. */ -+ char stab_other; /* Stab other. */ -+ short stab_desc; /* Stab desc. */ -+ const char *stab_name; /* String for stab type. */ -+} symbol_info; -+ -+/* Get the name of a stabs type code. */ -+ -+extern const char *bfd_get_stab_name (int); -+ -+/* Hash table routines. There is no way to free up a hash table. */ -+ -+/* An element in the hash table. Most uses will actually use a larger -+ structure, and an instance of this will be the first field. */ -+ -+struct bfd_hash_entry -+{ -+ /* Next entry for this hash code. */ -+ struct bfd_hash_entry *next; -+ /* String being hashed. */ -+ const char *string; -+ /* Hash code. This is the full hash code, not the index into the -+ table. */ -+ unsigned long hash; -+}; -+ -+/* A hash table. */ -+ -+struct bfd_hash_table -+{ -+ /* The hash array. */ -+ struct bfd_hash_entry **table; -+ /* The number of slots in the hash table. */ -+ unsigned int size; -+ /* A function used to create new elements in the hash table. The -+ first entry is itself a pointer to an element. When this -+ function is first invoked, this pointer will be NULL. However, -+ having the pointer permits a hierarchy of method functions to be -+ built each of which calls the function in the superclass. Thus -+ each function should be written to allocate a new block of memory -+ only if the argument is NULL. */ -+ struct bfd_hash_entry *(*newfunc) -+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); -+ /* An objalloc for this hash table. This is a struct objalloc *, -+ but we use void * to avoid requiring the inclusion of objalloc.h. */ -+ void *memory; -+}; -+ -+/* Initialize a hash table. 
*/ -+extern bfd_boolean bfd_hash_table_init -+ (struct bfd_hash_table *, -+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *, -+ struct bfd_hash_table *, -+ const char *)); -+ -+/* Initialize a hash table specifying a size. */ -+extern bfd_boolean bfd_hash_table_init_n -+ (struct bfd_hash_table *, -+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *, -+ struct bfd_hash_table *, -+ const char *), -+ unsigned int size); -+ -+/* Free up a hash table. */ -+extern void bfd_hash_table_free -+ (struct bfd_hash_table *); -+ -+/* Look up a string in a hash table. If CREATE is TRUE, a new entry -+ will be created for this string if one does not already exist. The -+ COPY argument must be TRUE if this routine should copy the string -+ into newly allocated memory when adding an entry. */ -+extern struct bfd_hash_entry *bfd_hash_lookup -+ (struct bfd_hash_table *, const char *, bfd_boolean create, -+ bfd_boolean copy); -+ -+/* Replace an entry in a hash table. */ -+extern void bfd_hash_replace -+ (struct bfd_hash_table *, struct bfd_hash_entry *old, -+ struct bfd_hash_entry *nw); -+ -+/* Base method for creating a hash table entry. */ -+extern struct bfd_hash_entry *bfd_hash_newfunc -+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); -+ -+/* Grab some space for a hash table entry. */ -+extern void *bfd_hash_allocate -+ (struct bfd_hash_table *, unsigned int); -+ -+/* Traverse a hash table in a random order, calling a function on each -+ element. If the function returns FALSE, the traversal stops. The -+ INFO argument is passed to the function. */ -+extern void bfd_hash_traverse -+ (struct bfd_hash_table *, -+ bfd_boolean (*) (struct bfd_hash_entry *, void *), -+ void *info); -+ -+/* Allows the default size of a hash table to be configured. New hash -+ tables allocated using bfd_hash_table_init will be created with -+ this size. */ -+extern void bfd_hash_set_default_size (bfd_size_type); -+ -+/* This structure is used to keep track of stabs in sections -+ information while linking. */ -+ -+struct stab_info -+{ -+ /* A hash table used to hold stabs strings. */ -+ struct bfd_strtab_hash *strings; -+ /* The header file hash table. */ -+ struct bfd_hash_table includes; -+ /* The first .stabstr section. */ -+ struct bfd_section *stabstr; -+}; -+ -+#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table -+ -+/* User program access to BFD facilities. */ -+ -+/* Direct I/O routines, for programs which know more about the object -+ file than BFD does. Use higher level routines if possible. */ -+ -+extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *); -+extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *); -+extern int bfd_seek (bfd *, file_ptr, int); -+extern file_ptr bfd_tell (bfd *); -+extern int bfd_flush (bfd *); -+extern int bfd_stat (bfd *, struct stat *); -+ -+/* Deprecated old routines. 
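A minimal userspace sketch of the hash-table interface declared above, assuming the same declarations through a contemporaneous <bfd.h>; struct count_entry and count_newfunc are hypothetical names. Note that this version of bfd_hash_table_init takes only the table and the newfunc (later binutils releases add an entry-size argument).

#include <bfd.h>

/* Derived entry type: the generic bfd_hash_entry must be the first field.  */
struct count_entry
{
  struct bfd_hash_entry root;
  unsigned long count;
};

/* A newfunc in the documented style: allocate only when ENTRY is NULL,
   let the base bfd_hash_newfunc initialise the generic part, then set
   up the derived fields.  */
static struct bfd_hash_entry *
count_newfunc (struct bfd_hash_entry *entry,
               struct bfd_hash_table *table,
               const char *string)
{
  if (entry == NULL)
    {
      entry = bfd_hash_allocate (table, sizeof (struct count_entry));
      if (entry == NULL)
        return NULL;
    }
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    ((struct count_entry *) entry)->count = 0;
  return entry;
}

static void
count_string (struct bfd_hash_table *table, const char *s)
{
  /* create = TRUE adds the entry if missing; copy = TRUE duplicates S.  */
  struct count_entry *e
    = (struct count_entry *) bfd_hash_lookup (table, s, TRUE, TRUE);
  if (e != NULL)
    e->count++;
}

/* Usage: bfd_hash_table_init (&tab, count_newfunc); ... bfd_hash_table_free (&tab); */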
*/ -+#if __GNUC__ -+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \ -+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \ -+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#else -+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \ -+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\ -+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#endif -+extern void warn_deprecated (const char *, const char *, int, const char *); -+ -+/* Cast from const char * to char * so that caller can assign to -+ a char * without a warning. */ -+#define bfd_get_filename(abfd) ((char *) (abfd)->filename) -+#define bfd_get_cacheable(abfd) ((abfd)->cacheable) -+#define bfd_get_format(abfd) ((abfd)->format) -+#define bfd_get_target(abfd) ((abfd)->xvec->name) -+#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour) -+#define bfd_family_coff(abfd) \ -+ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \ -+ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour) -+#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG) -+#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE) -+#define bfd_header_big_endian(abfd) \ -+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG) -+#define bfd_header_little_endian(abfd) \ -+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE) -+#define bfd_get_file_flags(abfd) ((abfd)->flags) -+#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags) -+#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags) -+#define bfd_my_archive(abfd) ((abfd)->my_archive) -+#define bfd_has_map(abfd) ((abfd)->has_armap) -+ -+#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types) -+#define bfd_usrdata(abfd) ((abfd)->usrdata) -+ -+#define bfd_get_start_address(abfd) ((abfd)->start_address) -+#define bfd_get_symcount(abfd) ((abfd)->symcount) -+#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols) -+#define bfd_count_sections(abfd) ((abfd)->section_count) -+ -+#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount) -+ -+#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char) -+ -+#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE) -+ -+extern bfd_boolean bfd_cache_close -+ (bfd *abfd); -+/* NB: This declaration should match the autogenerated one in libbfd.h. */ -+ -+extern bfd_boolean bfd_cache_close_all (void); -+ -+extern bfd_boolean bfd_record_phdr -+ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma, -+ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **); -+ -+/* Byte swapping routines. 
*/ -+ -+bfd_uint64_t bfd_getb64 (const void *); -+bfd_uint64_t bfd_getl64 (const void *); -+bfd_int64_t bfd_getb_signed_64 (const void *); -+bfd_int64_t bfd_getl_signed_64 (const void *); -+bfd_vma bfd_getb32 (const void *); -+bfd_vma bfd_getl32 (const void *); -+bfd_signed_vma bfd_getb_signed_32 (const void *); -+bfd_signed_vma bfd_getl_signed_32 (const void *); -+bfd_vma bfd_getb16 (const void *); -+bfd_vma bfd_getl16 (const void *); -+bfd_signed_vma bfd_getb_signed_16 (const void *); -+bfd_signed_vma bfd_getl_signed_16 (const void *); -+void bfd_putb64 (bfd_uint64_t, void *); -+void bfd_putl64 (bfd_uint64_t, void *); -+void bfd_putb32 (bfd_vma, void *); -+void bfd_putl32 (bfd_vma, void *); -+void bfd_putb16 (bfd_vma, void *); -+void bfd_putl16 (bfd_vma, void *); -+ -+/* Byte swapping routines which take size and endiannes as arguments. */ -+ -+bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean); -+void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean); -+ -+extern bfd_boolean bfd_section_already_linked_table_init (void); -+extern void bfd_section_already_linked_table_free (void); -+ -+/* Externally visible ECOFF routines. */ -+ -+#if defined(__STDC__) || defined(ALMOST_STDC) -+struct ecoff_debug_info; -+struct ecoff_debug_swap; -+struct ecoff_extr; -+struct bfd_symbol; -+struct bfd_link_info; -+struct bfd_link_hash_entry; -+struct bfd_elf_version_tree; -+#endif -+extern bfd_vma bfd_ecoff_get_gp_value -+ (bfd * abfd); -+extern bfd_boolean bfd_ecoff_set_gp_value -+ (bfd *abfd, bfd_vma gp_value); -+extern bfd_boolean bfd_ecoff_set_regmasks -+ (bfd *abfd, unsigned long gprmask, unsigned long fprmask, -+ unsigned long *cprmask); -+extern void *bfd_ecoff_debug_init -+ (bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); -+extern void bfd_ecoff_debug_free -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_accumulate -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd, -+ struct ecoff_debug_info *input_debug, -+ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_accumulate_other -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd, -+ struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_externals -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, bfd_boolean relocatable, -+ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *), -+ void (*set_index) (struct bfd_symbol *, bfd_size_type)); -+extern bfd_boolean bfd_ecoff_debug_one_external -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, const char *name, -+ struct ecoff_extr *esym); -+extern bfd_size_type bfd_ecoff_debug_size -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap); -+extern bfd_boolean bfd_ecoff_write_debug -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, file_ptr where); -+extern bfd_boolean bfd_ecoff_write_accumulated_debug -+ (void *handle, bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, -+ struct bfd_link_info *info, file_ptr where); -+ -+/* Externally visible ELF routines. 
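A small sketch of the byte-swapping entry points declared above (userspace, assuming a matching <bfd.h>); the values are arbitrary.

#include <stdio.h>
#include <bfd.h>

int
main (void)
{
  unsigned char buf[4];

  /* Store little-endian, then deliberately read the same bytes back
     big-endian: 0x12345678 comes out as 0x78563412.  */
  bfd_putl32 (0x12345678, buf);
  printf ("0x%lx\n", (unsigned long) bfd_getb32 (buf));

  /* The generic routines take the width in bits and a big-endian flag;
     only the low 32 bits of the value are stored here.  */
  bfd_put_bits (0x1122334455667788ULL, buf, 32, TRUE);
  printf ("0x%lx\n", (unsigned long) bfd_get_bits (buf, 32, TRUE));
  return 0;
}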
*/ -+ -+struct bfd_link_needed_list -+{ -+ struct bfd_link_needed_list *next; -+ bfd *by; -+ const char *name; -+}; -+ -+enum dynamic_lib_link_class { -+ DYN_NORMAL = 0, -+ DYN_AS_NEEDED = 1, -+ DYN_DT_NEEDED = 2, -+ DYN_NO_ADD_NEEDED = 4, -+ DYN_NO_NEEDED = 8 -+}; -+ -+extern bfd_boolean bfd_elf_record_link_assignment -+ (struct bfd_link_info *, const char *, bfd_boolean); -+extern struct bfd_link_needed_list *bfd_elf_get_needed_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_elf_get_bfd_needed_list -+ (bfd *, struct bfd_link_needed_list **); -+extern bfd_boolean bfd_elf_size_dynamic_sections -+ (bfd *, const char *, const char *, const char *, const char * const *, -+ struct bfd_link_info *, struct bfd_section **, -+ struct bfd_elf_version_tree *); -+extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr -+ (bfd *, struct bfd_link_info *); -+extern void bfd_elf_set_dt_needed_name -+ (bfd *, const char *); -+extern const char *bfd_elf_get_dt_soname -+ (bfd *); -+extern void bfd_elf_set_dyn_lib_class -+ (bfd *, int); -+extern int bfd_elf_get_dyn_lib_class -+ (bfd *); -+extern struct bfd_link_needed_list *bfd_elf_get_runpath_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_elf_discard_info -+ (bfd *, struct bfd_link_info *); -+extern unsigned int _bfd_elf_default_action_discarded -+ (struct bfd_section *); -+ -+/* Return an upper bound on the number of bytes required to store a -+ copy of ABFD's program header table entries. Return -1 if an error -+ occurs; bfd_get_error will return an appropriate code. */ -+extern long bfd_get_elf_phdr_upper_bound -+ (bfd *abfd); -+ -+/* Copy ABFD's program header table entries to *PHDRS. The entries -+ will be stored as an array of Elf_Internal_Phdr structures, as -+ defined in include/elf/internal.h. To find out how large the -+ buffer needs to be, call bfd_get_elf_phdr_upper_bound. -+ -+ Return the number of program header table entries read, or -1 if an -+ error occurs; bfd_get_error will return an appropriate code. */ -+extern int bfd_get_elf_phdrs -+ (bfd *abfd, void *phdrs); -+ -+/* Create a new BFD as if by bfd_openr. Rather than opening a file, -+ reconstruct an ELF file by reading the segments out of remote memory -+ based on the ELF file header at EHDR_VMA and the ELF program headers it -+ points to. If not null, *LOADBASEP is filled in with the difference -+ between the VMAs from which the segments were read, and the VMAs the -+ file headers (and hence BFD's idea of each section's VMA) put them at. -+ -+ The function TARGET_READ_MEMORY is called to copy LEN bytes from the -+ remote memory at target address VMA into the local buffer at MYADDR; it -+ should return zero on success or an `errno' code on failure. TEMPL must -+ be a BFD for an ELF target with the word size and byte order found in -+ the remote memory. */ -+extern bfd *bfd_elf_bfd_from_remote_memory -+ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep, -+ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len)); -+ -+/* Return the arch_size field of an elf bfd, or -1 if not elf. */ -+extern int bfd_get_arch_size -+ (bfd *); -+ -+/* Return TRUE if address "naturally" sign extends, or -1 if not elf. 
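A minimal sketch of the documented two-step program-header query above (userspace, assuming a matching <bfd.h> and an already opened ELF bfd); dump_phdr_count is a hypothetical helper, and the buffer is treated as opaque because Elf_Internal_Phdr lives in binutils' internal headers.

#include <stdio.h>
#include <stdlib.h>
#include <bfd.h>

static void
dump_phdr_count (bfd *abfd)
{
  long size = bfd_get_elf_phdr_upper_bound (abfd);
  void *phdrs;
  int n;

  if (size < 0)
    return;                       /* not ELF, or error */
  phdrs = malloc (size);
  if (phdrs == NULL)
    return;
  n = bfd_get_elf_phdrs (abfd, phdrs);
  if (n >= 0)
    printf ("%s: %d program header(s)\n", bfd_get_filename (abfd), n);
  free (phdrs);
}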
*/ -+extern int bfd_get_sign_extend_vma -+ (bfd *); -+ -+extern struct bfd_section *_bfd_elf_tls_setup -+ (bfd *, struct bfd_link_info *); -+ -+extern void _bfd_elf_provide_symbol -+ (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *); -+ -+extern void _bfd_elf_provide_section_bound_symbols -+ (struct bfd_link_info *, struct bfd_section *, const char *, const char *); -+ -+extern void _bfd_elf_fix_excluded_sec_syms -+ (bfd *, struct bfd_link_info *); -+ -+extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs -+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, -+ char **); -+ -+/* SunOS shared library support routines for the linker. */ -+ -+extern struct bfd_link_needed_list *bfd_sunos_get_needed_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_sunos_record_link_assignment -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_sunos_size_dynamic_sections -+ (bfd *, struct bfd_link_info *, struct bfd_section **, -+ struct bfd_section **, struct bfd_section **); -+ -+/* Linux shared library support routines for the linker. */ -+ -+extern bfd_boolean bfd_i386linux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_m68klinux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_sparclinux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+ -+/* mmap hacks */ -+ -+struct _bfd_window_internal; -+typedef struct _bfd_window_internal bfd_window_internal; -+ -+typedef struct _bfd_window -+{ -+ /* What the user asked for. */ -+ void *data; -+ bfd_size_type size; -+ /* The actual window used by BFD. Small user-requested read-only -+ regions sharing a page may share a single window into the object -+ file. Read-write versions shouldn't until I've fixed things to -+ keep track of which portions have been claimed by the -+ application; don't want to give the same region back when the -+ application wants two writable copies! */ -+ struct _bfd_window_internal *i; -+} -+bfd_window; -+ -+extern void bfd_init_window -+ (bfd_window *); -+extern void bfd_free_window -+ (bfd_window *); -+extern bfd_boolean bfd_get_file_window -+ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean); -+ -+/* XCOFF support routines for the linker. */ -+ -+extern bfd_boolean bfd_xcoff_link_record_set -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type); -+extern bfd_boolean bfd_xcoff_import_symbol -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma, -+ const char *, const char *, const char *, unsigned int); -+extern bfd_boolean bfd_xcoff_export_symbol -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *); -+extern bfd_boolean bfd_xcoff_link_count_reloc -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_xcoff_record_link_assignment -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_xcoff_size_dynamic_sections -+ (bfd *, struct bfd_link_info *, const char *, const char *, -+ unsigned long, unsigned long, unsigned long, bfd_boolean, -+ int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean); -+extern bfd_boolean bfd_xcoff_link_generate_rtinit -+ (bfd *, const char *, const char *, bfd_boolean); -+ -+/* XCOFF support routines for ar. */ -+extern bfd_boolean bfd_xcoff_ar_archive_set_magic -+ (bfd *, char *); -+ -+/* Externally visible COFF routines. 
*/ -+ -+#if defined(__STDC__) || defined(ALMOST_STDC) -+struct internal_syment; -+union internal_auxent; -+#endif -+ -+extern bfd_boolean bfd_coff_get_syment -+ (bfd *, struct bfd_symbol *, struct internal_syment *); -+ -+extern bfd_boolean bfd_coff_get_auxent -+ (bfd *, struct bfd_symbol *, int, union internal_auxent *); -+ -+extern bfd_boolean bfd_coff_set_symbol_class -+ (bfd *, struct bfd_symbol *, unsigned int); -+ -+extern bfd_boolean bfd_m68k_coff_create_embedded_relocs -+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **); -+ -+/* ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_arm_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_arm_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+extern bfd_boolean bfd_arm_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+/* PE ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_arm_pe_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_arm_pe_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+/* ELF ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_elf32_arm_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+void bfd_elf32_arm_set_target_relocs -+ (struct bfd_link_info *, int, char *, int, int); -+ -+extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd -+ (bfd *, struct bfd_link_info *); -+ -+/* ELF ARM mapping symbol support */ -+extern bfd_boolean bfd_is_arm_mapping_symbol_name -+ (const char * name); -+ -+/* ARM Note section processing. */ -+extern bfd_boolean bfd_arm_merge_machines -+ (bfd *, bfd *); -+ -+extern bfd_boolean bfd_arm_update_notes -+ (bfd *, const char *); -+ -+extern unsigned int bfd_arm_get_mach_from_notes -+ (bfd *, const char *); -+ -+/* TI COFF load page support. */ -+extern void bfd_ticoff_set_section_load_page -+ (struct bfd_section *, int); -+ -+extern int bfd_ticoff_get_section_load_page -+ (struct bfd_section *); -+ -+/* H8/300 functions. */ -+extern bfd_vma bfd_h8300_pad_address -+ (bfd *, bfd_vma); -+ -+/* IA64 Itanium code generation. Called from linker. */ -+extern void bfd_elf32_ia64_after_parse -+ (int); -+ -+extern void bfd_elf64_ia64_after_parse -+ (int); -+ -+/* This structure is used for a comdat section, as in PE. A comdat -+ section is associated with a particular symbol. When the linker -+ sees a comdat section, it keeps only one of the sections with a -+ given name and associated with a given symbol. */ -+ -+struct coff_comdat_info -+{ -+ /* The name of the symbol associated with a comdat section. */ -+ const char *name; -+ -+ /* The local symbol table index of the symbol associated with a -+ comdat section. This is only meaningful to the object file format -+ specific code; it is not an index into the list returned by -+ bfd_canonicalize_symtab. */ -+ long symbol; -+}; -+ -+extern struct coff_comdat_info *bfd_coff_get_comdat_section -+ (bfd *, struct bfd_section *); -+ -+/* Extracted from init.c. */ -+void bfd_init (void); -+ -+/* Extracted from opncls.c. 
*/ -+bfd *bfd_fopen (const char *filename, const char *target, -+ const char *mode, int fd); -+ -+bfd *bfd_openr (const char *filename, const char *target); -+ -+bfd *bfd_fdopenr (const char *filename, const char *target, int fd); -+ -+bfd *bfd_openstreamr (const char *, const char *, void *); -+ -+bfd *bfd_openr_iovec (const char *filename, const char *target, -+ void *(*open) (struct bfd *nbfd, -+ void *open_closure), -+ void *open_closure, -+ file_ptr (*pread) (struct bfd *nbfd, -+ void *stream, -+ void *buf, -+ file_ptr nbytes, -+ file_ptr offset), -+ int (*close) (struct bfd *nbfd, -+ void *stream)); -+ -+bfd *bfd_openw (const char *filename, const char *target); -+ -+bfd_boolean bfd_close (bfd *abfd); -+ -+bfd_boolean bfd_close_all_done (bfd *); -+ -+bfd *bfd_create (const char *filename, bfd *templ); -+ -+bfd_boolean bfd_make_writable (bfd *abfd); -+ -+bfd_boolean bfd_make_readable (bfd *abfd); -+ -+unsigned long bfd_calc_gnu_debuglink_crc32 -+ (unsigned long crc, const unsigned char *buf, bfd_size_type len); -+ -+char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir); -+ -+struct bfd_section *bfd_create_gnu_debuglink_section -+ (bfd *abfd, const char *filename); -+ -+bfd_boolean bfd_fill_in_gnu_debuglink_section -+ (bfd *abfd, struct bfd_section *sect, const char *filename); -+ -+/* Extracted from libbfd.c. */ -+ -+/* Byte swapping macros for user section data. */ -+ -+#define bfd_put_8(abfd, val, ptr) \ -+ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff)) -+#define bfd_put_signed_8 \ -+ bfd_put_8 -+#define bfd_get_8(abfd, ptr) \ -+ (*(unsigned char *) (ptr) & 0xff) -+#define bfd_get_signed_8(abfd, ptr) \ -+ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80) -+ -+#define bfd_put_16(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx16, ((val),(ptr))) -+#define bfd_put_signed_16 \ -+ bfd_put_16 -+#define bfd_get_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx16, (ptr)) -+#define bfd_get_signed_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_16, (ptr)) -+ -+#define bfd_put_32(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx32, ((val),(ptr))) -+#define bfd_put_signed_32 \ -+ bfd_put_32 -+#define bfd_get_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx32, (ptr)) -+#define bfd_get_signed_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_32, (ptr)) -+ -+#define bfd_put_64(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx64, ((val), (ptr))) -+#define bfd_put_signed_64 \ -+ bfd_put_64 -+#define bfd_get_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx64, (ptr)) -+#define bfd_get_signed_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_64, (ptr)) -+ -+#define bfd_get(bits, abfd, ptr) \ -+ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \ -+ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \ -+ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \ -+ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \ -+ : (abort (), (bfd_vma) - 1)) -+ -+#define bfd_put(bits, abfd, val, ptr) \ -+ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \ -+ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \ -+ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \ -+ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \ -+ : (abort (), (void) 0)) -+ -+ -+/* Byte swapping macros for file header data. 
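A minimal open/inspect/close sketch against the opncls.c entry points above (userspace, assuming a matching <bfd.h>); bfd_check_format is declared in a later part of this header and is assumed here.

#include <stdio.h>
#include <bfd.h>

int
main (int argc, char **argv)
{
  bfd *abfd;

  if (argc < 2)
    return 1;
  bfd_init ();
  abfd = bfd_openr (argv[1], NULL);        /* NULL selects the default target */
  if (abfd == NULL)
    return 1;
  if (bfd_check_format (abfd, bfd_object))
    printf ("%s: object file, target %s\n",
            bfd_get_filename (abfd), bfd_get_target (abfd));
  bfd_close (abfd);
  return 0;
}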
*/ -+ -+#define bfd_h_put_8(abfd, val, ptr) \ -+ bfd_put_8 (abfd, val, ptr) -+#define bfd_h_put_signed_8(abfd, val, ptr) \ -+ bfd_put_8 (abfd, val, ptr) -+#define bfd_h_get_8(abfd, ptr) \ -+ bfd_get_8 (abfd, ptr) -+#define bfd_h_get_signed_8(abfd, ptr) \ -+ bfd_get_signed_8 (abfd, ptr) -+ -+#define bfd_h_put_16(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx16, (val, ptr)) -+#define bfd_h_put_signed_16 \ -+ bfd_h_put_16 -+#define bfd_h_get_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx16, (ptr)) -+#define bfd_h_get_signed_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr)) -+ -+#define bfd_h_put_32(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx32, (val, ptr)) -+#define bfd_h_put_signed_32 \ -+ bfd_h_put_32 -+#define bfd_h_get_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx32, (ptr)) -+#define bfd_h_get_signed_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr)) -+ -+#define bfd_h_put_64(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx64, (val, ptr)) -+#define bfd_h_put_signed_64 \ -+ bfd_h_put_64 -+#define bfd_h_get_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx64, (ptr)) -+#define bfd_h_get_signed_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr)) -+ -+/* Aliases for the above, which should eventually go away. */ -+ -+#define H_PUT_64 bfd_h_put_64 -+#define H_PUT_32 bfd_h_put_32 -+#define H_PUT_16 bfd_h_put_16 -+#define H_PUT_8 bfd_h_put_8 -+#define H_PUT_S64 bfd_h_put_signed_64 -+#define H_PUT_S32 bfd_h_put_signed_32 -+#define H_PUT_S16 bfd_h_put_signed_16 -+#define H_PUT_S8 bfd_h_put_signed_8 -+#define H_GET_64 bfd_h_get_64 -+#define H_GET_32 bfd_h_get_32 -+#define H_GET_16 bfd_h_get_16 -+#define H_GET_8 bfd_h_get_8 -+#define H_GET_S64 bfd_h_get_signed_64 -+#define H_GET_S32 bfd_h_get_signed_32 -+#define H_GET_S16 bfd_h_get_signed_16 -+#define H_GET_S8 bfd_h_get_signed_8 -+ -+ -+/* Extracted from bfdio.c. */ -+long bfd_get_mtime (bfd *abfd); -+ -+long bfd_get_size (bfd *abfd); -+ -+/* Extracted from bfdwin.c. */ -+/* Extracted from section.c. */ -+typedef struct bfd_section -+{ -+ /* The name of the section; the name isn't a copy, the pointer is -+ the same as that passed to bfd_make_section. */ -+ const char *name; -+ -+ /* A unique sequence number. */ -+ int id; -+ -+ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */ -+ int index; -+ -+ /* The next section in the list belonging to the BFD, or NULL. */ -+ struct bfd_section *next; -+ -+ /* The previous section in the list belonging to the BFD, or NULL. */ -+ struct bfd_section *prev; -+ -+ /* The field flags contains attributes of the section. Some -+ flags are read in from the object file, and some are -+ synthesized from other information. */ -+ flagword flags; -+ -+#define SEC_NO_FLAGS 0x000 -+ -+ /* Tells the OS to allocate space for this section when loading. -+ This is clear for a section containing debug information only. */ -+#define SEC_ALLOC 0x001 -+ -+ /* Tells the OS to load the section from the file when loading. -+ This is clear for a .bss section. */ -+#define SEC_LOAD 0x002 -+ -+ /* The section contains data still to be relocated, so there is -+ some relocation information too. */ -+#define SEC_RELOC 0x004 -+ -+ /* A signal to the OS that the section contains read only data. */ -+#define SEC_READONLY 0x008 -+ -+ /* The section contains code only. */ -+#define SEC_CODE 0x010 -+ -+ /* The section contains data only. */ -+#define SEC_DATA 0x020 -+ -+ /* The section will reside in ROM. */ -+#define SEC_ROM 0x040 -+ -+ /* The section contains constructor information. 
This section -+ type is used by the linker to create lists of constructors and -+ destructors used by <>. When a back end sees a symbol -+ which should be used in a constructor list, it creates a new -+ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches -+ the symbol to it, and builds a relocation. To build the lists -+ of constructors, all the linker has to do is catenate all the -+ sections called <<__CTOR_LIST__>> and relocate the data -+ contained within - exactly the operations it would peform on -+ standard data. */ -+#define SEC_CONSTRUCTOR 0x080 -+ -+ /* The section has contents - a data section could be -+ <> | <>; a debug section could be -+ <> */ -+#define SEC_HAS_CONTENTS 0x100 -+ -+ /* An instruction to the linker to not output the section -+ even if it has information which would normally be written. */ -+#define SEC_NEVER_LOAD 0x200 -+ -+ /* The section contains thread local data. */ -+#define SEC_THREAD_LOCAL 0x400 -+ -+ /* The section has GOT references. This flag is only for the -+ linker, and is currently only used by the elf32-hppa back end. -+ It will be set if global offset table references were detected -+ in this section, which indicate to the linker that the section -+ contains PIC code, and must be handled specially when doing a -+ static link. */ -+#define SEC_HAS_GOT_REF 0x800 -+ -+ /* The section contains common symbols (symbols may be defined -+ multiple times, the value of a symbol is the amount of -+ space it requires, and the largest symbol value is the one -+ used). Most targets have exactly one of these (which we -+ translate to bfd_com_section_ptr), but ECOFF has two. */ -+#define SEC_IS_COMMON 0x1000 -+ -+ /* The section contains only debugging information. For -+ example, this is set for ELF .debug and .stab sections. -+ strip tests this flag to see if a section can be -+ discarded. */ -+#define SEC_DEBUGGING 0x2000 -+ -+ /* The contents of this section are held in memory pointed to -+ by the contents field. This is checked by bfd_get_section_contents, -+ and the data is retrieved from memory if appropriate. */ -+#define SEC_IN_MEMORY 0x4000 -+ -+ /* The contents of this section are to be excluded by the -+ linker for executable and shared objects unless those -+ objects are to be further relocated. */ -+#define SEC_EXCLUDE 0x8000 -+ -+ /* The contents of this section are to be sorted based on the sum of -+ the symbol and addend values specified by the associated relocation -+ entries. Entries without associated relocation entries will be -+ appended to the end of the section in an unspecified order. */ -+#define SEC_SORT_ENTRIES 0x10000 -+ -+ /* When linking, duplicate sections of the same name should be -+ discarded, rather than being combined into a single section as -+ is usually done. This is similar to how common symbols are -+ handled. See SEC_LINK_DUPLICATES below. */ -+#define SEC_LINK_ONCE 0x20000 -+ -+ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker -+ should handle duplicate sections. */ -+#define SEC_LINK_DUPLICATES 0x40000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that duplicate -+ sections with the same name should simply be discarded. */ -+#define SEC_LINK_DUPLICATES_DISCARD 0x0 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if there are any duplicate sections, although -+ it should still only link one copy. 
*/ -+#define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if any duplicate sections are a different size. */ -+#define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if any duplicate sections contain different -+ contents. */ -+#define SEC_LINK_DUPLICATES_SAME_CONTENTS \ -+ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE) -+ -+ /* This section was created by the linker as part of dynamic -+ relocation or other arcane processing. It is skipped when -+ going through the first-pass output, trusting that someone -+ else up the line will take care of it later. */ -+#define SEC_LINKER_CREATED 0x200000 -+ -+ /* This section should not be subject to garbage collection. */ -+#define SEC_KEEP 0x400000 -+ -+ /* This section contains "short" data, and should be placed -+ "near" the GP. */ -+#define SEC_SMALL_DATA 0x800000 -+ -+ /* Attempt to merge identical entities in the section. -+ Entity size is given in the entsize field. */ -+#define SEC_MERGE 0x1000000 -+ -+ /* If given with SEC_MERGE, entities to merge are zero terminated -+ strings where entsize specifies character size instead of fixed -+ size entries. */ -+#define SEC_STRINGS 0x2000000 -+ -+ /* This section contains data about section groups. */ -+#define SEC_GROUP 0x4000000 -+ -+ /* The section is a COFF shared library section. This flag is -+ only for the linker. If this type of section appears in -+ the input file, the linker must copy it to the output file -+ without changing the vma or size. FIXME: Although this -+ was originally intended to be general, it really is COFF -+ specific (and the flag was renamed to indicate this). It -+ might be cleaner to have some more general mechanism to -+ allow the back end to control what the linker does with -+ sections. */ -+#define SEC_COFF_SHARED_LIBRARY 0x10000000 -+ -+ /* This section contains data which may be shared with other -+ executables or shared objects. This is for COFF only. */ -+#define SEC_COFF_SHARED 0x20000000 -+ -+ /* When a section with this flag is being linked, then if the size of -+ the input section is less than a page, it should not cross a page -+ boundary. If the size of the input section is one page or more, -+ it should be aligned on a page boundary. This is for TI -+ TMS320C54X only. */ -+#define SEC_TIC54X_BLOCK 0x40000000 -+ -+ /* Conditionally link this section; do not link if there are no -+ references found to any symbol in the section. This is for TI -+ TMS320C54X only. */ -+#define SEC_TIC54X_CLINK 0x80000000 -+ -+ /* End of section flags. */ -+ -+ /* Some internal packed boolean fields. */ -+ -+ /* See the vma field. */ -+ unsigned int user_set_vma : 1; -+ -+ /* A mark flag used by some of the linker backends. */ -+ unsigned int linker_mark : 1; -+ -+ /* Another mark flag used by some of the linker backends. Set for -+ output sections that have an input section. */ -+ unsigned int linker_has_input : 1; -+ -+ /* Mark flags used by some linker backends for garbage collection. */ -+ unsigned int gc_mark : 1; -+ unsigned int gc_mark_from_eh : 1; -+ -+ /* The following flags are used by the ELF linker. */ -+ -+ /* Mark sections which have been allocated to segments. */ -+ unsigned int segment_mark : 1; -+ -+ /* Type of sec_info information. 
*/ -+ unsigned int sec_info_type:3; -+#define ELF_INFO_TYPE_NONE 0 -+#define ELF_INFO_TYPE_STABS 1 -+#define ELF_INFO_TYPE_MERGE 2 -+#define ELF_INFO_TYPE_EH_FRAME 3 -+#define ELF_INFO_TYPE_JUST_SYMS 4 -+ -+ /* Nonzero if this section uses RELA relocations, rather than REL. */ -+ unsigned int use_rela_p:1; -+ -+ /* Bits used by various backends. The generic code doesn't touch -+ these fields. */ -+ -+ /* Nonzero if this section has TLS related relocations. */ -+ unsigned int has_tls_reloc:1; -+ -+ /* Nonzero if this section has a gp reloc. */ -+ unsigned int has_gp_reloc:1; -+ -+ /* Nonzero if this section needs the relax finalize pass. */ -+ unsigned int need_finalize_relax:1; -+ -+ /* Whether relocations have been processed. */ -+ unsigned int reloc_done : 1; -+ -+ /* End of internal packed boolean fields. */ -+ -+ /* The virtual memory address of the section - where it will be -+ at run time. The symbols are relocated against this. The -+ user_set_vma flag is maintained by bfd; if it's not set, the -+ backend can assign addresses (for example, in <>, where -+ the default address for <<.data>> is dependent on the specific -+ target and various flags). */ -+ bfd_vma vma; -+ -+ /* The load address of the section - where it would be in a -+ rom image; really only used for writing section header -+ information. */ -+ bfd_vma lma; -+ -+ /* The size of the section in octets, as it will be output. -+ Contains a value even if the section has no contents (e.g., the -+ size of <<.bss>>). */ -+ bfd_size_type size; -+ -+ /* For input sections, the original size on disk of the section, in -+ octets. This field is used by the linker relaxation code. It is -+ currently only set for sections where the linker relaxation scheme -+ doesn't cache altered section and reloc contents (stabs, eh_frame, -+ SEC_MERGE, some coff relaxing targets), and thus the original size -+ needs to be kept to read the section multiple times. -+ For output sections, rawsize holds the section size calculated on -+ a previous linker relaxation pass. */ -+ bfd_size_type rawsize; -+ -+ /* If this section is going to be output, then this value is the -+ offset in *bytes* into the output section of the first byte in the -+ input section (byte ==> smallest addressable unit on the -+ target). In most cases, if this was going to start at the -+ 100th octet (8-bit quantity) in the output section, this value -+ would be 100. However, if the target byte size is 16 bits -+ (bfd_octets_per_byte is "2"), this value would be 50. */ -+ bfd_vma output_offset; -+ -+ /* The output section through which to map on output. */ -+ struct bfd_section *output_section; -+ -+ /* The alignment requirement of the section, as an exponent of 2 - -+ e.g., 3 aligns to 2^3 (or 8). */ -+ unsigned int alignment_power; -+ -+ /* If an input section, a pointer to a vector of relocation -+ records for the data in this section. */ -+ struct reloc_cache_entry *relocation; -+ -+ /* If an output section, a pointer to a vector of pointers to -+ relocation records for the data in this section. */ -+ struct reloc_cache_entry **orelocation; -+ -+ /* The number of relocation records in one of the above. */ -+ unsigned reloc_count; -+ -+ /* Information below is back end specific - and not always used -+ or updated. */ -+ -+ /* File position of section data. */ -+ file_ptr filepos; -+ -+ /* File position of relocation info. */ -+ file_ptr rel_filepos; -+ -+ /* File position of line data. */ -+ file_ptr line_filepos; -+ -+ /* Pointer to data for applications. 
*/ -+ void *userdata; -+ -+ /* If the SEC_IN_MEMORY flag is set, this points to the actual -+ contents. */ -+ unsigned char *contents; -+ -+ /* Attached line number information. */ -+ alent *lineno; -+ -+ /* Number of line number records. */ -+ unsigned int lineno_count; -+ -+ /* Entity size for merging purposes. */ -+ unsigned int entsize; -+ -+ /* Points to the kept section if this section is a link-once section, -+ and is discarded. */ -+ struct bfd_section *kept_section; -+ -+ /* When a section is being output, this value changes as more -+ linenumbers are written out. */ -+ file_ptr moving_line_filepos; -+ -+ /* What the section number is in the target world. */ -+ int target_index; -+ -+ void *used_by_bfd; -+ -+ /* If this is a constructor section then here is a list of the -+ relocations created to relocate items within it. */ -+ struct relent_chain *constructor_chain; -+ -+ /* The BFD which owns the section. */ -+ bfd *owner; -+ -+ /* A symbol which points at this section only. */ -+ struct bfd_symbol *symbol; -+ struct bfd_symbol **symbol_ptr_ptr; -+ -+ /* Early in the link process, map_head and map_tail are used to build -+ a list of input sections attached to an output section. Later, -+ output sections use these fields for a list of bfd_link_order -+ structs. */ -+ union { -+ struct bfd_link_order *link_order; -+ struct bfd_section *s; -+ } map_head, map_tail; -+} asection; -+ -+/* These sections are global, and are managed by BFD. The application -+ and target back end are not permitted to change the values in -+ these sections. New code should use the section_ptr macros rather -+ than referring directly to the const sections. The const sections -+ may eventually vanish. */ -+#define BFD_ABS_SECTION_NAME "*ABS*" -+#define BFD_UND_SECTION_NAME "*UND*" -+#define BFD_COM_SECTION_NAME "*COM*" -+#define BFD_IND_SECTION_NAME "*IND*" -+ -+/* The absolute section. */ -+extern asection bfd_abs_section; -+#define bfd_abs_section_ptr ((asection *) &bfd_abs_section) -+#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr) -+/* Pointer to the undefined section. */ -+extern asection bfd_und_section; -+#define bfd_und_section_ptr ((asection *) &bfd_und_section) -+#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr) -+/* Pointer to the common section. */ -+extern asection bfd_com_section; -+#define bfd_com_section_ptr ((asection *) &bfd_com_section) -+/* Pointer to the indirect section. */ -+extern asection bfd_ind_section; -+#define bfd_ind_section_ptr ((asection *) &bfd_ind_section) -+#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr) -+ -+#define bfd_is_const_section(SEC) \ -+ ( ((SEC) == bfd_abs_section_ptr) \ -+ || ((SEC) == bfd_und_section_ptr) \ -+ || ((SEC) == bfd_com_section_ptr) \ -+ || ((SEC) == bfd_ind_section_ptr)) -+ -+extern const struct bfd_symbol * const bfd_abs_symbol; -+extern const struct bfd_symbol * const bfd_com_symbol; -+extern const struct bfd_symbol * const bfd_und_symbol; -+extern const struct bfd_symbol * const bfd_ind_symbol; -+ -+/* Macros to handle insertion and deletion of a bfd's sections. These -+ only handle the list pointers, ie. do not adjust section_count, -+ target_index etc. 
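A small sketch that walks the section list using the fields and flags defined above (userspace, assuming a matching <bfd.h>); the sections list head in struct bfd is the one referenced by the list macros that follow, and list_alloc_sections is a hypothetical helper.

#include <stdio.h>
#include <bfd.h>

static void
list_alloc_sections (bfd *abfd)
{
  asection *s;

  for (s = abfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0)
      printf ("%-20s vma 0x%08lx size 0x%lx%s\n",
              s->name,
              (unsigned long) s->vma,
              (unsigned long) s->size,
              (s->flags & SEC_LOAD) != 0 ? "" : "  (not loaded from file)");
}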
*/ -+#define bfd_section_list_remove(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ asection *_next = _s->next; \ -+ asection *_prev = _s->prev; \ -+ if (_prev) \ -+ _prev->next = _next; \ -+ else \ -+ (ABFD)->sections = _next; \ -+ if (_next) \ -+ _next->prev = _prev; \ -+ else \ -+ (ABFD)->section_last = _prev; \ -+ } \ -+ while (0) -+#define bfd_section_list_append(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ bfd *_abfd = ABFD; \ -+ _s->next = NULL; \ -+ if (_abfd->section_last) \ -+ { \ -+ _s->prev = _abfd->section_last; \ -+ _abfd->section_last->next = _s; \ -+ } \ -+ else \ -+ { \ -+ _s->prev = NULL; \ -+ _abfd->sections = _s; \ -+ } \ -+ _abfd->section_last = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_prepend(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ bfd *_abfd = ABFD; \ -+ _s->prev = NULL; \ -+ if (_abfd->sections) \ -+ { \ -+ _s->next = _abfd->sections; \ -+ _abfd->sections->prev = _s; \ -+ } \ -+ else \ -+ { \ -+ _s->next = NULL; \ -+ _abfd->section_last = _s; \ -+ } \ -+ _abfd->sections = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_insert_after(ABFD, A, S) \ -+ do \ -+ { \ -+ asection *_a = A; \ -+ asection *_s = S; \ -+ asection *_next = _a->next; \ -+ _s->next = _next; \ -+ _s->prev = _a; \ -+ _a->next = _s; \ -+ if (_next) \ -+ _next->prev = _s; \ -+ else \ -+ (ABFD)->section_last = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_insert_before(ABFD, B, S) \ -+ do \ -+ { \ -+ asection *_b = B; \ -+ asection *_s = S; \ -+ asection *_prev = _b->prev; \ -+ _s->prev = _prev; \ -+ _s->next = _b; \ -+ _b->prev = _s; \ -+ if (_prev) \ -+ _prev->next = _s; \ -+ else \ -+ (ABFD)->sections = _s; \ -+ } \ -+ while (0) -+#define bfd_section_removed_from_list(ABFD, S) \ -+ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S)) -+ -+void bfd_section_list_clear (bfd *); -+ -+asection *bfd_get_section_by_name (bfd *abfd, const char *name); -+ -+asection *bfd_get_section_by_name_if -+ (bfd *abfd, -+ const char *name, -+ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+char *bfd_get_unique_section_name -+ (bfd *abfd, const char *templat, int *count); -+ -+asection *bfd_make_section_old_way (bfd *abfd, const char *name); -+ -+asection *bfd_make_section_anyway_with_flags -+ (bfd *abfd, const char *name, flagword flags); -+ -+asection *bfd_make_section_anyway (bfd *abfd, const char *name); -+ -+asection *bfd_make_section_with_flags -+ (bfd *, const char *name, flagword flags); -+ -+asection *bfd_make_section (bfd *, const char *name); -+ -+bfd_boolean bfd_set_section_flags -+ (bfd *abfd, asection *sec, flagword flags); -+ -+void bfd_map_over_sections -+ (bfd *abfd, -+ void (*func) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+asection *bfd_sections_find_if -+ (bfd *abfd, -+ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+bfd_boolean bfd_set_section_size -+ (bfd *abfd, asection *sec, bfd_size_type val); -+ -+bfd_boolean bfd_set_section_contents -+ (bfd *abfd, asection *section, const void *data, -+ file_ptr offset, bfd_size_type count); -+ -+bfd_boolean bfd_get_section_contents -+ (bfd *abfd, asection *section, void *location, file_ptr offset, -+ bfd_size_type count); -+ -+bfd_boolean bfd_malloc_and_get_section -+ (bfd *abfd, asection *section, bfd_byte **buf); -+ -+bfd_boolean bfd_copy_private_section_data -+ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec); -+ -+#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \ -+ BFD_SEND 
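A sketch of bfd_map_over_sections combined with bfd_malloc_and_get_section, both declared above (userspace, assuming a matching <bfd.h>); read_one is a hypothetical callback name and the returned buffer is released with free.

#include <stdio.h>
#include <stdlib.h>
#include <bfd.h>

/* Callback for bfd_map_over_sections: pull in the contents of every
   section that actually has any.  */
static void
read_one (bfd *abfd, asection *sect, void *obj)
{
  bfd_byte *buf;

  (void) obj;
  if ((bfd_get_section_flags (abfd, sect) & SEC_HAS_CONTENTS) == 0)
    return;
  if (bfd_malloc_and_get_section (abfd, sect, &buf))
    {
      printf ("%s: %lu bytes\n",
              bfd_section_name (abfd, sect),
              (unsigned long) bfd_section_size (abfd, sect));
      free (buf);
    }
}

/* Usage: bfd_map_over_sections (abfd, read_one, NULL); */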
(obfd, _bfd_copy_private_section_data, \ -+ (ibfd, isection, obfd, osection)) -+bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec); -+ -+bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group); -+ -+/* Extracted from archures.c. */ -+enum bfd_architecture -+{ -+ bfd_arch_unknown, /* File arch not known. */ -+ bfd_arch_obscure, /* Arch known, not one of these. */ -+ bfd_arch_m68k, /* Motorola 68xxx */ -+#define bfd_mach_m68000 1 -+#define bfd_mach_m68008 2 -+#define bfd_mach_m68010 3 -+#define bfd_mach_m68020 4 -+#define bfd_mach_m68030 5 -+#define bfd_mach_m68040 6 -+#define bfd_mach_m68060 7 -+#define bfd_mach_cpu32 8 -+#define bfd_mach_mcf5200 9 -+#define bfd_mach_mcf5206e 10 -+#define bfd_mach_mcf5307 11 -+#define bfd_mach_mcf5407 12 -+#define bfd_mach_mcf528x 13 -+#define bfd_mach_mcfv4e 14 -+#define bfd_mach_mcf521x 15 -+#define bfd_mach_mcf5249 16 -+#define bfd_mach_mcf547x 17 -+#define bfd_mach_mcf548x 18 -+ bfd_arch_vax, /* DEC Vax */ -+ bfd_arch_i960, /* Intel 960 */ -+ /* The order of the following is important. -+ lower number indicates a machine type that -+ only accepts a subset of the instructions -+ available to machines with higher numbers. -+ The exception is the "ca", which is -+ incompatible with all other machines except -+ "core". */ -+ -+#define bfd_mach_i960_core 1 -+#define bfd_mach_i960_ka_sa 2 -+#define bfd_mach_i960_kb_sb 3 -+#define bfd_mach_i960_mc 4 -+#define bfd_mach_i960_xa 5 -+#define bfd_mach_i960_ca 6 -+#define bfd_mach_i960_jx 7 -+#define bfd_mach_i960_hx 8 -+ -+ bfd_arch_or32, /* OpenRISC 32 */ -+ -+ bfd_arch_a29k, /* AMD 29000 */ -+ bfd_arch_sparc, /* SPARC */ -+#define bfd_mach_sparc 1 -+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */ -+#define bfd_mach_sparc_sparclet 2 -+#define bfd_mach_sparc_sparclite 3 -+#define bfd_mach_sparc_v8plus 4 -+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */ -+#define bfd_mach_sparc_sparclite_le 6 -+#define bfd_mach_sparc_v9 7 -+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */ -+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */ -+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */ -+/* Nonzero if MACH has the v9 instruction set. */ -+#define bfd_mach_sparc_v9_p(mach) \ -+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \ -+ && (mach) != bfd_mach_sparc_sparclite_le) -+/* Nonzero if MACH is a 64 bit sparc architecture. 
*/ -+#define bfd_mach_sparc_64bit_p(mach) \ -+ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb) -+ bfd_arch_mips, /* MIPS Rxxxx */ -+#define bfd_mach_mips3000 3000 -+#define bfd_mach_mips3900 3900 -+#define bfd_mach_mips4000 4000 -+#define bfd_mach_mips4010 4010 -+#define bfd_mach_mips4100 4100 -+#define bfd_mach_mips4111 4111 -+#define bfd_mach_mips4120 4120 -+#define bfd_mach_mips4300 4300 -+#define bfd_mach_mips4400 4400 -+#define bfd_mach_mips4600 4600 -+#define bfd_mach_mips4650 4650 -+#define bfd_mach_mips5000 5000 -+#define bfd_mach_mips5400 5400 -+#define bfd_mach_mips5500 5500 -+#define bfd_mach_mips6000 6000 -+#define bfd_mach_mips7000 7000 -+#define bfd_mach_mips8000 8000 -+#define bfd_mach_mips9000 9000 -+#define bfd_mach_mips10000 10000 -+#define bfd_mach_mips12000 12000 -+#define bfd_mach_mips16 16 -+#define bfd_mach_mips5 5 -+#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */ -+#define bfd_mach_mipsisa32 32 -+#define bfd_mach_mipsisa32r2 33 -+#define bfd_mach_mipsisa64 64 -+#define bfd_mach_mipsisa64r2 65 -+ bfd_arch_i386, /* Intel 386 */ -+#define bfd_mach_i386_i386 1 -+#define bfd_mach_i386_i8086 2 -+#define bfd_mach_i386_i386_intel_syntax 3 -+#define bfd_mach_x86_64 64 -+#define bfd_mach_x86_64_intel_syntax 65 -+ bfd_arch_we32k, /* AT&T WE32xxx */ -+ bfd_arch_tahoe, /* CCI/Harris Tahoe */ -+ bfd_arch_i860, /* Intel 860 */ -+ bfd_arch_i370, /* IBM 360/370 Mainframes */ -+ bfd_arch_romp, /* IBM ROMP PC/RT */ -+ bfd_arch_alliant, /* Alliant */ -+ bfd_arch_convex, /* Convex */ -+ bfd_arch_m88k, /* Motorola 88xxx */ -+ bfd_arch_m98k, /* Motorola 98xxx */ -+ bfd_arch_pyramid, /* Pyramid Technology */ -+ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */ -+#define bfd_mach_h8300 1 -+#define bfd_mach_h8300h 2 -+#define bfd_mach_h8300s 3 -+#define bfd_mach_h8300hn 4 -+#define bfd_mach_h8300sn 5 -+#define bfd_mach_h8300sx 6 -+#define bfd_mach_h8300sxn 7 -+ bfd_arch_pdp11, /* DEC PDP-11 */ -+ bfd_arch_powerpc, /* PowerPC */ -+#define bfd_mach_ppc 32 -+#define bfd_mach_ppc64 64 -+#define bfd_mach_ppc_403 403 -+#define bfd_mach_ppc_403gc 4030 -+#define bfd_mach_ppc_505 505 -+#define bfd_mach_ppc_601 601 -+#define bfd_mach_ppc_602 602 -+#define bfd_mach_ppc_603 603 -+#define bfd_mach_ppc_ec603e 6031 -+#define bfd_mach_ppc_604 604 -+#define bfd_mach_ppc_620 620 -+#define bfd_mach_ppc_630 630 -+#define bfd_mach_ppc_750 750 -+#define bfd_mach_ppc_860 860 -+#define bfd_mach_ppc_a35 35 -+#define bfd_mach_ppc_rs64ii 642 -+#define bfd_mach_ppc_rs64iii 643 -+#define bfd_mach_ppc_7400 7400 -+#define bfd_mach_ppc_e500 500 -+ bfd_arch_rs6000, /* IBM RS/6000 */ -+#define bfd_mach_rs6k 6000 -+#define bfd_mach_rs6k_rs1 6001 -+#define bfd_mach_rs6k_rsc 6003 -+#define bfd_mach_rs6k_rs2 6002 -+ bfd_arch_hppa, /* HP PA RISC */ -+#define bfd_mach_hppa10 10 -+#define bfd_mach_hppa11 11 -+#define bfd_mach_hppa20 20 -+#define bfd_mach_hppa20w 25 -+ bfd_arch_d10v, /* Mitsubishi D10V */ -+#define bfd_mach_d10v 1 -+#define bfd_mach_d10v_ts2 2 -+#define bfd_mach_d10v_ts3 3 -+ bfd_arch_d30v, /* Mitsubishi D30V */ -+ bfd_arch_dlx, /* DLX */ -+ bfd_arch_m68hc11, /* Motorola 68HC11 */ -+ bfd_arch_m68hc12, /* Motorola 68HC12 */ -+#define bfd_mach_m6812_default 0 -+#define bfd_mach_m6812 1 -+#define bfd_mach_m6812s 2 -+ bfd_arch_z8k, /* Zilog Z8000 */ -+#define bfd_mach_z8001 1 -+#define bfd_mach_z8002 2 -+ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */ -+ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */ -+#define bfd_mach_sh 1 -+#define bfd_mach_sh2 0x20 
-+#define bfd_mach_sh_dsp 0x2d -+#define bfd_mach_sh2a 0x2a -+#define bfd_mach_sh2a_nofpu 0x2b -+#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1 -+#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2 -+#define bfd_mach_sh2a_or_sh4 0x2a3 -+#define bfd_mach_sh2a_or_sh3e 0x2a4 -+#define bfd_mach_sh2e 0x2e -+#define bfd_mach_sh3 0x30 -+#define bfd_mach_sh3_nommu 0x31 -+#define bfd_mach_sh3_dsp 0x3d -+#define bfd_mach_sh3e 0x3e -+#define bfd_mach_sh4 0x40 -+#define bfd_mach_sh4_nofpu 0x41 -+#define bfd_mach_sh4_nommu_nofpu 0x42 -+#define bfd_mach_sh4a 0x4a -+#define bfd_mach_sh4a_nofpu 0x4b -+#define bfd_mach_sh4al_dsp 0x4d -+#define bfd_mach_sh5 0x50 -+ bfd_arch_alpha, /* Dec Alpha */ -+#define bfd_mach_alpha_ev4 0x10 -+#define bfd_mach_alpha_ev5 0x20 -+#define bfd_mach_alpha_ev6 0x30 -+ bfd_arch_arm, /* Advanced Risc Machines ARM. */ -+#define bfd_mach_arm_unknown 0 -+#define bfd_mach_arm_2 1 -+#define bfd_mach_arm_2a 2 -+#define bfd_mach_arm_3 3 -+#define bfd_mach_arm_3M 4 -+#define bfd_mach_arm_4 5 -+#define bfd_mach_arm_4T 6 -+#define bfd_mach_arm_5 7 -+#define bfd_mach_arm_5T 8 -+#define bfd_mach_arm_5TE 9 -+#define bfd_mach_arm_XScale 10 -+#define bfd_mach_arm_ep9312 11 -+#define bfd_mach_arm_iWMMXt 12 -+ bfd_arch_ns32k, /* National Semiconductors ns32000 */ -+ bfd_arch_w65, /* WDC 65816 */ -+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */ -+ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */ -+#define bfd_mach_tic3x 30 -+#define bfd_mach_tic4x 40 -+ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */ -+ bfd_arch_tic80, /* TI TMS320c80 (MVP) */ -+ bfd_arch_v850, /* NEC V850 */ -+#define bfd_mach_v850 1 -+#define bfd_mach_v850e 'E' -+#define bfd_mach_v850e1 '1' -+ bfd_arch_arc, /* ARC Cores */ -+#define bfd_mach_arc_5 5 -+#define bfd_mach_arc_6 6 -+#define bfd_mach_arc_7 7 -+#define bfd_mach_arc_8 8 -+ bfd_arch_m32c, /* Renesas M16C/M32C. */ -+#define bfd_mach_m16c 0x75 -+#define bfd_mach_m32c 0x78 -+ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */ -+#define bfd_mach_m32r 1 /* For backwards compatibility. */ -+#define bfd_mach_m32rx 'x' -+#define bfd_mach_m32r2 '2' -+ bfd_arch_mn10200, /* Matsushita MN10200 */ -+ bfd_arch_mn10300, /* Matsushita MN10300 */ -+#define bfd_mach_mn10300 300 -+#define bfd_mach_am33 330 -+#define bfd_mach_am33_2 332 -+ bfd_arch_fr30, -+#define bfd_mach_fr30 0x46523330 -+ bfd_arch_frv, -+#define bfd_mach_frv 1 -+#define bfd_mach_frvsimple 2 -+#define bfd_mach_fr300 300 -+#define bfd_mach_fr400 400 -+#define bfd_mach_fr450 450 -+#define bfd_mach_frvtomcat 499 /* fr500 prototype */ -+#define bfd_mach_fr500 500 -+#define bfd_mach_fr550 550 -+ bfd_arch_mcore, -+ bfd_arch_ia64, /* HP/Intel ia64 */ -+#define bfd_mach_ia64_elf64 64 -+#define bfd_mach_ia64_elf32 32 -+ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */ -+#define bfd_mach_ip2022 1 -+#define bfd_mach_ip2022ext 2 -+ bfd_arch_iq2000, /* Vitesse IQ2000. */ -+#define bfd_mach_iq2000 1 -+#define bfd_mach_iq10 2 -+ bfd_arch_ms1, -+#define bfd_mach_ms1 1 -+#define bfd_mach_mrisc2 2 -+ bfd_arch_pj, -+ bfd_arch_avr, /* Atmel AVR microcontrollers. */ -+#define bfd_mach_avr1 1 -+#define bfd_mach_avr2 2 -+#define bfd_mach_avr3 3 -+#define bfd_mach_avr4 4 -+#define bfd_mach_avr5 5 -+ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */ -+#define bfd_mach_cr16c 1 -+ bfd_arch_crx, /* National Semiconductor CRX. 
*/ -+#define bfd_mach_crx 1 -+ bfd_arch_cris, /* Axis CRIS */ -+#define bfd_mach_cris_v0_v10 255 -+#define bfd_mach_cris_v32 32 -+#define bfd_mach_cris_v10_v32 1032 -+ bfd_arch_s390, /* IBM s390 */ -+#define bfd_mach_s390_31 31 -+#define bfd_mach_s390_64 64 -+ bfd_arch_openrisc, /* OpenRISC */ -+ bfd_arch_mmix, /* Donald Knuth's educational processor. */ -+ bfd_arch_xstormy16, -+#define bfd_mach_xstormy16 1 -+ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */ -+#define bfd_mach_msp11 11 -+#define bfd_mach_msp110 110 -+#define bfd_mach_msp12 12 -+#define bfd_mach_msp13 13 -+#define bfd_mach_msp14 14 -+#define bfd_mach_msp15 15 -+#define bfd_mach_msp16 16 -+#define bfd_mach_msp31 31 -+#define bfd_mach_msp32 32 -+#define bfd_mach_msp33 33 -+#define bfd_mach_msp41 41 -+#define bfd_mach_msp42 42 -+#define bfd_mach_msp43 43 -+#define bfd_mach_msp44 44 -+ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */ -+#define bfd_mach_xtensa 1 -+ bfd_arch_maxq, /* Dallas MAXQ 10/20 */ -+#define bfd_mach_maxq10 10 -+#define bfd_mach_maxq20 20 -+ bfd_arch_last -+ }; -+ -+typedef struct bfd_arch_info -+{ -+ int bits_per_word; -+ int bits_per_address; -+ int bits_per_byte; -+ enum bfd_architecture arch; -+ unsigned long mach; -+ const char *arch_name; -+ const char *printable_name; -+ unsigned int section_align_power; -+ /* TRUE if this is the default machine for the architecture. -+ The default arch should be the first entry for an arch so that -+ all the entries for that arch can be accessed via <>. */ -+ bfd_boolean the_default; -+ const struct bfd_arch_info * (*compatible) -+ (const struct bfd_arch_info *a, const struct bfd_arch_info *b); -+ -+ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *); -+ -+ const struct bfd_arch_info *next; -+} -+bfd_arch_info_type; -+ -+const char *bfd_printable_name (bfd *abfd); -+ -+const bfd_arch_info_type *bfd_scan_arch (const char *string); -+ -+const char **bfd_arch_list (void); -+ -+const bfd_arch_info_type *bfd_arch_get_compatible -+ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns); -+ -+void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg); -+ -+enum bfd_architecture bfd_get_arch (bfd *abfd); -+ -+unsigned long bfd_get_mach (bfd *abfd); -+ -+unsigned int bfd_arch_bits_per_byte (bfd *abfd); -+ -+unsigned int bfd_arch_bits_per_address (bfd *abfd); -+ -+const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd); -+ -+const bfd_arch_info_type *bfd_lookup_arch -+ (enum bfd_architecture arch, unsigned long machine); -+ -+const char *bfd_printable_arch_mach -+ (enum bfd_architecture arch, unsigned long machine); -+ -+unsigned int bfd_octets_per_byte (bfd *abfd); -+ -+unsigned int bfd_arch_mach_octets_per_byte -+ (enum bfd_architecture arch, unsigned long machine); -+ -+/* Extracted from reloc.c. */ -+typedef enum bfd_reloc_status -+{ -+ /* No errors detected. */ -+ bfd_reloc_ok, -+ -+ /* The relocation was performed, but there was an overflow. */ -+ bfd_reloc_overflow, -+ -+ /* The address to relocate was not within the section supplied. */ -+ bfd_reloc_outofrange, -+ -+ /* Used by special functions. */ -+ bfd_reloc_continue, -+ -+ /* Unsupported relocation size requested. */ -+ bfd_reloc_notsupported, -+ -+ /* Unused. */ -+ bfd_reloc_other, -+ -+ /* The symbol to relocate against was undefined. */ -+ bfd_reloc_undefined, -+ -+ /* The relocation was performed, but may not be ok - presently -+ generated only when linking i960 coff files with i960 b.out -+ symbols. 
If this type is returned, the error_message argument -+ to bfd_perform_relocation will be set. */ -+ bfd_reloc_dangerous -+ } -+ bfd_reloc_status_type; -+ -+ -+typedef struct reloc_cache_entry -+{ -+ /* A pointer into the canonical table of pointers. */ -+ struct bfd_symbol **sym_ptr_ptr; -+ -+ /* offset in section. */ -+ bfd_size_type address; -+ -+ /* addend for relocation value. */ -+ bfd_vma addend; -+ -+ /* Pointer to how to perform the required relocation. */ -+ reloc_howto_type *howto; -+ -+} -+arelent; -+ -+enum complain_overflow -+{ -+ /* Do not complain on overflow. */ -+ complain_overflow_dont, -+ -+ /* Complain if the bitfield overflows, whether it is considered -+ as signed or unsigned. */ -+ complain_overflow_bitfield, -+ -+ /* Complain if the value overflows when considered as signed -+ number. */ -+ complain_overflow_signed, -+ -+ /* Complain if the value overflows when considered as an -+ unsigned number. */ -+ complain_overflow_unsigned -+}; -+ -+struct reloc_howto_struct -+{ -+ /* The type field has mainly a documentary use - the back end can -+ do what it wants with it, though normally the back end's -+ external idea of what a reloc number is stored -+ in this field. For example, a PC relative word relocation -+ in a coff environment has the type 023 - because that's -+ what the outside world calls a R_PCRWORD reloc. */ -+ unsigned int type; -+ -+ /* The value the final relocation is shifted right by. This drops -+ unwanted data from the relocation. */ -+ unsigned int rightshift; -+ -+ /* The size of the item to be relocated. This is *not* a -+ power-of-two measure. To get the number of bytes operated -+ on by a type of relocation, use bfd_get_reloc_size. */ -+ int size; -+ -+ /* The number of bits in the item to be relocated. This is used -+ when doing overflow checking. */ -+ unsigned int bitsize; -+ -+ /* Notes that the relocation is relative to the location in the -+ data section of the addend. The relocation function will -+ subtract from the relocation value the address of the location -+ being relocated. */ -+ bfd_boolean pc_relative; -+ -+ /* The bit position of the reloc value in the destination. -+ The relocated value is left shifted by this amount. */ -+ unsigned int bitpos; -+ -+ /* What type of overflow error should be checked for when -+ relocating. */ -+ enum complain_overflow complain_on_overflow; -+ -+ /* If this field is non null, then the supplied function is -+ called rather than the normal function. This allows really -+ strange relocation methods to be accommodated (e.g., i960 callj -+ instructions). */ -+ bfd_reloc_status_type (*special_function) -+ (bfd *, arelent *, struct bfd_symbol *, void *, asection *, -+ bfd *, char **); -+ -+ /* The textual name of the relocation type. */ -+ char *name; -+ -+ /* Some formats record a relocation addend in the section contents -+ rather than with the relocation. For ELF formats this is the -+ distinction between USE_REL and USE_RELA (though the code checks -+ for USE_REL == 1/0). The value of this field is TRUE if the -+ addend is recorded with the section contents; when performing a -+ partial link (ld -r) the section contents (the data) will be -+ modified. The value of this field is FALSE if addends are -+ recorded with the relocation (in arelent.addend); when performing -+ a partial link the relocation will be modified. -+ All relocations for all ELF USE_RELA targets should set this field -+ to FALSE (values of TRUE should be looked on with suspicion). 
-+ However, the converse is not true: not all relocations of all ELF -+ USE_REL targets set this field to TRUE. Why this is so is peculiar -+ to each particular target. For relocs that aren't used in partial -+ links (e.g. GOT stuff) it doesn't matter what this is set to. */ -+ bfd_boolean partial_inplace; -+ -+ /* src_mask selects the part of the instruction (or data) to be used -+ in the relocation sum. If the target relocations don't have an -+ addend in the reloc, eg. ELF USE_REL, src_mask will normally equal -+ dst_mask to extract the addend from the section contents. If -+ relocations do have an addend in the reloc, eg. ELF USE_RELA, this -+ field should be zero. Non-zero values for ELF USE_RELA targets are -+ bogus as in those cases the value in the dst_mask part of the -+ section contents should be treated as garbage. */ -+ bfd_vma src_mask; -+ -+ /* dst_mask selects which parts of the instruction (or data) are -+ replaced with a relocated value. */ -+ bfd_vma dst_mask; -+ -+ /* When some formats create PC relative instructions, they leave -+ the value of the pc of the place being relocated in the offset -+ slot of the instruction, so that a PC relative relocation can -+ be made just by adding in an ordinary offset (e.g., sun3 a.out). -+ Some formats leave the displacement part of an instruction -+ empty (e.g., m88k bcs); this flag signals the fact. */ -+ bfd_boolean pcrel_offset; -+}; -+ -+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \ -+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC } -+#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \ -+ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \ -+ NAME, FALSE, 0, 0, IN) -+ -+#define EMPTY_HOWTO(C) \ -+ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \ -+ NULL, FALSE, 0, 0, FALSE) -+ -+#define HOWTO_PREPARE(relocation, symbol) \ -+ { \ -+ if (symbol != NULL) \ -+ { \ -+ if (bfd_is_com_section (symbol->section)) \ -+ { \ -+ relocation = 0; \ -+ } \ -+ else \ -+ { \ -+ relocation = symbol->value; \ -+ } \ -+ } \ -+ } -+ -+unsigned int bfd_get_reloc_size (reloc_howto_type *); -+ -+typedef struct relent_chain -+{ -+ arelent relent; -+ struct relent_chain *next; -+} -+arelent_chain; -+ -+bfd_reloc_status_type bfd_check_overflow -+ (enum complain_overflow how, -+ unsigned int bitsize, -+ unsigned int rightshift, -+ unsigned int addrsize, -+ bfd_vma relocation); -+ -+bfd_reloc_status_type bfd_perform_relocation -+ (bfd *abfd, -+ arelent *reloc_entry, -+ void *data, -+ asection *input_section, -+ bfd *output_bfd, -+ char **error_message); -+ -+bfd_reloc_status_type bfd_install_relocation -+ (bfd *abfd, -+ arelent *reloc_entry, -+ void *data, bfd_vma data_start, -+ asection *input_section, -+ char **error_message); -+ -+enum bfd_reloc_code_real { -+ _dummy_first_bfd_reloc_code_real, -+ -+ -+/* Basic absolute relocations of N bits. */ -+ BFD_RELOC_64, -+ BFD_RELOC_32, -+ BFD_RELOC_26, -+ BFD_RELOC_24, -+ BFD_RELOC_16, -+ BFD_RELOC_14, -+ BFD_RELOC_8, -+ -+/* PC-relative relocations. Sometimes these are relative to the address -+of the relocation itself; sometimes they are relative to the start of -+the section containing the relocation. It depends on the specific target. -+ -+The 24-bit relocation is used in some Intel 960 configurations. */ -+ BFD_RELOC_64_PCREL, -+ BFD_RELOC_32_PCREL, -+ BFD_RELOC_24_PCREL, -+ BFD_RELOC_16_PCREL, -+ BFD_RELOC_12_PCREL, -+ BFD_RELOC_8_PCREL, -+ -+/* Section relative relocations. Some targets need this for DWARF2. 
*/ -+ BFD_RELOC_32_SECREL, -+ -+/* For ELF. */ -+ BFD_RELOC_32_GOT_PCREL, -+ BFD_RELOC_16_GOT_PCREL, -+ BFD_RELOC_8_GOT_PCREL, -+ BFD_RELOC_32_GOTOFF, -+ BFD_RELOC_16_GOTOFF, -+ BFD_RELOC_LO16_GOTOFF, -+ BFD_RELOC_HI16_GOTOFF, -+ BFD_RELOC_HI16_S_GOTOFF, -+ BFD_RELOC_8_GOTOFF, -+ BFD_RELOC_64_PLT_PCREL, -+ BFD_RELOC_32_PLT_PCREL, -+ BFD_RELOC_24_PLT_PCREL, -+ BFD_RELOC_16_PLT_PCREL, -+ BFD_RELOC_8_PLT_PCREL, -+ BFD_RELOC_64_PLTOFF, -+ BFD_RELOC_32_PLTOFF, -+ BFD_RELOC_16_PLTOFF, -+ BFD_RELOC_LO16_PLTOFF, -+ BFD_RELOC_HI16_PLTOFF, -+ BFD_RELOC_HI16_S_PLTOFF, -+ BFD_RELOC_8_PLTOFF, -+ -+/* Relocations used by 68K ELF. */ -+ BFD_RELOC_68K_GLOB_DAT, -+ BFD_RELOC_68K_JMP_SLOT, -+ BFD_RELOC_68K_RELATIVE, -+ -+/* Linkage-table relative. */ -+ BFD_RELOC_32_BASEREL, -+ BFD_RELOC_16_BASEREL, -+ BFD_RELOC_LO16_BASEREL, -+ BFD_RELOC_HI16_BASEREL, -+ BFD_RELOC_HI16_S_BASEREL, -+ BFD_RELOC_8_BASEREL, -+ BFD_RELOC_RVA, -+ -+/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */ -+ BFD_RELOC_8_FFnn, -+ -+/* These PC-relative relocations are stored as word displacements -- -+i.e., byte displacements shifted right two bits. The 30-bit word -+displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the -+SPARC. (SPARC tools generally refer to this as <>.) The -+signed 16-bit displacement is used on the MIPS, and the 23-bit -+displacement is used on the Alpha. */ -+ BFD_RELOC_32_PCREL_S2, -+ BFD_RELOC_16_PCREL_S2, -+ BFD_RELOC_23_PCREL_S2, -+ -+/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of -+the target word. These are used on the SPARC. */ -+ BFD_RELOC_HI22, -+ BFD_RELOC_LO10, -+ -+/* For systems that allocate a Global Pointer register, these are -+displacements off that register. These relocation types are -+handled specially, because the value the register will have is -+decided relatively late. */ -+ BFD_RELOC_GPREL16, -+ BFD_RELOC_GPREL32, -+ -+/* Reloc types used for i960/b.out. */ -+ BFD_RELOC_I960_CALLJ, -+ -+/* SPARC ELF relocations. There is probably some overlap with other -+relocation types already defined. */ -+ BFD_RELOC_NONE, -+ BFD_RELOC_SPARC_WDISP22, -+ BFD_RELOC_SPARC22, -+ BFD_RELOC_SPARC13, -+ BFD_RELOC_SPARC_GOT10, -+ BFD_RELOC_SPARC_GOT13, -+ BFD_RELOC_SPARC_GOT22, -+ BFD_RELOC_SPARC_PC10, -+ BFD_RELOC_SPARC_PC22, -+ BFD_RELOC_SPARC_WPLT30, -+ BFD_RELOC_SPARC_COPY, -+ BFD_RELOC_SPARC_GLOB_DAT, -+ BFD_RELOC_SPARC_JMP_SLOT, -+ BFD_RELOC_SPARC_RELATIVE, -+ BFD_RELOC_SPARC_UA16, -+ BFD_RELOC_SPARC_UA32, -+ BFD_RELOC_SPARC_UA64, -+ -+/* I think these are specific to SPARC a.out (e.g., Sun 4). 
*/ -+ BFD_RELOC_SPARC_BASE13, -+ BFD_RELOC_SPARC_BASE22, -+ -+/* SPARC64 relocations */ -+#define BFD_RELOC_SPARC_64 BFD_RELOC_64 -+ BFD_RELOC_SPARC_10, -+ BFD_RELOC_SPARC_11, -+ BFD_RELOC_SPARC_OLO10, -+ BFD_RELOC_SPARC_HH22, -+ BFD_RELOC_SPARC_HM10, -+ BFD_RELOC_SPARC_LM22, -+ BFD_RELOC_SPARC_PC_HH22, -+ BFD_RELOC_SPARC_PC_HM10, -+ BFD_RELOC_SPARC_PC_LM22, -+ BFD_RELOC_SPARC_WDISP16, -+ BFD_RELOC_SPARC_WDISP19, -+ BFD_RELOC_SPARC_7, -+ BFD_RELOC_SPARC_6, -+ BFD_RELOC_SPARC_5, -+#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL -+ BFD_RELOC_SPARC_PLT32, -+ BFD_RELOC_SPARC_PLT64, -+ BFD_RELOC_SPARC_HIX22, -+ BFD_RELOC_SPARC_LOX10, -+ BFD_RELOC_SPARC_H44, -+ BFD_RELOC_SPARC_M44, -+ BFD_RELOC_SPARC_L44, -+ BFD_RELOC_SPARC_REGISTER, -+ -+/* SPARC little endian relocation */ -+ BFD_RELOC_SPARC_REV32, -+ -+/* SPARC TLS relocations */ -+ BFD_RELOC_SPARC_TLS_GD_HI22, -+ BFD_RELOC_SPARC_TLS_GD_LO10, -+ BFD_RELOC_SPARC_TLS_GD_ADD, -+ BFD_RELOC_SPARC_TLS_GD_CALL, -+ BFD_RELOC_SPARC_TLS_LDM_HI22, -+ BFD_RELOC_SPARC_TLS_LDM_LO10, -+ BFD_RELOC_SPARC_TLS_LDM_ADD, -+ BFD_RELOC_SPARC_TLS_LDM_CALL, -+ BFD_RELOC_SPARC_TLS_LDO_HIX22, -+ BFD_RELOC_SPARC_TLS_LDO_LOX10, -+ BFD_RELOC_SPARC_TLS_LDO_ADD, -+ BFD_RELOC_SPARC_TLS_IE_HI22, -+ BFD_RELOC_SPARC_TLS_IE_LO10, -+ BFD_RELOC_SPARC_TLS_IE_LD, -+ BFD_RELOC_SPARC_TLS_IE_LDX, -+ BFD_RELOC_SPARC_TLS_IE_ADD, -+ BFD_RELOC_SPARC_TLS_LE_HIX22, -+ BFD_RELOC_SPARC_TLS_LE_LOX10, -+ BFD_RELOC_SPARC_TLS_DTPMOD32, -+ BFD_RELOC_SPARC_TLS_DTPMOD64, -+ BFD_RELOC_SPARC_TLS_DTPOFF32, -+ BFD_RELOC_SPARC_TLS_DTPOFF64, -+ BFD_RELOC_SPARC_TLS_TPOFF32, -+ BFD_RELOC_SPARC_TLS_TPOFF64, -+ -+/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or -+"addend" in some special way. -+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when -+writing; when reading, it will be the absolute section symbol. The -+addend is the displacement in bytes of the "lda" instruction from -+the "ldah" instruction (which is at the address of this reloc). */ -+ BFD_RELOC_ALPHA_GPDISP_HI16, -+ -+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as -+with GPDISP_HI16 relocs. The addend is ignored when writing the -+relocations out, and is filled in with the file's GP value on -+reading, for convenience. */ -+ BFD_RELOC_ALPHA_GPDISP_LO16, -+ -+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16 -+relocation except that there is no accompanying GPDISP_LO16 -+relocation. */ -+ BFD_RELOC_ALPHA_GPDISP, -+ -+/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference; -+the assembler turns it into a LDQ instruction to load the address of -+the symbol, and then fills in a register in the real instruction. -+ -+The LITERAL reloc, at the LDQ instruction, refers to the .lita -+section symbol. The addend is ignored when writing, but is filled -+in with the file's GP value on reading, for convenience, as with the -+GPDISP_LO16 reloc. -+ -+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16. -+It should refer to the symbol to be referenced, as with 16_GOTOFF, -+but it generates output not based on the position within the .got -+section, but relative to the GP value chosen for the file during the -+final link stage. -+ -+The LITUSE reloc, on the instruction using the loaded address, gives -+information to the linker that it might be able to use to optimize -+away some literal section references. 
The symbol is ignored (read -+as the absolute section symbol), and the "addend" indicates the type -+of instruction using the register: -+1 - "memory" fmt insn -+2 - byte-manipulation (byte offset reg) -+3 - jsr (target of branch) */ -+ BFD_RELOC_ALPHA_LITERAL, -+ BFD_RELOC_ALPHA_ELF_LITERAL, -+ BFD_RELOC_ALPHA_LITUSE, -+ -+/* The HINT relocation indicates a value that should be filled into the -+"hint" field of a jmp/jsr/ret instruction, for possible branch- -+prediction logic which may be provided on some processors. */ -+ BFD_RELOC_ALPHA_HINT, -+ -+/* The LINKAGE relocation outputs a linkage pair in the object file, -+which is filled by the linker. */ -+ BFD_RELOC_ALPHA_LINKAGE, -+ -+/* The CODEADDR relocation outputs a STO_CA in the object file, -+which is filled by the linker. */ -+ BFD_RELOC_ALPHA_CODEADDR, -+ -+/* The GPREL_HI/LO relocations together form a 32-bit offset from the -+GP register. */ -+ BFD_RELOC_ALPHA_GPREL_HI16, -+ BFD_RELOC_ALPHA_GPREL_LO16, -+ -+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must -+share a common GP, and the target address is adjusted for -+STO_ALPHA_STD_GPLOAD. */ -+ BFD_RELOC_ALPHA_BRSGP, -+ -+/* Alpha thread-local storage relocations. */ -+ BFD_RELOC_ALPHA_TLSGD, -+ BFD_RELOC_ALPHA_TLSLDM, -+ BFD_RELOC_ALPHA_DTPMOD64, -+ BFD_RELOC_ALPHA_GOTDTPREL16, -+ BFD_RELOC_ALPHA_DTPREL64, -+ BFD_RELOC_ALPHA_DTPREL_HI16, -+ BFD_RELOC_ALPHA_DTPREL_LO16, -+ BFD_RELOC_ALPHA_DTPREL16, -+ BFD_RELOC_ALPHA_GOTTPREL16, -+ BFD_RELOC_ALPHA_TPREL64, -+ BFD_RELOC_ALPHA_TPREL_HI16, -+ BFD_RELOC_ALPHA_TPREL_LO16, -+ BFD_RELOC_ALPHA_TPREL16, -+ -+/* Bits 27..2 of the relocation address shifted right 2 bits; -+simple reloc otherwise. */ -+ BFD_RELOC_MIPS_JMP, -+ -+/* The MIPS16 jump instruction. */ -+ BFD_RELOC_MIPS16_JMP, -+ -+/* MIPS16 GP relative reloc. */ -+ BFD_RELOC_MIPS16_GPREL, -+ -+/* High 16 bits of 32-bit value; simple reloc. */ -+ BFD_RELOC_HI16, -+ -+/* High 16 bits of 32-bit value but the low 16 bits will be sign -+extended and added to form the final result. If the low 16 -+bits form a negative number, we need to add one to the high value -+to compensate for the borrow when the low bits are added. */ -+ BFD_RELOC_HI16_S, -+ -+/* Low 16 bits. */ -+ BFD_RELOC_LO16, -+ -+/* High 16 bits of 32-bit pc-relative value */ -+ BFD_RELOC_HI16_PCREL, -+ -+/* High 16 bits of 32-bit pc-relative value, adjusted */ -+ BFD_RELOC_HI16_S_PCREL, -+ -+/* Low 16 bits of pc-relative value */ -+ BFD_RELOC_LO16_PCREL, -+ -+/* MIPS16 high 16 bits of 32-bit value. */ -+ BFD_RELOC_MIPS16_HI16, -+ -+/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign -+extended and added to form the final result. If the low 16 -+bits form a negative number, we need to add one to the high value -+to compensate for the borrow when the low bits are added. */ -+ BFD_RELOC_MIPS16_HI16_S, -+ -+/* MIPS16 low 16 bits. */ -+ BFD_RELOC_MIPS16_LO16, -+ -+/* Relocation against a MIPS literal section. */ -+ BFD_RELOC_MIPS_LITERAL, -+ -+/* MIPS ELF relocations. 
*/ -+ BFD_RELOC_MIPS_GOT16, -+ BFD_RELOC_MIPS_CALL16, -+ BFD_RELOC_MIPS_GOT_HI16, -+ BFD_RELOC_MIPS_GOT_LO16, -+ BFD_RELOC_MIPS_CALL_HI16, -+ BFD_RELOC_MIPS_CALL_LO16, -+ BFD_RELOC_MIPS_SUB, -+ BFD_RELOC_MIPS_GOT_PAGE, -+ BFD_RELOC_MIPS_GOT_OFST, -+ BFD_RELOC_MIPS_GOT_DISP, -+ BFD_RELOC_MIPS_SHIFT5, -+ BFD_RELOC_MIPS_SHIFT6, -+ BFD_RELOC_MIPS_INSERT_A, -+ BFD_RELOC_MIPS_INSERT_B, -+ BFD_RELOC_MIPS_DELETE, -+ BFD_RELOC_MIPS_HIGHEST, -+ BFD_RELOC_MIPS_HIGHER, -+ BFD_RELOC_MIPS_SCN_DISP, -+ BFD_RELOC_MIPS_REL16, -+ BFD_RELOC_MIPS_RELGOT, -+ BFD_RELOC_MIPS_JALR, -+ BFD_RELOC_MIPS_TLS_DTPMOD32, -+ BFD_RELOC_MIPS_TLS_DTPREL32, -+ BFD_RELOC_MIPS_TLS_DTPMOD64, -+ BFD_RELOC_MIPS_TLS_DTPREL64, -+ BFD_RELOC_MIPS_TLS_GD, -+ BFD_RELOC_MIPS_TLS_LDM, -+ BFD_RELOC_MIPS_TLS_DTPREL_HI16, -+ BFD_RELOC_MIPS_TLS_DTPREL_LO16, -+ BFD_RELOC_MIPS_TLS_GOTTPREL, -+ BFD_RELOC_MIPS_TLS_TPREL32, -+ BFD_RELOC_MIPS_TLS_TPREL64, -+ BFD_RELOC_MIPS_TLS_TPREL_HI16, -+ BFD_RELOC_MIPS_TLS_TPREL_LO16, -+ -+ -+/* Fujitsu Frv Relocations. */ -+ BFD_RELOC_FRV_LABEL16, -+ BFD_RELOC_FRV_LABEL24, -+ BFD_RELOC_FRV_LO16, -+ BFD_RELOC_FRV_HI16, -+ BFD_RELOC_FRV_GPREL12, -+ BFD_RELOC_FRV_GPRELU12, -+ BFD_RELOC_FRV_GPREL32, -+ BFD_RELOC_FRV_GPRELHI, -+ BFD_RELOC_FRV_GPRELLO, -+ BFD_RELOC_FRV_GOT12, -+ BFD_RELOC_FRV_GOTHI, -+ BFD_RELOC_FRV_GOTLO, -+ BFD_RELOC_FRV_FUNCDESC, -+ BFD_RELOC_FRV_FUNCDESC_GOT12, -+ BFD_RELOC_FRV_FUNCDESC_GOTHI, -+ BFD_RELOC_FRV_FUNCDESC_GOTLO, -+ BFD_RELOC_FRV_FUNCDESC_VALUE, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFF12, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO, -+ BFD_RELOC_FRV_GOTOFF12, -+ BFD_RELOC_FRV_GOTOFFHI, -+ BFD_RELOC_FRV_GOTOFFLO, -+ BFD_RELOC_FRV_GETTLSOFF, -+ BFD_RELOC_FRV_TLSDESC_VALUE, -+ BFD_RELOC_FRV_GOTTLSDESC12, -+ BFD_RELOC_FRV_GOTTLSDESCHI, -+ BFD_RELOC_FRV_GOTTLSDESCLO, -+ BFD_RELOC_FRV_TLSMOFF12, -+ BFD_RELOC_FRV_TLSMOFFHI, -+ BFD_RELOC_FRV_TLSMOFFLO, -+ BFD_RELOC_FRV_GOTTLSOFF12, -+ BFD_RELOC_FRV_GOTTLSOFFHI, -+ BFD_RELOC_FRV_GOTTLSOFFLO, -+ BFD_RELOC_FRV_TLSOFF, -+ BFD_RELOC_FRV_TLSDESC_RELAX, -+ BFD_RELOC_FRV_GETTLSOFF_RELAX, -+ BFD_RELOC_FRV_TLSOFF_RELAX, -+ BFD_RELOC_FRV_TLSMOFF, -+ -+ -+/* This is a 24bit GOT-relative reloc for the mn10300. */ -+ BFD_RELOC_MN10300_GOTOFF24, -+ -+/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT32, -+ -+/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT24, -+ -+/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT16, -+ -+/* Copy symbol at runtime. */ -+ BFD_RELOC_MN10300_COPY, -+ -+/* Create GOT entry. */ -+ BFD_RELOC_MN10300_GLOB_DAT, -+ -+/* Create PLT entry. */ -+ BFD_RELOC_MN10300_JMP_SLOT, -+ -+/* Adjust by program base. 
*/ -+ BFD_RELOC_MN10300_RELATIVE, -+ -+ -+/* i386/elf relocations */ -+ BFD_RELOC_386_GOT32, -+ BFD_RELOC_386_PLT32, -+ BFD_RELOC_386_COPY, -+ BFD_RELOC_386_GLOB_DAT, -+ BFD_RELOC_386_JUMP_SLOT, -+ BFD_RELOC_386_RELATIVE, -+ BFD_RELOC_386_GOTOFF, -+ BFD_RELOC_386_GOTPC, -+ BFD_RELOC_386_TLS_TPOFF, -+ BFD_RELOC_386_TLS_IE, -+ BFD_RELOC_386_TLS_GOTIE, -+ BFD_RELOC_386_TLS_LE, -+ BFD_RELOC_386_TLS_GD, -+ BFD_RELOC_386_TLS_LDM, -+ BFD_RELOC_386_TLS_LDO_32, -+ BFD_RELOC_386_TLS_IE_32, -+ BFD_RELOC_386_TLS_LE_32, -+ BFD_RELOC_386_TLS_DTPMOD32, -+ BFD_RELOC_386_TLS_DTPOFF32, -+ BFD_RELOC_386_TLS_TPOFF32, -+ -+/* x86-64/elf relocations */ -+ BFD_RELOC_X86_64_GOT32, -+ BFD_RELOC_X86_64_PLT32, -+ BFD_RELOC_X86_64_COPY, -+ BFD_RELOC_X86_64_GLOB_DAT, -+ BFD_RELOC_X86_64_JUMP_SLOT, -+ BFD_RELOC_X86_64_RELATIVE, -+ BFD_RELOC_X86_64_GOTPCREL, -+ BFD_RELOC_X86_64_32S, -+ BFD_RELOC_X86_64_DTPMOD64, -+ BFD_RELOC_X86_64_DTPOFF64, -+ BFD_RELOC_X86_64_TPOFF64, -+ BFD_RELOC_X86_64_TLSGD, -+ BFD_RELOC_X86_64_TLSLD, -+ BFD_RELOC_X86_64_DTPOFF32, -+ BFD_RELOC_X86_64_GOTTPOFF, -+ BFD_RELOC_X86_64_TPOFF32, -+ BFD_RELOC_X86_64_GOTOFF64, -+ BFD_RELOC_X86_64_GOTPC32, -+ -+/* ns32k relocations */ -+ BFD_RELOC_NS32K_IMM_8, -+ BFD_RELOC_NS32K_IMM_16, -+ BFD_RELOC_NS32K_IMM_32, -+ BFD_RELOC_NS32K_IMM_8_PCREL, -+ BFD_RELOC_NS32K_IMM_16_PCREL, -+ BFD_RELOC_NS32K_IMM_32_PCREL, -+ BFD_RELOC_NS32K_DISP_8, -+ BFD_RELOC_NS32K_DISP_16, -+ BFD_RELOC_NS32K_DISP_32, -+ BFD_RELOC_NS32K_DISP_8_PCREL, -+ BFD_RELOC_NS32K_DISP_16_PCREL, -+ BFD_RELOC_NS32K_DISP_32_PCREL, -+ -+/* PDP11 relocations */ -+ BFD_RELOC_PDP11_DISP_8_PCREL, -+ BFD_RELOC_PDP11_DISP_6_PCREL, -+ -+/* Picojava relocs. Not all of these appear in object files. */ -+ BFD_RELOC_PJ_CODE_HI16, -+ BFD_RELOC_PJ_CODE_LO16, -+ BFD_RELOC_PJ_CODE_DIR16, -+ BFD_RELOC_PJ_CODE_DIR32, -+ BFD_RELOC_PJ_CODE_REL16, -+ BFD_RELOC_PJ_CODE_REL32, -+ -+/* Power(rs6000) and PowerPC relocations. */ -+ BFD_RELOC_PPC_B26, -+ BFD_RELOC_PPC_BA26, -+ BFD_RELOC_PPC_TOC16, -+ BFD_RELOC_PPC_B16, -+ BFD_RELOC_PPC_B16_BRTAKEN, -+ BFD_RELOC_PPC_B16_BRNTAKEN, -+ BFD_RELOC_PPC_BA16, -+ BFD_RELOC_PPC_BA16_BRTAKEN, -+ BFD_RELOC_PPC_BA16_BRNTAKEN, -+ BFD_RELOC_PPC_COPY, -+ BFD_RELOC_PPC_GLOB_DAT, -+ BFD_RELOC_PPC_JMP_SLOT, -+ BFD_RELOC_PPC_RELATIVE, -+ BFD_RELOC_PPC_LOCAL24PC, -+ BFD_RELOC_PPC_EMB_NADDR32, -+ BFD_RELOC_PPC_EMB_NADDR16, -+ BFD_RELOC_PPC_EMB_NADDR16_LO, -+ BFD_RELOC_PPC_EMB_NADDR16_HI, -+ BFD_RELOC_PPC_EMB_NADDR16_HA, -+ BFD_RELOC_PPC_EMB_SDAI16, -+ BFD_RELOC_PPC_EMB_SDA2I16, -+ BFD_RELOC_PPC_EMB_SDA2REL, -+ BFD_RELOC_PPC_EMB_SDA21, -+ BFD_RELOC_PPC_EMB_MRKREF, -+ BFD_RELOC_PPC_EMB_RELSEC16, -+ BFD_RELOC_PPC_EMB_RELST_LO, -+ BFD_RELOC_PPC_EMB_RELST_HI, -+ BFD_RELOC_PPC_EMB_RELST_HA, -+ BFD_RELOC_PPC_EMB_BIT_FLD, -+ BFD_RELOC_PPC_EMB_RELSDA, -+ BFD_RELOC_PPC64_HIGHER, -+ BFD_RELOC_PPC64_HIGHER_S, -+ BFD_RELOC_PPC64_HIGHEST, -+ BFD_RELOC_PPC64_HIGHEST_S, -+ BFD_RELOC_PPC64_TOC16_LO, -+ BFD_RELOC_PPC64_TOC16_HI, -+ BFD_RELOC_PPC64_TOC16_HA, -+ BFD_RELOC_PPC64_TOC, -+ BFD_RELOC_PPC64_PLTGOT16, -+ BFD_RELOC_PPC64_PLTGOT16_LO, -+ BFD_RELOC_PPC64_PLTGOT16_HI, -+ BFD_RELOC_PPC64_PLTGOT16_HA, -+ BFD_RELOC_PPC64_ADDR16_DS, -+ BFD_RELOC_PPC64_ADDR16_LO_DS, -+ BFD_RELOC_PPC64_GOT16_DS, -+ BFD_RELOC_PPC64_GOT16_LO_DS, -+ BFD_RELOC_PPC64_PLT16_LO_DS, -+ BFD_RELOC_PPC64_SECTOFF_DS, -+ BFD_RELOC_PPC64_SECTOFF_LO_DS, -+ BFD_RELOC_PPC64_TOC16_DS, -+ BFD_RELOC_PPC64_TOC16_LO_DS, -+ BFD_RELOC_PPC64_PLTGOT16_DS, -+ BFD_RELOC_PPC64_PLTGOT16_LO_DS, -+ -+/* PowerPC and PowerPC64 thread-local storage relocations. 
*/ -+ BFD_RELOC_PPC_TLS, -+ BFD_RELOC_PPC_DTPMOD, -+ BFD_RELOC_PPC_TPREL16, -+ BFD_RELOC_PPC_TPREL16_LO, -+ BFD_RELOC_PPC_TPREL16_HI, -+ BFD_RELOC_PPC_TPREL16_HA, -+ BFD_RELOC_PPC_TPREL, -+ BFD_RELOC_PPC_DTPREL16, -+ BFD_RELOC_PPC_DTPREL16_LO, -+ BFD_RELOC_PPC_DTPREL16_HI, -+ BFD_RELOC_PPC_DTPREL16_HA, -+ BFD_RELOC_PPC_DTPREL, -+ BFD_RELOC_PPC_GOT_TLSGD16, -+ BFD_RELOC_PPC_GOT_TLSGD16_LO, -+ BFD_RELOC_PPC_GOT_TLSGD16_HI, -+ BFD_RELOC_PPC_GOT_TLSGD16_HA, -+ BFD_RELOC_PPC_GOT_TLSLD16, -+ BFD_RELOC_PPC_GOT_TLSLD16_LO, -+ BFD_RELOC_PPC_GOT_TLSLD16_HI, -+ BFD_RELOC_PPC_GOT_TLSLD16_HA, -+ BFD_RELOC_PPC_GOT_TPREL16, -+ BFD_RELOC_PPC_GOT_TPREL16_LO, -+ BFD_RELOC_PPC_GOT_TPREL16_HI, -+ BFD_RELOC_PPC_GOT_TPREL16_HA, -+ BFD_RELOC_PPC_GOT_DTPREL16, -+ BFD_RELOC_PPC_GOT_DTPREL16_LO, -+ BFD_RELOC_PPC_GOT_DTPREL16_HI, -+ BFD_RELOC_PPC_GOT_DTPREL16_HA, -+ BFD_RELOC_PPC64_TPREL16_DS, -+ BFD_RELOC_PPC64_TPREL16_LO_DS, -+ BFD_RELOC_PPC64_TPREL16_HIGHER, -+ BFD_RELOC_PPC64_TPREL16_HIGHERA, -+ BFD_RELOC_PPC64_TPREL16_HIGHEST, -+ BFD_RELOC_PPC64_TPREL16_HIGHESTA, -+ BFD_RELOC_PPC64_DTPREL16_DS, -+ BFD_RELOC_PPC64_DTPREL16_LO_DS, -+ BFD_RELOC_PPC64_DTPREL16_HIGHER, -+ BFD_RELOC_PPC64_DTPREL16_HIGHERA, -+ BFD_RELOC_PPC64_DTPREL16_HIGHEST, -+ BFD_RELOC_PPC64_DTPREL16_HIGHESTA, -+ -+/* IBM 370/390 relocations */ -+ BFD_RELOC_I370_D12, -+ -+/* The type of reloc used to build a constructor table - at the moment -+probably a 32 bit wide absolute relocation, but the target can choose. -+It generally does map to one of the other relocation types. */ -+ BFD_RELOC_CTOR, -+ -+/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are -+not stored in the instruction. */ -+ BFD_RELOC_ARM_PCREL_BRANCH, -+ -+/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is -+not stored in the instruction. The 2nd lowest bit comes from a 1 bit -+field in the instruction. */ -+ BFD_RELOC_ARM_PCREL_BLX, -+ -+/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is -+not stored in the instruction. The 2nd lowest bit comes from a 1 bit -+field in the instruction. */ -+ BFD_RELOC_THUMB_PCREL_BLX, -+ -+/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches. -+The lowest bit must be zero and is not stored in the instruction. -+Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an -+"nn" one smaller in all cases. Note further that BRANCH23 -+corresponds to R_ARM_THM_CALL. */ -+ BFD_RELOC_THUMB_PCREL_BRANCH7, -+ BFD_RELOC_THUMB_PCREL_BRANCH9, -+ BFD_RELOC_THUMB_PCREL_BRANCH12, -+ BFD_RELOC_THUMB_PCREL_BRANCH20, -+ BFD_RELOC_THUMB_PCREL_BRANCH23, -+ BFD_RELOC_THUMB_PCREL_BRANCH25, -+ -+/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */ -+ BFD_RELOC_ARM_OFFSET_IMM, -+ -+/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */ -+ BFD_RELOC_ARM_THUMB_OFFSET, -+ -+/* Pc-relative or absolute relocation depending on target. Used for -+entries in .init_array sections. */ -+ BFD_RELOC_ARM_TARGET1, -+ -+/* Read-only segment base relative address. */ -+ BFD_RELOC_ARM_ROSEGREL32, -+ -+/* Data segment base relative address. */ -+ BFD_RELOC_ARM_SBREL32, -+ -+/* This reloc is used for references to RTTI data from exception handling -+tables. The actual definition depends on the target. It may be a -+pc-relative or some form of GOT-indirect relocation. */ -+ BFD_RELOC_ARM_TARGET2, -+ -+/* 31-bit PC relative address. */ -+ BFD_RELOC_ARM_PREL31, -+ -+/* Relocations for setting up GOTs and PLTs for shared libraries. 
*/ -+ BFD_RELOC_ARM_JUMP_SLOT, -+ BFD_RELOC_ARM_GLOB_DAT, -+ BFD_RELOC_ARM_GOT32, -+ BFD_RELOC_ARM_PLT32, -+ BFD_RELOC_ARM_RELATIVE, -+ BFD_RELOC_ARM_GOTOFF, -+ BFD_RELOC_ARM_GOTPC, -+ -+/* ARM thread-local storage relocations. */ -+ BFD_RELOC_ARM_TLS_GD32, -+ BFD_RELOC_ARM_TLS_LDO32, -+ BFD_RELOC_ARM_TLS_LDM32, -+ BFD_RELOC_ARM_TLS_DTPOFF32, -+ BFD_RELOC_ARM_TLS_DTPMOD32, -+ BFD_RELOC_ARM_TLS_TPOFF32, -+ BFD_RELOC_ARM_TLS_IE32, -+ BFD_RELOC_ARM_TLS_LE32, -+ -+/* These relocs are only used within the ARM assembler. They are not -+(at present) written to any object files. */ -+ BFD_RELOC_ARM_IMMEDIATE, -+ BFD_RELOC_ARM_ADRL_IMMEDIATE, -+ BFD_RELOC_ARM_T32_IMMEDIATE, -+ BFD_RELOC_ARM_SHIFT_IMM, -+ BFD_RELOC_ARM_SMI, -+ BFD_RELOC_ARM_SWI, -+ BFD_RELOC_ARM_MULTI, -+ BFD_RELOC_ARM_CP_OFF_IMM, -+ BFD_RELOC_ARM_CP_OFF_IMM_S2, -+ BFD_RELOC_ARM_ADR_IMM, -+ BFD_RELOC_ARM_LDR_IMM, -+ BFD_RELOC_ARM_LITERAL, -+ BFD_RELOC_ARM_IN_POOL, -+ BFD_RELOC_ARM_OFFSET_IMM8, -+ BFD_RELOC_ARM_T32_OFFSET_U8, -+ BFD_RELOC_ARM_T32_OFFSET_IMM, -+ BFD_RELOC_ARM_HWLITERAL, -+ BFD_RELOC_ARM_THUMB_ADD, -+ BFD_RELOC_ARM_THUMB_IMM, -+ BFD_RELOC_ARM_THUMB_SHIFT, -+ -+/* Renesas / SuperH SH relocs. Not all of these appear in object files. */ -+ BFD_RELOC_SH_PCDISP8BY2, -+ BFD_RELOC_SH_PCDISP12BY2, -+ BFD_RELOC_SH_IMM3, -+ BFD_RELOC_SH_IMM3U, -+ BFD_RELOC_SH_DISP12, -+ BFD_RELOC_SH_DISP12BY2, -+ BFD_RELOC_SH_DISP12BY4, -+ BFD_RELOC_SH_DISP12BY8, -+ BFD_RELOC_SH_DISP20, -+ BFD_RELOC_SH_DISP20BY8, -+ BFD_RELOC_SH_IMM4, -+ BFD_RELOC_SH_IMM4BY2, -+ BFD_RELOC_SH_IMM4BY4, -+ BFD_RELOC_SH_IMM8, -+ BFD_RELOC_SH_IMM8BY2, -+ BFD_RELOC_SH_IMM8BY4, -+ BFD_RELOC_SH_PCRELIMM8BY2, -+ BFD_RELOC_SH_PCRELIMM8BY4, -+ BFD_RELOC_SH_SWITCH16, -+ BFD_RELOC_SH_SWITCH32, -+ BFD_RELOC_SH_USES, -+ BFD_RELOC_SH_COUNT, -+ BFD_RELOC_SH_ALIGN, -+ BFD_RELOC_SH_CODE, -+ BFD_RELOC_SH_DATA, -+ BFD_RELOC_SH_LABEL, -+ BFD_RELOC_SH_LOOP_START, -+ BFD_RELOC_SH_LOOP_END, -+ BFD_RELOC_SH_COPY, -+ BFD_RELOC_SH_GLOB_DAT, -+ BFD_RELOC_SH_JMP_SLOT, -+ BFD_RELOC_SH_RELATIVE, -+ BFD_RELOC_SH_GOTPC, -+ BFD_RELOC_SH_GOT_LOW16, -+ BFD_RELOC_SH_GOT_MEDLOW16, -+ BFD_RELOC_SH_GOT_MEDHI16, -+ BFD_RELOC_SH_GOT_HI16, -+ BFD_RELOC_SH_GOTPLT_LOW16, -+ BFD_RELOC_SH_GOTPLT_MEDLOW16, -+ BFD_RELOC_SH_GOTPLT_MEDHI16, -+ BFD_RELOC_SH_GOTPLT_HI16, -+ BFD_RELOC_SH_PLT_LOW16, -+ BFD_RELOC_SH_PLT_MEDLOW16, -+ BFD_RELOC_SH_PLT_MEDHI16, -+ BFD_RELOC_SH_PLT_HI16, -+ BFD_RELOC_SH_GOTOFF_LOW16, -+ BFD_RELOC_SH_GOTOFF_MEDLOW16, -+ BFD_RELOC_SH_GOTOFF_MEDHI16, -+ BFD_RELOC_SH_GOTOFF_HI16, -+ BFD_RELOC_SH_GOTPC_LOW16, -+ BFD_RELOC_SH_GOTPC_MEDLOW16, -+ BFD_RELOC_SH_GOTPC_MEDHI16, -+ BFD_RELOC_SH_GOTPC_HI16, -+ BFD_RELOC_SH_COPY64, -+ BFD_RELOC_SH_GLOB_DAT64, -+ BFD_RELOC_SH_JMP_SLOT64, -+ BFD_RELOC_SH_RELATIVE64, -+ BFD_RELOC_SH_GOT10BY4, -+ BFD_RELOC_SH_GOT10BY8, -+ BFD_RELOC_SH_GOTPLT10BY4, -+ BFD_RELOC_SH_GOTPLT10BY8, -+ BFD_RELOC_SH_GOTPLT32, -+ BFD_RELOC_SH_SHMEDIA_CODE, -+ BFD_RELOC_SH_IMMU5, -+ BFD_RELOC_SH_IMMS6, -+ BFD_RELOC_SH_IMMS6BY32, -+ BFD_RELOC_SH_IMMU6, -+ BFD_RELOC_SH_IMMS10, -+ BFD_RELOC_SH_IMMS10BY2, -+ BFD_RELOC_SH_IMMS10BY4, -+ BFD_RELOC_SH_IMMS10BY8, -+ BFD_RELOC_SH_IMMS16, -+ BFD_RELOC_SH_IMMU16, -+ BFD_RELOC_SH_IMM_LOW16, -+ BFD_RELOC_SH_IMM_LOW16_PCREL, -+ BFD_RELOC_SH_IMM_MEDLOW16, -+ BFD_RELOC_SH_IMM_MEDLOW16_PCREL, -+ BFD_RELOC_SH_IMM_MEDHI16, -+ BFD_RELOC_SH_IMM_MEDHI16_PCREL, -+ BFD_RELOC_SH_IMM_HI16, -+ BFD_RELOC_SH_IMM_HI16_PCREL, -+ BFD_RELOC_SH_PT_16, -+ BFD_RELOC_SH_TLS_GD_32, -+ BFD_RELOC_SH_TLS_LD_32, -+ BFD_RELOC_SH_TLS_LDO_32, -+ BFD_RELOC_SH_TLS_IE_32, -+ 
BFD_RELOC_SH_TLS_LE_32, -+ BFD_RELOC_SH_TLS_DTPMOD32, -+ BFD_RELOC_SH_TLS_DTPOFF32, -+ BFD_RELOC_SH_TLS_TPOFF32, -+ -+/* ARC Cores relocs. -+ARC 22 bit pc-relative branch. The lowest two bits must be zero and are -+not stored in the instruction. The high 20 bits are installed in bits 26 -+through 7 of the instruction. */ -+ BFD_RELOC_ARC_B22_PCREL, -+ -+/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not -+stored in the instruction. The high 24 bits are installed in bits 23 -+through 0. */ -+ BFD_RELOC_ARC_B26, -+ -+/* Mitsubishi D10V relocs. -+This is a 10-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_10_PCREL_R, -+ -+/* Mitsubishi D10V relocs. -+This is a 10-bit reloc with the right 2 bits -+assumed to be 0. This is the same as the previous reloc -+except it is in the left container, i.e., -+shifted left 15 bits. */ -+ BFD_RELOC_D10V_10_PCREL_L, -+ -+/* This is an 18-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_18, -+ -+/* This is an 18-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_18_PCREL, -+ -+/* Mitsubishi D30V relocs. -+This is a 6-bit absolute reloc. */ -+ BFD_RELOC_D30V_6, -+ -+/* This is a 6-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_9_PCREL, -+ -+/* This is a 6-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_9_PCREL_R, -+ -+/* This is a 12-bit absolute reloc with the -+right 3 bitsassumed to be 0. */ -+ BFD_RELOC_D30V_15, -+ -+/* This is a 12-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_15_PCREL, -+ -+/* This is a 12-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_15_PCREL_R, -+ -+/* This is an 18-bit absolute reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_21, -+ -+/* This is an 18-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_21_PCREL, -+ -+/* This is an 18-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_21_PCREL_R, -+ -+/* This is a 32-bit absolute reloc. */ -+ BFD_RELOC_D30V_32, -+ -+/* This is a 32-bit pc-relative reloc. */ -+ BFD_RELOC_D30V_32_PCREL, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_HI16_S, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_LO16, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_JMP26, -+ -+/* Renesas M16C/M32C Relocations. */ -+ BFD_RELOC_M16C_8_PCREL8, -+ BFD_RELOC_M16C_16_PCREL8, -+ BFD_RELOC_M16C_8_PCREL16, -+ BFD_RELOC_M16C_8_ELABEL24, -+ BFD_RELOC_M16C_8_ABS16, -+ BFD_RELOC_M16C_16_ABS16, -+ BFD_RELOC_M16C_16_ABS24, -+ BFD_RELOC_M16C_16_ABS32, -+ BFD_RELOC_M16C_24_ABS16, -+ BFD_RELOC_M16C_24_ABS24, -+ BFD_RELOC_M16C_24_ABS32, -+ BFD_RELOC_M16C_32_ABS16, -+ BFD_RELOC_M16C_32_ABS24, -+ BFD_RELOC_M16C_32_ABS32, -+ BFD_RELOC_M16C_40_ABS16, -+ BFD_RELOC_M16C_40_ABS24, -+ BFD_RELOC_M16C_40_ABS32, -+ -+/* Renesas M32R (formerly Mitsubishi M32R) relocs. -+This is a 24 bit absolute address. */ -+ BFD_RELOC_M32R_24, -+ -+/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */ -+ BFD_RELOC_M32R_10_PCREL, -+ -+/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ -+ BFD_RELOC_M32R_18_PCREL, -+ -+/* This is a 26-bit reloc with the right 2 bits assumed to be 0. 
*/ -+ BFD_RELOC_M32R_26_PCREL, -+ -+/* This is a 16-bit reloc containing the high 16 bits of an address -+used when the lower 16 bits are treated as unsigned. */ -+ BFD_RELOC_M32R_HI16_ULO, -+ -+/* This is a 16-bit reloc containing the high 16 bits of an address -+used when the lower 16 bits are treated as signed. */ -+ BFD_RELOC_M32R_HI16_SLO, -+ -+/* This is a 16-bit reloc containing the lower 16 bits of an address. */ -+ BFD_RELOC_M32R_LO16, -+ -+/* This is a 16-bit reloc containing the small data area offset for use in -+add3, load, and store instructions. */ -+ BFD_RELOC_M32R_SDA16, -+ -+/* For PIC. */ -+ BFD_RELOC_M32R_GOT24, -+ BFD_RELOC_M32R_26_PLTREL, -+ BFD_RELOC_M32R_COPY, -+ BFD_RELOC_M32R_GLOB_DAT, -+ BFD_RELOC_M32R_JMP_SLOT, -+ BFD_RELOC_M32R_RELATIVE, -+ BFD_RELOC_M32R_GOTOFF, -+ BFD_RELOC_M32R_GOTOFF_HI_ULO, -+ BFD_RELOC_M32R_GOTOFF_HI_SLO, -+ BFD_RELOC_M32R_GOTOFF_LO, -+ BFD_RELOC_M32R_GOTPC24, -+ BFD_RELOC_M32R_GOT16_HI_ULO, -+ BFD_RELOC_M32R_GOT16_HI_SLO, -+ BFD_RELOC_M32R_GOT16_LO, -+ BFD_RELOC_M32R_GOTPC_HI_ULO, -+ BFD_RELOC_M32R_GOTPC_HI_SLO, -+ BFD_RELOC_M32R_GOTPC_LO, -+ -+/* This is a 9-bit reloc */ -+ BFD_RELOC_V850_9_PCREL, -+ -+/* This is a 22-bit reloc */ -+ BFD_RELOC_V850_22_PCREL, -+ -+/* This is a 16 bit offset from the short data area pointer. */ -+ BFD_RELOC_V850_SDA_16_16_OFFSET, -+ -+/* This is a 16 bit offset (of which only 15 bits are used) from the -+short data area pointer. */ -+ BFD_RELOC_V850_SDA_15_16_OFFSET, -+ -+/* This is a 16 bit offset from the zero data area pointer. */ -+ BFD_RELOC_V850_ZDA_16_16_OFFSET, -+ -+/* This is a 16 bit offset (of which only 15 bits are used) from the -+zero data area pointer. */ -+ BFD_RELOC_V850_ZDA_15_16_OFFSET, -+ -+/* This is an 8 bit offset (of which only 6 bits are used) from the -+tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_6_8_OFFSET, -+ -+/* This is an 8bit offset (of which only 7 bits are used) from the tiny -+data area pointer. */ -+ BFD_RELOC_V850_TDA_7_8_OFFSET, -+ -+/* This is a 7 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_7_7_OFFSET, -+ -+/* This is a 16 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_16_16_OFFSET, -+ -+/* This is a 5 bit offset (of which only 4 bits are used) from the tiny -+data area pointer. */ -+ BFD_RELOC_V850_TDA_4_5_OFFSET, -+ -+/* This is a 4 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_4_4_OFFSET, -+ -+/* This is a 16 bit offset from the short data area pointer, with the -+bits placed non-contiguously in the instruction. */ -+ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET, -+ -+/* This is a 16 bit offset from the zero data area pointer, with the -+bits placed non-contiguously in the instruction. */ -+ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET, -+ -+/* This is a 6 bit offset from the call table base pointer. */ -+ BFD_RELOC_V850_CALLT_6_7_OFFSET, -+ -+/* This is a 16 bit offset from the call table base pointer. */ -+ BFD_RELOC_V850_CALLT_16_16_OFFSET, -+ -+/* Used for relaxing indirect function calls. */ -+ BFD_RELOC_V850_LONGCALL, -+ -+/* Used for relaxing indirect jumps. */ -+ BFD_RELOC_V850_LONGJUMP, -+ -+/* Used to maintain alignment whilst relaxing. */ -+ BFD_RELOC_V850_ALIGN, -+ -+/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu -+instructions. */ -+ BFD_RELOC_V850_LO16_SPLIT_OFFSET, -+ -+/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the -+instruction. 
*/ -+ BFD_RELOC_MN10300_32_PCREL, -+ -+/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the -+instruction. */ -+ BFD_RELOC_MN10300_16_PCREL, -+ -+/* This is a 8bit DP reloc for the tms320c30, where the most -+significant 8 bits of a 24 bit word are placed into the least -+significant 8 bits of the opcode. */ -+ BFD_RELOC_TIC30_LDP, -+ -+/* This is a 7bit reloc for the tms320c54x, where the least -+significant 7 bits of a 16 bit word are placed into the least -+significant 7 bits of the opcode. */ -+ BFD_RELOC_TIC54X_PARTLS7, -+ -+/* This is a 9bit DP reloc for the tms320c54x, where the most -+significant 9 bits of a 16 bit word are placed into the least -+significant 9 bits of the opcode. */ -+ BFD_RELOC_TIC54X_PARTMS9, -+ -+/* This is an extended address 23-bit reloc for the tms320c54x. */ -+ BFD_RELOC_TIC54X_23, -+ -+/* This is a 16-bit reloc for the tms320c54x, where the least -+significant 16 bits of a 23-bit extended address are placed into -+the opcode. */ -+ BFD_RELOC_TIC54X_16_OF_23, -+ -+/* This is a reloc for the tms320c54x, where the most -+significant 7 bits of a 23-bit extended address are placed into -+the opcode. */ -+ BFD_RELOC_TIC54X_MS7_OF_23, -+ -+/* This is a 48 bit reloc for the FR30 that stores 32 bits. */ -+ BFD_RELOC_FR30_48, -+ -+/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into -+two sections. */ -+ BFD_RELOC_FR30_20, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in -+4 bits. */ -+ BFD_RELOC_FR30_6_IN_4, -+ -+/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset -+into 8 bits. */ -+ BFD_RELOC_FR30_8_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset -+into 8 bits. */ -+ BFD_RELOC_FR30_9_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset -+into 8 bits. */ -+ BFD_RELOC_FR30_10_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative -+short offset into 8 bits. */ -+ BFD_RELOC_FR30_9_PCREL, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative -+short offset into 11 bits. */ -+ BFD_RELOC_FR30_12_PCREL, -+ -+/* Motorola Mcore relocations. */ -+ BFD_RELOC_MCORE_PCREL_IMM8BY4, -+ BFD_RELOC_MCORE_PCREL_IMM11BY2, -+ BFD_RELOC_MCORE_PCREL_IMM4BY2, -+ BFD_RELOC_MCORE_PCREL_32, -+ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2, -+ BFD_RELOC_MCORE_RVA, -+ -+/* These are relocations for the GETA instruction. */ -+ BFD_RELOC_MMIX_GETA, -+ BFD_RELOC_MMIX_GETA_1, -+ BFD_RELOC_MMIX_GETA_2, -+ BFD_RELOC_MMIX_GETA_3, -+ -+/* These are relocations for a conditional branch instruction. */ -+ BFD_RELOC_MMIX_CBRANCH, -+ BFD_RELOC_MMIX_CBRANCH_J, -+ BFD_RELOC_MMIX_CBRANCH_1, -+ BFD_RELOC_MMIX_CBRANCH_2, -+ BFD_RELOC_MMIX_CBRANCH_3, -+ -+/* These are relocations for the PUSHJ instruction. */ -+ BFD_RELOC_MMIX_PUSHJ, -+ BFD_RELOC_MMIX_PUSHJ_1, -+ BFD_RELOC_MMIX_PUSHJ_2, -+ BFD_RELOC_MMIX_PUSHJ_3, -+ BFD_RELOC_MMIX_PUSHJ_STUBBABLE, -+ -+/* These are relocations for the JMP instruction. */ -+ BFD_RELOC_MMIX_JMP, -+ BFD_RELOC_MMIX_JMP_1, -+ BFD_RELOC_MMIX_JMP_2, -+ BFD_RELOC_MMIX_JMP_3, -+ -+/* This is a relocation for a relative address as in a GETA instruction or -+a branch. */ -+ BFD_RELOC_MMIX_ADDR19, -+ -+/* This is a relocation for a relative address as in a JMP instruction. */ -+ BFD_RELOC_MMIX_ADDR27, -+ -+/* This is a relocation for an instruction field that may be a general -+register or a value 0..255. 
*/ -+ BFD_RELOC_MMIX_REG_OR_BYTE, -+ -+/* This is a relocation for an instruction field that may be a general -+register. */ -+ BFD_RELOC_MMIX_REG, -+ -+/* This is a relocation for two instruction fields holding a register and -+an offset, the equivalent of the relocation. */ -+ BFD_RELOC_MMIX_BASE_PLUS_OFFSET, -+ -+/* This relocation is an assertion that the expression is not allocated as -+a global register. It does not modify contents. */ -+ BFD_RELOC_MMIX_LOCAL, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative -+short offset into 7 bits. */ -+ BFD_RELOC_AVR_7_PCREL, -+ -+/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative -+short offset into 12 bits. */ -+ BFD_RELOC_AVR_13_PCREL, -+ -+/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually -+program memory address) into 16 bits. */ -+ BFD_RELOC_AVR_16_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually -+data memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_LO8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit -+of data memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HI8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit -+of program memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HH8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(usually data memory address) into 8 bit immediate value of SUBI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 8 bit of data memory address) into 8 bit immediate value of -+SUBI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(most high 8 bit of program memory address) into 8 bit immediate value -+of LDI or SUBI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually -+command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit -+of command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit -+of command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(usually command address) into 8 bit immediate value of SUBI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_PM_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 8 bit of 16 bit command address) into 8 bit immediate value -+of SUBI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_PM_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 6 bit of 22 bit command address) into 8 bit immediate -+value of SUBI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_PM_NEG, -+ -+/* This is a 32 bit reloc for the AVR that stores 23 bit value -+into 22 bits. 
*/ -+ BFD_RELOC_AVR_CALL, -+ -+/* This is a 16 bit reloc for the AVR that stores all needed bits -+for absolute addressing with ldi with overflow check to linktime */ -+ BFD_RELOC_AVR_LDI, -+ -+/* This is a 6 bit reloc for the AVR that stores offset for ldd/std -+instructions */ -+ BFD_RELOC_AVR_6, -+ -+/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw -+instructions */ -+ BFD_RELOC_AVR_6_ADIW, -+ -+/* Direct 12 bit. */ -+ BFD_RELOC_390_12, -+ -+/* 12 bit GOT offset. */ -+ BFD_RELOC_390_GOT12, -+ -+/* 32 bit PC relative PLT address. */ -+ BFD_RELOC_390_PLT32, -+ -+/* Copy symbol at runtime. */ -+ BFD_RELOC_390_COPY, -+ -+/* Create GOT entry. */ -+ BFD_RELOC_390_GLOB_DAT, -+ -+/* Create PLT entry. */ -+ BFD_RELOC_390_JMP_SLOT, -+ -+/* Adjust by program base. */ -+ BFD_RELOC_390_RELATIVE, -+ -+/* 32 bit PC relative offset to GOT. */ -+ BFD_RELOC_390_GOTPC, -+ -+/* 16 bit GOT offset. */ -+ BFD_RELOC_390_GOT16, -+ -+/* PC relative 16 bit shifted by 1. */ -+ BFD_RELOC_390_PC16DBL, -+ -+/* 16 bit PC rel. PLT shifted by 1. */ -+ BFD_RELOC_390_PLT16DBL, -+ -+/* PC relative 32 bit shifted by 1. */ -+ BFD_RELOC_390_PC32DBL, -+ -+/* 32 bit PC rel. PLT shifted by 1. */ -+ BFD_RELOC_390_PLT32DBL, -+ -+/* 32 bit PC rel. GOT shifted by 1. */ -+ BFD_RELOC_390_GOTPCDBL, -+ -+/* 64 bit GOT offset. */ -+ BFD_RELOC_390_GOT64, -+ -+/* 64 bit PC relative PLT address. */ -+ BFD_RELOC_390_PLT64, -+ -+/* 32 bit rel. offset to GOT entry. */ -+ BFD_RELOC_390_GOTENT, -+ -+/* 64 bit offset to GOT. */ -+ BFD_RELOC_390_GOTOFF64, -+ -+/* 12-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT12, -+ -+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT16, -+ -+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT32, -+ -+/* 64-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT64, -+ -+/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLTENT, -+ -+/* 16-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF16, -+ -+/* 32-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF32, -+ -+/* 64-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF64, -+ -+/* s390 tls relocations. */ -+ BFD_RELOC_390_TLS_LOAD, -+ BFD_RELOC_390_TLS_GDCALL, -+ BFD_RELOC_390_TLS_LDCALL, -+ BFD_RELOC_390_TLS_GD32, -+ BFD_RELOC_390_TLS_GD64, -+ BFD_RELOC_390_TLS_GOTIE12, -+ BFD_RELOC_390_TLS_GOTIE32, -+ BFD_RELOC_390_TLS_GOTIE64, -+ BFD_RELOC_390_TLS_LDM32, -+ BFD_RELOC_390_TLS_LDM64, -+ BFD_RELOC_390_TLS_IE32, -+ BFD_RELOC_390_TLS_IE64, -+ BFD_RELOC_390_TLS_IEENT, -+ BFD_RELOC_390_TLS_LE32, -+ BFD_RELOC_390_TLS_LE64, -+ BFD_RELOC_390_TLS_LDO32, -+ BFD_RELOC_390_TLS_LDO64, -+ BFD_RELOC_390_TLS_DTPMOD, -+ BFD_RELOC_390_TLS_DTPOFF, -+ BFD_RELOC_390_TLS_TPOFF, -+ -+/* Long displacement extension. 
*/ -+ BFD_RELOC_390_20, -+ BFD_RELOC_390_GOT20, -+ BFD_RELOC_390_GOTPLT20, -+ BFD_RELOC_390_TLS_GOTIE20, -+ -+/* Scenix IP2K - 9-bit register number / data address */ -+ BFD_RELOC_IP2K_FR9, -+ -+/* Scenix IP2K - 4-bit register/data bank number */ -+ BFD_RELOC_IP2K_BANK, -+ -+/* Scenix IP2K - low 13 bits of instruction word address */ -+ BFD_RELOC_IP2K_ADDR16CJP, -+ -+/* Scenix IP2K - high 3 bits of instruction word address */ -+ BFD_RELOC_IP2K_PAGE3, -+ -+/* Scenix IP2K - ext/low/high 8 bits of data address */ -+ BFD_RELOC_IP2K_LO8DATA, -+ BFD_RELOC_IP2K_HI8DATA, -+ BFD_RELOC_IP2K_EX8DATA, -+ -+/* Scenix IP2K - low/high 8 bits of instruction word address */ -+ BFD_RELOC_IP2K_LO8INSN, -+ BFD_RELOC_IP2K_HI8INSN, -+ -+/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */ -+ BFD_RELOC_IP2K_PC_SKIP, -+ -+/* Scenix IP2K - 16 bit word address in text section. */ -+ BFD_RELOC_IP2K_TEXT, -+ -+/* Scenix IP2K - 7-bit sp or dp offset */ -+ BFD_RELOC_IP2K_FR_OFFSET, -+ -+/* Scenix VPE4K coprocessor - data/insn-space addressing */ -+ BFD_RELOC_VPE4KMATH_DATA, -+ BFD_RELOC_VPE4KMATH_INSN, -+ -+/* These two relocations are used by the linker to determine which of -+the entries in a C++ virtual function table are actually used. When -+the --gc-sections option is given, the linker will zero out the entries -+that are not used, so that the code for those functions need not be -+included in the output. -+ -+VTABLE_INHERIT is a zero-space relocation used to describe to the -+linker the inheritance tree of a C++ virtual function table. The -+relocation's symbol should be the parent class' vtable, and the -+relocation should be located at the child vtable. -+ -+VTABLE_ENTRY is a zero-space relocation that describes the use of a -+virtual function table entry. The reloc's symbol should refer to the -+table of the class mentioned in the code. Off of that base, an offset -+describes the entry that is being used. For Rela hosts, this offset -+is stored in the reloc's addend. For Rel hosts, we are forced to put -+this offset in the reloc's section offset. */ -+ BFD_RELOC_VTABLE_INHERIT, -+ BFD_RELOC_VTABLE_ENTRY, -+ -+/* Intel IA64 Relocations. 
*/ -+ BFD_RELOC_IA64_IMM14, -+ BFD_RELOC_IA64_IMM22, -+ BFD_RELOC_IA64_IMM64, -+ BFD_RELOC_IA64_DIR32MSB, -+ BFD_RELOC_IA64_DIR32LSB, -+ BFD_RELOC_IA64_DIR64MSB, -+ BFD_RELOC_IA64_DIR64LSB, -+ BFD_RELOC_IA64_GPREL22, -+ BFD_RELOC_IA64_GPREL64I, -+ BFD_RELOC_IA64_GPREL32MSB, -+ BFD_RELOC_IA64_GPREL32LSB, -+ BFD_RELOC_IA64_GPREL64MSB, -+ BFD_RELOC_IA64_GPREL64LSB, -+ BFD_RELOC_IA64_LTOFF22, -+ BFD_RELOC_IA64_LTOFF64I, -+ BFD_RELOC_IA64_PLTOFF22, -+ BFD_RELOC_IA64_PLTOFF64I, -+ BFD_RELOC_IA64_PLTOFF64MSB, -+ BFD_RELOC_IA64_PLTOFF64LSB, -+ BFD_RELOC_IA64_FPTR64I, -+ BFD_RELOC_IA64_FPTR32MSB, -+ BFD_RELOC_IA64_FPTR32LSB, -+ BFD_RELOC_IA64_FPTR64MSB, -+ BFD_RELOC_IA64_FPTR64LSB, -+ BFD_RELOC_IA64_PCREL21B, -+ BFD_RELOC_IA64_PCREL21BI, -+ BFD_RELOC_IA64_PCREL21M, -+ BFD_RELOC_IA64_PCREL21F, -+ BFD_RELOC_IA64_PCREL22, -+ BFD_RELOC_IA64_PCREL60B, -+ BFD_RELOC_IA64_PCREL64I, -+ BFD_RELOC_IA64_PCREL32MSB, -+ BFD_RELOC_IA64_PCREL32LSB, -+ BFD_RELOC_IA64_PCREL64MSB, -+ BFD_RELOC_IA64_PCREL64LSB, -+ BFD_RELOC_IA64_LTOFF_FPTR22, -+ BFD_RELOC_IA64_LTOFF_FPTR64I, -+ BFD_RELOC_IA64_LTOFF_FPTR32MSB, -+ BFD_RELOC_IA64_LTOFF_FPTR32LSB, -+ BFD_RELOC_IA64_LTOFF_FPTR64MSB, -+ BFD_RELOC_IA64_LTOFF_FPTR64LSB, -+ BFD_RELOC_IA64_SEGREL32MSB, -+ BFD_RELOC_IA64_SEGREL32LSB, -+ BFD_RELOC_IA64_SEGREL64MSB, -+ BFD_RELOC_IA64_SEGREL64LSB, -+ BFD_RELOC_IA64_SECREL32MSB, -+ BFD_RELOC_IA64_SECREL32LSB, -+ BFD_RELOC_IA64_SECREL64MSB, -+ BFD_RELOC_IA64_SECREL64LSB, -+ BFD_RELOC_IA64_REL32MSB, -+ BFD_RELOC_IA64_REL32LSB, -+ BFD_RELOC_IA64_REL64MSB, -+ BFD_RELOC_IA64_REL64LSB, -+ BFD_RELOC_IA64_LTV32MSB, -+ BFD_RELOC_IA64_LTV32LSB, -+ BFD_RELOC_IA64_LTV64MSB, -+ BFD_RELOC_IA64_LTV64LSB, -+ BFD_RELOC_IA64_IPLTMSB, -+ BFD_RELOC_IA64_IPLTLSB, -+ BFD_RELOC_IA64_COPY, -+ BFD_RELOC_IA64_LTOFF22X, -+ BFD_RELOC_IA64_LDXMOV, -+ BFD_RELOC_IA64_TPREL14, -+ BFD_RELOC_IA64_TPREL22, -+ BFD_RELOC_IA64_TPREL64I, -+ BFD_RELOC_IA64_TPREL64MSB, -+ BFD_RELOC_IA64_TPREL64LSB, -+ BFD_RELOC_IA64_LTOFF_TPREL22, -+ BFD_RELOC_IA64_DTPMOD64MSB, -+ BFD_RELOC_IA64_DTPMOD64LSB, -+ BFD_RELOC_IA64_LTOFF_DTPMOD22, -+ BFD_RELOC_IA64_DTPREL14, -+ BFD_RELOC_IA64_DTPREL22, -+ BFD_RELOC_IA64_DTPREL64I, -+ BFD_RELOC_IA64_DTPREL32MSB, -+ BFD_RELOC_IA64_DTPREL32LSB, -+ BFD_RELOC_IA64_DTPREL64MSB, -+ BFD_RELOC_IA64_DTPREL64LSB, -+ BFD_RELOC_IA64_LTOFF_DTPREL22, -+ -+/* Motorola 68HC11 reloc. -+This is the 8 bit high part of an absolute address. */ -+ BFD_RELOC_M68HC11_HI8, -+ -+/* Motorola 68HC11 reloc. -+This is the 8 bit low part of an absolute address. */ -+ BFD_RELOC_M68HC11_LO8, -+ -+/* Motorola 68HC11 reloc. -+This is the 3 bit of a value. */ -+ BFD_RELOC_M68HC11_3B, -+ -+/* Motorola 68HC11 reloc. -+This reloc marks the beginning of a jump/call instruction. -+It is used for linker relaxation to correctly identify beginning -+of instruction and change some branches to use PC-relative -+addressing mode. */ -+ BFD_RELOC_M68HC11_RL_JUMP, -+ -+/* Motorola 68HC11 reloc. -+This reloc marks a group of several instructions that gcc generates -+and for which the linker relaxation pass can modify and/or remove -+some of them. */ -+ BFD_RELOC_M68HC11_RL_GROUP, -+ -+/* Motorola 68HC11 reloc. -+This is the 16-bit lower part of an address. It is used for 'call' -+instruction to specify the symbol address without any special -+transformation (due to memory bank window). */ -+ BFD_RELOC_M68HC11_LO16, -+ -+/* Motorola 68HC11 reloc. -+This is a 8-bit reloc that specifies the page number of an address. -+It is used by 'call' instruction to specify the page number of -+the symbol. 
*/ -+ BFD_RELOC_M68HC11_PAGE, -+ -+/* Motorola 68HC11 reloc. -+This is a 24-bit reloc that represents the address with a 16-bit -+value and a 8-bit page number. The symbol address is transformed -+to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */ -+ BFD_RELOC_M68HC11_24, -+ -+/* Motorola 68HC12 reloc. -+This is the 5 bits of a value. */ -+ BFD_RELOC_M68HC12_5B, -+ -+/* NS CR16C Relocations. */ -+ BFD_RELOC_16C_NUM08, -+ BFD_RELOC_16C_NUM08_C, -+ BFD_RELOC_16C_NUM16, -+ BFD_RELOC_16C_NUM16_C, -+ BFD_RELOC_16C_NUM32, -+ BFD_RELOC_16C_NUM32_C, -+ BFD_RELOC_16C_DISP04, -+ BFD_RELOC_16C_DISP04_C, -+ BFD_RELOC_16C_DISP08, -+ BFD_RELOC_16C_DISP08_C, -+ BFD_RELOC_16C_DISP16, -+ BFD_RELOC_16C_DISP16_C, -+ BFD_RELOC_16C_DISP24, -+ BFD_RELOC_16C_DISP24_C, -+ BFD_RELOC_16C_DISP24a, -+ BFD_RELOC_16C_DISP24a_C, -+ BFD_RELOC_16C_REG04, -+ BFD_RELOC_16C_REG04_C, -+ BFD_RELOC_16C_REG04a, -+ BFD_RELOC_16C_REG04a_C, -+ BFD_RELOC_16C_REG14, -+ BFD_RELOC_16C_REG14_C, -+ BFD_RELOC_16C_REG16, -+ BFD_RELOC_16C_REG16_C, -+ BFD_RELOC_16C_REG20, -+ BFD_RELOC_16C_REG20_C, -+ BFD_RELOC_16C_ABS20, -+ BFD_RELOC_16C_ABS20_C, -+ BFD_RELOC_16C_ABS24, -+ BFD_RELOC_16C_ABS24_C, -+ BFD_RELOC_16C_IMM04, -+ BFD_RELOC_16C_IMM04_C, -+ BFD_RELOC_16C_IMM16, -+ BFD_RELOC_16C_IMM16_C, -+ BFD_RELOC_16C_IMM20, -+ BFD_RELOC_16C_IMM20_C, -+ BFD_RELOC_16C_IMM24, -+ BFD_RELOC_16C_IMM24_C, -+ BFD_RELOC_16C_IMM32, -+ BFD_RELOC_16C_IMM32_C, -+ -+/* NS CRX Relocations. */ -+ BFD_RELOC_CRX_REL4, -+ BFD_RELOC_CRX_REL8, -+ BFD_RELOC_CRX_REL8_CMP, -+ BFD_RELOC_CRX_REL16, -+ BFD_RELOC_CRX_REL24, -+ BFD_RELOC_CRX_REL32, -+ BFD_RELOC_CRX_REGREL12, -+ BFD_RELOC_CRX_REGREL22, -+ BFD_RELOC_CRX_REGREL28, -+ BFD_RELOC_CRX_REGREL32, -+ BFD_RELOC_CRX_ABS16, -+ BFD_RELOC_CRX_ABS32, -+ BFD_RELOC_CRX_NUM8, -+ BFD_RELOC_CRX_NUM16, -+ BFD_RELOC_CRX_NUM32, -+ BFD_RELOC_CRX_IMM16, -+ BFD_RELOC_CRX_IMM32, -+ BFD_RELOC_CRX_SWITCH8, -+ BFD_RELOC_CRX_SWITCH16, -+ BFD_RELOC_CRX_SWITCH32, -+ -+/* These relocs are only used within the CRIS assembler. They are not -+(at present) written to any object files. */ -+ BFD_RELOC_CRIS_BDISP8, -+ BFD_RELOC_CRIS_UNSIGNED_5, -+ BFD_RELOC_CRIS_SIGNED_6, -+ BFD_RELOC_CRIS_UNSIGNED_6, -+ BFD_RELOC_CRIS_SIGNED_8, -+ BFD_RELOC_CRIS_UNSIGNED_8, -+ BFD_RELOC_CRIS_SIGNED_16, -+ BFD_RELOC_CRIS_UNSIGNED_16, -+ BFD_RELOC_CRIS_LAPCQ_OFFSET, -+ BFD_RELOC_CRIS_UNSIGNED_4, -+ -+/* Relocs used in ELF shared libraries for CRIS. */ -+ BFD_RELOC_CRIS_COPY, -+ BFD_RELOC_CRIS_GLOB_DAT, -+ BFD_RELOC_CRIS_JUMP_SLOT, -+ BFD_RELOC_CRIS_RELATIVE, -+ -+/* 32-bit offset to symbol-entry within GOT. */ -+ BFD_RELOC_CRIS_32_GOT, -+ -+/* 16-bit offset to symbol-entry within GOT. */ -+ BFD_RELOC_CRIS_16_GOT, -+ -+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_CRIS_32_GOTPLT, -+ -+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_CRIS_16_GOTPLT, -+ -+/* 32-bit offset to symbol, relative to GOT. */ -+ BFD_RELOC_CRIS_32_GOTREL, -+ -+/* 32-bit offset to symbol with PLT entry, relative to GOT. */ -+ BFD_RELOC_CRIS_32_PLT_GOTREL, -+ -+/* 32-bit offset to symbol with PLT entry, relative to this relocation. */ -+ BFD_RELOC_CRIS_32_PLT_PCREL, -+ -+/* Intel i860 Relocations. 
*/ -+ BFD_RELOC_860_COPY, -+ BFD_RELOC_860_GLOB_DAT, -+ BFD_RELOC_860_JUMP_SLOT, -+ BFD_RELOC_860_RELATIVE, -+ BFD_RELOC_860_PC26, -+ BFD_RELOC_860_PLT26, -+ BFD_RELOC_860_PC16, -+ BFD_RELOC_860_LOW0, -+ BFD_RELOC_860_SPLIT0, -+ BFD_RELOC_860_LOW1, -+ BFD_RELOC_860_SPLIT1, -+ BFD_RELOC_860_LOW2, -+ BFD_RELOC_860_SPLIT2, -+ BFD_RELOC_860_LOW3, -+ BFD_RELOC_860_LOGOT0, -+ BFD_RELOC_860_SPGOT0, -+ BFD_RELOC_860_LOGOT1, -+ BFD_RELOC_860_SPGOT1, -+ BFD_RELOC_860_LOGOTOFF0, -+ BFD_RELOC_860_SPGOTOFF0, -+ BFD_RELOC_860_LOGOTOFF1, -+ BFD_RELOC_860_SPGOTOFF1, -+ BFD_RELOC_860_LOGOTOFF2, -+ BFD_RELOC_860_LOGOTOFF3, -+ BFD_RELOC_860_LOPC, -+ BFD_RELOC_860_HIGHADJ, -+ BFD_RELOC_860_HAGOT, -+ BFD_RELOC_860_HAGOTOFF, -+ BFD_RELOC_860_HAPC, -+ BFD_RELOC_860_HIGH, -+ BFD_RELOC_860_HIGOT, -+ BFD_RELOC_860_HIGOTOFF, -+ -+/* OpenRISC Relocations. */ -+ BFD_RELOC_OPENRISC_ABS_26, -+ BFD_RELOC_OPENRISC_REL_26, -+ -+/* H8 elf Relocations. */ -+ BFD_RELOC_H8_DIR16A8, -+ BFD_RELOC_H8_DIR16R8, -+ BFD_RELOC_H8_DIR24A8, -+ BFD_RELOC_H8_DIR24R8, -+ BFD_RELOC_H8_DIR32A16, -+ -+/* Sony Xstormy16 Relocations. */ -+ BFD_RELOC_XSTORMY16_REL_12, -+ BFD_RELOC_XSTORMY16_12, -+ BFD_RELOC_XSTORMY16_24, -+ BFD_RELOC_XSTORMY16_FPTR16, -+ -+/* Relocations used by VAX ELF. */ -+ BFD_RELOC_VAX_GLOB_DAT, -+ BFD_RELOC_VAX_JMP_SLOT, -+ BFD_RELOC_VAX_RELATIVE, -+ -+/* Morpho MS1 - 16 bit immediate relocation. */ -+ BFD_RELOC_MS1_PC16, -+ -+/* Morpho MS1 - Hi 16 bits of an address. */ -+ BFD_RELOC_MS1_HI16, -+ -+/* Morpho MS1 - Low 16 bits of an address. */ -+ BFD_RELOC_MS1_LO16, -+ -+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */ -+ BFD_RELOC_MS1_GNU_VTINHERIT, -+ -+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */ -+ BFD_RELOC_MS1_GNU_VTENTRY, -+ -+/* msp430 specific relocation codes */ -+ BFD_RELOC_MSP430_10_PCREL, -+ BFD_RELOC_MSP430_16_PCREL, -+ BFD_RELOC_MSP430_16, -+ BFD_RELOC_MSP430_16_PCREL_BYTE, -+ BFD_RELOC_MSP430_16_BYTE, -+ BFD_RELOC_MSP430_2X_PCREL, -+ BFD_RELOC_MSP430_RL_PCREL, -+ -+/* IQ2000 Relocations. */ -+ BFD_RELOC_IQ2000_OFFSET_16, -+ BFD_RELOC_IQ2000_OFFSET_21, -+ BFD_RELOC_IQ2000_UHI16, -+ -+/* Special Xtensa relocation used only by PLT entries in ELF shared -+objects to indicate that the runtime linker should set the value -+to one of its own internal functions or data structures. */ -+ BFD_RELOC_XTENSA_RTLD, -+ -+/* Xtensa relocations for ELF shared objects. */ -+ BFD_RELOC_XTENSA_GLOB_DAT, -+ BFD_RELOC_XTENSA_JMP_SLOT, -+ BFD_RELOC_XTENSA_RELATIVE, -+ -+/* Xtensa relocation used in ELF object files for symbols that may require -+PLT entries. Otherwise, this is just a generic 32-bit relocation. */ -+ BFD_RELOC_XTENSA_PLT, -+ -+/* Xtensa relocations to mark the difference of two local symbols. -+These are only needed to support linker relaxation and can be ignored -+when not relaxing. The field is set to the value of the difference -+assuming no relaxation. The relocation encodes the position of the -+first symbol so the linker can determine whether to adjust the field -+value. */ -+ BFD_RELOC_XTENSA_DIFF8, -+ BFD_RELOC_XTENSA_DIFF16, -+ BFD_RELOC_XTENSA_DIFF32, -+ -+/* Generic Xtensa relocations for instruction operands. Only the slot -+number is encoded in the relocation. The relocation applies to the -+last PC-relative immediate operand, or if there are no PC-relative -+immediates, to the last immediate operand. 
*/ -+ BFD_RELOC_XTENSA_SLOT0_OP, -+ BFD_RELOC_XTENSA_SLOT1_OP, -+ BFD_RELOC_XTENSA_SLOT2_OP, -+ BFD_RELOC_XTENSA_SLOT3_OP, -+ BFD_RELOC_XTENSA_SLOT4_OP, -+ BFD_RELOC_XTENSA_SLOT5_OP, -+ BFD_RELOC_XTENSA_SLOT6_OP, -+ BFD_RELOC_XTENSA_SLOT7_OP, -+ BFD_RELOC_XTENSA_SLOT8_OP, -+ BFD_RELOC_XTENSA_SLOT9_OP, -+ BFD_RELOC_XTENSA_SLOT10_OP, -+ BFD_RELOC_XTENSA_SLOT11_OP, -+ BFD_RELOC_XTENSA_SLOT12_OP, -+ BFD_RELOC_XTENSA_SLOT13_OP, -+ BFD_RELOC_XTENSA_SLOT14_OP, -+ -+/* Alternate Xtensa relocations. Only the slot is encoded in the -+relocation. The meaning of these relocations is opcode-specific. */ -+ BFD_RELOC_XTENSA_SLOT0_ALT, -+ BFD_RELOC_XTENSA_SLOT1_ALT, -+ BFD_RELOC_XTENSA_SLOT2_ALT, -+ BFD_RELOC_XTENSA_SLOT3_ALT, -+ BFD_RELOC_XTENSA_SLOT4_ALT, -+ BFD_RELOC_XTENSA_SLOT5_ALT, -+ BFD_RELOC_XTENSA_SLOT6_ALT, -+ BFD_RELOC_XTENSA_SLOT7_ALT, -+ BFD_RELOC_XTENSA_SLOT8_ALT, -+ BFD_RELOC_XTENSA_SLOT9_ALT, -+ BFD_RELOC_XTENSA_SLOT10_ALT, -+ BFD_RELOC_XTENSA_SLOT11_ALT, -+ BFD_RELOC_XTENSA_SLOT12_ALT, -+ BFD_RELOC_XTENSA_SLOT13_ALT, -+ BFD_RELOC_XTENSA_SLOT14_ALT, -+ -+/* Xtensa relocations for backward compatibility. These have all been -+replaced by BFD_RELOC_XTENSA_SLOT0_OP. */ -+ BFD_RELOC_XTENSA_OP0, -+ BFD_RELOC_XTENSA_OP1, -+ BFD_RELOC_XTENSA_OP2, -+ -+/* Xtensa relocation to mark that the assembler expanded the -+instructions from an original target. The expansion size is -+encoded in the reloc size. */ -+ BFD_RELOC_XTENSA_ASM_EXPAND, -+ -+/* Xtensa relocation to mark that the linker should simplify -+assembler-expanded instructions. This is commonly used -+internally by the linker after analysis of a -+BFD_RELOC_XTENSA_ASM_EXPAND. */ -+ BFD_RELOC_XTENSA_ASM_SIMPLIFY, -+ BFD_RELOC_UNUSED }; -+typedef enum bfd_reloc_code_real bfd_reloc_code_real_type; -+reloc_howto_type *bfd_reloc_type_lookup -+ (bfd *abfd, bfd_reloc_code_real_type code); -+ -+const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code); -+ -+/* Extracted from syms.c. */ -+ -+typedef struct bfd_symbol -+{ -+ /* A pointer to the BFD which owns the symbol. This information -+ is necessary so that a back end can work out what additional -+ information (invisible to the application writer) is carried -+ with the symbol. -+ -+ This field is *almost* redundant, since you can use section->owner -+ instead, except that some symbols point to the global sections -+ bfd_{abs,com,und}_section. This could be fixed by making -+ these globals be per-bfd (or per-target-flavor). FIXME. */ -+ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */ -+ -+ /* The text of the symbol. The name is left alone, and not copied; the -+ application may not alter it. */ -+ const char *name; -+ -+ /* The value of the symbol. This really should be a union of a -+ numeric value with a pointer, since some flags indicate that -+ a pointer to another symbol is stored here. */ -+ symvalue value; -+ -+ /* Attributes of a symbol. */ -+#define BSF_NO_FLAGS 0x00 -+ -+ /* The symbol has local scope; <> in <>. The value -+ is the offset into the section of the data. */ -+#define BSF_LOCAL 0x01 -+ -+ /* The symbol has global scope; initialized data in <>. The -+ value is the offset into the section of the data. */ -+#define BSF_GLOBAL 0x02 -+ -+ /* The symbol has global scope and is exported. The value is -+ the offset into the section of the data. */ -+#define BSF_EXPORT BSF_GLOBAL /* No real difference. */ -+ -+ /* A normal C symbol would be one of: -+ <>, <>, <> or -+ <>. */ -+ -+ /* The symbol is a debugging record. 
The value has an arbitrary -+ meaning, unless BSF_DEBUGGING_RELOC is also set. */ -+#define BSF_DEBUGGING 0x08 -+ -+ /* The symbol denotes a function entry point. Used in ELF, -+ perhaps others someday. */ -+#define BSF_FUNCTION 0x10 -+ -+ /* Used by the linker. */ -+#define BSF_KEEP 0x20 -+#define BSF_KEEP_G 0x40 -+ -+ /* A weak global symbol, overridable without warnings by -+ a regular global symbol of the same name. */ -+#define BSF_WEAK 0x80 -+ -+ /* This symbol was created to point to a section, e.g. ELF's -+ STT_SECTION symbols. */ -+#define BSF_SECTION_SYM 0x100 -+ -+ /* The symbol used to be a common symbol, but now it is -+ allocated. */ -+#define BSF_OLD_COMMON 0x200 -+ -+ /* The default value for common data. */ -+#define BFD_FORT_COMM_DEFAULT_VALUE 0 -+ -+ /* In some files the type of a symbol sometimes alters its -+ location in an output file - ie in coff a <> symbol -+ which is also <> symbol appears where it was -+ declared and not at the end of a section. This bit is set -+ by the target BFD part to convey this information. */ -+#define BSF_NOT_AT_END 0x400 -+ -+ /* Signal that the symbol is the label of constructor section. */ -+#define BSF_CONSTRUCTOR 0x800 -+ -+ /* Signal that the symbol is a warning symbol. The name is a -+ warning. The name of the next symbol is the one to warn about; -+ if a reference is made to a symbol with the same name as the next -+ symbol, a warning is issued by the linker. */ -+#define BSF_WARNING 0x1000 -+ -+ /* Signal that the symbol is indirect. This symbol is an indirect -+ pointer to the symbol with the same name as the next symbol. */ -+#define BSF_INDIRECT 0x2000 -+ -+ /* BSF_FILE marks symbols that contain a file name. This is used -+ for ELF STT_FILE symbols. */ -+#define BSF_FILE 0x4000 -+ -+ /* Symbol is from dynamic linking information. */ -+#define BSF_DYNAMIC 0x8000 -+ -+ /* The symbol denotes a data object. Used in ELF, and perhaps -+ others someday. */ -+#define BSF_OBJECT 0x10000 -+ -+ /* This symbol is a debugging symbol. The value is the offset -+ into the section of the data. BSF_DEBUGGING should be set -+ as well. */ -+#define BSF_DEBUGGING_RELOC 0x20000 -+ -+ /* This symbol is thread local. Used in ELF. */ -+#define BSF_THREAD_LOCAL 0x40000 -+ -+ flagword flags; -+ -+ /* A pointer to the section to which this symbol is -+ relative. This will always be non NULL, there are special -+ sections for undefined and absolute symbols. */ -+ struct bfd_section *section; -+ -+ /* Back end special data. 
*/ -+ union -+ { -+ void *p; -+ bfd_vma i; -+ } -+ udata; -+} -+asymbol; -+ -+#define bfd_get_symtab_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd)) -+ -+bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym); -+ -+bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name); -+ -+#define bfd_is_local_label_name(abfd, name) \ -+ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name)) -+ -+bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym); -+ -+#define bfd_is_target_special_symbol(abfd, sym) \ -+ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym)) -+ -+#define bfd_canonicalize_symtab(abfd, location) \ -+ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location)) -+ -+bfd_boolean bfd_set_symtab -+ (bfd *abfd, asymbol **location, unsigned int count); -+ -+void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol); -+ -+#define bfd_make_empty_symbol(abfd) \ -+ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd)) -+ -+asymbol *_bfd_generic_make_empty_symbol (bfd *); -+ -+#define bfd_make_debug_symbol(abfd,ptr,size) \ -+ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size)) -+ -+int bfd_decode_symclass (asymbol *symbol); -+ -+bfd_boolean bfd_is_undefined_symclass (int symclass); -+ -+void bfd_symbol_info (asymbol *symbol, symbol_info *ret); -+ -+bfd_boolean bfd_copy_private_symbol_data -+ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym); -+ -+#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \ -+ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \ -+ (ibfd, isymbol, obfd, osymbol)) -+ -+/* Extracted from bfd.c. */ -+struct bfd -+{ -+ /* A unique identifier of the BFD */ -+ unsigned int id; -+ -+ /* The filename the application opened the BFD with. */ -+ const char *filename; -+ -+ /* A pointer to the target jump table. */ -+ const struct bfd_target *xvec; -+ -+ /* The IOSTREAM, and corresponding IO vector that provide access -+ to the file backing the BFD. */ -+ void *iostream; -+ const struct bfd_iovec *iovec; -+ -+ /* Is the file descriptor being cached? That is, can it be closed as -+ needed, and re-opened when accessed later? */ -+ bfd_boolean cacheable; -+ -+ /* Marks whether there was a default target specified when the -+ BFD was opened. This is used to select which matching algorithm -+ to use to choose the back end. */ -+ bfd_boolean target_defaulted; -+ -+ /* The caching routines use these to maintain a -+ least-recently-used list of BFDs. */ -+ struct bfd *lru_prev, *lru_next; -+ -+ /* When a file is closed by the caching routines, BFD retains -+ state information on the file here... */ -+ ufile_ptr where; -+ -+ /* ... and here: (``once'' means at least once). */ -+ bfd_boolean opened_once; -+ -+ /* Set if we have a locally maintained mtime value, rather than -+ getting it from the file each time. */ -+ bfd_boolean mtime_set; -+ -+ /* File modified time, if mtime_set is TRUE. */ -+ long mtime; -+ -+ /* Reserved for an unimplemented file locking extension. */ -+ int ifd; -+ -+ /* The format which belongs to the BFD. (object, core, etc.) */ -+ bfd_format format; -+ -+ /* The direction with which the BFD was opened. */ -+ enum bfd_direction -+ { -+ no_direction = 0, -+ read_direction = 1, -+ write_direction = 2, -+ both_direction = 3 -+ } -+ direction; -+ -+ /* Format_specific flags. */ -+ flagword flags; -+ -+ /* Currently my_archive is tested before adding origin to -+ anything. I believe that this can become always an add of -+ origin, with origin set to 0 for non archive files. 
*/ -+ ufile_ptr origin; -+ -+ /* Remember when output has begun, to stop strange things -+ from happening. */ -+ bfd_boolean output_has_begun; -+ -+ /* A hash table for section names. */ -+ struct bfd_hash_table section_htab; -+ -+ /* Pointer to linked list of sections. */ -+ struct bfd_section *sections; -+ -+ /* The last section on the section list. */ -+ struct bfd_section *section_last; -+ -+ /* The number of sections. */ -+ unsigned int section_count; -+ -+ /* Stuff only useful for object files: -+ The start address. */ -+ bfd_vma start_address; -+ -+ /* Used for input and output. */ -+ unsigned int symcount; -+ -+ /* Symbol table for output BFD (with symcount entries). */ -+ struct bfd_symbol **outsymbols; -+ -+ /* Used for slurped dynamic symbol tables. */ -+ unsigned int dynsymcount; -+ -+ /* Pointer to structure which contains architecture information. */ -+ const struct bfd_arch_info *arch_info; -+ -+ /* Flag set if symbols from this BFD should not be exported. */ -+ bfd_boolean no_export; -+ -+ /* Stuff only useful for archives. */ -+ void *arelt_data; -+ struct bfd *my_archive; /* The containing archive BFD. */ -+ struct bfd *next; /* The next BFD in the archive. */ -+ struct bfd *archive_head; /* The first BFD in the archive. */ -+ bfd_boolean has_armap; -+ -+ /* A chain of BFD structures involved in a link. */ -+ struct bfd *link_next; -+ -+ /* A field used by _bfd_generic_link_add_archive_symbols. This will -+ be used only for archive elements. */ -+ int archive_pass; -+ -+ /* Used by the back end to hold private data. */ -+ union -+ { -+ struct aout_data_struct *aout_data; -+ struct artdata *aout_ar_data; -+ struct _oasys_data *oasys_obj_data; -+ struct _oasys_ar_data *oasys_ar_data; -+ struct coff_tdata *coff_obj_data; -+ struct pe_tdata *pe_obj_data; -+ struct xcoff_tdata *xcoff_obj_data; -+ struct ecoff_tdata *ecoff_obj_data; -+ struct ieee_data_struct *ieee_data; -+ struct ieee_ar_data_struct *ieee_ar_data; -+ struct srec_data_struct *srec_data; -+ struct ihex_data_struct *ihex_data; -+ struct tekhex_data_struct *tekhex_data; -+ struct elf_obj_tdata *elf_obj_data; -+ struct nlm_obj_tdata *nlm_obj_data; -+ struct bout_data_struct *bout_data; -+ struct mmo_data_struct *mmo_data; -+ struct sun_core_struct *sun_core_data; -+ struct sco5_core_struct *sco5_core_data; -+ struct trad_core_struct *trad_core_data; -+ struct som_data_struct *som_data; -+ struct hpux_core_struct *hpux_core_data; -+ struct hppabsd_core_struct *hppabsd_core_data; -+ struct sgi_core_struct *sgi_core_data; -+ struct lynx_core_struct *lynx_core_data; -+ struct osf_core_struct *osf_core_data; -+ struct cisco_core_struct *cisco_core_data; -+ struct versados_data_struct *versados_data; -+ struct netbsd_core_struct *netbsd_core_data; -+ struct mach_o_data_struct *mach_o_data; -+ struct mach_o_fat_data_struct *mach_o_fat_data; -+ struct bfd_pef_data_struct *pef_data; -+ struct bfd_pef_xlib_data_struct *pef_xlib_data; -+ struct bfd_sym_data_struct *sym_data; -+ void *any; -+ } -+ tdata; -+ -+ /* Used by the application to hold private data. */ -+ void *usrdata; -+ -+ /* Where all the allocated stuff under this BFD goes. This is a -+ struct objalloc *, but we use void * to avoid requiring the inclusion -+ of objalloc.h. 
*/ -+ void *memory; -+}; -+ -+typedef enum bfd_error -+{ -+ bfd_error_no_error = 0, -+ bfd_error_system_call, -+ bfd_error_invalid_target, -+ bfd_error_wrong_format, -+ bfd_error_wrong_object_format, -+ bfd_error_invalid_operation, -+ bfd_error_no_memory, -+ bfd_error_no_symbols, -+ bfd_error_no_armap, -+ bfd_error_no_more_archived_files, -+ bfd_error_malformed_archive, -+ bfd_error_file_not_recognized, -+ bfd_error_file_ambiguously_recognized, -+ bfd_error_no_contents, -+ bfd_error_nonrepresentable_section, -+ bfd_error_no_debug_section, -+ bfd_error_bad_value, -+ bfd_error_file_truncated, -+ bfd_error_file_too_big, -+ bfd_error_invalid_error_code -+} -+bfd_error_type; -+ -+bfd_error_type bfd_get_error (void); -+ -+void bfd_set_error (bfd_error_type error_tag); -+ -+const char *bfd_errmsg (bfd_error_type error_tag); -+ -+void bfd_perror (const char *message); -+ -+typedef void (*bfd_error_handler_type) (const char *, ...); -+ -+bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type); -+ -+void bfd_set_error_program_name (const char *); -+ -+bfd_error_handler_type bfd_get_error_handler (void); -+ -+long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect); -+ -+long bfd_canonicalize_reloc -+ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms); -+ -+void bfd_set_reloc -+ (bfd *abfd, asection *sec, arelent **rel, unsigned int count); -+ -+bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags); -+ -+int bfd_get_arch_size (bfd *abfd); -+ -+int bfd_get_sign_extend_vma (bfd *abfd); -+ -+bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma); -+ -+unsigned int bfd_get_gp_size (bfd *abfd); -+ -+void bfd_set_gp_size (bfd *abfd, unsigned int i); -+ -+bfd_vma bfd_scan_vma (const char *string, const char **end, int base); -+ -+bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_copy_private_header_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_copy_private_header_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_copy_private_bfd_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_merge_private_bfd_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags); -+ -+#define bfd_set_private_flags(abfd, flags) \ -+ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags)) -+#define bfd_sizeof_headers(abfd, reloc) \ -+ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc)) -+ -+#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \ -+ BFD_SEND (abfd, _bfd_find_nearest_line, \ -+ (abfd, sec, syms, off, file, func, line)) -+ -+#define bfd_find_line(abfd, syms, sym, file, line) \ -+ BFD_SEND (abfd, _bfd_find_line, \ -+ (abfd, syms, sym, file, line)) -+ -+#define bfd_find_inliner_info(abfd, file, func, line) \ -+ BFD_SEND (abfd, _bfd_find_inliner_info, \ -+ (abfd, file, func, line)) -+ -+#define bfd_debug_info_start(abfd) \ -+ BFD_SEND (abfd, _bfd_debug_info_start, (abfd)) -+ -+#define bfd_debug_info_end(abfd) \ -+ BFD_SEND (abfd, _bfd_debug_info_end, (abfd)) -+ -+#define bfd_debug_info_accumulate(abfd, section) \ -+ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section)) -+ -+#define bfd_stat_arch_elt(abfd, stat) \ -+ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat)) -+ -+#define bfd_update_armap_timestamp(abfd) \ -+ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd)) -+ -+#define 
bfd_set_arch_mach(abfd, arch, mach)\ -+ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach)) -+ -+#define bfd_relax_section(abfd, section, link_info, again) \ -+ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again)) -+ -+#define bfd_gc_sections(abfd, link_info) \ -+ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info)) -+ -+#define bfd_merge_sections(abfd, link_info) \ -+ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info)) -+ -+#define bfd_is_group_section(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec)) -+ -+#define bfd_discard_group(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec)) -+ -+#define bfd_link_hash_table_create(abfd) \ -+ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd)) -+ -+#define bfd_link_hash_table_free(abfd, hash) \ -+ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash)) -+ -+#define bfd_link_add_symbols(abfd, info) \ -+ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info)) -+ -+#define bfd_link_just_syms(abfd, sec, info) \ -+ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info)) -+ -+#define bfd_final_link(abfd, info) \ -+ BFD_SEND (abfd, _bfd_final_link, (abfd, info)) -+ -+#define bfd_free_cached_info(abfd) \ -+ BFD_SEND (abfd, _bfd_free_cached_info, (abfd)) -+ -+#define bfd_get_dynamic_symtab_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd)) -+ -+#define bfd_print_private_bfd_data(abfd, file)\ -+ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file)) -+ -+#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \ -+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols)) -+ -+#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \ -+ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \ -+ dyncount, dynsyms, ret)) -+ -+#define bfd_get_dynamic_reloc_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd)) -+ -+#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \ -+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms)) -+ -+extern bfd_byte *bfd_get_relocated_section_contents -+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *, -+ bfd_boolean, asymbol **); -+ -+bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative); -+ -+struct bfd_preserve -+{ -+ void *marker; -+ void *tdata; -+ flagword flags; -+ const struct bfd_arch_info *arch_info; -+ struct bfd_section *sections; -+ struct bfd_section *section_last; -+ unsigned int section_count; -+ struct bfd_hash_table section_htab; -+}; -+ -+bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *); -+ -+void bfd_preserve_restore (bfd *, struct bfd_preserve *); -+ -+void bfd_preserve_finish (bfd *, struct bfd_preserve *); -+ -+/* Extracted from archive.c. */ -+symindex bfd_get_next_mapent -+ (bfd *abfd, symindex previous, carsym **sym); -+ -+bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head); -+ -+bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous); -+ -+/* Extracted from corefile.c. */ -+const char *bfd_core_file_failing_command (bfd *abfd); -+ -+int bfd_core_file_failing_signal (bfd *abfd); -+ -+bfd_boolean core_file_matches_executable_p -+ (bfd *core_bfd, bfd *exec_bfd); -+ -+/* Extracted from targets.c. */ -+#define BFD_SEND(bfd, message, arglist) \ -+ ((*((bfd)->xvec->message)) arglist) -+ -+#ifdef DEBUG_BFD_SEND -+#undef BFD_SEND -+#define BFD_SEND(bfd, message, arglist) \ -+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? 
\ -+ ((*((bfd)->xvec->message)) arglist) : \ -+ (bfd_assert (__FILE__,__LINE__), NULL)) -+#endif -+#define BFD_SEND_FMT(bfd, message, arglist) \ -+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) -+ -+#ifdef DEBUG_BFD_SEND -+#undef BFD_SEND_FMT -+#define BFD_SEND_FMT(bfd, message, arglist) \ -+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \ -+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \ -+ (bfd_assert (__FILE__,__LINE__), NULL)) -+#endif -+ -+enum bfd_flavour -+{ -+ bfd_target_unknown_flavour, -+ bfd_target_aout_flavour, -+ bfd_target_coff_flavour, -+ bfd_target_ecoff_flavour, -+ bfd_target_xcoff_flavour, -+ bfd_target_elf_flavour, -+ bfd_target_ieee_flavour, -+ bfd_target_nlm_flavour, -+ bfd_target_oasys_flavour, -+ bfd_target_tekhex_flavour, -+ bfd_target_srec_flavour, -+ bfd_target_ihex_flavour, -+ bfd_target_som_flavour, -+ bfd_target_os9k_flavour, -+ bfd_target_versados_flavour, -+ bfd_target_msdos_flavour, -+ bfd_target_ovax_flavour, -+ bfd_target_evax_flavour, -+ bfd_target_mmo_flavour, -+ bfd_target_mach_o_flavour, -+ bfd_target_pef_flavour, -+ bfd_target_pef_xlib_flavour, -+ bfd_target_sym_flavour -+}; -+ -+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN }; -+ -+/* Forward declaration. */ -+typedef struct bfd_link_info _bfd_link_info; -+ -+typedef struct bfd_target -+{ -+ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */ -+ char *name; -+ -+ /* The "flavour" of a back end is a general indication about -+ the contents of a file. */ -+ enum bfd_flavour flavour; -+ -+ /* The order of bytes within the data area of a file. */ -+ enum bfd_endian byteorder; -+ -+ /* The order of bytes within the header parts of a file. */ -+ enum bfd_endian header_byteorder; -+ -+ /* A mask of all the flags which an executable may have set - -+ from the set <>, <>, ...<>. */ -+ flagword object_flags; -+ -+ /* A mask of all the flags which a section may have set - from -+ the set <>, <>, ...<>. */ -+ flagword section_flags; -+ -+ /* The character normally found at the front of a symbol. -+ (if any), perhaps `_'. */ -+ char symbol_leading_char; -+ -+ /* The pad character for file names within an archive header. */ -+ char ar_pad_char; -+ -+ /* The maximum number of characters in an archive header. */ -+ unsigned short ar_max_namelen; -+ -+ /* Entries for byte swapping for data. These are different from the -+ other entry points, since they don't take a BFD as the first argument. -+ Certain other handlers could do the same. */ -+ bfd_uint64_t (*bfd_getx64) (const void *); -+ bfd_int64_t (*bfd_getx_signed_64) (const void *); -+ void (*bfd_putx64) (bfd_uint64_t, void *); -+ bfd_vma (*bfd_getx32) (const void *); -+ bfd_signed_vma (*bfd_getx_signed_32) (const void *); -+ void (*bfd_putx32) (bfd_vma, void *); -+ bfd_vma (*bfd_getx16) (const void *); -+ bfd_signed_vma (*bfd_getx_signed_16) (const void *); -+ void (*bfd_putx16) (bfd_vma, void *); -+ -+ /* Byte swapping for the headers. 
*/ -+ bfd_uint64_t (*bfd_h_getx64) (const void *); -+ bfd_int64_t (*bfd_h_getx_signed_64) (const void *); -+ void (*bfd_h_putx64) (bfd_uint64_t, void *); -+ bfd_vma (*bfd_h_getx32) (const void *); -+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *); -+ void (*bfd_h_putx32) (bfd_vma, void *); -+ bfd_vma (*bfd_h_getx16) (const void *); -+ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *); -+ void (*bfd_h_putx16) (bfd_vma, void *); -+ -+ /* Format dependent routines: these are vectors of entry points -+ within the target vector structure, one for each format to check. */ -+ -+ /* Check the format of a file being read. Return a <> or zero. */ -+ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *); -+ -+ /* Set the format of a file being written. */ -+ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *); -+ -+ /* Write cached information into a file being written, at <>. */ -+ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *); -+ -+ -+ /* Generic entry points. */ -+#define BFD_JUMP_TABLE_GENERIC(NAME) \ -+ NAME##_close_and_cleanup, \ -+ NAME##_bfd_free_cached_info, \ -+ NAME##_new_section_hook, \ -+ NAME##_get_section_contents, \ -+ NAME##_get_section_contents_in_window -+ -+ /* Called when the BFD is being closed to do any necessary cleanup. */ -+ bfd_boolean (*_close_and_cleanup) (bfd *); -+ /* Ask the BFD to free all cached information. */ -+ bfd_boolean (*_bfd_free_cached_info) (bfd *); -+ /* Called when a new section is created. */ -+ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr); -+ /* Read the contents of a section. */ -+ bfd_boolean (*_bfd_get_section_contents) -+ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type); -+ bfd_boolean (*_bfd_get_section_contents_in_window) -+ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type); -+ -+ /* Entry points to copy private data. */ -+#define BFD_JUMP_TABLE_COPY(NAME) \ -+ NAME##_bfd_copy_private_bfd_data, \ -+ NAME##_bfd_merge_private_bfd_data, \ -+ NAME##_bfd_copy_private_section_data, \ -+ NAME##_bfd_copy_private_symbol_data, \ -+ NAME##_bfd_copy_private_header_data, \ -+ NAME##_bfd_set_private_flags, \ -+ NAME##_bfd_print_private_bfd_data -+ -+ /* Called to copy BFD general private data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *); -+ /* Called to merge BFD general private data from one object file -+ to a common output file when linking. */ -+ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *); -+ /* Called to copy BFD private section data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_section_data) -+ (bfd *, sec_ptr, bfd *, sec_ptr); -+ /* Called to copy BFD private symbol data from one symbol -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_symbol_data) -+ (bfd *, asymbol *, bfd *, asymbol *); -+ /* Called to copy BFD private header data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_header_data) -+ (bfd *, bfd *); -+ /* Called to set private backend flags. */ -+ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword); -+ -+ /* Called to print private BFD data. */ -+ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *); -+ -+ /* Core file entry points. 
*/ -+#define BFD_JUMP_TABLE_CORE(NAME) \ -+ NAME##_core_file_failing_command, \ -+ NAME##_core_file_failing_signal, \ -+ NAME##_core_file_matches_executable_p -+ -+ char * (*_core_file_failing_command) (bfd *); -+ int (*_core_file_failing_signal) (bfd *); -+ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *); -+ -+ /* Archive entry points. */ -+#define BFD_JUMP_TABLE_ARCHIVE(NAME) \ -+ NAME##_slurp_armap, \ -+ NAME##_slurp_extended_name_table, \ -+ NAME##_construct_extended_name_table, \ -+ NAME##_truncate_arname, \ -+ NAME##_write_armap, \ -+ NAME##_read_ar_hdr, \ -+ NAME##_openr_next_archived_file, \ -+ NAME##_get_elt_at_index, \ -+ NAME##_generic_stat_arch_elt, \ -+ NAME##_update_armap_timestamp -+ -+ bfd_boolean (*_bfd_slurp_armap) (bfd *); -+ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *); -+ bfd_boolean (*_bfd_construct_extended_name_table) -+ (bfd *, char **, bfd_size_type *, const char **); -+ void (*_bfd_truncate_arname) (bfd *, const char *, char *); -+ bfd_boolean (*write_armap) -+ (bfd *, unsigned int, struct orl *, unsigned int, int); -+ void * (*_bfd_read_ar_hdr_fn) (bfd *); -+ bfd * (*openr_next_archived_file) (bfd *, bfd *); -+#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i)) -+ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex); -+ int (*_bfd_stat_arch_elt) (bfd *, struct stat *); -+ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *); -+ -+ /* Entry points used for symbols. */ -+#define BFD_JUMP_TABLE_SYMBOLS(NAME) \ -+ NAME##_get_symtab_upper_bound, \ -+ NAME##_canonicalize_symtab, \ -+ NAME##_make_empty_symbol, \ -+ NAME##_print_symbol, \ -+ NAME##_get_symbol_info, \ -+ NAME##_bfd_is_local_label_name, \ -+ NAME##_bfd_is_target_special_symbol, \ -+ NAME##_get_lineno, \ -+ NAME##_find_nearest_line, \ -+ _bfd_generic_find_line, \ -+ NAME##_find_inliner_info, \ -+ NAME##_bfd_make_debug_symbol, \ -+ NAME##_read_minisymbols, \ -+ NAME##_minisymbol_to_symbol -+ -+ long (*_bfd_get_symtab_upper_bound) (bfd *); -+ long (*_bfd_canonicalize_symtab) -+ (bfd *, struct bfd_symbol **); -+ struct bfd_symbol * -+ (*_bfd_make_empty_symbol) (bfd *); -+ void (*_bfd_print_symbol) -+ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type); -+#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e)) -+ void (*_bfd_get_symbol_info) -+ (bfd *, struct bfd_symbol *, symbol_info *); -+#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e)) -+ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *); -+ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *); -+ alent * (*_get_lineno) (bfd *, struct bfd_symbol *); -+ bfd_boolean (*_bfd_find_nearest_line) -+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma, -+ const char **, const char **, unsigned int *); -+ bfd_boolean (*_bfd_find_line) -+ (bfd *, struct bfd_symbol **, struct bfd_symbol *, -+ const char **, unsigned int *); -+ bfd_boolean (*_bfd_find_inliner_info) -+ (bfd *, const char **, const char **, unsigned int *); -+ /* Back-door to allow format-aware applications to create debug symbols -+ while using BFD for everything else. Currently used by the assembler -+ when creating COFF files. 
*/ -+ asymbol * (*_bfd_make_debug_symbol) -+ (bfd *, void *, unsigned long size); -+#define bfd_read_minisymbols(b, d, m, s) \ -+ BFD_SEND (b, _read_minisymbols, (b, d, m, s)) -+ long (*_read_minisymbols) -+ (bfd *, bfd_boolean, void **, unsigned int *); -+#define bfd_minisymbol_to_symbol(b, d, m, f) \ -+ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f)) -+ asymbol * (*_minisymbol_to_symbol) -+ (bfd *, bfd_boolean, const void *, asymbol *); -+ -+ /* Routines for relocs. */ -+#define BFD_JUMP_TABLE_RELOCS(NAME) \ -+ NAME##_get_reloc_upper_bound, \ -+ NAME##_canonicalize_reloc, \ -+ NAME##_bfd_reloc_type_lookup -+ -+ long (*_get_reloc_upper_bound) (bfd *, sec_ptr); -+ long (*_bfd_canonicalize_reloc) -+ (bfd *, sec_ptr, arelent **, struct bfd_symbol **); -+ /* See documentation on reloc types. */ -+ reloc_howto_type * -+ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type); -+ -+ /* Routines used when writing an object file. */ -+#define BFD_JUMP_TABLE_WRITE(NAME) \ -+ NAME##_set_arch_mach, \ -+ NAME##_set_section_contents -+ -+ bfd_boolean (*_bfd_set_arch_mach) -+ (bfd *, enum bfd_architecture, unsigned long); -+ bfd_boolean (*_bfd_set_section_contents) -+ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type); -+ -+ /* Routines used by the linker. */ -+#define BFD_JUMP_TABLE_LINK(NAME) \ -+ NAME##_sizeof_headers, \ -+ NAME##_bfd_get_relocated_section_contents, \ -+ NAME##_bfd_relax_section, \ -+ NAME##_bfd_link_hash_table_create, \ -+ NAME##_bfd_link_hash_table_free, \ -+ NAME##_bfd_link_add_symbols, \ -+ NAME##_bfd_link_just_syms, \ -+ NAME##_bfd_final_link, \ -+ NAME##_bfd_link_split_section, \ -+ NAME##_bfd_gc_sections, \ -+ NAME##_bfd_merge_sections, \ -+ NAME##_bfd_is_group_section, \ -+ NAME##_bfd_discard_group, \ -+ NAME##_section_already_linked \ -+ -+ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean); -+ bfd_byte * (*_bfd_get_relocated_section_contents) -+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, -+ bfd_byte *, bfd_boolean, struct bfd_symbol **); -+ -+ bfd_boolean (*_bfd_relax_section) -+ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *); -+ -+ /* Create a hash table for the linker. Different backends store -+ different information in this table. */ -+ struct bfd_link_hash_table * -+ (*_bfd_link_hash_table_create) (bfd *); -+ -+ /* Release the memory associated with the linker hash table. */ -+ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *); -+ -+ /* Add symbols from this object file into the hash table. */ -+ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *); -+ -+ /* Indicate that we are only retrieving symbol values from this section. */ -+ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *); -+ -+ /* Do a link based on the link_order structures attached to each -+ section of the BFD. */ -+ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *); -+ -+ /* Should this section be split up into smaller pieces during linking. */ -+ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *); -+ -+ /* Remove sections that are not referenced from the output. */ -+ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *); -+ -+ /* Attempt to merge SEC_MERGE sections. */ -+ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *); -+ -+ /* Is this section a member of a group? */ -+ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *); -+ -+ /* Discard members of a group. 
*/ -+ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *); -+ -+ /* Check if SEC has been already linked during a reloceatable or -+ final link. */ -+ void (*_section_already_linked) (bfd *, struct bfd_section *); -+ -+ /* Routines to handle dynamic symbols and relocs. */ -+#define BFD_JUMP_TABLE_DYNAMIC(NAME) \ -+ NAME##_get_dynamic_symtab_upper_bound, \ -+ NAME##_canonicalize_dynamic_symtab, \ -+ NAME##_get_synthetic_symtab, \ -+ NAME##_get_dynamic_reloc_upper_bound, \ -+ NAME##_canonicalize_dynamic_reloc -+ -+ /* Get the amount of memory required to hold the dynamic symbols. */ -+ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *); -+ /* Read in the dynamic symbols. */ -+ long (*_bfd_canonicalize_dynamic_symtab) -+ (bfd *, struct bfd_symbol **); -+ /* Create synthetized symbols. */ -+ long (*_bfd_get_synthetic_symtab) -+ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **, -+ struct bfd_symbol **); -+ /* Get the amount of memory required to hold the dynamic relocs. */ -+ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *); -+ /* Read in the dynamic relocs. */ -+ long (*_bfd_canonicalize_dynamic_reloc) -+ (bfd *, arelent **, struct bfd_symbol **); -+ -+ /* Opposite endian version of this target. */ -+ const struct bfd_target * alternative_target; -+ -+ /* Data for use by back-end routines, which isn't -+ generic enough to belong in this structure. */ -+ const void *backend_data; -+ -+} bfd_target; -+ -+bfd_boolean bfd_set_default_target (const char *name); -+ -+const bfd_target *bfd_find_target (const char *target_name, bfd *abfd); -+ -+const char ** bfd_target_list (void); -+ -+const bfd_target *bfd_search_for_target -+ (int (*search_func) (const bfd_target *, void *), -+ void *); -+ -+/* Extracted from format.c. */ -+bfd_boolean bfd_check_format (bfd *abfd, bfd_format format); -+ -+bfd_boolean bfd_check_format_matches -+ (bfd *abfd, bfd_format format, char ***matching); -+ -+bfd_boolean bfd_set_format (bfd *abfd, bfd_format format); -+ -+const char *bfd_format_string (bfd_format format); -+ -+/* Extracted from linker.c. */ -+bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec); -+ -+#define bfd_link_split_section(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec)) -+ -+void bfd_section_already_linked (bfd *abfd, asection *sec); -+ -+#define bfd_section_already_linked(abfd, sec) \ -+ BFD_SEND (abfd, _section_already_linked, (abfd, sec)) -+ -+/* Extracted from simple.c. */ -+bfd_byte *bfd_simple_get_relocated_section_contents -+ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table); -+ -+#ifdef __cplusplus -+} -+#endif -+#endif ---- /dev/null -+++ b/arch/x86/include/asm/bfd_64.h -@@ -0,0 +1,4917 @@ -+/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically -+ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c", -+ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c", -+ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c", -+ "linker.c" and "simple.c". -+ Run "make headers" in your build bfd/ to regenerate. */ -+ -+/* Main header file for the bfd library -- portable access to object files. -+ -+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, -+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. -+ -+ Contributed by Cygnus Support. -+ -+ This file is part of BFD, the Binary File Descriptor library. 
-+ -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 2 of the License, or -+ (at your option) any later version. -+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as -+ * required. -+ * Keith Owens 15 May 2006 -+ */ -+ -+#ifndef __BFD_H_SEEN__ -+#define __BFD_H_SEEN__ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#ifdef __KERNEL__ -+#include -+#else /* __KERNEL__ */ -+#include "ansidecl.h" -+#include "symcat.h" -+#endif /* __KERNEL__ */ -+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) -+#ifndef SABER -+/* This hack is to avoid a problem with some strict ANSI C preprocessors. -+ The problem is, "32_" is not a valid preprocessing token, and we don't -+ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will -+ cause the inner CONCAT2 macros to be evaluated first, producing -+ still-valid pp-tokens. Then the final concatenation can be done. */ -+#undef CONCAT4 -+#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d)) -+#endif -+#endif -+ -+/* The word size used by BFD on the host. This may be 64 with a 32 -+ bit target if the host is 64 bit, or if other 64 bit targets have -+ been selected with --enable-targets, or if --enable-64-bit-bfd. */ -+#define BFD_ARCH_SIZE 64 -+ -+/* The word size of the default bfd target. */ -+#define BFD_DEFAULT_TARGET_SIZE 64 -+ -+#define BFD_HOST_64BIT_LONG 1 -+#define BFD_HOST_LONG_LONG 1 -+#if 1 -+#define BFD_HOST_64_BIT long -+#define BFD_HOST_U_64_BIT unsigned long -+typedef BFD_HOST_64_BIT bfd_int64_t; -+typedef BFD_HOST_U_64_BIT bfd_uint64_t; -+#endif -+ -+#if BFD_ARCH_SIZE >= 64 -+#define BFD64 -+#endif -+ -+#ifndef INLINE -+#if __GNUC__ >= 2 -+#define INLINE __inline__ -+#else -+#define INLINE -+#endif -+#endif -+ -+/* Forward declaration. */ -+typedef struct bfd bfd; -+ -+/* Boolean type used in bfd. Too many systems define their own -+ versions of "boolean" for us to safely typedef a "boolean" of -+ our own. Using an enum for "bfd_boolean" has its own set of -+ problems, with strange looking casts required to avoid warnings -+ on some older compilers. Thus we just use an int. -+ -+ General rule: Functions which are bfd_boolean return TRUE on -+ success and FALSE on failure (unless they're a predicate). */ -+ -+typedef int bfd_boolean; -+#undef FALSE -+#undef TRUE -+#define FALSE 0 -+#define TRUE 1 -+ -+#ifdef BFD64 -+ -+#ifndef BFD_HOST_64_BIT -+ #error No 64 bit integer type available -+#endif /* ! 
defined (BFD_HOST_64_BIT) */ -+ -+typedef BFD_HOST_U_64_BIT bfd_vma; -+typedef BFD_HOST_64_BIT bfd_signed_vma; -+typedef BFD_HOST_U_64_BIT bfd_size_type; -+typedef BFD_HOST_U_64_BIT symvalue; -+ -+#ifndef fprintf_vma -+#if BFD_HOST_64BIT_LONG -+#define sprintf_vma(s,x) sprintf (s, "%016lx", x) -+#define fprintf_vma(f,x) fprintf (f, "%016lx", x) -+#else -+#define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff))) -+#define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff)) -+#define fprintf_vma(s,x) \ -+ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) -+#define sprintf_vma(s,x) \ -+ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) -+#endif -+#endif -+ -+#else /* not BFD64 */ -+ -+/* Represent a target address. Also used as a generic unsigned type -+ which is guaranteed to be big enough to hold any arithmetic types -+ we need to deal with. */ -+typedef unsigned long bfd_vma; -+ -+/* A generic signed type which is guaranteed to be big enough to hold any -+ arithmetic types we need to deal with. Can be assumed to be compatible -+ with bfd_vma in the same way that signed and unsigned ints are compatible -+ (as parameters, in assignment, etc). */ -+typedef long bfd_signed_vma; -+ -+typedef unsigned long symvalue; -+typedef unsigned long bfd_size_type; -+ -+/* Print a bfd_vma x on stream s. */ -+#define fprintf_vma(s,x) fprintf (s, "%08lx", x) -+#define sprintf_vma(s,x) sprintf (s, "%08lx", x) -+ -+#endif /* not BFD64 */ -+ -+#define HALF_BFD_SIZE_TYPE \ -+ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2)) -+ -+#ifndef BFD_HOST_64_BIT -+/* Fall back on a 32 bit type. The idea is to make these types always -+ available for function return types, but in the case that -+ BFD_HOST_64_BIT is undefined such a function should abort or -+ otherwise signal an error. */ -+typedef bfd_signed_vma bfd_int64_t; -+typedef bfd_vma bfd_uint64_t; -+#endif -+ -+/* An offset into a file. BFD always uses the largest possible offset -+ based on the build time availability of fseek, fseeko, or fseeko64. */ -+typedef BFD_HOST_64_BIT file_ptr; -+typedef unsigned BFD_HOST_64_BIT ufile_ptr; -+ -+extern void bfd_sprintf_vma (bfd *, char *, bfd_vma); -+extern void bfd_fprintf_vma (bfd *, void *, bfd_vma); -+ -+#define printf_vma(x) fprintf_vma(stdout,x) -+#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x) -+ -+typedef unsigned int flagword; /* 32 bits of flags */ -+typedef unsigned char bfd_byte; -+ -+/* File formats. */ -+ -+typedef enum bfd_format -+{ -+ bfd_unknown = 0, /* File format is unknown. */ -+ bfd_object, /* Linker/assembler/compiler output. */ -+ bfd_archive, /* Object archive file. */ -+ bfd_core, /* Core dump. */ -+ bfd_type_end /* Marks the end; don't use it! */ -+} -+bfd_format; -+ -+/* Values that may appear in the flags field of a BFD. These also -+ appear in the object_flags field of the bfd_target structure, where -+ they indicate the set of flags used by that backend (not all flags -+ are meaningful for all object file formats) (FIXME: at the moment, -+ the object_flags values have mostly just been copied from backend -+ to another, and are not necessarily correct). */ -+ -+/* No flags. */ -+#define BFD_NO_FLAGS 0x00 -+ -+/* BFD contains relocation entries. */ -+#define HAS_RELOC 0x01 -+ -+/* BFD is directly executable. */ -+#define EXEC_P 0x02 -+ -+/* BFD has line number information (basically used for F_LNNO in a -+ COFF header). */ -+#define HAS_LINENO 0x04 -+ -+/* BFD has debugging information. 
*/ -+#define HAS_DEBUG 0x08 -+ -+/* BFD has symbols. */ -+#define HAS_SYMS 0x10 -+ -+/* BFD has local symbols (basically used for F_LSYMS in a COFF -+ header). */ -+#define HAS_LOCALS 0x20 -+ -+/* BFD is a dynamic object. */ -+#define DYNAMIC 0x40 -+ -+/* Text section is write protected (if D_PAGED is not set, this is -+ like an a.out NMAGIC file) (the linker sets this by default, but -+ clears it for -r or -N). */ -+#define WP_TEXT 0x80 -+ -+/* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the -+ linker sets this by default, but clears it for -r or -n or -N). */ -+#define D_PAGED 0x100 -+ -+/* BFD is relaxable (this means that bfd_relax_section may be able to -+ do something) (sometimes bfd_relax_section can do something even if -+ this is not set). */ -+#define BFD_IS_RELAXABLE 0x200 -+ -+/* This may be set before writing out a BFD to request using a -+ traditional format. For example, this is used to request that when -+ writing out an a.out object the symbols not be hashed to eliminate -+ duplicates. */ -+#define BFD_TRADITIONAL_FORMAT 0x400 -+ -+/* This flag indicates that the BFD contents are actually cached in -+ memory. If this is set, iostream points to a bfd_in_memory struct. */ -+#define BFD_IN_MEMORY 0x800 -+ -+/* The sections in this BFD specify a memory page. */ -+#define HAS_LOAD_PAGE 0x1000 -+ -+/* This BFD has been created by the linker and doesn't correspond -+ to any input file. */ -+#define BFD_LINKER_CREATED 0x2000 -+ -+/* Symbols and relocation. */ -+ -+/* A count of carsyms (canonical archive symbols). */ -+typedef unsigned long symindex; -+ -+/* How to perform a relocation. */ -+typedef const struct reloc_howto_struct reloc_howto_type; -+ -+#define BFD_NO_MORE_SYMBOLS ((symindex) ~0) -+ -+/* General purpose part of a symbol X; -+ target specific parts are in libcoff.h, libaout.h, etc. */ -+ -+#define bfd_get_section(x) ((x)->section) -+#define bfd_get_output_section(x) ((x)->section->output_section) -+#define bfd_set_section(x,y) ((x)->section) = (y) -+#define bfd_asymbol_base(x) ((x)->section->vma) -+#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value) -+#define bfd_asymbol_name(x) ((x)->name) -+/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/ -+#define bfd_asymbol_bfd(x) ((x)->the_bfd) -+#define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour) -+ -+/* A canonical archive symbol. */ -+/* This is a type pun with struct ranlib on purpose! */ -+typedef struct carsym -+{ -+ char *name; -+ file_ptr file_offset; /* Look here to find the file. */ -+} -+carsym; /* To make these you call a carsymogen. */ -+ -+/* Used in generating armaps (archive tables of contents). -+ Perhaps just a forward definition would do? */ -+struct orl /* Output ranlib. */ -+{ -+ char **name; /* Symbol name. */ -+ union -+ { -+ file_ptr pos; -+ bfd *abfd; -+ } u; /* bfd* or file position. */ -+ int namidx; /* Index into string table. */ -+}; -+ -+/* Linenumber stuff. */ -+typedef struct lineno_cache_entry -+{ -+ unsigned int line_number; /* Linenumber from start of function. */ -+ union -+ { -+ struct bfd_symbol *sym; /* Function name. */ -+ bfd_vma offset; /* Offset into section. */ -+ } u; -+} -+alent; -+ -+/* Object and core file sections. 
*/ -+ -+#define align_power(addr, align) \ -+ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align))) -+ -+typedef struct bfd_section *sec_ptr; -+ -+#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0) -+#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0) -+#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0) -+#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0) -+#define bfd_section_name(bfd, ptr) ((ptr)->name) -+#define bfd_section_size(bfd, ptr) ((ptr)->size) -+#define bfd_get_section_size(ptr) ((ptr)->size) -+#define bfd_section_vma(bfd, ptr) ((ptr)->vma) -+#define bfd_section_lma(bfd, ptr) ((ptr)->lma) -+#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power) -+#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0) -+#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata) -+ -+#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0) -+ -+#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE) -+#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE) -+#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE) -+/* Find the address one past the end of SEC. */ -+#define bfd_get_section_limit(bfd, sec) \ -+ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \ -+ / bfd_octets_per_byte (bfd)) -+ -+typedef struct stat stat_type; -+ -+typedef enum bfd_print_symbol -+{ -+ bfd_print_symbol_name, -+ bfd_print_symbol_more, -+ bfd_print_symbol_all -+} bfd_print_symbol_type; -+ -+/* Information about a symbol that nm needs. */ -+ -+typedef struct _symbol_info -+{ -+ symvalue value; -+ char type; -+ const char *name; /* Symbol name. */ -+ unsigned char stab_type; /* Stab type. */ -+ char stab_other; /* Stab other. */ -+ short stab_desc; /* Stab desc. */ -+ const char *stab_name; /* String for stab type. */ -+} symbol_info; -+ -+/* Get the name of a stabs type code. */ -+ -+extern const char *bfd_get_stab_name (int); -+ -+/* Hash table routines. There is no way to free up a hash table. */ -+ -+/* An element in the hash table. Most uses will actually use a larger -+ structure, and an instance of this will be the first field. */ -+ -+struct bfd_hash_entry -+{ -+ /* Next entry for this hash code. */ -+ struct bfd_hash_entry *next; -+ /* String being hashed. */ -+ const char *string; -+ /* Hash code. This is the full hash code, not the index into the -+ table. */ -+ unsigned long hash; -+}; -+ -+/* A hash table. */ -+ -+struct bfd_hash_table -+{ -+ /* The hash array. */ -+ struct bfd_hash_entry **table; -+ /* The number of slots in the hash table. */ -+ unsigned int size; -+ /* A function used to create new elements in the hash table. The -+ first entry is itself a pointer to an element. When this -+ function is first invoked, this pointer will be NULL. However, -+ having the pointer permits a hierarchy of method functions to be -+ built each of which calls the function in the superclass. Thus -+ each function should be written to allocate a new block of memory -+ only if the argument is NULL. */ -+ struct bfd_hash_entry *(*newfunc) -+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); -+ /* An objalloc for this hash table. This is a struct objalloc *, -+ but we use void * to avoid requiring the inclusion of objalloc.h. */ -+ void *memory; -+}; -+ -+/* Initialize a hash table. 
*/ -+extern bfd_boolean bfd_hash_table_init -+ (struct bfd_hash_table *, -+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *, -+ struct bfd_hash_table *, -+ const char *)); -+ -+/* Initialize a hash table specifying a size. */ -+extern bfd_boolean bfd_hash_table_init_n -+ (struct bfd_hash_table *, -+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *, -+ struct bfd_hash_table *, -+ const char *), -+ unsigned int size); -+ -+/* Free up a hash table. */ -+extern void bfd_hash_table_free -+ (struct bfd_hash_table *); -+ -+/* Look up a string in a hash table. If CREATE is TRUE, a new entry -+ will be created for this string if one does not already exist. The -+ COPY argument must be TRUE if this routine should copy the string -+ into newly allocated memory when adding an entry. */ -+extern struct bfd_hash_entry *bfd_hash_lookup -+ (struct bfd_hash_table *, const char *, bfd_boolean create, -+ bfd_boolean copy); -+ -+/* Replace an entry in a hash table. */ -+extern void bfd_hash_replace -+ (struct bfd_hash_table *, struct bfd_hash_entry *old, -+ struct bfd_hash_entry *nw); -+ -+/* Base method for creating a hash table entry. */ -+extern struct bfd_hash_entry *bfd_hash_newfunc -+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); -+ -+/* Grab some space for a hash table entry. */ -+extern void *bfd_hash_allocate -+ (struct bfd_hash_table *, unsigned int); -+ -+/* Traverse a hash table in a random order, calling a function on each -+ element. If the function returns FALSE, the traversal stops. The -+ INFO argument is passed to the function. */ -+extern void bfd_hash_traverse -+ (struct bfd_hash_table *, -+ bfd_boolean (*) (struct bfd_hash_entry *, void *), -+ void *info); -+ -+/* Allows the default size of a hash table to be configured. New hash -+ tables allocated using bfd_hash_table_init will be created with -+ this size. */ -+extern void bfd_hash_set_default_size (bfd_size_type); -+ -+/* This structure is used to keep track of stabs in sections -+ information while linking. */ -+ -+struct stab_info -+{ -+ /* A hash table used to hold stabs strings. */ -+ struct bfd_strtab_hash *strings; -+ /* The header file hash table. */ -+ struct bfd_hash_table includes; -+ /* The first .stabstr section. */ -+ struct bfd_section *stabstr; -+}; -+ -+#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table -+ -+/* User program access to BFD facilities. */ -+ -+/* Direct I/O routines, for programs which know more about the object -+ file than BFD does. Use higher level routines if possible. */ -+ -+extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *); -+extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *); -+extern int bfd_seek (bfd *, file_ptr, int); -+extern file_ptr bfd_tell (bfd *); -+extern int bfd_flush (bfd *); -+extern int bfd_stat (bfd *, struct stat *); -+ -+/* Deprecated old routines. 
*/ -+#if __GNUC__ -+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \ -+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \ -+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#else -+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \ -+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ -+ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\ -+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) -+#endif -+extern void warn_deprecated (const char *, const char *, int, const char *); -+ -+/* Cast from const char * to char * so that caller can assign to -+ a char * without a warning. */ -+#define bfd_get_filename(abfd) ((char *) (abfd)->filename) -+#define bfd_get_cacheable(abfd) ((abfd)->cacheable) -+#define bfd_get_format(abfd) ((abfd)->format) -+#define bfd_get_target(abfd) ((abfd)->xvec->name) -+#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour) -+#define bfd_family_coff(abfd) \ -+ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \ -+ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour) -+#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG) -+#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE) -+#define bfd_header_big_endian(abfd) \ -+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG) -+#define bfd_header_little_endian(abfd) \ -+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE) -+#define bfd_get_file_flags(abfd) ((abfd)->flags) -+#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags) -+#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags) -+#define bfd_my_archive(abfd) ((abfd)->my_archive) -+#define bfd_has_map(abfd) ((abfd)->has_armap) -+ -+#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types) -+#define bfd_usrdata(abfd) ((abfd)->usrdata) -+ -+#define bfd_get_start_address(abfd) ((abfd)->start_address) -+#define bfd_get_symcount(abfd) ((abfd)->symcount) -+#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols) -+#define bfd_count_sections(abfd) ((abfd)->section_count) -+ -+#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount) -+ -+#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char) -+ -+#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE) -+ -+extern bfd_boolean bfd_cache_close -+ (bfd *abfd); -+/* NB: This declaration should match the autogenerated one in libbfd.h. */ -+ -+extern bfd_boolean bfd_cache_close_all (void); -+ -+extern bfd_boolean bfd_record_phdr -+ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma, -+ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **); -+ -+/* Byte swapping routines. 
*/ -+ -+bfd_uint64_t bfd_getb64 (const void *); -+bfd_uint64_t bfd_getl64 (const void *); -+bfd_int64_t bfd_getb_signed_64 (const void *); -+bfd_int64_t bfd_getl_signed_64 (const void *); -+bfd_vma bfd_getb32 (const void *); -+bfd_vma bfd_getl32 (const void *); -+bfd_signed_vma bfd_getb_signed_32 (const void *); -+bfd_signed_vma bfd_getl_signed_32 (const void *); -+bfd_vma bfd_getb16 (const void *); -+bfd_vma bfd_getl16 (const void *); -+bfd_signed_vma bfd_getb_signed_16 (const void *); -+bfd_signed_vma bfd_getl_signed_16 (const void *); -+void bfd_putb64 (bfd_uint64_t, void *); -+void bfd_putl64 (bfd_uint64_t, void *); -+void bfd_putb32 (bfd_vma, void *); -+void bfd_putl32 (bfd_vma, void *); -+void bfd_putb16 (bfd_vma, void *); -+void bfd_putl16 (bfd_vma, void *); -+ -+/* Byte swapping routines which take size and endiannes as arguments. */ -+ -+bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean); -+void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean); -+ -+extern bfd_boolean bfd_section_already_linked_table_init (void); -+extern void bfd_section_already_linked_table_free (void); -+ -+/* Externally visible ECOFF routines. */ -+ -+#if defined(__STDC__) || defined(ALMOST_STDC) -+struct ecoff_debug_info; -+struct ecoff_debug_swap; -+struct ecoff_extr; -+struct bfd_symbol; -+struct bfd_link_info; -+struct bfd_link_hash_entry; -+struct bfd_elf_version_tree; -+#endif -+extern bfd_vma bfd_ecoff_get_gp_value -+ (bfd * abfd); -+extern bfd_boolean bfd_ecoff_set_gp_value -+ (bfd *abfd, bfd_vma gp_value); -+extern bfd_boolean bfd_ecoff_set_regmasks -+ (bfd *abfd, unsigned long gprmask, unsigned long fprmask, -+ unsigned long *cprmask); -+extern void *bfd_ecoff_debug_init -+ (bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); -+extern void bfd_ecoff_debug_free -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_accumulate -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd, -+ struct ecoff_debug_info *input_debug, -+ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_accumulate_other -+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, -+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd, -+ struct bfd_link_info *); -+extern bfd_boolean bfd_ecoff_debug_externals -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, bfd_boolean relocatable, -+ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *), -+ void (*set_index) (struct bfd_symbol *, bfd_size_type)); -+extern bfd_boolean bfd_ecoff_debug_one_external -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, const char *name, -+ struct ecoff_extr *esym); -+extern bfd_size_type bfd_ecoff_debug_size -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap); -+extern bfd_boolean bfd_ecoff_write_debug -+ (bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, file_ptr where); -+extern bfd_boolean bfd_ecoff_write_accumulated_debug -+ (void *handle, bfd *abfd, struct ecoff_debug_info *debug, -+ const struct ecoff_debug_swap *swap, -+ struct bfd_link_info *info, file_ptr where); -+ -+/* Externally visible ELF routines. 
*/ -+ -+struct bfd_link_needed_list -+{ -+ struct bfd_link_needed_list *next; -+ bfd *by; -+ const char *name; -+}; -+ -+enum dynamic_lib_link_class { -+ DYN_NORMAL = 0, -+ DYN_AS_NEEDED = 1, -+ DYN_DT_NEEDED = 2, -+ DYN_NO_ADD_NEEDED = 4, -+ DYN_NO_NEEDED = 8 -+}; -+ -+extern bfd_boolean bfd_elf_record_link_assignment -+ (struct bfd_link_info *, const char *, bfd_boolean); -+extern struct bfd_link_needed_list *bfd_elf_get_needed_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_elf_get_bfd_needed_list -+ (bfd *, struct bfd_link_needed_list **); -+extern bfd_boolean bfd_elf_size_dynamic_sections -+ (bfd *, const char *, const char *, const char *, const char * const *, -+ struct bfd_link_info *, struct bfd_section **, -+ struct bfd_elf_version_tree *); -+extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr -+ (bfd *, struct bfd_link_info *); -+extern void bfd_elf_set_dt_needed_name -+ (bfd *, const char *); -+extern const char *bfd_elf_get_dt_soname -+ (bfd *); -+extern void bfd_elf_set_dyn_lib_class -+ (bfd *, int); -+extern int bfd_elf_get_dyn_lib_class -+ (bfd *); -+extern struct bfd_link_needed_list *bfd_elf_get_runpath_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_elf_discard_info -+ (bfd *, struct bfd_link_info *); -+extern unsigned int _bfd_elf_default_action_discarded -+ (struct bfd_section *); -+ -+/* Return an upper bound on the number of bytes required to store a -+ copy of ABFD's program header table entries. Return -1 if an error -+ occurs; bfd_get_error will return an appropriate code. */ -+extern long bfd_get_elf_phdr_upper_bound -+ (bfd *abfd); -+ -+/* Copy ABFD's program header table entries to *PHDRS. The entries -+ will be stored as an array of Elf_Internal_Phdr structures, as -+ defined in include/elf/internal.h. To find out how large the -+ buffer needs to be, call bfd_get_elf_phdr_upper_bound. -+ -+ Return the number of program header table entries read, or -1 if an -+ error occurs; bfd_get_error will return an appropriate code. */ -+extern int bfd_get_elf_phdrs -+ (bfd *abfd, void *phdrs); -+ -+/* Create a new BFD as if by bfd_openr. Rather than opening a file, -+ reconstruct an ELF file by reading the segments out of remote memory -+ based on the ELF file header at EHDR_VMA and the ELF program headers it -+ points to. If not null, *LOADBASEP is filled in with the difference -+ between the VMAs from which the segments were read, and the VMAs the -+ file headers (and hence BFD's idea of each section's VMA) put them at. -+ -+ The function TARGET_READ_MEMORY is called to copy LEN bytes from the -+ remote memory at target address VMA into the local buffer at MYADDR; it -+ should return zero on success or an `errno' code on failure. TEMPL must -+ be a BFD for an ELF target with the word size and byte order found in -+ the remote memory. */ -+extern bfd *bfd_elf_bfd_from_remote_memory -+ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep, -+ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len)); -+ -+/* Return the arch_size field of an elf bfd, or -1 if not elf. */ -+extern int bfd_get_arch_size -+ (bfd *); -+ -+/* Return TRUE if address "naturally" sign extends, or -1 if not elf. 
*/ -+extern int bfd_get_sign_extend_vma -+ (bfd *); -+ -+extern struct bfd_section *_bfd_elf_tls_setup -+ (bfd *, struct bfd_link_info *); -+ -+extern void _bfd_elf_provide_symbol -+ (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *); -+ -+extern void _bfd_elf_provide_section_bound_symbols -+ (struct bfd_link_info *, struct bfd_section *, const char *, const char *); -+ -+extern void _bfd_elf_fix_excluded_sec_syms -+ (bfd *, struct bfd_link_info *); -+ -+extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs -+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, -+ char **); -+ -+/* SunOS shared library support routines for the linker. */ -+ -+extern struct bfd_link_needed_list *bfd_sunos_get_needed_list -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_sunos_record_link_assignment -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_sunos_size_dynamic_sections -+ (bfd *, struct bfd_link_info *, struct bfd_section **, -+ struct bfd_section **, struct bfd_section **); -+ -+/* Linux shared library support routines for the linker. */ -+ -+extern bfd_boolean bfd_i386linux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_m68klinux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+extern bfd_boolean bfd_sparclinux_size_dynamic_sections -+ (bfd *, struct bfd_link_info *); -+ -+/* mmap hacks */ -+ -+struct _bfd_window_internal; -+typedef struct _bfd_window_internal bfd_window_internal; -+ -+typedef struct _bfd_window -+{ -+ /* What the user asked for. */ -+ void *data; -+ bfd_size_type size; -+ /* The actual window used by BFD. Small user-requested read-only -+ regions sharing a page may share a single window into the object -+ file. Read-write versions shouldn't until I've fixed things to -+ keep track of which portions have been claimed by the -+ application; don't want to give the same region back when the -+ application wants two writable copies! */ -+ struct _bfd_window_internal *i; -+} -+bfd_window; -+ -+extern void bfd_init_window -+ (bfd_window *); -+extern void bfd_free_window -+ (bfd_window *); -+extern bfd_boolean bfd_get_file_window -+ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean); -+ -+/* XCOFF support routines for the linker. */ -+ -+extern bfd_boolean bfd_xcoff_link_record_set -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type); -+extern bfd_boolean bfd_xcoff_import_symbol -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma, -+ const char *, const char *, const char *, unsigned int); -+extern bfd_boolean bfd_xcoff_export_symbol -+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *); -+extern bfd_boolean bfd_xcoff_link_count_reloc -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_xcoff_record_link_assignment -+ (bfd *, struct bfd_link_info *, const char *); -+extern bfd_boolean bfd_xcoff_size_dynamic_sections -+ (bfd *, struct bfd_link_info *, const char *, const char *, -+ unsigned long, unsigned long, unsigned long, bfd_boolean, -+ int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean); -+extern bfd_boolean bfd_xcoff_link_generate_rtinit -+ (bfd *, const char *, const char *, bfd_boolean); -+ -+/* XCOFF support routines for ar. */ -+extern bfd_boolean bfd_xcoff_ar_archive_set_magic -+ (bfd *, char *); -+ -+/* Externally visible COFF routines. 
*/ -+ -+#if defined(__STDC__) || defined(ALMOST_STDC) -+struct internal_syment; -+union internal_auxent; -+#endif -+ -+extern bfd_boolean bfd_coff_get_syment -+ (bfd *, struct bfd_symbol *, struct internal_syment *); -+ -+extern bfd_boolean bfd_coff_get_auxent -+ (bfd *, struct bfd_symbol *, int, union internal_auxent *); -+ -+extern bfd_boolean bfd_coff_set_symbol_class -+ (bfd *, struct bfd_symbol *, unsigned int); -+ -+extern bfd_boolean bfd_m68k_coff_create_embedded_relocs -+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **); -+ -+/* ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_arm_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_arm_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+extern bfd_boolean bfd_arm_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+/* PE ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_arm_pe_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_arm_pe_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+/* ELF ARM Interworking support. Called from linker. */ -+extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections -+ (struct bfd_link_info *); -+ -+extern bfd_boolean bfd_elf32_arm_process_before_allocation -+ (bfd *, struct bfd_link_info *, int); -+ -+void bfd_elf32_arm_set_target_relocs -+ (struct bfd_link_info *, int, char *, int, int); -+ -+extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking -+ (bfd *, struct bfd_link_info *); -+ -+extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd -+ (bfd *, struct bfd_link_info *); -+ -+/* ELF ARM mapping symbol support */ -+extern bfd_boolean bfd_is_arm_mapping_symbol_name -+ (const char * name); -+ -+/* ARM Note section processing. */ -+extern bfd_boolean bfd_arm_merge_machines -+ (bfd *, bfd *); -+ -+extern bfd_boolean bfd_arm_update_notes -+ (bfd *, const char *); -+ -+extern unsigned int bfd_arm_get_mach_from_notes -+ (bfd *, const char *); -+ -+/* TI COFF load page support. */ -+extern void bfd_ticoff_set_section_load_page -+ (struct bfd_section *, int); -+ -+extern int bfd_ticoff_get_section_load_page -+ (struct bfd_section *); -+ -+/* H8/300 functions. */ -+extern bfd_vma bfd_h8300_pad_address -+ (bfd *, bfd_vma); -+ -+/* IA64 Itanium code generation. Called from linker. */ -+extern void bfd_elf32_ia64_after_parse -+ (int); -+ -+extern void bfd_elf64_ia64_after_parse -+ (int); -+ -+/* This structure is used for a comdat section, as in PE. A comdat -+ section is associated with a particular symbol. When the linker -+ sees a comdat section, it keeps only one of the sections with a -+ given name and associated with a given symbol. */ -+ -+struct coff_comdat_info -+{ -+ /* The name of the symbol associated with a comdat section. */ -+ const char *name; -+ -+ /* The local symbol table index of the symbol associated with a -+ comdat section. This is only meaningful to the object file format -+ specific code; it is not an index into the list returned by -+ bfd_canonicalize_symtab. */ -+ long symbol; -+}; -+ -+extern struct coff_comdat_info *bfd_coff_get_comdat_section -+ (bfd *, struct bfd_section *); -+ -+/* Extracted from init.c. */ -+void bfd_init (void); -+ -+/* Extracted from opncls.c. 
*/ -+bfd *bfd_fopen (const char *filename, const char *target, -+ const char *mode, int fd); -+ -+bfd *bfd_openr (const char *filename, const char *target); -+ -+bfd *bfd_fdopenr (const char *filename, const char *target, int fd); -+ -+bfd *bfd_openstreamr (const char *, const char *, void *); -+ -+bfd *bfd_openr_iovec (const char *filename, const char *target, -+ void *(*open) (struct bfd *nbfd, -+ void *open_closure), -+ void *open_closure, -+ file_ptr (*pread) (struct bfd *nbfd, -+ void *stream, -+ void *buf, -+ file_ptr nbytes, -+ file_ptr offset), -+ int (*close) (struct bfd *nbfd, -+ void *stream)); -+ -+bfd *bfd_openw (const char *filename, const char *target); -+ -+bfd_boolean bfd_close (bfd *abfd); -+ -+bfd_boolean bfd_close_all_done (bfd *); -+ -+bfd *bfd_create (const char *filename, bfd *templ); -+ -+bfd_boolean bfd_make_writable (bfd *abfd); -+ -+bfd_boolean bfd_make_readable (bfd *abfd); -+ -+unsigned long bfd_calc_gnu_debuglink_crc32 -+ (unsigned long crc, const unsigned char *buf, bfd_size_type len); -+ -+char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir); -+ -+struct bfd_section *bfd_create_gnu_debuglink_section -+ (bfd *abfd, const char *filename); -+ -+bfd_boolean bfd_fill_in_gnu_debuglink_section -+ (bfd *abfd, struct bfd_section *sect, const char *filename); -+ -+/* Extracted from libbfd.c. */ -+ -+/* Byte swapping macros for user section data. */ -+ -+#define bfd_put_8(abfd, val, ptr) \ -+ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff)) -+#define bfd_put_signed_8 \ -+ bfd_put_8 -+#define bfd_get_8(abfd, ptr) \ -+ (*(unsigned char *) (ptr) & 0xff) -+#define bfd_get_signed_8(abfd, ptr) \ -+ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80) -+ -+#define bfd_put_16(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx16, ((val),(ptr))) -+#define bfd_put_signed_16 \ -+ bfd_put_16 -+#define bfd_get_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx16, (ptr)) -+#define bfd_get_signed_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_16, (ptr)) -+ -+#define bfd_put_32(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx32, ((val),(ptr))) -+#define bfd_put_signed_32 \ -+ bfd_put_32 -+#define bfd_get_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx32, (ptr)) -+#define bfd_get_signed_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_32, (ptr)) -+ -+#define bfd_put_64(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_putx64, ((val), (ptr))) -+#define bfd_put_signed_64 \ -+ bfd_put_64 -+#define bfd_get_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx64, (ptr)) -+#define bfd_get_signed_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_getx_signed_64, (ptr)) -+ -+#define bfd_get(bits, abfd, ptr) \ -+ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \ -+ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \ -+ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \ -+ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \ -+ : (abort (), (bfd_vma) - 1)) -+ -+#define bfd_put(bits, abfd, val, ptr) \ -+ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \ -+ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \ -+ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \ -+ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \ -+ : (abort (), (void) 0)) -+ -+ -+/* Byte swapping macros for file header data. 
*/ -+ -+#define bfd_h_put_8(abfd, val, ptr) \ -+ bfd_put_8 (abfd, val, ptr) -+#define bfd_h_put_signed_8(abfd, val, ptr) \ -+ bfd_put_8 (abfd, val, ptr) -+#define bfd_h_get_8(abfd, ptr) \ -+ bfd_get_8 (abfd, ptr) -+#define bfd_h_get_signed_8(abfd, ptr) \ -+ bfd_get_signed_8 (abfd, ptr) -+ -+#define bfd_h_put_16(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx16, (val, ptr)) -+#define bfd_h_put_signed_16 \ -+ bfd_h_put_16 -+#define bfd_h_get_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx16, (ptr)) -+#define bfd_h_get_signed_16(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr)) -+ -+#define bfd_h_put_32(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx32, (val, ptr)) -+#define bfd_h_put_signed_32 \ -+ bfd_h_put_32 -+#define bfd_h_get_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx32, (ptr)) -+#define bfd_h_get_signed_32(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr)) -+ -+#define bfd_h_put_64(abfd, val, ptr) \ -+ BFD_SEND (abfd, bfd_h_putx64, (val, ptr)) -+#define bfd_h_put_signed_64 \ -+ bfd_h_put_64 -+#define bfd_h_get_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx64, (ptr)) -+#define bfd_h_get_signed_64(abfd, ptr) \ -+ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr)) -+ -+/* Aliases for the above, which should eventually go away. */ -+ -+#define H_PUT_64 bfd_h_put_64 -+#define H_PUT_32 bfd_h_put_32 -+#define H_PUT_16 bfd_h_put_16 -+#define H_PUT_8 bfd_h_put_8 -+#define H_PUT_S64 bfd_h_put_signed_64 -+#define H_PUT_S32 bfd_h_put_signed_32 -+#define H_PUT_S16 bfd_h_put_signed_16 -+#define H_PUT_S8 bfd_h_put_signed_8 -+#define H_GET_64 bfd_h_get_64 -+#define H_GET_32 bfd_h_get_32 -+#define H_GET_16 bfd_h_get_16 -+#define H_GET_8 bfd_h_get_8 -+#define H_GET_S64 bfd_h_get_signed_64 -+#define H_GET_S32 bfd_h_get_signed_32 -+#define H_GET_S16 bfd_h_get_signed_16 -+#define H_GET_S8 bfd_h_get_signed_8 -+ -+ -+/* Extracted from bfdio.c. */ -+long bfd_get_mtime (bfd *abfd); -+ -+long bfd_get_size (bfd *abfd); -+ -+/* Extracted from bfdwin.c. */ -+/* Extracted from section.c. */ -+typedef struct bfd_section -+{ -+ /* The name of the section; the name isn't a copy, the pointer is -+ the same as that passed to bfd_make_section. */ -+ const char *name; -+ -+ /* A unique sequence number. */ -+ int id; -+ -+ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */ -+ int index; -+ -+ /* The next section in the list belonging to the BFD, or NULL. */ -+ struct bfd_section *next; -+ -+ /* The previous section in the list belonging to the BFD, or NULL. */ -+ struct bfd_section *prev; -+ -+ /* The field flags contains attributes of the section. Some -+ flags are read in from the object file, and some are -+ synthesized from other information. */ -+ flagword flags; -+ -+#define SEC_NO_FLAGS 0x000 -+ -+ /* Tells the OS to allocate space for this section when loading. -+ This is clear for a section containing debug information only. */ -+#define SEC_ALLOC 0x001 -+ -+ /* Tells the OS to load the section from the file when loading. -+ This is clear for a .bss section. */ -+#define SEC_LOAD 0x002 -+ -+ /* The section contains data still to be relocated, so there is -+ some relocation information too. */ -+#define SEC_RELOC 0x004 -+ -+ /* A signal to the OS that the section contains read only data. */ -+#define SEC_READONLY 0x008 -+ -+ /* The section contains code only. */ -+#define SEC_CODE 0x010 -+ -+ /* The section contains data only. */ -+#define SEC_DATA 0x020 -+ -+ /* The section will reside in ROM. */ -+#define SEC_ROM 0x040 -+ -+ /* The section contains constructor information. 
This section -+ type is used by the linker to create lists of constructors and -+ destructors used by <>. When a back end sees a symbol -+ which should be used in a constructor list, it creates a new -+ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches -+ the symbol to it, and builds a relocation. To build the lists -+ of constructors, all the linker has to do is catenate all the -+ sections called <<__CTOR_LIST__>> and relocate the data -+ contained within - exactly the operations it would peform on -+ standard data. */ -+#define SEC_CONSTRUCTOR 0x080 -+ -+ /* The section has contents - a data section could be -+ <> | <>; a debug section could be -+ <> */ -+#define SEC_HAS_CONTENTS 0x100 -+ -+ /* An instruction to the linker to not output the section -+ even if it has information which would normally be written. */ -+#define SEC_NEVER_LOAD 0x200 -+ -+ /* The section contains thread local data. */ -+#define SEC_THREAD_LOCAL 0x400 -+ -+ /* The section has GOT references. This flag is only for the -+ linker, and is currently only used by the elf32-hppa back end. -+ It will be set if global offset table references were detected -+ in this section, which indicate to the linker that the section -+ contains PIC code, and must be handled specially when doing a -+ static link. */ -+#define SEC_HAS_GOT_REF 0x800 -+ -+ /* The section contains common symbols (symbols may be defined -+ multiple times, the value of a symbol is the amount of -+ space it requires, and the largest symbol value is the one -+ used). Most targets have exactly one of these (which we -+ translate to bfd_com_section_ptr), but ECOFF has two. */ -+#define SEC_IS_COMMON 0x1000 -+ -+ /* The section contains only debugging information. For -+ example, this is set for ELF .debug and .stab sections. -+ strip tests this flag to see if a section can be -+ discarded. */ -+#define SEC_DEBUGGING 0x2000 -+ -+ /* The contents of this section are held in memory pointed to -+ by the contents field. This is checked by bfd_get_section_contents, -+ and the data is retrieved from memory if appropriate. */ -+#define SEC_IN_MEMORY 0x4000 -+ -+ /* The contents of this section are to be excluded by the -+ linker for executable and shared objects unless those -+ objects are to be further relocated. */ -+#define SEC_EXCLUDE 0x8000 -+ -+ /* The contents of this section are to be sorted based on the sum of -+ the symbol and addend values specified by the associated relocation -+ entries. Entries without associated relocation entries will be -+ appended to the end of the section in an unspecified order. */ -+#define SEC_SORT_ENTRIES 0x10000 -+ -+ /* When linking, duplicate sections of the same name should be -+ discarded, rather than being combined into a single section as -+ is usually done. This is similar to how common symbols are -+ handled. See SEC_LINK_DUPLICATES below. */ -+#define SEC_LINK_ONCE 0x20000 -+ -+ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker -+ should handle duplicate sections. */ -+#define SEC_LINK_DUPLICATES 0x40000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that duplicate -+ sections with the same name should simply be discarded. */ -+#define SEC_LINK_DUPLICATES_DISCARD 0x0 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if there are any duplicate sections, although -+ it should still only link one copy. 
*/ -+#define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if any duplicate sections are a different size. */ -+#define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000 -+ -+ /* This value for SEC_LINK_DUPLICATES means that the linker -+ should warn if any duplicate sections contain different -+ contents. */ -+#define SEC_LINK_DUPLICATES_SAME_CONTENTS \ -+ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE) -+ -+ /* This section was created by the linker as part of dynamic -+ relocation or other arcane processing. It is skipped when -+ going through the first-pass output, trusting that someone -+ else up the line will take care of it later. */ -+#define SEC_LINKER_CREATED 0x200000 -+ -+ /* This section should not be subject to garbage collection. */ -+#define SEC_KEEP 0x400000 -+ -+ /* This section contains "short" data, and should be placed -+ "near" the GP. */ -+#define SEC_SMALL_DATA 0x800000 -+ -+ /* Attempt to merge identical entities in the section. -+ Entity size is given in the entsize field. */ -+#define SEC_MERGE 0x1000000 -+ -+ /* If given with SEC_MERGE, entities to merge are zero terminated -+ strings where entsize specifies character size instead of fixed -+ size entries. */ -+#define SEC_STRINGS 0x2000000 -+ -+ /* This section contains data about section groups. */ -+#define SEC_GROUP 0x4000000 -+ -+ /* The section is a COFF shared library section. This flag is -+ only for the linker. If this type of section appears in -+ the input file, the linker must copy it to the output file -+ without changing the vma or size. FIXME: Although this -+ was originally intended to be general, it really is COFF -+ specific (and the flag was renamed to indicate this). It -+ might be cleaner to have some more general mechanism to -+ allow the back end to control what the linker does with -+ sections. */ -+#define SEC_COFF_SHARED_LIBRARY 0x10000000 -+ -+ /* This section contains data which may be shared with other -+ executables or shared objects. This is for COFF only. */ -+#define SEC_COFF_SHARED 0x20000000 -+ -+ /* When a section with this flag is being linked, then if the size of -+ the input section is less than a page, it should not cross a page -+ boundary. If the size of the input section is one page or more, -+ it should be aligned on a page boundary. This is for TI -+ TMS320C54X only. */ -+#define SEC_TIC54X_BLOCK 0x40000000 -+ -+ /* Conditionally link this section; do not link if there are no -+ references found to any symbol in the section. This is for TI -+ TMS320C54X only. */ -+#define SEC_TIC54X_CLINK 0x80000000 -+ -+ /* End of section flags. */ -+ -+ /* Some internal packed boolean fields. */ -+ -+ /* See the vma field. */ -+ unsigned int user_set_vma : 1; -+ -+ /* A mark flag used by some of the linker backends. */ -+ unsigned int linker_mark : 1; -+ -+ /* Another mark flag used by some of the linker backends. Set for -+ output sections that have an input section. */ -+ unsigned int linker_has_input : 1; -+ -+ /* Mark flags used by some linker backends for garbage collection. */ -+ unsigned int gc_mark : 1; -+ unsigned int gc_mark_from_eh : 1; -+ -+ /* The following flags are used by the ELF linker. */ -+ -+ /* Mark sections which have been allocated to segments. */ -+ unsigned int segment_mark : 1; -+ -+ /* Type of sec_info information. 
*/ -+ unsigned int sec_info_type:3; -+#define ELF_INFO_TYPE_NONE 0 -+#define ELF_INFO_TYPE_STABS 1 -+#define ELF_INFO_TYPE_MERGE 2 -+#define ELF_INFO_TYPE_EH_FRAME 3 -+#define ELF_INFO_TYPE_JUST_SYMS 4 -+ -+ /* Nonzero if this section uses RELA relocations, rather than REL. */ -+ unsigned int use_rela_p:1; -+ -+ /* Bits used by various backends. The generic code doesn't touch -+ these fields. */ -+ -+ /* Nonzero if this section has TLS related relocations. */ -+ unsigned int has_tls_reloc:1; -+ -+ /* Nonzero if this section has a gp reloc. */ -+ unsigned int has_gp_reloc:1; -+ -+ /* Nonzero if this section needs the relax finalize pass. */ -+ unsigned int need_finalize_relax:1; -+ -+ /* Whether relocations have been processed. */ -+ unsigned int reloc_done : 1; -+ -+ /* End of internal packed boolean fields. */ -+ -+ /* The virtual memory address of the section - where it will be -+ at run time. The symbols are relocated against this. The -+ user_set_vma flag is maintained by bfd; if it's not set, the -+ backend can assign addresses (for example, in <>, where -+ the default address for <<.data>> is dependent on the specific -+ target and various flags). */ -+ bfd_vma vma; -+ -+ /* The load address of the section - where it would be in a -+ rom image; really only used for writing section header -+ information. */ -+ bfd_vma lma; -+ -+ /* The size of the section in octets, as it will be output. -+ Contains a value even if the section has no contents (e.g., the -+ size of <<.bss>>). */ -+ bfd_size_type size; -+ -+ /* For input sections, the original size on disk of the section, in -+ octets. This field is used by the linker relaxation code. It is -+ currently only set for sections where the linker relaxation scheme -+ doesn't cache altered section and reloc contents (stabs, eh_frame, -+ SEC_MERGE, some coff relaxing targets), and thus the original size -+ needs to be kept to read the section multiple times. -+ For output sections, rawsize holds the section size calculated on -+ a previous linker relaxation pass. */ -+ bfd_size_type rawsize; -+ -+ /* If this section is going to be output, then this value is the -+ offset in *bytes* into the output section of the first byte in the -+ input section (byte ==> smallest addressable unit on the -+ target). In most cases, if this was going to start at the -+ 100th octet (8-bit quantity) in the output section, this value -+ would be 100. However, if the target byte size is 16 bits -+ (bfd_octets_per_byte is "2"), this value would be 50. */ -+ bfd_vma output_offset; -+ -+ /* The output section through which to map on output. */ -+ struct bfd_section *output_section; -+ -+ /* The alignment requirement of the section, as an exponent of 2 - -+ e.g., 3 aligns to 2^3 (or 8). */ -+ unsigned int alignment_power; -+ -+ /* If an input section, a pointer to a vector of relocation -+ records for the data in this section. */ -+ struct reloc_cache_entry *relocation; -+ -+ /* If an output section, a pointer to a vector of pointers to -+ relocation records for the data in this section. */ -+ struct reloc_cache_entry **orelocation; -+ -+ /* The number of relocation records in one of the above. */ -+ unsigned reloc_count; -+ -+ /* Information below is back end specific - and not always used -+ or updated. */ -+ -+ /* File position of section data. */ -+ file_ptr filepos; -+ -+ /* File position of relocation info. */ -+ file_ptr rel_filepos; -+ -+ /* File position of line data. */ -+ file_ptr line_filepos; -+ -+ /* Pointer to data for applications. 
*/ -+ void *userdata; -+ -+ /* If the SEC_IN_MEMORY flag is set, this points to the actual -+ contents. */ -+ unsigned char *contents; -+ -+ /* Attached line number information. */ -+ alent *lineno; -+ -+ /* Number of line number records. */ -+ unsigned int lineno_count; -+ -+ /* Entity size for merging purposes. */ -+ unsigned int entsize; -+ -+ /* Points to the kept section if this section is a link-once section, -+ and is discarded. */ -+ struct bfd_section *kept_section; -+ -+ /* When a section is being output, this value changes as more -+ linenumbers are written out. */ -+ file_ptr moving_line_filepos; -+ -+ /* What the section number is in the target world. */ -+ int target_index; -+ -+ void *used_by_bfd; -+ -+ /* If this is a constructor section then here is a list of the -+ relocations created to relocate items within it. */ -+ struct relent_chain *constructor_chain; -+ -+ /* The BFD which owns the section. */ -+ bfd *owner; -+ -+ /* A symbol which points at this section only. */ -+ struct bfd_symbol *symbol; -+ struct bfd_symbol **symbol_ptr_ptr; -+ -+ /* Early in the link process, map_head and map_tail are used to build -+ a list of input sections attached to an output section. Later, -+ output sections use these fields for a list of bfd_link_order -+ structs. */ -+ union { -+ struct bfd_link_order *link_order; -+ struct bfd_section *s; -+ } map_head, map_tail; -+} asection; -+ -+/* These sections are global, and are managed by BFD. The application -+ and target back end are not permitted to change the values in -+ these sections. New code should use the section_ptr macros rather -+ than referring directly to the const sections. The const sections -+ may eventually vanish. */ -+#define BFD_ABS_SECTION_NAME "*ABS*" -+#define BFD_UND_SECTION_NAME "*UND*" -+#define BFD_COM_SECTION_NAME "*COM*" -+#define BFD_IND_SECTION_NAME "*IND*" -+ -+/* The absolute section. */ -+extern asection bfd_abs_section; -+#define bfd_abs_section_ptr ((asection *) &bfd_abs_section) -+#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr) -+/* Pointer to the undefined section. */ -+extern asection bfd_und_section; -+#define bfd_und_section_ptr ((asection *) &bfd_und_section) -+#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr) -+/* Pointer to the common section. */ -+extern asection bfd_com_section; -+#define bfd_com_section_ptr ((asection *) &bfd_com_section) -+/* Pointer to the indirect section. */ -+extern asection bfd_ind_section; -+#define bfd_ind_section_ptr ((asection *) &bfd_ind_section) -+#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr) -+ -+#define bfd_is_const_section(SEC) \ -+ ( ((SEC) == bfd_abs_section_ptr) \ -+ || ((SEC) == bfd_und_section_ptr) \ -+ || ((SEC) == bfd_com_section_ptr) \ -+ || ((SEC) == bfd_ind_section_ptr)) -+ -+extern const struct bfd_symbol * const bfd_abs_symbol; -+extern const struct bfd_symbol * const bfd_com_symbol; -+extern const struct bfd_symbol * const bfd_und_symbol; -+extern const struct bfd_symbol * const bfd_ind_symbol; -+ -+/* Macros to handle insertion and deletion of a bfd's sections. These -+ only handle the list pointers, ie. do not adjust section_count, -+ target_index etc. 
*/ -+#define bfd_section_list_remove(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ asection *_next = _s->next; \ -+ asection *_prev = _s->prev; \ -+ if (_prev) \ -+ _prev->next = _next; \ -+ else \ -+ (ABFD)->sections = _next; \ -+ if (_next) \ -+ _next->prev = _prev; \ -+ else \ -+ (ABFD)->section_last = _prev; \ -+ } \ -+ while (0) -+#define bfd_section_list_append(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ bfd *_abfd = ABFD; \ -+ _s->next = NULL; \ -+ if (_abfd->section_last) \ -+ { \ -+ _s->prev = _abfd->section_last; \ -+ _abfd->section_last->next = _s; \ -+ } \ -+ else \ -+ { \ -+ _s->prev = NULL; \ -+ _abfd->sections = _s; \ -+ } \ -+ _abfd->section_last = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_prepend(ABFD, S) \ -+ do \ -+ { \ -+ asection *_s = S; \ -+ bfd *_abfd = ABFD; \ -+ _s->prev = NULL; \ -+ if (_abfd->sections) \ -+ { \ -+ _s->next = _abfd->sections; \ -+ _abfd->sections->prev = _s; \ -+ } \ -+ else \ -+ { \ -+ _s->next = NULL; \ -+ _abfd->section_last = _s; \ -+ } \ -+ _abfd->sections = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_insert_after(ABFD, A, S) \ -+ do \ -+ { \ -+ asection *_a = A; \ -+ asection *_s = S; \ -+ asection *_next = _a->next; \ -+ _s->next = _next; \ -+ _s->prev = _a; \ -+ _a->next = _s; \ -+ if (_next) \ -+ _next->prev = _s; \ -+ else \ -+ (ABFD)->section_last = _s; \ -+ } \ -+ while (0) -+#define bfd_section_list_insert_before(ABFD, B, S) \ -+ do \ -+ { \ -+ asection *_b = B; \ -+ asection *_s = S; \ -+ asection *_prev = _b->prev; \ -+ _s->prev = _prev; \ -+ _s->next = _b; \ -+ _b->prev = _s; \ -+ if (_prev) \ -+ _prev->next = _s; \ -+ else \ -+ (ABFD)->sections = _s; \ -+ } \ -+ while (0) -+#define bfd_section_removed_from_list(ABFD, S) \ -+ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S)) -+ -+void bfd_section_list_clear (bfd *); -+ -+asection *bfd_get_section_by_name (bfd *abfd, const char *name); -+ -+asection *bfd_get_section_by_name_if -+ (bfd *abfd, -+ const char *name, -+ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+char *bfd_get_unique_section_name -+ (bfd *abfd, const char *templat, int *count); -+ -+asection *bfd_make_section_old_way (bfd *abfd, const char *name); -+ -+asection *bfd_make_section_anyway_with_flags -+ (bfd *abfd, const char *name, flagword flags); -+ -+asection *bfd_make_section_anyway (bfd *abfd, const char *name); -+ -+asection *bfd_make_section_with_flags -+ (bfd *, const char *name, flagword flags); -+ -+asection *bfd_make_section (bfd *, const char *name); -+ -+bfd_boolean bfd_set_section_flags -+ (bfd *abfd, asection *sec, flagword flags); -+ -+void bfd_map_over_sections -+ (bfd *abfd, -+ void (*func) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+asection *bfd_sections_find_if -+ (bfd *abfd, -+ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj), -+ void *obj); -+ -+bfd_boolean bfd_set_section_size -+ (bfd *abfd, asection *sec, bfd_size_type val); -+ -+bfd_boolean bfd_set_section_contents -+ (bfd *abfd, asection *section, const void *data, -+ file_ptr offset, bfd_size_type count); -+ -+bfd_boolean bfd_get_section_contents -+ (bfd *abfd, asection *section, void *location, file_ptr offset, -+ bfd_size_type count); -+ -+bfd_boolean bfd_malloc_and_get_section -+ (bfd *abfd, asection *section, bfd_byte **buf); -+ -+bfd_boolean bfd_copy_private_section_data -+ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec); -+ -+#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \ -+ BFD_SEND 
(obfd, _bfd_copy_private_section_data, \ -+ (ibfd, isection, obfd, osection)) -+bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec); -+ -+bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group); -+ -+/* Extracted from archures.c. */ -+enum bfd_architecture -+{ -+ bfd_arch_unknown, /* File arch not known. */ -+ bfd_arch_obscure, /* Arch known, not one of these. */ -+ bfd_arch_m68k, /* Motorola 68xxx */ -+#define bfd_mach_m68000 1 -+#define bfd_mach_m68008 2 -+#define bfd_mach_m68010 3 -+#define bfd_mach_m68020 4 -+#define bfd_mach_m68030 5 -+#define bfd_mach_m68040 6 -+#define bfd_mach_m68060 7 -+#define bfd_mach_cpu32 8 -+#define bfd_mach_mcf5200 9 -+#define bfd_mach_mcf5206e 10 -+#define bfd_mach_mcf5307 11 -+#define bfd_mach_mcf5407 12 -+#define bfd_mach_mcf528x 13 -+#define bfd_mach_mcfv4e 14 -+#define bfd_mach_mcf521x 15 -+#define bfd_mach_mcf5249 16 -+#define bfd_mach_mcf547x 17 -+#define bfd_mach_mcf548x 18 -+ bfd_arch_vax, /* DEC Vax */ -+ bfd_arch_i960, /* Intel 960 */ -+ /* The order of the following is important. -+ lower number indicates a machine type that -+ only accepts a subset of the instructions -+ available to machines with higher numbers. -+ The exception is the "ca", which is -+ incompatible with all other machines except -+ "core". */ -+ -+#define bfd_mach_i960_core 1 -+#define bfd_mach_i960_ka_sa 2 -+#define bfd_mach_i960_kb_sb 3 -+#define bfd_mach_i960_mc 4 -+#define bfd_mach_i960_xa 5 -+#define bfd_mach_i960_ca 6 -+#define bfd_mach_i960_jx 7 -+#define bfd_mach_i960_hx 8 -+ -+ bfd_arch_or32, /* OpenRISC 32 */ -+ -+ bfd_arch_a29k, /* AMD 29000 */ -+ bfd_arch_sparc, /* SPARC */ -+#define bfd_mach_sparc 1 -+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */ -+#define bfd_mach_sparc_sparclet 2 -+#define bfd_mach_sparc_sparclite 3 -+#define bfd_mach_sparc_v8plus 4 -+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */ -+#define bfd_mach_sparc_sparclite_le 6 -+#define bfd_mach_sparc_v9 7 -+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */ -+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */ -+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */ -+/* Nonzero if MACH has the v9 instruction set. */ -+#define bfd_mach_sparc_v9_p(mach) \ -+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \ -+ && (mach) != bfd_mach_sparc_sparclite_le) -+/* Nonzero if MACH is a 64 bit sparc architecture. 
*/ -+#define bfd_mach_sparc_64bit_p(mach) \ -+ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb) -+ bfd_arch_mips, /* MIPS Rxxxx */ -+#define bfd_mach_mips3000 3000 -+#define bfd_mach_mips3900 3900 -+#define bfd_mach_mips4000 4000 -+#define bfd_mach_mips4010 4010 -+#define bfd_mach_mips4100 4100 -+#define bfd_mach_mips4111 4111 -+#define bfd_mach_mips4120 4120 -+#define bfd_mach_mips4300 4300 -+#define bfd_mach_mips4400 4400 -+#define bfd_mach_mips4600 4600 -+#define bfd_mach_mips4650 4650 -+#define bfd_mach_mips5000 5000 -+#define bfd_mach_mips5400 5400 -+#define bfd_mach_mips5500 5500 -+#define bfd_mach_mips6000 6000 -+#define bfd_mach_mips7000 7000 -+#define bfd_mach_mips8000 8000 -+#define bfd_mach_mips9000 9000 -+#define bfd_mach_mips10000 10000 -+#define bfd_mach_mips12000 12000 -+#define bfd_mach_mips16 16 -+#define bfd_mach_mips5 5 -+#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */ -+#define bfd_mach_mipsisa32 32 -+#define bfd_mach_mipsisa32r2 33 -+#define bfd_mach_mipsisa64 64 -+#define bfd_mach_mipsisa64r2 65 -+ bfd_arch_i386, /* Intel 386 */ -+#define bfd_mach_i386_i386 1 -+#define bfd_mach_i386_i8086 2 -+#define bfd_mach_i386_i386_intel_syntax 3 -+#define bfd_mach_x86_64 64 -+#define bfd_mach_x86_64_intel_syntax 65 -+ bfd_arch_we32k, /* AT&T WE32xxx */ -+ bfd_arch_tahoe, /* CCI/Harris Tahoe */ -+ bfd_arch_i860, /* Intel 860 */ -+ bfd_arch_i370, /* IBM 360/370 Mainframes */ -+ bfd_arch_romp, /* IBM ROMP PC/RT */ -+ bfd_arch_alliant, /* Alliant */ -+ bfd_arch_convex, /* Convex */ -+ bfd_arch_m88k, /* Motorola 88xxx */ -+ bfd_arch_m98k, /* Motorola 98xxx */ -+ bfd_arch_pyramid, /* Pyramid Technology */ -+ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */ -+#define bfd_mach_h8300 1 -+#define bfd_mach_h8300h 2 -+#define bfd_mach_h8300s 3 -+#define bfd_mach_h8300hn 4 -+#define bfd_mach_h8300sn 5 -+#define bfd_mach_h8300sx 6 -+#define bfd_mach_h8300sxn 7 -+ bfd_arch_pdp11, /* DEC PDP-11 */ -+ bfd_arch_powerpc, /* PowerPC */ -+#define bfd_mach_ppc 32 -+#define bfd_mach_ppc64 64 -+#define bfd_mach_ppc_403 403 -+#define bfd_mach_ppc_403gc 4030 -+#define bfd_mach_ppc_505 505 -+#define bfd_mach_ppc_601 601 -+#define bfd_mach_ppc_602 602 -+#define bfd_mach_ppc_603 603 -+#define bfd_mach_ppc_ec603e 6031 -+#define bfd_mach_ppc_604 604 -+#define bfd_mach_ppc_620 620 -+#define bfd_mach_ppc_630 630 -+#define bfd_mach_ppc_750 750 -+#define bfd_mach_ppc_860 860 -+#define bfd_mach_ppc_a35 35 -+#define bfd_mach_ppc_rs64ii 642 -+#define bfd_mach_ppc_rs64iii 643 -+#define bfd_mach_ppc_7400 7400 -+#define bfd_mach_ppc_e500 500 -+ bfd_arch_rs6000, /* IBM RS/6000 */ -+#define bfd_mach_rs6k 6000 -+#define bfd_mach_rs6k_rs1 6001 -+#define bfd_mach_rs6k_rsc 6003 -+#define bfd_mach_rs6k_rs2 6002 -+ bfd_arch_hppa, /* HP PA RISC */ -+#define bfd_mach_hppa10 10 -+#define bfd_mach_hppa11 11 -+#define bfd_mach_hppa20 20 -+#define bfd_mach_hppa20w 25 -+ bfd_arch_d10v, /* Mitsubishi D10V */ -+#define bfd_mach_d10v 1 -+#define bfd_mach_d10v_ts2 2 -+#define bfd_mach_d10v_ts3 3 -+ bfd_arch_d30v, /* Mitsubishi D30V */ -+ bfd_arch_dlx, /* DLX */ -+ bfd_arch_m68hc11, /* Motorola 68HC11 */ -+ bfd_arch_m68hc12, /* Motorola 68HC12 */ -+#define bfd_mach_m6812_default 0 -+#define bfd_mach_m6812 1 -+#define bfd_mach_m6812s 2 -+ bfd_arch_z8k, /* Zilog Z8000 */ -+#define bfd_mach_z8001 1 -+#define bfd_mach_z8002 2 -+ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */ -+ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */ -+#define bfd_mach_sh 1 -+#define bfd_mach_sh2 0x20 
-+#define bfd_mach_sh_dsp 0x2d -+#define bfd_mach_sh2a 0x2a -+#define bfd_mach_sh2a_nofpu 0x2b -+#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1 -+#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2 -+#define bfd_mach_sh2a_or_sh4 0x2a3 -+#define bfd_mach_sh2a_or_sh3e 0x2a4 -+#define bfd_mach_sh2e 0x2e -+#define bfd_mach_sh3 0x30 -+#define bfd_mach_sh3_nommu 0x31 -+#define bfd_mach_sh3_dsp 0x3d -+#define bfd_mach_sh3e 0x3e -+#define bfd_mach_sh4 0x40 -+#define bfd_mach_sh4_nofpu 0x41 -+#define bfd_mach_sh4_nommu_nofpu 0x42 -+#define bfd_mach_sh4a 0x4a -+#define bfd_mach_sh4a_nofpu 0x4b -+#define bfd_mach_sh4al_dsp 0x4d -+#define bfd_mach_sh5 0x50 -+ bfd_arch_alpha, /* Dec Alpha */ -+#define bfd_mach_alpha_ev4 0x10 -+#define bfd_mach_alpha_ev5 0x20 -+#define bfd_mach_alpha_ev6 0x30 -+ bfd_arch_arm, /* Advanced Risc Machines ARM. */ -+#define bfd_mach_arm_unknown 0 -+#define bfd_mach_arm_2 1 -+#define bfd_mach_arm_2a 2 -+#define bfd_mach_arm_3 3 -+#define bfd_mach_arm_3M 4 -+#define bfd_mach_arm_4 5 -+#define bfd_mach_arm_4T 6 -+#define bfd_mach_arm_5 7 -+#define bfd_mach_arm_5T 8 -+#define bfd_mach_arm_5TE 9 -+#define bfd_mach_arm_XScale 10 -+#define bfd_mach_arm_ep9312 11 -+#define bfd_mach_arm_iWMMXt 12 -+ bfd_arch_ns32k, /* National Semiconductors ns32000 */ -+ bfd_arch_w65, /* WDC 65816 */ -+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */ -+ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */ -+#define bfd_mach_tic3x 30 -+#define bfd_mach_tic4x 40 -+ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */ -+ bfd_arch_tic80, /* TI TMS320c80 (MVP) */ -+ bfd_arch_v850, /* NEC V850 */ -+#define bfd_mach_v850 1 -+#define bfd_mach_v850e 'E' -+#define bfd_mach_v850e1 '1' -+ bfd_arch_arc, /* ARC Cores */ -+#define bfd_mach_arc_5 5 -+#define bfd_mach_arc_6 6 -+#define bfd_mach_arc_7 7 -+#define bfd_mach_arc_8 8 -+ bfd_arch_m32c, /* Renesas M16C/M32C. */ -+#define bfd_mach_m16c 0x75 -+#define bfd_mach_m32c 0x78 -+ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */ -+#define bfd_mach_m32r 1 /* For backwards compatibility. */ -+#define bfd_mach_m32rx 'x' -+#define bfd_mach_m32r2 '2' -+ bfd_arch_mn10200, /* Matsushita MN10200 */ -+ bfd_arch_mn10300, /* Matsushita MN10300 */ -+#define bfd_mach_mn10300 300 -+#define bfd_mach_am33 330 -+#define bfd_mach_am33_2 332 -+ bfd_arch_fr30, -+#define bfd_mach_fr30 0x46523330 -+ bfd_arch_frv, -+#define bfd_mach_frv 1 -+#define bfd_mach_frvsimple 2 -+#define bfd_mach_fr300 300 -+#define bfd_mach_fr400 400 -+#define bfd_mach_fr450 450 -+#define bfd_mach_frvtomcat 499 /* fr500 prototype */ -+#define bfd_mach_fr500 500 -+#define bfd_mach_fr550 550 -+ bfd_arch_mcore, -+ bfd_arch_ia64, /* HP/Intel ia64 */ -+#define bfd_mach_ia64_elf64 64 -+#define bfd_mach_ia64_elf32 32 -+ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */ -+#define bfd_mach_ip2022 1 -+#define bfd_mach_ip2022ext 2 -+ bfd_arch_iq2000, /* Vitesse IQ2000. */ -+#define bfd_mach_iq2000 1 -+#define bfd_mach_iq10 2 -+ bfd_arch_ms1, -+#define bfd_mach_ms1 1 -+#define bfd_mach_mrisc2 2 -+ bfd_arch_pj, -+ bfd_arch_avr, /* Atmel AVR microcontrollers. */ -+#define bfd_mach_avr1 1 -+#define bfd_mach_avr2 2 -+#define bfd_mach_avr3 3 -+#define bfd_mach_avr4 4 -+#define bfd_mach_avr5 5 -+ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */ -+#define bfd_mach_cr16c 1 -+ bfd_arch_crx, /* National Semiconductor CRX. 
*/ -+#define bfd_mach_crx 1 -+ bfd_arch_cris, /* Axis CRIS */ -+#define bfd_mach_cris_v0_v10 255 -+#define bfd_mach_cris_v32 32 -+#define bfd_mach_cris_v10_v32 1032 -+ bfd_arch_s390, /* IBM s390 */ -+#define bfd_mach_s390_31 31 -+#define bfd_mach_s390_64 64 -+ bfd_arch_openrisc, /* OpenRISC */ -+ bfd_arch_mmix, /* Donald Knuth's educational processor. */ -+ bfd_arch_xstormy16, -+#define bfd_mach_xstormy16 1 -+ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */ -+#define bfd_mach_msp11 11 -+#define bfd_mach_msp110 110 -+#define bfd_mach_msp12 12 -+#define bfd_mach_msp13 13 -+#define bfd_mach_msp14 14 -+#define bfd_mach_msp15 15 -+#define bfd_mach_msp16 16 -+#define bfd_mach_msp31 31 -+#define bfd_mach_msp32 32 -+#define bfd_mach_msp33 33 -+#define bfd_mach_msp41 41 -+#define bfd_mach_msp42 42 -+#define bfd_mach_msp43 43 -+#define bfd_mach_msp44 44 -+ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */ -+#define bfd_mach_xtensa 1 -+ bfd_arch_maxq, /* Dallas MAXQ 10/20 */ -+#define bfd_mach_maxq10 10 -+#define bfd_mach_maxq20 20 -+ bfd_arch_last -+ }; -+ -+typedef struct bfd_arch_info -+{ -+ int bits_per_word; -+ int bits_per_address; -+ int bits_per_byte; -+ enum bfd_architecture arch; -+ unsigned long mach; -+ const char *arch_name; -+ const char *printable_name; -+ unsigned int section_align_power; -+ /* TRUE if this is the default machine for the architecture. -+ The default arch should be the first entry for an arch so that -+ all the entries for that arch can be accessed via <>. */ -+ bfd_boolean the_default; -+ const struct bfd_arch_info * (*compatible) -+ (const struct bfd_arch_info *a, const struct bfd_arch_info *b); -+ -+ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *); -+ -+ const struct bfd_arch_info *next; -+} -+bfd_arch_info_type; -+ -+const char *bfd_printable_name (bfd *abfd); -+ -+const bfd_arch_info_type *bfd_scan_arch (const char *string); -+ -+const char **bfd_arch_list (void); -+ -+const bfd_arch_info_type *bfd_arch_get_compatible -+ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns); -+ -+void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg); -+ -+enum bfd_architecture bfd_get_arch (bfd *abfd); -+ -+unsigned long bfd_get_mach (bfd *abfd); -+ -+unsigned int bfd_arch_bits_per_byte (bfd *abfd); -+ -+unsigned int bfd_arch_bits_per_address (bfd *abfd); -+ -+const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd); -+ -+const bfd_arch_info_type *bfd_lookup_arch -+ (enum bfd_architecture arch, unsigned long machine); -+ -+const char *bfd_printable_arch_mach -+ (enum bfd_architecture arch, unsigned long machine); -+ -+unsigned int bfd_octets_per_byte (bfd *abfd); -+ -+unsigned int bfd_arch_mach_octets_per_byte -+ (enum bfd_architecture arch, unsigned long machine); -+ -+/* Extracted from reloc.c. */ -+typedef enum bfd_reloc_status -+{ -+ /* No errors detected. */ -+ bfd_reloc_ok, -+ -+ /* The relocation was performed, but there was an overflow. */ -+ bfd_reloc_overflow, -+ -+ /* The address to relocate was not within the section supplied. */ -+ bfd_reloc_outofrange, -+ -+ /* Used by special functions. */ -+ bfd_reloc_continue, -+ -+ /* Unsupported relocation size requested. */ -+ bfd_reloc_notsupported, -+ -+ /* Unused. */ -+ bfd_reloc_other, -+ -+ /* The symbol to relocate against was undefined. */ -+ bfd_reloc_undefined, -+ -+ /* The relocation was performed, but may not be ok - presently -+ generated only when linking i960 coff files with i960 b.out -+ symbols. 
If this type is returned, the error_message argument -+ to bfd_perform_relocation will be set. */ -+ bfd_reloc_dangerous -+ } -+ bfd_reloc_status_type; -+ -+ -+typedef struct reloc_cache_entry -+{ -+ /* A pointer into the canonical table of pointers. */ -+ struct bfd_symbol **sym_ptr_ptr; -+ -+ /* offset in section. */ -+ bfd_size_type address; -+ -+ /* addend for relocation value. */ -+ bfd_vma addend; -+ -+ /* Pointer to how to perform the required relocation. */ -+ reloc_howto_type *howto; -+ -+} -+arelent; -+ -+enum complain_overflow -+{ -+ /* Do not complain on overflow. */ -+ complain_overflow_dont, -+ -+ /* Complain if the bitfield overflows, whether it is considered -+ as signed or unsigned. */ -+ complain_overflow_bitfield, -+ -+ /* Complain if the value overflows when considered as signed -+ number. */ -+ complain_overflow_signed, -+ -+ /* Complain if the value overflows when considered as an -+ unsigned number. */ -+ complain_overflow_unsigned -+}; -+ -+struct reloc_howto_struct -+{ -+ /* The type field has mainly a documentary use - the back end can -+ do what it wants with it, though normally the back end's -+ external idea of what a reloc number is stored -+ in this field. For example, a PC relative word relocation -+ in a coff environment has the type 023 - because that's -+ what the outside world calls a R_PCRWORD reloc. */ -+ unsigned int type; -+ -+ /* The value the final relocation is shifted right by. This drops -+ unwanted data from the relocation. */ -+ unsigned int rightshift; -+ -+ /* The size of the item to be relocated. This is *not* a -+ power-of-two measure. To get the number of bytes operated -+ on by a type of relocation, use bfd_get_reloc_size. */ -+ int size; -+ -+ /* The number of bits in the item to be relocated. This is used -+ when doing overflow checking. */ -+ unsigned int bitsize; -+ -+ /* Notes that the relocation is relative to the location in the -+ data section of the addend. The relocation function will -+ subtract from the relocation value the address of the location -+ being relocated. */ -+ bfd_boolean pc_relative; -+ -+ /* The bit position of the reloc value in the destination. -+ The relocated value is left shifted by this amount. */ -+ unsigned int bitpos; -+ -+ /* What type of overflow error should be checked for when -+ relocating. */ -+ enum complain_overflow complain_on_overflow; -+ -+ /* If this field is non null, then the supplied function is -+ called rather than the normal function. This allows really -+ strange relocation methods to be accommodated (e.g., i960 callj -+ instructions). */ -+ bfd_reloc_status_type (*special_function) -+ (bfd *, arelent *, struct bfd_symbol *, void *, asection *, -+ bfd *, char **); -+ -+ /* The textual name of the relocation type. */ -+ char *name; -+ -+ /* Some formats record a relocation addend in the section contents -+ rather than with the relocation. For ELF formats this is the -+ distinction between USE_REL and USE_RELA (though the code checks -+ for USE_REL == 1/0). The value of this field is TRUE if the -+ addend is recorded with the section contents; when performing a -+ partial link (ld -r) the section contents (the data) will be -+ modified. The value of this field is FALSE if addends are -+ recorded with the relocation (in arelent.addend); when performing -+ a partial link the relocation will be modified. -+ All relocations for all ELF USE_RELA targets should set this field -+ to FALSE (values of TRUE should be looked on with suspicion). 
-+ However, the converse is not true: not all relocations of all ELF -+ USE_REL targets set this field to TRUE. Why this is so is peculiar -+ to each particular target. For relocs that aren't used in partial -+ links (e.g. GOT stuff) it doesn't matter what this is set to. */ -+ bfd_boolean partial_inplace; -+ -+ /* src_mask selects the part of the instruction (or data) to be used -+ in the relocation sum. If the target relocations don't have an -+ addend in the reloc, eg. ELF USE_REL, src_mask will normally equal -+ dst_mask to extract the addend from the section contents. If -+ relocations do have an addend in the reloc, eg. ELF USE_RELA, this -+ field should be zero. Non-zero values for ELF USE_RELA targets are -+ bogus as in those cases the value in the dst_mask part of the -+ section contents should be treated as garbage. */ -+ bfd_vma src_mask; -+ -+ /* dst_mask selects which parts of the instruction (or data) are -+ replaced with a relocated value. */ -+ bfd_vma dst_mask; -+ -+ /* When some formats create PC relative instructions, they leave -+ the value of the pc of the place being relocated in the offset -+ slot of the instruction, so that a PC relative relocation can -+ be made just by adding in an ordinary offset (e.g., sun3 a.out). -+ Some formats leave the displacement part of an instruction -+ empty (e.g., m88k bcs); this flag signals the fact. */ -+ bfd_boolean pcrel_offset; -+}; -+ -+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \ -+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC } -+#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \ -+ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \ -+ NAME, FALSE, 0, 0, IN) -+ -+#define EMPTY_HOWTO(C) \ -+ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \ -+ NULL, FALSE, 0, 0, FALSE) -+ -+#define HOWTO_PREPARE(relocation, symbol) \ -+ { \ -+ if (symbol != NULL) \ -+ { \ -+ if (bfd_is_com_section (symbol->section)) \ -+ { \ -+ relocation = 0; \ -+ } \ -+ else \ -+ { \ -+ relocation = symbol->value; \ -+ } \ -+ } \ -+ } -+ -+unsigned int bfd_get_reloc_size (reloc_howto_type *); -+ -+typedef struct relent_chain -+{ -+ arelent relent; -+ struct relent_chain *next; -+} -+arelent_chain; -+ -+bfd_reloc_status_type bfd_check_overflow -+ (enum complain_overflow how, -+ unsigned int bitsize, -+ unsigned int rightshift, -+ unsigned int addrsize, -+ bfd_vma relocation); -+ -+bfd_reloc_status_type bfd_perform_relocation -+ (bfd *abfd, -+ arelent *reloc_entry, -+ void *data, -+ asection *input_section, -+ bfd *output_bfd, -+ char **error_message); -+ -+bfd_reloc_status_type bfd_install_relocation -+ (bfd *abfd, -+ arelent *reloc_entry, -+ void *data, bfd_vma data_start, -+ asection *input_section, -+ char **error_message); -+ -+enum bfd_reloc_code_real { -+ _dummy_first_bfd_reloc_code_real, -+ -+ -+/* Basic absolute relocations of N bits. */ -+ BFD_RELOC_64, -+ BFD_RELOC_32, -+ BFD_RELOC_26, -+ BFD_RELOC_24, -+ BFD_RELOC_16, -+ BFD_RELOC_14, -+ BFD_RELOC_8, -+ -+/* PC-relative relocations. Sometimes these are relative to the address -+of the relocation itself; sometimes they are relative to the start of -+the section containing the relocation. It depends on the specific target. -+ -+The 24-bit relocation is used in some Intel 960 configurations. */ -+ BFD_RELOC_64_PCREL, -+ BFD_RELOC_32_PCREL, -+ BFD_RELOC_24_PCREL, -+ BFD_RELOC_16_PCREL, -+ BFD_RELOC_12_PCREL, -+ BFD_RELOC_8_PCREL, -+ -+/* Section relative relocations. Some targets need this for DWARF2. 
*/ -+ BFD_RELOC_32_SECREL, -+ -+/* For ELF. */ -+ BFD_RELOC_32_GOT_PCREL, -+ BFD_RELOC_16_GOT_PCREL, -+ BFD_RELOC_8_GOT_PCREL, -+ BFD_RELOC_32_GOTOFF, -+ BFD_RELOC_16_GOTOFF, -+ BFD_RELOC_LO16_GOTOFF, -+ BFD_RELOC_HI16_GOTOFF, -+ BFD_RELOC_HI16_S_GOTOFF, -+ BFD_RELOC_8_GOTOFF, -+ BFD_RELOC_64_PLT_PCREL, -+ BFD_RELOC_32_PLT_PCREL, -+ BFD_RELOC_24_PLT_PCREL, -+ BFD_RELOC_16_PLT_PCREL, -+ BFD_RELOC_8_PLT_PCREL, -+ BFD_RELOC_64_PLTOFF, -+ BFD_RELOC_32_PLTOFF, -+ BFD_RELOC_16_PLTOFF, -+ BFD_RELOC_LO16_PLTOFF, -+ BFD_RELOC_HI16_PLTOFF, -+ BFD_RELOC_HI16_S_PLTOFF, -+ BFD_RELOC_8_PLTOFF, -+ -+/* Relocations used by 68K ELF. */ -+ BFD_RELOC_68K_GLOB_DAT, -+ BFD_RELOC_68K_JMP_SLOT, -+ BFD_RELOC_68K_RELATIVE, -+ -+/* Linkage-table relative. */ -+ BFD_RELOC_32_BASEREL, -+ BFD_RELOC_16_BASEREL, -+ BFD_RELOC_LO16_BASEREL, -+ BFD_RELOC_HI16_BASEREL, -+ BFD_RELOC_HI16_S_BASEREL, -+ BFD_RELOC_8_BASEREL, -+ BFD_RELOC_RVA, -+ -+/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */ -+ BFD_RELOC_8_FFnn, -+ -+/* These PC-relative relocations are stored as word displacements -- -+i.e., byte displacements shifted right two bits. The 30-bit word -+displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the -+SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The -+signed 16-bit displacement is used on the MIPS, and the 23-bit -+displacement is used on the Alpha. */ -+ BFD_RELOC_32_PCREL_S2, -+ BFD_RELOC_16_PCREL_S2, -+ BFD_RELOC_23_PCREL_S2, -+ -+/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of -+the target word. These are used on the SPARC. */ -+ BFD_RELOC_HI22, -+ BFD_RELOC_LO10, -+ -+/* For systems that allocate a Global Pointer register, these are -+displacements off that register. These relocation types are -+handled specially, because the value the register will have is -+decided relatively late. */ -+ BFD_RELOC_GPREL16, -+ BFD_RELOC_GPREL32, -+ -+/* Reloc types used for i960/b.out. */ -+ BFD_RELOC_I960_CALLJ, -+ -+/* SPARC ELF relocations. There is probably some overlap with other -+relocation types already defined. */ -+ BFD_RELOC_NONE, -+ BFD_RELOC_SPARC_WDISP22, -+ BFD_RELOC_SPARC22, -+ BFD_RELOC_SPARC13, -+ BFD_RELOC_SPARC_GOT10, -+ BFD_RELOC_SPARC_GOT13, -+ BFD_RELOC_SPARC_GOT22, -+ BFD_RELOC_SPARC_PC10, -+ BFD_RELOC_SPARC_PC22, -+ BFD_RELOC_SPARC_WPLT30, -+ BFD_RELOC_SPARC_COPY, -+ BFD_RELOC_SPARC_GLOB_DAT, -+ BFD_RELOC_SPARC_JMP_SLOT, -+ BFD_RELOC_SPARC_RELATIVE, -+ BFD_RELOC_SPARC_UA16, -+ BFD_RELOC_SPARC_UA32, -+ BFD_RELOC_SPARC_UA64, -+ -+/* I think these are specific to SPARC a.out (e.g., Sun 4).
*/ -+ BFD_RELOC_SPARC_BASE13, -+ BFD_RELOC_SPARC_BASE22, -+ -+/* SPARC64 relocations */ -+#define BFD_RELOC_SPARC_64 BFD_RELOC_64 -+ BFD_RELOC_SPARC_10, -+ BFD_RELOC_SPARC_11, -+ BFD_RELOC_SPARC_OLO10, -+ BFD_RELOC_SPARC_HH22, -+ BFD_RELOC_SPARC_HM10, -+ BFD_RELOC_SPARC_LM22, -+ BFD_RELOC_SPARC_PC_HH22, -+ BFD_RELOC_SPARC_PC_HM10, -+ BFD_RELOC_SPARC_PC_LM22, -+ BFD_RELOC_SPARC_WDISP16, -+ BFD_RELOC_SPARC_WDISP19, -+ BFD_RELOC_SPARC_7, -+ BFD_RELOC_SPARC_6, -+ BFD_RELOC_SPARC_5, -+#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL -+ BFD_RELOC_SPARC_PLT32, -+ BFD_RELOC_SPARC_PLT64, -+ BFD_RELOC_SPARC_HIX22, -+ BFD_RELOC_SPARC_LOX10, -+ BFD_RELOC_SPARC_H44, -+ BFD_RELOC_SPARC_M44, -+ BFD_RELOC_SPARC_L44, -+ BFD_RELOC_SPARC_REGISTER, -+ -+/* SPARC little endian relocation */ -+ BFD_RELOC_SPARC_REV32, -+ -+/* SPARC TLS relocations */ -+ BFD_RELOC_SPARC_TLS_GD_HI22, -+ BFD_RELOC_SPARC_TLS_GD_LO10, -+ BFD_RELOC_SPARC_TLS_GD_ADD, -+ BFD_RELOC_SPARC_TLS_GD_CALL, -+ BFD_RELOC_SPARC_TLS_LDM_HI22, -+ BFD_RELOC_SPARC_TLS_LDM_LO10, -+ BFD_RELOC_SPARC_TLS_LDM_ADD, -+ BFD_RELOC_SPARC_TLS_LDM_CALL, -+ BFD_RELOC_SPARC_TLS_LDO_HIX22, -+ BFD_RELOC_SPARC_TLS_LDO_LOX10, -+ BFD_RELOC_SPARC_TLS_LDO_ADD, -+ BFD_RELOC_SPARC_TLS_IE_HI22, -+ BFD_RELOC_SPARC_TLS_IE_LO10, -+ BFD_RELOC_SPARC_TLS_IE_LD, -+ BFD_RELOC_SPARC_TLS_IE_LDX, -+ BFD_RELOC_SPARC_TLS_IE_ADD, -+ BFD_RELOC_SPARC_TLS_LE_HIX22, -+ BFD_RELOC_SPARC_TLS_LE_LOX10, -+ BFD_RELOC_SPARC_TLS_DTPMOD32, -+ BFD_RELOC_SPARC_TLS_DTPMOD64, -+ BFD_RELOC_SPARC_TLS_DTPOFF32, -+ BFD_RELOC_SPARC_TLS_DTPOFF64, -+ BFD_RELOC_SPARC_TLS_TPOFF32, -+ BFD_RELOC_SPARC_TLS_TPOFF64, -+ -+/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or -+"addend" in some special way. -+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when -+writing; when reading, it will be the absolute section symbol. The -+addend is the displacement in bytes of the "lda" instruction from -+the "ldah" instruction (which is at the address of this reloc). */ -+ BFD_RELOC_ALPHA_GPDISP_HI16, -+ -+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as -+with GPDISP_HI16 relocs. The addend is ignored when writing the -+relocations out, and is filled in with the file's GP value on -+reading, for convenience. */ -+ BFD_RELOC_ALPHA_GPDISP_LO16, -+ -+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16 -+relocation except that there is no accompanying GPDISP_LO16 -+relocation. */ -+ BFD_RELOC_ALPHA_GPDISP, -+ -+/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference; -+the assembler turns it into a LDQ instruction to load the address of -+the symbol, and then fills in a register in the real instruction. -+ -+The LITERAL reloc, at the LDQ instruction, refers to the .lita -+section symbol. The addend is ignored when writing, but is filled -+in with the file's GP value on reading, for convenience, as with the -+GPDISP_LO16 reloc. -+ -+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16. -+It should refer to the symbol to be referenced, as with 16_GOTOFF, -+but it generates output not based on the position within the .got -+section, but relative to the GP value chosen for the file during the -+final link stage. -+ -+The LITUSE reloc, on the instruction using the loaded address, gives -+information to the linker that it might be able to use to optimize -+away some literal section references. 
The symbol is ignored (read -+as the absolute section symbol), and the "addend" indicates the type -+of instruction using the register: -+1 - "memory" fmt insn -+2 - byte-manipulation (byte offset reg) -+3 - jsr (target of branch) */ -+ BFD_RELOC_ALPHA_LITERAL, -+ BFD_RELOC_ALPHA_ELF_LITERAL, -+ BFD_RELOC_ALPHA_LITUSE, -+ -+/* The HINT relocation indicates a value that should be filled into the -+"hint" field of a jmp/jsr/ret instruction, for possible branch- -+prediction logic which may be provided on some processors. */ -+ BFD_RELOC_ALPHA_HINT, -+ -+/* The LINKAGE relocation outputs a linkage pair in the object file, -+which is filled by the linker. */ -+ BFD_RELOC_ALPHA_LINKAGE, -+ -+/* The CODEADDR relocation outputs a STO_CA in the object file, -+which is filled by the linker. */ -+ BFD_RELOC_ALPHA_CODEADDR, -+ -+/* The GPREL_HI/LO relocations together form a 32-bit offset from the -+GP register. */ -+ BFD_RELOC_ALPHA_GPREL_HI16, -+ BFD_RELOC_ALPHA_GPREL_LO16, -+ -+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must -+share a common GP, and the target address is adjusted for -+STO_ALPHA_STD_GPLOAD. */ -+ BFD_RELOC_ALPHA_BRSGP, -+ -+/* Alpha thread-local storage relocations. */ -+ BFD_RELOC_ALPHA_TLSGD, -+ BFD_RELOC_ALPHA_TLSLDM, -+ BFD_RELOC_ALPHA_DTPMOD64, -+ BFD_RELOC_ALPHA_GOTDTPREL16, -+ BFD_RELOC_ALPHA_DTPREL64, -+ BFD_RELOC_ALPHA_DTPREL_HI16, -+ BFD_RELOC_ALPHA_DTPREL_LO16, -+ BFD_RELOC_ALPHA_DTPREL16, -+ BFD_RELOC_ALPHA_GOTTPREL16, -+ BFD_RELOC_ALPHA_TPREL64, -+ BFD_RELOC_ALPHA_TPREL_HI16, -+ BFD_RELOC_ALPHA_TPREL_LO16, -+ BFD_RELOC_ALPHA_TPREL16, -+ -+/* Bits 27..2 of the relocation address shifted right 2 bits; -+simple reloc otherwise. */ -+ BFD_RELOC_MIPS_JMP, -+ -+/* The MIPS16 jump instruction. */ -+ BFD_RELOC_MIPS16_JMP, -+ -+/* MIPS16 GP relative reloc. */ -+ BFD_RELOC_MIPS16_GPREL, -+ -+/* High 16 bits of 32-bit value; simple reloc. */ -+ BFD_RELOC_HI16, -+ -+/* High 16 bits of 32-bit value but the low 16 bits will be sign -+extended and added to form the final result. If the low 16 -+bits form a negative number, we need to add one to the high value -+to compensate for the borrow when the low bits are added. */ -+ BFD_RELOC_HI16_S, -+ -+/* Low 16 bits. */ -+ BFD_RELOC_LO16, -+ -+/* High 16 bits of 32-bit pc-relative value */ -+ BFD_RELOC_HI16_PCREL, -+ -+/* High 16 bits of 32-bit pc-relative value, adjusted */ -+ BFD_RELOC_HI16_S_PCREL, -+ -+/* Low 16 bits of pc-relative value */ -+ BFD_RELOC_LO16_PCREL, -+ -+/* MIPS16 high 16 bits of 32-bit value. */ -+ BFD_RELOC_MIPS16_HI16, -+ -+/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign -+extended and added to form the final result. If the low 16 -+bits form a negative number, we need to add one to the high value -+to compensate for the borrow when the low bits are added. */ -+ BFD_RELOC_MIPS16_HI16_S, -+ -+/* MIPS16 low 16 bits. */ -+ BFD_RELOC_MIPS16_LO16, -+ -+/* Relocation against a MIPS literal section. */ -+ BFD_RELOC_MIPS_LITERAL, -+ -+/* MIPS ELF relocations. 
*/ -+ BFD_RELOC_MIPS_GOT16, -+ BFD_RELOC_MIPS_CALL16, -+ BFD_RELOC_MIPS_GOT_HI16, -+ BFD_RELOC_MIPS_GOT_LO16, -+ BFD_RELOC_MIPS_CALL_HI16, -+ BFD_RELOC_MIPS_CALL_LO16, -+ BFD_RELOC_MIPS_SUB, -+ BFD_RELOC_MIPS_GOT_PAGE, -+ BFD_RELOC_MIPS_GOT_OFST, -+ BFD_RELOC_MIPS_GOT_DISP, -+ BFD_RELOC_MIPS_SHIFT5, -+ BFD_RELOC_MIPS_SHIFT6, -+ BFD_RELOC_MIPS_INSERT_A, -+ BFD_RELOC_MIPS_INSERT_B, -+ BFD_RELOC_MIPS_DELETE, -+ BFD_RELOC_MIPS_HIGHEST, -+ BFD_RELOC_MIPS_HIGHER, -+ BFD_RELOC_MIPS_SCN_DISP, -+ BFD_RELOC_MIPS_REL16, -+ BFD_RELOC_MIPS_RELGOT, -+ BFD_RELOC_MIPS_JALR, -+ BFD_RELOC_MIPS_TLS_DTPMOD32, -+ BFD_RELOC_MIPS_TLS_DTPREL32, -+ BFD_RELOC_MIPS_TLS_DTPMOD64, -+ BFD_RELOC_MIPS_TLS_DTPREL64, -+ BFD_RELOC_MIPS_TLS_GD, -+ BFD_RELOC_MIPS_TLS_LDM, -+ BFD_RELOC_MIPS_TLS_DTPREL_HI16, -+ BFD_RELOC_MIPS_TLS_DTPREL_LO16, -+ BFD_RELOC_MIPS_TLS_GOTTPREL, -+ BFD_RELOC_MIPS_TLS_TPREL32, -+ BFD_RELOC_MIPS_TLS_TPREL64, -+ BFD_RELOC_MIPS_TLS_TPREL_HI16, -+ BFD_RELOC_MIPS_TLS_TPREL_LO16, -+ -+ -+/* Fujitsu Frv Relocations. */ -+ BFD_RELOC_FRV_LABEL16, -+ BFD_RELOC_FRV_LABEL24, -+ BFD_RELOC_FRV_LO16, -+ BFD_RELOC_FRV_HI16, -+ BFD_RELOC_FRV_GPREL12, -+ BFD_RELOC_FRV_GPRELU12, -+ BFD_RELOC_FRV_GPREL32, -+ BFD_RELOC_FRV_GPRELHI, -+ BFD_RELOC_FRV_GPRELLO, -+ BFD_RELOC_FRV_GOT12, -+ BFD_RELOC_FRV_GOTHI, -+ BFD_RELOC_FRV_GOTLO, -+ BFD_RELOC_FRV_FUNCDESC, -+ BFD_RELOC_FRV_FUNCDESC_GOT12, -+ BFD_RELOC_FRV_FUNCDESC_GOTHI, -+ BFD_RELOC_FRV_FUNCDESC_GOTLO, -+ BFD_RELOC_FRV_FUNCDESC_VALUE, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFF12, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI, -+ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO, -+ BFD_RELOC_FRV_GOTOFF12, -+ BFD_RELOC_FRV_GOTOFFHI, -+ BFD_RELOC_FRV_GOTOFFLO, -+ BFD_RELOC_FRV_GETTLSOFF, -+ BFD_RELOC_FRV_TLSDESC_VALUE, -+ BFD_RELOC_FRV_GOTTLSDESC12, -+ BFD_RELOC_FRV_GOTTLSDESCHI, -+ BFD_RELOC_FRV_GOTTLSDESCLO, -+ BFD_RELOC_FRV_TLSMOFF12, -+ BFD_RELOC_FRV_TLSMOFFHI, -+ BFD_RELOC_FRV_TLSMOFFLO, -+ BFD_RELOC_FRV_GOTTLSOFF12, -+ BFD_RELOC_FRV_GOTTLSOFFHI, -+ BFD_RELOC_FRV_GOTTLSOFFLO, -+ BFD_RELOC_FRV_TLSOFF, -+ BFD_RELOC_FRV_TLSDESC_RELAX, -+ BFD_RELOC_FRV_GETTLSOFF_RELAX, -+ BFD_RELOC_FRV_TLSOFF_RELAX, -+ BFD_RELOC_FRV_TLSMOFF, -+ -+ -+/* This is a 24bit GOT-relative reloc for the mn10300. */ -+ BFD_RELOC_MN10300_GOTOFF24, -+ -+/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT32, -+ -+/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT24, -+ -+/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes -+in the instruction. */ -+ BFD_RELOC_MN10300_GOT16, -+ -+/* Copy symbol at runtime. */ -+ BFD_RELOC_MN10300_COPY, -+ -+/* Create GOT entry. */ -+ BFD_RELOC_MN10300_GLOB_DAT, -+ -+/* Create PLT entry. */ -+ BFD_RELOC_MN10300_JMP_SLOT, -+ -+/* Adjust by program base. 
*/ -+ BFD_RELOC_MN10300_RELATIVE, -+ -+ -+/* i386/elf relocations */ -+ BFD_RELOC_386_GOT32, -+ BFD_RELOC_386_PLT32, -+ BFD_RELOC_386_COPY, -+ BFD_RELOC_386_GLOB_DAT, -+ BFD_RELOC_386_JUMP_SLOT, -+ BFD_RELOC_386_RELATIVE, -+ BFD_RELOC_386_GOTOFF, -+ BFD_RELOC_386_GOTPC, -+ BFD_RELOC_386_TLS_TPOFF, -+ BFD_RELOC_386_TLS_IE, -+ BFD_RELOC_386_TLS_GOTIE, -+ BFD_RELOC_386_TLS_LE, -+ BFD_RELOC_386_TLS_GD, -+ BFD_RELOC_386_TLS_LDM, -+ BFD_RELOC_386_TLS_LDO_32, -+ BFD_RELOC_386_TLS_IE_32, -+ BFD_RELOC_386_TLS_LE_32, -+ BFD_RELOC_386_TLS_DTPMOD32, -+ BFD_RELOC_386_TLS_DTPOFF32, -+ BFD_RELOC_386_TLS_TPOFF32, -+ -+/* x86-64/elf relocations */ -+ BFD_RELOC_X86_64_GOT32, -+ BFD_RELOC_X86_64_PLT32, -+ BFD_RELOC_X86_64_COPY, -+ BFD_RELOC_X86_64_GLOB_DAT, -+ BFD_RELOC_X86_64_JUMP_SLOT, -+ BFD_RELOC_X86_64_RELATIVE, -+ BFD_RELOC_X86_64_GOTPCREL, -+ BFD_RELOC_X86_64_32S, -+ BFD_RELOC_X86_64_DTPMOD64, -+ BFD_RELOC_X86_64_DTPOFF64, -+ BFD_RELOC_X86_64_TPOFF64, -+ BFD_RELOC_X86_64_TLSGD, -+ BFD_RELOC_X86_64_TLSLD, -+ BFD_RELOC_X86_64_DTPOFF32, -+ BFD_RELOC_X86_64_GOTTPOFF, -+ BFD_RELOC_X86_64_TPOFF32, -+ BFD_RELOC_X86_64_GOTOFF64, -+ BFD_RELOC_X86_64_GOTPC32, -+ -+/* ns32k relocations */ -+ BFD_RELOC_NS32K_IMM_8, -+ BFD_RELOC_NS32K_IMM_16, -+ BFD_RELOC_NS32K_IMM_32, -+ BFD_RELOC_NS32K_IMM_8_PCREL, -+ BFD_RELOC_NS32K_IMM_16_PCREL, -+ BFD_RELOC_NS32K_IMM_32_PCREL, -+ BFD_RELOC_NS32K_DISP_8, -+ BFD_RELOC_NS32K_DISP_16, -+ BFD_RELOC_NS32K_DISP_32, -+ BFD_RELOC_NS32K_DISP_8_PCREL, -+ BFD_RELOC_NS32K_DISP_16_PCREL, -+ BFD_RELOC_NS32K_DISP_32_PCREL, -+ -+/* PDP11 relocations */ -+ BFD_RELOC_PDP11_DISP_8_PCREL, -+ BFD_RELOC_PDP11_DISP_6_PCREL, -+ -+/* Picojava relocs. Not all of these appear in object files. */ -+ BFD_RELOC_PJ_CODE_HI16, -+ BFD_RELOC_PJ_CODE_LO16, -+ BFD_RELOC_PJ_CODE_DIR16, -+ BFD_RELOC_PJ_CODE_DIR32, -+ BFD_RELOC_PJ_CODE_REL16, -+ BFD_RELOC_PJ_CODE_REL32, -+ -+/* Power(rs6000) and PowerPC relocations. */ -+ BFD_RELOC_PPC_B26, -+ BFD_RELOC_PPC_BA26, -+ BFD_RELOC_PPC_TOC16, -+ BFD_RELOC_PPC_B16, -+ BFD_RELOC_PPC_B16_BRTAKEN, -+ BFD_RELOC_PPC_B16_BRNTAKEN, -+ BFD_RELOC_PPC_BA16, -+ BFD_RELOC_PPC_BA16_BRTAKEN, -+ BFD_RELOC_PPC_BA16_BRNTAKEN, -+ BFD_RELOC_PPC_COPY, -+ BFD_RELOC_PPC_GLOB_DAT, -+ BFD_RELOC_PPC_JMP_SLOT, -+ BFD_RELOC_PPC_RELATIVE, -+ BFD_RELOC_PPC_LOCAL24PC, -+ BFD_RELOC_PPC_EMB_NADDR32, -+ BFD_RELOC_PPC_EMB_NADDR16, -+ BFD_RELOC_PPC_EMB_NADDR16_LO, -+ BFD_RELOC_PPC_EMB_NADDR16_HI, -+ BFD_RELOC_PPC_EMB_NADDR16_HA, -+ BFD_RELOC_PPC_EMB_SDAI16, -+ BFD_RELOC_PPC_EMB_SDA2I16, -+ BFD_RELOC_PPC_EMB_SDA2REL, -+ BFD_RELOC_PPC_EMB_SDA21, -+ BFD_RELOC_PPC_EMB_MRKREF, -+ BFD_RELOC_PPC_EMB_RELSEC16, -+ BFD_RELOC_PPC_EMB_RELST_LO, -+ BFD_RELOC_PPC_EMB_RELST_HI, -+ BFD_RELOC_PPC_EMB_RELST_HA, -+ BFD_RELOC_PPC_EMB_BIT_FLD, -+ BFD_RELOC_PPC_EMB_RELSDA, -+ BFD_RELOC_PPC64_HIGHER, -+ BFD_RELOC_PPC64_HIGHER_S, -+ BFD_RELOC_PPC64_HIGHEST, -+ BFD_RELOC_PPC64_HIGHEST_S, -+ BFD_RELOC_PPC64_TOC16_LO, -+ BFD_RELOC_PPC64_TOC16_HI, -+ BFD_RELOC_PPC64_TOC16_HA, -+ BFD_RELOC_PPC64_TOC, -+ BFD_RELOC_PPC64_PLTGOT16, -+ BFD_RELOC_PPC64_PLTGOT16_LO, -+ BFD_RELOC_PPC64_PLTGOT16_HI, -+ BFD_RELOC_PPC64_PLTGOT16_HA, -+ BFD_RELOC_PPC64_ADDR16_DS, -+ BFD_RELOC_PPC64_ADDR16_LO_DS, -+ BFD_RELOC_PPC64_GOT16_DS, -+ BFD_RELOC_PPC64_GOT16_LO_DS, -+ BFD_RELOC_PPC64_PLT16_LO_DS, -+ BFD_RELOC_PPC64_SECTOFF_DS, -+ BFD_RELOC_PPC64_SECTOFF_LO_DS, -+ BFD_RELOC_PPC64_TOC16_DS, -+ BFD_RELOC_PPC64_TOC16_LO_DS, -+ BFD_RELOC_PPC64_PLTGOT16_DS, -+ BFD_RELOC_PPC64_PLTGOT16_LO_DS, -+ -+/* PowerPC and PowerPC64 thread-local storage relocations. 
*/ -+ BFD_RELOC_PPC_TLS, -+ BFD_RELOC_PPC_DTPMOD, -+ BFD_RELOC_PPC_TPREL16, -+ BFD_RELOC_PPC_TPREL16_LO, -+ BFD_RELOC_PPC_TPREL16_HI, -+ BFD_RELOC_PPC_TPREL16_HA, -+ BFD_RELOC_PPC_TPREL, -+ BFD_RELOC_PPC_DTPREL16, -+ BFD_RELOC_PPC_DTPREL16_LO, -+ BFD_RELOC_PPC_DTPREL16_HI, -+ BFD_RELOC_PPC_DTPREL16_HA, -+ BFD_RELOC_PPC_DTPREL, -+ BFD_RELOC_PPC_GOT_TLSGD16, -+ BFD_RELOC_PPC_GOT_TLSGD16_LO, -+ BFD_RELOC_PPC_GOT_TLSGD16_HI, -+ BFD_RELOC_PPC_GOT_TLSGD16_HA, -+ BFD_RELOC_PPC_GOT_TLSLD16, -+ BFD_RELOC_PPC_GOT_TLSLD16_LO, -+ BFD_RELOC_PPC_GOT_TLSLD16_HI, -+ BFD_RELOC_PPC_GOT_TLSLD16_HA, -+ BFD_RELOC_PPC_GOT_TPREL16, -+ BFD_RELOC_PPC_GOT_TPREL16_LO, -+ BFD_RELOC_PPC_GOT_TPREL16_HI, -+ BFD_RELOC_PPC_GOT_TPREL16_HA, -+ BFD_RELOC_PPC_GOT_DTPREL16, -+ BFD_RELOC_PPC_GOT_DTPREL16_LO, -+ BFD_RELOC_PPC_GOT_DTPREL16_HI, -+ BFD_RELOC_PPC_GOT_DTPREL16_HA, -+ BFD_RELOC_PPC64_TPREL16_DS, -+ BFD_RELOC_PPC64_TPREL16_LO_DS, -+ BFD_RELOC_PPC64_TPREL16_HIGHER, -+ BFD_RELOC_PPC64_TPREL16_HIGHERA, -+ BFD_RELOC_PPC64_TPREL16_HIGHEST, -+ BFD_RELOC_PPC64_TPREL16_HIGHESTA, -+ BFD_RELOC_PPC64_DTPREL16_DS, -+ BFD_RELOC_PPC64_DTPREL16_LO_DS, -+ BFD_RELOC_PPC64_DTPREL16_HIGHER, -+ BFD_RELOC_PPC64_DTPREL16_HIGHERA, -+ BFD_RELOC_PPC64_DTPREL16_HIGHEST, -+ BFD_RELOC_PPC64_DTPREL16_HIGHESTA, -+ -+/* IBM 370/390 relocations */ -+ BFD_RELOC_I370_D12, -+ -+/* The type of reloc used to build a constructor table - at the moment -+probably a 32 bit wide absolute relocation, but the target can choose. -+It generally does map to one of the other relocation types. */ -+ BFD_RELOC_CTOR, -+ -+/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are -+not stored in the instruction. */ -+ BFD_RELOC_ARM_PCREL_BRANCH, -+ -+/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is -+not stored in the instruction. The 2nd lowest bit comes from a 1 bit -+field in the instruction. */ -+ BFD_RELOC_ARM_PCREL_BLX, -+ -+/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is -+not stored in the instruction. The 2nd lowest bit comes from a 1 bit -+field in the instruction. */ -+ BFD_RELOC_THUMB_PCREL_BLX, -+ -+/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches. -+The lowest bit must be zero and is not stored in the instruction. -+Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an -+"nn" one smaller in all cases. Note further that BRANCH23 -+corresponds to R_ARM_THM_CALL. */ -+ BFD_RELOC_THUMB_PCREL_BRANCH7, -+ BFD_RELOC_THUMB_PCREL_BRANCH9, -+ BFD_RELOC_THUMB_PCREL_BRANCH12, -+ BFD_RELOC_THUMB_PCREL_BRANCH20, -+ BFD_RELOC_THUMB_PCREL_BRANCH23, -+ BFD_RELOC_THUMB_PCREL_BRANCH25, -+ -+/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */ -+ BFD_RELOC_ARM_OFFSET_IMM, -+ -+/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */ -+ BFD_RELOC_ARM_THUMB_OFFSET, -+ -+/* Pc-relative or absolute relocation depending on target. Used for -+entries in .init_array sections. */ -+ BFD_RELOC_ARM_TARGET1, -+ -+/* Read-only segment base relative address. */ -+ BFD_RELOC_ARM_ROSEGREL32, -+ -+/* Data segment base relative address. */ -+ BFD_RELOC_ARM_SBREL32, -+ -+/* This reloc is used for references to RTTI data from exception handling -+tables. The actual definition depends on the target. It may be a -+pc-relative or some form of GOT-indirect relocation. */ -+ BFD_RELOC_ARM_TARGET2, -+ -+/* 31-bit PC relative address. */ -+ BFD_RELOC_ARM_PREL31, -+ -+/* Relocations for setting up GOTs and PLTs for shared libraries. 
*/ -+ BFD_RELOC_ARM_JUMP_SLOT, -+ BFD_RELOC_ARM_GLOB_DAT, -+ BFD_RELOC_ARM_GOT32, -+ BFD_RELOC_ARM_PLT32, -+ BFD_RELOC_ARM_RELATIVE, -+ BFD_RELOC_ARM_GOTOFF, -+ BFD_RELOC_ARM_GOTPC, -+ -+/* ARM thread-local storage relocations. */ -+ BFD_RELOC_ARM_TLS_GD32, -+ BFD_RELOC_ARM_TLS_LDO32, -+ BFD_RELOC_ARM_TLS_LDM32, -+ BFD_RELOC_ARM_TLS_DTPOFF32, -+ BFD_RELOC_ARM_TLS_DTPMOD32, -+ BFD_RELOC_ARM_TLS_TPOFF32, -+ BFD_RELOC_ARM_TLS_IE32, -+ BFD_RELOC_ARM_TLS_LE32, -+ -+/* These relocs are only used within the ARM assembler. They are not -+(at present) written to any object files. */ -+ BFD_RELOC_ARM_IMMEDIATE, -+ BFD_RELOC_ARM_ADRL_IMMEDIATE, -+ BFD_RELOC_ARM_T32_IMMEDIATE, -+ BFD_RELOC_ARM_SHIFT_IMM, -+ BFD_RELOC_ARM_SMI, -+ BFD_RELOC_ARM_SWI, -+ BFD_RELOC_ARM_MULTI, -+ BFD_RELOC_ARM_CP_OFF_IMM, -+ BFD_RELOC_ARM_CP_OFF_IMM_S2, -+ BFD_RELOC_ARM_ADR_IMM, -+ BFD_RELOC_ARM_LDR_IMM, -+ BFD_RELOC_ARM_LITERAL, -+ BFD_RELOC_ARM_IN_POOL, -+ BFD_RELOC_ARM_OFFSET_IMM8, -+ BFD_RELOC_ARM_T32_OFFSET_U8, -+ BFD_RELOC_ARM_T32_OFFSET_IMM, -+ BFD_RELOC_ARM_HWLITERAL, -+ BFD_RELOC_ARM_THUMB_ADD, -+ BFD_RELOC_ARM_THUMB_IMM, -+ BFD_RELOC_ARM_THUMB_SHIFT, -+ -+/* Renesas / SuperH SH relocs. Not all of these appear in object files. */ -+ BFD_RELOC_SH_PCDISP8BY2, -+ BFD_RELOC_SH_PCDISP12BY2, -+ BFD_RELOC_SH_IMM3, -+ BFD_RELOC_SH_IMM3U, -+ BFD_RELOC_SH_DISP12, -+ BFD_RELOC_SH_DISP12BY2, -+ BFD_RELOC_SH_DISP12BY4, -+ BFD_RELOC_SH_DISP12BY8, -+ BFD_RELOC_SH_DISP20, -+ BFD_RELOC_SH_DISP20BY8, -+ BFD_RELOC_SH_IMM4, -+ BFD_RELOC_SH_IMM4BY2, -+ BFD_RELOC_SH_IMM4BY4, -+ BFD_RELOC_SH_IMM8, -+ BFD_RELOC_SH_IMM8BY2, -+ BFD_RELOC_SH_IMM8BY4, -+ BFD_RELOC_SH_PCRELIMM8BY2, -+ BFD_RELOC_SH_PCRELIMM8BY4, -+ BFD_RELOC_SH_SWITCH16, -+ BFD_RELOC_SH_SWITCH32, -+ BFD_RELOC_SH_USES, -+ BFD_RELOC_SH_COUNT, -+ BFD_RELOC_SH_ALIGN, -+ BFD_RELOC_SH_CODE, -+ BFD_RELOC_SH_DATA, -+ BFD_RELOC_SH_LABEL, -+ BFD_RELOC_SH_LOOP_START, -+ BFD_RELOC_SH_LOOP_END, -+ BFD_RELOC_SH_COPY, -+ BFD_RELOC_SH_GLOB_DAT, -+ BFD_RELOC_SH_JMP_SLOT, -+ BFD_RELOC_SH_RELATIVE, -+ BFD_RELOC_SH_GOTPC, -+ BFD_RELOC_SH_GOT_LOW16, -+ BFD_RELOC_SH_GOT_MEDLOW16, -+ BFD_RELOC_SH_GOT_MEDHI16, -+ BFD_RELOC_SH_GOT_HI16, -+ BFD_RELOC_SH_GOTPLT_LOW16, -+ BFD_RELOC_SH_GOTPLT_MEDLOW16, -+ BFD_RELOC_SH_GOTPLT_MEDHI16, -+ BFD_RELOC_SH_GOTPLT_HI16, -+ BFD_RELOC_SH_PLT_LOW16, -+ BFD_RELOC_SH_PLT_MEDLOW16, -+ BFD_RELOC_SH_PLT_MEDHI16, -+ BFD_RELOC_SH_PLT_HI16, -+ BFD_RELOC_SH_GOTOFF_LOW16, -+ BFD_RELOC_SH_GOTOFF_MEDLOW16, -+ BFD_RELOC_SH_GOTOFF_MEDHI16, -+ BFD_RELOC_SH_GOTOFF_HI16, -+ BFD_RELOC_SH_GOTPC_LOW16, -+ BFD_RELOC_SH_GOTPC_MEDLOW16, -+ BFD_RELOC_SH_GOTPC_MEDHI16, -+ BFD_RELOC_SH_GOTPC_HI16, -+ BFD_RELOC_SH_COPY64, -+ BFD_RELOC_SH_GLOB_DAT64, -+ BFD_RELOC_SH_JMP_SLOT64, -+ BFD_RELOC_SH_RELATIVE64, -+ BFD_RELOC_SH_GOT10BY4, -+ BFD_RELOC_SH_GOT10BY8, -+ BFD_RELOC_SH_GOTPLT10BY4, -+ BFD_RELOC_SH_GOTPLT10BY8, -+ BFD_RELOC_SH_GOTPLT32, -+ BFD_RELOC_SH_SHMEDIA_CODE, -+ BFD_RELOC_SH_IMMU5, -+ BFD_RELOC_SH_IMMS6, -+ BFD_RELOC_SH_IMMS6BY32, -+ BFD_RELOC_SH_IMMU6, -+ BFD_RELOC_SH_IMMS10, -+ BFD_RELOC_SH_IMMS10BY2, -+ BFD_RELOC_SH_IMMS10BY4, -+ BFD_RELOC_SH_IMMS10BY8, -+ BFD_RELOC_SH_IMMS16, -+ BFD_RELOC_SH_IMMU16, -+ BFD_RELOC_SH_IMM_LOW16, -+ BFD_RELOC_SH_IMM_LOW16_PCREL, -+ BFD_RELOC_SH_IMM_MEDLOW16, -+ BFD_RELOC_SH_IMM_MEDLOW16_PCREL, -+ BFD_RELOC_SH_IMM_MEDHI16, -+ BFD_RELOC_SH_IMM_MEDHI16_PCREL, -+ BFD_RELOC_SH_IMM_HI16, -+ BFD_RELOC_SH_IMM_HI16_PCREL, -+ BFD_RELOC_SH_PT_16, -+ BFD_RELOC_SH_TLS_GD_32, -+ BFD_RELOC_SH_TLS_LD_32, -+ BFD_RELOC_SH_TLS_LDO_32, -+ BFD_RELOC_SH_TLS_IE_32, -+ 
BFD_RELOC_SH_TLS_LE_32, -+ BFD_RELOC_SH_TLS_DTPMOD32, -+ BFD_RELOC_SH_TLS_DTPOFF32, -+ BFD_RELOC_SH_TLS_TPOFF32, -+ -+/* ARC Cores relocs. -+ARC 22 bit pc-relative branch. The lowest two bits must be zero and are -+not stored in the instruction. The high 20 bits are installed in bits 26 -+through 7 of the instruction. */ -+ BFD_RELOC_ARC_B22_PCREL, -+ -+/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not -+stored in the instruction. The high 24 bits are installed in bits 23 -+through 0. */ -+ BFD_RELOC_ARC_B26, -+ -+/* Mitsubishi D10V relocs. -+This is a 10-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_10_PCREL_R, -+ -+/* Mitsubishi D10V relocs. -+This is a 10-bit reloc with the right 2 bits -+assumed to be 0. This is the same as the previous reloc -+except it is in the left container, i.e., -+shifted left 15 bits. */ -+ BFD_RELOC_D10V_10_PCREL_L, -+ -+/* This is an 18-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_18, -+ -+/* This is an 18-bit reloc with the right 2 bits -+assumed to be 0. */ -+ BFD_RELOC_D10V_18_PCREL, -+ -+/* Mitsubishi D30V relocs. -+This is a 6-bit absolute reloc. */ -+ BFD_RELOC_D30V_6, -+ -+/* This is a 6-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_9_PCREL, -+ -+/* This is a 6-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_9_PCREL_R, -+ -+/* This is a 12-bit absolute reloc with the -+right 3 bitsassumed to be 0. */ -+ BFD_RELOC_D30V_15, -+ -+/* This is a 12-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_15_PCREL, -+ -+/* This is a 12-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_15_PCREL_R, -+ -+/* This is an 18-bit absolute reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_21, -+ -+/* This is an 18-bit pc-relative reloc with -+the right 3 bits assumed to be 0. */ -+ BFD_RELOC_D30V_21_PCREL, -+ -+/* This is an 18-bit pc-relative reloc with -+the right 3 bits assumed to be 0. Same -+as the previous reloc but on the right side -+of the container. */ -+ BFD_RELOC_D30V_21_PCREL_R, -+ -+/* This is a 32-bit absolute reloc. */ -+ BFD_RELOC_D30V_32, -+ -+/* This is a 32-bit pc-relative reloc. */ -+ BFD_RELOC_D30V_32_PCREL, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_HI16_S, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_LO16, -+ -+/* DLX relocs */ -+ BFD_RELOC_DLX_JMP26, -+ -+/* Renesas M16C/M32C Relocations. */ -+ BFD_RELOC_M16C_8_PCREL8, -+ BFD_RELOC_M16C_16_PCREL8, -+ BFD_RELOC_M16C_8_PCREL16, -+ BFD_RELOC_M16C_8_ELABEL24, -+ BFD_RELOC_M16C_8_ABS16, -+ BFD_RELOC_M16C_16_ABS16, -+ BFD_RELOC_M16C_16_ABS24, -+ BFD_RELOC_M16C_16_ABS32, -+ BFD_RELOC_M16C_24_ABS16, -+ BFD_RELOC_M16C_24_ABS24, -+ BFD_RELOC_M16C_24_ABS32, -+ BFD_RELOC_M16C_32_ABS16, -+ BFD_RELOC_M16C_32_ABS24, -+ BFD_RELOC_M16C_32_ABS32, -+ BFD_RELOC_M16C_40_ABS16, -+ BFD_RELOC_M16C_40_ABS24, -+ BFD_RELOC_M16C_40_ABS32, -+ -+/* Renesas M32R (formerly Mitsubishi M32R) relocs. -+This is a 24 bit absolute address. */ -+ BFD_RELOC_M32R_24, -+ -+/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */ -+ BFD_RELOC_M32R_10_PCREL, -+ -+/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ -+ BFD_RELOC_M32R_18_PCREL, -+ -+/* This is a 26-bit reloc with the right 2 bits assumed to be 0. 
*/ -+ BFD_RELOC_M32R_26_PCREL, -+ -+/* This is a 16-bit reloc containing the high 16 bits of an address -+used when the lower 16 bits are treated as unsigned. */ -+ BFD_RELOC_M32R_HI16_ULO, -+ -+/* This is a 16-bit reloc containing the high 16 bits of an address -+used when the lower 16 bits are treated as signed. */ -+ BFD_RELOC_M32R_HI16_SLO, -+ -+/* This is a 16-bit reloc containing the lower 16 bits of an address. */ -+ BFD_RELOC_M32R_LO16, -+ -+/* This is a 16-bit reloc containing the small data area offset for use in -+add3, load, and store instructions. */ -+ BFD_RELOC_M32R_SDA16, -+ -+/* For PIC. */ -+ BFD_RELOC_M32R_GOT24, -+ BFD_RELOC_M32R_26_PLTREL, -+ BFD_RELOC_M32R_COPY, -+ BFD_RELOC_M32R_GLOB_DAT, -+ BFD_RELOC_M32R_JMP_SLOT, -+ BFD_RELOC_M32R_RELATIVE, -+ BFD_RELOC_M32R_GOTOFF, -+ BFD_RELOC_M32R_GOTOFF_HI_ULO, -+ BFD_RELOC_M32R_GOTOFF_HI_SLO, -+ BFD_RELOC_M32R_GOTOFF_LO, -+ BFD_RELOC_M32R_GOTPC24, -+ BFD_RELOC_M32R_GOT16_HI_ULO, -+ BFD_RELOC_M32R_GOT16_HI_SLO, -+ BFD_RELOC_M32R_GOT16_LO, -+ BFD_RELOC_M32R_GOTPC_HI_ULO, -+ BFD_RELOC_M32R_GOTPC_HI_SLO, -+ BFD_RELOC_M32R_GOTPC_LO, -+ -+/* This is a 9-bit reloc */ -+ BFD_RELOC_V850_9_PCREL, -+ -+/* This is a 22-bit reloc */ -+ BFD_RELOC_V850_22_PCREL, -+ -+/* This is a 16 bit offset from the short data area pointer. */ -+ BFD_RELOC_V850_SDA_16_16_OFFSET, -+ -+/* This is a 16 bit offset (of which only 15 bits are used) from the -+short data area pointer. */ -+ BFD_RELOC_V850_SDA_15_16_OFFSET, -+ -+/* This is a 16 bit offset from the zero data area pointer. */ -+ BFD_RELOC_V850_ZDA_16_16_OFFSET, -+ -+/* This is a 16 bit offset (of which only 15 bits are used) from the -+zero data area pointer. */ -+ BFD_RELOC_V850_ZDA_15_16_OFFSET, -+ -+/* This is an 8 bit offset (of which only 6 bits are used) from the -+tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_6_8_OFFSET, -+ -+/* This is an 8bit offset (of which only 7 bits are used) from the tiny -+data area pointer. */ -+ BFD_RELOC_V850_TDA_7_8_OFFSET, -+ -+/* This is a 7 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_7_7_OFFSET, -+ -+/* This is a 16 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_16_16_OFFSET, -+ -+/* This is a 5 bit offset (of which only 4 bits are used) from the tiny -+data area pointer. */ -+ BFD_RELOC_V850_TDA_4_5_OFFSET, -+ -+/* This is a 4 bit offset from the tiny data area pointer. */ -+ BFD_RELOC_V850_TDA_4_4_OFFSET, -+ -+/* This is a 16 bit offset from the short data area pointer, with the -+bits placed non-contiguously in the instruction. */ -+ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET, -+ -+/* This is a 16 bit offset from the zero data area pointer, with the -+bits placed non-contiguously in the instruction. */ -+ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET, -+ -+/* This is a 6 bit offset from the call table base pointer. */ -+ BFD_RELOC_V850_CALLT_6_7_OFFSET, -+ -+/* This is a 16 bit offset from the call table base pointer. */ -+ BFD_RELOC_V850_CALLT_16_16_OFFSET, -+ -+/* Used for relaxing indirect function calls. */ -+ BFD_RELOC_V850_LONGCALL, -+ -+/* Used for relaxing indirect jumps. */ -+ BFD_RELOC_V850_LONGJUMP, -+ -+/* Used to maintain alignment whilst relaxing. */ -+ BFD_RELOC_V850_ALIGN, -+ -+/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu -+instructions. */ -+ BFD_RELOC_V850_LO16_SPLIT_OFFSET, -+ -+/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the -+instruction. 
*/ -+ BFD_RELOC_MN10300_32_PCREL, -+ -+/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the -+instruction. */ -+ BFD_RELOC_MN10300_16_PCREL, -+ -+/* This is a 8bit DP reloc for the tms320c30, where the most -+significant 8 bits of a 24 bit word are placed into the least -+significant 8 bits of the opcode. */ -+ BFD_RELOC_TIC30_LDP, -+ -+/* This is a 7bit reloc for the tms320c54x, where the least -+significant 7 bits of a 16 bit word are placed into the least -+significant 7 bits of the opcode. */ -+ BFD_RELOC_TIC54X_PARTLS7, -+ -+/* This is a 9bit DP reloc for the tms320c54x, where the most -+significant 9 bits of a 16 bit word are placed into the least -+significant 9 bits of the opcode. */ -+ BFD_RELOC_TIC54X_PARTMS9, -+ -+/* This is an extended address 23-bit reloc for the tms320c54x. */ -+ BFD_RELOC_TIC54X_23, -+ -+/* This is a 16-bit reloc for the tms320c54x, where the least -+significant 16 bits of a 23-bit extended address are placed into -+the opcode. */ -+ BFD_RELOC_TIC54X_16_OF_23, -+ -+/* This is a reloc for the tms320c54x, where the most -+significant 7 bits of a 23-bit extended address are placed into -+the opcode. */ -+ BFD_RELOC_TIC54X_MS7_OF_23, -+ -+/* This is a 48 bit reloc for the FR30 that stores 32 bits. */ -+ BFD_RELOC_FR30_48, -+ -+/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into -+two sections. */ -+ BFD_RELOC_FR30_20, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in -+4 bits. */ -+ BFD_RELOC_FR30_6_IN_4, -+ -+/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset -+into 8 bits. */ -+ BFD_RELOC_FR30_8_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset -+into 8 bits. */ -+ BFD_RELOC_FR30_9_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset -+into 8 bits. */ -+ BFD_RELOC_FR30_10_IN_8, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative -+short offset into 8 bits. */ -+ BFD_RELOC_FR30_9_PCREL, -+ -+/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative -+short offset into 11 bits. */ -+ BFD_RELOC_FR30_12_PCREL, -+ -+/* Motorola Mcore relocations. */ -+ BFD_RELOC_MCORE_PCREL_IMM8BY4, -+ BFD_RELOC_MCORE_PCREL_IMM11BY2, -+ BFD_RELOC_MCORE_PCREL_IMM4BY2, -+ BFD_RELOC_MCORE_PCREL_32, -+ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2, -+ BFD_RELOC_MCORE_RVA, -+ -+/* These are relocations for the GETA instruction. */ -+ BFD_RELOC_MMIX_GETA, -+ BFD_RELOC_MMIX_GETA_1, -+ BFD_RELOC_MMIX_GETA_2, -+ BFD_RELOC_MMIX_GETA_3, -+ -+/* These are relocations for a conditional branch instruction. */ -+ BFD_RELOC_MMIX_CBRANCH, -+ BFD_RELOC_MMIX_CBRANCH_J, -+ BFD_RELOC_MMIX_CBRANCH_1, -+ BFD_RELOC_MMIX_CBRANCH_2, -+ BFD_RELOC_MMIX_CBRANCH_3, -+ -+/* These are relocations for the PUSHJ instruction. */ -+ BFD_RELOC_MMIX_PUSHJ, -+ BFD_RELOC_MMIX_PUSHJ_1, -+ BFD_RELOC_MMIX_PUSHJ_2, -+ BFD_RELOC_MMIX_PUSHJ_3, -+ BFD_RELOC_MMIX_PUSHJ_STUBBABLE, -+ -+/* These are relocations for the JMP instruction. */ -+ BFD_RELOC_MMIX_JMP, -+ BFD_RELOC_MMIX_JMP_1, -+ BFD_RELOC_MMIX_JMP_2, -+ BFD_RELOC_MMIX_JMP_3, -+ -+/* This is a relocation for a relative address as in a GETA instruction or -+a branch. */ -+ BFD_RELOC_MMIX_ADDR19, -+ -+/* This is a relocation for a relative address as in a JMP instruction. */ -+ BFD_RELOC_MMIX_ADDR27, -+ -+/* This is a relocation for an instruction field that may be a general -+register or a value 0..255. 
*/ -+ BFD_RELOC_MMIX_REG_OR_BYTE, -+ -+/* This is a relocation for an instruction field that may be a general -+register. */ -+ BFD_RELOC_MMIX_REG, -+ -+/* This is a relocation for two instruction fields holding a register and -+an offset, the equivalent of the relocation. */ -+ BFD_RELOC_MMIX_BASE_PLUS_OFFSET, -+ -+/* This relocation is an assertion that the expression is not allocated as -+a global register. It does not modify contents. */ -+ BFD_RELOC_MMIX_LOCAL, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative -+short offset into 7 bits. */ -+ BFD_RELOC_AVR_7_PCREL, -+ -+/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative -+short offset into 12 bits. */ -+ BFD_RELOC_AVR_13_PCREL, -+ -+/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually -+program memory address) into 16 bits. */ -+ BFD_RELOC_AVR_16_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually -+data memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_LO8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit -+of data memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HI8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit -+of program memory address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HH8_LDI, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(usually data memory address) into 8 bit immediate value of SUBI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 8 bit of data memory address) into 8 bit immediate value of -+SUBI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(most high 8 bit of program memory address) into 8 bit immediate value -+of LDI or SUBI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually -+command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit -+of command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit -+of command address) into 8 bit immediate value of LDI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_PM, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(usually command address) into 8 bit immediate value of SUBI insn. */ -+ BFD_RELOC_AVR_LO8_LDI_PM_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 8 bit of 16 bit command address) into 8 bit immediate value -+of SUBI insn. */ -+ BFD_RELOC_AVR_HI8_LDI_PM_NEG, -+ -+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value -+(high 6 bit of 22 bit command address) into 8 bit immediate -+value of SUBI insn. */ -+ BFD_RELOC_AVR_HH8_LDI_PM_NEG, -+ -+/* This is a 32 bit reloc for the AVR that stores 23 bit value -+into 22 bits. 
*/ -+ BFD_RELOC_AVR_CALL, -+ -+/* This is a 16 bit reloc for the AVR that stores all needed bits -+for absolute addressing with ldi with overflow check to linktime */ -+ BFD_RELOC_AVR_LDI, -+ -+/* This is a 6 bit reloc for the AVR that stores offset for ldd/std -+instructions */ -+ BFD_RELOC_AVR_6, -+ -+/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw -+instructions */ -+ BFD_RELOC_AVR_6_ADIW, -+ -+/* Direct 12 bit. */ -+ BFD_RELOC_390_12, -+ -+/* 12 bit GOT offset. */ -+ BFD_RELOC_390_GOT12, -+ -+/* 32 bit PC relative PLT address. */ -+ BFD_RELOC_390_PLT32, -+ -+/* Copy symbol at runtime. */ -+ BFD_RELOC_390_COPY, -+ -+/* Create GOT entry. */ -+ BFD_RELOC_390_GLOB_DAT, -+ -+/* Create PLT entry. */ -+ BFD_RELOC_390_JMP_SLOT, -+ -+/* Adjust by program base. */ -+ BFD_RELOC_390_RELATIVE, -+ -+/* 32 bit PC relative offset to GOT. */ -+ BFD_RELOC_390_GOTPC, -+ -+/* 16 bit GOT offset. */ -+ BFD_RELOC_390_GOT16, -+ -+/* PC relative 16 bit shifted by 1. */ -+ BFD_RELOC_390_PC16DBL, -+ -+/* 16 bit PC rel. PLT shifted by 1. */ -+ BFD_RELOC_390_PLT16DBL, -+ -+/* PC relative 32 bit shifted by 1. */ -+ BFD_RELOC_390_PC32DBL, -+ -+/* 32 bit PC rel. PLT shifted by 1. */ -+ BFD_RELOC_390_PLT32DBL, -+ -+/* 32 bit PC rel. GOT shifted by 1. */ -+ BFD_RELOC_390_GOTPCDBL, -+ -+/* 64 bit GOT offset. */ -+ BFD_RELOC_390_GOT64, -+ -+/* 64 bit PC relative PLT address. */ -+ BFD_RELOC_390_PLT64, -+ -+/* 32 bit rel. offset to GOT entry. */ -+ BFD_RELOC_390_GOTENT, -+ -+/* 64 bit offset to GOT. */ -+ BFD_RELOC_390_GOTOFF64, -+ -+/* 12-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT12, -+ -+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT16, -+ -+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT32, -+ -+/* 64-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLT64, -+ -+/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_390_GOTPLTENT, -+ -+/* 16-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF16, -+ -+/* 32-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF32, -+ -+/* 64-bit rel. offset from the GOT to a PLT entry. */ -+ BFD_RELOC_390_PLTOFF64, -+ -+/* s390 tls relocations. */ -+ BFD_RELOC_390_TLS_LOAD, -+ BFD_RELOC_390_TLS_GDCALL, -+ BFD_RELOC_390_TLS_LDCALL, -+ BFD_RELOC_390_TLS_GD32, -+ BFD_RELOC_390_TLS_GD64, -+ BFD_RELOC_390_TLS_GOTIE12, -+ BFD_RELOC_390_TLS_GOTIE32, -+ BFD_RELOC_390_TLS_GOTIE64, -+ BFD_RELOC_390_TLS_LDM32, -+ BFD_RELOC_390_TLS_LDM64, -+ BFD_RELOC_390_TLS_IE32, -+ BFD_RELOC_390_TLS_IE64, -+ BFD_RELOC_390_TLS_IEENT, -+ BFD_RELOC_390_TLS_LE32, -+ BFD_RELOC_390_TLS_LE64, -+ BFD_RELOC_390_TLS_LDO32, -+ BFD_RELOC_390_TLS_LDO64, -+ BFD_RELOC_390_TLS_DTPMOD, -+ BFD_RELOC_390_TLS_DTPOFF, -+ BFD_RELOC_390_TLS_TPOFF, -+ -+/* Long displacement extension. 
*/ -+ BFD_RELOC_390_20, -+ BFD_RELOC_390_GOT20, -+ BFD_RELOC_390_GOTPLT20, -+ BFD_RELOC_390_TLS_GOTIE20, -+ -+/* Scenix IP2K - 9-bit register number / data address */ -+ BFD_RELOC_IP2K_FR9, -+ -+/* Scenix IP2K - 4-bit register/data bank number */ -+ BFD_RELOC_IP2K_BANK, -+ -+/* Scenix IP2K - low 13 bits of instruction word address */ -+ BFD_RELOC_IP2K_ADDR16CJP, -+ -+/* Scenix IP2K - high 3 bits of instruction word address */ -+ BFD_RELOC_IP2K_PAGE3, -+ -+/* Scenix IP2K - ext/low/high 8 bits of data address */ -+ BFD_RELOC_IP2K_LO8DATA, -+ BFD_RELOC_IP2K_HI8DATA, -+ BFD_RELOC_IP2K_EX8DATA, -+ -+/* Scenix IP2K - low/high 8 bits of instruction word address */ -+ BFD_RELOC_IP2K_LO8INSN, -+ BFD_RELOC_IP2K_HI8INSN, -+ -+/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */ -+ BFD_RELOC_IP2K_PC_SKIP, -+ -+/* Scenix IP2K - 16 bit word address in text section. */ -+ BFD_RELOC_IP2K_TEXT, -+ -+/* Scenix IP2K - 7-bit sp or dp offset */ -+ BFD_RELOC_IP2K_FR_OFFSET, -+ -+/* Scenix VPE4K coprocessor - data/insn-space addressing */ -+ BFD_RELOC_VPE4KMATH_DATA, -+ BFD_RELOC_VPE4KMATH_INSN, -+ -+/* These two relocations are used by the linker to determine which of -+the entries in a C++ virtual function table are actually used. When -+the --gc-sections option is given, the linker will zero out the entries -+that are not used, so that the code for those functions need not be -+included in the output. -+ -+VTABLE_INHERIT is a zero-space relocation used to describe to the -+linker the inheritance tree of a C++ virtual function table. The -+relocation's symbol should be the parent class' vtable, and the -+relocation should be located at the child vtable. -+ -+VTABLE_ENTRY is a zero-space relocation that describes the use of a -+virtual function table entry. The reloc's symbol should refer to the -+table of the class mentioned in the code. Off of that base, an offset -+describes the entry that is being used. For Rela hosts, this offset -+is stored in the reloc's addend. For Rel hosts, we are forced to put -+this offset in the reloc's section offset. */ -+ BFD_RELOC_VTABLE_INHERIT, -+ BFD_RELOC_VTABLE_ENTRY, -+ -+/* Intel IA64 Relocations. 
*/ -+ BFD_RELOC_IA64_IMM14, -+ BFD_RELOC_IA64_IMM22, -+ BFD_RELOC_IA64_IMM64, -+ BFD_RELOC_IA64_DIR32MSB, -+ BFD_RELOC_IA64_DIR32LSB, -+ BFD_RELOC_IA64_DIR64MSB, -+ BFD_RELOC_IA64_DIR64LSB, -+ BFD_RELOC_IA64_GPREL22, -+ BFD_RELOC_IA64_GPREL64I, -+ BFD_RELOC_IA64_GPREL32MSB, -+ BFD_RELOC_IA64_GPREL32LSB, -+ BFD_RELOC_IA64_GPREL64MSB, -+ BFD_RELOC_IA64_GPREL64LSB, -+ BFD_RELOC_IA64_LTOFF22, -+ BFD_RELOC_IA64_LTOFF64I, -+ BFD_RELOC_IA64_PLTOFF22, -+ BFD_RELOC_IA64_PLTOFF64I, -+ BFD_RELOC_IA64_PLTOFF64MSB, -+ BFD_RELOC_IA64_PLTOFF64LSB, -+ BFD_RELOC_IA64_FPTR64I, -+ BFD_RELOC_IA64_FPTR32MSB, -+ BFD_RELOC_IA64_FPTR32LSB, -+ BFD_RELOC_IA64_FPTR64MSB, -+ BFD_RELOC_IA64_FPTR64LSB, -+ BFD_RELOC_IA64_PCREL21B, -+ BFD_RELOC_IA64_PCREL21BI, -+ BFD_RELOC_IA64_PCREL21M, -+ BFD_RELOC_IA64_PCREL21F, -+ BFD_RELOC_IA64_PCREL22, -+ BFD_RELOC_IA64_PCREL60B, -+ BFD_RELOC_IA64_PCREL64I, -+ BFD_RELOC_IA64_PCREL32MSB, -+ BFD_RELOC_IA64_PCREL32LSB, -+ BFD_RELOC_IA64_PCREL64MSB, -+ BFD_RELOC_IA64_PCREL64LSB, -+ BFD_RELOC_IA64_LTOFF_FPTR22, -+ BFD_RELOC_IA64_LTOFF_FPTR64I, -+ BFD_RELOC_IA64_LTOFF_FPTR32MSB, -+ BFD_RELOC_IA64_LTOFF_FPTR32LSB, -+ BFD_RELOC_IA64_LTOFF_FPTR64MSB, -+ BFD_RELOC_IA64_LTOFF_FPTR64LSB, -+ BFD_RELOC_IA64_SEGREL32MSB, -+ BFD_RELOC_IA64_SEGREL32LSB, -+ BFD_RELOC_IA64_SEGREL64MSB, -+ BFD_RELOC_IA64_SEGREL64LSB, -+ BFD_RELOC_IA64_SECREL32MSB, -+ BFD_RELOC_IA64_SECREL32LSB, -+ BFD_RELOC_IA64_SECREL64MSB, -+ BFD_RELOC_IA64_SECREL64LSB, -+ BFD_RELOC_IA64_REL32MSB, -+ BFD_RELOC_IA64_REL32LSB, -+ BFD_RELOC_IA64_REL64MSB, -+ BFD_RELOC_IA64_REL64LSB, -+ BFD_RELOC_IA64_LTV32MSB, -+ BFD_RELOC_IA64_LTV32LSB, -+ BFD_RELOC_IA64_LTV64MSB, -+ BFD_RELOC_IA64_LTV64LSB, -+ BFD_RELOC_IA64_IPLTMSB, -+ BFD_RELOC_IA64_IPLTLSB, -+ BFD_RELOC_IA64_COPY, -+ BFD_RELOC_IA64_LTOFF22X, -+ BFD_RELOC_IA64_LDXMOV, -+ BFD_RELOC_IA64_TPREL14, -+ BFD_RELOC_IA64_TPREL22, -+ BFD_RELOC_IA64_TPREL64I, -+ BFD_RELOC_IA64_TPREL64MSB, -+ BFD_RELOC_IA64_TPREL64LSB, -+ BFD_RELOC_IA64_LTOFF_TPREL22, -+ BFD_RELOC_IA64_DTPMOD64MSB, -+ BFD_RELOC_IA64_DTPMOD64LSB, -+ BFD_RELOC_IA64_LTOFF_DTPMOD22, -+ BFD_RELOC_IA64_DTPREL14, -+ BFD_RELOC_IA64_DTPREL22, -+ BFD_RELOC_IA64_DTPREL64I, -+ BFD_RELOC_IA64_DTPREL32MSB, -+ BFD_RELOC_IA64_DTPREL32LSB, -+ BFD_RELOC_IA64_DTPREL64MSB, -+ BFD_RELOC_IA64_DTPREL64LSB, -+ BFD_RELOC_IA64_LTOFF_DTPREL22, -+ -+/* Motorola 68HC11 reloc. -+This is the 8 bit high part of an absolute address. */ -+ BFD_RELOC_M68HC11_HI8, -+ -+/* Motorola 68HC11 reloc. -+This is the 8 bit low part of an absolute address. */ -+ BFD_RELOC_M68HC11_LO8, -+ -+/* Motorola 68HC11 reloc. -+This is the 3 bit of a value. */ -+ BFD_RELOC_M68HC11_3B, -+ -+/* Motorola 68HC11 reloc. -+This reloc marks the beginning of a jump/call instruction. -+It is used for linker relaxation to correctly identify beginning -+of instruction and change some branches to use PC-relative -+addressing mode. */ -+ BFD_RELOC_M68HC11_RL_JUMP, -+ -+/* Motorola 68HC11 reloc. -+This reloc marks a group of several instructions that gcc generates -+and for which the linker relaxation pass can modify and/or remove -+some of them. */ -+ BFD_RELOC_M68HC11_RL_GROUP, -+ -+/* Motorola 68HC11 reloc. -+This is the 16-bit lower part of an address. It is used for 'call' -+instruction to specify the symbol address without any special -+transformation (due to memory bank window). */ -+ BFD_RELOC_M68HC11_LO16, -+ -+/* Motorola 68HC11 reloc. -+This is a 8-bit reloc that specifies the page number of an address. -+It is used by 'call' instruction to specify the page number of -+the symbol. 
*/ -+ BFD_RELOC_M68HC11_PAGE, -+ -+/* Motorola 68HC11 reloc. -+This is a 24-bit reloc that represents the address with a 16-bit -+value and a 8-bit page number. The symbol address is transformed -+to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */ -+ BFD_RELOC_M68HC11_24, -+ -+/* Motorola 68HC12 reloc. -+This is the 5 bits of a value. */ -+ BFD_RELOC_M68HC12_5B, -+ -+/* NS CR16C Relocations. */ -+ BFD_RELOC_16C_NUM08, -+ BFD_RELOC_16C_NUM08_C, -+ BFD_RELOC_16C_NUM16, -+ BFD_RELOC_16C_NUM16_C, -+ BFD_RELOC_16C_NUM32, -+ BFD_RELOC_16C_NUM32_C, -+ BFD_RELOC_16C_DISP04, -+ BFD_RELOC_16C_DISP04_C, -+ BFD_RELOC_16C_DISP08, -+ BFD_RELOC_16C_DISP08_C, -+ BFD_RELOC_16C_DISP16, -+ BFD_RELOC_16C_DISP16_C, -+ BFD_RELOC_16C_DISP24, -+ BFD_RELOC_16C_DISP24_C, -+ BFD_RELOC_16C_DISP24a, -+ BFD_RELOC_16C_DISP24a_C, -+ BFD_RELOC_16C_REG04, -+ BFD_RELOC_16C_REG04_C, -+ BFD_RELOC_16C_REG04a, -+ BFD_RELOC_16C_REG04a_C, -+ BFD_RELOC_16C_REG14, -+ BFD_RELOC_16C_REG14_C, -+ BFD_RELOC_16C_REG16, -+ BFD_RELOC_16C_REG16_C, -+ BFD_RELOC_16C_REG20, -+ BFD_RELOC_16C_REG20_C, -+ BFD_RELOC_16C_ABS20, -+ BFD_RELOC_16C_ABS20_C, -+ BFD_RELOC_16C_ABS24, -+ BFD_RELOC_16C_ABS24_C, -+ BFD_RELOC_16C_IMM04, -+ BFD_RELOC_16C_IMM04_C, -+ BFD_RELOC_16C_IMM16, -+ BFD_RELOC_16C_IMM16_C, -+ BFD_RELOC_16C_IMM20, -+ BFD_RELOC_16C_IMM20_C, -+ BFD_RELOC_16C_IMM24, -+ BFD_RELOC_16C_IMM24_C, -+ BFD_RELOC_16C_IMM32, -+ BFD_RELOC_16C_IMM32_C, -+ -+/* NS CRX Relocations. */ -+ BFD_RELOC_CRX_REL4, -+ BFD_RELOC_CRX_REL8, -+ BFD_RELOC_CRX_REL8_CMP, -+ BFD_RELOC_CRX_REL16, -+ BFD_RELOC_CRX_REL24, -+ BFD_RELOC_CRX_REL32, -+ BFD_RELOC_CRX_REGREL12, -+ BFD_RELOC_CRX_REGREL22, -+ BFD_RELOC_CRX_REGREL28, -+ BFD_RELOC_CRX_REGREL32, -+ BFD_RELOC_CRX_ABS16, -+ BFD_RELOC_CRX_ABS32, -+ BFD_RELOC_CRX_NUM8, -+ BFD_RELOC_CRX_NUM16, -+ BFD_RELOC_CRX_NUM32, -+ BFD_RELOC_CRX_IMM16, -+ BFD_RELOC_CRX_IMM32, -+ BFD_RELOC_CRX_SWITCH8, -+ BFD_RELOC_CRX_SWITCH16, -+ BFD_RELOC_CRX_SWITCH32, -+ -+/* These relocs are only used within the CRIS assembler. They are not -+(at present) written to any object files. */ -+ BFD_RELOC_CRIS_BDISP8, -+ BFD_RELOC_CRIS_UNSIGNED_5, -+ BFD_RELOC_CRIS_SIGNED_6, -+ BFD_RELOC_CRIS_UNSIGNED_6, -+ BFD_RELOC_CRIS_SIGNED_8, -+ BFD_RELOC_CRIS_UNSIGNED_8, -+ BFD_RELOC_CRIS_SIGNED_16, -+ BFD_RELOC_CRIS_UNSIGNED_16, -+ BFD_RELOC_CRIS_LAPCQ_OFFSET, -+ BFD_RELOC_CRIS_UNSIGNED_4, -+ -+/* Relocs used in ELF shared libraries for CRIS. */ -+ BFD_RELOC_CRIS_COPY, -+ BFD_RELOC_CRIS_GLOB_DAT, -+ BFD_RELOC_CRIS_JUMP_SLOT, -+ BFD_RELOC_CRIS_RELATIVE, -+ -+/* 32-bit offset to symbol-entry within GOT. */ -+ BFD_RELOC_CRIS_32_GOT, -+ -+/* 16-bit offset to symbol-entry within GOT. */ -+ BFD_RELOC_CRIS_16_GOT, -+ -+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_CRIS_32_GOTPLT, -+ -+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */ -+ BFD_RELOC_CRIS_16_GOTPLT, -+ -+/* 32-bit offset to symbol, relative to GOT. */ -+ BFD_RELOC_CRIS_32_GOTREL, -+ -+/* 32-bit offset to symbol with PLT entry, relative to GOT. */ -+ BFD_RELOC_CRIS_32_PLT_GOTREL, -+ -+/* 32-bit offset to symbol with PLT entry, relative to this relocation. */ -+ BFD_RELOC_CRIS_32_PLT_PCREL, -+ -+/* Intel i860 Relocations. 
*/ -+ BFD_RELOC_860_COPY, -+ BFD_RELOC_860_GLOB_DAT, -+ BFD_RELOC_860_JUMP_SLOT, -+ BFD_RELOC_860_RELATIVE, -+ BFD_RELOC_860_PC26, -+ BFD_RELOC_860_PLT26, -+ BFD_RELOC_860_PC16, -+ BFD_RELOC_860_LOW0, -+ BFD_RELOC_860_SPLIT0, -+ BFD_RELOC_860_LOW1, -+ BFD_RELOC_860_SPLIT1, -+ BFD_RELOC_860_LOW2, -+ BFD_RELOC_860_SPLIT2, -+ BFD_RELOC_860_LOW3, -+ BFD_RELOC_860_LOGOT0, -+ BFD_RELOC_860_SPGOT0, -+ BFD_RELOC_860_LOGOT1, -+ BFD_RELOC_860_SPGOT1, -+ BFD_RELOC_860_LOGOTOFF0, -+ BFD_RELOC_860_SPGOTOFF0, -+ BFD_RELOC_860_LOGOTOFF1, -+ BFD_RELOC_860_SPGOTOFF1, -+ BFD_RELOC_860_LOGOTOFF2, -+ BFD_RELOC_860_LOGOTOFF3, -+ BFD_RELOC_860_LOPC, -+ BFD_RELOC_860_HIGHADJ, -+ BFD_RELOC_860_HAGOT, -+ BFD_RELOC_860_HAGOTOFF, -+ BFD_RELOC_860_HAPC, -+ BFD_RELOC_860_HIGH, -+ BFD_RELOC_860_HIGOT, -+ BFD_RELOC_860_HIGOTOFF, -+ -+/* OpenRISC Relocations. */ -+ BFD_RELOC_OPENRISC_ABS_26, -+ BFD_RELOC_OPENRISC_REL_26, -+ -+/* H8 elf Relocations. */ -+ BFD_RELOC_H8_DIR16A8, -+ BFD_RELOC_H8_DIR16R8, -+ BFD_RELOC_H8_DIR24A8, -+ BFD_RELOC_H8_DIR24R8, -+ BFD_RELOC_H8_DIR32A16, -+ -+/* Sony Xstormy16 Relocations. */ -+ BFD_RELOC_XSTORMY16_REL_12, -+ BFD_RELOC_XSTORMY16_12, -+ BFD_RELOC_XSTORMY16_24, -+ BFD_RELOC_XSTORMY16_FPTR16, -+ -+/* Relocations used by VAX ELF. */ -+ BFD_RELOC_VAX_GLOB_DAT, -+ BFD_RELOC_VAX_JMP_SLOT, -+ BFD_RELOC_VAX_RELATIVE, -+ -+/* Morpho MS1 - 16 bit immediate relocation. */ -+ BFD_RELOC_MS1_PC16, -+ -+/* Morpho MS1 - Hi 16 bits of an address. */ -+ BFD_RELOC_MS1_HI16, -+ -+/* Morpho MS1 - Low 16 bits of an address. */ -+ BFD_RELOC_MS1_LO16, -+ -+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */ -+ BFD_RELOC_MS1_GNU_VTINHERIT, -+ -+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */ -+ BFD_RELOC_MS1_GNU_VTENTRY, -+ -+/* msp430 specific relocation codes */ -+ BFD_RELOC_MSP430_10_PCREL, -+ BFD_RELOC_MSP430_16_PCREL, -+ BFD_RELOC_MSP430_16, -+ BFD_RELOC_MSP430_16_PCREL_BYTE, -+ BFD_RELOC_MSP430_16_BYTE, -+ BFD_RELOC_MSP430_2X_PCREL, -+ BFD_RELOC_MSP430_RL_PCREL, -+ -+/* IQ2000 Relocations. */ -+ BFD_RELOC_IQ2000_OFFSET_16, -+ BFD_RELOC_IQ2000_OFFSET_21, -+ BFD_RELOC_IQ2000_UHI16, -+ -+/* Special Xtensa relocation used only by PLT entries in ELF shared -+objects to indicate that the runtime linker should set the value -+to one of its own internal functions or data structures. */ -+ BFD_RELOC_XTENSA_RTLD, -+ -+/* Xtensa relocations for ELF shared objects. */ -+ BFD_RELOC_XTENSA_GLOB_DAT, -+ BFD_RELOC_XTENSA_JMP_SLOT, -+ BFD_RELOC_XTENSA_RELATIVE, -+ -+/* Xtensa relocation used in ELF object files for symbols that may require -+PLT entries. Otherwise, this is just a generic 32-bit relocation. */ -+ BFD_RELOC_XTENSA_PLT, -+ -+/* Xtensa relocations to mark the difference of two local symbols. -+These are only needed to support linker relaxation and can be ignored -+when not relaxing. The field is set to the value of the difference -+assuming no relaxation. The relocation encodes the position of the -+first symbol so the linker can determine whether to adjust the field -+value. */ -+ BFD_RELOC_XTENSA_DIFF8, -+ BFD_RELOC_XTENSA_DIFF16, -+ BFD_RELOC_XTENSA_DIFF32, -+ -+/* Generic Xtensa relocations for instruction operands. Only the slot -+number is encoded in the relocation. The relocation applies to the -+last PC-relative immediate operand, or if there are no PC-relative -+immediates, to the last immediate operand. 
*/ -+ BFD_RELOC_XTENSA_SLOT0_OP, -+ BFD_RELOC_XTENSA_SLOT1_OP, -+ BFD_RELOC_XTENSA_SLOT2_OP, -+ BFD_RELOC_XTENSA_SLOT3_OP, -+ BFD_RELOC_XTENSA_SLOT4_OP, -+ BFD_RELOC_XTENSA_SLOT5_OP, -+ BFD_RELOC_XTENSA_SLOT6_OP, -+ BFD_RELOC_XTENSA_SLOT7_OP, -+ BFD_RELOC_XTENSA_SLOT8_OP, -+ BFD_RELOC_XTENSA_SLOT9_OP, -+ BFD_RELOC_XTENSA_SLOT10_OP, -+ BFD_RELOC_XTENSA_SLOT11_OP, -+ BFD_RELOC_XTENSA_SLOT12_OP, -+ BFD_RELOC_XTENSA_SLOT13_OP, -+ BFD_RELOC_XTENSA_SLOT14_OP, -+ -+/* Alternate Xtensa relocations. Only the slot is encoded in the -+relocation. The meaning of these relocations is opcode-specific. */ -+ BFD_RELOC_XTENSA_SLOT0_ALT, -+ BFD_RELOC_XTENSA_SLOT1_ALT, -+ BFD_RELOC_XTENSA_SLOT2_ALT, -+ BFD_RELOC_XTENSA_SLOT3_ALT, -+ BFD_RELOC_XTENSA_SLOT4_ALT, -+ BFD_RELOC_XTENSA_SLOT5_ALT, -+ BFD_RELOC_XTENSA_SLOT6_ALT, -+ BFD_RELOC_XTENSA_SLOT7_ALT, -+ BFD_RELOC_XTENSA_SLOT8_ALT, -+ BFD_RELOC_XTENSA_SLOT9_ALT, -+ BFD_RELOC_XTENSA_SLOT10_ALT, -+ BFD_RELOC_XTENSA_SLOT11_ALT, -+ BFD_RELOC_XTENSA_SLOT12_ALT, -+ BFD_RELOC_XTENSA_SLOT13_ALT, -+ BFD_RELOC_XTENSA_SLOT14_ALT, -+ -+/* Xtensa relocations for backward compatibility. These have all been -+replaced by BFD_RELOC_XTENSA_SLOT0_OP. */ -+ BFD_RELOC_XTENSA_OP0, -+ BFD_RELOC_XTENSA_OP1, -+ BFD_RELOC_XTENSA_OP2, -+ -+/* Xtensa relocation to mark that the assembler expanded the -+instructions from an original target. The expansion size is -+encoded in the reloc size. */ -+ BFD_RELOC_XTENSA_ASM_EXPAND, -+ -+/* Xtensa relocation to mark that the linker should simplify -+assembler-expanded instructions. This is commonly used -+internally by the linker after analysis of a -+BFD_RELOC_XTENSA_ASM_EXPAND. */ -+ BFD_RELOC_XTENSA_ASM_SIMPLIFY, -+ BFD_RELOC_UNUSED }; -+typedef enum bfd_reloc_code_real bfd_reloc_code_real_type; -+reloc_howto_type *bfd_reloc_type_lookup -+ (bfd *abfd, bfd_reloc_code_real_type code); -+ -+const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code); -+ -+/* Extracted from syms.c. */ -+ -+typedef struct bfd_symbol -+{ -+ /* A pointer to the BFD which owns the symbol. This information -+ is necessary so that a back end can work out what additional -+ information (invisible to the application writer) is carried -+ with the symbol. -+ -+ This field is *almost* redundant, since you can use section->owner -+ instead, except that some symbols point to the global sections -+ bfd_{abs,com,und}_section. This could be fixed by making -+ these globals be per-bfd (or per-target-flavor). FIXME. */ -+ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */ -+ -+ /* The text of the symbol. The name is left alone, and not copied; the -+ application may not alter it. */ -+ const char *name; -+ -+ /* The value of the symbol. This really should be a union of a -+ numeric value with a pointer, since some flags indicate that -+ a pointer to another symbol is stored here. */ -+ symvalue value; -+ -+ /* Attributes of a symbol. */ -+#define BSF_NO_FLAGS 0x00 -+ -+ /* The symbol has local scope; <<static>> in <<C>>. The value -+ is the offset into the section of the data. */ -+#define BSF_LOCAL 0x01 -+ -+ /* The symbol has global scope; initialized data in <<C>>. The -+ value is the offset into the section of the data. */ -+#define BSF_GLOBAL 0x02 -+ -+ /* The symbol has global scope and is exported. The value is -+ the offset into the section of the data. */ -+#define BSF_EXPORT BSF_GLOBAL /* No real difference. */ -+ -+ /* A normal C symbol would be one of: -+ <<BSF_LOCAL>>, <<BSF_FORT_COMM>>, <<BSF_UNDEFINED>> or -+ <<BSF_GLOBAL>>. */ -+ -+ /* The symbol is a debugging record.
The value has an arbitrary -+ meaning, unless BSF_DEBUGGING_RELOC is also set. */ -+#define BSF_DEBUGGING 0x08 -+ -+ /* The symbol denotes a function entry point. Used in ELF, -+ perhaps others someday. */ -+#define BSF_FUNCTION 0x10 -+ -+ /* Used by the linker. */ -+#define BSF_KEEP 0x20 -+#define BSF_KEEP_G 0x40 -+ -+ /* A weak global symbol, overridable without warnings by -+ a regular global symbol of the same name. */ -+#define BSF_WEAK 0x80 -+ -+ /* This symbol was created to point to a section, e.g. ELF's -+ STT_SECTION symbols. */ -+#define BSF_SECTION_SYM 0x100 -+ -+ /* The symbol used to be a common symbol, but now it is -+ allocated. */ -+#define BSF_OLD_COMMON 0x200 -+ -+ /* The default value for common data. */ -+#define BFD_FORT_COMM_DEFAULT_VALUE 0 -+ -+ /* In some files the type of a symbol sometimes alters its -+ location in an output file - ie in coff a <<ISFCN>> symbol -+ which is also <<C_EXT>> symbol appears where it was -+ declared and not at the end of a section. This bit is set -+ by the target BFD part to convey this information. */ -+#define BSF_NOT_AT_END 0x400 -+ -+ /* Signal that the symbol is the label of constructor section. */ -+#define BSF_CONSTRUCTOR 0x800 -+ -+ /* Signal that the symbol is a warning symbol. The name is a -+ warning. The name of the next symbol is the one to warn about; -+ if a reference is made to a symbol with the same name as the next -+ symbol, a warning is issued by the linker. */ -+#define BSF_WARNING 0x1000 -+ -+ /* Signal that the symbol is indirect. This symbol is an indirect -+ pointer to the symbol with the same name as the next symbol. */ -+#define BSF_INDIRECT 0x2000 -+ -+ /* BSF_FILE marks symbols that contain a file name. This is used -+ for ELF STT_FILE symbols. */ -+#define BSF_FILE 0x4000 -+ -+ /* Symbol is from dynamic linking information. */ -+#define BSF_DYNAMIC 0x8000 -+ -+ /* The symbol denotes a data object. Used in ELF, and perhaps -+ others someday. */ -+#define BSF_OBJECT 0x10000 -+ -+ /* This symbol is a debugging symbol. The value is the offset -+ into the section of the data. BSF_DEBUGGING should be set -+ as well. */ -+#define BSF_DEBUGGING_RELOC 0x20000 -+ -+ /* This symbol is thread local. Used in ELF. */ -+#define BSF_THREAD_LOCAL 0x40000 -+ -+ flagword flags; -+ -+ /* A pointer to the section to which this symbol is -+ relative. This will always be non NULL, there are special -+ sections for undefined and absolute symbols. */ -+ struct bfd_section *section; -+ -+ /* Back end special data.
*/ -+ union -+ { -+ void *p; -+ bfd_vma i; -+ } -+ udata; -+} -+asymbol; -+ -+#define bfd_get_symtab_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd)) -+ -+bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym); -+ -+bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name); -+ -+#define bfd_is_local_label_name(abfd, name) \ -+ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name)) -+ -+bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym); -+ -+#define bfd_is_target_special_symbol(abfd, sym) \ -+ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym)) -+ -+#define bfd_canonicalize_symtab(abfd, location) \ -+ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location)) -+ -+bfd_boolean bfd_set_symtab -+ (bfd *abfd, asymbol **location, unsigned int count); -+ -+void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol); -+ -+#define bfd_make_empty_symbol(abfd) \ -+ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd)) -+ -+asymbol *_bfd_generic_make_empty_symbol (bfd *); -+ -+#define bfd_make_debug_symbol(abfd,ptr,size) \ -+ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size)) -+ -+int bfd_decode_symclass (asymbol *symbol); -+ -+bfd_boolean bfd_is_undefined_symclass (int symclass); -+ -+void bfd_symbol_info (asymbol *symbol, symbol_info *ret); -+ -+bfd_boolean bfd_copy_private_symbol_data -+ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym); -+ -+#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \ -+ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \ -+ (ibfd, isymbol, obfd, osymbol)) -+ -+/* Extracted from bfd.c. */ -+struct bfd -+{ -+ /* A unique identifier of the BFD */ -+ unsigned int id; -+ -+ /* The filename the application opened the BFD with. */ -+ const char *filename; -+ -+ /* A pointer to the target jump table. */ -+ const struct bfd_target *xvec; -+ -+ /* The IOSTREAM, and corresponding IO vector that provide access -+ to the file backing the BFD. */ -+ void *iostream; -+ const struct bfd_iovec *iovec; -+ -+ /* Is the file descriptor being cached? That is, can it be closed as -+ needed, and re-opened when accessed later? */ -+ bfd_boolean cacheable; -+ -+ /* Marks whether there was a default target specified when the -+ BFD was opened. This is used to select which matching algorithm -+ to use to choose the back end. */ -+ bfd_boolean target_defaulted; -+ -+ /* The caching routines use these to maintain a -+ least-recently-used list of BFDs. */ -+ struct bfd *lru_prev, *lru_next; -+ -+ /* When a file is closed by the caching routines, BFD retains -+ state information on the file here... */ -+ ufile_ptr where; -+ -+ /* ... and here: (``once'' means at least once). */ -+ bfd_boolean opened_once; -+ -+ /* Set if we have a locally maintained mtime value, rather than -+ getting it from the file each time. */ -+ bfd_boolean mtime_set; -+ -+ /* File modified time, if mtime_set is TRUE. */ -+ long mtime; -+ -+ /* Reserved for an unimplemented file locking extension. */ -+ int ifd; -+ -+ /* The format which belongs to the BFD. (object, core, etc.) */ -+ bfd_format format; -+ -+ /* The direction with which the BFD was opened. */ -+ enum bfd_direction -+ { -+ no_direction = 0, -+ read_direction = 1, -+ write_direction = 2, -+ both_direction = 3 -+ } -+ direction; -+ -+ /* Format_specific flags. */ -+ flagword flags; -+ -+ /* Currently my_archive is tested before adding origin to -+ anything. I believe that this can become always an add of -+ origin, with origin set to 0 for non archive files. 
*/ -+ ufile_ptr origin; -+ -+ /* Remember when output has begun, to stop strange things -+ from happening. */ -+ bfd_boolean output_has_begun; -+ -+ /* A hash table for section names. */ -+ struct bfd_hash_table section_htab; -+ -+ /* Pointer to linked list of sections. */ -+ struct bfd_section *sections; -+ -+ /* The last section on the section list. */ -+ struct bfd_section *section_last; -+ -+ /* The number of sections. */ -+ unsigned int section_count; -+ -+ /* Stuff only useful for object files: -+ The start address. */ -+ bfd_vma start_address; -+ -+ /* Used for input and output. */ -+ unsigned int symcount; -+ -+ /* Symbol table for output BFD (with symcount entries). */ -+ struct bfd_symbol **outsymbols; -+ -+ /* Used for slurped dynamic symbol tables. */ -+ unsigned int dynsymcount; -+ -+ /* Pointer to structure which contains architecture information. */ -+ const struct bfd_arch_info *arch_info; -+ -+ /* Flag set if symbols from this BFD should not be exported. */ -+ bfd_boolean no_export; -+ -+ /* Stuff only useful for archives. */ -+ void *arelt_data; -+ struct bfd *my_archive; /* The containing archive BFD. */ -+ struct bfd *next; /* The next BFD in the archive. */ -+ struct bfd *archive_head; /* The first BFD in the archive. */ -+ bfd_boolean has_armap; -+ -+ /* A chain of BFD structures involved in a link. */ -+ struct bfd *link_next; -+ -+ /* A field used by _bfd_generic_link_add_archive_symbols. This will -+ be used only for archive elements. */ -+ int archive_pass; -+ -+ /* Used by the back end to hold private data. */ -+ union -+ { -+ struct aout_data_struct *aout_data; -+ struct artdata *aout_ar_data; -+ struct _oasys_data *oasys_obj_data; -+ struct _oasys_ar_data *oasys_ar_data; -+ struct coff_tdata *coff_obj_data; -+ struct pe_tdata *pe_obj_data; -+ struct xcoff_tdata *xcoff_obj_data; -+ struct ecoff_tdata *ecoff_obj_data; -+ struct ieee_data_struct *ieee_data; -+ struct ieee_ar_data_struct *ieee_ar_data; -+ struct srec_data_struct *srec_data; -+ struct ihex_data_struct *ihex_data; -+ struct tekhex_data_struct *tekhex_data; -+ struct elf_obj_tdata *elf_obj_data; -+ struct nlm_obj_tdata *nlm_obj_data; -+ struct bout_data_struct *bout_data; -+ struct mmo_data_struct *mmo_data; -+ struct sun_core_struct *sun_core_data; -+ struct sco5_core_struct *sco5_core_data; -+ struct trad_core_struct *trad_core_data; -+ struct som_data_struct *som_data; -+ struct hpux_core_struct *hpux_core_data; -+ struct hppabsd_core_struct *hppabsd_core_data; -+ struct sgi_core_struct *sgi_core_data; -+ struct lynx_core_struct *lynx_core_data; -+ struct osf_core_struct *osf_core_data; -+ struct cisco_core_struct *cisco_core_data; -+ struct versados_data_struct *versados_data; -+ struct netbsd_core_struct *netbsd_core_data; -+ struct mach_o_data_struct *mach_o_data; -+ struct mach_o_fat_data_struct *mach_o_fat_data; -+ struct bfd_pef_data_struct *pef_data; -+ struct bfd_pef_xlib_data_struct *pef_xlib_data; -+ struct bfd_sym_data_struct *sym_data; -+ void *any; -+ } -+ tdata; -+ -+ /* Used by the application to hold private data. */ -+ void *usrdata; -+ -+ /* Where all the allocated stuff under this BFD goes. This is a -+ struct objalloc *, but we use void * to avoid requiring the inclusion -+ of objalloc.h. 
*/ -+ void *memory; -+}; -+ -+typedef enum bfd_error -+{ -+ bfd_error_no_error = 0, -+ bfd_error_system_call, -+ bfd_error_invalid_target, -+ bfd_error_wrong_format, -+ bfd_error_wrong_object_format, -+ bfd_error_invalid_operation, -+ bfd_error_no_memory, -+ bfd_error_no_symbols, -+ bfd_error_no_armap, -+ bfd_error_no_more_archived_files, -+ bfd_error_malformed_archive, -+ bfd_error_file_not_recognized, -+ bfd_error_file_ambiguously_recognized, -+ bfd_error_no_contents, -+ bfd_error_nonrepresentable_section, -+ bfd_error_no_debug_section, -+ bfd_error_bad_value, -+ bfd_error_file_truncated, -+ bfd_error_file_too_big, -+ bfd_error_invalid_error_code -+} -+bfd_error_type; -+ -+bfd_error_type bfd_get_error (void); -+ -+void bfd_set_error (bfd_error_type error_tag); -+ -+const char *bfd_errmsg (bfd_error_type error_tag); -+ -+void bfd_perror (const char *message); -+ -+typedef void (*bfd_error_handler_type) (const char *, ...); -+ -+bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type); -+ -+void bfd_set_error_program_name (const char *); -+ -+bfd_error_handler_type bfd_get_error_handler (void); -+ -+long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect); -+ -+long bfd_canonicalize_reloc -+ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms); -+ -+void bfd_set_reloc -+ (bfd *abfd, asection *sec, arelent **rel, unsigned int count); -+ -+bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags); -+ -+int bfd_get_arch_size (bfd *abfd); -+ -+int bfd_get_sign_extend_vma (bfd *abfd); -+ -+bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma); -+ -+unsigned int bfd_get_gp_size (bfd *abfd); -+ -+void bfd_set_gp_size (bfd *abfd, unsigned int i); -+ -+bfd_vma bfd_scan_vma (const char *string, const char **end, int base); -+ -+bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_copy_private_header_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_copy_private_header_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_copy_private_bfd_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd); -+ -+#define bfd_merge_private_bfd_data(ibfd, obfd) \ -+ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \ -+ (ibfd, obfd)) -+bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags); -+ -+#define bfd_set_private_flags(abfd, flags) \ -+ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags)) -+#define bfd_sizeof_headers(abfd, reloc) \ -+ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc)) -+ -+#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \ -+ BFD_SEND (abfd, _bfd_find_nearest_line, \ -+ (abfd, sec, syms, off, file, func, line)) -+ -+#define bfd_find_line(abfd, syms, sym, file, line) \ -+ BFD_SEND (abfd, _bfd_find_line, \ -+ (abfd, syms, sym, file, line)) -+ -+#define bfd_find_inliner_info(abfd, file, func, line) \ -+ BFD_SEND (abfd, _bfd_find_inliner_info, \ -+ (abfd, file, func, line)) -+ -+#define bfd_debug_info_start(abfd) \ -+ BFD_SEND (abfd, _bfd_debug_info_start, (abfd)) -+ -+#define bfd_debug_info_end(abfd) \ -+ BFD_SEND (abfd, _bfd_debug_info_end, (abfd)) -+ -+#define bfd_debug_info_accumulate(abfd, section) \ -+ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section)) -+ -+#define bfd_stat_arch_elt(abfd, stat) \ -+ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat)) -+ -+#define bfd_update_armap_timestamp(abfd) \ -+ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd)) -+ -+#define 
bfd_set_arch_mach(abfd, arch, mach)\ -+ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach)) -+ -+#define bfd_relax_section(abfd, section, link_info, again) \ -+ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again)) -+ -+#define bfd_gc_sections(abfd, link_info) \ -+ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info)) -+ -+#define bfd_merge_sections(abfd, link_info) \ -+ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info)) -+ -+#define bfd_is_group_section(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec)) -+ -+#define bfd_discard_group(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec)) -+ -+#define bfd_link_hash_table_create(abfd) \ -+ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd)) -+ -+#define bfd_link_hash_table_free(abfd, hash) \ -+ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash)) -+ -+#define bfd_link_add_symbols(abfd, info) \ -+ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info)) -+ -+#define bfd_link_just_syms(abfd, sec, info) \ -+ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info)) -+ -+#define bfd_final_link(abfd, info) \ -+ BFD_SEND (abfd, _bfd_final_link, (abfd, info)) -+ -+#define bfd_free_cached_info(abfd) \ -+ BFD_SEND (abfd, _bfd_free_cached_info, (abfd)) -+ -+#define bfd_get_dynamic_symtab_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd)) -+ -+#define bfd_print_private_bfd_data(abfd, file)\ -+ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file)) -+ -+#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \ -+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols)) -+ -+#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \ -+ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \ -+ dyncount, dynsyms, ret)) -+ -+#define bfd_get_dynamic_reloc_upper_bound(abfd) \ -+ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd)) -+ -+#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \ -+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms)) -+ -+extern bfd_byte *bfd_get_relocated_section_contents -+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *, -+ bfd_boolean, asymbol **); -+ -+bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative); -+ -+struct bfd_preserve -+{ -+ void *marker; -+ void *tdata; -+ flagword flags; -+ const struct bfd_arch_info *arch_info; -+ struct bfd_section *sections; -+ struct bfd_section *section_last; -+ unsigned int section_count; -+ struct bfd_hash_table section_htab; -+}; -+ -+bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *); -+ -+void bfd_preserve_restore (bfd *, struct bfd_preserve *); -+ -+void bfd_preserve_finish (bfd *, struct bfd_preserve *); -+ -+/* Extracted from archive.c. */ -+symindex bfd_get_next_mapent -+ (bfd *abfd, symindex previous, carsym **sym); -+ -+bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head); -+ -+bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous); -+ -+/* Extracted from corefile.c. */ -+const char *bfd_core_file_failing_command (bfd *abfd); -+ -+int bfd_core_file_failing_signal (bfd *abfd); -+ -+bfd_boolean core_file_matches_executable_p -+ (bfd *core_bfd, bfd *exec_bfd); -+ -+/* Extracted from targets.c. */ -+#define BFD_SEND(bfd, message, arglist) \ -+ ((*((bfd)->xvec->message)) arglist) -+ -+#ifdef DEBUG_BFD_SEND -+#undef BFD_SEND -+#define BFD_SEND(bfd, message, arglist) \ -+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? 
\ -+ ((*((bfd)->xvec->message)) arglist) : \ -+ (bfd_assert (__FILE__,__LINE__), NULL)) -+#endif -+#define BFD_SEND_FMT(bfd, message, arglist) \ -+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) -+ -+#ifdef DEBUG_BFD_SEND -+#undef BFD_SEND_FMT -+#define BFD_SEND_FMT(bfd, message, arglist) \ -+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \ -+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \ -+ (bfd_assert (__FILE__,__LINE__), NULL)) -+#endif -+ -+enum bfd_flavour -+{ -+ bfd_target_unknown_flavour, -+ bfd_target_aout_flavour, -+ bfd_target_coff_flavour, -+ bfd_target_ecoff_flavour, -+ bfd_target_xcoff_flavour, -+ bfd_target_elf_flavour, -+ bfd_target_ieee_flavour, -+ bfd_target_nlm_flavour, -+ bfd_target_oasys_flavour, -+ bfd_target_tekhex_flavour, -+ bfd_target_srec_flavour, -+ bfd_target_ihex_flavour, -+ bfd_target_som_flavour, -+ bfd_target_os9k_flavour, -+ bfd_target_versados_flavour, -+ bfd_target_msdos_flavour, -+ bfd_target_ovax_flavour, -+ bfd_target_evax_flavour, -+ bfd_target_mmo_flavour, -+ bfd_target_mach_o_flavour, -+ bfd_target_pef_flavour, -+ bfd_target_pef_xlib_flavour, -+ bfd_target_sym_flavour -+}; -+ -+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN }; -+ -+/* Forward declaration. */ -+typedef struct bfd_link_info _bfd_link_info; -+ -+typedef struct bfd_target -+{ -+ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */ -+ char *name; -+ -+ /* The "flavour" of a back end is a general indication about -+ the contents of a file. */ -+ enum bfd_flavour flavour; -+ -+ /* The order of bytes within the data area of a file. */ -+ enum bfd_endian byteorder; -+ -+ /* The order of bytes within the header parts of a file. */ -+ enum bfd_endian header_byteorder; -+ -+ /* A mask of all the flags which an executable may have set - -+ from the set <>, <>, ...<>. */ -+ flagword object_flags; -+ -+ /* A mask of all the flags which a section may have set - from -+ the set <>, <>, ...<>. */ -+ flagword section_flags; -+ -+ /* The character normally found at the front of a symbol. -+ (if any), perhaps `_'. */ -+ char symbol_leading_char; -+ -+ /* The pad character for file names within an archive header. */ -+ char ar_pad_char; -+ -+ /* The maximum number of characters in an archive header. */ -+ unsigned short ar_max_namelen; -+ -+ /* Entries for byte swapping for data. These are different from the -+ other entry points, since they don't take a BFD as the first argument. -+ Certain other handlers could do the same. */ -+ bfd_uint64_t (*bfd_getx64) (const void *); -+ bfd_int64_t (*bfd_getx_signed_64) (const void *); -+ void (*bfd_putx64) (bfd_uint64_t, void *); -+ bfd_vma (*bfd_getx32) (const void *); -+ bfd_signed_vma (*bfd_getx_signed_32) (const void *); -+ void (*bfd_putx32) (bfd_vma, void *); -+ bfd_vma (*bfd_getx16) (const void *); -+ bfd_signed_vma (*bfd_getx_signed_16) (const void *); -+ void (*bfd_putx16) (bfd_vma, void *); -+ -+ /* Byte swapping for the headers. 
*/ -+ bfd_uint64_t (*bfd_h_getx64) (const void *); -+ bfd_int64_t (*bfd_h_getx_signed_64) (const void *); -+ void (*bfd_h_putx64) (bfd_uint64_t, void *); -+ bfd_vma (*bfd_h_getx32) (const void *); -+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *); -+ void (*bfd_h_putx32) (bfd_vma, void *); -+ bfd_vma (*bfd_h_getx16) (const void *); -+ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *); -+ void (*bfd_h_putx16) (bfd_vma, void *); -+ -+ /* Format dependent routines: these are vectors of entry points -+ within the target vector structure, one for each format to check. */ -+ -+ /* Check the format of a file being read. Return a <> or zero. */ -+ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *); -+ -+ /* Set the format of a file being written. */ -+ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *); -+ -+ /* Write cached information into a file being written, at <>. */ -+ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *); -+ -+ -+ /* Generic entry points. */ -+#define BFD_JUMP_TABLE_GENERIC(NAME) \ -+ NAME##_close_and_cleanup, \ -+ NAME##_bfd_free_cached_info, \ -+ NAME##_new_section_hook, \ -+ NAME##_get_section_contents, \ -+ NAME##_get_section_contents_in_window -+ -+ /* Called when the BFD is being closed to do any necessary cleanup. */ -+ bfd_boolean (*_close_and_cleanup) (bfd *); -+ /* Ask the BFD to free all cached information. */ -+ bfd_boolean (*_bfd_free_cached_info) (bfd *); -+ /* Called when a new section is created. */ -+ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr); -+ /* Read the contents of a section. */ -+ bfd_boolean (*_bfd_get_section_contents) -+ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type); -+ bfd_boolean (*_bfd_get_section_contents_in_window) -+ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type); -+ -+ /* Entry points to copy private data. */ -+#define BFD_JUMP_TABLE_COPY(NAME) \ -+ NAME##_bfd_copy_private_bfd_data, \ -+ NAME##_bfd_merge_private_bfd_data, \ -+ NAME##_bfd_copy_private_section_data, \ -+ NAME##_bfd_copy_private_symbol_data, \ -+ NAME##_bfd_copy_private_header_data, \ -+ NAME##_bfd_set_private_flags, \ -+ NAME##_bfd_print_private_bfd_data -+ -+ /* Called to copy BFD general private data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *); -+ /* Called to merge BFD general private data from one object file -+ to a common output file when linking. */ -+ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *); -+ /* Called to copy BFD private section data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_section_data) -+ (bfd *, sec_ptr, bfd *, sec_ptr); -+ /* Called to copy BFD private symbol data from one symbol -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_symbol_data) -+ (bfd *, asymbol *, bfd *, asymbol *); -+ /* Called to copy BFD private header data from one object file -+ to another. */ -+ bfd_boolean (*_bfd_copy_private_header_data) -+ (bfd *, bfd *); -+ /* Called to set private backend flags. */ -+ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword); -+ -+ /* Called to print private BFD data. */ -+ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *); -+ -+ /* Core file entry points. 
*/ -+#define BFD_JUMP_TABLE_CORE(NAME) \ -+ NAME##_core_file_failing_command, \ -+ NAME##_core_file_failing_signal, \ -+ NAME##_core_file_matches_executable_p -+ -+ char * (*_core_file_failing_command) (bfd *); -+ int (*_core_file_failing_signal) (bfd *); -+ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *); -+ -+ /* Archive entry points. */ -+#define BFD_JUMP_TABLE_ARCHIVE(NAME) \ -+ NAME##_slurp_armap, \ -+ NAME##_slurp_extended_name_table, \ -+ NAME##_construct_extended_name_table, \ -+ NAME##_truncate_arname, \ -+ NAME##_write_armap, \ -+ NAME##_read_ar_hdr, \ -+ NAME##_openr_next_archived_file, \ -+ NAME##_get_elt_at_index, \ -+ NAME##_generic_stat_arch_elt, \ -+ NAME##_update_armap_timestamp -+ -+ bfd_boolean (*_bfd_slurp_armap) (bfd *); -+ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *); -+ bfd_boolean (*_bfd_construct_extended_name_table) -+ (bfd *, char **, bfd_size_type *, const char **); -+ void (*_bfd_truncate_arname) (bfd *, const char *, char *); -+ bfd_boolean (*write_armap) -+ (bfd *, unsigned int, struct orl *, unsigned int, int); -+ void * (*_bfd_read_ar_hdr_fn) (bfd *); -+ bfd * (*openr_next_archived_file) (bfd *, bfd *); -+#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i)) -+ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex); -+ int (*_bfd_stat_arch_elt) (bfd *, struct stat *); -+ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *); -+ -+ /* Entry points used for symbols. */ -+#define BFD_JUMP_TABLE_SYMBOLS(NAME) \ -+ NAME##_get_symtab_upper_bound, \ -+ NAME##_canonicalize_symtab, \ -+ NAME##_make_empty_symbol, \ -+ NAME##_print_symbol, \ -+ NAME##_get_symbol_info, \ -+ NAME##_bfd_is_local_label_name, \ -+ NAME##_bfd_is_target_special_symbol, \ -+ NAME##_get_lineno, \ -+ NAME##_find_nearest_line, \ -+ _bfd_generic_find_line, \ -+ NAME##_find_inliner_info, \ -+ NAME##_bfd_make_debug_symbol, \ -+ NAME##_read_minisymbols, \ -+ NAME##_minisymbol_to_symbol -+ -+ long (*_bfd_get_symtab_upper_bound) (bfd *); -+ long (*_bfd_canonicalize_symtab) -+ (bfd *, struct bfd_symbol **); -+ struct bfd_symbol * -+ (*_bfd_make_empty_symbol) (bfd *); -+ void (*_bfd_print_symbol) -+ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type); -+#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e)) -+ void (*_bfd_get_symbol_info) -+ (bfd *, struct bfd_symbol *, symbol_info *); -+#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e)) -+ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *); -+ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *); -+ alent * (*_get_lineno) (bfd *, struct bfd_symbol *); -+ bfd_boolean (*_bfd_find_nearest_line) -+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma, -+ const char **, const char **, unsigned int *); -+ bfd_boolean (*_bfd_find_line) -+ (bfd *, struct bfd_symbol **, struct bfd_symbol *, -+ const char **, unsigned int *); -+ bfd_boolean (*_bfd_find_inliner_info) -+ (bfd *, const char **, const char **, unsigned int *); -+ /* Back-door to allow format-aware applications to create debug symbols -+ while using BFD for everything else. Currently used by the assembler -+ when creating COFF files. 
*/ -+ asymbol * (*_bfd_make_debug_symbol) -+ (bfd *, void *, unsigned long size); -+#define bfd_read_minisymbols(b, d, m, s) \ -+ BFD_SEND (b, _read_minisymbols, (b, d, m, s)) -+ long (*_read_minisymbols) -+ (bfd *, bfd_boolean, void **, unsigned int *); -+#define bfd_minisymbol_to_symbol(b, d, m, f) \ -+ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f)) -+ asymbol * (*_minisymbol_to_symbol) -+ (bfd *, bfd_boolean, const void *, asymbol *); -+ -+ /* Routines for relocs. */ -+#define BFD_JUMP_TABLE_RELOCS(NAME) \ -+ NAME##_get_reloc_upper_bound, \ -+ NAME##_canonicalize_reloc, \ -+ NAME##_bfd_reloc_type_lookup -+ -+ long (*_get_reloc_upper_bound) (bfd *, sec_ptr); -+ long (*_bfd_canonicalize_reloc) -+ (bfd *, sec_ptr, arelent **, struct bfd_symbol **); -+ /* See documentation on reloc types. */ -+ reloc_howto_type * -+ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type); -+ -+ /* Routines used when writing an object file. */ -+#define BFD_JUMP_TABLE_WRITE(NAME) \ -+ NAME##_set_arch_mach, \ -+ NAME##_set_section_contents -+ -+ bfd_boolean (*_bfd_set_arch_mach) -+ (bfd *, enum bfd_architecture, unsigned long); -+ bfd_boolean (*_bfd_set_section_contents) -+ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type); -+ -+ /* Routines used by the linker. */ -+#define BFD_JUMP_TABLE_LINK(NAME) \ -+ NAME##_sizeof_headers, \ -+ NAME##_bfd_get_relocated_section_contents, \ -+ NAME##_bfd_relax_section, \ -+ NAME##_bfd_link_hash_table_create, \ -+ NAME##_bfd_link_hash_table_free, \ -+ NAME##_bfd_link_add_symbols, \ -+ NAME##_bfd_link_just_syms, \ -+ NAME##_bfd_final_link, \ -+ NAME##_bfd_link_split_section, \ -+ NAME##_bfd_gc_sections, \ -+ NAME##_bfd_merge_sections, \ -+ NAME##_bfd_is_group_section, \ -+ NAME##_bfd_discard_group, \ -+ NAME##_section_already_linked \ -+ -+ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean); -+ bfd_byte * (*_bfd_get_relocated_section_contents) -+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, -+ bfd_byte *, bfd_boolean, struct bfd_symbol **); -+ -+ bfd_boolean (*_bfd_relax_section) -+ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *); -+ -+ /* Create a hash table for the linker. Different backends store -+ different information in this table. */ -+ struct bfd_link_hash_table * -+ (*_bfd_link_hash_table_create) (bfd *); -+ -+ /* Release the memory associated with the linker hash table. */ -+ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *); -+ -+ /* Add symbols from this object file into the hash table. */ -+ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *); -+ -+ /* Indicate that we are only retrieving symbol values from this section. */ -+ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *); -+ -+ /* Do a link based on the link_order structures attached to each -+ section of the BFD. */ -+ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *); -+ -+ /* Should this section be split up into smaller pieces during linking. */ -+ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *); -+ -+ /* Remove sections that are not referenced from the output. */ -+ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *); -+ -+ /* Attempt to merge SEC_MERGE sections. */ -+ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *); -+ -+ /* Is this section a member of a group? */ -+ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *); -+ -+ /* Discard members of a group. 
*/ -+ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *); -+ -+ /* Check if SEC has been already linked during a reloceatable or -+ final link. */ -+ void (*_section_already_linked) (bfd *, struct bfd_section *); -+ -+ /* Routines to handle dynamic symbols and relocs. */ -+#define BFD_JUMP_TABLE_DYNAMIC(NAME) \ -+ NAME##_get_dynamic_symtab_upper_bound, \ -+ NAME##_canonicalize_dynamic_symtab, \ -+ NAME##_get_synthetic_symtab, \ -+ NAME##_get_dynamic_reloc_upper_bound, \ -+ NAME##_canonicalize_dynamic_reloc -+ -+ /* Get the amount of memory required to hold the dynamic symbols. */ -+ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *); -+ /* Read in the dynamic symbols. */ -+ long (*_bfd_canonicalize_dynamic_symtab) -+ (bfd *, struct bfd_symbol **); -+ /* Create synthetized symbols. */ -+ long (*_bfd_get_synthetic_symtab) -+ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **, -+ struct bfd_symbol **); -+ /* Get the amount of memory required to hold the dynamic relocs. */ -+ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *); -+ /* Read in the dynamic relocs. */ -+ long (*_bfd_canonicalize_dynamic_reloc) -+ (bfd *, arelent **, struct bfd_symbol **); -+ -+ /* Opposite endian version of this target. */ -+ const struct bfd_target * alternative_target; -+ -+ /* Data for use by back-end routines, which isn't -+ generic enough to belong in this structure. */ -+ const void *backend_data; -+ -+} bfd_target; -+ -+bfd_boolean bfd_set_default_target (const char *name); -+ -+const bfd_target *bfd_find_target (const char *target_name, bfd *abfd); -+ -+const char ** bfd_target_list (void); -+ -+const bfd_target *bfd_search_for_target -+ (int (*search_func) (const bfd_target *, void *), -+ void *); -+ -+/* Extracted from format.c. */ -+bfd_boolean bfd_check_format (bfd *abfd, bfd_format format); -+ -+bfd_boolean bfd_check_format_matches -+ (bfd *abfd, bfd_format format, char ***matching); -+ -+bfd_boolean bfd_set_format (bfd *abfd, bfd_format format); -+ -+const char *bfd_format_string (bfd_format format); -+ -+/* Extracted from linker.c. */ -+bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec); -+ -+#define bfd_link_split_section(abfd, sec) \ -+ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec)) -+ -+void bfd_section_already_linked (bfd *abfd, asection *sec); -+ -+#define bfd_section_already_linked(abfd, sec) \ -+ BFD_SEND (abfd, _section_already_linked, (abfd, sec)) -+ -+/* Extracted from simple.c. */ -+bfd_byte *bfd_simple_get_relocated_section_contents -+ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table); -+ -+#ifdef __cplusplus -+} -+#endif -+#endif ---- a/arch/x86/include/asm/irq_vectors.h -+++ b/arch/x86/include/asm/irq_vectors.h -@@ -49,6 +49,7 @@ - #ifdef CONFIG_X86_32 - # define SYSCALL_VECTOR 0x80 - #endif -+#define KDBENTER_VECTOR 0x81 - - /* - * Vectors 0x30-0x3f are used for ISA interrupts. -@@ -102,6 +103,12 @@ - #define NUM_INVALIDATE_TLB_VECTORS 8 - - /* -+ * KDB_VECTOR will take over vector 0xfe when it is needed, as in theory -+ * it should not be used anyway. -+ */ -+#define KDB_VECTOR 0xfe -+ -+/* - * Local APIC timer IRQ vector is on a different priority level, - * to work around the 'lost local interrupt if more than 2 IRQ - * sources per level' errata. 
---- /dev/null -+++ b/arch/x86/include/asm/kdb.h -@@ -0,0 +1,140 @@ -+#ifndef _ASM_KDB_H -+#define _ASM_KDB_H -+ -+/* -+ * Kernel Debugger Architecture Dependent (x86) Global Headers -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2008 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+/* -+ * KDB_ENTER() is a macro which causes entry into the kernel -+ * debugger from any point in the kernel code stream. If it -+ * is intended to be used from interrupt level, it must use -+ * a non-maskable entry method. The vector is KDB_VECTOR, -+ * defined in hw_irq.h -+ */ -+#define KDB_ENTER() do {if (kdb_on && !KDB_IS_RUNNING()) { asm("\tint $129\n"); }} while(0) -+ -+/* -+ * Needed for exported symbols. -+ */ -+typedef unsigned long kdb_machreg_t; -+ -+/* -+ * Per cpu arch specific kdb state. Must be in range 0xff000000. -+ */ -+#define KDB_STATE_A_IF 0x01000000 /* Saved IF flag */ -+ -+ -+#ifdef CONFIG_X86_32 -+ -+#define kdb_machreg_fmt "0x%lx" -+#define kdb_machreg_fmt0 "0x%08lx" -+#define kdb_bfd_vma_fmt "0x%lx" -+#define kdb_bfd_vma_fmt0 "0x%08lx" -+#define kdb_elfw_addr_fmt "0x%x" -+#define kdb_elfw_addr_fmt0 "0x%08x" -+#define kdb_f_count_fmt "%ld" -+ -+#else /* CONFIG_X86_32 */ -+ -+#define kdb_machreg_fmt "0x%lx" -+#define kdb_machreg_fmt0 "0x%016lx" -+#define kdb_bfd_vma_fmt "0x%lx" -+#define kdb_bfd_vma_fmt0 "0x%016lx" -+#define kdb_elfw_addr_fmt "0x%x" -+#define kdb_elfw_addr_fmt0 "0x%016x" -+#define kdb_f_count_fmt "%ld" -+ -+/* -+ * Functions to safely read and write kernel areas. The {to,from}_xxx -+ * addresses are not necessarily valid, these functions must check for -+ * validity. If the arch already supports get and put routines with -+ * suitable validation and/or recovery on invalid addresses then use -+ * those routines, otherwise check it yourself. -+ */ -+ -+/* -+ * asm-i386 uaccess.h supplies __copy_to_user which relies on MMU to -+ * trap invalid addresses in the _xxx fields. Verify the other address -+ * of the pair is valid by accessing the first and last byte ourselves, -+ * then any access violations should only be caused by the _xxx -+ * addresses, -+ */ -+ -+#include -+ -+static inline int -+__kdba_putarea_size(unsigned long to_xxx, void *from, size_t size) -+{ -+ mm_segment_t oldfs = get_fs(); -+ int r; -+ char c; -+ c = *((volatile char *)from); -+ c = *((volatile char *)from + size - 1); -+ -+ if (to_xxx < PAGE_OFFSET) { -+ return kdb_putuserarea_size(to_xxx, from, size); -+ } -+ -+ set_fs(KERNEL_DS); -+ r = __copy_to_user_inatomic((void *)to_xxx, from, size); -+ set_fs(oldfs); -+ return r; -+} -+ -+static inline int -+__kdba_getarea_size(void *to, unsigned long from_xxx, size_t size) -+{ -+ mm_segment_t oldfs = get_fs(); -+ int r; -+ *((volatile char *)to) = '\0'; -+ *((volatile char *)to + size - 1) = '\0'; -+ -+ if (from_xxx < PAGE_OFFSET) { -+ return kdb_getuserarea_size(to, from_xxx, size); -+ } -+ -+ set_fs(KERNEL_DS); -+ r = __copy_to_user_inatomic(to, (void *)from_xxx, size); -+ set_fs(oldfs); -+ return r; -+} -+ -+/* For numa with replicated code/data, the platform must supply its own -+ * kdba_putarea_size and kdba_getarea_size routines. Without replication kdb -+ * uses the standard architecture routines. 
-+ */ -+#ifdef CONFIG_NUMA_REPLICATE -+extern int kdba_putarea_size(unsigned long to_xxx, void *from, size_t size); -+extern int kdba_getarea_size(void *to, unsigned long from_xxx, size_t size); -+#else -+#define kdba_putarea_size __kdba_putarea_size -+#define kdba_getarea_size __kdba_getarea_size -+#endif -+ -+static inline int -+kdba_verify_rw(unsigned long addr, size_t size) -+{ -+ unsigned char data[size]; -+ return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size)); -+} -+ -+#endif /* !CONFIG_X86_32 */ -+ -+static inline unsigned long -+kdba_funcptr_value(void *fp) -+{ -+ return (unsigned long)fp; -+} -+ -+#ifdef CONFIG_SMP -+extern void kdba_giveback_vector(int); -+#endif -+ -+#endif /* !_ASM_KDB_H */ ---- /dev/null -+++ b/arch/x86/include/asm/kdbprivate.h -@@ -0,0 +1,241 @@ -+#ifndef _ASM_KDBPRIVATE_H -+#define _ASM_KDBPRIVATE_H -+ -+/* -+ * Kernel Debugger Architecture Dependent (x86) Private Headers -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2008 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+typedef unsigned char kdb_machinst_t; -+ -+/* -+ * KDB_MAXBPT describes the total number of breakpoints -+ * supported by this architecure. -+ */ -+#define KDB_MAXBPT 16 -+ -+/* -+ * KDB_MAXHARDBPT describes the total number of hardware -+ * breakpoint registers that exist. -+ */ -+#define KDB_MAXHARDBPT 4 -+ -+/* Maximum number of arguments to a function */ -+#define KDBA_MAXARGS 16 -+ -+/* -+ * Support for ia32 debug registers -+ */ -+typedef struct _kdbhard_bp { -+ kdb_machreg_t bph_reg; /* Register this breakpoint uses */ -+ -+ unsigned int bph_free:1; /* Register available for use */ -+ unsigned int bph_data:1; /* Data Access breakpoint */ -+ -+ unsigned int bph_write:1; /* Write Data breakpoint */ -+ unsigned int bph_mode:2; /* 0=inst, 1=write, 2=io, 3=read */ -+ unsigned int bph_length:2; /* 0=1, 1=2, 2=BAD, 3=4 (bytes) */ -+ unsigned int bph_installed; /* flag: hw bp is installed */ -+} kdbhard_bp_t; -+ -+#define IA32_BREAKPOINT_INSTRUCTION 0xcc -+ -+#define DR6_BT 0x00008000 -+#define DR6_BS 0x00004000 -+#define DR6_BD 0x00002000 -+ -+#define DR6_B3 0x00000008 -+#define DR6_B2 0x00000004 -+#define DR6_B1 0x00000002 -+#define DR6_B0 0x00000001 -+#define DR6_DR_MASK 0x0000000F -+ -+#define DR7_RW_VAL(dr, drnum) \ -+ (((dr) >> (16 + (4 * (drnum)))) & 0x3) -+ -+#define DR7_RW_SET(dr, drnum, rw) \ -+ do { \ -+ (dr) &= ~(0x3 << (16 + (4 * (drnum)))); \ -+ (dr) |= (((rw) & 0x3) << (16 + (4 * (drnum)))); \ -+ } while (0) -+ -+#define DR7_RW0(dr) DR7_RW_VAL(dr, 0) -+#define DR7_RW0SET(dr,rw) DR7_RW_SET(dr, 0, rw) -+#define DR7_RW1(dr) DR7_RW_VAL(dr, 1) -+#define DR7_RW1SET(dr,rw) DR7_RW_SET(dr, 1, rw) -+#define DR7_RW2(dr) DR7_RW_VAL(dr, 2) -+#define DR7_RW2SET(dr,rw) DR7_RW_SET(dr, 2, rw) -+#define DR7_RW3(dr) DR7_RW_VAL(dr, 3) -+#define DR7_RW3SET(dr,rw) DR7_RW_SET(dr, 3, rw) -+ -+ -+#define DR7_LEN_VAL(dr, drnum) \ -+ (((dr) >> (18 + (4 * (drnum)))) & 0x3) -+ -+#define DR7_LEN_SET(dr, drnum, rw) \ -+ do { \ -+ (dr) &= ~(0x3 << (18 + (4 * (drnum)))); \ -+ (dr) |= (((rw) & 0x3) << (18 + (4 * (drnum)))); \ -+ } while (0) -+ -+#define DR7_LEN0(dr) DR7_LEN_VAL(dr, 0) -+#define DR7_LEN0SET(dr,len) DR7_LEN_SET(dr, 0, len) -+#define DR7_LEN1(dr) DR7_LEN_VAL(dr, 1) -+#define DR7_LEN1SET(dr,len) DR7_LEN_SET(dr, 1, len) -+#define DR7_LEN2(dr) DR7_LEN_VAL(dr, 2) -+#define DR7_LEN2SET(dr,len) 
DR7_LEN_SET(dr, 2, len) -+#define DR7_LEN3(dr) DR7_LEN_VAL(dr, 3) -+#define DR7_LEN3SET(dr,len) DR7_LEN_SET(dr, 3, len) -+ -+#define DR7_G0(dr) (((dr)>>1)&0x1) -+#define DR7_G0SET(dr) ((dr) |= 0x2) -+#define DR7_G0CLR(dr) ((dr) &= ~0x2) -+#define DR7_G1(dr) (((dr)>>3)&0x1) -+#define DR7_G1SET(dr) ((dr) |= 0x8) -+#define DR7_G1CLR(dr) ((dr) &= ~0x8) -+#define DR7_G2(dr) (((dr)>>5)&0x1) -+#define DR7_G2SET(dr) ((dr) |= 0x20) -+#define DR7_G2CLR(dr) ((dr) &= ~0x20) -+#define DR7_G3(dr) (((dr)>>7)&0x1) -+#define DR7_G3SET(dr) ((dr) |= 0x80) -+#define DR7_G3CLR(dr) ((dr) &= ~0x80) -+ -+#define DR7_L0(dr) (((dr))&0x1) -+#define DR7_L0SET(dr) ((dr) |= 0x1) -+#define DR7_L0CLR(dr) ((dr) &= ~0x1) -+#define DR7_L1(dr) (((dr)>>2)&0x1) -+#define DR7_L1SET(dr) ((dr) |= 0x4) -+#define DR7_L1CLR(dr) ((dr) &= ~0x4) -+#define DR7_L2(dr) (((dr)>>4)&0x1) -+#define DR7_L2SET(dr) ((dr) |= 0x10) -+#define DR7_L2CLR(dr) ((dr) &= ~0x10) -+#define DR7_L3(dr) (((dr)>>6)&0x1) -+#define DR7_L3SET(dr) ((dr) |= 0x40) -+#define DR7_L3CLR(dr) ((dr) &= ~0x40) -+ -+#define DR7_GD 0x00002000 /* General Detect Enable */ -+#define DR7_GE 0x00000200 /* Global exact */ -+#define DR7_LE 0x00000100 /* Local exact */ -+ -+extern kdb_machreg_t kdba_getdr6(void); -+extern void kdba_putdr6(kdb_machreg_t); -+ -+extern kdb_machreg_t kdba_getdr7(void); -+ -+struct kdba_running_process { -+ long sp; /* KDB may be on a different stack */ -+ long ip; /* eip when esp was set */ -+}; -+ -+static inline -+void kdba_unsave_running(struct kdba_running_process *k, struct pt_regs *regs) -+{ -+} -+ -+struct kdb_activation_record; -+extern void kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu, -+ struct kdb_activation_record *ar); -+ -+extern void kdba_wait_for_cpus(void); -+ -+ -+#ifdef CONFIG_X86_32 -+ -+#define DR_TYPE_EXECUTE 0x0 -+#define DR_TYPE_WRITE 0x1 -+#define DR_TYPE_IO 0x2 -+#define DR_TYPE_RW 0x3 -+ -+/* -+ * Platform specific environment entries -+ */ -+#define KDB_PLATFORM_ENV "IDMODE=x86", "BYTESPERWORD=4", "IDCOUNT=16" -+ -+/* -+ * Support for setjmp/longjmp -+ */ -+#define JB_BX 0 -+#define JB_SI 1 -+#define JB_DI 2 -+#define JB_BP 3 -+#define JB_SP 4 -+#define JB_PC 5 -+ -+typedef struct __kdb_jmp_buf { -+ unsigned long regs[6]; /* kdba_setjmp assumes fixed offsets here */ -+} kdb_jmp_buf; -+ -+extern int asmlinkage kdba_setjmp(kdb_jmp_buf *); -+extern void asmlinkage kdba_longjmp(kdb_jmp_buf *, int); -+#define kdba_setjmp kdba_setjmp -+ -+extern kdb_jmp_buf *kdbjmpbuf; -+ -+/* Arch specific data saved for running processes */ -+static inline -+void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs) -+{ -+ k->sp = current_stack_pointer; -+ __asm__ __volatile__ ( " lea 1f,%%eax; movl %%eax,%0 ; 1: " : "=r"(k->ip) : : "eax" ); -+} -+ -+extern void kdb_interrupt(void); -+ -+#define KDB_INT_REGISTERS 8 -+ -+#else /* CONFIG_X86_32 */ -+ -+extern kdb_machreg_t kdba_getdr(int); -+extern void kdba_putdr(int, kdb_machreg_t); -+ -+extern kdb_machreg_t kdb_getcr(int); -+ -+/* -+ * Platform specific environment entries -+ */ -+#define KDB_PLATFORM_ENV "IDMODE=x86_64", "BYTESPERWORD=8", "IDCOUNT=16" -+ -+/* -+ * reg indicies for x86_64 setjmp/longjmp -+ */ -+#define JB_RBX 0 -+#define JB_RBP 1 -+#define JB_R12 2 -+#define JB_R13 3 -+#define JB_R14 4 -+#define JB_R15 5 -+#define JB_RSP 6 -+#define JB_PC 7 -+ -+typedef struct __kdb_jmp_buf { -+ unsigned long regs[8]; /* kdba_setjmp assumes fixed offsets here */ -+} kdb_jmp_buf; -+ -+extern int asmlinkage kdba_setjmp(kdb_jmp_buf *); -+extern void asmlinkage 
kdba_longjmp(kdb_jmp_buf *, int); -+#define kdba_setjmp kdba_setjmp -+ -+extern kdb_jmp_buf *kdbjmpbuf; -+ -+/* Arch specific data saved for running processes */ -+register unsigned long current_stack_pointer asm("rsp") __used; -+ -+static inline -+void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs) -+{ -+ k->sp = current_stack_pointer; -+ __asm__ __volatile__ ( " lea 0(%%rip),%%rax; movq %%rax,%0 ; " : "=r"(k->ip) : : "rax" ); -+} -+ -+extern asmlinkage void kdb_interrupt(void); -+ -+#define KDB_INT_REGISTERS 16 -+ -+#endif /* !CONFIG_X86_32 */ -+ -+#endif /* !_ASM_KDBPRIVATE_H */ ---- a/arch/x86/include/asm/kdebug.h -+++ b/arch/x86/include/asm/kdebug.h -@@ -15,6 +15,8 @@ enum die_val { - DIE_DIE, - DIE_NMIWATCHDOG, - DIE_KERNELDEBUG, -+ DIE_KDEBUG_ENTER, -+ DIE_KDEBUG_LEAVE, - DIE_TRAP, - DIE_GPF, - DIE_CALL, ---- a/arch/x86/include/asm/ptrace.h -+++ b/arch/x86/include/asm/ptrace.h -@@ -16,6 +16,29 @@ - /* this struct defines the way the registers are stored on the - stack during a system call. */ - -+enum EFLAGS { -+ EF_CF = 0x00000001, -+ EF_PF = 0x00000004, -+ EF_AF = 0x00000010, -+ EF_ZF = 0x00000040, -+ EF_SF = 0x00000080, -+ EF_TF = 0x00000100, -+ EF_IE = 0x00000200, -+ EF_DF = 0x00000400, -+ EF_OF = 0x00000800, -+ EF_IOPL = 0x00003000, -+ EF_IOPL_RING0 = 0x00000000, -+ EF_IOPL_RING1 = 0x00001000, -+ EF_IOPL_RING2 = 0x00002000, -+ EF_NT = 0x00004000, /* nested task */ -+ EF_RF = 0x00010000, /* resume */ -+ EF_VM = 0x00020000, /* virtual mode */ -+ EF_AC = 0x00040000, /* alignment */ -+ EF_VIF = 0x00080000, /* virtual interrupt */ -+ EF_VIP = 0x00100000, /* virtual interrupt pending */ -+ EF_ID = 0x00200000, /* id */ -+}; -+ - #ifndef __KERNEL__ - - struct pt_regs { ---- /dev/null -+++ b/arch/x86/kdb/ChangeLog -@@ -0,0 +1,262 @@ -+2008-11-26 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc6-x86-1. -+ -+2008-11-12 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc4-x86-1. -+ -+2008-11-04 Jay Lan -+ -+ * kdb-v4.4-2.6.28-rc3-x86-1. -+ -+2008-10-28 Jay Lan -+ -+ * "Commandeer vector 0xfe for KDB_VECTOR", version 2. -+ Cliff Wickman -+ * kdb-v4.4-2.6.28-rc2-x86-2. -+ -+2008-10-27 Jay Lan -+ -+ * Commandeer vector 0xfe for KDB_VECTOR, -+ Cliff Wickman -+ * Fix KDB-KDUMP problems on IBM xSeries, -+ Bernhard Walle , Jay Lan -+ * Fix crash when panic() from task context, -+ Bernhard Walle -+ * kdb-v4.4-2.6.28-rc2-x86-1. -+ -+2008-10-20 Jay Lan -+ -+ * kdb-v4.4-2.6.27-x86-1. -+ -+2008-09-30 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc8-x86-1. -+ -+2008-09-22 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc7-x86-1. -+ -+2008-09-03 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc5-x86-1. -+ -+2008-08-19 Jay Lan -+ -+ * kdb-v4.4-2.6.27-rc3-x86-1. -+ -+2008-08-14 Jay Lan -+ -+ * Support 'kdump' command to take a kdump vmcore from KDB, -+ Dan Aloni (da-x@monatomic.org), -+ Jason Xiao (jidong.xiao@gmail.com), -+ Jay Lan (jlan@sgi.com) -+ * kdb-v4.4-2.6.27-rc2-x86-2. -+ -+2008-08-06 Jay Lan -+ -+ * Fix up the NULL pointer deference issue in ohci_kdb_poll_char, -+ Jason Xiao -+ * Backtrace on x86_64 and i386 were incomplete since 2.6.27-rc2. -+ * kdb-v4.4-2.6.27-rc2-x86-1. -+ -+2008-07-18 Jay Lan -+ -+ * support Hardware Breakpoint (bph/bpha) commands -+ IA64: Greg Banks -+ X86: Konstantin Baydarov -+ * kdb-v4.4-2.6.26-x86-2. -+ -+2008-07-14 Jay Lan -+ -+ * kdb-v4.4-2.6.26-x86-1. -+ -+2008-07-11 Jay Lan -+ -+ * New commands and some fixups and enhancements, -+ Joe Korty -+ John Blackwood -+ Jim Houston -+ - Use the non-sleeping copy_from_user_atomic. -+ - Enhance kdb_cmderror diagnostic output. 
-+ - Expand the KDB 'duplicate command' error message. -+ - Touch NMI watchdog in various KDB busy-loops. -+ - Support IMB HS20 Blade 8843 platform. -+ - Display exactly which cpus needed an NMI to get them into kdb. -+ - Better document that kdb's 'ps A' command can be used to show -+ _all_ processes and threads -+ - Suppress KDB boottime INFO messages if quiet boot. -+ - Add a KDB breakpoint to the OOPs path. -+ - Add CONFIG_DISCONTIGMEM support to kdbm_memmap. -+ - Extend the KDB task command to handle CONFIG_NUMA fields. -+ - Extend the KDB vm command to support NUMA stuff. -+ - Create the KDB mempolicy command. -+ - Create a pgdat command for KDB. -+ - Fix a hang on boot on some i386 systems. -+ * kdb-v4.4-2.6.26-rc9-x86-1. -+ -+2008-06-30 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc8-x86-1. -+ -+2008-06-25 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc7-x86-1. -+ -+2008-06-06 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc5-x86-1. -+ -+2008-05-30 Jay Lan -+ -+ * kdb-v4.4-2.6.26-rc4-x86-1. -+ -+2008-05-20 Jay Lan -+ -+ * Merged and to . -+ * Merged and to -+ . -+ * kdb-v4.4-2.6.26-rc3-x86-1. -+ -+2008-05-15 Jay Lan -+ -+ * Fixed the i386 backtrace problem where KDB failed to find stacks -+ in the kernel space. -+ * kdb-v4.4-2.6.26-rc1-x86-3. -+ -+2008-05-14 Jay Lan -+ -+ * Fixed a bug that bb_all scans only odd number entries of kallsyms. -+ * kdb-v4.4-2.6.26-rc1-x86-2. -+ -+2008-05-13 Jay Lan -+ -+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1. -+ * kdb-v4.4-2.6.26-rc1-x86-1. -+ -+2008-05-13 Jay Lan -+ -+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1. -+ * Fixed a couple of x86_64 problems: -+ - "iret_label" are replaced by "irq_return". -+ - bb1 failure on ia32_sysenter_target() & ia32_cstar_target() -+ * kdb-v4.4-2.6.25-x86-2. -+ -+2008-04-17 Jay Lan -+ -+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1. -+ * kdb-v4.4-2.6.25-x86-1. -+ -+2008-03-19 Jay Lan -+ -+ * i386: systenter_entry was replaced with ia32_sysenter_target since -+ 2.6.25-rc1, Jay Lan -+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1. -+ * kdb-v4.4-2.6.25-rc6-x86-2. -+ -+2008-03-16 Jay Lan -+ -+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1. -+ * kdb-v4.4-2.6.25-rc6-x86-1. -+ -+2008-03-03 Jay Lan -+ -+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1. -+ * kdb-v4.4-2.6.25-rc3-x86-1. -+ -+2008-02-26 Jay Lan -+ -+ * remove 'fastcall' from kdb code. -+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1. -+ * kdb-v4.4-2.6.25-rc2-x86-1. -+ -+2008-02-19 Jay Lan -+ -+ * Known problem: backtrace for i386 is broken. -+ * kdb-v4.4-2.6.25-rc1-x86-1. -+ -+2008-02-01 Jay Lan -+ -+ * Backed out USB UHCI support since it caused dropped characters and -+ broke OHCI. -+ * Restored "archkdbcommon" commands for x86. It was lost at the x86 -+ merge. -+ * Detecting if the HC was "busy", Aaron Young -+ * kdb-v4.4-2.6.24-x86-2. -+ -+2008-01-29 Jay Lan -+ -+ * kdb-v4.4-2.6.24-x86-1. -+ -+2008-01-22 Jay Lan -+ -+ * USB UHCI kdb support, Konstantin Baydarov -+ * kdb-v4.4-2.6.24-rc8-x86-3. -+ -+2008-01-18 Jay Lan -+ -+ * USB EHCI kdb support, Aaron Young -+ * kdb-v4.4-2.6.24-rc8-x86-2. -+ -+2008-01-18 Jay Lan -+ -+ * kdb-v4.4-2.6.24-rc8-x86-1. 
-+ -+2008-01-09 Jay Lan -+ -+ * Merge arch/x86/kdb/kdba_io_64.c and arch/x86/kdb/kdba_io_32.c to -+ arch/x86/kdb/kdba_io.c -+ * Merge arch/x86/kdb/kdba_id_64.c and arch/x86/kdb/kdba_id_32.c to -+ arch/x86/kdb/kdba_id.c -+ * Merge arch/x86/kdb/pc_keyb_64.h and arch/x86/kdb/pc_keyb_32.h to -+ arch/x86/kdb/pc_keyb.h -+ * kdb-v4.4-2.6.24-rc7-x86-2. -+ -+2008-01-07 Jay Lan -+ -+ * kdb-v4.4-2.6.24-rc7-x86-1. -+ -+2007-12-21 Jay Lan -+ -+ * Renamed kdb/kdba_bt_x86.c to arch/x86/kdba_bt.c. -+ * Find gcc options 'no-optimize-sibling-calls' & 'regparm' from -+ $(KBUILD_CFLAGS) in arch/x86/kdb/Makefile_{32,64}. We used to -+ get them from $(CFLAGS). -+ * Default regparm to 3 on x86_32 if not defined. -+ * kdb v4.4-2.6.24-rc6-x86-1. -+ -+2007-12-12 Jay Lan -+ -+ * Fixed a Makefile_32 error. -+ * kdb v4.4-2.6.24-rc5-x86-1. -+ -+2007-12-05 Jay Lan -+ -+ * Fixed a 'sysctl table check failed' problem. -+ * kdb v4.4-2.6.24-rc4-x86-1. -+ -+2007-11-26 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc3-x86-1. -+ -+2007-11-13 Jay Lan -+ -+ * Back ported "New KDB USB interface" from Aaron Young in -+ v4.4-2.6.23-{i386,x86_64}-2 to 2.6.24 kdb patchset. -+ * Fixed a make problem at arch/x86/Makefile_{32,64}. -+ * kdb v4.4-2.6.24-rc2-x86-2. -+ -+2007-11-12 Jay Lan -+ -+ * kdb v4.4-2.6.24-rc2-x86-1. -+ -+2007-11-09 Jay Lan -+ -+ * Rebase to 2.6.24-rc1 kernel -+ * - merged kdb-v4.4-2.6.23-i386-1 and kdb-v4.4-2.6.23-x86_64-1 -+ * into kdb-v4.4-2.6.24-rc1-x86-1 -+ * - Fields "done", "sglist_len", and "pid" are removed from -+ * struct scsi_cmnd. Thus, these fields are no longer displayed -+ * on "sc" command. -+ * kdb v4.4-2.6.24-rc1-x86-1. ---- /dev/null -+++ b/arch/x86/kdb/ChangeLog_32 -@@ -0,0 +1,865 @@ -+2007-11-08 Jay Lan -+ -+ * New KDB USB interface, Aaron Young -+ * 1. This patch allows KDB to work with any Host Contoller driver -+ * and call the correct HC driver poll routine (as long as the -+ * HC driver provides a .kdb_poll_char routine via it's -+ * associated hc_driver struct). -+ * 2. Hotplugged keyboards are now recognized by KDB. -+ * 3. Currently KDB can only make use of 1 USB type keyboard. -+ * New code can handle up to 8 attached keyboards - input is -+ * multiplexed from all of them while in kdb. -+ * kdb v4.4-2.6.23-common-2. -+ -+2007-10-24 Jay Lan -+ -+ * kdb v4.4-2.6.23-i386-1. -+ -+2007-09-26 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc8-i386-1. -+ -+2007-09-21 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc7-i386-1. -+ -+2007-09-12 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc6-i386-1. -+ -+2007-09-06 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc5-i386-1. -+ -+2007-08-30 Keith Owens -+ -+ * New i386/x86_64 backtrace requires that kdb_save_running() does not -+ exit until after kdb_main_loop() has completed. -+ * kdb v4.4-2.6.23-rc4-i386-2. -+ -+2007-08-30 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc4-i386-1. -+ -+2007-08-24 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc3-i386-1. -+ -+2007-08-07 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc2-i386-1. -+ -+2007-07-31 Keith Owens -+ -+ * Delete obsolete kdba_bt.c. -+ * kdb v4.4-2.6.23-rc1-i386-2. -+ -+2007-07-30 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc1-i386-1. -+ -+2007-07-26 Keith Owens -+ -+ * New x86 backtrace code. -+ * kdb v4.4-2.6.22-i386-2. -+ -+2007-07-09 Keith Owens -+ -+ * kdb v4.4-2.6.22-i386-1. -+ -+2007-07-02 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc7-i386-1. -+ -+2007-06-20 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc5-i386-1. -+ -+2007-06-08 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc4-i386-1. -+ -+2007-05-28 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc3-i386-1. 
-+ -+2007-05-22 Keith Owens -+ -+ * Register KDBENTER_VECTOR early on the boot cpu. -+ * kdb v4.4-2.6.22-rc2-i386-2. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc2-i386-1. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc1-i386-1. -+ -+2007-05-17 Keith Owens -+ -+ * Update dumpregs comments for rdmsr and wrmsr commands. -+ Bernardo Innocenti. -+ * kdb v4.4-2.6.21-i386-3. -+ -+2007-05-15 Keith Owens -+ -+ * Change kdba_late_init to kdba_arch_init so KDB_ENTER() can be used -+ earlier. -+ * kdb v4.4-2.6.21-i386-2. -+ -+2007-04-29 Keith Owens -+ -+ * kdb v4.4-2.6.21-i386-1. -+ -+2007-04-16 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc7-i386-1. -+ -+2007-04-10 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc6-i386-1. -+ -+2007-04-02 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc5-i386-1. -+ -+2007-03-19 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc4-i386-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc3-i386-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc2-i386-1. -+ -+2007-03-01 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc1-i386-1. -+ -+2007-03-01 Keith Owens -+ -+ * Remove sparse warnings. -+ * kdb v4.4-2.6.20-i386-3. -+ -+2007-02-16 Keith Owens -+ -+ * Initialise variable bits of struct disassemble_info each time. -+ * kdb v4.4-2.6.20-i386-2. -+ -+2007-02-06 Keith Owens -+ -+ * kdb v4.4-2.6.20-i386-1. -+ -+2007-02-01 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc7-i386-1. -+ -+2007-01-08 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc4-i386-1. -+ -+2007-01-02 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc3-i386-1. -+ -+2006-12-20 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc1-i386-1. -+ -+2006-11-30 Keith Owens -+ -+ * kdb v4.4-2.6.19-i386-1. -+ -+2006-11-27 Keith Owens -+ -+ * Only use VT keyboard if the command line allows it and ACPI indicates -+ that there is an i8042. -+ * kdb v4.4-2.6.19-rc6-i386-2. -+ -+2006-11-20 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc6-i386-1. -+ -+2006-11-09 Keith Owens -+ -+ * Change kdb() to fastcall. -+ * Add unwind info to kdb_call(). Steve Lord. -+ * Only use VT console if the command line allows it. -+ * kdb v4.4-2.6.19-rc5-i386-2. -+ -+2006-11-08 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc5-i386-1. -+ -+2006-11-01 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc4-i386-1. -+ -+2006-10-24 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc3-i386-1. -+ -+2006-10-24 Keith Owens -+ -+ * Remove redundant regs and envp parameters. -+ * kdb v4.4-2.6.19-rc2-i386-2. -+ -+2006-10-18 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc2-i386-1. -+ -+2006-10-11 Keith Owens -+ -+ * Move kdbm_x86.c from the i386 to the common KDB patch. -+ * Make the KDBENTER_VECTOR an interrupt gate instead of a trap gate, it -+ simplifies the code and disables interrupts on KDBENTER(). -+ * Exclude the KDBENTER_VECTOR from irq assignment. -+ * kdb v4.4-2.6.19-rc1-i386-2. -+ -+2006-10-09 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc1-i386-1. -+ -+2006-10-06 Keith Owens -+ -+ * Remove #include -+ * kdb v4.4-2.6.18-i386-2. -+ -+2006-09-20 Keith Owens -+ -+ * kdb v4.4-2.6.18-i386-1. -+ -+2006-09-15 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc7-i386-1. -+ -+2006-08-30 Keith Owens -+ -+ * Add warning for problems when following alternate stacks. -+ * kdb v4.4-2.6.18-rc5-i386-3. -+ -+2006-08-29 Keith Owens -+ -+ * Rewrite all backtrace code. -+ * kdb v4.4-2.6.18-rc5-i386-2. -+ -+2006-08-28 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc5-i386-1. -+ -+2006-08-08 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc4-i386-1. -+ -+2006-08-04 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc3-i386-1. -+ -+2006-07-18 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc2-i386-1. 
-+ -+2006-07-12 Keith Owens -+ -+ * Remove dead KDB_REASON codes. -+ * sparse cleanups. -+ * kdb v4.4-2.6.18-rc1-i386-2. -+ -+2006-07-07 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc1-i386-1. -+ -+2006-07-04 Keith Owens -+ -+ * Make KDB rendezvous on i386 a two stage approach. -+ * Clean up generation of KDB interrupt code. -+ * Move smp_kdb_stop() and smp_kdb_interrupt() to kdbasupport.c. -+ * Move setting of interrupt traps to kdbasupport.c. -+ * Remove KDB hooks from arch/i386/kernel smp.c, smpboot.c, i8259.c, -+ io_apic.c. -+ * Add KDB_REASON_CPU_UP support. -+ * Move per cpu setup to kdba_cpu_up(). -+ * Rework support for 4K stacks to make backtrace more accurate. -+ * Add BTSP option to get the full backtrace, including kdb routines. -+ * Delete kdba_enable_mce, architectures now do their own setup. -+ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr, -+ page_fault_mca. Only ever implemented on x86, difficult to maintain -+ and rarely used in the field. -+ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp. -+ * kdb v4.4-2.6.17-i386-2. -+ -+2006-06-19 Keith Owens -+ -+ * kdb v4.4-2.6.17-i386-1. -+ -+2006-05-25 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc5-i386-1. -+ -+2006-05-15 Keith Owens -+ -+ * Refresh bfd related files from binutils 2.16.91.0.2. -+ * kdb v4.4-2.6.17-rc4-i386-2. -+ -+2006-05-12 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc4-i386-1. -+ -+2006-04-28 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc3-i386-1. -+ -+2006-04-22 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc2-i386-1. -+ -+2006-04-11 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc1-i386-1. -+ -+2006-03-30 Keith Owens -+ -+ * Change CONFIG_LKCD to CONFIG_LKCD_DUMP. -+ * kdb v4.4-2.6.16-i386-3. -+ -+2006-03-24 Keith Owens -+ -+ * Define a dummy kdba_wait_for_cpus(). -+ * kdb v4.4-2.6.16-i386-2. -+ -+2006-03-21 Keith Owens -+ -+ * kdb v4.4-2.6.16-i386-1. -+ -+2006-03-14 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc6-i386-1. -+ -+2006-02-28 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc5-i386-1. -+ -+2006-02-20 Nathan Scott -+ -+ * kdb v4.4-2.6.16-rc4-i386-1. -+ -+2006-02-06 Keith Owens -+ -+ * Change CONFIG_CRASH_DUMP to CONFIG_LKCD. -+ * kdb v4.4-2.6.16-rc2-i386-2. -+ -+2006-02-06 Keith Owens -+ -+ * kdb v4.4-2.6.16-rc2-i386-1. -+ -+2006-01-18 Keith Owens -+ -+ * kdb v4.4-2.6.16-rc1-i386-1. -+ -+2006-01-08 Keith Owens -+ -+ * Add DIE_KDEBUG_ENTER and DIE_KDEBUG_LEAVE to notify_die. -+ * kdb v4.4-2.6.15-i386-2. -+ -+2006-01-04 Keith Owens -+ -+ * Remove some inlines and the last vestige of CONFIG_NUMA_REPLICATE. -+ * Read the keyboard acknowledgment after sending a character. SuSE -+ Bugzilla 60240. -+ * kdb v4.4-2.6.15-i386-1. -+ -+2005-12-25 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc7-i386-1. -+ -+2005-12-20 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc6-i386-1. -+ -+2005-12-05 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc5-i386-1. -+ -+2005-12-02 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc4-i386-1. -+ -+2005-11-30 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc3-i386-1. -+ -+2005-11-21 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc2-i386-1. -+ -+2005-11-15 Keith Owens -+ -+ * kdb v4.4-2.6.15-rc1-i386-1. -+ -+2005-10-28 Keith Owens -+ -+ * kdb v4.4-2.6.14-i386-1. -+ -+2005-10-21 Keith Owens -+ -+ * kdb v4.4-2.6.14-rc5-i386-1. -+ -+2005-10-11 Keith Owens -+ -+ * kdb v4.4-2.6.14-rc4-i386-1. -+ -+2005-10-04 Keith Owens -+ -+ * kdb v4.4-2.6.14-rc3-i386-1. -+ -+2005-09-21 Keith Owens -+ -+ * Support kdb_current_task in register display and modify commands. -+ * kdb v4.4-2.6.14-rc2-i386-1. -+ -+2005-09-20 Keith Owens -+ -+ * Remove use of __STDC_VERSION__ in ansidecl.h. 
-+ * kdb v4.4-2.6.14-rc1-i386-1. -+ -+2005-08-29 Keith Owens -+ -+ * kdb v4.4-2.6.13-i386-1. -+ -+2005-08-24 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc7-i386-1. -+ -+2005-08-08 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc6-i386-1. -+ -+2005-08-02 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc5-i386-1. -+ -+2005-07-30 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc4-i386-1. -+ -+2005-07-22 Keith Owens -+ -+ * Compile fix for kprobes. -+ * kdb v4.4-2.6.13-rc3-i386-2. -+ -+2005-07-19 Keith Owens -+ -+ * Add support for USB keyboard (OHCI only). Aaron Young, SGI. -+ * kdb v4.4-2.6.13-rc3-i386-1. -+ -+2005-07-08 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc2-i386-1. -+ -+2005-07-01 Keith Owens -+ -+ * kdb v4.4-2.6.13-rc1-i386-1. -+ -+2005-06-19 Keith Owens -+ -+ * gcc 4 compile fix, remove extern kdb_hardbreaks. Steve Lord. -+ * kdb v4.4-2.6.12-i386-2. -+ -+2005-06-18 Keith Owens -+ -+ * kdb v4.4-2.6.12-i386-1. -+ -+2005-06-08 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc6-i386-1. -+ -+2005-05-25 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc5-i386-1. -+ -+2005-05-08 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc4-i386-1. -+ -+2005-04-21 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc3-i386-1. -+ -+2005-04-06 Keith Owens -+ -+ * kdb v4.4-2.6.12-rc2-i386-1. -+ -+2005-03-29 Keith Owens -+ -+ * Replace __copy_to_user with __copy_to_user_inatomic. -+ * kdb v4.4-2.6.12-rc1-i386-1. -+ -+2005-03-08 Keith Owens -+ -+ * Coexistence patches for lkcd. -+ * kdb v4.4-2.6.11-i386-2. -+ -+2005-03-03 Keith Owens -+ -+ * kdb v4.4-2.6.11-i386-1. -+ -+2005-02-14 Keith Owens -+ -+ * kdb v4.4-2.6.11-rc4-i386-1. -+ -+2005-02-08 Keith Owens -+ -+ * kdb v4.4-2.6.11-rc3-bk4-i386-1. -+ -+2005-02-03 Keith Owens -+ -+ * kdb v4.4-2.6.11-rc3-i386-1. -+ -+2005-01-27 Keith Owens -+ -+ * kdb v4.4-2.6.11-rc2-i386-1. -+ -+2005-01-12 Keith Owens -+ -+ * kdb v4.4-2.6.11-rc1-i386-1. -+ -+2004-12-25 Keith Owens -+ -+ * kdb v4.4-2.6.10-i386-1. -+ -+2004-12-07 Keith Owens -+ -+ * kdb v4.4-2.6.10-rc3-i386-1. -+ -+2004-11-23 Keith Owens -+ -+ * Coexist with asmlinkage/fastcall changes. -+ * kdb v4.4-2.6.10-rc2-i386-1. -+ -+2004-10-29 Keith Owens -+ -+ * Handle change defintions for hard and soft irq context. -+ * Make stack switch in kdb backtrace look more like the oops output. -+ * kdb v4.4-2.6.10-rc1-i386-1. -+ -+2004-10-19 Keith Owens -+ -+ * kdb v4.4-2.6.9-i386-1. -+ -+2004-10-12 Keith Owens -+ -+ * kdb v4.4-2.6.9-rc4-i386-1. -+ -+2004-10-01 Keith Owens -+ -+ * kdb v4.4-2.6.9-rc3-i386-1. -+ -+2004-09-30 Keith Owens -+ -+ * Add stackdepth command. -+ * Handle backtrace with separate soft and hard irq stacks -+ (CONFIG_4KSTACKS). -+ * Work around RESTORE_ALL macro, which can only be used once. -+ * Export kdba_dumpregs. Bryan Cardillo, UPenn. -+ * kdb v4.4-2.6.9-rc2-i386-2. -+ -+2004-09-14 Keith Owens -+ -+ * kdb v4.4-2.6.9-rc2-i386-1. -+ -+2004-08-27 Keith Owens -+ -+ * kdb v4.4-2.6.9-rc1-i386-1. -+ -+2004-08-14 Keith Owens -+ -+ * kdb v4.4-2.6.8-i386-1. -+ -+2004-08-12 Keith Owens -+ -+ * kdb v4.4-2.6.8-rc4-i386-1. -+ -+2004-08-04 Keith Owens -+ -+ * kdb v4.4-2.6.8-rc3-i386-1. -+ -+2004-07-18 Keith Owens -+ -+ * kdb v4.4-2.6.8-rc2-i386-1. -+ -+2004-07-12 Keith Owens -+ -+ * kdb v4.4-2.6.8-rc1-i386-1. -+ -+2004-06-16 Keith Owens -+ -+ * kdb v4.4-2.6.7-i386-1. -+ -+2004-06-10 Keith Owens -+ -+ * kdb v4.4-2.6.7-rc3-i386-1. -+ -+2004-06-09 Keith Owens -+ -+ * Namespace clean up. Mark code/variables as static when it is only -+ used in one file, delete dead code/variables. -+ * kdb v4.4-2.6.7-rc2-i386-3. -+ -+2004-06-08 Keith Owens -+ -+ * Whitespace clean up, no code changes. 
-+ * kdb v4.4-2.6.7-rc2-i386-2. -+ -+2004-06-07 Keith Owens -+ -+ * Force KALLSYMS and KALLSYMS_ALL for CONFIG_KDB. -+ * kdb v4.4-2.6.7-rc2-i386-1. -+ -+2004-06-06 Keith Owens -+ -+ * Correct Kconfig help text. -+ * Coexist with CONFIG_REGPARM. -+ * Add standard archkdb commands. -+ * Move kdb_{get,put}userarea_size definitions to linux/kdb.h. -+ * kdb v4.4-2.6.6-i386-2. -+ -+2004-05-23 Keith Owens -+ -+ * Move bfd.h and ansidecl.h from arch/$(ARCH)/kdb to include/asm-$(ARCH). -+ * Update copyright notices. -+ * kdb v4.4-2.6.6-i386-1. -+ -+2004-05-10 Keith Owens -+ -+ * kdb v4.3-2.6.6-i386-1. -+ -+2004-05-06 Keith Owens -+ -+ * kdb v4.3-2.6.6-rc3-i386-1. -+ -+2004-05-06 Keith Owens -+ -+ * kdb v4.3-2.6.6-rc2-i386-1. -+ -+2004-04-30 Keith Owens -+ -+ * kdb v4.3-2.6.6-rc1-i386-1. -+ -+2004-04-05 Keith Owens -+ -+ * kdb v4.3-2.6-5-i386-1. -+ -+2004-02-29 Keith Owens -+ -+ * kdb v4.3-2.6-4-rc1-i386-1. -+ -+2004-02-18 Keith Owens -+ -+ * kdb v4.3-2.6-3-i386-1. -+ -+2004-02-17 Keith Owens -+ -+ * Pick up changes from Jim Houston for 2.6. -+ * Sync with kdb v4.3-2.4.25-rc1-i386-1. -+ * Adjust for LDT changes in i386 mainline. -+ * Convert longjmp buffers from static to dynamic allocation, for large -+ cpu counts. -+ * Do not use USB keyboard if it has not been probed. -+ * Do not print section data, 2.6 kallsyms does not support sections :(. -+ * kdb v4.3-2.6-3-rc3-i386-1. -+ -+2003-08-29 Keith Owens -+ -+ * kdb v4.3-2.4.22-i386-1. -+ -+2003-08-05 Keith Owens -+ -+ * Remove duplicate setting of trap for machine_check. -+ * Only reset keyboard when CONFIG_VT_CONSOLE is defined. -+ -+2003-07-27 Keith Owens -+ -+ * kdb v4.3-2.4.22-pre8-i386-5. -+ -+2003-07-20 Keith Owens -+ -+ * Remove compile warning on x86 commands. -+ * kdb v4.3-2.4.21-i386-5. -+ -+2003-07-08 Keith Owens -+ -+ * Add new x86 commands - rdv, gdt, idt, ldt, ldtp, ptex. -+ Vamsi Krishna S., IBM. -+ * kdb v4.3-2.4.21-i386-4. -+ -+2003-07-01 Keith Owens -+ -+ * Convert kdba_find_return() to two passes to reduce false positives. -+ * Correct jmp disp8 offset calculation for out of line lock code. -+ * Use NMI for kdb IPI in clustered APIC mode. Sachin Sant, IBM. -+ * kdb v4.3-2.4.21-i386-3. -+ -+2003-06-23 Keith Owens -+ -+ * Sync with XFS 2.4.21 tree. -+ * kdb v4.3-2.4.21-i386-2. -+ -+2003-06-20 Keith Owens -+ -+ * kdb v4.3-2.4.21-i386-1. -+ -+2003-06-20 Keith Owens -+ -+ * Add CONFIG_KDB_CONTINUE_CATASTROPHIC. -+ * Correct KDB_ENTER() definition. -+ * kdb v4.3-2.4.20-i386-1. -+ -+2003-05-02 Keith Owens -+ -+ * Add kdba_fp_value(). -+ * Limit backtrace size to catch loops. -+ * Add read/write access to user pages. Vamsi Krishna S., IBM -+ * Clean up USB keyboard support. Steven Dake. -+ * kdb v4.2-2.4.20-i386-1. -+ -+2003-04-04 Keith Owens -+ -+ * Workarounds for scheduler bugs. -+ * kdb v4.1-2.4.20-i386-1. -+ -+2003-03-16 Keith Owens -+ -+ * Each cpu saves its state as it enters kdb or before it enters code -+ which cannot call kdb, converting kdb from a pull to a push model. -+ * Clean up kdb interaction with CONFIG_SERIAL_CONSOLE. -+ * Removal of special cases for i386 backtrace from common code -+ simplifies the architecture code. -+ * Add command to dump i386 struct pt_regs. -+ * kdb v4.0-2.4.20-i386-1. -+ -+2003-02-03 Keith Owens -+ -+ * Register kdb commands early. -+ * Handle KDB_ENTER() when kdb=off. -+ * Optimize __kdba_getarea_size when width is a constant. -+ * Decode oops via kallsyms if it is available. -+ * Update copyright notices to 2003. -+ * Handle call *disp32(%reg) in backtrace. -+ * Correct keyboard freeze. 
Ashish Kalra. -+ * Add command history and editing. Sonic Zhang. -+ * kdb_toggleled is conditional on KDB_BLINK_LED. Bernhard Fischer. -+ * Allow tab on serial line for symbol completion. -+ * Ignore KDB_ENTER() when kdb is already running. -+ * kdb v3.0-2.4.20-i386-1. -+ -+2002-11-29 Keith Owens -+ -+ * Upgrade to 2.4.20. -+ * kdb v2.5-2.4.20-i386-1. -+ -+2002-11-14 Keith Owens -+ -+ * Upgrade to 2.4.20-rc1. -+ * kdb v2.5-2.4.20-rc1-i386-1. -+ -+2002-11-14 Keith Owens -+ -+ * General clean up of handling for breakpoints and single stepping over -+ software breakpoints. -+ * Accept ff 1x as well as ff dx for call *(%reg) in backtrace. -+ * kdb v2.5-2.4.19-i386-1. -+ -+2002-11-01 Keith Owens -+ -+ * Prevent SMP IRQ overwriting KDB_ENTER(). -+ * kdb v2.4-2.4.19-i386-2. -+ -+2002-10-31 Keith Owens -+ -+ * Avoid KDB_VECTOR conflict with DUMP_VECTOR. -+ * Remove kdb_eframe_t. -+ * Sanity check if we have pt_regs. -+ * Remove kdba_getcurrentframe(). -+ * Reinstate missing nmi_watchdog/kdb hook. -+ * kdb v2.4-2.4.19-i386-1. -+ -+2002-10-17 Keith Owens -+ -+ * Correct compile with CONFIG_VT_CONSOLE=n. -+ * kdb v2.3-2.4.19-i386-5. -+ -+2002-10-04 Keith Owens -+ -+ * Add USB keyboard option. -+ * Minimize differences between patches for 2.4 and 2.5 kernels. -+ * kdb v2.3-2.4.19-i386-4. -+ -+2002-08-10 Keith Owens -+ -+ * Replace kdb_port with kdb_serial to support memory mapped I/O. -+ Note: This needs kdb v2.3-2.4.19-common-2 or later. -+ * kdb v2.3-2.4.19-i386-3. -+ -+2002-08-09 Keith Owens -+ -+ * Use -fno-optimize-sibling-calls for kdb if gcc supports it. -+ * .text.lock does not consume an activation frame. -+ * kdb v2.3-2.4.19-i386-2. -+ -+2002-08-07 Keith Owens -+ -+ * Upgrade to 2.4.19. -+ * Remove individual SGI copyrights, the general SGI copyright applies. -+ * New .text.lock name. Hugh Dickins. -+ * Set KERNEL_CS in kdba_getcurrentframe. Hugh Dickins. -+ * Clean up disassembly layout. Hugh Dickins, Keith Owens. -+ * Replace hard coded stack size with THREAD_SIZE. Hugh Dickins. -+ * Better stack layout on bt with no frame pointers. Hugh Dickins. -+ * Make i386 IO breakpoints (bpha
IO) work again. -+ Martin Wilck, Keith Owens. -+ * Remove fixed KDB_MAX_COMMANDS size. -+ * Add set_fs() around __copy_to_user on kernel addresses. -+ Randolph Chung. -+ * Position i386 for CONFIG_NUMA_REPLICATE. -+ * kdb v2.3-2.4.19-i386-1. -+ -+2002-07-09 Keith Owens -+ -+ * Upgrade to 2.4.19-rc1. -+ -+2002-06-14 Keith Owens -+ -+ * Upgrade to 2.4.19-pre10. -+ * kdb v2.1-2.4.19-pre10-i386-1. -+ -+2002-04-09 Keith Owens -+ -+ * Upgrade to 2.4.19-pre6. -+ * kdb v2.1-2.4.19-pre6-i386-1. -+ -+2002-02-26 Keith Owens -+ -+ * Upgrade to 2.4.18. -+ * kdb v2.1-2.4.18-i386-1. -+ -+2002-01-18 Keith Owens -+ -+ * Use new kdb_get/put functions. -+ * Define kdba_{get,put}area_size functions for i386. -+ * Remove over-engineered dblist callback functions. -+ * Correctly handle failing call disp32 in backtrace. -+ * Remove bp_instvalid flag, redundant code. -+ * Remove dead code. -+ * kdb v2.1-2.4.17-i386-1. -+ -+2002-01-04 Keith Owens -+ -+ * Sync xfs <-> kdb i386 code. -+ -+2001-12-22 Keith Owens -+ -+ * Split kdb for i386 as kdb v2.0-2.4.17-i386-1. ---- /dev/null -+++ b/arch/x86/kdb/ChangeLog_64 -@@ -0,0 +1,447 @@ -+2007-11-08 Jay Lan -+ -+ * New KDB USB interface, Aaron Young -+ * 1. This patch allows KDB to work with any Host Contoller driver -+ * and call the correct HC driver poll routine (as long as the -+ * HC driver provides a .kdb_poll_char routine via it's -+ * associated hc_driver struct). -+ * 2. Hotplugged keyboards are now recognized by KDB. -+ * 3. Currently KDB can only make use of 1 USB type keyboard. -+ * New code can handle up to 8 attached keyboards - input is -+ * multiplexed from all of them while in kdb. -+ * kdb v4.4-2.6.23-common-2. -+ -+2007-10-24 Jay Lan -+ -+ * kdb v4.4-2.6.23-x86_64-1. -+ -+2007-09-26 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc8-x86_64-1. -+ -+2007-09-21 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc7-x86_64-1. -+ -+2007-09-12 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc6-x86_64-1. -+ -+2007-09-06 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc5-x86_64-1. -+ -+2007-08-30 Keith Owens -+ -+ * New i386/x86_64 backtrace requires that kdb_save_running() does not -+ exit until after kdb_main_loop() has completed. -+ * kdb v4.4-2.6.23-rc4-x86_64-2. -+ -+2007-08-30 Jay Lan -+ -+ * kdb v4.4-2.6.23-rc4-x86_64-1. -+ -+2007-08-24 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc3-x86_64-1. -+ -+2007-08-07 Jay Lan -+ -+ * v4.4-2.6.23-rc2-x86_64-1. -+ -+2007-07-31 Keith Owens -+ -+ * Delete obsolete kdba_bt.c. -+ * kdb v4.4-2.6.23-rc1-x86_64-2. -+ -+2007-07-30 Keith Owens -+ -+ * kdb v4.4-2.6.23-rc1-x86_64-1. -+ -+2007-07-26 Keith Owens -+ -+ * New x86 backtrace code. -+ * kdb v4.4-2.6.22-x86_64-2. -+ -+2007-07-09 Keith Owens -+ -+ * kdb v4.4-2.6.22-x86_64-1. -+ -+2007-07-02 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc7-x86_64-1. -+ -+2007-06-25 Keith Owens -+ -+ * Hook into DIE_NMIWATCHDOG. -+ * kdb v4.4-2.6.22-rc5-x86_64-2. -+ -+2007-06-20 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc5-x86_64-1. -+ -+2007-06-08 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc4-x86_64-1. -+ -+2007-05-28 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc3-x86_64-1. -+ -+2007-05-22 Keith Owens -+ -+ * Register KDBENTER_VECTOR early on the boot cpu. -+ * kdb v4.4-2.6.22-rc2-x86_64-2. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc2-x86_64-1. -+ -+2007-05-22 Keith Owens -+ -+ * kdb v4.4-2.6.22-rc1-x86_64-1. -+ -+2007-05-17 Keith Owens -+ -+ * Update dumpregs comments for rdmsr and wrmsr commands. -+ Bernardo Innocenti. -+ * kdb v4.4-2.6.21-x86_64-3. -+ -+2007-05-15 Keith Owens -+ -+ * Change kdba_late_init to kdba_arch_init so KDB_ENTER() can be used -+ earlier. 
-+ * kdb v4.4-2.6.21-x86_64-2. -+ -+2007-04-29 Keith Owens -+ -+ * kdb v4.4-2.6.21-x86_64-1. -+ -+2007-04-16 Keith Owens -+ -+ * Select KALLSYMS and KALLSYMS_ALL when KDB is selected. -+ * kdb v4.4-2.6.21-rc7-x86_64-2. -+ -+2007-04-16 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc7-x86_64-1. -+ -+2007-04-10 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc6-x86_64-1. -+ -+2007-04-02 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc5-x86_64-1. -+ -+2007-03-19 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc4-x86_64-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc3-x86_64-1. -+ -+2007-03-14 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc2-x86_64-1. -+ -+2007-03-01 Keith Owens -+ -+ * kdb v4.4-2.6.21-rc1-x86_64-1. -+ -+2007-03-01 Keith Owens -+ -+ * Remove sparse warnings. -+ * kdb v4.4-2.6.20-x86_64-3. -+ -+2007-02-16 Keith Owens -+ -+ * Initialise variable bits of struct disassemble_info each time. -+ * kdb v4.4-2.6.20-x86_64-2. -+ -+2007-02-06 Keith Owens -+ -+ * kdb v4.4-2.6.20-x86_64-1. -+ -+2007-02-01 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc7-x86_64-1. -+ -+2007-01-10 Keith Owens -+ -+ * Correct setjmp for the FRAME_POINTER=y case. -+ * Remove duplicate longjmp code for FRAME_POINTER=n/y. -+ * kdb v4.4-2.6.20-rc4-x86_64-2. -+ -+2007-01-08 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc4-x86_64-1. -+ -+2007-01-02 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc3-x86_64-1. -+ -+2006-12-20 Keith Owens -+ -+ * kdb v4.4-2.6.20-rc1-x86_64-1. -+ -+2006-12-07 Keith Owens -+ -+ * Export kdba_dumpregs. -+ * kdb v4.4-2.6.19-x86_64-2. -+ -+2006-11-30 Keith Owens -+ -+ * kdb v4.4-2.6.19-x86_64-1. -+ -+2006-11-27 Keith Owens -+ -+ * Only use VT keyboard if the command line allows it and ACPI indicates -+ that there is an i8042. -+ * kdb v4.4-2.6.19-rc6-x86_64-2. -+ -+2006-11-20 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc6-x86_64-1. -+ -+2006-11-09 Keith Owens -+ -+ * Only use VT console if the command line allows it. -+ * kdb v4.4-2.6.19-rc5-x86_64-2. -+ -+2006-11-08 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc5-x86_64-1. -+ -+2006-11-01 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc4-x86_64-1. -+ -+2006-10-24 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc3-x86_64-1. -+ -+2006-10-24 Keith Owens -+ -+ * Remove redundant regs and envp parameters. -+ * kdb v4.4-2.6.19-rc2-x86_64-2. -+ -+2006-10-18 Keith Owens -+ -+ * kdb v4.4-2.6.19-rc2-x86_64-1. -+ -+2006-10-11 Keith Owens -+ -+ * Make the KDBENTER_VECTOR an interrupt gate instead of a trap gate, it -+ simplifies the code and disables interrupts on KDB_ENTER(). -+ * Exclude the KDBENTER_VECTOR from irq assignment. -+ * Enable KDB_ENTER() again. -+ * kdb v4.4-2.6.19-rc1-x86_64-2. -+ -+2006-10-09 Keith Owens -+ -+ * KDB_ENTER() is getting spurious activations on some x86_64 hardware. -+ Deactivate KDB_ENTER() until it is fixed. -+ * kdb v4.4-2.6.19-rc1-x86_64-1. -+ -+2006-10-06 Keith Owens -+ -+ * Remove #include -+ * kdb v4.4-2.6.18-x86_64-2. -+ -+2006-09-20 Keith Owens -+ -+ * kdb v4.4-2.6.18-x86_64-1. -+ -+2006-09-15 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc7-x86_64-1. -+ -+2006-08-30 Keith Owens -+ -+ * Do not print debugstackptr in cpu_pda, it will be deleted soon. -+ * Add KDB_ENTER(). -+ * Add warning for problems when following alternate stacks. -+ * kdb v4.4-2.6.18-rc5-x86_64-3. -+ -+2006-08-29 Keith Owens -+ -+ * Rewrite all backtrace code. -+ * Add pt_regs and cpu_pda commands. -+ * Include patch to define orig_ist, to be removed once that patch is in -+ the community tree. -+ * kdb v4.4-2.6.18-rc5-x86_64-2. -+ -+2006-08-28 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc5-x86_64-1. 
-+ -+2006-08-08 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc4-x86_64-1. -+ -+2006-08-04 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc3-x86_64-1. -+ -+2006-07-18 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc2-x86_64-1. -+ -+2006-07-12 Keith Owens -+ -+ * sparse cleanups -+ * kdb v4.4-2.6.18-rc1-x86_64-2. -+ -+2006-07-07 Keith Owens -+ -+ * kdb v4.4-2.6.18-rc1-x86_64-1. -+ -+2006-07-04 Keith Owens -+ -+ * Make KDB rendezvous on x86_64 a two stage approach. -+ * Move smp_kdb_stop() and smp_kdb_interrupt() to kdbasupport.c. -+ * Move setting of interrupt traps to kdbasupport.c. -+ * Add KDB_REASON_CPU_UP support. -+ * Move per cpu setup to kdba_cpu_up(). -+ * Delete kdba_enable_mce, architectures now do their own setup. -+ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr, -+ page_fault_mca. Only ever implemented on x86, difficult to maintain -+ and rarely used in the field. -+ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp. -+ * kdb v4.4-2.6.17-x86_64-2. -+ -+2006-06-19 Keith Owens -+ -+ * kdb v4.4-2.6.17-x86_64-1. -+ -+2006-05-31 Keith Owens -+ -+ * Define arch/x86_64/kdb/kdb_cmds. -+ * kdb v4.4-2.6.17-rc5-x86_64-2. -+ -+2006-05-25 Keith Owens -+ -+ * kdb v4.4-2.6.17-rc5-x86_64-1. -+ -+2006-05-15 Keith Owens -+ -+ * Refresh bfd related files from binutils 2.16.91.0.2. -+ * kdb v4.4-2.6.17-rc4-x86_64-2. -+ -+2006-05-12 Keith Owens -+ -+ * kdb v4.4-2.6-17-rc4-x86_64-1. -+ -+2006-04-22 Keith Owens -+ -+ * kdb v4.4-2.6-17-rc2-x86_64-1. -+ -+2006-04-13 Keith Owens -+ -+ * Remove trailing white space. -+ * kdb v4.4-2.6-17-rc1-x86_64-1. -+ -+2006-03-25 Jack F. Vogel -+ * Sync with Keith's changes for 2.6.16 -+ * code from Andi Kleen to support above -+ -+2005-09-30 Jack F. Vogel -+ * Port to 2.6.14-rc2 -+ * sync with a couple changes from Keith -+ * Add backtrace code from Jim Houston -+ (thanks Jim) -+ -+2005-08-31 Jack F. Vogel -+ * Change to linker script for kexec -+ thanks to Steven Dake -+ -+2005-08-30 Jack F. Vogel -+ * Notify struct should not be devinit -+ thanks IWAMOTO Toshihiro -+ -+2005-08-25 Jack F. Vogel -+ * Update to 2.6.11 -+ * Fix to synchronize with the notify changes -+ thanks to Jim Houston. -+ -+2004-09-30 Keith Owens -+ * Port to 2.6.9-rc2 -+ * Fix line editting characters. Jim Houston, Comcast. -+ * kdb v4.4-2.6.9-rc2-x86-64-1. -+ -+2004-08-15 Jack F. Vogel -+ * Port to 2.6.8 -+ * tighten up the code, using the built-in -+ die_chain notify interface, thanks to -+ Andi Kleen for pointing this out. -+ -+2004-05-15 Jack F. Vogel -+ * port to 2.6.6 for x86_64 -+ -+2003-12-15 Cliff Neighbors -+ * initial port from i386 to x86_64 -+ -+2002-08-10 Keith Owens -+ -+ * Replace kdb_port with kdb_serial to support memory mapped I/O. -+ Note: This needs kdb v2.3-2.4.19-common-2 or later. -+ * kdb v2.3-2.4.19-i386-3. -+ -+2002-08-09 Keith Owens -+ -+ * Use -fno-optimize-sibling-calls for kdb if gcc supports it. -+ * .text.lock does not consume an activation frame. -+ * kdb v2.3-2.4.19-i386-2. -+ -+2002-08-07 Keith Owens -+ -+ * Upgrade to 2.4.19. -+ * Remove individual SGI copyrights, the general SGI copyright applies. -+ * New .text.lock name. Hugh Dickins. -+ * Set KERNEL_CS in kdba_getcurrentframe. Hugh Dickins. -+ * Clean up disassembly layout. Hugh Dickins, Keith Owens. -+ * Replace hard coded stack size with THREAD_SIZE. Hugh Dickins. -+ * Better stack layout on bt with no frame pointers. Hugh Dickins. -+ * Make i386 IO breakpoints (bpha
IO) work again. -+ Martin Wilck, Keith Owens. -+ * Remove fixed KDB_MAX_COMMANDS size. -+ * Add set_fs() around __copy_to_user on kernel addresses. -+ Randolph Chung. -+ * Position i386 for CONFIG_NUMA_REPLICATE. -+ * kdb v2.3-2.4.19-i386-1. -+ -+2002-07-09 Keith Owens -+ -+ * Upgrade to 2.4.19-rc1. -+ -+2002-06-14 Keith Owens -+ -+ * Upgrade to 2.4.19-pre10. -+ * kdb v2.1-2.4.19-pre10-i386-1. -+ -+2002-04-09 Keith Owens -+ -+ * Upgrade to 2.4.19-pre6. -+ * kdb v2.1-2.4.19-pre6-i386-1. -+ -+2002-02-26 Keith Owens -+ -+ * Upgrade to 2.4.18. -+ * kdb v2.1-2.4.18-i386-1. -+ -+2002-01-18 Keith Owens -+ -+ * Use new kdb_get/put functions. -+ * Define kdba_{get,put}area_size functions for i386. -+ * Remove over-engineered dblist callback functions. -+ * Correctly handle failing call disp32 in backtrace. -+ * Remove bp_instvalid flag, redundant code. -+ * Remove dead code. -+ * kdb v2.1-2.4.17-i386-1. -+ -+2002-01-04 Keith Owens -+ -+ * Sync xfs <-> kdb i386 code. -+ ---- /dev/null -+++ b/arch/x86/kdb/Makefile -@@ -0,0 +1,29 @@ -+# -+# This file is subject to the terms and conditions of the GNU General Public -+# License. See the file "COPYING" in the main directory of this archive -+# for more details. -+# -+# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+# -+ -+obj-$(CONFIG_KDB) += kdba_bp.o x86-dis.o kdba_bt.o \ -+ kdba_io.o kdba_id.o kdba_support.o -+ -+ifneq (,$(findstring -fno-optimize-sibling-calls,$(KBUILD_CFLAGS))) -+ CFLAGS_kdba_bt.o += -DNO_SIBLINGS -+endif -+ -+REGPARM := $(subst -mregparm=,,$(filter -mregparm=%,$(KBUILD_CFLAGS))) -+ifeq (,$(REGPARM)) -+ifeq ($(CONFIG_X86_32),y) -+ REGPARM := 3 -+else -+ REGPARM := 6 -+endif -+endif -+ -+CFLAGS_kdba_bt.o += -DREGPARM=$(REGPARM) -DCCVERSION="$(CCVERSION)" -+ -+override CFLAGS := $(CFLAGS:%-pg=% ) -+ -+CFLAGS_kdba_io.o += -I $(TOPDIR)/arch/$(SRCARCH)/kdb ---- /dev/null -+++ b/arch/x86/kdb/kdb_cmds_32 -@@ -0,0 +1,17 @@ -+# Standard architecture specific commands for kdb. -+# These commands are appended to those in kdb/kdb_cmds, see that file for -+# restrictions. -+ -+# Standard debugging information for first level support, invoked from archkdb* -+# commands that are defined in kdb/kdb_cmds. -+ -+defcmd archkdbcommon "" "Common arch debugging" -+ set LINES 2000000 -+ set BTAPROMPT 0 -+ -summary -+ -id %eip-24 -+ -cpu -+ -ps -+ -dmesg 600 -+ -bt -+endefcmd ---- /dev/null -+++ b/arch/x86/kdb/kdb_cmds_64 -@@ -0,0 +1,18 @@ -+# Standard architecture specific commands for kdb. -+# These commands are appended to those in kdb/kdb_cmds, see that file for -+# restrictions. -+ -+# Standard debugging information for first level support, invoked from archkdb* -+# commands that are defined in kdb/kdb_cmds. -+ -+defcmd archkdbcommon "" "Common arch debugging" -+ set LINES 2000000 -+ set BTAPROMPT 0 -+ -summary -+ -id %rip-24 -+ -cpu -+ -ps -+ -dmesg 600 -+ -bt -+ -cpu_pda * -+endefcmd ---- /dev/null -+++ b/arch/x86/kdb/kdba_bp.c -@@ -0,0 +1,914 @@ -+/* -+ * Kernel Debugger Architecture Dependent Breakpoint Handling -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+static char *kdba_rwtypes[] = { "Instruction(Register)", "Data Write", -+ "I/O", "Data Access"}; -+ -+/* -+ * Table describing processor architecture hardware -+ * breakpoint registers for every CPU. -+ */ -+ -+static kdbhard_bp_t kdb_hardbreaks[NR_CPUS][KDB_MAXHARDBPT]; -+ -+/* -+ * kdba_db_trap -+ * -+ * Perform breakpoint processing upon entry to the -+ * processor debugger fault. Determine and print -+ * the active breakpoint. -+ * -+ * Parameters: -+ * regs Exception frame containing machine register state -+ * error Error number passed to kdb. -+ * Outputs: -+ * None. -+ * Returns: -+ * KDB_DB_BPT Standard instruction or data breakpoint encountered -+ * KDB_DB_SS Single Step fault ('ss' command or end of 'ssb' command) -+ * KDB_DB_SSB Single Step fault, caller should continue ('ssb' command) -+ * KDB_DB_SSBPT Single step over breakpoint -+ * KDB_DB_NOBPT No existing kdb breakpoint matches this debug exception -+ * Locking: -+ * None. -+ * Remarks: -+ * Yup, there be goto's here. -+ * -+ * If multiple processors receive debug exceptions simultaneously, -+ * one may be waiting at the kdb fence in kdb() while the user -+ * issues a 'bc' command to clear the breakpoint the processor -+ * which is waiting has already encountered. If this is the case, -+ * the debug registers will no longer match any entry in the -+ * breakpoint table, and we'll return the value KDB_DB_NOBPT. -+ * This can cause a panic in die_if_kernel(). It is safer to -+ * disable the breakpoint (bd), go until all processors are past -+ * the breakpoint then clear the breakpoint (bc). This code -+ * recognises a breakpoint even when disabled but not when it has -+ * been cleared. -+ * -+ * WARNING: This routine clears the debug state. It should be called -+ * once per debug and the result cached. -+ */ -+ -+kdb_dbtrap_t -+kdba_db_trap(struct pt_regs *regs, int error_unused) -+{ -+ kdb_machreg_t dr6; -+ kdb_machreg_t dr7; -+ int rw, reg; -+ int i; -+ kdb_dbtrap_t rv = KDB_DB_BPT; -+ kdb_bp_t *bp; -+ int cpu = smp_processor_id(); -+ -+ if (KDB_NULL_REGS(regs)) -+ return KDB_DB_NOBPT; -+ -+ dr6 = kdba_getdr6(); -+ dr7 = kdba_getdr7(); -+ -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdb: dr6 0x%lx dr7 0x%lx\n", dr6, dr7); -+ if (dr6 & DR6_BS) { -+ if (KDB_STATE(SSBPT)) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("ssbpt\n"); -+ KDB_STATE_CLEAR(SSBPT); -+ for(i=0,bp=kdb_breakpoints; -+ i < KDB_MAXBPT; -+ i++, bp++) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("bp 0x%p enabled %d delayed %d global %d cpu %d\n", -+ bp, bp->bp_enabled, bp->bp_delayed, bp->bp_global, bp->bp_cpu); -+ if (!bp->bp_enabled) -+ continue; -+ if (!bp->bp_global && bp->bp_cpu != smp_processor_id()) -+ continue; -+ if (KDB_DEBUG(BP)) -+ kdb_printf("bp for this cpu\n"); -+ if (bp->bp_delayed) { -+ bp->bp_delayed = 0; -+ if (KDB_DEBUG(BP)){ -+ /* Can't be hw breakpoint */ -+ if (bp->bp_hardtype) -+ kdb_printf("kdb: Error - hw bp delayed\n"); -+ kdb_printf("kdba_installbp\n"); -+ } -+ kdba_installbp(regs, bp); -+ if (!KDB_STATE(DOING_SS)) { -+ regs->flags &= ~X86_EFLAGS_TF; -+ return(KDB_DB_SSBPT); -+ } -+ break; -+ } -+ } -+ if (i == KDB_MAXBPT) { -+ kdb_printf("kdb: Unable to find delayed breakpoint\n"); -+ } -+ if (!KDB_STATE(DOING_SS)) { -+ regs->flags &= ~X86_EFLAGS_TF; -+ return(KDB_DB_NOBPT); -+ } -+ /* FALLTHROUGH */ -+ } -+ -+ /* -+ * KDB_STATE_DOING_SS is set when the kernel debugger is using -+ * the processor trap flag to single-step a processor. 
If a -+ * single step trap occurs and this flag is clear, the SS trap -+ * will be ignored by KDB and the kernel will be allowed to deal -+ * with it as necessary (e.g. for ptrace). -+ */ -+ if (!KDB_STATE(DOING_SS)) -+ goto unknown; -+ -+ /* single step */ -+ rv = KDB_DB_SS; /* Indicate single step */ -+ if (KDB_STATE(DOING_SSB)) { -+ unsigned char instruction[2]; -+ -+ kdb_id1(regs->ip); -+ if (kdb_getarea(instruction, regs->ip) || -+ (instruction[0]&0xf0) == 0xe0 || /* short disp jumps */ -+ (instruction[0]&0xf0) == 0x70 || /* Misc. jumps */ -+ instruction[0] == 0xc2 || /* ret */ -+ instruction[0] == 0x9a || /* call */ -+ (instruction[0]&0xf8) == 0xc8 || /* enter, leave, iret, int, */ -+ ((instruction[0] == 0x0f) && -+ ((instruction[1]&0xf0)== 0x80)) -+ ) { -+ /* -+ * End the ssb command here. -+ */ -+ KDB_STATE_CLEAR(DOING_SSB); -+ KDB_STATE_CLEAR(DOING_SS); -+ } else { -+ rv = KDB_DB_SSB; /* Indicate ssb - dismiss immediately */ -+ } -+ } else { -+ /* -+ * Print current insn -+ */ -+ kdb_printf("SS trap at "); -+ kdb_symbol_print(regs->ip, NULL, KDB_SP_DEFAULT|KDB_SP_NEWLINE); -+ kdb_id1(regs->ip); -+ KDB_STATE_CLEAR(DOING_SS); -+ } -+ -+ if (rv != KDB_DB_SSB) -+ regs->flags &= ~X86_EFLAGS_TF; -+ } -+ -+ if (dr6 & DR6_B0) { -+ rw = DR7_RW0(dr7); -+ reg = 0; -+ goto handle; -+ } -+ -+ if (dr6 & DR6_B1) { -+ rw = DR7_RW1(dr7); -+ reg = 1; -+ goto handle; -+ } -+ -+ if (dr6 & DR6_B2) { -+ rw = DR7_RW2(dr7); -+ reg = 2; -+ goto handle; -+ } -+ -+ if (dr6 & DR6_B3) { -+ rw = DR7_RW3(dr7); -+ reg = 3; -+ goto handle; -+ } -+ -+ if (rv > 0) -+ goto handled; -+ -+ goto unknown; /* dismiss */ -+ -+handle: -+ /* -+ * Set Resume Flag -+ */ -+ regs->flags |= X86_EFLAGS_RF; -+ -+ /* -+ * Determine which breakpoint was encountered. -+ */ -+ for(i=0, bp=kdb_breakpoints; ibp_free) -+ && (bp->bp_global || bp->bp_cpu == smp_processor_id()) -+ && (bp->bp_hard[cpu]) -+ && (bp->bp_hard[cpu]->bph_reg == reg)) { -+ /* -+ * Hit this breakpoint. -+ */ -+ kdb_printf("%s breakpoint #%d at " kdb_bfd_vma_fmt "\n", -+ kdba_rwtypes[rw], -+ i, bp->bp_addr); -+ -+ /* -+ * For an instruction breakpoint, disassemble -+ * the current instruction. -+ */ -+ if (rw == 0) { -+ kdb_id1(regs->ip); -+ } -+ -+ goto handled; -+ } -+ } -+ -+unknown: -+ regs->flags |= X86_EFLAGS_RF; /* Supress further faults */ -+ rv = KDB_DB_NOBPT; /* Cause kdb() to return */ -+ -+handled: -+ -+ /* -+ * Clear the pending exceptions. -+ */ -+ kdba_putdr6(0); -+ -+ return rv; -+} -+ -+/* -+ * kdba_bp_trap -+ * -+ * Perform breakpoint processing upon entry to the -+ * processor breakpoint instruction fault. Determine and print -+ * the active breakpoint. -+ * -+ * Parameters: -+ * regs Exception frame containing machine register state -+ * error Error number passed to kdb. -+ * Outputs: -+ * None. -+ * Returns: -+ * 0 Standard instruction or data breakpoint encountered -+ * 1 Single Step fault ('ss' command) -+ * 2 Single Step fault, caller should continue ('ssb' command) -+ * 3 No existing kdb breakpoint matches this debug exception -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * If multiple processors receive debug exceptions simultaneously, -+ * one may be waiting at the kdb fence in kdb() while the user -+ * issues a 'bc' command to clear the breakpoint the processor which -+ * is waiting has already encountered. If this is the case, the -+ * debug registers will no longer match any entry in the breakpoint -+ * table, and we'll return the value '3'. This can cause a panic -+ * in die_if_kernel(). 
It is safer to disable the breakpoint (bd), -+ * 'go' until all processors are past the breakpoint then clear the -+ * breakpoint (bc). This code recognises a breakpoint even when -+ * disabled but not when it has been cleared. -+ * -+ * WARNING: This routine resets the ip. It should be called -+ * once per breakpoint and the result cached. -+ */ -+ -+kdb_dbtrap_t -+kdba_bp_trap(struct pt_regs *regs, int error_unused) -+{ -+ int i; -+ kdb_dbtrap_t rv; -+ kdb_bp_t *bp; -+ -+ if (KDB_NULL_REGS(regs)) -+ return KDB_DB_NOBPT; -+ -+ /* -+ * Determine which breakpoint was encountered. -+ */ -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_bp_trap: ip=0x%lx (not adjusted) " -+ "flags=0x%lx regs=0x%p sp=0x%lx\n", -+ regs->ip, regs->flags, regs, regs->sp); -+ -+ rv = KDB_DB_NOBPT; /* Cause kdb() to return */ -+ -+ for(i=0, bp=kdb_breakpoints; ibp_free) -+ continue; -+ if (!bp->bp_global && bp->bp_cpu != smp_processor_id()) -+ continue; -+ if ((void *)bp->bp_addr == (void *)(regs->ip - bp->bp_adjust)) { -+ /* Hit this breakpoint. */ -+ regs->ip -= bp->bp_adjust; -+ kdb_printf("Instruction(i) breakpoint #%d at 0x%lx (adjusted)\n", -+ i, regs->ip); -+ kdb_id1(regs->ip); -+ rv = KDB_DB_BPT; -+ bp->bp_delay = 1; -+ /* SSBPT is set when the kernel debugger must single -+ * step a task in order to re-establish an instruction -+ * breakpoint which uses the instruction replacement -+ * mechanism. It is cleared by any action that removes -+ * the need to single-step the breakpoint. -+ */ -+ KDB_STATE_SET(SSBPT); -+ break; -+ } -+ } -+ -+ return rv; -+} -+ -+/* -+ * kdba_handle_bp -+ * -+ * Handle an instruction-breakpoint trap. Called when re-installing -+ * an enabled breakpoint which has has the bp_delay bit set. -+ * -+ * Parameters: -+ * Returns: -+ * Locking: -+ * Remarks: -+ * -+ * Ok, we really need to: -+ * 1) Restore the original instruction byte -+ * 2) Single Step -+ * 3) Restore breakpoint instruction -+ * 4) Continue. -+ * -+ * -+ */ -+ -+static void -+kdba_handle_bp(struct pt_regs *regs, kdb_bp_t *bp) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return; -+ -+ if (KDB_DEBUG(BP)) -+ kdb_printf("regs->ip = 0x%lx\n", regs->ip); -+ -+ /* -+ * Setup single step -+ */ -+ kdba_setsinglestep(regs); -+ -+ /* -+ * Reset delay attribute -+ */ -+ bp->bp_delay = 0; -+ bp->bp_delayed = 1; -+} -+ -+ -+/* -+ * kdba_bptype -+ * -+ * Return a string describing type of breakpoint. -+ * -+ * Parameters: -+ * bph Pointer to hardware breakpoint description -+ * Outputs: -+ * None. -+ * Returns: -+ * Character string. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+char * -+kdba_bptype(kdbhard_bp_t *bph) -+{ -+ char *mode; -+ -+ mode = kdba_rwtypes[bph->bph_mode]; -+ -+ return mode; -+} -+ -+/* -+ * kdba_printbpreg -+ * -+ * Print register name assigned to breakpoint -+ * -+ * Parameters: -+ * bph Pointer hardware breakpoint structure -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+static void -+kdba_printbpreg(kdbhard_bp_t *bph) -+{ -+ kdb_printf(" in dr%ld", bph->bph_reg); -+} -+ -+/* -+ * kdba_printbp -+ * -+ * Print string describing hardware breakpoint. -+ * -+ * Parameters: -+ * bph Pointer to hardware breakpoint description -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. 
-+ * Remarks: -+ */ -+ -+void -+kdba_printbp(kdb_bp_t *bp) -+{ -+ int cpu; -+ -+ kdb_printf("\n is enabled"); -+ if (bp->bp_hardtype) { -+ if (bp->bp_global) -+ cpu = smp_processor_id(); -+ else -+ cpu = bp->bp_cpu; -+ kdba_printbpreg(bp->bp_hard[cpu]); -+ if (bp->bp_hard[cpu]->bph_mode != 0) { -+ kdb_printf(" for %d bytes", -+ bp->bp_hard[cpu]->bph_length+1); -+ } -+ } -+} -+ -+/* -+ * kdba_parsebp -+ * -+ * Parse architecture dependent portion of the -+ * breakpoint command. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic for failure -+ * Locking: -+ * None. -+ * Remarks: -+ * for Ia32 architure, data access, data write and -+ * I/O breakpoints are supported in addition to instruction -+ * breakpoints. -+ * -+ * {datar|dataw|io|inst} [length] -+ */ -+ -+int -+kdba_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp) -+{ -+ int nextarg = *nextargp; -+ int diag; -+ kdbhard_bp_t *bph = &bp->bp_template; -+ -+ bph->bph_mode = 0; /* Default to instruction breakpoint */ -+ bph->bph_length = 0; /* Length must be zero for insn bp */ -+ if ((argc + 1) != nextarg) { -+ if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0) { -+ bph->bph_mode = 3; -+ } else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0) { -+ bph->bph_mode = 1; -+ } else if (strnicmp(argv[nextarg], "io", sizeof("io")) == 0) { -+ bph->bph_mode = 2; -+ } else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0) { -+ bph->bph_mode = 0; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ -+ bph->bph_length = 3; /* Default to 4 byte */ -+ -+ nextarg++; -+ -+ if ((argc + 1) != nextarg) { -+ unsigned long len; -+ -+ diag = kdbgetularg((char *)argv[nextarg], -+ &len); -+ if (diag) -+ return diag; -+ -+ -+ if ((len > 4) || (len == 3)) -+ return KDB_BADLENGTH; -+ -+ bph->bph_length = len; -+ bph->bph_length--; /* Normalize for debug register */ -+ nextarg++; -+ } -+ -+ if ((argc + 1) != nextarg) -+ return KDB_ARGCOUNT; -+ -+ /* -+ * Indicate to architecture independent level that -+ * a hardware register assignment is required to enable -+ * this breakpoint. -+ */ -+ -+ bph->bph_free = 0; -+ } else { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_bp: no args, forcehw is %d\n", bp->bp_forcehw); -+ if (bp->bp_forcehw) { -+ /* -+ * We are forced to use a hardware register for this -+ * breakpoint because either the bph or bpha -+ * commands were used to establish this breakpoint. -+ */ -+ bph->bph_free = 0; -+ } else { -+ /* -+ * Indicate to architecture dependent level that -+ * the instruction replacement breakpoint technique -+ * should be used for this breakpoint. -+ */ -+ bph->bph_free = 1; -+ bp->bp_adjust = 1; /* software, int 3 is one byte */ -+ } -+ } -+ -+ if (bph->bph_mode != 2 && kdba_verify_rw(bp->bp_addr, bph->bph_length+1)) { -+ kdb_printf("Invalid address for breakpoint, ignoring bp command\n"); -+ return KDB_BADADDR; -+ } -+ -+ *nextargp = nextarg; -+ return 0; -+} -+ -+/* -+ * kdba_allocbp -+ * -+ * Allocate hw register for bp on specific CPU -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * A pointer to the allocated register kdbhard_bp_t structure for -+ * success, Null and a non-zero diagnostic for failure. -+ * Locking: -+ * None. 
-+ * Remarks: -+ */ -+ -+static kdbhard_bp_t * -+kdba_allocbp(kdbhard_bp_t *bph, int *diagp, unsigned int cpu) -+{ -+ int i; -+ kdbhard_bp_t *newbph; -+ -+ for(i=0; i < KDB_MAXHARDBPT; i++) { -+ newbph=&(kdb_hardbreaks[cpu][i]); -+ if (newbph->bph_free) { -+ break; -+ } -+ } -+ -+ if (i == KDB_MAXHARDBPT) { -+ *diagp = KDB_TOOMANYDBREGS; -+ return NULL; -+ } -+ -+ *diagp = 0; -+ -+ /* -+ * Copy data from template. Can't just copy the entire template -+ * here because the register number in kdb_hardbreaks must be -+ * preserved. -+ */ -+ newbph->bph_data = bph->bph_data; -+ newbph->bph_write = bph->bph_write; -+ newbph->bph_mode = bph->bph_mode; -+ newbph->bph_length = bph->bph_length; -+ -+ /* -+ * Mark entry allocated. -+ */ -+ newbph->bph_free = 0; -+ -+ return newbph; -+} -+ -+/* -+ * kdba_alloc_hwbp -+ * -+ * Associate a hardware registers with a breakpoint. -+ * If hw bp is global hw registers descriptor will be allocated -+ * on every CPU. -+ * -+ * Parameters: -+ * bp - hardware bp -+ * diagp - pointer to variable that will store error when -+ * function complete -+ * Outputs: -+ * None. -+ * Returns: -+ * None -+ * Locking: -+ * None. -+ * Remarks: -+ * Should be called with correct bp->bp_template -+ */ -+ -+void -+kdba_alloc_hwbp(kdb_bp_t *bp, int *diagp) -+{ -+ int i; -+ -+ if (bp->bp_global){ -+ for (i = 0; i < NR_CPUS; ++i) { -+ if (!cpu_online(i)) -+ continue; -+ bp->bp_hard[i] = kdba_allocbp(&bp->bp_template, diagp, i); -+ if (*diagp) -+ break; -+ } -+ } else { -+ bp->bp_hard[bp->bp_cpu] = kdba_allocbp(&bp->bp_template, diagp, bp->bp_cpu); -+ } -+ bp->bp_hardtype = 1; -+} -+ -+/* -+ * kdba_freebp -+ * -+ * Deallocate hw registers descriptor for bp on specific CPU -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic for failure -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+static void -+kdba_freebp(kdbhard_bp_t *bph) -+{ -+ bph->bph_free = 1; -+} -+ -+/* -+ * kdba_free_hwbp -+ * -+ * Frees allocated hw registers descriptors for bp. -+ * If hw bp is global, hw registers descriptors will be freed -+ * on every CPU. -+ * -+ * Parameters: -+ * bp - hardware bp -+ * Outputs: -+ * None. -+ * Returns: -+ * None -+ * Locking: -+ * None. -+ * Remarks: -+ * Should be called with correct bp->bp_template -+ */ -+ -+void -+kdba_free_hwbp(kdb_bp_t *bp) -+{ -+ int i; -+ -+ /* When kernel enters KDB, first, all local bps -+ * are removed, so here we don't need to clear -+ * debug registers. -+ */ -+ -+ if (bp->bp_global){ -+ for (i = 0; i < NR_CPUS; ++i) { -+ if (!cpu_online(i)) -+ continue; -+ if (bp->bp_hard[i]) -+ kdba_freebp(bp->bp_hard[i]); -+ bp->bp_hard[i] = 0; -+ } -+ } else { -+ kdba_freebp(bp->bp_hard[bp->bp_cpu]); -+ bp->bp_hard[bp->bp_cpu] = NULL; -+ } -+ bp->bp_hardtype = 0; -+} -+ -+/* -+ * kdba_initbp -+ * -+ * Initialize the breakpoint table for the hardware breakpoint -+ * register. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * Zero for success, a kdb diagnostic for failure -+ * Locking: -+ * None. -+ * Remarks: -+ * -+ * There is one entry per register. On the ia32 architecture -+ * all the registers are interchangeable, so no special allocation -+ * criteria are required. 
-+ */ -+ -+void -+kdba_initbp(void) -+{ -+ int i,j; -+ kdbhard_bp_t *bph; -+ -+ /* -+ * Clear the hardware breakpoint table -+ */ -+ -+ memset(kdb_hardbreaks, '\0', sizeof(kdb_hardbreaks)); -+ -+ for (i = 0; i < NR_CPUS; ++i) { -+ /* Called early so we don't know actual -+ * ammount of CPUs -+ */ -+ for(j=0; j < KDB_MAXHARDBPT; j++) { -+ bph=&(kdb_hardbreaks[i][j]); -+ bph->bph_reg = j; -+ bph->bph_free = 1; -+ } -+ } -+} -+ -+/* -+ * kdba_installbp -+ * -+ * Install a breakpoint -+ * -+ * Parameters: -+ * regs Exception frame -+ * bp Breakpoint structure for the breakpoint to be installed -+ * Outputs: -+ * None. -+ * Returns: -+ * 0 if breakpoint installed. -+ * Locking: -+ * None. -+ * Remarks: -+ * For hardware breakpoints, a debug register is allocated -+ * and assigned to the breakpoint. If no debug register is -+ * available, a warning message is printed and the breakpoint -+ * is disabled. -+ * -+ * For instruction replacement breakpoints, we must single-step -+ * over the replaced instruction at this point so we can re-install -+ * the breakpoint instruction after the single-step. SSBPT is set -+ * when the breakpoint is initially hit and is cleared by any action -+ * that removes the need for single-step over the breakpoint. -+ */ -+ -+int -+kdba_installbp(struct pt_regs *regs, kdb_bp_t *bp) -+{ -+ int cpu = smp_processor_id(); -+ -+ /* -+ * Install the breakpoint, if it is not already installed. -+ */ -+ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_installbp bp_installed %d\n", bp->bp_installed); -+ } -+ if (!KDB_STATE(SSBPT)) -+ bp->bp_delay = 0; -+ -+ if (bp->bp_hardtype) { -+ if (KDB_DEBUG(BP) && !bp->bp_global && cpu != bp->bp_cpu){ -+ kdb_printf("kdba_installbp: cpu != bp->bp_cpu for local hw bp\n"); -+ } -+ -+ if (KDB_DEBUG(BP) && !bp->bp_hard[cpu]){ -+ kdb_printf("kdba_installbp: Error - bp_hard[smp_processor_id()] is emply\n"); -+ return 1; -+ } -+ -+ if (!bp->bp_hard[cpu]->bph_installed){ -+ kdba_installdbreg(bp); -+ bp->bp_hard[cpu]->bph_installed = 1; -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_installbp hardware reg %ld at " kdb_bfd_vma_fmt "\n", -+ bp->bp_hard[cpu]->bph_reg, bp->bp_addr); -+ } -+ } -+ } else if (!bp->bp_installed) { -+ if (bp->bp_delay) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_installbp delayed bp\n"); -+ kdba_handle_bp(regs, bp); -+ } else { -+ if (kdb_getarea_size(&(bp->bp_inst), bp->bp_addr, 1) || -+ kdb_putword(bp->bp_addr, IA32_BREAKPOINT_INSTRUCTION, 1)) { -+ kdb_printf("kdba_installbp failed to set software breakpoint at " kdb_bfd_vma_fmt "\n", bp->bp_addr); -+ return(1); -+ } -+ bp->bp_installed = 1; -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdba_installbp instruction 0x%x at " kdb_bfd_vma_fmt "\n", -+ IA32_BREAKPOINT_INSTRUCTION, bp->bp_addr); -+ } -+ } -+ return(0); -+} -+ -+/* -+ * kdba_removebp -+ * -+ * Make a breakpoint ineffective. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+int -+kdba_removebp(kdb_bp_t *bp) -+{ -+ int cpu = smp_processor_id(); -+ -+ /* -+ * For hardware breakpoints, remove it from the active register, -+ * for software breakpoints, restore the instruction stream. 
-+ */ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_removebp bp_installed %d\n", bp->bp_installed); -+ } -+ -+ if (bp->bp_hardtype) { -+ if (KDB_DEBUG(BP) && !bp->bp_global && cpu != bp->bp_cpu){ -+ kdb_printf("kdba_removebp: cpu != bp->bp_cpu for local hw bp\n"); -+ } -+ -+ if (KDB_DEBUG(BP) && !bp->bp_hard[cpu]){ -+ kdb_printf("kdba_removebp: Error - bp_hard[smp_processor_id()] is emply\n"); -+ return 1; -+ } -+ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdb: removing hardware reg %ld at " kdb_bfd_vma_fmt "\n", -+ bp->bp_hard[cpu]->bph_reg, bp->bp_addr); -+ } -+ -+ if (bp->bp_hard[cpu]->bph_installed){ -+ if (KDB_DEBUG(BP)) { -+ kdb_printf("kdba_installbp hardware reg %ld at " kdb_bfd_vma_fmt "\n", -+ bp->bp_hard[cpu]->bph_reg, bp->bp_addr); -+ } -+ kdba_removedbreg(bp); -+ bp->bp_hard[cpu]->bph_installed = 0; -+ } -+ } else if (bp->bp_installed) { -+ if (KDB_DEBUG(BP)) -+ kdb_printf("kdb: restoring instruction 0x%x at " kdb_bfd_vma_fmt "\n", -+ bp->bp_inst, bp->bp_addr); -+ if (kdb_putword(bp->bp_addr, bp->bp_inst, 1)) -+ return(1); -+ bp->bp_installed = 0; -+ } -+ return(0); -+} ---- /dev/null -+++ b/arch/x86/kdb/kdba_bt.c -@@ -0,0 +1,5758 @@ -+/* -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 2006, 2007-2009 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * Common code for doing accurate backtraces on i386 and x86_64, including -+ * printing the values of arguments. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define KDB_DEBUG_BB(fmt, ...) \ -+ {if (KDB_DEBUG(BB)) kdb_printf(fmt, ## __VA_ARGS__);} -+#define KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix) \ -+ kdb_printf(prefix "%c0x%x" suffix, \ -+ offset >= 0 ? '+' : '-', \ -+ offset >= 0 ? offset : -offset) -+#define KDB_DEBUG_BB_OFFSET(offset, prefix, suffix) \ -+ {if (KDB_DEBUG(BB)) KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix);} -+ -+#define BB_CHECK(expr, val, ret) \ -+({ \ -+ if (unlikely(expr)) { \ -+ kdb_printf("%s, line %d: BB_CHECK(" #expr ") failed " \ -+ #val "=%lx\n", \ -+ __FUNCTION__, __LINE__, (long)val); \ -+ bb_giveup = 1; \ -+ return ret; \ -+ } \ -+}) -+ -+static int bb_giveup; -+ -+/* Use BBRG_Rxx for both i386 and x86_64. RAX through R15 must be at the end, -+ * starting with RAX. Some of these codes do not reflect actual registers, -+ * such codes are special cases when parsing the record of register changes. -+ * When updating BBRG_ entries, update bbrg_name as well. -+ */ -+ -+enum bb_reg_code -+{ -+ BBRG_UNDEFINED = 0, /* Register contents are undefined */ -+ BBRG_OSP, /* original stack pointer on entry to function */ -+ BBRG_RAX, -+ BBRG_RBX, -+ BBRG_RCX, -+ BBRG_RDX, -+ BBRG_RDI, -+ BBRG_RSI, -+ BBRG_RBP, -+ BBRG_RSP, -+ BBRG_R8, -+ BBRG_R9, -+ BBRG_R10, -+ BBRG_R11, -+ BBRG_R12, -+ BBRG_R13, -+ BBRG_R14, -+ BBRG_R15, -+}; -+ -+const static char *bbrg_name[] = { -+ [BBRG_UNDEFINED] = "undefined", -+ [BBRG_OSP] = "osp", -+ [BBRG_RAX] = "rax", -+ [BBRG_RBX] = "rbx", -+ [BBRG_RCX] = "rcx", -+ [BBRG_RDX] = "rdx", -+ [BBRG_RDI] = "rdi", -+ [BBRG_RSI] = "rsi", -+ [BBRG_RBP] = "rbp", -+ [BBRG_RSP] = "rsp", -+ [BBRG_R8] = "r8", -+ [BBRG_R9] = "r9", -+ [BBRG_R10] = "r10", -+ [BBRG_R11] = "r11", -+ [BBRG_R12] = "r12", -+ [BBRG_R13] = "r13", -+ [BBRG_R14] = "r14", -+ [BBRG_R15] = "r15", -+}; -+ -+/* Map a register name to its register code. 
This includes the sub-register -+ * addressable fields, e.g. parts of rax can be addressed as ax, al, ah, eax. -+ * The list is sorted so it can be binary chopped, sort command is: -+ * LANG=C sort -t '"' -k2 -+ */ -+ -+struct bb_reg_code_map { -+ enum bb_reg_code reg; -+ const char *name; -+}; -+ -+const static struct bb_reg_code_map -+bb_reg_code_map[] = { -+ { BBRG_RAX, "ah" }, -+ { BBRG_RAX, "al" }, -+ { BBRG_RAX, "ax" }, -+ { BBRG_RBX, "bh" }, -+ { BBRG_RBX, "bl" }, -+ { BBRG_RBP, "bp" }, -+ { BBRG_RBP, "bpl" }, -+ { BBRG_RBX, "bx" }, -+ { BBRG_RCX, "ch" }, -+ { BBRG_RCX, "cl" }, -+ { BBRG_RCX, "cx" }, -+ { BBRG_RDX, "dh" }, -+ { BBRG_RDI, "di" }, -+ { BBRG_RDI, "dil" }, -+ { BBRG_RDX, "dl" }, -+ { BBRG_RDX, "dx" }, -+ { BBRG_RAX, "eax" }, -+ { BBRG_RBP, "ebp" }, -+ { BBRG_RBX, "ebx" }, -+ { BBRG_RCX, "ecx" }, -+ { BBRG_RDI, "edi" }, -+ { BBRG_RDX, "edx" }, -+ { BBRG_RSI, "esi" }, -+ { BBRG_RSP, "esp" }, -+ { BBRG_R10, "r10" }, -+ { BBRG_R10, "r10d" }, -+ { BBRG_R10, "r10l" }, -+ { BBRG_R10, "r10w" }, -+ { BBRG_R11, "r11" }, -+ { BBRG_R11, "r11d" }, -+ { BBRG_R11, "r11l" }, -+ { BBRG_R11, "r11w" }, -+ { BBRG_R12, "r12" }, -+ { BBRG_R12, "r12d" }, -+ { BBRG_R12, "r12l" }, -+ { BBRG_R12, "r12w" }, -+ { BBRG_R13, "r13" }, -+ { BBRG_R13, "r13d" }, -+ { BBRG_R13, "r13l" }, -+ { BBRG_R13, "r13w" }, -+ { BBRG_R14, "r14" }, -+ { BBRG_R14, "r14d" }, -+ { BBRG_R14, "r14l" }, -+ { BBRG_R14, "r14w" }, -+ { BBRG_R15, "r15" }, -+ { BBRG_R15, "r15d" }, -+ { BBRG_R15, "r15l" }, -+ { BBRG_R15, "r15w" }, -+ { BBRG_R8, "r8" }, -+ { BBRG_R8, "r8d" }, -+ { BBRG_R8, "r8l" }, -+ { BBRG_R8, "r8w" }, -+ { BBRG_R9, "r9" }, -+ { BBRG_R9, "r9d" }, -+ { BBRG_R9, "r9l" }, -+ { BBRG_R9, "r9w" }, -+ { BBRG_RAX, "rax" }, -+ { BBRG_RBP, "rbp" }, -+ { BBRG_RBX, "rbx" }, -+ { BBRG_RCX, "rcx" }, -+ { BBRG_RDI, "rdi" }, -+ { BBRG_RDX, "rdx" }, -+ { BBRG_RSI, "rsi" }, -+ { BBRG_RSP, "rsp" }, -+ { BBRG_RSI, "si" }, -+ { BBRG_RSI, "sil" }, -+ { BBRG_RSP, "sp" }, -+ { BBRG_RSP, "spl" }, -+}; -+ -+/* Record register contents in terms of the values that were passed to this -+ * function, IOW track which registers contain an input value. A register's -+ * contents can be undefined, it can contain an input register value or it can -+ * contain an offset from the original stack pointer. -+ * -+ * This structure is used to represent the current contents of the integer -+ * registers, it is held in an array that is indexed by BBRG_xxx. The element -+ * for BBRG_xxx indicates what input value is currently in BBRG_xxx. When -+ * 'value' is BBRG_OSP then register BBRG_xxx contains a stack pointer, -+ * pointing at 'offset' from the original stack pointer on entry to the -+ * function. When 'value' is not BBRG_OSP then element BBRG_xxx contains the -+ * original contents of an input register and offset is ignored. -+ * -+ * An input register 'value' can be stored in more than one register and/or in -+ * more than one memory location. -+ */ -+ -+struct bb_reg_contains -+{ -+ enum bb_reg_code value: 8; -+ short offset; -+}; -+ -+/* Note: the offsets in struct bb_mem_contains in this code are _NOT_ offsets -+ * from OSP, they are offsets from current RSP. It fits better with the way -+ * that struct pt_regs is built, some code pushes extra data before pt_regs so -+ * working with OSP relative offsets gets messy. struct bb_mem_contains -+ * entries must be in descending order of RSP offset. 
-+ */ -+ -+typedef struct { DECLARE_BITMAP(bits, BBRG_R15+1); } bbrgmask_t; -+#define BB_SKIP(reg) (1 << (BBRG_ ## reg)) -+struct bb_mem_contains { -+ short offset_address; -+ enum bb_reg_code value: 8; -+}; -+ -+/* Transfer of control to a label outside the current function. If the -+ * transfer is to a known common restore path that expects known registers -+ * and/or a known memory state (e.g. struct pt_regs) then do a sanity check on -+ * the state at this point. -+ */ -+ -+struct bb_name_state { -+ const char *name; /* target function */ -+ bfd_vma address; /* Address of target function */ -+ const char *fname; /* optional from function name */ -+ const struct bb_mem_contains *mem; /* expected memory state */ -+ const struct bb_reg_contains *regs; /* expected register state */ -+ const unsigned short mem_size; /* ARRAY_SIZE(mem) */ -+ const unsigned short regs_size; /* ARRAY_SIZE(regs) */ -+ const short osp_offset; /* RSP in regs == OSP+osp_offset */ -+ const bbrgmask_t skip_mem; /* Some slots in mem may be undefined */ -+ const bbrgmask_t skip_regs; /* Some slots in regs may be undefined */ -+}; -+ -+/* NS (NAME_STATE) macros define the register and memory state when we transfer -+ * control to or start decoding a special case name. Use NS when the target -+ * label always has the same state. Use NS_FROM and specify the source label -+ * if the target state is slightly different depending on where it is branched -+ * from. This gives better state checking, by isolating the special cases. -+ * -+ * Note: for the same target label, NS_FROM entries must be followed by a -+ * single NS entry. -+ */ -+ -+#define NS_FROM(iname, ifname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \ -+ { \ -+ .name = iname, \ -+ .fname = ifname, \ -+ .mem = imem, \ -+ .regs = iregs, \ -+ .mem_size = ARRAY_SIZE(imem), \ -+ .regs_size = ARRAY_SIZE(iregs), \ -+ .skip_mem.bits[0] = iskip_mem, \ -+ .skip_regs.bits[0] = iskip_regs, \ -+ .osp_offset = iosp_offset, \ -+ .address = 0 \ -+ } -+ -+/* Shorter forms for the common cases */ -+#define NS(iname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \ -+ NS_FROM(iname, NULL, imem, iregs, iskip_mem, iskip_regs, iosp_offset) -+#define NS_MEM(iname, imem, iskip_mem) \ -+ NS_FROM(iname, NULL, imem, no_regs, iskip_mem, 0, 0) -+#define NS_MEM_FROM(iname, ifname, imem, iskip_mem) \ -+ NS_FROM(iname, ifname, imem, no_regs, iskip_mem, 0, 0) -+#define NS_REG(iname, iregs, iskip_regs) \ -+ NS_FROM(iname, NULL, no_memory, iregs, 0, iskip_regs, 0) -+#define NS_REG_FROM(iname, ifname, iregs, iskip_regs) \ -+ NS_FROM(iname, ifname, no_memory, iregs, 0, iskip_regs, 0) -+ -+static void -+bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src); -+ -+static const char *bb_mod_name, *bb_func_name; -+ -+static int -+bb_noret(const char *name) -+{ -+ if (strcmp(name, "panic") == 0 || -+ strcmp(name, "do_exit") == 0 || -+ strcmp(name, "do_group_exit") == 0 || -+ strcmp(name, "complete_and_exit") == 0) -+ return 1; -+ return 0; -+} -+ -+/*============================================================================*/ -+/* */ -+/* Most of the basic block code and data is common to x86_64 and i386. This */ -+/* large ifdef contains almost all of the differences between the two */ -+/* architectures. */ -+/* */ -+/* Make sure you update the correct section of this ifdef. 
*/ -+/* */ -+/*============================================================================*/ -+ -+#ifdef CONFIG_X86_64 -+ -+/* Registers that can be used to pass parameters, in the order that parameters -+ * are passed. -+ */ -+ -+const static enum bb_reg_code -+bb_param_reg[] = { -+ BBRG_RDI, -+ BBRG_RSI, -+ BBRG_RDX, -+ BBRG_RCX, -+ BBRG_R8, -+ BBRG_R9, -+}; -+ -+const static enum bb_reg_code -+bb_preserved_reg[] = { -+ BBRG_RBX, -+ BBRG_RBP, -+ BBRG_RSP, -+ BBRG_R12, -+ BBRG_R13, -+ BBRG_R14, -+ BBRG_R15, -+}; -+ -+static const struct bb_mem_contains full_pt_regs[] = { -+ { 0x70, BBRG_RDI }, -+ { 0x68, BBRG_RSI }, -+ { 0x60, BBRG_RDX }, -+ { 0x58, BBRG_RCX }, -+ { 0x50, BBRG_RAX }, -+ { 0x48, BBRG_R8 }, -+ { 0x40, BBRG_R9 }, -+ { 0x38, BBRG_R10 }, -+ { 0x30, BBRG_R11 }, -+ { 0x28, BBRG_RBX }, -+ { 0x20, BBRG_RBP }, -+ { 0x18, BBRG_R12 }, -+ { 0x10, BBRG_R13 }, -+ { 0x08, BBRG_R14 }, -+ { 0x00, BBRG_R15 }, -+}; -+static const struct bb_mem_contains full_pt_regs_plus_1[] = { -+ { 0x78, BBRG_RDI }, -+ { 0x70, BBRG_RSI }, -+ { 0x68, BBRG_RDX }, -+ { 0x60, BBRG_RCX }, -+ { 0x58, BBRG_RAX }, -+ { 0x50, BBRG_R8 }, -+ { 0x48, BBRG_R9 }, -+ { 0x40, BBRG_R10 }, -+ { 0x38, BBRG_R11 }, -+ { 0x30, BBRG_RBX }, -+ { 0x28, BBRG_RBP }, -+ { 0x20, BBRG_R12 }, -+ { 0x18, BBRG_R13 }, -+ { 0x10, BBRG_R14 }, -+ { 0x08, BBRG_R15 }, -+}; -+/* -+ * Going into error_exit we have the hardware pushed error_code on the stack -+ * plus a full pt_regs -+ */ -+static const struct bb_mem_contains error_code_full_pt_regs[] = { -+ { 0x78, BBRG_UNDEFINED }, -+ { 0x70, BBRG_RDI }, -+ { 0x68, BBRG_RSI }, -+ { 0x60, BBRG_RDX }, -+ { 0x58, BBRG_RCX }, -+ { 0x50, BBRG_RAX }, -+ { 0x48, BBRG_R8 }, -+ { 0x40, BBRG_R9 }, -+ { 0x38, BBRG_R10 }, -+ { 0x30, BBRG_R11 }, -+ { 0x28, BBRG_RBX }, -+ { 0x20, BBRG_RBP }, -+ { 0x18, BBRG_R12 }, -+ { 0x10, BBRG_R13 }, -+ { 0x08, BBRG_R14 }, -+ { 0x00, BBRG_R15 }, -+}; -+static const struct bb_mem_contains partial_pt_regs[] = { -+ { 0x40, BBRG_RDI }, -+ { 0x38, BBRG_RSI }, -+ { 0x30, BBRG_RDX }, -+ { 0x28, BBRG_RCX }, -+ { 0x20, BBRG_RAX }, -+ { 0x18, BBRG_R8 }, -+ { 0x10, BBRG_R9 }, -+ { 0x08, BBRG_R10 }, -+ { 0x00, BBRG_R11 }, -+}; -+static const struct bb_mem_contains partial_pt_regs_plus_1[] = { -+ { 0x48, BBRG_RDI }, -+ { 0x40, BBRG_RSI }, -+ { 0x38, BBRG_RDX }, -+ { 0x30, BBRG_RCX }, -+ { 0x28, BBRG_RAX }, -+ { 0x20, BBRG_R8 }, -+ { 0x18, BBRG_R9 }, -+ { 0x10, BBRG_R10 }, -+ { 0x08, BBRG_R11 }, -+}; -+static const struct bb_mem_contains partial_pt_regs_plus_2[] = { -+ { 0x50, BBRG_RDI }, -+ { 0x48, BBRG_RSI }, -+ { 0x40, BBRG_RDX }, -+ { 0x38, BBRG_RCX }, -+ { 0x30, BBRG_RAX }, -+ { 0x28, BBRG_R8 }, -+ { 0x20, BBRG_R9 }, -+ { 0x18, BBRG_R10 }, -+ { 0x10, BBRG_R11 }, -+}; -+static const struct bb_mem_contains no_memory[] = { -+}; -+/* Hardware has already pushed an error_code on the stack. Use undefined just -+ * to set the initial stack offset. 
-+ */ -+static const struct bb_mem_contains error_code[] = { -+ { 0x0, BBRG_UNDEFINED }, -+}; -+/* error_code plus original rax */ -+static const struct bb_mem_contains error_code_rax[] = { -+ { 0x8, BBRG_UNDEFINED }, -+ { 0x0, BBRG_RAX }, -+}; -+ -+static const struct bb_reg_contains all_regs[] = { -+ [BBRG_RAX] = { BBRG_RAX, 0 }, -+ [BBRG_RBX] = { BBRG_RBX, 0 }, -+ [BBRG_RCX] = { BBRG_RCX, 0 }, -+ [BBRG_RDX] = { BBRG_RDX, 0 }, -+ [BBRG_RDI] = { BBRG_RDI, 0 }, -+ [BBRG_RSI] = { BBRG_RSI, 0 }, -+ [BBRG_RBP] = { BBRG_RBP, 0 }, -+ [BBRG_RSP] = { BBRG_OSP, 0 }, -+ [BBRG_R8 ] = { BBRG_R8, 0 }, -+ [BBRG_R9 ] = { BBRG_R9, 0 }, -+ [BBRG_R10] = { BBRG_R10, 0 }, -+ [BBRG_R11] = { BBRG_R11, 0 }, -+ [BBRG_R12] = { BBRG_R12, 0 }, -+ [BBRG_R13] = { BBRG_R13, 0 }, -+ [BBRG_R14] = { BBRG_R14, 0 }, -+ [BBRG_R15] = { BBRG_R15, 0 }, -+}; -+static const struct bb_reg_contains no_regs[] = { -+}; -+ -+static struct bb_name_state bb_special_cases[] = { -+ -+ /* First the cases that pass data only in memory. We do not check any -+ * register state for these cases. -+ */ -+ -+ /* Simple cases, no exceptions */ -+ NS_MEM("ia32_ptregs_common", partial_pt_regs_plus_1, 0), -+ NS_MEM("ia32_sysret", partial_pt_regs, 0), -+ NS_MEM("int_careful", partial_pt_regs, 0), -+ NS_MEM("ia32_badarg", partial_pt_regs, 0), -+ NS_MEM("int_restore_rest", full_pt_regs, 0), -+ NS_MEM("int_signal", full_pt_regs, 0), -+ NS_MEM("int_very_careful", partial_pt_regs, 0), -+ NS_MEM("ptregscall_common", full_pt_regs_plus_1, 0), -+ NS_MEM("ret_from_intr", partial_pt_regs_plus_2, 0), -+ NS_MEM("stub32_clone", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub32_execve", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub32_fork", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub32_iopl", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub32_rt_sigreturn", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub32_sigaltstack", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub32_sigreturn", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub32_vfork", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub_clone", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub_execve", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub_fork", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub_iopl", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub_rt_sigreturn", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub_sigaltstack", partial_pt_regs_plus_1, 0), -+ NS_MEM("stub_vfork", partial_pt_regs_plus_1, 0), -+ NS_MEM("sysenter_auditsys", partial_pt_regs, -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)), -+ -+ NS_MEM("paranoid_exit", error_code_full_pt_regs, 0), -+ -+ NS_MEM_FROM("ia32_badsys", "ia32_sysenter_target", -+ partial_pt_regs, -+ /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on -+ * some paths. It also stomps on RAX. -+ */ -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) | -+ BB_SKIP(RAX)), -+ NS_MEM_FROM("ia32_badsys", "ia32_cstar_target", -+ partial_pt_regs, -+ /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some -+ * paths. It also stomps on RAX. Even more confusing, instead -+ * of storing RCX it stores RBP. WTF? 
-+ */ -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) | -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ NS_MEM_FROM("ia32_badsys", "ia32_syscall", -+ partial_pt_regs, -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)), -+ NS_MEM("ia32_badsys", partial_pt_regs, 0), -+ -+#ifdef CONFIG_AUDITSYSCALL -+ NS_MEM_FROM("int_with_check", "sysexit_audit", partial_pt_regs, -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) | -+ BB_SKIP(RAX)), -+ NS_MEM_FROM("int_with_check", "ia32_cstar_target", partial_pt_regs, -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) | -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+#endif -+ NS_MEM("int_with_check", no_memory, 0), -+ -+ /* Various bits of code branch to int_ret_from_sys_call, with slightly -+ * different missing values in pt_regs. -+ */ -+ NS_MEM_FROM("int_ret_from_sys_call", "ret_from_fork", -+ partial_pt_regs, -+ BB_SKIP(R11)), -+ NS_MEM_FROM("int_ret_from_sys_call", "stub_execve", -+ partial_pt_regs, -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ NS_MEM_FROM("int_ret_from_sys_call", "stub_rt_sigreturn", -+ partial_pt_regs, -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ NS_MEM_FROM("int_ret_from_sys_call", "kernel_execve", -+ partial_pt_regs, -+ BB_SKIP(RAX)), -+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_syscall", -+ partial_pt_regs, -+ /* ia32_syscall only saves RDI through RCX. */ -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) | -+ BB_SKIP(RAX)), -+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_sysenter_target", -+ partial_pt_regs, -+ /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on -+ * some paths. It also stomps on RAX. -+ */ -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) | -+ BB_SKIP(RAX)), -+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_cstar_target", -+ partial_pt_regs, -+ /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some -+ * paths. It also stomps on RAX. Even more confusing, instead -+ * of storing RCX it stores RBP. WTF? -+ */ -+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) | -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_badsys", -+ partial_pt_regs, BB_SKIP(RAX)), -+ NS_MEM("int_ret_from_sys_call", partial_pt_regs, 0), -+ -+#ifdef CONFIG_PREEMPT -+ NS_MEM("retint_kernel", partial_pt_regs, BB_SKIP(RAX)), -+#endif /* CONFIG_PREEMPT */ -+ -+ NS_MEM("retint_careful", partial_pt_regs, BB_SKIP(RAX)), -+ -+ /* Horrible hack: For a brand new x86_64 task, switch_to() branches to -+ * ret_from_fork with a totally different stack state from all the -+ * other tasks that come out of switch_to(). This non-standard state -+ * cannot be represented so just ignore the branch from switch_to() to -+ * ret_from_fork. Due to inlining and linker labels, switch_to() can -+ * appear as several different function labels, including schedule, -+ * context_switch and __sched_text_start. -+ */ -+ NS_MEM_FROM("ret_from_fork", "schedule", no_memory, 0), -+ NS_MEM_FROM("ret_from_fork", "__schedule", no_memory, 0), -+ NS_MEM_FROM("ret_from_fork", "__sched_text_start", no_memory, 0), -+ NS_MEM_FROM("ret_from_fork", "context_switch", no_memory, 0), -+ NS_MEM("ret_from_fork", full_pt_regs, 0), -+ -+ NS_MEM_FROM("ret_from_sys_call", "ret_from_fork", -+ partial_pt_regs, -+ BB_SKIP(R11)), -+ NS_MEM("ret_from_sys_call", partial_pt_regs, 0), -+ -+ NS_MEM("retint_restore_args", -+ partial_pt_regs, -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ -+ NS_MEM("retint_swapgs", -+ partial_pt_regs, -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ -+ /* Now the cases that pass data in registers. 
We do not check any -+ * memory state for these cases. -+ */ -+ -+ NS_REG("bad_put_user", -+ all_regs, BB_SKIP(RBX)), -+ -+ NS_REG("bad_get_user", -+ all_regs, BB_SKIP(RAX) | BB_SKIP(RDX)), -+ -+ NS_REG("bad_to_user", -+ all_regs, -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ -+ NS_REG("ia32_ptregs_common", -+ all_regs, -+ 0), -+ -+ NS_REG("copy_user_generic_unrolled", -+ all_regs, -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ -+ NS_REG("copy_user_generic_string", -+ all_regs, -+ BB_SKIP(RAX) | BB_SKIP(RCX)), -+ -+ NS_REG("irq_return", -+ all_regs, -+ 0), -+ -+ /* Finally the cases that pass data in both registers and memory. -+ */ -+ -+ NS("invalid_TSS", error_code, all_regs, 0, 0, 0), -+ NS("segment_not_present", error_code, all_regs, 0, 0, 0), -+ NS("alignment_check", error_code, all_regs, 0, 0, 0), -+ NS("page_fault", error_code, all_regs, 0, 0, 0), -+ NS("general_protection", error_code, all_regs, 0, 0, 0), -+ NS("error_entry", error_code_rax, all_regs, 0, BB_SKIP(RAX), -0x10), -+ NS("error_exit", error_code_full_pt_regs, no_regs, 0, 0, 0x30), -+ NS("common_interrupt", error_code, all_regs, 0, 0, -0x8), -+ NS("save_args", error_code, all_regs, 0, 0, -0x50), -+ NS("int3", no_memory, all_regs, 0, 0, -0x80), -+}; -+ -+static const char *bb_spurious[] = { -+ /* schedule */ -+ "thread_return", -+ /* system_call */ -+ "system_call_after_swapgs", -+ "system_call_fastpath", -+ "ret_from_sys_call", -+ "sysret_check", -+ "sysret_careful", -+ "sysret_signal", -+ "badsys", -+#ifdef CONFIG_AUDITSYSCALL -+ "auditsys", -+ "sysret_audit", -+#endif -+ "tracesys", -+ "int_ret_from_sys_call", -+ "int_with_check", -+ "int_careful", -+ "int_very_careful", -+ "int_signal", -+ "int_restore_rest", -+ /* common_interrupt */ -+ "ret_from_intr", -+ "exit_intr", -+ "retint_with_reschedule", -+ "retint_check", -+ "retint_swapgs", -+ "retint_restore_args", -+ "restore_args", -+ "irq_return", -+ "bad_iret", -+ "retint_careful", -+ "retint_signal", -+#ifdef CONFIG_PREEMPT -+ "retint_kernel", -+#endif /* CONFIG_PREEMPT */ -+ /* paranoid_exit */ -+ "paranoid_swapgs", -+ "paranoid_restore", -+ "paranoid_userspace", -+ "paranoid_schedule", -+ /* error_entry */ -+ "error_swapgs", -+ "error_sti", -+ "error_kernelspace", -+ /* nmi */ -+#ifdef CONFIG_TRACE_IRQFLAGS -+ "nmi_swapgs", -+ "nmi_restore", -+ "nmi_userspace", -+ "nmi_schedule", -+#endif -+ /* load_gs_index */ -+ "gs_change", -+ "bad_gs", -+ /* ia32_sysenter_target */ -+ "sysenter_do_call", -+ "sysenter_dispatch", -+ "sysexit_from_sys_call", -+#ifdef CONFIG_AUDITSYSCALL -+ "sysenter_auditsys", -+ "sysexit_audit", -+#endif -+ "sysenter_tracesys", -+ /* ia32_cstar_target */ -+ "cstar_do_call", -+ "cstar_dispatch", -+ "sysretl_from_sys_call", -+#ifdef CONFIG_AUDITSYSCALL -+ "cstar_auditsys", -+ "sysretl_audit", -+#endif -+ "cstar_tracesys", -+ /* ia32_syscall */ -+ "ia32_do_call", -+ "ia32_sysret", -+ "ia32_tracesys", -+#ifdef CONFIG_HIBERNATION -+ /* restore_image */ -+ "loop", -+ "done", -+#endif /* CONFIG_HIBERNATION */ -+#ifdef CONFIG_KPROBES -+ /* jprobe_return */ -+ "jprobe_return_end", -+ /* kretprobe_trampoline_holder */ -+ "kretprobe_trampoline", -+#endif /* CONFIG_KPROBES */ -+#ifdef CONFIG_KEXEC -+ /* relocate_kernel */ -+ "relocate_new_kernel", -+#endif /* CONFIG_KEXEC */ -+#ifdef CONFIG_XEN -+ /* arch/i386/xen/xen-asm.S */ -+ "xen_irq_enable_direct_end", -+ "xen_irq_disable_direct_end", -+ "xen_save_fl_direct_end", -+ "xen_restore_fl_direct_end", -+ "xen_iret_start_crit", -+ "iret_restore_end", -+ "xen_iret_end_crit", -+ "hyper_iret", -+#endif /* CONFIG_XEN */ -+}; -+ 
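The BB_SKIP() masks OR-ed together in the bb_special_cases entries above imply one bit per integer register. As a rough illustration of how such a mask is meant to be consulted when a saved register does not match its template, here is a minimal sketch; BB_SKIP()'s real definition is not shown here, so the macro and function names below are assumptions, not the patch's API.

/* Sketch only: assumes BB_SKIP(R) expands to one bit per integer register,
 * as the OR-ed masks in bb_special_cases suggest.  A mismatch in a register
 * whose bit is set in the entry's skip mask is deliberately ignored.
 */
#define SKETCH_SKIP(reg)	(1 << ((reg) - BBRG_RAX))

static int
sketch_mismatch_matters(unsigned long skip_mask, enum bb_reg_code reg)
{
	/* returns 0 when the entry listed this register via BB_SKIP() */
	return (skip_mask & SKETCH_SKIP(reg)) == 0;
}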
-+static const char *bb_hardware_handlers[] = { -+ "system_call", -+ "common_interrupt", -+ "error_entry", -+ "debug", -+ "nmi", -+ "int3", -+ "double_fault", -+ "stack_segment", -+ "machine_check", -+ "kdb_call", -+}; -+ -+static int -+bb_hardware_pushed_arch(kdb_machreg_t rsp, -+ const struct kdb_activation_record *ar) -+{ -+ /* x86_64 interrupt stacks are 16 byte aligned and you must get the -+ * next rsp from stack, it cannot be statically calculated. Do not -+ * include the word at rsp, it is pushed by hardware but is treated as -+ * a normal software return value. -+ * -+ * When an IST switch occurs (e.g. NMI) then the saved rsp points to -+ * another stack entirely. Assume that the IST stack is 16 byte -+ * aligned and just return the size of the hardware data on this stack. -+ * The stack unwind code will take care of the stack switch. -+ */ -+ kdb_machreg_t saved_rsp = *((kdb_machreg_t *)rsp + 3); -+ int hardware_pushed = saved_rsp - rsp - KDB_WORD_SIZE; -+ if (hardware_pushed < 4 * KDB_WORD_SIZE || -+ saved_rsp < ar->stack.logical_start || -+ saved_rsp >= ar->stack.logical_end) -+ return 4 * KDB_WORD_SIZE; -+ else -+ return hardware_pushed; -+} -+ -+static void -+bb_start_block0(void) -+{ -+ bb_reg_code_set_value(BBRG_RAX, BBRG_RAX); -+ bb_reg_code_set_value(BBRG_RBX, BBRG_RBX); -+ bb_reg_code_set_value(BBRG_RCX, BBRG_RCX); -+ bb_reg_code_set_value(BBRG_RDX, BBRG_RDX); -+ bb_reg_code_set_value(BBRG_RDI, BBRG_RDI); -+ bb_reg_code_set_value(BBRG_RSI, BBRG_RSI); -+ bb_reg_code_set_value(BBRG_RBP, BBRG_RBP); -+ bb_reg_code_set_value(BBRG_RSP, BBRG_OSP); -+ bb_reg_code_set_value(BBRG_R8, BBRG_R8); -+ bb_reg_code_set_value(BBRG_R9, BBRG_R9); -+ bb_reg_code_set_value(BBRG_R10, BBRG_R10); -+ bb_reg_code_set_value(BBRG_R11, BBRG_R11); -+ bb_reg_code_set_value(BBRG_R12, BBRG_R12); -+ bb_reg_code_set_value(BBRG_R13, BBRG_R13); -+ bb_reg_code_set_value(BBRG_R14, BBRG_R14); -+ bb_reg_code_set_value(BBRG_R15, BBRG_R15); -+} -+ -+/* x86_64 does not have a special case for __switch_to */ -+ -+static void -+bb_fixup_switch_to(char *p) -+{ -+} -+ -+static int -+bb_asmlinkage_arch(void) -+{ -+ return strncmp(bb_func_name, "__down", 6) == 0 || -+ strncmp(bb_func_name, "__up", 4) == 0 || -+ strncmp(bb_func_name, "stub_", 5) == 0 || -+ strcmp(bb_func_name, "ret_from_fork") == 0 || -+ strcmp(bb_func_name, "ptregscall_common") == 0; -+} -+ -+#else /* !CONFIG_X86_64 */ -+ -+/* Registers that can be used to pass parameters, in the order that parameters -+ * are passed. -+ */ -+ -+const static enum bb_reg_code -+bb_param_reg[] = { -+ BBRG_RAX, -+ BBRG_RDX, -+ BBRG_RCX, -+}; -+ -+const static enum bb_reg_code -+bb_preserved_reg[] = { -+ BBRG_RBX, -+ BBRG_RBP, -+ BBRG_RSP, -+ BBRG_RSI, -+ BBRG_RDI, -+}; -+ -+static const struct bb_mem_contains full_pt_regs[] = { -+ { 0x18, BBRG_RAX }, -+ { 0x14, BBRG_RBP }, -+ { 0x10, BBRG_RDI }, -+ { 0x0c, BBRG_RSI }, -+ { 0x08, BBRG_RDX }, -+ { 0x04, BBRG_RCX }, -+ { 0x00, BBRG_RBX }, -+}; -+static const struct bb_mem_contains no_memory[] = { -+}; -+/* Hardware has already pushed an error_code on the stack. Use undefined just -+ * to set the initial stack offset. 
-+ */ -+static const struct bb_mem_contains error_code[] = { -+ { 0x0, BBRG_UNDEFINED }, -+}; -+/* rbx already pushed */ -+static const struct bb_mem_contains rbx_pushed[] = { -+ { 0x0, BBRG_RBX }, -+}; -+#ifdef CONFIG_MATH_EMULATION -+static const struct bb_mem_contains mem_fpu_reg_round[] = { -+ { 0xc, BBRG_RBP }, -+ { 0x8, BBRG_RSI }, -+ { 0x4, BBRG_RDI }, -+ { 0x0, BBRG_RBX }, -+}; -+#endif /* CONFIG_MATH_EMULATION */ -+ -+static const struct bb_reg_contains all_regs[] = { -+ [BBRG_RAX] = { BBRG_RAX, 0 }, -+ [BBRG_RBX] = { BBRG_RBX, 0 }, -+ [BBRG_RCX] = { BBRG_RCX, 0 }, -+ [BBRG_RDX] = { BBRG_RDX, 0 }, -+ [BBRG_RDI] = { BBRG_RDI, 0 }, -+ [BBRG_RSI] = { BBRG_RSI, 0 }, -+ [BBRG_RBP] = { BBRG_RBP, 0 }, -+ [BBRG_RSP] = { BBRG_OSP, 0 }, -+}; -+static const struct bb_reg_contains no_regs[] = { -+}; -+#ifdef CONFIG_MATH_EMULATION -+static const struct bb_reg_contains reg_fpu_reg_round[] = { -+ [BBRG_RBP] = { BBRG_OSP, -0x4 }, -+ [BBRG_RSP] = { BBRG_OSP, -0x10 }, -+}; -+#endif /* CONFIG_MATH_EMULATION */ -+ -+static struct bb_name_state bb_special_cases[] = { -+ -+ /* First the cases that pass data only in memory. We do not check any -+ * register state for these cases. -+ */ -+ -+ /* Simple cases, no exceptions */ -+ NS_MEM("check_userspace", full_pt_regs, 0), -+ NS_MEM("device_not_available_emulate", full_pt_regs, 0), -+ NS_MEM("ldt_ss", full_pt_regs, 0), -+ NS_MEM("no_singlestep", full_pt_regs, 0), -+ NS_MEM("restore_all", full_pt_regs, 0), -+ NS_MEM("restore_nocheck", full_pt_regs, 0), -+ NS_MEM("restore_nocheck_notrace", full_pt_regs, 0), -+ NS_MEM("ret_from_exception", full_pt_regs, 0), -+ NS_MEM("ret_from_fork", full_pt_regs, 0), -+ NS_MEM("ret_from_intr", full_pt_regs, 0), -+ NS_MEM("work_notifysig", full_pt_regs, 0), -+ NS_MEM("work_pending", full_pt_regs, 0), -+ -+#ifdef CONFIG_PREEMPT -+ NS_MEM("resume_kernel", full_pt_regs, 0), -+#endif /* CONFIG_PREEMPT */ -+ -+ NS_MEM("common_interrupt", error_code, 0), -+ NS_MEM("error_code", error_code, 0), -+ -+ NS_MEM("bad_put_user", rbx_pushed, 0), -+ -+ NS_MEM_FROM("resume_userspace", "syscall_badsys", -+ full_pt_regs, BB_SKIP(RAX)), -+ NS_MEM_FROM("resume_userspace", "syscall_fault", -+ full_pt_regs, BB_SKIP(RAX)), -+ NS_MEM_FROM("resume_userspace", "syscall_trace_entry", -+ full_pt_regs, BB_SKIP(RAX)), -+ /* Too difficult to trace through the various vm86 functions for now. -+ * They are C functions that start off with some memory state, fiddle -+ * the registers then jmp directly to resume_userspace. For the -+ * moment, just assume that they are valid and do no checks. 
-+ */ -+ NS_FROM("resume_userspace", "do_int", -+ no_memory, no_regs, 0, 0, 0), -+ NS_FROM("resume_userspace", "do_sys_vm86", -+ no_memory, no_regs, 0, 0, 0), -+ NS_FROM("resume_userspace", "handle_vm86_fault", -+ no_memory, no_regs, 0, 0, 0), -+ NS_FROM("resume_userspace", "handle_vm86_trap", -+ no_memory, no_regs, 0, 0, 0), -+ NS_MEM("resume_userspace", full_pt_regs, 0), -+ -+ NS_MEM_FROM("syscall_badsys", "ia32_sysenter_target", -+ full_pt_regs, BB_SKIP(RBP)), -+ NS_MEM("syscall_badsys", full_pt_regs, 0), -+ -+ NS_MEM_FROM("syscall_call", "syscall_trace_entry", -+ full_pt_regs, BB_SKIP(RAX)), -+ NS_MEM("syscall_call", full_pt_regs, 0), -+ -+ NS_MEM_FROM("syscall_exit", "syscall_trace_entry", -+ full_pt_regs, BB_SKIP(RAX)), -+ NS_MEM("syscall_exit", full_pt_regs, 0), -+ -+ NS_MEM_FROM("syscall_exit_work", "ia32_sysenter_target", -+ full_pt_regs, BB_SKIP(RAX) | BB_SKIP(RBP)), -+ NS_MEM_FROM("syscall_exit_work", "system_call", -+ full_pt_regs, BB_SKIP(RAX)), -+ NS_MEM("syscall_exit_work", full_pt_regs, 0), -+ -+ NS_MEM_FROM("syscall_trace_entry", "ia32_sysenter_target", -+ full_pt_regs, BB_SKIP(RBP)), -+ NS_MEM_FROM("syscall_trace_entry", "system_call", -+ full_pt_regs, BB_SKIP(RAX)), -+ NS_MEM("syscall_trace_entry", full_pt_regs, 0), -+ -+ /* Now the cases that pass data in registers. We do not check any -+ * memory state for these cases. -+ */ -+ -+ NS_REG("syscall_fault", all_regs, 0), -+ -+ NS_REG("bad_get_user", all_regs, -+ BB_SKIP(RAX) | BB_SKIP(RDX)), -+ -+ /* Finally the cases that pass data in both registers and memory. -+ */ -+ -+ /* This entry is redundant now because bb_fixup_switch_to() hides the -+ * jmp __switch_to case, however the entry is left here as -+ * documentation. -+ * -+ * NS("__switch_to", no_memory, no_regs, 0, 0, 0), -+ */ -+ -+ NS("iret_exc", no_memory, all_regs, 0, 0, 0x20), -+ -+#ifdef CONFIG_MATH_EMULATION -+ NS("fpu_reg_round", mem_fpu_reg_round, reg_fpu_reg_round, 0, 0, 0), -+#endif /* CONFIG_MATH_EMULATION */ -+}; -+ -+static const char *bb_spurious[] = { -+ /* ret_from_exception */ -+ "ret_from_intr", -+ "check_userspace", -+ "resume_userspace", -+ /* resume_kernel */ -+#ifdef CONFIG_PREEMPT -+ "need_resched", -+#endif /* CONFIG_PREEMPT */ -+ /* ia32_sysenter_target */ -+ "sysenter_past_esp", -+ /* system_call */ -+ "no_singlestep", -+ "syscall_call", -+ "syscall_exit", -+ "restore_all", -+ "restore_nocheck", -+ "restore_nocheck_notrace", -+ "ldt_ss", -+ /* do not include iret_exc, it is in a .fixup section */ -+ /* work_pending */ -+ "work_resched", -+ "work_notifysig", -+#ifdef CONFIG_VM86 -+ "work_notifysig_v86", -+#endif /* CONFIG_VM86 */ -+ /* page_fault */ -+ "error_code", -+ /* device_not_available */ -+ "device_not_available_emulate", -+ /* debug */ -+ "debug_esp_fix_insn", -+ "debug_stack_correct", -+ /* nmi */ -+ "nmi_stack_correct", -+ "nmi_stack_fixup", -+ "nmi_debug_stack_check", -+ "nmi_espfix_stack", -+#ifdef CONFIG_HIBERNATION -+ /* restore_image */ -+ "copy_loop", -+ "done", -+#endif /* CONFIG_HIBERNATION */ -+#ifdef CONFIG_KPROBES -+ /* jprobe_return */ -+ "jprobe_return_end", -+#endif /* CONFIG_KPROBES */ -+#ifdef CONFIG_KEXEC -+ /* relocate_kernel */ -+ "relocate_new_kernel", -+#endif /* CONFIG_KEXEC */ -+#ifdef CONFIG_MATH_EMULATION -+ /* assorted *.S files in arch/i386/math_emu */ -+ "Denorm_done", -+ "Denorm_shift_more_than_32", -+ "Denorm_shift_more_than_63", -+ "Denorm_shift_more_than_64", -+ "Do_unmasked_underflow", -+ "Exp_not_underflow", -+ "fpu_Arith_exit", -+ "fpu_reg_round", -+ "fpu_reg_round_signed_special_exit", -+ 
"fpu_reg_round_special_exit", -+ "L_accum_done", -+ "L_accum_loaded", -+ "L_accum_loop", -+ "L_arg1_larger", -+ "L_bugged", -+ "L_bugged_1", -+ "L_bugged_2", -+ "L_bugged_3", -+ "L_bugged_4", -+ "L_bugged_denorm_486", -+ "L_bugged_round24", -+ "L_bugged_round53", -+ "L_bugged_round64", -+ "LCheck_24_round_up", -+ "LCheck_53_round_up", -+ "LCheck_Round_Overflow", -+ "LCheck_truncate_24", -+ "LCheck_truncate_53", -+ "LCheck_truncate_64", -+ "LDenormal_adj_exponent", -+ "L_deNormalised", -+ "LDo_24_round_up", -+ "LDo_2nd_32_bits", -+ "LDo_2nd_div", -+ "LDo_3rd_32_bits", -+ "LDo_3rd_div", -+ "LDo_53_round_up", -+ "LDo_64_round_up", -+ "L_done", -+ "LDo_truncate_24", -+ "LDown_24", -+ "LDown_53", -+ "LDown_64", -+ "L_entry_bugged", -+ "L_error_exit", -+ "L_exactly_32", -+ "L_exception_exit", -+ "L_exit", -+ "L_exit_nuo_valid", -+ "L_exit_nuo_zero", -+ "L_exit_valid", -+ "L_extent_zero", -+ "LFirst_div_done", -+ "LFirst_div_not_1", -+ "L_Full_Division", -+ "LGreater_Half_24", -+ "LGreater_Half_53", -+ "LGreater_than_1", -+ "LLess_than_1", -+ "L_Make_denorm", -+ "L_more_31_no_low", -+ "L_more_63_no_low", -+ "L_more_than_31", -+ "L_more_than_63", -+ "L_more_than_64", -+ "L_more_than_65", -+ "L_more_than_95", -+ "L_must_be_zero", -+ "L_n_exit", -+ "L_no_adjust", -+ "L_no_bit_lost", -+ "L_no_overflow", -+ "L_no_precision_loss", -+ "L_Normalised", -+ "L_norm_bugged", -+ "L_n_shift_1", -+ "L_nuo_shift_1", -+ "L_overflow", -+ "L_precision_lost_down", -+ "L_precision_lost_up", -+ "LPrevent_2nd_overflow", -+ "LPrevent_3rd_overflow", -+ "LPseudoDenormal", -+ "L_Re_normalise", -+ "LResult_Normalised", -+ "L_round", -+ "LRound_large", -+ "LRound_nearest_24", -+ "LRound_nearest_53", -+ "LRound_nearest_64", -+ "LRound_not_small", -+ "LRound_ovfl", -+ "LRound_precision", -+ "LRound_prep", -+ "L_round_the_result", -+ "LRound_To_24", -+ "LRound_To_53", -+ "LRound_To_64", -+ "LSecond_div_done", -+ "LSecond_div_not_1", -+ "L_shift_1", -+ "L_shift_32", -+ "L_shift_65_nc", -+ "L_shift_done", -+ "Ls_less_than_32", -+ "Ls_more_than_63", -+ "Ls_more_than_95", -+ "L_Store_significand", -+ "L_subtr", -+ "LTest_over", -+ "LTruncate_53", -+ "LTruncate_64", -+ "L_underflow", -+ "L_underflow_to_zero", -+ "LUp_24", -+ "LUp_53", -+ "LUp_64", -+ "L_zero", -+ "Normalise_result", -+ "Signal_underflow", -+ "sqrt_arg_ge_2", -+ "sqrt_get_more_precision", -+ "sqrt_more_prec_large", -+ "sqrt_more_prec_ok", -+ "sqrt_more_prec_small", -+ "sqrt_near_exact", -+ "sqrt_near_exact_large", -+ "sqrt_near_exact_ok", -+ "sqrt_near_exact_small", -+ "sqrt_near_exact_x", -+ "sqrt_prelim_no_adjust", -+ "sqrt_round_result", -+ "sqrt_stage_2_done", -+ "sqrt_stage_2_error", -+ "sqrt_stage_2_finish", -+ "sqrt_stage_2_positive", -+ "sqrt_stage_3_error", -+ "sqrt_stage_3_finished", -+ "sqrt_stage_3_no_error", -+ "sqrt_stage_3_positive", -+ "Unmasked_underflow", -+ "xExp_not_underflow", -+#endif /* CONFIG_MATH_EMULATION */ -+}; -+ -+static const char *bb_hardware_handlers[] = { -+ "ret_from_exception", -+ "system_call", -+ "work_pending", -+ "syscall_fault", -+ "page_fault", -+ "coprocessor_error", -+ "simd_coprocessor_error", -+ "device_not_available", -+ "debug", -+ "nmi", -+ "int3", -+ "overflow", -+ "bounds", -+ "invalid_op", -+ "coprocessor_segment_overrun", -+ "invalid_TSS", -+ "segment_not_present", -+ "stack_segment", -+ "general_protection", -+ "alignment_check", -+ "kdb_call", -+ "divide_error", -+ "machine_check", -+ "spurious_interrupt_bug", -+}; -+ -+static int -+bb_hardware_pushed_arch(kdb_machreg_t rsp, -+ const struct kdb_activation_record 
*ar) -+{ -+ return (2 * KDB_WORD_SIZE); -+} -+ -+static void -+bb_start_block0(void) -+{ -+ bb_reg_code_set_value(BBRG_RAX, BBRG_RAX); -+ bb_reg_code_set_value(BBRG_RBX, BBRG_RBX); -+ bb_reg_code_set_value(BBRG_RCX, BBRG_RCX); -+ bb_reg_code_set_value(BBRG_RDX, BBRG_RDX); -+ bb_reg_code_set_value(BBRG_RDI, BBRG_RDI); -+ bb_reg_code_set_value(BBRG_RSI, BBRG_RSI); -+ bb_reg_code_set_value(BBRG_RBP, BBRG_RBP); -+ bb_reg_code_set_value(BBRG_RSP, BBRG_OSP); -+} -+ -+/* The i386 code that switches stack in a context switch is an extremely -+ * special case. It saves the rip pointing to a label that is not otherwise -+ * referenced, saves the current rsp then pushes a word. The magic code that -+ * resumes the new task picks up the saved rip and rsp, effectively referencing -+ * a label that otherwise is not used and ignoring the pushed word. -+ * -+ * The simplest way to handle this very strange case is to recognise jmp -+ * address <__switch_to> and treat it as a popfl instruction. This avoids -+ * terminating the block on this jmp and removes one word from the stack state, -+ * which is the end effect of all the magic code. -+ * -+ * Called with the instruction line, starting after the first ':'. -+ */ -+ -+static void -+bb_fixup_switch_to(char *p) -+{ -+ char *p1 = p; -+ p += strspn(p, " \t"); /* start of instruction */ -+ if (strncmp(p, "jmp", 3)) -+ return; -+ p += strcspn(p, " \t"); /* end of instruction */ -+ p += strspn(p, " \t"); /* start of address */ -+ p += strcspn(p, " \t"); /* end of address */ -+ p += strspn(p, " \t"); /* start of comment */ -+ if (strcmp(p, "<__switch_to>") == 0) -+ strcpy(p1, "popfl"); -+} -+ -+static int -+bb_asmlinkage_arch(void) -+{ -+ return strcmp(bb_func_name, "ret_from_exception") == 0 || -+ strcmp(bb_func_name, "syscall_trace_entry") == 0; -+} -+ -+#endif /* CONFIG_X86_64 */ -+ -+ -+/*============================================================================*/ -+/* */ -+/* Common code and data. */ -+/* */ -+/*============================================================================*/ -+ -+ -+/* Tracking registers by decoding the instructions is quite a bit harder than -+ * doing the same tracking using compiler generated information. Register -+ * contents can remain in the same register, they can be copied to other -+ * registers, they can be stored on stack or they can be modified/overwritten. -+ * At any one time, there are 0 or more copies of the original value that was -+ * supplied in each register on input to the current function. If a register -+ * exists in multiple places, one copy of that register is the master version, -+ * the others are temporary copies which may or may not be destroyed before the -+ * end of the function. -+ * -+ * The compiler knows which copy of a register is the master and which are -+ * temporary copies, which makes it relatively easy to track register contents -+ * as they are saved and restored. Without that compiler based knowledge, this -+ * code has to track _every_ possible copy of each register, simply because we -+ * do not know which is the master copy and which are temporary copies which -+ * may be destroyed later. -+ * -+ * It gets worse: registers that contain parameters can be copied to other -+ * registers which are then saved on stack in a lower level function. Also the -+ * stack pointer may be held in multiple registers (typically RSP and RBP) -+ * which contain different offsets from the base of the stack on entry to this -+ * function. 
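A short worked example, not taken from the patch, of the situation just described, using the x86_64 convention from bb_param_reg above (first parameter in rdi):

	mov    %rdi,%rbx	/* parameter 1 now lives in both rdi and rbx */
	push   %rbx		/* and in a third copy at osp-0x8 */
	mov    $0x0,%rdi	/* the copy in rdi is destroyed */

After these three instructions only rbx and the stack slot still hold the first parameter, and nothing in the instruction stream marks either one as the "master" copy.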
All of which means that we have to track _all_ register -+ * movements, or at least as much as possible. -+ * -+ * Start with the basic block that contains the start of the function, by -+ * definition all registers contain their initial value. Track each -+ * instruction's effect on register contents, this includes reading from a -+ * parameter register before any write to that register, IOW the register -+ * really does contain a parameter. The register state is represented by a -+ * dynamically sized array with each entry containing :- -+ * -+ * Register name -+ * Location it is copied to (another register or stack + offset) -+ * -+ * Besides the register tracking array, we track which parameter registers are -+ * read before being written, to determine how many parameters are passed in -+ * registers. We also track which registers contain stack pointers, including -+ * their offset from the original stack pointer on entry to the function. -+ * -+ * At each exit from the current basic block (via JMP instruction or drop -+ * through), the register state is cloned to form the state on input to the -+ * target basic block and the target is marked for processing using this state. -+ * When there are multiple ways to enter a basic block (e.g. several JMP -+ * instructions referencing the same target) then there will be multiple sets -+ * of register state to form the "input" for that basic block, there is no -+ * guarantee that all paths to that block will have the same register state. -+ * -+ * As each target block is processed, all the known sets of register state are -+ * merged to form a suitable subset of the state which agrees with all the -+ * inputs. The most common case is where one path to this block copies a -+ * register to another register but another path does not, therefore the copy -+ * is only a temporary and should not be propogated into this block. -+ * -+ * If the target block already has an input state from the current transfer -+ * point and the new input state is identical to the previous input state then -+ * we have reached a steady state for the arc from the current location to the -+ * target block. Therefore there is no need to process the target block again. -+ * -+ * The steps of "process a block, create state for target block(s), pick a new -+ * target block, merge state for target block, process target block" will -+ * continue until all the state changes have propogated all the way down the -+ * basic block tree, including round any cycles in the tree. The merge step -+ * only deletes tracking entries from the input state(s), it never adds a -+ * tracking entry. Therefore the overall algorithm is guaranteed to converge -+ * to a steady state, the worst possible case is that every tracking entry into -+ * a block is deleted, which will result in an empty output state. -+ * -+ * As each instruction is decoded, it is checked to see if this is the point at -+ * which execution left this function. This can be a call to another function -+ * (actually the return address to this function) or is the instruction which -+ * was about to be executed when an interrupt occurred (including an oops). -+ * Save the register state at this point. -+ * -+ * We always know what the registers contain when execution left this function. -+ * For an interrupt, the registers are in struct pt_regs. For a call to -+ * another function, we have already deduced the register state on entry to the -+ * other function by unwinding to the start of that function. 
Given the -+ * register state on exit from this function plus the known register contents -+ * on entry to the next function, we can determine the stack pointer value on -+ * input to this function. That in turn lets us calculate the address of input -+ * registers that have been stored on stack, giving us the input parameters. -+ * Finally the stack pointer gives us the return address which is the exit -+ * point from the calling function, repeat the unwind process on that function. -+ * -+ * The data that tracks which registers contain input parameters is function -+ * global, not local to any basic block. To determine which input registers -+ * contain parameters, we have to decode the entire function. Otherwise an -+ * exit early in the function might not have read any parameters yet. -+ */ -+ -+/* Record memory contents in terms of the values that were passed to this -+ * function, IOW track which memory locations contain an input value. A memory -+ * location's contents can be undefined, it can contain an input register value -+ * or it can contain an offset from the original stack pointer. -+ * -+ * This structure is used to record register contents that have been stored in -+ * memory. Location (BBRG_OSP + 'offset_address') contains the input value -+ * from register 'value'. When 'value' is BBRG_OSP then offset_value contains -+ * the offset from the original stack pointer that was stored in this memory -+ * location. When 'value' is not BBRG_OSP then the memory location contains -+ * the original contents of an input register and offset_value is ignored. -+ * -+ * An input register 'value' can be stored in more than one register and/or in -+ * more than one memory location. -+ */ -+ -+struct bb_memory_contains -+{ -+ short offset_address; -+ enum bb_reg_code value: 8; -+ short offset_value; -+}; -+ -+/* Track the register state in each basic block. */ -+ -+struct bb_reg_state -+{ -+ /* Indexed by register value 'reg - BBRG_RAX' */ -+ struct bb_reg_contains contains[KDB_INT_REGISTERS]; -+ int ref_count; -+ int mem_count; -+ /* dynamic size for memory locations, see mem_count */ -+ struct bb_memory_contains memory[0]; -+}; -+ -+static struct bb_reg_state *bb_reg_state, *bb_exit_state; -+static int bb_reg_state_max, bb_reg_params, bb_memory_params; -+ -+struct bb_actual -+{ -+ bfd_vma value; -+ int valid; -+}; -+ -+/* Contains the actual hex value of a register, plus a valid bit. Indexed by -+ * register value 'reg - BBRG_RAX' -+ */ -+static struct bb_actual bb_actual[KDB_INT_REGISTERS]; -+ -+static bfd_vma bb_func_start, bb_func_end; -+static bfd_vma bb_common_interrupt, bb_error_entry, bb_ret_from_intr, -+ bb_thread_return, bb_sync_regs, bb_save_v86_state, -+ bb__sched_text_start, bb__sched_text_end, -+ bb_save_args, bb_save_rest, bb_save_paranoid; -+ -+/* Record jmp instructions, both conditional and unconditional. These form the -+ * arcs between the basic blocks. This is also used to record the state when -+ * one block drops through into the next. -+ * -+ * A bb can have multiple associated bb_jmp entries, one for each jcc -+ * instruction plus at most one bb_jmp for the drop through case. If a bb -+ * drops through to the next bb then the drop through bb_jmp entry will be the -+ * last entry in the set of bb_jmp's that are associated with the bb. This is -+ * enforced by the fact that jcc entries are added during the disassembly phase -+ * of pass 1, the drop through entries are added near the end of pass 1. 
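The merge rule described in the tracking comment above (only facts that hold on every path into a block survive) can be sketched against struct bb_reg_state as just declared. This is not the patch's merge logic, which also has to reconcile the memory slots; the function name is invented.

/* Sketch: fold one predecessor state into an accumulated state.  Any
 * register the two states disagree about is demoted to undefined, so the
 * result is a subset that is valid on every known path into the block.
 */
static void
sketch_merge_regs(struct bb_reg_state *acc, const struct bb_reg_state *in)
{
	int i;
	for (i = 0; i < KDB_INT_REGISTERS; ++i) {
		if (acc->contains[i].value != in->contains[i].value ||
		    acc->contains[i].offset != in->contains[i].offset) {
			acc->contains[i].value = BBRG_UNDEFINED;
			acc->contains[i].offset = 0;
		}
	}
}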
-+ * -+ * At address 'from' in this block, we have a jump to address 'to'. The -+ * register state at 'from' is copied to the target block. -+ */ -+ -+struct bb_jmp -+{ -+ bfd_vma from; -+ bfd_vma to; -+ struct bb_reg_state *state; -+ unsigned int drop_through: 1; -+}; -+ -+struct bb -+{ -+ bfd_vma start; -+ /* The end address of a basic block is sloppy. It can be the first -+ * byte of the last instruction in the block or it can be the last byte -+ * of the block. -+ */ -+ bfd_vma end; -+ unsigned int changed: 1; -+ unsigned int drop_through: 1; -+}; -+ -+static struct bb **bb_list, *bb_curr; -+static int bb_max, bb_count; -+ -+static struct bb_jmp *bb_jmp_list; -+static int bb_jmp_max, bb_jmp_count; -+ -+/* Add a new bb entry to the list. This does an insert sort. */ -+ -+static struct bb * -+bb_new(bfd_vma order) -+{ -+ int i, j; -+ struct bb *bb, *p; -+ if (bb_giveup) -+ return NULL; -+ if (bb_count == bb_max) { -+ struct bb **bb_list_new; -+ bb_max += 10; -+ bb_list_new = debug_kmalloc(bb_max*sizeof(*bb_list_new), -+ GFP_ATOMIC); -+ if (!bb_list_new) { -+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__); -+ bb_giveup = 1; -+ return NULL; -+ } -+ memcpy(bb_list_new, bb_list, bb_count*sizeof(*bb_list)); -+ debug_kfree(bb_list); -+ bb_list = bb_list_new; -+ } -+ bb = debug_kmalloc(sizeof(*bb), GFP_ATOMIC); -+ if (!bb) { -+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__); -+ bb_giveup = 1; -+ return NULL; -+ } -+ memset(bb, 0, sizeof(*bb)); -+ for (i = 0; i < bb_count; ++i) { -+ p = bb_list[i]; -+ if ((p->start && p->start > order) || -+ (p->end && p->end > order)) -+ break; -+ } -+ for (j = bb_count-1; j >= i; --j) -+ bb_list[j+1] = bb_list[j]; -+ bb_list[i] = bb; -+ ++bb_count; -+ return bb; -+} -+ -+/* Add a new bb_jmp entry to the list. This list is not sorted. */ -+ -+static struct bb_jmp * -+bb_jmp_new(bfd_vma from, bfd_vma to, unsigned int drop_through) -+{ -+ struct bb_jmp *bb_jmp; -+ if (bb_giveup) -+ return NULL; -+ if (bb_jmp_count == bb_jmp_max) { -+ struct bb_jmp *bb_jmp_list_new; -+ bb_jmp_max += 10; -+ bb_jmp_list_new = -+ debug_kmalloc(bb_jmp_max*sizeof(*bb_jmp_list_new), -+ GFP_ATOMIC); -+ if (!bb_jmp_list_new) { -+ kdb_printf("\n\n%s: out of debug_kmalloc\n", -+ __FUNCTION__); -+ bb_giveup = 1; -+ return NULL; -+ } -+ memcpy(bb_jmp_list_new, bb_jmp_list, -+ bb_jmp_count*sizeof(*bb_jmp_list)); -+ debug_kfree(bb_jmp_list); -+ bb_jmp_list = bb_jmp_list_new; -+ } -+ bb_jmp = bb_jmp_list + bb_jmp_count++; -+ bb_jmp->from = from; -+ bb_jmp->to = to; -+ bb_jmp->drop_through = drop_through; -+ bb_jmp->state = NULL; -+ return bb_jmp; -+} -+ -+static void -+bb_delete(int i) -+{ -+ struct bb *bb = bb_list[i]; -+ memcpy(bb_list+i, bb_list+i+1, (bb_count-i-1)*sizeof(*bb_list)); -+ bb_list[--bb_count] = NULL; -+ debug_kfree(bb); -+} -+ -+static struct bb * -+bb_add(bfd_vma start, bfd_vma end) -+{ -+ int i; -+ struct bb *bb; -+ /* Ignore basic blocks whose start address is outside the current -+ * function. These occur for call instructions and for tail recursion. -+ */ -+ if (start && -+ (start < bb_func_start || start >= bb_func_end)) -+ return NULL; -+ for (i = 0; i < bb_count; ++i) { -+ bb = bb_list[i]; -+ if ((start && bb->start == start) || -+ (end && bb->end == end)) -+ return bb; -+ } -+ bb = bb_new(start ? 
start : end); -+ if (bb) { -+ bb->start = start; -+ bb->end = end; -+ } -+ return bb; -+} -+ -+static struct bb_jmp * -+bb_jmp_add(bfd_vma from, bfd_vma to, unsigned int drop_through) -+{ -+ int i; -+ struct bb_jmp *bb_jmp; -+ for (i = 0, bb_jmp = bb_jmp_list; i < bb_jmp_count; ++i, ++bb_jmp) { -+ if (bb_jmp->from == from && -+ bb_jmp->to == to && -+ bb_jmp->drop_through == drop_through) -+ return bb_jmp; -+ } -+ bb_jmp = bb_jmp_new(from, to, drop_through); -+ return bb_jmp; -+} -+ -+static unsigned long bb_curr_addr, bb_exit_addr; -+static char bb_buffer[256]; /* A bit too big to go on stack */ -+ -+/* Computed jmp uses 'jmp *addr(,%reg,[48])' where 'addr' is the start of a -+ * table of addresses that point into the current function. Run the table and -+ * generate bb starts for each target address plus a bb_jmp from this address -+ * to the target address. -+ * -+ * Only called for 'jmp' instructions, with the pointer starting at 'jmp'. -+ */ -+ -+static void -+bb_pass1_computed_jmp(char *p) -+{ -+ unsigned long table, scale; -+ kdb_machreg_t addr; -+ struct bb* bb; -+ p += strcspn(p, " \t"); /* end of instruction */ -+ p += strspn(p, " \t"); /* start of address */ -+ if (*p++ != '*') -+ return; -+ table = simple_strtoul(p, &p, 0); -+ if (strncmp(p, "(,%", 3) != 0) -+ return; -+ p += 3; -+ p += strcspn(p, ","); /* end of reg */ -+ if (*p++ != ',') -+ return; -+ scale = simple_strtoul(p, &p, 0); -+ if (scale != KDB_WORD_SIZE || strcmp(p, ")")) -+ return; -+ while (!bb_giveup) { -+ if (kdb_getword(&addr, table, sizeof(addr))) -+ return; -+ if (addr < bb_func_start || addr >= bb_func_end) -+ return; -+ bb = bb_add(addr, 0); -+ if (bb) -+ bb_jmp_add(bb_curr_addr, addr, 0); -+ table += KDB_WORD_SIZE; -+ } -+} -+ -+/* Pass 1, identify the start and end of each basic block */ -+ -+static int -+bb_dis_pass1(PTR file, const char *fmt, ...) -+{ -+ int l = strlen(bb_buffer); -+ char *p; -+ va_list ap; -+ va_start(ap, fmt); -+ vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap); -+ va_end(ap); -+ if ((p = strchr(bb_buffer, '\n'))) { -+ *p = '\0'; -+ /* ret[q], iret[q], sysexit, sysret, ud2a or jmp[q] end a -+ * block. As does a call to a function marked noret. -+ */ -+ p = bb_buffer; -+ p += strcspn(p, ":"); -+ if (*p++ == ':') { -+ bb_fixup_switch_to(p); -+ p += strspn(p, " \t"); /* start of instruction */ -+ if (strncmp(p, "ret", 3) == 0 || -+ strncmp(p, "iret", 4) == 0 || -+ strncmp(p, "sysexit", 7) == 0 || -+ strncmp(p, "sysret", 6) == 0 || -+ strncmp(p, "ud2a", 4) == 0 || -+ strncmp(p, "jmp", 3) == 0) { -+ if (strncmp(p, "jmp", 3) == 0) -+ bb_pass1_computed_jmp(p); -+ bb_add(0, bb_curr_addr); -+ }; -+ if (strncmp(p, "call", 4) == 0) { -+ strsep(&p, " \t"); /* end of opcode */ -+ if (p) -+ p += strspn(p, " \t"); /* operand(s) */ -+ if (p && strchr(p, '<')) { -+ p = strchr(p, '<') + 1; -+ *strchr(p, '>') = '\0'; -+ if (bb_noret(p)) -+ bb_add(0, bb_curr_addr); -+ } -+ }; -+ } -+ bb_buffer[0] = '\0'; -+ } -+ return 0; -+} -+ -+static void -+bb_printaddr_pass1(bfd_vma addr, disassemble_info *dip) -+{ -+ kdb_symtab_t symtab; -+ unsigned int offset; -+ struct bb* bb; -+ /* disasm only calls the printaddr routine for the target of jmp, loop -+ * or call instructions, i.e. the start of a basic block. call is -+ * ignored by bb_add because the target address is outside the current -+ * function. 
-+ */ -+ dip->fprintf_func(dip->stream, "0x%lx", addr); -+ kdbnearsym(addr, &symtab); -+ if (symtab.sym_name) { -+ dip->fprintf_func(dip->stream, " <%s", symtab.sym_name); -+ if ((offset = addr - symtab.sym_start)) -+ dip->fprintf_func(dip->stream, "+0x%x", offset); -+ dip->fprintf_func(dip->stream, ">"); -+ } -+ bb = bb_add(addr, 0); -+ if (bb) -+ bb_jmp_add(bb_curr_addr, addr, 0); -+} -+ -+static void -+bb_pass1(void) -+{ -+ int i; -+ unsigned long addr; -+ struct bb *bb; -+ struct bb_jmp *bb_jmp; -+ -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) -+ kdb_printf("%s: func_name %s func_start " kdb_bfd_vma_fmt0 -+ " func_end " kdb_bfd_vma_fmt0 "\n", -+ __FUNCTION__, -+ bb_func_name, -+ bb_func_start, -+ bb_func_end); -+ kdb_di.fprintf_func = bb_dis_pass1; -+ kdb_di.print_address_func = bb_printaddr_pass1; -+ -+ bb_add(bb_func_start, 0); -+ for (bb_curr_addr = bb_func_start; -+ bb_curr_addr < bb_func_end; -+ ++bb_curr_addr) { -+ unsigned char c; -+ if (kdb_getarea(c, bb_curr_addr)) { -+ kdb_printf("%s: unreadable function code at ", -+ __FUNCTION__); -+ kdb_symbol_print(bb_curr_addr, NULL, KDB_SP_DEFAULT); -+ kdb_printf(", giving up\n"); -+ bb_giveup = 1; -+ return; -+ } -+ } -+ for (addr = bb_func_start; addr < bb_func_end; ) { -+ bb_curr_addr = addr; -+ addr += kdba_id_printinsn(addr, &kdb_di); -+ kdb_di.fprintf_func(NULL, "\n"); -+ } -+ if (bb_giveup) -+ goto out; -+ -+ /* Special case: a block consisting of a single instruction which is -+ * both the target of a jmp and is also an ending instruction, so we -+ * add two blocks using the same address, one as a start and one as an -+ * end, in no guaranteed order. The end must be ordered after the -+ * start. -+ */ -+ for (i = 0; i < bb_count-1; ++i) { -+ struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1]; -+ if (bb1->end && bb1->end == bb2->start) { -+ bb = bb_list[i+1]; -+ bb_list[i+1] = bb_list[i]; -+ bb_list[i] = bb; -+ } -+ } -+ -+ /* Some bb have a start address, some have an end address. Collapse -+ * them into entries that have both start and end addresses. The first -+ * entry is guaranteed to have a start address. -+ */ -+ for (i = 0; i < bb_count-1; ++i) { -+ struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1]; -+ if (bb1->end) -+ continue; -+ if (bb2->start) { -+ bb1->end = bb2->start - 1; -+ bb1->drop_through = 1; -+ bb_jmp_add(bb1->end, bb2->start, 1); -+ } else { -+ bb1->end = bb2->end; -+ bb_delete(i+1); -+ } -+ } -+ bb = bb_list[bb_count-1]; -+ if (!bb->end) -+ bb->end = bb_func_end - 1; -+ -+ /* It would be nice to check that all bb have a valid start and end -+ * address but there is just too much garbage code in the kernel to do -+ * that check. Aligned functions in assembler code mean that there is -+ * space between the end of one function and the start of the next and -+ * that space contains previous code from the assembler's buffers. It -+ * looks like dead code with nothing that branches to it, so no start -+ * address. do_sys_vm86() ends with 'jmp resume_userspace' which the C -+ * compiler does not know about so gcc appends the normal exit code, -+ * again nothing branches to this dangling code. -+ * -+ * The best we can do is delete bb entries with no start address. 
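A small worked example of what pass 1 leaves behind, with invented addresses: suppose a function occupies 0x100-0x130, with a "jne 0x120" at 0x110 and a "ret" at 0x130. Disassembly records a start at 0x100 (the function entry), a start at 0x120 (the jne target, plus a bb_jmp arc from 0x110 to 0x120) and an end at 0x130 (the ret). The collapse loops above then leave two blocks, in the same layout the KDB_DEBUG(BB) dump below prints them:

	bb[0] start 0x100 end 0x11f drop_through 1
	bb[1] start 0x120 end 0x130

with an extra drop-through arc from 0x11f to 0x120.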
-+ */ -+ for (i = 0; i < bb_count; ++i) { -+ struct bb *bb = bb_list[i]; -+ if (!bb->start) -+ bb_delete(i--); -+ } -+ for (i = 0; i < bb_count; ++i) { -+ struct bb *bb = bb_list[i]; -+ if (!bb->end) { -+ kdb_printf("%s: incomplete bb state\n", __FUNCTION__); -+ bb_giveup = 1; -+ goto debug; -+ } -+ } -+ -+out: -+ if (!KDB_DEBUG(BB)) -+ return; -+debug: -+ kdb_printf("%s: end\n", __FUNCTION__); -+ for (i = 0; i < bb_count; ++i) { -+ bb = bb_list[i]; -+ kdb_printf(" bb[%d] start " -+ kdb_bfd_vma_fmt0 -+ " end " kdb_bfd_vma_fmt0 -+ " drop_through %d", -+ i, bb->start, bb->end, bb->drop_through); -+ kdb_printf("\n"); -+ } -+ for (i = 0; i < bb_jmp_count; ++i) { -+ bb_jmp = bb_jmp_list + i; -+ kdb_printf(" bb_jmp[%d] from " -+ kdb_bfd_vma_fmt0 -+ " to " kdb_bfd_vma_fmt0 -+ " drop_through %d\n", -+ i, bb_jmp->from, bb_jmp->to, bb_jmp->drop_through); -+ } -+} -+ -+/* Pass 2, record register changes in each basic block */ -+ -+/* For each opcode that we care about, indicate how it uses its operands. Most -+ * opcodes can be handled generically because they completely specify their -+ * operands in the instruction, however many opcodes have side effects such as -+ * reading or writing rax or updating rsp. Instructions that change registers -+ * that are not listed in the operands must be handled as special cases. In -+ * addition, instructions that copy registers while preserving their contents -+ * (push, pop, mov) or change the contents in a well defined way (add with an -+ * immediate, lea) must be handled as special cases in order to track the -+ * register contents. -+ * -+ * The tables below only list opcodes that are actually used in the Linux -+ * kernel, so they omit most of the floating point and all of the SSE type -+ * instructions. The operand usage entries only cater for accesses to memory -+ * and to the integer registers, accesses to floating point registers and flags -+ * are not relevant for kernel backtraces. -+ */ -+ -+enum bb_operand_usage { -+ BBOU_UNKNOWN = 0, -+ /* generic entries. because xchg can do any combinations of -+ * read src, write src, read dst and write dst we need to -+ * define all 16 possibilities. These are ordered by rs = 1, -+ * rd = 2, ws = 4, wd = 8, bb_usage_x*() functions rely on this -+ * order. 
-+ */ -+ BBOU_RS = 1, /* read src */ /* 1 */ -+ BBOU_RD, /* read dst */ /* 2 */ -+ BBOU_RSRD, /* 3 */ -+ BBOU_WS, /* write src */ /* 4 */ -+ BBOU_RSWS, /* 5 */ -+ BBOU_RDWS, /* 6 */ -+ BBOU_RSRDWS, /* 7 */ -+ BBOU_WD, /* write dst */ /* 8 */ -+ BBOU_RSWD, /* 9 */ -+ BBOU_RDWD, /* 10 */ -+ BBOU_RSRDWD, /* 11 */ -+ BBOU_WSWD, /* 12 */ -+ BBOU_RSWSWD, /* 13 */ -+ BBOU_RDWSWD, /* 14 */ -+ BBOU_RSRDWSWD, /* 15 */ -+ /* opcode specific entries */ -+ BBOU_ADD, -+ BBOU_AND, -+ BBOU_CALL, -+ BBOU_CBW, -+ BBOU_CMOV, -+ BBOU_CMPXCHG, -+ BBOU_CMPXCHGD, -+ BBOU_CPUID, -+ BBOU_CWD, -+ BBOU_DIV, -+ BBOU_IDIV, -+ BBOU_IMUL, -+ BBOU_IRET, -+ BBOU_JMP, -+ BBOU_LAHF, -+ BBOU_LEA, -+ BBOU_LEAVE, -+ BBOU_LODS, -+ BBOU_LOOP, -+ BBOU_LSS, -+ BBOU_MONITOR, -+ BBOU_MOV, -+ BBOU_MOVS, -+ BBOU_MUL, -+ BBOU_MWAIT, -+ BBOU_NOP, -+ BBOU_OUTS, -+ BBOU_POP, -+ BBOU_POPF, -+ BBOU_PUSH, -+ BBOU_PUSHF, -+ BBOU_RDMSR, -+ BBOU_RDTSC, -+ BBOU_RET, -+ BBOU_SAHF, -+ BBOU_SCAS, -+ BBOU_SUB, -+ BBOU_SYSEXIT, -+ BBOU_SYSRET, -+ BBOU_WRMSR, -+ BBOU_XADD, -+ BBOU_XCHG, -+ BBOU_XOR, -+}; -+ -+struct bb_opcode_usage { -+ int length; -+ enum bb_operand_usage usage; -+ const char *opcode; -+}; -+ -+/* This table is sorted in alphabetical order of opcode, except that the -+ * trailing '"' is treated as a high value. For example, 'in' sorts after -+ * 'inc', 'bt' after 'btc'. This modified sort order ensures that shorter -+ * opcodes come after long ones. A normal sort would put 'in' first, so 'in' -+ * would match both 'inc' and 'in'. When adding any new entries to this table, -+ * be careful to put shorter entries last in their group. -+ * -+ * To automatically sort the table (in vi) -+ * Mark the first and last opcode line with 'a and 'b -+ * 'a -+ * !'bsed -e 's/"}/}}/' | LANG=C sort -t '"' -k2 | sed -e 's/}}/"}/' -+ * -+ * If a new instruction has to be added, first consider if it affects registers -+ * other than those listed in the operands. Also consider if you want to track -+ * the results of issuing the instruction, IOW can you extract useful -+ * information by looking in detail at the modified registers or memory. If -+ * either test is true then you need a special case to handle the instruction. -+ * -+ * The generic entries at the start of enum bb_operand_usage all have one thing -+ * in common, if a register or memory location is updated then that location -+ * becomes undefined, i.e. we lose track of anything that was previously saved -+ * in that location. So only use a generic BBOU_* value when the result of the -+ * instruction cannot be calculated exactly _and_ when all the affected -+ * registers are listed in the operands. -+ * -+ * Examples: -+ * -+ * 'call' does not generate a known result, but as a side effect of call, -+ * several scratch registers become undefined, so it needs a special BBOU_CALL -+ * entry. -+ * -+ * 'adc' generates a variable result, it depends on the carry flag, so 'adc' -+ * gets a generic entry. 'add' can generate an exact result (add with -+ * immediate on a register that points to the stack) or it can generate an -+ * unknown result (add a variable, or add immediate to a register that does not -+ * contain a stack pointer) so 'add' has its own BBOU_ADD entry. 
-+ */ -+ -+static const struct bb_opcode_usage -+bb_opcode_usage_all[] = { -+ {3, BBOU_RSRDWD, "adc"}, -+ {3, BBOU_ADD, "add"}, -+ {3, BBOU_AND, "and"}, -+ {3, BBOU_RSWD, "bsf"}, -+ {3, BBOU_RSWD, "bsr"}, -+ {5, BBOU_RSWS, "bswap"}, -+ {3, BBOU_RSRDWD, "btc"}, -+ {3, BBOU_RSRDWD, "btr"}, -+ {3, BBOU_RSRDWD, "bts"}, -+ {2, BBOU_RSRD, "bt"}, -+ {4, BBOU_CALL, "call"}, -+ {4, BBOU_CBW, "cbtw"}, /* Intel cbw */ -+ {3, BBOU_NOP, "clc"}, -+ {3, BBOU_NOP, "cld"}, -+ {7, BBOU_RS, "clflush"}, -+ {4, BBOU_NOP, "clgi"}, -+ {3, BBOU_NOP, "cli"}, -+ {4, BBOU_CWD, "cltd"}, /* Intel cdq */ -+ {4, BBOU_CBW, "cltq"}, /* Intel cdqe */ -+ {4, BBOU_NOP, "clts"}, -+ {4, BBOU_CMOV, "cmov"}, -+ {9, BBOU_CMPXCHGD,"cmpxchg16"}, -+ {8, BBOU_CMPXCHGD,"cmpxchg8"}, -+ {7, BBOU_CMPXCHG, "cmpxchg"}, -+ {3, BBOU_RSRD, "cmp"}, -+ {5, BBOU_CPUID, "cpuid"}, -+ {4, BBOU_CWD, "cqto"}, /* Intel cdo */ -+ {4, BBOU_CWD, "cwtd"}, /* Intel cwd */ -+ {4, BBOU_CBW, "cwtl"}, /* Intel cwde */ -+ {4, BBOU_NOP, "data"}, /* alternative ASM_NOP generates data16 on x86_64 */ -+ {3, BBOU_RSWS, "dec"}, -+ {3, BBOU_DIV, "div"}, -+ {5, BBOU_RS, "fdivl"}, -+ {5, BBOU_NOP, "finit"}, -+ {6, BBOU_RS, "fistpl"}, -+ {4, BBOU_RS, "fldl"}, -+ {4, BBOU_RS, "fmul"}, -+ {6, BBOU_NOP, "fnclex"}, -+ {6, BBOU_NOP, "fninit"}, -+ {6, BBOU_RS, "fnsave"}, -+ {7, BBOU_NOP, "fnsetpm"}, -+ {6, BBOU_RS, "frstor"}, -+ {5, BBOU_WS, "fstsw"}, -+ {5, BBOU_RS, "fsubp"}, -+ {5, BBOU_NOP, "fwait"}, -+ {7, BBOU_RS, "fxrstor"}, -+ {6, BBOU_RS, "fxsave"}, -+ {3, BBOU_NOP, "hlt"}, -+ {4, BBOU_IDIV, "idiv"}, -+ {4, BBOU_IMUL, "imul"}, -+ {3, BBOU_RSWS, "inc"}, -+ {3, BBOU_NOP, "int"}, -+ {7, BBOU_RSRD, "invlpga"}, -+ {6, BBOU_RS, "invlpg"}, -+ {2, BBOU_RSWD, "in"}, -+ {4, BBOU_IRET, "iret"}, -+ {1, BBOU_JMP, "j"}, -+ {4, BBOU_LAHF, "lahf"}, -+ {3, BBOU_RSWD, "lar"}, -+ {5, BBOU_RS, "lcall"}, -+ {5, BBOU_LEAVE, "leave"}, -+ {3, BBOU_LEA, "lea"}, -+ {6, BBOU_NOP, "lfence"}, -+ {4, BBOU_RS, "lgdt"}, -+ {4, BBOU_RS, "lidt"}, -+ {4, BBOU_RS, "ljmp"}, -+ {4, BBOU_RS, "lldt"}, -+ {4, BBOU_RS, "lmsw"}, -+ {4, BBOU_LODS, "lods"}, -+ {4, BBOU_LOOP, "loop"}, -+ {4, BBOU_NOP, "lret"}, -+ {3, BBOU_RSWD, "lsl"}, -+ {3, BBOU_LSS, "lss"}, -+ {3, BBOU_RS, "ltr"}, -+ {6, BBOU_NOP, "mfence"}, -+ {7, BBOU_MONITOR, "monitor"}, -+ {4, BBOU_MOVS, "movs"}, -+ {3, BBOU_MOV, "mov"}, -+ {3, BBOU_MUL, "mul"}, -+ {5, BBOU_MWAIT, "mwait"}, -+ {3, BBOU_RSWS, "neg"}, -+ {3, BBOU_NOP, "nop"}, -+ {3, BBOU_RSWS, "not"}, -+ {2, BBOU_RSRDWD, "or"}, -+ {4, BBOU_OUTS, "outs"}, -+ {3, BBOU_RSRD, "out"}, -+ {5, BBOU_NOP, "pause"}, -+ {4, BBOU_POPF, "popf"}, -+ {3, BBOU_POP, "pop"}, -+ {8, BBOU_RS, "prefetch"}, -+ {5, BBOU_PUSHF, "pushf"}, -+ {4, BBOU_PUSH, "push"}, -+ {3, BBOU_RSRDWD, "rcl"}, -+ {3, BBOU_RSRDWD, "rcr"}, -+ {5, BBOU_RDMSR, "rdmsr"}, -+ {5, BBOU_RDMSR, "rdpmc"}, /* same side effects as rdmsr */ -+ {5, BBOU_RDTSC, "rdtsc"}, -+ {3, BBOU_RET, "ret"}, -+ {3, BBOU_RSRDWD, "rol"}, -+ {3, BBOU_RSRDWD, "ror"}, -+ {4, BBOU_SAHF, "sahf"}, -+ {3, BBOU_RSRDWD, "sar"}, -+ {3, BBOU_RSRDWD, "sbb"}, -+ {4, BBOU_SCAS, "scas"}, -+ {3, BBOU_WS, "set"}, -+ {6, BBOU_NOP, "sfence"}, -+ {4, BBOU_WS, "sgdt"}, -+ {3, BBOU_RSRDWD, "shl"}, -+ {3, BBOU_RSRDWD, "shr"}, -+ {4, BBOU_WS, "sidt"}, -+ {4, BBOU_WS, "sldt"}, -+ {3, BBOU_NOP, "stc"}, -+ {3, BBOU_NOP, "std"}, -+ {4, BBOU_NOP, "stgi"}, -+ {3, BBOU_NOP, "sti"}, -+ {4, BBOU_SCAS, "stos"}, -+ {4, BBOU_WS, "strl"}, -+ {3, BBOU_WS, "str"}, -+ {3, BBOU_SUB, "sub"}, -+ {6, BBOU_NOP, "swapgs"}, -+ {7, BBOU_SYSEXIT, "sysexit"}, -+ {6, BBOU_SYSRET, "sysret"}, -+ {4, BBOU_NOP, 
"test"}, -+ {4, BBOU_NOP, "ud2a"}, -+ {7, BBOU_RS, "vmclear"}, -+ {8, BBOU_NOP, "vmlaunch"}, -+ {6, BBOU_RS, "vmload"}, -+ {7, BBOU_RS, "vmptrld"}, -+ {6, BBOU_WD, "vmread"}, /* vmread src is an encoding, not a register */ -+ {8, BBOU_NOP, "vmresume"}, -+ {5, BBOU_RS, "vmrun"}, -+ {6, BBOU_RS, "vmsave"}, -+ {7, BBOU_WD, "vmwrite"}, /* vmwrite src is an encoding, not a register */ -+ {3, BBOU_NOP, "vmxoff"}, -+ {6, BBOU_NOP, "wbinvd"}, -+ {5, BBOU_WRMSR, "wrmsr"}, -+ {4, BBOU_XADD, "xadd"}, -+ {4, BBOU_XCHG, "xchg"}, -+ {3, BBOU_XOR, "xor"}, -+ {4, BBOU_NOP, "xrstor"}, -+ {4, BBOU_NOP, "xsave"}, -+ {10, BBOU_WS, "xstore-rng"}, -+}; -+ -+/* To speed up searching, index bb_opcode_usage_all by the first letter of each -+ * opcode. -+ */ -+static struct { -+ const struct bb_opcode_usage *opcode; -+ int size; -+} bb_opcode_usage[26]; -+ -+struct bb_operand { -+ char *base; -+ char *index; -+ char *segment; -+ long disp; -+ unsigned int scale; -+ enum bb_reg_code base_rc; /* UNDEFINED or RAX through R15 */ -+ enum bb_reg_code index_rc; /* UNDEFINED or RAX through R15 */ -+ unsigned int present :1; -+ unsigned int disp_present :1; -+ unsigned int indirect :1; /* must be combined with reg or memory */ -+ unsigned int immediate :1; /* exactly one of these 3 must be set */ -+ unsigned int reg :1; -+ unsigned int memory :1; -+}; -+ -+struct bb_decode { -+ char *prefix; -+ char *opcode; -+ const struct bb_opcode_usage *match; -+ struct bb_operand src; -+ struct bb_operand dst; -+ struct bb_operand dst2; -+}; -+ -+static struct bb_decode bb_decode; -+ -+static enum bb_reg_code -+bb_reg_map(const char *reg) -+{ -+ int lo, hi, c; -+ const struct bb_reg_code_map *p; -+ lo = 0; -+ hi = ARRAY_SIZE(bb_reg_code_map) - 1; -+ while (lo <= hi) { -+ int mid = (hi + lo) / 2; -+ p = bb_reg_code_map + mid; -+ c = strcmp(p->name, reg+1); -+ if (c == 0) -+ return p->reg; -+ else if (c > 0) -+ hi = mid - 1; -+ else -+ lo = mid + 1; -+ } -+ return BBRG_UNDEFINED; -+} -+ -+static void -+bb_parse_operand(char *str, struct bb_operand *operand) -+{ -+ char *p = str; -+ int sign = 1; -+ operand->present = 1; -+ /* extract any segment prefix */ -+ if (p[0] == '%' && p[1] && p[2] == 's' && p[3] == ':') { -+ operand->memory = 1; -+ operand->segment = p; -+ p[3] = '\0'; -+ p += 4; -+ } -+ /* extract displacement, base, index, scale */ -+ if (*p == '*') { -+ /* jmp/call *disp(%reg), *%reg or *0xnnn */ -+ operand->indirect = 1; -+ ++p; -+ } -+ if (*p == '-') { -+ sign = -1; -+ ++p; -+ } -+ if (*p == '$') { -+ operand->immediate = 1; -+ operand->disp_present = 1; -+ operand->disp = simple_strtoul(p+1, &p, 0); -+ } else if (isdigit(*p)) { -+ operand->memory = 1; -+ operand->disp_present = 1; -+ operand->disp = simple_strtoul(p, &p, 0) * sign; -+ } -+ if (*p == '%') { -+ operand->reg = 1; -+ operand->base = p; -+ } else if (*p == '(') { -+ operand->memory = 1; -+ operand->base = ++p; -+ p += strcspn(p, ",)"); -+ if (p == operand->base) -+ operand->base = NULL; -+ if (*p == ',') { -+ *p = '\0'; -+ operand->index = ++p; -+ p += strcspn(p, ",)"); -+ if (p == operand->index) -+ operand->index = NULL; -+ } -+ if (*p == ',') { -+ *p = '\0'; -+ operand->scale = simple_strtoul(p+1, &p, 0); -+ } -+ *p = '\0'; -+ } else if (*p) { -+ kdb_printf("%s: unexpected token '%c' after disp '%s'\n", -+ __FUNCTION__, *p, str); -+ bb_giveup = 1; -+ } -+ if ((operand->immediate + operand->reg + operand->memory != 1) || -+ (operand->indirect && operand->immediate)) { -+ kdb_printf("%s: incorrect decode '%s' N %d I %d R %d M %d\n", -+ __FUNCTION__, str, -+ 
operand->indirect, operand->immediate, operand->reg, -+ operand->memory); -+ bb_giveup = 1; -+ } -+ if (operand->base) -+ operand->base_rc = bb_reg_map(operand->base); -+ if (operand->index) -+ operand->index_rc = bb_reg_map(operand->index); -+} -+ -+static void -+bb_print_operand(const char *type, const struct bb_operand *operand) -+{ -+ if (!operand->present) -+ return; -+ kdb_printf(" %s %c%c: ", -+ type, -+ operand->indirect ? 'N' : ' ', -+ operand->immediate ? 'I' : -+ operand->reg ? 'R' : -+ operand->memory ? 'M' : -+ '?' -+ ); -+ if (operand->segment) -+ kdb_printf("%s:", operand->segment); -+ if (operand->immediate) { -+ kdb_printf("$0x%lx", operand->disp); -+ } else if (operand->reg) { -+ if (operand->indirect) -+ kdb_printf("*"); -+ kdb_printf("%s", operand->base); -+ } else if (operand->memory) { -+ if (operand->indirect && (operand->base || operand->index)) -+ kdb_printf("*"); -+ if (operand->disp_present) { -+ kdb_printf("0x%lx", operand->disp); -+ } -+ if (operand->base || operand->index || operand->scale) { -+ kdb_printf("("); -+ if (operand->base) -+ kdb_printf("%s", operand->base); -+ if (operand->index || operand->scale) -+ kdb_printf(","); -+ if (operand->index) -+ kdb_printf("%s", operand->index); -+ if (operand->scale) -+ kdb_printf(",%d", operand->scale); -+ kdb_printf(")"); -+ } -+ } -+ if (operand->base_rc) -+ kdb_printf(" base_rc %d (%s)", -+ operand->base_rc, bbrg_name[operand->base_rc]); -+ if (operand->index_rc) -+ kdb_printf(" index_rc %d (%s)", -+ operand->index_rc, -+ bbrg_name[operand->index_rc]); -+ kdb_printf("\n"); -+} -+ -+static void -+bb_print_opcode(void) -+{ -+ const struct bb_opcode_usage *o = bb_decode.match; -+ kdb_printf(" "); -+ if (bb_decode.prefix) -+ kdb_printf("%s ", bb_decode.prefix); -+ kdb_printf("opcode '%s' matched by '%s', usage %d\n", -+ bb_decode.opcode, o->opcode, o->usage); -+} -+ -+static int -+bb_parse_opcode(void) -+{ -+ int c, i; -+ const struct bb_opcode_usage *o; -+ static int bb_parse_opcode_error_limit = 5; -+ c = bb_decode.opcode[0] - 'a'; -+ if (c < 0 || c >= ARRAY_SIZE(bb_opcode_usage)) -+ goto nomatch; -+ o = bb_opcode_usage[c].opcode; -+ if (!o) -+ goto nomatch; -+ for (i = 0; i < bb_opcode_usage[c].size; ++i, ++o) { -+ if (strncmp(bb_decode.opcode, o->opcode, o->length) == 0) { -+ bb_decode.match = o; -+ if (KDB_DEBUG(BB)) -+ bb_print_opcode(); -+ return 0; -+ } -+ } -+nomatch: -+ if (!bb_parse_opcode_error_limit) -+ return 1; -+ --bb_parse_opcode_error_limit; -+ kdb_printf("%s: no match at [%s]%s " kdb_bfd_vma_fmt0 " - '%s'\n", -+ __FUNCTION__, -+ bb_mod_name, bb_func_name, bb_curr_addr, -+ bb_decode.opcode); -+ return 1; -+} -+ -+static bool -+bb_is_int_reg(enum bb_reg_code reg) -+{ -+ return reg >= BBRG_RAX && reg < (BBRG_RAX + KDB_INT_REGISTERS); -+} -+ -+static bool -+bb_is_simple_memory(const struct bb_operand *operand) -+{ -+ return operand->memory && -+ bb_is_int_reg(operand->base_rc) && -+ !operand->index_rc && -+ operand->scale == 0 && -+ !operand->segment; -+} -+ -+static bool -+bb_is_static_disp(const struct bb_operand *operand) -+{ -+ return operand->memory && -+ !operand->base_rc && -+ !operand->index_rc && -+ operand->scale == 0 && -+ !operand->segment && -+ !operand->indirect; -+} -+ -+static enum bb_reg_code -+bb_reg_code_value(enum bb_reg_code reg) -+{ -+ BB_CHECK(!bb_is_int_reg(reg), reg, 0); -+ return bb_reg_state->contains[reg - BBRG_RAX].value; -+} -+ -+static short -+bb_reg_code_offset(enum bb_reg_code reg) -+{ -+ BB_CHECK(!bb_is_int_reg(reg), reg, 0); -+ return bb_reg_state->contains[reg - 
BBRG_RAX].offset; -+} -+ -+static void -+bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src) -+{ -+ BB_CHECK(!bb_is_int_reg(dst), dst, ); -+ bb_reg_state->contains[dst - BBRG_RAX].value = src; -+} -+ -+static void -+bb_reg_code_set_offset(enum bb_reg_code dst, short offset) -+{ -+ BB_CHECK(!bb_is_int_reg(dst), dst, ); -+ bb_reg_state->contains[dst - BBRG_RAX].offset = offset; -+} -+ -+static bool -+bb_is_osp_defined(enum bb_reg_code reg) -+{ -+ if (bb_is_int_reg(reg)) -+ return bb_reg_code_value(reg) == BBRG_OSP; -+ else -+ return 0; -+} -+ -+static bfd_vma -+bb_actual_value(enum bb_reg_code reg) -+{ -+ BB_CHECK(!bb_is_int_reg(reg), reg, 0); -+ return bb_actual[reg - BBRG_RAX].value; -+} -+ -+static int -+bb_actual_valid(enum bb_reg_code reg) -+{ -+ BB_CHECK(!bb_is_int_reg(reg), reg, 0); -+ return bb_actual[reg - BBRG_RAX].valid; -+} -+ -+static void -+bb_actual_set_value(enum bb_reg_code reg, bfd_vma value) -+{ -+ BB_CHECK(!bb_is_int_reg(reg), reg, ); -+ bb_actual[reg - BBRG_RAX].value = value; -+} -+ -+static void -+bb_actual_set_valid(enum bb_reg_code reg, int valid) -+{ -+ BB_CHECK(!bb_is_int_reg(reg), reg, ); -+ bb_actual[reg - BBRG_RAX].valid = valid; -+} -+ -+/* The scheduler code switches RSP then does PUSH, it is not an error for RSP -+ * to be undefined in this area of the code. -+ */ -+static bool -+bb_is_scheduler_address(void) -+{ -+ return bb_curr_addr >= bb__sched_text_start && -+ bb_curr_addr < bb__sched_text_end; -+} -+ -+static void -+bb_reg_read(enum bb_reg_code reg) -+{ -+ int i, r = 0; -+ if (!bb_is_int_reg(reg) || -+ bb_reg_code_value(reg) != reg) -+ return; -+ for (i = 0; -+ i < min_t(unsigned int, REGPARM, ARRAY_SIZE(bb_param_reg)); -+ ++i) { -+ if (reg == bb_param_reg[i]) { -+ r = i + 1; -+ break; -+ } -+ } -+ bb_reg_params = max(bb_reg_params, r); -+} -+ -+static void -+bb_do_reg_state_print(const struct bb_reg_state *s) -+{ -+ int i, offset_address, offset_value; -+ const struct bb_memory_contains *c; -+ enum bb_reg_code value; -+ kdb_printf(" bb_reg_state %p\n", s); -+ for (i = 0; i < ARRAY_SIZE(s->contains); ++i) { -+ value = s->contains[i].value; -+ offset_value = s->contains[i].offset; -+ kdb_printf(" %s = %s", -+ bbrg_name[i + BBRG_RAX], bbrg_name[value]); -+ if (value == BBRG_OSP) -+ KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", ""); -+ kdb_printf("\n"); -+ } -+ for (i = 0, c = s->memory; i < s->mem_count; ++i, ++c) { -+ offset_address = c->offset_address; -+ value = c->value; -+ offset_value = c->offset_value; -+ kdb_printf(" slot %d offset_address %c0x%x %s", -+ i, -+ offset_address >= 0 ? '+' : '-', -+ offset_address >= 0 ? offset_address : -offset_address, -+ bbrg_name[value]); -+ if (value == BBRG_OSP) -+ KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", ""); -+ kdb_printf("\n"); -+ } -+} -+ -+static void -+bb_reg_state_print(const struct bb_reg_state *s) -+{ -+ if (KDB_DEBUG(BB)) -+ bb_do_reg_state_print(s); -+} -+ -+/* Set register 'dst' to contain the value from 'src'. This includes reading -+ * from 'src' and writing to 'dst'. The offset value is copied iff 'src' -+ * contains a stack pointer. -+ * -+ * Be very careful about the context here. 'dst' and 'src' reflect integer -+ * registers by name, _not_ by the value of their contents. "mov %rax,%rsi" -+ * will call this function as bb_reg_set_reg(BBRG_RSI, BBRG_RAX), which -+ * reflects what the assembler code is doing. However we need to track the -+ * _values_ in the registers, not their names. 
IOW, we really care about "what -+ * value does rax contain when it is copied into rsi?", so we can record the -+ * fact that we now have two copies of that value, one in rax and one in rsi. -+ */ -+ -+static void -+bb_reg_set_reg(enum bb_reg_code dst, enum bb_reg_code src) -+{ -+ enum bb_reg_code src_value = BBRG_UNDEFINED; -+ short offset_value = 0; -+ KDB_DEBUG_BB(" %s = %s", bbrg_name[dst], bbrg_name[src]); -+ if (bb_is_int_reg(src)) { -+ bb_reg_read(src); -+ src_value = bb_reg_code_value(src); -+ KDB_DEBUG_BB(" (%s", bbrg_name[src_value]); -+ if (bb_is_osp_defined(src)) { -+ offset_value = bb_reg_code_offset(src); -+ KDB_DEBUG_BB_OFFSET(offset_value, "", ""); -+ } -+ KDB_DEBUG_BB(")"); -+ } -+ if (bb_is_int_reg(dst)) { -+ bb_reg_code_set_value(dst, src_value); -+ bb_reg_code_set_offset(dst, offset_value); -+ } -+ KDB_DEBUG_BB("\n"); -+} -+ -+static void -+bb_reg_set_undef(enum bb_reg_code dst) -+{ -+ bb_reg_set_reg(dst, BBRG_UNDEFINED); -+} -+ -+/* Delete any record of a stored register held in osp + 'offset' */ -+ -+static void -+bb_delete_memory(short offset) -+{ -+ int i; -+ struct bb_memory_contains *c; -+ for (i = 0, c = bb_reg_state->memory; -+ i < bb_reg_state->mem_count; -+ ++i, ++c) { -+ if (c->offset_address == offset && -+ c->value != BBRG_UNDEFINED) { -+ KDB_DEBUG_BB(" delete %s from ", -+ bbrg_name[c->value]); -+ KDB_DEBUG_BB_OFFSET(offset, "osp", ""); -+ KDB_DEBUG_BB(" slot %d\n", -+ (int)(c - bb_reg_state->memory)); -+ memset(c, BBRG_UNDEFINED, sizeof(*c)); -+ if (i == bb_reg_state->mem_count - 1) -+ --bb_reg_state->mem_count; -+ } -+ } -+} -+ -+/* Set memory location *('dst' + 'offset_address') to contain the supplied -+ * value and offset. 'dst' is assumed to be a register that contains a stack -+ * pointer. -+ */ -+ -+static void -+bb_memory_set_reg_value(enum bb_reg_code dst, short offset_address, -+ enum bb_reg_code value, short offset_value) -+{ -+ int i; -+ struct bb_memory_contains *c, *free = NULL; -+ BB_CHECK(!bb_is_osp_defined(dst), dst, ); -+ KDB_DEBUG_BB(" *(%s", bbrg_name[dst]); -+ KDB_DEBUG_BB_OFFSET(offset_address, "", ""); -+ offset_address += bb_reg_code_offset(dst); -+ KDB_DEBUG_BB_OFFSET(offset_address, " osp", ") = "); -+ KDB_DEBUG_BB("%s", bbrg_name[value]); -+ if (value == BBRG_OSP) -+ KDB_DEBUG_BB_OFFSET(offset_value, "", ""); -+ for (i = 0, c = bb_reg_state->memory; -+ i < bb_reg_state_max; -+ ++i, ++c) { -+ if (c->offset_address == offset_address) -+ free = c; -+ else if (c->value == BBRG_UNDEFINED && !free) -+ free = c; -+ } -+ if (!free) { -+ struct bb_reg_state *new, *old = bb_reg_state; -+ size_t old_size, new_size; -+ int slot; -+ old_size = sizeof(*old) + bb_reg_state_max * -+ sizeof(old->memory[0]); -+ slot = bb_reg_state_max; -+ bb_reg_state_max += 5; -+ new_size = sizeof(*new) + bb_reg_state_max * -+ sizeof(new->memory[0]); -+ new = debug_kmalloc(new_size, GFP_ATOMIC); -+ if (!new) { -+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__); -+ bb_giveup = 1; -+ } else { -+ memcpy(new, old, old_size); -+ memset((char *)new + old_size, BBRG_UNDEFINED, -+ new_size - old_size); -+ bb_reg_state = new; -+ debug_kfree(old); -+ free = bb_reg_state->memory + slot; -+ } -+ } -+ if (free) { -+ int slot = free - bb_reg_state->memory; -+ free->offset_address = offset_address; -+ free->value = value; -+ free->offset_value = offset_value; -+ KDB_DEBUG_BB(" slot %d", slot); -+ bb_reg_state->mem_count = max(bb_reg_state->mem_count, slot+1); -+ } -+ KDB_DEBUG_BB("\n"); -+} -+ -+/* Set memory location *('dst' + 'offset') to contain the value from 
register -+ * 'src'. 'dst' is assumed to be a register that contains a stack pointer. -+ * This differs from bb_memory_set_reg_value because it takes a src register -+ * which contains a value and possibly an offset, bb_memory_set_reg_value is -+ * passed the value and offset directly. -+ */ -+ -+static void -+bb_memory_set_reg(enum bb_reg_code dst, enum bb_reg_code src, -+ short offset_address) -+{ -+ int offset_value; -+ enum bb_reg_code value; -+ BB_CHECK(!bb_is_osp_defined(dst), dst, ); -+ if (!bb_is_int_reg(src)) -+ return; -+ value = bb_reg_code_value(src); -+ if (value == BBRG_UNDEFINED) { -+ bb_delete_memory(offset_address + bb_reg_code_offset(dst)); -+ return; -+ } -+ offset_value = bb_reg_code_offset(src); -+ bb_reg_read(src); -+ bb_memory_set_reg_value(dst, offset_address, value, offset_value); -+} -+ -+/* Set register 'dst' to contain the value from memory *('src' + offset_address). -+ * 'src' is assumed to be a register that contains a stack pointer. -+ */ -+ -+static void -+bb_reg_set_memory(enum bb_reg_code dst, enum bb_reg_code src, short offset_address) -+{ -+ int i, defined = 0; -+ struct bb_memory_contains *s; -+ BB_CHECK(!bb_is_osp_defined(src), src, ); -+ KDB_DEBUG_BB(" %s = *(%s", -+ bbrg_name[dst], bbrg_name[src]); -+ KDB_DEBUG_BB_OFFSET(offset_address, "", ")"); -+ offset_address += bb_reg_code_offset(src); -+ KDB_DEBUG_BB_OFFSET(offset_address, " (osp", ")"); -+ for (i = 0, s = bb_reg_state->memory; -+ i < bb_reg_state->mem_count; -+ ++i, ++s) { -+ if (s->offset_address == offset_address && bb_is_int_reg(dst)) { -+ bb_reg_code_set_value(dst, s->value); -+ KDB_DEBUG_BB(" value %s", bbrg_name[s->value]); -+ if (s->value == BBRG_OSP) { -+ bb_reg_code_set_offset(dst, s->offset_value); -+ KDB_DEBUG_BB_OFFSET(s->offset_value, "", ""); -+ } else { -+ bb_reg_code_set_offset(dst, 0); -+ } -+ defined = 1; -+ } -+ } -+ if (!defined) -+ bb_reg_set_reg(dst, BBRG_UNDEFINED); -+ else -+ KDB_DEBUG_BB("\n"); -+} -+ -+/* A generic read from an operand. */ -+ -+static void -+bb_read_operand(const struct bb_operand *operand) -+{ -+ int m = 0; -+ if (operand->base_rc) -+ bb_reg_read(operand->base_rc); -+ if (operand->index_rc) -+ bb_reg_read(operand->index_rc); -+ if (bb_is_simple_memory(operand) && -+ bb_is_osp_defined(operand->base_rc) && -+ bb_decode.match->usage != BBOU_LEA) { -+ m = (bb_reg_code_offset(operand->base_rc) + operand->disp + -+ KDB_WORD_SIZE - 1) / KDB_WORD_SIZE; -+ bb_memory_params = max(bb_memory_params, m); -+ } -+} -+ -+/* A generic write to an operand, resulting in an undefined value in that -+ * location. All well defined operands are handled separately, this function -+ * only handles the opcodes where the result is undefined. 
-+ */ -+ -+static void -+bb_write_operand(const struct bb_operand *operand) -+{ -+ enum bb_reg_code base_rc = operand->base_rc; -+ if (operand->memory) { -+ if (base_rc) -+ bb_reg_read(base_rc); -+ if (operand->index_rc) -+ bb_reg_read(operand->index_rc); -+ } else if (operand->reg && base_rc) { -+ bb_reg_set_undef(base_rc); -+ } -+ if (bb_is_simple_memory(operand) && bb_is_osp_defined(base_rc)) { -+ int offset; -+ offset = bb_reg_code_offset(base_rc) + operand->disp; -+ offset = ALIGN(offset - KDB_WORD_SIZE + 1, KDB_WORD_SIZE); -+ bb_delete_memory(offset); -+ } -+} -+ -+/* Adjust a register that contains a stack pointer */ -+ -+static void -+bb_adjust_osp(enum bb_reg_code reg, int adjust) -+{ -+ int offset = bb_reg_code_offset(reg), old_offset = offset; -+ KDB_DEBUG_BB(" %s osp offset ", bbrg_name[reg]); -+ KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", " -> "); -+ offset += adjust; -+ bb_reg_code_set_offset(reg, offset); -+ KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", "\n"); -+ /* When RSP is adjusted upwards, it invalidates any memory -+ * stored between the old and current stack offsets. -+ */ -+ if (reg == BBRG_RSP) { -+ while (old_offset < bb_reg_code_offset(reg)) { -+ bb_delete_memory(old_offset); -+ old_offset += KDB_WORD_SIZE; -+ } -+ } -+} -+ -+/* The current instruction adjusts a register that contains a stack pointer. -+ * Direction is 1 or -1, depending on whether the instruction is add/lea or -+ * sub. -+ */ -+ -+static void -+bb_adjust_osp_instruction(int direction) -+{ -+ enum bb_reg_code dst_reg = bb_decode.dst.base_rc; -+ if (bb_decode.src.immediate || -+ bb_decode.match->usage == BBOU_LEA /* lea has its own checks */) { -+ int adjust = direction * bb_decode.src.disp; -+ bb_adjust_osp(dst_reg, adjust); -+ } else { -+ /* variable stack adjustment, osp offset is not well defined */ -+ KDB_DEBUG_BB(" %s osp offset ", bbrg_name[dst_reg]); -+ KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(dst_reg), "", " -> undefined\n"); -+ bb_reg_code_set_value(dst_reg, BBRG_UNDEFINED); -+ bb_reg_code_set_offset(dst_reg, 0); -+ } -+} -+ -+/* Some instructions using memory have an explicit length suffix (b, w, l, q). -+ * The equivalent instructions using a register imply the length from the -+ * register name. Deduce the operand length. -+ */ -+ -+static int -+bb_operand_length(const struct bb_operand *operand, char opcode_suffix) -+{ -+ int l = 0; -+ switch (opcode_suffix) { -+ case 'b': -+ l = 8; -+ break; -+ case 'w': -+ l = 16; -+ break; -+ case 'l': -+ l = 32; -+ break; -+ case 'q': -+ l = 64; -+ break; -+ } -+ if (l == 0 && operand->reg) { -+ switch (strlen(operand->base)) { -+ case 3: -+ switch (operand->base[2]) { -+ case 'h': -+ case 'l': -+ l = 8; -+ break; -+ default: -+ l = 16; -+ break; -+ } -+ case 4: -+ if (operand->base[1] == 'r') -+ l = 64; -+ else -+ l = 32; -+ break; -+ } -+ } -+ return l; -+} -+ -+static int -+bb_reg_state_size(const struct bb_reg_state *state) -+{ -+ return sizeof(*state) + -+ state->mem_count * sizeof(state->memory[0]); -+} -+ -+/* Canonicalize the current bb_reg_state so it can be compared against -+ * previously created states. Sort the memory entries in descending order of -+ * offset_address (stack grows down). Empty slots are moved to the end of the -+ * list and trimmed. 
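For illustration only (not part of the patch): a minimal standalone sketch of the canonical ordering that bb_reg_state_canonicalize() below establishes -- defined slots in descending offset_address order, undefined slots pushed to the end and the count trimmed. The struct, the SLOT_UNDEFINED marker and the use of qsort() are simplified stand-ins for the patch's own types and in-place bubble sort.

#include <stdio.h>
#include <stdlib.h>

#define SLOT_UNDEFINED (-1)

struct slot { int offset_address; int value; };

/* defined slots first, in descending offset order; undefined slots last */
static int slot_cmp(const void *a, const void *b)
{
        const struct slot *p1 = a, *p2 = b;
        if (p1->value == SLOT_UNDEFINED)
                return p2->value == SLOT_UNDEFINED ? 0 : 1;
        if (p2->value == SLOT_UNDEFINED)
                return -1;
        return p2->offset_address - p1->offset_address;
}

int main(void)
{
        struct slot s[] = {
                { -8, 3 }, { -24, SLOT_UNDEFINED }, { -16, 5 }, { -32, 7 },
        };
        int i, count = sizeof(s) / sizeof(s[0]);
        qsort(s, count, sizeof(s[0]), slot_cmp);
        while (count && s[count - 1].value == SLOT_UNDEFINED)
                --count;        /* trim trailing undefined slots */
        for (i = 0; i < count; ++i)
                printf("slot %d: offset %d value %d\n",
                       i, s[i].offset_address, s[i].value);
        return 0;
}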
-+ */ -+ -+static void -+bb_reg_state_canonicalize(void) -+{ -+ int i, order, changed; -+ struct bb_memory_contains *p1, *p2, temp; -+ do { -+ changed = 0; -+ for (i = 0, p1 = bb_reg_state->memory; -+ i < bb_reg_state->mem_count-1; -+ ++i, ++p1) { -+ p2 = p1 + 1; -+ if (p2->value == BBRG_UNDEFINED) { -+ order = 0; -+ } else if (p1->value == BBRG_UNDEFINED) { -+ order = 1; -+ } else if (p1->offset_address < p2->offset_address) { -+ order = 1; -+ } else if (p1->offset_address > p2->offset_address) { -+ order = -1; -+ } else { -+ order = 0; -+ } -+ if (order > 0) { -+ temp = *p2; -+ *p2 = *p1; -+ *p1 = temp; -+ changed = 1; -+ } -+ } -+ } while(changed); -+ for (i = 0, p1 = bb_reg_state->memory; -+ i < bb_reg_state_max; -+ ++i, ++p1) { -+ if (p1->value != BBRG_UNDEFINED) -+ bb_reg_state->mem_count = i + 1; -+ } -+ bb_reg_state_print(bb_reg_state); -+} -+ -+static int -+bb_special_case(bfd_vma to) -+{ -+ int i, j, rsp_offset, expect_offset, offset, errors = 0, max_errors = 40; -+ enum bb_reg_code reg, expect_value, value; -+ struct bb_name_state *r; -+ -+ for (i = 0, r = bb_special_cases; -+ i < ARRAY_SIZE(bb_special_cases); -+ ++i, ++r) { -+ if (to == r->address && -+ (r->fname == NULL || strcmp(bb_func_name, r->fname) == 0)) -+ goto match; -+ } -+ /* Some inline assembler code has jumps to .fixup sections which result -+ * in out of line transfers with undefined state, ignore them. -+ */ -+ if (strcmp(bb_func_name, "strnlen_user") == 0 || -+ strcmp(bb_func_name, "copy_from_user") == 0) -+ return 1; -+ return 0; -+ -+match: -+ /* Check the running registers match */ -+ for (reg = BBRG_RAX; reg < r->regs_size; ++reg) { -+ expect_value = r->regs[reg].value; -+ if (test_bit(expect_value, r->skip_regs.bits)) { -+ /* this regs entry is not defined for this label */ -+ continue; -+ } -+ if (expect_value == BBRG_UNDEFINED) -+ continue; -+ expect_offset = r->regs[reg].offset; -+ value = bb_reg_code_value(reg); -+ offset = bb_reg_code_offset(reg); -+ if (expect_value == value && -+ (value != BBRG_OSP || r->osp_offset == offset)) -+ continue; -+ kdb_printf("%s: Expected %s to contain %s", -+ __FUNCTION__, -+ bbrg_name[reg], -+ bbrg_name[expect_value]); -+ if (r->osp_offset) -+ KDB_DEBUG_BB_OFFSET_PRINTF(r->osp_offset, "", ""); -+ kdb_printf(". It actually contains %s", bbrg_name[value]); -+ if (offset) -+ KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", ""); -+ kdb_printf("\n"); -+ ++errors; -+ if (max_errors-- == 0) -+ goto fail; -+ } -+ /* Check that any memory data on stack matches */ -+ i = j = 0; -+ while (i < bb_reg_state->mem_count && -+ j < r->mem_size) { -+ expect_value = r->mem[j].value; -+ if (test_bit(expect_value, r->skip_mem.bits) || -+ expect_value == BBRG_UNDEFINED) { -+ /* this memory slot is not defined for this label */ -+ ++j; -+ continue; -+ } -+ rsp_offset = bb_reg_state->memory[i].offset_address - -+ bb_reg_code_offset(BBRG_RSP); -+ if (rsp_offset > -+ r->mem[j].offset_address) { -+ /* extra slots in memory are OK */ -+ ++i; -+ } else if (rsp_offset < -+ r->mem[j].offset_address) { -+ /* Required memory slot is missing */ -+ kdb_printf("%s: Invalid bb_reg_state.memory, " -+ "missing memory entry[%d] %s\n", -+ __FUNCTION__, j, bbrg_name[expect_value]); -+ ++errors; -+ if (max_errors-- == 0) -+ goto fail; -+ ++j; -+ } else { -+ if (bb_reg_state->memory[i].offset_value || -+ bb_reg_state->memory[i].value != expect_value) { -+ /* memory slot is present but contains wrong -+ * value. 
-+ */ -+ kdb_printf("%s: Invalid bb_reg_state.memory, " -+ "wrong value in slot %d, " -+ "should be %s, it is %s\n", -+ __FUNCTION__, i, -+ bbrg_name[expect_value], -+ bbrg_name[bb_reg_state->memory[i].value]); -+ ++errors; -+ if (max_errors-- == 0) -+ goto fail; -+ } -+ ++i; -+ ++j; -+ } -+ } -+ while (j < r->mem_size) { -+ expect_value = r->mem[j].value; -+ if (test_bit(expect_value, r->skip_mem.bits) || -+ expect_value == BBRG_UNDEFINED) -+ ++j; -+ else -+ break; -+ } -+ if (j != r->mem_size) { -+ /* Hit end of memory before testing all the pt_reg slots */ -+ kdb_printf("%s: Invalid bb_reg_state.memory, " -+ "missing trailing entries\n", -+ __FUNCTION__); -+ ++errors; -+ if (max_errors-- == 0) -+ goto fail; -+ } -+ if (errors) -+ goto fail; -+ return 1; -+fail: -+ kdb_printf("%s: on transfer to %s\n", __FUNCTION__, r->name); -+ bb_giveup = 1; -+ return 1; -+} -+ -+/* Transfer of control to a label outside the current function. If the -+ * transfer is to a known common code path then do a sanity check on the state -+ * at this point. -+ */ -+ -+static void -+bb_sanity_check(int type) -+{ -+ enum bb_reg_code expect, actual; -+ int i, offset, error = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(bb_preserved_reg); ++i) { -+ expect = bb_preserved_reg[i]; -+ actual = bb_reg_code_value(expect); -+ offset = bb_reg_code_offset(expect); -+ if (expect == actual) -+ continue; -+ /* type == 1 is sysret/sysexit, ignore RSP */ -+ if (type && expect == BBRG_RSP) -+ continue; -+ /* type == 1 is sysret/sysexit, ignore RBP for i386 */ -+ /* We used to have "#ifndef CONFIG_X86_64" for the type=1 RBP -+ * test; however, x86_64 can run ia32 compatible mode and -+ * hit this problem. Perform the following test anyway! -+ */ -+ if (type && expect == BBRG_RBP) -+ continue; -+ /* RSP should contain OSP+0. Except for ptregscall_common and -+ * ia32_ptregs_common, they get a partial pt_regs, fudge the -+ * stack to make it a full pt_regs then reverse the effect on -+ * exit, so the offset is -0x50 on exit. -+ */ -+ if (expect == BBRG_RSP && -+ bb_is_osp_defined(expect) && -+ (offset == 0 || -+ (offset == -0x50 && -+ (strcmp(bb_func_name, "ptregscall_common") == 0 || -+ strcmp(bb_func_name, "ia32_ptregs_common") == 0)))) -+ continue; -+ /* The put_user and save_paranoid functions are special. -+ * %rbx gets clobbered */ -+ if (expect == BBRG_RBX && -+ (strncmp(bb_func_name, "__put_user_", 11) == 0 || -+ strcmp(bb_func_name, "save_paranoid") == 0)) -+ continue; -+ /* Ignore rbp and rsp for error_entry */ -+ if ((strcmp(bb_func_name, "error_entry") == 0) && -+ (expect == BBRG_RBX || -+ (expect == BBRG_RSP && bb_is_osp_defined(expect) && offset == -0x10))) -+ continue; -+ kdb_printf("%s: Expected %s, got %s", -+ __FUNCTION__, -+ bbrg_name[expect], bbrg_name[actual]); -+ if (offset) -+ KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", ""); -+ kdb_printf("\n"); -+ error = 1; -+ } -+ BB_CHECK(error, error, ); -+} -+ -+/* Transfer of control. Follow the arc and save the current state as input to -+ * another basic block. 
-+ */ -+ -+static void -+bb_transfer(bfd_vma from, bfd_vma to, unsigned int drop_through) -+{ -+ int i, found; -+ size_t size; -+ struct bb* bb = NULL; /*stupid gcc */ -+ struct bb_jmp *bb_jmp; -+ struct bb_reg_state *state; -+ bb_reg_state_canonicalize(); -+ found = 0; -+ for (i = 0; i < bb_jmp_count; ++i) { -+ bb_jmp = bb_jmp_list + i; -+ if (bb_jmp->from == from && -+ bb_jmp->to == to && -+ bb_jmp->drop_through == drop_through) { -+ found = 1; -+ break; -+ } -+ } -+ if (!found) { -+ /* Transfer outside the current function. Check the special -+ * cases (mainly in entry.S) first. If it is not a known -+ * special case then check if the target address is the start -+ * of a function or not. If it is the start of a function then -+ * assume tail recursion and require that the state be the same -+ * as on entry. Otherwise assume out of line code (e.g. -+ * spinlock contention path) and ignore it, the state can be -+ * anything. -+ */ -+ kdb_symtab_t symtab; -+ if (bb_special_case(to)) -+ return; -+ kdbnearsym(to, &symtab); -+ if (symtab.sym_start != to) -+ return; -+ bb_sanity_check(0); -+ if (bb_giveup) -+ return; -+#ifdef NO_SIBLINGS -+ /* Only print this message when the kernel is compiled with -+ * -fno-optimize-sibling-calls. Otherwise it would print a -+ * message for every tail recursion call. If you see the -+ * message below then you probably have an assembler label that -+ * is not listed in the special cases. -+ */ -+ kdb_printf(" not matched: from " -+ kdb_bfd_vma_fmt0 -+ " to " kdb_bfd_vma_fmt0 -+ " drop_through %d bb_jmp[%d]\n", -+ from, to, drop_through, i); -+#endif /* NO_SIBLINGS */ -+ return; -+ } -+ KDB_DEBUG_BB(" matched: from " kdb_bfd_vma_fmt0 -+ " to " kdb_bfd_vma_fmt0 -+ " drop_through %d bb_jmp[%d]\n", -+ from, to, drop_through, i); -+ found = 0; -+ for (i = 0; i < bb_count; ++i) { -+ bb = bb_list[i]; -+ if (bb->start == to) { -+ found = 1; -+ break; -+ } -+ } -+ BB_CHECK(!found, to, ); -+ /* If the register state for this arc has already been set (we are -+ * rescanning the block that originates the arc) and the state is the -+ * same as the previous state for this arc then this input to the -+ * target block is the same as last time, so there is no need to rescan -+ * the target block. -+ */ -+ state = bb_jmp->state; -+ size = bb_reg_state_size(bb_reg_state); -+ if (state) { -+ bb_reg_state->ref_count = state->ref_count; -+ if (memcmp(state, bb_reg_state, size) == 0) { -+ KDB_DEBUG_BB(" no state change\n"); -+ return; -+ } -+ if (--state->ref_count == 0) -+ debug_kfree(state); -+ bb_jmp->state = NULL; -+ } -+ /* New input state is required. To save space, check if any other arcs -+ * have the same state and reuse them where possible. The overall set -+ * of inputs to the target block is now different so the target block -+ * must be rescanned. -+ */ -+ bb->changed = 1; -+ for (i = 0; i < bb_jmp_count; ++i) { -+ state = bb_jmp_list[i].state; -+ if (!state) -+ continue; -+ bb_reg_state->ref_count = state->ref_count; -+ if (memcmp(state, bb_reg_state, size) == 0) { -+ KDB_DEBUG_BB(" reuse bb_jmp[%d]\n", i); -+ bb_jmp->state = state; -+ ++state->ref_count; -+ return; -+ } -+ } -+ state = debug_kmalloc(size, GFP_ATOMIC); -+ if (!state) { -+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__); -+ bb_giveup = 1; -+ return; -+ } -+ memcpy(state, bb_reg_state, size); -+ state->ref_count = 1; -+ bb_jmp->state = state; -+ KDB_DEBUG_BB(" new state %p\n", state); -+} -+ -+/* Isolate the processing for 'mov' so it can be used for 'xadd'/'xchg' as -+ * well. 
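As a hedged sketch of the state-sharing idea used by bb_transfer() above: arc states are compared with memcmp() and identical ones are shared through a reference count instead of being duplicated. The pool layout and names below are simplified stand-ins, not the patch's own data structures.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct state { int ref_count; char payload[8]; };

/* Reuse an identical pooled state (bumping its reference count and
 * freeing the candidate) or install the candidate as a new entry.
 */
static struct state *share_state(struct state **pool, int n, struct state *cand)
{
        int i;
        for (i = 0; i < n; ++i) {
                if (pool[i] && memcmp(pool[i]->payload, cand->payload,
                                      sizeof(cand->payload)) == 0) {
                        ++pool[i]->ref_count;
                        free(cand);
                        return pool[i];
                }
        }
        cand->ref_count = 1;
        for (i = 0; i < n; ++i) {
                if (!pool[i]) {
                        pool[i] = cand;
                        break;
                }
        }
        return cand;
}

int main(void)
{
        struct state *pool[4] = { NULL };
        struct state *a = calloc(1, sizeof(*a));
        struct state *b = calloc(1, sizeof(*b));
        struct state *shared;
        if (!a || !b)
                return 1;
        strcpy(a->payload, "rsp-16");
        strcpy(b->payload, "rsp-16");   /* same contents as 'a' */
        share_state(pool, 4, a);
        shared = share_state(pool, 4, b);
        printf("shared %d, ref_count %d\n", shared == a, a->ref_count);
        /* prints "shared 1, ref_count 2" */
        return 0;
}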
-+ * -+ * xadd/xchg expect this function to return BBOU_NOP for special cases, -+ * otherwise it returns BBOU_RSWD. All special cases must be handled entirely -+ * within this function, including doing bb_read_operand or bb_write_operand -+ * where necessary. -+ */ -+ -+static enum bb_operand_usage -+bb_usage_mov(const struct bb_operand *src, const struct bb_operand *dst, int l) -+{ -+ int full_register_src, full_register_dst; -+ full_register_src = bb_operand_length(src, bb_decode.opcode[l]) -+ == KDB_WORD_SIZE * 8; -+ full_register_dst = bb_operand_length(dst, bb_decode.opcode[l]) -+ == KDB_WORD_SIZE * 8; -+ /* If both src and dst are full integer registers then record the -+ * register change. -+ */ -+ if (src->reg && -+ bb_is_int_reg(src->base_rc) && -+ dst->reg && -+ bb_is_int_reg(dst->base_rc) && -+ full_register_src && -+ full_register_dst) { -+ /* Special case for the code that switches stacks in -+ * jprobe_return. That code must modify RSP but it does it in -+ * a well defined manner. Do not invalidate RSP. -+ */ -+ if (src->base_rc == BBRG_RBX && -+ dst->base_rc == BBRG_RSP && -+ strcmp(bb_func_name, "jprobe_return") == 0) { -+ bb_read_operand(src); -+ return BBOU_NOP; -+ } -+ /* math_abort takes the equivalent of a longjmp structure and -+ * resets the stack. Ignore this, it leaves RSP well defined. -+ */ -+ if (dst->base_rc == BBRG_RSP && -+ strcmp(bb_func_name, "math_abort") == 0) { -+ bb_read_operand(src); -+ return BBOU_NOP; -+ } -+ bb_reg_set_reg(dst->base_rc, src->base_rc); -+ return BBOU_NOP; -+ } -+ /* If the move is from a full integer register to stack then record it. -+ */ -+ if (src->reg && -+ bb_is_simple_memory(dst) && -+ bb_is_osp_defined(dst->base_rc) && -+ full_register_src) { -+ /* Ugly special case. Initializing list heads on stack causes -+ * false references to stack variables when the list head is -+ * used. Static code analysis cannot detect that the list head -+ * has been changed by a previous execution loop and that a -+ * basic block is only executed after the list head has been -+ * changed. -+ * -+ * These false references can result in valid stack variables -+ * being incorrectly cleared on some logic paths. Ignore -+ * stores to stack variables which point to themselves or to -+ * the previous word so the list head initialization is not -+ * recorded. -+ */ -+ if (bb_is_osp_defined(src->base_rc)) { -+ int stack1 = bb_reg_code_offset(src->base_rc); -+ int stack2 = bb_reg_code_offset(dst->base_rc) + -+ dst->disp; -+ if (stack1 == stack2 || -+ stack1 == stack2 - KDB_WORD_SIZE) -+ return BBOU_NOP; -+ } -+ bb_memory_set_reg(dst->base_rc, src->base_rc, dst->disp); -+ return BBOU_NOP; -+ } -+ /* If the move is from stack to a full integer register then record it. -+ */ -+ if (bb_is_simple_memory(src) && -+ bb_is_osp_defined(src->base_rc) && -+ dst->reg && -+ bb_is_int_reg(dst->base_rc) && -+ full_register_dst) { -+#ifdef CONFIG_X86_32 -+ /* mov from TSS_sysenter_sp0+offset to esp to fix up the -+ * sysenter stack, it leaves esp well defined. mov -+ * TSS_ysenter_sp0+offset(%esp),%esp is followed by up to 5 -+ * push instructions to mimic the hardware stack push. If -+ * TSS_sysenter_sp0 is offset then only 3 words will be -+ * pushed. -+ */ -+ if (dst->base_rc == BBRG_RSP && -+ src->disp >= TSS_sysenter_sp0 && -+ bb_is_osp_defined(BBRG_RSP)) { -+ int pushes; -+ pushes = src->disp == TSS_sysenter_sp0 ? 
5 : 3; -+ bb_reg_code_set_offset(BBRG_RSP, -+ bb_reg_code_offset(BBRG_RSP) + -+ pushes * KDB_WORD_SIZE); -+ KDB_DEBUG_BB_OFFSET( -+ bb_reg_code_offset(BBRG_RSP), -+ " sysenter fixup, RSP", -+ "\n"); -+ return BBOU_NOP; -+ } -+#endif /* CONFIG_X86_32 */ -+ bb_read_operand(src); -+ bb_reg_set_memory(dst->base_rc, src->base_rc, src->disp); -+ return BBOU_NOP; -+ } -+ /* move %gs:0x,%rsp is used to unconditionally switch to another -+ * stack. Ignore this special case, it is handled by the stack -+ * unwinding code. -+ */ -+ if (src->segment && -+ strcmp(src->segment, "%gs") == 0 && -+ dst->reg && -+ dst->base_rc == BBRG_RSP) -+ return BBOU_NOP; -+ /* move %reg,%reg is a nop */ -+ if (src->reg && -+ dst->reg && -+ !src->segment && -+ !dst->segment && -+ strcmp(src->base, dst->base) == 0) -+ return BBOU_NOP; -+ /* Special case for the code that switches stacks in the scheduler -+ * (switch_to()). That code must modify RSP but it does it in a well -+ * defined manner. Do not invalidate RSP. -+ */ -+ if (dst->reg && -+ dst->base_rc == BBRG_RSP && -+ full_register_dst && -+ bb_is_scheduler_address()) { -+ bb_read_operand(src); -+ return BBOU_NOP; -+ } -+ /* Special case for the code that switches stacks in resume from -+ * hibernation code. That code must modify RSP but it does it in a -+ * well defined manner. Do not invalidate RSP. -+ */ -+ if (src->memory && -+ dst->reg && -+ dst->base_rc == BBRG_RSP && -+ full_register_dst && -+ strcmp(bb_func_name, "restore_image") == 0) { -+ bb_read_operand(src); -+ return BBOU_NOP; -+ } -+ return BBOU_RSWD; -+} -+ -+static enum bb_operand_usage -+bb_usage_xadd(const struct bb_operand *src, const struct bb_operand *dst) -+{ -+ /* Simulate xadd as a series of instructions including mov, that way we -+ * get the benefit of all the special cases already handled by -+ * BBOU_MOV. -+ * -+ * tmp = src + dst, src = dst, dst = tmp. -+ * -+ * For tmp, pick a register that is undefined. If all registers are -+ * defined then pick one that is not being used by xadd. 
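A small sketch (not part of the patch) of the register-level effect that bb_usage_xadd() models: xadd leaves the old destination value in the source operand and the sum in the destination, which is exactly the three-step sequence described in the comment above. Plain long variables stand in for registers.

#include <assert.h>

/* "xadd %src,%dst": dst receives the sum, src receives the old dst */
static void xadd_model(long *src, long *dst)
{
        long tmp = *src + *dst;         /* tmp = src + dst */
        *src = *dst;                    /* src = old dst   */
        *dst = tmp;                     /* dst = tmp       */
}

int main(void)
{
        long src = 3, dst = 10;
        xadd_model(&src, &dst);
        assert(src == 10 && dst == 13);
        return 0;
}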
-+ */ -+ enum bb_reg_code reg = BBRG_UNDEFINED; -+ struct bb_operand tmp; -+ struct bb_reg_contains save_tmp; -+ enum bb_operand_usage usage; -+ int undefined = 0; -+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) { -+ if (bb_reg_code_value(reg) == BBRG_UNDEFINED) { -+ undefined = 1; -+ break; -+ } -+ } -+ if (!undefined) { -+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) { -+ if (reg != src->base_rc && -+ reg != src->index_rc && -+ reg != dst->base_rc && -+ reg != dst->index_rc && -+ reg != BBRG_RSP) -+ break; -+ } -+ } -+ KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]); -+ save_tmp = bb_reg_state->contains[reg - BBRG_RAX]; -+ bb_reg_set_undef(reg); -+ memset(&tmp, 0, sizeof(tmp)); -+ tmp.present = 1; -+ tmp.reg = 1; -+ tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC); -+ if (tmp.base) { -+ tmp.base[0] = '%'; -+ strcpy(tmp.base + 1, bbrg_name[reg]); -+ } -+ tmp.base_rc = reg; -+ bb_read_operand(src); -+ bb_read_operand(dst); -+ if (bb_usage_mov(src, dst, sizeof("xadd")-1) == BBOU_NOP) -+ usage = BBOU_RSRD; -+ else -+ usage = BBOU_RSRDWS; -+ bb_usage_mov(&tmp, dst, sizeof("xadd")-1); -+ KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]); -+ bb_reg_state->contains[reg - BBRG_RAX] = save_tmp; -+ debug_kfree(tmp.base); -+ return usage; -+} -+ -+static enum bb_operand_usage -+bb_usage_xchg(const struct bb_operand *src, const struct bb_operand *dst) -+{ -+ /* Simulate xchg as a series of mov instructions, that way we get the -+ * benefit of all the special cases already handled by BBOU_MOV. -+ * -+ * mov dst,tmp; mov src,dst; mov tmp,src; -+ * -+ * For tmp, pick a register that is undefined. If all registers are -+ * defined then pick one that is not being used by xchg. 
-+ */ -+ enum bb_reg_code reg = BBRG_UNDEFINED; -+ int rs = BBOU_RS, rd = BBOU_RD, ws = BBOU_WS, wd = BBOU_WD; -+ struct bb_operand tmp; -+ struct bb_reg_contains save_tmp; -+ int undefined = 0; -+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) { -+ if (bb_reg_code_value(reg) == BBRG_UNDEFINED) { -+ undefined = 1; -+ break; -+ } -+ } -+ if (!undefined) { -+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) { -+ if (reg != src->base_rc && -+ reg != src->index_rc && -+ reg != dst->base_rc && -+ reg != dst->index_rc && -+ reg != BBRG_RSP) -+ break; -+ } -+ } -+ KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]); -+ save_tmp = bb_reg_state->contains[reg - BBRG_RAX]; -+ memset(&tmp, 0, sizeof(tmp)); -+ tmp.present = 1; -+ tmp.reg = 1; -+ tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC); -+ if (tmp.base) { -+ tmp.base[0] = '%'; -+ strcpy(tmp.base + 1, bbrg_name[reg]); -+ } -+ tmp.base_rc = reg; -+ if (bb_usage_mov(dst, &tmp, sizeof("xchg")-1) == BBOU_NOP) -+ rd = 0; -+ if (bb_usage_mov(src, dst, sizeof("xchg")-1) == BBOU_NOP) { -+ rs = 0; -+ wd = 0; -+ } -+ if (bb_usage_mov(&tmp, src, sizeof("xchg")-1) == BBOU_NOP) -+ ws = 0; -+ KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]); -+ bb_reg_state->contains[reg - BBRG_RAX] = save_tmp; -+ debug_kfree(tmp.base); -+ return rs | rd | ws | wd; -+} -+ -+/* Invalidate all the scratch registers */ -+ -+static void -+bb_invalidate_scratch_reg(void) -+{ -+ int i, j; -+ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) { -+ for (j = 0; j < ARRAY_SIZE(bb_preserved_reg); ++j) { -+ if (i == bb_preserved_reg[j]) -+ goto preserved; -+ } -+ bb_reg_set_undef(i); -+preserved: -+ continue; -+ } -+} -+ -+static void -+bb_pass2_computed_jmp(const struct bb_operand *src) -+{ -+ unsigned long table = src->disp; -+ kdb_machreg_t addr; -+ while (!bb_giveup) { -+ if (kdb_getword(&addr, table, sizeof(addr))) -+ return; -+ if (addr < bb_func_start || addr >= bb_func_end) -+ return; -+ bb_transfer(bb_curr_addr, addr, 0); -+ table += KDB_WORD_SIZE; -+ } -+} -+ -+/* The current instruction has been decoded and all the information is in -+ * bb_decode. Based on the opcode, track any operand usage that we care about. -+ */ -+ -+static void -+bb_usage(void) -+{ -+ enum bb_operand_usage usage = bb_decode.match->usage; -+ struct bb_operand *src = &bb_decode.src; -+ struct bb_operand *dst = &bb_decode.dst; -+ struct bb_operand *dst2 = &bb_decode.dst2; -+ int opcode_suffix, operand_length; -+ -+ /* First handle all the special usage cases, and map them to a generic -+ * case after catering for the side effects. -+ */ -+ -+ if (usage == BBOU_IMUL && -+ src->present && !dst->present && !dst2->present) { -+ /* single operand imul, same effects as mul */ -+ usage = BBOU_MUL; -+ } -+ -+ /* AT&T syntax uses movs for move with sign extension, instead -+ * of the Intel movsx. The AT&T syntax causes problems for the opcode -+ * mapping; movs with sign extension needs to be treated as a generic -+ * read src, write dst, but instead it falls under the movs I/O -+ * instruction. Fix it. -+ */ -+ if (usage == BBOU_MOVS && strlen(bb_decode.opcode) > 5) -+ usage = BBOU_RSWD; -+ -+ /* This switch statement deliberately does not use 'default' at the top -+ * level. That way the compiler will complain if a new BBOU_ enum is -+ * added above and not explicitly handled here. 
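A minimal example of the compile-time check relied on here: a switch over an enum with no top-level default lets gcc and clang (-Wswitch, enabled by -Wall) flag any enumerator that is not explicitly handled, so adding a new BBOU_ value forces this switch to be revisited. The enum and function below are made up purely for the demo.

enum colour { RED, GREEN, BLUE };

int classify(enum colour c)
{
        switch (c) {                    /* deliberately no default: */
        case RED:
                return 0;
        case GREEN:
                return 1;
        case BLUE:
                return 2;
        }
        return -1;                      /* unreachable for valid values */
}

int main(void)
{
        return classify(GREEN);
}
/* Adding a new enumerator (say YELLOW) now makes gcc -Wall report
 * "enumeration value 'YELLOW' not handled in switch" for classify().
 */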
-+ */ -+ switch (usage) { -+ case BBOU_UNKNOWN: /* drop through */ -+ case BBOU_RS: /* drop through */ -+ case BBOU_RD: /* drop through */ -+ case BBOU_RSRD: /* drop through */ -+ case BBOU_WS: /* drop through */ -+ case BBOU_RSWS: /* drop through */ -+ case BBOU_RDWS: /* drop through */ -+ case BBOU_RSRDWS: /* drop through */ -+ case BBOU_WD: /* drop through */ -+ case BBOU_RSWD: /* drop through */ -+ case BBOU_RDWD: /* drop through */ -+ case BBOU_RSRDWD: /* drop through */ -+ case BBOU_WSWD: /* drop through */ -+ case BBOU_RSWSWD: /* drop through */ -+ case BBOU_RDWSWD: /* drop through */ -+ case BBOU_RSRDWSWD: -+ break; /* ignore generic usage for now */ -+ case BBOU_ADD: -+ /* Special case for add instructions that adjust registers -+ * which are mapping the stack. -+ */ -+ if (dst->reg && bb_is_osp_defined(dst->base_rc)) { -+ bb_adjust_osp_instruction(1); -+ usage = BBOU_RS; -+ } else { -+ usage = BBOU_RSRDWD; -+ } -+ break; -+ case BBOU_AND: -+ /* Special case when trying to round the stack pointer -+ * to achieve byte alignment -+ */ -+ if (dst->reg && dst->base_rc == BBRG_RSP && -+ src->immediate && strncmp(bb_func_name, "efi_call", 8) == 0) { -+ usage = BBOU_NOP; -+ } else { -+ usage = BBOU_RSRDWD; -+ } -+ break; -+ case BBOU_CALL: -+ bb_reg_state_print(bb_reg_state); -+ usage = BBOU_NOP; -+ if (bb_is_static_disp(src)) { -+ /* save_args is special. It saves -+ * a partial pt_regs onto the stack and switches -+ * to the interrupt stack. -+ */ -+ if (src->disp == bb_save_args) { -+ bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x48); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x40); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x38); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x30); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x28); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x20); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x18); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x10); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x08); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0); -+ /* This is actually on the interrupt stack, -+ * but we fudge it so the unwind works. -+ */ -+ bb_memory_set_reg_value(BBRG_RSP, -0x8, BBRG_RBP, 0); -+ bb_reg_set_reg(BBRG_RBP, BBRG_RSP); -+ bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE); -+ } -+ /* save_rest juggles the stack frame to append the -+ * rest of the pt_regs onto a stack where SAVE_ARGS -+ * or save_args has already been done. -+ */ -+ else if (src->disp == bb_save_rest) { -+ bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x30); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x28); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x20); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x18); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x10); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0x08); -+ } -+ /* error_entry and save_paranoid save a full pt_regs. -+ * Break out so the scratch registers aren't invalidated. 
-+ */ -+ else if (src->disp == bb_error_entry || src->disp == bb_save_paranoid) { -+ bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x70); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x68); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x60); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x58); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x50); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x48); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x40); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x38); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x30); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x28); -+ bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x20); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x18); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x10); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x08); -+ bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0); -+ break; -+ } -+ } -+ /* Invalidate the scratch registers */ -+ bb_invalidate_scratch_reg(); -+ -+ /* These special cases need scratch registers invalidated first */ -+ if (bb_is_static_disp(src)) { -+ /* Function sync_regs and save_v86_state are special. -+ * Their return value is the new stack pointer -+ */ -+ if (src->disp == bb_sync_regs) { -+ bb_reg_set_reg(BBRG_RAX, BBRG_RSP); -+ } else if (src->disp == bb_save_v86_state) { -+ bb_reg_set_reg(BBRG_RAX, BBRG_RSP); -+ bb_adjust_osp(BBRG_RAX, +KDB_WORD_SIZE); -+ } -+ } -+ break; -+ case BBOU_CBW: -+ /* Convert word in RAX. Read RAX, write RAX */ -+ bb_reg_read(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RAX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_CMOV: -+ /* cmove %gs:0x,%rsp is used to conditionally switch to -+ * another stack. Ignore this special case, it is handled by -+ * the stack unwinding code. -+ */ -+ if (src->segment && -+ strcmp(src->segment, "%gs") == 0 && -+ dst->reg && -+ dst->base_rc == BBRG_RSP) -+ usage = BBOU_NOP; -+ else -+ usage = BBOU_RSWD; -+ break; -+ case BBOU_CMPXCHG: -+ /* Read RAX, write RAX plus src read, dst write */ -+ bb_reg_read(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RAX); -+ usage = BBOU_RSWD; -+ break; -+ case BBOU_CMPXCHGD: -+ /* Read RAX, RBX, RCX, RDX, write RAX, RDX plus src read/write */ -+ bb_reg_read(BBRG_RAX); -+ bb_reg_read(BBRG_RBX); -+ bb_reg_read(BBRG_RCX); -+ bb_reg_read(BBRG_RDX); -+ bb_reg_set_undef(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RDX); -+ usage = BBOU_RSWS; -+ break; -+ case BBOU_CPUID: -+ /* Read RAX, write RAX, RBX, RCX, RDX */ -+ bb_reg_read(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RBX); -+ bb_reg_set_undef(BBRG_RCX); -+ bb_reg_set_undef(BBRG_RDX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_CWD: -+ /* Convert word in RAX, RDX. Read RAX, write RDX */ -+ bb_reg_read(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RDX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_DIV: /* drop through */ -+ case BBOU_IDIV: -+ /* The 8 bit variants only affect RAX, the 16, 32 and 64 bit -+ * variants affect RDX as well. -+ */ -+ switch (usage) { -+ case BBOU_DIV: -+ opcode_suffix = bb_decode.opcode[3]; -+ break; -+ case BBOU_IDIV: -+ opcode_suffix = bb_decode.opcode[4]; -+ break; -+ default: -+ opcode_suffix = 'q'; -+ break; -+ } -+ operand_length = bb_operand_length(src, opcode_suffix); -+ bb_reg_read(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RAX); -+ if (operand_length != 8) { -+ bb_reg_read(BBRG_RDX); -+ bb_reg_set_undef(BBRG_RDX); -+ } -+ usage = BBOU_RS; -+ break; -+ case BBOU_IMUL: -+ /* Only the two and three operand forms get here. The one -+ * operand form is treated as mul. -+ */ -+ if (dst2->present) { -+ /* The three operand form is a special case, read the first two -+ * operands, write the third. 
-+ */ -+ bb_read_operand(src); -+ bb_read_operand(dst); -+ bb_write_operand(dst2); -+ usage = BBOU_NOP; -+ } else { -+ usage = BBOU_RSRDWD; -+ } -+ break; -+ case BBOU_IRET: -+ bb_sanity_check(0); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_JMP: -+ if (bb_is_static_disp(src)) -+ bb_transfer(bb_curr_addr, src->disp, 0); -+ else if (src->indirect && -+ src->disp && -+ src->base == NULL && -+ src->index && -+ src->scale == KDB_WORD_SIZE) -+ bb_pass2_computed_jmp(src); -+ usage = BBOU_RS; -+ break; -+ case BBOU_LAHF: -+ /* Write RAX */ -+ bb_reg_set_undef(BBRG_RAX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_LEA: -+ /* dst = src + disp. Often used to calculate offsets into the -+ * stack, so check if it uses a stack pointer. -+ */ -+ usage = BBOU_RSWD; -+ if (bb_is_simple_memory(src)) { -+ if (bb_is_osp_defined(src->base_rc)) { -+ bb_reg_set_reg(dst->base_rc, src->base_rc); -+ bb_adjust_osp_instruction(1); -+ usage = BBOU_RS; -+ } else if (src->disp == 0 && -+ src->base_rc == dst->base_rc) { -+ /* lea 0(%reg),%reg is generated by i386 -+ * GENERIC_NOP7. -+ */ -+ usage = BBOU_NOP; -+ } else if (src->disp == 4096 && -+ (src->base_rc == BBRG_R8 || -+ src->base_rc == BBRG_RDI) && -+ strcmp(bb_func_name, "relocate_kernel") == 0) { -+ /* relocate_kernel: setup a new stack at the -+ * end of the physical control page, using -+ * (x86_64) lea 4096(%r8),%rsp or (i386) lea -+ * 4096(%edi),%esp -+ */ -+ usage = BBOU_NOP; -+ } -+ } -+ break; -+ case BBOU_LEAVE: -+ /* RSP = RBP; RBP = *(RSP); RSP += KDB_WORD_SIZE; */ -+ bb_reg_set_reg(BBRG_RSP, BBRG_RBP); -+ if (bb_is_osp_defined(BBRG_RSP)) -+ bb_reg_set_memory(BBRG_RBP, BBRG_RSP, 0); -+ else -+ bb_reg_set_undef(BBRG_RBP); -+ if (bb_is_osp_defined(BBRG_RSP)) -+ bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE); -+ /* common_interrupt uses leave in a non-standard manner */ -+ if (strcmp(bb_func_name, "common_interrupt") != 0) -+ bb_sanity_check(0); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_LODS: -+ /* Read RSI, write RAX, RSI */ -+ bb_reg_read(BBRG_RSI); -+ bb_reg_set_undef(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RSI); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_LOOP: -+ /* Read and write RCX */ -+ bb_reg_read(BBRG_RCX); -+ bb_reg_set_undef(BBRG_RCX); -+ if (bb_is_static_disp(src)) -+ bb_transfer(bb_curr_addr, src->disp, 0); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_LSS: -+ /* lss offset(%esp),%esp leaves esp well defined */ -+ if (dst->reg && -+ dst->base_rc == BBRG_RSP && -+ bb_is_simple_memory(src) && -+ src->base_rc == BBRG_RSP) { -+ bb_adjust_osp(BBRG_RSP, 2*KDB_WORD_SIZE + src->disp); -+ usage = BBOU_NOP; -+ } else { -+ usage = BBOU_RSWD; -+ } -+ break; -+ case BBOU_MONITOR: -+ /* Read RAX, RCX, RDX */ -+ bb_reg_set_undef(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RCX); -+ bb_reg_set_undef(BBRG_RDX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_MOV: -+ usage = bb_usage_mov(src, dst, sizeof("mov")-1); -+ break; -+ case BBOU_MOVS: -+ /* Read RSI, RDI, write RSI, RDI */ -+ bb_reg_read(BBRG_RSI); -+ bb_reg_read(BBRG_RDI); -+ bb_reg_set_undef(BBRG_RSI); -+ bb_reg_set_undef(BBRG_RDI); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_MUL: -+ /* imul (one operand form only) or mul. Read RAX. If the -+ * operand length is not 8 then write RDX. 
-+ */ -+ if (bb_decode.opcode[0] == 'i') -+ opcode_suffix = bb_decode.opcode[4]; -+ else -+ opcode_suffix = bb_decode.opcode[3]; -+ operand_length = bb_operand_length(src, opcode_suffix); -+ bb_reg_read(BBRG_RAX); -+ if (operand_length != 8) -+ bb_reg_set_undef(BBRG_RDX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_MWAIT: -+ /* Read RAX, RCX */ -+ bb_reg_read(BBRG_RAX); -+ bb_reg_read(BBRG_RCX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_NOP: -+ break; -+ case BBOU_OUTS: -+ /* Read RSI, RDX, write RSI */ -+ bb_reg_read(BBRG_RSI); -+ bb_reg_read(BBRG_RDX); -+ bb_reg_set_undef(BBRG_RSI); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_POP: -+ /* Complicated by the fact that you can pop from top of stack -+ * to a stack location, for this case the destination location -+ * is calculated after adjusting RSP. Analysis of the kernel -+ * code shows that gcc only uses this strange format to get the -+ * flags into a local variable, e.g. pushf; popl 0x10(%esp); so -+ * I am going to ignore this special case. -+ */ -+ usage = BBOU_WS; -+ if (!bb_is_osp_defined(BBRG_RSP)) { -+ if (!bb_is_scheduler_address()) { -+ kdb_printf("pop when BBRG_RSP is undefined?\n"); -+ bb_giveup = 1; -+ } -+ } else { -+ if (src->reg) { -+ bb_reg_set_memory(src->base_rc, BBRG_RSP, 0); -+ usage = BBOU_NOP; -+ } -+ /* pop %rsp does not adjust rsp */ -+ if (!src->reg || -+ src->base_rc != BBRG_RSP) -+ bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE); -+ } -+ break; -+ case BBOU_POPF: -+ /* Do not care about flags, just adjust RSP */ -+ if (!bb_is_osp_defined(BBRG_RSP)) { -+ if (!bb_is_scheduler_address()) { -+ kdb_printf("popf when BBRG_RSP is undefined?\n"); -+ bb_giveup = 1; -+ } -+ } else { -+ bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE); -+ } -+ usage = BBOU_WS; -+ break; -+ case BBOU_PUSH: -+ /* Complicated by the fact that you can push from a stack -+ * location to top of stack, the source location is calculated -+ * before adjusting RSP. Analysis of the kernel code shows -+ * that gcc only uses this strange format to restore the flags -+ * from a local variable, e.g. pushl 0x10(%esp); popf; so I am -+ * going to ignore this special case. -+ */ -+ usage = BBOU_RS; -+ if (!bb_is_osp_defined(BBRG_RSP)) { -+ if (!bb_is_scheduler_address()) { -+ kdb_printf("push when BBRG_RSP is undefined?\n"); -+ bb_giveup = 1; -+ } -+ } else { -+ bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE); -+ if (src->reg && -+ bb_reg_code_offset(BBRG_RSP) <= 0) -+ bb_memory_set_reg(BBRG_RSP, src->base_rc, 0); -+ } -+ break; -+ case BBOU_PUSHF: -+ /* Do not care about flags, just adjust RSP */ -+ if (!bb_is_osp_defined(BBRG_RSP)) { -+ if (!bb_is_scheduler_address()) { -+ kdb_printf("pushf when BBRG_RSP is undefined?\n"); -+ bb_giveup = 1; -+ } -+ } else { -+ bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE); -+ } -+ usage = BBOU_WS; -+ break; -+ case BBOU_RDMSR: -+ /* Read RCX, write RAX, RDX */ -+ bb_reg_read(BBRG_RCX); -+ bb_reg_set_undef(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RDX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_RDTSC: -+ /* Write RAX, RDX */ -+ bb_reg_set_undef(BBRG_RAX); -+ bb_reg_set_undef(BBRG_RDX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_RET: -+ usage = BBOU_NOP; -+ if (src->immediate && bb_is_osp_defined(BBRG_RSP)) { -+ bb_adjust_osp(BBRG_RSP, src->disp); -+ } -+ /* Functions that restore state which was saved by another -+ * function or build new kernel stacks. We cannot verify what -+ * is being restored so skip the sanity check. 
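A standalone sketch (not part of the patch) of the stack-pointer bookkeeping that the BBOU_PUSH/BBOU_POP/BBOU_PUSHF/BBOU_POPF cases above perform: a push moves the tracked offset from the original stack pointer (OSP) down by one word, a pop moves it back up and invalidates any recorded slot that is now below RSP, mirroring bb_adjust_osp(). Names and the WORD constant are simplified stand-ins.

#include <stdio.h>

#define WORD 8

static int osp_offset;                  /* RSP == OSP + osp_offset */

static void forget_slot(int offset)
{
        printf("forget slot at osp%+d\n", offset);
}

static void track_push(void)
{
        osp_offset -= WORD;
}

static void track_pop(void)
{
        int old = osp_offset;
        osp_offset += WORD;
        while (old < osp_offset) {      /* slots now below RSP are dead */
                forget_slot(old);
                old += WORD;
        }
}

int main(void)
{
        track_push();                   /* e.g. push %rbp -> osp-8  */
        track_push();                   /* e.g. push %rbx -> osp-16 */
        printf("osp_offset %d\n", osp_offset);
        track_pop();                    /* forgets the osp-16 slot  */
        printf("osp_offset %d\n", osp_offset);
        return 0;
}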
-+ */ -+ if (strcmp(bb_func_name, "restore_image") == 0 || -+ strcmp(bb_func_name, "relocate_kernel") == 0 || -+ strcmp(bb_func_name, "identity_mapped") == 0 || -+ strcmp(bb_func_name, "xen_iret_crit_fixup") == 0 || -+ strcmp(bb_func_name, "math_abort") == 0 || -+ strcmp(bb_func_name, "save_args") == 0 || -+ strcmp(bb_func_name, "kretprobe_trampoline_holder") == 0) -+ break; -+ bb_sanity_check(0); -+ break; -+ case BBOU_SAHF: -+ /* Read RAX */ -+ bb_reg_read(BBRG_RAX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_SCAS: -+ /* Read RAX, RDI, write RDI */ -+ bb_reg_read(BBRG_RAX); -+ bb_reg_read(BBRG_RDI); -+ bb_reg_set_undef(BBRG_RDI); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_SUB: -+ /* Special case for sub instructions that adjust registers -+ * which are mapping the stack. -+ */ -+ if (dst->reg && bb_is_osp_defined(dst->base_rc)) { -+ bb_adjust_osp_instruction(-1); -+ usage = BBOU_RS; -+ } else { -+ usage = BBOU_RSRDWD; -+ } -+ break; -+ case BBOU_SYSEXIT: -+ bb_sanity_check(1); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_SYSRET: -+ bb_sanity_check(1); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_WRMSR: -+ /* Read RCX, RAX, RDX */ -+ bb_reg_read(BBRG_RCX); -+ bb_reg_read(BBRG_RAX); -+ bb_reg_read(BBRG_RDX); -+ usage = BBOU_NOP; -+ break; -+ case BBOU_XADD: -+ usage = bb_usage_xadd(src, dst); -+ break; -+ case BBOU_XCHG: -+ /* i386 do_IRQ with 4K stacks does xchg %ebx,%esp; call -+ * irq_handler; mov %ebx,%esp; to switch stacks. Ignore this -+ * stack switch when tracking registers, it is handled by -+ * higher level backtrace code. Convert xchg %ebx,%esp to mov -+ * %esp,%ebx so the later mov %ebx,%esp becomes a NOP and the -+ * stack remains defined so we can backtrace through do_IRQ's -+ * stack switch. -+ * -+ * Ditto for do_softirq. -+ */ -+ if (src->reg && -+ dst->reg && -+ src->base_rc == BBRG_RBX && -+ dst->base_rc == BBRG_RSP && -+ (strcmp(bb_func_name, "do_IRQ") == 0 || -+ strcmp(bb_func_name, "do_softirq") == 0)) { -+ strcpy(bb_decode.opcode, "mov"); -+ usage = bb_usage_mov(dst, src, sizeof("mov")-1); -+ } else { -+ usage = bb_usage_xchg(src, dst); -+ } -+ break; -+ case BBOU_XOR: -+ /* xor %reg,%reg only counts as a register write, the original -+ * contents of reg are irrelevant. -+ */ -+ if (src->reg && dst->reg && src->base_rc == dst->base_rc) -+ usage = BBOU_WS; -+ else -+ usage = BBOU_RSRDWD; -+ break; -+ } -+ -+ /* The switch statement above handled all the special cases. Every -+ * opcode should now have a usage of NOP or one of the generic cases. 
-+ */ -+ if (usage == BBOU_UNKNOWN || usage == BBOU_NOP) { -+ /* nothing to do */ -+ } else if (usage >= BBOU_RS && usage <= BBOU_RSRDWSWD) { -+ if (usage & BBOU_RS) -+ bb_read_operand(src); -+ if (usage & BBOU_RD) -+ bb_read_operand(dst); -+ if (usage & BBOU_WS) -+ bb_write_operand(src); -+ if (usage & BBOU_WD) -+ bb_write_operand(dst); -+ } else { -+ kdb_printf("%s: opcode not fully handled\n", __FUNCTION__); -+ if (!KDB_DEBUG(BB)) { -+ bb_print_opcode(); -+ if (bb_decode.src.present) -+ bb_print_operand("src", &bb_decode.src); -+ if (bb_decode.dst.present) -+ bb_print_operand("dst", &bb_decode.dst); -+ if (bb_decode.dst2.present) -+ bb_print_operand("dst2", &bb_decode.dst2); -+ } -+ bb_giveup = 1; -+ } -+} -+ -+static void -+bb_parse_buffer(void) -+{ -+ char *p, *src, *dst = NULL, *dst2 = NULL; -+ int paren = 0; -+ p = bb_buffer; -+ memset(&bb_decode, 0, sizeof(bb_decode)); -+ KDB_DEBUG_BB(" '%s'\n", p); -+ p += strcspn(p, ":"); /* skip address and function name+offset: */ -+ if (*p++ != ':') { -+ kdb_printf("%s: cannot find ':' in buffer '%s'\n", -+ __FUNCTION__, bb_buffer); -+ bb_giveup = 1; -+ return; -+ } -+ p += strspn(p, " \t"); /* step to opcode */ -+ if (strncmp(p, "(bad)", 5) == 0) -+ strcpy(p, "nop"); -+ /* separate any opcode prefix */ -+ if (strncmp(p, "lock", 4) == 0 || -+ strncmp(p, "rep", 3) == 0 || -+ strncmp(p, "rex", 3) == 0 || -+ strncmp(p, "addr", 4) == 0) { -+ bb_decode.prefix = p; -+ p += strcspn(p, " \t"); -+ *p++ = '\0'; -+ p += strspn(p, " \t"); -+ } -+ bb_decode.opcode = p; -+ strsep(&p, " \t"); /* step to end of opcode */ -+ if (bb_parse_opcode()) -+ return; -+ if (!p) -+ goto no_operands; -+ p += strspn(p, " \t"); /* step to operand(s) */ -+ if (!*p) -+ goto no_operands; -+ src = p; -+ p = strsep(&p, " \t"); /* strip comments after operands */ -+ /* split 'src','dst' but ignore ',' inside '(' ')' */ -+ while (*p) { -+ if (*p == '(') { -+ ++paren; -+ } else if (*p == ')') { -+ --paren; -+ } else if (*p == ',' && paren == 0) { -+ *p = '\0'; -+ if (dst) -+ dst2 = p+1; -+ else -+ dst = p+1; -+ } -+ ++p; -+ } -+ bb_parse_operand(src, &bb_decode.src); -+ if (KDB_DEBUG(BB)) -+ bb_print_operand("src", &bb_decode.src); -+ if (dst && !bb_giveup) { -+ bb_parse_operand(dst, &bb_decode.dst); -+ if (KDB_DEBUG(BB)) -+ bb_print_operand("dst", &bb_decode.dst); -+ } -+ if (dst2 && !bb_giveup) { -+ bb_parse_operand(dst2, &bb_decode.dst2); -+ if (KDB_DEBUG(BB)) -+ bb_print_operand("dst2", &bb_decode.dst2); -+ } -+no_operands: -+ if (!bb_giveup) -+ bb_usage(); -+} -+ -+static int -+bb_dis_pass2(PTR file, const char *fmt, ...) 
-+{ -+ char *p; -+ int l = strlen(bb_buffer); -+ va_list ap; -+ va_start(ap, fmt); -+ vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap); -+ va_end(ap); -+ if ((p = strchr(bb_buffer, '\n'))) { -+ *p = '\0'; -+ p = bb_buffer; -+ p += strcspn(p, ":"); -+ if (*p++ == ':') -+ bb_fixup_switch_to(p); -+ bb_parse_buffer(); -+ bb_buffer[0] = '\0'; -+ } -+ return 0; -+} -+ -+static void -+bb_printaddr_pass2(bfd_vma addr, disassemble_info *dip) -+{ -+ kdb_symtab_t symtab; -+ unsigned int offset; -+ dip->fprintf_func(dip->stream, "0x%lx", addr); -+ kdbnearsym(addr, &symtab); -+ if (symtab.sym_name) { -+ dip->fprintf_func(dip->stream, " <%s", symtab.sym_name); -+ if ((offset = addr - symtab.sym_start)) -+ dip->fprintf_func(dip->stream, "+0x%x", offset); -+ dip->fprintf_func(dip->stream, ">"); -+ } -+} -+ -+/* Set the starting register and memory state for the current bb */ -+ -+static void -+bb_start_block0_special(void) -+{ -+ int i; -+ short offset_address; -+ enum bb_reg_code reg, value; -+ struct bb_name_state *r; -+ for (i = 0, r = bb_special_cases; -+ i < ARRAY_SIZE(bb_special_cases); -+ ++i, ++r) { -+ if (bb_func_start == r->address && r->fname == NULL) -+ goto match; -+ } -+ return; -+match: -+ /* Set the running registers */ -+ for (reg = BBRG_RAX; reg < r->regs_size; ++reg) { -+ value = r->regs[reg].value; -+ if (test_bit(value, r->skip_regs.bits)) { -+ /* this regs entry is not defined for this label */ -+ continue; -+ } -+ bb_reg_code_set_value(reg, value); -+ bb_reg_code_set_offset(reg, r->regs[reg].offset); -+ } -+ /* Set any memory contents, e.g. pt_regs. Adjust RSP as required. */ -+ offset_address = 0; -+ for (i = 0; i < r->mem_size; ++i) { -+ offset_address = max_t(int, -+ r->mem[i].offset_address + KDB_WORD_SIZE, -+ offset_address); -+ } -+ if (bb_reg_code_offset(BBRG_RSP) > -offset_address) -+ bb_adjust_osp(BBRG_RSP, -offset_address - bb_reg_code_offset(BBRG_RSP)); -+ for (i = 0; i < r->mem_size; ++i) { -+ value = r->mem[i].value; -+ if (test_bit(value, r->skip_mem.bits)) { -+ /* this memory entry is not defined for this label */ -+ continue; -+ } -+ bb_memory_set_reg_value(BBRG_RSP, r->mem[i].offset_address, -+ value, 0); -+ bb_reg_set_undef(value); -+ } -+ return; -+} -+ -+static void -+bb_pass2_start_block(int number) -+{ -+ int i, j, k, first, changed; -+ size_t size; -+ struct bb_jmp *bb_jmp; -+ struct bb_reg_state *state; -+ struct bb_memory_contains *c1, *c2; -+ bb_reg_state->mem_count = bb_reg_state_max; -+ size = bb_reg_state_size(bb_reg_state); -+ memset(bb_reg_state, 0, size); -+ -+ if (number == 0) { -+ /* The first block is assumed to have well defined inputs */ -+ bb_start_block0(); -+ /* Some assembler labels have non-standard entry -+ * states. 
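A minimal sketch of the operand splitting done by bb_parse_buffer() above: a comma separates operands only when it is not nested inside '(' ')', so an AT&T memory operand such as 0x8(%rax,%rbx,4) stays intact. The helper below is a simplified stand-in, not the patch's parser.

#include <stdio.h>

static void split_operands(char *p)
{
        char *start = p;
        int paren = 0;
        for (; *p; ++p) {
                if (*p == '(')
                        ++paren;
                else if (*p == ')')
                        --paren;
                else if (*p == ',' && paren == 0) {
                        *p = '\0';
                        printf("operand '%s'\n", start);
                        start = p + 1;
                }
        }
        printf("operand '%s'\n", start);
}

int main(void)
{
        char ops[] = "0x8(%rax,%rbx,4),%rcx";
        split_operands(ops);            /* memory operand, then %rcx */
        return 0;
}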
-+ */ -+ bb_start_block0_special(); -+ bb_reg_state_print(bb_reg_state); -+ return; -+ } -+ -+ /* Merge all the input states for the current bb together */ -+ first = 1; -+ changed = 0; -+ for (i = 0; i < bb_jmp_count; ++i) { -+ bb_jmp = bb_jmp_list + i; -+ if (bb_jmp->to != bb_curr->start) -+ continue; -+ state = bb_jmp->state; -+ if (!state) -+ continue; -+ if (first) { -+ size = bb_reg_state_size(state); -+ memcpy(bb_reg_state, state, size); -+ KDB_DEBUG_BB(" first state %p\n", state); -+ bb_reg_state_print(bb_reg_state); -+ first = 0; -+ continue; -+ } -+ -+ KDB_DEBUG_BB(" merging state %p\n", state); -+ /* Merge the register states */ -+ for (j = 0; j < ARRAY_SIZE(state->contains); ++j) { -+ if (memcmp(bb_reg_state->contains + j, -+ state->contains + j, -+ sizeof(bb_reg_state->contains[0]))) { -+ /* Different states for this register from two -+ * or more inputs, make it undefined. -+ */ -+ if (bb_reg_state->contains[j].value == -+ BBRG_UNDEFINED) { -+ KDB_DEBUG_BB(" ignoring %s\n", -+ bbrg_name[j + BBRG_RAX]); -+ } else { -+ bb_reg_set_undef(BBRG_RAX + j); -+ changed = 1; -+ } -+ } -+ } -+ -+ /* Merge the memory states. This relies on both -+ * bb_reg_state->memory and state->memory being sorted in -+ * descending order, with undefined entries at the end. -+ */ -+ c1 = bb_reg_state->memory; -+ c2 = state->memory; -+ j = k = 0; -+ while (j < bb_reg_state->mem_count && -+ k < state->mem_count) { -+ if (c1->offset_address < c2->offset_address) { -+ KDB_DEBUG_BB_OFFSET(c2->offset_address, -+ " ignoring c2->offset_address ", -+ "\n"); -+ ++c2; -+ ++k; -+ continue; -+ } -+ if (c1->offset_address > c2->offset_address) { -+ /* Memory location is not in all input states, -+ * delete the memory location. -+ */ -+ bb_delete_memory(c1->offset_address); -+ changed = 1; -+ ++c1; -+ ++j; -+ continue; -+ } -+ if (memcmp(c1, c2, sizeof(*c1))) { -+ /* Same location, different contents, delete -+ * the memory location. -+ */ -+ bb_delete_memory(c1->offset_address); -+ KDB_DEBUG_BB_OFFSET(c2->offset_address, -+ " ignoring c2->offset_address ", -+ "\n"); -+ changed = 1; -+ } -+ ++c1; -+ ++c2; -+ ++j; -+ ++k; -+ } -+ while (j < bb_reg_state->mem_count) { -+ bb_delete_memory(c1->offset_address); -+ changed = 1; -+ ++c1; -+ ++j; -+ } -+ } -+ if (changed) { -+ KDB_DEBUG_BB(" final state\n"); -+ bb_reg_state_print(bb_reg_state); -+ } -+} -+ -+/* We have reached the exit point from the current function, either a call to -+ * the next function or the instruction that was about to executed when an -+ * interrupt occurred. Save the current register state in bb_exit_state. -+ */ -+ -+static void -+bb_save_exit_state(void) -+{ -+ size_t size; -+ debug_kfree(bb_exit_state); -+ bb_exit_state = NULL; -+ bb_reg_state_canonicalize(); -+ size = bb_reg_state_size(bb_reg_state); -+ bb_exit_state = debug_kmalloc(size, GFP_ATOMIC); -+ if (!bb_exit_state) { -+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__); -+ bb_giveup = 1; -+ return; -+ } -+ memcpy(bb_exit_state, bb_reg_state, size); -+} -+ -+static int -+bb_pass2_do_changed_blocks(int allow_missing) -+{ -+ int i, j, missing, changed, maxloops; -+ unsigned long addr; -+ struct bb_jmp *bb_jmp; -+ KDB_DEBUG_BB("\n %s: allow_missing %d\n", __FUNCTION__, allow_missing); -+ /* Absolute worst case is we have to iterate over all the basic blocks -+ * in an "out of order" state, each iteration losing one register or -+ * memory state. Any more loops than that is a bug. 
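A small sketch (not part of the patch) of the merge rule applied by bb_pass2_start_block() above when a block has several predecessors: a register keeps its tracked contents only if every input state agrees on it, otherwise it is demoted to undefined; memory slots are merged with the same rule. Plain int arrays stand in for struct bb_reg_state.

#include <stdio.h>

#define NREGS 4
#define UNDEFINED (-1)

/* keep a tracked value only if every predecessor state agrees on it */
static void merge_state(int merged[NREGS], const int input[NREGS])
{
        int i;
        for (i = 0; i < NREGS; ++i)
                if (merged[i] != input[i])
                        merged[i] = UNDEFINED;
}

int main(void)
{
        int a[NREGS] = { 10, 20, 30, 40 };        /* from predecessor 1 */
        int b[NREGS] = { 10, 99, 30, UNDEFINED }; /* from predecessor 2 */
        int i;
        merge_state(a, b);
        for (i = 0; i < NREGS; ++i)
                printf("reg%d = %d\n", i, a[i]);  /* 10, -1, 30, -1 */
        return 0;
}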
"out of order" -+ * means that the layout of blocks in memory does not match the logic -+ * flow through those blocks so (for example) block 27 comes before -+ * block 2. To allow for out of order blocks, multiply maxloops by the -+ * number of blocks. -+ */ -+ maxloops = (KDB_INT_REGISTERS + bb_reg_state_max) * bb_count; -+ changed = 1; -+ do { -+ changed = 0; -+ for (i = 0; i < bb_count; ++i) { -+ bb_curr = bb_list[i]; -+ if (!bb_curr->changed) -+ continue; -+ missing = 0; -+ for (j = 0, bb_jmp = bb_jmp_list; -+ j < bb_jmp_count; -+ ++j, ++bb_jmp) { -+ if (bb_jmp->to == bb_curr->start && -+ !bb_jmp->state) -+ ++missing; -+ } -+ if (missing > allow_missing) -+ continue; -+ bb_curr->changed = 0; -+ changed = 1; -+ KDB_DEBUG_BB("\n bb[%d]\n", i); -+ bb_pass2_start_block(i); -+ for (addr = bb_curr->start; -+ addr <= bb_curr->end; ) { -+ bb_curr_addr = addr; -+ if (addr == bb_exit_addr) -+ bb_save_exit_state(); -+ addr += kdba_id_printinsn(addr, &kdb_di); -+ kdb_di.fprintf_func(NULL, "\n"); -+ if (bb_giveup) -+ goto done; -+ } -+ if (!bb_exit_state) { -+ /* ATTRIB_NORET functions are a problem with -+ * the current gcc. Allow the trailing address -+ * a bit of leaway. -+ */ -+ if (addr == bb_exit_addr || -+ addr == bb_exit_addr + 1) -+ bb_save_exit_state(); -+ } -+ if (bb_curr->drop_through) -+ bb_transfer(bb_curr->end, -+ bb_list[i+1]->start, 1); -+ } -+ if (maxloops-- == 0) { -+ kdb_printf("\n\n%s maxloops reached\n", -+ __FUNCTION__); -+ bb_giveup = 1; -+ goto done; -+ } -+ } while(changed); -+done: -+ for (i = 0; i < bb_count; ++i) { -+ bb_curr = bb_list[i]; -+ if (bb_curr->changed) -+ return 1; /* more to do, increase allow_missing */ -+ } -+ return 0; /* all blocks done */ -+} -+ -+/* Assume that the current function is a pass through function that does not -+ * refer to its register parameters. Exclude known asmlinkage functions and -+ * assume the other functions actually use their registers. 
-+ */ -+ -+static void -+bb_assume_pass_through(void) -+{ -+ static int first_time = 1; -+ if (strncmp(bb_func_name, "sys_", 4) == 0 || -+ strncmp(bb_func_name, "compat_sys_", 11) == 0 || -+ strcmp(bb_func_name, "schedule") == 0 || -+ strcmp(bb_func_name, "do_softirq") == 0 || -+ strcmp(bb_func_name, "printk") == 0 || -+ strcmp(bb_func_name, "vprintk") == 0 || -+ strcmp(bb_func_name, "preempt_schedule") == 0 || -+ strcmp(bb_func_name, "start_kernel") == 0 || -+ strcmp(bb_func_name, "csum_partial") == 0 || -+ strcmp(bb_func_name, "csum_partial_copy_generic") == 0 || -+ strcmp(bb_func_name, "math_state_restore") == 0 || -+ strcmp(bb_func_name, "panic") == 0 || -+ strcmp(bb_func_name, "kdb_printf") == 0 || -+ strcmp(bb_func_name, "kdb_interrupt") == 0) -+ return; -+ if (bb_asmlinkage_arch()) -+ return; -+ bb_reg_params = REGPARM; -+ if (first_time) { -+ kdb_printf(" %s has memory parameters but no register " -+ "parameters.\n Assuming it is a 'pass " -+ "through' function that does not refer to " -+ "its register\n parameters and setting %d " -+ "register parameters\n", -+ bb_func_name, REGPARM); -+ first_time = 0; -+ return; -+ } -+ kdb_printf(" Assuming %s is 'pass through' with %d register " -+ "parameters\n", -+ bb_func_name, REGPARM); -+} -+ -+static void -+bb_pass2(void) -+{ -+ int allow_missing; -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) -+ kdb_printf("%s: start\n", __FUNCTION__); -+ -+ kdb_di.fprintf_func = bb_dis_pass2; -+ kdb_di.print_address_func = bb_printaddr_pass2; -+ -+ bb_reg_state = debug_kmalloc(sizeof(*bb_reg_state), GFP_ATOMIC); -+ if (!bb_reg_state) { -+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__); -+ bb_giveup = 1; -+ return; -+ } -+ bb_list[0]->changed = 1; -+ -+ /* If a block does not have all its input states available then it is -+ * possible for a register to initially appear to hold a known value, -+ * but when other inputs are available then it becomes a variable -+ * value. The initial false state of "known" can generate false values -+ * for other registers and can even make it look like stack locations -+ * are being changed. -+ * -+ * To avoid these false positives, only process blocks which have all -+ * their inputs defined. That gives a clean depth first traversal of -+ * the tree, except for loops. If there are any loops, then start -+ * processing blocks with one missing input, then two missing inputs -+ * etc. -+ * -+ * Absolute worst case is we have to iterate over all the jmp entries, -+ * each iteration allowing one more missing input. Any more loops than -+ * that is a bug. Watch out for the corner case of 0 jmp entries. 
-+ */ -+ for (allow_missing = 0; allow_missing <= bb_jmp_count; ++allow_missing) { -+ if (!bb_pass2_do_changed_blocks(allow_missing)) -+ break; -+ if (bb_giveup) -+ break; -+ } -+ if (allow_missing > bb_jmp_count) { -+ kdb_printf("\n\n%s maxloops reached\n", -+ __FUNCTION__); -+ bb_giveup = 1; -+ return; -+ } -+ -+ if (bb_memory_params && bb_reg_params) -+ bb_reg_params = REGPARM; -+ if (REGPARM && -+ bb_memory_params && -+ !bb_reg_params) -+ bb_assume_pass_through(); -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) { -+ kdb_printf("%s: end bb_reg_params %d bb_memory_params %d\n", -+ __FUNCTION__, bb_reg_params, bb_memory_params); -+ if (bb_exit_state) { -+ kdb_printf("%s: bb_exit_state at " kdb_bfd_vma_fmt0 "\n", -+ __FUNCTION__, bb_exit_addr); -+ bb_do_reg_state_print(bb_exit_state); -+ } -+ } -+} -+ -+static void -+bb_cleanup(void) -+{ -+ int i; -+ struct bb* bb; -+ struct bb_reg_state *state; -+ while (bb_count) { -+ bb = bb_list[0]; -+ bb_delete(0); -+ } -+ debug_kfree(bb_list); -+ bb_list = NULL; -+ bb_count = bb_max = 0; -+ for (i = 0; i < bb_jmp_count; ++i) { -+ state = bb_jmp_list[i].state; -+ if (state && --state->ref_count == 0) -+ debug_kfree(state); -+ } -+ debug_kfree(bb_jmp_list); -+ bb_jmp_list = NULL; -+ bb_jmp_count = bb_jmp_max = 0; -+ debug_kfree(bb_reg_state); -+ bb_reg_state = NULL; -+ bb_reg_state_max = 0; -+ debug_kfree(bb_exit_state); -+ bb_exit_state = NULL; -+ bb_reg_params = bb_memory_params = 0; -+ bb_giveup = 0; -+} -+ -+static int -+bb_spurious_global_label(const char *func_name) -+{ -+ int i; -+ for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) { -+ if (strcmp(bb_spurious[i], func_name) == 0) -+ return 1; -+ } -+ return 0; -+} -+ -+/* Given the current actual register contents plus the exit state deduced from -+ * a basic block analysis of the current function, rollback the actual register -+ * contents to the values they had on entry to this function. -+ */ -+ -+static void -+bb_actual_rollback(const struct kdb_activation_record *ar) -+{ -+ int i, offset_address; -+ struct bb_memory_contains *c; -+ enum bb_reg_code reg; -+ unsigned long address, osp = 0; -+ struct bb_actual new[ARRAY_SIZE(bb_actual)]; -+ -+ -+ if (!bb_exit_state) { -+ kdb_printf("%s: no bb_exit_state, cannot rollback\n", -+ __FUNCTION__); -+ bb_giveup = 1; -+ return; -+ } -+ memcpy(bb_reg_state, bb_exit_state, bb_reg_state_size(bb_exit_state)); -+ memset(new, 0, sizeof(new)); -+ -+ /* The most important register for obtaining saved state is rsp so get -+ * its new value first. Prefer rsp if it is valid, then other -+ * registers. Saved values of rsp in memory are unusable without a -+ * register that points to memory. 
-+ */ -+ if (!bb_actual_valid(BBRG_RSP)) { -+ kdb_printf("%s: no starting value for RSP, cannot rollback\n", -+ __FUNCTION__); -+ bb_giveup = 1; -+ return; -+ } -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) -+ kdb_printf("%s: rsp " kdb_bfd_vma_fmt0, -+ __FUNCTION__, bb_actual_value(BBRG_RSP)); -+ i = BBRG_RSP; -+ if (!bb_is_osp_defined(i)) { -+ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) { -+ if (bb_is_osp_defined(i) && bb_actual_valid(i)) -+ break; -+ } -+ } -+ if (bb_is_osp_defined(i) && bb_actual_valid(i)) { -+ osp = new[BBRG_RSP - BBRG_RAX].value = -+ bb_actual_value(i) - bb_reg_code_offset(i); -+ new[BBRG_RSP - BBRG_RAX].valid = 1; -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) -+ kdb_printf(" -> osp " kdb_bfd_vma_fmt0 "\n", osp); -+ } else { -+ bb_actual_set_valid(BBRG_RSP, 0); -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) -+ kdb_printf(" -> undefined\n"); -+ kdb_printf("%s: no ending value for RSP, cannot rollback\n", -+ __FUNCTION__); -+ bb_giveup = 1; -+ return; -+ } -+ -+ /* Now the other registers. First look at register values that have -+ * been copied to other registers. -+ */ -+ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) { -+ reg = bb_reg_code_value(i); -+ if (bb_is_int_reg(reg)) { -+ new[reg - BBRG_RAX] = bb_actual[i - BBRG_RAX]; -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) { -+ kdb_printf("%s: %s is in %s ", -+ __FUNCTION__, -+ bbrg_name[reg], -+ bbrg_name[i]); -+ if (bb_actual_valid(i)) -+ kdb_printf(" -> " kdb_bfd_vma_fmt0 "\n", -+ bb_actual_value(i)); -+ else -+ kdb_printf("(invalid)\n"); -+ } -+ } -+ } -+ -+ /* Finally register values that have been saved on stack */ -+ for (i = 0, c = bb_reg_state->memory; -+ i < bb_reg_state->mem_count; -+ ++i, ++c) { -+ offset_address = c->offset_address; -+ reg = c->value; -+ if (!bb_is_int_reg(reg)) -+ continue; -+ address = osp + offset_address; -+ if (address < ar->stack.logical_start || -+ address >= ar->stack.logical_end) { -+ new[reg - BBRG_RAX].value = 0; -+ new[reg - BBRG_RAX].valid = 0; -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) -+ kdb_printf("%s: %s -> undefined\n", -+ __FUNCTION__, -+ bbrg_name[reg]); -+ } else { -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) { -+ kdb_printf("%s: %s -> *(osp", -+ __FUNCTION__, -+ bbrg_name[reg]); -+ KDB_DEBUG_BB_OFFSET_PRINTF(offset_address, "", " "); -+ kdb_printf(kdb_bfd_vma_fmt0, address); -+ } -+ new[reg - BBRG_RAX].value = *(bfd_vma *)address; -+ new[reg - BBRG_RAX].valid = 1; -+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) -+ kdb_printf(") = " kdb_bfd_vma_fmt0 "\n", -+ new[reg - BBRG_RAX].value); -+ } -+ } -+ -+ memcpy(bb_actual, new, sizeof(bb_actual)); -+} -+ -+/* Return true if the current function is an interrupt handler */ -+ -+static bool -+bb_interrupt_handler(kdb_machreg_t rip) -+{ -+ unsigned long disp8, disp32, target, addr = (unsigned long)rip; -+ unsigned char code[5]; -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(bb_hardware_handlers); ++i) -+ if (strcmp(bb_func_name, bb_hardware_handlers[i]) == 0) -+ return 1; -+ -+ /* Given the large number of interrupt handlers, it is easiest to look -+ * at the next instruction and see if it is a jmp to the common exit -+ * routines. 
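A toy illustration of the rollback arithmetic used here, with an invented frame in which the function pushed %rbx and subtracted 0x18 from %rsp. It only shows how osp and one saved register fall out of a known offset; the real bb_reg_state bookkeeping is much richer.

    #include <stdio.h>

    int main(void)
    {
        /* Invented frame: "push %rbx; sub $0x18,%rsp", so the exit state
         * would record rsp = osp - 0x20 and "entry rbx saved at osp - 0x8".
         */
        unsigned long stack[8] = { 0 };
        unsigned long osp     = (unsigned long)&stack[8]; /* rsp on entry */
        unsigned long rsp_now = osp - 0x20;
        *(unsigned long *)(osp - 0x8) = 0x1234;           /* the pushed rbx */

        /* Rollback: recover osp from the known offset, then reload the
         * saved register from memory relative to osp.
         */
        unsigned long osp_rb = rsp_now + 0x20;
        unsigned long rbx_rb = *(unsigned long *)(osp_rb - 0x8);
        printf("osp=%#lx rbx=%#lx\n", osp_rb, rbx_rb);
        return 0;
    }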
-+ */ -+ if (kdb_getarea(code, addr) || -+ kdb_getword(&disp32, addr+1, 4) || -+ kdb_getword(&disp8, addr+1, 1)) -+ return 0; /* not a valid code address */ -+ if (code[0] == 0xe9) { -+ target = addr + (s32) disp32 + 5; /* jmp disp32 */ -+ if (target == bb_ret_from_intr || -+ target == bb_common_interrupt || -+ target == bb_error_entry) -+ return 1; -+ } -+ if (code[0] == 0xeb) { -+ target = addr + (s8) disp8 + 2; /* jmp disp8 */ -+ if (target == bb_ret_from_intr || -+ target == bb_common_interrupt || -+ target == bb_error_entry) -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/* Copy argument information that was deduced by the basic block analysis and -+ * rollback into the kdb stack activation record. -+ */ -+ -+static void -+bb_arguments(struct kdb_activation_record *ar) -+{ -+ int i; -+ enum bb_reg_code reg; -+ kdb_machreg_t rsp; -+ ar->args = bb_reg_params + bb_memory_params; -+ bitmap_zero(ar->valid.bits, KDBA_MAXARGS); -+ for (i = 0; i < bb_reg_params; ++i) { -+ reg = bb_param_reg[i]; -+ if (bb_actual_valid(reg)) { -+ ar->arg[i] = bb_actual_value(reg); -+ set_bit(i, ar->valid.bits); -+ } -+ } -+ if (!bb_actual_valid(BBRG_RSP)) -+ return; -+ rsp = bb_actual_value(BBRG_RSP); -+ for (i = bb_reg_params; i < ar->args; ++i) { -+ rsp += KDB_WORD_SIZE; -+ if (kdb_getarea(ar->arg[i], rsp) == 0) -+ set_bit(i, ar->valid.bits); -+ } -+} -+ -+/* Given an exit address from a function, decompose the entire function into -+ * basic blocks and determine the register state at the exit point. -+ */ -+ -+static void -+kdb_bb(unsigned long exit) -+{ -+ kdb_symtab_t symtab; -+ if (!kdbnearsym(exit, &symtab)) { -+ kdb_printf("%s: address " kdb_bfd_vma_fmt0 " not recognised\n", -+ __FUNCTION__, exit); -+ bb_giveup = 1; -+ return; -+ } -+ bb_exit_addr = exit; -+ bb_mod_name = symtab.mod_name; -+ bb_func_name = symtab.sym_name; -+ bb_func_start = symtab.sym_start; -+ bb_func_end = symtab.sym_end; -+ /* Various global labels exist in the middle of assembler code and have -+ * a non-standard state. Ignore these labels and use the start of the -+ * previous label instead. -+ */ -+ while (bb_spurious_global_label(symtab.sym_name)) { -+ if (!kdbnearsym(symtab.sym_start - 1, &symtab)) -+ break; -+ bb_func_start = symtab.sym_start; -+ } -+ bb_mod_name = symtab.mod_name; -+ bb_func_name = symtab.sym_name; -+ bb_func_start = symtab.sym_start; -+ /* Ignore spurious labels past this point and use the next non-spurious -+ * label as the end point. -+ */ -+ if (kdbnearsym(bb_func_end, &symtab)) { -+ while (bb_spurious_global_label(symtab.sym_name)) { -+ bb_func_end = symtab.sym_end; -+ if (!kdbnearsym(symtab.sym_end + 1, &symtab)) -+ break; -+ } -+ } -+ bb_pass1(); -+ if (!bb_giveup) -+ bb_pass2(); -+ if (bb_giveup) -+ kdb_printf("%s: " kdb_bfd_vma_fmt0 -+ " [%s]%s failed at " kdb_bfd_vma_fmt0 "\n\n", -+ __FUNCTION__, exit, -+ bb_mod_name, bb_func_name, bb_curr_addr); -+} -+ -+static int -+kdb_bb1(int argc, const char **argv) -+{ -+ int diag, nextarg = 1; -+ kdb_machreg_t addr; -+ unsigned long offset; -+ -+ bb_cleanup(); /* in case previous command was interrupted */ -+ kdba_id_init(&kdb_di); -+ if (argc != 1) -+ return KDB_ARGCOUNT; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -+ if (diag) -+ return diag; -+ if (!addr) -+ return KDB_BADADDR; -+ kdb_save_flags(); -+ kdb_flags |= KDB_DEBUG_FLAG_BB << KDB_DEBUG_FLAG_SHIFT; -+ kdb_bb(addr); -+ bb_cleanup(); -+ kdb_restore_flags(); -+ kdbnearsym_cleanup(); -+ return 0; -+} -+ -+/* Run a basic block analysis on every function in the base kernel. 
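The displacement arithmetic above can be checked in isolation. This standalone sketch (jmp_target() is an invented helper, not kernel code) decodes the target of the two jmp encodings the patch looks for: 0xe9 with a signed 32-bit displacement and 0xeb with a signed 8-bit one.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* 0xe9 is a 5-byte instruction, 0xeb a 2-byte one; the target is the
     * address of the next instruction plus the signed displacement.
     */
    static uint64_t jmp_target(uint64_t addr, const uint8_t *code)
    {
        if (code[0] == 0xe9) {
            int32_t disp32;
            memcpy(&disp32, code + 1, 4);
            return addr + 5 + disp32;
        }
        if (code[0] == 0xeb)
            return addr + 2 + (int8_t)code[1];
        return 0;
    }

    int main(void)
    {
        const uint8_t jmp_short[] = { 0xeb, 0xfe };          /* jmp to self */
        const uint8_t jmp_near[]  = { 0xe9, 0x10, 0, 0, 0 }; /* jmp .+0x15 */
        printf("%#llx\n", (unsigned long long)jmp_target(0x1000, jmp_short));
        printf("%#llx\n", (unsigned long long)jmp_target(0x1000, jmp_near));
        return 0;
    }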
Used as a -+ * global sanity check to find errors in the basic block code. -+ */ -+ -+static int -+kdb_bb_all(int argc, const char **argv) -+{ -+ loff_t pos = 0; -+ const char *symname; -+ unsigned long addr; -+ int i, max_errors = 20; -+ struct bb_name_state *r; -+ kdb_printf("%s: build variables:" -+ " CCVERSION \"" __stringify(CCVERSION) "\"" -+#ifdef CONFIG_X86_64 -+ " CONFIG_X86_64" -+#endif -+#ifdef CONFIG_4KSTACKS -+ " CONFIG_4KSTACKS" -+#endif -+#ifdef CONFIG_PREEMPT -+ " CONFIG_PREEMPT" -+#endif -+#ifdef CONFIG_VM86 -+ " CONFIG_VM86" -+#endif -+#ifdef CONFIG_FRAME_POINTER -+ " CONFIG_FRAME_POINTER" -+#endif -+#ifdef CONFIG_TRACE_IRQFLAGS -+ " CONFIG_TRACE_IRQFLAGS" -+#endif -+#ifdef CONFIG_HIBERNATION -+ " CONFIG_HIBERNATION" -+#endif -+#ifdef CONFIG_KPROBES -+ " CONFIG_KPROBES" -+#endif -+#ifdef CONFIG_KEXEC -+ " CONFIG_KEXEC" -+#endif -+#ifdef CONFIG_MATH_EMULATION -+ " CONFIG_MATH_EMULATION" -+#endif -+#ifdef CONFIG_XEN -+ " CONFIG_XEN" -+#endif -+#ifdef CONFIG_DEBUG_INFO -+ " CONFIG_DEBUG_INFO" -+#endif -+#ifdef NO_SIBLINGS -+ " NO_SIBLINGS" -+#endif -+ " REGPARM=" __stringify(REGPARM) -+ "\n\n", __FUNCTION__); -+ for (i = 0, r = bb_special_cases; -+ i < ARRAY_SIZE(bb_special_cases); -+ ++i, ++r) { -+ if (!r->address) -+ kdb_printf("%s: cannot find special_case name %s\n", -+ __FUNCTION__, r->name); -+ } -+ for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) { -+ if (!kallsyms_lookup_name(bb_spurious[i])) -+ kdb_printf("%s: cannot find spurious label %s\n", -+ __FUNCTION__, bb_spurious[i]); -+ } -+ while ((symname = kdb_walk_kallsyms(&pos))) { -+ if (strcmp(symname, "_stext") == 0 || -+ strcmp(symname, "stext") == 0) -+ break; -+ } -+ if (!symname) { -+ kdb_printf("%s: cannot find _stext\n", __FUNCTION__); -+ return 0; -+ } -+ kdba_id_init(&kdb_di); -+ i = 0; -+ while ((symname = kdb_walk_kallsyms(&pos))) { -+ if (strcmp(symname, "_etext") == 0) -+ break; -+ if (i++ % 100 == 0) -+ kdb_printf("."); -+ /* x86_64 has some 16 bit functions that appear between stext -+ * and _etext. Skip them. 
-+ */ -+ if (strcmp(symname, "verify_cpu") == 0 || -+ strcmp(symname, "verify_cpu_noamd") == 0 || -+ strcmp(symname, "verify_cpu_sse_test") == 0 || -+ strcmp(symname, "verify_cpu_no_longmode") == 0 || -+ strcmp(symname, "verify_cpu_sse_ok") == 0 || -+ strcmp(symname, "mode_seta") == 0 || -+ strcmp(symname, "bad_address") == 0 || -+ strcmp(symname, "wakeup_code") == 0 || -+ strcmp(symname, "wakeup_code_start") == 0 || -+ strcmp(symname, "wakeup_start") == 0 || -+ strcmp(symname, "wakeup_32_vector") == 0 || -+ strcmp(symname, "wakeup_32") == 0 || -+ strcmp(symname, "wakeup_long64_vector") == 0 || -+ strcmp(symname, "wakeup_long64") == 0 || -+ strcmp(symname, "gdta") == 0 || -+ strcmp(symname, "idt_48a") == 0 || -+ strcmp(symname, "gdt_48a") == 0 || -+ strcmp(symname, "bogus_real_magic") == 0 || -+ strcmp(symname, "bogus_64_magic") == 0 || -+ strcmp(symname, "no_longmode") == 0 || -+ strcmp(symname, "mode_set") == 0 || -+ strcmp(symname, "mode_seta") == 0 || -+ strcmp(symname, "setbada") == 0 || -+ strcmp(symname, "check_vesa") == 0 || -+ strcmp(symname, "check_vesaa") == 0 || -+ strcmp(symname, "_setbada") == 0 || -+ strcmp(symname, "wakeup_stack_begin") == 0 || -+ strcmp(symname, "wakeup_stack") == 0 || -+ strcmp(symname, "wakeup_level4_pgt") == 0 || -+ strcmp(symname, "acpi_copy_wakeup_routine") == 0 || -+ strcmp(symname, "wakeup_end") == 0 || -+ strcmp(symname, "do_suspend_lowlevel_s4bios") == 0 || -+ strcmp(symname, "do_suspend_lowlevel") == 0 || -+ strcmp(symname, "wakeup_pmode_return") == 0 || -+ strcmp(symname, "restore_registers") == 0) -+ continue; -+ /* __kprobes_text_end contains branches to the middle of code, -+ * with undefined states. -+ */ -+ if (strcmp(symname, "__kprobes_text_end") == 0) -+ continue; -+ /* Data in the middle of the text segment :( */ -+ if (strcmp(symname, "level2_kernel_pgt") == 0 || -+ strcmp(symname, "level3_kernel_pgt") == 0) -+ continue; -+ if (bb_spurious_global_label(symname)) -+ continue; -+ if ((addr = kallsyms_lookup_name(symname)) == 0) -+ continue; -+ // kdb_printf("BB " kdb_bfd_vma_fmt0 " %s\n", addr, symname); -+ bb_cleanup(); /* in case previous command was interrupted */ -+ kdbnearsym_cleanup(); -+ kdb_bb(addr); -+ touch_nmi_watchdog(); -+ if (bb_giveup) { -+ if (max_errors-- == 0) { -+ kdb_printf("%s: max_errors reached, giving up\n", -+ __FUNCTION__); -+ break; -+ } else { -+ bb_giveup = 0; -+ } -+ } -+ } -+ kdb_printf("\n"); -+ bb_cleanup(); -+ kdbnearsym_cleanup(); -+ return 0; -+} -+ -+/* -+ *============================================================================= -+ * -+ * Everything above this line is doing basic block analysis, function by -+ * function. Everything below this line uses the basic block data to do a -+ * complete backtrace over all functions that are used by a process. -+ * -+ *============================================================================= -+ */ -+ -+ -+/*============================================================================*/ -+/* */ -+/* Most of the backtrace code and data is common to x86_64 and i386. This */ -+/* large ifdef contains all of the differences between the two architectures. */ -+/* */ -+/* Make sure you update the correct section of this ifdef. 
*/ -+/* */ -+/*============================================================================*/ -+#define XCS "cs" -+#define RSP "sp" -+#define RIP "ip" -+#define ARCH_RSP sp -+#define ARCH_RIP ip -+ -+#ifdef CONFIG_X86_64 -+ -+#define ARCH_NORMAL_PADDING (16 * 8) -+ -+/* x86_64 has multiple alternate stacks, with different sizes and different -+ * offsets to get the link from one stack to the next. All of the stacks are -+ * in the per_cpu area: either in the orig_ist or irq_stack_ptr. Debug events -+ * can even have multiple nested stacks within the single physical stack, -+ * each nested stack has its own link and some of those links are wrong. -+ * -+ * Consistent it's not! -+ * -+ * Do not assume that these stacks are aligned on their size. -+ */ -+#define INTERRUPT_STACK (N_EXCEPTION_STACKS + 1) -+void -+kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu, -+ struct kdb_activation_record *ar) -+{ -+ static struct { -+ const char *id; -+ unsigned int total_size; -+ unsigned int nested_size; -+ unsigned int next; -+ } *sdp, stack_data[] = { -+ [STACKFAULT_STACK - 1] = { "stackfault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) }, -+ [DOUBLEFAULT_STACK - 1] = { "doublefault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) }, -+ [NMI_STACK - 1] = { "nmi", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) }, -+ [DEBUG_STACK - 1] = { "debug", DEBUG_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) }, -+ [MCE_STACK - 1] = { "machine check", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) }, -+ [INTERRUPT_STACK - 1] = { "interrupt", IRQ_STACK_SIZE, IRQ_STACK_SIZE, IRQ_STACK_SIZE - sizeof(void *) }, -+ }; -+ unsigned long total_start = 0, total_size, total_end; -+ int sd, found = 0; -+ extern unsigned long kdba_orig_ist(int, int); -+ -+ for (sd = 0, sdp = stack_data; -+ sd < ARRAY_SIZE(stack_data); -+ ++sd, ++sdp) { -+ total_size = sdp->total_size; -+ if (!total_size) -+ continue; /* in case stack_data[] has any holes */ -+ if (cpu < 0) { -+ /* Arbitrary address which can be on any cpu, see if it -+ * falls within any of the alternate stacks -+ */ -+ int c; -+ for_each_online_cpu(c) { -+ if (sd == INTERRUPT_STACK - 1) -+ total_end = (unsigned long)per_cpu(irq_stack_ptr, c); -+ else -+ total_end = per_cpu(orig_ist, c).ist[sd]; -+ total_start = total_end - total_size; -+ if (addr >= total_start && addr < total_end) { -+ found = 1; -+ cpu = c; -+ break; -+ } -+ } -+ if (!found) -+ continue; -+ } -+ /* Only check the supplied or found cpu */ -+ if (sd == INTERRUPT_STACK - 1) -+ total_end = (unsigned long)per_cpu(irq_stack_ptr, cpu); -+ else -+ total_end = per_cpu(orig_ist, cpu).ist[sd]; -+ total_start = total_end - total_size; -+ if (addr >= total_start && addr < total_end) { -+ found = 1; -+ break; -+ } -+ } -+ if (!found) -+ return; -+ /* find which nested stack the address is in */ -+ while (addr > total_start + sdp->nested_size) -+ total_start += sdp->nested_size; -+ ar->stack.physical_start = total_start; -+ ar->stack.physical_end = total_start + sdp->nested_size; -+ ar->stack.logical_start = total_start; -+ ar->stack.logical_end = total_start + sdp->next; -+ ar->stack.next = *(unsigned long *)ar->stack.logical_end; -+ ar->stack.id = sdp->id; -+ -+ /* Nasty: when switching to the interrupt stack, the stack state of the -+ * caller is split over two stacks, the original stack and the -+ * interrupt stack. 
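A simplified sketch of the containment test and nested-frame walk performed for each alternate stack in kdba_get_stack_info_alternate() below; the per-cpu lookup and the stack_data[] table are reduced to plain parameters, and on_stack() is an invented name.

    #include <stdio.h>

    /* Given a stack's top (end) and size, report whether addr lies on it
     * and, if so, where the nested frame containing addr starts.  Stacks
     * are not assumed to be aligned on their size.
     */
    static int on_stack(unsigned long addr, unsigned long end,
                        unsigned long size, unsigned long nested,
                        unsigned long *nest_start)
    {
        unsigned long start = end - size;
        if (addr < start || addr >= end)
            return 0;
        while (addr > start + nested)
            start += nested;
        *nest_start = start;
        return 1;
    }

    int main(void)
    {
        unsigned long ns;
        /* e.g. a debug stack: 4 KiB nested frames in an 8 KiB allocation */
        if (on_stack(0x9000 + 0x1800, 0x9000 + 0x2000, 0x2000, 0x1000, &ns))
            printf("nested frame starts at %#lx\n", ns);
        return 0;
    }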
One word (the previous frame pointer) is stored on -+ * the interrupt stack, the rest of the interrupt data is in the old -+ * frame. To make the interrupted stack state look as though it is -+ * contiguous, copy the missing word from the interrupt stack to the -+ * original stack and adjust the new stack pointer accordingly. -+ */ -+ -+ if (sd == INTERRUPT_STACK - 1) { -+ *(unsigned long *)(ar->stack.next - KDB_WORD_SIZE) = -+ ar->stack.next; -+ ar->stack.next -= KDB_WORD_SIZE; -+ } -+} -+ -+/* rip is not in the thread struct for x86_64. We know that the stack value -+ * was saved in schedule near the label thread_return. Setting rip to -+ * thread_return lets the stack trace find that we are in schedule and -+ * correctly decode its prologue. -+ */ -+ -+static kdb_machreg_t -+kdba_bt_stack_rip(const struct task_struct *p) -+{ -+ return bb_thread_return; -+} -+ -+#else /* !CONFIG_X86_64 */ -+ -+#define ARCH_NORMAL_PADDING (19 * 4) -+ -+#ifdef CONFIG_4KSTACKS -+static struct thread_info **kdba_hardirq_ctx, **kdba_softirq_ctx; -+#endif /* CONFIG_4KSTACKS */ -+ -+/* On a 4K stack kernel, hardirq_ctx and softirq_ctx are [NR_CPUS] arrays. The -+ * first element of each per-cpu stack is a struct thread_info. -+ */ -+void -+kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu, -+ struct kdb_activation_record *ar) -+{ -+#ifdef CONFIG_4KSTACKS -+ struct thread_info *tinfo; -+ tinfo = (struct thread_info *)(addr & -THREAD_SIZE); -+ if (cpu < 0) { -+ /* Arbitrary address, see if it falls within any of the irq -+ * stacks -+ */ -+ int found = 0; -+ for_each_online_cpu(cpu) { -+ if (tinfo == kdba_hardirq_ctx[cpu] || -+ tinfo == kdba_softirq_ctx[cpu]) { -+ found = 1; -+ break; -+ } -+ } -+ if (!found) -+ return; -+ } -+ if (tinfo == kdba_hardirq_ctx[cpu] || -+ tinfo == kdba_softirq_ctx[cpu]) { -+ ar->stack.physical_start = (kdb_machreg_t)tinfo; -+ ar->stack.physical_end = ar->stack.physical_start + THREAD_SIZE; -+ ar->stack.logical_start = ar->stack.physical_start + -+ sizeof(struct thread_info); -+ ar->stack.logical_end = ar->stack.physical_end; -+ ar->stack.next = tinfo->previous_esp; -+ if (tinfo == kdba_hardirq_ctx[cpu]) -+ ar->stack.id = "hardirq_ctx"; -+ else -+ ar->stack.id = "softirq_ctx"; -+ } -+#endif /* CONFIG_4KSTACKS */ -+} -+ -+/* rip is in the thread struct for i386 */ -+ -+static kdb_machreg_t -+kdba_bt_stack_rip(const struct task_struct *p) -+{ -+ return p->thread.ip; -+} -+ -+#endif /* CONFIG_X86_64 */ -+ -+/* Given an address which claims to be on a stack, an optional cpu number and -+ * an optional task address, get information about the stack. -+ * -+ * t == NULL, cpu < 0 indicates an arbitrary stack address with no associated -+ * struct task, the address can be in an alternate stack or any task's normal -+ * stack. -+ * -+ * t != NULL, cpu >= 0 indicates a running task, the address can be in an -+ * alternate stack or that task's normal stack. -+ * -+ * t != NULL, cpu < 0 indicates a blocked task, the address can only be in that -+ * task's normal stack. -+ * -+ * t == NULL, cpu >= 0 is not a valid combination. 
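As a rough illustration of the normal-stack layout computed by kdba_get_stack_info() below (thread_info at the base of the THREAD_SIZE area, ARCH_NORMAL_PADDING reserved at the top); TI_SIZE is an invented stand-in for sizeof(struct thread_info).

    #include <stdio.h>

    #define THREAD_SIZE 8192ULL
    #define TI_SIZE     104ULL        /* pretend sizeof(struct thread_info) */
    #define NORMAL_PAD  (16 * 8)      /* ARCH_NORMAL_PADDING on x86_64 above */

    int main(void)
    {
        unsigned long long rsp  = 0xffff880012345e40ULL;
        unsigned long long base = rsp & ~(THREAD_SIZE - 1); /* rsp & -THREAD_SIZE */

        printf("physical %#llx-%#llx\n", base, base + THREAD_SIZE);
        printf("logical  %#llx-%#llx\n", base + TI_SIZE,
               base + THREAD_SIZE - NORMAL_PAD);
        return 0;
    }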
-+ */ -+ -+static void -+kdba_get_stack_info(kdb_machreg_t rsp, int cpu, -+ struct kdb_activation_record *ar, -+ const struct task_struct *t) -+{ -+ struct thread_info *tinfo; -+ struct task_struct *g, *p; -+ memset(&ar->stack, 0, sizeof(ar->stack)); -+ if (KDB_DEBUG(ARA)) -+ kdb_printf("%s: " RSP "=0x%lx cpu=%d task=%p\n", -+ __FUNCTION__, rsp, cpu, t); -+ if (t == NULL || cpu >= 0) { -+ kdba_get_stack_info_alternate(rsp, cpu, ar); -+ if (ar->stack.logical_start) -+ goto out; -+ } -+ rsp &= -THREAD_SIZE; -+ tinfo = (struct thread_info *)rsp; -+ if (t == NULL) { -+ /* Arbitrary stack address without an associated task, see if -+ * it falls within any normal process stack, including the idle -+ * tasks. -+ */ -+ kdb_do_each_thread(g, p) { -+ if (tinfo == task_thread_info(p)) { -+ t = p; -+ goto found; -+ } -+ } kdb_while_each_thread(g, p); -+ for_each_online_cpu(cpu) { -+ p = idle_task(cpu); -+ if (tinfo == task_thread_info(p)) { -+ t = p; -+ goto found; -+ } -+ } -+ found: -+ if (KDB_DEBUG(ARA)) -+ kdb_printf("%s: found task %p\n", __FUNCTION__, t); -+ } else if (cpu >= 0) { -+ /* running task */ -+ struct kdb_running_process *krp = kdb_running_process + cpu; -+ if (krp->p != t || tinfo != task_thread_info(t)) -+ t = NULL; -+ if (KDB_DEBUG(ARA)) -+ kdb_printf("%s: running task %p\n", __FUNCTION__, t); -+ } else { -+ /* blocked task */ -+ if (tinfo != task_thread_info(t)) -+ t = NULL; -+ if (KDB_DEBUG(ARA)) -+ kdb_printf("%s: blocked task %p\n", __FUNCTION__, t); -+ } -+ if (t) { -+ ar->stack.physical_start = rsp; -+ ar->stack.physical_end = rsp + THREAD_SIZE; -+ ar->stack.logical_start = rsp + sizeof(struct thread_info); -+ ar->stack.logical_end = ar->stack.physical_end - ARCH_NORMAL_PADDING; -+ ar->stack.next = 0; -+ ar->stack.id = "normal"; -+ } -+out: -+ if (ar->stack.physical_start && KDB_DEBUG(ARA)) { -+ kdb_printf("%s: ar->stack\n", __FUNCTION__); -+ kdb_printf(" physical_start=0x%lx\n", ar->stack.physical_start); -+ kdb_printf(" physical_end=0x%lx\n", ar->stack.physical_end); -+ kdb_printf(" logical_start=0x%lx\n", ar->stack.logical_start); -+ kdb_printf(" logical_end=0x%lx\n", ar->stack.logical_end); -+ kdb_printf(" next=0x%lx\n", ar->stack.next); -+ kdb_printf(" id=%s\n", ar->stack.id); -+ kdb_printf(" set MDCOUNT %ld\n", -+ (ar->stack.physical_end - ar->stack.physical_start) / -+ KDB_WORD_SIZE); -+ kdb_printf(" mds " kdb_machreg_fmt0 "\n", -+ ar->stack.physical_start); -+ } -+} -+ -+static void -+bt_print_one(kdb_machreg_t rip, kdb_machreg_t rsp, -+ const struct kdb_activation_record *ar, -+ const kdb_symtab_t *symtab, int argcount) -+{ -+ int btsymarg = 0; -+ int nosect = 0; -+ -+ kdbgetintenv("BTSYMARG", &btsymarg); -+ kdbgetintenv("NOSECT", &nosect); -+ -+ kdb_printf(kdb_machreg_fmt0, rsp); -+ kdb_symbol_print(rip, symtab, -+ KDB_SP_SPACEB|KDB_SP_VALUE); -+ if (argcount && ar->args) { -+ int i, argc = ar->args; -+ kdb_printf(" ("); -+ if (argc > argcount) -+ argc = argcount; -+ for (i = 0; i < argc; i++) { -+ if (i) -+ kdb_printf(", "); -+ if (test_bit(i, ar->valid.bits)) -+ kdb_printf("0x%lx", ar->arg[i]); -+ else -+ kdb_printf("invalid"); -+ } -+ kdb_printf(")"); -+ } -+ kdb_printf("\n"); -+ if (symtab->sym_name) { -+ if (!nosect) { -+ kdb_printf(" %s", -+ symtab->mod_name); -+ if (symtab->sec_name && symtab->sec_start) -+ kdb_printf(" 0x%lx 0x%lx", -+ symtab->sec_start, symtab->sec_end); -+ kdb_printf(" 0x%lx 0x%lx\n", -+ symtab->sym_start, symtab->sym_end); -+ } -+ } -+ if (argcount && ar->args && btsymarg) { -+ int i, argc = ar->args; -+ kdb_symtab_t arg_symtab; -+ for (i 
= 0; i < argc; i++) { -+ kdb_machreg_t arg = ar->arg[i]; -+ if (test_bit(i, ar->valid.bits) && -+ kdbnearsym(arg, &arg_symtab)) { -+ kdb_printf(" ARG %2d ", i); -+ kdb_symbol_print(arg, &arg_symtab, -+ KDB_SP_DEFAULT|KDB_SP_NEWLINE); -+ } -+ } -+ } -+} -+ -+static void -+kdba_bt_new_stack(struct kdb_activation_record *ar, kdb_machreg_t *rsp, -+ int *count, int *suppress) -+{ -+ /* Nasty: save_args builds a partial pt_regs, with r15 through -+ * rbx not being filled in. It passes struct pt_regs* to do_IRQ (in -+ * rdi) but the stack pointer is not adjusted to account for r15 -+ * through rbx. This has two effects :- -+ * -+ * (1) struct pt_regs on an external interrupt actually overlaps with -+ * the local stack area used by do_IRQ. Not only are r15-rbx -+ * undefined, the area that claims to hold their values can even -+ * change as the irq is processed. -+ * -+ * (2) The back stack pointer saved for the new frame is not pointing -+ * at pt_regs, it is pointing at rbx within the pt_regs passed to -+ * do_IRQ. -+ * -+ * There is nothing that I can do about (1) but I have to fix (2) -+ * because kdb backtrace looks for the "start" address of pt_regs as it -+ * walks back through the stacks. When switching from the interrupt -+ * stack to another stack, we have to assume that pt_regs has been -+ * seen and turn off backtrace supression. -+ */ -+ int probable_pt_regs = strcmp(ar->stack.id, "interrupt") == 0; -+ *rsp = ar->stack.next; -+ if (KDB_DEBUG(ARA)) -+ kdb_printf("new " RSP "=" kdb_machreg_fmt0 "\n", *rsp); -+ bb_actual_set_value(BBRG_RSP, *rsp); -+ kdba_get_stack_info(*rsp, -1, ar, NULL); -+ if (!ar->stack.physical_start) { -+ kdb_printf("+++ Cannot resolve next stack\n"); -+ } else if (!*suppress) { -+ kdb_printf(" ======================= <%s>\n", -+ ar->stack.id); -+ ++*count; -+ } -+ if (probable_pt_regs) -+ *suppress = 0; -+} -+ -+/* -+ * kdba_bt_stack -+ * -+ * Inputs: -+ * addr Address provided to 'bt' command, if any. -+ * argcount -+ * p Pointer to task for 'btp' command. -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * Ultimately all the bt* commands come through this routine. If -+ * old_style is 0 then it uses the basic block analysis to get an accurate -+ * backtrace with arguments, otherwise it falls back to the old method of -+ * printing anything on stack that looks like a kernel address. -+ * -+ * Allowing for the stack data pushed by the hardware is tricky. We -+ * deduce the presence of hardware pushed data by looking for interrupt -+ * handlers, either by name or by the code that they contain. This -+ * information must be applied to the next function up the stack, because -+ * the hardware data is above the saved rip for the interrupted (next) -+ * function. -+ * -+ * To make things worse, the amount of data pushed is arch specific and -+ * may depend on the rsp for the next function, not the current function. -+ * The number of bytes pushed by hardware cannot be calculated until we -+ * are actually processing the stack for the interrupted function and have -+ * its rsp. -+ * -+ * It is also possible for an interrupt to occur in user space and for the -+ * interrupt handler to also be interrupted. Check the code selector -+ * whenever the previous function is an interrupt handler and stop -+ * backtracing if the interrupt was not in kernel space. 
-+ */ -+ -+static int -+kdba_bt_stack(kdb_machreg_t addr, int argcount, const struct task_struct *p, -+ int old_style) -+{ -+ struct kdb_activation_record ar; -+ kdb_machreg_t rip = 0, rsp = 0, prev_rsp, cs; -+ kdb_symtab_t symtab; -+ int rip_at_rsp = 0, count = 0, btsp = 0, suppress, -+ interrupt_handler = 0, prev_interrupt_handler = 0, hardware_pushed, -+ prev_noret = 0; -+ struct pt_regs *regs = NULL; -+ -+ kdbgetintenv("BTSP", &btsp); -+ suppress = !btsp; -+ memset(&ar, 0, sizeof(ar)); -+ if (old_style) -+ kdb_printf("Using old style backtrace, unreliable with no arguments\n"); -+ -+ /* -+ * The caller may have supplied an address at which the stack traceback -+ * operation should begin. This address is assumed by this code to -+ * point to a return address on the stack to be traced back. -+ * -+ * Warning: type in the wrong address and you will get garbage in the -+ * backtrace. -+ */ -+ if (addr) { -+ rsp = addr; -+ kdb_getword(&rip, rsp, sizeof(rip)); -+ rip_at_rsp = 1; -+ suppress = 0; -+ kdba_get_stack_info(rsp, -1, &ar, NULL); -+ } else { -+ if (task_curr(p)) { -+ struct kdb_running_process *krp = -+ kdb_running_process + task_cpu(p); -+ kdb_machreg_t cs; -+ regs = krp->regs; -+ if (krp->seqno && -+ krp->p == p && -+ krp->seqno >= kdb_seqno - 1 && -+ !KDB_NULL_REGS(regs)) { -+ /* valid saved state, continue processing */ -+ } else { -+ kdb_printf -+ ("Process did not save state, cannot backtrace\n"); -+ kdb_ps1(p); -+ return 0; -+ } -+ kdba_getregcontents(XCS, regs, &cs); -+ if ((cs & 0xffff) != __KERNEL_CS) { -+ kdb_printf("Stack is not in kernel space, backtrace not available\n"); -+ return 0; -+ } -+ rip = krp->arch.ARCH_RIP; -+ rsp = krp->arch.ARCH_RSP; -+ kdba_get_stack_info(rsp, kdb_process_cpu(p), &ar, p); -+ } else { -+ /* Not on cpu, assume blocked. Blocked tasks do not -+ * have pt_regs. p->thread contains some data, alas -+ * what it contains differs between i386 and x86_64. -+ */ -+ rip = kdba_bt_stack_rip(p); -+ rsp = p->thread.sp; -+ suppress = 0; -+ kdba_get_stack_info(rsp, -1, &ar, p); -+ } -+ } -+ if (!ar.stack.physical_start) { -+ kdb_printf(RSP "=0x%lx is not in a valid kernel stack, backtrace not available\n", -+ rsp); -+ return 0; -+ } -+ memset(&bb_actual, 0, sizeof(bb_actual)); -+ bb_actual_set_value(BBRG_RSP, rsp); -+ bb_actual_set_valid(BBRG_RSP, 1); -+ -+ kdb_printf(RSP "%*s" RIP "%*sFunction (args)\n", -+ 2*KDB_WORD_SIZE, " ", -+ 2*KDB_WORD_SIZE, " "); -+ if (ar.stack.next && !suppress) -+ kdb_printf(" ======================= <%s>\n", -+ ar.stack.id); -+ -+ bb_cleanup(); -+ /* Run through all the stacks */ -+ while (ar.stack.physical_start) { -+ if (rip_at_rsp) { -+ rip = *(kdb_machreg_t *)rsp; -+ /* I wish that gcc was fixed to include a nop -+ * instruction after ATTRIB_NORET functions. The lack -+ * of a nop means that the return address points to the -+ * start of next function, so fudge it to point to one -+ * byte previous. -+ * -+ * No, we cannot just decrement all rip values. -+ * Sometimes an rip legally points to the start of a -+ * function, e.g. interrupted code or hand crafted -+ * assembler. 
-+ */ -+ if (prev_noret) { -+ kdbnearsym(rip, &symtab); -+ if (rip == symtab.sym_start) { -+ --rip; -+ if (KDB_DEBUG(ARA)) -+ kdb_printf("\tprev_noret, " RIP -+ "=0x%lx\n", rip); -+ } -+ } -+ } -+ kdbnearsym(rip, &symtab); -+ if (old_style) { -+ if (__kernel_text_address(rip) && !suppress) { -+ bt_print_one(rip, rsp, &ar, &symtab, 0); -+ ++count; -+ } -+ if (rsp == (unsigned long)regs) { -+ if (ar.stack.next && suppress) -+ kdb_printf(" ======================= <%s>\n", -+ ar.stack.id); -+ ++count; -+ suppress = 0; -+ } -+ rsp += sizeof(rip); -+ rip_at_rsp = 1; -+ if (rsp >= ar.stack.logical_end) { -+ if (!ar.stack.next) -+ break; -+ kdba_bt_new_stack(&ar, &rsp, &count, &suppress); -+ rip_at_rsp = 0; -+ continue; -+ } -+ } else { -+ /* Start each analysis with no dynamic data from the -+ * previous kdb_bb() run. -+ */ -+ bb_cleanup(); -+ kdb_bb(rip); -+ if (bb_giveup) -+ break; -+ prev_interrupt_handler = interrupt_handler; -+ interrupt_handler = bb_interrupt_handler(rip); -+ prev_rsp = rsp; -+ if (rip_at_rsp) { -+ if (prev_interrupt_handler) { -+ cs = *((kdb_machreg_t *)rsp + 1) & 0xffff; -+ hardware_pushed = -+ bb_hardware_pushed_arch(rsp, &ar); -+ } else { -+ cs = __KERNEL_CS; -+ hardware_pushed = 0; -+ } -+ rsp += sizeof(rip) + hardware_pushed; -+ if (KDB_DEBUG(ARA)) -+ kdb_printf("%s: " RSP " " -+ kdb_machreg_fmt0 -+ " -> " kdb_machreg_fmt0 -+ " hardware_pushed %d" -+ " prev_interrupt_handler %d" -+ " cs 0x%lx\n", -+ __FUNCTION__, -+ prev_rsp, -+ rsp, -+ hardware_pushed, -+ prev_interrupt_handler, -+ cs); -+ if (rsp >= ar.stack.logical_end && -+ ar.stack.next) { -+ kdba_bt_new_stack(&ar, &rsp, &count, -+ &suppress); -+ rip_at_rsp = 0; -+ continue; -+ } -+ bb_actual_set_value(BBRG_RSP, rsp); -+ } else { -+ cs = __KERNEL_CS; -+ } -+ rip_at_rsp = 1; -+ bb_actual_rollback(&ar); -+ if (bb_giveup) -+ break; -+ if (bb_actual_value(BBRG_RSP) < rsp) { -+ kdb_printf("%s: " RSP " is going backwards, " -+ kdb_machreg_fmt0 " -> " -+ kdb_machreg_fmt0 "\n", -+ __FUNCTION__, -+ rsp, -+ bb_actual_value(BBRG_RSP)); -+ bb_giveup = 1; -+ break; -+ } -+ bb_arguments(&ar); -+ if (!suppress) { -+ bt_print_one(rip, prev_rsp, &ar, &symtab, argcount); -+ ++count; -+ } -+ /* Functions that terminate the backtrace */ -+ if (strcmp(bb_func_name, "cpu_idle") == 0 || -+ strcmp(bb_func_name, "child_rip") == 0) -+ break; -+ if (rsp >= ar.stack.logical_end && -+ !ar.stack.next) -+ break; -+ if (rsp <= (unsigned long)regs && -+ bb_actual_value(BBRG_RSP) > (unsigned long)regs) { -+ if (ar.stack.next && suppress) -+ kdb_printf(" ======================= <%s>\n", -+ ar.stack.id); -+ ++count; -+ suppress = 0; -+ } -+ if (cs != __KERNEL_CS) { -+ kdb_printf("Reached user space\n"); -+ break; -+ } -+ rsp = bb_actual_value(BBRG_RSP); -+ } -+ prev_noret = bb_noret(bb_func_name); -+ if (count > 200) -+ break; -+ } -+ if (bb_giveup) -+ return 1; -+ bb_cleanup(); -+ kdbnearsym_cleanup(); -+ -+ if (count > 200) { -+ kdb_printf("bt truncated, count limit reached\n"); -+ return 1; -+ } else if (suppress) { -+ kdb_printf -+ ("bt did not find pt_regs - no trace produced. Suggest 'set BTSP 1'\n"); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/* -+ * kdba_bt_address -+ * -+ * Do a backtrace starting at a specified stack address. Use this if the -+ * heuristics get the stack decode wrong. -+ * -+ * Inputs: -+ * addr Address provided to 'bt' command. -+ * argcount -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. 
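The "old style" fallback handled above amounts to scanning the stack for anything that looks like a text address. A user-space sketch with a pretend text range, purely to show the idea and why it cannot recover arguments:

    #include <stdio.h>

    /* Walk the stack a word at a time and report anything that falls inside
     * an invented text segment (0x400000-0x500000 here).
     */
    static void old_style_bt(const unsigned long *sp, int nwords)
    {
        int i;
        for (i = 0; i < nwords; ++i)
            if (sp[i] >= 0x400000 && sp[i] < 0x500000)
                printf("  possible return address %#lx at slot %d\n",
                       sp[i], i);
    }

    int main(void)
    {
        unsigned long stack[] = { 0xdead, 0x401234, 0x7, 0x404abc, 0 };
        old_style_bt(stack, sizeof(stack) / sizeof(stack[0]));
        return 0;
    }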
-+ * Remarks: -+ * mds %rsp comes in handy when examining the stack to do a manual -+ * traceback. -+ */ -+ -+int kdba_bt_address(kdb_machreg_t addr, int argcount) -+{ -+ int ret; -+ kdba_id_init(&kdb_di); /* kdb_bb needs this done once */ -+ ret = kdba_bt_stack(addr, argcount, NULL, 0); -+ if (ret == 1) -+ ret = kdba_bt_stack(addr, argcount, NULL, 1); -+ return ret; -+} -+ -+/* -+ * kdba_bt_process -+ * -+ * Do a backtrace for a specified process. -+ * -+ * Inputs: -+ * p Struct task pointer extracted by 'bt' command. -+ * argcount -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ */ -+ -+int kdba_bt_process(const struct task_struct *p, int argcount) -+{ -+ int ret; -+ kdba_id_init(&kdb_di); /* kdb_bb needs this done once */ -+ ret = kdba_bt_stack(0, argcount, p, 0); -+ if (ret == 1) -+ ret = kdba_bt_stack(0, argcount, p, 1); -+ return ret; -+} -+ -+static int __init kdba_bt_x86_init(void) -+{ -+ int i, c, cp = -1; -+ struct bb_name_state *r; -+ -+ kdb_register_repeat("bb1", kdb_bb1, "", "Analyse one basic block", 0, KDB_REPEAT_NONE); -+ kdb_register_repeat("bb_all", kdb_bb_all, "", "Backtrace check on all built in functions", 0, KDB_REPEAT_NONE); -+ -+ /* Split the opcode usage table by the first letter of each set of -+ * opcodes, for faster mapping of opcode to its operand usage. -+ */ -+ for (i = 0; i < ARRAY_SIZE(bb_opcode_usage_all); ++i) { -+ c = bb_opcode_usage_all[i].opcode[0] - 'a'; -+ if (c != cp) { -+ cp = c; -+ bb_opcode_usage[c].opcode = bb_opcode_usage_all + i; -+ } -+ ++bb_opcode_usage[c].size; -+ } -+ -+ bb_common_interrupt = kallsyms_lookup_name("common_interrupt"); -+ bb_error_entry = kallsyms_lookup_name("error_entry"); -+ bb_ret_from_intr = kallsyms_lookup_name("ret_from_intr"); -+ bb_thread_return = kallsyms_lookup_name("thread_return"); -+ bb_sync_regs = kallsyms_lookup_name("sync_regs"); -+ bb_save_v86_state = kallsyms_lookup_name("save_v86_state"); -+ bb__sched_text_start = kallsyms_lookup_name("__sched_text_start"); -+ bb__sched_text_end = kallsyms_lookup_name("__sched_text_end"); -+ bb_save_args = kallsyms_lookup_name("save_args"); -+ bb_save_rest = kallsyms_lookup_name("save_rest"); -+ bb_save_paranoid = kallsyms_lookup_name("save_paranoid"); -+ for (i = 0, r = bb_special_cases; -+ i < ARRAY_SIZE(bb_special_cases); -+ ++i, ++r) { -+ r->address = kallsyms_lookup_name(r->name); -+ } -+ -+#ifdef CONFIG_4KSTACKS -+ kdba_hardirq_ctx = (struct thread_info **)kallsyms_lookup_name("hardirq_ctx"); -+ kdba_softirq_ctx = (struct thread_info **)kallsyms_lookup_name("softirq_ctx"); -+#endif /* CONFIG_4KSTACKS */ -+ -+ return 0; -+} -+ -+static void __exit kdba_bt_x86_exit(void) -+{ -+ kdb_unregister("bb1"); -+ kdb_unregister("bb_all"); -+} -+ -+module_init(kdba_bt_x86_init) -+module_exit(kdba_bt_x86_exit) ---- /dev/null -+++ b/arch/x86/kdb/kdba_id.c -@@ -0,0 +1,261 @@ -+/* -+ * Kernel Debugger Architecture Dependent Instruction Disassembly -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * kdba_dis_getsym -+ * -+ * Get a symbol for the disassembler. 
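The per-letter index built in kdba_bt_x86_init() can be sketched on its own; all[] here is a tiny invented opcode list standing in for bb_opcode_usage_all, and the point is simply that lookups only have to scan the slice for one initial letter.

    #include <stdio.h>

    struct usage { const char *opcode; };

    /* Sorted master table plus a 26-entry index of (start, count) slices. */
    static const struct usage all[] = {
        { "adc" }, { "add" }, { "and" }, { "bt" }, { "call" }, { "cmp" },
    };
    static struct { const struct usage *start; int size; } letter['z' - 'a' + 1];

    int main(void)
    {
        int i, c, cp = -1;
        for (i = 0; i < (int)(sizeof(all) / sizeof(all[0])); ++i) {
            c = all[i].opcode[0] - 'a';
            if (c != cp) {
                cp = c;
                letter[c].start = all + i;
            }
            ++letter[c].size;
        }
        printf("'c' opcodes: %d, starting at %s\n",
               letter['c' - 'a'].size, letter['c' - 'a'].start->opcode);
        return 0;
    }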
-+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * dip Pointer to disassemble_info -+ * Returns: -+ * 0 -+ * Locking: -+ * Remarks: -+ * Not used for kdb. -+ */ -+ -+/* ARGSUSED */ -+static int -+kdba_dis_getsym(bfd_vma addr, disassemble_info *dip) -+{ -+ -+ return 0; -+} -+ -+/* -+ * kdba_printaddress -+ * -+ * Print (symbolically) an address. -+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * dip Pointer to disassemble_info -+ * flag True if a ":" sequence should follow the address -+ * Returns: -+ * 0 -+ * Locking: -+ * Remarks: -+ * -+ */ -+ -+/* ARGSUSED */ -+static void -+kdba_printaddress(kdb_machreg_t addr, disassemble_info *dip, int flag) -+{ -+ kdb_symtab_t symtab; -+ int spaces = 5; -+ unsigned int offset; -+ -+ /* -+ * Print a symbol name or address as necessary. -+ */ -+ kdbnearsym(addr, &symtab); -+ if (symtab.sym_name) { -+ /* Do not use kdb_symbol_print here, it always does -+ * kdb_printf but we want dip->fprintf_func. -+ */ -+ dip->fprintf_func(dip->stream, -+ "0x%0*lx %s", -+ (int)(2*sizeof(addr)), addr, symtab.sym_name); -+ if ((offset = addr - symtab.sym_start) == 0) { -+ spaces += 4; -+ } -+ else { -+ unsigned int o = offset; -+ while (o >>= 4) -+ --spaces; -+ dip->fprintf_func(dip->stream, "+0x%x", offset); -+ } -+ -+ } else { -+ dip->fprintf_func(dip->stream, "0x%lx", addr); -+ } -+ -+ if (flag) { -+ if (spaces < 1) { -+ spaces = 1; -+ } -+ dip->fprintf_func(dip->stream, ":%*s", spaces, " "); -+ } -+} -+ -+/* -+ * kdba_dis_printaddr -+ * -+ * Print (symbolically) an address. Called by GNU disassembly -+ * code via disassemble_info structure. -+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * dip Pointer to disassemble_info -+ * Returns: -+ * 0 -+ * Locking: -+ * Remarks: -+ * This function will never append ":" to the printed -+ * symbolic address. -+ */ -+ -+static void -+kdba_dis_printaddr(bfd_vma addr, disassemble_info *dip) -+{ -+ kdba_printaddress(addr, dip, 0); -+} -+ -+/* -+ * kdba_dis_getmem -+ * -+ * Fetch 'length' bytes from 'addr' into 'buf'. -+ * -+ * Parameters: -+ * addr Address for which to get symbol -+ * buf Address of buffer to fill with bytes from 'addr' -+ * length Number of bytes to fetch -+ * dip Pointer to disassemble_info -+ * Returns: -+ * 0 if data is available, otherwise error. -+ * Locking: -+ * Remarks: -+ * -+ */ -+ -+/* ARGSUSED */ -+static int -+kdba_dis_getmem(bfd_vma addr, bfd_byte *buf, unsigned int length, disassemble_info *dip) -+{ -+ return kdb_getarea_size(buf, addr, length); -+} -+ -+/* -+ * kdba_id_parsemode -+ * -+ * Parse IDMODE environment variable string and -+ * set appropriate value into "disassemble_info" structure. -+ * -+ * Parameters: -+ * mode Mode string -+ * dip Disassemble_info structure pointer -+ * Returns: -+ * Locking: -+ * Remarks: -+ * We handle the values 'x86' and '8086' to enable either -+ * 32-bit instruction set or 16-bit legacy instruction set. -+ */ -+ -+int -+kdba_id_parsemode(const char *mode, disassemble_info *dip) -+{ -+ if (mode) { -+ if (strcmp(mode, "x86_64") == 0) { -+ dip->mach = bfd_mach_x86_64; -+ } else if (strcmp(mode, "x86") == 0) { -+ dip->mach = bfd_mach_i386_i386; -+ } else if (strcmp(mode, "8086") == 0) { -+ dip->mach = bfd_mach_i386_i8086; -+ } else { -+ return KDB_BADMODE; -+ } -+ } -+ -+ return 0; -+} -+ -+/* -+ * kdba_check_pc -+ * -+ * Check that the pc is satisfactory. -+ * -+ * Parameters: -+ * pc Program Counter Value. -+ * Returns: -+ * None -+ * Locking: -+ * None. -+ * Remarks: -+ * Can change pc. 
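Output from the disassembler is routed through the disassemble_info callbacks rather than kdb_printf() directly, which is what lets pass 1/pass 2 of the basic block code substitute their own handlers. A cut-down sketch of that indirection; struct dis_info and to_console() are invented stand-ins, not the real binutils structure.

    #include <stdarg.h>
    #include <stdio.h>

    struct dis_info {
        void *stream;
        int (*fprintf_func)(void *stream, const char *fmt, ...);
    };

    /* Default sink: forward to stdout.  A caller could install a handler
     * that parses or discards the text instead.
     */
    static int to_console(void *stream, const char *fmt, ...)
    {
        va_list ap;
        int n;
        (void)stream;
        va_start(ap, fmt);
        n = vprintf(fmt, ap);
        va_end(ap);
        return n;
    }

    int main(void)
    {
        struct dis_info di = { NULL, to_console };
        di.fprintf_func(di.stream, "0x%08x  %s\n", 0x1000, "push %rbp");
        return 0;
    }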
-+ */ -+ -+void -+kdba_check_pc(kdb_machreg_t *pc) -+{ -+ /* No action */ -+} -+ -+/* -+ * kdba_id_printinsn -+ * -+ * Format and print a single instruction at 'pc'. Return the -+ * length of the instruction. -+ * -+ * Parameters: -+ * pc Program Counter Value. -+ * dip Disassemble_info structure pointer -+ * Returns: -+ * Length of instruction, -1 for error. -+ * Locking: -+ * None. -+ * Remarks: -+ * Depends on 'IDMODE' environment variable. -+ */ -+ -+int -+kdba_id_printinsn(kdb_machreg_t pc, disassemble_info *dip) -+{ -+ kdba_printaddress(pc, dip, 1); -+ return print_insn_i386_att(pc, dip); -+} -+ -+/* -+ * kdba_id_init -+ * -+ * Initialize the architecture dependent elements of -+ * the disassembly information structure -+ * for the GNU disassembler. -+ * -+ * Parameters: -+ * None. -+ * Outputs: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+void -+kdba_id_init(disassemble_info *dip) -+{ -+ dip->read_memory_func = kdba_dis_getmem; -+ dip->print_address_func = kdba_dis_printaddr; -+ dip->symbol_at_address_func = kdba_dis_getsym; -+ -+ dip->flavour = bfd_target_elf_flavour; -+ dip->arch = bfd_arch_i386; -+#ifdef CONFIG_X86_64 -+ dip->mach = bfd_mach_x86_64; -+#endif -+#ifdef CONFIG_X86_32 -+ dip->mach = bfd_mach_i386_i386; -+#endif -+ dip->endian = BFD_ENDIAN_LITTLE; -+ -+ dip->display_endian = BFD_ENDIAN_LITTLE; -+} ---- /dev/null -+++ b/arch/x86/kdb/kdba_io.c -@@ -0,0 +1,666 @@ -+/* -+ * Kernel Debugger Architecture Dependent Console I/O handler -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include "pc_keyb.h" -+ -+#ifdef CONFIG_VT_CONSOLE -+#define KDB_BLINK_LED 1 -+#else -+#undef KDB_BLINK_LED -+#endif -+ -+#ifdef CONFIG_KDB_USB -+ -+struct kdb_usb_kbd_info kdb_usb_kbds[KDB_USB_NUM_KEYBOARDS]; -+EXPORT_SYMBOL(kdb_usb_kbds); -+ -+extern int kdb_no_usb; -+ -+static unsigned char kdb_usb_keycode[256] = { -+ 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, -+ 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, -+ 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, -+ 27, 43, 84, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, -+ 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, -+ 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, -+ 72, 73, 82, 83, 86,127,116,117, 85, 89, 90, 91, 92, 93, 94, 95, -+ 120,121,122,123,134,138,130,132,128,129,131,137,133,135,136,113, -+ 115,114, 0, 0, 0,124, 0,181,182,183,184,185,186,187,188,189, -+ 190,191,192,193,194,195,196,197,198, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, -+ 150,158,159,128,136,177,178,176,142,152,173,140 -+}; -+ -+/* -+ * kdb_usb_keyboard_attach() -+ * Attach a USB keyboard to kdb. 
-+ */ -+int -+kdb_usb_keyboard_attach(struct urb *urb, unsigned char *buffer, -+ void *poll_func, void *compl_func, -+ kdb_hc_keyboard_attach_t kdb_hc_keyboard_attach, -+ kdb_hc_keyboard_detach_t kdb_hc_keyboard_detach, -+ unsigned int bufsize, -+ struct urb *hid_urb) -+{ -+ int i; -+ int rc = -1; -+ -+ if (kdb_no_usb) -+ return 0; -+ -+ /* -+ * Search through the array of KDB USB keyboards (kdb_usb_kbds) -+ * looking for a free index. If found, assign the keyboard to -+ * the array index. -+ */ -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ if (kdb_usb_kbds[i].urb) /* index is already assigned */ -+ continue; -+ -+ /* found a free array index */ -+ kdb_usb_kbds[i].urb = urb; -+ kdb_usb_kbds[i].buffer = buffer; -+ kdb_usb_kbds[i].poll_func = poll_func; -+ -+ kdb_usb_kbds[i].kdb_hc_urb_complete = compl_func; -+ kdb_usb_kbds[i].kdb_hc_keyboard_attach = kdb_hc_keyboard_attach; -+ kdb_usb_kbds[i].kdb_hc_keyboard_detach = kdb_hc_keyboard_detach; -+ -+ /* USB Host Controller specific Keyboadr attach callback. -+ * Currently only UHCI has this callback. -+ */ -+ if (kdb_usb_kbds[i].kdb_hc_keyboard_attach) -+ kdb_usb_kbds[i].kdb_hc_keyboard_attach(i, bufsize); -+ -+ rc = 0; /* success */ -+ -+ break; -+ } -+ -+ return rc; -+} -+EXPORT_SYMBOL_GPL (kdb_usb_keyboard_attach); -+ -+/* -+ * kdb_usb_keyboard_detach() -+ * Detach a USB keyboard from kdb. -+ */ -+int -+kdb_usb_keyboard_detach(struct urb *urb) -+{ -+ int i; -+ int rc = -1; -+ -+ if (kdb_no_usb) -+ return 0; -+ -+ /* -+ * Search through the array of KDB USB keyboards (kdb_usb_kbds) -+ * looking for the index with the matching URB. If found, -+ * clear the array index. -+ */ -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ if ((kdb_usb_kbds[i].urb != urb) && -+ (kdb_usb_kbds[i].hid_urb != urb)) -+ continue; -+ -+ /* found it, clear the index */ -+ -+ /* USB Host Controller specific Keyboard detach callback. -+ * Currently only UHCI has this callback. -+ */ -+ if (kdb_usb_kbds[i].kdb_hc_keyboard_detach) -+ kdb_usb_kbds[i].kdb_hc_keyboard_detach(urb, i); -+ -+ kdb_usb_kbds[i].urb = NULL; -+ kdb_usb_kbds[i].buffer = NULL; -+ kdb_usb_kbds[i].poll_func = NULL; -+ kdb_usb_kbds[i].caps_lock = 0; -+ kdb_usb_kbds[i].hid_urb = NULL; -+ -+ rc = 0; /* success */ -+ -+ break; -+ } -+ -+ return rc; -+} -+EXPORT_SYMBOL_GPL (kdb_usb_keyboard_detach); -+ -+/* -+ * get_usb_char -+ * This function drives the USB attached keyboards. -+ * Fetch the USB scancode and decode it. -+ */ -+static int -+get_usb_char(void) -+{ -+ int i; -+ unsigned char keycode, spec; -+ extern u_short plain_map[], shift_map[], ctrl_map[]; -+ int ret = 1; -+ int ret_key = -1, j, max; -+ -+ if (kdb_no_usb) -+ return -1; -+ -+ /* -+ * Loop through all the USB keyboard(s) and return -+ * the first character obtained from them. -+ */ -+ -+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) { -+ /* skip uninitialized keyboard array entries */ -+ if (!kdb_usb_kbds[i].urb || !kdb_usb_kbds[i].buffer || -+ !kdb_usb_kbds[i].poll_func) -+ continue; -+ -+ /* Transfer char */ -+ ret = (*kdb_usb_kbds[i].poll_func)(kdb_usb_kbds[i].urb); -+ if (ret == -EBUSY && kdb_usb_kbds[i].poll_ret != -EBUSY) -+ kdb_printf("NOTICE: USB HD driver BUSY. " -+ "USB keyboard has been disabled.\n"); -+ -+ kdb_usb_kbds[i].poll_ret = ret; -+ -+ if (ret < 0) /* error or no characters, try the next kbd */ -+ continue; -+ -+ /* If 2 keys was pressed simultaneously, -+ * both keycodes will be in buffer. -+ * Last pressed key will be last non -+ * zero byte. 
-+ */ -+ for (j=0; j<4; j++){ -+ if (!kdb_usb_kbds[i].buffer[2+j]) -+ break; -+ } -+ /* Last pressed key */ -+ max = j + 1; -+ -+ spec = kdb_usb_kbds[i].buffer[0]; -+ keycode = kdb_usb_kbds[i].buffer[2]; -+ kdb_usb_kbds[i].buffer[0] = (char)0; -+ kdb_usb_kbds[i].buffer[2] = (char)0; -+ -+ ret_key = -1; -+ -+ /* A normal key is pressed, decode it */ -+ if(keycode) -+ keycode = kdb_usb_keycode[keycode]; -+ -+ /* 2 Keys pressed at one time ? */ -+ if (spec && keycode) { -+ switch(spec) -+ { -+ case 0x2: -+ case 0x20: /* Shift */ -+ ret_key = shift_map[keycode]; -+ break; -+ case 0x1: -+ case 0x10: /* Ctrl */ -+ ret_key = ctrl_map[keycode]; -+ break; -+ case 0x4: -+ case 0x40: /* Alt */ -+ break; -+ } -+ } else if (keycode) { /* If only one key pressed */ -+ switch(keycode) -+ { -+ case 0x1C: /* Enter */ -+ ret_key = 13; -+ break; -+ -+ case 0x3A: /* Capslock */ -+ kdb_usb_kbds[i].caps_lock = !(kdb_usb_kbds[i].caps_lock); -+ break; -+ case 0x0E: /* Backspace */ -+ ret_key = 8; -+ break; -+ case 0x0F: /* TAB */ -+ ret_key = 9; -+ break; -+ case 0x77: /* Pause */ -+ break ; -+ default: -+ if(!kdb_usb_kbds[i].caps_lock) { -+ ret_key = plain_map[keycode]; -+ } -+ else { -+ ret_key = shift_map[keycode]; -+ } -+ } -+ } -+ -+ if (ret_key != 1) { -+ /* Key was pressed, return keycode */ -+ -+ /* Clear buffer before urb resending */ -+ if (kdb_usb_kbds[i].buffer) -+ for(j=0; j<8; j++) -+ kdb_usb_kbds[i].buffer[j] = (char)0; -+ -+ /* USB Host Controller specific Urb complete callback. -+ * Currently only UHCI has this callback. -+ */ -+ if (kdb_usb_kbds[i].kdb_hc_urb_complete) -+ (*kdb_usb_kbds[i].kdb_hc_urb_complete)((struct urb *)kdb_usb_kbds[i].urb); -+ -+ return ret_key; -+ } -+ } -+ -+ -+ -+ /* no chars were returned from any of the USB keyboards */ -+ -+ return -1; -+} -+#endif /* CONFIG_KDB_USB */ -+ -+/* -+ * This module contains code to read characters from the keyboard or a serial -+ * port. -+ * -+ * It is used by the kernel debugger, and is polled, not interrupt driven. -+ * -+ */ -+ -+#ifdef KDB_BLINK_LED -+/* -+ * send: Send a byte to the keyboard controller. Used primarily to -+ * alter LED settings. 
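A reduced model of the USB boot-protocol decode above: byte 0 of the report carries the modifier bits, byte 2 the first pressed key, and the keycode is then run through a plain or shifted map. The two tables here are invented stand-ins for the kernel's plain_map[]/shift_map[].

    #include <stdio.h>

    static const char plain[256] = { [0x04] = 'a', [0x05] = 'b', [0x28] = '\n' };
    static const char shift[256] = { [0x04] = 'A', [0x05] = 'B', [0x28] = '\n' };

    /* Return the decoded character, or -1 if no key is down. */
    static int decode(const unsigned char report[8])
    {
        unsigned char mod = report[0], key = report[2];
        if (!key)
            return -1;
        if (mod & 0x22)              /* left or right shift bit set */
            return shift[key];
        return plain[key];
    }

    int main(void)
    {
        unsigned char rep[8] = { 0x02, 0, 0x04 };   /* shift + 'a' */
        printf("%c\n", decode(rep));
        return 0;
    }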
-+ */ -+ -+static void -+kdb_kbdsend(unsigned char byte) -+{ -+ int timeout; -+ for (timeout = 200 * 1000; timeout && (inb(KBD_STATUS_REG) & KBD_STAT_IBF); timeout--); -+ outb(byte, KBD_DATA_REG); -+ udelay(40); -+ for (timeout = 200 * 1000; timeout && (~inb(KBD_STATUS_REG) & KBD_STAT_OBF); timeout--); -+ inb(KBD_DATA_REG); -+ udelay(40); -+} -+ -+static void -+kdb_toggleled(int led) -+{ -+ static int leds; -+ -+ leds ^= led; -+ -+ kdb_kbdsend(KBD_CMD_SET_LEDS); -+ kdb_kbdsend((unsigned char)leds); -+} -+#endif /* KDB_BLINK_LED */ -+ -+#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_CORE_CONSOLE) -+#define CONFIG_SERIAL_CONSOLE -+#endif -+ -+#if defined(CONFIG_SERIAL_CONSOLE) -+ -+struct kdb_serial kdb_serial; -+ -+static unsigned int -+serial_inp(struct kdb_serial *kdb_serial, unsigned long offset) -+{ -+ offset <<= kdb_serial->ioreg_shift; -+ -+ switch (kdb_serial->io_type) { -+ case SERIAL_IO_MEM: -+ return readb((void __iomem *)(kdb_serial->iobase + offset)); -+ break; -+ default: -+ return inb(kdb_serial->iobase + offset); -+ break; -+ } -+} -+ -+/* Check if there is a byte ready at the serial port */ -+static int get_serial_char(void) -+{ -+ unsigned char ch; -+ -+ if (kdb_serial.iobase == 0) -+ return -1; -+ -+ if (serial_inp(&kdb_serial, UART_LSR) & UART_LSR_DR) { -+ ch = serial_inp(&kdb_serial, UART_RX); -+ if (ch == 0x7f) -+ ch = 8; -+ return ch; -+ } -+ return -1; -+} -+#endif /* CONFIG_SERIAL_CONSOLE */ -+ -+#ifdef CONFIG_VT_CONSOLE -+ -+static int kbd_exists; -+ -+/* -+ * Check if the keyboard controller has a keypress for us. -+ * Some parts (Enter Release, LED change) are still blocking polled here, -+ * but hopefully they are all short. -+ */ -+static int get_kbd_char(void) -+{ -+ int scancode, scanstatus; -+ static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */ -+ static int shift_key; /* Shift next keypress */ -+ static int ctrl_key; -+ u_short keychar; -+ extern u_short plain_map[], shift_map[], ctrl_map[]; -+ -+ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) || -+ (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) { -+ kbd_exists = 0; -+ return -1; -+ } -+ kbd_exists = 1; -+ -+ if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) -+ return -1; -+ -+ /* -+ * Fetch the scancode -+ */ -+ scancode = inb(KBD_DATA_REG); -+ scanstatus = inb(KBD_STATUS_REG); -+ -+ /* -+ * Ignore mouse events. -+ */ -+ if (scanstatus & KBD_STAT_MOUSE_OBF) -+ return -1; -+ -+ /* -+ * Ignore release, trigger on make -+ * (except for shift keys, where we want to -+ * keep the shift state so long as the key is -+ * held down). 
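The polled serial path above boils down to one line-status test before reading the receive register. A self-contained sketch of that logic; reg_read() is an invented stand-in for serial_inp() so the example does not need real port I/O.

    #include <stdio.h>

    #define UART_RX     0       /* receive buffer (offset 0) */
    #define UART_LSR    5       /* line status register (offset 5) */
    #define UART_LSR_DR 0x01    /* data ready */

    /* Return a byte only if the LSR says one is waiting, else -1;
     * DEL (0x7f) is mapped to backspace as in get_serial_char() above.
     */
    static int poll_serial(unsigned int (*reg_read)(int off))
    {
        if (reg_read(UART_LSR) & UART_LSR_DR) {
            int ch = reg_read(UART_RX);
            return ch == 0x7f ? 8 : ch;
        }
        return -1;
    }

    static unsigned int fake_uart(int off)
    {
        return off == UART_LSR ? UART_LSR_DR : 'k';
    }

    int main(void)
    {
        printf("%c\n", poll_serial(fake_uart));
        return 0;
    }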
-+ */ -+ -+ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) { -+ /* -+ * Next key may use shift table -+ */ -+ if ((scancode & 0x80) == 0) { -+ shift_key=1; -+ } else { -+ shift_key=0; -+ } -+ return -1; -+ } -+ -+ if ((scancode&0x7f) == 0x1d) { -+ /* -+ * Left ctrl key -+ */ -+ if ((scancode & 0x80) == 0) { -+ ctrl_key = 1; -+ } else { -+ ctrl_key = 0; -+ } -+ return -1; -+ } -+ -+ if ((scancode & 0x80) != 0) -+ return -1; -+ -+ scancode &= 0x7f; -+ -+ /* -+ * Translate scancode -+ */ -+ -+ if (scancode == 0x3a) { -+ /* -+ * Toggle caps lock -+ */ -+ shift_lock ^= 1; -+ -+#ifdef KDB_BLINK_LED -+ kdb_toggleled(0x4); -+#endif -+ return -1; -+ } -+ -+ if (scancode == 0x0e) { -+ /* -+ * Backspace -+ */ -+ return 8; -+ } -+ -+ /* Special Key */ -+ switch (scancode) { -+ case 0xF: /* Tab */ -+ return 9; -+ case 0x53: /* Del */ -+ return 4; -+ case 0x47: /* Home */ -+ return 1; -+ case 0x4F: /* End */ -+ return 5; -+ case 0x4B: /* Left */ -+ return 2; -+ case 0x48: /* Up */ -+ return 16; -+ case 0x50: /* Down */ -+ return 14; -+ case 0x4D: /* Right */ -+ return 6; -+ } -+ -+ if (scancode == 0xe0) { -+ return -1; -+ } -+ -+ /* -+ * For Japanese 86/106 keyboards -+ * See comment in drivers/char/pc_keyb.c. -+ * - Masahiro Adegawa -+ */ -+ if (scancode == 0x73) { -+ scancode = 0x59; -+ } else if (scancode == 0x7d) { -+ scancode = 0x7c; -+ } -+ -+ if (!shift_lock && !shift_key && !ctrl_key) { -+ keychar = plain_map[scancode]; -+ } else if (shift_lock || shift_key) { -+ keychar = shift_map[scancode]; -+ } else if (ctrl_key) { -+ keychar = ctrl_map[scancode]; -+ } else { -+ keychar = 0x0020; -+ kdb_printf("Unknown state/scancode (%d)\n", scancode); -+ } -+ keychar &= 0x0fff; -+ if (keychar == '\t') -+ keychar = ' '; -+ switch (KTYP(keychar)) { -+ case KT_LETTER: -+ case KT_LATIN: -+ if (isprint(keychar)) -+ break; /* printable characters */ -+ /* drop through */ -+ case KT_SPEC: -+ if (keychar == K_ENTER) -+ break; -+ /* drop through */ -+ default: -+ return(-1); /* ignore unprintables */ -+ } -+ -+ if ((scancode & 0x7f) == 0x1c) { -+ /* -+ * enter key. All done. Absorb the release scancode. -+ */ -+ while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) -+ ; -+ -+ /* -+ * Fetch the scancode -+ */ -+ scancode = inb(KBD_DATA_REG); -+ scanstatus = inb(KBD_STATUS_REG); -+ -+ while (scanstatus & KBD_STAT_MOUSE_OBF) { -+ scancode = inb(KBD_DATA_REG); -+ scanstatus = inb(KBD_STATUS_REG); -+ } -+ -+ if (scancode != 0x9c) { -+ /* -+ * Wasn't an enter-release, why not? -+ */ -+ kdb_printf("kdb: expected enter got 0x%x status 0x%x\n", -+ scancode, scanstatus); -+ } -+ -+ kdb_printf("\n"); -+ return 13; -+ } -+ -+ return keychar & 0xff; -+} -+#endif /* CONFIG_VT_CONSOLE */ -+ -+#ifdef KDB_BLINK_LED -+ -+/* Leave numlock alone, setting it messes up laptop keyboards with the keypad -+ * mapped over normal keys. 
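The make/break handling in get_kbd_char() relies on bit 7 of an AT set-1 scancode being the release flag, with the shift keys (0x2a/0x36) only updating state. A self-contained sketch over a short synthetic scancode stream:

    #include <stdio.h>

    int main(void)
    {
        /* shift down, 'a' down, 'a' up, shift up */
        unsigned char codes[] = { 0x2a, 0x1e, 0x9e, 0xaa };
        int shift = 0, i;

        for (i = 0; i < (int)sizeof(codes); ++i) {
            unsigned char sc = codes[i], key = sc & 0x7f;
            int release = sc & 0x80;

            if (key == 0x2a || key == 0x36) {   /* shift keys: track state */
                shift = !release;
                continue;
            }
            if (!release)                       /* act on make codes only */
                printf("make 0x%02x%s\n", key, shift ? " (shifted)" : "");
        }
        return 0;
    }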
-+ */ -+static int kdba_blink_mask = 0x1 | 0x4; -+ -+#define BOGOMIPS (boot_cpu_data.loops_per_jiffy/(500000/HZ)) -+static int blink_led(void) -+{ -+ static long delay; -+ -+ if (kbd_exists == 0) -+ return -1; -+ -+ if (--delay < 0) { -+ if (BOGOMIPS == 0) /* early kdb */ -+ delay = 150000000/1000; /* arbitrary bogomips */ -+ else -+ delay = 150000000/BOGOMIPS; /* Roughly 1 second when polling */ -+ kdb_toggleled(kdba_blink_mask); -+ } -+ return -1; -+} -+#endif -+ -+get_char_func poll_funcs[] = { -+#if defined(CONFIG_VT_CONSOLE) -+ get_kbd_char, -+#endif -+#if defined(CONFIG_SERIAL_CONSOLE) -+ get_serial_char, -+#endif -+#ifdef KDB_BLINK_LED -+ blink_led, -+#endif -+#ifdef CONFIG_KDB_USB -+ get_usb_char, -+#endif -+ NULL -+}; -+ -+/* -+ * On some Compaq Deskpro's, there is a keyboard freeze many times after -+ * exiting from the kdb. As kdb's keyboard handler is not interrupt-driven and -+ * uses a polled interface, it makes more sense to disable motherboard keyboard -+ * controller's OBF interrupts during kdb's polling.In case, of interrupts -+ * remaining enabled during kdb's polling, it may cause un-necessary -+ * interrupts being signalled during keypresses, which are also sometimes seen -+ * as spurious interrupts after exiting from kdb. This hack to disable OBF -+ * interrupts before entry to kdb and re-enabling them at kdb exit point also -+ * solves the keyboard freeze issue. These functions are called from -+ * kdb_local(), hence these are arch. specific setup and cleanup functions -+ * executing only on the local processor - ashishk@sco.com -+ */ -+ -+void kdba_local_arch_setup(void) -+{ -+#ifdef CONFIG_VT_CONSOLE -+ int timeout; -+ unsigned char c; -+ -+ while (kbd_read_status() & KBD_STAT_IBF); -+ kbd_write_command(KBD_CCMD_READ_MODE); -+ mdelay(1); -+ while (kbd_read_status() & KBD_STAT_IBF); -+ for (timeout = 200 * 1000; timeout && -+ (!(kbd_read_status() & KBD_STAT_OBF)); timeout--); -+ c = kbd_read_input(); -+ c &= ~KBD_MODE_KBD_INT; -+ while (kbd_read_status() & KBD_STAT_IBF); -+ kbd_write_command(KBD_CCMD_WRITE_MODE); -+ mdelay(1); -+ while (kbd_read_status() & KBD_STAT_IBF); -+ kbd_write_output(c); -+ mdelay(1); -+ while (kbd_read_status() & KBD_STAT_IBF); -+ mdelay(1); -+#endif /* CONFIG_VT_CONSOLE */ -+} -+ -+void kdba_local_arch_cleanup(void) -+{ -+#ifdef CONFIG_VT_CONSOLE -+ int timeout; -+ unsigned char c; -+ -+ while (kbd_read_status() & KBD_STAT_IBF); -+ kbd_write_command(KBD_CCMD_READ_MODE); -+ mdelay(1); -+ while (kbd_read_status() & KBD_STAT_IBF); -+ for (timeout = 200 * 1000; timeout && -+ (!(kbd_read_status() & KBD_STAT_OBF)); timeout--); -+ c = kbd_read_input(); -+ c |= KBD_MODE_KBD_INT; -+ while (kbd_read_status() & KBD_STAT_IBF); -+ kbd_write_command(KBD_CCMD_WRITE_MODE); -+ mdelay(1); -+ while (kbd_read_status() & KBD_STAT_IBF); -+ kbd_write_output(c); -+ mdelay(1); -+ while (kbd_read_status() & KBD_STAT_IBF); -+ mdelay(1); -+#endif /* CONFIG_VT_CONSOLE */ -+} ---- /dev/null -+++ b/arch/x86/kdb/kdba_support.c -@@ -0,0 +1,1536 @@ -+/* -+ * Kernel Debugger Architecture Independent Support Functions -+ * -+ * This file is subject to the terms and conditions of the GNU General Public -+ * License. See the file "COPYING" in the main directory of this archive -+ * for more details. -+ * -+ * Copyright (c) 1999-2008 Silicon Graphics, Inc. All Rights Reserved. 
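poll_funcs[] is consumed as a NULL-terminated list of input sources, each tried in turn until one returns a character. A minimal sketch of that dispatch with two invented poll functions:

    #include <stdio.h>

    typedef int (*get_char_func)(void);

    static int no_char(void)  { return -1; }
    static int has_char(void) { return 'q'; }

    /* Same shape as poll_funcs[]: keyboard, serial, LED blink, ... , NULL */
    static get_char_func polls[] = { no_char, has_char, NULL };

    int main(void)
    {
        get_char_func *f;
        int c = -1;

        for (f = polls; *f && c == -1; ++f)
            c = (*f)();
        printf("%c\n", c);
        return 0;
    }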
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+static kdb_machreg_t -+kdba_getcr(int regnum) -+{ -+ kdb_machreg_t contents = 0; -+ switch(regnum) { -+ case 0: -+ __asm__ (_ASM_MOV " %%cr0,%0\n\t":"=r"(contents)); -+ break; -+ case 1: -+ break; -+ case 2: -+ __asm__ (_ASM_MOV " %%cr2,%0\n\t":"=r"(contents)); -+ break; -+ case 3: -+ __asm__ (_ASM_MOV " %%cr3,%0\n\t":"=r"(contents)); -+ break; -+ case 4: -+ __asm__ (_ASM_MOV " %%cr4,%0\n\t":"=r"(contents)); -+ break; -+ default: -+ break; -+ } -+ -+ return contents; -+} -+ -+void -+kdba_putdr(int regnum, kdb_machreg_t contents) -+{ -+ switch(regnum) { -+ case 0: -+ __asm__ (_ASM_MOV " %0,%%db0\n\t"::"r"(contents)); -+ break; -+ case 1: -+ __asm__ (_ASM_MOV " %0,%%db1\n\t"::"r"(contents)); -+ break; -+ case 2: -+ __asm__ (_ASM_MOV " %0,%%db2\n\t"::"r"(contents)); -+ break; -+ case 3: -+ __asm__ (_ASM_MOV " %0,%%db3\n\t"::"r"(contents)); -+ break; -+ case 4: -+ case 5: -+ break; -+ case 6: -+ __asm__ (_ASM_MOV " %0,%%db6\n\t"::"r"(contents)); -+ break; -+ case 7: -+ __asm__ (_ASM_MOV " %0,%%db7\n\t"::"r"(contents)); -+ break; -+ default: -+ break; -+ } -+} -+ -+kdb_machreg_t -+kdba_getdr(int regnum) -+{ -+ kdb_machreg_t contents = 0; -+ switch(regnum) { -+ case 0: -+ __asm__ (_ASM_MOV " %%db0,%0\n\t":"=r"(contents)); -+ break; -+ case 1: -+ __asm__ (_ASM_MOV " %%db1,%0\n\t":"=r"(contents)); -+ break; -+ case 2: -+ __asm__ (_ASM_MOV " %%db2,%0\n\t":"=r"(contents)); -+ break; -+ case 3: -+ __asm__ (_ASM_MOV " %%db3,%0\n\t":"=r"(contents)); -+ break; -+ case 4: -+ case 5: -+ break; -+ case 6: -+ __asm__ (_ASM_MOV " %%db6,%0\n\t":"=r"(contents)); -+ break; -+ case 7: -+ __asm__ (_ASM_MOV " %%db7,%0\n\t":"=r"(contents)); -+ break; -+ default: -+ break; -+ } -+ -+ return contents; -+} -+ -+kdb_machreg_t -+kdba_getdr6(void) -+{ -+ return kdba_getdr(6); -+} -+ -+kdb_machreg_t -+kdba_getdr7(void) -+{ -+ return kdba_getdr(7); -+} -+ -+void -+kdba_putdr6(kdb_machreg_t contents) -+{ -+ kdba_putdr(6, contents); -+} -+ -+static void -+kdba_putdr7(kdb_machreg_t contents) -+{ -+ kdba_putdr(7, contents); -+} -+ -+void -+kdba_installdbreg(kdb_bp_t *bp) -+{ -+ int cpu = smp_processor_id(); -+ -+ kdb_machreg_t dr7; -+ -+ dr7 = kdba_getdr7(); -+ -+ kdba_putdr(bp->bp_hard[cpu]->bph_reg, bp->bp_addr); -+ -+ dr7 |= DR7_GE; -+ if (cpu_has_de) -+ set_in_cr4(X86_CR4_DE); -+ -+ switch (bp->bp_hard[cpu]->bph_reg){ -+ case 0: -+ DR7_RW0SET(dr7,bp->bp_hard[cpu]->bph_mode); -+ DR7_LEN0SET(dr7,bp->bp_hard[cpu]->bph_length); -+ DR7_G0SET(dr7); -+ break; -+ case 1: -+ DR7_RW1SET(dr7,bp->bp_hard[cpu]->bph_mode); -+ DR7_LEN1SET(dr7,bp->bp_hard[cpu]->bph_length); -+ DR7_G1SET(dr7); -+ break; -+ case 2: -+ DR7_RW2SET(dr7,bp->bp_hard[cpu]->bph_mode); -+ DR7_LEN2SET(dr7,bp->bp_hard[cpu]->bph_length); -+ DR7_G2SET(dr7); -+ break; -+ case 3: -+ DR7_RW3SET(dr7,bp->bp_hard[cpu]->bph_mode); -+ DR7_LEN3SET(dr7,bp->bp_hard[cpu]->bph_length); -+ DR7_G3SET(dr7); -+ break; -+ default: -+ kdb_printf("kdb: Bad debug register!! 
%ld\n", -+ bp->bp_hard[cpu]->bph_reg); -+ break; -+ } -+ -+ kdba_putdr7(dr7); -+ return; -+} -+ -+void -+kdba_removedbreg(kdb_bp_t *bp) -+{ -+ int regnum; -+ kdb_machreg_t dr7; -+ int cpu = smp_processor_id(); -+ -+ if (!bp->bp_hard[cpu]) -+ return; -+ -+ regnum = bp->bp_hard[cpu]->bph_reg; -+ -+ dr7 = kdba_getdr7(); -+ -+ kdba_putdr(regnum, 0); -+ -+ switch (regnum) { -+ case 0: -+ DR7_G0CLR(dr7); -+ DR7_L0CLR(dr7); -+ break; -+ case 1: -+ DR7_G1CLR(dr7); -+ DR7_L1CLR(dr7); -+ break; -+ case 2: -+ DR7_G2CLR(dr7); -+ DR7_L2CLR(dr7); -+ break; -+ case 3: -+ DR7_G3CLR(dr7); -+ DR7_L3CLR(dr7); -+ break; -+ default: -+ kdb_printf("kdb: Bad debug register!! %d\n", regnum); -+ break; -+ } -+ -+ kdba_putdr7(dr7); -+} -+ -+struct kdbregs { -+ char *reg_name; -+ size_t reg_offset; -+}; -+ -+static struct kdbregs dbreglist[] = { -+ { "dr0", 0 }, -+ { "dr1", 1 }, -+ { "dr2", 2 }, -+ { "dr3", 3 }, -+ { "dr6", 6 }, -+ { "dr7", 7 }, -+}; -+ -+static const int ndbreglist = sizeof(dbreglist) / sizeof(struct kdbregs); -+ -+#ifdef CONFIG_X86_32 -+static struct kdbregs kdbreglist[] = { -+ { "ax", offsetof(struct pt_regs, ax) }, -+ { "bx", offsetof(struct pt_regs, bx) }, -+ { "cx", offsetof(struct pt_regs, cx) }, -+ { "dx", offsetof(struct pt_regs, dx) }, -+ -+ { "si", offsetof(struct pt_regs, si) }, -+ { "di", offsetof(struct pt_regs, di) }, -+ { "sp", offsetof(struct pt_regs, sp) }, -+ { "ip", offsetof(struct pt_regs, ip) }, -+ -+ { "bp", offsetof(struct pt_regs, bp) }, -+ { "ss", offsetof(struct pt_regs, ss) }, -+ { "cs", offsetof(struct pt_regs, cs) }, -+ { "flags", offsetof(struct pt_regs, flags) }, -+ -+ { "ds", offsetof(struct pt_regs, ds) }, -+ { "es", offsetof(struct pt_regs, es) }, -+ { "origax", offsetof(struct pt_regs, orig_ax) }, -+ -+}; -+ -+static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs); -+ -+ -+/* -+ * kdba_getregcontents -+ * -+ * Return the contents of the register specified by the -+ * input string argument. Return an error if the string -+ * does not match a machine register. -+ * -+ * The following pseudo register names are supported: -+ * ®s - Prints address of exception frame -+ * kesp - Prints kernel stack pointer at time of fault -+ * cesp - Prints current kernel stack pointer, inside kdb -+ * ceflags - Prints current flags, inside kdb -+ * % - Uses the value of the registers at the -+ * last time the user process entered kernel -+ * mode, instead of the registers at the time -+ * kdb was entered. -+ * -+ * Parameters: -+ * regname Pointer to string naming register -+ * regs Pointer to structure containing registers. -+ * Outputs: -+ * *contents Pointer to unsigned long to recieve register contents -+ * Returns: -+ * 0 Success -+ * KDB_BADREG Invalid register name -+ * Locking: -+ * None. -+ * Remarks: -+ * If kdb was entered via an interrupt from the kernel itself then -+ * ss and sp are *not* on the stack. 
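kdba_installdbreg() above programs one of the four debug-address registers and then turns on the matching enable, R/W and LEN fields in DR7 (plus GE), while kdba_removedbreg() clears them again. A minimal stand-alone sketch of that encoding, assuming the architectural DR7 layout (Gn enable at bit 2*slot+1, R/Wn and LENn as 2-bit fields at bits 16+4*slot and 18+4*slot) and mode/length values already in hardware encoding; the DR7_*SET/CLR macros used above presumably expand to the same shifts.

#include <stdint.h>

/* Sketch only: compute the DR7 bits for one breakpoint slot (0-3).
 * "mode" and "len" are assumed to already use the hardware encoding
 * (mode: 0=execute, 1=write, 3=read/write; len: 0=1, 1=2, 3=4, 2=8 bytes). */
static uint64_t dr7_enable_slot(uint64_t dr7, int slot, unsigned mode, unsigned len)
{
	dr7 &= ~(0xfULL << (16 + 4 * slot));             /* clear old R/Wn + LENn */
	dr7 |= (uint64_t)(mode & 3) << (16 + 4 * slot);  /* R/Wn */
	dr7 |= (uint64_t)(len  & 3) << (18 + 4 * slot);  /* LENn */
	dr7 |= 1ULL << (2 * slot + 1);                   /* Gn: global enable */
	dr7 |= 1ULL << 9;                                /* GE, as kdba_installdbreg() sets */
	return dr7;
}

static uint64_t dr7_disable_slot(uint64_t dr7, int slot)
{
	dr7 &= ~(3ULL << (2 * slot));                    /* clear Ln and Gn */
	return dr7;
}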
-+ */ -+ -+int -+kdba_getregcontents(const char *regname, -+ struct pt_regs *regs, -+ kdb_machreg_t *contents) -+{ -+ int i; -+ -+ if (strcmp(regname, "cesp") == 0) { -+ asm volatile("movl %%esp,%0":"=m" (*contents)); -+ return 0; -+ } -+ -+ if (strcmp(regname, "ceflags") == 0) { -+ unsigned long flags; -+ local_save_flags(flags); -+ *contents = flags; -+ return 0; -+ } -+ -+ if (regname[0] == '%') { -+ /* User registers: %%e[a-c]x, etc */ -+ regname++; -+ regs = (struct pt_regs *) -+ (kdb_current_task->thread.sp0 - sizeof(struct pt_regs)); -+ } -+ -+ for (i=0; ics & 0xffff) == __KERNEL_CS) { -+ /* sp and ss are not on stack */ -+ *contents -= 2*4; -+ } -+ return 0; -+ } -+ -+ for (i=0; ics & 0xffff) == __KERNEL_CS) { -+ /* No cpl switch, sp and ss are not on stack */ -+ if (strcmp(kdbreglist[i].reg_name, "sp") == 0) { -+ *contents = (kdb_machreg_t)regs + -+ sizeof(struct pt_regs) - 2*4; -+ return(0); -+ } -+ if (strcmp(kdbreglist[i].reg_name, "xss") == 0) { -+ asm volatile( -+ "pushl %%ss\n" -+ "popl %0\n" -+ :"=m" (*contents)); -+ return(0); -+ } -+ } -+ *contents = *(unsigned long *)((unsigned long)regs + -+ kdbreglist[i].reg_offset); -+ return(0); -+ } -+ -+ return KDB_BADREG; -+} -+ -+/* -+ * kdba_setregcontents -+ * -+ * Set the contents of the register specified by the -+ * input string argument. Return an error if the string -+ * does not match a machine register. -+ * -+ * Supports modification of user-mode registers via -+ * % -+ * -+ * Parameters: -+ * regname Pointer to string naming register -+ * regs Pointer to structure containing registers. -+ * contents Unsigned long containing new register contents -+ * Outputs: -+ * Returns: -+ * 0 Success -+ * KDB_BADREG Invalid register name -+ * Locking: -+ * None. -+ * Remarks: -+ */ -+ -+int -+kdba_setregcontents(const char *regname, -+ struct pt_regs *regs, -+ unsigned long contents) -+{ -+ int i; -+ -+ if (regname[0] == '%') { -+ regname++; -+ regs = (struct pt_regs *) -+ (kdb_current_task->thread.sp0 - sizeof(struct pt_regs)); -+ } -+ -+ for (i=0; ibx); -+ kdb_print_nameval("cx", p->cx); -+ kdb_print_nameval("dx", p->dx); -+ kdb_print_nameval("si", p->si); -+ kdb_print_nameval("di", p->di); -+ kdb_print_nameval("bp", p->bp); -+ kdb_print_nameval("ax", p->ax); -+ kdb_printf(fmt, "ds", p->ds); -+ kdb_printf(fmt, "es", p->es); -+ kdb_print_nameval("orig_ax", p->orig_ax); -+ kdb_print_nameval("ip", p->ip); -+ kdb_printf(fmt, "cs", p->cs); -+ kdb_printf(fmt, "flags", p->flags); -+ kdb_printf(fmt, "sp", p->sp); -+ kdb_printf(fmt, "ss", p->ss); -+ return 0; -+} -+ -+#else /* CONFIG_X86_32 */ -+ -+static struct kdbregs kdbreglist[] = { -+ { "r15", offsetof(struct pt_regs, r15) }, -+ { "r14", offsetof(struct pt_regs, r14) }, -+ { "r13", offsetof(struct pt_regs, r13) }, -+ { "r12", offsetof(struct pt_regs, r12) }, -+ { "bp", offsetof(struct pt_regs, bp) }, -+ { "bx", offsetof(struct pt_regs, bx) }, -+ { "r11", offsetof(struct pt_regs, r11) }, -+ { "r10", offsetof(struct pt_regs, r10) }, -+ { "r9", offsetof(struct pt_regs, r9) }, -+ { "r8", offsetof(struct pt_regs, r8) }, -+ { "ax", offsetof(struct pt_regs, ax) }, -+ { "cx", offsetof(struct pt_regs, cx) }, -+ { "dx", offsetof(struct pt_regs, dx) }, -+ { "si", offsetof(struct pt_regs, si) }, -+ { "di", offsetof(struct pt_regs, di) }, -+ { "orig_ax", offsetof(struct pt_regs, orig_ax) }, -+ { "ip", offsetof(struct pt_regs, ip) }, -+ { "cs", offsetof(struct pt_regs, cs) }, -+ { "flags", offsetof(struct pt_regs, flags) }, -+ { "sp", offsetof(struct pt_regs, sp) }, -+ { "ss", offsetof(struct 
pt_regs, ss) }, -+}; -+ -+static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs); -+ -+ -+/* -+ * kdba_getregcontents -+ * -+ * Return the contents of the register specified by the -+ * input string argument. Return an error if the string -+ * does not match a machine register. -+ * -+ * The following pseudo register names are supported: -+ * ®s - Prints address of exception frame -+ * krsp - Prints kernel stack pointer at time of fault -+ * crsp - Prints current kernel stack pointer, inside kdb -+ * ceflags - Prints current flags, inside kdb -+ * % - Uses the value of the registers at the -+ * last time the user process entered kernel -+ * mode, instead of the registers at the time -+ * kdb was entered. -+ * -+ * Parameters: -+ * regname Pointer to string naming register -+ * regs Pointer to structure containing registers. -+ * Outputs: -+ * *contents Pointer to unsigned long to recieve register contents -+ * Returns: -+ * 0 Success -+ * KDB_BADREG Invalid register name -+ * Locking: -+ * None. -+ * Remarks: -+ * If kdb was entered via an interrupt from the kernel itself then -+ * ss and sp are *not* on the stack. -+ */ -+int -+kdba_getregcontents(const char *regname, -+ struct pt_regs *regs, -+ kdb_machreg_t *contents) -+{ -+ int i; -+ -+ if (strcmp(regname, "®s") == 0) { -+ *contents = (unsigned long)regs; -+ return 0; -+ } -+ -+ if (strcmp(regname, "krsp") == 0) { -+ *contents = (unsigned long)regs + sizeof(struct pt_regs); -+ if ((regs->cs & 0xffff) == __KERNEL_CS) { -+ /* sp and ss are not on stack */ -+ *contents -= 2*4; -+ } -+ return 0; -+ } -+ -+ if (strcmp(regname, "crsp") == 0) { -+ asm volatile("movq %%rsp,%0":"=m" (*contents)); -+ return 0; -+ } -+ -+ if (strcmp(regname, "ceflags") == 0) { -+ unsigned long flags; -+ local_save_flags(flags); -+ *contents = flags; -+ return 0; -+ } -+ -+ if (regname[0] == '%') { -+ /* User registers: %%r[a-c]x, etc */ -+ regname++; -+ regs = (struct pt_regs *) -+ (current->thread.sp0 - sizeof(struct pt_regs)); -+ } -+ -+ for (i=0; ics & 0xffff) == __KERNEL_CS) { -+ /* No cpl switch, sp is not on stack */ -+ if (strcmp(kdbreglist[i].reg_name, "sp") == 0) { -+ *contents = (kdb_machreg_t)regs + -+ sizeof(struct pt_regs) - 2*8; -+ return(0); -+ } -+#if 0 /* FIXME */ -+ if (strcmp(kdbreglist[i].reg_name, "ss") == 0) { -+ kdb_machreg_t r; -+ -+ r = (kdb_machreg_t)regs + -+ sizeof(struct pt_regs) - 2*8; -+ *contents = (kdb_machreg_t)SS(r); /* XXX */ -+ return(0); -+ } -+#endif -+ } -+ *contents = *(unsigned long *)((unsigned long)regs + -+ kdbreglist[i].reg_offset); -+ return(0); -+ } -+ -+ for (i=0; i -+ * -+ * Parameters: -+ * regname Pointer to string naming register -+ * regs Pointer to structure containing registers. -+ * contents Unsigned long containing new register contents -+ * Outputs: -+ * Returns: -+ * 0 Success -+ * KDB_BADREG Invalid register name -+ * Locking: -+ * None. 
-+ * Remarks: -+ */ -+ -+int -+kdba_setregcontents(const char *regname, -+ struct pt_regs *regs, -+ unsigned long contents) -+{ -+ int i; -+ -+ if (regname[0] == '%') { -+ regname++; -+ regs = (struct pt_regs *) -+ (current->thread.sp0 - sizeof(struct pt_regs)); -+ } -+ -+ for (i=0; ir15); -+ kdb_print_nameval("r14", p->r14); -+ kdb_print_nameval("r13", p->r13); -+ kdb_print_nameval("r12", p->r12); -+ kdb_print_nameval("bp", p->bp); -+ kdb_print_nameval("bx", p->bx); -+ kdb_print_nameval("r11", p->r11); -+ kdb_print_nameval("r10", p->r10); -+ kdb_print_nameval("r9", p->r9); -+ kdb_print_nameval("r8", p->r8); -+ kdb_print_nameval("ax", p->ax); -+ kdb_print_nameval("cx", p->cx); -+ kdb_print_nameval("dx", p->dx); -+ kdb_print_nameval("si", p->si); -+ kdb_print_nameval("di", p->di); -+ kdb_print_nameval("orig_ax", p->orig_ax); -+ kdb_print_nameval("ip", p->ip); -+ kdb_printf(fmt, "cs", p->cs); -+ kdb_printf(fmt, "flags", p->flags); -+ kdb_printf(fmt, "sp", p->sp); -+ kdb_printf(fmt, "ss", p->ss); -+ return 0; -+} -+#endif /* CONFIG_X86_32 */ -+ -+/* -+ * kdba_dumpregs -+ * -+ * Dump the specified register set to the display. -+ * -+ * Parameters: -+ * regs Pointer to structure containing registers. -+ * type Character string identifying register set to dump -+ * extra string further identifying register (optional) -+ * Outputs: -+ * Returns: -+ * 0 Success -+ * Locking: -+ * None. -+ * Remarks: -+ * This function will dump the general register set if the type -+ * argument is NULL (struct pt_regs). The alternate register -+ * set types supported by this function: -+ * -+ * d Debug registers -+ * c Control registers -+ * u User registers at most recent entry to kernel -+ * for the process currently selected with "pid" command. -+ * Following not yet implemented: -+ * r Memory Type Range Registers (extra defines register) -+ * -+ * MSR on i386/x86_64 are handled by rdmsr/wrmsr commands. -+ */ -+ -+int -+kdba_dumpregs(struct pt_regs *regs, -+ const char *type, -+ const char *extra) -+{ -+ int i; -+ int count = 0; -+ -+ if (type -+ && (type[0] == 'u')) { -+ type = NULL; -+ regs = (struct pt_regs *) -+ (kdb_current_task->thread.sp0 - sizeof(struct pt_regs)); -+ } -+ -+ if (type == NULL) { -+ struct kdbregs *rlp; -+ kdb_machreg_t contents; -+ -+ if (!regs) { -+ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__); -+ return KDB_BADREG; -+ } -+ -+#ifdef CONFIG_X86_32 -+ for (i=0, rlp=kdbreglist; iip : 0; -+} -+ -+int -+kdba_setpc(struct pt_regs *regs, kdb_machreg_t newpc) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return KDB_BADREG; -+ regs->ip = newpc; -+ KDB_STATE_SET(IP_ADJUSTED); -+ return 0; -+} -+ -+/* -+ * kdba_main_loop -+ * -+ * Do any architecture specific set up before entering the main kdb loop. -+ * The primary function of this routine is to make all processes look the -+ * same to kdb, kdb must be able to list a process without worrying if the -+ * process is running or blocked, so make all process look as though they -+ * are blocked. -+ * -+ * Inputs: -+ * reason The reason KDB was invoked -+ * error The hardware-defined error code -+ * error2 kdb's current reason code. Initially error but can change -+ * acording to kdb state. -+ * db_result Result from break or debug point. -+ * regs The exception frame at time of fault/breakpoint. If reason -+ * is SILENT or CPU_UP then regs is NULL, otherwise it should -+ * always be valid. 
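The kdbreglist tables above (one per word size) reduce register access to a string compare plus an offsetof() into struct pt_regs, which is what lets kdba_getregcontents()/kdba_setregcontents() stay table-driven. The same idea in a self-contained sketch, with a hypothetical toy_regs structure standing in for pt_regs:

#include <stddef.h>
#include <string.h>

struct toy_regs {                       /* stand-in for struct pt_regs */
	unsigned long ax, bx, ip, sp;
};

struct reg_desc {
	const char *name;
	size_t offset;
};

static const struct reg_desc reg_table[] = {
	{ "ax", offsetof(struct toy_regs, ax) },
	{ "bx", offsetof(struct toy_regs, bx) },
	{ "ip", offsetof(struct toy_regs, ip) },
	{ "sp", offsetof(struct toy_regs, sp) },
};

/* Return 0 and fill *val on success, -1 for an unknown name
 * (kdb returns KDB_BADREG in the same situation). */
static int get_reg(const struct toy_regs *regs, const char *name, unsigned long *val)
{
	size_t i;

	for (i = 0; i < sizeof(reg_table) / sizeof(reg_table[0]); i++) {
		if (strcmp(name, reg_table[i].name) == 0) {
			*val = *(const unsigned long *)
				((const char *)regs + reg_table[i].offset);
			return 0;
		}
	}
	return -1;
}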
-+ * Returns: -+ * 0 KDB was invoked for an event which it wasn't responsible -+ * 1 KDB handled the event for which it was invoked. -+ * Outputs: -+ * Sets ip and sp in current->thread. -+ * Locking: -+ * None. -+ * Remarks: -+ * none. -+ */ -+ -+int -+kdba_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error, -+ kdb_dbtrap_t db_result, struct pt_regs *regs) -+{ -+ int ret; -+ -+#ifdef CONFIG_X86_64 -+ if (regs) -+ kdba_getregcontents("sp", regs, &(current->thread.sp)); -+#endif -+ ret = kdb_save_running(regs, reason, reason2, error, db_result); -+ kdb_unsave_running(regs); -+ return ret; -+} -+ -+void -+kdba_disableint(kdb_intstate_t *state) -+{ -+ unsigned long *fp = (unsigned long *)state; -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ *fp = flags; -+} -+ -+void -+kdba_restoreint(kdb_intstate_t *state) -+{ -+ unsigned long flags = *(unsigned long *)state; -+ local_irq_restore(flags); -+} -+ -+void -+kdba_setsinglestep(struct pt_regs *regs) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return; -+ if (regs->flags & X86_EFLAGS_IF) -+ KDB_STATE_SET(A_IF); -+ else -+ KDB_STATE_CLEAR(A_IF); -+ regs->flags = (regs->flags | X86_EFLAGS_TF) & ~X86_EFLAGS_IF; -+} -+ -+void -+kdba_clearsinglestep(struct pt_regs *regs) -+{ -+ if (KDB_NULL_REGS(regs)) -+ return; -+ if (KDB_STATE(A_IF)) -+ regs->flags |= X86_EFLAGS_IF; -+ else -+ regs->flags &= ~X86_EFLAGS_IF; -+} -+ -+#ifdef CONFIG_X86_32 -+int asmlinkage -+kdba_setjmp(kdb_jmp_buf *jb) -+{ -+#ifdef CONFIG_FRAME_POINTER -+ __asm__ ("movl 8(%esp), %eax\n\t" -+ "movl %ebx, 0(%eax)\n\t" -+ "movl %esi, 4(%eax)\n\t" -+ "movl %edi, 8(%eax)\n\t" -+ "movl (%esp), %ecx\n\t" -+ "movl %ecx, 12(%eax)\n\t" -+ "leal 8(%esp), %ecx\n\t" -+ "movl %ecx, 16(%eax)\n\t" -+ "movl 4(%esp), %ecx\n\t" -+ "movl %ecx, 20(%eax)\n\t"); -+#else /* CONFIG_FRAME_POINTER */ -+ __asm__ ("movl 4(%esp), %eax\n\t" -+ "movl %ebx, 0(%eax)\n\t" -+ "movl %esi, 4(%eax)\n\t" -+ "movl %edi, 8(%eax)\n\t" -+ "movl %ebp, 12(%eax)\n\t" -+ "leal 4(%esp), %ecx\n\t" -+ "movl %ecx, 16(%eax)\n\t" -+ "movl 0(%esp), %ecx\n\t" -+ "movl %ecx, 20(%eax)\n\t"); -+#endif /* CONFIG_FRAME_POINTER */ -+ return 0; -+} -+ -+void asmlinkage -+kdba_longjmp(kdb_jmp_buf *jb, int reason) -+{ -+#ifdef CONFIG_FRAME_POINTER -+ __asm__("movl 8(%esp), %ecx\n\t" -+ "movl 12(%esp), %eax\n\t" -+ "movl 20(%ecx), %edx\n\t" -+ "movl 0(%ecx), %ebx\n\t" -+ "movl 4(%ecx), %esi\n\t" -+ "movl 8(%ecx), %edi\n\t" -+ "movl 12(%ecx), %ebp\n\t" -+ "movl 16(%ecx), %esp\n\t" -+ "jmp *%edx\n"); -+#else /* CONFIG_FRAME_POINTER */ -+ __asm__("movl 4(%esp), %ecx\n\t" -+ "movl 8(%esp), %eax\n\t" -+ "movl 20(%ecx), %edx\n\t" -+ "movl 0(%ecx), %ebx\n\t" -+ "movl 4(%ecx), %esi\n\t" -+ "movl 8(%ecx), %edi\n\t" -+ "movl 12(%ecx), %ebp\n\t" -+ "movl 16(%ecx), %esp\n\t" -+ "jmp *%edx\n"); -+#endif /* CONFIG_FRAME_POINTER */ -+} -+ -+#else /* CONFIG_X86_32 */ -+ -+int asmlinkage -+kdba_setjmp(kdb_jmp_buf *jb) -+{ -+#ifdef CONFIG_FRAME_POINTER -+ __asm__ __volatile__ -+ ("movq %%rbx, (0*8)(%%rdi);" -+ "movq %%rcx, (1*8)(%%rdi);" -+ "movq %%r12, (2*8)(%%rdi);" -+ "movq %%r13, (3*8)(%%rdi);" -+ "movq %%r14, (4*8)(%%rdi);" -+ "movq %%r15, (5*8)(%%rdi);" -+ "leaq 16(%%rsp), %%rdx;" -+ "movq %%rdx, (6*8)(%%rdi);" -+ "movq %%rax, (7*8)(%%rdi)" -+ : -+ : "a" (__builtin_return_address(0)), -+ "c" (__builtin_frame_address(1)) -+ ); -+#else /* !CONFIG_FRAME_POINTER */ -+ __asm__ __volatile__ -+ ("movq %%rbx, (0*8)(%%rdi);" -+ "movq %%rbp, (1*8)(%%rdi);" -+ "movq %%r12, (2*8)(%%rdi);" -+ "movq %%r13, (3*8)(%%rdi);" -+ "movq %%r14, (4*8)(%%rdi);" -+ "movq 
%%r15, (5*8)(%%rdi);" -+ "leaq 8(%%rsp), %%rdx;" -+ "movq %%rdx, (6*8)(%%rdi);" -+ "movq %%rax, (7*8)(%%rdi)" -+ : -+ : "a" (__builtin_return_address(0)) -+ ); -+#endif /* CONFIG_FRAME_POINTER */ -+ return 0; -+} -+ -+void asmlinkage -+kdba_longjmp(kdb_jmp_buf *jb, int reason) -+{ -+ __asm__("movq (0*8)(%rdi),%rbx;" -+ "movq (1*8)(%rdi),%rbp;" -+ "movq (2*8)(%rdi),%r12;" -+ "movq (3*8)(%rdi),%r13;" -+ "movq (4*8)(%rdi),%r14;" -+ "movq (5*8)(%rdi),%r15;" -+ "movq (7*8)(%rdi),%rdx;" -+ "movq (6*8)(%rdi),%rsp;" -+ "mov %rsi, %rax;" -+ "jmpq *%rdx"); -+} -+#endif /* CONFIG_X86_32 */ -+ -+#ifdef CONFIG_X86_32 -+/* -+ * kdba_stackdepth -+ * -+ * Print processes that are using more than a specific percentage of their -+ * stack. -+ * -+ * Inputs: -+ * argc argument count -+ * argv argument vector -+ * Outputs: -+ * None. -+ * Returns: -+ * zero for success, a kdb diagnostic if error -+ * Locking: -+ * none. -+ * Remarks: -+ * If no percentage is supplied, it uses 60. -+ */ -+ -+static void -+kdba_stackdepth1(struct task_struct *p, unsigned long sp) -+{ -+ struct thread_info *tinfo; -+ int used; -+ const char *type; -+ kdb_ps1(p); -+ do { -+ tinfo = (struct thread_info *)(sp & -THREAD_SIZE); -+ used = sizeof(*tinfo) + THREAD_SIZE - (sp & (THREAD_SIZE-1)); -+ type = NULL; -+ if (kdb_task_has_cpu(p)) { -+ struct kdb_activation_record ar; -+ memset(&ar, 0, sizeof(ar)); -+ kdba_get_stack_info_alternate(sp, -1, &ar); -+ type = ar.stack.id; -+ } -+ if (!type) -+ type = "process"; -+ kdb_printf(" %s stack %p sp %lx used %d\n", type, tinfo, sp, used); -+ sp = tinfo->previous_esp; -+ } while (sp); -+} -+ -+static int -+kdba_stackdepth(int argc, const char **argv) -+{ -+ int diag, cpu, threshold, used, over; -+ unsigned long percentage; -+ unsigned long esp; -+ long offset = 0; -+ int nextarg; -+ struct task_struct *p, *g; -+ struct kdb_running_process *krp; -+ struct thread_info *tinfo; -+ -+ if (argc == 0) { -+ percentage = 60; -+ } else if (argc == 1) { -+ nextarg = 1; -+ diag = kdbgetaddrarg(argc, argv, &nextarg, &percentage, &offset, NULL); -+ if (diag) -+ return diag; -+ } else { -+ return KDB_ARGCOUNT; -+ } -+ percentage = max_t(int, percentage, 1); -+ percentage = min_t(int, percentage, 100); -+ threshold = ((2 * THREAD_SIZE * percentage) / 100 + 1) >> 1; -+ kdb_printf("stackdepth: processes using more than %ld%% (%d bytes) of stack\n", -+ percentage, threshold); -+ -+ /* Run the active tasks first, they can have multiple stacks */ -+ for (cpu = 0, krp = kdb_running_process; cpu < NR_CPUS; ++cpu, ++krp) { -+ if (!cpu_online(cpu)) -+ continue; -+ p = krp->p; -+ esp = krp->arch.sp; -+ over = 0; -+ do { -+ tinfo = (struct thread_info *)(esp & -THREAD_SIZE); -+ used = sizeof(*tinfo) + THREAD_SIZE - (esp & (THREAD_SIZE-1)); -+ if (used >= threshold) -+ over = 1; -+ esp = tinfo->previous_esp; -+ } while (esp); -+ if (over) -+ kdba_stackdepth1(p, krp->arch.sp); -+ } -+ /* Now the tasks that are not on cpus */ -+ kdb_do_each_thread(g, p) { -+ if (kdb_task_has_cpu(p)) -+ continue; -+ esp = p->thread.sp; -+ used = sizeof(*tinfo) + THREAD_SIZE - (esp & (THREAD_SIZE-1)); -+ over = used >= threshold; -+ if (over) -+ kdba_stackdepth1(p, esp); -+ } kdb_while_each_thread(g, p); -+ -+ return 0; -+} -+#else /* CONFIG_X86_32 */ -+ -+ -+/* -+ * kdba_entry -+ * -+ * This is the interface routine between -+ * the notifier die_chain and kdb -+ */ -+static int kdba_entry( struct notifier_block *b, unsigned long val, void *v) -+{ -+ struct die_args *args = v; -+ int err, trap, ret = 0; -+ struct pt_regs *regs; -+ -+ regs = 
args->regs; -+ err = args->err; -+ trap = args->trapnr; -+ switch (val){ -+#ifdef CONFIG_SMP -+ case DIE_NMI_IPI: -+ ret = kdb_ipi(regs, NULL); -+ break; -+#endif /* CONFIG_SMP */ -+ case DIE_OOPS: -+ ret = kdb(KDB_REASON_OOPS, err, regs); -+ break; -+ case DIE_CALL: -+ ret = kdb(KDB_REASON_ENTER, err, regs); -+ break; -+ case DIE_DEBUG: -+ ret = kdb(KDB_REASON_DEBUG, err, regs); -+ break; -+ case DIE_NMIWATCHDOG: -+ ret = kdb(KDB_REASON_NMI, err, regs); -+ break; -+ case DIE_INT3: -+ ret = kdb(KDB_REASON_BREAK, err, regs); -+ // falls thru -+ default: -+ break; -+ } -+ return (ret ? NOTIFY_STOP : NOTIFY_DONE); -+} -+ -+/* -+ * notifier block for kdb entry -+ */ -+static struct notifier_block kdba_notifier = { -+ .notifier_call = kdba_entry -+}; -+#endif /* CONFIG_X86_32 */ -+ -+asmlinkage int kdb_call(void); -+ -+/* Executed once on each cpu at startup. */ -+void -+kdba_cpu_up(void) -+{ -+} -+ -+static int __init -+kdba_arch_init(void) -+{ -+ set_intr_gate(KDBENTER_VECTOR, kdb_call); -+ return 0; -+} -+ -+arch_initcall(kdba_arch_init); -+ -+/* -+ * kdba_init -+ * -+ * Architecture specific initialization. -+ * -+ * Parameters: -+ * None. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * None. -+ */ -+ -+void __init -+kdba_init(void) -+{ -+ kdba_arch_init(); /* Need to register KDBENTER_VECTOR early */ -+ kdb_register("pt_regs", kdba_pt_regs, "address", "Format struct pt_regs", 0); -+#ifdef CONFIG_X86_32 -+ kdb_register("stackdepth", kdba_stackdepth, "[percentage]", "Print processes using >= stack percentage", 0); -+#else -+ register_die_notifier(&kdba_notifier); -+#endif -+ return; -+} -+ -+/* -+ * kdba_adjust_ip -+ * -+ * Architecture specific adjustment of instruction pointer before leaving -+ * kdb. -+ * -+ * Parameters: -+ * reason The reason KDB was invoked -+ * error The hardware-defined error code -+ * regs The exception frame at time of fault/breakpoint. If reason -+ * is SILENT or CPU_UP then regs is NULL, otherwise it should -+ * always be valid. -+ * Returns: -+ * None. -+ * Locking: -+ * None. -+ * Remarks: -+ * noop on ix86. -+ */ -+ -+void -+kdba_adjust_ip(kdb_reason_t reason, int error, struct pt_regs *regs) -+{ -+ return; -+} -+ -+void -+kdba_set_current_task(const struct task_struct *p) -+{ -+ kdb_current_task = p; -+ if (kdb_task_has_cpu(p)) { -+ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p); -+ kdb_current_regs = krp->regs; -+ return; -+ } -+ kdb_current_regs = NULL; -+} -+ -+#ifdef CONFIG_X86_32 -+/* -+ * asm-i386 uaccess.h supplies __copy_to_user which relies on MMU to -+ * trap invalid addresses in the _xxx fields. 
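kdba_setsinglestep() and kdba_clearsinglestep() earlier in this file arrange a one-instruction step by setting the trap flag while temporarily masking interrupts, remembering the original IF state in kdb's A_IF flag. A minimal sketch of just the flag arithmetic, using the architectural bit positions (TF is bit 8, IF is bit 9) and a caller-supplied boolean instead of KDB_STATE; as in the code above, leaving single-step mode only restores IF and does not touch TF here.

#include <stdbool.h>

#define X86_EFLAGS_TF 0x00000100UL   /* trap flag: debug trap after one insn */
#define X86_EFLAGS_IF 0x00000200UL   /* interrupt enable flag */

/* Begin single stepping: remember whether interrupts were enabled,
 * then set TF and clear IF so no interrupt is delivered mid-step. */
static unsigned long step_enter(unsigned long flags, bool *saved_if)
{
	*saved_if = (flags & X86_EFLAGS_IF) != 0;
	return (flags | X86_EFLAGS_TF) & ~X86_EFLAGS_IF;
}

/* End single stepping: put the interrupt flag back the way it was. */
static unsigned long step_leave(unsigned long flags, bool saved_if)
{
	return saved_if ? (flags | X86_EFLAGS_IF) : (flags & ~X86_EFLAGS_IF);
}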
Verify the other address -+ * of the pair is valid by accessing the first and last byte ourselves, -+ * then any access violations should only be caused by the _xxx -+ * addresses, -+ */ -+ -+int -+kdba_putarea_size(unsigned long to_xxx, void *from, size_t size) -+{ -+ mm_segment_t oldfs = get_fs(); -+ int r; -+ char c; -+ c = *((volatile char *)from); -+ c = *((volatile char *)from + size - 1); -+ -+ if (to_xxx < PAGE_OFFSET) { -+ return kdb_putuserarea_size(to_xxx, from, size); -+ } -+ -+ set_fs(KERNEL_DS); -+ r = __copy_to_user_inatomic((void __user *)to_xxx, from, size); -+ set_fs(oldfs); -+ return r; -+} -+ -+int -+kdba_getarea_size(void *to, unsigned long from_xxx, size_t size) -+{ -+ mm_segment_t oldfs = get_fs(); -+ int r; -+ *((volatile char *)to) = '\0'; -+ *((volatile char *)to + size - 1) = '\0'; -+ -+ if (from_xxx < PAGE_OFFSET) { -+ return kdb_getuserarea_size(to, from_xxx, size); -+ } -+ -+ set_fs(KERNEL_DS); -+ switch (size) { -+ case 1: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 1); -+ break; -+ case 2: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 2); -+ break; -+ case 4: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 4); -+ break; -+ case 8: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 8); -+ break; -+ default: -+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, size); -+ break; -+ } -+ set_fs(oldfs); -+ return r; -+} -+ -+int -+kdba_verify_rw(unsigned long addr, size_t size) -+{ -+ unsigned char data[size]; -+ return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size)); -+} -+#endif /* CONFIG_X86_32 */ -+ -+#ifdef CONFIG_SMP -+ -+#include -+ -+gate_desc save_idt[NR_VECTORS]; -+ -+void kdba_takeover_vector(int vector) -+{ -+ memcpy(&save_idt[vector], &idt_table[vector], sizeof(gate_desc)); -+ set_intr_gate(KDB_VECTOR, kdb_interrupt); -+ return; -+} -+ -+void kdba_giveback_vector(int vector) -+{ -+ native_write_idt_entry(idt_table, vector, &save_idt[vector]); -+ return; -+} -+ -+/* When first entering KDB, try a normal IPI. That reduces backtrace problems -+ * on the other cpus. -+ */ -+void -+smp_kdb_stop(void) -+{ -+ if (!KDB_FLAG(NOIPI)) { -+ kdba_takeover_vector(KDB_VECTOR); -+ apic->send_IPI_allbutself(KDB_VECTOR); -+ } -+} -+ -+/* The normal KDB IPI handler */ -+#ifdef CONFIG_X86_64 -+asmlinkage -+#endif -+void -+smp_kdb_interrupt(struct pt_regs *regs) -+{ -+ struct pt_regs *old_regs = set_irq_regs(regs); -+ ack_APIC_irq(); -+ irq_enter(); -+ kdb_ipi(regs, NULL); -+ irq_exit(); -+ set_irq_regs(old_regs); -+} -+ -+/* Invoked once from kdb_wait_for_cpus when waiting for cpus. For those cpus -+ * that have not responded to the normal KDB interrupt yet, hit them with an -+ * NMI event. 
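kdba_putarea_size()/kdba_getarea_size() above deliberately touch the first and last byte of the kdb-side buffer before the bulk copy, so that any later access violation can only have been caused by the untrusted _xxx address; the copy itself then goes through __copy_to_user_inatomic so a bad address is reported rather than faulting uncontrolled. A plain user-space sketch of the bounds-touch idea, with memcpy standing in for the inatomic copy:

#include <stddef.h>
#include <string.h>

/* Sketch: read the bounds of the trusted, kdb-side buffer up front, so a
 * fault during the copy can only come from the untrusted destination. */
static int checked_put(void *untrusted_dst, const void *trusted_src, size_t size)
{
	volatile const char *s = trusted_src;
	char c;

	if (size == 0)
		return 0;
	c = s[0];                  /* a fault here would be a kdb bug ...    */
	c = s[size - 1];           /* ... not a bad user-supplied address    */
	(void)c;
	memcpy(untrusted_dst, trusted_src, size);
	return 0;
}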
-+ */ -+void -+kdba_wait_for_cpus(void) -+{ -+ int c; -+ if (KDB_FLAG(CATASTROPHIC)) -+ return; -+ kdb_printf(" Sending NMI to non-responding cpus: "); -+ for_each_online_cpu(c) { -+ if (kdb_running_process[c].seqno < kdb_seqno - 1) { -+ kdb_printf(" %d", c); -+ apic->send_IPI_mask(cpumask_of(c), NMI_VECTOR); -+ } -+ } -+ kdb_printf(".\n"); -+} -+ -+#endif /* CONFIG_SMP */ -+ -+#ifdef CONFIG_KDB_KDUMP -+void kdba_kdump_prepare(struct pt_regs *regs) -+{ -+ int i; -+ struct pt_regs r; -+ if (regs == NULL) -+ regs = &r; -+ -+ for (i = 1; i < NR_CPUS; ++i) { -+ if (!cpu_online(i)) -+ continue; -+ -+ KDB_STATE_SET_CPU(KEXEC, i); -+ } -+ -+ machine_crash_shutdown(regs); -+} -+ -+extern void halt_current_cpu(struct pt_regs *); -+ -+void kdba_kdump_shutdown_slave(struct pt_regs *regs) -+{ -+#ifndef CONFIG_XEN -+ halt_current_cpu(regs); -+#endif /* CONFIG_XEN */ -+} -+ -+#endif /* CONFIG_KDB_KDUMP */ ---- /dev/null -+++ b/arch/x86/kdb/pc_keyb.h -@@ -0,0 +1,137 @@ -+/* -+ * include/linux/pc_keyb.h -+ * -+ * PC Keyboard And Keyboard Controller -+ * -+ * (c) 1997 Martin Mares -+ */ -+ -+/* -+ * Configuration Switches -+ */ -+ -+#undef KBD_REPORT_ERR /* Report keyboard errors */ -+#define KBD_REPORT_UNKN /* Report unknown scan codes */ -+#define KBD_REPORT_TIMEOUTS /* Report keyboard timeouts */ -+#undef KBD_IS_FOCUS_9000 /* We have the brain-damaged FOCUS-9000 keyboard */ -+#undef INITIALIZE_MOUSE /* Define if your PS/2 mouse needs initialization. */ -+ -+ -+ -+#define KBD_INIT_TIMEOUT 1000 /* Timeout in ms for initializing the keyboard */ -+#define KBC_TIMEOUT 250 /* Timeout in ms for sending to keyboard controller */ -+#define KBD_TIMEOUT 1000 /* Timeout in ms for keyboard command acknowledge */ -+ -+/* -+ * Internal variables of the driver -+ */ -+ -+extern unsigned char pckbd_read_mask; -+extern unsigned char aux_device_present; -+ -+/* -+ * Keyboard Controller Registers on normal PCs. 
-+ */ -+ -+#define KBD_STATUS_REG 0x64 /* Status register (R) */ -+#define KBD_CNTL_REG 0x64 /* Controller command register (W) */ -+#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */ -+ -+/* -+ * Keyboard Controller Commands -+ */ -+ -+#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */ -+#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */ -+#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */ -+#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */ -+#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */ -+#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */ -+#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */ -+#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */ -+#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */ -+#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */ -+#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if -+ initiated by the auxiliary device */ -+#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */ -+ -+/* -+ * Keyboard Commands -+ */ -+ -+#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */ -+#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */ -+#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */ -+#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */ -+#define KBD_CMD_RESET 0xFF /* Reset */ -+ -+/* -+ * Keyboard Replies -+ */ -+ -+#define KBD_REPLY_POR 0xAA /* Power on reset */ -+#define KBD_REPLY_ACK 0xFA /* Command ACK */ -+#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */ -+ -+/* -+ * Status Register Bits -+ */ -+ -+#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */ -+#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */ -+#define KBD_STAT_SELFTEST 0x04 /* Self test successful */ -+#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */ -+#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */ -+#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */ -+#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */ -+#define KBD_STAT_PERR 0x80 /* Parity error */ -+ -+#define AUX_STAT_OBF (KBD_STAT_OBF | KBD_STAT_MOUSE_OBF) -+ -+/* -+ * Controller Mode Register Bits -+ */ -+ -+#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */ -+#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */ -+#define KBD_MODE_SYS 0x04 /* The system flag (?) */ -+#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */ -+#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */ -+#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */ -+#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */ -+#define KBD_MODE_RFU 0x80 -+ -+/* -+ * Mouse Commands -+ */ -+ -+#define AUX_SET_RES 0xE8 /* Set resolution */ -+#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */ -+#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */ -+#define AUX_GET_SCALE 0xE9 /* Get scaling factor */ -+#define AUX_SET_STREAM 0xEA /* Set stream mode */ -+#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */ -+#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */ -+#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */ -+#define AUX_RESET 0xFF /* Reset aux device */ -+#define AUX_ACK 0xFA /* Command byte ACK. 
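kdba_local_arch_setup()/kdba_local_arch_cleanup() earlier in this patch use these definitions to read the 8042 command byte, flip KBD_MODE_KBD_INT, and write it back, waiting for the input buffer to drain before each step. A condensed sketch of that read-modify-write sequence; the kbd_* helpers here are hypothetical stand-ins for the port-access macros defined a little further down in this header.

#include <stdbool.h>

/* Hypothetical stand-ins for inb()/outb() on ports 0x60/0x64. */
extern unsigned char kbd_status(void);          /* read  KBD_STATUS_REG */
extern unsigned char kbd_data_in(void);         /* read  KBD_DATA_REG   */
extern void kbd_data_out(unsigned char v);      /* write KBD_DATA_REG   */
extern void kbd_command(unsigned char v);       /* write KBD_CNTL_REG   */

#define KBD_CCMD_READ_MODE   0x20
#define KBD_CCMD_WRITE_MODE  0x60
#define KBD_STAT_OBF         0x01
#define KBD_STAT_IBF         0x02
#define KBD_MODE_KBD_INT     0x01

static void wait_input_empty(void)
{
	while (kbd_status() & KBD_STAT_IBF)
		;                       /* controller still consuming input */
}

/* Enable or disable IRQ1 generation by rewriting the 8042 command byte. */
static void kbd_set_irq(bool enable)
{
	unsigned char mode;
	int timeout;

	wait_input_empty();
	kbd_command(KBD_CCMD_READ_MODE);
	wait_input_empty();
	for (timeout = 200 * 1000; timeout && !(kbd_status() & KBD_STAT_OBF); timeout--)
		;                       /* bounded wait for the reply byte */
	mode = kbd_data_in();
	if (enable)
		mode |= KBD_MODE_KBD_INT;
	else
		mode &= ~KBD_MODE_KBD_INT;
	wait_input_empty();
	kbd_command(KBD_CCMD_WRITE_MODE);
	wait_input_empty();
	kbd_data_out(mode);
}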
*/ -+ -+#define AUX_BUF_SIZE 2048 /* This might be better divisible by -+ three to make overruns stay in sync -+ but then the read function would need -+ a lock etc - ick */ -+ -+struct aux_queue { -+ unsigned long head; -+ unsigned long tail; -+ wait_queue_head_t proc_list; -+ struct fasync_struct *fasync; -+ unsigned char buf[AUX_BUF_SIZE]; -+}; -+ -+ -+/* How to access the keyboard macros on this platform. */ -+#define kbd_read_input() inb(KBD_DATA_REG) -+#define kbd_read_status() inb(KBD_STATUS_REG) -+#define kbd_write_output(val) outb(val, KBD_DATA_REG) -+#define kbd_write_command(val) outb(val, KBD_CNTL_REG) ---- /dev/null -+++ b/arch/x86/kdb/x86-dis.c -@@ -0,0 +1,4688 @@ -+/* Print i386 instructions for GDB, the GNU debugger. -+ Copyright 1988, 1989, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -+ 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. -+ -+ This file is part of GDB. -+ -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 2 of the License, or -+ (at your option) any later version. -+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -+ -+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. -+ * Run through col -b to remove trailing whitespace and various #ifdef/ifndef -+ * __KERNEL__ added. -+ * Keith Owens 15 May 2006 -+ */ -+ -+/* 80386 instruction printer by Pace Willisson (pace@prep.ai.mit.edu) -+ July 1988 -+ modified by John Hassey (hassey@dg-rtp.dg.com) -+ x86-64 support added by Jan Hubicka (jh@suse.cz) -+ VIA PadLock support by Michal Ludvig (mludvig@suse.cz). */ -+ -+/* The main tables describing the instructions is essentially a copy -+ of the "Opcode Map" chapter (Appendix A) of the Intel 80386 -+ Programmers Manual. Usually, there is a capital letter, followed -+ by a small letter. The capital letter tell the addressing mode, -+ and the small letter tells about the operand size. Refer to -+ the Intel manual for details. */ -+ -+#ifdef __KERNEL__ -+#include -+#include -+#include -+#include -+#define abort() BUG() -+#else /* __KERNEL__ */ -+#include "dis-asm.h" -+#include "sysdep.h" -+#include "opintl.h" -+#endif /* __KERNEL__ */ -+ -+#define MAXLEN 20 -+ -+#ifndef __KERNEL__ -+#include -+#endif /* __KERNEL__ */ -+ -+#ifndef UNIXWARE_COMPAT -+/* Set non-zero for broken, compatible instructions. Set to zero for -+ non-broken opcodes. 
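struct aux_queue in pc_keyb.h above is a simple byte queue: head and tail indices into buf[AUX_BUF_SIZE], with the comment noting the usual overrun trade-off. A minimal sketch of one way such a head/tail ring can be driven, assuming a power-of-two size (the 2048 above qualifies) so the wrap is a mask; this illustrates the idea rather than the original driver's exact arithmetic.

#define RING_SIZE 2048                    /* like AUX_BUF_SIZE; power of two */

struct ring {
	unsigned long head;               /* next slot to write */
	unsigned long tail;               /* next slot to read  */
	unsigned char buf[RING_SIZE];
};

static int ring_put(struct ring *r, unsigned char b)
{
	if (r->head - r->tail == RING_SIZE)
		return -1;                /* full: drop the byte */
	r->buf[r->head++ & (RING_SIZE - 1)] = b;
	return 0;
}

static int ring_get(struct ring *r, unsigned char *b)
{
	if (r->head == r->tail)
		return -1;                /* empty */
	*b = r->buf[r->tail++ & (RING_SIZE - 1)];
	return 0;
}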
*/ -+#define UNIXWARE_COMPAT 1 -+#endif -+ -+static int fetch_data (struct disassemble_info *, bfd_byte *); -+static void ckprefix (void); -+static const char *prefix_name (int, int); -+static int print_insn (bfd_vma, disassemble_info *); -+static void dofloat (int); -+static void OP_ST (int, int); -+static void OP_STi (int, int); -+static int putop (const char *, int); -+static void oappend (const char *); -+static void append_seg (void); -+static void OP_indirE (int, int); -+static void print_operand_value (char *, int, bfd_vma); -+static void OP_E (int, int); -+static void OP_G (int, int); -+static bfd_vma get64 (void); -+static bfd_signed_vma get32 (void); -+static bfd_signed_vma get32s (void); -+static int get16 (void); -+static void set_op (bfd_vma, int); -+static void OP_REG (int, int); -+static void OP_IMREG (int, int); -+static void OP_I (int, int); -+static void OP_I64 (int, int); -+static void OP_sI (int, int); -+static void OP_J (int, int); -+static void OP_SEG (int, int); -+static void OP_DIR (int, int); -+static void OP_OFF (int, int); -+static void OP_OFF64 (int, int); -+static void ptr_reg (int, int); -+static void OP_ESreg (int, int); -+static void OP_DSreg (int, int); -+static void OP_C (int, int); -+static void OP_D (int, int); -+static void OP_T (int, int); -+static void OP_Rd (int, int); -+static void OP_MMX (int, int); -+static void OP_XMM (int, int); -+static void OP_EM (int, int); -+static void OP_EX (int, int); -+static void OP_MS (int, int); -+static void OP_XS (int, int); -+static void OP_M (int, int); -+static void OP_VMX (int, int); -+static void OP_0fae (int, int); -+static void OP_0f07 (int, int); -+static void NOP_Fixup (int, int); -+static void OP_3DNowSuffix (int, int); -+static void OP_SIMD_Suffix (int, int); -+static void SIMD_Fixup (int, int); -+static void PNI_Fixup (int, int); -+static void SVME_Fixup (int, int); -+static void INVLPG_Fixup (int, int); -+static void BadOp (void); -+static void SEG_Fixup (int, int); -+static void VMX_Fixup (int, int); -+ -+struct dis_private { -+ /* Points to first byte not fetched. */ -+ bfd_byte *max_fetched; -+ bfd_byte the_buffer[MAXLEN]; -+ bfd_vma insn_start; -+ int orig_sizeflag; -+#ifndef __KERNEL__ -+ jmp_buf bailout; -+#endif /* __KERNEL__ */ -+}; -+ -+/* The opcode for the fwait instruction, which we treat as a prefix -+ when we can. */ -+#define FWAIT_OPCODE (0x9b) -+ -+/* Set to 1 for 64bit mode disassembly. */ -+static int mode_64bit; -+ -+/* Flags for the prefixes for the current instruction. See below. */ -+static int prefixes; -+ -+/* REX prefix the current instruction. See below. */ -+static int rex; -+/* Bits of REX we've already used. */ -+static int rex_used; -+#define REX_MODE64 8 -+#define REX_EXTX 4 -+#define REX_EXTY 2 -+#define REX_EXTZ 1 -+/* Mark parts used in the REX prefix. When we are testing for -+ empty prefix (for 8bit register REX extension), just mask it -+ out. Otherwise test for REX bit is excuse for existence of REX -+ only in case value is nonzero. */ -+#define USED_REX(value) \ -+ { \ -+ if (value) \ -+ rex_used |= (rex & value) ? (value) | 0x40 : 0; \ -+ else \ -+ rex_used |= 0x40; \ -+ } -+ -+/* Flags for prefixes which we somehow handled when printing the -+ current instruction. */ -+static int used_prefixes; -+ -+/* Flags stored in PREFIXES. 
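The REX_* masks above name the four low bits of a 0x40-0x4F REX prefix, which the USED_REX macro tracks so the printer knows which parts of the prefix an operand actually consumed. A small sketch of decoding such a byte into those bits (REX_MODE64, REX_EXTX, REX_EXTY, REX_EXTZ correspond to Intel's W, R, X, B), independent of the table machinery here:

#include <stdbool.h>
#include <stdint.h>

#define REX_MODE64 8   /* REX.W: 64-bit operand size        */
#define REX_EXTX   4   /* REX.R: extends the ModRM reg field */
#define REX_EXTY   2   /* REX.X: extends the SIB index field */
#define REX_EXTZ   1   /* REX.B: extends ModRM r/m or base   */

struct rex_bits {
	bool w, r, x, b;
};

/* Returns true and fills *out if the byte is a REX prefix (0x40-0x4f);
 * only meaningful when disassembling 64-bit code. */
static bool decode_rex(uint8_t byte, struct rex_bits *out)
{
	if ((byte & 0xf0) != 0x40)
		return false;
	out->w = byte & REX_MODE64;
	out->r = byte & REX_EXTX;
	out->x = byte & REX_EXTY;
	out->b = byte & REX_EXTZ;
	return true;
}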
*/ -+#define PREFIX_REPZ 1 -+#define PREFIX_REPNZ 2 -+#define PREFIX_LOCK 4 -+#define PREFIX_CS 8 -+#define PREFIX_SS 0x10 -+#define PREFIX_DS 0x20 -+#define PREFIX_ES 0x40 -+#define PREFIX_FS 0x80 -+#define PREFIX_GS 0x100 -+#define PREFIX_DATA 0x200 -+#define PREFIX_ADDR 0x400 -+#define PREFIX_FWAIT 0x800 -+ -+/* Make sure that bytes from INFO->PRIVATE_DATA->BUFFER (inclusive) -+ to ADDR (exclusive) are valid. Returns 1 for success, longjmps -+ on error. */ -+#define FETCH_DATA(info, addr) \ -+ ((addr) <= ((struct dis_private *) (info->private_data))->max_fetched \ -+ ? 1 : fetch_data ((info), (addr))) -+ -+static int -+fetch_data (struct disassemble_info *info, bfd_byte *addr) -+{ -+ int status; -+ struct dis_private *priv = (struct dis_private *) info->private_data; -+ bfd_vma start = priv->insn_start + (priv->max_fetched - priv->the_buffer); -+ -+ status = (*info->read_memory_func) (start, -+ priv->max_fetched, -+ addr - priv->max_fetched, -+ info); -+ if (status != 0) -+ { -+ /* If we did manage to read at least one byte, then -+ print_insn_i386 will do something sensible. Otherwise, print -+ an error. We do that here because this is where we know -+ STATUS. */ -+ if (priv->max_fetched == priv->the_buffer) -+ (*info->memory_error_func) (status, start, info); -+#ifndef __KERNEL__ -+ longjmp (priv->bailout, 1); -+#else /* __KERNEL__ */ -+ /* XXX - what to do? */ -+ kdb_printf("Hmm. longjmp.\n"); -+#endif /* __KERNEL__ */ -+ } -+ else -+ priv->max_fetched = addr; -+ return 1; -+} -+ -+#define XX NULL, 0 -+ -+#define Eb OP_E, b_mode -+#define Ev OP_E, v_mode -+#define Ed OP_E, d_mode -+#define Eq OP_E, q_mode -+#define Edq OP_E, dq_mode -+#define Edqw OP_E, dqw_mode -+#define indirEv OP_indirE, branch_v_mode -+#define indirEp OP_indirE, f_mode -+#define Em OP_E, m_mode -+#define Ew OP_E, w_mode -+#define Ma OP_E, v_mode -+#define M OP_M, 0 /* lea, lgdt, etc. 
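ckprefix(), declared above, walks the leading bytes of an instruction and accumulates these PREFIX_* flags before the opcode proper is decoded. A stand-alone sketch of that classification step; the byte values are the standard x86 prefix encodings (an assumption here, since ckprefix()'s body appears later in the file), with 0x9b being the fwait opcode the disassembler treats as a prefix when it can.

#include <stdint.h>

#define PREFIX_REPZ  1
#define PREFIX_REPNZ 2
#define PREFIX_LOCK  4
#define PREFIX_CS    8
#define PREFIX_SS    0x10
#define PREFIX_DS    0x20
#define PREFIX_ES    0x40
#define PREFIX_FS    0x80
#define PREFIX_GS    0x100
#define PREFIX_DATA  0x200
#define PREFIX_ADDR  0x400
#define PREFIX_FWAIT 0x800

/* Map one leading byte to its PREFIX_* flag, or 0 if it is not a prefix. */
static int classify_prefix(uint8_t byte)
{
	switch (byte) {
	case 0xf3: return PREFIX_REPZ;
	case 0xf2: return PREFIX_REPNZ;
	case 0xf0: return PREFIX_LOCK;
	case 0x2e: return PREFIX_CS;
	case 0x36: return PREFIX_SS;
	case 0x3e: return PREFIX_DS;
	case 0x26: return PREFIX_ES;
	case 0x64: return PREFIX_FS;
	case 0x65: return PREFIX_GS;
	case 0x66: return PREFIX_DATA;   /* operand-size override */
	case 0x67: return PREFIX_ADDR;   /* address-size override */
	case 0x9b: return PREFIX_FWAIT;  /* fwait, treated as a prefix */
	default:   return 0;
	}
}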
*/ -+#define Mp OP_M, f_mode /* 32 or 48 bit memory operand for LDS, LES etc */ -+#define Gb OP_G, b_mode -+#define Gv OP_G, v_mode -+#define Gd OP_G, d_mode -+#define Gdq OP_G, dq_mode -+#define Gm OP_G, m_mode -+#define Gw OP_G, w_mode -+#define Rd OP_Rd, d_mode -+#define Rm OP_Rd, m_mode -+#define Ib OP_I, b_mode -+#define sIb OP_sI, b_mode /* sign extened byte */ -+#define Iv OP_I, v_mode -+#define Iq OP_I, q_mode -+#define Iv64 OP_I64, v_mode -+#define Iw OP_I, w_mode -+#define I1 OP_I, const_1_mode -+#define Jb OP_J, b_mode -+#define Jv OP_J, v_mode -+#define Cm OP_C, m_mode -+#define Dm OP_D, m_mode -+#define Td OP_T, d_mode -+#define Sv SEG_Fixup, v_mode -+ -+#define RMeAX OP_REG, eAX_reg -+#define RMeBX OP_REG, eBX_reg -+#define RMeCX OP_REG, eCX_reg -+#define RMeDX OP_REG, eDX_reg -+#define RMeSP OP_REG, eSP_reg -+#define RMeBP OP_REG, eBP_reg -+#define RMeSI OP_REG, eSI_reg -+#define RMeDI OP_REG, eDI_reg -+#define RMrAX OP_REG, rAX_reg -+#define RMrBX OP_REG, rBX_reg -+#define RMrCX OP_REG, rCX_reg -+#define RMrDX OP_REG, rDX_reg -+#define RMrSP OP_REG, rSP_reg -+#define RMrBP OP_REG, rBP_reg -+#define RMrSI OP_REG, rSI_reg -+#define RMrDI OP_REG, rDI_reg -+#define RMAL OP_REG, al_reg -+#define RMAL OP_REG, al_reg -+#define RMCL OP_REG, cl_reg -+#define RMDL OP_REG, dl_reg -+#define RMBL OP_REG, bl_reg -+#define RMAH OP_REG, ah_reg -+#define RMCH OP_REG, ch_reg -+#define RMDH OP_REG, dh_reg -+#define RMBH OP_REG, bh_reg -+#define RMAX OP_REG, ax_reg -+#define RMDX OP_REG, dx_reg -+ -+#define eAX OP_IMREG, eAX_reg -+#define eBX OP_IMREG, eBX_reg -+#define eCX OP_IMREG, eCX_reg -+#define eDX OP_IMREG, eDX_reg -+#define eSP OP_IMREG, eSP_reg -+#define eBP OP_IMREG, eBP_reg -+#define eSI OP_IMREG, eSI_reg -+#define eDI OP_IMREG, eDI_reg -+#define AL OP_IMREG, al_reg -+#define AL OP_IMREG, al_reg -+#define CL OP_IMREG, cl_reg -+#define DL OP_IMREG, dl_reg -+#define BL OP_IMREG, bl_reg -+#define AH OP_IMREG, ah_reg -+#define CH OP_IMREG, ch_reg -+#define DH OP_IMREG, dh_reg -+#define BH OP_IMREG, bh_reg -+#define AX OP_IMREG, ax_reg -+#define DX OP_IMREG, dx_reg -+#define indirDX OP_IMREG, indir_dx_reg -+ -+#define Sw OP_SEG, w_mode -+#define Ap OP_DIR, 0 -+#define Ob OP_OFF, b_mode -+#define Ob64 OP_OFF64, b_mode -+#define Ov OP_OFF, v_mode -+#define Ov64 OP_OFF64, v_mode -+#define Xb OP_DSreg, eSI_reg -+#define Xv OP_DSreg, eSI_reg -+#define Yb OP_ESreg, eDI_reg -+#define Yv OP_ESreg, eDI_reg -+#define DSBX OP_DSreg, eBX_reg -+ -+#define es OP_REG, es_reg -+#define ss OP_REG, ss_reg -+#define cs OP_REG, cs_reg -+#define ds OP_REG, ds_reg -+#define fs OP_REG, fs_reg -+#define gs OP_REG, gs_reg -+ -+#define MX OP_MMX, 0 -+#define XM OP_XMM, 0 -+#define EM OP_EM, v_mode -+#define EX OP_EX, v_mode -+#define MS OP_MS, v_mode -+#define XS OP_XS, v_mode -+#define VM OP_VMX, q_mode -+#define OPSUF OP_3DNowSuffix, 0 -+#define OPSIMD OP_SIMD_Suffix, 0 -+ -+#define cond_jump_flag NULL, cond_jump_mode -+#define loop_jcxz_flag NULL, loop_jcxz_mode -+ -+/* bits in sizeflag */ -+#define SUFFIX_ALWAYS 4 -+#define AFLAG 2 -+#define DFLAG 1 -+ -+#define b_mode 1 /* byte operand */ -+#define v_mode 2 /* operand size depends on prefixes */ -+#define w_mode 3 /* word operand */ -+#define d_mode 4 /* double word operand */ -+#define q_mode 5 /* quad word operand */ -+#define t_mode 6 /* ten-byte operand */ -+#define x_mode 7 /* 16-byte XMM operand */ -+#define m_mode 8 /* d_mode in 32bit, q_mode in 64bit mode. 
*/ -+#define cond_jump_mode 9 -+#define loop_jcxz_mode 10 -+#define dq_mode 11 /* operand size depends on REX prefixes. */ -+#define dqw_mode 12 /* registers like dq_mode, memory like w_mode. */ -+#define f_mode 13 /* 4- or 6-byte pointer operand */ -+#define const_1_mode 14 -+#define branch_v_mode 15 /* v_mode for branch. */ -+ -+#define es_reg 100 -+#define cs_reg 101 -+#define ss_reg 102 -+#define ds_reg 103 -+#define fs_reg 104 -+#define gs_reg 105 -+ -+#define eAX_reg 108 -+#define eCX_reg 109 -+#define eDX_reg 110 -+#define eBX_reg 111 -+#define eSP_reg 112 -+#define eBP_reg 113 -+#define eSI_reg 114 -+#define eDI_reg 115 -+ -+#define al_reg 116 -+#define cl_reg 117 -+#define dl_reg 118 -+#define bl_reg 119 -+#define ah_reg 120 -+#define ch_reg 121 -+#define dh_reg 122 -+#define bh_reg 123 -+ -+#define ax_reg 124 -+#define cx_reg 125 -+#define dx_reg 126 -+#define bx_reg 127 -+#define sp_reg 128 -+#define bp_reg 129 -+#define si_reg 130 -+#define di_reg 131 -+ -+#define rAX_reg 132 -+#define rCX_reg 133 -+#define rDX_reg 134 -+#define rBX_reg 135 -+#define rSP_reg 136 -+#define rBP_reg 137 -+#define rSI_reg 138 -+#define rDI_reg 139 -+ -+#define indir_dx_reg 150 -+ -+#define FLOATCODE 1 -+#define USE_GROUPS 2 -+#define USE_PREFIX_USER_TABLE 3 -+#define X86_64_SPECIAL 4 -+ -+#define FLOAT NULL, NULL, FLOATCODE, NULL, 0, NULL, 0 -+ -+#define GRP1b NULL, NULL, USE_GROUPS, NULL, 0, NULL, 0 -+#define GRP1S NULL, NULL, USE_GROUPS, NULL, 1, NULL, 0 -+#define GRP1Ss NULL, NULL, USE_GROUPS, NULL, 2, NULL, 0 -+#define GRP2b NULL, NULL, USE_GROUPS, NULL, 3, NULL, 0 -+#define GRP2S NULL, NULL, USE_GROUPS, NULL, 4, NULL, 0 -+#define GRP2b_one NULL, NULL, USE_GROUPS, NULL, 5, NULL, 0 -+#define GRP2S_one NULL, NULL, USE_GROUPS, NULL, 6, NULL, 0 -+#define GRP2b_cl NULL, NULL, USE_GROUPS, NULL, 7, NULL, 0 -+#define GRP2S_cl NULL, NULL, USE_GROUPS, NULL, 8, NULL, 0 -+#define GRP3b NULL, NULL, USE_GROUPS, NULL, 9, NULL, 0 -+#define GRP3S NULL, NULL, USE_GROUPS, NULL, 10, NULL, 0 -+#define GRP4 NULL, NULL, USE_GROUPS, NULL, 11, NULL, 0 -+#define GRP5 NULL, NULL, USE_GROUPS, NULL, 12, NULL, 0 -+#define GRP6 NULL, NULL, USE_GROUPS, NULL, 13, NULL, 0 -+#define GRP7 NULL, NULL, USE_GROUPS, NULL, 14, NULL, 0 -+#define GRP8 NULL, NULL, USE_GROUPS, NULL, 15, NULL, 0 -+#define GRP9 NULL, NULL, USE_GROUPS, NULL, 16, NULL, 0 -+#define GRP10 NULL, NULL, USE_GROUPS, NULL, 17, NULL, 0 -+#define GRP11 NULL, NULL, USE_GROUPS, NULL, 18, NULL, 0 -+#define GRP12 NULL, NULL, USE_GROUPS, NULL, 19, NULL, 0 -+#define GRP13 NULL, NULL, USE_GROUPS, NULL, 20, NULL, 0 -+#define GRP14 NULL, NULL, USE_GROUPS, NULL, 21, NULL, 0 -+#define GRPAMD NULL, NULL, USE_GROUPS, NULL, 22, NULL, 0 -+#define GRPPADLCK1 NULL, NULL, USE_GROUPS, NULL, 23, NULL, 0 -+#define GRPPADLCK2 NULL, NULL, USE_GROUPS, NULL, 24, NULL, 0 -+ -+#define PREGRP0 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 0, NULL, 0 -+#define PREGRP1 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 1, NULL, 0 -+#define PREGRP2 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 2, NULL, 0 -+#define PREGRP3 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 3, NULL, 0 -+#define PREGRP4 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 4, NULL, 0 -+#define PREGRP5 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 5, NULL, 0 -+#define PREGRP6 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 6, NULL, 0 -+#define PREGRP7 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 7, NULL, 0 -+#define PREGRP8 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 8, NULL, 0 -+#define PREGRP9 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 9, NULL, 0 -+#define PREGRP10 NULL, NULL, 
USE_PREFIX_USER_TABLE, NULL, 10, NULL, 0 -+#define PREGRP11 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 11, NULL, 0 -+#define PREGRP12 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 12, NULL, 0 -+#define PREGRP13 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 13, NULL, 0 -+#define PREGRP14 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 14, NULL, 0 -+#define PREGRP15 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 15, NULL, 0 -+#define PREGRP16 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 16, NULL, 0 -+#define PREGRP17 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 17, NULL, 0 -+#define PREGRP18 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 18, NULL, 0 -+#define PREGRP19 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 19, NULL, 0 -+#define PREGRP20 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 20, NULL, 0 -+#define PREGRP21 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 21, NULL, 0 -+#define PREGRP22 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 22, NULL, 0 -+#define PREGRP23 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 23, NULL, 0 -+#define PREGRP24 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 24, NULL, 0 -+#define PREGRP25 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 25, NULL, 0 -+#define PREGRP26 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 26, NULL, 0 -+#define PREGRP27 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 27, NULL, 0 -+#define PREGRP28 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 28, NULL, 0 -+#define PREGRP29 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 29, NULL, 0 -+#define PREGRP30 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 30, NULL, 0 -+#define PREGRP31 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 31, NULL, 0 -+#define PREGRP32 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 32, NULL, 0 -+ -+#define X86_64_0 NULL, NULL, X86_64_SPECIAL, NULL, 0, NULL, 0 -+ -+typedef void (*op_rtn) (int bytemode, int sizeflag); -+ -+struct dis386 { -+ const char *name; -+ op_rtn op1; -+ int bytemode1; -+ op_rtn op2; -+ int bytemode2; -+ op_rtn op3; -+ int bytemode3; -+}; -+ -+/* Upper case letters in the instruction names here are macros. -+ 'A' => print 'b' if no register operands or suffix_always is true -+ 'B' => print 'b' if suffix_always is true -+ 'C' => print 's' or 'l' ('w' or 'd' in Intel mode) depending on operand -+ . size prefix -+ 'E' => print 'e' if 32-bit form of jcxz -+ 'F' => print 'w' or 'l' depending on address size prefix (loop insns) -+ 'H' => print ",pt" or ",pn" branch hint -+ 'I' => honor following macro letter even in Intel mode (implemented only -+ . for some of the macro letters) -+ 'J' => print 'l' -+ 'L' => print 'l' if suffix_always is true -+ 'N' => print 'n' if instruction has no wait "prefix" -+ 'O' => print 'd', or 'o' -+ 'P' => print 'w', 'l' or 'q' if instruction has an operand size prefix, -+ . or suffix_always is true. print 'q' if rex prefix is present. -+ 'Q' => print 'w', 'l' or 'q' if no register operands or suffix_always -+ . is true -+ 'R' => print 'w', 'l' or 'q' ("wd" or "dq" in intel mode) -+ 'S' => print 'w', 'l' or 'q' if suffix_always is true -+ 'T' => print 'q' in 64bit mode and behave as 'P' otherwise -+ 'U' => print 'q' in 64bit mode and behave as 'Q' otherwise -+ 'W' => print 'b' or 'w' ("w" or "de" in intel mode) -+ 'X' => print 's', 'd' depending on data16 prefix (for XMM) -+ 'Y' => 'q' if instruction has an REX 64bit overwrite prefix -+ -+ Many of the above letters print nothing in Intel mode. See "putop" -+ for the details. -+ -+ Braces '{' and '}', and vertical bars '|', indicate alternative -+ mnemonic strings for AT&T, Intel, X86_64 AT&T, and X86_64 Intel -+ modes. 
In cases where there are only two alternatives, the X86_64 -+ instruction is reserved, and "(bad)" is printed. -+*/ -+ -+static const struct dis386 dis386[] = { -+ /* 00 */ -+ { "addB", Eb, Gb, XX }, -+ { "addS", Ev, Gv, XX }, -+ { "addB", Gb, Eb, XX }, -+ { "addS", Gv, Ev, XX }, -+ { "addB", AL, Ib, XX }, -+ { "addS", eAX, Iv, XX }, -+ { "push{T|}", es, XX, XX }, -+ { "pop{T|}", es, XX, XX }, -+ /* 08 */ -+ { "orB", Eb, Gb, XX }, -+ { "orS", Ev, Gv, XX }, -+ { "orB", Gb, Eb, XX }, -+ { "orS", Gv, Ev, XX }, -+ { "orB", AL, Ib, XX }, -+ { "orS", eAX, Iv, XX }, -+ { "push{T|}", cs, XX, XX }, -+ { "(bad)", XX, XX, XX }, /* 0x0f extended opcode escape */ -+ /* 10 */ -+ { "adcB", Eb, Gb, XX }, -+ { "adcS", Ev, Gv, XX }, -+ { "adcB", Gb, Eb, XX }, -+ { "adcS", Gv, Ev, XX }, -+ { "adcB", AL, Ib, XX }, -+ { "adcS", eAX, Iv, XX }, -+ { "push{T|}", ss, XX, XX }, -+ { "popT|}", ss, XX, XX }, -+ /* 18 */ -+ { "sbbB", Eb, Gb, XX }, -+ { "sbbS", Ev, Gv, XX }, -+ { "sbbB", Gb, Eb, XX }, -+ { "sbbS", Gv, Ev, XX }, -+ { "sbbB", AL, Ib, XX }, -+ { "sbbS", eAX, Iv, XX }, -+ { "push{T|}", ds, XX, XX }, -+ { "pop{T|}", ds, XX, XX }, -+ /* 20 */ -+ { "andB", Eb, Gb, XX }, -+ { "andS", Ev, Gv, XX }, -+ { "andB", Gb, Eb, XX }, -+ { "andS", Gv, Ev, XX }, -+ { "andB", AL, Ib, XX }, -+ { "andS", eAX, Iv, XX }, -+ { "(bad)", XX, XX, XX }, /* SEG ES prefix */ -+ { "daa{|}", XX, XX, XX }, -+ /* 28 */ -+ { "subB", Eb, Gb, XX }, -+ { "subS", Ev, Gv, XX }, -+ { "subB", Gb, Eb, XX }, -+ { "subS", Gv, Ev, XX }, -+ { "subB", AL, Ib, XX }, -+ { "subS", eAX, Iv, XX }, -+ { "(bad)", XX, XX, XX }, /* SEG CS prefix */ -+ { "das{|}", XX, XX, XX }, -+ /* 30 */ -+ { "xorB", Eb, Gb, XX }, -+ { "xorS", Ev, Gv, XX }, -+ { "xorB", Gb, Eb, XX }, -+ { "xorS", Gv, Ev, XX }, -+ { "xorB", AL, Ib, XX }, -+ { "xorS", eAX, Iv, XX }, -+ { "(bad)", XX, XX, XX }, /* SEG SS prefix */ -+ { "aaa{|}", XX, XX, XX }, -+ /* 38 */ -+ { "cmpB", Eb, Gb, XX }, -+ { "cmpS", Ev, Gv, XX }, -+ { "cmpB", Gb, Eb, XX }, -+ { "cmpS", Gv, Ev, XX }, -+ { "cmpB", AL, Ib, XX }, -+ { "cmpS", eAX, Iv, XX }, -+ { "(bad)", XX, XX, XX }, /* SEG DS prefix */ -+ { "aas{|}", XX, XX, XX }, -+ /* 40 */ -+ { "inc{S|}", RMeAX, XX, XX }, -+ { "inc{S|}", RMeCX, XX, XX }, -+ { "inc{S|}", RMeDX, XX, XX }, -+ { "inc{S|}", RMeBX, XX, XX }, -+ { "inc{S|}", RMeSP, XX, XX }, -+ { "inc{S|}", RMeBP, XX, XX }, -+ { "inc{S|}", RMeSI, XX, XX }, -+ { "inc{S|}", RMeDI, XX, XX }, -+ /* 48 */ -+ { "dec{S|}", RMeAX, XX, XX }, -+ { "dec{S|}", RMeCX, XX, XX }, -+ { "dec{S|}", RMeDX, XX, XX }, -+ { "dec{S|}", RMeBX, XX, XX }, -+ { "dec{S|}", RMeSP, XX, XX }, -+ { "dec{S|}", RMeBP, XX, XX }, -+ { "dec{S|}", RMeSI, XX, XX }, -+ { "dec{S|}", RMeDI, XX, XX }, -+ /* 50 */ -+ { "pushS", RMrAX, XX, XX }, -+ { "pushS", RMrCX, XX, XX }, -+ { "pushS", RMrDX, XX, XX }, -+ { "pushS", RMrBX, XX, XX }, -+ { "pushS", RMrSP, XX, XX }, -+ { "pushS", RMrBP, XX, XX }, -+ { "pushS", RMrSI, XX, XX }, -+ { "pushS", RMrDI, XX, XX }, -+ /* 58 */ -+ { "popS", RMrAX, XX, XX }, -+ { "popS", RMrCX, XX, XX }, -+ { "popS", RMrDX, XX, XX }, -+ { "popS", RMrBX, XX, XX }, -+ { "popS", RMrSP, XX, XX }, -+ { "popS", RMrBP, XX, XX }, -+ { "popS", RMrSI, XX, XX }, -+ { "popS", RMrDI, XX, XX }, -+ /* 60 */ -+ { "pusha{P|}", XX, XX, XX }, -+ { "popa{P|}", XX, XX, XX }, -+ { "bound{S|}", Gv, Ma, XX }, -+ { X86_64_0 }, -+ { "(bad)", XX, XX, XX }, /* seg fs */ -+ { "(bad)", XX, XX, XX }, /* seg gs */ -+ { "(bad)", XX, XX, XX }, /* op size prefix */ -+ { "(bad)", XX, XX, XX }, /* adr size prefix */ -+ /* 68 */ -+ { "pushT", Iq, XX, XX }, -+ { 
"imulS", Gv, Ev, Iv }, -+ { "pushT", sIb, XX, XX }, -+ { "imulS", Gv, Ev, sIb }, -+ { "ins{b||b|}", Yb, indirDX, XX }, -+ { "ins{R||R|}", Yv, indirDX, XX }, -+ { "outs{b||b|}", indirDX, Xb, XX }, -+ { "outs{R||R|}", indirDX, Xv, XX }, -+ /* 70 */ -+ { "joH", Jb, XX, cond_jump_flag }, -+ { "jnoH", Jb, XX, cond_jump_flag }, -+ { "jbH", Jb, XX, cond_jump_flag }, -+ { "jaeH", Jb, XX, cond_jump_flag }, -+ { "jeH", Jb, XX, cond_jump_flag }, -+ { "jneH", Jb, XX, cond_jump_flag }, -+ { "jbeH", Jb, XX, cond_jump_flag }, -+ { "jaH", Jb, XX, cond_jump_flag }, -+ /* 78 */ -+ { "jsH", Jb, XX, cond_jump_flag }, -+ { "jnsH", Jb, XX, cond_jump_flag }, -+ { "jpH", Jb, XX, cond_jump_flag }, -+ { "jnpH", Jb, XX, cond_jump_flag }, -+ { "jlH", Jb, XX, cond_jump_flag }, -+ { "jgeH", Jb, XX, cond_jump_flag }, -+ { "jleH", Jb, XX, cond_jump_flag }, -+ { "jgH", Jb, XX, cond_jump_flag }, -+ /* 80 */ -+ { GRP1b }, -+ { GRP1S }, -+ { "(bad)", XX, XX, XX }, -+ { GRP1Ss }, -+ { "testB", Eb, Gb, XX }, -+ { "testS", Ev, Gv, XX }, -+ { "xchgB", Eb, Gb, XX }, -+ { "xchgS", Ev, Gv, XX }, -+ /* 88 */ -+ { "movB", Eb, Gb, XX }, -+ { "movS", Ev, Gv, XX }, -+ { "movB", Gb, Eb, XX }, -+ { "movS", Gv, Ev, XX }, -+ { "movQ", Sv, Sw, XX }, -+ { "leaS", Gv, M, XX }, -+ { "movQ", Sw, Sv, XX }, -+ { "popU", Ev, XX, XX }, -+ /* 90 */ -+ { "nop", NOP_Fixup, 0, XX, XX }, -+ { "xchgS", RMeCX, eAX, XX }, -+ { "xchgS", RMeDX, eAX, XX }, -+ { "xchgS", RMeBX, eAX, XX }, -+ { "xchgS", RMeSP, eAX, XX }, -+ { "xchgS", RMeBP, eAX, XX }, -+ { "xchgS", RMeSI, eAX, XX }, -+ { "xchgS", RMeDI, eAX, XX }, -+ /* 98 */ -+ { "cW{tR||tR|}", XX, XX, XX }, -+ { "cR{tO||tO|}", XX, XX, XX }, -+ { "Jcall{T|}", Ap, XX, XX }, -+ { "(bad)", XX, XX, XX }, /* fwait */ -+ { "pushfT", XX, XX, XX }, -+ { "popfT", XX, XX, XX }, -+ { "sahf{|}", XX, XX, XX }, -+ { "lahf{|}", XX, XX, XX }, -+ /* a0 */ -+ { "movB", AL, Ob64, XX }, -+ { "movS", eAX, Ov64, XX }, -+ { "movB", Ob64, AL, XX }, -+ { "movS", Ov64, eAX, XX }, -+ { "movs{b||b|}", Yb, Xb, XX }, -+ { "movs{R||R|}", Yv, Xv, XX }, -+ { "cmps{b||b|}", Xb, Yb, XX }, -+ { "cmps{R||R|}", Xv, Yv, XX }, -+ /* a8 */ -+ { "testB", AL, Ib, XX }, -+ { "testS", eAX, Iv, XX }, -+ { "stosB", Yb, AL, XX }, -+ { "stosS", Yv, eAX, XX }, -+ { "lodsB", AL, Xb, XX }, -+ { "lodsS", eAX, Xv, XX }, -+ { "scasB", AL, Yb, XX }, -+ { "scasS", eAX, Yv, XX }, -+ /* b0 */ -+ { "movB", RMAL, Ib, XX }, -+ { "movB", RMCL, Ib, XX }, -+ { "movB", RMDL, Ib, XX }, -+ { "movB", RMBL, Ib, XX }, -+ { "movB", RMAH, Ib, XX }, -+ { "movB", RMCH, Ib, XX }, -+ { "movB", RMDH, Ib, XX }, -+ { "movB", RMBH, Ib, XX }, -+ /* b8 */ -+ { "movS", RMeAX, Iv64, XX }, -+ { "movS", RMeCX, Iv64, XX }, -+ { "movS", RMeDX, Iv64, XX }, -+ { "movS", RMeBX, Iv64, XX }, -+ { "movS", RMeSP, Iv64, XX }, -+ { "movS", RMeBP, Iv64, XX }, -+ { "movS", RMeSI, Iv64, XX }, -+ { "movS", RMeDI, Iv64, XX }, -+ /* c0 */ -+ { GRP2b }, -+ { GRP2S }, -+ { "retT", Iw, XX, XX }, -+ { "retT", XX, XX, XX }, -+ { "les{S|}", Gv, Mp, XX }, -+ { "ldsS", Gv, Mp, XX }, -+ { "movA", Eb, Ib, XX }, -+ { "movQ", Ev, Iv, XX }, -+ /* c8 */ -+ { "enterT", Iw, Ib, XX }, -+ { "leaveT", XX, XX, XX }, -+ { "lretP", Iw, XX, XX }, -+ { "lretP", XX, XX, XX }, -+ { "int3", XX, XX, XX }, -+ { "int", Ib, XX, XX }, -+ { "into{|}", XX, XX, XX }, -+ { "iretP", XX, XX, XX }, -+ /* d0 */ -+ { GRP2b_one }, -+ { GRP2S_one }, -+ { GRP2b_cl }, -+ { GRP2S_cl }, -+ { "aam{|}", sIb, XX, XX }, -+ { "aad{|}", sIb, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "xlat", DSBX, XX, XX }, -+ /* d8 */ -+ { FLOAT }, -+ { FLOAT }, -+ { FLOAT }, -+ 
{ FLOAT }, -+ { FLOAT }, -+ { FLOAT }, -+ { FLOAT }, -+ { FLOAT }, -+ /* e0 */ -+ { "loopneFH", Jb, XX, loop_jcxz_flag }, -+ { "loopeFH", Jb, XX, loop_jcxz_flag }, -+ { "loopFH", Jb, XX, loop_jcxz_flag }, -+ { "jEcxzH", Jb, XX, loop_jcxz_flag }, -+ { "inB", AL, Ib, XX }, -+ { "inS", eAX, Ib, XX }, -+ { "outB", Ib, AL, XX }, -+ { "outS", Ib, eAX, XX }, -+ /* e8 */ -+ { "callT", Jv, XX, XX }, -+ { "jmpT", Jv, XX, XX }, -+ { "Jjmp{T|}", Ap, XX, XX }, -+ { "jmp", Jb, XX, XX }, -+ { "inB", AL, indirDX, XX }, -+ { "inS", eAX, indirDX, XX }, -+ { "outB", indirDX, AL, XX }, -+ { "outS", indirDX, eAX, XX }, -+ /* f0 */ -+ { "(bad)", XX, XX, XX }, /* lock prefix */ -+ { "icebp", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, /* repne */ -+ { "(bad)", XX, XX, XX }, /* repz */ -+ { "hlt", XX, XX, XX }, -+ { "cmc", XX, XX, XX }, -+ { GRP3b }, -+ { GRP3S }, -+ /* f8 */ -+ { "clc", XX, XX, XX }, -+ { "stc", XX, XX, XX }, -+ { "cli", XX, XX, XX }, -+ { "sti", XX, XX, XX }, -+ { "cld", XX, XX, XX }, -+ { "std", XX, XX, XX }, -+ { GRP4 }, -+ { GRP5 }, -+}; -+ -+static const struct dis386 dis386_twobyte[] = { -+ /* 00 */ -+ { GRP6 }, -+ { GRP7 }, -+ { "larS", Gv, Ew, XX }, -+ { "lslS", Gv, Ew, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "syscall", XX, XX, XX }, -+ { "clts", XX, XX, XX }, -+ { "sysretP", XX, XX, XX }, -+ /* 08 */ -+ { "invd", XX, XX, XX }, -+ { "wbinvd", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "ud2a", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { GRPAMD }, -+ { "femms", XX, XX, XX }, -+ { "", MX, EM, OPSUF }, /* See OP_3DNowSuffix. */ -+ /* 10 */ -+ { PREGRP8 }, -+ { PREGRP9 }, -+ { PREGRP30 }, -+ { "movlpX", EX, XM, SIMD_Fixup, 'h' }, -+ { "unpcklpX", XM, EX, XX }, -+ { "unpckhpX", XM, EX, XX }, -+ { PREGRP31 }, -+ { "movhpX", EX, XM, SIMD_Fixup, 'l' }, -+ /* 18 */ -+ { GRP14 }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ /* 20 */ -+ { "movL", Rm, Cm, XX }, -+ { "movL", Rm, Dm, XX }, -+ { "movL", Cm, Rm, XX }, -+ { "movL", Dm, Rm, XX }, -+ { "movL", Rd, Td, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "movL", Td, Rd, XX }, -+ { "(bad)", XX, XX, XX }, -+ /* 28 */ -+ { "movapX", XM, EX, XX }, -+ { "movapX", EX, XM, XX }, -+ { PREGRP2 }, -+ { "movntpX", Ev, XM, XX }, -+ { PREGRP4 }, -+ { PREGRP3 }, -+ { "ucomisX", XM,EX, XX }, -+ { "comisX", XM,EX, XX }, -+ /* 30 */ -+ { "wrmsr", XX, XX, XX }, -+ { "rdtsc", XX, XX, XX }, -+ { "rdmsr", XX, XX, XX }, -+ { "rdpmc", XX, XX, XX }, -+ { "sysenter", XX, XX, XX }, -+ { "sysexit", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ /* 38 */ -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ /* 40 */ -+ { "cmovo", Gv, Ev, XX }, -+ { "cmovno", Gv, Ev, XX }, -+ { "cmovb", Gv, Ev, XX }, -+ { "cmovae", Gv, Ev, XX }, -+ { "cmove", Gv, Ev, XX }, -+ { "cmovne", Gv, Ev, XX }, -+ { "cmovbe", Gv, Ev, XX }, -+ { "cmova", Gv, Ev, XX }, -+ /* 48 */ -+ { "cmovs", Gv, Ev, XX }, -+ { "cmovns", Gv, Ev, XX }, -+ { "cmovp", Gv, Ev, XX }, -+ { "cmovnp", Gv, Ev, XX }, -+ { "cmovl", Gv, Ev, XX }, -+ { "cmovge", Gv, Ev, XX }, -+ { "cmovle", Gv, Ev, XX }, -+ { "cmovg", Gv, Ev, XX }, -+ /* 50 */ -+ { "movmskpX", Gdq, XS, XX }, -+ { PREGRP13 }, -+ { PREGRP12 }, -+ { PREGRP11 }, -+ { "andpX", XM, EX, XX }, -+ { "andnpX", XM, EX, XX }, -+ { 
"orpX", XM, EX, XX }, -+ { "xorpX", XM, EX, XX }, -+ /* 58 */ -+ { PREGRP0 }, -+ { PREGRP10 }, -+ { PREGRP17 }, -+ { PREGRP16 }, -+ { PREGRP14 }, -+ { PREGRP7 }, -+ { PREGRP5 }, -+ { PREGRP6 }, -+ /* 60 */ -+ { "punpcklbw", MX, EM, XX }, -+ { "punpcklwd", MX, EM, XX }, -+ { "punpckldq", MX, EM, XX }, -+ { "packsswb", MX, EM, XX }, -+ { "pcmpgtb", MX, EM, XX }, -+ { "pcmpgtw", MX, EM, XX }, -+ { "pcmpgtd", MX, EM, XX }, -+ { "packuswb", MX, EM, XX }, -+ /* 68 */ -+ { "punpckhbw", MX, EM, XX }, -+ { "punpckhwd", MX, EM, XX }, -+ { "punpckhdq", MX, EM, XX }, -+ { "packssdw", MX, EM, XX }, -+ { PREGRP26 }, -+ { PREGRP24 }, -+ { "movd", MX, Edq, XX }, -+ { PREGRP19 }, -+ /* 70 */ -+ { PREGRP22 }, -+ { GRP10 }, -+ { GRP11 }, -+ { GRP12 }, -+ { "pcmpeqb", MX, EM, XX }, -+ { "pcmpeqw", MX, EM, XX }, -+ { "pcmpeqd", MX, EM, XX }, -+ { "emms", XX, XX, XX }, -+ /* 78 */ -+ { "vmread", Em, Gm, XX }, -+ { "vmwrite", Gm, Em, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { PREGRP28 }, -+ { PREGRP29 }, -+ { PREGRP23 }, -+ { PREGRP20 }, -+ /* 80 */ -+ { "joH", Jv, XX, cond_jump_flag }, -+ { "jnoH", Jv, XX, cond_jump_flag }, -+ { "jbH", Jv, XX, cond_jump_flag }, -+ { "jaeH", Jv, XX, cond_jump_flag }, -+ { "jeH", Jv, XX, cond_jump_flag }, -+ { "jneH", Jv, XX, cond_jump_flag }, -+ { "jbeH", Jv, XX, cond_jump_flag }, -+ { "jaH", Jv, XX, cond_jump_flag }, -+ /* 88 */ -+ { "jsH", Jv, XX, cond_jump_flag }, -+ { "jnsH", Jv, XX, cond_jump_flag }, -+ { "jpH", Jv, XX, cond_jump_flag }, -+ { "jnpH", Jv, XX, cond_jump_flag }, -+ { "jlH", Jv, XX, cond_jump_flag }, -+ { "jgeH", Jv, XX, cond_jump_flag }, -+ { "jleH", Jv, XX, cond_jump_flag }, -+ { "jgH", Jv, XX, cond_jump_flag }, -+ /* 90 */ -+ { "seto", Eb, XX, XX }, -+ { "setno", Eb, XX, XX }, -+ { "setb", Eb, XX, XX }, -+ { "setae", Eb, XX, XX }, -+ { "sete", Eb, XX, XX }, -+ { "setne", Eb, XX, XX }, -+ { "setbe", Eb, XX, XX }, -+ { "seta", Eb, XX, XX }, -+ /* 98 */ -+ { "sets", Eb, XX, XX }, -+ { "setns", Eb, XX, XX }, -+ { "setp", Eb, XX, XX }, -+ { "setnp", Eb, XX, XX }, -+ { "setl", Eb, XX, XX }, -+ { "setge", Eb, XX, XX }, -+ { "setle", Eb, XX, XX }, -+ { "setg", Eb, XX, XX }, -+ /* a0 */ -+ { "pushT", fs, XX, XX }, -+ { "popT", fs, XX, XX }, -+ { "cpuid", XX, XX, XX }, -+ { "btS", Ev, Gv, XX }, -+ { "shldS", Ev, Gv, Ib }, -+ { "shldS", Ev, Gv, CL }, -+ { GRPPADLCK2 }, -+ { GRPPADLCK1 }, -+ /* a8 */ -+ { "pushT", gs, XX, XX }, -+ { "popT", gs, XX, XX }, -+ { "rsm", XX, XX, XX }, -+ { "btsS", Ev, Gv, XX }, -+ { "shrdS", Ev, Gv, Ib }, -+ { "shrdS", Ev, Gv, CL }, -+ { GRP13 }, -+ { "imulS", Gv, Ev, XX }, -+ /* b0 */ -+ { "cmpxchgB", Eb, Gb, XX }, -+ { "cmpxchgS", Ev, Gv, XX }, -+ { "lssS", Gv, Mp, XX }, -+ { "btrS", Ev, Gv, XX }, -+ { "lfsS", Gv, Mp, XX }, -+ { "lgsS", Gv, Mp, XX }, -+ { "movz{bR|x|bR|x}", Gv, Eb, XX }, -+ { "movz{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movzww ! */ -+ /* b8 */ -+ { "(bad)", XX, XX, XX }, -+ { "ud2b", XX, XX, XX }, -+ { GRP8 }, -+ { "btcS", Ev, Gv, XX }, -+ { "bsfS", Gv, Ev, XX }, -+ { "bsrS", Gv, Ev, XX }, -+ { "movs{bR|x|bR|x}", Gv, Eb, XX }, -+ { "movs{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movsww ! 
*/ -+ /* c0 */ -+ { "xaddB", Eb, Gb, XX }, -+ { "xaddS", Ev, Gv, XX }, -+ { PREGRP1 }, -+ { "movntiS", Ev, Gv, XX }, -+ { "pinsrw", MX, Edqw, Ib }, -+ { "pextrw", Gdq, MS, Ib }, -+ { "shufpX", XM, EX, Ib }, -+ { GRP9 }, -+ /* c8 */ -+ { "bswap", RMeAX, XX, XX }, -+ { "bswap", RMeCX, XX, XX }, -+ { "bswap", RMeDX, XX, XX }, -+ { "bswap", RMeBX, XX, XX }, -+ { "bswap", RMeSP, XX, XX }, -+ { "bswap", RMeBP, XX, XX }, -+ { "bswap", RMeSI, XX, XX }, -+ { "bswap", RMeDI, XX, XX }, -+ /* d0 */ -+ { PREGRP27 }, -+ { "psrlw", MX, EM, XX }, -+ { "psrld", MX, EM, XX }, -+ { "psrlq", MX, EM, XX }, -+ { "paddq", MX, EM, XX }, -+ { "pmullw", MX, EM, XX }, -+ { PREGRP21 }, -+ { "pmovmskb", Gdq, MS, XX }, -+ /* d8 */ -+ { "psubusb", MX, EM, XX }, -+ { "psubusw", MX, EM, XX }, -+ { "pminub", MX, EM, XX }, -+ { "pand", MX, EM, XX }, -+ { "paddusb", MX, EM, XX }, -+ { "paddusw", MX, EM, XX }, -+ { "pmaxub", MX, EM, XX }, -+ { "pandn", MX, EM, XX }, -+ /* e0 */ -+ { "pavgb", MX, EM, XX }, -+ { "psraw", MX, EM, XX }, -+ { "psrad", MX, EM, XX }, -+ { "pavgw", MX, EM, XX }, -+ { "pmulhuw", MX, EM, XX }, -+ { "pmulhw", MX, EM, XX }, -+ { PREGRP15 }, -+ { PREGRP25 }, -+ /* e8 */ -+ { "psubsb", MX, EM, XX }, -+ { "psubsw", MX, EM, XX }, -+ { "pminsw", MX, EM, XX }, -+ { "por", MX, EM, XX }, -+ { "paddsb", MX, EM, XX }, -+ { "paddsw", MX, EM, XX }, -+ { "pmaxsw", MX, EM, XX }, -+ { "pxor", MX, EM, XX }, -+ /* f0 */ -+ { PREGRP32 }, -+ { "psllw", MX, EM, XX }, -+ { "pslld", MX, EM, XX }, -+ { "psllq", MX, EM, XX }, -+ { "pmuludq", MX, EM, XX }, -+ { "pmaddwd", MX, EM, XX }, -+ { "psadbw", MX, EM, XX }, -+ { PREGRP18 }, -+ /* f8 */ -+ { "psubb", MX, EM, XX }, -+ { "psubw", MX, EM, XX }, -+ { "psubd", MX, EM, XX }, -+ { "psubq", MX, EM, XX }, -+ { "paddb", MX, EM, XX }, -+ { "paddw", MX, EM, XX }, -+ { "paddd", MX, EM, XX }, -+ { "(bad)", XX, XX, XX } -+}; -+ -+static const unsigned char onebyte_has_modrm[256] = { -+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -+ /* ------------------------------- */ -+ /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */ -+ /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */ -+ /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */ -+ /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */ -+ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */ -+ /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */ -+ /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */ -+ /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */ -+ /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */ -+ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */ -+ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */ -+ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */ -+ /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */ -+ /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */ -+ /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */ -+ /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */ -+ /* ------------------------------- */ -+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -+}; -+ -+static const unsigned char twobyte_has_modrm[256] = { -+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -+ /* ------------------------------- */ -+ /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */ -+ /* 10 */ 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0, /* 1f */ -+ /* 20 */ 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1, /* 2f */ -+ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ -+ /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */ -+ /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */ -+ /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */ -+ /* 70 */ 1,1,1,1,1,1,1,0,1,1,0,0,1,1,1,1, /* 7f */ -+ /* 80 */ 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ -+ /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */ -+ /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */ -+ /* b0 */ 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1, /* bf */ -+ /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */ -+ /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */ -+ /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */ -+ /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */ -+ /* ------------------------------- */ -+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -+}; -+ -+static const unsigned char twobyte_uses_SSE_prefix[256] = { -+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -+ /* ------------------------------- */ -+ /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ -+ /* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */ -+ /* 20 */ 0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0, /* 2f */ -+ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ -+ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */ -+ /* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */ -+ /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1, /* 6f */ -+ /* 70 */ 1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, /* 7f */ -+ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ -+ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */ -+ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */ -+ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */ -+ /* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */ -+ /* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */ -+ /* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */ -+ /* f0 */ 1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0 /* ff */ -+ /* ------------------------------- */ -+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -+}; -+ -+static char obuf[100]; -+static char *obufp; -+static char scratchbuf[100]; -+static unsigned char *start_codep; -+static unsigned char *insn_codep; -+static unsigned char *codep; -+static disassemble_info *the_info; -+static int mod; -+static int rm; -+static int reg; -+static unsigned char need_modrm; -+ -+/* If we are accessing mod/rm/reg without need_modrm set, then the -+ values are stale. Hitting this abort likely indicates that you -+ need to update onebyte_has_modrm or twobyte_has_modrm. 
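The mod, reg and rm values cached above are simply the three fields of the ModRM byte; the has_modrm tables only record whether the current opcode is followed by one. As a reminder of the split, here is a minimal sketch (the helper names are illustrative, not part of this patch):

/* Illustrative only: the 2-3-3 split that print_insn performs inline. */
static inline int modrm_mod (unsigned char b) { return (b >> 6) & 3; }
static inline int modrm_reg (unsigned char b) { return (b >> 3) & 7; } /* also selects the entry inside a grps[] group */
static inline int modrm_rm  (unsigned char b) { return b & 7; }

print_insn() performs exactly these shifts on *codep once need_modrm is set for the opcode.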
*/ -+#define MODRM_CHECK if (!need_modrm) abort () -+ -+static const char **names64; -+static const char **names32; -+static const char **names16; -+static const char **names8; -+static const char **names8rex; -+static const char **names_seg; -+static const char **index16; -+ -+static const char *intel_names64[] = { -+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", -+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" -+}; -+static const char *intel_names32[] = { -+ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", -+ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" -+}; -+static const char *intel_names16[] = { -+ "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", -+ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" -+}; -+static const char *intel_names8[] = { -+ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", -+}; -+static const char *intel_names8rex[] = { -+ "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil", -+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" -+}; -+static const char *intel_names_seg[] = { -+ "es", "cs", "ss", "ds", "fs", "gs", "?", "?", -+}; -+static const char *intel_index16[] = { -+ "bx+si", "bx+di", "bp+si", "bp+di", "si", "di", "bp", "bx" -+}; -+ -+static const char *att_names64[] = { -+ "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi", -+ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" -+}; -+static const char *att_names32[] = { -+ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi", -+ "%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d" -+}; -+static const char *att_names16[] = { -+ "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di", -+ "%r8w", "%r9w", "%r10w", "%r11w", "%r12w", "%r13w", "%r14w", "%r15w" -+}; -+static const char *att_names8[] = { -+ "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh", -+}; -+static const char *att_names8rex[] = { -+ "%al", "%cl", "%dl", "%bl", "%spl", "%bpl", "%sil", "%dil", -+ "%r8b", "%r9b", "%r10b", "%r11b", "%r12b", "%r13b", "%r14b", "%r15b" -+}; -+static const char *att_names_seg[] = { -+ "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "%?", "%?", -+}; -+static const char *att_index16[] = { -+ "%bx,%si", "%bx,%di", "%bp,%si", "%bp,%di", "%si", "%di", "%bp", "%bx" -+}; -+ -+static const struct dis386 grps[][8] = { -+ /* GRP1b */ -+ { -+ { "addA", Eb, Ib, XX }, -+ { "orA", Eb, Ib, XX }, -+ { "adcA", Eb, Ib, XX }, -+ { "sbbA", Eb, Ib, XX }, -+ { "andA", Eb, Ib, XX }, -+ { "subA", Eb, Ib, XX }, -+ { "xorA", Eb, Ib, XX }, -+ { "cmpA", Eb, Ib, XX } -+ }, -+ /* GRP1S */ -+ { -+ { "addQ", Ev, Iv, XX }, -+ { "orQ", Ev, Iv, XX }, -+ { "adcQ", Ev, Iv, XX }, -+ { "sbbQ", Ev, Iv, XX }, -+ { "andQ", Ev, Iv, XX }, -+ { "subQ", Ev, Iv, XX }, -+ { "xorQ", Ev, Iv, XX }, -+ { "cmpQ", Ev, Iv, XX } -+ }, -+ /* GRP1Ss */ -+ { -+ { "addQ", Ev, sIb, XX }, -+ { "orQ", Ev, sIb, XX }, -+ { "adcQ", Ev, sIb, XX }, -+ { "sbbQ", Ev, sIb, XX }, -+ { "andQ", Ev, sIb, XX }, -+ { "subQ", Ev, sIb, XX }, -+ { "xorQ", Ev, sIb, XX }, -+ { "cmpQ", Ev, sIb, XX } -+ }, -+ /* GRP2b */ -+ { -+ { "rolA", Eb, Ib, XX }, -+ { "rorA", Eb, Ib, XX }, -+ { "rclA", Eb, Ib, XX }, -+ { "rcrA", Eb, Ib, XX }, -+ { "shlA", Eb, Ib, XX }, -+ { "shrA", Eb, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "sarA", Eb, Ib, XX }, -+ }, -+ /* GRP2S */ -+ { -+ { "rolQ", Ev, Ib, XX }, -+ { "rorQ", Ev, Ib, XX }, -+ { "rclQ", Ev, Ib, XX }, -+ { "rcrQ", Ev, Ib, XX }, -+ { "shlQ", Ev, Ib, XX }, -+ { "shrQ", Ev, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "sarQ", Ev, Ib, XX }, -+ }, -+ /* GRP2b_one 
*/ -+ { -+ { "rolA", Eb, I1, XX }, -+ { "rorA", Eb, I1, XX }, -+ { "rclA", Eb, I1, XX }, -+ { "rcrA", Eb, I1, XX }, -+ { "shlA", Eb, I1, XX }, -+ { "shrA", Eb, I1, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "sarA", Eb, I1, XX }, -+ }, -+ /* GRP2S_one */ -+ { -+ { "rolQ", Ev, I1, XX }, -+ { "rorQ", Ev, I1, XX }, -+ { "rclQ", Ev, I1, XX }, -+ { "rcrQ", Ev, I1, XX }, -+ { "shlQ", Ev, I1, XX }, -+ { "shrQ", Ev, I1, XX }, -+ { "(bad)", XX, XX, XX}, -+ { "sarQ", Ev, I1, XX }, -+ }, -+ /* GRP2b_cl */ -+ { -+ { "rolA", Eb, CL, XX }, -+ { "rorA", Eb, CL, XX }, -+ { "rclA", Eb, CL, XX }, -+ { "rcrA", Eb, CL, XX }, -+ { "shlA", Eb, CL, XX }, -+ { "shrA", Eb, CL, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "sarA", Eb, CL, XX }, -+ }, -+ /* GRP2S_cl */ -+ { -+ { "rolQ", Ev, CL, XX }, -+ { "rorQ", Ev, CL, XX }, -+ { "rclQ", Ev, CL, XX }, -+ { "rcrQ", Ev, CL, XX }, -+ { "shlQ", Ev, CL, XX }, -+ { "shrQ", Ev, CL, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "sarQ", Ev, CL, XX } -+ }, -+ /* GRP3b */ -+ { -+ { "testA", Eb, Ib, XX }, -+ { "(bad)", Eb, XX, XX }, -+ { "notA", Eb, XX, XX }, -+ { "negA", Eb, XX, XX }, -+ { "mulA", Eb, XX, XX }, /* Don't print the implicit %al register, */ -+ { "imulA", Eb, XX, XX }, /* to distinguish these opcodes from other */ -+ { "divA", Eb, XX, XX }, /* mul/imul opcodes. Do the same for div */ -+ { "idivA", Eb, XX, XX } /* and idiv for consistency. */ -+ }, -+ /* GRP3S */ -+ { -+ { "testQ", Ev, Iv, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "notQ", Ev, XX, XX }, -+ { "negQ", Ev, XX, XX }, -+ { "mulQ", Ev, XX, XX }, /* Don't print the implicit register. */ -+ { "imulQ", Ev, XX, XX }, -+ { "divQ", Ev, XX, XX }, -+ { "idivQ", Ev, XX, XX }, -+ }, -+ /* GRP4 */ -+ { -+ { "incA", Eb, XX, XX }, -+ { "decA", Eb, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* GRP5 */ -+ { -+ { "incQ", Ev, XX, XX }, -+ { "decQ", Ev, XX, XX }, -+ { "callT", indirEv, XX, XX }, -+ { "JcallT", indirEp, XX, XX }, -+ { "jmpT", indirEv, XX, XX }, -+ { "JjmpT", indirEp, XX, XX }, -+ { "pushU", Ev, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* GRP6 */ -+ { -+ { "sldtQ", Ev, XX, XX }, -+ { "strQ", Ev, XX, XX }, -+ { "lldt", Ew, XX, XX }, -+ { "ltr", Ew, XX, XX }, -+ { "verr", Ew, XX, XX }, -+ { "verw", Ew, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX } -+ }, -+ /* GRP7 */ -+ { -+ { "sgdtIQ", VMX_Fixup, 0, XX, XX }, -+ { "sidtIQ", PNI_Fixup, 0, XX, XX }, -+ { "lgdt{Q|Q||}", M, XX, XX }, -+ { "lidt{Q|Q||}", SVME_Fixup, 0, XX, XX }, -+ { "smswQ", Ev, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "lmsw", Ew, XX, XX }, -+ { "invlpg", INVLPG_Fixup, w_mode, XX, XX }, -+ }, -+ /* GRP8 */ -+ { -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "btQ", Ev, Ib, XX }, -+ { "btsQ", Ev, Ib, XX }, -+ { "btrQ", Ev, Ib, XX }, -+ { "btcQ", Ev, Ib, XX }, -+ }, -+ /* GRP9 */ -+ { -+ { "(bad)", XX, XX, XX }, -+ { "cmpxchg8b", Eq, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "", VM, XX, XX }, /* See OP_VMX. 
*/ -+ { "vmptrst", Eq, XX, XX }, -+ }, -+ /* GRP10 */ -+ { -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "psrlw", MS, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "psraw", MS, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "psllw", MS, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* GRP11 */ -+ { -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "psrld", MS, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "psrad", MS, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "pslld", MS, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* GRP12 */ -+ { -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "psrlq", MS, Ib, XX }, -+ { "psrldq", MS, Ib, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "psllq", MS, Ib, XX }, -+ { "pslldq", MS, Ib, XX }, -+ }, -+ /* GRP13 */ -+ { -+ { "fxsave", Ev, XX, XX }, -+ { "fxrstor", Ev, XX, XX }, -+ { "ldmxcsr", Ev, XX, XX }, -+ { "stmxcsr", Ev, XX, XX }, -+ { "xsave", Ev, XX, XX }, -+ { "xrstor", OP_0fae, 0, XX, XX }, -+ { "mfence", OP_0fae, 0, XX, XX }, -+ { "clflush", OP_0fae, 0, XX, XX }, -+ }, -+ /* GRP14 */ -+ { -+ { "prefetchnta", Ev, XX, XX }, -+ { "prefetcht0", Ev, XX, XX }, -+ { "prefetcht1", Ev, XX, XX }, -+ { "prefetcht2", Ev, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* GRPAMD */ -+ { -+ { "prefetch", Eb, XX, XX }, -+ { "prefetchw", Eb, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* GRPPADLCK1 */ -+ { -+ { "xstore-rng", OP_0f07, 0, XX, XX }, -+ { "xcrypt-ecb", OP_0f07, 0, XX, XX }, -+ { "xcrypt-cbc", OP_0f07, 0, XX, XX }, -+ { "xcrypt-ctr", OP_0f07, 0, XX, XX }, -+ { "xcrypt-cfb", OP_0f07, 0, XX, XX }, -+ { "xcrypt-ofb", OP_0f07, 0, XX, XX }, -+ { "(bad)", OP_0f07, 0, XX, XX }, -+ { "(bad)", OP_0f07, 0, XX, XX }, -+ }, -+ /* GRPPADLCK2 */ -+ { -+ { "montmul", OP_0f07, 0, XX, XX }, -+ { "xsha1", OP_0f07, 0, XX, XX }, -+ { "xsha256", OP_0f07, 0, XX, XX }, -+ { "(bad)", OP_0f07, 0, XX, XX }, -+ { "(bad)", OP_0f07, 0, XX, XX }, -+ { "(bad)", OP_0f07, 0, XX, XX }, -+ { "(bad)", OP_0f07, 0, XX, XX }, -+ { "(bad)", OP_0f07, 0, XX, XX }, -+ } -+}; -+ -+static const struct dis386 prefix_user_table[][4] = { -+ /* PREGRP0 */ -+ { -+ { "addps", XM, EX, XX }, -+ { "addss", XM, EX, XX }, -+ { "addpd", XM, EX, XX }, -+ { "addsd", XM, EX, XX }, -+ }, -+ /* PREGRP1 */ -+ { -+ { "", XM, EX, OPSIMD }, /* See OP_SIMD_SUFFIX. 
*/ -+ { "", XM, EX, OPSIMD }, -+ { "", XM, EX, OPSIMD }, -+ { "", XM, EX, OPSIMD }, -+ }, -+ /* PREGRP2 */ -+ { -+ { "cvtpi2ps", XM, EM, XX }, -+ { "cvtsi2ssY", XM, Ev, XX }, -+ { "cvtpi2pd", XM, EM, XX }, -+ { "cvtsi2sdY", XM, Ev, XX }, -+ }, -+ /* PREGRP3 */ -+ { -+ { "cvtps2pi", MX, EX, XX }, -+ { "cvtss2siY", Gv, EX, XX }, -+ { "cvtpd2pi", MX, EX, XX }, -+ { "cvtsd2siY", Gv, EX, XX }, -+ }, -+ /* PREGRP4 */ -+ { -+ { "cvttps2pi", MX, EX, XX }, -+ { "cvttss2siY", Gv, EX, XX }, -+ { "cvttpd2pi", MX, EX, XX }, -+ { "cvttsd2siY", Gv, EX, XX }, -+ }, -+ /* PREGRP5 */ -+ { -+ { "divps", XM, EX, XX }, -+ { "divss", XM, EX, XX }, -+ { "divpd", XM, EX, XX }, -+ { "divsd", XM, EX, XX }, -+ }, -+ /* PREGRP6 */ -+ { -+ { "maxps", XM, EX, XX }, -+ { "maxss", XM, EX, XX }, -+ { "maxpd", XM, EX, XX }, -+ { "maxsd", XM, EX, XX }, -+ }, -+ /* PREGRP7 */ -+ { -+ { "minps", XM, EX, XX }, -+ { "minss", XM, EX, XX }, -+ { "minpd", XM, EX, XX }, -+ { "minsd", XM, EX, XX }, -+ }, -+ /* PREGRP8 */ -+ { -+ { "movups", XM, EX, XX }, -+ { "movss", XM, EX, XX }, -+ { "movupd", XM, EX, XX }, -+ { "movsd", XM, EX, XX }, -+ }, -+ /* PREGRP9 */ -+ { -+ { "movups", EX, XM, XX }, -+ { "movss", EX, XM, XX }, -+ { "movupd", EX, XM, XX }, -+ { "movsd", EX, XM, XX }, -+ }, -+ /* PREGRP10 */ -+ { -+ { "mulps", XM, EX, XX }, -+ { "mulss", XM, EX, XX }, -+ { "mulpd", XM, EX, XX }, -+ { "mulsd", XM, EX, XX }, -+ }, -+ /* PREGRP11 */ -+ { -+ { "rcpps", XM, EX, XX }, -+ { "rcpss", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP12 */ -+ { -+ { "rsqrtps", XM, EX, XX }, -+ { "rsqrtss", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP13 */ -+ { -+ { "sqrtps", XM, EX, XX }, -+ { "sqrtss", XM, EX, XX }, -+ { "sqrtpd", XM, EX, XX }, -+ { "sqrtsd", XM, EX, XX }, -+ }, -+ /* PREGRP14 */ -+ { -+ { "subps", XM, EX, XX }, -+ { "subss", XM, EX, XX }, -+ { "subpd", XM, EX, XX }, -+ { "subsd", XM, EX, XX }, -+ }, -+ /* PREGRP15 */ -+ { -+ { "(bad)", XM, EX, XX }, -+ { "cvtdq2pd", XM, EX, XX }, -+ { "cvttpd2dq", XM, EX, XX }, -+ { "cvtpd2dq", XM, EX, XX }, -+ }, -+ /* PREGRP16 */ -+ { -+ { "cvtdq2ps", XM, EX, XX }, -+ { "cvttps2dq",XM, EX, XX }, -+ { "cvtps2dq",XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP17 */ -+ { -+ { "cvtps2pd", XM, EX, XX }, -+ { "cvtss2sd", XM, EX, XX }, -+ { "cvtpd2ps", XM, EX, XX }, -+ { "cvtsd2ss", XM, EX, XX }, -+ }, -+ /* PREGRP18 */ -+ { -+ { "maskmovq", MX, MS, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "maskmovdqu", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP19 */ -+ { -+ { "movq", MX, EM, XX }, -+ { "movdqu", XM, EX, XX }, -+ { "movdqa", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP20 */ -+ { -+ { "movq", EM, MX, XX }, -+ { "movdqu", EX, XM, XX }, -+ { "movdqa", EX, XM, XX }, -+ { "(bad)", EX, XM, XX }, -+ }, -+ /* PREGRP21 */ -+ { -+ { "(bad)", EX, XM, XX }, -+ { "movq2dq", XM, MS, XX }, -+ { "movq", EX, XM, XX }, -+ { "movdq2q", MX, XS, XX }, -+ }, -+ /* PREGRP22 */ -+ { -+ { "pshufw", MX, EM, Ib }, -+ { "pshufhw", XM, EX, Ib }, -+ { "pshufd", XM, EX, Ib }, -+ { "pshuflw", XM, EX, Ib }, -+ }, -+ /* PREGRP23 */ -+ { -+ { "movd", Edq, MX, XX }, -+ { "movq", XM, EX, XX }, -+ { "movd", Edq, XM, XX }, -+ { "(bad)", Ed, XM, XX }, -+ }, -+ /* PREGRP24 */ -+ { -+ { "(bad)", MX, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "punpckhqdq", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP25 */ -+ { -+ { "movntq", EM, MX, XX }, -+ { "(bad)", EM, XM, XX }, -+ { "movntdq", EM, XM, XX }, -+ { 
"(bad)", EM, XM, XX }, -+ }, -+ /* PREGRP26 */ -+ { -+ { "(bad)", MX, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "punpcklqdq", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP27 */ -+ { -+ { "(bad)", MX, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "addsubpd", XM, EX, XX }, -+ { "addsubps", XM, EX, XX }, -+ }, -+ /* PREGRP28 */ -+ { -+ { "(bad)", MX, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "haddpd", XM, EX, XX }, -+ { "haddps", XM, EX, XX }, -+ }, -+ /* PREGRP29 */ -+ { -+ { "(bad)", MX, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "hsubpd", XM, EX, XX }, -+ { "hsubps", XM, EX, XX }, -+ }, -+ /* PREGRP30 */ -+ { -+ { "movlpX", XM, EX, SIMD_Fixup, 'h' }, /* really only 2 operands */ -+ { "movsldup", XM, EX, XX }, -+ { "movlpd", XM, EX, XX }, -+ { "movddup", XM, EX, XX }, -+ }, -+ /* PREGRP31 */ -+ { -+ { "movhpX", XM, EX, SIMD_Fixup, 'l' }, -+ { "movshdup", XM, EX, XX }, -+ { "movhpd", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ }, -+ /* PREGRP32 */ -+ { -+ { "(bad)", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "(bad)", XM, EX, XX }, -+ { "lddqu", XM, M, XX }, -+ }, -+}; -+ -+static const struct dis386 x86_64_table[][2] = { -+ { -+ { "arpl", Ew, Gw, XX }, -+ { "movs{||lq|xd}", Gv, Ed, XX }, -+ }, -+}; -+ -+#ifdef __KERNEL__ -+#define INTERNAL_DISASSEMBLER_ERROR "" -+#else /* __KERNEL__ */ -+#define INTERNAL_DISASSEMBLER_ERROR _("") -+#endif /* __KERNEL__ */ -+ -+static void -+ckprefix (void) -+{ -+ int newrex; -+ rex = 0; -+ prefixes = 0; -+ used_prefixes = 0; -+ rex_used = 0; -+ while (1) -+ { -+ FETCH_DATA (the_info, codep + 1); -+ newrex = 0; -+ switch (*codep) -+ { -+ /* REX prefixes family. */ -+ case 0x40: -+ case 0x41: -+ case 0x42: -+ case 0x43: -+ case 0x44: -+ case 0x45: -+ case 0x46: -+ case 0x47: -+ case 0x48: -+ case 0x49: -+ case 0x4a: -+ case 0x4b: -+ case 0x4c: -+ case 0x4d: -+ case 0x4e: -+ case 0x4f: -+ if (mode_64bit) -+ newrex = *codep; -+ else -+ return; -+ break; -+ case 0xf3: -+ prefixes |= PREFIX_REPZ; -+ break; -+ case 0xf2: -+ prefixes |= PREFIX_REPNZ; -+ break; -+ case 0xf0: -+ prefixes |= PREFIX_LOCK; -+ break; -+ case 0x2e: -+ prefixes |= PREFIX_CS; -+ break; -+ case 0x36: -+ prefixes |= PREFIX_SS; -+ break; -+ case 0x3e: -+ prefixes |= PREFIX_DS; -+ break; -+ case 0x26: -+ prefixes |= PREFIX_ES; -+ break; -+ case 0x64: -+ prefixes |= PREFIX_FS; -+ break; -+ case 0x65: -+ prefixes |= PREFIX_GS; -+ break; -+ case 0x66: -+ prefixes |= PREFIX_DATA; -+ break; -+ case 0x67: -+ prefixes |= PREFIX_ADDR; -+ break; -+ case FWAIT_OPCODE: -+ /* fwait is really an instruction. If there are prefixes -+ before the fwait, they belong to the fwait, *not* to the -+ following instruction. */ -+ if (prefixes) -+ { -+ prefixes |= PREFIX_FWAIT; -+ codep++; -+ return; -+ } -+ prefixes = PREFIX_FWAIT; -+ break; -+ default: -+ return; -+ } -+ /* Rex is ignored when followed by another prefix. */ -+ if (rex) -+ { -+ oappend (prefix_name (rex, 0)); -+ oappend (" "); -+ } -+ rex = newrex; -+ codep++; -+ } -+} -+ -+/* Return the name of the prefix byte PREF, or NULL if PREF is not a -+ prefix byte. */ -+ -+static const char * -+prefix_name (int pref, int sizeflag) -+{ -+ switch (pref) -+ { -+ /* REX prefixes family. 
*/ -+ case 0x40: -+ return "rex"; -+ case 0x41: -+ return "rexZ"; -+ case 0x42: -+ return "rexY"; -+ case 0x43: -+ return "rexYZ"; -+ case 0x44: -+ return "rexX"; -+ case 0x45: -+ return "rexXZ"; -+ case 0x46: -+ return "rexXY"; -+ case 0x47: -+ return "rexXYZ"; -+ case 0x48: -+ return "rex64"; -+ case 0x49: -+ return "rex64Z"; -+ case 0x4a: -+ return "rex64Y"; -+ case 0x4b: -+ return "rex64YZ"; -+ case 0x4c: -+ return "rex64X"; -+ case 0x4d: -+ return "rex64XZ"; -+ case 0x4e: -+ return "rex64XY"; -+ case 0x4f: -+ return "rex64XYZ"; -+ case 0xf3: -+ return "repz"; -+ case 0xf2: -+ return "repnz"; -+ case 0xf0: -+ return "lock"; -+ case 0x2e: -+ return "cs"; -+ case 0x36: -+ return "ss"; -+ case 0x3e: -+ return "ds"; -+ case 0x26: -+ return "es"; -+ case 0x64: -+ return "fs"; -+ case 0x65: -+ return "gs"; -+ case 0x66: -+ return (sizeflag & DFLAG) ? "data16" : "data32"; -+ case 0x67: -+ if (mode_64bit) -+ return (sizeflag & AFLAG) ? "addr32" : "addr64"; -+ else -+ return (sizeflag & AFLAG) ? "addr16" : "addr32"; -+ case FWAIT_OPCODE: -+ return "fwait"; -+ default: -+ return NULL; -+ } -+} -+ -+static char op1out[100], op2out[100], op3out[100]; -+static int op_ad, op_index[3]; -+static int two_source_ops; -+static bfd_vma op_address[3]; -+static bfd_vma op_riprel[3]; -+static bfd_vma start_pc; -+ -+/* -+ * On the 386's of 1988, the maximum length of an instruction is 15 bytes. -+ * (see topic "Redundant prefixes" in the "Differences from 8086" -+ * section of the "Virtual 8086 Mode" chapter.) -+ * 'pc' should be the address of this instruction, it will -+ * be used to print the target address if this is a relative jump or call -+ * The function returns the length of this instruction in bytes. -+ */ -+ -+static char intel_syntax; -+static char open_char; -+static char close_char; -+static char separator_char; -+static char scale_char; -+ -+/* Here for backwards compatibility. When gdb stops using -+ print_insn_i386_att and print_insn_i386_intel these functions can -+ disappear, and print_insn_i386 be merged into print_insn. 
*/ -+int -+print_insn_i386_att (bfd_vma pc, disassemble_info *info) -+{ -+ intel_syntax = 0; -+ -+ return print_insn (pc, info); -+} -+ -+int -+print_insn_i386_intel (bfd_vma pc, disassemble_info *info) -+{ -+ intel_syntax = 1; -+ -+ return print_insn (pc, info); -+} -+ -+int -+print_insn_i386 (bfd_vma pc, disassemble_info *info) -+{ -+ intel_syntax = -1; -+ -+ return print_insn (pc, info); -+} -+ -+static int -+print_insn (bfd_vma pc, disassemble_info *info) -+{ -+ const struct dis386 *dp; -+ int i; -+ char *first, *second, *third; -+ int needcomma; -+ unsigned char uses_SSE_prefix, uses_LOCK_prefix; -+ int sizeflag; -+ const char *p; -+ struct dis_private priv; -+ -+ mode_64bit = (info->mach == bfd_mach_x86_64_intel_syntax -+ || info->mach == bfd_mach_x86_64); -+ -+ if (intel_syntax == (char) -1) -+ intel_syntax = (info->mach == bfd_mach_i386_i386_intel_syntax -+ || info->mach == bfd_mach_x86_64_intel_syntax); -+ -+ if (info->mach == bfd_mach_i386_i386 -+ || info->mach == bfd_mach_x86_64 -+ || info->mach == bfd_mach_i386_i386_intel_syntax -+ || info->mach == bfd_mach_x86_64_intel_syntax) -+ priv.orig_sizeflag = AFLAG | DFLAG; -+ else if (info->mach == bfd_mach_i386_i8086) -+ priv.orig_sizeflag = 0; -+ else -+ abort (); -+ -+ for (p = info->disassembler_options; p != NULL; ) -+ { -+ if (strncmp (p, "x86-64", 6) == 0) -+ { -+ mode_64bit = 1; -+ priv.orig_sizeflag = AFLAG | DFLAG; -+ } -+ else if (strncmp (p, "i386", 4) == 0) -+ { -+ mode_64bit = 0; -+ priv.orig_sizeflag = AFLAG | DFLAG; -+ } -+ else if (strncmp (p, "i8086", 5) == 0) -+ { -+ mode_64bit = 0; -+ priv.orig_sizeflag = 0; -+ } -+ else if (strncmp (p, "intel", 5) == 0) -+ { -+ intel_syntax = 1; -+ } -+ else if (strncmp (p, "att", 3) == 0) -+ { -+ intel_syntax = 0; -+ } -+ else if (strncmp (p, "addr", 4) == 0) -+ { -+ if (p[4] == '1' && p[5] == '6') -+ priv.orig_sizeflag &= ~AFLAG; -+ else if (p[4] == '3' && p[5] == '2') -+ priv.orig_sizeflag |= AFLAG; -+ } -+ else if (strncmp (p, "data", 4) == 0) -+ { -+ if (p[4] == '1' && p[5] == '6') -+ priv.orig_sizeflag &= ~DFLAG; -+ else if (p[4] == '3' && p[5] == '2') -+ priv.orig_sizeflag |= DFLAG; -+ } -+ else if (strncmp (p, "suffix", 6) == 0) -+ priv.orig_sizeflag |= SUFFIX_ALWAYS; -+ -+ p = strchr (p, ','); -+ if (p != NULL) -+ p++; -+ } -+ -+ if (intel_syntax) -+ { -+ names64 = intel_names64; -+ names32 = intel_names32; -+ names16 = intel_names16; -+ names8 = intel_names8; -+ names8rex = intel_names8rex; -+ names_seg = intel_names_seg; -+ index16 = intel_index16; -+ open_char = '['; -+ close_char = ']'; -+ separator_char = '+'; -+ scale_char = '*'; -+ } -+ else -+ { -+ names64 = att_names64; -+ names32 = att_names32; -+ names16 = att_names16; -+ names8 = att_names8; -+ names8rex = att_names8rex; -+ names_seg = att_names_seg; -+ index16 = att_index16; -+ open_char = '('; -+ close_char = ')'; -+ separator_char = ','; -+ scale_char = ','; -+ } -+ -+ /* The output looks better if we put 7 bytes on a line, since that -+ puts most long word instructions on a single line. */ -+ info->bytes_per_line = 7; -+ -+ info->private_data = &priv; -+ priv.max_fetched = priv.the_buffer; -+ priv.insn_start = pc; -+ -+ obuf[0] = 0; -+ op1out[0] = 0; -+ op2out[0] = 0; -+ op3out[0] = 0; -+ -+ op_index[0] = op_index[1] = op_index[2] = -1; -+ -+ the_info = info; -+ start_pc = pc; -+ start_codep = priv.the_buffer; -+ codep = priv.the_buffer; -+ -+#ifndef __KERNEL__ -+ if (setjmp (priv.bailout) != 0) -+ { -+ const char *name; -+ -+ /* Getting here means we tried for data but didn't get it. 
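This recovery path is the usual setjmp/longjmp bailout: FETCH_DATA, defined earlier in the patch, is presumably expected to longjmp back to priv.bailout when it cannot read enough bytes, and the whole block is compiled out for __KERNEL__ builds, where no setjmp is available. A stripped-down sketch of the idiom, with illustrative names only:

#include <setjmp.h>

static jmp_buf bailout;                 /* stand-in for priv.bailout */

static unsigned char fetch_or_bail (int have_bytes)
{
  if (!have_bytes)
    longjmp (bailout, 1);               /* abandon this decode attempt */
  return 0x90;                          /* pretend a byte was read */
}

static int decode_one (int have_bytes)
{
  if (setjmp (bailout) != 0)
    return -1;                          /* fetch failed mid-instruction */
  return fetch_or_bail (have_bytes);    /* normal decode path */
}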
That -+ means we have an incomplete instruction of some sort. Just -+ print the first byte as a prefix or a .byte pseudo-op. */ -+ if (codep > priv.the_buffer) -+ { -+ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag); -+ if (name != NULL) -+ (*info->fprintf_func) (info->stream, "%s", name); -+ else -+ { -+ /* Just print the first byte as a .byte instruction. */ -+ (*info->fprintf_func) (info->stream, ".byte 0x%x", -+ (unsigned int) priv.the_buffer[0]); -+ } -+ -+ return 1; -+ } -+ -+ return -1; -+ } -+#endif /* __KERNEL__ */ -+ -+ obufp = obuf; -+ ckprefix (); -+ -+ insn_codep = codep; -+ sizeflag = priv.orig_sizeflag; -+ -+ FETCH_DATA (info, codep + 1); -+ two_source_ops = (*codep == 0x62) || (*codep == 0xc8); -+ -+ if ((prefixes & PREFIX_FWAIT) -+ && ((*codep < 0xd8) || (*codep > 0xdf))) -+ { -+ const char *name; -+ -+ /* fwait not followed by floating point instruction. Print the -+ first prefix, which is probably fwait itself. */ -+ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag); -+ if (name == NULL) -+ name = INTERNAL_DISASSEMBLER_ERROR; -+ (*info->fprintf_func) (info->stream, "%s", name); -+ return 1; -+ } -+ -+ if (*codep == 0x0f) -+ { -+ FETCH_DATA (info, codep + 2); -+ dp = &dis386_twobyte[*++codep]; -+ need_modrm = twobyte_has_modrm[*codep]; -+ uses_SSE_prefix = twobyte_uses_SSE_prefix[*codep]; -+ uses_LOCK_prefix = (*codep & ~0x02) == 0x20; -+ } -+ else -+ { -+ dp = &dis386[*codep]; -+ need_modrm = onebyte_has_modrm[*codep]; -+ uses_SSE_prefix = 0; -+ uses_LOCK_prefix = 0; -+ } -+ codep++; -+ -+ if (!uses_SSE_prefix && (prefixes & PREFIX_REPZ)) -+ { -+ oappend ("repz "); -+ used_prefixes |= PREFIX_REPZ; -+ } -+ if (!uses_SSE_prefix && (prefixes & PREFIX_REPNZ)) -+ { -+ oappend ("repnz "); -+ used_prefixes |= PREFIX_REPNZ; -+ } -+ if (!uses_LOCK_prefix && (prefixes & PREFIX_LOCK)) -+ { -+ oappend ("lock "); -+ used_prefixes |= PREFIX_LOCK; -+ } -+ -+ if (prefixes & PREFIX_ADDR) -+ { -+ sizeflag ^= AFLAG; -+ if (dp->bytemode3 != loop_jcxz_mode || intel_syntax) -+ { -+ if ((sizeflag & AFLAG) || mode_64bit) -+ oappend ("addr32 "); -+ else -+ oappend ("addr16 "); -+ used_prefixes |= PREFIX_ADDR; -+ } -+ } -+ -+ if (!uses_SSE_prefix && (prefixes & PREFIX_DATA)) -+ { -+ sizeflag ^= DFLAG; -+ if (dp->bytemode3 == cond_jump_mode -+ && dp->bytemode1 == v_mode -+ && !intel_syntax) -+ { -+ if (sizeflag & DFLAG) -+ oappend ("data32 "); -+ else -+ oappend ("data16 "); -+ used_prefixes |= PREFIX_DATA; -+ } -+ } -+ -+ if (need_modrm) -+ { -+ FETCH_DATA (info, codep + 1); -+ mod = (*codep >> 6) & 3; -+ reg = (*codep >> 3) & 7; -+ rm = *codep & 7; -+ } -+ -+ if (dp->name == NULL && dp->bytemode1 == FLOATCODE) -+ { -+ dofloat (sizeflag); -+ } -+ else -+ { -+ int index; -+ if (dp->name == NULL) -+ { -+ switch (dp->bytemode1) -+ { -+ case USE_GROUPS: -+ dp = &grps[dp->bytemode2][reg]; -+ break; -+ -+ case USE_PREFIX_USER_TABLE: -+ index = 0; -+ used_prefixes |= (prefixes & PREFIX_REPZ); -+ if (prefixes & PREFIX_REPZ) -+ index = 1; -+ else -+ { -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ if (prefixes & PREFIX_DATA) -+ index = 2; -+ else -+ { -+ used_prefixes |= (prefixes & PREFIX_REPNZ); -+ if (prefixes & PREFIX_REPNZ) -+ index = 3; -+ } -+ } -+ dp = &prefix_user_table[dp->bytemode2][index]; -+ break; -+ -+ case X86_64_SPECIAL: -+ dp = &x86_64_table[dp->bytemode2][mode_64bit]; -+ break; -+ -+ default: -+ oappend (INTERNAL_DISASSEMBLER_ERROR); -+ break; -+ } -+ } -+ -+ if (putop (dp->name, sizeflag) == 0) -+ { -+ obufp = op1out; -+ op_ad = 2; -+ if (dp->op1) -+ (*dp->op1) 
(dp->bytemode1, sizeflag); -+ -+ obufp = op2out; -+ op_ad = 1; -+ if (dp->op2) -+ (*dp->op2) (dp->bytemode2, sizeflag); -+ -+ obufp = op3out; -+ op_ad = 0; -+ if (dp->op3) -+ (*dp->op3) (dp->bytemode3, sizeflag); -+ } -+ } -+ -+ /* See if any prefixes were not used. If so, print the first one -+ separately. If we don't do this, we'll wind up printing an -+ instruction stream which does not precisely correspond to the -+ bytes we are disassembling. */ -+ if ((prefixes & ~used_prefixes) != 0) -+ { -+ const char *name; -+ -+ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag); -+ if (name == NULL) -+ name = INTERNAL_DISASSEMBLER_ERROR; -+ (*info->fprintf_func) (info->stream, "%s", name); -+ return 1; -+ } -+ if (rex & ~rex_used) -+ { -+ const char *name; -+ name = prefix_name (rex | 0x40, priv.orig_sizeflag); -+ if (name == NULL) -+ name = INTERNAL_DISASSEMBLER_ERROR; -+ (*info->fprintf_func) (info->stream, "%s ", name); -+ } -+ -+ obufp = obuf + strlen (obuf); -+ for (i = strlen (obuf); i < 6; i++) -+ oappend (" "); -+ oappend (" "); -+ (*info->fprintf_func) (info->stream, "%s", obuf); -+ -+ /* The enter and bound instructions are printed with operands in the same -+ order as the intel book; everything else is printed in reverse order. */ -+ if (intel_syntax || two_source_ops) -+ { -+ first = op1out; -+ second = op2out; -+ third = op3out; -+ op_ad = op_index[0]; -+ op_index[0] = op_index[2]; -+ op_index[2] = op_ad; -+ } -+ else -+ { -+ first = op3out; -+ second = op2out; -+ third = op1out; -+ } -+ needcomma = 0; -+ if (*first) -+ { -+ if (op_index[0] != -1 && !op_riprel[0]) -+ (*info->print_address_func) ((bfd_vma) op_address[op_index[0]], info); -+ else -+ (*info->fprintf_func) (info->stream, "%s", first); -+ needcomma = 1; -+ } -+ if (*second) -+ { -+ if (needcomma) -+ (*info->fprintf_func) (info->stream, ","); -+ if (op_index[1] != -1 && !op_riprel[1]) -+ (*info->print_address_func) ((bfd_vma) op_address[op_index[1]], info); -+ else -+ (*info->fprintf_func) (info->stream, "%s", second); -+ needcomma = 1; -+ } -+ if (*third) -+ { -+ if (needcomma) -+ (*info->fprintf_func) (info->stream, ","); -+ if (op_index[2] != -1 && !op_riprel[2]) -+ (*info->print_address_func) ((bfd_vma) op_address[op_index[2]], info); -+ else -+ (*info->fprintf_func) (info->stream, "%s", third); -+ } -+ for (i = 0; i < 3; i++) -+ if (op_index[i] != -1 && op_riprel[i]) -+ { -+ (*info->fprintf_func) (info->stream, " # "); -+ (*info->print_address_func) ((bfd_vma) (start_pc + codep - start_codep -+ + op_address[op_index[i]]), info); -+ } -+ return codep - priv.the_buffer; -+} -+ -+static const char *float_mem[] = { -+ /* d8 */ -+ "fadd{s||s|}", -+ "fmul{s||s|}", -+ "fcom{s||s|}", -+ "fcomp{s||s|}", -+ "fsub{s||s|}", -+ "fsubr{s||s|}", -+ "fdiv{s||s|}", -+ "fdivr{s||s|}", -+ /* d9 */ -+ "fld{s||s|}", -+ "(bad)", -+ "fst{s||s|}", -+ "fstp{s||s|}", -+ "fldenvIC", -+ "fldcw", -+ "fNstenvIC", -+ "fNstcw", -+ /* da */ -+ "fiadd{l||l|}", -+ "fimul{l||l|}", -+ "ficom{l||l|}", -+ "ficomp{l||l|}", -+ "fisub{l||l|}", -+ "fisubr{l||l|}", -+ "fidiv{l||l|}", -+ "fidivr{l||l|}", -+ /* db */ -+ "fild{l||l|}", -+ "fisttp{l||l|}", -+ "fist{l||l|}", -+ "fistp{l||l|}", -+ "(bad)", -+ "fld{t||t|}", -+ "(bad)", -+ "fstp{t||t|}", -+ /* dc */ -+ "fadd{l||l|}", -+ "fmul{l||l|}", -+ "fcom{l||l|}", -+ "fcomp{l||l|}", -+ "fsub{l||l|}", -+ "fsubr{l||l|}", -+ "fdiv{l||l|}", -+ "fdivr{l||l|}", -+ /* dd */ -+ "fld{l||l|}", -+ "fisttp{ll||ll|}", -+ "fst{l||l|}", -+ "fstp{l||l|}", -+ "frstorIC", -+ "(bad)", -+ "fNsaveIC", -+ "fNstsw", -+ /* de 
*/ -+ "fiadd", -+ "fimul", -+ "ficom", -+ "ficomp", -+ "fisub", -+ "fisubr", -+ "fidiv", -+ "fidivr", -+ /* df */ -+ "fild", -+ "fisttp", -+ "fist", -+ "fistp", -+ "fbld", -+ "fild{ll||ll|}", -+ "fbstp", -+ "fistp{ll||ll|}", -+}; -+ -+static const unsigned char float_mem_mode[] = { -+ /* d8 */ -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ /* d9 */ -+ d_mode, -+ 0, -+ d_mode, -+ d_mode, -+ 0, -+ w_mode, -+ 0, -+ w_mode, -+ /* da */ -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ /* db */ -+ d_mode, -+ d_mode, -+ d_mode, -+ d_mode, -+ 0, -+ t_mode, -+ 0, -+ t_mode, -+ /* dc */ -+ q_mode, -+ q_mode, -+ q_mode, -+ q_mode, -+ q_mode, -+ q_mode, -+ q_mode, -+ q_mode, -+ /* dd */ -+ q_mode, -+ q_mode, -+ q_mode, -+ q_mode, -+ 0, -+ 0, -+ 0, -+ w_mode, -+ /* de */ -+ w_mode, -+ w_mode, -+ w_mode, -+ w_mode, -+ w_mode, -+ w_mode, -+ w_mode, -+ w_mode, -+ /* df */ -+ w_mode, -+ w_mode, -+ w_mode, -+ w_mode, -+ t_mode, -+ q_mode, -+ t_mode, -+ q_mode -+}; -+ -+#define ST OP_ST, 0 -+#define STi OP_STi, 0 -+ -+#define FGRPd9_2 NULL, NULL, 0, NULL, 0, NULL, 0 -+#define FGRPd9_4 NULL, NULL, 1, NULL, 0, NULL, 0 -+#define FGRPd9_5 NULL, NULL, 2, NULL, 0, NULL, 0 -+#define FGRPd9_6 NULL, NULL, 3, NULL, 0, NULL, 0 -+#define FGRPd9_7 NULL, NULL, 4, NULL, 0, NULL, 0 -+#define FGRPda_5 NULL, NULL, 5, NULL, 0, NULL, 0 -+#define FGRPdb_4 NULL, NULL, 6, NULL, 0, NULL, 0 -+#define FGRPde_3 NULL, NULL, 7, NULL, 0, NULL, 0 -+#define FGRPdf_4 NULL, NULL, 8, NULL, 0, NULL, 0 -+ -+static const struct dis386 float_reg[][8] = { -+ /* d8 */ -+ { -+ { "fadd", ST, STi, XX }, -+ { "fmul", ST, STi, XX }, -+ { "fcom", STi, XX, XX }, -+ { "fcomp", STi, XX, XX }, -+ { "fsub", ST, STi, XX }, -+ { "fsubr", ST, STi, XX }, -+ { "fdiv", ST, STi, XX }, -+ { "fdivr", ST, STi, XX }, -+ }, -+ /* d9 */ -+ { -+ { "fld", STi, XX, XX }, -+ { "fxch", STi, XX, XX }, -+ { FGRPd9_2 }, -+ { "(bad)", XX, XX, XX }, -+ { FGRPd9_4 }, -+ { FGRPd9_5 }, -+ { FGRPd9_6 }, -+ { FGRPd9_7 }, -+ }, -+ /* da */ -+ { -+ { "fcmovb", ST, STi, XX }, -+ { "fcmove", ST, STi, XX }, -+ { "fcmovbe",ST, STi, XX }, -+ { "fcmovu", ST, STi, XX }, -+ { "(bad)", XX, XX, XX }, -+ { FGRPda_5 }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* db */ -+ { -+ { "fcmovnb",ST, STi, XX }, -+ { "fcmovne",ST, STi, XX }, -+ { "fcmovnbe",ST, STi, XX }, -+ { "fcmovnu",ST, STi, XX }, -+ { FGRPdb_4 }, -+ { "fucomi", ST, STi, XX }, -+ { "fcomi", ST, STi, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* dc */ -+ { -+ { "fadd", STi, ST, XX }, -+ { "fmul", STi, ST, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+#if UNIXWARE_COMPAT -+ { "fsub", STi, ST, XX }, -+ { "fsubr", STi, ST, XX }, -+ { "fdiv", STi, ST, XX }, -+ { "fdivr", STi, ST, XX }, -+#else -+ { "fsubr", STi, ST, XX }, -+ { "fsub", STi, ST, XX }, -+ { "fdivr", STi, ST, XX }, -+ { "fdiv", STi, ST, XX }, -+#endif -+ }, -+ /* dd */ -+ { -+ { "ffree", STi, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "fst", STi, XX, XX }, -+ { "fstp", STi, XX, XX }, -+ { "fucom", STi, XX, XX }, -+ { "fucomp", STi, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+ /* de */ -+ { -+ { "faddp", STi, ST, XX }, -+ { "fmulp", STi, ST, XX }, -+ { "(bad)", XX, XX, XX }, -+ { FGRPde_3 }, -+#if UNIXWARE_COMPAT -+ { "fsubp", STi, ST, XX }, -+ { "fsubrp", STi, ST, XX }, -+ { "fdivp", STi, ST, XX }, -+ { "fdivrp", STi, ST, XX }, -+#else -+ { "fsubrp", STi, ST, XX }, -+ { "fsubp", STi, ST, XX }, -+ { "fdivrp", STi, ST, XX }, -+ 
{ "fdivp", STi, ST, XX }, -+#endif -+ }, -+ /* df */ -+ { -+ { "ffreep", STi, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { "(bad)", XX, XX, XX }, -+ { FGRPdf_4 }, -+ { "fucomip",ST, STi, XX }, -+ { "fcomip", ST, STi, XX }, -+ { "(bad)", XX, XX, XX }, -+ }, -+}; -+ -+static char *fgrps[][8] = { -+ /* d9_2 0 */ -+ { -+ "fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", -+ }, -+ -+ /* d9_4 1 */ -+ { -+ "fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)", -+ }, -+ -+ /* d9_5 2 */ -+ { -+ "fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)", -+ }, -+ -+ /* d9_6 3 */ -+ { -+ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp", -+ }, -+ -+ /* d9_7 4 */ -+ { -+ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos", -+ }, -+ -+ /* da_5 5 */ -+ { -+ "(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", -+ }, -+ -+ /* db_4 6 */ -+ { -+ "feni(287 only)","fdisi(287 only)","fNclex","fNinit", -+ "fNsetpm(287 only)","(bad)","(bad)","(bad)", -+ }, -+ -+ /* de_3 7 */ -+ { -+ "(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", -+ }, -+ -+ /* df_4 8 */ -+ { -+ "fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", -+ }, -+}; -+ -+static void -+dofloat (int sizeflag) -+{ -+ const struct dis386 *dp; -+ unsigned char floatop; -+ -+ floatop = codep[-1]; -+ -+ if (mod != 3) -+ { -+ int fp_indx = (floatop - 0xd8) * 8 + reg; -+ -+ putop (float_mem[fp_indx], sizeflag); -+ obufp = op1out; -+ OP_E (float_mem_mode[fp_indx], sizeflag); -+ return; -+ } -+ /* Skip mod/rm byte. */ -+ MODRM_CHECK; -+ codep++; -+ -+ dp = &float_reg[floatop - 0xd8][reg]; -+ if (dp->name == NULL) -+ { -+ putop (fgrps[dp->bytemode1][rm], sizeflag); -+ -+ /* Instruction fnstsw is only one with strange arg. */ -+ if (floatop == 0xdf && codep[-1] == 0xe0) -+ strcpy (op1out, names16[0]); -+ } -+ else -+ { -+ putop (dp->name, sizeflag); -+ -+ obufp = op1out; -+ if (dp->op1) -+ (*dp->op1) (dp->bytemode1, sizeflag); -+ obufp = op2out; -+ if (dp->op2) -+ (*dp->op2) (dp->bytemode2, sizeflag); -+ } -+} -+ -+static void -+OP_ST (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ oappend ("%st"); -+} -+ -+static void -+OP_STi (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ sprintf (scratchbuf, "%%st(%d)", rm); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+/* Capital letters in template are macros. */ -+static int -+putop (const char *template, int sizeflag) -+{ -+ const char *p; -+ int alt = 0; -+ -+ for (p = template; *p; p++) -+ { -+ switch (*p) -+ { -+ default: -+ *obufp++ = *p; -+ break; -+ case '{': -+ alt = 0; -+ if (intel_syntax) -+ alt += 1; -+ if (mode_64bit) -+ alt += 2; -+ while (alt != 0) -+ { -+ while (*++p != '|') -+ { -+ if (*p == '}') -+ { -+ /* Alternative not valid. */ -+ strcpy (obuf, "(bad)"); -+ obufp = obuf + 5; -+ return 1; -+ } -+ else if (*p == '\0') -+ abort (); -+ } -+ alt--; -+ } -+ /* Fall through. */ -+ case 'I': -+ alt = 1; -+ continue; -+ case '|': -+ while (*++p != '}') -+ { -+ if (*p == '\0') -+ abort (); -+ } -+ break; -+ case '}': -+ break; -+ case 'A': -+ if (intel_syntax) -+ break; -+ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS)) -+ *obufp++ = 'b'; -+ break; -+ case 'B': -+ if (intel_syntax) -+ break; -+ if (sizeflag & SUFFIX_ALWAYS) -+ *obufp++ = 'b'; -+ break; -+ case 'C': -+ if (intel_syntax && !alt) -+ break; -+ if ((prefixes & PREFIX_DATA) || (sizeflag & SUFFIX_ALWAYS)) -+ { -+ if (sizeflag & DFLAG) -+ *obufp++ = intel_syntax ? 
'd' : 'l'; -+ else -+ *obufp++ = intel_syntax ? 'w' : 's'; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ } -+ break; -+ case 'E': /* For jcxz/jecxz */ -+ if (mode_64bit) -+ { -+ if (sizeflag & AFLAG) -+ *obufp++ = 'r'; -+ else -+ *obufp++ = 'e'; -+ } -+ else -+ if (sizeflag & AFLAG) -+ *obufp++ = 'e'; -+ used_prefixes |= (prefixes & PREFIX_ADDR); -+ break; -+ case 'F': -+ if (intel_syntax) -+ break; -+ if ((prefixes & PREFIX_ADDR) || (sizeflag & SUFFIX_ALWAYS)) -+ { -+ if (sizeflag & AFLAG) -+ *obufp++ = mode_64bit ? 'q' : 'l'; -+ else -+ *obufp++ = mode_64bit ? 'l' : 'w'; -+ used_prefixes |= (prefixes & PREFIX_ADDR); -+ } -+ break; -+ case 'H': -+ if (intel_syntax) -+ break; -+ if ((prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_CS -+ || (prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_DS) -+ { -+ used_prefixes |= prefixes & (PREFIX_CS | PREFIX_DS); -+ *obufp++ = ','; -+ *obufp++ = 'p'; -+ if (prefixes & PREFIX_DS) -+ *obufp++ = 't'; -+ else -+ *obufp++ = 'n'; -+ } -+ break; -+ case 'J': -+ if (intel_syntax) -+ break; -+ *obufp++ = 'l'; -+ break; -+ case 'L': -+ if (intel_syntax) -+ break; -+ if (sizeflag & SUFFIX_ALWAYS) -+ *obufp++ = 'l'; -+ break; -+ case 'N': -+ if ((prefixes & PREFIX_FWAIT) == 0) -+ *obufp++ = 'n'; -+ else -+ used_prefixes |= PREFIX_FWAIT; -+ break; -+ case 'O': -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ *obufp++ = 'o'; -+ else -+ *obufp++ = 'd'; -+ break; -+ case 'T': -+ if (intel_syntax) -+ break; -+ if (mode_64bit) -+ { -+ *obufp++ = 'q'; -+ break; -+ } -+ /* Fall through. */ -+ case 'P': -+ if (intel_syntax) -+ break; -+ if ((prefixes & PREFIX_DATA) -+ || (rex & REX_MODE64) -+ || (sizeflag & SUFFIX_ALWAYS)) -+ { -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ *obufp++ = 'q'; -+ else -+ { -+ if (sizeflag & DFLAG) -+ *obufp++ = 'l'; -+ else -+ *obufp++ = 'w'; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ } -+ } -+ break; -+ case 'U': -+ if (intel_syntax) -+ break; -+ if (mode_64bit) -+ { -+ *obufp++ = 'q'; -+ break; -+ } -+ /* Fall through. */ -+ case 'Q': -+ if (intel_syntax && !alt) -+ break; -+ USED_REX (REX_MODE64); -+ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS)) -+ { -+ if (rex & REX_MODE64) -+ *obufp++ = 'q'; -+ else -+ { -+ if (sizeflag & DFLAG) -+ *obufp++ = intel_syntax ? 
'd' : 'l'; -+ else -+ *obufp++ = 'w'; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ } -+ } -+ break; -+ case 'R': -+ USED_REX (REX_MODE64); -+ if (intel_syntax) -+ { -+ if (rex & REX_MODE64) -+ { -+ *obufp++ = 'q'; -+ *obufp++ = 't'; -+ } -+ else if (sizeflag & DFLAG) -+ { -+ *obufp++ = 'd'; -+ *obufp++ = 'q'; -+ } -+ else -+ { -+ *obufp++ = 'w'; -+ *obufp++ = 'd'; -+ } -+ } -+ else -+ { -+ if (rex & REX_MODE64) -+ *obufp++ = 'q'; -+ else if (sizeflag & DFLAG) -+ *obufp++ = 'l'; -+ else -+ *obufp++ = 'w'; -+ } -+ if (!(rex & REX_MODE64)) -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case 'S': -+ if (intel_syntax) -+ break; -+ if (sizeflag & SUFFIX_ALWAYS) -+ { -+ if (rex & REX_MODE64) -+ *obufp++ = 'q'; -+ else -+ { -+ if (sizeflag & DFLAG) -+ *obufp++ = 'l'; -+ else -+ *obufp++ = 'w'; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ } -+ } -+ break; -+ case 'X': -+ if (prefixes & PREFIX_DATA) -+ *obufp++ = 'd'; -+ else -+ *obufp++ = 's'; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case 'Y': -+ if (intel_syntax) -+ break; -+ if (rex & REX_MODE64) -+ { -+ USED_REX (REX_MODE64); -+ *obufp++ = 'q'; -+ } -+ break; -+ /* implicit operand size 'l' for i386 or 'q' for x86-64 */ -+ case 'W': -+ /* operand size flag for cwtl, cbtw */ -+ USED_REX (0); -+ if (rex) -+ *obufp++ = 'l'; -+ else if (sizeflag & DFLAG) -+ *obufp++ = 'w'; -+ else -+ *obufp++ = 'b'; -+ if (intel_syntax) -+ { -+ if (rex) -+ { -+ *obufp++ = 'q'; -+ *obufp++ = 'e'; -+ } -+ if (sizeflag & DFLAG) -+ { -+ *obufp++ = 'd'; -+ *obufp++ = 'e'; -+ } -+ else -+ { -+ *obufp++ = 'w'; -+ } -+ } -+ if (!rex) -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ } -+ alt = 0; -+ } -+ *obufp = 0; -+ return 0; -+} -+ -+static void -+oappend (const char *s) -+{ -+ strcpy (obufp, s); -+ obufp += strlen (s); -+} -+ -+static void -+append_seg (void) -+{ -+ if (prefixes & PREFIX_CS) -+ { -+ used_prefixes |= PREFIX_CS; -+ oappend ("%cs:" + intel_syntax); -+ } -+ if (prefixes & PREFIX_DS) -+ { -+ used_prefixes |= PREFIX_DS; -+ oappend ("%ds:" + intel_syntax); -+ } -+ if (prefixes & PREFIX_SS) -+ { -+ used_prefixes |= PREFIX_SS; -+ oappend ("%ss:" + intel_syntax); -+ } -+ if (prefixes & PREFIX_ES) -+ { -+ used_prefixes |= PREFIX_ES; -+ oappend ("%es:" + intel_syntax); -+ } -+ if (prefixes & PREFIX_FS) -+ { -+ used_prefixes |= PREFIX_FS; -+ oappend ("%fs:" + intel_syntax); -+ } -+ if (prefixes & PREFIX_GS) -+ { -+ used_prefixes |= PREFIX_GS; -+ oappend ("%gs:" + intel_syntax); -+ } -+} -+ -+static void -+OP_indirE (int bytemode, int sizeflag) -+{ -+ if (!intel_syntax) -+ oappend ("*"); -+ OP_E (bytemode, sizeflag); -+} -+ -+static void -+print_operand_value (char *buf, int hex, bfd_vma disp) -+{ -+ if (mode_64bit) -+ { -+ if (hex) -+ { -+ char tmp[30]; -+ int i; -+ buf[0] = '0'; -+ buf[1] = 'x'; -+ sprintf_vma (tmp, disp); -+ for (i = 0; tmp[i] == '0' && tmp[i + 1]; i++); -+ strcpy (buf + 2, tmp + i); -+ } -+ else -+ { -+ bfd_signed_vma v = disp; -+ char tmp[30]; -+ int i; -+ if (v < 0) -+ { -+ *(buf++) = '-'; -+ v = -disp; -+ /* Check for possible overflow on 0x8000000000000000. 
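The hard-coded digit string below exists because two's-complement negation maps the most negative 64-bit value onto itself, so v is still negative after v = -disp and the digit conversion that follows would misbehave. A tiny standalone illustration, not part of the patch (the signed negation itself is undefined behaviour in ISO C, so the demonstration uses unsigned arithmetic):

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint64_t u = (uint64_t) INT64_MIN;        /* 0x8000000000000000 */
  /* Negation is a no-op on this value: wrapping brings it back to itself. */
  printf ("%d\n", (uint64_t) (0 - u) == u); /* prints 1 */
  return 0;
}

This is why the code re-checks v < 0 after the negation and falls back to the literal "9223372036854775808".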
*/ -+ if (v < 0) -+ { -+ strcpy (buf, "9223372036854775808"); -+ return; -+ } -+ } -+ if (!v) -+ { -+ strcpy (buf, "0"); -+ return; -+ } -+ -+ i = 0; -+ tmp[29] = 0; -+ while (v) -+ { -+ tmp[28 - i] = (v % 10) + '0'; -+ v /= 10; -+ i++; -+ } -+ strcpy (buf, tmp + 29 - i); -+ } -+ } -+ else -+ { -+ if (hex) -+ sprintf (buf, "0x%x", (unsigned int) disp); -+ else -+ sprintf (buf, "%d", (int) disp); -+ } -+} -+ -+static void -+OP_E (int bytemode, int sizeflag) -+{ -+ bfd_vma disp; -+ int add = 0; -+ int riprel = 0; -+ USED_REX (REX_EXTZ); -+ if (rex & REX_EXTZ) -+ add += 8; -+ -+ /* Skip mod/rm byte. */ -+ MODRM_CHECK; -+ codep++; -+ -+ if (mod == 3) -+ { -+ switch (bytemode) -+ { -+ case b_mode: -+ USED_REX (0); -+ if (rex) -+ oappend (names8rex[rm + add]); -+ else -+ oappend (names8[rm + add]); -+ break; -+ case w_mode: -+ oappend (names16[rm + add]); -+ break; -+ case d_mode: -+ oappend (names32[rm + add]); -+ break; -+ case q_mode: -+ oappend (names64[rm + add]); -+ break; -+ case m_mode: -+ if (mode_64bit) -+ oappend (names64[rm + add]); -+ else -+ oappend (names32[rm + add]); -+ break; -+ case branch_v_mode: -+ if (mode_64bit) -+ oappend (names64[rm + add]); -+ else -+ { -+ if ((sizeflag & DFLAG) || bytemode != branch_v_mode) -+ oappend (names32[rm + add]); -+ else -+ oappend (names16[rm + add]); -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ } -+ break; -+ case v_mode: -+ case dq_mode: -+ case dqw_mode: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ oappend (names64[rm + add]); -+ else if ((sizeflag & DFLAG) || bytemode != v_mode) -+ oappend (names32[rm + add]); -+ else -+ oappend (names16[rm + add]); -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case 0: -+ break; -+ default: -+ oappend (INTERNAL_DISASSEMBLER_ERROR); -+ break; -+ } -+ return; -+ } -+ -+ disp = 0; -+ append_seg (); -+ -+ if ((sizeflag & AFLAG) || mode_64bit) /* 32 bit address mode */ -+ { -+ int havesib; -+ int havebase; -+ int base; -+ int index = 0; -+ int scale = 0; -+ -+ havesib = 0; -+ havebase = 1; -+ base = rm; -+ -+ if (base == 4) -+ { -+ havesib = 1; -+ FETCH_DATA (the_info, codep + 1); -+ index = (*codep >> 3) & 7; -+ if (mode_64bit || index != 0x4) -+ /* When INDEX == 0x4 in 32 bit mode, SCALE is ignored. 
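The SIB byte read here uses the same 2-3-3 layout as ModRM: bits 7-6 are the scale exponent, 5-3 the index register and 2-0 the base register, the index contributing index_reg * (1 << scale) to the effective address, with REX_EXTY supplying the index register's fourth bit as in the code just below. Hypothetical helpers restating the split:

/* Illustrative only: the decode performed inline on *codep above. */
static inline int sib_scale (unsigned char sib) { return (sib >> 6) & 3; } /* multiplier is 1 << scale */
static inline int sib_index (unsigned char sib) { return (sib >> 3) & 7; } /* 4 with no REX.X means "no index" */
static inline int sib_base  (unsigned char sib) { return sib & 7; }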
*/ -+ scale = (*codep >> 6) & 3; -+ base = *codep & 7; -+ USED_REX (REX_EXTY); -+ if (rex & REX_EXTY) -+ index += 8; -+ codep++; -+ } -+ base += add; -+ -+ switch (mod) -+ { -+ case 0: -+ if ((base & 7) == 5) -+ { -+ havebase = 0; -+ if (mode_64bit && !havesib) -+ riprel = 1; -+ disp = get32s (); -+ } -+ break; -+ case 1: -+ FETCH_DATA (the_info, codep + 1); -+ disp = *codep++; -+ if ((disp & 0x80) != 0) -+ disp -= 0x100; -+ break; -+ case 2: -+ disp = get32s (); -+ break; -+ } -+ -+ if (!intel_syntax) -+ if (mod != 0 || (base & 7) == 5) -+ { -+ print_operand_value (scratchbuf, !riprel, disp); -+ oappend (scratchbuf); -+ if (riprel) -+ { -+ set_op (disp, 1); -+ oappend ("(%rip)"); -+ } -+ } -+ -+ if (havebase || (havesib && (index != 4 || scale != 0))) -+ { -+ if (intel_syntax) -+ { -+ switch (bytemode) -+ { -+ case b_mode: -+ oappend ("BYTE PTR "); -+ break; -+ case w_mode: -+ case dqw_mode: -+ oappend ("WORD PTR "); -+ break; -+ case branch_v_mode: -+ case v_mode: -+ case dq_mode: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ oappend ("QWORD PTR "); -+ else if ((sizeflag & DFLAG) || bytemode == dq_mode) -+ oappend ("DWORD PTR "); -+ else -+ oappend ("WORD PTR "); -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case d_mode: -+ oappend ("DWORD PTR "); -+ break; -+ case q_mode: -+ oappend ("QWORD PTR "); -+ break; -+ case m_mode: -+ if (mode_64bit) -+ oappend ("QWORD PTR "); -+ else -+ oappend ("DWORD PTR "); -+ break; -+ case f_mode: -+ if (sizeflag & DFLAG) -+ { -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ oappend ("FWORD PTR "); -+ } -+ else -+ oappend ("DWORD PTR "); -+ break; -+ case t_mode: -+ oappend ("TBYTE PTR "); -+ break; -+ case x_mode: -+ oappend ("XMMWORD PTR "); -+ break; -+ default: -+ break; -+ } -+ } -+ *obufp++ = open_char; -+ if (intel_syntax && riprel) -+ oappend ("rip + "); -+ *obufp = '\0'; -+ if (havebase) -+ oappend (mode_64bit && (sizeflag & AFLAG) -+ ? names64[base] : names32[base]); -+ if (havesib) -+ { -+ if (index != 4) -+ { -+ if (!intel_syntax || havebase) -+ { -+ *obufp++ = separator_char; -+ *obufp = '\0'; -+ } -+ oappend (mode_64bit && (sizeflag & AFLAG) -+ ? 
names64[index] : names32[index]); -+ } -+ if (scale != 0 || (!intel_syntax && index != 4)) -+ { -+ *obufp++ = scale_char; -+ *obufp = '\0'; -+ sprintf (scratchbuf, "%d", 1 << scale); -+ oappend (scratchbuf); -+ } -+ } -+ if (intel_syntax && disp) -+ { -+ if ((bfd_signed_vma) disp > 0) -+ { -+ *obufp++ = '+'; -+ *obufp = '\0'; -+ } -+ else if (mod != 1) -+ { -+ *obufp++ = '-'; -+ *obufp = '\0'; -+ disp = - (bfd_signed_vma) disp; -+ } -+ -+ print_operand_value (scratchbuf, mod != 1, disp); -+ oappend (scratchbuf); -+ } -+ -+ *obufp++ = close_char; -+ *obufp = '\0'; -+ } -+ else if (intel_syntax) -+ { -+ if (mod != 0 || (base & 7) == 5) -+ { -+ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS -+ | PREFIX_ES | PREFIX_FS | PREFIX_GS)) -+ ; -+ else -+ { -+ oappend (names_seg[ds_reg - es_reg]); -+ oappend (":"); -+ } -+ print_operand_value (scratchbuf, 1, disp); -+ oappend (scratchbuf); -+ } -+ } -+ } -+ else -+ { /* 16 bit address mode */ -+ switch (mod) -+ { -+ case 0: -+ if (rm == 6) -+ { -+ disp = get16 (); -+ if ((disp & 0x8000) != 0) -+ disp -= 0x10000; -+ } -+ break; -+ case 1: -+ FETCH_DATA (the_info, codep + 1); -+ disp = *codep++; -+ if ((disp & 0x80) != 0) -+ disp -= 0x100; -+ break; -+ case 2: -+ disp = get16 (); -+ if ((disp & 0x8000) != 0) -+ disp -= 0x10000; -+ break; -+ } -+ -+ if (!intel_syntax) -+ if (mod != 0 || rm == 6) -+ { -+ print_operand_value (scratchbuf, 0, disp); -+ oappend (scratchbuf); -+ } -+ -+ if (mod != 0 || rm != 6) -+ { -+ *obufp++ = open_char; -+ *obufp = '\0'; -+ oappend (index16[rm]); -+ if (intel_syntax && disp) -+ { -+ if ((bfd_signed_vma) disp > 0) -+ { -+ *obufp++ = '+'; -+ *obufp = '\0'; -+ } -+ else if (mod != 1) -+ { -+ *obufp++ = '-'; -+ *obufp = '\0'; -+ disp = - (bfd_signed_vma) disp; -+ } -+ -+ print_operand_value (scratchbuf, mod != 1, disp); -+ oappend (scratchbuf); -+ } -+ -+ *obufp++ = close_char; -+ *obufp = '\0'; -+ } -+ else if (intel_syntax) -+ { -+ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS -+ | PREFIX_ES | PREFIX_FS | PREFIX_GS)) -+ ; -+ else -+ { -+ oappend (names_seg[ds_reg - es_reg]); -+ oappend (":"); -+ } -+ print_operand_value (scratchbuf, 1, disp & 0xffff); -+ oappend (scratchbuf); -+ } -+ } -+} -+ -+static void -+OP_G (int bytemode, int sizeflag) -+{ -+ int add = 0; -+ USED_REX (REX_EXTX); -+ if (rex & REX_EXTX) -+ add += 8; -+ switch (bytemode) -+ { -+ case b_mode: -+ USED_REX (0); -+ if (rex) -+ oappend (names8rex[reg + add]); -+ else -+ oappend (names8[reg + add]); -+ break; -+ case w_mode: -+ oappend (names16[reg + add]); -+ break; -+ case d_mode: -+ oappend (names32[reg + add]); -+ break; -+ case q_mode: -+ oappend (names64[reg + add]); -+ break; -+ case v_mode: -+ case dq_mode: -+ case dqw_mode: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ oappend (names64[reg + add]); -+ else if ((sizeflag & DFLAG) || bytemode != v_mode) -+ oappend (names32[reg + add]); -+ else -+ oappend (names16[reg + add]); -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case m_mode: -+ if (mode_64bit) -+ oappend (names64[reg + add]); -+ else -+ oappend (names32[reg + add]); -+ break; -+ default: -+ oappend (INTERNAL_DISASSEMBLER_ERROR); -+ break; -+ } -+} -+ -+static bfd_vma -+get64 (void) -+{ -+ bfd_vma x; -+#ifdef BFD64 -+ unsigned int a; -+ unsigned int b; -+ -+ FETCH_DATA (the_info, codep + 8); -+ a = *codep++ & 0xff; -+ a |= (*codep++ & 0xff) << 8; -+ a |= (*codep++ & 0xff) << 16; -+ a |= (*codep++ & 0xff) << 24; -+ b = *codep++ & 0xff; -+ b |= (*codep++ & 0xff) << 8; -+ b |= (*codep++ & 0xff) << 16; -+ b |= (*codep++ & 
0xff) << 24; -+ x = a + ((bfd_vma) b << 32); -+#else -+ abort (); -+ x = 0; -+#endif -+ return x; -+} -+ -+static bfd_signed_vma -+get32 (void) -+{ -+ bfd_signed_vma x = 0; -+ -+ FETCH_DATA (the_info, codep + 4); -+ x = *codep++ & (bfd_signed_vma) 0xff; -+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8; -+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16; -+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24; -+ return x; -+} -+ -+static bfd_signed_vma -+get32s (void) -+{ -+ bfd_signed_vma x = 0; -+ -+ FETCH_DATA (the_info, codep + 4); -+ x = *codep++ & (bfd_signed_vma) 0xff; -+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8; -+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16; -+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24; -+ -+ x = (x ^ ((bfd_signed_vma) 1 << 31)) - ((bfd_signed_vma) 1 << 31); -+ -+ return x; -+} -+ -+static int -+get16 (void) -+{ -+ int x = 0; -+ -+ FETCH_DATA (the_info, codep + 2); -+ x = *codep++ & 0xff; -+ x |= (*codep++ & 0xff) << 8; -+ return x; -+} -+ -+static void -+set_op (bfd_vma op, int riprel) -+{ -+ op_index[op_ad] = op_ad; -+ if (mode_64bit) -+ { -+ op_address[op_ad] = op; -+ op_riprel[op_ad] = riprel; -+ } -+ else -+ { -+ /* Mask to get a 32-bit address. */ -+ op_address[op_ad] = op & 0xffffffff; -+ op_riprel[op_ad] = riprel & 0xffffffff; -+ } -+} -+ -+static void -+OP_REG (int code, int sizeflag) -+{ -+ const char *s; -+ int add = 0; -+ USED_REX (REX_EXTZ); -+ if (rex & REX_EXTZ) -+ add = 8; -+ -+ switch (code) -+ { -+ case indir_dx_reg: -+ if (intel_syntax) -+ s = "[dx]"; -+ else -+ s = "(%dx)"; -+ break; -+ case ax_reg: case cx_reg: case dx_reg: case bx_reg: -+ case sp_reg: case bp_reg: case si_reg: case di_reg: -+ s = names16[code - ax_reg + add]; -+ break; -+ case es_reg: case ss_reg: case cs_reg: -+ case ds_reg: case fs_reg: case gs_reg: -+ s = names_seg[code - es_reg + add]; -+ break; -+ case al_reg: case ah_reg: case cl_reg: case ch_reg: -+ case dl_reg: case dh_reg: case bl_reg: case bh_reg: -+ USED_REX (0); -+ if (rex) -+ s = names8rex[code - al_reg + add]; -+ else -+ s = names8[code - al_reg]; -+ break; -+ case rAX_reg: case rCX_reg: case rDX_reg: case rBX_reg: -+ case rSP_reg: case rBP_reg: case rSI_reg: case rDI_reg: -+ if (mode_64bit) -+ { -+ s = names64[code - rAX_reg + add]; -+ break; -+ } -+ code += eAX_reg - rAX_reg; -+ /* Fall through. 
*/ -+ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg: -+ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ s = names64[code - eAX_reg + add]; -+ else if (sizeflag & DFLAG) -+ s = names32[code - eAX_reg + add]; -+ else -+ s = names16[code - eAX_reg + add]; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ default: -+ s = INTERNAL_DISASSEMBLER_ERROR; -+ break; -+ } -+ oappend (s); -+} -+ -+static void -+OP_IMREG (int code, int sizeflag) -+{ -+ const char *s; -+ -+ switch (code) -+ { -+ case indir_dx_reg: -+ if (intel_syntax) -+ s = "[dx]"; -+ else -+ s = "(%dx)"; -+ break; -+ case ax_reg: case cx_reg: case dx_reg: case bx_reg: -+ case sp_reg: case bp_reg: case si_reg: case di_reg: -+ s = names16[code - ax_reg]; -+ break; -+ case es_reg: case ss_reg: case cs_reg: -+ case ds_reg: case fs_reg: case gs_reg: -+ s = names_seg[code - es_reg]; -+ break; -+ case al_reg: case ah_reg: case cl_reg: case ch_reg: -+ case dl_reg: case dh_reg: case bl_reg: case bh_reg: -+ USED_REX (0); -+ if (rex) -+ s = names8rex[code - al_reg]; -+ else -+ s = names8[code - al_reg]; -+ break; -+ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg: -+ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ s = names64[code - eAX_reg]; -+ else if (sizeflag & DFLAG) -+ s = names32[code - eAX_reg]; -+ else -+ s = names16[code - eAX_reg]; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ default: -+ s = INTERNAL_DISASSEMBLER_ERROR; -+ break; -+ } -+ oappend (s); -+} -+ -+static void -+OP_I (int bytemode, int sizeflag) -+{ -+ bfd_signed_vma op; -+ bfd_signed_vma mask = -1; -+ -+ switch (bytemode) -+ { -+ case b_mode: -+ FETCH_DATA (the_info, codep + 1); -+ op = *codep++; -+ mask = 0xff; -+ break; -+ case q_mode: -+ if (mode_64bit) -+ { -+ op = get32s (); -+ break; -+ } -+ /* Fall through. 
*/ -+ case v_mode: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ op = get32s (); -+ else if (sizeflag & DFLAG) -+ { -+ op = get32 (); -+ mask = 0xffffffff; -+ } -+ else -+ { -+ op = get16 (); -+ mask = 0xfffff; -+ } -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case w_mode: -+ mask = 0xfffff; -+ op = get16 (); -+ break; -+ case const_1_mode: -+ if (intel_syntax) -+ oappend ("1"); -+ return; -+ default: -+ oappend (INTERNAL_DISASSEMBLER_ERROR); -+ return; -+ } -+ -+ op &= mask; -+ scratchbuf[0] = '$'; -+ print_operand_value (scratchbuf + 1, 1, op); -+ oappend (scratchbuf + intel_syntax); -+ scratchbuf[0] = '\0'; -+} -+ -+static void -+OP_I64 (int bytemode, int sizeflag) -+{ -+ bfd_signed_vma op; -+ bfd_signed_vma mask = -1; -+ -+ if (!mode_64bit) -+ { -+ OP_I (bytemode, sizeflag); -+ return; -+ } -+ -+ switch (bytemode) -+ { -+ case b_mode: -+ FETCH_DATA (the_info, codep + 1); -+ op = *codep++; -+ mask = 0xff; -+ break; -+ case v_mode: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ op = get64 (); -+ else if (sizeflag & DFLAG) -+ { -+ op = get32 (); -+ mask = 0xffffffff; -+ } -+ else -+ { -+ op = get16 (); -+ mask = 0xfffff; -+ } -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case w_mode: -+ mask = 0xfffff; -+ op = get16 (); -+ break; -+ default: -+ oappend (INTERNAL_DISASSEMBLER_ERROR); -+ return; -+ } -+ -+ op &= mask; -+ scratchbuf[0] = '$'; -+ print_operand_value (scratchbuf + 1, 1, op); -+ oappend (scratchbuf + intel_syntax); -+ scratchbuf[0] = '\0'; -+} -+ -+static void -+OP_sI (int bytemode, int sizeflag) -+{ -+ bfd_signed_vma op; -+ bfd_signed_vma mask = -1; -+ -+ switch (bytemode) -+ { -+ case b_mode: -+ FETCH_DATA (the_info, codep + 1); -+ op = *codep++; -+ if ((op & 0x80) != 0) -+ op -= 0x100; -+ mask = 0xffffffff; -+ break; -+ case v_mode: -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ op = get32s (); -+ else if (sizeflag & DFLAG) -+ { -+ op = get32s (); -+ mask = 0xffffffff; -+ } -+ else -+ { -+ mask = 0xffffffff; -+ op = get16 (); -+ if ((op & 0x8000) != 0) -+ op -= 0x10000; -+ } -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ break; -+ case w_mode: -+ op = get16 (); -+ mask = 0xffffffff; -+ if ((op & 0x8000) != 0) -+ op -= 0x10000; -+ break; -+ default: -+ oappend (INTERNAL_DISASSEMBLER_ERROR); -+ return; -+ } -+ -+ scratchbuf[0] = '$'; -+ print_operand_value (scratchbuf + 1, 1, op); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+static void -+OP_J (int bytemode, int sizeflag) -+{ -+ bfd_vma disp; -+ bfd_vma mask = -1; -+ -+ switch (bytemode) -+ { -+ case b_mode: -+ FETCH_DATA (the_info, codep + 1); -+ disp = *codep++; -+ if ((disp & 0x80) != 0) -+ disp -= 0x100; -+ break; -+ case v_mode: -+ if (sizeflag & DFLAG) -+ disp = get32s (); -+ else -+ { -+ disp = get16 (); -+ /* For some reason, a data16 prefix on a jump instruction -+ means that the pc is masked to 16 bits after the -+ displacement is added! 
*/ -+ mask = 0xffff; -+ } -+ break; -+ default: -+ oappend (INTERNAL_DISASSEMBLER_ERROR); -+ return; -+ } -+ disp = (start_pc + codep - start_codep + disp) & mask; -+ set_op (disp, 0); -+ print_operand_value (scratchbuf, 1, disp); -+ oappend (scratchbuf); -+} -+ -+static void -+OP_SEG (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ oappend (names_seg[reg]); -+} -+ -+static void -+OP_DIR (int dummy ATTRIBUTE_UNUSED, int sizeflag) -+{ -+ int seg, offset; -+ -+ if (sizeflag & DFLAG) -+ { -+ offset = get32 (); -+ seg = get16 (); -+ } -+ else -+ { -+ offset = get16 (); -+ seg = get16 (); -+ } -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ if (intel_syntax) -+ sprintf (scratchbuf, "0x%x,0x%x", seg, offset); -+ else -+ sprintf (scratchbuf, "$0x%x,$0x%x", seg, offset); -+ oappend (scratchbuf); -+} -+ -+static void -+OP_OFF (int bytemode ATTRIBUTE_UNUSED, int sizeflag) -+{ -+ bfd_vma off; -+ -+ append_seg (); -+ -+ if ((sizeflag & AFLAG) || mode_64bit) -+ off = get32 (); -+ else -+ off = get16 (); -+ -+ if (intel_syntax) -+ { -+ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS -+ | PREFIX_ES | PREFIX_FS | PREFIX_GS))) -+ { -+ oappend (names_seg[ds_reg - es_reg]); -+ oappend (":"); -+ } -+ } -+ print_operand_value (scratchbuf, 1, off); -+ oappend (scratchbuf); -+} -+ -+static void -+OP_OFF64 (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ bfd_vma off; -+ -+ if (!mode_64bit) -+ { -+ OP_OFF (bytemode, sizeflag); -+ return; -+ } -+ -+ append_seg (); -+ -+ off = get64 (); -+ -+ if (intel_syntax) -+ { -+ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS -+ | PREFIX_ES | PREFIX_FS | PREFIX_GS))) -+ { -+ oappend (names_seg[ds_reg - es_reg]); -+ oappend (":"); -+ } -+ } -+ print_operand_value (scratchbuf, 1, off); -+ oappend (scratchbuf); -+} -+ -+static void -+ptr_reg (int code, int sizeflag) -+{ -+ const char *s; -+ -+ *obufp++ = open_char; -+ used_prefixes |= (prefixes & PREFIX_ADDR); -+ if (mode_64bit) -+ { -+ if (!(sizeflag & AFLAG)) -+ s = names32[code - eAX_reg]; -+ else -+ s = names64[code - eAX_reg]; -+ } -+ else if (sizeflag & AFLAG) -+ s = names32[code - eAX_reg]; -+ else -+ s = names16[code - eAX_reg]; -+ oappend (s); -+ *obufp++ = close_char; -+ *obufp = 0; -+} -+ -+static void -+OP_ESreg (int code, int sizeflag) -+{ -+ if (intel_syntax) -+ { -+ if (codep[-1] & 1) -+ { -+ USED_REX (REX_MODE64); -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ if (rex & REX_MODE64) -+ oappend ("QWORD PTR "); -+ else if ((sizeflag & DFLAG)) -+ oappend ("DWORD PTR "); -+ else -+ oappend ("WORD PTR "); -+ } -+ else -+ oappend ("BYTE PTR "); -+ } -+ -+ oappend ("%es:" + intel_syntax); -+ ptr_reg (code, sizeflag); -+} -+ -+static void -+OP_DSreg (int code, int sizeflag) -+{ -+ if (intel_syntax) -+ { -+ if (codep[-1] != 0xd7 && (codep[-1] & 1)) -+ { -+ USED_REX (REX_MODE64); -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ if (rex & REX_MODE64) -+ oappend ("QWORD PTR "); -+ else if ((sizeflag & DFLAG)) -+ oappend ("DWORD PTR "); -+ else -+ oappend ("WORD PTR "); -+ } -+ else -+ oappend ("BYTE PTR "); -+ } -+ -+ if ((prefixes -+ & (PREFIX_CS -+ | PREFIX_DS -+ | PREFIX_SS -+ | PREFIX_ES -+ | PREFIX_FS -+ | PREFIX_GS)) == 0) -+ prefixes |= PREFIX_DS; -+ append_seg (); -+ ptr_reg (code, sizeflag); -+} -+ -+static void -+OP_C (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ int add = 0; -+ if (rex & REX_EXTX) -+ { -+ USED_REX (REX_EXTX); -+ add = 8; -+ } -+ else if (!mode_64bit && (prefixes & PREFIX_LOCK)) -+ { -+ used_prefixes |= PREFIX_LOCK; -+ add = 8; -+ 
} -+ sprintf (scratchbuf, "%%cr%d", reg + add); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+static void -+OP_D (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ int add = 0; -+ USED_REX (REX_EXTX); -+ if (rex & REX_EXTX) -+ add = 8; -+ if (intel_syntax) -+ sprintf (scratchbuf, "db%d", reg + add); -+ else -+ sprintf (scratchbuf, "%%db%d", reg + add); -+ oappend (scratchbuf); -+} -+ -+static void -+OP_T (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ sprintf (scratchbuf, "%%tr%d", reg); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+static void -+OP_Rd (int bytemode, int sizeflag) -+{ -+ if (mod == 3) -+ OP_E (bytemode, sizeflag); -+ else -+ BadOp (); -+} -+ -+static void -+OP_MMX (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ if (prefixes & PREFIX_DATA) -+ { -+ int add = 0; -+ USED_REX (REX_EXTX); -+ if (rex & REX_EXTX) -+ add = 8; -+ sprintf (scratchbuf, "%%xmm%d", reg + add); -+ } -+ else -+ sprintf (scratchbuf, "%%mm%d", reg); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+static void -+OP_XMM (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ int add = 0; -+ USED_REX (REX_EXTX); -+ if (rex & REX_EXTX) -+ add = 8; -+ sprintf (scratchbuf, "%%xmm%d", reg + add); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+static void -+OP_EM (int bytemode, int sizeflag) -+{ -+ if (mod != 3) -+ { -+ if (intel_syntax && bytemode == v_mode) -+ { -+ bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ } -+ OP_E (bytemode, sizeflag); -+ return; -+ } -+ -+ /* Skip mod/rm byte. */ -+ MODRM_CHECK; -+ codep++; -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ if (prefixes & PREFIX_DATA) -+ { -+ int add = 0; -+ -+ USED_REX (REX_EXTZ); -+ if (rex & REX_EXTZ) -+ add = 8; -+ sprintf (scratchbuf, "%%xmm%d", rm + add); -+ } -+ else -+ sprintf (scratchbuf, "%%mm%d", rm); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+static void -+OP_EX (int bytemode, int sizeflag) -+{ -+ int add = 0; -+ if (mod != 3) -+ { -+ if (intel_syntax && bytemode == v_mode) -+ { -+ switch (prefixes & (PREFIX_DATA|PREFIX_REPZ|PREFIX_REPNZ)) -+ { -+ case 0: bytemode = x_mode; break; -+ case PREFIX_REPZ: bytemode = d_mode; used_prefixes |= PREFIX_REPZ; break; -+ case PREFIX_DATA: bytemode = x_mode; used_prefixes |= PREFIX_DATA; break; -+ case PREFIX_REPNZ: bytemode = q_mode; used_prefixes |= PREFIX_REPNZ; break; -+ default: bytemode = 0; break; -+ } -+ } -+ OP_E (bytemode, sizeflag); -+ return; -+ } -+ USED_REX (REX_EXTZ); -+ if (rex & REX_EXTZ) -+ add = 8; -+ -+ /* Skip mod/rm byte. 
*/ -+ MODRM_CHECK; -+ codep++; -+ sprintf (scratchbuf, "%%xmm%d", rm + add); -+ oappend (scratchbuf + intel_syntax); -+} -+ -+static void -+OP_MS (int bytemode, int sizeflag) -+{ -+ if (mod == 3) -+ OP_EM (bytemode, sizeflag); -+ else -+ BadOp (); -+} -+ -+static void -+OP_XS (int bytemode, int sizeflag) -+{ -+ if (mod == 3) -+ OP_EX (bytemode, sizeflag); -+ else -+ BadOp (); -+} -+ -+static void -+OP_M (int bytemode, int sizeflag) -+{ -+ if (mod == 3) -+ BadOp (); /* bad lea,lds,les,lfs,lgs,lss modrm */ -+ else -+ OP_E (bytemode, sizeflag); -+} -+ -+static void -+OP_0f07 (int bytemode, int sizeflag) -+{ -+ if (mod != 3 || rm != 0) -+ BadOp (); -+ else -+ OP_E (bytemode, sizeflag); -+} -+ -+static void -+OP_0fae (int bytemode, int sizeflag) -+{ -+ if (mod == 3) -+ { -+ if (reg == 5) -+ strcpy (obuf + strlen (obuf) - sizeof ("xrstor") + 1, "lfence"); -+ if (reg == 7) -+ strcpy (obuf + strlen (obuf) - sizeof ("clflush") + 1, "sfence"); -+ -+ if (reg < 5 || rm != 0) -+ { -+ BadOp (); /* bad sfence, mfence, or lfence */ -+ return; -+ } -+ } -+ else if (reg != 5 && reg != 7) -+ { -+ BadOp (); /* bad xrstor or clflush */ -+ return; -+ } -+ -+ OP_E (bytemode, sizeflag); -+} -+ -+static void -+NOP_Fixup (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ /* NOP with REPZ prefix is called PAUSE. */ -+ if (prefixes == PREFIX_REPZ) -+ strcpy (obuf, "pause"); -+} -+ -+static const char *const Suffix3DNow[] = { -+/* 00 */ NULL, NULL, NULL, NULL, -+/* 04 */ NULL, NULL, NULL, NULL, -+/* 08 */ NULL, NULL, NULL, NULL, -+/* 0C */ "pi2fw", "pi2fd", NULL, NULL, -+/* 10 */ NULL, NULL, NULL, NULL, -+/* 14 */ NULL, NULL, NULL, NULL, -+/* 18 */ NULL, NULL, NULL, NULL, -+/* 1C */ "pf2iw", "pf2id", NULL, NULL, -+/* 20 */ NULL, NULL, NULL, NULL, -+/* 24 */ NULL, NULL, NULL, NULL, -+/* 28 */ NULL, NULL, NULL, NULL, -+/* 2C */ NULL, NULL, NULL, NULL, -+/* 30 */ NULL, NULL, NULL, NULL, -+/* 34 */ NULL, NULL, NULL, NULL, -+/* 38 */ NULL, NULL, NULL, NULL, -+/* 3C */ NULL, NULL, NULL, NULL, -+/* 40 */ NULL, NULL, NULL, NULL, -+/* 44 */ NULL, NULL, NULL, NULL, -+/* 48 */ NULL, NULL, NULL, NULL, -+/* 4C */ NULL, NULL, NULL, NULL, -+/* 50 */ NULL, NULL, NULL, NULL, -+/* 54 */ NULL, NULL, NULL, NULL, -+/* 58 */ NULL, NULL, NULL, NULL, -+/* 5C */ NULL, NULL, NULL, NULL, -+/* 60 */ NULL, NULL, NULL, NULL, -+/* 64 */ NULL, NULL, NULL, NULL, -+/* 68 */ NULL, NULL, NULL, NULL, -+/* 6C */ NULL, NULL, NULL, NULL, -+/* 70 */ NULL, NULL, NULL, NULL, -+/* 74 */ NULL, NULL, NULL, NULL, -+/* 78 */ NULL, NULL, NULL, NULL, -+/* 7C */ NULL, NULL, NULL, NULL, -+/* 80 */ NULL, NULL, NULL, NULL, -+/* 84 */ NULL, NULL, NULL, NULL, -+/* 88 */ NULL, NULL, "pfnacc", NULL, -+/* 8C */ NULL, NULL, "pfpnacc", NULL, -+/* 90 */ "pfcmpge", NULL, NULL, NULL, -+/* 94 */ "pfmin", NULL, "pfrcp", "pfrsqrt", -+/* 98 */ NULL, NULL, "pfsub", NULL, -+/* 9C */ NULL, NULL, "pfadd", NULL, -+/* A0 */ "pfcmpgt", NULL, NULL, NULL, -+/* A4 */ "pfmax", NULL, "pfrcpit1", "pfrsqit1", -+/* A8 */ NULL, NULL, "pfsubr", NULL, -+/* AC */ NULL, NULL, "pfacc", NULL, -+/* B0 */ "pfcmpeq", NULL, NULL, NULL, -+/* B4 */ "pfmul", NULL, "pfrcpit2", "pfmulhrw", -+/* B8 */ NULL, NULL, NULL, "pswapd", -+/* BC */ NULL, NULL, NULL, "pavgusb", -+/* C0 */ NULL, NULL, NULL, NULL, -+/* C4 */ NULL, NULL, NULL, NULL, -+/* C8 */ NULL, NULL, NULL, NULL, -+/* CC */ NULL, NULL, NULL, NULL, -+/* D0 */ NULL, NULL, NULL, NULL, -+/* D4 */ NULL, NULL, NULL, NULL, -+/* D8 */ NULL, NULL, NULL, NULL, -+/* DC */ NULL, NULL, NULL, NULL, -+/* E0 */ NULL, NULL, NULL, NULL, -+/* E4 */ NULL, 
NULL, NULL, NULL, -+/* E8 */ NULL, NULL, NULL, NULL, -+/* EC */ NULL, NULL, NULL, NULL, -+/* F0 */ NULL, NULL, NULL, NULL, -+/* F4 */ NULL, NULL, NULL, NULL, -+/* F8 */ NULL, NULL, NULL, NULL, -+/* FC */ NULL, NULL, NULL, NULL, -+}; -+ -+static void -+OP_3DNowSuffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ const char *mnemonic; -+ -+ FETCH_DATA (the_info, codep + 1); -+ /* AMD 3DNow! instructions are specified by an opcode suffix in the -+ place where an 8-bit immediate would normally go. ie. the last -+ byte of the instruction. */ -+ obufp = obuf + strlen (obuf); -+ mnemonic = Suffix3DNow[*codep++ & 0xff]; -+ if (mnemonic) -+ oappend (mnemonic); -+ else -+ { -+ /* Since a variable sized modrm/sib chunk is between the start -+ of the opcode (0x0f0f) and the opcode suffix, we need to do -+ all the modrm processing first, and don't know until now that -+ we have a bad opcode. This necessitates some cleaning up. */ -+ op1out[0] = '\0'; -+ op2out[0] = '\0'; -+ BadOp (); -+ } -+} -+ -+static const char *simd_cmp_op[] = { -+ "eq", -+ "lt", -+ "le", -+ "unord", -+ "neq", -+ "nlt", -+ "nle", -+ "ord" -+}; -+ -+static void -+OP_SIMD_Suffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ unsigned int cmp_type; -+ -+ FETCH_DATA (the_info, codep + 1); -+ obufp = obuf + strlen (obuf); -+ cmp_type = *codep++ & 0xff; -+ if (cmp_type < 8) -+ { -+ char suffix1 = 'p', suffix2 = 's'; -+ used_prefixes |= (prefixes & PREFIX_REPZ); -+ if (prefixes & PREFIX_REPZ) -+ suffix1 = 's'; -+ else -+ { -+ used_prefixes |= (prefixes & PREFIX_DATA); -+ if (prefixes & PREFIX_DATA) -+ suffix2 = 'd'; -+ else -+ { -+ used_prefixes |= (prefixes & PREFIX_REPNZ); -+ if (prefixes & PREFIX_REPNZ) -+ suffix1 = 's', suffix2 = 'd'; -+ } -+ } -+ sprintf (scratchbuf, "cmp%s%c%c", -+ simd_cmp_op[cmp_type], suffix1, suffix2); -+ used_prefixes |= (prefixes & PREFIX_REPZ); -+ oappend (scratchbuf); -+ } -+ else -+ { -+ /* We have a bad extension byte. Clean up. */ -+ op1out[0] = '\0'; -+ op2out[0] = '\0'; -+ BadOp (); -+ } -+} -+ -+static void -+SIMD_Fixup (int extrachar, int sizeflag ATTRIBUTE_UNUSED) -+{ -+ /* Change movlps/movhps to movhlps/movlhps for 2 register operand -+ forms of these instructions. */ -+ if (mod == 3) -+ { -+ char *p = obuf + strlen (obuf); -+ *(p + 1) = '\0'; -+ *p = *(p - 1); -+ *(p - 1) = *(p - 2); -+ *(p - 2) = *(p - 3); -+ *(p - 3) = extrachar; -+ } -+} -+ -+static void -+PNI_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag) -+{ -+ if (mod == 3 && reg == 1 && rm <= 1) -+ { -+ /* Override "sidt". */ -+ char *p = obuf + strlen (obuf) - 4; -+ -+ /* We might have a suffix when disassembling with -Msuffix. 
*/ -+ if (*p == 'i') -+ --p; -+ -+ if (rm) -+ { -+ /* mwait %eax,%ecx */ -+ strcpy (p, "mwait"); -+ if (!intel_syntax) -+ strcpy (op1out, names32[0]); -+ } -+ else -+ { -+ /* monitor %eax,%ecx,%edx" */ -+ strcpy (p, "monitor"); -+ if (!intel_syntax) -+ { -+ if (!mode_64bit) -+ strcpy (op1out, names32[0]); -+ else if (!(prefixes & PREFIX_ADDR)) -+ strcpy (op1out, names64[0]); -+ else -+ { -+ strcpy (op1out, names32[0]); -+ used_prefixes |= PREFIX_ADDR; -+ } -+ strcpy (op3out, names32[2]); -+ } -+ } -+ if (!intel_syntax) -+ { -+ strcpy (op2out, names32[1]); -+ two_source_ops = 1; -+ } -+ -+ codep++; -+ } -+ else -+ OP_M (0, sizeflag); -+} -+ -+static void -+SVME_Fixup (int bytemode, int sizeflag) -+{ -+ const char *alt; -+ char *p; -+ -+ switch (*codep) -+ { -+ case 0xd8: -+ alt = "vmrun"; -+ break; -+ case 0xd9: -+ alt = "vmmcall"; -+ break; -+ case 0xda: -+ alt = "vmload"; -+ break; -+ case 0xdb: -+ alt = "vmsave"; -+ break; -+ case 0xdc: -+ alt = "stgi"; -+ break; -+ case 0xdd: -+ alt = "clgi"; -+ break; -+ case 0xde: -+ alt = "skinit"; -+ break; -+ case 0xdf: -+ alt = "invlpga"; -+ break; -+ default: -+ OP_M (bytemode, sizeflag); -+ return; -+ } -+ /* Override "lidt". */ -+ p = obuf + strlen (obuf) - 4; -+ /* We might have a suffix. */ -+ if (*p == 'i') -+ --p; -+ strcpy (p, alt); -+ if (!(prefixes & PREFIX_ADDR)) -+ { -+ ++codep; -+ return; -+ } -+ used_prefixes |= PREFIX_ADDR; -+ switch (*codep++) -+ { -+ case 0xdf: -+ strcpy (op2out, names32[1]); -+ two_source_ops = 1; -+ /* Fall through. */ -+ case 0xd8: -+ case 0xda: -+ case 0xdb: -+ *obufp++ = open_char; -+ if (mode_64bit || (sizeflag & AFLAG)) -+ alt = names32[0]; -+ else -+ alt = names16[0]; -+ strcpy (obufp, alt); -+ obufp += strlen (alt); -+ *obufp++ = close_char; -+ *obufp = '\0'; -+ break; -+ } -+} -+ -+static void -+INVLPG_Fixup (int bytemode, int sizeflag) -+{ -+ const char *alt; -+ -+ switch (*codep) -+ { -+ case 0xf8: -+ alt = "swapgs"; -+ break; -+ case 0xf9: -+ alt = "rdtscp"; -+ break; -+ default: -+ OP_M (bytemode, sizeflag); -+ return; -+ } -+ /* Override "invlpg". */ -+ strcpy (obuf + strlen (obuf) - 6, alt); -+ codep++; -+} -+ -+static void -+BadOp (void) -+{ -+ /* Throw away prefixes and 1st. opcode byte. */ -+ codep = insn_codep + 1; -+ oappend ("(bad)"); -+} -+ -+static void -+SEG_Fixup (int extrachar, int sizeflag) -+{ -+ if (mod == 3) -+ { -+ /* We need to add a proper suffix with -+ -+ movw %ds,%ax -+ movl %ds,%eax -+ movq %ds,%rax -+ movw %ax,%ds -+ movl %eax,%ds -+ movq %rax,%ds -+ */ -+ const char *suffix; -+ -+ if (prefixes & PREFIX_DATA) -+ suffix = "w"; -+ else -+ { -+ USED_REX (REX_MODE64); -+ if (rex & REX_MODE64) -+ suffix = "q"; -+ else -+ suffix = "l"; -+ } -+ strcat (obuf, suffix); -+ } -+ else -+ { -+ /* We need to fix the suffix for -+ -+ movw %ds,(%eax) -+ movw %ds,(%rax) -+ movw (%eax),%ds -+ movw (%rax),%ds -+ -+ Override "mov[l|q]". */ -+ char *p = obuf + strlen (obuf) - 1; -+ -+ /* We might not have a suffix. */ -+ if (*p == 'v') -+ ++p; -+ *p = 'w'; -+ } -+ -+ OP_E (extrachar, sizeflag); -+} -+ -+static void -+VMX_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag) -+{ -+ if (mod == 3 && reg == 0 && rm >=1 && rm <= 4) -+ { -+ /* Override "sgdt". */ -+ char *p = obuf + strlen (obuf) - 4; -+ -+ /* We might have a suffix when disassembling with -Msuffix. 
*/ -+ if (*p == 'g') -+ --p; -+ -+ switch (rm) -+ { -+ case 1: -+ strcpy (p, "vmcall"); -+ break; -+ case 2: -+ strcpy (p, "vmlaunch"); -+ break; -+ case 3: -+ strcpy (p, "vmresume"); -+ break; -+ case 4: -+ strcpy (p, "vmxoff"); -+ break; -+ } -+ -+ codep++; -+ } -+ else -+ OP_E (0, sizeflag); -+} -+ -+static void -+OP_VMX (int bytemode, int sizeflag) -+{ -+ used_prefixes |= (prefixes & (PREFIX_DATA | PREFIX_REPZ)); -+ if (prefixes & PREFIX_DATA) -+ strcpy (obuf, "vmclear"); -+ else if (prefixes & PREFIX_REPZ) -+ strcpy (obuf, "vmxon"); -+ else -+ strcpy (obuf, "vmptrld"); -+ OP_E (bytemode, sizeflag); -+} ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -42,6 +42,9 @@ - #include - #include - #include -+#ifdef CONFIG_KDB -+#include -+#endif - - #include - #include -@@ -1193,6 +1196,11 @@ next: - if (test_bit(vector, used_vectors)) - goto next; - -+#ifdef CONFIG_KDB -+ if (vector == KDBENTER_VECTOR) -+ goto next; -+#endif -+ - for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) - if (per_cpu(vector_irq, new_cpu)[vector] != -1) - goto next; ---- a/arch/x86/kernel/dumpstack.c -+++ b/arch/x86/kernel/dumpstack.c -@@ -15,6 +15,9 @@ - #include - #include - #include -+#ifdef CONFIG_KDB -+#include -+#endif - - #include - -@@ -260,6 +263,9 @@ void __kprobes oops_end(unsigned long fl - /* Nest count reaches zero, release the lock. */ - arch_spin_unlock(&die_lock); - raw_local_irq_restore(flags); -+#ifdef CONFIG_KB -+ kdb(KDB_REASON_OOPS, signr, regs); -+#endif - oops_exit(); - - if (!signr) -@@ -328,6 +334,9 @@ void die(const char *str, struct pt_regs - - if (__die(str, regs, err)) - sig = 0; -+#ifdef CONFIG_KDB -+ kdb_diemsg = str; -+#endif - oops_end(flags, regs, sig); - } - -@@ -348,6 +357,9 @@ die_nmi(char *str, struct pt_regs *regs, - printk(" on CPU%d, ip %08lx, registers:\n", - smp_processor_id(), regs->ip); - show_registers(regs); -+#ifdef CONFIG_KDB -+ kdb(KDB_REASON_NMI, 0, regs); -+#endif - oops_end(flags, regs, 0); - if (do_panic || panic_on_oops) - panic("Non maskable interrupt"); ---- a/arch/x86/kernel/entry_32.S -+++ b/arch/x86/kernel/entry_32.S -@@ -1008,6 +1008,26 @@ ENTRY(alignment_check) - CFI_ENDPROC - END(alignment_check) - -+#ifdef CONFIG_KDB -+ -+ENTRY(kdb_call) -+ RING0_INT_FRAME -+ pushl %eax # save orig EAX -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ movl %esp,%ecx # struct pt_regs -+ movl $0,%edx # error_code -+ movl $1,%eax # KDB_REASON_ENTER -+ call kdb -+ jmp restore_all -+ CFI_ENDPROC -+ -+#ifdef CONFIG_SMP -+BUILD_INTERRUPT(kdb_interrupt,KDB_VECTOR) -+#endif /* CONFIG_SMP */ -+ -+#endif /* CONFIG_KDB */ -+ - ENTRY(divide_error) - RING0_INT_FRAME - pushl $0 # no error code ---- a/arch/x86/kernel/entry_64.S -+++ b/arch/x86/kernel/entry_64.S -@@ -1331,6 +1331,33 @@ END(xen_failsafe_callback) - - #endif /* CONFIG_XEN */ - -+#ifdef CONFIG_KDB -+ -+#ifdef CONFIG_SMP -+apicinterrupt KDB_VECTOR \ -+ kdb_interrupt, smp_kdb_interrupt -+#endif /* CONFIG_SMP */ -+ -+ENTRY(kdb_call) -+ INTR_FRAME -+ cld -+ pushq $-1 # orig_eax -+ CFI_ADJUST_CFA_OFFSET 8 -+ SAVE_ALL -+ movq $1,%rdi # KDB_REASON_ENTER -+ movq $0,%rsi # error_code -+ movq %rsp,%rdx # struct pt_regs -+ call kdb -+ RESTORE_ALL -+ addq $8,%rsp # forget orig_eax -+ CFI_ADJUST_CFA_OFFSET -8 -+ iretq -+ CFI_ENDPROC -+END(kdb_call) -+ -+#endif /* CONFIG_KDB */ -+ -+ - /* - * Some functions should be protected against kprobes - */ ---- a/arch/x86/kernel/reboot.c -+++ b/arch/x86/kernel/reboot.c -@@ -3,6 +3,10 @@ - #include - #include - #include -+#ifdef CONFIG_KDB -+#include -+#endif /* 
CONFIG_KDB */ -+#include - #include - #include - #include -@@ -630,6 +634,14 @@ void native_machine_shutdown(void) - /* Make certain I only run on the appropriate processor */ - set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); - -+#if defined(CONFIG_X86_32) && defined(CONFIG_KDB) -+ /* -+ * If this restart is occuring while kdb is running (e.g. reboot -+ * command), the other CPU's are already stopped. Don't try to -+ * stop them yet again. -+ */ -+ if (!KDB_IS_RUNNING()) -+#endif /* defined(CONFIG_X86_32) && defined(CONFIG_KDB) */ - /* O.K Now that I'm on the appropriate processor, - * stop all of the others. - */ -@@ -740,6 +752,29 @@ static nmi_shootdown_cb shootdown_callba - - static atomic_t waiting_for_crash_ipi; - -+#ifdef CONFIG_KDB_KDUMP -+void halt_current_cpu(struct pt_regs *regs) -+{ -+#ifdef CONFIG_X86_32 -+ struct pt_regs fixed_regs; -+#endif -+ local_irq_disable(); -+#ifdef CONFIG_X86_32 -+ if (!user_mode_vm(regs)) { -+ crash_fixup_ss_esp(&fixed_regs, regs); -+ regs = &fixed_regs; -+ } -+#endif -+ crash_save_cpu(regs, raw_smp_processor_id()); -+ disable_local_APIC(); -+ atomic_dec(&waiting_for_crash_ipi); -+ /* Assume hlt works */ -+ halt(); -+ for(;;) -+ cpu_relax(); -+} -+#endif /* CONFIG_KDB_KDUMP */ -+ - static int crash_nmi_callback(struct notifier_block *self, - unsigned long val, void *data) - { ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -44,6 +44,10 @@ - #include - #endif - -+#ifdef CONFIG_KDB -+#include -+#endif /* CONFIG_KDB */ -+ - #include - #include - #include -@@ -361,6 +365,10 @@ io_check_error(unsigned char reason, str - static notrace __kprobes void - unknown_nmi_error(unsigned char reason, struct pt_regs *regs) - { -+#ifdef CONFIG_KDB -+ (void)kdb(KDB_REASON_NMI, reason, regs); -+#endif /* CONFIG_KDB */ -+ - if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == - NOTIFY_STOP) - return; -@@ -396,6 +404,16 @@ static notrace __kprobes void default_do - if (!cpu) - reason = get_nmi_reason(); - -+#if defined(CONFIG_SMP) && defined(CONFIG_KDB) -+ /* -+ * Call the kernel debugger to see if this NMI is due -+ * to an KDB requested IPI. If so, kdb will handle it. -+ */ -+ if (kdb_ipi(regs, NULL)) { -+ return; -+ } -+#endif /* defined(CONFIG_SMP) && defined(CONFIG_KDB) */ -+ - if (!(reason & 0xc0)) { - if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) - == NOTIFY_STOP) -@@ -460,6 +478,10 @@ void restart_nmi(void) - /* May run on IST stack. */ - dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) - { -+#ifdef CONFIG_KDB -+ if (kdb(KDB_REASON_BREAK, error_code, regs)) -+ return; -+#endif - #ifdef CONFIG_KPROBES - if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) - == NOTIFY_STOP) -@@ -552,6 +574,11 @@ dotraplinkage void __kprobes do_debug(st - /* Store the virtualized DR6 value */ - tsk->thread.debugreg6 = dr6; - -+#ifdef CONFIG_KDB -+ if (kdb(KDB_REASON_DEBUG, error_code, regs)) -+ return; -+#endif /* CONFIG_KDB */ -+ - if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, - SIGTRAP) == NOTIFY_STOP) - return; diff --git a/patches.suse/kdb-x86-build-fixes b/patches.suse/kdb-x86-build-fixes deleted file mode 100644 index 216b695..0000000 --- a/patches.suse/kdb-x86-build-fixes +++ /dev/null @@ -1,19 +0,0 @@ -From: Jeff Mahoney -Subject: kdb: Use $srctree not $TOPDIR in Makefile -Patch-mainline: not yet, whenever KDB is upstream - - $TOPDIR doesn't work when building out of tree. Use $srctree instead. 
- -Signed-off-by: Jeff Mahoney ---- - arch/x86/kdb/Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/x86/kdb/Makefile -+++ b/arch/x86/kdb/Makefile -@@ -26,4 +26,4 @@ CFLAGS_kdba_bt.o += -DREGPARM=$(REGPARM) - - override CFLAGS := $(CFLAGS:%-pg=% ) - --CFLAGS_kdba_io.o += -I $(TOPDIR)/arch/$(SRCARCH)/kdb -+CFLAGS_kdba_io.o += -I $(srctree)/arch/$(SRCARCH)/kdb diff --git a/patches.suse/kdb_dont_touch_i8042_early.patch b/patches.suse/kdb_dont_touch_i8042_early.patch deleted file mode 100644 index acf9789..0000000 --- a/patches.suse/kdb_dont_touch_i8042_early.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Thomas Renninger -Subject: Avoid early hang when i8042 controller is missing -Patch-Mainline: no -References: bnc#528811 - -On latest machines without i8042 controller LED control -will hang when BIOS option "PORT 60/64 emulation" is enabled -(which is required for specific extra storage BIOS to work). - -Workaround for now: Don't touch LEDs at all. - ---- - arch/x86/kdb/kdba_io.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/x86/kdb/kdba_io.c -+++ b/arch/x86/kdb/kdba_io.c -@@ -23,7 +23,7 @@ - #include "pc_keyb.h" - - #ifdef CONFIG_VT_CONSOLE --#define KDB_BLINK_LED 1 -+#undef KDB_BLINK_LED - #else - #undef KDB_BLINK_LED - #endif diff --git a/patches.suse/kdb_fix_ia64_build.patch b/patches.suse/kdb_fix_ia64_build.patch deleted file mode 100644 index 9860077..0000000 --- a/patches.suse/kdb_fix_ia64_build.patch +++ /dev/null @@ -1,19 +0,0 @@ -From: Thomas Renninger -Subject: Fix ia64 - Export kdb_usb_kbds -Patch-Mainline: no -References: none - ---- - arch/ia64/kdb/kdba_io.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/ia64/kdb/kdba_io.c -+++ b/arch/ia64/kdb/kdba_io.c -@@ -43,6 +43,7 @@ - /* support up to 8 USB keyboards (probably excessive, but...) */ - #define KDB_USB_NUM_KEYBOARDS 8 - struct kdb_usb_kbd_info kdb_usb_kbds[KDB_USB_NUM_KEYBOARDS]; -+EXPORT_SYMBOL(kdb_usb_kbds); - - extern int kdb_no_usb; - diff --git a/patches.suse/linux-2.6.29-dont-wait-for-mouse.patch b/patches.suse/linux-2.6.29-dont-wait-for-mouse.patch index 533ab9e..c5fd2b4 100644 --- a/patches.suse/linux-2.6.29-dont-wait-for-mouse.patch +++ b/patches.suse/linux-2.6.29-dont-wait-for-mouse.patch @@ -27,7 +27,7 @@ Signed-off-by: Greg Kroah-Hartman --- a/init/do_mounts.c +++ b/init/do_mounts.c -@@ -372,6 +372,7 @@ void __init prepare_namespace(void) +@@ -373,6 +373,7 @@ void __init prepare_namespace(void) ssleep(root_delay); } @@ -35,7 +35,7 @@ Signed-off-by: Greg Kroah-Hartman /* * wait for the known devices to complete their probing * -@@ -380,6 +381,8 @@ void __init prepare_namespace(void) +@@ -381,6 +382,8 @@ void __init prepare_namespace(void) * for the touchpad of a laptop to initialize. 
*/ wait_for_device_probe(); diff --git a/patches.suse/linux-2.6.29-enable-async-by-default.patch b/patches.suse/linux-2.6.29-enable-async-by-default.patch deleted file mode 100644 index 76d14b6..0000000 --- a/patches.suse/linux-2.6.29-enable-async-by-default.patch +++ /dev/null @@ -1,22 +0,0 @@ -From: Arjan van de Ven -Subject: enable async_enabled by default -Patch-mainline: not yet - - -Signed-off-by: Greg Kroah-Hartman - ---- - kernel/async.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/async.c -+++ b/kernel/async.c -@@ -67,7 +67,7 @@ static LIST_HEAD(async_pending); - static LIST_HEAD(async_running); - static DEFINE_SPINLOCK(async_lock); - --static int async_enabled = 0; -+static int async_enabled = 1; - - struct async_entry { - struct list_head list; diff --git a/patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch b/patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch index 824491e..c773be5 100644 --- a/patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch +++ b/patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch @@ -15,7 +15,7 @@ Signed-off-by: Greg Kroah-Hartman --- a/include/linux/jbd.h +++ b/include/linux/jbd.h -@@ -46,7 +46,7 @@ +@@ -47,7 +47,7 @@ /* * The default maximum commit age, in seconds. */ diff --git a/patches.suse/linux-2.6.29-kms-after-sata.patch b/patches.suse/linux-2.6.29-kms-after-sata.patch index 17cab4b..31db509 100644 --- a/patches.suse/linux-2.6.29-kms-after-sata.patch +++ b/patches.suse/linux-2.6.29-kms-after-sata.patch @@ -14,8 +14,8 @@ Signed-off-by: Greg Kroah-Hartman --- a/drivers/Makefile +++ b/drivers/Makefile -@@ -26,15 +26,8 @@ obj-$(CONFIG_REGULATOR) += regulator/ - # default. +@@ -28,15 +28,8 @@ obj-$(CONFIG_REGULATOR) += regulator/ + obj-y += tty/ obj-y += char/ -# gpu/ comes after char for AGP vs DRM startup @@ -27,10 +27,10 @@ Signed-off-by: Greg Kroah-Hartman -obj-$(CONFIG_FB_I810) += video/i810/ -obj-$(CONFIG_FB_INTEL) += video/intelfb/ - - obj-y += serial/ obj-$(CONFIG_PARPORT) += parport/ - obj-y += base/ block/ misc/ mfd/ -@@ -46,6 +39,13 @@ obj-$(CONFIG_ATA) += ata/ + obj-y += base/ block/ misc/ mfd/ nfc/ + obj-$(CONFIG_NUBUS) += nubus/ +@@ -48,6 +41,13 @@ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_MTD) += mtd/ obj-$(CONFIG_SPI) += spi/ obj-y += net/ @@ -43,4 +43,4 @@ Signed-off-by: Greg Kroah-Hartman + obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_FUSION) += message/ - obj-$(CONFIG_FIREWIRE) += firewire/ + obj-y += firewire/ diff --git a/patches.suse/linux-2.6.29-silence-acer-message.patch b/patches.suse/linux-2.6.29-silence-acer-message.patch deleted file mode 100644 index b151702..0000000 --- a/patches.suse/linux-2.6.29-silence-acer-message.patch +++ /dev/null @@ -1,24 +0,0 @@ -From: Arjan van de Ven -Subject: Silence acer wmi driver on non-acer machines -Date: Fri, 23 Jan 2009 -Patch-mainline: Not yet - -Small fix changing error msg to info msg in acer wmi driver - -Signed-off-by: Greg Kroah-Hartman ---- ---- - drivers/platform/x86/acer-wmi.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/platform/x86/acer-wmi.c -+++ b/drivers/platform/x86/acer-wmi.c -@@ -1313,7 +1313,7 @@ static int __init acer_wmi_init(void) - AMW0_find_mailled(); - - if (!interface) { -- printk(ACER_ERR "No or unsupported WMI interface, unable to " -+ printk(ACER_INFO "No or unsupported WMI interface, unable to " - "load\n"); - return -ENODEV; - } diff --git a/patches.suse/linux-2.6.29-touchkit.patch b/patches.suse/linux-2.6.29-touchkit.patch index 773bb1d..52c5d87 100644 --- 
a/patches.suse/linux-2.6.29-touchkit.patch +++ b/patches.suse/linux-2.6.29-touchkit.patch @@ -14,7 +14,7 @@ Signed-off-by: Greg Kroah-Hartman --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c -@@ -703,6 +703,9 @@ static int psmouse_extensions(struct psm +@@ -708,6 +708,9 @@ static int psmouse_extensions(struct psm if (touchkit_ps2_detect(psmouse, set_properties) == 0) return PSMOUSE_TOUCHKIT_PS2; @@ -24,7 +24,7 @@ Signed-off-by: Greg Kroah-Hartman } /* -@@ -828,6 +831,12 @@ static const struct psmouse_protocol psm +@@ -836,6 +839,12 @@ static const struct psmouse_protocol psm .alias = "trackpoint", .detect = trackpoint_detect, }, @@ -39,7 +39,7 @@ Signed-off-by: Greg Kroah-Hartman { --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h -@@ -89,6 +89,7 @@ enum psmouse_type { +@@ -90,6 +90,7 @@ enum psmouse_type { PSMOUSE_TRACKPOINT, PSMOUSE_TOUCHKIT_PS2, PSMOUSE_CORTRON, @@ -49,7 +49,7 @@ Signed-off-by: Greg Kroah-Hartman PSMOUSE_FSP, --- a/drivers/input/mouse/touchkit_ps2.c +++ b/drivers/input/mouse/touchkit_ps2.c -@@ -51,6 +51,11 @@ +@@ -50,6 +50,11 @@ #define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2]) #define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4]) @@ -61,7 +61,7 @@ Signed-off-by: Greg Kroah-Hartman static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse) { unsigned char *packet = psmouse->packet; -@@ -59,9 +64,15 @@ static psmouse_ret_t touchkit_ps2_proces +@@ -58,9 +63,15 @@ static psmouse_ret_t touchkit_ps2_proces if (psmouse->pktcnt != 5) return PSMOUSE_GOOD_DATA; @@ -79,7 +79,7 @@ Signed-off-by: Greg Kroah-Hartman input_sync(dev); return PSMOUSE_FULL_PACKET; -@@ -99,3 +110,33 @@ int touchkit_ps2_detect(struct psmouse * +@@ -98,3 +109,33 @@ int touchkit_ps2_detect(struct psmouse * return 0; } diff --git a/patches.suse/mm-devzero-optimisation.patch b/patches.suse/mm-devzero-optimisation.patch index cf4f58c..53ed0fa 100644 --- a/patches.suse/mm-devzero-optimisation.patch +++ b/patches.suse/mm-devzero-optimisation.patch @@ -24,7 +24,7 @@ Signed-off-by: Nick Piggin --- a/drivers/char/mem.c +++ b/drivers/char/mem.c -@@ -637,6 +637,100 @@ static ssize_t splice_write_null(struct +@@ -639,6 +639,100 @@ static ssize_t splice_write_null(struct return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); } @@ -125,7 +125,7 @@ Signed-off-by: Nick Piggin static ssize_t read_zero(struct file *file, char __user *buf, size_t count, loff_t *ppos) { -@@ -667,15 +761,24 @@ static ssize_t read_zero(struct file *fi +@@ -669,15 +763,24 @@ static ssize_t read_zero(struct file *fi } return written ? 
written : -EFAULT; } @@ -153,7 +153,7 @@ Signed-off-by: Nick Piggin static ssize_t write_full(struct file *file, const char __user *buf, --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -795,6 +795,8 @@ void free_pgd_range(struct mmu_gather *t +@@ -799,6 +799,8 @@ void free_pgd_range(struct mmu_gather *t unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); @@ -164,7 +164,7 @@ Signed-off-by: Nick Piggin int follow_pfn(struct vm_area_struct *vma, unsigned long address, --- a/mm/memory.c +++ b/mm/memory.c -@@ -1572,6 +1572,93 @@ struct page *get_dump_page(unsigned long +@@ -1590,6 +1590,93 @@ struct page *get_dump_page(unsigned long } #endif /* CONFIG_ELF_CORE */ @@ -255,6 +255,6 @@ Signed-off-by: Nick Piggin + return err; +} + - pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, + pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { diff --git a/patches.suse/mm-tune-dirty-limits.patch b/patches.suse/mm-tune-dirty-limits.patch index d0342bf..7e5fbc9 100644 --- a/patches.suse/mm-tune-dirty-limits.patch +++ b/patches.suse/mm-tune-dirty-limits.patch @@ -27,13 +27,12 @@ Acked-by: Jeff Mahoney --- a/init/Kconfig +++ b/init/Kconfig -@@ -32,6 +32,13 @@ config SPLIT_PACKAGE +@@ -32,6 +32,12 @@ config SPLIT_PACKAGE If you aren't packaging a kernel for distribution, it's safe to say n. +config KERNEL_DESKTOP + bool "Kernel to suit desktop workloads" -+ default n + help + This is an option used to tune kernel parameters to better suit + desktop workloads. @@ -41,7 +40,7 @@ Acked-by: Jeff Mahoney config ARCH string option env="ARCH" -@@ -1131,6 +1138,23 @@ config MMAP_ALLOW_UNINITIALIZED +@@ -1169,6 +1176,23 @@ config MMAP_ALLOW_UNINITIALIZED See Documentation/nommu-mmap.txt for more information. @@ -67,7 +66,7 @@ Acked-by: Jeff Mahoney help --- a/mm/page-writeback.c +++ b/mm/page-writeback.c -@@ -77,7 +77,7 @@ int vm_highmem_is_dirtyable; +@@ -78,7 +78,7 @@ int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ diff --git a/patches.suse/mpath-fix b/patches.suse/mpath-fix new file mode 100644 index 0000000..d3f55c2 --- /dev/null +++ b/patches.suse/mpath-fix @@ -0,0 +1,50 @@ +From: Jeff Mahoney +Subject: dm-mpath: pgpath->path.pdev -> pgpath->path.dev->name +Patch-mainline: Dependent on local patches + + 2.6.38-rc1 moved the device name from path.pdev to pgpath->path.dev->name. + + This patch addresses that. 
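For reference, the access-pattern change this patch applies can be shown with a minimal standalone sketch. The struct layouts below are simplified stand-ins for the dm-mpath types (struct dm_dev, dm_path, pgpath), not the kernel definitions; only the field navigation is meant to match the hunks that follow.

/* Simplified stand-ins for the dm-mpath types; only the field navigation
 * mirrors the kernel.  The hunks below swap the first access pattern for
 * the second wherever a path name is reported. */
#include <stdio.h>

struct dm_dev  { const char *name; };

struct dm_path {
	const char	*pdev;	/* held the device name before 2.6.38-rc1 */
	struct dm_dev	*dev;	/* the name is reached through here now    */
};

struct pgpath  { struct dm_path path; };

int main(void)
{
	struct dm_dev dev = { .name = "8:16" };
	struct pgpath pgpath = { .path = { .pdev = "8:16", .dev = &dev } };

	printf("old: %s\n", pgpath.path.pdev);		/* pgpath->path.pdev      */
	printf("new: %s\n", pgpath.path.dev->name);	/* pgpath->path.dev->name */
	return 0;
}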
+ +Signed-off-by: Jeff Mahoney +--- + drivers/md/dm-mpath.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +--- a/drivers/md/dm-mpath.c ++++ b/drivers/md/dm-mpath.c +@@ -165,7 +165,6 @@ static struct priority_group *alloc_prio + static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) + { + struct pgpath *pgpath, *tmp; +- struct multipath *m = ti->private; + + list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { + list_del(&pgpath->list); +@@ -1201,7 +1200,7 @@ static void pg_init_done(void *data, int + break; + } + DMERR("Count not failover device %s: Handler scsi_dh_%s " +- "was not loaded.", pgpath->path.pdev, ++ "was not loaded.", pgpath->path.dev->name, + m->hw_handler_name); + /* + * Fail path for now, so we do not ping pong +@@ -1216,7 +1215,7 @@ static void pg_init_done(void *data, int + bypass_pg(m, pg, 1); + break; + case SCSI_DH_DEV_OFFLINED: +- DMWARN("Device %s offlined.", pgpath->path.pdev); ++ DMWARN("Device %s offlined.", pgpath->path.dev->name); + errors = 0; + break; + case SCSI_DH_RETRY: +@@ -1241,7 +1240,7 @@ static void pg_init_done(void *data, int + if (errors) { + if (pgpath == m->current_pgpath) { + DMERR("Could not failover device %s, error %d.", +- pgpath->path.pdev, errors); ++ pgpath->path.dev->name, errors); + m->current_pgpath = NULL; + m->current_pg = NULL; + } diff --git a/patches.suse/nameif-track-rename.patch b/patches.suse/nameif-track-rename.patch index da5ece4..90dcf64 100644 --- a/patches.suse/nameif-track-rename.patch +++ b/patches.suse/nameif-track-rename.patch @@ -37,16 +37,16 @@ Signed-off-by: Olaf Hering --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -945,8 +945,12 @@ static int dev_get_valid_name(struct net - return __dev_alloc_name(net, name, buf); +@@ -962,8 +962,12 @@ static int dev_get_valid_name(struct net + return dev_alloc_name(dev, name); else if (__dev_get_by_name(net, name)) return -EEXIST; -- else if (buf != name) -+ else if (buf != name) { -+ if (strncmp(name, buf, IFNAMSIZ)) +- else if (dev->name != name) ++ else if (dev->name != name) { ++ if (strncmp(name, dev->name, IFNAMSIZ)) + printk(KERN_INFO "%s renamed to %s by %s [%u]\n", -+ buf, name, current->comm, current->pid); - strlcpy(buf, name, IFNAMSIZ); ++ dev->name, name, current->comm, current->pid); + strlcpy(dev->name, name, IFNAMSIZ); + } return 0; diff --git a/patches.suse/netfilter-ip_conntrack_slp.patch b/patches.suse/netfilter-ip_conntrack_slp.patch index 97ae2e6..e7cf5f5 100644 --- a/patches.suse/netfilter-ip_conntrack_slp.patch +++ b/patches.suse/netfilter-ip_conntrack_slp.patch @@ -17,7 +17,7 @@ Signed-off-by: Jiri Bohac --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -281,6 +281,21 @@ config NF_CONNTRACK_TFTP +@@ -260,6 +260,21 @@ config NF_CONNTRACK_TFTP To compile it as a module, choose M here. If unsure, say N. 
@@ -115,7 +115,7 @@ Signed-off-by: Jiri Bohac + goto out; + + rcu_read_lock(); -+ in_dev = __in_dev_get_rcu(rt->u.dst.dev); ++ in_dev = __in_dev_get_rcu(rt->dst.dev); + if (in_dev != NULL) { + for_primary_ifa(in_dev) { + if (ifa->ifa_broadcast == iph->daddr) { diff --git a/patches.suse/netfilter-ipt_LOG-mac b/patches.suse/netfilter-ipt_LOG-mac deleted file mode 100644 index 010fd96..0000000 --- a/patches.suse/netfilter-ipt_LOG-mac +++ /dev/null @@ -1,33 +0,0 @@ -From: Jaroslav Kysela -Subject: LTC23987-iptables LOG output shows too long MAC info -References: 176921 -Patch-mainline: not yet - -LTC23987-iptables LOG output shows too long MAC info for qeth VLAN interface - -Signed-off-by: Jaroslav Kysela - ---- - net/ipv4/netfilter/ipt_LOG.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - ---- a/net/ipv4/netfilter/ipt_LOG.c -+++ b/net/ipv4/netfilter/ipt_LOG.c -@@ -409,12 +409,12 @@ ipt_log_packet(u_int8_t pf, - printk("MAC="); - if (skb->dev && skb->dev->hard_header_len && - skb->mac_header != skb->network_header) { -- int i; -+ int i, len; - const unsigned char *p = skb_mac_header(skb); -- for (i = 0; i < skb->dev->hard_header_len; i++,p++) -- printk("%02x%c", *p, -- i==skb->dev->hard_header_len - 1 -- ? ' ':':'); -+ len = (int)(skb_network_header(skb) - p); -+ len = min((int)skb->dev->hard_header_len, len); -+ for (i = 0; i < len; i++,p++) -+ printk("%02x%c", *p, i==len - 1 ? ' ':':'); - } else - printk(" "); - } diff --git a/patches.suse/nfs4acl-ai.diff b/patches.suse/nfs4acl-ai.diff deleted file mode 100644 index e03be77..0000000 --- a/patches.suse/nfs4acl-ai.diff +++ /dev/null @@ -1,124 +0,0 @@ -From: Andreas Gruenbacher -Subject: Implement those parts of Automatic Inheritance (AI) which are safe under POSIX -Patch-mainline: not yet - -If AI is disabled for a directory (ACL4_AUTO_INHERIT -not set), nothing changes. If AI is enabled for a directory, the -create-time inheritance algorithm changes as follows: - -* All inherited ACEs will have the ACE4_INHERITED_ACE flag set. - -* The create mode is applied to the ACL (by setting the file masks), -which means that the ACL must no longer be subject to AI permission -propagation, and so the ACL4_PROTECTED is set. - -By itelf, this is relatively useless because it will not allow -permissions to propagate, but AI aware applications can clear the -ACL4_PROTECTED flag when they know what they are doing, and this will -enable AI permission propagation. - -It would be nice if AI aware applications could indicate this fact to -the kernel so that the kernel can avoid setting the ACL4_PROTECTED flag -in the first place, but there is no such user-space interface at this -point. 
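The create-time behaviour described above can be condensed into a small standalone model. The flag values mirror the nfs4acl.h hunk further down; the types are reduced stand-ins, and the authoritative logic is the nfs4acl_inherit() and nfs4acl_chmod() changes carried by this patch.

/* Reduced model of the Automatic Inheritance flagging described above.
 * Flag values follow the nfs4acl.h hunk below; the types are stand-ins. */
#include <stddef.h>
#include <assert.h>

#define ACL4_AUTO_INHERIT	0x01
#define ACL4_PROTECTED		0x02
#define ACE4_INHERITED_ACE	0x0080

struct model_ace { unsigned int e_flags; };

struct model_acl {
	unsigned int		a_flags;
	size_t			a_count;
	struct model_ace	a_entries[4];
};

/* Applied to an acl freshly inherited from @dir at file-create time. */
static void model_inherit(struct model_acl *acl, const struct model_acl *dir)
{
	size_t i;

	acl->a_flags = dir->a_flags & ~ACL4_PROTECTED;
	if (acl->a_flags & ACL4_AUTO_INHERIT) {
		/* Every inherited entry is marked ... */
		for (i = 0; i < acl->a_count; i++)
			acl->a_entries[i].e_flags |= ACE4_INHERITED_ACE;
		/* ... and applying the create mode protects the acl;
		 * an AI-aware tool may clear ACL4_PROTECTED later. */
		acl->a_flags |= ACL4_PROTECTED;
	}
}

int main(void)
{
	const struct model_acl dir = { .a_flags = ACL4_AUTO_INHERIT };
	struct model_acl acl = { .a_count = 1 };

	model_inherit(&acl, &dir);
	assert(acl.a_flags & ACL4_PROTECTED);
	assert(acl.a_entries[0].e_flags & ACE4_INHERITED_ACE);
	return 0;
}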
- -Signed-off-by: Andreas Gruenbacher - ---- - fs/nfs4acl_base.c | 12 ++++++++++-- - include/linux/nfs4acl.h | 26 +++++++++++++++++++++++--- - 2 files changed, 33 insertions(+), 5 deletions(-) - ---- a/fs/nfs4acl_base.c -+++ b/fs/nfs4acl_base.c -@@ -152,7 +152,8 @@ nfs4acl_chmod(struct nfs4acl *acl, mode_ - - if (acl->a_owner_mask == owner_mask && - acl->a_group_mask == group_mask && -- acl->a_other_mask == other_mask) -+ acl->a_other_mask == other_mask && -+ (!nfs4acl_is_auto_inherit(acl) || nfs4acl_is_protected(acl))) - return acl; - - clone = nfs4acl_clone(acl); -@@ -163,6 +164,8 @@ nfs4acl_chmod(struct nfs4acl *acl, mode_ - clone->a_owner_mask = owner_mask; - clone->a_group_mask = group_mask; - clone->a_other_mask = other_mask; -+ if (nfs4acl_is_auto_inherit(clone)) -+ clone->a_flags |= ACL4_PROTECTED; - - if (nfs4acl_write_through(&clone)) { - nfs4acl_put(clone); -@@ -559,7 +562,12 @@ nfs4acl_inherit(const struct nfs4acl *di - return ERR_PTR(-ENOMEM); - } - -- acl->a_flags = (dir_acl->a_flags & ACL4_WRITE_THROUGH); -+ acl->a_flags = (dir_acl->a_flags & ~ACL4_PROTECTED); -+ if (nfs4acl_is_auto_inherit(acl)) { -+ nfs4acl_for_each_entry(ace, acl) -+ ace->e_flags |= ACE4_INHERITED_ACE; -+ acl->a_flags |= ACL4_PROTECTED; -+ } - - return acl; - } ---- a/include/linux/nfs4acl.h -+++ b/include/linux/nfs4acl.h -@@ -32,10 +32,16 @@ struct nfs4acl { - _ace--) - - /* a_flags values */ -+#define ACL4_AUTO_INHERIT 0x01 -+#define ACL4_PROTECTED 0x02 -+#define ACL4_DEFAULTED 0x04 - #define ACL4_WRITE_THROUGH 0x40 - --#define ACL4_VALID_FLAGS \ -- ACL4_WRITE_THROUGH -+#define ACL4_VALID_FLAGS ( \ -+ ACL4_AUTO_INHERIT | \ -+ ACL4_PROTECTED | \ -+ ACL4_DEFAULTED | \ -+ ACL4_WRITE_THROUGH ) - - /* e_type values */ - #define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000 -@@ -51,6 +57,7 @@ struct nfs4acl { - /*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/ - /*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/ - #define ACE4_IDENTIFIER_GROUP 0x0040 -+#define ACE4_INHERITED_ACE 0x0080 - #define ACE4_SPECIAL_WHO 0x4000 /* in-memory representation only */ - - #define ACE4_VALID_FLAGS ( \ -@@ -58,7 +65,8 @@ struct nfs4acl { - ACE4_DIRECTORY_INHERIT_ACE | \ - ACE4_NO_PROPAGATE_INHERIT_ACE | \ - ACE4_INHERIT_ONLY_ACE | \ -- ACE4_IDENTIFIER_GROUP ) -+ ACE4_IDENTIFIER_GROUP | \ -+ ACE4_INHERITED_ACE ) - - /* e_mask bitflags */ - #define ACE4_READ_DATA 0x00000001 -@@ -128,6 +136,18 @@ extern const char nfs4ace_group_who[]; - extern const char nfs4ace_everyone_who[]; - - static inline int -+nfs4acl_is_auto_inherit(const struct nfs4acl *acl) -+{ -+ return acl->a_flags & ACL4_AUTO_INHERIT; -+} -+ -+static inline int -+nfs4acl_is_protected(const struct nfs4acl *acl) -+{ -+ return acl->a_flags & ACL4_PROTECTED; -+} -+ -+static inline int - nfs4ace_is_owner(const struct nfs4ace *ace) - { - return (ace->e_flags & ACE4_SPECIAL_WHO) && diff --git a/patches.suse/nfs4acl-common.diff b/patches.suse/nfs4acl-common.diff deleted file mode 100644 index 839f700..0000000 --- a/patches.suse/nfs4acl-common.diff +++ /dev/null @@ -1,1774 +0,0 @@ -From: Andreas Gruenbacher -Subject: NFSv4 ACL in-memory representation and manipulation -Patch-mainline: not yet - -* In-memory representation (struct nfs4acl). -* Functionality a filesystem needs such as permission checking, - apply mode to acl, compute mode from acl, inheritance upon file - create. -* Compute a mask-less acl from struct nfs4acl that grants the same - permissions. Protocols which don't understand the masks need - this. -* Convert to/from xattrs. 
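A sketch of how a filesystem might consume this interface, using only helpers declared in the hunks that follow (nfs4acl_want_to_mask(), nfs4acl_permission(), nfs4acl_chmod()). It compiles only in a tree carrying this patch; obtaining the cached acl for an inode is filesystem-specific and left out.

/* Sketch only: glue a filesystem would provide around the helpers added
 * below.  Assumes a tree with this patch applied; fetching the cached
 * struct nfs4acl for the inode is filesystem-specific and omitted. */
#include <linux/fs.h>
#include <linux/nfs4acl.h>

static int example_permission(struct inode *inode, int want,
			      struct nfs4acl *acl)
{
	/* Translate the VFS MAY_* request into ACE4_* mask bits ... */
	unsigned int mask = nfs4acl_want_to_mask(want);

	/* ... and evaluate them against the acl and its file masks. */
	return nfs4acl_permission(inode, acl, mask);
}

static struct nfs4acl *example_chmod(struct nfs4acl *acl, mode_t mode)
{
	/* A chmod only rewrites the file masks; the entries stay as-is.
	 * nfs4acl_chmod() takes over @acl and returns the acl to keep. */
	return nfs4acl_chmod(acl, mode);
}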
- -Signed-off-by: Andreas Gruenbacher - ---- - fs/Kconfig | 4 - fs/Makefile | 4 - fs/nfs4acl_base.c | 566 +++++++++++++++++++++++++++++++ - fs/nfs4acl_compat.c | 757 ++++++++++++++++++++++++++++++++++++++++++ - fs/nfs4acl_xattr.c | 146 ++++++++ - include/linux/nfs4acl.h | 205 +++++++++++ - include/linux/nfs4acl_xattr.h | 32 + - 7 files changed, 1714 insertions(+) - ---- a/fs/Kconfig -+++ b/fs/Kconfig -@@ -39,6 +39,10 @@ config FS_POSIX_ACL - bool - default n - -+config FS_NFS4ACL -+ bool -+ default n -+ - source "fs/xfs/Kconfig" - source "fs/gfs2/Kconfig" - source "fs/ocfs2/Kconfig" ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -51,6 +51,10 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl. - obj-$(CONFIG_NFS_COMMON) += nfs_common/ - obj-$(CONFIG_GENERIC_ACL) += generic_acl.o - -+obj-$(CONFIG_FS_NFS4ACL) += nfs4acl.o -+nfs4acl-y := nfs4acl_base.o nfs4acl_xattr.o \ -+ nfs4acl_compat.o -+ - obj-y += quota/ - - obj-$(CONFIG_DMAPI) += dmapi/ ---- /dev/null -+++ b/fs/nfs4acl_base.c -@@ -0,0 +1,567 @@ -+/* -+ * Copyright (C) 2006 Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_LICENSE("GPL"); -+ -+/* -+ * ACL entries that have ACE4_SPECIAL_WHO set in ace->e_flags use the -+ * pointer values of these constants in ace->u.e_who to avoid massive -+ * amounts of string comparisons. -+ */ -+ -+const char nfs4ace_owner_who[] = "OWNER@"; -+const char nfs4ace_group_who[] = "GROUP@"; -+const char nfs4ace_everyone_who[] = "EVERYONE@"; -+ -+EXPORT_SYMBOL(nfs4ace_owner_who); -+EXPORT_SYMBOL(nfs4ace_group_who); -+EXPORT_SYMBOL(nfs4ace_everyone_who); -+ -+/** -+ * nfs4acl_alloc - allocate an acl -+ * @count: number of entries -+ */ -+struct nfs4acl * -+nfs4acl_alloc(int count) -+{ -+ size_t size = sizeof(struct nfs4acl) + count * sizeof(struct nfs4ace); -+ struct nfs4acl *acl = kmalloc(size, GFP_KERNEL); -+ -+ if (acl) { -+ memset(acl, 0, size); -+ atomic_set(&acl->a_refcount, 1); -+ acl->a_count = count; -+ } -+ return acl; -+} -+EXPORT_SYMBOL(nfs4acl_alloc); -+ -+/** -+ * nfs4acl_clone - create a copy of an acl -+ */ -+struct nfs4acl * -+nfs4acl_clone(const struct nfs4acl *acl) -+{ -+ int count = acl->a_count; -+ size_t size = sizeof(struct nfs4acl) + count * sizeof(struct nfs4ace); -+ struct nfs4acl *dup = kmalloc(size, GFP_KERNEL); -+ -+ if (dup) { -+ memcpy(dup, acl, size); -+ atomic_set(&dup->a_refcount, 1); -+ } -+ return dup; -+} -+ -+/* -+ * The POSIX permissions are supersets of the below mask flags. -+ * -+ * The ACE4_READ_ATTRIBUTES and ACE4_READ_ACL flags are always granted -+ * in POSIX. The ACE4_SYNCHRONIZE flag has no meaning under POSIX. We -+ * make sure that we do not mask them if they are set, so that users who -+ * rely on these flags won't get confused. 
-+ */ -+#define ACE4_POSIX_MODE_READ ( \ -+ ACE4_READ_DATA | ACE4_LIST_DIRECTORY ) -+#define ACE4_POSIX_MODE_WRITE ( \ -+ ACE4_WRITE_DATA | ACE4_ADD_FILE | \ -+ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \ -+ ACE4_DELETE_CHILD ) -+#define ACE4_POSIX_MODE_EXEC ( \ -+ ACE4_EXECUTE) -+ -+static int -+nfs4acl_mask_to_mode(unsigned int mask) -+{ -+ int mode = 0; -+ -+ if (mask & ACE4_POSIX_MODE_READ) -+ mode |= MAY_READ; -+ if (mask & ACE4_POSIX_MODE_WRITE) -+ mode |= MAY_WRITE; -+ if (mask & ACE4_POSIX_MODE_EXEC) -+ mode |= MAY_EXEC; -+ -+ return mode; -+} -+ -+/** -+ * nfs4acl_masks_to_mode - compute file mode permission bits from file masks -+ * -+ * Compute the file mode permission bits from the file masks in the acl. -+ */ -+int -+nfs4acl_masks_to_mode(const struct nfs4acl *acl) -+{ -+ return nfs4acl_mask_to_mode(acl->a_owner_mask) << 6 | -+ nfs4acl_mask_to_mode(acl->a_group_mask) << 3 | -+ nfs4acl_mask_to_mode(acl->a_other_mask); -+} -+EXPORT_SYMBOL(nfs4acl_masks_to_mode); -+ -+static unsigned int -+nfs4acl_mode_to_mask(mode_t mode) -+{ -+ unsigned int mask = ACE4_POSIX_ALWAYS_ALLOWED; -+ -+ if (mode & MAY_READ) -+ mask |= ACE4_POSIX_MODE_READ; -+ if (mode & MAY_WRITE) -+ mask |= ACE4_POSIX_MODE_WRITE; -+ if (mode & MAY_EXEC) -+ mask |= ACE4_POSIX_MODE_EXEC; -+ -+ return mask; -+} -+ -+/** -+ * nfs4acl_chmod - update the file masks to reflect the new mode -+ * @mode: file mode permission bits to apply to the @acl -+ * -+ * Converts the mask flags corresponding to the owner, group, and other file -+ * permissions and computes the file masks. Returns @acl if it already has the -+ * appropriate file masks, or updates the flags in a copy of @acl. Takes over -+ * @acl. -+ */ -+struct nfs4acl * -+nfs4acl_chmod(struct nfs4acl *acl, mode_t mode) -+{ -+ unsigned int owner_mask, group_mask, other_mask; -+ struct nfs4acl *clone; -+ -+ owner_mask = nfs4acl_mode_to_mask(mode >> 6); -+ group_mask = nfs4acl_mode_to_mask(mode >> 3); -+ other_mask = nfs4acl_mode_to_mask(mode); -+ -+ if (acl->a_owner_mask == owner_mask && -+ acl->a_group_mask == group_mask && -+ acl->a_other_mask == other_mask) -+ return acl; -+ -+ clone = nfs4acl_clone(acl); -+ nfs4acl_put(acl); -+ if (!clone) -+ return ERR_PTR(-ENOMEM); -+ -+ clone->a_owner_mask = owner_mask; -+ clone->a_group_mask = group_mask; -+ clone->a_other_mask = other_mask; -+ -+ if (nfs4acl_write_through(&clone)) { -+ nfs4acl_put(clone); -+ clone = ERR_PTR(-ENOMEM); -+ } -+ return clone; -+} -+EXPORT_SYMBOL(nfs4acl_chmod); -+ -+/** -+ * nfs4acl_want_to_mask - convert permission want argument to a mask -+ * @want: @want argument of the permission inode operation -+ * -+ * When checking for append, @want is (MAY_WRITE | MAY_APPEND). -+ */ -+unsigned int -+nfs4acl_want_to_mask(int want) -+{ -+ unsigned int mask = 0; -+ -+ if (want & MAY_READ) -+ mask |= ACE4_READ_DATA; -+ if (want & MAY_APPEND) -+ mask |= ACE4_APPEND_DATA; -+ else if (want & MAY_WRITE) -+ mask |= ACE4_WRITE_DATA; -+ if (want & MAY_EXEC) -+ mask |= ACE4_EXECUTE; -+ -+ return mask; -+} -+EXPORT_SYMBOL(nfs4acl_want_to_mask); -+ -+/** -+ * nfs4acl_capability_check - check for capabilities overriding read/write access -+ * @inode: inode to check -+ * @mask: requested access (ACE4_* bitmask) -+ * -+ * Capabilities other than CAP_DAC_OVERRIDE and CAP_DAC_READ_SEARCH must be checked -+ * separately. -+ */ -+static inline int nfs4acl_capability_check(struct inode *inode, unsigned int mask) -+{ -+ /* -+ * Read/write DACs are always overridable. 
-+ * Executable DACs are overridable if at least one exec bit is set. -+ */ -+ if (!(mask & (ACE4_WRITE_ACL | ACE4_WRITE_OWNER)) && -+ (!(mask & ACE4_EXECUTE) || -+ (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))) -+ if (capable(CAP_DAC_OVERRIDE)) -+ return 0; -+ -+ /* -+ * Searching includes executable on directories, else just read. -+ */ -+ if (!(mask & ~(ACE4_READ_DATA | ACE4_EXECUTE)) && -+ (S_ISDIR(inode->i_mode) || !(mask & ACE4_EXECUTE))) -+ if (capable(CAP_DAC_READ_SEARCH)) -+ return 0; -+ -+ return -EACCES; -+} -+ -+/** -+ * nfs4acl_permission - permission check algorithm with masking -+ * @inode: inode to check -+ * @acl: nfs4 acl of the inode -+ * @mask: requested access (ACE4_* bitmask) -+ * -+ * Checks if the current process is granted @mask flags in @acl. With -+ * write-through, the OWNER@ is always granted the owner file mask, the -+ * GROUP@ is always granted the group file mask, and EVERYONE@ is always -+ * granted the other file mask. Otherwise, processes are only granted -+ * @mask flags which they are granted in the @acl as well as in their -+ * file mask. -+ */ -+int nfs4acl_permission(struct inode *inode, const struct nfs4acl *acl, -+ unsigned int mask) -+{ -+ const struct nfs4ace *ace; -+ unsigned int file_mask, requested = mask, denied = 0; -+ int in_owning_group = in_group_p(inode->i_gid); -+ int owner_or_group_class = in_owning_group; -+ -+ /* -+ * A process is in the -+ * - owner file class if it owns the file, in the -+ * - group file class if it is in the file's owning group or -+ * it matches any of the user or group entries, and in the -+ * - other file class otherwise. -+ */ -+ -+ nfs4acl_for_each_entry(ace, acl) { -+ unsigned int ace_mask = ace->e_mask; -+ -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_owner(ace)) { -+ if (current_fsuid() != inode->i_uid) -+ continue; -+ goto is_owner; -+ } else if (nfs4ace_is_group(ace)) { -+ if (!in_owning_group) -+ continue; -+ } else if (nfs4ace_is_unix_id(ace)) { -+ if (ace->e_flags & ACE4_IDENTIFIER_GROUP) { -+ if (!in_group_p(ace->u.e_id)) -+ continue; -+ } else { -+ if (current_fsuid() != ace->u.e_id) -+ continue; -+ } -+ } else -+ goto is_everyone; -+ -+ /* -+ * Apply the group file mask to entries other than OWNER@ and -+ * EVERYONE@. This is not required for correct access checking -+ * but ensures that we grant the same permissions as the acl -+ * computed by nfs4acl_apply_masks(). -+ * -+ * For example, without this restriction, 'group@:rw::allow' -+ * with mode 0600 would grant rw access to owner processes -+ * which are also in the owning group. This cannot be expressed -+ * in an acl. -+ */ -+ if (nfs4ace_is_allow(ace)) -+ ace_mask &= acl->a_group_mask; -+ -+ is_owner: -+ /* The process is in the owner or group file class. */ -+ owner_or_group_class = 1; -+ -+ is_everyone: -+ /* Check which mask flags the ACE allows or denies. */ -+ if (nfs4ace_is_deny(ace)) -+ denied |= ace_mask & mask; -+ mask &= ~ace_mask; -+ -+ /* Keep going until we know which file class the process is in. */ -+ if (!mask && owner_or_group_class) -+ break; -+ } -+ denied |= mask; -+ -+ /* -+ * Figure out which file mask applies. -+ * Clear write-through if the process is in the file group class but -+ * not in the owning group, and so the denied permissions apply. 
-+ */ -+ if (current_fsuid() == inode->i_uid) -+ file_mask = acl->a_owner_mask; -+ else if (in_owning_group || owner_or_group_class) -+ file_mask = acl->a_group_mask; -+ else -+ file_mask = acl->a_other_mask; -+ -+ denied |= requested & ~file_mask; -+ if (!denied) -+ return 0; -+ return nfs4acl_capability_check(inode, requested); -+} -+EXPORT_SYMBOL(nfs4acl_permission); -+ -+/** -+ * nfs4acl_generic_permission - permission check algorithm without explicit acl -+ * @inode: inode to check permissions for -+ * @mask: requested access (ACE4_* bitmask) -+ * -+ * The file mode of a file without ACL corresponds to an ACL with a single -+ * "EVERYONE:~0::ALLOW" entry, with file masks that correspond to the file mode -+ * permissions. Instead of constructing a temporary ACL and applying -+ * nfs4acl_permission() to it, compute the identical result directly from the file -+ * mode. -+ */ -+int nfs4acl_generic_permission(struct inode *inode, unsigned int mask) -+{ -+ int mode = inode->i_mode; -+ -+ if (current_fsuid() == inode->i_uid) -+ mode >>= 6; -+ else if (in_group_p(inode->i_gid)) -+ mode >>= 3; -+ if (!(mask & ~nfs4acl_mode_to_mask(mode))) -+ return 0; -+ return nfs4acl_capability_check(inode, mask); -+} -+EXPORT_SYMBOL(nfs4acl_generic_permission); -+ -+/* -+ * nfs4ace_is_same_who - do both acl entries refer to the same identifier? -+ */ -+int -+nfs4ace_is_same_who(const struct nfs4ace *a, const struct nfs4ace *b) -+{ -+#define WHO_FLAGS (ACE4_SPECIAL_WHO | ACE4_IDENTIFIER_GROUP) -+ if ((a->e_flags & WHO_FLAGS) != (b->e_flags & WHO_FLAGS)) -+ return 0; -+ if (a->e_flags & ACE4_SPECIAL_WHO) -+ return a->u.e_who == b->u.e_who; -+ else -+ return a->u.e_id == b->u.e_id; -+#undef WHO_FLAGS -+} -+ -+/** -+ * nfs4acl_set_who - set a special who value -+ * @ace: acl entry -+ * @who: who value to use -+ */ -+int -+nfs4ace_set_who(struct nfs4ace *ace, const char *who) -+{ -+ if (!strcmp(who, nfs4ace_owner_who)) -+ who = nfs4ace_owner_who; -+ else if (!strcmp(who, nfs4ace_group_who)) -+ who = nfs4ace_group_who; -+ else if (!strcmp(who, nfs4ace_everyone_who)) -+ who = nfs4ace_everyone_who; -+ else -+ return -EINVAL; -+ -+ ace->u.e_who = who; -+ ace->e_flags |= ACE4_SPECIAL_WHO; -+ ace->e_flags &= ~ACE4_IDENTIFIER_GROUP; -+ return 0; -+} -+EXPORT_SYMBOL(nfs4ace_set_who); -+ -+/** -+ * nfs4acl_allowed_to_who - mask flags allowed to a specific who value -+ * -+ * Computes the mask values allowed to a specific who value, taking -+ * EVERYONE@ entries into account. -+ */ -+static unsigned int -+nfs4acl_allowed_to_who(struct nfs4acl *acl, struct nfs4ace *who) -+{ -+ struct nfs4ace *ace; -+ unsigned int allowed = 0; -+ -+ nfs4acl_for_each_entry_reverse(ace, acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_same_who(ace, who) || -+ nfs4ace_is_everyone(ace)) { -+ if (nfs4ace_is_allow(ace)) -+ allowed |= ace->e_mask; -+ else if (nfs4ace_is_deny(ace)) -+ allowed &= ~ace->e_mask; -+ } -+ } -+ return allowed; -+} -+ -+/** -+ * nfs4acl_compute_max_masks - compute upper bound masks -+ * -+ * Computes upper bound owner, group, and other masks so that none of -+ * the mask flags allowed by the acl are disabled (for any choice of the -+ * file owner or group membership). 
-+ */ -+static void -+nfs4acl_compute_max_masks(struct nfs4acl *acl) -+{ -+ struct nfs4ace *ace; -+ -+ acl->a_owner_mask = 0; -+ acl->a_group_mask = 0; -+ acl->a_other_mask = 0; -+ -+ nfs4acl_for_each_entry_reverse(ace, acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ -+ if (nfs4ace_is_owner(ace)) { -+ if (nfs4ace_is_allow(ace)) -+ acl->a_owner_mask |= ace->e_mask; -+ else if (nfs4ace_is_deny(ace)) -+ acl->a_owner_mask &= ~ace->e_mask; -+ } else if (nfs4ace_is_everyone(ace)) { -+ if (nfs4ace_is_allow(ace)) { -+ struct nfs4ace who = { -+ .e_flags = ACE4_SPECIAL_WHO, -+ .u.e_who = nfs4ace_group_who, -+ }; -+ -+ acl->a_other_mask |= ace->e_mask; -+ acl->a_group_mask |= -+ nfs4acl_allowed_to_who(acl, &who); -+ acl->a_owner_mask |= ace->e_mask; -+ } else if (nfs4ace_is_deny(ace)) { -+ acl->a_other_mask &= ~ace->e_mask; -+ acl->a_group_mask &= ~ace->e_mask; -+ acl->a_owner_mask &= ~ace->e_mask; -+ } -+ } else { -+ if (nfs4ace_is_allow(ace)) { -+ unsigned int mask = -+ nfs4acl_allowed_to_who(acl, ace); -+ -+ acl->a_group_mask |= mask; -+ acl->a_owner_mask |= mask; -+ } -+ } -+ } -+} -+ -+/** -+ * nfs4acl_inherit - compute the acl a new file will inherit -+ * @dir_acl: acl of the containing direcory -+ * @mode: file type and create mode of the new file -+ * -+ * Given the containing directory's acl, this function will compute the -+ * acl that new files in that directory will inherit, or %NULL if -+ * @dir_acl does not contain acl entries inheritable by this file. -+ * -+ * Without write-through, the file masks in the returned acl are set to -+ * the intersection of the create mode and the maximum permissions -+ * allowed to each file class. With write-through, the file masks are -+ * set to the create mode. -+ */ -+struct nfs4acl * -+nfs4acl_inherit(const struct nfs4acl *dir_acl, mode_t mode) -+{ -+ const struct nfs4ace *dir_ace; -+ struct nfs4acl *acl; -+ struct nfs4ace *ace; -+ int count = 0; -+ -+ if (S_ISDIR(mode)) { -+ nfs4acl_for_each_entry(dir_ace, dir_acl) { -+ if (!nfs4ace_is_inheritable(dir_ace)) -+ continue; -+ count++; -+ } -+ if (!count) -+ return NULL; -+ acl = nfs4acl_alloc(count); -+ if (!acl) -+ return ERR_PTR(-ENOMEM); -+ ace = acl->a_entries; -+ nfs4acl_for_each_entry(dir_ace, dir_acl) { -+ if (!nfs4ace_is_inheritable(dir_ace)) -+ continue; -+ memcpy(ace, dir_ace, sizeof(struct nfs4ace)); -+ if (dir_ace->e_flags & ACE4_NO_PROPAGATE_INHERIT_ACE) -+ nfs4ace_clear_inheritance_flags(ace); -+ if ((dir_ace->e_flags & ACE4_FILE_INHERIT_ACE) && -+ !(dir_ace->e_flags & ACE4_DIRECTORY_INHERIT_ACE)) -+ ace->e_flags |= ACE4_INHERIT_ONLY_ACE; -+ ace++; -+ } -+ } else { -+ nfs4acl_for_each_entry(dir_ace, dir_acl) { -+ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE)) -+ continue; -+ count++; -+ } -+ if (!count) -+ return NULL; -+ acl = nfs4acl_alloc(count); -+ if (!acl) -+ return ERR_PTR(-ENOMEM); -+ ace = acl->a_entries; -+ nfs4acl_for_each_entry(dir_ace, dir_acl) { -+ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE)) -+ continue; -+ memcpy(ace, dir_ace, sizeof(struct nfs4ace)); -+ nfs4ace_clear_inheritance_flags(ace); -+ ace++; -+ } -+ } -+ -+ /* The maximum max flags that the owner, group, and other classes -+ are allowed. */ -+ if (dir_acl->a_flags & ACL4_WRITE_THROUGH) { -+ acl->a_owner_mask = ACE4_VALID_MASK; -+ acl->a_group_mask = ACE4_VALID_MASK; -+ acl->a_other_mask = ACE4_VALID_MASK; -+ -+ mode &= ~current->fs->umask; -+ } else -+ nfs4acl_compute_max_masks(acl); -+ -+ /* Apply the create mode. 
*/ -+ acl->a_owner_mask &= nfs4acl_mode_to_mask(mode >> 6); -+ acl->a_group_mask &= nfs4acl_mode_to_mask(mode >> 3); -+ acl->a_other_mask &= nfs4acl_mode_to_mask(mode); -+ -+ if (nfs4acl_write_through(&acl)) { -+ nfs4acl_put(acl); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ acl->a_flags = (dir_acl->a_flags & ACL4_WRITE_THROUGH); -+ -+ return acl; -+} -+EXPORT_SYMBOL(nfs4acl_inherit); ---- /dev/null -+++ b/fs/nfs4acl_compat.c -@@ -0,0 +1,758 @@ -+/* -+ * Copyright (C) 2006 Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+/** -+ * struct nfs4acl_alloc - remember how many entries are actually allocated -+ * @acl: acl with a_count <= @count -+ * @count: the actual number of entries allocated in @acl -+ * -+ * We pass around this structure while modifying an acl, so that we do -+ * not have to reallocate when we remove existing entries followed by -+ * adding new entries. -+ */ -+struct nfs4acl_alloc { -+ struct nfs4acl *acl; -+ unsigned int count; -+}; -+ -+/** -+ * nfs4acl_delete_entry - delete an entry in an acl -+ * @x: acl and number of allocated entries -+ * @ace: an entry in @x->acl -+ * -+ * Updates @ace so that it points to the entry before the deleted entry -+ * on return. (When deleting the first entry, @ace will point to the -+ * (non-existant) entry before the first entry). This behavior is the -+ * expected behavior when deleting entries while forward iterating over -+ * an acl. -+ */ -+static void -+nfs4acl_delete_entry(struct nfs4acl_alloc *x, struct nfs4ace **ace) -+{ -+ void *end = x->acl->a_entries + x->acl->a_count; -+ -+ memmove(*ace, *ace + 1, end - (void *)(*ace + 1)); -+ (*ace)--; -+ x->acl->a_count--; -+} -+ -+/** -+ * nfs4acl_insert_entry - insert an entry in an acl -+ * @x: acl and number of allocated entries -+ * @ace: entry before which the new entry shall be inserted -+ * -+ * Insert a new entry in @x->acl at position @ace, and zero-initialize -+ * it. This may require reallocating @x->acl. 
-+ */ -+static int -+nfs4acl_insert_entry(struct nfs4acl_alloc *x, struct nfs4ace **ace) -+{ -+ if (x->count == x->acl->a_count) { -+ int n = *ace - x->acl->a_entries; -+ struct nfs4acl *acl2; -+ -+ acl2 = nfs4acl_alloc(x->acl->a_count + 1); -+ if (!acl2) -+ return -1; -+ acl2->a_flags = x->acl->a_flags; -+ acl2->a_owner_mask = x->acl->a_owner_mask; -+ acl2->a_group_mask = x->acl->a_group_mask; -+ acl2->a_other_mask = x->acl->a_other_mask; -+ memcpy(acl2->a_entries, x->acl->a_entries, -+ n * sizeof(struct nfs4ace)); -+ memcpy(acl2->a_entries + n + 1, *ace, -+ (x->acl->a_count - n) * sizeof(struct nfs4ace)); -+ kfree(x->acl); -+ x->acl = acl2; -+ x->count = acl2->a_count; -+ *ace = acl2->a_entries + n; -+ } else { -+ void *end = x->acl->a_entries + x->acl->a_count; -+ -+ memmove(*ace + 1, *ace, end - (void *)*ace); -+ x->acl->a_count++; -+ } -+ memset(*ace, 0, sizeof(struct nfs4ace)); -+ return 0; -+} -+ -+/** -+ * nfs4ace_change_mask - change the mask in @ace to @mask -+ * @x: acl and number of allocated entries -+ * @ace: entry to modify -+ * @mask: new mask for @ace -+ * -+ * Set the effective mask of @ace to @mask. This will require splitting -+ * off a separate acl entry if @ace is inheritable. In that case, the -+ * effective- only acl entry is inserted after the inheritable acl -+ * entry, end the inheritable acl entry is set to inheritable-only. If -+ * @mode is 0, either set the original acl entry to inheritable-only if -+ * it was inheritable, or remove it otherwise. The returned @ace points -+ * to the modified or inserted effective-only acl entry if that entry -+ * exists, to the entry that has become inheritable-only, or else to the -+ * previous entry in the acl. This is the expected behavior when -+ * modifying masks while forward iterating over an acl. -+ */ -+static int -+nfs4ace_change_mask(struct nfs4acl_alloc *x, struct nfs4ace **ace, -+ unsigned int mask) -+{ -+ if (mask && (*ace)->e_mask == mask) -+ return 0; -+ if (mask & ~ACE4_POSIX_ALWAYS_ALLOWED) { -+ if (nfs4ace_is_inheritable(*ace)) { -+ if (nfs4acl_insert_entry(x, ace)) -+ return -1; -+ memcpy(*ace, *ace + 1, sizeof(struct nfs4ace)); -+ (*ace)->e_flags |= ACE4_INHERIT_ONLY_ACE; -+ (*ace)++; -+ nfs4ace_clear_inheritance_flags(*ace); -+ } -+ (*ace)->e_mask = mask; -+ } else { -+ if (nfs4ace_is_inheritable(*ace)) -+ (*ace)->e_flags |= ACE4_INHERIT_ONLY_ACE; -+ else -+ nfs4acl_delete_entry(x, ace); -+ } -+ return 0; -+} -+ -+/** -+ * nfs4acl_move_everyone_aces_down - move everyone@ acl entries to the end -+ * @x: acl and number of allocated entries -+ * -+ * Move all everyone acl entries to the bottom of the acl so that only a -+ * single everyone@ allow acl entry remains at the end, and update the -+ * mask fields of all acl entries on the way. If everyone@ is not -+ * granted any permissions, no empty everyone@ acl entry is inserted. -+ * -+ * This transformation does not modify the permissions that the acl -+ * grants, but we need it to simplify successive transformations. 
-+ */ -+static int -+nfs4acl_move_everyone_aces_down(struct nfs4acl_alloc *x) -+{ -+ struct nfs4ace *ace; -+ unsigned int allowed = 0, denied = 0; -+ -+ nfs4acl_for_each_entry(ace, x->acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_everyone(ace)) { -+ if (nfs4ace_is_allow(ace)) -+ allowed |= (ace->e_mask & ~denied); -+ else if (nfs4ace_is_deny(ace)) -+ denied |= (ace->e_mask & ~allowed); -+ else -+ continue; -+ if (nfs4ace_change_mask(x, &ace, 0)) -+ return -1; -+ } else { -+ if (nfs4ace_is_allow(ace)) { -+ if (nfs4ace_change_mask(x, &ace, allowed | -+ (ace->e_mask & ~denied))) -+ return -1; -+ } else if (nfs4ace_is_deny(ace)) { -+ if (nfs4ace_change_mask(x, &ace, denied | -+ (ace->e_mask & ~allowed))) -+ return -1; -+ } -+ } -+ } -+ if (allowed & ~ACE4_POSIX_ALWAYS_ALLOWED) { -+ struct nfs4ace *last_ace = ace - 1; -+ -+ if (nfs4ace_is_everyone(last_ace) && -+ nfs4ace_is_allow(last_ace) && -+ nfs4ace_is_inherit_only(last_ace) && -+ last_ace->e_mask == allowed) -+ last_ace->e_flags &= ~ACE4_INHERIT_ONLY_ACE; -+ else { -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE; -+ ace->e_flags = ACE4_SPECIAL_WHO; -+ ace->e_mask = allowed; -+ ace->u.e_who = nfs4ace_everyone_who; -+ } -+ } -+ return 0; -+} -+ -+/** -+ * __nfs4acl_propagate_everyone - propagate everyone@ mask flags up for @who -+ * @x: acl and number of allocated entries -+ * @who: identifier to propagate mask flags for -+ * @allow: mask flags to propagate up -+ * -+ * Propagate mask flags from the trailing everyone@ allow acl entry up -+ * for the specified @who. -+ * -+ * The idea here is to precede the trailing EVERYONE@ ALLOW entry by an -+ * additional @who ALLOW entry, but with the following optimizations: -+ * (1) we don't bother setting any flags in the new @who ALLOW entry -+ * that has already been allowed or denied by a previous @who entry, (2) -+ * we merge the new @who entry with a previous @who entry if there is -+ * such a previous @who entry and there are no intervening DENY entries -+ * with mask flags that overlap the flags we care about. -+ */ -+static int -+__nfs4acl_propagate_everyone(struct nfs4acl_alloc *x, struct nfs4ace *who, -+ unsigned int allow) -+{ -+ struct nfs4ace *allow_last = NULL, *ace; -+ -+ /* Remove the mask flags from allow that are already determined for -+ this who value, and figure out if there is an ALLOW entry for -+ this who value that is "reachable" from the trailing EVERYONE@ -+ ALLOW ACE. 
*/ -+ nfs4acl_for_each_entry(ace, x->acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_allow(ace)) { -+ if (nfs4ace_is_same_who(ace, who)) { -+ allow &= ~ace->e_mask; -+ allow_last = ace; -+ } -+ } else if (nfs4ace_is_deny(ace)) { -+ if (nfs4ace_is_same_who(ace, who)) -+ allow &= ~ace->e_mask; -+ if (allow & ace->e_mask) -+ allow_last = NULL; -+ } -+ } -+ -+ if (allow) { -+ if (allow_last) -+ return nfs4ace_change_mask(x, &allow_last, -+ allow_last->e_mask | allow); -+ else { -+ struct nfs4ace who_copy; -+ -+ ace = x->acl->a_entries + x->acl->a_count - 1; -+ memcpy(&who_copy, who, sizeof(struct nfs4ace)); -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ memcpy(ace, &who_copy, sizeof(struct nfs4ace)); -+ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE; -+ nfs4ace_clear_inheritance_flags(ace); -+ ace->e_mask = allow; -+ } -+ } -+ return 0; -+} -+ -+/** -+ * nfs4acl_propagate_everyone - propagate everyone@ mask flags up the acl -+ * @x: acl and number of allocated entries -+ * -+ * Make sure for owner@, group@, and all other users, groups, and -+ * special identifiers that they are allowed or denied all permissions -+ * that are granted be the trailing everyone@ acl entry. If they are -+ * not, try to add the missing permissions to existing allow acl entries -+ * for those users, or introduce additional acl entries if that is not -+ * possible. -+ * -+ * We do this so that no mask flags will get lost when finally applying -+ * the file masks to the acl entries: otherwise, with an other file mask -+ * that is more restrictive than the owner and/or group file mask, mask -+ * flags that were allowed to processes in the owner and group classes -+ * and that the other mask denies would be lost. For example, the -+ * following two acls show the problem when mode 0664 is applied to -+ * them: -+ * -+ * masking without propagation (wrong) -+ * =========================================================== -+ * joe:r::allow => joe:r::allow -+ * everyone@:rwx::allow => everyone@:r::allow -+ * ----------------------------------------------------------- -+ * joe:w::deny => joe:w::deny -+ * everyone@:rwx::allow everyone@:r::allow -+ * -+ * Note that the permissions of joe end up being more restrictive than -+ * what the acl would allow when first computing the allowed flags and -+ * then applying the respective mask. With propagation of permissions, -+ * we get: -+ * -+ * masking after propagation (correct) -+ * =========================================================== -+ * joe:r::allow => joe:rw::allow -+ * owner@:rw::allow -+ * group@:rw::allow -+ * everyone@:rwx::allow everyone@:r::allow -+ * ----------------------------------------------------------- -+ * joe:w::deny => owner@:x::deny -+ * joe:w::deny -+ * owner@:rw::allow -+ * owner@:rw::allow -+ * joe:r::allow -+ * everyone@:rwx::allow everyone@:r::allow -+ * -+ * The examples show the acls that would result from propagation with no -+ * masking performed. In fact, we do apply the respective mask to the -+ * acl entries before computing the propagation because this will save -+ * us from adding acl entries that would end up with empty mask fields -+ * after applying the masks. -+ * -+ * It is ensured that no more than one entry will be inserted for each -+ * who value, no matter how many entries each who value has already. 
-+ */ -+static int -+nfs4acl_propagate_everyone(struct nfs4acl_alloc *x) -+{ -+ int write_through = (x->acl->a_flags & ACL4_WRITE_THROUGH); -+ struct nfs4ace who = { .e_flags = ACE4_SPECIAL_WHO }; -+ struct nfs4ace *ace; -+ unsigned int owner_allow, group_allow; -+ int retval; -+ -+ if (!((x->acl->a_owner_mask | x->acl->a_group_mask) & -+ ~x->acl->a_other_mask)) -+ return 0; -+ if (!x->acl->a_count) -+ return 0; -+ ace = x->acl->a_entries + x->acl->a_count - 1; -+ if (nfs4ace_is_inherit_only(ace) || !nfs4ace_is_everyone(ace)) -+ return 0; -+ if (!(ace->e_mask & ~x->acl->a_other_mask)) { -+ /* None of the allowed permissions will get masked. */ -+ return 0; -+ } -+ owner_allow = ace->e_mask & x->acl->a_owner_mask; -+ group_allow = ace->e_mask & x->acl->a_group_mask; -+ -+ /* Propagate everyone@ permissions through to owner@. */ -+ if (owner_allow && !write_through && -+ (x->acl->a_owner_mask & ~x->acl->a_other_mask)) { -+ who.u.e_who = nfs4ace_owner_who; -+ retval = __nfs4acl_propagate_everyone(x, &who, owner_allow); -+ if (retval) -+ return -1; -+ } -+ -+ if (group_allow && (x->acl->a_group_mask & ~x->acl->a_other_mask)) { -+ int n; -+ -+ if (!write_through) { -+ /* Propagate everyone@ permissions through to group@. */ -+ who.u.e_who = nfs4ace_group_who; -+ retval = __nfs4acl_propagate_everyone(x, &who, -+ group_allow); -+ if (retval) -+ return -1; -+ } -+ -+ /* Start from the entry before the trailing EVERYONE@ ALLOW -+ entry. We will not hit EVERYONE@ entries in the loop. */ -+ for (n = x->acl->a_count - 2; n != -1; n--) { -+ ace = x->acl->a_entries + n; -+ -+ if (nfs4ace_is_inherit_only(ace) || -+ nfs4ace_is_owner(ace) || -+ nfs4ace_is_group(ace)) -+ continue; -+ if (nfs4ace_is_allow(ace) || nfs4ace_is_deny(ace)) { -+ /* Any inserted entry will end up below the -+ current entry. */ -+ retval = __nfs4acl_propagate_everyone(x, ace, -+ group_allow); -+ if (retval) -+ return -1; -+ } -+ } -+ } -+ return 0; -+} -+ -+/** -+ * __nfs4acl_apply_masks - apply the masks to the acl entries -+ * @x: acl and number of allocated entries -+ * -+ * Apply the owner file mask to owner@ entries, the intersection of the -+ * group and other file masks to everyone@ entries, and the group file -+ * mask to all other entries. 
-+ */ -+static int -+__nfs4acl_apply_masks(struct nfs4acl_alloc *x) -+{ -+ struct nfs4ace *ace; -+ -+ nfs4acl_for_each_entry(ace, x->acl) { -+ unsigned int mask; -+ -+ if (nfs4ace_is_inherit_only(ace) || !nfs4ace_is_allow(ace)) -+ continue; -+ if (nfs4ace_is_owner(ace)) -+ mask = x->acl->a_owner_mask; -+ else if (nfs4ace_is_everyone(ace)) -+ mask = x->acl->a_other_mask; -+ else -+ mask = x->acl->a_group_mask; -+ if (nfs4ace_change_mask(x, &ace, ace->e_mask & mask)) -+ return -1; -+ } -+ return 0; -+} -+ -+/** -+ * nfs4acl_max_allowed - maximum mask flags that anybody is allowed -+ */ -+static unsigned int -+nfs4acl_max_allowed(struct nfs4acl *acl) -+{ -+ struct nfs4ace *ace; -+ unsigned int allowed = 0; -+ -+ nfs4acl_for_each_entry_reverse(ace, acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_allow(ace)) -+ allowed |= ace->e_mask; -+ else if (nfs4ace_is_deny(ace)) { -+ if (nfs4ace_is_everyone(ace)) -+ allowed &= ~ace->e_mask; -+ } -+ } -+ return allowed; -+} -+ -+/** -+ * nfs4acl_isolate_owner_class - limit the owner class to the owner file mask -+ * @x: acl and number of allocated entries -+ * -+ * Make sure the owner class (owner@) is granted no more than the owner -+ * mask by first checking which permissions anyone is granted, and then -+ * denying owner@ all permissions beyond that. -+ */ -+static int -+nfs4acl_isolate_owner_class(struct nfs4acl_alloc *x) -+{ -+ struct nfs4ace *ace; -+ unsigned int allowed = 0; -+ -+ allowed = nfs4acl_max_allowed(x->acl); -+ if (allowed & ~x->acl->a_owner_mask) { -+ /* Figure out if we can update an existig OWNER@ DENY entry. */ -+ nfs4acl_for_each_entry(ace, x->acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_deny(ace)) { -+ if (nfs4ace_is_owner(ace)) -+ break; -+ } else if (nfs4ace_is_allow(ace)) { -+ ace = x->acl->a_entries + x->acl->a_count; -+ break; -+ } -+ } -+ if (ace != x->acl->a_entries + x->acl->a_count) { -+ if (nfs4ace_change_mask(x, &ace, ace->e_mask | -+ (allowed & ~x->acl->a_owner_mask))) -+ return -1; -+ } else { -+ /* Insert an owner@ deny entry at the front. */ -+ ace = x->acl->a_entries; -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ ace->e_type = ACE4_ACCESS_DENIED_ACE_TYPE; -+ ace->e_flags = ACE4_SPECIAL_WHO; -+ ace->e_mask = allowed & ~x->acl->a_owner_mask; -+ ace->u.e_who = nfs4ace_owner_who; -+ } -+ } -+ return 0; -+} -+ -+/** -+ * __nfs4acl_isolate_who - isolate entry from EVERYONE@ ALLOW entry -+ * @x: acl and number of allocated entries -+ * @who: identifier to isolate -+ * @deny: mask flags this identifier should not be allowed -+ * -+ * Make sure that @who is not allowed any mask flags in @deny by checking -+ * which mask flags this identifier is allowed, and adding excess allowed -+ * mask flags to an existing DENY entry before the trailing EVERYONE@ ALLOW -+ * entry, or inserting such an entry. -+ */ -+static int -+__nfs4acl_isolate_who(struct nfs4acl_alloc *x, struct nfs4ace *who, -+ unsigned int deny) -+{ -+ struct nfs4ace *ace; -+ unsigned int allowed = 0, n; -+ -+ /* Compute the mask flags granted to this who value. */ -+ nfs4acl_for_each_entry_reverse(ace, x->acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_same_who(ace, who)) { -+ if (nfs4ace_is_allow(ace)) -+ allowed |= ace->e_mask; -+ else if (nfs4ace_is_deny(ace)) -+ allowed &= ~ace->e_mask; -+ deny &= ~ace->e_mask; -+ } -+ } -+ if (!deny) -+ return 0; -+ -+ /* Figure out if we can update an existig DENY entry. Start -+ from the entry before the trailing EVERYONE@ ALLOW entry. 
We -+ will not hit EVERYONE@ entries in the loop. */ -+ for (n = x->acl->a_count - 2; n != -1; n--) { -+ ace = x->acl->a_entries + n; -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_deny(ace)) { -+ if (nfs4ace_is_same_who(ace, who)) -+ break; -+ } else if (nfs4ace_is_allow(ace) && -+ (ace->e_mask & deny)) { -+ n = -1; -+ break; -+ } -+ } -+ if (n != -1) { -+ if (nfs4ace_change_mask(x, &ace, ace->e_mask | deny)) -+ return -1; -+ } else { -+ /* Insert a eny entry before the trailing EVERYONE@ DENY -+ entry. */ -+ struct nfs4ace who_copy; -+ -+ ace = x->acl->a_entries + x->acl->a_count - 1; -+ memcpy(&who_copy, who, sizeof(struct nfs4ace)); -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ memcpy(ace, &who_copy, sizeof(struct nfs4ace)); -+ ace->e_type = ACE4_ACCESS_DENIED_ACE_TYPE; -+ nfs4ace_clear_inheritance_flags(ace); -+ ace->e_mask = deny; -+ } -+ return 0; -+} -+ -+/** -+ * nfs4acl_isolate_group_class - limit the group class to the group file mask -+ * @x: acl and number of allocated entries -+ * -+ * Make sure the group class (all entries except owner@ and everyone@) is -+ * granted no more than the group mask by inserting DENY entries for group -+ * class entries where necessary. -+ */ -+static int -+nfs4acl_isolate_group_class(struct nfs4acl_alloc *x) -+{ -+ struct nfs4ace who = { -+ .e_flags = ACE4_SPECIAL_WHO, -+ .u.e_who = nfs4ace_group_who, -+ }; -+ struct nfs4ace *ace; -+ unsigned int deny; -+ -+ if (!x->acl->a_count) -+ return 0; -+ ace = x->acl->a_entries + x->acl->a_count - 1; -+ if (nfs4ace_is_inherit_only(ace) || !nfs4ace_is_everyone(ace)) -+ return 0; -+ deny = ace->e_mask & ~x->acl->a_group_mask; -+ -+ if (deny) { -+ unsigned int n; -+ -+ if (__nfs4acl_isolate_who(x, &who, deny)) -+ return -1; -+ -+ /* Start from the entry before the trailing EVERYONE@ ALLOW -+ entry. We will not hit EVERYONE@ entries in the loop. */ -+ for (n = x->acl->a_count - 2; n != -1; n--) { -+ ace = x->acl->a_entries + n; -+ -+ if (nfs4ace_is_inherit_only(ace) || -+ nfs4ace_is_owner(ace) || -+ nfs4ace_is_group(ace)) -+ continue; -+ if (__nfs4acl_isolate_who(x, ace, deny)) -+ return -1; -+ } -+ } -+ return 0; -+} -+ -+/** -+ * __nfs4acl_write_through - grant the full masks to owner@, group@, everyone@ -+ * -+ * Make sure that owner, group@, and everyone@ are allowed the full mask -+ * permissions, and not only the permissions granted both by the acl and -+ * the masks. -+ */ -+static int -+__nfs4acl_write_through(struct nfs4acl_alloc *x) -+{ -+ struct nfs4ace *ace; -+ unsigned int allowed; -+ -+ /* Remove all owner@ and group@ ACEs: we re-insert them at the -+ top. */ -+ nfs4acl_for_each_entry(ace, x->acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if ((nfs4ace_is_owner(ace) || nfs4ace_is_group(ace)) && -+ nfs4ace_change_mask(x, &ace, 0)) -+ return -1; -+ } -+ -+ /* Insert the everyone@ allow entry at the end, or update the -+ existing entry. 
*/ -+ allowed = x->acl->a_other_mask; -+ if (allowed & ~ACE4_POSIX_ALWAYS_ALLOWED) { -+ ace = x->acl->a_entries + x->acl->a_count - 1; -+ if (x->acl->a_count && nfs4ace_is_everyone(ace) && -+ !nfs4ace_is_inherit_only(ace)) { -+ if (nfs4ace_change_mask(x, &ace, allowed)) -+ return -1; -+ } else { -+ ace = x->acl->a_entries + x->acl->a_count; -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE; -+ ace->e_flags = ACE4_SPECIAL_WHO; -+ ace->e_mask = allowed; -+ ace->u.e_who = nfs4ace_everyone_who; -+ } -+ } -+ -+ /* Compute the permissions that owner@ and group@ are already granted -+ though the everyone@ allow entry at the end. Note that the acl -+ contains no owner@ or group@ entries at this point. */ -+ allowed = 0; -+ nfs4acl_for_each_entry_reverse(ace, x->acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_allow(ace)) { -+ if (nfs4ace_is_everyone(ace)) -+ allowed |= ace->e_mask; -+ } else if (nfs4ace_is_deny(ace)) -+ allowed &= ~ace->e_mask; -+ } -+ -+ /* Insert the appropriate group@ allow entry at the front. */ -+ if (x->acl->a_group_mask & ~allowed) { -+ ace = x->acl->a_entries; -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE; -+ ace->e_flags = ACE4_SPECIAL_WHO; -+ ace->e_mask = x->acl->a_group_mask /*& ~allowed*/; -+ ace->u.e_who = nfs4ace_group_who; -+ } -+ -+ /* Insert the appropriate owner@ allow entry at the front. */ -+ if (x->acl->a_owner_mask & ~allowed) { -+ ace = x->acl->a_entries; -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE; -+ ace->e_flags = ACE4_SPECIAL_WHO; -+ ace->e_mask = x->acl->a_owner_mask /*& ~allowed*/; -+ ace->u.e_who = nfs4ace_owner_who; -+ } -+ -+ /* Insert the appropriate owner@ deny entry at the front. */ -+ allowed = nfs4acl_max_allowed(x->acl); -+ if (allowed & ~x->acl->a_owner_mask) { -+ nfs4acl_for_each_entry(ace, x->acl) { -+ if (nfs4ace_is_inherit_only(ace)) -+ continue; -+ if (nfs4ace_is_allow(ace)) { -+ ace = x->acl->a_entries + x->acl->a_count; -+ break; -+ } -+ if (nfs4ace_is_deny(ace) && nfs4ace_is_owner(ace)) -+ break; -+ } -+ if (ace != x->acl->a_entries + x->acl->a_count) { -+ if (nfs4ace_change_mask(x, &ace, ace->e_mask | -+ (allowed & ~x->acl->a_owner_mask))) -+ return -1; -+ } else { -+ ace = x->acl->a_entries; -+ if (nfs4acl_insert_entry(x, &ace)) -+ return -1; -+ ace->e_type = ACE4_ACCESS_DENIED_ACE_TYPE; -+ ace->e_flags = ACE4_SPECIAL_WHO; -+ ace->e_mask = allowed & ~x->acl->a_owner_mask; -+ ace->u.e_who = nfs4ace_owner_who; -+ } -+ } -+ -+ return 0; -+} -+ -+/** -+ * nfs4acl_apply_masks - apply the masks to the acl -+ * -+ * Apply the masks so that the acl allows no more flags than the -+ * intersection between the flags that the original acl allows and the -+ * mask matching the process. -+ * -+ * Note: this algorithm may push the number of entries in the acl above -+ * ACL4_XATTR_MAX_COUNT, so a read-modify-write cycle would fail. 
-+ */ -+int -+nfs4acl_apply_masks(struct nfs4acl **acl) -+{ -+ struct nfs4acl_alloc x = { -+ .acl = *acl, -+ .count = (*acl)->a_count, -+ }; -+ int retval = 0; -+ -+ if (nfs4acl_move_everyone_aces_down(&x) || -+ nfs4acl_propagate_everyone(&x) || -+ __nfs4acl_apply_masks(&x) || -+ nfs4acl_isolate_owner_class(&x) || -+ nfs4acl_isolate_group_class(&x)) -+ retval = -ENOMEM; -+ -+ *acl = x.acl; -+ return retval; -+} -+EXPORT_SYMBOL(nfs4acl_apply_masks); -+ -+int nfs4acl_write_through(struct nfs4acl **acl) -+{ -+ struct nfs4acl_alloc x = { -+ .acl = *acl, -+ .count = (*acl)->a_count, -+ }; -+ int retval = 0; -+ -+ if (!((*acl)->a_flags & ACL4_WRITE_THROUGH)) -+ goto out; -+ -+ if (nfs4acl_move_everyone_aces_down(&x) || -+ nfs4acl_propagate_everyone(&x) || -+ __nfs4acl_write_through(&x)) -+ retval = -ENOMEM; -+ -+ *acl = x.acl; -+out: -+ return retval; -+} ---- /dev/null -+++ b/fs/nfs4acl_xattr.c -@@ -0,0 +1,146 @@ -+/* -+ * Copyright (C) 2006 Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_LICENSE("GPL"); -+ -+struct nfs4acl * -+nfs4acl_from_xattr(const void *value, size_t size) -+{ -+ const struct nfs4acl_xattr *xattr_acl = value; -+ const struct nfs4ace_xattr *xattr_ace = (void *)(xattr_acl + 1); -+ struct nfs4acl *acl; -+ struct nfs4ace *ace; -+ int count; -+ -+ if (size < sizeof(struct nfs4acl_xattr) || -+ xattr_acl->a_version != ACL4_XATTR_VERSION || -+ (xattr_acl->a_flags & ~ACL4_VALID_FLAGS)) -+ return ERR_PTR(-EINVAL); -+ -+ count = be16_to_cpu(xattr_acl->a_count); -+ if (count > ACL4_XATTR_MAX_COUNT) -+ return ERR_PTR(-EINVAL); -+ -+ acl = nfs4acl_alloc(count); -+ if (!acl) -+ return ERR_PTR(-ENOMEM); -+ -+ acl->a_flags = xattr_acl->a_flags; -+ acl->a_owner_mask = be32_to_cpu(xattr_acl->a_owner_mask); -+ if (acl->a_owner_mask & ~ACE4_VALID_MASK) -+ goto fail_einval; -+ acl->a_group_mask = be32_to_cpu(xattr_acl->a_group_mask); -+ if (acl->a_group_mask & ~ACE4_VALID_MASK) -+ goto fail_einval; -+ acl->a_other_mask = be32_to_cpu(xattr_acl->a_other_mask); -+ if (acl->a_other_mask & ~ACE4_VALID_MASK) -+ goto fail_einval; -+ -+ nfs4acl_for_each_entry(ace, acl) { -+ const char *who = (void *)(xattr_ace + 1), *end; -+ ssize_t used = (void *)who - value; -+ -+ if (used > size) -+ goto fail_einval; -+ end = memchr(who, 0, size - used); -+ if (!end) -+ goto fail_einval; -+ -+ ace->e_type = be16_to_cpu(xattr_ace->e_type); -+ ace->e_flags = be16_to_cpu(xattr_ace->e_flags); -+ ace->e_mask = be32_to_cpu(xattr_ace->e_mask); -+ ace->u.e_id = be32_to_cpu(xattr_ace->e_id); -+ -+ if (ace->e_flags & ~ACE4_VALID_FLAGS) { -+ memset(ace, 0, sizeof(struct nfs4ace)); -+ goto fail_einval; -+ } -+ if (ace->e_type > ACE4_ACCESS_DENIED_ACE_TYPE || -+ (ace->e_mask & ~ACE4_VALID_MASK)) -+ goto fail_einval; -+ -+ if (who == end) { -+ if (ace->u.e_id == -1) -+ goto fail_einval; /* uid/gid needed */ -+ } else if (nfs4ace_set_who(ace, who)) -+ goto fail_einval; -+ -+ xattr_ace = (void *)who + ALIGN(end - who + 1, 4); -+ } -+ -+ return acl; -+ -+fail_einval: -+ nfs4acl_put(acl); 
-+ return ERR_PTR(-EINVAL); -+} -+EXPORT_SYMBOL(nfs4acl_from_xattr); -+ -+size_t -+nfs4acl_xattr_size(const struct nfs4acl *acl) -+{ -+ size_t size = sizeof(struct nfs4acl_xattr); -+ const struct nfs4ace *ace; -+ -+ nfs4acl_for_each_entry(ace, acl) { -+ size += sizeof(struct nfs4ace_xattr) + -+ (nfs4ace_is_unix_id(ace) ? 4 : -+ ALIGN(strlen(ace->u.e_who) + 1, 4)); -+ } -+ return size; -+} -+EXPORT_SYMBOL(nfs4acl_xattr_size); -+ -+void -+nfs4acl_to_xattr(const struct nfs4acl *acl, void *buffer) -+{ -+ struct nfs4acl_xattr *xattr_acl = buffer; -+ struct nfs4ace_xattr *xattr_ace; -+ const struct nfs4ace *ace; -+ -+ xattr_acl->a_version = ACL4_XATTR_VERSION; -+ xattr_acl->a_flags = acl->a_flags; -+ xattr_acl->a_count = cpu_to_be16(acl->a_count); -+ -+ xattr_acl->a_owner_mask = cpu_to_be32(acl->a_owner_mask); -+ xattr_acl->a_group_mask = cpu_to_be32(acl->a_group_mask); -+ xattr_acl->a_other_mask = cpu_to_be32(acl->a_other_mask); -+ -+ xattr_ace = (void *)(xattr_acl + 1); -+ nfs4acl_for_each_entry(ace, acl) { -+ xattr_ace->e_type = cpu_to_be16(ace->e_type); -+ xattr_ace->e_flags = cpu_to_be16(ace->e_flags & -+ ACE4_VALID_FLAGS); -+ xattr_ace->e_mask = cpu_to_be32(ace->e_mask); -+ if (nfs4ace_is_unix_id(ace)) { -+ xattr_ace->e_id = cpu_to_be32(ace->u.e_id); -+ memset(xattr_ace->e_who, 0, 4); -+ xattr_ace = (void *)xattr_ace->e_who + 4; -+ } else { -+ int sz = ALIGN(strlen(ace->u.e_who) + 1, 4); -+ -+ xattr_ace->e_id = cpu_to_be32(-1); -+ memset(xattr_ace->e_who + sz - 4, 0, 4); -+ strcpy(xattr_ace->e_who, ace->u.e_who); -+ xattr_ace = (void *)xattr_ace->e_who + sz; -+ } -+ } -+} -+EXPORT_SYMBOL(nfs4acl_to_xattr); ---- /dev/null -+++ b/include/linux/nfs4acl.h -@@ -0,0 +1,205 @@ -+#ifndef __NFS4ACL_H -+#define __NFS4ACL_H -+ -+struct nfs4ace { -+ unsigned short e_type; -+ unsigned short e_flags; -+ unsigned int e_mask; -+ union { -+ unsigned int e_id; -+ const char *e_who; -+ } u; -+}; -+ -+struct nfs4acl { -+ atomic_t a_refcount; -+ unsigned int a_owner_mask; -+ unsigned int a_group_mask; -+ unsigned int a_other_mask; -+ unsigned short a_count; -+ unsigned short a_flags; -+ struct nfs4ace a_entries[0]; -+}; -+ -+#define nfs4acl_for_each_entry(_ace, _acl) \ -+ for (_ace = _acl->a_entries; \ -+ _ace != _acl->a_entries + _acl->a_count; \ -+ _ace++) -+ -+#define nfs4acl_for_each_entry_reverse(_ace, _acl) \ -+ for (_ace = _acl->a_entries + _acl->a_count - 1; \ -+ _ace != _acl->a_entries - 1; \ -+ _ace--) -+ -+/* a_flags values */ -+#define ACL4_WRITE_THROUGH 0x40 -+ -+#define ACL4_VALID_FLAGS \ -+ ACL4_WRITE_THROUGH -+ -+/* e_type values */ -+#define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000 -+#define ACE4_ACCESS_DENIED_ACE_TYPE 0x0001 -+/*#define ACE4_SYSTEM_AUDIT_ACE_TYPE 0x0002*/ -+/*#define ACE4_SYSTEM_ALARM_ACE_TYPE 0x0003*/ -+ -+/* e_flags bitflags */ -+#define ACE4_FILE_INHERIT_ACE 0x0001 -+#define ACE4_DIRECTORY_INHERIT_ACE 0x0002 -+#define ACE4_NO_PROPAGATE_INHERIT_ACE 0x0004 -+#define ACE4_INHERIT_ONLY_ACE 0x0008 -+/*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/ -+/*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/ -+#define ACE4_IDENTIFIER_GROUP 0x0040 -+#define ACE4_SPECIAL_WHO 0x4000 /* in-memory representation only */ -+ -+#define ACE4_VALID_FLAGS ( \ -+ ACE4_FILE_INHERIT_ACE | \ -+ ACE4_DIRECTORY_INHERIT_ACE | \ -+ ACE4_NO_PROPAGATE_INHERIT_ACE | \ -+ ACE4_INHERIT_ONLY_ACE | \ -+ ACE4_IDENTIFIER_GROUP ) -+ -+/* e_mask bitflags */ -+#define ACE4_READ_DATA 0x00000001 -+#define ACE4_LIST_DIRECTORY 0x00000001 -+#define ACE4_WRITE_DATA 0x00000002 -+#define ACE4_ADD_FILE 0x00000002 -+#define 
ACE4_APPEND_DATA 0x00000004 -+#define ACE4_ADD_SUBDIRECTORY 0x00000004 -+#define ACE4_READ_NAMED_ATTRS 0x00000008 -+#define ACE4_WRITE_NAMED_ATTRS 0x00000010 -+#define ACE4_EXECUTE 0x00000020 -+#define ACE4_DELETE_CHILD 0x00000040 -+#define ACE4_READ_ATTRIBUTES 0x00000080 -+#define ACE4_WRITE_ATTRIBUTES 0x00000100 -+#define ACE4_DELETE 0x00010000 -+#define ACE4_READ_ACL 0x00020000 -+#define ACE4_WRITE_ACL 0x00040000 -+#define ACE4_WRITE_OWNER 0x00080000 -+#define ACE4_SYNCHRONIZE 0x00100000 -+ -+#define ACE4_VALID_MASK ( \ -+ ACE4_READ_DATA | ACE4_LIST_DIRECTORY | \ -+ ACE4_WRITE_DATA | ACE4_ADD_FILE | \ -+ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \ -+ ACE4_READ_NAMED_ATTRS | \ -+ ACE4_WRITE_NAMED_ATTRS | \ -+ ACE4_EXECUTE | \ -+ ACE4_DELETE_CHILD | \ -+ ACE4_READ_ATTRIBUTES | \ -+ ACE4_WRITE_ATTRIBUTES | \ -+ ACE4_DELETE | \ -+ ACE4_READ_ACL | \ -+ ACE4_WRITE_ACL | \ -+ ACE4_WRITE_OWNER | \ -+ ACE4_SYNCHRONIZE ) -+ -+#define ACE4_POSIX_ALWAYS_ALLOWED ( \ -+ ACE4_SYNCHRONIZE | \ -+ ACE4_READ_ATTRIBUTES | \ -+ ACE4_READ_ACL ) -+/* -+ * Duplicate an NFS4ACL handle. -+ */ -+static inline struct nfs4acl * -+nfs4acl_get(struct nfs4acl *acl) -+{ -+ if (acl) -+ atomic_inc(&acl->a_refcount); -+ return acl; -+} -+ -+/* -+ * Free an NFS4ACL handle -+ */ -+static inline void -+nfs4acl_put(struct nfs4acl *acl) -+{ -+ if (acl && atomic_dec_and_test(&acl->a_refcount)) -+ kfree(acl); -+} -+ -+/* Special e_who identifiers: we use these pointer values in comparisons -+ instead of strcmp for efficiency. */ -+ -+extern const char nfs4ace_owner_who[]; -+extern const char nfs4ace_group_who[]; -+extern const char nfs4ace_everyone_who[]; -+ -+static inline int -+nfs4ace_is_owner(const struct nfs4ace *ace) -+{ -+ return (ace->e_flags & ACE4_SPECIAL_WHO) && -+ ace->u.e_who == nfs4ace_owner_who; -+} -+ -+static inline int -+nfs4ace_is_group(const struct nfs4ace *ace) -+{ -+ return (ace->e_flags & ACE4_SPECIAL_WHO) && -+ ace->u.e_who == nfs4ace_group_who; -+} -+ -+static inline int -+nfs4ace_is_everyone(const struct nfs4ace *ace) -+{ -+ return (ace->e_flags & ACE4_SPECIAL_WHO) && -+ ace->u.e_who == nfs4ace_everyone_who; -+} -+ -+static inline int -+nfs4ace_is_unix_id(const struct nfs4ace *ace) -+{ -+ return !(ace->e_flags & ACE4_SPECIAL_WHO); -+} -+ -+static inline int -+nfs4ace_is_inherit_only(const struct nfs4ace *ace) -+{ -+ return ace->e_flags & ACE4_INHERIT_ONLY_ACE; -+} -+ -+static inline int -+nfs4ace_is_inheritable(const struct nfs4ace *ace) -+{ -+ return ace->e_flags & (ACE4_FILE_INHERIT_ACE | -+ ACE4_DIRECTORY_INHERIT_ACE); -+} -+ -+static inline void -+nfs4ace_clear_inheritance_flags(struct nfs4ace *ace) -+{ -+ ace->e_flags &= ~(ACE4_FILE_INHERIT_ACE | -+ ACE4_DIRECTORY_INHERIT_ACE | -+ ACE4_NO_PROPAGATE_INHERIT_ACE | -+ ACE4_INHERIT_ONLY_ACE); -+} -+ -+static inline int -+nfs4ace_is_allow(const struct nfs4ace *ace) -+{ -+ return ace->e_type == ACE4_ACCESS_ALLOWED_ACE_TYPE; -+} -+ -+static inline int -+nfs4ace_is_deny(const struct nfs4ace *ace) -+{ -+ return ace->e_type == ACE4_ACCESS_DENIED_ACE_TYPE; -+} -+ -+extern struct nfs4acl *nfs4acl_alloc(int count); -+extern struct nfs4acl *nfs4acl_clone(const struct nfs4acl *acl); -+ -+extern unsigned int nfs4acl_want_to_mask(int want); -+extern int nfs4acl_permission(struct inode *, const struct nfs4acl *, unsigned int); -+extern int nfs4acl_generic_permission(struct inode *, unsigned int); -+extern int nfs4ace_is_same_who(const struct nfs4ace *, const struct nfs4ace *); -+extern int nfs4ace_set_who(struct nfs4ace *ace, const char *who); -+extern struct 
nfs4acl *nfs4acl_inherit(const struct nfs4acl *, mode_t); -+extern int nfs4acl_masks_to_mode(const struct nfs4acl *); -+extern struct nfs4acl *nfs4acl_chmod(struct nfs4acl *, mode_t); -+extern int nfs4acl_apply_masks(struct nfs4acl **acl); -+extern int nfs4acl_write_through(struct nfs4acl **acl); -+ -+#endif /* __NFS4ACL_H */ ---- /dev/null -+++ b/include/linux/nfs4acl_xattr.h -@@ -0,0 +1,32 @@ -+#ifndef __NFS4ACL_XATTR_H -+#define __NFS4ACL_XATTR_H -+ -+#include -+ -+#define NFS4ACL_XATTR "system.nfs4acl" -+ -+struct nfs4ace_xattr { -+ __be16 e_type; -+ __be16 e_flags; -+ __be32 e_mask; -+ __be32 e_id; -+ char e_who[0]; -+}; -+ -+struct nfs4acl_xattr { -+ unsigned char a_version; -+ unsigned char a_flags; -+ __be16 a_count; -+ __be32 a_owner_mask; -+ __be32 a_group_mask; -+ __be32 a_other_mask; -+}; -+ -+#define ACL4_XATTR_VERSION 0 -+#define ACL4_XATTR_MAX_COUNT 1024 -+ -+extern struct nfs4acl *nfs4acl_from_xattr(const void *, size_t); -+extern size_t nfs4acl_xattr_size(const struct nfs4acl *acl); -+extern void nfs4acl_to_xattr(const struct nfs4acl *, void *); -+ -+#endif /* __NFS4ACL_XATTR_H */ diff --git a/patches.suse/nfs4acl-ext3.diff b/patches.suse/nfs4acl-ext3.diff deleted file mode 100644 index f2ac3eb..0000000 --- a/patches.suse/nfs4acl-ext3.diff +++ /dev/null @@ -1,872 +0,0 @@ -From: Andreas Gruenbacher -Subject: NFSv4 ACLs for ext3 -Patch-mainline: Not yet - -With the acl=nfs4 mount option, ext3 will use NFSv4 ACLs instead of -POSIX ACLs. See http://www.suse.de/~agruen/nfs4acl/ for some -documentation and examples. - -Signed-off-by: Andreas Gruenbacher - ---- - fs/ext3/Kconfig | 7 - fs/ext3/Makefile | 1 - fs/ext3/file.c | 4 - fs/ext3/ialloc.c | 6 - fs/ext3/inode.c | 73 ++++++++ - fs/ext3/namei.c | 15 + - fs/ext3/namei.h | 1 - fs/ext3/nfs4acl.c | 378 ++++++++++++++++++++++++++++++++++++++++++++++ - fs/ext3/nfs4acl.h | 36 ++++ - fs/ext3/super.c | 61 +++++-- - fs/ext3/xattr.c | 9 + - fs/ext3/xattr.h | 5 - include/linux/ext3_fs.h | 1 - include/linux/ext3_fs_i.h | 3 - 14 files changed, 582 insertions(+), 18 deletions(-) - ---- a/fs/ext3/Kconfig -+++ b/fs/ext3/Kconfig -@@ -97,6 +97,13 @@ config EXT3_FS_POSIX_ACL - - If you don't know what Access Control Lists are, say N - -+config EXT3_FS_NFS4ACL -+ bool "Native NFSv4 ACLs (EXPERIMENTAL)" -+ depends on EXT3_FS_XATTR && EXPERIMENTAL -+ select FS_NFS4ACL -+ help -+ Allow to use NFSv4 ACLs instead of POSIX ACLs. -+ - config EXT3_FS_SECURITY - bool "Ext3 Security Labels" - depends on EXT3_FS_XATTR ---- a/fs/ext3/Makefile -+++ b/fs/ext3/Makefile -@@ -10,3 +10,4 @@ ext3-y := balloc.o bitmap.o dir.o file.o - ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o - ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o - ext3-$(CONFIG_EXT3_FS_SECURITY) += xattr_security.o -+ext3-$(CONFIG_EXT3_FS_NFS4ACL) += nfs4acl.o ---- a/fs/ext3/file.c -+++ b/fs/ext3/file.c -@@ -24,8 +24,10 @@ - #include - #include - #include -+#include "namei.h" - #include "xattr.h" - #include "acl.h" -+#include "nfs4acl.h" - - /* - * Called when an inode is released. 
Note that this is different -@@ -81,5 +83,7 @@ const struct inode_operations ext3_file_ - #endif - .check_acl = ext3_check_acl, - .fiemap = ext3_fiemap, -+ .may_create = ext3_may_create, -+ .may_delete = ext3_may_delete, - }; - ---- a/fs/ext3/ialloc.c -+++ b/fs/ext3/ialloc.c -@@ -28,6 +28,7 @@ - - #include "xattr.h" - #include "acl.h" -+#include "nfs4acl.h" - - /* - * ialloc.c contains the inodes allocation and deallocation routines -@@ -593,7 +594,10 @@ got: - if (err) - goto fail_drop; - -- err = ext3_init_acl(handle, inode, dir); -+ if (test_opt(sb, NFS4ACL)) -+ err = ext3_nfs4acl_init(handle, inode, dir); -+ else -+ err = ext3_init_acl(handle, inode, dir); - if (err) - goto fail_free_drop; - ---- a/fs/ext3/inode.c -+++ b/fs/ext3/inode.c -@@ -40,6 +40,7 @@ - #include - #include "xattr.h" - #include "acl.h" -+#include "nfs4acl.h" - - static int ext3_writepage_trans_blocks(struct inode *inode); - -@@ -2790,6 +2791,9 @@ struct inode *ext3_iget(struct super_blo - return inode; - - ei = EXT3_I(inode); -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ ei->i_nfs4acl = EXT3_NFS4ACL_NOT_CACHED; -+#endif - ei->i_block_alloc_info = NULL; - - ret = __ext3_get_inode_loc(inode, &iloc, 0); -@@ -3124,6 +3128,65 @@ int ext3_write_inode(struct inode *inode - return ext3_force_commit(inode->i_sb); - } - -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+static int ext3_inode_change_ok(struct inode *inode, struct iattr *attr) -+{ -+ unsigned int ia_valid = attr->ia_valid; -+ -+ if (!test_opt(inode->i_sb, NFS4ACL)) -+ return inode_change_ok(inode, attr); -+ -+ /* If force is set do it anyway. */ -+ if (ia_valid & ATTR_FORCE) -+ return 0; -+ -+ /* Make sure a caller can chown. */ -+ if ((ia_valid & ATTR_UID) && -+ (current_fsuid() != inode->i_uid || -+ attr->ia_uid != inode->i_uid) && -+ (current_fsuid() != attr->ia_uid || -+ ext3_nfs4acl_permission(inode, ACE4_WRITE_OWNER)) && -+ !capable(CAP_CHOWN)) -+ goto error; -+ -+ /* Make sure caller can chgrp. */ -+ if ((ia_valid & ATTR_GID)) { -+ int in_group = in_group_p(attr->ia_gid); -+ if ((current_fsuid() != inode->i_uid || -+ (!in_group && attr->ia_gid != inode->i_gid)) && -+ (!in_group || -+ ext3_nfs4acl_permission(inode, ACE4_WRITE_OWNER)) && -+ !capable(CAP_CHOWN)) -+ goto error; -+ } -+ -+ /* Make sure a caller can chmod. */ -+ if (ia_valid & ATTR_MODE) { -+ if (current_fsuid() != inode->i_uid && -+ ext3_nfs4acl_permission(inode, ACE4_WRITE_ACL) && -+ !capable(CAP_FOWNER)) -+ goto error; -+ /* Also check the setgid bit! */ -+ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : -+ inode->i_gid) && !capable(CAP_FSETID)) -+ attr->ia_mode &= ~S_ISGID; -+ } -+ -+ /* Check for setting the inode time. 
*/ -+ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) { -+ if (current_fsuid() != inode->i_uid && -+ ext3_nfs4acl_permission(inode, ACE4_WRITE_ATTRIBUTES) && -+ !capable(CAP_FOWNER)) -+ goto error; -+ } -+ return 0; -+error: -+ return -EPERM; -+} -+#else -+# define ext3_inode_change_ok inode_change_ok -+#endif -+ - /* - * ext3_setattr() - * -@@ -3147,7 +3210,7 @@ int ext3_setattr(struct dentry *dentry, - int error, rc = 0; - const unsigned int ia_valid = attr->ia_valid; - -- error = inode_change_ok(inode, attr); -+ error = ext3_inode_change_ok(inode, attr); - if (error) - return error; - -@@ -3200,8 +3263,12 @@ int ext3_setattr(struct dentry *dentry, - - rc = inode_setattr(inode, attr); - -- if (!rc && (ia_valid & ATTR_MODE)) -- rc = ext3_acl_chmod(inode); -+ if (!rc && (ia_valid & ATTR_MODE)) { -+ if (test_opt(inode->i_sb, NFS4ACL)) -+ rc = ext3_nfs4acl_chmod(inode); -+ else -+ rc = ext3_acl_chmod(inode); -+ } - - err_out: - ext3_std_error(inode->i_sb, error); ---- a/fs/ext3/namei.c -+++ b/fs/ext3/namei.c -@@ -40,6 +40,7 @@ - #include "namei.h" - #include "xattr.h" - #include "acl.h" -+#include "nfs4acl.h" - - /* - * define how far ahead to read directories while searching them. -@@ -2445,6 +2446,16 @@ end_rename: - return retval; - } - -+int ext3_permission(struct inode *inode, int mask) -+{ -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ if (test_opt(inode->i_sb, NFS4ACL)) -+ return ext3_nfs4acl_permission(inode, nfs4acl_want_to_mask(mask)); -+ else -+#endif -+ return generic_permission(inode, mask, ext3_check_acl); -+} -+ - /* - * directories can handle most operations... - */ -@@ -2466,6 +2477,8 @@ const struct inode_operations ext3_dir_i - .removexattr = generic_removexattr, - #endif - .check_acl = ext3_check_acl, -+ .may_create = ext3_may_create, -+ .may_delete = ext3_may_delete, - }; - - const struct inode_operations ext3_special_inode_operations = { -@@ -2477,4 +2490,6 @@ const struct inode_operations ext3_speci - .removexattr = generic_removexattr, - #endif - .check_acl = ext3_check_acl, -+ .may_create = ext3_may_create, -+ .may_delete = ext3_may_delete, - }; ---- a/fs/ext3/namei.h -+++ b/fs/ext3/namei.h -@@ -5,4 +5,5 @@ - * - */ - -+extern int ext3_permission (struct inode *, int); - extern struct dentry *ext3_get_parent(struct dentry *child); ---- /dev/null -+++ b/fs/ext3/nfs4acl.c -@@ -0,0 +1,378 @@ -+/* -+ * Copyright (C) 2006 Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "namei.h" -+#include "xattr.h" -+#include "nfs4acl.h" -+ -+static inline struct nfs4acl * -+ext3_iget_nfs4acl(struct inode *inode) -+{ -+ struct nfs4acl *acl = EXT3_NFS4ACL_NOT_CACHED; -+ struct ext3_inode_info *ei = EXT3_I(inode); -+ -+ spin_lock(&inode->i_lock); -+ if (ei->i_nfs4acl != EXT3_NFS4ACL_NOT_CACHED) -+ acl = nfs4acl_get(ei->i_nfs4acl); -+ spin_unlock(&inode->i_lock); -+ -+ return acl; -+} -+ -+static inline void -+ext3_iset_nfs4acl(struct inode *inode, struct nfs4acl *acl) -+{ -+ struct ext3_inode_info *ei = EXT3_I(inode); -+ -+ spin_lock(&inode->i_lock); -+ if (ei->i_nfs4acl != EXT3_NFS4ACL_NOT_CACHED) -+ nfs4acl_put(ei->i_nfs4acl); -+ ei->i_nfs4acl = nfs4acl_get(acl); -+ spin_unlock(&inode->i_lock); -+} -+ -+static struct nfs4acl * -+ext3_get_nfs4acl(struct inode *inode) -+{ -+ const int name_index = EXT3_XATTR_INDEX_NFS4ACL; -+ void *value = NULL; -+ struct nfs4acl *acl; -+ int retval; -+ -+ if (!test_opt(inode->i_sb, NFS4ACL)) -+ return NULL; -+ -+ acl = ext3_iget_nfs4acl(inode); -+ if (acl != EXT3_NFS4ACL_NOT_CACHED) -+ return acl; -+ retval = ext3_xattr_get(inode, name_index, "", NULL, 0); -+ if (retval > 0) { -+ value = kmalloc(retval, GFP_KERNEL); -+ if (!value) -+ return ERR_PTR(-ENOMEM); -+ retval = ext3_xattr_get(inode, name_index, "", value, retval); -+ } -+ if (retval > 0) { -+ acl = nfs4acl_from_xattr(value, retval); -+ if (acl == ERR_PTR(-EINVAL)) -+ acl = ERR_PTR(-EIO); -+ } else if (retval == -ENODATA || retval == -ENOSYS) -+ acl = NULL; -+ else -+ acl = ERR_PTR(retval); -+ kfree(value); -+ -+ if (!IS_ERR(acl)) -+ ext3_iset_nfs4acl(inode, acl); -+ -+ return acl; -+} -+ -+static int -+ext3_set_nfs4acl(handle_t *handle, struct inode *inode, struct nfs4acl *acl) -+{ -+ const int name_index = EXT3_XATTR_INDEX_NFS4ACL; -+ size_t size = 0; -+ void *value = NULL; -+ int retval; -+ -+ if (acl) { -+ size = nfs4acl_xattr_size(acl); -+ value = kmalloc(size, GFP_KERNEL); -+ if (!value) -+ return -ENOMEM; -+ nfs4acl_to_xattr(acl, value); -+ } -+ if (handle) -+ retval = ext3_xattr_set_handle(handle, inode, name_index, "", -+ value, size, 0); -+ else -+ retval = ext3_xattr_set(inode, name_index, "", value, size, 0); -+ if (value) -+ kfree(value); -+ if (!retval) -+ ext3_iset_nfs4acl(inode, acl); -+ -+ return retval; -+} -+ -+int -+ext3_nfs4acl_permission(struct inode *inode, unsigned int mask) -+{ -+ struct nfs4acl *acl; -+ int retval; -+ -+ BUG_ON(!test_opt(inode->i_sb, NFS4ACL)); -+ -+ acl = ext3_get_nfs4acl(inode); -+ if (!acl) -+ retval = nfs4acl_generic_permission(inode, mask); -+ else if (IS_ERR(acl)) -+ retval = PTR_ERR(acl); -+ else { -+ retval = nfs4acl_permission(inode, acl, mask); -+ nfs4acl_put(acl); -+ } -+ -+ return retval; -+} -+ -+int ext3_may_create(struct inode *dir, int isdir) -+{ -+ int error; -+ -+ if (test_opt(dir->i_sb, NFS4ACL)) { -+ unsigned int mask = (isdir ? 
ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE) | -+ ACE4_EXECUTE; -+ -+ error = ext3_nfs4acl_permission(dir, mask); -+ } else -+ error = ext3_permission(dir, MAY_WRITE | MAY_EXEC); -+ -+ return error; -+} -+ -+static int check_sticky(struct inode *dir, struct inode *inode) -+{ -+ if (!(dir->i_mode & S_ISVTX)) -+ return 0; -+ if (inode->i_uid == current_fsuid()) -+ return 0; -+ if (dir->i_uid == current_fsuid()) -+ return 0; -+ return !capable(CAP_FOWNER); -+} -+ -+int ext3_may_delete(struct inode *dir, struct inode *inode) -+{ -+ int error; -+ -+ if (test_opt(inode->i_sb, NFS4ACL)) { -+ error = ext3_nfs4acl_permission(dir, ACE4_DELETE_CHILD | ACE4_EXECUTE); -+ if (!error && check_sticky(dir, inode)) -+ error = -EPERM; -+ if (error && !ext3_nfs4acl_permission(inode, ACE4_DELETE)) -+ error = 0; -+ } else { -+ error = ext3_permission(dir, MAY_WRITE | MAY_EXEC); -+ if (!error && check_sticky(dir, inode)) -+ error = -EPERM; -+ } -+ -+ return error; -+} -+ -+int -+ext3_nfs4acl_init(handle_t *handle, struct inode *inode, struct inode *dir) -+{ -+ struct nfs4acl *dir_acl = NULL, *acl; -+ int retval; -+ -+ if (!S_ISLNK(inode->i_mode)) -+ dir_acl = ext3_get_nfs4acl(dir); -+ if (!dir_acl || IS_ERR(dir_acl)) { -+ inode->i_mode &= ~current->fs->umask; -+ return PTR_ERR(dir_acl); -+ } -+ acl = nfs4acl_inherit(dir_acl, inode->i_mode); -+ nfs4acl_put(dir_acl); -+ -+ retval = PTR_ERR(acl); -+ if (acl && !IS_ERR(acl)) { -+ retval = ext3_set_nfs4acl(handle, inode, acl); -+ inode->i_mode = (inode->i_mode & ~S_IRWXUGO) | -+ nfs4acl_masks_to_mode(acl); -+ nfs4acl_put(acl); -+ } -+ return retval; -+} -+ -+int -+ext3_nfs4acl_chmod(struct inode *inode) -+{ -+ struct nfs4acl *acl; -+ int retval; -+ -+ if (S_ISLNK(inode->i_mode)) -+ return -EOPNOTSUPP; -+ acl = ext3_get_nfs4acl(inode); -+ if (!acl || IS_ERR(acl)) -+ return PTR_ERR(acl); -+ acl = nfs4acl_chmod(acl, inode->i_mode); -+ if (IS_ERR(acl)) -+ return PTR_ERR(acl); -+ retval = ext3_set_nfs4acl(NULL, inode, acl); -+ nfs4acl_put(acl); -+ -+ return retval; -+} -+ -+static size_t -+ext3_xattr_list_nfs4acl(struct dentry *dentry, char *list, size_t list_len, -+ const char *name, size_t name_len, int handler_flags) -+{ -+ struct inode *inode = dentry->d_inode; -+ const size_t size = sizeof(NFS4ACL_XATTR); -+ -+ if (!test_opt(inode->i_sb, NFS4ACL)) -+ return 0; -+ if (list && size <= list_len) -+ memcpy(list, NFS4ACL_XATTR, size); -+ return size; -+} -+ -+static int -+ext3_xattr_get_nfs4acl(struct dentry *dentry, const char *name, void *buffer, -+ size_t buffer_size, int handler_flags) -+{ -+ struct inode *inode = dentry->d_inode; -+ struct nfs4acl *acl; -+ size_t size; -+ -+ if (!test_opt(inode->i_sb, NFS4ACL)) -+ return -EOPNOTSUPP; -+ if (strcmp(name, "") != 0) -+ return -EINVAL; -+ -+ acl = ext3_get_nfs4acl(inode); -+ if (IS_ERR(acl)) -+ return PTR_ERR(acl); -+ if (acl == NULL) -+ return -ENODATA; -+ size = nfs4acl_xattr_size(acl); -+ if (buffer) { -+ if (size > buffer_size) -+ return -ERANGE; -+ nfs4acl_to_xattr(acl, buffer); -+ } -+ nfs4acl_put(acl); -+ -+ return size; -+} -+ -+#ifdef NFS4ACL_DEBUG -+static size_t -+ext3_xattr_list_masked_nfs4acl(struct dentry *dentry, char *list, -+ size_t list_len, const char *name, -+ size_t name_len, int handler_flags) -+{ -+ return 0; -+} -+ -+static int -+ext3_xattr_get_masked_nfs4acl(struct dentry *dentry, const char *name, -+ void *buffer, size_t buffer_size, -+ int handler_flags) -+{ -+ struct inode *inode = dentry->d_inode; -+ const int name_index = EXT3_XATTR_INDEX_NFS4ACL; -+ struct nfs4acl *acl; -+ void *xattr; -+ size_t 
size; -+ int retval; -+ -+ if (!test_opt(inode->i_sb, NFS4ACL)) -+ return -EOPNOTSUPP; -+ if (strcmp(name, "") != 0) -+ return -EINVAL; -+ retval = ext3_xattr_get(inode, name_index, "", NULL, 0); -+ if (retval <= 0) -+ return retval; -+ xattr = kmalloc(retval, GFP_KERNEL); -+ if (!xattr) -+ return -ENOMEM; -+ retval = ext3_xattr_get(inode, name_index, "", xattr, retval); -+ if (retval <= 0) -+ return retval; -+ acl = nfs4acl_from_xattr(xattr, retval); -+ kfree(xattr); -+ if (IS_ERR(acl)) -+ return PTR_ERR(acl); -+ retval = nfs4acl_apply_masks(&acl); -+ if (retval) { -+ nfs4acl_put(acl); -+ return retval; -+ } -+ size = nfs4acl_xattr_size(acl); -+ if (buffer) { -+ if (size > buffer_size) -+ return -ERANGE; -+ nfs4acl_to_xattr(acl, buffer); -+ } -+ nfs4acl_put(acl); -+ return size; -+} -+#endif -+ -+static int -+ext3_xattr_set_nfs4acl(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags, -+ int handler_flags) -+{ -+ struct inode *inode = dentry->d_inode; -+ handle_t *handle; -+ struct nfs4acl *acl = NULL; -+ int retval, retries = 0; -+ -+ if (S_ISLNK(inode->i_mode) || !test_opt(inode->i_sb, NFS4ACL)) -+ return -EOPNOTSUPP; -+ if (strcmp(name, "") != 0) -+ return -EINVAL; -+ if (current_fsuid() != inode->i_uid && -+ ext3_nfs4acl_permission(inode, ACE4_WRITE_ACL) && -+ !capable(CAP_FOWNER)) -+ return -EPERM; -+ if (value) { -+ acl = nfs4acl_from_xattr(value, size); -+ if (IS_ERR(acl)) -+ return PTR_ERR(acl); -+ -+ inode->i_mode &= ~S_IRWXUGO; -+ inode->i_mode |= nfs4acl_masks_to_mode(acl); -+ } -+ -+retry: -+ handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb)); -+ if (IS_ERR(handle)) -+ return PTR_ERR(handle); -+ ext3_mark_inode_dirty(handle, inode); -+ retval = ext3_set_nfs4acl(handle, inode, acl); -+ ext3_journal_stop(handle); -+ if (retval == ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) -+ goto retry; -+ nfs4acl_put(acl); -+ return retval; -+} -+ -+struct xattr_handler ext3_nfs4acl_xattr_handler = { -+ .prefix = NFS4ACL_XATTR, -+ .list = ext3_xattr_list_nfs4acl, -+ .get = ext3_xattr_get_nfs4acl, -+ .set = ext3_xattr_set_nfs4acl, -+}; -+ -+#ifdef NFS4ACL_DEBUG -+struct xattr_handler ext3_masked_nfs4acl_xattr_handler = { -+ .prefix = "system.masked-nfs4acl", -+ .list = ext3_xattr_list_masked_nfs4acl, -+ .get = ext3_xattr_get_masked_nfs4acl, -+ .set = ext3_xattr_set_nfs4acl, -+}; -+#endif ---- /dev/null -+++ b/fs/ext3/nfs4acl.h -@@ -0,0 +1,36 @@ -+#ifndef __FS_EXT3_NFS4ACL_H -+#define __FS_EXT3_NFS4ACL_H -+ -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ -+#include -+ -+/* Value for i_nfs4acl if NFS4ACL has not been cached */ -+#define EXT3_NFS4ACL_NOT_CACHED ((void *)-1) -+ -+extern int ext3_nfs4acl_permission(struct inode *, unsigned int); -+extern int ext3_may_create(struct inode *, int); -+extern int ext3_may_delete(struct inode *, struct inode *); -+extern int ext3_nfs4acl_init(handle_t *, struct inode *, struct inode *); -+extern int ext3_nfs4acl_chmod(struct inode *); -+ -+#else /* CONFIG_FS_EXT3_NFS4ACL */ -+ -+#define ext3_may_create NULL -+#define ext3_may_delete NULL -+ -+static inline int -+ext3_nfs4acl_init(handle_t *handle, struct inode *inode, struct inode *dir) -+{ -+ return 0; -+} -+ -+static inline int -+ext3_nfs4acl_chmod(struct inode *inode) -+{ -+ return 0; -+} -+ -+#endif /* CONFIG_FS_EXT3_NFS4ACL */ -+ -+#endif /* __FS_EXT3_NFS4ACL_H */ ---- a/fs/ext3/super.c -+++ b/fs/ext3/super.c -@@ -36,12 +36,14 @@ - #include - #include - #include -+#include - #include - - #include - - #include "xattr.h" - #include "acl.h" 
-+#include "nfs4acl.h" - #include "namei.h" - - #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED -@@ -476,6 +478,9 @@ static struct inode *ext3_alloc_inode(st - ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); - if (!ei) - return NULL; -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ ei->i_nfs4acl = EXT3_NFS4ACL_NOT_CACHED; -+#endif - ei->i_block_alloc_info = NULL; - ei->vfs_inode.i_version = 1; - atomic_set(&ei->i_datasync_tid, 0); -@@ -529,6 +534,13 @@ static void ext3_clear_inode(struct inod - { - struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info; - -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ if (EXT3_I(inode)->i_nfs4acl && -+ EXT3_I(inode)->i_nfs4acl != EXT3_NFS4ACL_NOT_CACHED) { -+ nfs4acl_put(EXT3_I(inode)->i_nfs4acl); -+ EXT3_I(inode)->i_nfs4acl = EXT3_NFS4ACL_NOT_CACHED; -+ } -+#endif - dquot_drop(inode); - ext3_discard_reservation(inode); - EXT3_I(inode)->i_block_alloc_info = NULL; -@@ -803,7 +815,7 @@ enum { - Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, - Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, - Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, -- Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, -+ Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_acl_flavor, Opt_noacl, - Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh, - Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev, - Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, -@@ -836,6 +848,7 @@ static const match_table_t tokens = { - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_acl, "acl"}, -+ {Opt_acl_flavor, "acl=%s"}, - {Opt_noacl, "noacl"}, - {Opt_reservation, "reservation"}, - {Opt_noreservation, "noreservation"}, -@@ -1040,20 +1053,33 @@ static int parse_options (char *options, - "(no)user_xattr options not supported"); - break; - #endif --#ifdef CONFIG_EXT3_FS_POSIX_ACL - case Opt_acl: -- set_opt(sbi->s_mount_opt, POSIX_ACL); -+ args[0].to = args[0].from; -+ /* fall through */ -+ case Opt_acl_flavor: -+#ifdef CONFIG_EXT3_FS_POSIX_ACL -+ if (match_string(&args[0], "") || -+ match_string(&args[0], "posix")) { -+ set_opt(sbi->s_mount_opt, POSIX_ACL); -+ clear_opt(sbi->s_mount_opt, NFS4ACL); -+ } else -+#endif -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ if (match_string(&args[0], "nfs4")) { -+ clear_opt(sbi->s_mount_opt, POSIX_ACL); -+ set_opt(sbi->s_mount_opt, NFS4ACL); -+ } else -+#endif -+ { -+ ext3_msg(sb, KERN_ERR, -+ "unsupported acl flavor"); -+ return 0; -+ } - break; - case Opt_noacl: - clear_opt(sbi->s_mount_opt, POSIX_ACL); -+ clear_opt(sbi->s_mount_opt, NFS4ACL); - break; --#else -- case Opt_acl: -- case Opt_noacl: -- ext3_msg(sb, KERN_INFO, -- "(no)acl options not supported"); -- break; --#endif - case Opt_reservation: - set_opt(sbi->s_mount_opt, RESERVATION); - break; -@@ -1698,8 +1724,11 @@ static int ext3_fill_super (struct super - NULL, 0)) - goto failed_mount; - -- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | -- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); -+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL); -+ if (test_opt(sb, POSIX_ACL)) -+ sb->s_flags |= MS_POSIXACL; -+ if (test_opt(sb, NFS4ACL)) -+ sb->s_flags |= MS_POSIXACL | MS_WITHAPPEND; - - if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV && - (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || -@@ -2576,8 +2605,12 @@ static int ext3_remount (struct super_bl - if (test_opt(sb, ABORT)) - ext3_abort(sb, __func__, "Abort forced by user"); - -- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | -- (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); -+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL); -+ if (test_opt(sb, POSIX_ACL)) -+ sb->s_flags |= MS_POSIXACL; -+ if (test_opt(sb, NFS4ACL)) -+ sb->s_flags |= MS_POSIXACL; -+ - - es = sbi->s_es; - ---- a/fs/ext3/xattr.c -+++ b/fs/ext3/xattr.c -@@ -114,6 +114,9 @@ static struct xattr_handler *ext3_xattr_ - #ifdef CONFIG_EXT3_FS_SECURITY - [EXT3_XATTR_INDEX_SECURITY] = &ext3_xattr_security_handler, - #endif -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ [EXT3_XATTR_INDEX_NFS4ACL] = &ext3_nfs4acl_xattr_handler, -+#endif - }; - - struct xattr_handler *ext3_xattr_handlers[] = { -@@ -126,6 +129,12 @@ struct xattr_handler *ext3_xattr_handler - #ifdef CONFIG_EXT3_FS_SECURITY - &ext3_xattr_security_handler, - #endif -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ &ext3_nfs4acl_xattr_handler, -+#ifdef NFS4ACL_DEBUG -+ &ext3_masked_nfs4acl_xattr_handler, -+#endif -+#endif - NULL - }; - ---- a/fs/ext3/xattr.h -+++ b/fs/ext3/xattr.h -@@ -21,6 +21,7 @@ - #define EXT3_XATTR_INDEX_TRUSTED 4 - #define EXT3_XATTR_INDEX_LUSTRE 5 - #define EXT3_XATTR_INDEX_SECURITY 6 -+#define EXT3_XATTR_INDEX_NFS4ACL 7 - - struct ext3_xattr_header { - __le32 h_magic; /* magic number for identification */ -@@ -63,6 +64,10 @@ extern struct xattr_handler ext3_xattr_t - extern struct xattr_handler ext3_xattr_acl_access_handler; - extern struct xattr_handler ext3_xattr_acl_default_handler; - extern struct xattr_handler ext3_xattr_security_handler; -+extern struct xattr_handler ext3_nfs4acl_xattr_handler; -+#ifdef NFS4ACL_DEBUG -+extern struct xattr_handler ext3_masked_nfs4acl_xattr_handler; -+#endif - - extern ssize_t ext3_listxattr(struct dentry *, char *, size_t); - ---- a/include/linux/ext3_fs.h -+++ b/include/linux/ext3_fs.h -@@ -406,6 +406,7 @@ struct ext3_inode { - #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ - #define EXT3_MOUNT_DATA_ERR_ABORT 0x400000 /* Abort on file data write - * error in ordered mode */ -+#define EXT3_MOUNT_NFS4ACL 0x800000 /* NFS version 4 ACLs */ - - /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ - #ifndef _LINUX_EXT2_FS_H ---- a/include/linux/ext3_fs_i.h -+++ b/include/linux/ext3_fs_i.h -@@ -103,6 +103,9 @@ struct ext3_inode_info { - */ - struct rw_semaphore xattr_sem; - #endif -+#ifdef CONFIG_EXT3_FS_NFS4ACL -+ struct nfs4acl *i_nfs4acl; -+#endif - - struct list_head i_orphan; /* unlinked but open inodes */ - diff --git a/patches.suse/no-frame-pointer-select b/patches.suse/no-frame-pointer-select index 29454f2..5c6091b 100644 --- a/patches.suse/no-frame-pointer-select +++ b/patches.suse/no-frame-pointer-select @@ -17,20 +17,24 @@ Acked-by: Jan Beulich --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -965,13 +965,15 @@ config FAULT_INJECTION_STACKTRACE_FILTER +@@ -1020,17 +1020,19 @@ config FAULT_INJECTION_STACKTRACE_FILTER depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT depends on !X86_64 select STACKTRACE -- select FRAME_POINTER if !PPC && !S390 -+ select FRAME_POINTER if !PPC && !S390 && !X86 +- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE ++ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !X86 + select UNWIND_INFO if X86 && !FRAME_POINTER help Provide stacktrace filter for fault-injection capabilities config LATENCYTOP bool "Latency measuring infrastructure" -- select FRAME_POINTER if !MIPS && !PPC && !S390 -+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !X86 + depends on HAVE_LATENCYTOP_SUPPORT + depends on DEBUG_KERNEL + depends on STACKTRACE_SUPPORT + depends on PROC_FS +- select FRAME_POINTER if !MIPS && !PPC && !S390 
&& !MICROBLAZE ++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !X86 + select UNWIND_INFO if X86 && !FRAME_POINTER select KALLSYMS select KALLSYMS_ALL diff --git a/patches.suse/no-partition-scan b/patches.suse/no-partition-scan index 00c1717..6f15f5f 100644 --- a/patches.suse/no-partition-scan +++ b/patches.suse/no-partition-scan @@ -18,8 +18,8 @@ Signed-off-by: Hannes Reinecke --- a/block/genhd.c +++ b/block/genhd.c -@@ -513,6 +513,18 @@ static int exact_lock(dev_t devt, void * - return 0; +@@ -504,6 +504,18 @@ static int exact_lock(dev_t devt, void * + disk_part_iter_exit(&piter); } +static int __read_mostly no_partition_scan; @@ -37,7 +37,7 @@ Signed-off-by: Hannes Reinecke /** * add_disk - add partitioning information to kernel list * @disk: per-device partitioning information -@@ -537,6 +549,9 @@ void add_disk(struct gendisk *disk) +@@ -528,6 +540,9 @@ void add_disk(struct gendisk *disk) disk->flags |= GENHD_FL_UP; @@ -47,7 +47,7 @@ Signed-off-by: Hannes Reinecke retval = blk_alloc_devt(&disk->part0, &devt); if (retval) { WARN_ON(1); -@@ -825,7 +840,27 @@ static ssize_t disk_range_show(struct de +@@ -817,7 +832,27 @@ static ssize_t disk_range_show(struct de { struct gendisk *disk = dev_to_disk(dev); @@ -76,7 +76,7 @@ Signed-off-by: Hannes Reinecke } static ssize_t disk_ext_range_show(struct device *dev, -@@ -879,7 +914,7 @@ static ssize_t disk_discard_alignment_sh +@@ -871,7 +906,7 @@ static ssize_t disk_discard_alignment_sh return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); } @@ -87,7 +87,7 @@ Signed-off-by: Hannes Reinecke static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); --- a/fs/partitions/check.c +++ b/fs/partitions/check.c -@@ -559,6 +559,8 @@ int rescan_partitions(struct gendisk *di +@@ -597,6 +597,8 @@ rescan: disk->fops->revalidate_disk(disk); check_disk_size_change(disk, bdev); bdev->bd_invalidated = 0; @@ -95,14 +95,14 @@ Signed-off-by: Hannes Reinecke + return 0; if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) return 0; - if (IS_ERR(state)) /* I/O error reading the partition table */ + if (IS_ERR(state)) { --- a/include/linux/genhd.h +++ b/include/linux/genhd.h -@@ -122,6 +122,7 @@ struct hd_struct { +@@ -116,6 +116,7 @@ struct hd_struct { #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ #define GENHD_FL_NATIVE_CAPACITY 128 +#define GENHD_FL_NO_PARTITION_SCAN 256 - #define BLK_SCSI_MAX_CMDS (256) - #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + enum { + DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ diff --git a/patches.suse/novfs-2.6.35-api-changes b/patches.suse/novfs-2.6.35-api-changes new file mode 100644 index 0000000..0119764 --- /dev/null +++ b/patches.suse/novfs-2.6.35-api-changes @@ -0,0 +1,54 @@ +From: Jeff Mahoney +Subject: [PATCH] novfs: API changes for 2.6.35 +Patch-mainline: WHenever novfs makes it upstream + + This patch contains API changes for 2.6.35 + +Acked-by: Jeff Mahoney +--- + fs/novfs/inode.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -84,7 +84,7 @@ void addtodentry(struct dentry *Parent, + int novfs_filldir(void *data, const char *name, int namelen, loff_t off, + ino_t ino, unsigned ftype); + int novfs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir); +-int novfs_dir_fsync(struct file *file, struct dentry *dentry, int datasync); ++int novfs_dir_fsync(struct file *file, int datasync); + + /* + * Declared address space 
operations +@@ -115,7 +115,7 @@ int novfs_f_mmap(struct file *file, stru + int novfs_f_open(struct inode *, struct file *); + int novfs_f_flush(struct file *, fl_owner_t); + int novfs_f_release(struct inode *, struct file *); +-int novfs_f_fsync(struct file *, struct dentry *, int datasync); ++int novfs_f_fsync(struct file *, int datasync); + int novfs_f_lock(struct file *, int, struct file_lock *); + + /* +@@ -1278,11 +1278,11 @@ int novfs_dir_readdir(struct file *file, + return 1; + } + +-int novfs_dir_fsync(struct file *file, struct dentry *dentry, int datasync) ++int novfs_dir_fsync(struct file *file, int datasync) + { + DbgPrint("Name %.*s", file->f_dentry->d_name.len, + file->f_dentry->d_name.name); +- return (simple_sync_file(file, dentry, datasync)); ++ return generic_file_fsync(file, datasync); + } + + ssize_t novfs_f_read(struct file * file, char *buf, size_t len, loff_t * off) +@@ -1709,7 +1709,7 @@ int novfs_f_release(struct inode *inode, + return (retCode); + } + +-int novfs_f_fsync(struct file *file, struct dentry *dentry, int datasync) ++int novfs_f_fsync(struct file *file, int datasync) + { + return 0; + } diff --git a/patches.suse/novfs-2.6.37-api-changes b/patches.suse/novfs-2.6.37-api-changes new file mode 100644 index 0000000..33d19c1 --- /dev/null +++ b/patches.suse/novfs-2.6.37-api-changes @@ -0,0 +1,298 @@ +From: Jeff Mahoney +Subject: novfs: 2.6.37 api changes +Patch-mainline: If novfs gets merged + + 2.6.37-rc1 removed the mutex interface to semaphores. This patch + replaces uses of semaphores as mutex with the mutex interface. + +Signed-off-by: Jeff Mahoney +--- + fs/novfs/daemon.c | 22 +++++++++++----------- + fs/novfs/inode.c | 33 +++++++++++++++++++-------------- + fs/novfs/profile.c | 10 +++++----- + fs/novfs/scope.c | 4 ++-- + 4 files changed, 37 insertions(+), 32 deletions(-) + +--- a/fs/novfs/daemon.c ++++ b/fs/novfs/daemon.c +@@ -109,7 +109,7 @@ static atomic_t Daemon_Open_Count = ATOM + + static unsigned long Daemon_Command_Timeout = TIMEOUT_VALUE; + +-static DECLARE_MUTEX(DriveMapLock); ++static DEFINE_MUTEX(DriveMapLock); + static LIST_HEAD(DriveMapList); + + int novfs_max_iosize = PAGE_SIZE; +@@ -118,7 +118,7 @@ void novfs_daemon_queue_init() + { + INIT_LIST_HEAD(&Daemon_Queue.list); + spin_lock_init(&Daemon_Queue.lock); +- init_MUTEX_LOCKED(&Daemon_Queue.semaphore); ++ sema_init(&Daemon_Queue.semaphore, 0); + } + + void novfs_daemon_queue_exit(void) +@@ -159,7 +159,7 @@ int Queue_Daemon_Command(void *request, + que->status = QUEUE_SENDING; + que->flags = 0; + +- init_MUTEX_LOCKED(&que->semaphore); ++ sema_init(&que->semaphore, 0); + + que->sequence = atomic_inc_return(&Sequence); + +@@ -881,7 +881,7 @@ int novfs_daemon_destroy_sessionId(struc + * When destroying the session check to see if there are any + * mapped drives. If there are then remove them. 
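/*
 * Illustrative sketch only (hypothetical example_* names, not novfs code):
 * the general conversion pattern this series applies. 2.6.37 dropped the
 * semaphore-as-mutex helpers (DECLARE_MUTEX, init_MUTEX, init_MUTEX_LOCKED);
 * locks that were really mutexes move to the mutex API, while semaphores kept
 * for signalling are set up with sema_init(&sem, 0) as in the hunks above.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* was: static DECLARE_MUTEX(example_lock); */

static void example_touch_shared_state(void)
{
	mutex_lock(&example_lock);	/* was: down(&example_lock); */
	/* ... modify state protected by example_lock ... */
	mutex_unlock(&example_lock);	/* was: up(&example_lock);   */
}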
+ */ +- down(&DriveMapLock); ++ mutex_lock(&DriveMapLock); + list_for_each(list, &DriveMapList) { + dm = list_entry(list, struct drive_map, list); + if (SC_EQUAL(SessionId, dm->session)) { +@@ -892,7 +892,7 @@ int novfs_daemon_destroy_sessionId(struc + } + + } +- up(&DriveMapLock); ++ mutex_unlock(&DriveMapLock); + + } else { + retCode = -EIO; +@@ -1740,7 +1740,7 @@ static int set_map_drive(struct novfs_xp + + dm = (struct drive_map *)&DriveMapList.next; + +- down(&DriveMapLock); ++ mutex_lock(&DriveMapLock); + + list_for_each(list, &DriveMapList) { + dm = list_entry(list, struct drive_map, list); +@@ -1766,7 +1766,7 @@ static int set_map_drive(struct novfs_xp + } + } else + kfree(drivemap); +- up(&DriveMapLock); ++ mutex_unlock(&DriveMapLock); + return (retVal); + } + +@@ -1799,7 +1799,7 @@ static int unmap_drive(struct novfs_xpla + + dm = NULL; + +- down(&DriveMapLock); ++ mutex_lock(&DriveMapLock); + + list_for_each(list, &DriveMapList) { + dm = list_entry(list, struct drive_map, list); +@@ -1823,7 +1823,7 @@ static int unmap_drive(struct novfs_xpla + kfree(dm); + } + +- up(&DriveMapLock); ++ mutex_unlock(&DriveMapLock); + return (retVal); + } + +@@ -1832,7 +1832,7 @@ static void RemoveDriveMaps(void) + struct drive_map *dm; + struct list_head *list; + +- down(&DriveMapLock); ++ mutex_lock(&DriveMapLock); + list_for_each(list, &DriveMapList) { + dm = list_entry(list, struct drive_map, list); + +@@ -1844,7 +1844,7 @@ static void RemoveDriveMaps(void) + list_del(&dm->list); + kfree(dm); + } +- up(&DriveMapLock); ++ mutex_unlock(&DriveMapLock); + } + + /* As picked from do_unlinkat() */ +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -43,7 +43,7 @@ struct inode_data { + struct inode *Inode; + unsigned long cntDC; + struct list_head DirCache; +- struct semaphore DirCacheLock; ++ struct mutex DirCacheLock; + void *FileHandle; + int CacheFlag; + char Name[1]; /* Needs to be last entry */ +@@ -268,11 +268,11 @@ static atomic_t novfs_Inode_Number = ATO + struct dentry *novfs_root = NULL; + char *novfs_current_mnt = NULL; + +-DECLARE_MUTEX(InodeList_lock); ++DEFINE_MUTEX(InodeList_lock); + + LIST_HEAD(InodeList); + +-DECLARE_MUTEX(TimeDir_Lock); ++DEFINE_MUTEX(TimeDir_Lock); + uint64_t lastTime; + char lastDir[PATH_MAX]; + +@@ -1050,7 +1050,7 @@ int novfs_dir_readdir(struct file *file, + // Use this hack by default + #ifndef SKIP_CROSSOVER_HACK + // Hack for crossover - begin +- down(&TimeDir_Lock); ++ mutex_lock(&TimeDir_Lock); + if ((file->f_dentry->d_name.len == 7) && + ((0 == strncmp(file->f_dentry->d_name.name, " !xover", 7)) || + (0 == strncmp(file->f_dentry->d_name.name, "z!xover", 7)))) { +@@ -1076,7 +1076,7 @@ int novfs_dir_readdir(struct file *file, + } + } + +- up(&TimeDir_Lock); ++ mutex_unlock(&TimeDir_Lock); + // Hack for crossover - end + #endif + +@@ -3157,9 +3157,9 @@ void novfs_evict_inode(struct inode *ino + + novfs_free_inode_cache(inode); + +- down(&InodeList_lock); ++ mutex_lock(&InodeList_lock); + list_del(&id->IList); +- up(&InodeList_lock); ++ mutex_unlock(&InodeList_lock); + + kfree(inode->i_private); + inode->i_private = NULL; +@@ -3292,15 +3292,15 @@ struct inode *novfs_get_inode(struct sup + id->cntDC = 1; + + INIT_LIST_HEAD(&id->DirCache); +- init_MUTEX(&id->DirCacheLock); ++ mutex_init(&id->DirCacheLock); + + id->FileHandle = 0; + id->CacheFlag = 0; + +- down(&InodeList_lock); ++ mutex_lock(&InodeList_lock); + + list_add_tail(&id->IList, &InodeList); +- up(&InodeList_lock); ++ mutex_unlock(&InodeList_lock); + + id->Name[0] = '\0'; + +@@ -3443,6 +3443,11 @@ 
static void novfs_kill_sb(struct super_b + kill_litter_super(super); + } + ++/* This should be removed */ ++#ifndef kernel_locked ++#define kernel_locked() (current->lock_depth >= 0) ++#endif ++ + ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, loff_t * ppos) + { + ssize_t retval = 0; +@@ -3532,7 +3537,7 @@ int novfs_lock_inode_cache(struct inode + + DbgPrint("0x%p", i); + if (i && (id = i->i_private) && id->DirCache.next) { +- down(&id->DirCacheLock); ++ mutex_lock(&id->DirCacheLock); + retVal = 1; + } + DbgPrint("return %d", retVal); +@@ -3544,7 +3549,7 @@ void novfs_unlock_inode_cache(struct ino + struct inode_data *id; + + if (i && (id = i->i_private) && id->DirCache.next) { +- up(&id->DirCacheLock); ++ mutex_unlock(&id->DirCacheLock); + } + } + +@@ -4042,7 +4047,7 @@ void novfs_dump_inode(void *pf) + char ctime_buf[32]; + unsigned long icnt = 0, dccnt = 0; + +- down(&InodeList_lock); ++ mutex_lock(&InodeList_lock); + list_for_each(il, &InodeList) { + id = list_entry(il, struct inode_data, IList); + inode = id->Inode; +@@ -4087,7 +4092,7 @@ void novfs_dump_inode(void *pf) + } + } + } +- up(&InodeList_lock); ++ mutex_unlock(&InodeList_lock); + + pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, dccnt); + +--- a/fs/novfs/profile.c ++++ b/fs/novfs/profile.c +@@ -60,7 +60,7 @@ static struct proc_dir_entry *dbg_file = + static struct proc_dir_entry *dentry_file = NULL; + static struct proc_dir_entry *inode_file = NULL; + +-static DECLARE_MUTEX(LocalPrint_lock); ++static DEFINE_MUTEX(LocalPrint_lock); + + static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user * buf, size_t nbytes, loff_t * ppos) + { +@@ -513,7 +513,7 @@ static ssize_t novfs_profile_read_inode( + static char save_DbgPrintOn; + + if (offset == 0) { +- down(&LocalPrint_lock); ++ mutex_lock(&LocalPrint_lock); + save_DbgPrintOn = DbgPrintOn; + DbgPrintOn = 0; + +@@ -527,7 +527,7 @@ static ssize_t novfs_profile_read_inode( + DbgPrintOn = save_DbgPrintOn; + DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; + +- up(&LocalPrint_lock); ++ mutex_unlock(&LocalPrint_lock); + } + + return retval; +@@ -541,7 +541,7 @@ static ssize_t novfs_profile_dentry_read + static char save_DbgPrintOn; + + if (offset == 0) { +- down(&LocalPrint_lock); ++ mutex_lock(&LocalPrint_lock); + save_DbgPrintOn = DbgPrintOn; + DbgPrintOn = 0; + DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; +@@ -554,7 +554,7 @@ static ssize_t novfs_profile_dentry_read + DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; + DbgPrintOn = save_DbgPrintOn; + +- up(&LocalPrint_lock); ++ mutex_unlock(&LocalPrint_lock); + } + + return retval; +--- a/fs/novfs/scope.c ++++ b/fs/novfs/scope.c +@@ -601,8 +601,8 @@ char *novfs_scope_dget_path(struct dentr + void novfs_scope_init(void) + { + INIT_LIST_HEAD(&Scope_List); +- init_MUTEX(&Scope_Lock); +- init_MUTEX_LOCKED(&Scope_Thread_Delay); ++ sema_init(&Scope_Lock, 1); ++ sema_init(&Scope_Thread_Delay, 0); + kthread_run(Scope_Cleanup_Thread, NULL, "novfs_ST"); + } + diff --git a/patches.suse/novfs-build-fix b/patches.suse/novfs-build-fix new file mode 100644 index 0000000..2f8daaa --- /dev/null +++ b/patches.suse/novfs-build-fix @@ -0,0 +1,142 @@ +From: Jeff Mahoney +Subject: novfs: Adopt 2.6.38-rc1 API changes +Patch-mainline: depends on local patches + + 2.6.38 changed a few FS-related APIs including + dentry_operations->d_{hash,compare}. + + This patch addresses those. 
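 For reference, a minimal sketch of the 2.6.38-style hooks under the new
 signatures (hypothetical example_* names, not the novfs implementation):
 d_hash now also receives the inode of the dentry being hashed, and d_compare
 receives the candidate name as an explicit (len, str) pair alongside the
 stored qstr, returning 0 on a match.

 #include <linux/dcache.h>
 #include <linux/string.h>

 static int example_d_hash(const struct dentry *dentry,
			   const struct inode *inode, struct qstr *q)
 {
	/* hash the name; a case-folding filesystem would lowercase it first */
	q->hash = full_name_hash(q->name, q->len);
	return 0;
 }

 static int example_d_compare(const struct dentry *parent,
			      const struct inode *parent_inode,
			      const struct dentry *dentry,
			      const struct inode *inode,
			      unsigned int len, const char *str,
			      const struct qstr *name)
 {
	/* 0 means the names match */
	if (len != name->len)
		return 1;
	return memcmp(str, name->name, len) ? 1 : 0;
 }

 static const struct dentry_operations example_dops = {
	.d_hash		= example_d_hash,
	.d_compare	= example_d_compare,
 };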
+ +Signed-off-by: Jeff Mahoney +--- + fs/novfs/inode.c | 46 +++++++++++++++++++++++----------------------- + 1 file changed, 23 insertions(+), 23 deletions(-) + +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -64,8 +64,10 @@ static void novfs_kill_sb(struct super_b + * Declared dentry_operations + */ + int novfs_d_revalidate(struct dentry *, struct nameidata *); +-int novfs_d_hash(struct dentry *, struct qstr *); +-int novfs_d_compare(struct dentry *, struct qstr *, struct qstr *); ++int novfs_d_hash(const struct dentry *, const struct inode *, struct qstr *); ++int novfs_d_compare(const struct dentry *, const struct inode *, ++ const struct dentry *, const struct inode *, ++ unsigned int, const char *, const struct qstr *); + int novfs_d_delete(struct dentry *dentry); + void novfs_d_release(struct dentry *dentry); + void novfs_d_iput(struct dentry *dentry, struct inode *inode); +@@ -306,7 +308,6 @@ static void PRINT_DENTRY(const char *s, + __DbgPrint(" d_op: 0x%p\n", d->d_op); + __DbgPrint(" d_sb: 0x%p\n", d->d_sb); + __DbgPrint(" d_flags: 0x%x\n", d->d_flags); +- __DbgPrint(" d_mounted: 0x%x\n", d->d_mounted); + __DbgPrint(" d_fsdata: 0x%p\n", d->d_fsdata); + /* DbgPrint(" d_cookie: 0x%x\n", d->d_cookie); */ + __DbgPrint(" d_parent: 0x%p\n", d->d_parent); +@@ -327,7 +328,7 @@ int novfs_remove_from_root(char *RemoveN + DbgPrint("%s", RemoveName); + name.len = strlen(RemoveName); + name.name = RemoveName; +- novfs_d_hash(novfs_root, &name); ++ novfs_d_hash(novfs_root, novfs_root->d_inode, &name); + + dentry = d_lookup(novfs_root, &name); + if (dentry) { +@@ -358,7 +359,7 @@ int novfs_add_to_root(char *AddName) + DbgPrint("%s", AddName); + name.len = strlen(AddName); + name.name = AddName; +- novfs_d_hash(novfs_root, &name); ++ novfs_d_hash(novfs_root, novfs_root->d_inode, &name); + + dir = novfs_root->d_inode; + +@@ -392,7 +393,7 @@ int novfs_Add_to_Root2(char *AddName) + name.len = strlen(AddName); + name.name = AddName; + +- novfs_d_hash(novfs_root, &name); ++ novfs_d_hash(novfs_root, novfs_root->d_inode, &name); + + entry = d_lookup(novfs_root, &name); + DbgPrint("novfs_d_lookup 0x%p", entry); +@@ -735,7 +736,8 @@ static unsigned long novfs_internal_hash + return (hash); + } + +-int novfs_d_hash(struct dentry *dentry, struct qstr *name) ++int novfs_d_hash(const struct dentry *dentry, const struct inode *inode, ++ struct qstr *name) + { + DbgPrint("%.*s", name->len, name->name); + +@@ -744,18 +746,15 @@ int novfs_d_hash(struct dentry *dentry, + return (0); + } + +-int novfs_d_strcmp(struct qstr *s1, struct qstr *s2) ++static int novfs_d_strcmp(const char *str1, unsigned int len, ++ const struct qstr *s2) + { + int retCode = 1; +- unsigned char *str1, *str2; +- unsigned int len; ++ const unsigned char *str2 = s2->name; + +- DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, s2->len, s2->name); ++ DbgPrint("s1=%.*s s2=%.*s", len, str1, s2->len, s2->name); + +- if (s1->len && (s1->len == s2->len) && (s1->hash == s2->hash)) { +- len = s1->len; +- str1 = (unsigned char *)s1->name; +- str2 = (unsigned char *)s2->name; ++ if (len && (len == s2->len)) { + for (retCode = 0; len--; str1++, str2++) { + if (*str1 != *str2) { + if (tolower(*str1) != tolower(*str2)) { +@@ -770,11 +769,14 @@ int novfs_d_strcmp(struct qstr *s1, stru + return (retCode); + } + +-int novfs_d_compare(struct dentry *parent, struct qstr *s1, struct qstr *s2) ++int novfs_d_compare(const struct dentry *parent, ++ const struct inode *parent_inode, ++ const struct dentry *dentry, const struct inode *inode, ++ unsigned int len, 
const char *s1, const struct qstr *s2) + { + int retCode; + +- retCode = novfs_d_strcmp(s1, s2); ++ retCode = novfs_d_strcmp(s1, len, s2); + + DbgPrint("retCode=0x%x", retCode); + return (retCode); +@@ -2647,7 +2649,7 @@ int novfs_i_rename(struct inode *odir, s + int retCode = -ENOTEMPTY; + char *newpath, *newbuf, *newcon; + char *oldpath, *oldbuf, *oldcon; +- struct qstr newname, oldname; ++ struct qstr oldname; + struct novfs_entry_info *info = NULL; + int oldlen, newlen; + struct novfs_schandle session; +@@ -2693,14 +2695,12 @@ int novfs_i_rename(struct inode *odir, s + DbgPrint("2; newcon=0x%p newpath=0x%p", newcon, newpath); + DbgPrint("2; oldcon=0x%p oldpath=0x%p", oldcon, oldpath); + if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { +- newname.name = newpath; +- newname.len = (int)(newcon - newpath); +- newname.hash = 0; +- + oldname.name = oldpath; + oldname.len = (int)(oldcon - oldpath); + oldname.hash = 0; +- if (!novfs_d_strcmp(&newname, &oldname)) { ++ if (!novfs_d_strcmp(newpath, ++ newcon - newpath, ++ &oldname)) { + + if (od->d_inode && od->d_inode->i_private) { + diff --git a/patches.suse/novfs-client-module b/patches.suse/novfs-client-module index 9c6327e..a7d25e6 100644 --- a/patches.suse/novfs-client-module +++ b/patches.suse/novfs-client-module @@ -52,7 +52,7 @@ Signed-off-by: Greg Kroah-Hartman --- a/fs/Kconfig +++ b/fs/Kconfig -@@ -260,6 +260,7 @@ source "fs/ncpfs/Kconfig" +@@ -241,6 +241,7 @@ source "fs/ncpfs/Kconfig" source "fs/coda/Kconfig" source "fs/afs/Kconfig" source "fs/9p/Kconfig" @@ -62,7 +62,7 @@ Signed-off-by: Greg Kroah-Hartman --- a/fs/Makefile +++ b/fs/Makefile -@@ -127,4 +127,5 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/ +@@ -125,4 +125,5 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/ obj-$(CONFIG_BTRFS_FS) += btrfs/ obj-$(CONFIG_GFS2_FS) += gfs2/ obj-$(CONFIG_EXOFS_FS) += exofs/ diff --git a/patches.suse/novfs-fix-ioctl-usage b/patches.suse/novfs-fix-ioctl-usage new file mode 100644 index 0000000..dffcb01 --- /dev/null +++ b/patches.suse/novfs-fix-ioctl-usage @@ -0,0 +1,202 @@ +From: Jeff Mahoney +Subject: novfs: Fix ioctl usage +Patch-mainline: Whenever novfs is merged + + Upstream commit b19dd42faf413b4705d4adb38521e82d73fa4249 removed support + for locked ioctls. This patch pushes the BKL into the novfs ioctl calls, + switches to ->unlocked_ioctl, and removes ioctls that were empty. + +Signed-off-by: Jeff Mahoney +--- + fs/novfs/daemon.c | 22 ++++++++++++++++++---- + fs/novfs/inode.c | 23 +---------------------- + fs/novfs/proc.c | 4 ++-- + fs/novfs/vfs.h | 4 ++-- + 4 files changed, 23 insertions(+), 30 deletions(-) + +--- a/fs/novfs/daemon.c ++++ b/fs/novfs/daemon.c +@@ -1022,11 +1022,14 @@ int novfs_daemon_debug_cmd_send(char *Co + return (retCode); + } + +-int novfs_daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) ++long novfs_daemon_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + { + int retCode = -ENOSYS; + unsigned long cpylen; + struct novfs_schandle session_id; ++ ++ lock_kernel(); /* needed? 
*/ ++ + session_id = novfs_scope_get_sessionId(NULL); + + switch (cmd) { +@@ -1046,8 +1049,10 @@ int novfs_daemon_ioctl(struct inode *ino + char *buf; + io.length = 0; + cpylen = copy_from_user(&io, (char *)arg, sizeof(io)); +- if (io.length <= 0 || io.length > 1024) ++ if (io.length <= 0 || io.length > 1024) { ++ unlock_kernel(); + return -EINVAL; ++ } + if (io.length) { + buf = kmalloc(io.length + 1, GFP_KERNEL); + if (buf) { +@@ -1081,6 +1086,9 @@ int novfs_daemon_ioctl(struct inode *ino + } + + } ++ ++ unlock_kernel(); ++ + return (retCode); + } + +@@ -1337,13 +1345,15 @@ loff_t novfs_daemon_lib_llseek(struct fi + + #define DbgIocCall(str) __DbgPrint("[VFS XPLAT] Call " str "\n") + +-int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) ++long novfs_daemon_lib_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + { + int retCode = -ENOSYS; + struct daemon_handle *dh; + void *handle = NULL; + unsigned long cpylen; + ++ lock_kernel(); /* needed? */ ++ + dh = file->private_data; + + DbgPrint("file=0x%p 0x%x 0x%p dh=0x%p", file, cmd, arg, dh); +@@ -1368,8 +1378,10 @@ int novfs_daemon_lib_ioctl(struct inode + char *buf; + io.length = 0; + cpylen = copy_from_user(&io, (void *)arg, sizeof(io)); +- if (io.length <= 0 || io.length > 1024) ++ if (io.length <= 0 || io.length > 1024) { ++ unlock_kernel(); + return -EINVAL; ++ } + if (io.length) { + buf = kmalloc(io.length + 1, GFP_KERNEL); + if (buf) { +@@ -1596,6 +1608,8 @@ int novfs_daemon_lib_ioctl(struct inode + } + } + ++ unlock_kernel(); ++ + return (retCode); + } + +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -101,7 +101,6 @@ ssize_t novfs_a_direct_IO(int rw, struct + ssize_t novfs_f_read(struct file *, char *, size_t, loff_t *); + ssize_t novfs_f_write(struct file *, const char *, size_t, loff_t *); + int novfs_f_readdir(struct file *, void *, filldir_t); +-int novfs_f_ioctl(struct inode *, struct file *, unsigned int, unsigned long); + int novfs_f_mmap(struct file *file, struct vm_area_struct *vma); + int novfs_f_open(struct inode *, struct file *); + int novfs_f_flush(struct file *, fl_owner_t); +@@ -151,8 +150,6 @@ ssize_t novfs_control_Read(struct file * + + ssize_t novfs_control_write(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); + +-int novfs_control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); +- + int __init init_novfs(void); + void __exit exit_novfs(void); + +@@ -197,7 +194,6 @@ static struct file_operations novfs_file + .read = novfs_f_read, + .write = novfs_f_write, + .readdir = novfs_f_readdir, +- .ioctl = novfs_f_ioctl, + .mmap = novfs_f_mmap, + .open = novfs_f_open, + .flush = novfs_f_flush, +@@ -254,7 +250,7 @@ static struct inode_operations novfs_fil + + static struct super_operations novfs_ops = { + .statfs = novfs_statfs, +- .clear_inode = novfs_clear_inode, ++ .evict_inode = novfs_clear_inode, + .drop_inode = generic_delete_inode, + .show_options = novfs_show_options, + +@@ -264,7 +260,6 @@ static struct super_operations novfs_ops + static struct file_operations novfs_Control_operations = { + .read = novfs_Control_read, + .write = novfs_Control_write, +- .ioctl = novfs_Control_ioctl, + }; + */ + +@@ -1277,13 +1272,6 @@ int novfs_f_readdir(struct file *file, v + return -EISDIR; + } + +-int novfs_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +-{ +- DbgPrint("file=0x%p cmd=0x%x arg=0x%p", file, cmd, arg); +- +- return -ENOSYS; +-} +- + int novfs_f_mmap(struct 
file *file, struct vm_area_struct *vma) + { + int retCode = -EINVAL; +@@ -3471,15 +3459,6 @@ ssize_t novfs_Control_write(struct file + + return (retval); + } +- +-int novfs_Control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +-{ +- int retval = 0; +- +- DbgPrint("kernel_locked 0x%x", kernel_locked()); +- +- return (retval); +-} + + static struct file_system_type novfs_fs_type = { + .name = "novfs", +--- a/fs/novfs/proc.c ++++ b/fs/novfs/proc.c +@@ -77,7 +77,7 @@ int novfs_proc_init(void) + novfs_daemon_proc_fops.release = novfs_daemon_close_control; + novfs_daemon_proc_fops.read = novfs_daemon_cmd_send; + novfs_daemon_proc_fops.write = novfs_daemon_recv_reply; +- novfs_daemon_proc_fops.ioctl = novfs_daemon_ioctl; ++ novfs_daemon_proc_fops.unlocked_ioctl = novfs_daemon_ioctl; + + Novfs_Control->proc_fops = &novfs_daemon_proc_fops; + } else { +@@ -99,7 +99,7 @@ int novfs_proc_init(void) + novfs_lib_proc_fops.read = novfs_daemon_lib_read; + novfs_lib_proc_fops.write = novfs_daemon_lib_write; + novfs_lib_proc_fops.llseek = novfs_daemon_lib_llseek; +- novfs_lib_proc_fops.ioctl = novfs_daemon_lib_ioctl; ++ novfs_lib_proc_fops.unlocked_ioctl = novfs_daemon_lib_ioctl; + Novfs_Library->proc_fops = &novfs_lib_proc_fops; + } else { + remove_proc_entry("Control", novfs_procfs_dir); +--- a/fs/novfs/vfs.h ++++ b/fs/novfs/vfs.h +@@ -246,9 +246,9 @@ extern int novfs_daemon_get_userspace(st + extern int novfs_daemon_debug_cmd_send(char *Command); + extern ssize_t novfs_daemon_recv_reply(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); + extern ssize_t novfs_daemon_cmd_send(struct file *file, char *buf, size_t len, loff_t * off); +-extern int novfs_daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); ++extern long novfs_daemon_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + extern int novfs_daemon_lib_close(struct inode *inode, struct file *file); +-extern int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); ++extern long novfs_daemon_lib_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + extern int novfs_daemon_lib_open(struct inode *inode, struct file *file); + extern ssize_t novfs_daemon_lib_read(struct file *file, char *buf, size_t len, loff_t * off); + extern ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, size_t len, loff_t * off); diff --git a/patches.suse/novfs-use-evict_inode b/patches.suse/novfs-use-evict_inode new file mode 100644 index 0000000..d64d723 --- /dev/null +++ b/patches.suse/novfs-use-evict_inode @@ -0,0 +1,47 @@ +From: Jeff Mahoney +Subject: novfs: use evict_inode +Patch-mainline: When novfs is merged + + Upstream commit b57922d97fd6f79b6dbe6db0c4fd30d219fa08c1 removed + support for ->clear_inode in favor of ->evict_inode. + + This patch implements support for ->evict_inode. 
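 A minimal sketch of the replacement hook (hypothetical example_* names, not
 the novfs code): under the post-2.6.36 API an evict_inode implementation is
 expected to drop the inode's page cache and call end_writeback() itself,
 then perform whatever teardown used to live in ->clear_inode.

 #include <linux/fs.h>
 #include <linux/mm.h>

 static void example_evict_inode(struct inode *inode)
 {
	truncate_inode_pages(&inode->i_data, 0);	/* drop cached pages     */
	end_writeback(inode);				/* mark the inode clean  */
	/* filesystem-private cleanup (old ->clear_inode work) goes here */
 }

 static const struct super_operations example_sops = {
	.evict_inode	= example_evict_inode,		/* was: .clear_inode */
 };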
+ +Signed-off-by: Jeff Mahoney +--- + fs/novfs/inode.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +--- a/fs/novfs/inode.c ++++ b/fs/novfs/inode.c +@@ -138,7 +138,7 @@ void update_inode(struct inode *Inode, s + void novfs_read_inode(struct inode *inode); + void novfs_write_inode(struct inode *inode); + int novfs_notify_change(struct dentry *dentry, struct iattr *attr); +-void novfs_clear_inode(struct inode *inode); ++void novfs_evict_inode(struct inode *inode); + int novfs_show_options(struct seq_file *s, struct vfsmount *m); + + int novfs_statfs(struct dentry *de, struct kstatfs *buf); +@@ -250,7 +250,7 @@ static struct inode_operations novfs_fil + + static struct super_operations novfs_ops = { + .statfs = novfs_statfs, +- .evict_inode = novfs_clear_inode, ++ .evict_inode = novfs_evict_inode, + .drop_inode = generic_delete_inode, + .show_options = novfs_show_options, + +@@ -3143,8 +3143,11 @@ int novfs_notify_change(struct dentry *d + return (0); + } + +-void novfs_clear_inode(struct inode *inode) ++void novfs_evict_inode(struct inode *inode) + { ++ truncate_inode_pages(&inode->i_data, 0); ++ end_writeback(inode); ++ + InodeCount--; + + if (inode->i_private) { diff --git a/patches.suse/ocfs2-allocation-resrvations.patch b/patches.suse/ocfs2-allocation-resrvations.patch deleted file mode 100644 index 4d26ee8..0000000 --- a/patches.suse/ocfs2-allocation-resrvations.patch +++ /dev/null @@ -1,1243 +0,0 @@ -From: Mark Fasheh -Date: Thu, 19 Nov 2009 15:15:38 -0800 -Subject: ocfs2: allocation reservations -Patch-mainline: 2.6.33? -References: bnc#501563 FATE#307247 - -ocfs2: allocation reservations - -This patch improves Ocfs2 allocation policy by allowing an inode to -reserve a portion of the local alloc bitmap for itself. Allocation windows -are advisory in that they won't block use of that portion of the bitmap. -This makes dealing with corner cases much easier - we can always fall back -to previous policy. - -Reservation windows are represented internally by a red-black tree. Within -that tree, each node represents the reservation window of one inode. When -new data is written, we try to allocate from the window first. If that -allocation fails, we fall back to our old heuristics and a new window is -computed from the results. Allocation windows will also be extended if -allocation from them succeeds. - -Signed-off-by: Mark Fasheh - ---- - Documentation/filesystems/ocfs2.txt | 3 - fs/ocfs2/Makefile | 1 - fs/ocfs2/aops.c | 2 - fs/ocfs2/cluster/masklog.c | 1 - fs/ocfs2/cluster/masklog.h | 1 - fs/ocfs2/dir.c | 2 - fs/ocfs2/file.c | 19 + - fs/ocfs2/inode.c | 4 - fs/ocfs2/inode.h | 2 - fs/ocfs2/localalloc.c | 39 +- - fs/ocfs2/ocfs2.h | 5 - fs/ocfs2/reservations.c | 668 ++++++++++++++++++++++++++++++++++++ - fs/ocfs2/reservations.h | 151 ++++++++ - fs/ocfs2/suballoc.c | 1 - fs/ocfs2/suballoc.h | 2 - fs/ocfs2/super.c | 27 + - 16 files changed, 922 insertions(+), 6 deletions(-) - ---- a/Documentation/filesystems/ocfs2.txt -+++ b/Documentation/filesystems/ocfs2.txt -@@ -80,3 +80,6 @@ user_xattr (*) Enables Extended User Att - nouser_xattr Disables Extended User Attributes. - acl Enables POSIX Access Control Lists support. - noacl (*) Disables POSIX Access Control Lists support. -+resv_level=3 (*) Set how agressive allocation reservations will be. -+ Valid values are between 0 (reservations off) to 6 -+ (maximum space for reservations). 
---- a/fs/ocfs2/Makefile -+++ b/fs/ocfs2/Makefile -@@ -29,6 +29,7 @@ ocfs2-objs := \ - mmap.o \ - namei.o \ - refcounttree.o \ -+ reservations.o \ - resize.o \ - slot_map.o \ - suballoc.o \ ---- a/fs/ocfs2/aops.c -+++ b/fs/ocfs2/aops.c -@@ -1735,6 +1735,8 @@ int ocfs2_write_begin_nolock(struct addr - goto out; - } - -+ data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; -+ - credits = ocfs2_calc_extend_credits(inode->i_sb, - &di->id2.i_list, - clusters_to_alloc); ---- a/fs/ocfs2/cluster/masklog.c -+++ b/fs/ocfs2/cluster/masklog.c -@@ -116,6 +116,7 @@ static struct mlog_attribute mlog_attrs[ - define_mask(ERROR), - define_mask(NOTICE), - define_mask(KTHREAD), -+ define_mask(RESERVATIONS), - }; - - static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, }; ---- a/fs/ocfs2/cluster/masklog.h -+++ b/fs/ocfs2/cluster/masklog.h -@@ -119,6 +119,7 @@ - #define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */ - #define ML_NOTICE 0x0000000200000000ULL /* setn to KERN_NOTICE */ - #define ML_KTHREAD 0x0000000400000000ULL /* kernel thread activity */ -+#define ML_RESERVATIONS 0x0000000800000000ULL /* ocfs2 alloc reservations */ - - #define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE) - #define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT) ---- a/fs/ocfs2/dir.c -+++ b/fs/ocfs2/dir.c -@@ -2991,6 +2991,7 @@ static int ocfs2_expand_inline_dir(struc - * if we only get one now, that's enough to continue. The rest - * will be claimed after the conversion to extents. - */ -+ data_ac->ac_resv = &oi->ip_la_data_resv; - ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len); - if (ret) { - mlog_errno(ret); -@@ -3368,6 +3369,7 @@ static int ocfs2_extend_dir(struct ocfs2 - mlog_errno(status); - goto bail; - } -+ data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv; - - credits = ocfs2_calc_extend_credits(sb, el, 1); - } else { ---- a/fs/ocfs2/file.c -+++ b/fs/ocfs2/file.c -@@ -147,6 +147,7 @@ leave: - static int ocfs2_file_release(struct inode *inode, struct file *file) - { - struct ocfs2_inode_info *oi = OCFS2_I(inode); -+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - - mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, - file->f_path.dentry->d_name.len, -@@ -157,6 +158,21 @@ static int ocfs2_file_release(struct ino - oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT; - spin_unlock(&oi->ip_lock); - -+#if 0 -+ /* -+ * Disable this for now. Keeping the reservation around a bit -+ * longer gives an improvement for workloads which rapidly do -+ * open()/write()/close() against a file. 
-+ */ -+ if ((file->f_mode & FMODE_WRITE) && -+ (atomic_read(&inode->i_writecount) == 1)) { -+ down_write(&oi->ip_alloc_sem); -+ ocfs2_resv_discard(&osb->osb_la_resmap, -+ &oi->ip_la_data_resv); -+ up_write(&oi->ip_alloc_sem); -+ } -+#endif -+ - ocfs2_free_file_private(inode, file); - - mlog_exit(0); -@@ -488,6 +504,9 @@ static int ocfs2_truncate_file(struct in - - down_write(&OCFS2_I(inode)->ip_alloc_sem); - -+ ocfs2_resv_discard(&osb->osb_la_resmap, -+ &OCFS2_I(inode)->ip_la_data_resv); -+ - /* - * The inode lock forced other nodes to sync and drop their - * pages, which (correctly) happens even if we have a truncate ---- a/fs/ocfs2/inode.c -+++ b/fs/ocfs2/inode.c -@@ -1101,6 +1101,10 @@ void ocfs2_clear_inode(struct inode *ino - ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres); - ocfs2_mark_lockres_freeing(&oi->ip_open_lockres); - -+ ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap, -+ &oi->ip_la_data_resv); -+ ocfs2_resv_init_once(&oi->ip_la_data_resv); -+ - /* We very well may get a clear_inode before all an inodes - * metadata has hit disk. Of course, we can't drop any cluster - * locks until the journal has finished with it. The only ---- a/fs/ocfs2/inode.h -+++ b/fs/ocfs2/inode.h -@@ -70,6 +70,8 @@ struct ocfs2_inode_info - /* Only valid if the inode is the dir. */ - u32 ip_last_used_slot; - u64 ip_last_used_group; -+ -+ struct ocfs2_alloc_reservation ip_la_data_resv; - }; - - /* ---- a/fs/ocfs2/localalloc.c -+++ b/fs/ocfs2/localalloc.c -@@ -52,7 +52,8 @@ static u32 ocfs2_local_alloc_count_bits( - - static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, - struct ocfs2_dinode *alloc, -- u32 numbits); -+ u32 numbits, -+ struct ocfs2_alloc_reservation *resv); - - static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc); - -@@ -262,6 +263,8 @@ void ocfs2_shutdown_local_alloc(struct o - - osb->local_alloc_state = OCFS2_LA_DISABLED; - -+ ocfs2_resmap_uninit(&osb->osb_la_resmap); -+ - main_bm_inode = ocfs2_get_system_file_inode(osb, - GLOBAL_BITMAP_SYSTEM_INODE, - OCFS2_INVALID_SLOT); -@@ -498,7 +501,7 @@ static int ocfs2_local_alloc_in_range(st - alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; - la = OCFS2_LOCAL_ALLOC(alloc); - -- start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted); -+ start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted, NULL); - if (start == -1) { - mlog_errno(-ENOSPC); - return 0; -@@ -664,7 +667,8 @@ int ocfs2_claim_local_alloc_bits(struct - alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; - la = OCFS2_LOCAL_ALLOC(alloc); - -- start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted); -+ start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted, -+ ac->ac_resv); - if (start == -1) { - /* TODO: Shouldn't we just BUG here? 
*/ - status = -ENOSPC; -@@ -687,6 +691,9 @@ int ocfs2_claim_local_alloc_bits(struct - goto bail; - } - -+ ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start, -+ bits_wanted); -+ - while(bits_wanted--) - ocfs2_set_bit(start++, bitmap); - -@@ -722,11 +729,13 @@ static u32 ocfs2_local_alloc_count_bits( - } - - static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, -- struct ocfs2_dinode *alloc, -- u32 numbits) -+ struct ocfs2_dinode *alloc, -+ u32 numbits, -+ struct ocfs2_alloc_reservation *resv) - { - int numfound, bitoff, left, startoff, lastzero; - void *bitmap = NULL; -+ struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap; - - mlog_entry("(numbits wanted = %u)\n", numbits); - -@@ -738,6 +747,20 @@ static int ocfs2_local_alloc_find_clear_ - - bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap; - -+ /* -+ * Ask the reservations code first whether this request can be -+ * easily fulfilled. No errors here are fatal - if we didn't -+ * find the number of bits needed, we'll just take the slow -+ * path. -+ */ -+ if (ocfs2_resmap_resv_bits(resmap, resv, bitmap, &bitoff, &numfound) -+ == 0) { -+ if (numfound >= numbits) { -+ numfound = numbits; -+ goto bail; -+ } -+ } -+ - numfound = bitoff = startoff = 0; - lastzero = -1; - left = le32_to_cpu(alloc->id1.bitmap1.i_total); -@@ -772,8 +795,10 @@ static int ocfs2_local_alloc_find_clear_ - - if (numfound == numbits) - bitoff = startoff - numfound; -- else -+ else { -+ numfound = 0; - bitoff = -1; -+ } - - bail: - mlog_exit(bitoff); -@@ -1096,6 +1121,8 @@ retry_enospc: - memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0, - le16_to_cpu(la->la_size)); - -+ ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count); -+ - mlog(0, "New window allocated:\n"); - mlog(0, "window la_bm_off = %u\n", - OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); ---- a/fs/ocfs2/ocfs2.h -+++ b/fs/ocfs2/ocfs2.h -@@ -47,6 +47,7 @@ - /* For struct ocfs2_blockcheck_stats */ - #include "blockcheck.h" - -+#include "reservations.h" - - /* Caching of metadata buffers */ - -@@ -349,6 +350,10 @@ struct ocfs2_super - - u64 la_last_gd; - -+ struct ocfs2_reservation_map osb_la_resmap; -+ -+ unsigned int osb_resv_level; -+ - /* Next three fields are for local node slot recovery during - * mount. */ - int dirty; ---- /dev/null -+++ b/fs/ocfs2/reservations.c -@@ -0,0 +1,668 @@ -+/* -*- mode: c; c-basic-offset: 8; -*- -+ * vim: noexpandtab sw=8 ts=8 sts=0: -+ * -+ * reservations.c -+ * -+ * Allocation reservations implementation -+ * -+ * Some code borrowed from fs/ext3/balloc.c and is: -+ * -+ * Copyright (C) 1992, 1993, 1994, 1995 -+ * Remy Card (card@masi.ibp.fr) -+ * Laboratoire MASI - Institut Blaise Pascal -+ * Universite Pierre et Marie Curie (Paris VI) -+ * -+ * The rest is copyright (C) 2009 Novell. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public -+ * License as published by the Free Software Foundation; either -+ * version 2 of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#define MLOG_MASK_PREFIX ML_RESERVATIONS -+#include -+ -+#include "ocfs2.h" -+ -+#ifdef CONFIG_OCFS2_DEBUG_FS -+#define OCFS2_CHECK_RESERVATIONS -+#endif -+ -+#define OCFS2_CHECK_RESERVATIONS -+ -+ -+DEFINE_SPINLOCK(resv_lock); -+ -+#define OCFS2_MIN_RESV_WINDOW_BITS 8 -+#define OCFS2_MAX_RESV_WINDOW_BITS 1024 -+ -+static unsigned int ocfs2_resv_window_bits(struct ocfs2_reservation_map *resmap) -+{ -+ struct ocfs2_super *osb = resmap->m_osb; -+ -+ mlog(0, "resv_level: %u\n", osb->osb_resv_level); -+ -+ switch (osb->osb_resv_level) { -+ case 6: -+ return OCFS2_MAX_RESV_WINDOW_BITS; -+ case 5: -+ return 512; -+ case 4: -+ return 256; -+ case 3: -+ return 128; -+ case 2: -+ return 64; -+ } -+ -+ return OCFS2_MIN_RESV_WINDOW_BITS; -+} -+ -+static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv) -+{ -+ if (resv->r_len) -+ return resv->r_start + resv->r_len - 1; -+ return resv->r_start; -+} -+ -+static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv) -+{ -+ return !!(resv->r_len == 0); -+} -+ -+static inline int ocfs2_resmap_disabled(struct ocfs2_reservation_map *resmap) -+{ -+ if (resmap->m_osb->osb_resv_level == 0) -+ return 1; -+ return 0; -+} -+ -+static void ocfs2_dump_resv(struct ocfs2_reservation_map *resmap) -+{ -+ struct ocfs2_super *osb = resmap->m_osb; -+ struct rb_node *node; -+ struct ocfs2_alloc_reservation *resv; -+ int i = 0; -+ -+ mlog(ML_NOTICE, "Dumping resmap for device %s. Bitmap length: %u\n", -+ osb->dev_str, resmap->m_bitmap_len); -+ -+ node = rb_first(&resmap->m_reservations); -+ while (node) { -+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); -+ -+ mlog(ML_NOTICE, "start: %u\tend: %u\tlen: %u\tlast_start: %u" -+ "\tlast_len: %u\tallocated: %u\n", resv->r_start, -+ ocfs2_resv_end(resv), resv->r_len, resv->r_last_start, -+ resv->r_last_len, resv->r_allocated); -+ -+ node = rb_next(node); -+ i++; -+ } -+ -+ mlog(ML_NOTICE, "%d reservations found\n", i); -+} -+ -+#ifdef OCFS2_CHECK_RESERVATIONS -+static void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap) -+{ -+ unsigned int off = 0; -+ int i = 0; -+ struct rb_node *node; -+ struct ocfs2_alloc_reservation *resv; -+ -+ node = rb_first(&resmap->m_reservations); -+ while (node) { -+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); -+ -+ if (i > 0 && resv->r_start <= off) { -+ mlog(ML_ERROR, "reservation %d has bad start off!\n", -+ i); -+ goto bad; -+ } -+ -+ if (resv->r_len == 0) { -+ mlog(ML_ERROR, "reservation %d has no length!\n", -+ i); -+ goto bad; -+ } -+ -+ if (resv->r_start > ocfs2_resv_end(resv)) { -+ mlog(ML_ERROR, "reservation %d has invalid range!\n", -+ i); -+ goto bad; -+ } -+ -+ if (ocfs2_resv_end(resv) > resmap->m_bitmap_len) { -+ mlog(ML_ERROR, "reservation %d extends past bitmap!\n", -+ i); -+ goto bad; -+ } -+ -+ off = ocfs2_resv_end(resv); -+ node = rb_next(node); -+ -+ i++; -+ } -+ return; -+ -+bad: -+ ocfs2_dump_resv(resmap); -+ BUG(); -+} -+#else -+static inline void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap) -+{ -+ -+} -+#endif -+ -+void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv) -+{ -+ memset(resv, 0, sizeof(*resv)); -+} -+ -+int ocfs2_resmap_init(struct ocfs2_super *osb, -+ struct ocfs2_reservation_map *resmap) -+{ -+ memset(resmap, 0, sizeof(*resmap)); -+ -+ resmap->m_osb = osb; -+ resmap->m_reservations = RB_ROOT; -+ /* m_bitmap_len is initialized to zero by the above memset. 
*/ -+ -+ return 0; -+} -+ -+static void __ocfs2_resv_trunc(struct ocfs2_alloc_reservation *resv) -+{ -+ resv->r_len = 0; -+ resv->r_allocated = 0; -+} -+ -+static void ocfs2_resv_remove(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv) -+{ -+ if (resv->r_inuse) { -+ rb_erase(&resv->r_node, &resmap->m_reservations); -+ resv->r_inuse = 0; -+ } -+} -+ -+static void __ocfs2_resv_discard(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv) -+{ -+ assert_spin_locked(&resv_lock); -+ -+ __ocfs2_resv_trunc(resv); -+ ocfs2_resv_remove(resmap, resv); -+} -+ -+/* does nothing if 'resv' is null */ -+void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv) -+{ -+ if (resv) { -+ spin_lock(&resv_lock); -+ __ocfs2_resv_discard(resmap, resv); -+ spin_unlock(&resv_lock); -+ } -+} -+ -+static void ocfs2_resmap_clear_all_resv(struct ocfs2_reservation_map *resmap) -+{ -+ struct rb_node *node; -+ struct ocfs2_alloc_reservation *resv; -+ -+ assert_spin_locked(&resv_lock); -+ -+ while ((node = rb_last(&resmap->m_reservations)) != NULL) { -+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); -+ -+ __ocfs2_resv_discard(resmap, resv); -+ /* -+ * last_len and last_start no longer make sense if -+ * we're changing the range of our allocations. -+ */ -+ resv->r_last_len = resv->r_last_start = 0; -+ } -+} -+ -+/* If any parameters have changed, this function will call -+ * ocfs2_resv_trunc against all existing reservations. */ -+void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap, -+ unsigned int clen) -+{ -+ if (ocfs2_resmap_disabled(resmap)) -+ return; -+ -+ spin_lock(&resv_lock); -+ -+ ocfs2_resmap_clear_all_resv(resmap); -+ resmap->m_bitmap_len = clen; -+ -+ spin_unlock(&resv_lock); -+} -+ -+void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap) -+{ -+ /* Does nothing for now. Keep this around for API symmetry */ -+} -+ -+/* -+ * Determine the number of available bits between my_resv and the next -+ * window and extends my_resv accordingly. 
-+ */ -+static int ocfs2_try_to_extend_resv(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *my_resv) -+{ -+ unsigned int available, avail_end; -+ struct rb_node *next, *node = &my_resv->r_node; -+ struct ocfs2_alloc_reservation *next_resv; -+ unsigned int bits = ocfs2_resv_window_bits(resmap); -+ -+ next = rb_next(node); -+ -+ if (next) { -+ next_resv = rb_entry(next, struct ocfs2_alloc_reservation, -+ r_node); -+ avail_end = next_resv->r_start; -+ } else { -+ avail_end = resmap->m_bitmap_len - 1; -+ } -+ -+ if (ocfs2_resv_end(my_resv) == avail_end) -+ return -ENOENT; -+ -+ available = avail_end - ocfs2_resv_end(my_resv) - 1; -+ -+ my_resv->r_len += available; -+ if (my_resv->r_len > bits) -+ my_resv->r_len = bits; -+ -+ ocfs2_check_resmap(resmap); -+ -+ return 0; -+} -+ -+static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *new) -+{ -+ struct rb_root *root = &resmap->m_reservations; -+ struct rb_node *parent = NULL; -+ struct rb_node **p = &root->rb_node; -+ struct ocfs2_alloc_reservation *tmp; -+ -+ assert_spin_locked(&resv_lock); -+ -+ mlog(0, "Insert reservation start: %u len: %u\n", new->r_start, -+ new->r_len); -+ -+ while(*p) { -+ parent = *p; -+ -+ tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node); -+ -+ if (new->r_start < tmp->r_start) -+ p = &(*p)->rb_left; -+ else if (new->r_start > ocfs2_resv_end(tmp)) -+ p = &(*p)->rb_right; -+ else { -+ /* This should never happen! */ -+ mlog(ML_ERROR, "Duplicate reservation window!\n"); -+ BUG(); -+ } -+ } -+ -+ rb_link_node(&new->r_node, parent, p); -+ rb_insert_color(&new->r_node, root); -+ new->r_inuse = 1; -+ -+ ocfs2_check_resmap(resmap); -+} -+ -+/** -+ * ocfs2_find_resv() - find the window which contains goal -+ * @resmap: reservation map to search -+ * @goal: which bit to search for -+ * -+ * If a window containing that goal is not found, we return the window -+ * which comes before goal. Returns NULL on empty rbtree or no window -+ * before goal. -+ */ -+static struct ocfs2_alloc_reservation * -+ocfs2_find_resv(struct ocfs2_reservation_map *resmap, unsigned int goal) -+{ -+ struct ocfs2_alloc_reservation *resv; -+ struct rb_node *n = resmap->m_reservations.rb_node; -+ -+ assert_spin_locked(&resv_lock); -+ -+ if (!n) -+ return NULL; -+ -+ do { -+ resv = rb_entry(n, struct ocfs2_alloc_reservation, r_node); -+ -+ if (goal < resv->r_start) -+ n = n->rb_left; -+ else if (goal > ocfs2_resv_end(resv)) -+ n = n->rb_right; -+ else -+ return resv; -+ } while (n); -+ -+ /* -+ * The goal sits on one end of the tree. If it's the leftmost -+ * end, we return NULL. 
-+ */ -+ if (resv->r_start > goal) -+ return NULL; -+ -+ return resv; -+} -+ -+static void ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv) -+{ -+ struct rb_root *root = &resmap->m_reservations; -+ unsigned int last_start = resv->r_last_start; -+ unsigned int goal = 0; -+ unsigned int len = ocfs2_resv_window_bits(resmap); -+ unsigned int gap_start, gap_end, gap_len; -+ struct ocfs2_alloc_reservation *prev_resv, *next_resv; -+ struct rb_node *prev, *next; -+ -+ if (resv->r_last_len) { -+ unsigned int last_end = last_start + resv->r_last_len - 1; -+ -+ goal = last_end + 1; -+ -+ if (goal >= resmap->m_bitmap_len) -+ goal = 0; -+ } -+ -+ /* -+ * Nasty cases to consider: -+ * -+ * - rbtree is empty -+ * - our window should be first in all reservations -+ * - our window should be last in all reservations -+ * - need to make sure we don't go past end of bitmap -+ */ -+ -+ assert_spin_locked(&resv_lock); -+ -+ if (RB_EMPTY_ROOT(root)) { -+ /* -+ * Easiest case - empty tree. We can just take -+ * whatever window we want. -+ */ -+ -+ mlog(0, "Empty root\n"); -+ -+ resv->r_start = goal; -+ resv->r_len = len; -+ if (ocfs2_resv_end(resv) >= resmap->m_bitmap_len) -+ resv->r_len = resmap->m_bitmap_len - resv->r_start; -+ -+ ocfs2_resv_insert(resmap, resv); -+ return; -+ } -+ -+ prev_resv = ocfs2_find_resv(resmap, goal); -+ -+ if (prev_resv == NULL) { -+ mlog(0, "Farthest left window\n"); -+ -+ /* Ok, we're the farthest left window. */ -+ next = rb_first(root); -+ next_resv = rb_entry(next, struct ocfs2_alloc_reservation, -+ r_node); -+ -+ /* -+ * Try to allocate at far left of tree. If that -+ * doesn't fit, we just start our linear search from -+ * next_resv -+ */ -+ if (next_resv->r_start > (goal + len - 1)) { -+ resv->r_start = goal; -+ resv->r_len = len; -+ -+ ocfs2_resv_insert(resmap, resv); -+ return; -+ } -+ -+ prev_resv = next_resv; -+ next_resv = NULL; -+ } -+ -+ prev = &prev_resv->r_node; -+ -+ /* Now we do a linear search for a window, starting at 'prev_rsv' */ -+ while (1) { -+ next = rb_next(prev); -+ if (next) { -+ mlog(0, "One more resv found in linear search\n"); -+ next_resv = rb_entry(next, -+ struct ocfs2_alloc_reservation, -+ r_node); -+ -+ gap_start = ocfs2_resv_end(prev_resv) + 1; -+ gap_end = next_resv->r_start - 1; -+ gap_len = gap_end - gap_start + 1; -+ } else { -+ mlog(0, "No next node\n"); -+ /* -+ * We're at the rightmost edge of the -+ * tree. See if a reservation between this -+ * window and the end of the bitmap will work. 
-+ */ -+ gap_start = ocfs2_resv_end(prev_resv) + 1; -+ gap_end = resmap->m_bitmap_len - 1; -+ gap_len = gap_end - gap_start + 1; -+ } -+ -+ if (gap_start <= gap_end -+ && gap_start >= goal -+ && gap_len >= len) { -+ resv->r_start = gap_start; -+ resv->r_len = len; -+ -+ ocfs2_resv_insert(resmap, resv); -+ return; -+ } -+ -+ if (!next) -+ break; -+ -+ prev = next; -+ prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation, -+ r_node); -+ } -+} -+ -+void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv, -+ u32 cstart, u32 clen) -+{ -+ unsigned int cend = cstart + clen - 1; -+ -+ if (resmap == NULL || ocfs2_resmap_disabled(resmap)) -+ return; -+ -+ if (resv == NULL) -+ return; -+ -+ spin_lock(&resv_lock); -+ -+ mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u " -+ "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n", -+ cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv), -+ resv->r_len, resv->r_last_start, resv->r_last_len); -+ -+ resv->r_last_len = clen; -+ resv->r_last_start = cstart; -+ -+ if (ocfs2_resv_empty(resv)) { -+ mlog(0, "Empty reservation, find a new window.\n"); -+ /* -+ * Allocation occured without a window. We find an -+ * initial reservation for this inode, based on what -+ * was allocated already. -+ */ -+ ocfs2_resv_find_window(resmap, resv); -+ goto out_unlock; -+ } -+ -+ /* -+ * Did the allocation occur completely outside our -+ * reservation? Clear it then. Otherwise, try to extend our -+ * reservation or alloc a new one, if we've used all the bits. -+ */ -+ if (cend < resv->r_start || -+ cstart > ocfs2_resv_end(resv)) { -+ mlog(0, "Allocated outside reservation\n"); -+ -+ /* Truncate and remove reservation */ -+ __ocfs2_resv_discard(resmap, resv); -+ -+ if (cend < resv->r_start) { -+ /* -+ * The window wasn't used for some reason. We -+ * should start our search *past* it to give a -+ * better chance the next window will be -+ * used. Best way to do this right now is to -+ * fool the search code... -+ */ -+ resv->r_last_start = ocfs2_resv_end(resv) + 1; -+ resv->r_last_len = 1; -+ } -+ -+ ocfs2_resv_find_window(resmap, resv); -+ goto out_unlock; -+ } -+ -+ /* -+ * We allocated at least partially from our -+ * reservation. Adjust it and try to extend. Otherwise, we -+ * search for a new window. -+ */ -+ -+ resv->r_allocated += clen; -+ -+ if (cend < ocfs2_resv_end(resv)) { -+ u32 old_end; -+ -+ mlog(0, "Allocation left at end\n"); -+ -+ /* -+ * Partial allocation, leaving some bits free at -+ * end. We move over the start of the window to take -+ * this into account and try to extend it. -+ */ -+ old_end = ocfs2_resv_end(resv); -+ resv->r_start = cend + 1; /* Start just past last allocation */ -+ resv->r_len = old_end - resv->r_start + 1; -+ -+ if (ocfs2_try_to_extend_resv(resmap, resv) == 0) -+ goto out_unlock; -+ } -+ -+ mlog(0, "discard reservation\n"); -+ -+ /* -+ * No free bits at end or extend failed above. Truncate and -+ * re-search for a new window. 
-+ */ -+ -+ __ocfs2_resv_discard(resmap, resv); -+ -+ ocfs2_resv_find_window(resmap, resv); -+ -+out_unlock: -+ mlog(0, "Reservation now looks like: r_start: %u r_end: %u " -+ "r_len: %u r_last_start: %u r_last_len: %u\n", -+ resv->r_start, ocfs2_resv_end(resv), resv->r_len, -+ resv->r_last_start, resv->r_last_len); -+ -+ spin_unlock(&resv_lock); -+} -+ -+int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv, -+ char *disk_bitmap, int *cstart, int *clen) -+{ -+ int ret = -ENOSPC; -+ unsigned int start, len, best_start = 0, best_len = 0; -+ -+ if (resv == NULL || ocfs2_resmap_disabled(resmap)) -+ return -ENOSPC; -+ -+ spin_lock(&resv_lock); -+ -+ if (ocfs2_resv_empty(resv)) { -+ mlog(0, "empty reservation, find new window\n"); -+ -+ ocfs2_resv_find_window(resmap, resv); -+ -+ if (ocfs2_resv_empty(resv)) { -+ /* -+ * If resv is still empty, we return zero -+ * bytes and allow ocfs2_resmap_claimed_bits() -+ * to start our new reservation after the -+ * allocator has done it's work. -+ */ -+ *cstart = *clen = 0; -+ ret = 0; -+ goto out; -+ } -+ } -+ -+ start = resv->r_start; -+ len = 0; -+ -+ while (start <= ocfs2_resv_end(resv)) { -+ if (ocfs2_test_bit(start, disk_bitmap)) { -+ mlog(0, -+ "Reservation was taken at bit %d\n", -+ start + len); -+ best_len = 0; -+ goto next; -+ } -+ -+ /* This is basic, but since the local alloc is -+ * used very predictably, I think we're ok. */ -+ if (!best_len) { -+ best_start = start; -+ best_len = 1; -+ } else { -+ best_len++; -+ } -+ -+next: -+ start++; -+ } -+ -+ if (best_len) { -+ ret = 0; -+ *cstart = best_start; -+ *clen = best_len; -+ } -+out: -+ spin_unlock(&resv_lock); -+ -+ return ret; -+} ---- /dev/null -+++ b/fs/ocfs2/reservations.h -@@ -0,0 +1,151 @@ -+/* -*- mode: c; c-basic-offset: 8; -*- -+ * vim: noexpandtab sw=8 ts=8 sts=0: -+ * -+ * reservations.h -+ * -+ * Allocation reservations function prototypes and structures. -+ * -+ * Copyright (C) 2009 Novell. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public -+ * License as published by the Free Software Foundation; either -+ * version 2 of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public -+ * License along with this program; if not, write to the -+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330, -+ * Boston, MA 021110-1307, USA. -+ */ -+ -+#ifndef OCFS2_RESERVATIONS_H -+#define OCFS2_RESERVATIONS_H -+ -+#include -+ -+struct ocfs2_bitmap_resv_ops; -+ -+#define OCFS2_DEFAULT_RESV_LEVEL 3 -+#define OCFS2_MAX_RESV_LEVEL 7 -+#define OCFS2_MIN_RESV_LEVEL 0 -+ -+struct ocfs2_alloc_reservation { -+ struct rb_node r_node; -+ -+ unsigned int r_start; -+ unsigned int r_len; -+ -+ unsigned int r_last_len; -+ unsigned int r_last_start; -+ -+ unsigned int r_allocated; -+ -+ int r_inuse; -+}; -+ -+struct ocfs2_reservation_map { -+ struct rb_root m_reservations; -+ -+ struct ocfs2_super *m_osb; -+ -+ /* The following are not initialized to meaningful values until a disk -+ * bitmap is provided. 
*/ -+ u32 m_bitmap_len; /* Number of valid -+ * bits available */ -+}; -+ -+void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv); -+ -+/** -+ * ocfs2_resv_discard() - truncate a reservation -+ * @resmap: -+ * @resv: the reservation to truncate. -+ * -+ * After this function is called, the reservation will be empty, and -+ * unlinked from the rbtree. -+ */ -+void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv); -+ -+ -+/** -+ * ocfs2_resmap_init() - Initialize fields of a reservations bitmap -+ * @resmap: struct ocfs2_reservation_map to initialize -+ * @obj: unused for now -+ * @ops: unused for now -+ * @max_bitmap_bytes: Maximum size of the bitmap (typically blocksize) -+ * -+ * Only possible return value other than '0' is -ENOMEM for failure to -+ * allocation mirror bitmap. -+ */ -+int ocfs2_resmap_init(struct ocfs2_super *osb, -+ struct ocfs2_reservation_map *resmap); -+ -+/** -+ * ocfs2_resmap_restart() - "restart" a reservation bitmap -+ * @resmap: reservations bitmap -+ * @clen: Number of valid bits in the bitmap -+ * -+ * Re-initialize the parameters of a reservation bitmap. This is -+ * useful for local alloc window slides. -+ * -+ * If any bitmap parameters have changed, this function will call -+ * ocfs2_trunc_resv against all existing reservations. A future -+ * version will recalculate existing reservations based on the new -+ * bitmap. -+ */ -+void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap, -+ unsigned int clen); -+ -+/** -+ * ocfs2_resmap_uninit() - uninitialize a reservation bitmap structure -+ * @resmap: the struct ocfs2_reservation_map to uninitialize -+ */ -+void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap); -+ -+/** -+ * ocfs2_resmap_resv_bits() - Return still-valid reservation bits -+ * @resmap: reservations bitmap -+ * @resv: reservation to base search from -+ * @disk_bitmap: up to date (from disk) allocation bitmap -+ * @cstart: start of proposed allocation -+ * @clen: length (in clusters) of proposed allocation -+ * -+ * Using the reservation data from resv, this function will compare -+ * resmap and disk_bitmap to determine what part (if any) of the -+ * reservation window is still clear to use. An empty resv passed here -+ * will just return no allocation. -+ * -+ * On success, zero is returned and the valid allocation area is set in cstart -+ * and clen. If no allocation is found, they are set to zero. -+ * -+ * Returns nonzero on error. -+ */ -+int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv, -+ char *disk_bitmap, int *cstart, int *clen); -+ -+/** -+ * ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used. -+ * @resmap: reservations bitmap -+ * @resv: optional reservation to recalulate based on new bitmap -+ * @cstart: start of allocation in clusters -+ * @clen: end of allocation in clusters. -+ * -+ * Tell the reservation code that bits were used to fulfill allocation in -+ * resmap. The bits don't have to have been part of any existing -+ * reservation. But we must always call this function when bits are claimed. -+ * Internally, the reservations code will use this information to mark the -+ * reservations bitmap. If resv is passed, it's next allocation window will be -+ * calculated. 
-+ */ -+void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap, -+ struct ocfs2_alloc_reservation *resv, -+ u32 cstart, u32 clen); -+ -+#endif /* OCFS2_RESERVATIONS_H */ ---- a/fs/ocfs2/suballoc.c -+++ b/fs/ocfs2/suballoc.c -@@ -137,6 +137,7 @@ void ocfs2_free_ac_resource(struct ocfs2 - } - brelse(ac->ac_bh); - ac->ac_bh = NULL; -+ ac->ac_resv = NULL; - } - - void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) ---- a/fs/ocfs2/suballoc.h -+++ b/fs/ocfs2/suballoc.h -@@ -54,6 +54,8 @@ struct ocfs2_alloc_context { - u64 ac_last_group; - u64 ac_max_block; /* Highest block number to allocate. 0 is - is the same as ~0 - unlimited */ -+ -+ struct ocfs2_alloc_reservation *ac_resv; - }; - - void ocfs2_init_steal_slots(struct ocfs2_super *osb); ---- a/fs/ocfs2/super.c -+++ b/fs/ocfs2/super.c -@@ -95,6 +95,7 @@ struct mount_options - unsigned int atime_quantum; - signed short slot; - unsigned int localalloc_opt; -+ unsigned int resv_level; - char cluster_stack[OCFS2_STACK_LABEL_LEN + 1]; - }; - -@@ -176,6 +177,7 @@ enum { - Opt_noacl, - Opt_usrquota, - Opt_grpquota, -+ Opt_resv_level, - Opt_err, - }; - -@@ -202,6 +204,7 @@ static const match_table_t tokens = { - {Opt_noacl, "noacl"}, - {Opt_usrquota, "usrquota"}, - {Opt_grpquota, "grpquota"}, -+ {Opt_resv_level, "resv_level=%u"}, - {Opt_err, NULL} - }; - -@@ -1030,6 +1033,7 @@ static int ocfs2_fill_super(struct super - osb->osb_commit_interval = parsed_options.commit_interval; - osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt); - osb->local_alloc_bits = osb->local_alloc_default_bits; -+ osb->osb_resv_level = parsed_options.resv_level; - - status = ocfs2_verify_userspace_stack(osb, &parsed_options); - if (status) -@@ -1290,6 +1294,7 @@ static int ocfs2_parse_options(struct su - mopt->slot = OCFS2_INVALID_SLOT; - mopt->localalloc_opt = OCFS2_DEFAULT_LOCAL_ALLOC_SIZE; - mopt->cluster_stack[0] = '\0'; -+ mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; - - if (!options) { - status = 1; -@@ -1433,6 +1438,17 @@ static int ocfs2_parse_options(struct su - mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; - mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; - break; -+ case Opt_resv_level: -+ if (is_remount) -+ break; -+ if (match_int(&args[0], &option)) { -+ status = 0; -+ goto bail; -+ } -+ if (option >= OCFS2_MIN_RESV_LEVEL && -+ option < OCFS2_MAX_RESV_LEVEL) -+ mopt->resv_level = option; -+ break; - default: - mlog(ML_ERROR, - "Unrecognized mount option \"%s\" " -@@ -1514,6 +1530,9 @@ static int ocfs2_show_options(struct seq - else - seq_printf(s, ",noacl"); - -+ if (osb->osb_resv_level != OCFS2_DEFAULT_RESV_LEVEL) -+ seq_printf(s, ",resv_level=%d", osb->osb_resv_level); -+ - return 0; - } - -@@ -1688,6 +1707,8 @@ static void ocfs2_inode_init_once(void * - oi->ip_blkno = 0ULL; - oi->ip_clusters = 0; - -+ ocfs2_resv_init_once(&oi->ip_la_data_resv); -+ - ocfs2_lock_res_init_once(&oi->ip_rw_lockres); - ocfs2_lock_res_init_once(&oi->ip_inode_lockres); - ocfs2_lock_res_init_once(&oi->ip_open_lockres); -@@ -2042,6 +2063,12 @@ static int ocfs2_initialize_super(struct - - init_waitqueue_head(&osb->osb_mount_event); - -+ status = ocfs2_resmap_init(osb, &osb->osb_la_resmap); -+ if (status) { -+ mlog_errno(status); -+ goto bail; -+ } -+ - osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL); - if (!osb->vol_label) { - mlog(ML_ERROR, "unable to alloc vol label\n"); diff --git a/patches.suse/of_platform_driver.module-owner.patch b/patches.suse/of_platform_driver.module-owner.patch deleted file mode 100644 
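
The reservation API introduced above is meant to be consumed in two steps: the allocator first asks ocfs2_resmap_resv_bits() which part of the inode's reserved window is still clear in the on-disk bitmap, runs its normal bitmap search preferring that range, and then reports whatever it actually used via ocfs2_resmap_claimed_bits() so the window can be moved or extended for the next allocation. With the default resv_level of 3 the window is 128 bits. The sketch below is illustrative only and not part of the patch; the caller name example_la_alloc(), the la_bitmap buffer and the do_bitmap_search() helper are hypothetical stand-ins for the local-alloc code that the patch wires up elsewhere.

	/* do_bitmap_search() is a stand-in for the caller's usual search. */
	extern int do_bitmap_search(char *bitmap, int hint_start, int hint_len,
				    u32 wanted, u32 *start, u32 *len);

	static int example_la_alloc(struct ocfs2_super *osb,
				    struct ocfs2_alloc_reservation *resv,
				    char *la_bitmap, u32 wanted,
				    u32 *start, u32 *len)
	{
		int ret, cstart, clen;

		/* Which reserved bits are still clear on disk? */
		ret = ocfs2_resmap_resv_bits(&osb->osb_la_resmap, resv,
					     la_bitmap, &cstart, &clen);
		if (ret < 0)
			cstart = clen = 0;	/* no usable window: plain search */

		/* Normal bitmap search; (cstart, clen) is only a hint and
		 * may be (0, 0) when no window could be found. */
		ret = do_bitmap_search(la_bitmap, cstart, clen, wanted,
				       start, len);
		if (ret)
			return ret;

		/* Always report claimed bits, whether or not they came
		 * from the reservation. */
		ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, resv,
					  *start, *len);
		return 0;
	}
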
index b87ded8..0000000 --- a/patches.suse/of_platform_driver.module-owner.patch +++ /dev/null @@ -1,1191 +0,0 @@ -Subject: add missing module symlink to /sys/bus/*/driver/* in struct of_platform_driver -Patch-mainline: Not yet -From: olh@suse.de -Patch-mainline: not yet ---- - arch/powerpc/kernel/of_platform.c | 1 + - arch/powerpc/platforms/52xx/mpc52xx_gpio.c | 2 ++ - arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 1 + - arch/powerpc/platforms/82xx/ep8248e.c | 1 + - arch/powerpc/platforms/83xx/suspend.c | 1 + - arch/powerpc/platforms/cell/axon_msi.c | 1 + - arch/powerpc/platforms/pasemi/gpio_mdio.c | 1 + - arch/powerpc/sysdev/fsl_msi.c | 1 + - arch/powerpc/sysdev/fsl_rio.c | 1 + - arch/powerpc/sysdev/pmi.c | 1 + - arch/sparc/include/asm/parport.h | 1 + - arch/sparc/kernel/apc.c | 1 + - arch/sparc/kernel/auxio_64.c | 1 + - arch/sparc/kernel/central.c | 2 ++ - arch/sparc/kernel/chmc.c | 1 + - arch/sparc/kernel/pci_fire.c | 1 + - arch/sparc/kernel/pci_psycho.c | 1 + - arch/sparc/kernel/pci_sabre.c | 1 + - arch/sparc/kernel/pci_schizo.c | 1 + - arch/sparc/kernel/pci_sun4v.c | 1 + - arch/sparc/kernel/pmc.c | 1 + - arch/sparc/kernel/power.c | 1 + - arch/sparc/kernel/time_32.c | 1 + - arch/sparc/kernel/time_64.c | 3 +++ - drivers/ata/pata_of_platform.c | 1 + - drivers/ata/sata_fsl.c | 1 + - drivers/atm/fore200e.c | 1 + - drivers/char/hw_random/n2-drv.c | 1 + - drivers/char/hw_random/pasemi-rng.c | 1 + - drivers/char/ipmi/ipmi_si_intf.c | 1 + - drivers/crypto/amcc/crypto4xx_core.c | 1 + - drivers/crypto/talitos.c | 1 + - drivers/dma/fsldma.c | 1 + - drivers/hwmon/ultra45_env.c | 1 + - drivers/i2c/busses/i2c-ibm_iic.c | 1 + - drivers/infiniband/hw/ehca/ehca_main.c | 1 + - drivers/input/misc/sparcspkr.c | 2 ++ - drivers/input/serio/i8042-sparcio.h | 1 + - drivers/input/serio/xilinx_ps2.c | 1 + - drivers/macintosh/smu.c | 1 + - drivers/macintosh/therm_pm72.c | 1 + - drivers/macintosh/therm_windtunnel.c | 1 + - drivers/mmc/host/sdhci-of-core.c | 1 + - drivers/mtd/maps/physmap_of.c | 1 + - drivers/mtd/maps/sun_uflash.c | 1 + - drivers/mtd/nand/fsl_elbc_nand.c | 1 + - drivers/mtd/nand/fsl_upm.c | 1 + - drivers/mtd/nand/ndfc.c | 1 + - drivers/mtd/nand/pasemi_nand.c | 1 + - drivers/mtd/nand/socrates_nand.c | 1 + - drivers/net/ehea/ehea_main.c | 1 + - drivers/net/fec_mpc52xx_phy.c | 1 + - drivers/net/fs_enet/fs_enet-main.c | 1 + - drivers/net/fs_enet/mii-bitbang.c | 1 + - drivers/net/fs_enet/mii-fec.c | 1 + - drivers/net/fsl_pq_mdio.c | 1 + - drivers/net/gianfar.c | 1 + - drivers/net/ibm_newemac/core.c | 1 + - drivers/net/ibm_newemac/mal.c | 1 + - drivers/net/ibm_newemac/rgmii.c | 1 + - drivers/net/ibm_newemac/tah.c | 1 + - drivers/net/ibm_newemac/zmii.c | 1 + - drivers/net/myri_sbus.c | 1 + - drivers/net/niu.c | 1 + - drivers/net/phy/mdio-gpio.c | 1 + - drivers/net/sunbmac.c | 1 + - drivers/net/sunhme.c | 1 + - drivers/net/sunlance.c | 1 + - drivers/net/sunqe.c | 1 + - drivers/net/ucc_geth.c | 1 + - drivers/parport/parport_sunbpp.c | 1 + - drivers/pcmcia/electra_cf.c | 1 + - drivers/pcmcia/m8xx_pcmcia.c | 1 + - drivers/sbus/char/bbc_i2c.c | 1 + - drivers/sbus/char/display7seg.c | 1 + - drivers/sbus/char/envctrl.c | 1 + - drivers/sbus/char/flash.c | 1 + - drivers/sbus/char/uctrl.c | 1 + - drivers/scsi/qlogicpti.c | 1 + - drivers/scsi/sun_esp.c | 1 + - drivers/serial/cpm_uart/cpm_uart_core.c | 1 + - drivers/serial/mpc52xx_uart.c | 1 + - drivers/serial/sunhv.c | 1 + - drivers/serial/sunsab.c | 1 + - drivers/serial/sunsu.c | 1 + - drivers/serial/sunzilog.c | 1 + - drivers/spi/spi_mpc8xxx.c | 1 + - 
drivers/usb/gadget/fsl_qe_udc.c | 1 + - drivers/usb/host/fhci-hcd.c | 1 + - drivers/usb/host/isp1760-if.c | 1 + - drivers/video/bw2.c | 1 + - drivers/video/cg14.c | 1 + - drivers/video/cg3.c | 1 + - drivers/video/cg6.c | 1 + - drivers/video/ffb.c | 1 + - drivers/video/leo.c | 1 + - drivers/video/p9100.c | 1 + - drivers/video/platinumfb.c | 1 + - drivers/video/tcx.c | 1 + - drivers/watchdog/cpwd.c | 1 + - drivers/watchdog/riowd.c | 1 + - sound/sparc/amd7930.c | 1 + - sound/sparc/cs4231.c | 1 + - sound/sparc/dbri.c | 1 + - 104 files changed, 109 insertions(+) - ---- a/arch/powerpc/kernel/of_platform.c -+++ b/arch/powerpc/kernel/of_platform.c -@@ -307,6 +307,7 @@ static struct of_device_id of_pci_phb_id - }; - - static struct of_platform_driver of_pci_phb_driver = { -+ .owner = THIS_MODULE, - .match_table = of_pci_phb_ids, - .probe = of_pci_phb_probe, - .driver = { ---- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c -+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c -@@ -192,6 +192,7 @@ static const struct of_device_id mpc52xx - }; - - static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = { -+ .owner = THIS_MODULE, - .name = "gpio_wkup", - .match_table = mpc52xx_wkup_gpiochip_match, - .probe = mpc52xx_wkup_gpiochip_probe, -@@ -348,6 +349,7 @@ static const struct of_device_id mpc52xx - }; - - static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { -+ .owner = THIS_MODULE, - .name = "gpio", - .match_table = mpc52xx_simple_gpiochip_match, - .probe = mpc52xx_simple_gpiochip_probe, ---- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c -+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c -@@ -783,6 +783,7 @@ static const struct of_device_id mpc52xx - }; - - static struct of_platform_driver mpc52xx_gpt_driver = { -+ .owner = THIS_MODULE, - .name = "mpc52xx-gpt", - .match_table = mpc52xx_gpt_match, - .probe = mpc52xx_gpt_probe, ---- a/arch/powerpc/platforms/82xx/ep8248e.c -+++ b/arch/powerpc/platforms/82xx/ep8248e.c -@@ -173,6 +173,7 @@ static struct of_platform_driver ep8248e - .match_table = ep8248e_mdio_match, - .probe = ep8248e_mdio_probe, - .remove = ep8248e_mdio_remove, -+ .owner = THIS_MODULE, - }; - - struct cpm_pin { ---- a/arch/powerpc/platforms/83xx/suspend.c -+++ b/arch/powerpc/platforms/83xx/suspend.c -@@ -423,6 +423,7 @@ static struct of_device_id pmc_match[] = - }; - - static struct of_platform_driver pmc_driver = { -+ .owner = THIS_MODULE, - .name = "mpc83xx-pmc", - .match_table = pmc_match, - .probe = pmc_probe, ---- a/arch/powerpc/platforms/cell/axon_msi.c -+++ b/arch/powerpc/platforms/cell/axon_msi.c -@@ -446,6 +446,7 @@ static const struct of_device_id axon_ms - }; - - static struct of_platform_driver axon_msi_driver = { -+ .owner = THIS_MODULE, - .match_table = axon_msi_device_id, - .probe = axon_msi_probe, - .shutdown = axon_msi_shutdown, ---- a/arch/powerpc/platforms/pasemi/gpio_mdio.c -+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c -@@ -300,6 +300,7 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match) - - static struct of_platform_driver gpio_mdio_driver = - { -+ .owner = THIS_MODULE, - .match_table = gpio_mdio_match, - .probe = gpio_mdio_probe, - .remove = gpio_mdio_remove, ---- a/arch/powerpc/sysdev/fsl_msi.c -+++ b/arch/powerpc/sysdev/fsl_msi.c -@@ -344,6 +344,7 @@ static const struct of_device_id fsl_of_ - }; - - static struct of_platform_driver fsl_of_msi_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-msi", - .match_table = fsl_of_msi_ids, - .probe = fsl_of_msi_probe, ---- a/arch/powerpc/sysdev/fsl_rio.c -+++ b/arch/powerpc/sysdev/fsl_rio.c -@@ -1214,6 +1214,7 @@ 
static const struct of_device_id fsl_of_ - }; - - static struct of_platform_driver fsl_of_rio_rpn_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-of-rio", - .match_table = fsl_of_rio_rpn_ids, - .probe = fsl_of_rio_rpn_probe, ---- a/arch/powerpc/sysdev/pmi.c -+++ b/arch/powerpc/sysdev/pmi.c -@@ -205,6 +205,7 @@ static int pmi_of_remove(struct of_devic - } - - static struct of_platform_driver pmi_of_platform_driver = { -+ .owner = THIS_MODULE, - .match_table = pmi_match, - .probe = pmi_of_probe, - .remove = pmi_of_remove, ---- a/arch/sparc/include/asm/parport.h -+++ b/arch/sparc/include/asm/parport.h -@@ -232,6 +232,7 @@ static const struct of_device_id ecpp_ma - }; - - static struct of_platform_driver ecpp_driver = { -+ .owner = THIS_MODULE, - .name = "ecpp", - .match_table = ecpp_match, - .probe = ecpp_probe, ---- a/arch/sparc/kernel/apc.c -+++ b/arch/sparc/kernel/apc.c -@@ -174,6 +174,7 @@ static struct of_device_id __initdata ap - MODULE_DEVICE_TABLE(of, apc_match); - - static struct of_platform_driver apc_driver = { -+ .owner = THIS_MODULE, - .name = "apc", - .match_table = apc_match, - .probe = apc_probe, ---- a/arch/sparc/kernel/auxio_64.c -+++ b/arch/sparc/kernel/auxio_64.c -@@ -132,6 +132,7 @@ static int __devinit auxio_probe(struct - } - - static struct of_platform_driver auxio_driver = { -+ .owner = THIS_MODULE, - .match_table = auxio_match, - .probe = auxio_probe, - .driver = { ---- a/arch/sparc/kernel/central.c -+++ b/arch/sparc/kernel/central.c -@@ -148,6 +148,7 @@ static struct of_device_id __initdata cl - }; - - static struct of_platform_driver clock_board_driver = { -+ .owner = THIS_MODULE, - .match_table = clock_board_match, - .probe = clock_board_probe, - .driver = { -@@ -253,6 +254,7 @@ static struct of_device_id __initdata fh - }; - - static struct of_platform_driver fhc_driver = { -+ .owner = THIS_MODULE, - .match_table = fhc_match, - .probe = fhc_probe, - .driver = { ---- a/arch/sparc/kernel/chmc.c -+++ b/arch/sparc/kernel/chmc.c -@@ -811,6 +811,7 @@ static const struct of_device_id us3mc_m - MODULE_DEVICE_TABLE(of, us3mc_match); - - static struct of_platform_driver us3mc_driver = { -+ .owner = THIS_MODULE, - .name = "us3mc", - .match_table = us3mc_match, - .probe = us3mc_probe, ---- a/arch/sparc/kernel/pci_fire.c -+++ b/arch/sparc/kernel/pci_fire.c -@@ -508,6 +508,7 @@ static struct of_device_id __initdata fi - }; - - static struct of_platform_driver fire_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = fire_match, - .probe = fire_probe, ---- a/arch/sparc/kernel/pci_psycho.c -+++ b/arch/sparc/kernel/pci_psycho.c -@@ -602,6 +602,7 @@ static struct of_device_id __initdata ps - }; - - static struct of_platform_driver psycho_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = psycho_match, - .probe = psycho_probe, ---- a/arch/sparc/kernel/pci_sabre.c -+++ b/arch/sparc/kernel/pci_sabre.c -@@ -596,6 +596,7 @@ static struct of_device_id __initdata sa - }; - - static struct of_platform_driver sabre_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = sabre_match, - .probe = sabre_probe, ---- a/arch/sparc/kernel/pci_schizo.c -+++ b/arch/sparc/kernel/pci_schizo.c -@@ -1491,6 +1491,7 @@ static struct of_device_id __initdata sc - }; - - static struct of_platform_driver schizo_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = schizo_match, - .probe = schizo_probe, ---- a/arch/sparc/kernel/pci_sun4v.c -+++ b/arch/sparc/kernel/pci_sun4v.c -@@ -1009,6 +1009,7 @@ static struct 
of_device_id __initdata pc - }; - - static struct of_platform_driver pci_sun4v_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = pci_sun4v_match, - .probe = pci_sun4v_probe, ---- a/arch/sparc/kernel/pmc.c -+++ b/arch/sparc/kernel/pmc.c -@@ -79,6 +79,7 @@ static struct of_device_id __initdata pm - MODULE_DEVICE_TABLE(of, pmc_match); - - static struct of_platform_driver pmc_driver = { -+ .owner = THIS_MODULE, - .name = "pmc", - .match_table = pmc_match, - .probe = pmc_probe, ---- a/arch/sparc/kernel/power.c -+++ b/arch/sparc/kernel/power.c -@@ -60,6 +60,7 @@ static struct of_device_id __initdata po - }; - - static struct of_platform_driver power_driver = { -+ .owner = THIS_MODULE, - .match_table = power_match, - .probe = power_probe, - .driver = { ---- a/arch/sparc/kernel/time_32.c -+++ b/arch/sparc/kernel/time_32.c -@@ -185,6 +185,7 @@ static struct of_device_id __initdata cl - }; - - static struct of_platform_driver clock_driver = { -+ .owner = THIS_MODULE, - .match_table = clock_match, - .probe = clock_probe, - .driver = { ---- a/arch/sparc/kernel/time_64.c -+++ b/arch/sparc/kernel/time_64.c -@@ -462,6 +462,7 @@ static struct of_device_id __initdata rt - }; - - static struct of_platform_driver rtc_driver = { -+ .owner = THIS_MODULE, - .match_table = rtc_match, - .probe = rtc_probe, - .driver = { -@@ -494,6 +495,7 @@ static struct of_device_id __initdata bq - }; - - static struct of_platform_driver bq4802_driver = { -+ .owner = THIS_MODULE, - .match_table = bq4802_match, - .probe = bq4802_probe, - .driver = { -@@ -557,6 +559,7 @@ static struct of_device_id __initdata mo - }; - - static struct of_platform_driver mostek_driver = { -+ .owner = THIS_MODULE, - .match_table = mostek_match, - .probe = mostek_probe, - .driver = { ---- a/drivers/ata/pata_of_platform.c -+++ b/drivers/ata/pata_of_platform.c -@@ -91,6 +91,7 @@ static struct of_device_id pata_of_platf - MODULE_DEVICE_TABLE(of, pata_of_platform_match); - - static struct of_platform_driver pata_of_platform_driver = { -+ .owner = THIS_MODULE, - .name = "pata_of_platform", - .match_table = pata_of_platform_match, - .probe = pata_of_platform_probe, ---- a/drivers/ata/sata_fsl.c -+++ b/drivers/ata/sata_fsl.c -@@ -1426,6 +1426,7 @@ static struct of_device_id fsl_sata_matc - MODULE_DEVICE_TABLE(of, fsl_sata_match); - - static struct of_platform_driver fsl_sata_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-sata", - .match_table = fsl_sata_match, - .probe = sata_fsl_probe, ---- a/drivers/atm/fore200e.c -+++ b/drivers/atm/fore200e.c -@@ -2693,6 +2693,7 @@ static const struct of_device_id fore200 - MODULE_DEVICE_TABLE(of, fore200e_sba_match); - - static struct of_platform_driver fore200e_sba_driver = { -+ .owner = THIS_MODULE, - .name = "fore_200e", - .match_table = fore200e_sba_match, - .probe = fore200e_sba_probe, ---- a/drivers/char/hw_random/n2-drv.c -+++ b/drivers/char/hw_random/n2-drv.c -@@ -751,6 +751,7 @@ static const struct of_device_id n2rng_m - MODULE_DEVICE_TABLE(of, n2rng_match); - - static struct of_platform_driver n2rng_driver = { -+ .owner = THIS_MODULE, - .name = "n2rng", - .match_table = n2rng_match, - .probe = n2rng_probe, ---- a/drivers/char/hw_random/pasemi-rng.c -+++ b/drivers/char/hw_random/pasemi-rng.c -@@ -140,6 +140,7 @@ static struct of_device_id rng_match[] = - }; - - static struct of_platform_driver rng_driver = { -+ .owner = THIS_MODULE, - .name = "pasemi-rng", - .match_table = rng_match, - .probe = rng_probe, ---- a/drivers/char/ipmi/ipmi_si_intf.c -+++ 
b/drivers/char/ipmi/ipmi_si_intf.c -@@ -2555,6 +2555,7 @@ static struct of_device_id ipmi_match[] - }; - - static struct of_platform_driver ipmi_of_platform_driver = { -+ .owner = THIS_MODULE, - .name = "ipmi", - .match_table = ipmi_match, - .probe = ipmi_of_probe, ---- a/drivers/crypto/amcc/crypto4xx_core.c -+++ b/drivers/crypto/amcc/crypto4xx_core.c -@@ -1280,6 +1280,7 @@ static const struct of_device_id crypto4 - }; - - static struct of_platform_driver crypto4xx_driver = { -+ .owner = THIS_MODULE, - .name = "crypto4xx", - .match_table = crypto4xx_match, - .probe = crypto4xx_probe, ---- a/drivers/crypto/talitos.c -+++ b/drivers/crypto/talitos.c -@@ -1967,6 +1967,7 @@ static const struct of_device_id talitos - MODULE_DEVICE_TABLE(of, talitos_match); - - static struct of_platform_driver talitos_driver = { -+ .owner = THIS_MODULE, - .name = "talitos", - .match_table = talitos_match, - .probe = talitos_probe, ---- a/drivers/dma/fsldma.c -+++ b/drivers/dma/fsldma.c -@@ -1408,6 +1408,7 @@ static const struct of_device_id fsldma_ - }; - - static struct of_platform_driver fsldma_of_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-elo-dma", - .match_table = fsldma_of_ids, - .probe = fsldma_of_probe, ---- a/drivers/hwmon/ultra45_env.c -+++ b/drivers/hwmon/ultra45_env.c -@@ -300,6 +300,7 @@ static const struct of_device_id env_mat - MODULE_DEVICE_TABLE(of, env_match); - - static struct of_platform_driver env_driver = { -+ .owner = THIS_MODULE, - .name = "ultra45_env", - .match_table = env_match, - .probe = env_probe, ---- a/drivers/i2c/busses/i2c-ibm_iic.c -+++ b/drivers/i2c/busses/i2c-ibm_iic.c -@@ -807,6 +807,7 @@ static const struct of_device_id ibm_iic - }; - - static struct of_platform_driver ibm_iic_driver = { -+ .owner = THIS_MODULE, - .name = "ibm-iic", - .match_table = ibm_iic_match, - .probe = iic_probe, ---- a/drivers/infiniband/hw/ehca/ehca_main.c -+++ b/drivers/infiniband/hw/ehca/ehca_main.c -@@ -937,6 +937,7 @@ MODULE_DEVICE_TABLE(of, ehca_device_tabl - - static struct of_platform_driver ehca_driver = { - .name = "ehca", -+ .owner = THIS_MODULE, - .match_table = ehca_device_table, - .probe = ehca_probe, - .remove = ehca_remove, ---- a/drivers/input/misc/sparcspkr.c -+++ b/drivers/input/misc/sparcspkr.c -@@ -258,6 +258,7 @@ static const struct of_device_id bbc_bee - }; - - static struct of_platform_driver bbc_beep_driver = { -+ .owner = THIS_MODULE, - .name = "bbcbeep", - .match_table = bbc_beep_match, - .probe = bbc_beep_probe, -@@ -337,6 +338,7 @@ static const struct of_device_id grover_ - }; - - static struct of_platform_driver grover_beep_driver = { -+ .owner = THIS_MODULE, - .name = "groverbeep", - .match_table = grover_beep_match, - .probe = grover_beep_probe, ---- a/drivers/input/serio/i8042-sparcio.h -+++ b/drivers/input/serio/i8042-sparcio.h -@@ -96,6 +96,7 @@ static const struct of_device_id sparc_i - MODULE_DEVICE_TABLE(of, sparc_i8042_match); - - static struct of_platform_driver sparc_i8042_driver = { -+ .owner = THIS_MODULE, - .name = "i8042", - .match_table = sparc_i8042_match, - .probe = sparc_i8042_probe, ---- a/drivers/input/serio/xilinx_ps2.c -+++ b/drivers/input/serio/xilinx_ps2.c -@@ -361,6 +361,7 @@ static const struct of_device_id xps2_of - MODULE_DEVICE_TABLE(of, xps2_of_match); - - static struct of_platform_driver xps2_of_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = xps2_of_match, - .probe = xps2_of_probe, ---- a/drivers/macintosh/smu.c -+++ b/drivers/macintosh/smu.c -@@ -670,6 +670,7 @@ static const struct of_device_id smu_pla - 
- static struct of_platform_driver smu_of_platform_driver = - { -+ .owner = THIS_MODULE, - .name = "smu", - .match_table = smu_platform_match, - .probe = smu_platform_probe, ---- a/drivers/macintosh/therm_pm72.c -+++ b/drivers/macintosh/therm_pm72.c -@@ -2239,6 +2239,7 @@ static const struct of_device_id fcu_mat - - static struct of_platform_driver fcu_of_platform_driver = - { -+ .owner = THIS_MODULE, - .name = "temperature", - .match_table = fcu_match, - .probe = fcu_of_probe, ---- a/drivers/macintosh/therm_windtunnel.c -+++ b/drivers/macintosh/therm_windtunnel.c -@@ -464,6 +464,7 @@ static const struct of_device_id therm_o - }; - - static struct of_platform_driver therm_of_driver = { -+ .owner = THIS_MODULE, - .name = "temperature", - .match_table = therm_of_match, - .probe = therm_of_probe, ---- a/drivers/mmc/host/sdhci-of-core.c -+++ b/drivers/mmc/host/sdhci-of-core.c -@@ -205,6 +205,7 @@ static const struct of_device_id sdhci_o - MODULE_DEVICE_TABLE(of, sdhci_of_match); - - static struct of_platform_driver sdhci_of_driver = { -+ .owner = THIS_MODULE, - .driver.name = "sdhci-of", - .match_table = sdhci_of_match, - .probe = sdhci_of_probe, ---- a/drivers/mtd/maps/physmap_of.c -+++ b/drivers/mtd/maps/physmap_of.c -@@ -374,6 +374,7 @@ static struct of_device_id of_flash_matc - MODULE_DEVICE_TABLE(of, of_flash_match); - - static struct of_platform_driver of_flash_driver = { -+ .owner = THIS_MODULE, - .name = "of-flash", - .match_table = of_flash_match, - .probe = of_flash_probe, ---- a/drivers/mtd/maps/sun_uflash.c -+++ b/drivers/mtd/maps/sun_uflash.c -@@ -148,6 +148,7 @@ static const struct of_device_id uflash_ - MODULE_DEVICE_TABLE(of, uflash_match); - - static struct of_platform_driver uflash_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = uflash_match, - .probe = uflash_probe, ---- a/drivers/mtd/nand/fsl_elbc_nand.c -+++ b/drivers/mtd/nand/fsl_elbc_nand.c -@@ -1079,6 +1079,7 @@ static const struct of_device_id fsl_elb - static struct of_platform_driver fsl_elbc_ctrl_driver = { - .driver = { - .name = "fsl-elbc", -+ .owner = THIS_MODULE, - }, - .match_table = fsl_elbc_match, - .probe = fsl_elbc_ctrl_probe, ---- a/drivers/mtd/nand/fsl_upm.c -+++ b/drivers/mtd/nand/fsl_upm.c -@@ -356,6 +356,7 @@ static struct of_device_id of_fun_match[ - MODULE_DEVICE_TABLE(of, of_fun_match); - - static struct of_platform_driver of_fun_driver = { -+ .owner = THIS_MODULE, - .name = "fsl,upm-nand", - .match_table = of_fun_match, - .probe = fun_probe, ---- a/drivers/mtd/nand/ndfc.c -+++ b/drivers/mtd/nand/ndfc.c -@@ -292,6 +292,7 @@ static const struct of_device_id ndfc_ma - MODULE_DEVICE_TABLE(of, ndfc_match); - - static struct of_platform_driver ndfc_driver = { -+ .owner = THIS_MODULE, - .driver = { - .name = "ndfc", - }, ---- a/drivers/mtd/nand/pasemi_nand.c -+++ b/drivers/mtd/nand/pasemi_nand.c -@@ -221,6 +221,7 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_matc - - static struct of_platform_driver pasemi_nand_driver = - { -+ .owner = THIS_MODULE, - .name = (char*)driver_name, - .match_table = pasemi_nand_match, - .probe = pasemi_nand_probe, ---- a/drivers/mtd/nand/socrates_nand.c -+++ b/drivers/mtd/nand/socrates_nand.c -@@ -301,6 +301,7 @@ static struct of_device_id socrates_nand - MODULE_DEVICE_TABLE(of, socrates_nand_match); - - static struct of_platform_driver socrates_nand_driver = { -+ .owner = THIS_MODULE, - .name = "socrates_nand", - .match_table = socrates_nand_match, - .probe = socrates_nand_probe, ---- a/drivers/net/ehea/ehea_main.c -+++ b/drivers/net/ehea/ehea_main.c -@@ 
-122,6 +122,7 @@ MODULE_DEVICE_TABLE(of, ehea_device_tabl - - static struct of_platform_driver ehea_driver = { - .name = "ehea", -+ .owner = THIS_MODULE, - .match_table = ehea_device_table, - .probe = ehea_probe_adapter, - .remove = ehea_remove, ---- a/drivers/net/fec_mpc52xx_phy.c -+++ b/drivers/net/fec_mpc52xx_phy.c -@@ -158,6 +158,7 @@ static struct of_device_id mpc52xx_fec_m - MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); - - struct of_platform_driver mpc52xx_fec_mdio_driver = { -+ .owner = THIS_MODULE, - .name = "mpc5200b-fec-phy", - .probe = mpc52xx_fec_mdio_probe, - .remove = mpc52xx_fec_mdio_remove, ---- a/drivers/net/fs_enet/fs_enet-main.c -+++ b/drivers/net/fs_enet/fs_enet-main.c -@@ -1158,6 +1158,7 @@ static struct of_device_id fs_enet_match - MODULE_DEVICE_TABLE(of, fs_enet_match); - - static struct of_platform_driver fs_enet_driver = { -+ .owner = THIS_MODULE, - .name = "fs_enet", - .match_table = fs_enet_match, - .probe = fs_enet_probe, ---- a/drivers/net/fs_enet/mii-bitbang.c -+++ b/drivers/net/fs_enet/mii-bitbang.c -@@ -224,6 +224,7 @@ static struct of_device_id fs_enet_mdio_ - MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); - - static struct of_platform_driver fs_enet_bb_mdio_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-bb-mdio", - .match_table = fs_enet_mdio_bb_match, - .probe = fs_enet_mdio_probe, ---- a/drivers/net/fs_enet/mii-fec.c -+++ b/drivers/net/fs_enet/mii-fec.c -@@ -222,6 +222,7 @@ static struct of_device_id fs_enet_mdio_ - MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); - - static struct of_platform_driver fs_enet_fec_mdio_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-fec-mdio", - .match_table = fs_enet_mdio_fec_match, - .probe = fs_enet_mdio_probe, ---- a/drivers/net/fsl_pq_mdio.c -+++ b/drivers/net/fsl_pq_mdio.c -@@ -461,6 +461,7 @@ static struct of_device_id fsl_pq_mdio_m - MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); - - static struct of_platform_driver fsl_pq_mdio_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-pq_mdio", - .probe = fsl_pq_mdio_probe, - .remove = fsl_pq_mdio_remove, ---- a/drivers/net/gianfar.c -+++ b/drivers/net/gianfar.c -@@ -3053,6 +3053,7 @@ MODULE_DEVICE_TABLE(of, gfar_match); - - /* Structure for a device driver */ - static struct of_platform_driver gfar_driver = { -+ .owner = THIS_MODULE, - .name = "fsl-gianfar", - .match_table = gfar_match, - ---- a/drivers/net/ibm_newemac/core.c -+++ b/drivers/net/ibm_newemac/core.c -@@ -2994,6 +2994,7 @@ static struct of_device_id emac_match[] - MODULE_DEVICE_TABLE(of, emac_match); - - static struct of_platform_driver emac_driver = { -+ .owner = THIS_MODULE, - .name = "emac", - .match_table = emac_match, - ---- a/drivers/net/ibm_newemac/mal.c -+++ b/drivers/net/ibm_newemac/mal.c -@@ -789,6 +789,7 @@ static struct of_device_id mal_platform_ - }; - - static struct of_platform_driver mal_of_driver = { -+ .owner = THIS_MODULE, - .name = "mcmal", - .match_table = mal_platform_match, - ---- a/drivers/net/ibm_newemac/rgmii.c -+++ b/drivers/net/ibm_newemac/rgmii.c -@@ -318,6 +318,7 @@ static struct of_device_id rgmii_match[] - }; - - static struct of_platform_driver rgmii_driver = { -+ .owner = THIS_MODULE, - .name = "emac-rgmii", - .match_table = rgmii_match, - ---- a/drivers/net/ibm_newemac/tah.c -+++ b/drivers/net/ibm_newemac/tah.c -@@ -165,6 +165,7 @@ static struct of_device_id tah_match[] = - }; - - static struct of_platform_driver tah_driver = { -+ .owner = THIS_MODULE, - .name = "emac-tah", - .match_table = tah_match, - ---- a/drivers/net/ibm_newemac/zmii.c -+++ 
b/drivers/net/ibm_newemac/zmii.c -@@ -311,6 +311,7 @@ static struct of_device_id zmii_match[] - }; - - static struct of_platform_driver zmii_driver = { -+ .owner = THIS_MODULE, - .name = "emac-zmii", - .match_table = zmii_match, - ---- a/drivers/net/myri_sbus.c -+++ b/drivers/net/myri_sbus.c -@@ -1161,6 +1161,7 @@ static const struct of_device_id myri_sb - MODULE_DEVICE_TABLE(of, myri_sbus_match); - - static struct of_platform_driver myri_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "myri", - .match_table = myri_sbus_match, - .probe = myri_sbus_probe, ---- a/drivers/net/niu.c -+++ b/drivers/net/niu.c -@@ -10206,6 +10206,7 @@ static const struct of_device_id niu_mat - MODULE_DEVICE_TABLE(of, niu_match); - - static struct of_platform_driver niu_of_driver = { -+ .owner = THIS_MODULE, - .name = "niu", - .match_table = niu_match, - .probe = niu_of_probe, ---- a/drivers/net/phy/mdio-gpio.c -+++ b/drivers/net/phy/mdio-gpio.c -@@ -241,6 +241,7 @@ static struct of_device_id mdio_ofgpio_m - MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); - - static struct of_platform_driver mdio_ofgpio_driver = { -+ .owner = THIS_MODULE, - .name = "mdio-gpio", - .match_table = mdio_ofgpio_match, - .probe = mdio_ofgpio_probe, ---- a/drivers/net/sunbmac.c -+++ b/drivers/net/sunbmac.c -@@ -1292,6 +1292,7 @@ static const struct of_device_id bigmac_ - MODULE_DEVICE_TABLE(of, bigmac_sbus_match); - - static struct of_platform_driver bigmac_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "sunbmac", - .match_table = bigmac_sbus_match, - .probe = bigmac_sbus_probe, ---- a/drivers/net/sunhme.c -+++ b/drivers/net/sunhme.c -@@ -3295,6 +3295,7 @@ static const struct of_device_id hme_sbu - MODULE_DEVICE_TABLE(of, hme_sbus_match); - - static struct of_platform_driver hme_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "hme", - .match_table = hme_sbus_match, - .probe = hme_sbus_probe, ---- a/drivers/net/sunlance.c -+++ b/drivers/net/sunlance.c -@@ -1546,6 +1546,7 @@ static const struct of_device_id sunlanc - MODULE_DEVICE_TABLE(of, sunlance_sbus_match); - - static struct of_platform_driver sunlance_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "sunlance", - .match_table = sunlance_sbus_match, - .probe = sunlance_sbus_probe, ---- a/drivers/net/sunqe.c -+++ b/drivers/net/sunqe.c -@@ -978,6 +978,7 @@ static const struct of_device_id qec_sbu - MODULE_DEVICE_TABLE(of, qec_sbus_match); - - static struct of_platform_driver qec_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "qec", - .match_table = qec_sbus_match, - .probe = qec_sbus_probe, ---- a/drivers/net/ucc_geth.c -+++ b/drivers/net/ucc_geth.c -@@ -3965,6 +3965,7 @@ static struct of_device_id ucc_geth_matc - MODULE_DEVICE_TABLE(of, ucc_geth_match); - - static struct of_platform_driver ucc_geth_driver = { -+ .owner = THIS_MODULE, - .name = DRV_NAME, - .match_table = ucc_geth_match, - .probe = ucc_geth_probe, ---- a/drivers/parport/parport_sunbpp.c -+++ b/drivers/parport/parport_sunbpp.c -@@ -382,6 +382,7 @@ static const struct of_device_id bpp_mat - MODULE_DEVICE_TABLE(of, bpp_match); - - static struct of_platform_driver bpp_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "bpp", - .match_table = bpp_match, - .probe = bpp_probe, ---- a/drivers/pcmcia/electra_cf.c -+++ b/drivers/pcmcia/electra_cf.c -@@ -356,6 +356,7 @@ static const struct of_device_id electra - MODULE_DEVICE_TABLE(of, electra_cf_match); - - static struct of_platform_driver electra_cf_driver = { -+ .owner = THIS_MODULE, - .name = (char *)driver_name, - .match_table = electra_cf_match, - .probe = 
electra_cf_probe, ---- a/drivers/pcmcia/m8xx_pcmcia.c -+++ b/drivers/pcmcia/m8xx_pcmcia.c -@@ -1314,6 +1314,7 @@ static const struct of_device_id m8xx_pc - MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match); - - static struct of_platform_driver m8xx_pcmcia_driver = { -+ .owner = THIS_MODULE, - .name = driver_name, - .match_table = m8xx_pcmcia_match, - .probe = m8xx_probe, ---- a/drivers/sbus/char/bbc_i2c.c -+++ b/drivers/sbus/char/bbc_i2c.c -@@ -414,6 +414,7 @@ static const struct of_device_id bbc_i2c - MODULE_DEVICE_TABLE(of, bbc_i2c_match); - - static struct of_platform_driver bbc_i2c_driver = { -+ .owner = THIS_MODULE, - .name = "bbc_i2c", - .match_table = bbc_i2c_match, - .probe = bbc_i2c_probe, ---- a/drivers/sbus/char/display7seg.c -+++ b/drivers/sbus/char/display7seg.c -@@ -265,6 +265,7 @@ static const struct of_device_id d7s_mat - MODULE_DEVICE_TABLE(of, d7s_match); - - static struct of_platform_driver d7s_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = d7s_match, - .probe = d7s_probe, ---- a/drivers/sbus/char/envctrl.c -+++ b/drivers/sbus/char/envctrl.c -@@ -1130,6 +1130,7 @@ static const struct of_device_id envctrl - MODULE_DEVICE_TABLE(of, envctrl_match); - - static struct of_platform_driver envctrl_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = envctrl_match, - .probe = envctrl_probe, ---- a/drivers/sbus/char/flash.c -+++ b/drivers/sbus/char/flash.c -@@ -208,6 +208,7 @@ static const struct of_device_id flash_m - MODULE_DEVICE_TABLE(of, flash_match); - - static struct of_platform_driver flash_driver = { -+ .owner = THIS_MODULE, - .name = "flash", - .match_table = flash_match, - .probe = flash_probe, ---- a/drivers/sbus/char/uctrl.c -+++ b/drivers/sbus/char/uctrl.c -@@ -425,6 +425,7 @@ static const struct of_device_id uctrl_m - MODULE_DEVICE_TABLE(of, uctrl_match); - - static struct of_platform_driver uctrl_driver = { -+ .owner = THIS_MODULE, - .name = "uctrl", - .match_table = uctrl_match, - .probe = uctrl_probe, ---- a/drivers/scsi/qlogicpti.c -+++ b/drivers/scsi/qlogicpti.c -@@ -1456,6 +1456,7 @@ static const struct of_device_id qpti_ma - MODULE_DEVICE_TABLE(of, qpti_match); - - static struct of_platform_driver qpti_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "qpti", - .match_table = qpti_match, - .probe = qpti_sbus_probe, ---- a/drivers/scsi/sun_esp.c -+++ b/drivers/scsi/sun_esp.c -@@ -632,6 +632,7 @@ static const struct of_device_id esp_mat - MODULE_DEVICE_TABLE(of, esp_match); - - static struct of_platform_driver esp_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "esp", - .match_table = esp_match, - .probe = esp_sbus_probe, ---- a/drivers/serial/cpm_uart/cpm_uart_core.c -+++ b/drivers/serial/cpm_uart/cpm_uart_core.c -@@ -1372,6 +1372,7 @@ static struct of_device_id cpm_uart_matc - }; - - static struct of_platform_driver cpm_uart_driver = { -+ .owner = THIS_MODULE, - .name = "cpm_uart", - .match_table = cpm_uart_match, - .probe = cpm_uart_probe, ---- a/drivers/serial/mpc52xx_uart.c -+++ b/drivers/serial/mpc52xx_uart.c -@@ -1464,6 +1464,7 @@ mpc52xx_uart_of_enumerate(void) - MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match); - - static struct of_platform_driver mpc52xx_uart_of_driver = { -+ .owner = THIS_MODULE, - .match_table = mpc52xx_uart_of_match, - .probe = mpc52xx_uart_of_probe, - .remove = mpc52xx_uart_of_remove, ---- a/drivers/serial/sunhv.c -+++ b/drivers/serial/sunhv.c -@@ -630,6 +630,7 @@ static const struct of_device_id hv_matc - MODULE_DEVICE_TABLE(of, hv_match); - - static struct of_platform_driver 
hv_driver = { -+ .owner = THIS_MODULE, - .name = "hv", - .match_table = hv_match, - .probe = hv_probe, ---- a/drivers/serial/sunsab.c -+++ b/drivers/serial/sunsab.c -@@ -1093,6 +1093,7 @@ static const struct of_device_id sab_mat - MODULE_DEVICE_TABLE(of, sab_match); - - static struct of_platform_driver sab_driver = { -+ .owner = THIS_MODULE, - .name = "sab", - .match_table = sab_match, - .probe = sab_probe, ---- a/drivers/serial/sunsu.c -+++ b/drivers/serial/sunsu.c -@@ -1536,6 +1536,7 @@ static const struct of_device_id su_matc - MODULE_DEVICE_TABLE(of, su_match); - - static struct of_platform_driver su_driver = { -+ .owner = THIS_MODULE, - .name = "su", - .match_table = su_match, - .probe = su_probe, ---- a/drivers/serial/sunzilog.c -+++ b/drivers/serial/sunzilog.c -@@ -1491,6 +1491,7 @@ static const struct of_device_id zs_matc - MODULE_DEVICE_TABLE(of, zs_match); - - static struct of_platform_driver zs_driver = { -+ .owner = THIS_MODULE, - .name = "zs", - .match_table = zs_match, - .probe = zs_probe, ---- a/drivers/spi/spi_mpc8xxx.c -+++ b/drivers/spi/spi_mpc8xxx.c -@@ -1311,6 +1311,7 @@ static const struct of_device_id of_mpc8 - MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); - - static struct of_platform_driver of_mpc8xxx_spi_driver = { -+ .owner = THIS_MODULE, - .name = "mpc8xxx_spi", - .match_table = of_mpc8xxx_spi_match, - .probe = of_mpc8xxx_spi_probe, ---- a/drivers/usb/gadget/fsl_qe_udc.c -+++ b/drivers/usb/gadget/fsl_qe_udc.c -@@ -2768,6 +2768,7 @@ static const struct of_device_id qe_udc_ - MODULE_DEVICE_TABLE(of, qe_udc_match); - - static struct of_platform_driver udc_driver = { -+ .owner = THIS_MODULE, - .name = (char *)driver_name, - .match_table = qe_udc_match, - .probe = qe_udc_probe, ---- a/drivers/usb/host/fhci-hcd.c -+++ b/drivers/usb/host/fhci-hcd.c -@@ -812,6 +812,7 @@ static const struct of_device_id of_fhci - MODULE_DEVICE_TABLE(of, of_fhci_match); - - static struct of_platform_driver of_fhci_driver = { -+ .owner = THIS_MODULE, - .name = "fsl,usb-fhci", - .match_table = of_fhci_match, - .probe = of_fhci_probe, ---- a/drivers/usb/host/isp1760-if.c -+++ b/drivers/usb/host/isp1760-if.c -@@ -121,6 +121,7 @@ static const struct of_device_id of_isp1 - MODULE_DEVICE_TABLE(of, of_isp1760_match); - - static struct of_platform_driver isp1760_of_driver = { -+ .owner = THIS_MODULE, - .name = "nxp-isp1760", - .match_table = of_isp1760_match, - .probe = of_isp1760_probe, ---- a/drivers/video/bw2.c -+++ b/drivers/video/bw2.c -@@ -377,6 +377,7 @@ static const struct of_device_id bw2_mat - MODULE_DEVICE_TABLE(of, bw2_match); - - static struct of_platform_driver bw2_driver = { -+ .owner = THIS_MODULE, - .name = "bw2", - .match_table = bw2_match, - .probe = bw2_probe, ---- a/drivers/video/cg14.c -+++ b/drivers/video/cg14.c -@@ -597,6 +597,7 @@ static const struct of_device_id cg14_ma - MODULE_DEVICE_TABLE(of, cg14_match); - - static struct of_platform_driver cg14_driver = { -+ .owner = THIS_MODULE, - .name = "cg14", - .match_table = cg14_match, - .probe = cg14_probe, ---- a/drivers/video/cg3.c -+++ b/drivers/video/cg3.c -@@ -464,6 +464,7 @@ static const struct of_device_id cg3_mat - MODULE_DEVICE_TABLE(of, cg3_match); - - static struct of_platform_driver cg3_driver = { -+ .owner = THIS_MODULE, - .name = "cg3", - .match_table = cg3_match, - .probe = cg3_probe, ---- a/drivers/video/cg6.c -+++ b/drivers/video/cg6.c -@@ -857,6 +857,7 @@ static const struct of_device_id cg6_mat - MODULE_DEVICE_TABLE(of, cg6_match); - - static struct of_platform_driver cg6_driver = { -+ .owner = THIS_MODULE, 
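
Every hunk in this patch makes the same one-line change: each struct of_platform_driver gains an explicit .owner = THIS_MODULE so the driver core can create the module symlink under /sys/bus/*/drivers/<name>/, as the patch subject states. A minimal sketch of the resulting initializer for a hypothetical "foo" driver (foo_match, foo_probe and foo_remove are placeholders) looks like this:

	static struct of_platform_driver foo_driver = {
		.owner		= THIS_MODULE,
		.name		= "foo",
		.match_table	= foo_match,
		.probe		= foo_probe,
		.remove		= foo_remove,
	};
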
- .name = "cg6", - .match_table = cg6_match, - .probe = cg6_probe, ---- a/drivers/video/ffb.c -+++ b/drivers/video/ffb.c -@@ -1054,6 +1054,7 @@ static const struct of_device_id ffb_mat - MODULE_DEVICE_TABLE(of, ffb_match); - - static struct of_platform_driver ffb_driver = { -+ .owner = THIS_MODULE, - .name = "ffb", - .match_table = ffb_match, - .probe = ffb_probe, ---- a/drivers/video/leo.c -+++ b/drivers/video/leo.c -@@ -664,6 +664,7 @@ static const struct of_device_id leo_mat - MODULE_DEVICE_TABLE(of, leo_match); - - static struct of_platform_driver leo_driver = { -+ .owner = THIS_MODULE, - .name = "leo", - .match_table = leo_match, - .probe = leo_probe, ---- a/drivers/video/p9100.c -+++ b/drivers/video/p9100.c -@@ -354,6 +354,7 @@ static const struct of_device_id p9100_m - MODULE_DEVICE_TABLE(of, p9100_match); - - static struct of_platform_driver p9100_driver = { -+ .owner = THIS_MODULE, - .name = "p9100", - .match_table = p9100_match, - .probe = p9100_probe, ---- a/drivers/video/platinumfb.c -+++ b/drivers/video/platinumfb.c -@@ -680,6 +680,7 @@ static struct of_device_id platinumfb_ma - - static struct of_platform_driver platinum_driver = - { -+ .owner = THIS_MODULE, - .name = "platinumfb", - .match_table = platinumfb_match, - .probe = platinumfb_probe, ---- a/drivers/video/tcx.c -+++ b/drivers/video/tcx.c -@@ -513,6 +513,7 @@ static const struct of_device_id tcx_mat - MODULE_DEVICE_TABLE(of, tcx_match); - - static struct of_platform_driver tcx_driver = { -+ .owner = THIS_MODULE, - .name = "tcx", - .match_table = tcx_match, - .probe = tcx_probe, ---- a/drivers/watchdog/cpwd.c -+++ b/drivers/watchdog/cpwd.c -@@ -676,6 +676,7 @@ static const struct of_device_id cpwd_ma - MODULE_DEVICE_TABLE(of, cpwd_match); - - static struct of_platform_driver cpwd_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = cpwd_match, - .probe = cpwd_probe, ---- a/drivers/watchdog/riowd.c -+++ b/drivers/watchdog/riowd.c -@@ -238,6 +238,7 @@ static const struct of_device_id riowd_m - MODULE_DEVICE_TABLE(of, riowd_match); - - static struct of_platform_driver riowd_driver = { -+ .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = riowd_match, - .probe = riowd_probe, ---- a/sound/sparc/amd7930.c -+++ b/sound/sparc/amd7930.c -@@ -1065,6 +1065,7 @@ static const struct of_device_id amd7930 - }; - - static struct of_platform_driver amd7930_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "audio", - .match_table = amd7930_match, - .probe = amd7930_sbus_probe, ---- a/sound/sparc/cs4231.c -+++ b/sound/sparc/cs4231.c -@@ -2110,6 +2110,7 @@ static const struct of_device_id cs4231_ - MODULE_DEVICE_TABLE(of, cs4231_match); - - static struct of_platform_driver cs4231_driver = { -+ .owner = THIS_MODULE, - .name = "audio", - .match_table = cs4231_match, - .probe = cs4231_probe, ---- a/sound/sparc/dbri.c -+++ b/sound/sparc/dbri.c -@@ -2686,6 +2686,7 @@ static const struct of_device_id dbri_ma - MODULE_DEVICE_TABLE(of, dbri_match); - - static struct of_platform_driver dbri_sbus_driver = { -+ .owner = THIS_MODULE, - .name = "dbri", - .match_table = dbri_match, - .probe = dbri_probe, diff --git a/patches.suse/parser-match_string.diff b/patches.suse/parser-match_string.diff deleted file mode 100644 index 82d779e..0000000 --- a/patches.suse/parser-match_string.diff +++ /dev/null @@ -1,55 +0,0 @@ -From: Andreas Gruenbacher -Subject: Add match_string() for mount option parsing -References: FATE301275 -Patch-mainline: no - -The match_string() function allows to parse string constants in -mount 
options. - -Signed-off-by: Andreas Gruenbacher - ---- - include/linux/parser.h | 1 + - lib/parser.c | 14 ++++++++++++++ - 2 files changed, 15 insertions(+) - ---- a/include/linux/parser.h -+++ b/include/linux/parser.h -@@ -26,6 +26,7 @@ typedef struct { - } substring_t; - - int match_token(char *, const match_table_t table, substring_t args[]); -+int match_string(substring_t *s, const char *str); - int match_int(substring_t *, int *result); - int match_octal(substring_t *, int *result); - int match_hex(substring_t *, int *result); ---- a/lib/parser.c -+++ b/lib/parser.c -@@ -114,6 +114,19 @@ int match_token(char *s, const match_tab - } - - /** -+ * match_string: check for a particular parameter -+ * @s: substring to be scanned -+ * @str: string to scan for -+ * -+ * Description: Return if a &substring_t is equal to string @str. -+ */ -+int match_string(substring_t *s, const char *str) -+{ -+ return strlen(str) == s->to - s->from && -+ !memcmp(str, s->from, s->to - s->from); -+} -+ -+/** - * match_number: scan a number in the given base from a substring_t - * @s: substring to be scanned - * @result: resulting integer on success -@@ -224,6 +237,7 @@ char *match_strdup(const substring_t *s) - } - - EXPORT_SYMBOL(match_token); -+EXPORT_SYMBOL(match_string); - EXPORT_SYMBOL(match_int); - EXPORT_SYMBOL(match_octal); - EXPORT_SYMBOL(match_hex); diff --git a/patches.suse/ppc-powerbook-usb-fn-key-default.patch b/patches.suse/ppc-powerbook-usb-fn-key-default.patch index 10bc083..f6c9901 100644 --- a/patches.suse/ppc-powerbook-usb-fn-key-default.patch +++ b/patches.suse/ppc-powerbook-usb-fn-key-default.patch @@ -21,7 +21,7 @@ Patch-mainline: not yet --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c -@@ -35,7 +35,7 @@ +@@ -36,7 +36,7 @@ #define APPLE_FLAG_FKEY 0x01 diff --git a/patches.suse/radeon-monitor-jsxx-quirk.patch b/patches.suse/radeon-monitor-jsxx-quirk.patch index cb3533d..a809eed 100644 --- a/patches.suse/radeon-monitor-jsxx-quirk.patch +++ b/patches.suse/radeon-monitor-jsxx-quirk.patch @@ -14,7 +14,7 @@ Signed-off-by: Olaf Hering --- a/drivers/video/aty/radeon_monitor.c +++ b/drivers/video/aty/radeon_monitor.c -@@ -727,6 +727,25 @@ static void radeon_videomode_to_var(stru +@@ -730,6 +730,25 @@ static void radeon_videomode_to_var(stru var->vmode = mode->vmode; } @@ -40,7 +40,7 @@ Signed-off-by: Olaf Hering /* * Build the modedb for head 1 (head 2 will come later), check panel infos * from either BIOS or EDID, and pick up the default mode -@@ -862,6 +881,22 @@ void __devinit radeon_check_modes(struct +@@ -865,6 +884,22 @@ void __devinit radeon_check_modes(struct has_default_mode = 1; } diff --git a/patches.suse/raw_device_max_minors_param.diff b/patches.suse/raw_device_max_minors_param.diff index 9b66773..4d2cb1e 100644 --- a/patches.suse/raw_device_max_minors_param.diff +++ b/patches.suse/raw_device_max_minors_param.diff @@ -16,7 +16,7 @@ Signed-off-by: Jan Kara --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig -@@ -1036,7 +1036,7 @@ config RAW_DRIVER +@@ -1059,7 +1059,7 @@ config RAW_DRIVER with the O_DIRECT flag. config MAX_RAW_DEVS @@ -27,15 +27,15 @@ Signed-off-by: Jan Kara help --- a/drivers/char/raw.c +++ b/drivers/char/raw.c -@@ -20,6 +20,7 @@ +@@ -21,6 +21,7 @@ #include - #include #include + #include +#include #include -@@ -29,10 +30,15 @@ struct raw_device_data { +@@ -30,10 +31,15 @@ struct raw_device_data { }; static struct class *raw_class; @@ -52,16 +52,16 @@ Signed-off-by: Jan Kara /* * Open/close code for raw IO. 
* -@@ -158,7 +164,7 @@ static int raw_ctl_ioctl(struct inode *i - goto out; - } +@@ -131,7 +137,7 @@ static int bind_set(int number, u64 majo + struct raw_device_data *rawdev; + int err = 0; -- if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) { -+ if (rq.raw_minor <= 0 || rq.raw_minor >= max_raw_minors) { - err = -EINVAL; - goto out; - } -@@ -271,12 +277,26 @@ static int __init raw_init(void) +- if (number <= 0 || number >= MAX_RAW_MINORS) ++ if (number <= 0 || number >= max_raw_minors) + return -EINVAL; + + if (MAJOR(dev) != major || MINOR(dev) != minor) +@@ -318,12 +324,26 @@ static int __init raw_init(void) dev_t dev = MKDEV(RAW_MAJOR, 0); int ret; @@ -90,7 +90,7 @@ Signed-off-by: Jan Kara if (ret) { kobject_put(&raw_cdev.kobj); goto error_region; -@@ -295,8 +315,9 @@ static int __init raw_init(void) +@@ -342,8 +362,9 @@ static int __init raw_init(void) return 0; error_region: @@ -101,7 +101,7 @@ Signed-off-by: Jan Kara return ret; } -@@ -305,7 +326,7 @@ static void __exit raw_exit(void) +@@ -352,7 +373,7 @@ static void __exit raw_exit(void) device_destroy(raw_class, MKDEV(RAW_MAJOR, 0)); class_destroy(raw_class); cdev_del(&raw_cdev); diff --git a/patches.suse/readahead-request-tunables.patch b/patches.suse/readahead-request-tunables.patch index 2f22792..949bf8e 100644 --- a/patches.suse/readahead-request-tunables.patch +++ b/patches.suse/readahead-request-tunables.patch @@ -16,7 +16,7 @@ Signed-off-by: Jan Kara --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -1038,7 +1038,11 @@ extern int blk_verify_command(unsigned c +@@ -947,7 +947,11 @@ extern int blk_verify_command(unsigned c enum blk_default_limits { BLK_MAX_SEGMENTS = 128, BLK_SAFE_MAX_SECTORS = 255, @@ -30,7 +30,7 @@ Signed-off-by: Jan Kara }; --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -1302,7 +1302,11 @@ int write_one_page(struct page *page, in +@@ -1307,7 +1307,11 @@ int write_one_page(struct page *page, in void task_dirty_inc(struct task_struct *tsk); /* readahead.c */ diff --git a/patches.suse/reiser4-dependencies b/patches.suse/reiser4-dependencies new file mode 100644 index 0000000..f92ab67 --- /dev/null +++ b/patches.suse/reiser4-dependencies @@ -0,0 +1,225 @@ +From: Reiser4 Development +Subject: In-kernel dependencies for reiser4 +Patch-mainline: Probably never + + This patch contains the in-kernel dependencies needed by reiser4. + +Acked-by: Jeff Mahoney +--- + fs/fs-writeback.c | 48 +++++++++++++++++++++++++++++++++++++++------- + fs/inode.c | 1 + include/linux/fs.h | 14 ++++++++++++- + include/linux/mm.h | 1 + include/linux/writeback.h | 3 ++ + mm/filemap.c | 2 + + mm/page-writeback.c | 26 ++++++++++++++++++++++++ + 7 files changed, 87 insertions(+), 8 deletions(-) + +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -461,8 +461,10 @@ static bool pin_sb_for_writeback(struct + * Return 1, if the caller writeback routine should be + * interrupted. Otherwise return 0. 
+ */ +-static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, +- struct writeback_control *wbc, bool only_this_sb) ++int generic_writeback_sb_inodes(struct super_block *sb, ++ struct bdi_writeback *wb, ++ struct writeback_control *wbc, ++ bool only_this_sb) + { + while (!list_empty(&wb->b_io)) { + long pages_skipped; +@@ -548,7 +550,10 @@ void writeback_inodes_wb(struct bdi_writ + requeue_io(inode); + continue; + } +- ret = writeback_sb_inodes(sb, wb, wbc, false); ++ if (sb->s_op->writeback_inodes) ++ ret = sb->s_op->writeback_inodes(sb, wb, wbc, false); ++ else ++ ret = generic_writeback_sb_inodes(sb, wb, wbc, false); + drop_super(sb); + + if (ret) +@@ -557,18 +562,21 @@ void writeback_inodes_wb(struct bdi_writ + spin_unlock(&inode_lock); + /* Leave any unwritten inodes on b_io */ + } ++EXPORT_SYMBOL(writeback_inodes_wb); + +-static void __writeback_inodes_sb(struct super_block *sb, ++void __writeback_inodes_sb(struct super_block *sb, + struct bdi_writeback *wb, struct writeback_control *wbc) + { +- WARN_ON(!rwsem_is_locked(&sb->s_umount)); +- + spin_lock(&inode_lock); + if (!wbc->for_kupdate || list_empty(&wb->b_io)) + queue_io(wb, wbc->older_than_this); +- writeback_sb_inodes(sb, wb, wbc, true); ++ if (sb->s_op->writeback_inodes) ++ sb->s_op->writeback_inodes(sb, wb, wbc, true); ++ else ++ generic_writeback_sb_inodes(sb, wb, wbc, true); + spin_unlock(&inode_lock); + } ++EXPORT_SYMBOL(__writeback_inodes_sb); + + /* + * The maximum number of pages to writeout in a single bdi flush/kupdate +@@ -688,6 +696,32 @@ static long wb_writeback(struct bdi_writ + + return wrote; + } ++EXPORT_SYMBOL(generic_writeback_sb_inodes); ++ ++/* ++ * This function is for file systems which have their ++ * own means of periodical write-out of old data. ++ * NOTE: inode_lock should be hold. ++ * ++ * Skip a portion of b_io inodes which belong to @sb ++ * and go sequentially in reverse order. ++ */ ++void writeback_skip_sb_inodes(struct super_block *sb, ++ struct bdi_writeback *wb) ++{ ++ while (1) { ++ struct inode *inode; ++ ++ if (list_empty(&wb->b_io)) ++ break; ++ inode = list_entry(wb->b_io.prev, struct inode, i_wb_list); ++ if (sb != inode->i_sb) ++ break; ++ redirty_tail(inode); ++ } ++} ++EXPORT_SYMBOL(writeback_skip_sb_inodes); ++ + + /* + * Return the next wb_writeback_work struct that hasn't been processed yet. +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -82,6 +82,7 @@ static struct hlist_head *inode_hashtabl + * the i_state of an inode while it is in use.. 
+ */ + DEFINE_SPINLOCK(inode_lock); ++EXPORT_SYMBOL_GPL(inode_lock); + + /* + * iprune_sem provides exclusion between the kswapd or try_to_free_pages +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -522,6 +522,7 @@ enum positive_aop_returns { + struct page; + struct address_space; + struct writeback_control; ++struct bdi_writeback; + + struct iov_iter { + const struct iovec *iov; +@@ -1605,7 +1606,12 @@ struct super_operations { + int (*statfs) (struct dentry *, struct kstatfs *); + int (*remount_fs) (struct super_block *, int *, char *); + void (*umount_begin) (struct super_block *); +- ++ int (*writeback_inodes)(struct super_block *sb, ++ struct bdi_writeback *wb, ++ struct writeback_control *wbc, ++ bool only_this_sb); ++ void (*sync_inodes) (struct super_block *sb, ++ struct writeback_control *wbc); + int (*show_options)(struct seq_file *, struct vfsmount *); + int (*show_stats)(struct seq_file *, struct vfsmount *); + #ifdef CONFIG_QUOTA +@@ -2133,6 +2139,12 @@ extern int invalidate_inode_pages2(struc + extern int invalidate_inode_pages2_range(struct address_space *mapping, + pgoff_t start, pgoff_t end); + extern int write_inode_now(struct inode *, int); ++extern void writeback_skip_sb_inodes(struct super_block *sb, ++ struct bdi_writeback *wb); ++extern int generic_writeback_sb_inodes(struct super_block *sb, ++ struct bdi_writeback *wb, ++ struct writeback_control *wbc, ++ bool only_this_sb); + extern int filemap_fdatawrite(struct address_space *); + extern int filemap_flush(struct address_space *); + extern int filemap_fdatawait(struct address_space *); +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -875,6 +875,7 @@ void account_page_dirtied(struct page *p + void account_page_writeback(struct page *page); + int set_page_dirty(struct page *page); + int set_page_dirty_lock(struct page *page); ++int set_page_dirty_notag(struct page *page); + int clear_page_dirty_for_io(struct page *page); + + /* Is the vma a continuation of the stack vma above it? */ +--- a/include/linux/writeback.h ++++ b/include/linux/writeback.h +@@ -64,6 +64,9 @@ int writeback_inodes_sb_nr_if_idle(struc + void sync_inodes_sb(struct super_block *); + void writeback_inodes_wb(struct bdi_writeback *wb, + struct writeback_control *wbc); ++void __writeback_inodes_sb(struct super_block *sb, ++ struct bdi_writeback *wb, ++ struct writeback_control *wbc); + long wb_do_writeback(struct bdi_writeback *wb, int force_wait); + void wakeup_flusher_threads(long nr_pages); + +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -139,6 +139,7 @@ void __remove_from_page_cache(struct pag + dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); + } + } ++EXPORT_SYMBOL(__remove_from_page_cache); + + void remove_from_page_cache(struct page *page) + { +@@ -967,6 +968,7 @@ static void shrink_readahead_size_eio(st + { + ra->ra_pages /= 4; + } ++EXPORT_SYMBOL(find_get_pages); + + /** + * do_generic_file_read - generic file read routine +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -1180,6 +1180,32 @@ int __set_page_dirty_nobuffers(struct pa + EXPORT_SYMBOL(__set_page_dirty_nobuffers); + + /* ++ * set_page_dirty_notag() -- similar to __set_page_dirty_nobuffers() ++ * except it doesn't tag the page dirty in the page-cache radix tree. ++ * This means that the address space using this cannot use the regular ++ * filemap ->writepages() helpers and must provide its own means of ++ * tracking and finding non-tagged dirty pages. ++ * ++ * NOTE: furthermore, this version also doesn't handle truncate races. 
++ */ ++int set_page_dirty_notag(struct page *page) ++{ ++ struct address_space *mapping = page->mapping; ++ ++ if (!TestSetPageDirty(page)) { ++ unsigned long flags; ++ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); ++ local_irq_save(flags); ++ account_page_dirtied(page, mapping); ++ local_irq_restore(flags); ++ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); ++ return 1; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(set_page_dirty_notag); ++ ++/* + * When a writepage implementation decides that it doesn't want to write this + * page for some reason, it should redirty the locked page via + * redirty_page_for_writepage() and it should then unlock the page and return 0 diff --git a/patches.suse/reiser4-exports b/patches.suse/reiser4-exports deleted file mode 100644 index ece4113..0000000 --- a/patches.suse/reiser4-exports +++ /dev/null @@ -1,41 +0,0 @@ -From: ReiserFS Development -Subject: [PATCH] reiser4: add new exports for used symbols -Patch-mainline: Not yet - - This patch exports the following symbols for use in reiser4: - - - __remove_from_page_cache - - find_get_pages - -Acked-by: Jeff Mahoney - ---- - mm/filemap.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -139,6 +139,7 @@ void __remove_from_page_cache(struct pag - dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); - } - } -+EXPORT_SYMBOL_GPL(__remove_from_page_cache); - - void remove_from_page_cache(struct page *page) - { -@@ -151,6 +152,7 @@ void remove_from_page_cache(struct page - spin_unlock_irq(&mapping->tree_lock); - mem_cgroup_uncharge_cache_page(page); - } -+EXPORT_SYMBOL_GPL(remove_from_page_cache); - - static int sync_page(void *word) - { -@@ -779,6 +781,7 @@ repeat: - rcu_read_unlock(); - return ret; - } -+EXPORT_SYMBOL_GPL(find_get_pages); - - /** - * find_get_pages_contig - gang contiguous pagecache lookup diff --git a/patches.suse/reiser4-set_page_dirty_notag b/patches.suse/reiser4-set_page_dirty_notag deleted file mode 100644 index 92f5995..0000000 --- a/patches.suse/reiser4-set_page_dirty_notag +++ /dev/null @@ -1,61 +0,0 @@ -From: ReiserFS Development -Subject: [PATCH] mm: Add set_page_dirty_notag() helper for reiser4 -Patch-mainline: not yet - -This patch adds a set_page_dirty_notag() helper which is like -set_page_dirty but doesn't add the pages to the radix tree. - -Currently the only user is reiser4. - -Acked-by: Jeff Mahoney ---- - - include/linux/mm.h | 1 + - mm/page-writeback.c | 26 ++++++++++++++++++++++++++ - 2 files changed, 27 insertions(+) - ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -853,6 +853,7 @@ int redirty_page_for_writepage(struct wr - void account_page_dirtied(struct page *page, struct address_space *mapping); - int set_page_dirty(struct page *page); - int set_page_dirty_lock(struct page *page); -+int set_page_dirty_notag(struct page *page); - int clear_page_dirty_for_io(struct page *page); - - extern unsigned long move_page_tables(struct vm_area_struct *vma, ---- a/mm/page-writeback.c -+++ b/mm/page-writeback.c -@@ -1130,6 +1130,32 @@ int __set_page_dirty_nobuffers(struct pa - EXPORT_SYMBOL(__set_page_dirty_nobuffers); - - /* -+ * set_page_dirty_notag() -- similar to __set_page_dirty_nobuffers() -+ * except it doesn't tag the page dirty in the page-cache radix tree. -+ * This means that the address space using this cannot use the regular -+ * filemap ->writepages() helpers and must provide its own means of -+ * tracking and finding non-tagged dirty pages. 
-+ * -+ * NOTE: furthermore, this version also doesn't handle truncate races. -+ */ -+int set_page_dirty_notag(struct page *page) -+{ -+ struct address_space *mapping = page->mapping; -+ -+ if (!TestSetPageDirty(page)) { -+ unsigned long flags; -+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); -+ local_irq_save(flags); -+ account_page_dirtied(page, mapping); -+ local_irq_restore(flags); -+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); -+ return 1; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(set_page_dirty_notag); -+ -+/* - * When a writepage implementation decides that it doesn't want to write this - * page for some reason, it should redirty the locked page via - * redirty_page_for_writepage() and it should then unlock the page and return 0 diff --git a/patches.suse/reiserfs-barrier-default b/patches.suse/reiserfs-barrier-default index 5041211..9a11cca 100644 --- a/patches.suse/reiserfs-barrier-default +++ b/patches.suse/reiserfs-barrier-default @@ -44,7 +44,7 @@ Signed-off-by: Jeff Mahoney depends on REISERFS_FS --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c -@@ -1626,6 +1626,9 @@ static int reiserfs_fill_super(struct su +@@ -1637,6 +1637,9 @@ static int reiserfs_fill_super(struct su /* Set default values for options: non-aggressive tails, RO on errors */ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); diff --git a/patches.suse/richacl-fix b/patches.suse/richacl-fix new file mode 100644 index 0000000..74ecc54 --- /dev/null +++ b/patches.suse/richacl-fix @@ -0,0 +1,235 @@ +From: Jeff Mahoney +Subject: richacl: Adopt 2.6.38 API +Patch-mainline: dependent on local patches + + This patch addresses changes in the inode_operations->permission prototype. + +Signed-off-by: Jeff Mahoney +--- + fs/ext4/richacl.c | 13 +++++++------ + fs/ext4/richacl.h | 4 ++-- + fs/richacl_base.c | 2 +- + fs/richacl_inode.c | 34 +++++++++++++++++++--------------- + include/linux/richacl.h | 16 ++++++++++------ + 5 files changed, 39 insertions(+), 30 deletions(-) + +--- a/fs/ext4/richacl.c ++++ b/fs/ext4/richacl.c +@@ -120,7 +120,8 @@ ext4_set_richacl(handle_t *handle, struc + } + + int +-ext4_richacl_permission(struct inode *inode, unsigned int mask) ++ext4_richacl_permission(struct inode *inode, unsigned int mask, ++ unsigned int flags) + { + struct richacl *acl; + int retval; +@@ -132,20 +133,20 @@ ext4_richacl_permission(struct inode *in + if (acl && IS_ERR(acl)) + retval = PTR_ERR(acl); + else { +- retval = richacl_inode_permission(inode, acl, mask); ++ retval = richacl_inode_permission(inode, acl, mask, flags); + richacl_put(acl); + } + + return retval; + } + +-int ext4_permission(struct inode *inode, int mask) ++int ext4_permission(struct inode *inode, int mask, unsigned int flags) + { + if (IS_RICHACL(inode)) + return ext4_richacl_permission(inode, +- richacl_want_to_mask(mask)); ++ richacl_want_to_mask(mask), flags); + else +- return generic_permission(inode, mask, ext4_check_acl); ++ return generic_permission(inode, mask, flags, ext4_check_acl); + } + + int ext4_may_create(struct inode *dir, int isdir) +@@ -260,7 +261,7 @@ ext4_xattr_set_richacl(struct dentry *de + if (strcmp(name, "") != 0) + return -EINVAL; + if (current_fsuid() != inode->i_uid && +- ext4_richacl_permission(inode, ACE4_WRITE_ACL) && ++ ext4_richacl_permission(inode, ACE4_WRITE_ACL, 0) && + !capable(CAP_FOWNER)) + return -EPERM; + if (value) { +--- a/fs/ext4/richacl.h ++++ b/fs/ext4/richacl.h +@@ -24,8 +24,8 @@ + /* Value for i_richacl if RICHACL has not been 
cached */ + # define EXT4_RICHACL_NOT_CACHED ((void *)-1) + +-extern int ext4_permission(struct inode *, int); +-extern int ext4_richacl_permission(struct inode *, unsigned int); ++extern int ext4_permission(struct inode *, int, unsigned int); ++extern int ext4_richacl_permission(struct inode *, unsigned int, unsigned int); + extern int ext4_may_create(struct inode *, int); + extern int ext4_may_delete(struct inode *, struct inode *, int); + extern int ext4_init_richacl(handle_t *, struct inode *, struct inode *); +--- a/fs/richacl_base.c ++++ b/fs/richacl_base.c +@@ -381,7 +381,7 @@ EXPORT_SYMBOL_GPL(richacl_chmod); + */ + int + richacl_permission(struct inode *inode, const struct richacl *acl, +- unsigned int mask) ++ unsigned int mask, unsigned int flags) + { + const struct richace *ace; + unsigned int file_mask, requested = mask, denied = 0; +--- a/fs/richacl_inode.c ++++ b/fs/richacl_inode.c +@@ -23,14 +23,15 @@ + */ + int + richacl_may_create(struct inode *dir, int isdir, +- int (*richacl_permission)(struct inode *, unsigned int)) ++ int (*richacl_permission)(struct inode *, unsigned int, ++ unsigned int)) + { + if (IS_RICHACL(dir)) + return richacl_permission(dir, + ACE4_EXECUTE | (isdir ? +- ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); ++ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE), 0); + else +- return generic_permission(dir, MAY_WRITE | MAY_EXEC, ++ return generic_permission(dir, MAY_WRITE | MAY_EXEC, 0, + dir->i_op->check_acl); + } + EXPORT_SYMBOL(richacl_may_create); +@@ -52,23 +53,25 @@ check_sticky(struct inode *dir, struct i + */ + int + richacl_may_delete(struct inode *dir, struct inode *inode, int replace, +- int (*richacl_permission)(struct inode *, unsigned int)) ++ int (*richacl_permission)(struct inode *, unsigned int, ++ unsigned int)) + { + int error; + + if (IS_RICHACL(inode)) { + error = richacl_permission(dir, +- ACE4_EXECUTE | ACE4_DELETE_CHILD); ++ ACE4_EXECUTE | ACE4_DELETE_CHILD, 0); + if (!error && check_sticky(dir, inode)) + error = -EPERM; +- if (error && !richacl_permission(inode, ACE4_DELETE)) ++ if (error && !richacl_permission(inode, ACE4_DELETE, 0)) + error = 0; + if (!error && replace) + error = richacl_permission(dir, + ACE4_EXECUTE | (S_ISDIR(inode->i_mode) ? 
+- ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); ++ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE), ++ 0); + } else { +- error = generic_permission(dir, MAY_WRITE | MAY_EXEC, ++ error = generic_permission(dir, MAY_WRITE | MAY_EXEC, 0, + dir->i_op->check_acl); + if (!error && check_sticky(dir, inode)) + error = -EPERM; +@@ -89,10 +92,10 @@ EXPORT_SYMBOL(richacl_may_delete); + */ + int + richacl_inode_permission(struct inode *inode, const struct richacl *acl, +- unsigned int mask) ++ unsigned int mask, unsigned int flags) + { + if (acl) { +- if (!richacl_permission(inode, acl, mask)) ++ if (!richacl_permission(inode, acl, mask, flags)) + return 0; + } else { + int mode = inode->i_mode; +@@ -140,7 +143,8 @@ EXPORT_SYMBOL_GPL(richacl_inode_permissi + */ + int + richacl_inode_change_ok(struct inode *inode, struct iattr *attr, +- int (*richacl_permission)(struct inode *, unsigned int)) ++ int (*richacl_permission)(struct inode *, unsigned int, ++ unsigned int)) + { + unsigned int ia_valid = attr->ia_valid; + +@@ -153,7 +157,7 @@ richacl_inode_change_ok(struct inode *in + (current_fsuid() != inode->i_uid || + attr->ia_uid != inode->i_uid) && + (current_fsuid() != attr->ia_uid || +- richacl_permission(inode, ACE4_WRITE_OWNER)) && ++ richacl_permission(inode, ACE4_WRITE_OWNER, 0)) && + !capable(CAP_CHOWN)) + goto error; + +@@ -163,7 +167,7 @@ richacl_inode_change_ok(struct inode *in + if ((current_fsuid() != inode->i_uid || + (!in_group && attr->ia_gid != inode->i_gid)) && + (!in_group || +- richacl_permission(inode, ACE4_WRITE_OWNER)) && ++ richacl_permission(inode, ACE4_WRITE_OWNER, 0)) && + !capable(CAP_CHOWN)) + goto error; + } +@@ -171,7 +175,7 @@ richacl_inode_change_ok(struct inode *in + /* Make sure a caller can chmod. */ + if (ia_valid & ATTR_MODE) { + if (current_fsuid() != inode->i_uid && +- richacl_permission(inode, ACE4_WRITE_ACL) && ++ richacl_permission(inode, ACE4_WRITE_ACL, 0) && + !capable(CAP_FOWNER)) + goto error; + /* Also check the setgid bit! */ +@@ -183,7 +187,7 @@ richacl_inode_change_ok(struct inode *in + /* Check for setting the inode time. 
*/ + if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) { + if (current_fsuid() != inode->i_uid && +- richacl_permission(inode, ACE4_WRITE_ATTRIBUTES) && ++ richacl_permission(inode, ACE4_WRITE_ATTRIBUTES, 0) && + !capable(CAP_FOWNER)) + goto error; + } +--- a/include/linux/richacl.h ++++ b/include/linux/richacl.h +@@ -294,7 +294,7 @@ extern unsigned int richacl_want_to_mask + extern void richacl_compute_max_masks(struct richacl *); + extern struct richacl *richacl_chmod(struct richacl *, mode_t); + extern int richacl_permission(struct inode *, const struct richacl *, +- unsigned int); ++ unsigned int, unsigned int); + extern struct richacl *richacl_inherit(const struct richacl *, struct inode *); + extern int richacl_equiv_mode(const struct richacl *, mode_t *); + +@@ -302,18 +302,22 @@ extern int richacl_equiv_mode(const stru + + #ifdef CONFIG_FS_RICHACL + extern int richacl_may_create(struct inode *, int, +- int (*)(struct inode *, unsigned int)); ++ int (*)(struct inode *, unsigned int, ++ unsigned int)); + extern int richacl_may_delete(struct inode *, struct inode *, int, +- int (*)(struct inode *, unsigned int)); ++ int (*)(struct inode *, unsigned int, ++ unsigned int)); + extern int richacl_inode_permission(struct inode *, const struct richacl *, +- unsigned int); ++ unsigned int, unsigned int); + extern int richacl_inode_change_ok(struct inode *, struct iattr *, +- int (*)(struct inode *, unsigned int)); ++ int (*)(struct inode *, unsigned int, ++ unsigned int)); + #else + static inline int + richacl_inode_change_ok(struct inode *inode, struct iattr *attr, + int (*richacl_permission)(struct inode *inode, +- unsigned int mask)) ++ unsigned int mask, ++ unsigned int flags)) + { + return -EPERM; + } diff --git a/patches.suse/rlim-0015-SECURITY-add-task_struct-to-setrlimit.patch b/patches.suse/rlim-0015-SECURITY-add-task_struct-to-setrlimit.patch deleted file mode 100644 index 5da8307..0000000 --- a/patches.suse/rlim-0015-SECURITY-add-task_struct-to-setrlimit.patch +++ /dev/null @@ -1,114 +0,0 @@ -From 9440a3f562ffe6ab34eff1ab52e6cfc516d6e7ba Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Wed, 26 Aug 2009 18:41:16 +0200 -Subject: [PATCH] SECURITY: add task_struct to setrlimit -References: FATE#305733 -Patch-mainline: no (later) - -Add task_struct to task_setrlimit of security_operations to be able to set -rlimit of different task than current. 
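Editor's illustrative sketch, not part of the patch series: the rlim-0015 description above is about giving the setrlimit security hook an explicit target task. A minimal hook written against that three-argument prototype could look like the following; the function name is invented for illustration.

static int example_task_setrlimit(struct task_struct *p, unsigned int resource,
				  struct rlimit *new_rlim)
{
	/* 'p' is the task whose limit is being changed; it may differ from current */
	if (p != current)
		pr_debug("setrlimit on pid %d, resource %u\n",
			 task_pid_nr(p), resource);
	return 0;	/* 0 means "no objection" */
}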
- -Signed-off-by: Jiri Slaby -Acked-by: Eric Paris -Acked-by: James Morris ---- - include/linux/security.h | 9 ++++++--- - kernel/sys.c | 2 +- - security/capability.c | 3 ++- - security/security.c | 5 +++-- - security/selinux/hooks.c | 7 ++++--- - 5 files changed, 16 insertions(+), 10 deletions(-) - ---- a/include/linux/security.h -+++ b/include/linux/security.h -@@ -1602,7 +1602,8 @@ struct security_operations { - int (*task_setnice) (struct task_struct *p, int nice); - int (*task_setioprio) (struct task_struct *p, int ioprio); - int (*task_getioprio) (struct task_struct *p); -- int (*task_setrlimit) (unsigned int resource, struct rlimit *new_rlim); -+ int (*task_setrlimit) (struct task_struct *p, unsigned int resource, -+ struct rlimit *new_rlim); - int (*task_setscheduler) (struct task_struct *p, int policy, - struct sched_param *lp); - int (*task_getscheduler) (struct task_struct *p); -@@ -1867,7 +1868,8 @@ int security_task_setgroups(struct group - int security_task_setnice(struct task_struct *p, int nice); - int security_task_setioprio(struct task_struct *p, int ioprio); - int security_task_getioprio(struct task_struct *p); --int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim); -+int security_task_setrlimit(struct task_struct *p, unsigned int resource, -+ struct rlimit *new_rlim); - int security_task_setscheduler(struct task_struct *p, - int policy, struct sched_param *lp); - int security_task_getscheduler(struct task_struct *p); -@@ -2483,7 +2485,8 @@ static inline int security_task_getiopri - return 0; - } - --static inline int security_task_setrlimit(unsigned int resource, -+static inline int security_task_setrlimit(struct task_struct *p, -+ unsigned int resource, - struct rlimit *new_rlim) - { - return 0; ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -1320,7 +1320,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, - if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) - return -EPERM; - -- retval = security_task_setrlimit(resource, &new_rlim); -+ retval = security_task_setrlimit(current, resource, &new_rlim); - if (retval) - return retval; - ---- a/security/capability.c -+++ b/security/capability.c -@@ -466,7 +466,8 @@ static int cap_task_getioprio(struct tas - return 0; - } - --static int cap_task_setrlimit(unsigned int resource, struct rlimit *new_rlim) -+static int cap_task_setrlimit(struct task_struct *p, unsigned int resource, -+ struct rlimit *new_rlim) - { - return 0; - } ---- a/security/security.c -+++ b/security/security.c -@@ -832,9 +832,10 @@ int security_task_getioprio(struct task_ - return security_ops->task_getioprio(p); - } - --int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim) -+int security_task_setrlimit(struct task_struct *p, unsigned int resource, -+ struct rlimit *new_rlim) - { -- return security_ops->task_setrlimit(resource, new_rlim); -+ return security_ops->task_setrlimit(p, resource, new_rlim); - } - - int security_task_setscheduler(struct task_struct *p, ---- a/security/selinux/hooks.c -+++ b/security/selinux/hooks.c -@@ -3393,16 +3393,17 @@ static int selinux_task_getioprio(struct - return current_has_perm(p, PROCESS__GETSCHED); - } - --static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim) -+static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource, -+ struct rlimit *new_rlim) - { -- struct rlimit *old_rlim = current->signal->rlim + resource; -+ struct rlimit *old_rlim = p->signal->rlim + resource; - - /* Control the ability to change the hard 
limit (whether - lowering or raising it), so that the hard limit can - later be used as a safe reset point for the soft limit - upon context transitions. See selinux_bprm_committing_creds. */ - if (old_rlim->rlim_max != new_rlim->rlim_max) -- return current_has_perm(current, PROCESS__SETRLIMIT); -+ return current_has_perm(p, PROCESS__SETRLIMIT); - - return 0; - } diff --git a/patches.suse/rlim-0016-core-add-task_struct-to-update_rlimit_cpu.patch b/patches.suse/rlim-0016-core-add-task_struct-to-update_rlimit_cpu.patch deleted file mode 100644 index 52c89bd..0000000 --- a/patches.suse/rlim-0016-core-add-task_struct-to-update_rlimit_cpu.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 0c1b5ce8de67c36bbf67db38240a91f358133bdd Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Fri, 28 Aug 2009 14:05:12 +0200 -Subject: [PATCH] core: add task_struct to update_rlimit_cpu -References: FATE#305733 -Patch-mainline: no (later) - -Add task_struct as a parameter to update_rlimit_cpu to be able to set -rlimit_cpu of different task than current. - -Signed-off-by: Jiri Slaby -Acked-by: James Morris ---- - include/linux/posix-timers.h | 2 +- - kernel/posix-cpu-timers.c | 10 +++++----- - kernel/sys.c | 2 +- - security/selinux/hooks.c | 3 ++- - 4 files changed, 9 insertions(+), 8 deletions(-) - ---- a/include/linux/posix-timers.h -+++ b/include/linux/posix-timers.h -@@ -117,6 +117,6 @@ void set_process_cpu_timer(struct task_s - - long clock_nanosleep_restart(struct restart_block *restart_block); - --void update_rlimit_cpu(unsigned long rlim_new); -+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); - - #endif ---- a/kernel/posix-cpu-timers.c -+++ b/kernel/posix-cpu-timers.c -@@ -13,16 +13,16 @@ - /* - * Called after updating RLIMIT_CPU to set timer expiration if necessary. 
- */ --void update_rlimit_cpu(unsigned long rlim_new) -+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new) - { - cputime_t cputime = secs_to_cputime(rlim_new); -- struct signal_struct *const sig = current->signal; -+ struct signal_struct *const sig = task->signal; - - if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) || - cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) { -- spin_lock_irq(¤t->sighand->siglock); -- set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); -- spin_unlock_irq(¤t->sighand->siglock); -+ spin_lock_irq(&task->sighand->siglock); -+ set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL); -+ spin_unlock_irq(&task->sighand->siglock); - } - } - ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -1350,7 +1350,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, - if (new_rlim.rlim_cur == RLIM_INFINITY) - goto out; - -- update_rlimit_cpu(new_rlim.rlim_cur); -+ update_rlimit_cpu(current, new_rlim.rlim_cur); - out: - return 0; - } ---- a/security/selinux/hooks.c -+++ b/security/selinux/hooks.c -@@ -2360,7 +2360,8 @@ static void selinux_bprm_committing_cred - initrlim = init_task.signal->rlim + i; - rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur); - } -- update_rlimit_cpu(current->signal->rlim[RLIMIT_CPU].rlim_cur); -+ update_rlimit_cpu(current, -+ current->signal->rlim[RLIMIT_CPU].rlim_cur); - } - } - diff --git a/patches.suse/rlim-0017-sys_setrlimit-make-sure-rlim_max-never-grows.patch b/patches.suse/rlim-0017-sys_setrlimit-make-sure-rlim_max-never-grows.patch deleted file mode 100644 index 7755cc4..0000000 --- a/patches.suse/rlim-0017-sys_setrlimit-make-sure-rlim_max-never-grows.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 60aa0fa94c00942cc2c9f67f05d4a0d9e15349c7 Mon Sep 17 00:00:00 2001 -From: Oleg Nesterov -Date: Thu, 3 Sep 2009 19:21:45 +0200 -Subject: [PATCH] sys_setrlimit: make sure ->rlim_max never grows -References: FATE#305733 -Patch-mainline: no (later) - -Mostly preparation for Jiri's changes, but probably makes sense anyway. - -sys_setrlimit() checks new_rlim.rlim_max <= old_rlim->rlim_max, but when -it takes task_lock() old_rlim->rlim_max can be already lowered. Move this -check under task_lock(). - -Currently this is not important, we can only race with our sub-thread, -this means the application is stupid. But when we change the code to allow -the update of !current task's limits, it becomes important to make sure -->rlim_max can be lowered "reliably" even if we race with the application -doing sys_setrlimit(). 
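Editor's illustrative sketch, not part of the patch series: the ordering argued for in the rlim-0017 description above. The rlim_max comparison is made only after task_lock() is taken, so a hard limit that was lowered concurrently cannot be bypassed by a stale check. The function name is invented; the logic mirrors the hunk shown.

static int example_setrlimit_locked(struct task_struct *tsk,
				    struct rlimit *new_rlim,
				    struct rlimit *old_rlim)
{
	int retval = 0;

	task_lock(tsk->group_leader);
	if (new_rlim->rlim_max > old_rlim->rlim_max &&
	    !capable(CAP_SYS_RESOURCE))
		retval = -EPERM;	/* raising the hard limit needs privilege */
	else
		*old_rlim = *new_rlim;
	task_unlock(tsk->group_leader);

	return retval;
}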
- -Signed-off-by: Oleg Nesterov -Signed-off-by: Jiri Slaby ---- - kernel/sys.c | 15 ++++++++------- - 1 file changed, 8 insertions(+), 7 deletions(-) - ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -1313,10 +1313,6 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, - return -EFAULT; - if (new_rlim.rlim_cur > new_rlim.rlim_max) - return -EINVAL; -- old_rlim = current->signal->rlim + resource; -- if ((new_rlim.rlim_max > old_rlim->rlim_max) && -- !capable(CAP_SYS_RESOURCE)) -- return -EPERM; - if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) - return -EPERM; - -@@ -1334,11 +1330,16 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, - new_rlim.rlim_cur = 1; - } - -+ old_rlim = current->signal->rlim + resource; - task_lock(current->group_leader); -- *old_rlim = new_rlim; -+ if ((new_rlim.rlim_max <= old_rlim->rlim_max) || -+ capable(CAP_SYS_RESOURCE)) -+ *old_rlim = new_rlim; -+ else -+ retval = -EPERM; - task_unlock(current->group_leader); - -- if (resource != RLIMIT_CPU) -+ if (retval || resource != RLIMIT_CPU) - goto out; - - /* -@@ -1352,7 +1353,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, - - update_rlimit_cpu(current, new_rlim.rlim_cur); - out: -- return 0; -+ return retval; - } - - /* diff --git a/patches.suse/rlim-0018-core-split-sys_setrlimit.patch b/patches.suse/rlim-0018-core-split-sys_setrlimit.patch deleted file mode 100644 index ecc4c87..0000000 --- a/patches.suse/rlim-0018-core-split-sys_setrlimit.patch +++ /dev/null @@ -1,112 +0,0 @@ -From 282b6f3a2d1c95ed2443ad974e354883b66cd7c9 Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Wed, 26 Aug 2009 23:45:34 +0200 -Subject: [PATCH] core: split sys_setrlimit -References: FATE#305733 -Patch-mainline: no (later) - -Create do_setrlimit from sys_setrlimit and declare do_setrlimit -in the resource header. This is to allow rlimits to be changed -not only by syscall, but later from proc and syscall code too. - -Signed-off-by: Jiri Slaby ---- - include/linux/resource.h | 2 ++ - kernel/sys.c | 40 ++++++++++++++++++++++++---------------- - 2 files changed, 26 insertions(+), 16 deletions(-) - ---- a/include/linux/resource.h -+++ b/include/linux/resource.h -@@ -73,6 +73,8 @@ struct rlimit { - struct task_struct; - - int getrusage(struct task_struct *p, int who, struct rusage __user *ru); -+int do_setrlimit(struct task_struct *tsk, unsigned int resource, -+ struct rlimit *new_rlim); - - #endif /* __KERNEL__ */ - ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -1302,42 +1302,41 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned - - #endif - --SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) -+int do_setrlimit(struct task_struct *tsk, unsigned int resource, -+ struct rlimit *new_rlim) - { -- struct rlimit new_rlim, *old_rlim; -+ struct rlimit *old_rlim; - int retval; - - if (resource >= RLIM_NLIMITS) - return -EINVAL; -- if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) -- return -EFAULT; -- if (new_rlim.rlim_cur > new_rlim.rlim_max) -+ if (new_rlim->rlim_cur > new_rlim->rlim_max) - return -EINVAL; -- if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) -+ if (resource == RLIMIT_NOFILE && new_rlim->rlim_max > sysctl_nr_open) - return -EPERM; - -- retval = security_task_setrlimit(current, resource, &new_rlim); -+ retval = security_task_setrlimit(tsk, resource, new_rlim); - if (retval) - return retval; - -- if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) { -+ if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { - /* - * The caller is asking for an immediate RLIMIT_CPU - * expiry. 
But we use the zero value to mean "it was - * never set". So let's cheat and make it one second - * instead - */ -- new_rlim.rlim_cur = 1; -+ new_rlim->rlim_cur = 1; - } - -- old_rlim = current->signal->rlim + resource; -- task_lock(current->group_leader); -- if ((new_rlim.rlim_max <= old_rlim->rlim_max) || -+ old_rlim = tsk->signal->rlim + resource; -+ task_lock(tsk->group_leader); -+ if ((new_rlim->rlim_max <= old_rlim->rlim_max) || - capable(CAP_SYS_RESOURCE)) -- *old_rlim = new_rlim; -+ *old_rlim = *new_rlim; - else - retval = -EPERM; -- task_unlock(current->group_leader); -+ task_unlock(tsk->group_leader); - - if (retval || resource != RLIMIT_CPU) - goto out; -@@ -1348,14 +1347,23 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, - * very long-standing error, and fixing it now risks breakage of - * applications, so we live with it - */ -- if (new_rlim.rlim_cur == RLIM_INFINITY) -+ if (new_rlim->rlim_cur == RLIM_INFINITY) - goto out; - -- update_rlimit_cpu(current, new_rlim.rlim_cur); -+ update_rlimit_cpu(tsk, new_rlim->rlim_cur); - out: - return retval; - } - -+SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) -+{ -+ struct rlimit new_rlim; -+ -+ if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) -+ return -EFAULT; -+ return do_setrlimit(current, resource, &new_rlim); -+} -+ - /* - * It would make sense to put struct rusage in the task_struct, - * except that would make the task_struct be *really big*. After diff --git a/patches.suse/rlim-0019-core-allow-setrlimit-to-non-current-tasks.patch b/patches.suse/rlim-0019-core-allow-setrlimit-to-non-current-tasks.patch deleted file mode 100644 index 2157825..0000000 --- a/patches.suse/rlim-0019-core-allow-setrlimit-to-non-current-tasks.patch +++ /dev/null @@ -1,56 +0,0 @@ -From a35547da680829ce9036a476678a5fd4bfa59a6b Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Fri, 28 Aug 2009 14:08:17 +0200 -Subject: [PATCH] core: allow setrlimit to non-current tasks -References: FATE#305733 -Patch-mainline: no (later) - -Add locking to allow setrlimit accept task parameter other than -current. - -Namely, lock tasklist_lock for read and check whether the task -structure has sighand non-null. Do all the signal processing under -that lock still held. 
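Editor's illustrative sketch, not part of the patch series: the locking pattern the rlim-0019 description above refers to. The target task's signal and sighand structures are pinned by holding tasklist_lock for read, and a task that is already exiting (sighand == NULL) is rejected before any limit is touched. The function name is invented.

static int example_access_other_task(struct task_struct *tsk)
{
	int retval = 0;

	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;	/* task is exiting, its sighand is gone */
		goto out;
	}
	/* ... safe to inspect or update tsk->signal->rlim[] here ... */
out:
	read_unlock(&tasklist_lock);
	return retval;
}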
- -Signed-off-by: Jiri Slaby -Cc: Oleg Nesterov ---- - kernel/sys.c | 11 ++++++++++- - 1 file changed, 10 insertions(+), 1 deletion(-) - ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -1302,6 +1302,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned - - #endif - -+/* make sure you are allowed to change @tsk limits before calling this */ - int do_setrlimit(struct task_struct *tsk, unsigned int resource, - struct rlimit *new_rlim) - { -@@ -1315,9 +1316,16 @@ int do_setrlimit(struct task_struct *tsk - if (resource == RLIMIT_NOFILE && new_rlim->rlim_max > sysctl_nr_open) - return -EPERM; - -+ /* protect tsk->signal and tsk->sighand from disappearing */ -+ read_lock(&tasklist_lock); -+ if (!tsk->sighand) { -+ retval = -ESRCH; -+ goto out; -+ } -+ - retval = security_task_setrlimit(tsk, resource, new_rlim); - if (retval) -- return retval; -+ goto out; - - if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { - /* -@@ -1352,6 +1360,7 @@ int do_setrlimit(struct task_struct *tsk - - update_rlimit_cpu(tsk, new_rlim->rlim_cur); - out: -+ read_unlock(&tasklist_lock); - return retval; - } - diff --git a/patches.suse/rlim-0020-core-optimize-setrlimit-for-current-task.patch b/patches.suse/rlim-0020-core-optimize-setrlimit-for-current-task.patch deleted file mode 100644 index b519a97..0000000 --- a/patches.suse/rlim-0020-core-optimize-setrlimit-for-current-task.patch +++ /dev/null @@ -1,50 +0,0 @@ -From d54c730eba63dfe0dcb8c7ab41514c564c159212 Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Fri, 28 Aug 2009 14:08:17 +0200 -Subject: [PATCH] core: optimize setrlimit for current task -References: FATE#305733 -Patch-mainline: no (later) - -Don't take tasklist lock for 'current'. It's not needed, since -current->sighand/signal can't disappear. - -This improves serlimit called especially via sys_setrlimit. - -Signed-off-by: Jiri Slaby -Cc: Oleg Nesterov ---- - kernel/sys.c | 16 ++++++++++------ - 1 file changed, 10 insertions(+), 6 deletions(-) - ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -1316,11 +1316,14 @@ int do_setrlimit(struct task_struct *tsk - if (resource == RLIMIT_NOFILE && new_rlim->rlim_max > sysctl_nr_open) - return -EPERM; - -- /* protect tsk->signal and tsk->sighand from disappearing */ -- read_lock(&tasklist_lock); -- if (!tsk->sighand) { -- retval = -ESRCH; -- goto out; -+ /* optimization: 'current' doesn't need locking, e.g. setrlimit */ -+ if (tsk != current) { -+ /* protect tsk->signal and tsk->sighand from disappearing */ -+ read_lock(&tasklist_lock); -+ if (!tsk->sighand) { -+ retval = -ESRCH; -+ goto out; -+ } - } - - retval = security_task_setrlimit(tsk, resource, new_rlim); -@@ -1360,7 +1363,8 @@ int do_setrlimit(struct task_struct *tsk - - update_rlimit_cpu(tsk, new_rlim->rlim_cur); - out: -- read_unlock(&tasklist_lock); -+ if (tsk != current) -+ read_unlock(&tasklist_lock); - return retval; - } - diff --git a/patches.suse/rlim-0021-FS-proc-switch-limits-reading-to-fops.patch b/patches.suse/rlim-0021-FS-proc-switch-limits-reading-to-fops.patch deleted file mode 100644 index d68ad5a..0000000 --- a/patches.suse/rlim-0021-FS-proc-switch-limits-reading-to-fops.patch +++ /dev/null @@ -1,96 +0,0 @@ -From 8e75daf5abec2e008b7d2e21876bc60cf0f95ece Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Fri, 27 Nov 2009 15:25:03 +0100 -Subject: [PATCH] FS: proc, switch limits reading to fops -References: FATE#305733 -Patch-mainline: no (later) - -Use fops instead of proc_info_read. 
We will need fops for limits -writing and the code would look ugly if we used -NOD("limits", S_IFREG|S_IRUSR|S_IWUSR, NULL, - &proc_pid_limits_operations, { .proc_read = proc_pid_limits }), - -We will just use -REG("limits", S_IRUSR|S_IWUSR, proc_pid_limits_operations), - -Signed-off-by: Jiri Slaby ---- - fs/proc/base.c | 37 ++++++++++++++++++++++++++++--------- - 1 file changed, 28 insertions(+), 9 deletions(-) - ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -477,19 +477,30 @@ static const struct limit_names lnames[R - }; - - /* Display limits for a process */ --static int proc_pid_limits(struct task_struct *task, char *buffer) -+static ssize_t limits_read(struct file *file, char __user *buf, size_t rcount, -+ loff_t *ppos) - { -- unsigned int i; -- int count = 0; -- unsigned long flags; -- char *bufptr = buffer; -- - struct rlimit rlim[RLIM_NLIMITS]; -+ struct task_struct *task; -+ unsigned long flags; -+ unsigned int i; -+ ssize_t count = 0; -+ char *bufptr; - -- if (!lock_task_sighand(task, &flags)) -+ task = get_proc_task(file->f_path.dentry->d_inode); -+ if (!task) -+ return -ESRCH; -+ if (!lock_task_sighand(task, &flags)) { -+ put_task_struct(task); - return 0; -+ } - memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS); - unlock_task_sighand(task, &flags); -+ put_task_struct(task); -+ -+ bufptr = (char *)__get_free_page(GFP_TEMPORARY); -+ if (!bufptr) -+ return -ENOMEM; - - /* - * print the file header -@@ -518,9 +529,17 @@ static int proc_pid_limits(struct task_s - count += sprintf(&bufptr[count], "\n"); - } - -+ count = simple_read_from_buffer(buf, rcount, ppos, bufptr, count); -+ -+ free_page((unsigned long)bufptr); -+ - return count; - } - -+static const struct file_operations proc_pid_limits_operations = { -+ .read = limits_read, -+}; -+ - #ifdef CONFIG_HAVE_ARCH_TRACEHOOK - static int proc_pid_syscall(struct task_struct *task, char *buffer) - { -@@ -2577,7 +2596,7 @@ static const struct pid_entry tgid_base_ - INF("auxv", S_IRUSR, proc_pid_auxv), - ONE("status", S_IRUGO, proc_pid_status), - ONE("personality", S_IRUSR, proc_pid_personality), -- INF("limits", S_IRUSR, proc_pid_limits), -+ REG("limits", S_IRUSR, proc_pid_limits_operations), - #ifdef CONFIG_SCHED_DEBUG - REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), - #endif -@@ -2912,7 +2931,7 @@ static const struct pid_entry tid_base_s - INF("auxv", S_IRUSR, proc_pid_auxv), - ONE("status", S_IRUGO, proc_pid_status), - ONE("personality", S_IRUSR, proc_pid_personality), -- INF("limits", S_IRUSR, proc_pid_limits), -+ REG("limits", S_IRUSR, proc_pid_limits_operations), - #ifdef CONFIG_SCHED_DEBUG - REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), - #endif diff --git a/patches.suse/rlim-0022-FS-proc-make-limits-writable.patch b/patches.suse/rlim-0022-FS-proc-make-limits-writable.patch deleted file mode 100644 index 368de27..0000000 --- a/patches.suse/rlim-0022-FS-proc-make-limits-writable.patch +++ /dev/null @@ -1,109 +0,0 @@ -From db3d91905e245c7495df8991a9c054b6d4e0dc98 Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Wed, 26 Aug 2009 21:24:30 +0200 -Subject: [PATCH] FS: proc, make limits writable -References: FATE#305733 -Patch-mainline: no (later) - -Allow writing strings such as -Max core file size=0:unlimited -to /proc//limits to change limits. 
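Editor's illustrative sketch, not part of the patch series: how user space would have used the writable limits file provided by the rlim-0022 patch being removed here. The string is the "Name=soft:hard" form quoted in the description above; the pid in the path is a placeholder.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char req[] = "Max core file size=0:unlimited";
	int fd = open("/proc/1234/limits", O_WRONLY);	/* 1234 is an example pid */

	if (fd < 0)
		return 1;
	if (write(fd, req, strlen(req)) != (ssize_t)strlen(req)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}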
- -Signed-off-by: Jiri Slaby ---- - fs/proc/base.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- - 1 file changed, 66 insertions(+), 2 deletions(-) - ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -536,8 +536,72 @@ static ssize_t limits_read(struct file * - return count; - } - -+static ssize_t limits_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); -+ char str[32 + 1 + 16 + 1 + 16 + 1], *delim, *next; -+ struct rlimit new_rlimit; -+ unsigned int i; -+ int ret; -+ -+ if (!task) { -+ count = -ESRCH; -+ goto out; -+ } -+ if (copy_from_user(str, buf, min(count, sizeof(str) - 1))) { -+ count = -EFAULT; -+ goto put_task; -+ } -+ -+ str[min(count, sizeof(str) - 1)] = 0; -+ -+ delim = strchr(str, '='); -+ if (!delim) { -+ count = -EINVAL; -+ goto put_task; -+ } -+ *delim++ = 0; /* for easy 'str' usage */ -+ new_rlimit.rlim_cur = simple_strtoul(delim, &next, 0); -+ if (*next != ':') { -+ if (strncmp(delim, "unlimited:", 10)) { -+ count = -EINVAL; -+ goto put_task; -+ } -+ new_rlimit.rlim_cur = RLIM_INFINITY; -+ next = delim + 9; /* move to ':' */ -+ } -+ delim = next + 1; -+ new_rlimit.rlim_max = simple_strtoul(delim, &next, 0); -+ if (*next != 0) { -+ if (strcmp(delim, "unlimited")) { -+ count = -EINVAL; -+ goto put_task; -+ } -+ new_rlimit.rlim_max = RLIM_INFINITY; -+ } -+ -+ for (i = 0; i < RLIM_NLIMITS; i++) -+ if (!strcmp(str, lnames[i].name)) -+ break; -+ if (i >= RLIM_NLIMITS) { -+ count = -EINVAL; -+ goto put_task; -+ } -+ -+ ret = do_setrlimit(task, i, &new_rlimit); -+ if (ret) -+ count = ret; -+ -+put_task: -+ put_task_struct(task); -+out: -+ return count; -+} -+ - static const struct file_operations proc_pid_limits_operations = { - .read = limits_read, -+ .write = limits_write, - }; - - #ifdef CONFIG_HAVE_ARCH_TRACEHOOK -@@ -2596,7 +2660,7 @@ static const struct pid_entry tgid_base_ - INF("auxv", S_IRUSR, proc_pid_auxv), - ONE("status", S_IRUGO, proc_pid_status), - ONE("personality", S_IRUSR, proc_pid_personality), -- REG("limits", S_IRUSR, proc_pid_limits_operations), -+ REG("limits", S_IRUSR|S_IWUSR, proc_pid_limits_operations), - #ifdef CONFIG_SCHED_DEBUG - REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), - #endif -@@ -2931,7 +2995,7 @@ static const struct pid_entry tid_base_s - INF("auxv", S_IRUSR, proc_pid_auxv), - ONE("status", S_IRUGO, proc_pid_status), - ONE("personality", S_IRUSR, proc_pid_personality), -- REG("limits", S_IRUSR, proc_pid_limits_operations), -+ REG("limits", S_IRUSR|S_IWUSR, proc_pid_limits_operations), - #ifdef CONFIG_SCHED_DEBUG - REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), - #endif diff --git a/patches.suse/rlim-0023-core-do-security-check-under-task_lock.patch b/patches.suse/rlim-0023-core-do-security-check-under-task_lock.patch deleted file mode 100644 index 3dcbfab..0000000 --- a/patches.suse/rlim-0023-core-do-security-check-under-task_lock.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 70b83579b39dc1369bc58ab395259bd254bf4a38 Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Sat, 14 Nov 2009 17:37:04 +0100 -Subject: [PATCH] core: do security check under task_lock -References: FATE#305733 -Patch-mainline: no (later) - -Do security_task_setrlimit under task_lock. Other tasks may -change limits under our hands while we are checking limits -inside the function. From now on, they can't. 
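Editor's illustrative sketch, not part of the patch series: what the rlim-0023 description above boils down to. With the security hook called inside task_lock(), the limit values the hook approves and the values finally stored cannot be changed by another writer in between. Names are invented; the sequence mirrors the hunk shown.

static int example_hook_inside_lock(struct task_struct *tsk,
				    unsigned int resource,
				    struct rlimit *new_rlim)
{
	int retval;

	task_lock(tsk->group_leader);
	retval = security_task_setrlimit(tsk, resource, new_rlim);
	if (!retval)
		tsk->signal->rlim[resource] = *new_rlim;
	task_unlock(tsk->group_leader);

	return retval;
}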
- -Signed-off-by: Jiri Slaby -Acked-by: James Morris -Cc: Heiko Carstens -Cc: Andrew Morton -Cc: Ingo Molnar ---- - kernel/sys.c | 16 +++++++--------- - 1 file changed, 7 insertions(+), 9 deletions(-) - ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -1307,7 +1307,7 @@ int do_setrlimit(struct task_struct *tsk - struct rlimit *new_rlim) - { - struct rlimit *old_rlim; -- int retval; -+ int retval = 0; - - if (resource >= RLIM_NLIMITS) - return -EINVAL; -@@ -1326,10 +1326,6 @@ int do_setrlimit(struct task_struct *tsk - } - } - -- retval = security_task_setrlimit(tsk, resource, new_rlim); -- if (retval) -- goto out; -- - if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { - /* - * The caller is asking for an immediate RLIMIT_CPU -@@ -1342,11 +1338,13 @@ int do_setrlimit(struct task_struct *tsk - - old_rlim = tsk->signal->rlim + resource; - task_lock(tsk->group_leader); -- if ((new_rlim->rlim_max <= old_rlim->rlim_max) || -- capable(CAP_SYS_RESOURCE)) -- *old_rlim = *new_rlim; -- else -+ if ((new_rlim->rlim_max > old_rlim->rlim_max) && -+ !capable(CAP_SYS_RESOURCE)) - retval = -EPERM; -+ if (!retval) -+ retval = security_task_setrlimit(tsk, resource, new_rlim); -+ if (!retval) -+ *old_rlim = *new_rlim; - task_unlock(tsk->group_leader); - - if (retval || resource != RLIMIT_CPU) diff --git a/patches.suse/s390-Kerntypes.diff b/patches.suse/s390-Kerntypes.diff index a6994ba..ed9356a 100644 --- a/patches.suse/s390-Kerntypes.diff +++ b/patches.suse/s390-Kerntypes.diff @@ -18,7 +18,7 @@ Signed-off-by: Michal Marek --- a/arch/s390/Makefile +++ b/arch/s390/Makefile -@@ -105,12 +105,12 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/ +@@ -106,12 +106,12 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/ boot := arch/s390/boot diff --git a/patches.suse/scsi-error-test-unit-ready-timeout b/patches.suse/scsi-error-test-unit-ready-timeout index a3e2f09..ed09317 100644 --- a/patches.suse/scsi-error-test-unit-ready-timeout +++ b/patches.suse/scsi-error-test-unit-ready-timeout @@ -16,15 +16,15 @@ Signed-off-by: Kurt Garloff --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c -@@ -42,6 +42,7 @@ - #include "scsi_transport_api.h" +@@ -45,6 +45,7 @@ + #include #define SENSE_TIMEOUT (10*HZ) +#define TEST_UNIT_READY_TIMEOUT (30*HZ) /* * These should *probably* be handled by the host itself. -@@ -994,7 +995,7 @@ static int scsi_eh_tur(struct scsi_cmnd +@@ -1028,7 +1029,7 @@ static int scsi_eh_tur(struct scsi_cmnd int retry_cnt = 1, rtn; retry_tur: diff --git a/patches.suse/scsi-netlink-ml b/patches.suse/scsi-netlink-ml index 5e98bbd..e1d42a7 100644 --- a/patches.suse/scsi-netlink-ml +++ b/patches.suse/scsi-netlink-ml @@ -17,7 +17,7 @@ Signed-off-by: Hannes Reinecke --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c -@@ -24,6 +24,8 @@ +@@ -25,6 +25,8 @@ #include #include #include @@ -26,7 +26,7 @@ Signed-off-by: Hannes Reinecke #include #include -@@ -33,6 +35,7 @@ +@@ -34,6 +36,7 @@ #include #include #include @@ -34,7 +34,7 @@ Signed-off-by: Hannes Reinecke #include "scsi_priv.h" #include "scsi_logging.h" -@@ -213,6 +216,80 @@ static inline void scsi_eh_prt_fail_stat +@@ -218,6 +221,80 @@ static inline void scsi_eh_prt_fail_stat } #endif @@ -115,7 +115,7 @@ Signed-off-by: Hannes Reinecke /** * scsi_check_sense - Examine scsi cmd sense * @scmd: Cmd to have sense checked. 
-@@ -235,6 +312,8 @@ static int scsi_check_sense(struct scsi_ +@@ -240,6 +317,8 @@ static int scsi_check_sense(struct scsi_ if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; diff --git a/patches.suse/setuid-dumpable-wrongdir b/patches.suse/setuid-dumpable-wrongdir index 60b77f9..cf919a4 100644 --- a/patches.suse/setuid-dumpable-wrongdir +++ b/patches.suse/setuid-dumpable-wrongdir @@ -22,7 +22,7 @@ Signed-off-by: Kurt Garloff --- a/kernel/sysctl.c +++ b/kernel/sysctl.c -@@ -760,6 +760,13 @@ static struct ctl_table kern_table[] = { +@@ -817,6 +817,13 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif @@ -38,9 +38,9 @@ Signed-off-by: Kurt Garloff .procname = "spin_retry", --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c -@@ -136,6 +136,7 @@ static const struct bin_table bin_kern_t +@@ -138,6 +138,7 @@ static const struct bin_table bin_kern_t + { CTL_INT, KERN_COMPAT_LOG, "compat-log" }, { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, - { CTL_INT, KERN_NMI_WATCHDOG, "nmi_watchdog" }, { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, + { CTL_INT, KERN_SETUID_DUMPABLE, "suid_dumpable" }, {} diff --git a/patches.suse/silent-stack-overflow-2.patch b/patches.suse/silent-stack-overflow-2.patch deleted file mode 100644 index f904f9d..0000000 --- a/patches.suse/silent-stack-overflow-2.patch +++ /dev/null @@ -1,492 +0,0 @@ -From: Nick Piggin -Subject: avoid silent stack overflow over the heap -Patch-mainline: no -References: bnc#44807 bnc#211997 - -This is a rewrite of Andrea Arcangeli's patch, which implements a stack -guard feature. That is, it prevents the stack from growing right next -to another vma, and prevents other vmas being allocated right next to the -stack. This will cause a segfault rather than the stack silently overwriting -other memory areas (eg. the heap) in the case that the app has a stack -overflow. - -I have rewritten it so as not to require changes to expand_stack prototype, -support for growsup stacks, and support for powerpc and ia64. - - -Signed-off-by: Nick Piggin ---- - arch/ia64/kernel/sys_ia64.c | 11 +++++ - arch/powerpc/mm/slice.c | 82 +++++++++++++++++++++++++----------------- - arch/x86/kernel/sys_x86_64.c | 52 ++++++++++++++++++++------ - include/linux/mm.h | 1 - kernel/sysctl.c | 7 +++ - mm/mmap.c | 83 ++++++++++++++++++++++++++++++++++++------- - 6 files changed, 177 insertions(+), 59 deletions(-) - ---- a/arch/ia64/kernel/sys_ia64.c -+++ b/arch/ia64/kernel/sys_ia64.c -@@ -59,6 +59,8 @@ arch_get_unmapped_area (struct file *fil - start_addr = addr = (addr + align_mask) & ~align_mask; - - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { -+ unsigned long guard; -+ - /* At this point: (!vma || addr < vma->vm_end). 
*/ - if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { -@@ -68,7 +70,14 @@ arch_get_unmapped_area (struct file *fil - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(TASK_SIZE - (addr + len), -+ (unsigned long)guard << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) { -+got_it: - /* Remember the address where we stopped this search: */ - mm->free_area_cache = addr + len; - return addr; ---- a/arch/powerpc/mm/slice.c -+++ b/arch/powerpc/mm/slice.c -@@ -94,11 +94,21 @@ static int slice_area_is_free(struct mm_ - unsigned long len) - { - struct vm_area_struct *vma; -+ unsigned long guard; - - if ((mm->task_size - len) < addr) - return 0; - vma = find_vma(mm, addr); -- return (!vma || (addr + len) <= vma->vm_start); -+ if (!vma) -+ return 1; -+ -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(mm->task_size - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) -+ return 1; -+ return 0; - } - - static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) -@@ -242,8 +252,10 @@ static unsigned long slice_find_area_bot - - full_search: - for (;;) { -+ unsigned long guard; -+ - addr = _ALIGN_UP(addr, 1ul << pshift); -- if ((TASK_SIZE - len) < addr) -+ if ((mm->task_size - len) < addr) - break; - vma = find_vma(mm, addr); - BUG_ON(vma && (addr >= vma->vm_end)); -@@ -256,7 +268,14 @@ full_search: - addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); - continue; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(mm->task_size - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) { -+got_it: - /* - * Remember the place where we stopped the search: - */ -@@ -264,8 +283,8 @@ full_search: - mm->free_area_cache = addr + len; - return addr; - } -- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start) -- mm->cached_hole_size = vma->vm_start - addr; -+ if (use_cache && (addr + guard + mm->cached_hole_size) < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - (addr + guard); - addr = vma->vm_end; - } - -@@ -284,37 +303,23 @@ static unsigned long slice_find_area_top - int psize, int use_cache) - { - struct vm_area_struct *vma; -- unsigned long addr; -+ unsigned long start_addr, addr; - struct slice_mask mask; - int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); - -- /* check if free_area_cache is useful for us */ - if (use_cache) { - if (len <= mm->cached_hole_size) { -+ start_addr = addr = mm->mmap_base; - mm->cached_hole_size = 0; -- mm->free_area_cache = mm->mmap_base; -- } -- -- /* either no address requested or can't fit in requested -- * address hole -- */ -- addr = mm->free_area_cache; -- -- /* make sure it can fit in the remaining address space */ -- if (addr > len) { -- addr = _ALIGN_DOWN(addr - len, 1ul << pshift); -- mask = slice_range_to_mask(addr, len); -- if (slice_check_fit(mask, available) && -- slice_area_is_free(mm, addr, len)) -- /* remember the address as a hint for -- * next time -- */ -- return (mm->free_area_cache = addr); -- } -- } -+ } else -+ start_addr = addr = mm->free_area_cache; -+ } else -+ start_addr = addr = mm->mmap_base; - -- addr = mm->mmap_base; -+full_search: - while (addr > len) { -+ unsigned long guard; -+ - /* Go down by 
chunk size */ - addr = _ALIGN_DOWN(addr - len, 1ul << pshift); - -@@ -336,7 +341,15 @@ static unsigned long slice_find_area_top - * return with success: - */ - vma = find_vma(mm, addr); -- if (!vma || (addr + len) <= vma->vm_start) { -+ -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(mm->task_size - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) { -+got_it: - /* remember the address as a hint for next time */ - if (use_cache) - mm->free_area_cache = addr; -@@ -344,11 +357,16 @@ static unsigned long slice_find_area_top - } - - /* remember the largest hole we saw so far */ -- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start) -- mm->cached_hole_size = vma->vm_start - addr; -+ if (use_cache && (addr + guard + mm->cached_hole_size) < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - (addr + guard); - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start; -+ addr = vma->vm_start - guard; -+ } -+ if (start_addr != mm->mmap_base) { -+ start_addr = addr = mm->mmap_base; -+ mm->cached_hole_size = 0; -+ goto full_search; - } - - /* ---- a/arch/x86/kernel/sys_x86_64.c -+++ b/arch/x86/kernel/sys_x86_64.c -@@ -93,6 +93,8 @@ arch_get_unmapped_area(struct file *filp - - full_search: - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { -+ unsigned long guard; -+ - /* At this point: (!vma || addr < vma->vm_end). */ - if (end - len < addr) { - /* -@@ -106,15 +108,22 @@ full_search: - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(end - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) { -+got_it: - /* - * Remember the place where we stopped the search: - */ - mm->free_area_cache = addr + len; - return addr; - } -- if (addr + mm->cached_hole_size < vma->vm_start) -- mm->cached_hole_size = vma->vm_start - addr; -+ if (addr + guard + mm->cached_hole_size < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - (addr + guard); - - addr = vma->vm_end; - } -@@ -161,34 +170,51 @@ arch_get_unmapped_area_topdown(struct fi - - /* make sure it can fit in the remaining address space */ - if (addr > len) { -- vma = find_vma(mm, addr-len); -- if (!vma || addr <= vma->vm_start) -- /* remember the address as a hint for next time */ -- return mm->free_area_cache = addr-len; -+ unsigned long guard; -+ -+ addr -= len; -+ vma = find_vma(mm, addr); -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(TASK_SIZE - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) -+ goto got_it; - } - - if (mm->mmap_base < len) - goto bottomup; - - addr = mm->mmap_base-len; -- - do { -+ unsigned long guard; - /* - * Lookup failure means no vma is above this address, - * else if new region fits below vma->vm_start, - * return with success: - */ - vma = find_vma(mm, addr); -- if (!vma || addr+len <= vma->vm_start) -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(TASK_SIZE - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) { -+got_it: - /* remember the address as a hint for next time */ -- return mm->free_area_cache = addr; -+ mm->free_area_cache = addr; -+ return addr; -+ } - - /* remember the largest hole we saw so far */ -- if 
(addr + mm->cached_hole_size < vma->vm_start) -- mm->cached_hole_size = vma->vm_start - addr; -+ if (addr + guard + mm->cached_hole_size < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - (addr + guard); - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start-len; -+ addr = vma->vm_start - (len + guard); - } while (len < vma->vm_start); - - bottomup: ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -1324,6 +1324,7 @@ unsigned long ra_submit(struct file_ra_s - struct file *filp); - - /* Do stack extension */ -+extern int heap_stack_gap; - extern int expand_stack(struct vm_area_struct *vma, unsigned long address); - #ifdef CONFIG_IA64 - extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -1267,6 +1267,13 @@ static struct ctl_table vm_table[] = { - .mode = 0644, - .proc_handler = scan_unevictable_handler, - }, -+ { -+ .procname = "heap-stack-gap", -+ .data = &heap_stack_gap, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_dointvec, -+ }, - #ifdef CONFIG_MEMORY_FAILURE - { - .procname = "memory_failure_early_kill", ---- a/mm/mmap.c -+++ b/mm/mmap.c -@@ -86,6 +86,7 @@ int sysctl_overcommit_memory = OVERCOMMI - int sysctl_overcommit_ratio = 50; /* default is 50% */ - int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; - struct percpu_counter vm_committed_as; -+int heap_stack_gap __read_mostly = 1; - - /* - * Check that a process has enough memory to allocate a new virtual -@@ -1360,6 +1361,8 @@ arch_get_unmapped_area(struct file *filp - - full_search: - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { -+ unsigned long guard; -+ - /* At this point: (!vma || addr < vma->vm_end). */ - if (TASK_SIZE - len < addr) { - /* -@@ -1374,15 +1377,23 @@ full_search: - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(TASK_SIZE - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) { -+got_it: - /* - * Remember the place where we stopped the search: - */ - mm->free_area_cache = addr + len; - return addr; - } -- if (addr + mm->cached_hole_size < vma->vm_start) -- mm->cached_hole_size = vma->vm_start - addr; -+ if (addr + guard + mm->cached_hole_size < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - (addr + guard); -+ - addr = vma->vm_end; - } - } -@@ -1440,34 +1451,51 @@ arch_get_unmapped_area_topdown(struct fi - - /* make sure it can fit in the remaining address space */ - if (addr > len) { -- vma = find_vma(mm, addr-len); -- if (!vma || addr <= vma->vm_start) -- /* remember the address as a hint for next time */ -- return (mm->free_area_cache = addr-len); -+ unsigned long guard; -+ -+ addr -= len; -+ vma = find_vma(mm, addr); -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(TASK_SIZE - (addr + len), -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) -+ goto got_it; - } - - if (mm->mmap_base < len) - goto bottomup; - - addr = mm->mmap_base-len; -- - do { -+ unsigned long guard; - /* - * Lookup failure means no vma is above this address, - * else if new region fits below vma->vm_start, - * return with success: - */ - vma = find_vma(mm, addr); -- if (!vma || addr+len <= vma->vm_start) -+ if (!vma) -+ goto got_it; -+ guard = 0; -+ if (vma->vm_flags & VM_GROWSDOWN) -+ guard = min(TASK_SIZE - (addr + len), -+ 
(unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (addr + len + guard <= vma->vm_start) { -+got_it: - /* remember the address as a hint for next time */ -- return (mm->free_area_cache = addr); -+ mm->free_area_cache = addr; -+ return addr; -+ } - - /* remember the largest hole we saw so far */ -- if (addr + mm->cached_hole_size < vma->vm_start) -- mm->cached_hole_size = vma->vm_start - addr; -+ if (addr + guard + mm->cached_hole_size < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - (addr + guard); - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start-len; -+ addr = vma->vm_start - (len + guard); - } while (len < vma->vm_start); - - bottomup: -@@ -1699,6 +1727,19 @@ int expand_upwards(struct vm_area_struct - /* Somebody else might have raced and expanded it already */ - if (address > vma->vm_end) { - unsigned long size, grow; -+#ifdef CONFIG_STACK_GROWSUP -+ unsigned long guard; -+ struct vm_area_struct *vm_next; -+ -+ error = -ENOMEM; -+ guard = min(TASK_SIZE - address, -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ vm_next = find_vma(vma->vm_mm, address + guard); -+ if (unlikely(vm_next && vm_next != vma)) { -+ /* stack collision with another vma */ -+ goto out_unlock; -+ } -+#endif - - size = address - vma->vm_start; - grow = (address - vma->vm_end) >> PAGE_SHIFT; -@@ -1707,6 +1748,7 @@ int expand_upwards(struct vm_area_struct - if (!error) - vma->vm_end = address; - } -+out_unlock: __maybe_unused - anon_vma_unlock(vma); - return error; - } -@@ -1743,7 +1785,21 @@ static int expand_downwards(struct vm_ar - /* Somebody else might have raced and expanded it already */ - if (address < vma->vm_start) { - unsigned long size, grow; -+ struct vm_area_struct *prev_vma; -+ -+ find_vma_prev(vma->vm_mm, address, &prev_vma); - -+ error = -ENOMEM; -+ if (prev_vma) { -+ unsigned long guard; -+ -+ guard = min(TASK_SIZE - prev_vma->vm_end, -+ (unsigned long)heap_stack_gap << PAGE_SHIFT); -+ if (unlikely(prev_vma->vm_end + guard > address)) { -+ /* stack collision with another vma */ -+ goto out_unlock; -+ } -+ } - size = vma->vm_end - address; - grow = (vma->vm_start - address) >> PAGE_SHIFT; - -@@ -1753,6 +1809,7 @@ static int expand_downwards(struct vm_ar - vma->vm_pgoff -= grow; - } - } -+ out_unlock: - anon_vma_unlock(vma); - return error; - } diff --git a/patches.suse/slab-handle-memoryless-nodes-v2a.patch b/patches.suse/slab-handle-memoryless-nodes-v2a.patch index d392be7..7b363d6 100644 --- a/patches.suse/slab-handle-memoryless-nodes-v2a.patch +++ b/patches.suse/slab-handle-memoryless-nodes-v2a.patch @@ -115,12 +115,12 @@ Average of 40: 9.796 9.857 0.623 Signed-off-by: Lee Schermerhorn Acked-by: Nick Piggin - mm/slab.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++--------- - 1 file changed, 78 insertions(+), 13 deletions(-) + mm/slab.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++++--------- + 1 file changed, 76 insertions(+), 12 deletions(-) --- a/mm/slab.c +++ b/mm/slab.c -@@ -305,7 +305,7 @@ struct kmem_list3 { +@@ -281,7 +281,7 @@ struct kmem_list3 { struct array_cache **alien; /* on other nodes */ unsigned long next_reap; /* updated without locking */ int free_touched; /* updated without locking */ @@ -129,7 +129,7 @@ Acked-by: Nick Piggin /* * Need this for bootstrapping a per node allocator. 
-@@ -968,6 +968,11 @@ static int transfer_objects(struct array +@@ -944,6 +944,11 @@ static int transfer_objects(struct array #define drain_alien_cache(cachep, alien) do { } while (0) #define reap_alien(cachep, l3) do { } while (0) @@ -141,7 +141,7 @@ Acked-by: Nick Piggin static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) { return (struct array_cache **)BAD_ALIEN_MAGIC; -@@ -999,6 +1004,64 @@ static inline void *____cache_alloc_node +@@ -975,6 +980,64 @@ static inline void *____cache_alloc_node static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); static void *alternate_node_alloc(struct kmem_cache *, gfp_t); @@ -185,7 +185,7 @@ Acked-by: Nick Piggin +#define memoryless_node(L3L) ((L3L) & 1) +static inline int numa_slab_nid(struct kmem_cache *cachep, gfp_t flags) +{ -+ int node = numa_node_id(); ++ int node = numa_mem_id(); + + if (likely(cachep)){ + unsigned long l3l = (unsigned long)cachep->nodelists[node]; @@ -206,119 +206,102 @@ Acked-by: Nick Piggin static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) { struct array_cache **ac_ptr; -@@ -1098,7 +1161,7 @@ static inline int cache_free_alien(struc +@@ -1074,7 +1137,7 @@ static inline int cache_free_alien(struc struct array_cache *alien = NULL; int node; -- node = numa_node_id(); +- node = numa_mem_id(); + node = numa_slab_nid(cachep, GFP_KERNEL); /* * Make sure we are not freeing a object from another node to the array -@@ -1443,7 +1506,7 @@ void __init kmem_cache_init(void) +@@ -1503,7 +1566,7 @@ void __init kmem_cache_init(void) * 6) Resize the head arrays of the kmalloc caches to their final sizes. */ -- node = numa_node_id(); +- node = numa_mem_id(); + node = numa_slab_nid(NULL, GFP_KERNEL); /* 1) create the cache_cache */ INIT_LIST_HEAD(&cache_chain); -@@ -2079,7 +2142,7 @@ static int __init_refok setup_cpu_cache( +@@ -2147,7 +2210,7 @@ static int __init_refok setup_cpu_cache( } } } -- cachep->nodelists[numa_node_id()]->next_reap = +- cachep->nodelists[numa_mem_id()]->next_reap = + cachep->nodelists[numa_slab_nid(cachep, GFP_KERNEL)]->next_reap = jiffies + REAPTIMEOUT_LIST3 + ((unsigned long)cachep) % REAPTIMEOUT_LIST3; -@@ -2411,7 +2474,7 @@ static void check_spinlock_acquired(stru +@@ -2479,7 +2542,7 @@ static void check_spinlock_acquired(stru { #ifdef CONFIG_SMP check_irq_off(); -- assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); +- assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock); + assert_spin_locked(&cachep->nodelists[numa_slab_nid(cachep, GFP_KERNEL)]->list_lock); #endif } -@@ -2438,7 +2501,7 @@ static void do_drain(void *arg) +@@ -2506,7 +2569,7 @@ static void do_drain(void *arg) { struct kmem_cache *cachep = arg; struct array_cache *ac; -- int node = numa_node_id(); +- int node = numa_mem_id(); + int node = numa_slab_nid(cachep, GFP_KERNEL); check_irq_off(); ac = cpu_cache_get(cachep); -@@ -2975,7 +3038,7 @@ static void *cache_alloc_refill(struct k +@@ -3043,7 +3106,7 @@ static void *cache_alloc_refill(struct k retry: check_irq_off(); -- node = numa_node_id(); +- node = numa_mem_id(); + node = numa_slab_nid(cachep, flags); if (unlikely(must_refill)) goto force_grow; ac = cpu_cache_get(cachep); -@@ -3185,7 +3248,7 @@ static void *alternate_node_alloc(struct +@@ -3253,7 +3316,7 @@ static void *alternate_node_alloc(struct if (in_interrupt() || (flags & __GFP_THISNODE)) return NULL; -- nid_alloc = nid_here = numa_node_id(); +- nid_alloc = nid_here = numa_mem_id(); + nid_alloc = nid_here = numa_slab_nid(cachep, flags); + 
get_mems_allowed(); if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) - nid_alloc = cpuset_mem_spread_node(); - else if (current->mempolicy) -@@ -3360,6 +3423,7 @@ __cache_alloc_node(struct kmem_cache *ca + nid_alloc = cpuset_slab_spread_node(); +@@ -3432,7 +3495,7 @@ __cache_alloc_node(struct kmem_cache *ca { unsigned long save_flags; void *ptr; +- int slab_node = numa_mem_id(); + int slab_node = numa_slab_nid(cachep, flags); flags &= gfp_allowed_mask; -@@ -3372,7 +3436,7 @@ __cache_alloc_node(struct kmem_cache *ca - local_irq_save(save_flags); - - if (nodeid == -1) -- nodeid = numa_node_id(); -+ nodeid = slab_node; - - if (unlikely(!cachep->nodelists[nodeid])) { - /* Node not bootstrapped yet */ -@@ -3380,7 +3444,7 @@ __cache_alloc_node(struct kmem_cache *ca - goto out; - } - -- if (nodeid == numa_node_id()) { -+ if (nodeid == slab_node) { - /* - * Use the locally cached objects if possible. - * However ____cache_alloc does not allow fallback -@@ -3425,7 +3489,8 @@ __do_cache_alloc(struct kmem_cache *cach +@@ -3498,7 +3561,8 @@ __do_cache_alloc(struct kmem_cache *cach * ____cache_alloc_node() knows how to locate memory on other nodes */ - if (!objp) -- objp = ____cache_alloc_node(cache, flags, numa_node_id()); -+ objp = ____cache_alloc_node(cache, flags, + if (!objp) +- objp = ____cache_alloc_node(cache, flags, numa_mem_id()); ++ objp = ____cache_alloc_node(cache, flags, + numa_slab_nid(cache, flags)); out: return objp; -@@ -3522,7 +3587,7 @@ static void cache_flusharray(struct kmem +@@ -3595,7 +3659,7 @@ static void cache_flusharray(struct kmem { int batchcount; struct kmem_list3 *l3; -- int node = numa_node_id(); +- int node = numa_mem_id(); + int node = numa_slab_nid(cachep, GFP_KERNEL); batchcount = ac->batchcount; #if DEBUG -@@ -4172,7 +4237,7 @@ static void cache_reap(struct work_struc +@@ -4234,7 +4298,7 @@ static void cache_reap(struct work_struc { struct kmem_cache *searchp; struct kmem_list3 *l3; -- int node = numa_node_id(); +- int node = numa_mem_id(); + int node = numa_slab_nid(NULL, GFP_KERNEL); struct delayed_work *work = to_delayed_work(w); diff --git a/patches.suse/stack-unwind b/patches.suse/stack-unwind index 6364791..a91a3ea 100644 --- a/patches.suse/stack-unwind +++ b/patches.suse/stack-unwind @@ -7,33 +7,37 @@ This includes reverting f1883f86dea84fe47a71a39fc1afccc005915ed8. Update Jan 17 2009 jeffm: - Something in 2.6.29-rc1 tweaked the frame pointer code somehow, so I fixed that up. 
+Update Jul 02 2010 jbeulich: +- fix after upstream commit 9e565292270a2d55524be38835104c564ac8f795 --- Makefile | 5 arch/x86/Kconfig | 2 arch/x86/Makefile | 2 + arch/x86/include/asm/dwarf2.h | 3 + arch/x86/include/asm/stacktrace.h | 4 arch/x86/include/asm/system.h | 10 arch/x86/include/asm/unwind.h | 163 ++++ arch/x86/kernel/dumpstack.c | 89 ++ - arch/x86/kernel/dumpstack.h | 4 - arch/x86/kernel/dumpstack_32.c | 18 - arch/x86/kernel/dumpstack_64.c | 20 + arch/x86/kernel/dumpstack_32.c | 5 + arch/x86/kernel/dumpstack_64.c | 8 arch/x86/kernel/entry_32.S | 35 + arch/x86/kernel/entry_64.S | 34 + arch/x86/kernel/vmlinux.lds.S | 2 include/asm-generic/vmlinux.lds.h | 22 include/linux/module.h | 3 include/linux/unwind.h | 135 +++ init/main.c | 3 kernel/Makefile | 1 - kernel/module.c | 15 + kernel/module.c | 32 kernel/unwind.c | 1303 ++++++++++++++++++++++++++++++++++++++ lib/Kconfig.debug | 18 - 19 files changed, 1866 insertions(+), 16 deletions(-) + 21 files changed, 1874 insertions(+), 5 deletions(-) --- a/Makefile +++ b/Makefile -@@ -570,6 +570,11 @@ else - KBUILD_CFLAGS += -fomit-frame-pointer +@@ -589,6 +589,11 @@ KBUILD_CFLAGS += -fomit-frame-pointer + endif endif +ifdef CONFIG_UNWIND_INFO @@ -46,7 +50,7 @@ Update Jan 17 2009 jeffm: KBUILD_AFLAGS += -gdwarf-2 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -487,7 +487,7 @@ config X86_ES7000 +@@ -496,7 +496,7 @@ config X86_32_IRIS config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" @@ -57,7 +61,7 @@ Update Jan 17 2009 jeffm: is disabled then wchan values will recurse back to the --- a/arch/x86/Makefile +++ b/arch/x86/Makefile -@@ -105,7 +105,9 @@ KBUILD_CFLAGS += -pipe +@@ -110,7 +110,9 @@ KBUILD_CFLAGS += -pipe # Workaround for a gcc prelease that unfortunately was shipped in a suse release KBUILD_CFLAGS += -Wno-sign-compare # @@ -67,6 +71,31 @@ Update Jan 17 2009 jeffm: # prevent gcc from generating any FP code by mistake KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) +--- a/arch/x86/include/asm/dwarf2.h ++++ b/arch/x86/include/asm/dwarf2.h +@@ -34,7 +34,8 @@ + #define CFI_SIGNAL_FRAME + #endif + +-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__) ++#if !defined(CONFIG_UNWIND_INFO) && defined(CONFIG_AS_CFI_SECTIONS) \ ++ && defined(__ASSEMBLY__) + /* + * Emit CFI data in .debug_frame sections, not .eh_frame sections. 
+ * The latter we currently just discard since we don't do DWARF +--- a/arch/x86/include/asm/stacktrace.h ++++ b/arch/x86/include/asm/stacktrace.h +@@ -92,6 +92,10 @@ extern void + show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, char *log_lvl); + ++int try_stack_unwind(struct task_struct *task, struct pt_regs *regs, ++ unsigned long **stack, unsigned long *bp, ++ const struct stacktrace_ops *ops, void *data); ++ + extern unsigned int code_bytes; + + /* The form of the top of the frame on the stack */ --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -123,12 +123,22 @@ do { \ @@ -260,15 +289,14 @@ Update Jan 17 2009 jeffm: +#endif /* _ASM_X86_UNWIND_H */ --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c -@@ -20,6 +20,7 @@ - #endif +@@ -17,12 +17,18 @@ + #include #include +#include - #include "dumpstack.h" -@@ -27,6 +28,11 @@ int panic_on_unrecovered_nmi; + int panic_on_unrecovered_nmi; int panic_on_io_nmi; unsigned int code_bytes = 64; int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; @@ -280,7 +308,7 @@ Update Jan 17 2009 jeffm: static int die_counter; void printk_address(unsigned long address, int reliable) -@@ -66,6 +72,71 @@ print_ftrace_graph_addr(unsigned long ad +@@ -62,6 +68,71 @@ print_ftrace_graph_addr(unsigned long ad { } #endif @@ -352,7 +380,7 @@ Update Jan 17 2009 jeffm: /* * x86-64 can have up to three kernel stacks: * process stack -@@ -396,3 +467,21 @@ static int __init code_bytes_setup(char +@@ -373,3 +444,21 @@ static int __init code_bytes_setup(char return 1; } __setup("code_bytes=", code_bytes_setup); @@ -374,50 +402,24 @@ Update Jan 17 2009 jeffm: +} +early_param("call_trace", call_trace_setup); +#endif ---- a/arch/x86/kernel/dumpstack.h -+++ b/arch/x86/kernel/dumpstack.h -@@ -22,6 +22,10 @@ extern void - show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *sp, unsigned long bp, char *log_lvl); - -+int try_stack_unwind(struct task_struct *task, struct pt_regs *regs, -+ unsigned long **stack, unsigned long *bp, -+ const struct stacktrace_ops *ops, void *data); -+ - extern unsigned int code_bytes; - - /* The form of the top of the frame on the stack */ --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c -@@ -28,14 +28,6 @@ void dump_trace(struct task_struct *task +@@ -27,6 +27,10 @@ void dump_trace(struct task_struct *task if (!task) task = current; -- if (!stack) { -- unsigned long dummy; -- -- stack = &dummy; -- if (task && task != current) -- stack = (unsigned long *)task->thread.sp; -- } -- - #ifdef CONFIG_FRAME_POINTER - if (!bp) { - if (task == current) { -@@ -48,6 +40,16 @@ void dump_trace(struct task_struct *task - } - #endif - ++ bp = stack_frame(task, regs); + if (try_stack_unwind(task, regs, &stack, &bp, ops, data)) + return; + -+ if (!stack) { -+ unsigned long dummy; -+ stack = &dummy; -+ if (task && task != current) -+ stack = (unsigned long *)task->thread.sp; -+ } -+ + if (!stack) { + unsigned long dummy; + +@@ -35,7 +39,6 @@ void dump_trace(struct task_struct *task + stack = (unsigned long *)task->thread.sp; + } + +- bp = stack_frame(task, regs); for (;;) { struct thread_info *context; @@ -430,43 +432,30 @@ Update Jan 17 2009 jeffm: +#include #include - #include "dumpstack.h" -@@ -154,13 +155,6 @@ void dump_trace(struct task_struct *task + +@@ -155,13 +156,18 @@ void dump_trace(struct task_struct *task if (!task) task = current; -- if (!stack) { -- unsigned long dummy; -- stack = &dummy; -- if (task && task != current) 
-- stack = (unsigned long *)task->thread.sp; -- } -- - #ifdef CONFIG_FRAME_POINTER - if (!bp) { - if (task == current) { -@@ -173,6 +167,18 @@ void dump_trace(struct task_struct *task - } - #endif - ++ bp = stack_frame(task, regs); + if (try_stack_unwind(task, regs, &stack, &bp, ops, data)) { + put_cpu(); + return; + } + -+ if (!stack) { -+ unsigned long dummy; -+ stack = &dummy; -+ if (task && task != current) -+ stack = (unsigned long *)task->thread.sp; -+ } -+ + if (!stack) { + stack = &dummy; + if (task && task != current) + stack = (unsigned long *)task->thread.sp; + } + +- bp = stack_frame(task, regs); /* * Print function call entries in all stacks, starting at the * current stack address. If the stacks consist of nested --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S -@@ -1064,6 +1064,41 @@ END(spurious_interrupt_bug) +@@ -1002,6 +1002,41 @@ END(spurious_interrupt_bug) */ .popsection @@ -510,7 +499,7 @@ Update Jan 17 2009 jeffm: CFI_STARTPROC --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S -@@ -1232,6 +1232,40 @@ ENTRY(call_softirq) +@@ -1212,6 +1212,40 @@ ENTRY(call_softirq) CFI_ENDPROC END(call_softirq) @@ -551,9 +540,21 @@ Update Jan 17 2009 jeffm: #ifdef CONFIG_XEN zeroentry xen_hypervisor_callback xen_do_hypervisor_callback +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -358,7 +358,9 @@ SECTIONS + + /* Sections to be discarded */ + DISCARDS ++#ifndef CONFIG_UNWIND_INFO + /DISCARD/ : { *(.eh_frame) } ++#endif + } + + --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h -@@ -341,6 +341,8 @@ +@@ -359,6 +359,8 @@ MEM_KEEP(exit.rodata) \ } \ \ @@ -562,7 +563,7 @@ Update Jan 17 2009 jeffm: /* Built-in module parameters. */ \ __param : AT(ADDR(__param) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___param) = .; \ -@@ -758,3 +760,23 @@ +@@ -798,3 +800,23 @@ BSS(bss_align) \ . = ALIGN(stop_align); \ VMLINUX_SYMBOL(__bss_stop) = .; @@ -588,9 +589,9 @@ Update Jan 17 2009 jeffm: +#endif --- a/include/linux/module.h +++ b/include/linux/module.h -@@ -301,6 +301,9 @@ struct module - /* The size of the executable code in each section. */ - unsigned int init_text_size, core_text_size; +@@ -338,6 +338,9 @@ struct module + /* Size of RO sections of the module (text+rodata) */ + unsigned int init_ro_size, core_ro_size; + /* The handle returned from unwind_add_table. 
*/ + void *unwind_info; @@ -738,7 +739,7 @@ Update Jan 17 2009 jeffm: +#endif /* _LINUX_UNWIND_H */ --- a/init/main.c +++ b/init/main.c -@@ -50,6 +50,7 @@ +@@ -48,6 +48,7 @@ #include #include #include @@ -746,7 +747,7 @@ Update Jan 17 2009 jeffm: #include #include #include -@@ -560,6 +561,7 @@ asmlinkage void __init start_kernel(void +@@ -552,6 +553,7 @@ asmlinkage void __init start_kernel(void * Need to run as early as possible, to initialize the * lockdep hash: */ @@ -754,7 +755,7 @@ Update Jan 17 2009 jeffm: lockdep_init(); debug_objects_early_init(); -@@ -586,6 +588,7 @@ asmlinkage void __init start_kernel(void +@@ -576,6 +578,7 @@ asmlinkage void __init start_kernel(void setup_arch(&command_line); mm_init_owner(&init_mm, &init_task); setup_command_line(command_line); @@ -782,52 +783,72 @@ Update Jan 17 2009 jeffm: #include #include #include -@@ -1501,6 +1502,8 @@ static void free_module(struct module *m - remove_sect_attrs(mod); - mod_kobject_remove(mod); +@@ -158,7 +159,7 @@ struct load_info { + struct _ddebug *debug; + unsigned int num_debug; + struct { +- unsigned int sym, str, mod, vers, info, pcpu; ++ unsigned int sym, str, mod, vers, info, pcpu, unwind; + } index; + }; + +@@ -532,6 +533,27 @@ bool is_module_percpu_address(unsigned l + + #endif /* CONFIG_SMP */ + ++static unsigned int find_unwind(struct load_info *info) ++{ ++ int section = 0; ++#ifdef ARCH_UNWIND_SECTION_NAME ++ section = find_sec(info, ARCH_UNWIND_SECTION_NAME); ++ if (section) ++ info->sechdrs[section].sh_flags |= SHF_ALLOC; ++#endif ++ return section; ++} ++ ++static void add_unwind_table(struct module *mod, struct load_info *info) ++{ ++ int index = info->index.unwind; ++ ++ /* Size of section 0 is 0, so this is ok if there is no unwind info. */ ++ mod->unwind_info = unwind_add_table(mod, ++ (void *)info->sechdrs[index].sh_addr, ++ info->sechdrs[index].sh_size); ++} ++ + #define MODINFO_ATTR(field) \ + static void setup_modinfo_##field(struct module *mod, const char *s) \ + { \ +@@ -1759,6 +1781,8 @@ static void free_module(struct module *m + /* Remove dynamic debug info */ + ddebug_remove_module(mod->name); + unwind_remove_table(mod->unwind_info, 0); + /* Arch-specific cleanup. */ module_arch_cleanup(mod); -@@ -2064,6 +2067,7 @@ static noinline struct module *load_modu - unsigned int symindex = 0; - unsigned int strindex = 0; - unsigned int modindex, versindex, infoindex, pcpuindex; -+ unsigned int unwindex = 0; - struct module *mod; - long err = 0; - void *ptr = NULL; /* Stops spurious gcc warning */ -@@ -2146,10 +2150,15 @@ static noinline struct module *load_modu - versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); - infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); - pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); -+#ifdef ARCH_UNWIND_SECTION_NAME -+ unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME); -+#endif +@@ -2464,6 +2488,8 @@ static struct module *setup_load_info(st - /* Don't keep modinfo and version sections. */ - sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; - sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC; -+ if (unwindex) -+ sechdrs[unwindex].sh_flags |= SHF_ALLOC; + info->index.pcpu = find_pcpusec(info); ++ info->index.unwind = find_unwind(info); ++ /* Check module struct version now, before we try to use module. 
*/ - if (!check_modstruct_version(sechdrs, versindex, mod)) { -@@ -2511,6 +2520,11 @@ static noinline struct module *load_modu - } - #endif + if (!check_modstruct_version(info->sechdrs, info->index.vers, mod)) + return ERR_PTR(-ENOEXEC); +@@ -2885,6 +2911,9 @@ static struct module *load_module(void _ + if (err < 0) + goto unlink; -+ /* Size of section 0 is 0, so this works well if no unwind info. */ -+ mod->unwind_info = unwind_add_table(mod, -+ (void *)sechdrs[unwindex].sh_addr, -+ sechdrs[unwindex].sh_size); ++ /* Initialize unwind table */ ++ add_unwind_table(mod, &info); + - /* Get rid of temporary copy */ - vfree(hdr); - -@@ -2632,6 +2646,7 @@ SYSCALL_DEFINE3(init_module, void __user + /* Get rid of temporary copy and strmap. */ + kfree(info.strmap); + free_copy(&info); +@@ -2999,6 +3028,7 @@ SYSCALL_DEFINE3(init_module, void __user /* Drop initial reference. */ module_put(mod); trim_init_extable(mod); @@ -837,7 +858,7 @@ Update Jan 17 2009 jeffm: mod->symtab = mod->core_symtab; --- /dev/null +++ b/kernel/unwind.c -@@ -0,0 +1,1303 @@ +@@ -0,0 +1,1305 @@ +/* + * Copyright (C) 2002-2006 Novell, Inc. + * Jan Beulich @@ -1889,21 +1910,21 @@ Update Jan 17 2009 jeffm: + if (!fde) + dprintk(1, "FDE validation failed (%p,%p).", ptr, end); + } -+#ifdef CONFIG_FRAME_POINTER + if (cie == NULL || fde == NULL) { ++#ifdef CONFIG_FRAME_POINTER + unsigned long top = TSK_STACK_TOP(frame->task); + unsigned long bottom = STACK_BOTTOM(frame->task); + unsigned long fp = UNW_FP(frame); + unsigned long sp = UNW_SP(frame); + unsigned long link; + -+ if ((sp | fp) & sizeof(unsigned long)) ++ if ((sp | fp) & (sizeof(unsigned long) - 1)) + return -EPERM; + +# if FRAME_RETADDR_OFFSET < 0 + if (!(sp < top && fp <= sp && bottom < fp)) +# else -+ if (!(sp < top && fp >= sp && bottom < fp)) ++ if (!(sp > top && fp >= sp && bottom > fp)) +# endif + return -ENXIO; + @@ -1913,11 +1934,11 @@ Update Jan 17 2009 jeffm: +# if FRAME_RETADDR_OFFSET < 0 + if (!(link > bottom && link < fp)) +# else -+ if (!(link > bottom && link > fp)) ++ if (!(link < bottom && link > fp)) +# endif + return -ENXIO; + -+ if (link & (sizeof(unsigned long) - 1)) ++ if (link & (sizeof(link) - 1)) + return -ENXIO; + + fp += FRAME_RETADDR_OFFSET; @@ -1932,8 +1953,10 @@ Update Jan 17 2009 jeffm: +# endif + UNW_FP(frame) = link; + return 0; -+ } ++#else ++ return -ENXIO; +#endif ++ } + state.org = startLoc; + memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); + /* process instructions */ @@ -2143,7 +2166,7 @@ Update Jan 17 2009 jeffm: +EXPORT_SYMBOL_GPL(unwind_to_user); --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -742,6 +742,24 @@ config FRAME_POINTER +@@ -828,6 +828,24 @@ config FRAME_POINTER larger and slower, but it gives very useful debugging information in case of kernel bugs. 
(precise oopses/stacktraces/warnings) diff --git a/patches.suse/supported-flag b/patches.suse/supported-flag index 3e0f664..9a922e8 100644 --- a/patches.suse/supported-flag +++ b/patches.suse/supported-flag @@ -14,35 +14,36 @@ Signed-off-by: Andreas Gruenbacher --- - Documentation/kernel-parameters.txt | 6 +++ + Documentation/kernel-parameters.txt | 5 ++ Documentation/sysctl/kernel.txt | 12 ++++++ Makefile | 5 ++ - include/linux/kernel.h | 8 ++++ - kernel/module.c | 42 ++++++++++++++++++++++- - kernel/panic.c | 4 ++ + include/linux/kernel.h | 9 ++++ + include/linux/module.h | 1 + kernel/ksysfs.c | 23 ++++++++++++ + kernel/module.c | 65 ++++++++++++++++++++++++++++++++++++ + kernel/panic.c | 9 ++++ kernel/sysctl.c | 9 ++++ scripts/Makefile.modpost | 4 +- scripts/mod/modpost.c | 65 +++++++++++++++++++++++++++++++++++- - 9 files changed, 152 insertions(+), 3 deletions(-) + 11 files changed, 205 insertions(+), 2 deletions(-) --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt -@@ -2549,6 +2549,12 @@ and is between 256 and 4096 characters. - improve throughput, but will also increase the - amount of memory reserved for use by the client. +@@ -2504,6 +2504,11 @@ and is between 256 and 4096 characters. + [X86] + Set unknown_nmi_panic=1 early on boot. + unsupported Allow loading of unsupported kernel modules: + 0 = only allow supported modules, + 1 = warn when loading unsupported modules, + 2 = don't warn. + -+ - swiotlb= [IA-64] Number of I/O TLB slabs - - switches= [HW,M68k] + usbcore.autosuspend= + [USB] The autosuspend time delay (in seconds) used + for newly-detected USB devices (default 2). This --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt -@@ -477,6 +477,18 @@ can be ORed together: +@@ -491,6 +491,18 @@ can be ORed together: instead of using the one provided by the hardware. 512 - A kernel warning has occurred. 1024 - A module from drivers/staging was loaded. 
@@ -63,9 +64,9 @@ Signed-off-by: Andreas Gruenbacher --- a/Makefile +++ b/Makefile -@@ -353,6 +353,11 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstric - -fno-delete-null-pointer-checks - KBUILD_AFLAGS := -D__ASSEMBLY__ +@@ -362,6 +362,11 @@ KBUILD_AFLAGS_MODULE := -DMODULE + KBUILD_CFLAGS_MODULE := -DMODULE + KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds +# Warn about unsupported modules in kernels built inside Autobuild +ifneq ($(wildcard /.buildenv),) @@ -77,17 +78,20 @@ Signed-off-by: Andreas Gruenbacher KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) --- a/include/linux/kernel.h +++ b/include/linux/kernel.h -@@ -317,6 +317,7 @@ extern int panic_timeout; +@@ -222,8 +222,10 @@ extern int panic_timeout; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; +extern int unsupported; extern const char *print_tainted(void); extern void add_taint(unsigned flag); ++extern void add_nonfatal_taint(unsigned flag); extern int test_taint(unsigned flag); -@@ -345,6 +346,13 @@ extern enum system_states { - #define TAINT_WARN 9 + extern unsigned long get_taint(void); + extern int root_mountflags; +@@ -251,6 +253,13 @@ extern enum system_states { #define TAINT_CRAP 10 + #define TAINT_FIRMWARE_WORKAROUND 11 +/* + * Take the upper bits to hopefully allow them @@ -96,12 +100,61 @@ Signed-off-by: Andreas Gruenbacher +#define TAINT_NO_SUPPORT 30 +#define TAINT_EXTERNAL_SUPPORT 31 + - extern void dump_stack(void) __cold; + extern const char hex_asc[]; + #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] + #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -410,6 +410,7 @@ struct module *__module_address(unsigned + bool is_module_address(unsigned long addr); + bool is_module_percpu_address(unsigned long addr); + bool is_module_text_address(unsigned long addr); ++const char *supported_printable(int taint); + + static inline int within_module_core(unsigned long addr, struct module *mod) + { +--- a/kernel/ksysfs.c ++++ b/kernel/ksysfs.c +@@ -157,6 +157,28 @@ static struct bin_attribute notes_attr = + struct kobject *kernel_kobj; + EXPORT_SYMBOL_GPL(kernel_kobj); + ++const char *supported_printable(int taint) ++{ ++ int mask = TAINT_PROPRIETARY_MODULE|TAINT_NO_SUPPORT; ++ if ((taint & mask) == mask) ++ return "No, Proprietary and Unsupported modules are loaded"; ++ else if (taint & TAINT_PROPRIETARY_MODULE) ++ return "No, Proprietary modules are loaded"; ++ else if (taint & TAINT_NO_SUPPORT) ++ return "No, Unsupported modules are loaded"; ++ else if (taint & TAINT_EXTERNAL_SUPPORT) ++ return "Yes, External"; ++ else ++ return "Yes"; ++} ++ ++static ssize_t supported_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%s\n", supported_printable(get_taint())); ++} ++KERNEL_ATTR_RO(supported); ++ + static struct attribute * kernel_attrs[] = { + #if defined(CONFIG_HOTPLUG) + &uevent_seqnum_attr.attr, +@@ -171,6 +193,7 @@ static struct attribute * kernel_attrs[] + &kexec_crash_size_attr.attr, + &vmcoreinfo_attr.attr, + #endif ++ &supported_attr.attr, + NULL + }; - enum { --- a/kernel/module.c +++ b/kernel/module.c -@@ -74,6 +74,20 @@ EXPORT_TRACEPOINT_SYMBOL(module_get); +@@ -73,6 +73,20 @@ /* If this is set, the section belongs in the init part of the module */ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) @@ -119,30 +172,54 @@ Signed-off-by: Andreas Gruenbacher +} +__setup("unsupported=", unsupported_setup); + - /* List of modules, protected by 
module_mutex or preempt_disable - * (delete uses stop_machine/add uses RCU list operations). */ - DEFINE_MUTEX(module_mutex); -@@ -1947,7 +1961,7 @@ static noinline struct module *load_modu - Elf_Ehdr *hdr; - Elf_Shdr *sechdrs; - char *secstrings, *args, *modmagic, *strtab = NULL; -- char *staging; -+ char *staging, *supported; - unsigned int i; - unsigned int symindex = 0; - unsigned int strindex = 0; -@@ -2066,6 +2080,28 @@ static noinline struct module *load_modu - mod->name); - } + /* + * Mutex protects: + * 1) List of modules (also safely readable with preempt_disable), +@@ -931,10 +945,36 @@ static struct module_attribute initstate + .show = show_initstate, + }; -+ supported = get_modinfo(sechdrs, infoindex, "supported"); -+ if (supported) { -+ if (!strcmp(supported, "external")) -+ add_taint_module(mod, TAINT_EXTERNAL_SUPPORT); -+ else if (strcmp(supported, "yes")) -+ supported = NULL; ++static void setup_modinfo_supported(struct module *mod, const char *s) ++{ ++ if (!s) { ++ mod->taints |= (1 << TAINT_NO_SUPPORT); ++ return; + } -+ if (!supported) { ++ ++ if (strcmp(s, "external") == 0) ++ mod->taints |= (1 << TAINT_EXTERNAL_SUPPORT); ++ else if (strcmp(s, "yes")) ++ mod->taints |= (1 << TAINT_NO_SUPPORT); ++} ++ ++static ssize_t show_modinfo_supported(struct module_attribute *mattr, ++ struct module *mod, char *buffer) ++{ ++ return sprintf(buffer, "%s\n", supported_printable(mod->taints)); ++} ++ ++static struct module_attribute modinfo_supported = { ++ .attr = { .name = "supported", .mode = 0444 }, ++ .show = show_modinfo_supported, ++ .setup = setup_modinfo_supported, ++}; ++ + static struct module_attribute *modinfo_attrs[] = { + &modinfo_version, + &modinfo_srcversion, + &initstate, ++ &modinfo_supported, + #ifdef CONFIG_MODULE_UNLOAD + &refcnt, + #endif +@@ -1476,6 +1516,26 @@ static int mod_sysfs_setup(struct module + add_sect_attrs(mod, info); + add_notes_attrs(mod, info); + ++ /* We don't use add_taint() here because it also disables lockdep. */ ++ if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT)) ++ add_nonfatal_taint(TAINT_EXTERNAL_SUPPORT); ++ else if (mod->taints == (1 << TAINT_NO_SUPPORT)) { + if (unsupported == 0) { + printk(KERN_WARNING "%s: module not supported by " + "Novell, refusing to load. To override, echo " @@ -150,17 +227,19 @@ Signed-off-by: Andreas Gruenbacher + err = -ENOEXEC; + goto free_hdr; + } -+ add_taint_module(mod, TAINT_NO_SUPPORT); ++ add_nonfatal_taint(TAINT_NO_SUPPORT); + if (unsupported == 1) { -+ printk(KERN_WARNING "%s: module not supported by " -+ "Novell, setting U taint flag.\n", mod->name); ++ printk(KERN_WARNING "%s: module is not supported by " ++ "Novell. Novell Technical Services may decline " ++ "your support request if it involves a kernel " ++ "fault.\n", mod->name); + } + } + - /* Now copy in args */ - args = strndup_user(uargs, ~0UL >> 1); - if (IS_ERR(args)) { -@@ -2748,6 +2784,10 @@ static char *module_flags(struct module + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); + return 0; + +@@ -3005,6 +3065,10 @@ static char *module_flags(struct module buf[bx++] = 'F'; if (mod->taints & (1 << TAINT_CRAP)) buf[bx++] = 'C'; @@ -171,29 +250,49 @@ Signed-off-by: Andreas Gruenbacher /* * TAINT_FORCED_RMMOD: could be added. 
* TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't +@@ -3217,6 +3281,7 @@ void print_modules(void) + if (last_unloaded_module[0]) + printk(" [last unloaded: %s]", last_unloaded_module); + printk("\n"); ++ printk("Supported: %s\n", supported_printable(get_taint())); + } + + #ifdef CONFIG_MODVERSIONS --- a/kernel/panic.c +++ b/kernel/panic.c -@@ -178,6 +178,8 @@ static const struct tnt tnts[] = { - { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' }, +@@ -174,6 +174,8 @@ static const struct tnt tnts[] = { { TAINT_WARN, 'W', ' ' }, { TAINT_CRAP, 'C', ' ' }, + { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, + { TAINT_NO_SUPPORT, 'N', ' ' }, + { TAINT_EXTERNAL_SUPPORT, 'X', ' ' }, }; /** -@@ -194,6 +196,8 @@ static const struct tnt tnts[] = { - * 'A' - ACPI table overridden. +@@ -191,6 +193,8 @@ static const struct tnt tnts[] = { * 'W' - Taint on warning. * 'C' - modules from drivers/staging are loaded. + * 'I' - Working around severe firmware bug. + * 'N' - Unsuported modules loaded. + * 'X' - Modules with external support loaded. * * The string is overwritten by the next call to print_tainted(). */ +@@ -226,6 +230,11 @@ unsigned long get_taint(void) + return tainted_mask; + } + ++void add_nonfatal_taint(unsigned flag) ++{ ++ set_bit(flag, &tainted_mask); ++} ++ + void add_taint(unsigned flag) + { + /* --- a/kernel/sysctl.c +++ b/kernel/sysctl.c -@@ -636,6 +636,15 @@ static struct ctl_table kern_table[] = { +@@ -664,6 +664,15 @@ static struct ctl_table kern_table[] = { .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, @@ -224,7 +323,7 @@ Signed-off-by: Andreas Gruenbacher cmd_modpost = $(modpost) -s --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c -@@ -1516,6 +1516,48 @@ static void check_sec_ref(struct module +@@ -1639,6 +1639,48 @@ static void check_sec_ref(struct module } } @@ -273,7 +372,7 @@ Signed-off-by: Andreas Gruenbacher static void read_symbols(char *modname) { const char *symname; -@@ -1703,6 +1745,13 @@ static void add_staging_flag(struct buff +@@ -1826,6 +1868,13 @@ static void add_staging_flag(struct buff buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n"); } @@ -287,7 +386,7 @@ Signed-off-by: Andreas Gruenbacher /** * Record CRCs for unresolved symbols **/ -@@ -1843,6 +1892,13 @@ static void write_if_changed(struct buff +@@ -1966,6 +2015,13 @@ static void write_if_changed(struct buff fclose(file); } @@ -301,7 +400,7 @@ Signed-off-by: Andreas Gruenbacher /* parse Module.symvers file. 
line format: * 0x12345678symbolmodule[[export]something] **/ -@@ -1936,12 +1992,13 @@ int main(int argc, char **argv) +@@ -2059,12 +2115,13 @@ int main(int argc, char **argv) struct buffer buf = { }; char *kernel_read = NULL, *module_read = NULL; char *dump_write = NULL; @@ -316,7 +415,7 @@ Signed-off-by: Andreas Gruenbacher switch (opt) { case 'i': kernel_read = optarg; -@@ -1979,11 +2036,16 @@ int main(int argc, char **argv) +@@ -2102,11 +2159,16 @@ int main(int argc, char **argv) case 'w': warn_unresolved = 1; break; @@ -333,7 +432,7 @@ Signed-off-by: Andreas Gruenbacher if (kernel_read) read_dump(kernel_read, 1); if (module_read) -@@ -2016,6 +2078,7 @@ int main(int argc, char **argv) +@@ -2139,6 +2201,7 @@ int main(int argc, char **argv) add_header(&buf, mod); add_staging_flag(&buf, mod->name); diff --git a/patches.suse/supported-flag-enterprise b/patches.suse/supported-flag-enterprise index 75af4ad..6e32c32 100644 --- a/patches.suse/supported-flag-enterprise +++ b/patches.suse/supported-flag-enterprise @@ -13,32 +13,33 @@ Patch-mainline: Never, SLES feature Signed-off-by: Jeff Mahoney --- - Documentation/kernel-parameters.txt | 2 ++ + Documentation/kernel-parameters.txt | 3 +++ include/linux/kernel.h | 2 ++ init/Kconfig | 18 ++++++++++++++++++ kernel/ksysfs.c | 4 ++++ - kernel/module.c | 12 ++++++++++++ + kernel/module.c | 19 ++++++++++++++++++- kernel/panic.c | 2 ++ kernel/sysctl.c | 2 +- scripts/Makefile.modpost | 5 +++-- - 8 files changed, 44 insertions(+), 3 deletions(-) + 8 files changed, 51 insertions(+), 4 deletions(-) --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt -@@ -2554,6 +2554,8 @@ and is between 256 and 4096 characters. +@@ -2509,6 +2509,9 @@ and is between 256 and 4096 characters. 1 = warn when loading unsupported modules, 2 = don't warn. + CONFIG_ENTERPRISE_SUPPORT must be enabled for this + to have any effect. - - swiotlb= [IA-64] Number of I/O TLB slabs - ++ + usbcore.autosuspend= + [USB] The autosuspend time delay (in seconds) used + for newly-detected USB devices (default 2). This --- a/include/linux/kernel.h +++ b/include/linux/kernel.h -@@ -347,12 +347,14 @@ extern enum system_states { - #define TAINT_WARN 9 +@@ -253,12 +253,14 @@ extern enum system_states { #define TAINT_CRAP 10 + #define TAINT_FIRMWARE_WORKAROUND 11 +#ifdef CONFIG_ENTERPRISE_SUPPORT /* @@ -49,18 +50,17 @@ Signed-off-by: Jeff Mahoney #define TAINT_EXTERNAL_SUPPORT 31 +#endif - extern void dump_stack(void) __cold; - + extern const char hex_asc[]; + #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] --- a/init/Kconfig +++ b/init/Kconfig -@@ -2,6 +2,24 @@ config SUSE_KERNEL - bool - default y +@@ -1,6 +1,23 @@ + config SUSE_KERNEL + def_bool y +config ENTERPRISE_SUPPORT + bool "Enable enterprise support facility" + depends on SUSE_KERNEL -+ default n + help + This feature enables the handling of the "supported" module flag. 
+ This flag can be used to report unsupported module loads or even @@ -80,7 +80,7 @@ Signed-off-by: Jeff Mahoney depends on SUSE_KERNEL && MODULES --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c -@@ -156,6 +156,7 @@ static struct bin_attribute notes_attr = +@@ -157,6 +157,7 @@ static struct bin_attribute notes_attr = struct kobject *kernel_kobj; EXPORT_SYMBOL_GPL(kernel_kobj); @@ -88,7 +88,7 @@ Signed-off-by: Jeff Mahoney const char *supported_printable(int taint) { int mask = TAINT_PROPRIETARY_MODULE|TAINT_NO_SUPPORT; -@@ -177,6 +178,7 @@ static ssize_t supported_show(struct kob +@@ -178,6 +179,7 @@ static ssize_t supported_show(struct kob return sprintf(buf, "%s\n", supported_printable(get_taint())); } KERNEL_ATTR_RO(supported); @@ -96,7 +96,7 @@ Signed-off-by: Jeff Mahoney static struct attribute * kernel_attrs[] = { #if defined(CONFIG_HOTPLUG) -@@ -192,7 +194,9 @@ static struct attribute * kernel_attrs[] +@@ -193,7 +195,9 @@ static struct attribute * kernel_attrs[] &kexec_crash_size_attr.attr, &vmcoreinfo_attr.attr, #endif @@ -108,7 +108,7 @@ Signed-off-by: Jeff Mahoney --- a/kernel/module.c +++ b/kernel/module.c -@@ -74,6 +74,7 @@ EXPORT_TRACEPOINT_SYMBOL(module_get); +@@ -73,6 +73,7 @@ /* If this is set, the section belongs in the init part of the module */ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) @@ -116,15 +116,15 @@ Signed-off-by: Jeff Mahoney /* Allow unsupported modules switch. */ #ifdef UNSUPPORTED_MODULES int unsupported = UNSUPPORTED_MODULES; -@@ -87,6 +88,7 @@ static int __init unsupported_setup(char +@@ -86,6 +87,7 @@ static int __init unsupported_setup(char return 1; } __setup("unsupported=", unsupported_setup); +#endif - /* List of modules, protected by module_mutex or preempt_disable - * (delete uses stop_machine/add uses RCU list operations). */ -@@ -870,6 +872,7 @@ static struct module_attribute initstate + /* + * Mutex protects: +@@ -945,6 +947,7 @@ static struct module_attribute initstate .show = show_initstate, }; @@ -132,7 +132,7 @@ Signed-off-by: Jeff Mahoney static void setup_modinfo_supported(struct module *mod, const char *s) { if (!s) { -@@ -894,12 +897,15 @@ static struct module_attribute modinfo_s +@@ -969,12 +972,15 @@ static struct module_attribute modinfo_s .show = show_modinfo_supported, .setup = setup_modinfo_supported, }; @@ -148,23 +148,41 @@ Signed-off-by: Jeff Mahoney #ifdef CONFIG_MODULE_UNLOAD &refcnt, #endif -@@ -2421,6 +2427,7 @@ static noinline struct module *load_modu - add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); - add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs); +@@ -1516,6 +1522,7 @@ static int mod_sysfs_setup(struct module + add_sect_attrs(mod, info); + add_notes_attrs(mod, info); +#ifdef CONFIG_ENTERPRISE_SUPPORT /* We don't use add_taint() here because it also disables lockdep. */ if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT)) add_nonfatal_taint(TAINT_EXTERNAL_SUPPORT); -@@ -2440,6 +2447,7 @@ static noinline struct module *load_modu +@@ -1525,7 +1532,7 @@ static int mod_sysfs_setup(struct module + "Novell, refusing to load. 
To override, echo " + "1 > /proc/sys/kernel/unsupported\n", mod->name); + err = -ENOEXEC; +- goto free_hdr; ++ goto out_remove_attrs; + } + add_nonfatal_taint(TAINT_NO_SUPPORT); + if (unsupported == 1) { +@@ -1535,10 +1542,16 @@ static int mod_sysfs_setup(struct module "fault.\n", mod->name); } } +#endif - /* Get rid of temporary copy */ - vfree(hdr); -@@ -2808,10 +2816,12 @@ static char *module_flags(struct module + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); + return 0; + ++out_remove_attrs: ++ remove_notes_attrs(mod); ++ remove_sect_attrs(mod); ++ del_usage_links(mod); ++ module_remove_modinfo_attrs(mod); + out_unreg_param: + module_param_sysfs_remove(mod); + out_unreg_holders: +@@ -3065,10 +3078,12 @@ static char *module_flags(struct module buf[bx++] = 'F'; if (mod->taints & (1 << TAINT_CRAP)) buf[bx++] = 'C'; @@ -177,7 +195,7 @@ Signed-off-by: Jeff Mahoney /* * TAINT_FORCED_RMMOD: could be added. * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't -@@ -3024,7 +3034,9 @@ void print_modules(void) +@@ -3281,7 +3296,9 @@ void print_modules(void) if (last_unloaded_module[0]) printk(" [last unloaded: %s]", last_unloaded_module); printk("\n"); @@ -189,10 +207,10 @@ Signed-off-by: Jeff Mahoney #ifdef CONFIG_MODVERSIONS --- a/kernel/panic.c +++ b/kernel/panic.c -@@ -178,8 +178,10 @@ static const struct tnt tnts[] = { - { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' }, +@@ -174,8 +174,10 @@ static const struct tnt tnts[] = { { TAINT_WARN, 'W', ' ' }, { TAINT_CRAP, 'C', ' ' }, + { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, +#ifdef CONFIG_ENTERPRISE_SUPPORT { TAINT_NO_SUPPORT, 'N', ' ' }, { TAINT_EXTERNAL_SUPPORT, 'X', ' ' }, @@ -202,7 +220,7 @@ Signed-off-by: Jeff Mahoney /** --- a/kernel/sysctl.c +++ b/kernel/sysctl.c -@@ -636,7 +636,7 @@ static struct ctl_table kern_table[] = { +@@ -664,7 +664,7 @@ static struct ctl_table kern_table[] = { .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, diff --git a/patches.suse/supported-flag-sysfs b/patches.suse/supported-flag-sysfs deleted file mode 100644 index 37b3c13..0000000 --- a/patches.suse/supported-flag-sysfs +++ /dev/null @@ -1,210 +0,0 @@ -From: Jeff Mahoney -Subject: Export supported status via sysfs -Patch-mainline: Never, SLES feature - - This patch adds a /sys/kernel/supported file indicating the supportability - status of the entire kernel. - - It also adds a /sys/module//supported file indicating the - supportability status of individual modules. - - This is useful because it can be used to obtain the supported status - of a running system without current modules (ie: immediately after - a kernel update but before a reboot) and without generating an oops. 
- -Signed-off-by: Jeff Mahoney - ---- - - include/linux/kernel.h | 1 - include/linux/module.h | 1 - kernel/ksysfs.c | 23 +++++++++++++++ - kernel/module.c | 71 +++++++++++++++++++++++++++++++++---------------- - kernel/panic.c | 5 +++ - 5 files changed, 78 insertions(+), 23 deletions(-) - ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -320,6 +320,7 @@ extern int panic_on_io_nmi; - extern int unsupported; - extern const char *print_tainted(void); - extern void add_taint(unsigned flag); -+extern void add_nonfatal_taint(unsigned flag); - extern int test_taint(unsigned flag); - extern unsigned long get_taint(void); - extern int root_mountflags; ---- a/include/linux/module.h -+++ b/include/linux/module.h -@@ -398,6 +398,7 @@ struct module *__module_address(unsigned - bool is_module_address(unsigned long addr); - bool is_module_percpu_address(unsigned long addr); - bool is_module_text_address(unsigned long addr); -+const char *supported_printable(int taint); - - static inline int within_module_core(unsigned long addr, struct module *mod) - { ---- a/kernel/ksysfs.c -+++ b/kernel/ksysfs.c -@@ -156,6 +156,28 @@ static struct bin_attribute notes_attr = - struct kobject *kernel_kobj; - EXPORT_SYMBOL_GPL(kernel_kobj); - -+const char *supported_printable(int taint) -+{ -+ int mask = TAINT_PROPRIETARY_MODULE|TAINT_NO_SUPPORT; -+ if ((taint & mask) == mask) -+ return "No, Proprietary and Unsupported modules are loaded"; -+ else if (taint & TAINT_PROPRIETARY_MODULE) -+ return "No, Proprietary modules are loaded"; -+ else if (taint & TAINT_NO_SUPPORT) -+ return "No, Unsupported modules are loaded"; -+ else if (taint & TAINT_EXTERNAL_SUPPORT) -+ return "Yes, External"; -+ else -+ return "Yes"; -+} -+ -+static ssize_t supported_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%s\n", supported_printable(get_taint())); -+} -+KERNEL_ATTR_RO(supported); -+ - static struct attribute * kernel_attrs[] = { - #if defined(CONFIG_HOTPLUG) - &uevent_seqnum_attr.attr, -@@ -170,6 +192,7 @@ static struct attribute * kernel_attrs[] - &kexec_crash_size_attr.attr, - &vmcoreinfo_attr.attr, - #endif -+ &supported_attr.attr, - NULL - }; - ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -937,10 +937,36 @@ static struct module_attribute initstate - .show = show_initstate, - }; - -+static void setup_modinfo_supported(struct module *mod, const char *s) -+{ -+ if (!s) { -+ mod->taints |= (1 << TAINT_NO_SUPPORT); -+ return; -+ } -+ -+ if (strcmp(s, "external") == 0) -+ mod->taints |= (1 << TAINT_EXTERNAL_SUPPORT); -+ else if (strcmp(s, "yes")) -+ mod->taints |= (1 << TAINT_NO_SUPPORT); -+} -+ -+static ssize_t show_modinfo_supported(struct module_attribute *mattr, -+ struct module *mod, char *buffer) -+{ -+ return sprintf(buffer, "%s\n", supported_printable(mod->taints)); -+} -+ -+static struct module_attribute modinfo_supported = { -+ .attr = { .name = "supported", .mode = 0444 }, -+ .show = show_modinfo_supported, -+ .setup = setup_modinfo_supported, -+}; -+ - static struct module_attribute *modinfo_attrs[] = { - &modinfo_version, - &modinfo_srcversion, - &initstate, -+ &modinfo_supported, - #ifdef CONFIG_MODULE_UNLOAD - &refcnt, - #endif -@@ -2027,7 +2053,7 @@ static noinline struct module *load_modu - Elf_Ehdr *hdr; - Elf_Shdr *sechdrs; - char *secstrings, *args, *modmagic, *strtab = NULL; -- char *staging, *supported; -+ char *staging; - unsigned int i; - unsigned int symindex = 0; - unsigned int strindex = 0; -@@ -2146,28 +2172,6 @@ static noinline struct module 
*load_modu - mod->name); - } - -- supported = get_modinfo(sechdrs, infoindex, "supported"); -- if (supported) { -- if (!strcmp(supported, "external")) -- add_taint_module(mod, TAINT_EXTERNAL_SUPPORT); -- else if (strcmp(supported, "yes")) -- supported = NULL; -- } -- if (!supported) { -- if (unsupported == 0) { -- printk(KERN_WARNING "%s: module not supported by " -- "Novell, refusing to load. To override, echo " -- "1 > /proc/sys/kernel/unsupported\n", mod->name); -- err = -ENOEXEC; -- goto free_hdr; -- } -- add_taint_module(mod, TAINT_NO_SUPPORT); -- if (unsupported == 1) { -- printk(KERN_WARNING "%s: module not supported by " -- "Novell, setting U taint flag.\n", mod->name); -- } -- } -- - /* Now copy in args */ - args = strndup_user(uargs, ~0UL >> 1); - if (IS_ERR(args)) { -@@ -2479,6 +2483,26 @@ static noinline struct module *load_modu - add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); - add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs); - -+ /* We don't use add_taint() here because it also disables lockdep. */ -+ if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT)) -+ add_nonfatal_taint(TAINT_EXTERNAL_SUPPORT); -+ else if (mod->taints == (1 << TAINT_NO_SUPPORT)) { -+ if (unsupported == 0) { -+ printk(KERN_WARNING "%s: module not supported by " -+ "Novell, refusing to load. To override, echo " -+ "1 > /proc/sys/kernel/unsupported\n", mod->name); -+ err = -ENOEXEC; -+ goto free_hdr; -+ } -+ add_nonfatal_taint(TAINT_NO_SUPPORT); -+ if (unsupported == 1) { -+ printk(KERN_WARNING "%s: module is not supported by " -+ "Novell. Novell Technical Services may decline " -+ "your support request if it involves a kernel " -+ "fault.\n", mod->name); -+ } -+ } -+ - /* Get rid of temporary copy */ - vfree(hdr); - -@@ -3061,6 +3085,7 @@ void print_modules(void) - if (last_unloaded_module[0]) - printk(" [last unloaded: %s]", last_unloaded_module); - printk("\n"); -+ printk("Supported: %s\n", supported_printable(get_taint())); - } - - #ifdef CONFIG_MODVERSIONS ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -233,6 +233,11 @@ unsigned long get_taint(void) - return tainted_mask; - } - -+void add_nonfatal_taint(unsigned flag) -+{ -+ set_bit(flag, &tainted_mask); -+} -+ - void add_taint(unsigned flag) - { - /* diff --git a/patches.suse/suse-ppc64-branding b/patches.suse/suse-ppc64-branding index 3ff0a09..13b5409 100644 --- a/patches.suse/suse-ppc64-branding +++ b/patches.suse/suse-ppc64-branding @@ -10,7 +10,7 @@ also the uname -r output instead of uname -v. --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c -@@ -319,7 +319,7 @@ static void __init pSeries_setup_arch(vo +@@ -318,7 +318,7 @@ static void __init pSeries_setup_arch(vo static int __init pSeries_init_panel(void) { /* Manually leave the kernel version on the panel. */ diff --git a/patches.suse/twofish-2.6 b/patches.suse/twofish-2.6 deleted file mode 100644 index 83c7ba0..0000000 --- a/patches.suse/twofish-2.6 +++ /dev/null @@ -1,664 +0,0 @@ -Subject: Twofish encryption for loop device for old S.u.S.E. crypto partitions -From: kraxel@suse.de -Patch-mainline: not yet - -See $subject, used up to 9.2 on new installs. - ---- - drivers/block/Kconfig | 6 - drivers/block/Makefile | 2 - drivers/block/loop_fish2.c | 625 +++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 633 insertions(+) - ---- a/drivers/block/Kconfig -+++ b/drivers/block/Kconfig -@@ -452,6 +452,12 @@ config SUNVDC - Support for virtual disk devices as a client under Sun - Logical Domains. 
- -+config CIPHER_TWOFISH -+ tristate "Twofish encryption for loop device for old S.u.S.E. crypto partitions" -+ depends on BLK_DEV_LOOP -+ help -+ Say Y here if you want to support old S.u.S.E. crypto partitions. -+ - source "drivers/s390/block/Kconfig" - - config XILINX_SYSACE ---- a/drivers/block/Makefile -+++ b/drivers/block/Makefile -@@ -38,4 +38,6 @@ obj-$(CONFIG_BLK_DEV_HD) += hd.o - obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o - obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ - -+obj-$(CONFIG_CIPHER_TWOFISH) += loop_fish2.o -+ - swim_mod-objs := swim.o swim_asm.o ---- /dev/null -+++ b/drivers/block/loop_fish2.c -@@ -0,0 +1,625 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define ROL(x,c) (((x) << (c)) | ((x) >> (32-(c)))) -+#define ROR(x,c) (((x) >> (c)) | ((x) << (32-(c)))) -+#define Bswap(x) __le32_to_cpu(x) -+ -+#define DWORD __u32 -+#define BYTE unsigned char -+ -+typedef struct fish2_key -+{ int keyLen; /* Key Length in Bit */ -+ DWORD sboxKeys[4]; -+ DWORD subKeys[40]; -+ BYTE key[32]; -+ DWORD sbox_full[1024]; /* This have to be 1024 DWORDs */ -+} fish2_key; -+ -+ -+/* Mul_5B[i] is 0x5B * i in GF(256), whatever that means... */ -+ -+static unsigned char Mul_5B[256] = { -+ 0x00,0x5B,0xB6,0xED,0x05,0x5E,0xB3,0xE8, -+ 0x0A,0x51,0xBC,0xE7,0x0F,0x54,0xB9,0xE2, -+ 0x14,0x4F,0xA2,0xF9,0x11,0x4A,0xA7,0xFC, -+ 0x1E,0x45,0xA8,0xF3,0x1B,0x40,0xAD,0xF6, -+ 0x28,0x73,0x9E,0xC5,0x2D,0x76,0x9B,0xC0, -+ 0x22,0x79,0x94,0xCF,0x27,0x7C,0x91,0xCA, -+ 0x3C,0x67,0x8A,0xD1,0x39,0x62,0x8F,0xD4, -+ 0x36,0x6D,0x80,0xDB,0x33,0x68,0x85,0xDE, -+ 0x50,0x0B,0xE6,0xBD,0x55,0x0E,0xE3,0xB8, -+ 0x5A,0x01,0xEC,0xB7,0x5F,0x04,0xE9,0xB2, -+ 0x44,0x1F,0xF2,0xA9,0x41,0x1A,0xF7,0xAC, -+ 0x4E,0x15,0xF8,0xA3,0x4B,0x10,0xFD,0xA6, -+ 0x78,0x23,0xCE,0x95,0x7D,0x26,0xCB,0x90, -+ 0x72,0x29,0xC4,0x9F,0x77,0x2C,0xC1,0x9A, -+ 0x6C,0x37,0xDA,0x81,0x69,0x32,0xDF,0x84, -+ 0x66,0x3D,0xD0,0x8B,0x63,0x38,0xD5,0x8E, -+ 0xA0,0xFB,0x16,0x4D,0xA5,0xFE,0x13,0x48, -+ 0xAA,0xF1,0x1C,0x47,0xAF,0xF4,0x19,0x42, -+ 0xB4,0xEF,0x02,0x59,0xB1,0xEA,0x07,0x5C, -+ 0xBE,0xE5,0x08,0x53,0xBB,0xE0,0x0D,0x56, -+ 0x88,0xD3,0x3E,0x65,0x8D,0xD6,0x3B,0x60, -+ 0x82,0xD9,0x34,0x6F,0x87,0xDC,0x31,0x6A, -+ 0x9C,0xC7,0x2A,0x71,0x99,0xC2,0x2F,0x74, -+ 0x96,0xCD,0x20,0x7B,0x93,0xC8,0x25,0x7E, -+ 0xF0,0xAB,0x46,0x1D,0xF5,0xAE,0x43,0x18, -+ 0xFA,0xA1,0x4C,0x17,0xFF,0xA4,0x49,0x12, -+ 0xE4,0xBF,0x52,0x09,0xE1,0xBA,0x57,0x0C, -+ 0xEE,0xB5,0x58,0x03,0xEB,0xB0,0x5D,0x06, -+ 0xD8,0x83,0x6E,0x35,0xDD,0x86,0x6B,0x30, -+ 0xD2,0x89,0x64,0x3F,0xD7,0x8C,0x61,0x3A, -+ 0xCC,0x97,0x7A,0x21,0xC9,0x92,0x7F,0x24, -+ 0xC6,0x9D,0x70,0x2B,0xC3,0x98,0x75,0x2E }; -+ -+ -+/* Mul_EF[i] is 0xEF * i in GF(256), whatever that means... 
*/ -+ -+static unsigned char Mul_EF[256] = { -+ 0x00,0xEF,0xB7,0x58,0x07,0xE8,0xB0,0x5F, -+ 0x0E,0xE1,0xB9,0x56,0x09,0xE6,0xBE,0x51, -+ 0x1C,0xF3,0xAB,0x44,0x1B,0xF4,0xAC,0x43, -+ 0x12,0xFD,0xA5,0x4A,0x15,0xFA,0xA2,0x4D, -+ 0x38,0xD7,0x8F,0x60,0x3F,0xD0,0x88,0x67, -+ 0x36,0xD9,0x81,0x6E,0x31,0xDE,0x86,0x69, -+ 0x24,0xCB,0x93,0x7C,0x23,0xCC,0x94,0x7B, -+ 0x2A,0xC5,0x9D,0x72,0x2D,0xC2,0x9A,0x75, -+ 0x70,0x9F,0xC7,0x28,0x77,0x98,0xC0,0x2F, -+ 0x7E,0x91,0xC9,0x26,0x79,0x96,0xCE,0x21, -+ 0x6C,0x83,0xDB,0x34,0x6B,0x84,0xDC,0x33, -+ 0x62,0x8D,0xD5,0x3A,0x65,0x8A,0xD2,0x3D, -+ 0x48,0xA7,0xFF,0x10,0x4F,0xA0,0xF8,0x17, -+ 0x46,0xA9,0xF1,0x1E,0x41,0xAE,0xF6,0x19, -+ 0x54,0xBB,0xE3,0x0C,0x53,0xBC,0xE4,0x0B, -+ 0x5A,0xB5,0xED,0x02,0x5D,0xB2,0xEA,0x05, -+ 0xE0,0x0F,0x57,0xB8,0xE7,0x08,0x50,0xBF, -+ 0xEE,0x01,0x59,0xB6,0xE9,0x06,0x5E,0xB1, -+ 0xFC,0x13,0x4B,0xA4,0xFB,0x14,0x4C,0xA3, -+ 0xF2,0x1D,0x45,0xAA,0xF5,0x1A,0x42,0xAD, -+ 0xD8,0x37,0x6F,0x80,0xDF,0x30,0x68,0x87, -+ 0xD6,0x39,0x61,0x8E,0xD1,0x3E,0x66,0x89, -+ 0xC4,0x2B,0x73,0x9C,0xC3,0x2C,0x74,0x9B, -+ 0xCA,0x25,0x7D,0x92,0xCD,0x22,0x7A,0x95, -+ 0x90,0x7F,0x27,0xC8,0x97,0x78,0x20,0xCF, -+ 0x9E,0x71,0x29,0xC6,0x99,0x76,0x2E,0xC1, -+ 0x8C,0x63,0x3B,0xD4,0x8B,0x64,0x3C,0xD3, -+ 0x82,0x6D,0x35,0xDA,0x85,0x6A,0x32,0xDD, -+ 0xA8,0x47,0x1F,0xF0,0xAF,0x40,0x18,0xF7, -+ 0xA6,0x49,0x11,0xFE,0xA1,0x4E,0x16,0xF9, -+ 0xB4,0x5B,0x03,0xEC,0xB3,0x5C,0x04,0xEB, -+ 0xBA,0x55,0x0D,0xE2,0xBD,0x52,0x0A,0xE5 }; -+ -+static inline DWORD mds_mul(BYTE *y) -+{ DWORD z; -+ -+ z=Mul_EF[y[0]] ^ y[1] ^ Mul_EF[y[2]] ^ Mul_5B[y[3]]; -+ z<<=8; -+ z|=Mul_EF[y[0]] ^ Mul_5B[y[1]] ^ y[2] ^ Mul_EF[y[3]]; -+ z<<=8; -+ z|=Mul_5B[y[0]] ^ Mul_EF[y[1]] ^ Mul_EF[y[2]] ^ y[3]; -+ z<<=8; -+ z|=y[0] ^ Mul_EF[y[1]] ^ Mul_5B[y[2]] ^ Mul_5B[y[3]]; -+ -+ return z; -+} -+ -+/* q0 and q1 are the lookup substitutions done in twofish */ -+ -+static unsigned char q0[256] = -+{ 0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, -+ 0x9A, 0x92, 0x80, 0x78, 0xE4, 0xDD, 0xD1, 0x38, -+ 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C, -+ 0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, -+ 0xF2, 0xD0, 0x8B, 0x30, 0x84, 0x54, 0xDF, 0x23, -+ 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82, -+ 0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, -+ 0xA6, 0xEB, 0xA5, 0xBE, 0x16, 0x0C, 0xE3, 0x61, -+ 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B, -+ 0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, -+ 0xE1, 0xE6, 0xBD, 0x45, 0xE2, 0xF4, 0xB6, 0x66, -+ 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7, -+ 0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, -+ 0xEA, 0x77, 0x39, 0xAF, 0x33, 0xC9, 0x62, 0x71, -+ 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8, -+ 0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, -+ 0xA1, 0x1D, 0xAA, 0xED, 0x06, 0x70, 0xB2, 0xD2, -+ 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90, -+ 0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, -+ 0x9E, 0x9C, 0x52, 0x1B, 0x5F, 0x93, 0x0A, 0xEF, -+ 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B, -+ 0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, -+ 0x2A, 0xCE, 0xCB, 0x2F, 0xFC, 0x97, 0x05, 0x7A, -+ 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A, -+ 0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, -+ 0xB8, 0xDA, 0xB0, 0x17, 0x55, 0x1F, 0x8A, 0x7D, -+ 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72, -+ 0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, -+ 0x6E, 0x50, 0xDE, 0x68, 0x65, 0xBC, 0xDB, 0xF8, -+ 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4, -+ 0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, -+ 0x6F, 0x9D, 0x36, 0x42, 0x4A, 0x5E, 0xC1, 0xE0}; -+ -+static unsigned char 
q1[256] = -+{ 0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, -+ 0x4A, 0xD3, 0xE6, 0x6B, 0x45, 0x7D, 0xE8, 0x4B, -+ 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1, -+ 0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, -+ 0x5E, 0xBA, 0xAE, 0x5B, 0x8A, 0x00, 0xBC, 0x9D, -+ 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5, -+ 0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, -+ 0xB2, 0x73, 0x4C, 0x54, 0x92, 0x74, 0x36, 0x51, -+ 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96, -+ 0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, -+ 0x13, 0x95, 0x9C, 0xC7, 0x24, 0x46, 0x3B, 0x70, -+ 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8, -+ 0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, -+ 0x03, 0x6F, 0x08, 0xBF, 0x40, 0xE7, 0x2B, 0xE2, -+ 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9, -+ 0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, -+ 0x66, 0x94, 0xA1, 0x1D, 0x3D, 0xF0, 0xDE, 0xB3, -+ 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E, -+ 0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, -+ 0x81, 0x88, 0xEE, 0x21, 0xC4, 0x1A, 0xEB, 0xD9, -+ 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01, -+ 0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, -+ 0x4F, 0xF2, 0x65, 0x8E, 0x78, 0x5C, 0x58, 0x19, -+ 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64, -+ 0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, -+ 0xCE, 0xE9, 0x68, 0x44, 0xE0, 0x4D, 0x43, 0x69, -+ 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E, -+ 0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, -+ 0x22, 0xC9, 0xC0, 0x9B, 0x89, 0xD4, 0xED, 0xAB, -+ 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9, -+ 0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, -+ 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xBE, 0x91 -+ }; -+ -+ -+static DWORD f32(DWORD x, const DWORD * k32, int keyLen) -+{ -+ BYTE b[4]; -+ -+ /* Run each byte thru 8x8 S-boxes, xoring with key byte at each stage. */ -+ /* Note that each byte goes through a different combination of S-boxes. */ -+ -+ *((DWORD *) b) = Bswap(x); /* make b[0] = LSB, b[3] = MSB */ -+ -+ switch (((keyLen + 63) / 64) & 3) -+ { -+ case 0: /* 256 bits of key */ -+ b[0] = q1[b[0]]; -+ b[1] = q0[b[1]]; -+ b[2] = q0[b[2]]; -+ b[3] = q1[b[3]]; -+ -+ *((DWORD *) b) ^= k32[3]; -+ -+ /* fall thru, having pre-processed b[0]..b[3] with k32[3] */ -+ case 3: /* 192 bits of key */ -+ b[0] = q1[b[0]]; -+ b[1] = q1[b[1]]; -+ b[2] = q0[b[2]]; -+ b[3] = q0[b[3]]; -+ -+ *((DWORD *) b) ^= k32[2]; -+ -+ /* fall thru, having pre-processed b[0]..b[3] with k32[2] */ -+ case 2: /* 128 bits of key */ -+ b[0] = q0[b[0]]; -+ b[1] = q1[b[1]]; -+ b[2] = q0[b[2]]; -+ b[3] = q1[b[3]]; -+ -+ *((DWORD *) b) ^= k32[1]; -+ -+ b[0] = q0[b[0]]; -+ b[1] = q0[b[1]]; -+ b[2] = q1[b[2]]; -+ b[3] = q1[b[3]]; -+ -+ *((DWORD *) b) ^= k32[0]; -+ -+ b[0] = q1[b[0]]; -+ b[1] = q0[b[1]]; -+ b[2] = q1[b[2]]; -+ b[3] = q0[b[3]]; -+ } -+ -+ -+ /* Now perform the MDS matrix multiply inline. 
*/ -+ return mds_mul(b); -+} -+ -+ -+static void init_sbox(fish2_key *key) -+{ DWORD x,*sbox,z,*k32; -+ int i,keyLen; -+ BYTE b[4]; -+ -+ k32=key->sboxKeys; -+ keyLen=key->keyLen; -+ sbox=key->sbox_full; -+ -+ x=0; -+ for (i=0;i<256;i++,x+=0x01010101) -+ { -+ *((DWORD *) b) = Bswap(x); /* make b[0] = LSB, b[3] = MSB */ -+ -+ switch (((keyLen + 63) / 64) & 3) -+ { -+ case 0: /* 256 bits of key */ -+ b[0] = q1[b[0]]; -+ b[1] = q0[b[1]]; -+ b[2] = q0[b[2]]; -+ b[3] = q1[b[3]]; -+ -+ *((DWORD *) b) ^= k32[3]; -+ -+ /* fall thru, having pre-processed b[0]..b[3] with k32[3] */ -+ case 3: /* 192 bits of key */ -+ b[0] = q1[b[0]]; -+ b[1] = q1[b[1]]; -+ b[2] = q0[b[2]]; -+ b[3] = q0[b[3]]; -+ -+ *((DWORD *) b) ^= k32[2]; -+ -+ /* fall thru, having pre-processed b[0]..b[3] with k32[2] */ -+ case 2: /* 128 bits of key */ -+ b[0] = q0[b[0]]; -+ b[1] = q1[b[1]]; -+ b[2] = q0[b[2]]; -+ b[3] = q1[b[3]]; -+ -+ *((DWORD *) b) ^= k32[1]; -+ -+ b[0] = q0[b[0]]; -+ b[1] = q0[b[1]]; -+ b[2] = q1[b[2]]; -+ b[3] = q1[b[3]]; -+ -+ *((DWORD *) b) ^= k32[0]; -+ -+ b[0] = q1[b[0]]; -+ b[1] = q0[b[1]]; -+ b[2] = q1[b[2]]; -+ b[3] = q0[b[3]]; -+ } -+ -+ z=Mul_EF[b[0]]; -+ z<<=8; -+ z|=Mul_EF[b[0]]; -+ z<<=8; -+ z|=Mul_5B[b[0]]; -+ z<<=8; -+ z|=b[0]; -+ -+ sbox[i]=z; -+ -+ z=b[1]; -+ z<<=8; -+ z|=Mul_5B[b[1]]; -+ z<<=8; -+ z|=Mul_EF[b[1]]; -+ z<<=8; -+ z|=Mul_EF[b[1]]; -+ -+ sbox[i+256]=z; -+ -+ z=Mul_EF[b[2]]; -+ z<<=8; -+ z|=b[2]; -+ z<<=8; -+ z|=Mul_EF[b[2]]; -+ z<<=8; -+ z|=Mul_5B[b[2]]; -+ -+ sbox[i+512]=z; -+ -+ z=Mul_5B[b[3]]; -+ z<<=8; -+ z|=Mul_EF[b[3]]; -+ z<<=8; -+ z|=b[3]; -+ z<<=8; -+ z|=Mul_5B[b[3]]; -+ -+ sbox[i+768]=z; -+ } -+} -+ -+ -+/* Reed-Solomon code parameters: (12,8) reversible code -+ g(x) = x**4 + (a + 1/a) x**3 + a x**2 + (a + 1/a) x + 1 -+ where a = primitive root of field generator 0x14D */ -+#define RS_GF_FDBK 0x14D /* field generator */ -+#define RS_rem(x) \ -+ { BYTE b = x >> 24; \ -+ DWORD g2 = ((b << 1) ^ ((b & 0x80) ? RS_GF_FDBK : 0 )) & 0xFF; \ -+ DWORD g3 = ((b >> 1) & 0x7F) ^ ((b & 1) ? RS_GF_FDBK >> 1 : 0 ) ^ g2 ; \ -+ x = (x << 8) ^ (g3 << 24) ^ (g2 << 16) ^ (g3 << 8) ^ b; \ -+ } -+ -+static DWORD rs_mds(DWORD k0, DWORD k1) -+{ -+ int i, j; -+ DWORD r; -+ -+ for (i = r = 0; i < 2; i++) -+ { -+ r ^= (i) ? 
k0 : k1; /* merge in 32 more key bits */ -+ for (j = 0; j < 4; j++) /* shift one byte at a time */ -+ RS_rem(r); -+ } -+ return r; -+} -+ -+ -+#define INPUT_WHITEN 0 /* subkey array indices */ -+#define OUTPUT_WHITEN 4 -+#define ROUND_SUBKEYS 8 /* use 2 * (# rounds) */ -+#define TOTAL_SUBKEYS 40 -+ -+static void init_key(fish2_key * key) -+{ -+ int i, k64Cnt; -+ int keyLen = key->keyLen; -+ int subkeyCnt = TOTAL_SUBKEYS; -+ DWORD A, B; -+ DWORD k32e[4], k32o[4]; /* even/odd key dwords */ -+ -+ k64Cnt = (keyLen + 63) / 64; /* round up to next multiple of 64 bits */ -+ for (i = 0; i < k64Cnt; i++) -+ { /* split into even/odd key dwords */ -+ k32e[i] = ((DWORD *)key->key)[2 * i]; -+ k32o[i] = ((DWORD *)key->key)[2 * i + 1]; -+ /* compute S-box keys using (12,8) Reed-Solomon code over GF(256) */ -+ /* store in reverse order */ -+ key->sboxKeys[k64Cnt - 1 - i] = -+ Bswap(rs_mds(Bswap(k32e[i]), Bswap(k32o[i]))); -+ -+ } -+ -+ for (i = 0; i < subkeyCnt / 2; i++) /* compute round subkeys for PHT */ -+ { -+ A = f32(i * 0x02020202, k32e, keyLen); /* A uses even key dwords */ -+ B = f32(i * 0x02020202 + 0x01010101, k32o, keyLen); /* B uses odd key -+ dwords */ -+ B = ROL(B, 8); -+ key->subKeys[2 * i] = A + B; /* combine with a PHT */ -+ key->subKeys[2 * i + 1] = ROL(A + 2 * B, 9); -+ } -+ -+ init_sbox(key); -+} -+ -+ -+static inline DWORD f32_sbox(DWORD x,DWORD *sbox) -+{ -+ /* Run each byte thru 8x8 S-boxes, xoring with key byte at each stage. */ -+ /* Note that each byte goes through a different combination of S-boxes. */ -+ -+ return (sbox[ (x) &0xff]^ -+ sbox[256 + (((x)>> 8)&0xff)]^ -+ sbox[512 + (((x)>>16)&0xff)]^ -+ sbox[768 + (((x)>>24)&0xff)]); -+} -+ -+#define roundE_m(x0,x1,x2,x3,rnd) \ -+ t0 = f32_sbox( x0, key->sbox_full ) ; \ -+ t1 = f32_sbox( ROL(x1,8), key->sbox_full ); \ -+ x2 ^= t0 + t1 + key->subKeys[2*rnd+8]; \ -+ x3 = ROL(x3,1); \ -+ x3 ^= t0 + 2*t1 + key->subKeys[2*rnd+9]; \ -+ x2 = ROR(x2,1); -+ -+ -+static int blockEncrypt_CBC(fish2_key *key,BYTE *src,BYTE *dst,int len) -+{ DWORD xx0,xx1,xx2,xx3,t0,t1,iv0,iv1,iv2,iv3; -+ -+ if (len & 0xF) return -1; -+ -+ iv0=0; -+ iv1=0; -+ iv2=0; -+ iv3=0; -+ for (;len>=16;len-=16) -+ -+ { -+ if ( ( len & 0x1FF) == 0) -+ { iv0=0; -+ iv1=0; -+ iv2=0; -+ iv3=0; -+ } -+ -+ xx0=Bswap(((DWORD *)src)[0]) ^ key->subKeys[0] ^ iv0; -+ xx1=Bswap(((DWORD *)src)[1]) ^ key->subKeys[1] ^ iv1; -+ xx2=Bswap(((DWORD *)src)[2]) ^ key->subKeys[2] ^ iv2; -+ xx3=Bswap(((DWORD *)src)[3]) ^ key->subKeys[3] ^ iv3; -+ -+ src+=16; -+ -+ roundE_m(xx0,xx1,xx2,xx3,0); -+ roundE_m(xx2,xx3,xx0,xx1,1); -+ roundE_m(xx0,xx1,xx2,xx3,2); -+ roundE_m(xx2,xx3,xx0,xx1,3); -+ roundE_m(xx0,xx1,xx2,xx3,4); -+ roundE_m(xx2,xx3,xx0,xx1,5); -+ roundE_m(xx0,xx1,xx2,xx3,6); -+ roundE_m(xx2,xx3,xx0,xx1,7); -+ roundE_m(xx0,xx1,xx2,xx3,8); -+ roundE_m(xx2,xx3,xx0,xx1,9); -+ roundE_m(xx0,xx1,xx2,xx3,10); -+ roundE_m(xx2,xx3,xx0,xx1,11); -+ roundE_m(xx0,xx1,xx2,xx3,12); -+ roundE_m(xx2,xx3,xx0,xx1,13); -+ roundE_m(xx0,xx1,xx2,xx3,14); -+ roundE_m(xx2,xx3,xx0,xx1,15); -+ -+ iv0=xx2 ^ key->subKeys[4]; -+ iv1=xx3 ^ key->subKeys[5]; -+ iv2=xx0 ^ key->subKeys[6]; -+ iv3=xx1 ^ key->subKeys[7]; -+ -+ ((DWORD *)dst)[0] = Bswap(iv0); -+ ((DWORD *)dst)[1] = Bswap(iv1); -+ ((DWORD *)dst)[2] = Bswap(iv2); -+ ((DWORD *)dst)[3] = Bswap(iv3); -+ dst+=16; -+ } -+ return len; -+} -+ -+#define roundD_m(x0,x1,x2,x3,rnd) \ -+ t0 = f32_sbox( x0, key->sbox_full); \ -+ t1 = f32_sbox( ROL(x1,8),key->sbox_full); \ -+ x2 = ROL(x2,1); \ -+ x3 ^= t0 + 2*t1 + key->subKeys[rnd*2+9]; \ -+ x3 = ROR(x3,1); \ -+ x2 ^= t0 + t1 
+ key->subKeys[rnd*2+8]; -+ -+ -+static int blockDecrypt_CBC(fish2_key *key,BYTE *src,BYTE *dst,int len) -+{ DWORD xx0,xx1,xx2,xx3,t0,t1,lx0,lx1,lx2,lx3,iv0,iv1,iv2,iv3; -+ -+ if (len & 0xF) return -1; -+ -+ iv0=0; -+ iv1=0; -+ iv2=0; -+ iv3=0; -+ -+ for (;len>=16;len-=16) -+ { -+ if ( ( len & 0x1FF) == 0) -+ { iv0=0; -+ iv1=0; -+ iv2=0; -+ iv3=0; -+ } -+ -+ lx0=iv0;iv0=Bswap(((DWORD *)src)[0]);xx0=iv0 ^ key->subKeys[4]; -+ lx1=iv1;iv1=Bswap(((DWORD *)src)[1]);xx1=iv1 ^ key->subKeys[5]; -+ lx2=iv2;iv2=Bswap(((DWORD *)src)[2]);xx2=iv2 ^ key->subKeys[6]; -+ lx3=iv3;iv3=Bswap(((DWORD *)src)[3]);xx3=iv3 ^ key->subKeys[7]; -+ src+=16; -+ -+ roundD_m(xx0,xx1,xx2,xx3,15); -+ roundD_m(xx2,xx3,xx0,xx1,14); -+ roundD_m(xx0,xx1,xx2,xx3,13); -+ roundD_m(xx2,xx3,xx0,xx1,12); -+ roundD_m(xx0,xx1,xx2,xx3,11); -+ roundD_m(xx2,xx3,xx0,xx1,10); -+ roundD_m(xx0,xx1,xx2,xx3,9); -+ roundD_m(xx2,xx3,xx0,xx1,8); -+ roundD_m(xx0,xx1,xx2,xx3,7); -+ roundD_m(xx2,xx3,xx0,xx1,6); -+ roundD_m(xx0,xx1,xx2,xx3,5); -+ roundD_m(xx2,xx3,xx0,xx1,4); -+ roundD_m(xx0,xx1,xx2,xx3,3); -+ roundD_m(xx2,xx3,xx0,xx1,2); -+ roundD_m(xx0,xx1,xx2,xx3,1); -+ roundD_m(xx2,xx3,xx0,xx1,0); -+ -+ ((DWORD *)dst)[0] = Bswap(xx2 ^ key->subKeys[0] ^ lx0); -+ ((DWORD *)dst)[1] = Bswap(xx3 ^ key->subKeys[1] ^ lx1); -+ ((DWORD *)dst)[2] = Bswap(xx0 ^ key->subKeys[2] ^ lx2); -+ ((DWORD *)dst)[3] = Bswap(xx1 ^ key->subKeys[3] ^ lx3); -+ dst+=16; -+ } -+ return len; -+} -+ -+ -+int transfer_fish2(struct loop_device *lo, int cmd, -+ struct page *raw_page, unsigned raw_off, -+ struct page *loop_page, unsigned loop_off, -+ int size, sector_t IV) -+{ -+ char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; -+ char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; -+ -+ if (cmd == READ) -+ blockDecrypt_CBC((fish2_key *)lo->key_data,raw_buf,loop_buf,size); -+ else -+ blockEncrypt_CBC((fish2_key *)lo->key_data,loop_buf,raw_buf,size); -+ -+ kunmap_atomic(raw_buf, KM_USER0); -+ kunmap_atomic(loop_buf, KM_USER1); -+ cond_resched(); -+ -+ return 0; -+} -+ -+int fish2_init(struct loop_device *lo,const struct loop_info64 *info) -+{ fish2_key *key; -+ -+ if (info->lo_encrypt_key_size<16 || info->lo_encrypt_key_size>32) -+ return -EINVAL; -+ -+ key=(fish2_key *)kmalloc(sizeof(fish2_key),GFP_KERNEL); -+ -+ if (key==NULL) -+ return -ENOMEM; -+ -+ lo->key_data=key; -+ -+ memset(key->key,0,32); -+ -+ key->keyLen=info->lo_encrypt_key_size << 3; -+ memcpy(key->key,info->lo_encrypt_key,info->lo_encrypt_key_size); -+ -+ init_key(key); -+ -+ return 0; -+} -+ -+static int fish2_release(struct loop_device *lo) -+{ if (lo->key_data!=NULL) -+ { -+ kfree(lo->key_data); -+ lo->key_data=NULL; -+ } -+ return(0); -+} -+ -+static struct loop_func_table fish2_funcs = -+{ .number = LO_CRYPT_FISH2, -+ .transfer = transfer_fish2, -+ .init = fish2_init, -+ .release = fish2_release, -+ .owner = THIS_MODULE -+}; -+ -+int __init loop_fish2_init(void) -+{ -+ int err; -+ -+ if ((err=loop_register_transfer(&fish2_funcs))) -+ { -+ printk(KERN_WARNING "Couldn't register Twofish encryption\n"); -+ return err; -+ } -+ printk(KERN_INFO "loop: registered Twofish encryption \n"); -+ return 0; -+} -+ -+void __exit loop_fish2_exit(void) -+{ -+ if (loop_unregister_transfer(LO_CRYPT_FISH2)) -+ printk(KERN_WARNING "Couldn't unregister Twofish encryption\n"); -+ printk(KERN_INFO "loop: unregistered Twofish encryption \n"); -+} -+ -+module_init(loop_fish2_init); -+module_exit(loop_fish2_exit); -+MODULE_LICENSE("GPL"); diff --git a/patches.suse/unmap_vmas-lat b/patches.suse/unmap_vmas-lat index 
6776999..9c32811 100644 --- a/patches.suse/unmap_vmas-lat +++ b/patches.suse/unmap_vmas-lat @@ -15,7 +15,7 @@ latency improvements that have nothing to do with preempt. --- a/mm/memory.c +++ b/mm/memory.c -@@ -1061,11 +1061,11 @@ static unsigned long unmap_page_range(st +@@ -1060,11 +1060,11 @@ static unsigned long unmap_page_range(st return addr; } diff --git a/patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch b/patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch index 7512e69..653631f 100644 --- a/patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch +++ b/patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch @@ -17,20 +17,20 @@ Signed-off-by: Brandon Philips --- a/drivers/media/video/uvc/uvc_ctrl.c +++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -811,6 +811,10 @@ int uvc_query_v4l2_ctrl(struct uvc_video - unsigned int i; - int ret; +@@ -862,6 +862,10 @@ int uvc_query_v4l2_ctrl(struct uvc_video + if (ret < 0) + return -ERESTARTSYS; + if ((chain->dev->quirks & UVC_QUIRK_HUE_EPIPE) && + (v4l2_ctrl->id == V4L2_CID_HUE)) + return -EINVAL; + ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping); - if (ctrl == NULL) - return -EINVAL; + if (ctrl == NULL) { + ret = -EINVAL; --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c -@@ -2198,6 +2198,14 @@ static struct usb_device_id uvc_ids[] = +@@ -2239,6 +2239,14 @@ static struct usb_device_id uvc_ids[] = .bInterfaceProtocol = 0, .driver_info = UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_IGNORE_SELECTOR_UNIT }, @@ -47,11 +47,11 @@ Signed-off-by: Brandon Philips {} --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h -@@ -163,6 +163,7 @@ struct uvc_xu_control { - #define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020 +@@ -182,6 +182,7 @@ struct uvc_xu_control { #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 #define UVC_QUIRK_PROBE_DEF 0x00000100 -+#define UVC_QUIRK_HUE_EPIPE 0x00000200 + #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 ++#define UVC_QUIRK_HUE_EPIPE 0x00000400 /* Format flags */ #define UVC_FMT_FLAG_COMPRESSED 0x00000001 diff --git a/patches.suse/wireless-no-aes-select b/patches.suse/wireless-no-aes-select index 94197de..b9570e8 100644 --- a/patches.suse/wireless-no-aes-select +++ b/patches.suse/wireless-no-aes-select @@ -13,7 +13,7 @@ Remove that. The optimized versions provide the cipher as well. --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig -@@ -114,7 +114,6 @@ config AIRO_CS +@@ -121,7 +121,6 @@ config AIRO_CS select WEXT_SPY select WEXT_PRIV select CRYPTO @@ -29,5 +29,5 @@ Remove that. The optimized versions provide the cipher as well. 
select CRYPTO_ARC4 - select CRYPTO_AES select CRC32 + select AVERAGE ---help--- - This option enables the hardware independent IEEE 802.11 diff --git a/patches.suse/x86-mark_rodata_rw.patch b/patches.suse/x86-mark_rodata_rw.patch index affdcdc..60308ea 100644 --- a/patches.suse/x86-mark_rodata_rw.patch +++ b/patches.suse/x86-mark_rodata_rw.patch @@ -21,12 +21,12 @@ Acked-by: Andres Gruenbacher arch/x86/include/asm/cacheflush.h | 3 +++ arch/x86/mm/init_32.c | 14 ++++++++++++++ arch/x86/mm/init_64.c | 31 +++++++++++++++++++++++++------ - arch/x86/mm/pageattr.c | 30 ++++++++++++++++++++++++++++-- - 4 files changed, 70 insertions(+), 8 deletions(-) + arch/x86/mm/pageattr.c | 31 +++++++++++++++++++++++++++++-- + 4 files changed, 71 insertions(+), 8 deletions(-) --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h -@@ -135,6 +135,7 @@ int set_memory_x(unsigned long addr, int +@@ -101,6 +101,7 @@ int set_memory_x(unsigned long addr, int int set_memory_nx(unsigned long addr, int numpages); int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); @@ -34,7 +34,7 @@ Acked-by: Andres Gruenbacher int set_memory_np(unsigned long addr, int numpages); int set_memory_4k(unsigned long addr, int numpages); -@@ -170,12 +171,14 @@ int set_pages_x(struct page *page, int n +@@ -138,12 +139,14 @@ int set_pages_x(struct page *page, int n int set_pages_nx(struct page *page, int numpages); int set_pages_ro(struct page *page, int numpages); int set_pages_rw(struct page *page, int numpages); @@ -51,9 +51,9 @@ Acked-by: Andres Gruenbacher void set_kernel_text_rw(void); --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c -@@ -1068,6 +1068,20 @@ void mark_rodata_ro(void) - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); +@@ -967,5 +967,19 @@ void mark_rodata_ro(void) #endif + mark_nxdata_nx(); } +EXPORT_SYMBOL_GPL(mark_rodata_ro); + @@ -71,10 +71,9 @@ Acked-by: Andres Gruenbacher +EXPORT_SYMBOL_GPL(mark_rodata_rw); #endif - int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c -@@ -755,6 +755,7 @@ void set_kernel_text_ro(void) +@@ -782,6 +782,7 @@ void set_kernel_text_ro(void) set_memory_ro(start, (end - start) >> PAGE_SHIFT); } @@ -82,7 +81,7 @@ Acked-by: Andres Gruenbacher void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); -@@ -787,15 +788,33 @@ void mark_rodata_ro(void) +@@ -814,15 +815,33 @@ void mark_rodata_ro(void) set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif @@ -121,10 +120,10 @@ Acked-by: Andres Gruenbacher +EXPORT_SYMBOL_GPL(mark_rodata_rw); #endif - int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, + int kern_addr_valid(unsigned long addr) --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c -@@ -245,6 +245,8 @@ static void cpa_flush_array(unsigned lon +@@ -246,6 +246,8 @@ static void cpa_flush_array(unsigned lon } } @@ -133,7 +132,7 @@ Acked-by: Andres Gruenbacher /* * Certain areas of memory on x86 require very specific protection flags, * for example the BIOS area or kernel text. Callers don't always get this -@@ -276,8 +278,10 @@ static inline pgprot_t static_protection +@@ -279,8 +281,11 @@ static inline pgprot_t static_protection * catches all aliases. 
*/ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, @@ -143,10 +142,11 @@ Acked-by: Andres Gruenbacher + if (!static_protections_allow_rodata) + pgprot_val(forbidden) |= _PAGE_RW; + } ++ #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) /* -@@ -1134,6 +1138,21 @@ int set_memory_rw(unsigned long addr, in +@@ -1155,6 +1160,21 @@ int set_memory_rw(unsigned long addr, in } EXPORT_SYMBOL_GPL(set_memory_rw); @@ -168,7 +168,7 @@ Acked-by: Andres Gruenbacher int set_memory_np(unsigned long addr, int numpages) { return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); -@@ -1248,6 +1267,13 @@ int set_pages_rw(struct page *page, int +@@ -1288,6 +1308,13 @@ int set_pages_rw(struct page *page, int return set_memory_rw(addr, numpages); } diff --git a/patches.suse/xfs-dmapi-2-6-34-api-changes b/patches.suse/xfs-dmapi-2-6-34-api-changes deleted file mode 100644 index 4f30ca8..0000000 --- a/patches.suse/xfs-dmapi-2-6-34-api-changes +++ /dev/null @@ -1,91 +0,0 @@ -From: Jeff Mahoney -Subject: xfs-dmapi: 2.6.34 API changes -Patch-mainline: Never - - 2.6.34-rc1 changed some XFS APIs. This patch updates them. - -Signed-off-by: Jeff Mahoney -Acked-by: Jeff Mahoney ---- - fs/xfs/dmapi/xfs_dm.c | 4 ++-- - fs/xfs/linux-2.6/xfs_file.c | 23 +++++++++++++++-------- - fs/xfs/linux-2.6/xfs_iops.h | 3 +++ - 3 files changed, 20 insertions(+), 10 deletions(-) - ---- a/fs/xfs/dmapi/xfs_dm.c -+++ b/fs/xfs/dmapi/xfs_dm.c -@@ -1956,7 +1956,7 @@ xfs_dm_get_dmattr( - alloc_size = XFS_BUG_KLUDGE; - if (alloc_size > ATTR_MAX_VALUELEN) - alloc_size = ATTR_MAX_VALUELEN; -- value = kmem_alloc(alloc_size, KM_SLEEP | KM_LARGE); -+ value = kmem_zalloc_large(alloc_size); - - /* Get the attribute's value. */ - -@@ -2877,7 +2877,7 @@ xfs_dm_sync_by_handle( - /* We need to protect against concurrent writers.. */ - ret = filemap_fdatawrite(inode->i_mapping); - down_rw_sems(inode, DM_FLAGS_IMUX); -- err = -xfs_fsync(ip); -+ err = xfs_fsync(inode, 1); - if (!ret) - ret = err; - up_rw_sems(inode, DM_FLAGS_IMUX); ---- a/fs/xfs/linux-2.6/xfs_file.c -+++ b/fs/xfs/linux-2.6/xfs_file.c -@@ -100,13 +100,10 @@ xfs_iozero( - return (-status); - } - --STATIC int --xfs_file_fsync( -- struct file *file, -- struct dentry *dentry, -- int datasync) -+int -+xfs_fsync(struct inode *inode, int datasync) - { -- struct xfs_inode *ip = XFS_I(dentry->d_inode); -+ struct xfs_inode *ip = XFS_I(inode); - struct xfs_trans *tp; - int error = 0; - int log_flushed = 0; -@@ -141,8 +138,8 @@ xfs_file_fsync( - * might gets cleared when the inode gets written out via the AIL - * or xfs_iflush_cluster. 
- */ -- if (((dentry->d_inode->i_state & I_DIRTY_DATASYNC) || -- ((dentry->d_inode->i_state & I_DIRTY_SYNC) && !datasync)) && -+ if (((inode->i_state & I_DIRTY_DATASYNC) || -+ ((inode->i_state & I_DIRTY_SYNC) && !datasync)) && - ip->i_update_core) { - /* - * Kick off a transaction to log the inode core to get the -@@ -210,6 +207,16 @@ xfs_file_fsync( - return -error; - } - -+STATIC int -+xfs_file_fsync( -+ struct file *file, -+ struct dentry *dentry, -+ int datasync) -+{ -+ return xfs_fsync(dentry->d_inode, datasync); -+} -+ -+ - STATIC ssize_t - xfs_file_aio_read( - struct kiocb *iocb, ---- a/fs/xfs/linux-2.6/xfs_iops.h -+++ b/fs/xfs/linux-2.6/xfs_iops.h -@@ -27,4 +27,7 @@ extern ssize_t xfs_vn_listxattr(struct d - - extern void xfs_setup_inode(struct xfs_inode *); - -+extern int xfs_fsync(struct inode *, int); -+ - #endif /* __XFS_IOPS_H__ */ -+ diff --git a/patches.suse/xfs-dmapi-enable b/patches.suse/xfs-dmapi-enable deleted file mode 100644 index c4bed85..0000000 --- a/patches.suse/xfs-dmapi-enable +++ /dev/null @@ -1,128 +0,0 @@ -Date: Thu, 09 Oct 2008 17:11:14 +1100 -From: Donald Douwsma -Subject: VFS changes to support DMAPI -Patch-mainline: not yet -References: bnc#450658 - -VFS changes to support DMAPI including open_exec(), mprotect() -and build infastructure. - -Acked-by: Jan Kara - ---- - MAINTAINERS | 7 +++++++ - fs/Kconfig | 19 +++++++++++++++++++ - fs/Makefile | 2 ++ - fs/exec.c | 6 ++++++ - include/linux/fs.h | 2 ++ - include/linux/mm.h | 3 +++ - mm/mprotect.c | 5 +++++ - 7 files changed, 44 insertions(+) - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -6228,6 +6228,13 @@ S: Supported - F: Documentation/filesystems/xfs.txt - F: fs/xfs/ - -+DMAPI -+P: Silicon Graphics Inc -+M: xfs-masters@oss.sgi.com -+L: xfs@oss.sgi.com -+W: http://oss.sgi.com/projects/xfs -+S: Supported -+ - XILINX SYSTEMACE DRIVER - M: Grant Likely - W: http://www.secretlab.ca/ ---- a/fs/Kconfig -+++ b/fs/Kconfig -@@ -57,6 +57,25 @@ config FILE_LOCKING - - source "fs/notify/Kconfig" - -+config DMAPI -+ tristate "DMAPI support" -+ help -+ The Data Management API is a system interface used to implement -+ the interface defined in the X/Open document: -+ "Systems Management: Data Storage Management (XDSM) API", -+ dated February 1997. This interface is used by hierarchical -+ storage management systems. -+ -+ If any DMAPI-capable filesystem is built into the kernel, then -+ DMAPI must also be built into the kernel. -+ -+config DMAPI_DEBUG -+ bool "DMAPI debugging support" -+ depends on DMAPI -+ help -+ If you don't know whether you need it, then you don't need it: -+ answer N. 
-+ - source "fs/quota/Kconfig" - - source "fs/autofs/Kconfig" ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -53,6 +53,8 @@ obj-$(CONFIG_GENERIC_ACL) += generic_acl - - obj-y += quota/ - -+obj-$(CONFIG_DMAPI) += dmapi/ -+ - obj-$(CONFIG_PROC_FS) += proc/ - obj-y += partitions/ - obj-$(CONFIG_SYSFS) += sysfs/ ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -680,6 +680,12 @@ struct file *open_exec(const char *name) - - fsnotify_open(file->f_path.dentry); - -+ if (file->f_op && file->f_op->open_exec) { -+ err = file->f_op->open_exec(file->f_path.dentry->d_inode); -+ if (err) -+ goto exit; -+ } -+ - err = deny_write_access(file); - if (err) - goto exit; ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1508,6 +1508,8 @@ struct file_operations { - int (*flock) (struct file *, int, struct file_lock *); - ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); - ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); -+#define HAVE_FOP_OPEN_EXEC -+ int (*open_exec) (struct inode *); - int (*setlease)(struct file *, long, struct file_lock **); - }; - ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -187,6 +187,9 @@ struct vm_operations_struct { - void (*close)(struct vm_area_struct * area); - int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - -+#define HAVE_VMOP_MPROTECT -+ int (*mprotect)(struct vm_area_struct * area, unsigned int newflags); -+ - /* notification that a previously read-only page is about to become - * writable, if an error is returned it will cause a SIGBUS */ - int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); ---- a/mm/mprotect.c -+++ b/mm/mprotect.c -@@ -294,6 +294,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, - if (error) - goto out; - -+ if (vma->vm_ops && vma->vm_ops->mprotect) { -+ error = vma->vm_ops->mprotect(vma, newflags); -+ if (error < 0) -+ goto out; -+ } - tmp = vma->vm_end; - if (tmp > end) - tmp = end; diff --git a/patches.suse/xfs-dmapi-fix-incompatible-pointer-type-warning b/patches.suse/xfs-dmapi-fix-incompatible-pointer-type-warning deleted file mode 100644 index 7435671..0000000 --- a/patches.suse/xfs-dmapi-fix-incompatible-pointer-type-warning +++ /dev/null @@ -1,30 +0,0 @@ -From: Jeff Mahoney -Subject: xfs/dmapi: fix incompatible pointer type warning -Patch-mainline: Whenever dmapi gets upstream - - This fixes an incompatible pointer initialization warning. - -Signed-off-by: Jeff Mahoney ---- - fs/xfs/dmapi/xfs_dm.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/fs/xfs/dmapi/xfs_dm.c -+++ b/fs/xfs/dmapi/xfs_dm.c -@@ -3152,12 +3152,12 @@ STATIC int - xfs_dm_send_namesp_event( - dm_eventtype_t event, - struct xfs_mount *mp, -- xfs_inode_t *ip1, -+ struct xfs_inode *ip1, - dm_right_t vp1_right, -- xfs_inode_t *ip2, -+ struct xfs_inode *ip2, - dm_right_t vp2_right, -- const char *name1, -- const char *name2, -+ const unsigned char *name1, -+ const unsigned char *name2, - mode_t mode, - int retcode, - int flags) diff --git a/patches.suse/xfs-dmapi-re-add-flags-for-xfs_free_eofblocks b/patches.suse/xfs-dmapi-re-add-flags-for-xfs_free_eofblocks deleted file mode 100644 index 8938711..0000000 --- a/patches.suse/xfs-dmapi-re-add-flags-for-xfs_free_eofblocks +++ /dev/null @@ -1,74 +0,0 @@ -From: Jeff Mahoney -Subject: xfs/dmapi: Re-add flags for xfs_free_eofblocks -Patch-mainline: Depends on dmapi being upstream - - 2.6.33 removed the NOLOCK flag from xfs_free_eofblocks. 
xfs_dm_punch_hole - needs it because it already holds the iolock for the vnode it's operating - on. This patch restores the flag to avoid a pretty obvious deadlock in - dmapi. - -Signed-off-by: Jeff Mahoney ---- - fs/xfs/dmapi/xfs_dm.c | 2 +- - fs/xfs/xfs_rw.h | 6 ++++++ - fs/xfs/xfs_vnodeops.c | 10 +++------- - 3 files changed, 10 insertions(+), 8 deletions(-) - ---- a/fs/xfs/dmapi/xfs_dm.c -+++ b/fs/xfs/dmapi/xfs_dm.c -@@ -2481,7 +2481,7 @@ xfs_dm_punch_hole( - * leaving them around if we are migrating the file.... - */ - if (!error && (len == 0)) { -- error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_NOLOCK); -+ error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_HASLOCK); - } - - /* ---- a/fs/xfs/xfs_rw.h -+++ b/fs/xfs/xfs_rw.h -@@ -48,6 +48,12 @@ extern xfs_extlen_t xfs_get_extsz_hint(s - /* - * Prototypes for functions in xfs_vnodeops.c. - */ -+ -+/* -+ * Flags for xfs_free_eofblocks -+ */ -+#define XFS_FREE_EOF_TRYLOCK (1<<0) -+#define XFS_FREE_EOF_HASLOCK (1<<1) - extern int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip, - int flags); - ---- a/fs/xfs/xfs_vnodeops.c -+++ b/fs/xfs/xfs_vnodeops.c -@@ -584,11 +584,6 @@ xfs_readlink( - } - - /* -- * Flags for xfs_free_eofblocks -- */ --#define XFS_FREE_EOF_TRYLOCK (1<<0) -- --/* - * This is called by xfs_inactive to free any blocks beyond eof - * when the link count isn't zero and by xfs_dm_punch_hole() when - * punching a hole to EOF. -@@ -652,14 +647,15 @@ xfs_free_eofblocks( - xfs_trans_cancel(tp, 0); - return 0; - } -- } else { -+ } else if (!(flags & XFS_FREE_EOF_HASLOCK)){ - xfs_ilock(ip, XFS_IOLOCK_EXCL); - } - error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, - ip->i_size); - if (error) { - xfs_trans_cancel(tp, 0); -- xfs_iunlock(ip, XFS_IOLOCK_EXCL); -+ if (!(flags & XFS_FREE_EOF_HASLOCK)) -+ xfs_iunlock(ip, XFS_IOLOCK_EXCL); - return error; - } - diff --git a/patches.suse/xfs-dmapi-src b/patches.suse/xfs-dmapi-src deleted file mode 100644 index 75fb4a9..0000000 --- a/patches.suse/xfs-dmapi-src +++ /dev/null @@ -1,10791 +0,0 @@ -Date: Thu, 09 Oct 2008 17:11:31 +1100 -From: Donald Douwsma -Subject: DMAPI Source -Patch-mainline: ? -References: bnc#450658 - -Acked-by: Jan Kara - ---- - fs/dmapi/Makefile | 53 + - fs/dmapi/Status | 128 +++ - fs/dmapi/dmapi.h | 1086 ++++++++++++++++++++++++++ - fs/dmapi/dmapi_attr.c | 93 ++ - fs/dmapi/dmapi_bulkattr.c | 170 ++++ - fs/dmapi/dmapi_config.c | 117 ++ - fs/dmapi/dmapi_dmattr.c | 228 +++++ - fs/dmapi/dmapi_event.c | 860 +++++++++++++++++++++ - fs/dmapi/dmapi_handle.c | 119 ++ - fs/dmapi/dmapi_hole.c | 119 ++ - fs/dmapi/dmapi_io.c | 142 +++ - fs/dmapi/dmapi_kern.h | 598 ++++++++++++++ - fs/dmapi/dmapi_mountinfo.c | 527 +++++++++++++ - fs/dmapi/dmapi_port.h | 138 +++ - fs/dmapi/dmapi_private.h | 619 +++++++++++++++ - fs/dmapi/dmapi_region.c | 91 ++ - fs/dmapi/dmapi_register.c | 1638 ++++++++++++++++++++++++++++++++++++++++ - fs/dmapi/dmapi_right.c | 1256 ++++++++++++++++++++++++++++++ - fs/dmapi/dmapi_session.c | 1824 +++++++++++++++++++++++++++++++++++++++++++++ - fs/dmapi/dmapi_sysent.c | 801 +++++++++++++++++++ - fs/dmapi/sv.h | 89 ++ - 21 files changed, 10696 insertions(+) - ---- /dev/null -+++ b/fs/dmapi/Makefile -@@ -0,0 +1,53 @@ -+# -+# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. -+# -+# This program is free software; you can redistribute it and/or modify it -+# under the terms of version 2 of the GNU General Public License as -+# published by the Free Software Foundation. 
-+# -+# This program is distributed in the hope that it would be useful, but -+# WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+# -+# Further, this software is distributed without any warranty that it is -+# free of the rightful claim of any third person regarding infringement -+# or the like. Any license provided herein, whether implied or -+# otherwise, applies only to this software file. Patent licenses, if -+# any, provided herein do not apply to combinations of this program with -+# other software, or any other product whatsoever. -+# -+# You should have received a copy of the GNU General Public License along -+# with this program; if not, write the Free Software Foundation, Inc., 59 -+# Temple Place - Suite 330, Boston MA 02111-1307, USA. -+# -+# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+# Mountain View, CA 94043, or: -+# -+# http://www.sgi.com -+# -+# For further information regarding this notice, see: -+# -+# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+# -+ -+ifeq ($(CONFIG_DMAPI_DEBUG),y) -+ EXTRA_CFLAGS += -DDEBUG -+ EXTRA_CFLAGS += -g -+endif -+ -+obj-$(CONFIG_DMAPI) += dmapi.o -+ -+dmapi-y += dmapi_sysent.o \ -+ dmapi_attr.o \ -+ dmapi_config.o \ -+ dmapi_bulkattr.o \ -+ dmapi_dmattr.o \ -+ dmapi_event.o \ -+ dmapi_handle.o \ -+ dmapi_hole.o \ -+ dmapi_io.o \ -+ dmapi_mountinfo.o \ -+ dmapi_region.o \ -+ dmapi_register.o \ -+ dmapi_right.o \ -+ dmapi_session.o ---- /dev/null -+++ b/fs/dmapi/Status -@@ -0,0 +1,128 @@ -+Jan21,04 - dm_get_bulkall is now implemented. roehrich -+ -+for linux: -+ -+ -+68 external interfaces in libdm -+ -+ 56 of those interfaces go through to dmi(), the kernel side of DMAPI -+ -+ -+ -+Functions known to work -+---------------------------------------------- -+ -+dm_create_session -+dm_create_userevent -+dm_destroy_session -+dm_getall_sessions -+dm_getall_tokens -+dm_get_allocinfo -+dm_get_bulkall -+dm_get_bulkattr -+dm_get_config_events -+dm_get_dmattr -+dm_get_eventlist -+dm_get_events -+dm_get_fileattr -+dm_get_region -+dm_handle_free -+dm_init_attrloc -+dm_init_service -+dm_obj_ref_hold -+dm_obj_ref_query -+dm_obj_ref_rele -+dm_path_to_fshandle -+dm_path_to_handle -+dm_punch_hole -+dm_query_session -+dm_read_invis -+dm_remove_dmattr -+dm_respond_event -+dm_send_msg -+dm_set_disp -+dm_set_dmattr -+dm_set_eventlist -+dm_set_fileattr -+dm_set_region -+dm_sync_by_handle -+dm_write_invis -+35 -+ -+Functions that seem to work (would like more rigorous test case) -+------------------------------------------ -+ -+dm_pending -+dm_probe_hole - one test case of test_hole.c fails -+dm_request_right -+3 -+ -+Functions untested but probably work -+---------------------------------------------- -+ -+dm_find_eventmsg -+dm_handle_cmp -+dm_handle_to_fshandle -+dm_handle_to_ino -+dm_release_right -+5 -+ -+Functions that do not work -+----------------------------------------- -+ -+dm_get_dioinfo - directio not implemented -+1 -+ -+Functions not supported in SGI DMAPI -+------------------------------------------------------------- -+ -+dm_clear_inherit -+dm_create_by_handle -+dm_getall_inherit -+dm_mkdir_by_handle -+dm_set_inherit -+dm_symlink_by_handle -+ -+ -+ -+ -+Functions that seem to work (would like more rigorous test case) -+---------------------------------------------------------------- -+ -+dm_get_config -+dm_downgrade_right -+dm_get_mountinfo -+dm_set_return_on_destory -+dm_upgrade_right -+ -+ -+ -+Functions that do not work 
-+----------------------------------------------------------------- -+ -+dm_fd_to_handle - Irix getf not implemented on linux -+dm_get_dirattrs - null pointer reference -+dm_handle_to_path -+dm_getall_dmattr - needs a copy_from_user in place of useracc -+ -+ -+Functions that are untested, but probably work -+----------------------------------------------------------------- -+ -+dm_getall_disp -+dm_handle_hash -+dm_handle_is_valid -+dm_handle_to_fsid -+dm_handle_to_igen -+dm_make_fshandle -+dm_make_handle -+dm_move_event -+dm_query_right -+ -+ -+ -+Other things not working -+---------------------------------- -+ -+- read/write events for memory-mapped I/O? -+ ---- /dev/null -+++ b/fs/dmapi/dmapi.h -@@ -0,0 +1,1086 @@ -+/* -+ * Copyright (c) 1995-2003 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2.1 of the GNU Lesser General Public License -+ * as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU Lesser General Public -+ * License along with this program; if not, write the Free Software -+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, -+ * USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+#ifndef __DMAPI_H__ -+#define __DMAPI_H__ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#ifndef __KERNEL__ -+#include -+#endif -+#include -+ -+#ifndef __user -+#define __user -+#endif -+ -+/************************************************************************** -+ * * -+ * The SGI implementation of DMAPI is based upon the X/Open document * -+ * Systems Management: Data Storage Managment (XDSM) API * -+ * dated February 1997. Not all DMAPI functions and structure fields * -+ * have been implemented. Most importantly, the DMAPI functions * -+ * dm_request_right, dm_release_right, dm_query_right, dm_upgrade_right * -+ * and dm_downgrade_right do not work as described in the specification. * -+ * * -+ * The XFS filesystem currently does not allow its locking mechanisms to * -+ * be externally accessed from user space. While the above-mentioned * -+ * dm_xxx_right functions exist and can be called by applications, they * -+ * always return successfully without actually obtaining any locks * -+ * within the filesystem. * -+ * * -+ * Applications which do not need full rights support and which only * -+ * make dm_xxx_right calls in order to satisfy the input requirements of * -+ * other DMAPI calls should be able to use these routines to avoid * -+ * having to implement special-case code for SGI platforms. 
Applications * -+ * which truely need the capabilities of a full implementation of rights * -+ * will unfortunately have to come up with alternate software solutions * -+ * until such time as rights can be completely implemented. * -+ * * -+ * Functions and structure fields defined within this file which are not * -+ * supported in the SGI implementation of DMAPI are indicated by comments * -+ * following their definitions such as "not supported", or "not * -+ * completely supported". Any function or field not so marked may be * -+ * assumed to work exactly according to the spec. * -+ * * -+ **************************************************************************/ -+ -+ -+ -+/* The first portion of this file contains defines and typedefs that are -+ DMAPI implementation-dependent, and could be different on other platforms. -+*/ -+ -+typedef __s64 dm_attrloc_t; -+typedef unsigned int dm_boolean_t; -+typedef __u64 dm_eventset_t; -+typedef __u64 dm_fsid_t; -+typedef __u64 dm_ino_t; -+typedef __u32 dm_igen_t; -+typedef __s64 dm_off_t; -+typedef unsigned int dm_sequence_t; -+typedef int dm_sessid_t; -+typedef __u64 dm_size_t; -+typedef __s64 dm_ssize_t; -+typedef int dm_token_t; -+ -+/* XXX dev_t, mode_t, and nlink_t are not the same size in kernel space -+ and user space. This affects the field offsets for dm_stat_t. -+ The following solution is temporary. -+ -+ user space sizes: dev_t=8 mode_t=4 nlink_t=4 -+ kernel space : dev_t=2 mode_t=2 nlink_t=2 -+ -+*/ -+typedef __s64 dm_dev_t; -+typedef int dm_mode_t; -+typedef int dm_nlink_t; -+ -+ -+#define DM_REGION_NOEVENT 0x0 -+#define DM_REGION_READ 0x1 -+#define DM_REGION_WRITE 0x2 -+#define DM_REGION_TRUNCATE 0x4 -+ -+/* Values for the mask argument used with dm_get_fileattr, dm_get_bulkattr, -+ dm_get_dirattrs, and dm_set_fileattr. -+*/ -+ -+#define DM_AT_MODE 0x0001 -+#define DM_AT_UID 0x0002 -+#define DM_AT_GID 0x0004 -+#define DM_AT_ATIME 0x0008 -+#define DM_AT_MTIME 0x0010 -+#define DM_AT_CTIME 0x0020 -+#define DM_AT_SIZE 0x0040 -+#define DM_AT_DTIME 0x0080 -+#define DM_AT_HANDLE 0x0100 -+#define DM_AT_EMASK 0x0200 -+#define DM_AT_PMANR 0x0400 -+#define DM_AT_PATTR 0x0800 -+#define DM_AT_STAT 0x1000 -+#define DM_AT_CFLAG 0x2000 -+ -+#define DM_EV_WAIT 0x1 /* used in dm_get_events() */ -+ -+#define DM_MOUNT_RDONLY 0x1 /* me_mode field in dm_mount_event_t */ -+ -+#define DM_RR_WAIT 0x1 -+ -+#define DM_UNMOUNT_FORCE 0x1 /* ne_mode field in dm_namesp_event_t */ -+ -+#define DM_WRITE_SYNC 0x1 /* used in dm_write_invis() */ -+ -+#define DM_SESSION_INFO_LEN 256 -+#define DM_NO_SESSION 0 -+#define DM_TRUE 1 -+#define DM_FALSE 0 -+#define DM_INVALID_TOKEN 0 -+#define DM_NO_TOKEN (-1) -+#define DM_INVALID_HANP NULL -+#define DM_INVALID_HLEN 0 -+#define DM_GLOBAL_HANP ((void *)(1LL)) -+#define DM_GLOBAL_HLEN ((size_t)(1)) -+#define DM_VER_STR_CONTENTS "SGI DMAPI (XDSM) API, Release 1.1." 
-+ -+ -+#define DMEV_SET(event_type, event_list) \ -+ ((event_list) |= (1 << (event_type))) -+#define DMEV_CLR(event_type, event_list) \ -+ ((event_list) &= ~(1 << (event_type))) -+#define DMEV_ISSET(event_type, event_list) \ -+ (int)(((event_list) & (1 << (event_type))) != 0) -+#define DMEV_ZERO(event_list) \ -+ (event_list) = 0 -+ -+ -+typedef struct { -+ int vd_offset; /* offset from start of containing struct */ -+ unsigned int vd_length; /* length of data starting at vd_offset */ -+} dm_vardata_t; -+ -+#define DM_GET_VALUE(p, field, type) \ -+ ((type) ((char *)(p) + (p)->field.vd_offset)) -+ -+#define DM_GET_LEN(p, field) \ -+ ((p)->field.vd_length) -+ -+#define DM_STEP_TO_NEXT(p, type) \ -+ ((type) ((p)->_link ? (char *)(p) + (p)->_link : NULL)) -+ -+ -+ -+ -+/* The remainder of this include file contains defines, typedefs, and -+ structures which are strictly defined by the DMAPI 2.3 specification. -+ -+ (The _link field which appears in several structures is an -+ implementation-specific way to implement DM_STEP_TO_NEXT, and -+ should not be referenced directly by application code.) -+*/ -+ -+ -+#define DM_ATTR_NAME_SIZE 8 -+ -+ -+struct dm_attrname { -+ unsigned char an_chars[DM_ATTR_NAME_SIZE]; -+}; -+typedef struct dm_attrname dm_attrname_t; -+ -+ -+struct dm_attrlist { -+ int _link; -+ dm_attrname_t al_name; -+ dm_vardata_t al_data; -+}; -+typedef struct dm_attrlist dm_attrlist_t; -+ -+ -+typedef enum { -+ DM_CONFIG_INVALID, -+ DM_CONFIG_BULKALL, -+ DM_CONFIG_CREATE_BY_HANDLE, -+ DM_CONFIG_DTIME_OVERLOAD, -+ DM_CONFIG_LEGACY, -+ DM_CONFIG_LOCK_UPGRADE, -+ DM_CONFIG_MAX_ATTR_ON_DESTROY, -+ DM_CONFIG_MAX_ATTRIBUTE_SIZE, -+ DM_CONFIG_MAX_HANDLE_SIZE, -+ DM_CONFIG_MAX_MANAGED_REGIONS, -+ DM_CONFIG_MAX_MESSAGE_DATA, -+ DM_CONFIG_OBJ_REF, -+ DM_CONFIG_PENDING, -+ DM_CONFIG_PERS_ATTRIBUTES, -+ DM_CONFIG_PERS_EVENTS, -+ DM_CONFIG_PERS_INHERIT_ATTRIBS, -+ DM_CONFIG_PERS_MANAGED_REGIONS, -+ DM_CONFIG_PUNCH_HOLE, -+ DM_CONFIG_TOTAL_ATTRIBUTE_SPACE, -+ DM_CONFIG_WILL_RETRY -+} dm_config_t; -+ -+ -+struct dm_dioinfo { /* non-standard SGI addition */ -+ unsigned int d_mem; -+ unsigned int d_miniosz; -+ unsigned int d_maxiosz; -+ dm_boolean_t d_dio_only; -+}; -+typedef struct dm_dioinfo dm_dioinfo_t; -+ -+ -+struct dm_dispinfo { -+ int _link; -+ unsigned int di_pad1; /* reserved; do not reference */ -+ dm_vardata_t di_fshandle; -+ dm_eventset_t di_eventset; -+}; -+typedef struct dm_dispinfo dm_dispinfo_t; -+ -+ -+#ifndef HAVE_DM_EVENTTYPE_T -+#define HAVE_DM_EVENTTYPE_T -+typedef enum { -+ DM_EVENT_INVALID = -1, -+ DM_EVENT_CANCEL = 0, /* not supported */ -+ DM_EVENT_MOUNT = 1, -+ DM_EVENT_PREUNMOUNT = 2, -+ DM_EVENT_UNMOUNT = 3, -+ DM_EVENT_DEBUT = 4, /* not supported */ -+ DM_EVENT_CREATE = 5, -+ DM_EVENT_CLOSE = 6, /* not supported */ -+ DM_EVENT_POSTCREATE = 7, -+ DM_EVENT_REMOVE = 8, -+ DM_EVENT_POSTREMOVE = 9, -+ DM_EVENT_RENAME = 10, -+ DM_EVENT_POSTRENAME = 11, -+ DM_EVENT_LINK = 12, -+ DM_EVENT_POSTLINK = 13, -+ DM_EVENT_SYMLINK = 14, -+ DM_EVENT_POSTSYMLINK = 15, -+ DM_EVENT_READ = 16, -+ DM_EVENT_WRITE = 17, -+ DM_EVENT_TRUNCATE = 18, -+ DM_EVENT_ATTRIBUTE = 19, -+ DM_EVENT_DESTROY = 20, -+ DM_EVENT_NOSPACE = 21, -+ DM_EVENT_USER = 22, -+ DM_EVENT_MAX = 23 -+} dm_eventtype_t; -+#endif -+ -+ -+struct dm_eventmsg { -+ int _link; -+ dm_eventtype_t ev_type; -+ dm_token_t ev_token; -+ dm_sequence_t ev_sequence; -+ dm_vardata_t ev_data; -+}; -+typedef struct dm_eventmsg dm_eventmsg_t; -+ -+ -+struct dm_cancel_event { /* not supported */ -+ dm_sequence_t ce_sequence; -+ dm_token_t 
ce_token; -+}; -+typedef struct dm_cancel_event dm_cancel_event_t; -+ -+ -+struct dm_data_event { -+ dm_vardata_t de_handle; -+ dm_off_t de_offset; -+ dm_size_t de_length; -+}; -+typedef struct dm_data_event dm_data_event_t; -+ -+struct dm_destroy_event { -+ dm_vardata_t ds_handle; -+ dm_attrname_t ds_attrname; -+ dm_vardata_t ds_attrcopy; -+}; -+typedef struct dm_destroy_event dm_destroy_event_t; -+ -+struct dm_mount_event { -+ dm_mode_t me_mode; -+ dm_vardata_t me_handle1; -+ dm_vardata_t me_handle2; -+ dm_vardata_t me_name1; -+ dm_vardata_t me_name2; -+ dm_vardata_t me_roothandle; -+}; -+typedef struct dm_mount_event dm_mount_event_t; -+ -+struct dm_namesp_event { -+ dm_mode_t ne_mode; -+ dm_vardata_t ne_handle1; -+ dm_vardata_t ne_handle2; -+ dm_vardata_t ne_name1; -+ dm_vardata_t ne_name2; -+ int ne_retcode; -+}; -+typedef struct dm_namesp_event dm_namesp_event_t; -+ -+ -+typedef enum { -+ DM_EXTENT_INVALID, -+ DM_EXTENT_RES, -+ DM_EXTENT_HOLE -+} dm_extenttype_t; -+ -+ -+struct dm_extent { -+ dm_extenttype_t ex_type; -+ unsigned int ex_pad1; /* reserved; do not reference */ -+ dm_off_t ex_offset; -+ dm_size_t ex_length; -+}; -+typedef struct dm_extent dm_extent_t; -+ -+struct dm_fileattr { -+ dm_mode_t fa_mode; -+ uid_t fa_uid; -+ gid_t fa_gid; -+ time_t fa_atime; -+ time_t fa_mtime; -+ time_t fa_ctime; -+ time_t fa_dtime; -+ unsigned int fa_pad1; /* reserved; do not reference */ -+ dm_off_t fa_size; -+}; -+typedef struct dm_fileattr dm_fileattr_t; -+ -+ -+struct dm_inherit { /* not supported */ -+ dm_attrname_t ih_name; -+ dm_mode_t ih_filetype; -+}; -+typedef struct dm_inherit dm_inherit_t; -+ -+ -+typedef enum { -+ DM_MSGTYPE_INVALID, -+ DM_MSGTYPE_SYNC, -+ DM_MSGTYPE_ASYNC -+} dm_msgtype_t; -+ -+ -+struct dm_region { -+ dm_off_t rg_offset; -+ dm_size_t rg_size; -+ unsigned int rg_flags; -+ unsigned int rg_pad1; /* reserved; do not reference */ -+}; -+typedef struct dm_region dm_region_t; -+ -+ -+typedef enum { -+ DM_RESP_INVALID, -+ DM_RESP_CONTINUE, -+ DM_RESP_ABORT, -+ DM_RESP_DONTCARE -+} dm_response_t; -+ -+ -+#ifndef HAVE_DM_RIGHT_T -+#define HAVE_DM_RIGHT_T -+typedef enum { -+ DM_RIGHT_NULL, -+ DM_RIGHT_SHARED, -+ DM_RIGHT_EXCL -+} dm_right_t; -+#endif -+ -+ -+struct dm_stat { -+ int _link; -+ dm_vardata_t dt_handle; -+ dm_vardata_t dt_compname; -+ int dt_nevents; -+ dm_eventset_t dt_emask; -+ int dt_pers; /* field not supported */ -+ int dt_pmanreg; -+ time_t dt_dtime; -+ unsigned int dt_change; /* field not supported */ -+ unsigned int dt_pad1; /* reserved; do not reference */ -+ dm_dev_t dt_dev; -+ dm_ino_t dt_ino; -+ dm_mode_t dt_mode; -+ dm_nlink_t dt_nlink; -+ uid_t dt_uid; -+ gid_t dt_gid; -+ dm_dev_t dt_rdev; -+ unsigned int dt_pad2; /* reserved; do not reference */ -+ dm_off_t dt_size; -+ time_t dt_atime; -+ time_t dt_mtime; -+ time_t dt_ctime; -+ unsigned int dt_blksize; -+ dm_size_t dt_blocks; -+ -+ /* Non-standard filesystem-specific fields. Currently XFS is the only -+ supported filesystem type. 
-+ */ -+ -+ __u64 dt_pad3; /* reserved; do not reference */ -+ int dt_fstype; /* filesystem index; see sysfs(2) */ -+ union { -+ struct { -+ dm_igen_t igen; -+ unsigned int xflags; -+ unsigned int extsize; -+ unsigned int extents; -+ unsigned short aextents; -+ unsigned short dmstate; -+ } sgi_xfs; -+ } fsys_dep; -+}; -+typedef struct dm_stat dm_stat_t; -+ -+#define dt_xfs_igen fsys_dep.sgi_xfs.igen -+#define dt_xfs_xflags fsys_dep.sgi_xfs.xflags -+#define dt_xfs_extsize fsys_dep.sgi_xfs.extsize -+#define dt_xfs_extents fsys_dep.sgi_xfs.extents -+#define dt_xfs_aextents fsys_dep.sgi_xfs.aextents -+#define dt_xfs_dmstate fsys_dep.sgi_xfs.dmstate -+ -+/* Flags for the non-standard dt_xfs_xflags field. */ -+ -+#define DM_XFLAG_REALTIME 0x00000001 -+#define DM_XFLAG_PREALLOC 0x00000002 -+#define DM_XFLAG_IMMUTABLE 0x00000008 -+#define DM_XFLAG_APPEND 0x00000010 -+#define DM_XFLAG_SYNC 0x00000020 -+#define DM_XFLAG_NOATIME 0x00000040 -+#define DM_XFLAG_NODUMP 0x00000080 -+#define DM_XFLAG_HASATTR 0x80000000 -+ -+ -+struct dm_timestruct { -+ time_t dm_tv_sec; -+ int dm_tv_nsec; -+}; -+typedef struct dm_timestruct dm_timestruct_t; -+ -+ -+struct dm_xstat { /* not supported */ -+ dm_stat_t dx_statinfo; -+ dm_vardata_t dx_attrdata; -+}; -+typedef struct dm_xstat dm_xstat_t; -+ -+ -+#define MAXDMFSFIDSZ 46 -+ -+struct dm_fid { -+ __u16 dm_fid_len; /* length of remainder */ -+ __u16 dm_fid_pad; -+ __u32 dm_fid_gen; /* generation number */ -+ __u64 dm_fid_ino; /* 64 bits inode number */ -+}; -+typedef struct dm_fid dm_fid_t; -+ -+ -+struct dm_handle { -+ union { -+ __s64 align; /* force alignment of ha_fid */ -+ dm_fsid_t _ha_fsid; /* unique file system identifier */ -+ } ha_u; -+ dm_fid_t ha_fid; /* file system specific file ID */ -+}; -+typedef struct dm_handle dm_handle_t; -+#define ha_fsid ha_u._ha_fsid -+ -+#define DM_HSIZE(handle) (((char *) &(handle).ha_fid.dm_fid_pad \ -+ - (char *) &(handle)) \ -+ + (handle).ha_fid.dm_fid_len) -+ -+#define DM_HANDLE_CMP(h1, h2) memcmp(h1, h2, sizeof(dm_handle_t)) -+ -+#define DM_FSHSIZE sizeof(dm_fsid_t) -+ -+ -+/* The following list provides the prototypes for all functions defined in -+ the DMAPI interface. 
-+*/ -+ -+extern int -+dm_clear_inherit( /* not supported */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep); -+ -+extern int -+dm_create_by_handle( /* not supported */ -+ dm_sessid_t sid, -+ void __user *dirhanp, -+ size_t dirhlen, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname); -+ -+extern int -+dm_create_session( -+ dm_sessid_t oldsid, -+ char __user *sessinfop, -+ dm_sessid_t __user *newsidp); -+ -+extern int -+dm_create_userevent( -+ dm_sessid_t sid, -+ size_t msglen, -+ void __user *msgdatap, -+ dm_token_t __user *tokenp); -+ -+extern int -+dm_destroy_session( -+ dm_sessid_t sid); -+ -+extern int -+dm_downgrade_right( /* not completely supported; see caveat above */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token); -+ -+extern int -+dm_fd_to_handle( -+ int fd, -+ void **hanpp, -+ size_t *hlenp); -+ -+extern int -+dm_find_eventmsg( -+ dm_sessid_t sid, -+ dm_token_t token, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+extern int -+dm_get_allocinfo( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t *offp, -+ unsigned int nelem, -+ dm_extent_t *extentp, -+ unsigned int *nelemp); -+ -+extern int -+dm_get_bulkall( /* not supported */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int mask, -+ dm_attrname_t *attrnamep, -+ dm_attrloc_t *locp, -+ size_t buflen, -+ void *bufp, -+ size_t *rlenp); -+ -+extern int -+dm_get_bulkattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int mask, -+ dm_attrloc_t *locp, -+ size_t buflen, -+ void *bufp, -+ size_t *rlenp); -+ -+extern int -+dm_get_config( -+ void __user *hanp, -+ size_t hlen, -+ dm_config_t flagname, -+ dm_size_t __user *retvalp); -+ -+extern int -+dm_get_config_events( -+ void __user *hanp, -+ size_t hlen, -+ unsigned int nelem, -+ dm_eventset_t __user *eventsetp, -+ unsigned int __user *nelemp); -+ -+extern int -+dm_get_dirattrs( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int mask, -+ dm_attrloc_t *locp, -+ size_t buflen, -+ void *bufp, -+ size_t *rlenp); -+ -+extern int -+dm_get_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+extern int -+dm_get_eventlist( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int nelem, -+ dm_eventset_t __user *eventsetp, -+ unsigned int __user *nelemp); -+ -+extern int -+dm_get_events( -+ dm_sessid_t sid, -+ unsigned int maxmsgs, -+ unsigned int flags, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+extern int -+dm_get_fileattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int mask, -+ dm_stat_t __user *statp); -+ -+extern int -+dm_get_mountinfo( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+extern int -+dm_get_region( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int nelem, -+ dm_region_t __user *regbufp, -+ unsigned int __user *nelemp); -+ -+extern int -+dm_getall_disp( -+ dm_sessid_t sid, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+extern int 
-+dm_getall_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+extern int -+dm_getall_inherit( /* not supported */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int nelem, -+ dm_inherit_t __user *inheritbufp, -+ unsigned int __user *nelemp); -+ -+extern int -+dm_getall_sessions( -+ unsigned int nelem, -+ dm_sessid_t __user *sidbufp, -+ unsigned int __user *nelemp); -+ -+extern int -+dm_getall_tokens( -+ dm_sessid_t sid, -+ unsigned int nelem, -+ dm_token_t __user *tokenbufp, -+ unsigned int __user *nelemp); -+ -+extern int -+dm_handle_cmp( -+ void *hanp1, -+ size_t hlen1, -+ void *hanp2, -+ size_t hlen2); -+ -+extern void -+dm_handle_free( -+ void *hanp, -+ size_t hlen); -+ -+extern u_int -+dm_handle_hash( -+ void *hanp, -+ size_t hlen); -+ -+extern dm_boolean_t -+dm_handle_is_valid( -+ void *hanp, -+ size_t hlen); -+ -+extern int -+dm_handle_to_fshandle( -+ void *hanp, -+ size_t hlen, -+ void **fshanpp, -+ size_t *fshlenp); -+ -+extern int -+dm_handle_to_fsid( -+ void *hanp, -+ size_t hlen, -+ dm_fsid_t *fsidp); -+ -+extern int -+dm_handle_to_igen( -+ void *hanp, -+ size_t hlen, -+ dm_igen_t *igenp); -+ -+extern int -+dm_handle_to_ino( -+ void *hanp, -+ size_t hlen, -+ dm_ino_t *inop); -+ -+extern int -+dm_handle_to_path( -+ void *dirhanp, -+ size_t dirhlen, -+ void *targhanp, -+ size_t targhlen, -+ size_t buflen, -+ char *pathbufp, -+ size_t *rlenp); -+ -+extern int -+dm_init_attrloc( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrloc_t __user *locp); -+ -+extern int -+dm_init_service( -+ char **versionstrpp); -+ -+extern int -+dm_make_handle( -+ dm_fsid_t *fsidp, -+ dm_ino_t *inop, -+ dm_igen_t *igenp, -+ void **hanpp, -+ size_t *hlenp); -+ -+extern int -+dm_make_fshandle( -+ dm_fsid_t *fsidp, -+ void **hanpp, -+ size_t *hlenp); -+ -+extern int -+dm_mkdir_by_handle( /* not supported */ -+ dm_sessid_t sid, -+ void __user *dirhanp, -+ size_t dirhlen, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname); -+ -+extern int -+dm_move_event( -+ dm_sessid_t srcsid, -+ dm_token_t token, -+ dm_sessid_t targetsid, -+ dm_token_t __user *rtokenp); -+ -+extern int -+dm_obj_ref_hold( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen); -+ -+extern int -+dm_obj_ref_query( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void *hanp, -+ size_t hlen); -+ -+extern int -+dm_obj_ref_rele( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen); -+ -+extern int -+dm_path_to_fshandle( -+ char *path, -+ void **hanpp, -+ size_t *hlenp); -+ -+extern int -+dm_path_to_handle( -+ char *path, -+ void **hanpp, -+ size_t *hlenp); -+ -+extern int -+dm_pending( -+ dm_sessid_t sid, -+ dm_token_t token, -+ dm_timestruct_t __user *delay); -+ -+extern int -+dm_probe_hole( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t off, -+ dm_size_t len, -+ dm_off_t __user *roffp, -+ dm_size_t __user *rlenp); -+ -+extern int -+dm_punch_hole( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t off, -+ dm_size_t len); -+ -+extern int -+dm_query_right( /* not completely supported; see caveat above */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_right_t __user *rightp); -+ -+extern int -+dm_query_session( -+ dm_sessid_t sid, -+ size_t buflen, -+ void __user 
*bufp, -+ size_t __user *rlenp); -+ -+extern dm_ssize_t -+dm_read_invis( -+ dm_sessid_t sid, -+ void *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t off, -+ dm_size_t len, -+ void *bufp); -+ -+extern int -+dm_release_right( /* not completely supported; see caveat above */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token); -+ -+extern int -+dm_remove_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ int setdtime, -+ dm_attrname_t __user *attrnamep); -+ -+extern int -+dm_request_right( /* not completely supported; see caveat above */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int flags, -+ dm_right_t right); -+ -+extern int -+dm_respond_event( -+ dm_sessid_t sid, -+ dm_token_t token, -+ dm_response_t response, -+ int reterror, -+ size_t buflen, -+ void __user *respbufp); -+ -+extern int -+dm_send_msg( -+ dm_sessid_t targetsid, -+ dm_msgtype_t msgtype, -+ size_t buflen, -+ void __user *bufp); -+ -+extern int -+dm_set_disp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_eventset_t __user *eventsetp, -+ unsigned int maxevent); -+ -+extern int -+dm_set_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ int setdtime, -+ size_t buflen, -+ void __user *bufp); -+ -+extern int -+dm_set_eventlist( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_eventset_t __user *eventsetp, -+ unsigned int maxevent); -+ -+extern int -+dm_set_fileattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int mask, -+ dm_fileattr_t __user *attrp); -+ -+extern int -+dm_set_inherit( /* not supported */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ mode_t mode); -+ -+extern int -+dm_set_region( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ unsigned int nelem, -+ dm_region_t __user *regbufp, -+ dm_boolean_t __user *exactflagp); -+ -+extern int -+dm_set_return_on_destroy( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ dm_boolean_t enable); -+ -+extern int -+dm_symlink_by_handle( /* not supported */ -+ dm_sessid_t sid, -+ void __user *dirhanp, -+ size_t dirhlen, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname, -+ char __user *path); -+ -+extern int -+dm_sync_by_handle( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token); -+ -+extern int -+dm_upgrade_right( /* not completely supported; see caveat above */ -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token); -+ -+extern dm_ssize_t -+dm_write_invis( -+ dm_sessid_t sid, -+ void *hanp, -+ size_t hlen, -+ dm_token_t token, -+ int flags, -+ dm_off_t off, -+ dm_size_t len, -+ void *bufp); -+ -+/* Non-standard SGI additions to the DMAPI interface. */ -+ -+int -+dm_open_by_handle( -+ void __user *hanp, -+ size_t hlen, -+ int mode); -+ -+extern int -+dm_get_dioinfo( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_dioinfo_t __user *diop); -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* __DMAPI_H__ */ ---- /dev/null -+++ b/fs/dmapi/dmapi_attr.c -@@ -0,0 +1,93 @@ -+/* -+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. 
-+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+/* Retrieve attributes for a single file, directory or symlink. */ -+ -+int -+dm_get_fileattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_stat_t __user *statp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_fileattr(tdp->td_ip, tdp->td_right, -+ mask, statp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+/* Set one or more file attributes of a file, directory, or symlink. */ -+ -+int -+dm_set_fileattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_fileattr_t __user *attrp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->set_fileattr(tdp->td_ip, tdp->td_right, -+ mask, attrp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_bulkattr.c -@@ -0,0 +1,170 @@ -+/* -+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. 
Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+int -+dm_init_attrloc( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrloc_t __user *locp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS|DM_TDT_DIR, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->init_attrloc(tdp->td_ip, tdp->td_right, locp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+/* -+ * Retrieves both standard and DM specific file attributes for the file -+ * system indicated by the handle. (The FS has to be mounted). -+ * Syscall returns 1 to indicate SUCCESS and more information is available. -+ * -1 is returned on error, and errno will be set appropriately. -+ * 0 is returned upon successful completion. -+ */ -+ -+int -+dm_get_bulkattr_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_bulkattr_rvp(tdp->td_ip, tdp->td_right, -+ mask, locp, buflen, bufp, rlenp, rvp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+/* -+ * Retrieves attributes of directory entries given a handle to that -+ * directory. Iterative. -+ * Syscall returns 1 to indicate SUCCESS and more information is available. -+ * -1 is returned on error, and errno will be set appropriately. -+ * 0 is returned upon successful completion. 
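For orientation, here is roughly how the iterative attribute calls above (dm_init_attrloc plus dm_get_bulkattr) are driven from user space. This is a minimal sketch under stated assumptions, not part of the patch: it presumes the user-space <dmapi.h> header and DMAPI library that pair with this kernel interface, an already-created session sid, and it uses the conventional DM_NO_TOKEN and DM_AT_STAT names, which are defined outside this hunk; dump_bulkattr is a hypothetical helper name.

/* Minimal user-space sketch (not part of the patch); error handling trimmed. */
#include <stdio.h>
#include <dmapi.h>			/* user-space DMAPI header (assumed) */

int
dump_bulkattr(
	dm_sessid_t	sid,
	char		*mntpt)
{
	void		*fshanp;
	size_t		fshlen;
	dm_attrloc_t	loc;
	char		buf[65536];
	size_t		rlen;
	int		rv;

	if (dm_path_to_fshandle(mntpt, &fshanp, &fshlen))
		return -1;
	if (dm_init_attrloc(sid, fshanp, fshlen, DM_NO_TOKEN, &loc))
		return -1;

	/* Per the comment above: 1 means "more data available", 0 means done,
	 * -1 means error with errno set.
	 */
	do {
		rv = dm_get_bulkattr(sid, fshanp, fshlen, DM_NO_TOKEN,
				     DM_AT_STAT, &loc, sizeof(buf), buf, &rlen);
		if (rv < 0)
			return -1;
		printf("got %zu bytes of packed dm_stat records\n", rlen);
		/* walk the packed dm_stat records in buf here */
	} while (rv == 1);

	dm_handle_free(fshanp, fshlen);
	return 0;
}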
-+ */ -+ -+int -+dm_get_dirattrs_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_DIR, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_dirattrs_rvp(tdp->td_ip, tdp->td_right, -+ mask, locp, buflen, bufp, rlenp, rvp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_get_bulkall_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_attrname_t __user *attrnamep, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_bulkall_rvp(tdp->td_ip, tdp->td_right, -+ mask, attrnamep, locp, buflen, bufp, rlenp, rvp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_config.c -@@ -0,0 +1,117 @@ -+/* -+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+#include -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+int -+dm_get_config( -+ void __user *hanp, -+ size_t hlen, -+ dm_config_t flagname, -+ dm_size_t __user *retvalp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ dm_size_t retval; -+ int system = 1; -+ int error; -+ -+ /* Trap and process configuration parameters which are system-wide. 
*/ -+ -+ switch (flagname) { -+ case DM_CONFIG_LEGACY: -+ case DM_CONFIG_PENDING: -+ case DM_CONFIG_OBJ_REF: -+ retval = DM_TRUE; -+ break; -+ case DM_CONFIG_MAX_MESSAGE_DATA: -+ retval = DM_MAX_MSG_DATA; -+ break; -+ default: -+ system = 0; -+ break; -+ } -+ if (system) { -+ if (copy_to_user(retvalp, &retval, sizeof(retval))) -+ return(-EFAULT); -+ return(0); -+ } -+ -+ /* Must be filesystem-specific. Convert the handle into an inode. */ -+ -+ if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0) -+ return(error); -+ -+ /* Now call the filesystem-specific routine to determine the -+ value of the configuration option for that filesystem. -+ */ -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_config(tdp->td_ip, tdp->td_right, -+ flagname, retvalp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_get_config_events( -+ void __user *hanp, -+ size_t hlen, -+ u_int nelem, -+ dm_eventset_t __user *eventsetp, -+ u_int __user *nelemp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ /* Convert the handle into an inode. */ -+ -+ if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0) -+ return(error); -+ -+ /* Now call the filesystem-specific routine to determine the -+ events supported by that filesystem. -+ */ -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_config_events(tdp->td_ip, tdp->td_right, -+ nelem, eventsetp, nelemp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_dmattr.c -@@ -0,0 +1,228 @@ -+/* -+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+int -+dm_clear_inherit( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->clear_inherit(tdp->td_ip, tdp->td_right, -+ attrnamep); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_get_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_dmattr(tdp->td_ip, tdp->td_right, -+ attrnamep, buflen, bufp, rlenp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_getall_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->getall_dmattr(tdp->td_ip, tdp->td_right, -+ buflen, bufp, rlenp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_getall_inherit( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int nelem, -+ dm_inherit_t __user *inheritbufp, -+ u_int __user *nelemp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->getall_inherit(tdp->td_ip, tdp->td_right, -+ nelem, inheritbufp, nelemp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_remove_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ int setdtime, -+ dm_attrname_t __user *attrnamep) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->remove_dmattr(tdp->td_ip, tdp->td_right, -+ setdtime, attrnamep); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_set_dmattr( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ int setdtime, -+ size_t buflen, -+ void __user *bufp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_EXCL, &tdp); 
-+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->set_dmattr(tdp->td_ip, tdp->td_right, -+ attrnamep, setdtime, buflen, bufp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_set_inherit( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ mode_t mode) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->set_inherit(tdp->td_ip, tdp->td_right, -+ attrnamep, mode); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_event.c -@@ -0,0 +1,860 @@ -+/* -+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+/* The "rights" portion of the DMAPI spec is not currently implemented. A -+ framework for rights is provided in the code, but turns out to be a noop -+ in practice. The following comments are a brain dump to serve as input to -+ the poor soul that eventually has to get DMAPI rights working in IRIX. -+ -+ A DMAPI right is similar but not identical to the mrlock_t mechanism -+ already used within the kernel. The similarities are that it is a -+ sleeping lock, and that a multiple-reader, single-writer protocol is used. -+ How locks are obtained and dropped are different however. With a mrlock_t, -+ a thread grabs the lock, does some stuff, then drops the lock, and all other -+ threads block in the meantime (assuming a write lock). There is a one-to- -+ one relationship between the lock and the thread which obtained the lock. -+ Not so with DMAPI right locks. A DMAPI lock is associated with a particular -+ session/token/hanp/hlen quad; since there is a dm_tokdata_t structure for -+ each such quad, you can think of it as a one-to-one relationship between the -+ lock and a dm_tokdata_t. 
Any application thread which presents the correct -+ quad is entitled to grab or release the lock, or to use the rights -+ associated with that lock. The thread that grabs the lock does not have to -+ be the one to use the lock, nor does it have to be the thread which drops -+ the lock. The lock can be held for very long periods of time, even across -+ multiple systems calls by multiple application threads. The idea is that a -+ coordinated group of DMAPI application threads can grab the lock, issue a -+ series of inode accesses and/or updates, then drop the lock, and be assured -+ that no other thread in the system could be modifying the inode at the same -+ time. The kernel is expected to blindly trust that the application will -+ not forget to unlock inodes it has locked, and will not deadlock itself -+ against the kernel. -+ -+ There are two types of DMAPI rights, file object (inode) and filesystem -+ object (superblock?). An inode right is the equivalent of the combination -+ of both the XFS ilock and iolock; if held exclusively, no data or metadata -+ within the file can be changed by non-lock-holding threads. The filesystem -+ object lock is a little fuzzier; I think that if it is held, things like -+ unmounts can be blocked, plus there is an event mask associated with the -+ filesystem which can't be updated without the lock. (By the way, that -+ event mask is supposed to be persistent in the superblock; add that to -+ your worklist :-) -+ -+ All events generated by XFS currently arrive with no rights, i.e. -+ DM_RIGHT_NULL, and return to the filesystem with no rights. It would be -+ smart to leave it this way if possible, because it otherwise becomes more -+ likely that an application thread will deadlock against the kernel if the -+ one responsible for calling dm_get_events() happens to touch a file which -+ was locked at the time the event was queued. Since the thread is blocked, -+ it can't read the event in order to find and drop the lock. Catch-22. If -+ you do have events that arrive with non-null rights, then dm_enqueue() needs -+ to have code added for synchronous events which atomically switches the -+ right from being a thread-based right to a dm_tokdata_t-based right without -+ allowing the lock to drop in between. You will probably have to add a new -+ dm_fsys_vector entry point to do this. The lock can't be lost during the -+ switch, or other threads might change the inode or superblock in between. -+ Likewise, if you need to return to the filesystem holding a right, then -+ you need a DMAPI-to-thread atomic switch to occur, most likely in -+ dm_change_right(). Again, the lock must not be lost during the switch; the -+ DMAPI spec spends a couple of pages stressing this. Another dm_fsys_vector -+ entry point is probably the answer. -+ -+ There are several assumptions implied in the current layout of the code. -+ First of all, if an event returns to the filesystem with a return value of -+ zero, then the filesystem can assume that any locks (rights) held at the -+ start of the event are still in effect at the end of the event. (Note that -+ the application could have temporarily dropped and reaquired the right -+ while the event was outstanding, however). If the event returns to the -+ filesystem with an errno, then the filesystem must assume that it has lost -+ any and all rights associated with any of the objects in the event. This -+ was done for a couple of reasons. 
First of all, since an errno is being -+ returned, most likely the filesystem is going to immediately drop all the -+ locks anyway. If the DMAPI code was required to unconditionally reobtain -+ all locks before returning to the filesystem, then dm_pending() wouldn't -+ work for NFS server threads because the process would block indefinitely -+ trying to get its thread-based rights back, because the DMAPI-rights -+ associated with the dm_tokdata_t in the outstanding event would prevent -+ the rights from being obtained. That would be a bad thing. We wouldn't -+ be able to let users Cntl-C out of read/write/truncate events either. -+ -+ If a case should ever surface where the thread has lost its rights even -+ though it has a zero return status, or where the thread has rights even -+ though it is returning with an errno, then this logic will have to be -+ reworked. This could be done by changing the 'right' parameters on all -+ the event calls to (dm_right_t *), so that they could serve both as IN -+ and OUT parameters. -+ -+ Some events such as DM_EVENT_DESTROY arrive without holding an inode -+ reference; if you don't have an inode reference, you can't have a right -+ on the file. -+ -+ One more quirk. The DM_EVENT_UNMOUNT event is defined to be synchronous -+ when it's behavior is asynchronous. If an unmount event arrives with -+ rights, the event should return with the same rights and should NOT leave -+ any rights in the dm_tokdata_t where the application could use them. -+*/ -+ -+ -+#define GETNEXTOFF(vdat) ((vdat).vd_offset + (vdat).vd_length) -+#define HANDLE_SIZE(tdp) \ -+ ((tdp)->td_type & DM_TDT_VFS ? DM_FSHSIZE : DM_HSIZE((tdp)->td_handle)) -+ -+ -+/* Given an inode pointer in a filesystem known to support DMAPI, -+ build a tdp structure for the corresponding inode. -+*/ -+ -+static dm_tokdata_t * -+dm_ip_data( -+ struct inode *ip, -+ dm_right_t right, -+ int referenced) /* != 0, caller holds inode reference */ -+{ -+ int error; -+ dm_tokdata_t *tdp; -+ int filetype; -+ -+ tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL); -+ if (tdp == NULL) { -+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return NULL; -+ } -+ -+ tdp->td_next = NULL; -+ tdp->td_tevp = NULL; -+ tdp->td_app_ref = 0; -+ tdp->td_orig_right = right; -+ tdp->td_right = right; -+ tdp->td_flags = DM_TDF_ORIG; -+ if (referenced) { -+ tdp->td_flags |= DM_TDF_EVTREF; -+ } -+ -+ filetype = ip->i_mode & S_IFMT; -+ if (filetype == S_IFREG) { -+ tdp->td_type = DM_TDT_REG; -+ } else if (filetype == S_IFDIR) { -+ tdp->td_type = DM_TDT_DIR; -+ } else if (filetype == S_IFLNK) { -+ tdp->td_type = DM_TDT_LNK; -+ } else { -+ tdp->td_type = DM_TDT_OTH; -+ } -+ -+ if (referenced) { -+ tdp->td_ip = ip; -+ } else { -+ tdp->td_ip = NULL; -+ } -+ tdp->td_vcount = 0; -+ -+ if ((error = dm_ip_to_handle(ip, &tdp->td_handle)) != 0) { -+ panic("dm_ip_data: dm_ip_to_handle failed for ip %p in " -+ "a DMAPI filesystem, errno %d\n", ip, error); -+ } -+ -+ return(tdp); -+} -+ -+ -+/* Given a sb pointer to a filesystem known to support DMAPI, build a tdp -+ structure for that sb. 
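To make the rights discussion above concrete from the application side, here is a hedged sketch of the request/query/release pattern using the calls declared in dmapi.h. It is not part of the patch: the header marks these calls "not completely supported", so on this implementation they behave as advisory no-ops; sid, the handle and the event token are assumed to exist, DM_RR_WAIT is the standard XDSM flag name, DM_AT_STAT is defined outside this hunk, and stat_with_right is a hypothetical helper.

/* Application-side sketch of the rights protocol (not part of the patch). */
#include <dmapi.h>			/* user-space DMAPI header (assumed) */

int
stat_with_right(
	dm_sessid_t	sid,
	void		*hanp,
	size_t		hlen,
	dm_token_t	token)
{
	dm_right_t	right = DM_RIGHT_NULL;
	dm_stat_t	sbuf;
	int		error;

	/* Ask for (at least) a shared right on the object. */
	if (dm_request_right(sid, hanp, hlen, token, DM_RR_WAIT,
			     DM_RIGHT_SHARED))
		return -1;

	/* dm_query_right() reports what we actually hold; this implementation
	 * treats rights as advisory, so the answer is informational only.
	 */
	(void)dm_query_right(sid, hanp, hlen, token, &right);

	/* Do the protected work... */
	error = dm_get_fileattr(sid, hanp, hlen, token, DM_AT_STAT, &sbuf);

	/* ...and drop the right again, as the comment above stresses. */
	dm_release_right(sid, hanp, hlen, token);
	return error;
}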
-+*/ -+static dm_tokdata_t * -+dm_sb_data( -+ struct super_block *sb, -+ struct inode *ip, /* will be NULL for DM_EVENT_UNMOUNT */ -+ dm_right_t right) -+{ -+ dm_tokdata_t *tdp; -+ struct filesystem_dmapi_operations *dops; -+ dm_fsid_t fsid; -+ -+ dops = dm_fsys_ops(sb); -+ ASSERT(dops); -+ dops->get_fsid(sb, &fsid); -+ -+ tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL); -+ if (tdp == NULL) { -+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return NULL; -+ } -+ -+ tdp->td_next = NULL; -+ tdp->td_tevp = NULL; -+ tdp->td_app_ref = 0; -+ tdp->td_orig_right = right; -+ tdp->td_right = right; -+ tdp->td_flags = DM_TDF_ORIG; -+ if (ip) { -+ tdp->td_flags |= DM_TDF_EVTREF; -+ } -+ tdp->td_type = DM_TDT_VFS; -+ tdp->td_ip = ip; -+ tdp->td_vcount = 0; -+ -+ memcpy(&tdp->td_handle.ha_fsid, &fsid, sizeof(fsid)); -+ memset((char *)&tdp->td_handle.ha_fsid + sizeof(fsid), 0, -+ sizeof(tdp->td_handle) - sizeof(fsid)); -+ -+ return(tdp); -+} -+ -+ -+/* Link a tdp structure into the tevp. */ -+ -+static void -+dm_add_handle_to_event( -+ dm_tokevent_t *tevp, -+ dm_tokdata_t *tdp) -+{ -+ tdp->td_next = tevp->te_tdp; -+ tevp->te_tdp = tdp; -+ tdp->td_tevp = tevp; -+} -+ -+ -+/* Generate the given data event for the inode, and wait for a reply. The -+ caller must guarantee that the inode's reference count is greater than zero -+ so that the filesystem can't disappear while the request is outstanding. -+*/ -+ -+int -+dm_send_data_event( -+ dm_eventtype_t event, -+ struct inode *ip, -+ dm_right_t vp_right, /* current right for ip */ -+ dm_off_t offset, -+ size_t length, -+ int flags) /* 0 or DM_FLAGS_NDELAY */ -+{ -+ dm_data_event_t *datap; -+ dm_tokevent_t *tevp; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ tdp = dm_ip_data(ip, vp_right, /* reference held */ 1); -+ if (tdp == NULL) -+ return -ENOMEM; -+ -+ /* Calculate the size of the event in bytes, create an event structure -+ for it, and insert the file's handle into the event. -+ */ -+ -+ tevp = dm_evt_create_tevp(event, HANDLE_SIZE(tdp), (void **)&datap); -+ if (tevp == NULL) { -+ kmem_cache_free(dm_tokdata_cachep, tdp); -+ return(-ENOMEM); -+ } -+ dm_add_handle_to_event(tevp, tdp); -+ -+ /* Now fill in all the dm_data_event_t fields. */ -+ -+ datap->de_handle.vd_offset = sizeof(*datap); -+ datap->de_handle.vd_length = HANDLE_SIZE(tdp); -+ memcpy((char *)datap + datap->de_handle.vd_offset, &tdp->td_handle, -+ datap->de_handle.vd_length); -+ datap->de_offset = offset; -+ datap->de_length = length; -+ -+ /* Queue the message and wait for the reply. */ -+ -+ error = dm_enqueue_normal_event(ip->i_sb, &tevp, flags); -+ -+ /* If no errors occurred, we must leave with the same rights we had -+ upon entry. If errors occurred, we must leave with no rights. -+ */ -+ -+ dm_evt_rele_tevp(tevp, error); -+ -+ return(error); -+} -+ -+ -+/* Generate the destroy event for the inode and wait until the request has been -+ queued. The caller does not hold an inode reference or a right on the inode, -+ but it must otherwise lock down the inode such that the filesystem can't -+ disappear while the request is waiting to be queued. While waiting to be -+ queued, the inode must not be referenceable either by path or by a call -+ to dm_handle_to_ip(). 
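Before the destroy-event plumbing below, a user-space sketch of how an application would arrange for a DM attribute to come back inside DM_EVENT_DESTROY. It is illustrative only, not part of the patch: it assumes the user-space <dmapi.h> header, uses the conventional DM_NO_TOKEN name defined outside this hunk, and "migid", "copy-0001" and tag_for_destroy are made-up example names.

/* User-space sketch (not part of the patch): request a DM attribute back
 * in DM_EVENT_DESTROY, which dm_send_destroy_event() below will honor.
 */
#include <string.h>
#include <dmapi.h>			/* user-space DMAPI header (assumed) */

int
tag_for_destroy(
	dm_sessid_t	sid,
	void		*fshanp,	/* filesystem handle */
	size_t		fshlen,
	void		*fhanp,		/* file handle */
	size_t		fhlen)
{
	dm_attrname_t	attrname;
	char		value[] = "copy-0001";	/* illustrative payload */

	memset(&attrname, 0, sizeof(attrname));
	strncpy((char *)attrname.an_chars, "migid",
		sizeof(attrname.an_chars) - 1);

	/* Attach the attribute to the file... */
	if (dm_set_dmattr(sid, fhanp, fhlen, DM_NO_TOKEN, &attrname,
			  0 /* setdtime */, sizeof(value), value))
		return -1;

	/* ...and ask the filesystem to copy it into future destroy events. */
	return dm_set_return_on_destroy(sid, fshanp, fshlen, DM_NO_TOKEN,
					&attrname, DM_TRUE);
}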
-+*/ -+ -+int -+dm_send_destroy_event( -+ struct inode *ip, -+ dm_right_t vp_right) /* always DM_RIGHT_NULL */ -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokevent_t *tevp; -+ dm_tokdata_t *tdp; -+ dm_destroy_event_t *destp; -+ dm_attrname_t attrname; -+ char *value; -+ int value_len; -+ int error; -+ -+ tdp = dm_ip_data(ip, vp_right, /* no reference held */ 0); -+ if (tdp == NULL) -+ return -ENOMEM; -+ -+ if ((error = dm_waitfor_destroy_attrname(ip->i_sb, &attrname)) != 0) -+ return(error); -+ -+ /* If a return-on-destroy attribute name exists for this filesystem, -+ see if the object being deleted has this attribute. If the object -+ doesn't have the attribute or if we encounter an error, then send -+ the event without the attribute. -+ */ -+ -+ value_len = -1; /* because zero is a valid attribute length */ -+ if (attrname.an_chars[0] != '\0') { -+ fsys_vector = dm_fsys_vector(ip); -+ error = fsys_vector->get_destroy_dmattr(ip, vp_right, &attrname, -+ &value, &value_len); -+ if (error && error != -ENODATA) -+ return error; -+ } -+ -+ /* Now that we know the size of the attribute value, if any, calculate -+ the size of the event in bytes, create an event structure for it, -+ and insert the handle into the event. -+ */ -+ -+ tevp = dm_evt_create_tevp(DM_EVENT_DESTROY, -+ HANDLE_SIZE(tdp) + (value_len >= 0 ? value_len : 0), -+ (void **)&destp); -+ if (tevp == NULL) { -+ kmem_cache_free(dm_tokdata_cachep, tdp); -+ if (value_len > 0) -+ kfree(value); -+ return(-ENOMEM); -+ } -+ dm_add_handle_to_event(tevp, tdp); -+ -+ /* Now fill in all the dm_destroy_event_t fields. */ -+ -+ destp->ds_handle.vd_offset = sizeof(*destp); -+ destp->ds_handle.vd_length = HANDLE_SIZE(tdp); -+ memcpy((char *)destp + destp->ds_handle.vd_offset, &tdp->td_handle, -+ destp->ds_handle.vd_length); -+ if (value_len >= 0) { -+ destp->ds_attrname = attrname; -+ destp->ds_attrcopy.vd_length = value_len; -+ if (value_len == 0) { -+ destp->ds_attrcopy.vd_offset = 0; -+ } else { -+ destp->ds_attrcopy.vd_offset = GETNEXTOFF(destp->ds_handle); -+ memcpy((char *)destp + destp->ds_attrcopy.vd_offset, value, -+ value_len); -+ kfree(value); -+ } -+ } -+ -+ /* Queue the message asynchronously. */ -+ -+ error = dm_enqueue_normal_event(ip->i_sb, &tevp, 0); -+ -+ /* Since we had no rights upon entry, we have none to reobtain before -+ leaving. -+ */ -+ -+ dm_evt_rele_tevp(tevp, 1); -+ -+ return(error); -+} -+ -+ -+/* The dm_mount_event_t event is sent in turn to all sessions that have asked -+ for it until one either rejects it or accepts it. The filesystem is not -+ going anywhere because the mount is blocked until the event is answered. -+*/ -+ -+int -+dm_send_mount_event( -+ struct super_block *sb, /* filesystem being mounted */ -+ dm_right_t vfsp_right, -+ struct inode *ip, /* mounted on directory */ -+ dm_right_t vp_right, -+ struct inode *rootip, -+ dm_right_t rootvp_right, -+ char *name1, /* mount path */ -+ char *name2) /* filesystem device name */ -+{ -+ int error; -+ dm_tokevent_t *tevp = NULL; -+ dm_tokdata_t *tdp1 = NULL; /* filesystem handle for event */ -+ dm_tokdata_t *tdp2 = NULL; /* file handle for mounted-on dir. */ -+ dm_tokdata_t *tdp3 = NULL; /* file handle for root inode */ -+ dm_mount_event_t *mp; -+ size_t nextoff; -+ -+ /* Convert the sb to a filesystem handle, and ip and rootip into -+ file handles. ip (the mounted-on directory) may not have a handle -+ if it is a different filesystem type which does not support DMAPI. 
-+ */ -+ -+ tdp1 = dm_sb_data(sb, rootip, vfsp_right); -+ if (tdp1 == NULL) -+ goto out_nomem; -+ -+ if ((ip == NULL) || dm_check_dmapi_ip(ip)) { -+ ip = NULL; /* we are mounting on non-DMAPI FS */ -+ } else { -+ tdp2 = dm_ip_data(ip, vp_right, /* reference held */ 1); -+ if (tdp2 == NULL) -+ goto out_nomem; -+ } -+ -+ tdp3 = dm_ip_data(rootip, rootvp_right, /* reference held */ 1); -+ if (tdp3 == NULL) -+ goto out_nomem; -+ -+ /* Calculate the size of the event in bytes, create an event structure -+ for it, and insert the handles into the event. -+ */ -+ -+ tevp = dm_evt_create_tevp(DM_EVENT_MOUNT, -+ HANDLE_SIZE(tdp1) + (ip ? HANDLE_SIZE(tdp2) : 0) + -+ HANDLE_SIZE(tdp3) + strlen(name1) + 1 + -+ strlen(name2) + 1, (void **)&mp); -+ if (tevp == NULL) -+ goto out_nomem; -+ -+ dm_add_handle_to_event(tevp, tdp1); -+ if (ip) -+ dm_add_handle_to_event(tevp, tdp2); -+ dm_add_handle_to_event(tevp, tdp3); -+ -+ /* Now fill in all the dm_mount_event_t fields. */ -+ -+ mp->me_handle1.vd_offset = sizeof(*mp); -+ mp->me_handle1.vd_length = HANDLE_SIZE(tdp1); -+ memcpy((char *) mp + mp->me_handle1.vd_offset, &tdp1->td_handle, -+ mp->me_handle1.vd_length); -+ nextoff = GETNEXTOFF(mp->me_handle1); -+ -+ if (ip) { -+ mp->me_handle2.vd_offset = nextoff; -+ mp->me_handle2.vd_length = HANDLE_SIZE(tdp2); -+ memcpy((char *)mp + mp->me_handle2.vd_offset, &tdp2->td_handle, -+ mp->me_handle2.vd_length); -+ nextoff = GETNEXTOFF(mp->me_handle2); -+ } -+ -+ mp->me_name1.vd_offset = nextoff; -+ mp->me_name1.vd_length = strlen(name1) + 1; -+ memcpy((char *)mp + mp->me_name1.vd_offset, name1, mp->me_name1.vd_length); -+ nextoff = GETNEXTOFF(mp->me_name1); -+ -+ mp->me_name2.vd_offset = nextoff; -+ mp->me_name2.vd_length = strlen(name2) + 1; -+ memcpy((char *)mp + mp->me_name2.vd_offset, name2, mp->me_name2.vd_length); -+ nextoff = GETNEXTOFF(mp->me_name2); -+ -+ mp->me_roothandle.vd_offset = nextoff; -+ mp->me_roothandle.vd_length = HANDLE_SIZE(tdp3); -+ memcpy((char *)mp + mp->me_roothandle.vd_offset, &tdp3->td_handle, -+ mp->me_roothandle.vd_length); -+ -+ mp->me_mode = (sb->s_flags & MS_RDONLY ? DM_MOUNT_RDONLY : 0); -+ -+ /* Queue the message and wait for the reply. */ -+ -+ error = dm_enqueue_mount_event(sb, tevp); -+ -+ /* If no errors occurred, we must leave with the same rights we had -+ upon entry. If errors occurred, we must leave with no rights. -+ */ -+ -+ dm_evt_rele_tevp(tevp, error); -+ -+ return(error); -+ -+out_nomem: -+ if (tevp) -+ kfree(tevp); -+ if (tdp1) -+ kmem_cache_free(dm_tokdata_cachep, tdp1); -+ if (tdp2) -+ kmem_cache_free(dm_tokdata_cachep, tdp2); -+ if (tdp3) -+ kmem_cache_free(dm_tokdata_cachep, tdp3); -+ return -ENOMEM; -+} -+ -+ -+/* Generate an DM_EVENT_UNMOUNT event and wait for a reply. The 'retcode' -+ field indicates whether this is a successful or unsuccessful unmount. -+ If successful, the filesystem is already unmounted, and any pending handle -+ reference to the filesystem will be failed. If the unmount was -+ unsuccessful, then the filesystem will be placed back into full service. -+ -+ The DM_EVENT_UNMOUNT event should really be asynchronous, because the -+ application has no control over whether or not the unmount succeeds. (The -+ DMAPI spec defined it that way because asynchronous events aren't always -+ guaranteed to be delivered.) -+ -+ Since the filesystem is already unmounted in the successful case, the -+ DM_EVENT_UNMOUNT event can't make available any inode to be used in -+ subsequent sid/hanp/hlen/token calls by the application. 
The event will -+ hang around until the application does a DM_RESP_CONTINUE, but the handle -+ within the event is unusable by the application. -+*/ -+ -+void -+dm_send_unmount_event( -+ struct super_block *sb, -+ struct inode *ip, /* NULL if unmount successful */ -+ dm_right_t vfsp_right, -+ mode_t mode, -+ int retcode, /* errno, if unmount failed */ -+ int flags) -+{ -+ dm_namesp_event_t *np; -+ dm_tokevent_t *tevp; -+ dm_tokdata_t *tdp1; -+ -+ /* If the unmount failed, put the filesystem back into full service, -+ allowing blocked handle references to finish. If it succeeded, put -+ the filesystem into the DM_STATE_UNMOUNTED state and fail all -+ blocked DM_NO_TOKEN handle accesses. -+ */ -+ -+ if (retcode != 0) { /* unmount was unsuccessful */ -+ dm_change_fsys_entry(sb, DM_STATE_MOUNTED); -+ } else { -+ dm_change_fsys_entry(sb, DM_STATE_UNMOUNTED); -+ } -+ -+ /* If the event wasn't in the filesystem dm_eventset_t, just remove -+ the filesystem from the list of DMAPI filesystems and return. -+ */ -+ -+ if (flags & DM_FLAGS_UNWANTED) { -+ if (retcode == 0) -+ dm_remove_fsys_entry(sb); -+ return; -+ } -+ -+ /* Calculate the size of the event in bytes and allocate zeroed memory -+ for it. -+ */ -+ -+ tdp1 = dm_sb_data(sb, ip, vfsp_right); -+ if (tdp1 == NULL) -+ return; -+ -+ tevp = dm_evt_create_tevp(DM_EVENT_UNMOUNT, HANDLE_SIZE(tdp1), -+ (void **)&np); -+ if (tevp == NULL) { -+ kmem_cache_free(dm_tokdata_cachep, tdp1); -+ return; -+ } -+ -+ dm_add_handle_to_event(tevp, tdp1); -+ -+ /* Now copy in all the dm_namesp_event_t specific fields. */ -+ -+ np->ne_handle1.vd_offset = sizeof(*np); -+ np->ne_handle1.vd_length = HANDLE_SIZE(tdp1); -+ memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle, -+ np->ne_handle1.vd_length); -+ np->ne_mode = mode; -+ np->ne_retcode = retcode; -+ -+ /* Since DM_EVENT_UNMOUNT is effectively asynchronous, queue the -+ message and ignore any error return for DM_EVENT_UNMOUNT. -+ */ -+ -+ (void)dm_enqueue_normal_event(sb, &tevp, flags); -+ -+ if (retcode == 0) -+ dm_remove_fsys_entry(sb); -+ -+ dm_evt_rele_tevp(tevp, 0); -+} -+ -+ -+/* Generate the given namespace event and wait for a reply (if synchronous) or -+ until the event has been queued (asynchronous). The caller must guarantee -+ that at least one inode within the filesystem has had its reference count -+ bumped so that the filesystem can't disappear while the event is -+ outstanding. -+*/ -+ -+int -+dm_send_namesp_event( -+ dm_eventtype_t event, -+ struct super_block *sb, /* used by PREUNMOUNT */ -+ struct inode *ip1, -+ dm_right_t vp1_right, -+ struct inode *ip2, -+ dm_right_t vp2_right, -+ const char *name1, -+ const char *name2, -+ mode_t mode, -+ int retcode, -+ int flags) -+{ -+ dm_namesp_event_t *np; -+ dm_tokevent_t *tevp; -+ dm_tokdata_t *tdp1 = NULL; /* primary handle for event */ -+ dm_tokdata_t *tdp2 = NULL; /* additional handle for event */ -+ size_t nextoff; -+ int error; -+ -+ if (sb == NULL) -+ sb = ip1->i_sb; -+ -+ switch (event) { -+ case DM_EVENT_PREUNMOUNT: -+ /* -+ * PREUNMOUNT - Send the file system handle in handle1, -+ * and the handle for the root dir in the second. Otherwise -+ * it's a normal sync message; i.e. succeeds or fails -+ * depending on the app's return code. -+ * ip1 and ip2 are both the root dir of mounted FS -+ * vp1_right is the filesystem right. -+ * vp2_right is the root inode right. 
-+ */ -+ -+ if (flags & DM_FLAGS_UNWANTED) { -+ dm_change_fsys_entry(sb, DM_STATE_UNMOUNTING); -+ return(0); -+ } -+ if (ip1 == NULL) { -+ /* If preunmount happens after kill_super then -+ * it's too late; there's nothing left with which -+ * to construct an event. -+ */ -+ return(0); -+ } -+ tdp1 = dm_sb_data(sb, ip1, vp1_right); -+ if (tdp1 == NULL) -+ return -ENOMEM; -+ tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1); -+ if (tdp2 == NULL) { -+ kmem_cache_free(dm_tokdata_cachep, tdp1); -+ return -ENOMEM; -+ } -+ break; -+ -+ case DM_EVENT_NOSPACE: -+ /* vp1_right is the filesystem right. */ -+ -+ tdp1 = dm_sb_data(sb, ip1, vp1_right); -+ if (tdp1 == NULL) -+ return -ENOMEM; -+ tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1); /* additional info - not in the spec */ -+ if (tdp2 == NULL) { -+ kmem_cache_free(dm_tokdata_cachep, tdp1); -+ return -ENOMEM; -+ } -+ break; -+ -+ default: -+ /* All other events only pass in inodes and don't require any -+ special cases. -+ */ -+ -+ tdp1 = dm_ip_data(ip1, vp1_right, /* reference held */ 1); -+ if (tdp1 == NULL) -+ return -ENOMEM; -+ if (ip2) { -+ tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1); -+ if (tdp2 == NULL) { -+ kmem_cache_free(dm_tokdata_cachep, tdp1); -+ return -ENOMEM; -+ } -+ } -+ } -+ -+ /* Calculate the size of the event in bytes and allocate zeroed memory -+ for it. -+ */ -+ -+ tevp = dm_evt_create_tevp(event, -+ HANDLE_SIZE(tdp1) + (ip2 ? HANDLE_SIZE(tdp2) : 0) + -+ (name1 ? strlen(name1) + 1 : 0) + -+ (name2 ? strlen(name2) + 1 : 0), (void **)&np); -+ if (tevp == NULL) { -+ if (tdp1) -+ kmem_cache_free(dm_tokdata_cachep, tdp1); -+ if (tdp2) -+ kmem_cache_free(dm_tokdata_cachep, tdp2); -+ return(-ENOMEM); -+ } -+ -+ dm_add_handle_to_event(tevp, tdp1); -+ if (ip2) -+ dm_add_handle_to_event(tevp, tdp2); -+ -+ /* Now copy in all the dm_namesp_event_t specific fields. */ -+ -+ np->ne_handle1.vd_offset = sizeof(*np); -+ np->ne_handle1.vd_length = HANDLE_SIZE(tdp1); -+ memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle, -+ np->ne_handle1.vd_length); -+ nextoff = GETNEXTOFF(np->ne_handle1); -+ if (ip2) { -+ np->ne_handle2.vd_offset = nextoff; -+ np->ne_handle2.vd_length = HANDLE_SIZE(tdp2); -+ memcpy((char *)np + np->ne_handle2.vd_offset, &tdp2->td_handle, -+ np->ne_handle2.vd_length); -+ nextoff = GETNEXTOFF(np->ne_handle2); -+ } -+ if (name1) { -+ np->ne_name1.vd_offset = nextoff; -+ np->ne_name1.vd_length = strlen(name1) + 1; -+ memcpy((char *)np + np->ne_name1.vd_offset, name1, -+ np->ne_name1.vd_length); -+ nextoff = GETNEXTOFF(np->ne_name1); -+ } -+ if (name2) { -+ np->ne_name2.vd_offset = nextoff; -+ np->ne_name2.vd_length = strlen(name2) + 1; -+ memcpy((char *)np + np->ne_name2.vd_offset, name2, -+ np->ne_name2.vd_length); -+ } -+ np->ne_mode = mode; -+ np->ne_retcode = retcode; -+ -+ /* Queue the message and wait for the reply. */ -+ -+ error = dm_enqueue_normal_event(sb, &tevp, flags); -+ -+ /* If no errors occurred, we must leave with the same rights we had -+ upon entry. If errors occurred, we must leave with no rights. -+ */ -+ -+ dm_evt_rele_tevp(tevp, error); -+ -+ if (!error && event == DM_EVENT_PREUNMOUNT) { -+ dm_change_fsys_entry(sb, DM_STATE_UNMOUNTING); -+ } -+ -+ return(error); -+} -+ -+ -+/* -+ * Send a message of type "DM_EVENT_USER". Since no inode is involved, we -+ * don't have to worry about rights here. 
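From user space the DM_EVENT_USER path described above reduces to a single library call. A minimal sketch under stated assumptions, not part of the patch: it presumes the user-space <dmapi.h> header, that targetsid identifies another process's session, and notify_other_session is a made-up helper name.

/* Minimal user-space sketch of sending a user message (not part of the patch). */
#include <dmapi.h>			/* user-space DMAPI header (assumed) */

int
notify_other_session(
	dm_sessid_t	targetsid)
{
	char	msg[] = "hsm: rescan requested";

	/* Asynchronous: the message is queued on targetsid and we return at
	 * once; DM_MSGTYPE_SYNC would instead block until that session
	 * answers via dm_respond_event(). buflen may not exceed
	 * DM_MAX_MSG_DATA, or the call fails with E2BIG (see dm_send_msg
	 * below).
	 */
	return dm_send_msg(targetsid, DM_MSGTYPE_ASYNC, sizeof(msg), msg);
}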
-+ */ -+ -+int -+dm_send_msg( -+ dm_sessid_t targetsid, -+ dm_msgtype_t msgtype, /* SYNC or ASYNC */ -+ size_t buflen, -+ void __user *bufp) -+{ -+ dm_tokevent_t *tevp; -+ int sync; -+ void *msgp; -+ int error; -+ -+ if (buflen > DM_MAX_MSG_DATA) -+ return(-E2BIG); -+ if (msgtype == DM_MSGTYPE_ASYNC) { -+ sync = 0; -+ } else if (msgtype == DM_MSGTYPE_SYNC) { -+ sync = 1; -+ } else { -+ return(-EINVAL); -+ } -+ -+ tevp = dm_evt_create_tevp(DM_EVENT_USER, buflen, (void **)&msgp); -+ if (tevp == NULL) -+ return -ENOMEM; -+ -+ if (buflen && copy_from_user(msgp, bufp, buflen)) { -+ dm_evt_rele_tevp(tevp, 0); -+ return(-EFAULT); -+ } -+ -+ /* Enqueue the request and wait for the reply. */ -+ -+ error = dm_enqueue_sendmsg_event(targetsid, tevp, sync); -+ -+ /* Destroy the tevp and return the reply. (dm_pending is not -+ supported here.) -+ */ -+ -+ dm_evt_rele_tevp(tevp, error); -+ -+ return(error); -+} -+ -+ -+/* -+ * Send a message of type "DM_EVENT_USER". Since no inode is involved, we -+ * don't have to worry about rights here. -+ */ -+ -+int -+dm_create_userevent( -+ dm_sessid_t sid, -+ size_t msglen, -+ void __user *msgdatap, -+ dm_token_t __user *tokenp) /* return token created */ -+{ -+ dm_tokevent_t *tevp; -+ dm_token_t token; -+ int error; -+ void *msgp; -+ -+ if (msglen > DM_MAX_MSG_DATA) -+ return(-E2BIG); -+ -+ tevp = dm_evt_create_tevp(DM_EVENT_USER, msglen, (void **)&msgp); -+ if (tevp == NULL) -+ return(-ENOMEM); -+ -+ if (msglen && copy_from_user(msgp, msgdatap, msglen)) { -+ dm_evt_rele_tevp(tevp, 0); -+ return(-EFAULT); -+ } -+ -+ /* Queue the message. If that didn't work, free the tevp structure. */ -+ -+ if ((error = dm_enqueue_user_event(sid, tevp, &token)) != 0) -+ dm_evt_rele_tevp(tevp, 0); -+ -+ if (!error && copy_to_user(tokenp, &token, sizeof(token))) -+ error = -EFAULT; -+ -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_handle.c -@@ -0,0 +1,119 @@ -+/* -+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+int -+dm_create_by_handle( -+ dm_sessid_t sid, -+ void __user *dirhanp, -+ size_t dirhlen, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->create_by_handle(tdp->td_ip, tdp->td_right, -+ hanp, hlen, cname); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_mkdir_by_handle( -+ dm_sessid_t sid, -+ void __user *dirhanp, -+ size_t dirhlen, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->mkdir_by_handle(tdp->td_ip, tdp->td_right, -+ hanp, hlen, cname); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_symlink_by_handle( -+ dm_sessid_t sid, -+ void __user *dirhanp, -+ size_t dirhlen, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname, -+ char __user *path) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->symlink_by_handle(tdp->td_ip, tdp->td_right, -+ hanp, hlen, cname, path); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_hole.c -@@ -0,0 +1,119 @@ -+/* -+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+int -+dm_get_allocinfo_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t __user *offp, -+ u_int nelem, -+ dm_extent_t __user *extentp, -+ u_int __user *nelemp, -+ int *rvp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_allocinfo_rvp(tdp->td_ip, tdp->td_right, -+ offp, nelem, extentp, nelemp, rvp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_probe_hole( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t off, -+ dm_size_t len, -+ dm_off_t __user *roffp, -+ dm_size_t __user *rlenp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->probe_hole(tdp->td_ip, tdp->td_right, -+ off, len, roffp, rlenp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_punch_hole( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t off, -+ dm_size_t len) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->punch_hole(tdp->td_ip, tdp->td_right, off, len); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_io.c -@@ -0,0 +1,142 @@ -+/* -+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+int -+dm_read_invis_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->read_invis_rvp(tdp->td_ip, tdp->td_right, -+ off, len, bufp, rvp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_write_invis_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ int flags, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->write_invis_rvp(tdp->td_ip, tdp->td_right, -+ flags, off, len, bufp, rvp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_sync_by_handle ( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->sync_by_handle(tdp->td_ip, tdp->td_right); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_get_dioinfo ( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_dioinfo_t __user *diop) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_dioinfo(tdp->td_ip, tdp->td_right, diop); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_kern.h -@@ -0,0 +1,598 @@ -+/* -+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. 
-+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+#ifndef __DMAPI_KERN_H__ -+#define __DMAPI_KERN_H__ -+ -+#include -+ -+union sys_dmapi_uarg { -+ void *p; -+ __u64 u; -+}; -+typedef union sys_dmapi_uarg sys_dmapi_u; -+ -+struct sys_dmapi_args { -+ sys_dmapi_u uarg1, uarg2, uarg3, uarg4, uarg5, uarg6, uarg7, uarg8, -+ uarg9, uarg10, uarg11; -+}; -+typedef struct sys_dmapi_args sys_dmapi_args_t; -+ -+#define DM_Uarg(uap,i) uap->uarg##i.u -+#define DM_Parg(uap,i) uap->uarg##i.p -+ -+#ifdef __KERNEL__ -+ -+struct dm_handle_t; -+ -+/* The first group of definitions and prototypes define the filesystem's -+ interface into the DMAPI code. -+*/ -+ -+ -+/* Definitions used for the flags field on dm_send_data_event(), -+ dm_send_unmount_event(), and dm_send_namesp_event() calls. -+*/ -+ -+#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */ -+#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */ -+ -+/* Possible code levels reported by dm_code_level(). */ -+ -+#define DM_CLVL_INIT 0 /* DMAPI prior to X/Open compliance */ -+#define DM_CLVL_XOPEN 1 /* X/Open compliant DMAPI */ -+ -+ -+/* -+ * Filesystem operations accessed by the DMAPI core. -+ */ -+struct filesystem_dmapi_operations { -+ int (*get_fsys_vector)(struct super_block *sb, void *addr); -+ int (*fh_to_inode)(struct super_block *sb, struct inode **ip, -+ dm_fid_t *fid); -+ int (*inode_to_fh)(struct inode *ip, dm_fid_t *fid, -+ dm_fsid_t *fsid ); -+ void (*get_fsid)(struct super_block *sb, dm_fsid_t *fsid); -+#define HAVE_DM_QUEUE_FLUSH -+ int (*flushing)(struct inode *ip); -+}; -+ -+ -+/* Prototypes used outside of the DMI module/directory. 
*/ -+ -+int dm_send_data_event( -+ dm_eventtype_t event, -+ struct inode *ip, -+ dm_right_t vp_right, -+ dm_off_t off, -+ size_t len, -+ int flags); -+ -+int dm_send_destroy_event( -+ struct inode *ip, -+ dm_right_t vp_right); -+ -+int dm_send_mount_event( -+ struct super_block *sb, -+ dm_right_t vfsp_right, -+ struct inode *ip, -+ dm_right_t vp_right, -+ struct inode *rootip, -+ dm_right_t rootvp_right, -+ char *name1, -+ char *name2); -+ -+int dm_send_namesp_event( -+ dm_eventtype_t event, -+ struct super_block *sb, -+ struct inode *ip1, -+ dm_right_t vp1_right, -+ struct inode *ip2, -+ dm_right_t vp2_right, -+ const char *name1, -+ const char *name2, -+ mode_t mode, -+ int retcode, -+ int flags); -+ -+void dm_send_unmount_event( -+ struct super_block *sbp, -+ struct inode *ip, -+ dm_right_t sbp_right, -+ mode_t mode, -+ int retcode, -+ int flags); -+ -+int dm_code_level(void); -+ -+int dm_ip_to_handle ( -+ struct inode *ip, -+ dm_handle_t *handlep); -+ -+#define HAVE_DM_RELEASE_THREADS_ERRNO -+int dm_release_threads( -+ struct super_block *sb, -+ struct inode *inode, -+ int errno); -+ -+void dmapi_register( -+ struct file_system_type *fstype, -+ struct filesystem_dmapi_operations *dmapiops); -+ -+void dmapi_unregister( -+ struct file_system_type *fstype); -+ -+int dmapi_registered( -+ struct file_system_type *fstype, -+ struct filesystem_dmapi_operations **dmapiops); -+ -+ -+/* The following prototypes and definitions are used by DMAPI as its -+ interface into the filesystem code. Communication between DMAPI and the -+ filesystem are established as follows: -+ 1. DMAPI uses the VFS_DMAPI_FSYS_VECTOR to ask for the addresses -+ of all the functions within the filesystem that it may need to call. -+ 2. The filesystem returns an array of function name/address pairs which -+ DMAPI builds into a function vector. -+ The VFS_DMAPI_FSYS_VECTOR call is only made one time for a particular -+ filesystem type. From then on, DMAPI uses its function vector to call the -+ filesystem functions directly. Functions in the array which DMAPI doesn't -+ recognize are ignored. A dummy function which returns ENOSYS is used for -+ any function that DMAPI needs but which was not provided by the filesystem. -+ If XFS doesn't recognize the VFS_DMAPI_FSYS_VECTOR, DMAPI assumes that it -+ doesn't have the X/Open support code; in this case DMAPI uses the XFS-code -+ originally bundled within DMAPI. -+ -+ The goal of this interface is allow incremental changes to be made to -+ both the filesystem and to DMAPI while minimizing inter-patch dependencies, -+ and to eventually allow DMAPI to support multiple filesystem types at the -+ same time should that become necessary. 
-+*/ -+ -+typedef enum { -+ DM_FSYS_CLEAR_INHERIT = 0, -+ DM_FSYS_CREATE_BY_HANDLE = 1, -+ DM_FSYS_DOWNGRADE_RIGHT = 2, -+ DM_FSYS_GET_ALLOCINFO_RVP = 3, -+ DM_FSYS_GET_BULKALL_RVP = 4, -+ DM_FSYS_GET_BULKATTR_RVP = 5, -+ DM_FSYS_GET_CONFIG = 6, -+ DM_FSYS_GET_CONFIG_EVENTS = 7, -+ DM_FSYS_GET_DESTROY_DMATTR = 8, -+ DM_FSYS_GET_DIOINFO = 9, -+ DM_FSYS_GET_DIRATTRS_RVP = 10, -+ DM_FSYS_GET_DMATTR = 11, -+ DM_FSYS_GET_EVENTLIST = 12, -+ DM_FSYS_GET_FILEATTR = 13, -+ DM_FSYS_GET_REGION = 14, -+ DM_FSYS_GETALL_DMATTR = 15, -+ DM_FSYS_GETALL_INHERIT = 16, -+ DM_FSYS_INIT_ATTRLOC = 17, -+ DM_FSYS_MKDIR_BY_HANDLE = 18, -+ DM_FSYS_PROBE_HOLE = 19, -+ DM_FSYS_PUNCH_HOLE = 20, -+ DM_FSYS_READ_INVIS_RVP = 21, -+ DM_FSYS_RELEASE_RIGHT = 22, -+ DM_FSYS_REMOVE_DMATTR = 23, -+ DM_FSYS_REQUEST_RIGHT = 24, -+ DM_FSYS_SET_DMATTR = 25, -+ DM_FSYS_SET_EVENTLIST = 26, -+ DM_FSYS_SET_FILEATTR = 27, -+ DM_FSYS_SET_INHERIT = 28, -+ DM_FSYS_SET_REGION = 29, -+ DM_FSYS_SYMLINK_BY_HANDLE = 30, -+ DM_FSYS_SYNC_BY_HANDLE = 31, -+ DM_FSYS_UPGRADE_RIGHT = 32, -+ DM_FSYS_WRITE_INVIS_RVP = 33, -+ DM_FSYS_OBJ_REF_HOLD = 34, -+ DM_FSYS_MAX = 35 -+} dm_fsys_switch_t; -+ -+ -+#define DM_FSYS_OBJ 0x1 /* object refers to a fsys handle */ -+ -+ -+/* -+ * Prototypes for filesystem-specific functions. -+ */ -+ -+typedef int (*dm_fsys_clear_inherit_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep); -+ -+typedef int (*dm_fsys_create_by_handle_t)( -+ struct inode *ip, -+ dm_right_t right, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname); -+ -+typedef int (*dm_fsys_downgrade_right_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int type); /* DM_FSYS_OBJ or zero */ -+ -+typedef int (*dm_fsys_get_allocinfo_rvp_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_off_t __user *offp, -+ u_int nelem, -+ dm_extent_t __user *extentp, -+ u_int __user *nelemp, -+ int *rvalp); -+ -+typedef int (*dm_fsys_get_bulkall_rvp_t)( -+ struct inode *ip, /* root inode */ -+ dm_right_t right, -+ u_int mask, -+ dm_attrname_t __user *attrnamep, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvalp); -+ -+typedef int (*dm_fsys_get_bulkattr_rvp_t)( -+ struct inode *ip, /* root inode */ -+ dm_right_t right, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvalp); -+ -+typedef int (*dm_fsys_get_config_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_config_t flagname, -+ dm_size_t __user *retvalp); -+ -+typedef int (*dm_fsys_get_config_events_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int nelem, -+ dm_eventset_t __user *eventsetp, -+ u_int __user *nelemp); -+ -+typedef int (*dm_fsys_get_destroy_dmattr_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_attrname_t *attrnamep, -+ char **valuepp, -+ int *vlenp); -+ -+typedef int (*dm_fsys_get_dioinfo_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_dioinfo_t __user *diop); -+ -+typedef int (*dm_fsys_get_dirattrs_rvp_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvalp); -+ -+typedef int (*dm_fsys_get_dmattr_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+typedef int (*dm_fsys_get_eventlist_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int type, -+ u_int nelem, -+ dm_eventset_t *eventsetp, /* in kernel space! 
*/ -+ u_int *nelemp); /* in kernel space! */ -+ -+typedef int (*dm_fsys_get_fileattr_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int mask, -+ dm_stat_t __user *statp); -+ -+typedef int (*dm_fsys_get_region_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int nelem, -+ dm_region_t __user *regbufp, -+ u_int __user *nelemp); -+ -+typedef int (*dm_fsys_getall_dmattr_t)( -+ struct inode *ip, -+ dm_right_t right, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+typedef int (*dm_fsys_getall_inherit_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int nelem, -+ dm_inherit_t __user *inheritbufp, -+ u_int __user *nelemp); -+ -+typedef int (*dm_fsys_init_attrloc_t)( -+ struct inode *ip, /* sometimes root inode */ -+ dm_right_t right, -+ dm_attrloc_t __user *locp); -+ -+typedef int (*dm_fsys_mkdir_by_handle_t)( -+ struct inode *ip, -+ dm_right_t right, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname); -+ -+typedef int (*dm_fsys_probe_hole_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_off_t off, -+ dm_size_t len, -+ dm_off_t __user *roffp, -+ dm_size_t __user *rlenp); -+ -+typedef int (*dm_fsys_punch_hole_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_off_t off, -+ dm_size_t len); -+ -+typedef int (*dm_fsys_read_invis_rvp_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp); -+ -+typedef int (*dm_fsys_release_right_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int type); -+ -+typedef int (*dm_fsys_remove_dmattr_t)( -+ struct inode *ip, -+ dm_right_t right, -+ int setdtime, -+ dm_attrname_t __user *attrnamep); -+ -+typedef int (*dm_fsys_request_right_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int type, /* DM_FSYS_OBJ or zero */ -+ u_int flags, -+ dm_right_t newright); -+ -+typedef int (*dm_fsys_set_dmattr_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep, -+ int setdtime, -+ size_t buflen, -+ void __user *bufp); -+ -+typedef int (*dm_fsys_set_eventlist_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int type, -+ dm_eventset_t *eventsetp, /* in kernel space! */ -+ u_int maxevent); -+ -+typedef int (*dm_fsys_set_fileattr_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int mask, -+ dm_fileattr_t __user *attrp); -+ -+typedef int (*dm_fsys_set_inherit_t)( -+ struct inode *ip, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep, -+ mode_t mode); -+ -+typedef int (*dm_fsys_set_region_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int nelem, -+ dm_region_t __user *regbufp, -+ dm_boolean_t __user *exactflagp); -+ -+typedef int (*dm_fsys_symlink_by_handle_t)( -+ struct inode *ip, -+ dm_right_t right, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname, -+ char __user *path); -+ -+typedef int (*dm_fsys_sync_by_handle_t)( -+ struct inode *ip, -+ dm_right_t right); -+ -+typedef int (*dm_fsys_upgrade_right_t)( -+ struct inode *ip, -+ dm_right_t right, -+ u_int type); /* DM_FSYS_OBJ or zero */ -+ -+typedef int (*dm_fsys_write_invis_rvp_t)( -+ struct inode *ip, -+ dm_right_t right, -+ int flags, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp); -+ -+typedef void (*dm_fsys_obj_ref_hold_t)( -+ struct inode *ip); -+ -+ -+/* Structure definitions used by the VFS_DMAPI_FSYS_VECTOR call. 
*/ -+ -+typedef struct { -+ dm_fsys_switch_t func_no; /* function number */ -+ union { -+ dm_fsys_clear_inherit_t clear_inherit; -+ dm_fsys_create_by_handle_t create_by_handle; -+ dm_fsys_downgrade_right_t downgrade_right; -+ dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp; -+ dm_fsys_get_bulkall_rvp_t get_bulkall_rvp; -+ dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp; -+ dm_fsys_get_config_t get_config; -+ dm_fsys_get_config_events_t get_config_events; -+ dm_fsys_get_destroy_dmattr_t get_destroy_dmattr; -+ dm_fsys_get_dioinfo_t get_dioinfo; -+ dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp; -+ dm_fsys_get_dmattr_t get_dmattr; -+ dm_fsys_get_eventlist_t get_eventlist; -+ dm_fsys_get_fileattr_t get_fileattr; -+ dm_fsys_get_region_t get_region; -+ dm_fsys_getall_dmattr_t getall_dmattr; -+ dm_fsys_getall_inherit_t getall_inherit; -+ dm_fsys_init_attrloc_t init_attrloc; -+ dm_fsys_mkdir_by_handle_t mkdir_by_handle; -+ dm_fsys_probe_hole_t probe_hole; -+ dm_fsys_punch_hole_t punch_hole; -+ dm_fsys_read_invis_rvp_t read_invis_rvp; -+ dm_fsys_release_right_t release_right; -+ dm_fsys_remove_dmattr_t remove_dmattr; -+ dm_fsys_request_right_t request_right; -+ dm_fsys_set_dmattr_t set_dmattr; -+ dm_fsys_set_eventlist_t set_eventlist; -+ dm_fsys_set_fileattr_t set_fileattr; -+ dm_fsys_set_inherit_t set_inherit; -+ dm_fsys_set_region_t set_region; -+ dm_fsys_symlink_by_handle_t symlink_by_handle; -+ dm_fsys_sync_by_handle_t sync_by_handle; -+ dm_fsys_upgrade_right_t upgrade_right; -+ dm_fsys_write_invis_rvp_t write_invis_rvp; -+ dm_fsys_obj_ref_hold_t obj_ref_hold; -+ } u_fc; -+} fsys_function_vector_t; -+ -+struct dm_fcntl_vector { -+ int code_level; -+ int count; /* Number of functions in the vector */ -+ fsys_function_vector_t *vecp; -+}; -+typedef struct dm_fcntl_vector dm_fcntl_vector_t; -+ -+struct dm_fcntl_mapevent { -+ size_t length; /* length of transfer */ -+ dm_eventtype_t max_event; /* Maximum (WRITE or READ) event */ -+ int error; /* returned error code */ -+}; -+typedef struct dm_fcntl_mapevent dm_fcntl_mapevent_t; -+ -+#endif /* __KERNEL__ */ -+ -+ -+/* The following definitions are needed both by the kernel and by the -+ library routines. -+*/ -+ -+#define DM_MAX_HANDLE_SIZE 56 /* maximum size for a file handle */ -+ -+ -+/* -+ * Opcodes for dmapi ioctl. 
-+ */ -+ -+#define DM_CLEAR_INHERIT 1 -+#define DM_CREATE_BY_HANDLE 2 -+#define DM_CREATE_SESSION 3 -+#define DM_CREATE_USEREVENT 4 -+#define DM_DESTROY_SESSION 5 -+#define DM_DOWNGRADE_RIGHT 6 -+#define DM_FD_TO_HANDLE 7 -+#define DM_FIND_EVENTMSG 8 -+#define DM_GET_ALLOCINFO 9 -+#define DM_GET_BULKALL 10 -+#define DM_GET_BULKATTR 11 -+#define DM_GET_CONFIG 12 -+#define DM_GET_CONFIG_EVENTS 13 -+#define DM_GET_DIOINFO 14 -+#define DM_GET_DIRATTRS 15 -+#define DM_GET_DMATTR 16 -+#define DM_GET_EVENTLIST 17 -+#define DM_GET_EVENTS 18 -+#define DM_GET_FILEATTR 19 -+#define DM_GET_MOUNTINFO 20 -+#define DM_GET_REGION 21 -+#define DM_GETALL_DISP 22 -+#define DM_GETALL_DMATTR 23 -+#define DM_GETALL_INHERIT 24 -+#define DM_GETALL_SESSIONS 25 -+#define DM_GETALL_TOKENS 26 -+#define DM_INIT_ATTRLOC 27 -+#define DM_MKDIR_BY_HANDLE 28 -+#define DM_MOVE_EVENT 29 -+#define DM_OBJ_REF_HOLD 30 -+#define DM_OBJ_REF_QUERY 31 -+#define DM_OBJ_REF_RELE 32 -+#define DM_PATH_TO_FSHANDLE 33 -+#define DM_PATH_TO_HANDLE 34 -+#define DM_PENDING 35 -+#define DM_PROBE_HOLE 36 -+#define DM_PUNCH_HOLE 37 -+#define DM_QUERY_RIGHT 38 -+#define DM_QUERY_SESSION 39 -+#define DM_READ_INVIS 40 -+#define DM_RELEASE_RIGHT 41 -+#define DM_REMOVE_DMATTR 42 -+#define DM_REQUEST_RIGHT 43 -+#define DM_RESPOND_EVENT 44 -+#define DM_SEND_MSG 45 -+#define DM_SET_DISP 46 -+#define DM_SET_DMATTR 47 -+#define DM_SET_EVENTLIST 48 -+#define DM_SET_FILEATTR 49 -+#define DM_SET_INHERIT 50 -+#define DM_SET_REGION 51 -+#define DM_SET_RETURN_ON_DESTROY 52 -+#define DM_SYMLINK_BY_HANDLE 53 -+#define DM_SYNC_BY_HANDLE 54 -+#define DM_UPGRADE_RIGHT 55 -+#define DM_WRITE_INVIS 56 -+#define DM_OPEN_BY_HANDLE 57 -+ -+#endif /* __DMAPI_KERN_H__ */ ---- /dev/null -+++ b/fs/dmapi/dmapi_mountinfo.c -@@ -0,0 +1,527 @@ -+/* -+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
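The DM_* opcodes above, together with the DM_Uarg()/DM_Parg() accessors defined earlier in dmapi_kern.h, form the ioctl ABI between the dmapi library and this module. The following fragment is only a hedged sketch of how a dispatcher could decode one opcode into the dm_punch_hole() wrapper shown earlier in this series: the real entry point is not part of this hunk, example_dmapi_dispatch() is a made-up name, and the argument ordering is an assumption, not the actual ABI.

/* Illustrative dispatcher fragment (not from the patch).  It shows how the
 * token-pasting DM_Uarg()/DM_Parg() macros pull typed values out of the
 * eleven-slot sys_dmapi_args_t that accompanies each DM_* opcode.  The
 * slot assignment below is hypothetical.
 */
static int example_dmapi_dispatch(unsigned int cmd, sys_dmapi_args_t *uap)
{
	int error;

	switch (cmd) {
	case DM_PUNCH_HOLE:
		error = dm_punch_hole(
			(dm_sessid_t)	DM_Uarg(uap, 1),	/* sid   */
			(void __user *)	DM_Parg(uap, 2),	/* hanp  */
			(size_t)	DM_Uarg(uap, 3),	/* hlen  */
			(dm_token_t)	DM_Uarg(uap, 4),	/* token */
			(dm_off_t)	DM_Uarg(uap, 5),	/* off   */
			(dm_size_t)	DM_Uarg(uap, 6));	/* len   */
		break;
	default:
		error = -ENOSYS;	/* opcode not handled in this sketch */
		break;
	}
	return error;
}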
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+static LIST_HEAD(dm_fsys_map); -+static spinlock_t dm_fsys_lock = SPIN_LOCK_UNLOCKED; -+ -+int -+dm_code_level(void) -+{ -+ return DM_CLVL_XOPEN; /* initial X/Open compliant release */ -+} -+ -+ -+/* Dummy routine which is stored in each function vector slot for which the -+ filesystem provides no function of its own. If an application calls the -+ function, he will just get ENOSYS. -+*/ -+ -+static int -+dm_enosys(void) -+{ -+ return -ENOSYS; /* function not supported by filesystem */ -+} -+ -+ -+/* dm_query_fsys_for_vector() asks a filesystem for its list of supported -+ DMAPI functions, and builds a dm_vector_map_t structure based upon the -+ reply. We ignore functions supported by the filesystem which we do not -+ know about, and we substitute the subroutine 'dm_enosys' for each function -+ we know about but the filesystem does not support. -+*/ -+ -+static void -+dm_query_fsys_for_vector( -+ dm_vector_map_t *map) -+{ -+ struct super_block *sb = map->sb; -+ fsys_function_vector_t *vecp; -+ dm_fcntl_vector_t vecrq; -+ dm_fsys_vector_t *vptr; -+ struct filesystem_dmapi_operations *dmapiops = map->dmapiops; -+ int error; -+ int i; -+ -+ -+ /* Allocate a function vector and initialize all fields with a -+ dummy function that returns ENOSYS. -+ */ -+ -+ vptr = map->vptr = kmem_cache_alloc(dm_fsys_vptr_cachep, GFP_KERNEL); -+ if (vptr == NULL) { -+ printk("%s/%d: kmem_cache_alloc(dm_fsys_vptr_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return; -+ } -+ -+ vptr->code_level = 0; -+ vptr->clear_inherit = (dm_fsys_clear_inherit_t)dm_enosys; -+ vptr->create_by_handle = (dm_fsys_create_by_handle_t)dm_enosys; -+ vptr->downgrade_right = (dm_fsys_downgrade_right_t)dm_enosys; -+ vptr->get_allocinfo_rvp = (dm_fsys_get_allocinfo_rvp_t)dm_enosys; -+ vptr->get_bulkall_rvp = (dm_fsys_get_bulkall_rvp_t)dm_enosys; -+ vptr->get_bulkattr_rvp = (dm_fsys_get_bulkattr_rvp_t)dm_enosys; -+ vptr->get_config = (dm_fsys_get_config_t)dm_enosys; -+ vptr->get_config_events = (dm_fsys_get_config_events_t)dm_enosys; -+ vptr->get_destroy_dmattr = (dm_fsys_get_destroy_dmattr_t)dm_enosys; -+ vptr->get_dioinfo = (dm_fsys_get_dioinfo_t)dm_enosys; -+ vptr->get_dirattrs_rvp = (dm_fsys_get_dirattrs_rvp_t)dm_enosys; -+ vptr->get_dmattr = (dm_fsys_get_dmattr_t)dm_enosys; -+ vptr->get_eventlist = (dm_fsys_get_eventlist_t)dm_enosys; -+ vptr->get_fileattr = (dm_fsys_get_fileattr_t)dm_enosys; -+ vptr->get_region = (dm_fsys_get_region_t)dm_enosys; -+ vptr->getall_dmattr = (dm_fsys_getall_dmattr_t)dm_enosys; -+ vptr->getall_inherit = (dm_fsys_getall_inherit_t)dm_enosys; -+ vptr->init_attrloc = (dm_fsys_init_attrloc_t)dm_enosys; -+ vptr->mkdir_by_handle = (dm_fsys_mkdir_by_handle_t)dm_enosys; -+ vptr->probe_hole = (dm_fsys_probe_hole_t)dm_enosys; -+ vptr->punch_hole = (dm_fsys_punch_hole_t)dm_enosys; -+ vptr->read_invis_rvp = (dm_fsys_read_invis_rvp_t)dm_enosys; -+ vptr->release_right = (dm_fsys_release_right_t)dm_enosys; -+ vptr->request_right = (dm_fsys_request_right_t)dm_enosys; -+ vptr->remove_dmattr = (dm_fsys_remove_dmattr_t)dm_enosys; -+ vptr->set_dmattr = (dm_fsys_set_dmattr_t)dm_enosys; -+ vptr->set_eventlist = (dm_fsys_set_eventlist_t)dm_enosys; -+ vptr->set_fileattr = 
(dm_fsys_set_fileattr_t)dm_enosys; -+ vptr->set_inherit = (dm_fsys_set_inherit_t)dm_enosys; -+ vptr->set_region = (dm_fsys_set_region_t)dm_enosys; -+ vptr->symlink_by_handle = (dm_fsys_symlink_by_handle_t)dm_enosys; -+ vptr->sync_by_handle = (dm_fsys_sync_by_handle_t)dm_enosys; -+ vptr->upgrade_right = (dm_fsys_upgrade_right_t)dm_enosys; -+ vptr->write_invis_rvp = (dm_fsys_write_invis_rvp_t)dm_enosys; -+ vptr->obj_ref_hold = (dm_fsys_obj_ref_hold_t)dm_enosys; -+ -+ /* Issue a call to the filesystem in order to obtain -+ its vector of filesystem-specific DMAPI routines. -+ */ -+ -+ vecrq.count = 0; -+ vecrq.vecp = NULL; -+ -+ error = -ENOSYS; -+ ASSERT(dmapiops); -+ if (dmapiops->get_fsys_vector) -+ error = dmapiops->get_fsys_vector(sb, (caddr_t)&vecrq); -+ -+ /* If we still have an error at this point, then the filesystem simply -+ does not support DMAPI, so we give up with all functions set to -+ ENOSYS. -+ */ -+ -+ if (error || vecrq.count == 0) { -+ kmem_cache_free(dm_fsys_vptr_cachep, vptr); -+ map->vptr = NULL; -+ return; -+ } -+ -+ /* The request succeeded and we were given a vector which we need to -+ map to our current level. Overlay the dummy function with every -+ filesystem function we understand. -+ */ -+ -+ vptr->code_level = vecrq.code_level; -+ vecp = vecrq.vecp; -+ for (i = 0; i < vecrq.count; i++) { -+ switch (vecp[i].func_no) { -+ case DM_FSYS_CLEAR_INHERIT: -+ vptr->clear_inherit = vecp[i].u_fc.clear_inherit; -+ break; -+ case DM_FSYS_CREATE_BY_HANDLE: -+ vptr->create_by_handle = vecp[i].u_fc.create_by_handle; -+ break; -+ case DM_FSYS_DOWNGRADE_RIGHT: -+ vptr->downgrade_right = vecp[i].u_fc.downgrade_right; -+ break; -+ case DM_FSYS_GET_ALLOCINFO_RVP: -+ vptr->get_allocinfo_rvp = vecp[i].u_fc.get_allocinfo_rvp; -+ break; -+ case DM_FSYS_GET_BULKALL_RVP: -+ vptr->get_bulkall_rvp = vecp[i].u_fc.get_bulkall_rvp; -+ break; -+ case DM_FSYS_GET_BULKATTR_RVP: -+ vptr->get_bulkattr_rvp = vecp[i].u_fc.get_bulkattr_rvp; -+ break; -+ case DM_FSYS_GET_CONFIG: -+ vptr->get_config = vecp[i].u_fc.get_config; -+ break; -+ case DM_FSYS_GET_CONFIG_EVENTS: -+ vptr->get_config_events = vecp[i].u_fc.get_config_events; -+ break; -+ case DM_FSYS_GET_DESTROY_DMATTR: -+ vptr->get_destroy_dmattr = vecp[i].u_fc.get_destroy_dmattr; -+ break; -+ case DM_FSYS_GET_DIOINFO: -+ vptr->get_dioinfo = vecp[i].u_fc.get_dioinfo; -+ break; -+ case DM_FSYS_GET_DIRATTRS_RVP: -+ vptr->get_dirattrs_rvp = vecp[i].u_fc.get_dirattrs_rvp; -+ break; -+ case DM_FSYS_GET_DMATTR: -+ vptr->get_dmattr = vecp[i].u_fc.get_dmattr; -+ break; -+ case DM_FSYS_GET_EVENTLIST: -+ vptr->get_eventlist = vecp[i].u_fc.get_eventlist; -+ break; -+ case DM_FSYS_GET_FILEATTR: -+ vptr->get_fileattr = vecp[i].u_fc.get_fileattr; -+ break; -+ case DM_FSYS_GET_REGION: -+ vptr->get_region = vecp[i].u_fc.get_region; -+ break; -+ case DM_FSYS_GETALL_DMATTR: -+ vptr->getall_dmattr = vecp[i].u_fc.getall_dmattr; -+ break; -+ case DM_FSYS_GETALL_INHERIT: -+ vptr->getall_inherit = vecp[i].u_fc.getall_inherit; -+ break; -+ case DM_FSYS_INIT_ATTRLOC: -+ vptr->init_attrloc = vecp[i].u_fc.init_attrloc; -+ break; -+ case DM_FSYS_MKDIR_BY_HANDLE: -+ vptr->mkdir_by_handle = vecp[i].u_fc.mkdir_by_handle; -+ break; -+ case DM_FSYS_PROBE_HOLE: -+ vptr->probe_hole = vecp[i].u_fc.probe_hole; -+ break; -+ case DM_FSYS_PUNCH_HOLE: -+ vptr->punch_hole = vecp[i].u_fc.punch_hole; -+ break; -+ case DM_FSYS_READ_INVIS_RVP: -+ vptr->read_invis_rvp = vecp[i].u_fc.read_invis_rvp; -+ break; -+ case DM_FSYS_RELEASE_RIGHT: -+ vptr->release_right = vecp[i].u_fc.release_right; 
-+ break; -+ case DM_FSYS_REMOVE_DMATTR: -+ vptr->remove_dmattr = vecp[i].u_fc.remove_dmattr; -+ break; -+ case DM_FSYS_REQUEST_RIGHT: -+ vptr->request_right = vecp[i].u_fc.request_right; -+ break; -+ case DM_FSYS_SET_DMATTR: -+ vptr->set_dmattr = vecp[i].u_fc.set_dmattr; -+ break; -+ case DM_FSYS_SET_EVENTLIST: -+ vptr->set_eventlist = vecp[i].u_fc.set_eventlist; -+ break; -+ case DM_FSYS_SET_FILEATTR: -+ vptr->set_fileattr = vecp[i].u_fc.set_fileattr; -+ break; -+ case DM_FSYS_SET_INHERIT: -+ vptr->set_inherit = vecp[i].u_fc.set_inherit; -+ break; -+ case DM_FSYS_SET_REGION: -+ vptr->set_region = vecp[i].u_fc.set_region; -+ break; -+ case DM_FSYS_SYMLINK_BY_HANDLE: -+ vptr->symlink_by_handle = vecp[i].u_fc.symlink_by_handle; -+ break; -+ case DM_FSYS_SYNC_BY_HANDLE: -+ vptr->sync_by_handle = vecp[i].u_fc.sync_by_handle; -+ break; -+ case DM_FSYS_UPGRADE_RIGHT: -+ vptr->upgrade_right = vecp[i].u_fc.upgrade_right; -+ break; -+ case DM_FSYS_WRITE_INVIS_RVP: -+ vptr->write_invis_rvp = vecp[i].u_fc.write_invis_rvp; -+ break; -+ case DM_FSYS_OBJ_REF_HOLD: -+ vptr->obj_ref_hold = vecp[i].u_fc.obj_ref_hold; -+ break; -+ default: /* ignore ones we don't understand */ -+ break; -+ } -+ } -+} -+ -+ -+/* Must hold dm_fsys_lock. -+ * This returns the prototype for all instances of the fstype. -+ */ -+static dm_vector_map_t * -+dm_fsys_map_by_fstype( -+ struct file_system_type *fstype) -+{ -+ struct list_head *p; -+ dm_vector_map_t *proto = NULL; -+ dm_vector_map_t *m; -+ -+ ASSERT_ALWAYS(fstype); -+ list_for_each(p, &dm_fsys_map) { -+ m = list_entry(p, dm_vector_map_t, ftype_list); -+ if (m->f_type == fstype) { -+ proto = m; -+ break; -+ } -+ } -+ return proto; -+} -+ -+ -+/* Must hold dm_fsys_lock */ -+static dm_vector_map_t * -+dm_fsys_map_by_sb( -+ struct super_block *sb) -+{ -+ struct list_head *p; -+ dm_vector_map_t *proto; -+ dm_vector_map_t *m; -+ dm_vector_map_t *foundmap = NULL; -+ -+ proto = dm_fsys_map_by_fstype(sb->s_type); -+ if(proto == NULL) { -+ return NULL; -+ } -+ -+ list_for_each(p, &proto->sb_list) { -+ m = list_entry(p, dm_vector_map_t, sb_list); -+ if (m->sb == sb) { -+ foundmap = m; -+ break; -+ } -+ } -+ return foundmap; -+} -+ -+ -+#ifdef CONFIG_DMAPI_DEBUG -+static void -+sb_list( -+ struct super_block *sb) -+{ -+ struct list_head *p; -+ dm_vector_map_t *proto; -+ dm_vector_map_t *m; -+ -+ proto = dm_fsys_map_by_fstype(sb->s_type); -+ ASSERT(proto); -+ -+printk("%s/%d: Current sb_list\n", __FUNCTION__, __LINE__); -+ list_for_each(p, &proto->sb_list) { -+ m = list_entry(p, dm_vector_map_t, sb_list); -+printk("%s/%d: map 0x%p, sb 0x%p, vptr 0x%p, dmapiops 0x%p\n", __FUNCTION__, __LINE__, m, m->sb, m->vptr, m->dmapiops); -+ } -+printk("%s/%d: Done sb_list\n", __FUNCTION__, __LINE__); -+} -+#else -+#define sb_list(x) -+#endif -+ -+#ifdef CONFIG_DMAPI_DEBUG -+static void -+ftype_list(void) -+{ -+ struct list_head *p; -+ dm_vector_map_t *m; -+ -+printk("%s/%d: Current ftype_list\n", __FUNCTION__, __LINE__); -+ list_for_each(p, &dm_fsys_map) { -+ m = list_entry(p, dm_vector_map_t, ftype_list); -+ printk("%s/%d: FS 0x%p, ftype 0x%p %s\n", __FUNCTION__, __LINE__, m, m->f_type, m->f_type->name); -+ } -+printk("%s/%d: Done ftype_list\n", __FUNCTION__, __LINE__); -+} -+#else -+#define ftype_list() -+#endif -+ -+/* Ask for vptr for this filesystem instance. -+ * The caller knows this inode is on a dmapi-managed filesystem. 
-+ */ -+dm_fsys_vector_t * -+dm_fsys_vector( -+ struct inode *ip) -+{ -+ dm_vector_map_t *map; -+ -+ spin_lock(&dm_fsys_lock); -+ ftype_list(); -+ map = dm_fsys_map_by_sb(ip->i_sb); -+ spin_unlock(&dm_fsys_lock); -+ ASSERT(map); -+ ASSERT(map->vptr); -+ return map->vptr; -+} -+ -+ -+/* Ask for the dmapiops for this filesystem instance. The caller is -+ * also asking if this is a dmapi-managed filesystem. -+ */ -+struct filesystem_dmapi_operations * -+dm_fsys_ops( -+ struct super_block *sb) -+{ -+ dm_vector_map_t *proto = NULL; -+ dm_vector_map_t *map; -+ -+ spin_lock(&dm_fsys_lock); -+ ftype_list(); -+ sb_list(sb); -+ map = dm_fsys_map_by_sb(sb); -+ if (map == NULL) -+ proto = dm_fsys_map_by_fstype(sb->s_type); -+ spin_unlock(&dm_fsys_lock); -+ -+ if ((map == NULL) && (proto == NULL)) -+ return NULL; -+ -+ if (map == NULL) { -+ /* Find out if it's dmapi-managed */ -+ dm_vector_map_t *m; -+ -+ ASSERT(proto); -+ m = kmem_cache_alloc(dm_fsys_map_cachep, GFP_KERNEL); -+ if (m == NULL) { -+ printk("%s/%d: kmem_cache_alloc(dm_fsys_map_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return NULL; -+ } -+ memset(m, 0, sizeof(*m)); -+ m->dmapiops = proto->dmapiops; -+ m->f_type = sb->s_type; -+ m->sb = sb; -+ INIT_LIST_HEAD(&m->sb_list); -+ INIT_LIST_HEAD(&m->ftype_list); -+ -+ dm_query_fsys_for_vector(m); -+ if (m->vptr == NULL) { -+ /* This isn't dmapi-managed */ -+ kmem_cache_free(dm_fsys_map_cachep, m); -+ return NULL; -+ } -+ -+ spin_lock(&dm_fsys_lock); -+ if ((map = dm_fsys_map_by_sb(sb)) == NULL) -+ list_add(&m->sb_list, &proto->sb_list); -+ spin_unlock(&dm_fsys_lock); -+ -+ if (map) { -+ kmem_cache_free(dm_fsys_vptr_cachep, m->vptr); -+ kmem_cache_free(dm_fsys_map_cachep, m); -+ } -+ else { -+ map = m; -+ } -+ } -+ -+ return map->dmapiops; -+} -+ -+ -+ -+/* Called when a filesystem instance is unregistered from dmapi */ -+void -+dm_fsys_ops_release( -+ struct super_block *sb) -+{ -+ dm_vector_map_t *map; -+ -+ spin_lock(&dm_fsys_lock); -+ ASSERT(!list_empty(&dm_fsys_map)); -+ map = dm_fsys_map_by_sb(sb); -+ ASSERT(map); -+ list_del(&map->sb_list); -+ spin_unlock(&dm_fsys_lock); -+ -+ ASSERT(map->vptr); -+ kmem_cache_free(dm_fsys_vptr_cachep, map->vptr); -+ kmem_cache_free(dm_fsys_map_cachep, map); -+} -+ -+ -+/* Called by a filesystem module that is loading into the kernel. -+ * This creates a new dm_vector_map_t which serves as the prototype -+ * for instances of this fstype and also provides the list_head -+ * for instances of this fstype. The prototypes are the only ones -+ * on the fstype_list, and will never be on the sb_list. 
-+ */ -+void -+dmapi_register( -+ struct file_system_type *fstype, -+ struct filesystem_dmapi_operations *dmapiops) -+{ -+ dm_vector_map_t *proto; -+ -+ proto = kmem_cache_alloc(dm_fsys_map_cachep, GFP_KERNEL); -+ if (proto == NULL) { -+ printk("%s/%d: kmem_cache_alloc(dm_fsys_map_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return; -+ } -+ memset(proto, 0, sizeof(*proto)); -+ proto->dmapiops = dmapiops; -+ proto->f_type = fstype; -+ INIT_LIST_HEAD(&proto->sb_list); -+ INIT_LIST_HEAD(&proto->ftype_list); -+ -+ spin_lock(&dm_fsys_lock); -+ ASSERT(dm_fsys_map_by_fstype(fstype) == NULL); -+ list_add(&proto->ftype_list, &dm_fsys_map); -+ ftype_list(); -+ spin_unlock(&dm_fsys_lock); -+} -+ -+/* Called by a filesystem module that is unloading from the kernel */ -+void -+dmapi_unregister( -+ struct file_system_type *fstype) -+{ -+ struct list_head *p; -+ dm_vector_map_t *proto; -+ dm_vector_map_t *m; -+ -+ spin_lock(&dm_fsys_lock); -+ ASSERT(!list_empty(&dm_fsys_map)); -+ proto = dm_fsys_map_by_fstype(fstype); -+ ASSERT(proto); -+ list_del(&proto->ftype_list); -+ spin_unlock(&dm_fsys_lock); -+ -+ p = &proto->sb_list; -+ while (!list_empty(p)) { -+ m = list_entry(p->next, dm_vector_map_t, sb_list); -+ list_del(&m->sb_list); -+ ASSERT(m->vptr); -+ kmem_cache_free(dm_fsys_vptr_cachep, m->vptr); -+ kmem_cache_free(dm_fsys_map_cachep, m); -+ } -+ kmem_cache_free(dm_fsys_map_cachep, proto); -+} -+ -+ -+int -+dmapi_registered( -+ struct file_system_type *fstype, -+ struct filesystem_dmapi_operations **dmapiops) -+{ -+ return 0; -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_port.h -@@ -0,0 +1,138 @@ -+/* -+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
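For reference, a minimal sketch of the filesystem side of the handshake that the dmapi_kern.h comment describes and that dm_query_fsys_for_vector() above consumes. It is not part of the patch: the myfs_* names are hypothetical, and only the dmapi types, the DM_FSYS_* switch values, DM_CLVL_XOPEN, ARRAY_SIZE() and dmapi_register()/dmapi_unregister() come from code in this series.

/* Filesystem-side wiring (illustrative only; myfs_* is a made-up filesystem).
 * The filesystem lists whichever DM_FSYS_* operations it implements in an
 * array of fsys_function_vector_t entries; every slot it omits keeps the
 * dm_enosys stub installed by dm_query_fsys_for_vector() and so fails with
 * -ENOSYS.
 */
static int myfs_dm_punch_hole(struct inode *ip, dm_right_t right,
			      dm_off_t off, dm_size_t len);	/* myfs's own code */
static int myfs_dm_get_dioinfo(struct inode *ip, dm_right_t right,
			       dm_dioinfo_t __user *diop);	/* myfs's own code */

static fsys_function_vector_t myfs_dm_vector[] = {
	{ .func_no = DM_FSYS_PUNCH_HOLE,  .u_fc.punch_hole  = myfs_dm_punch_hole },
	{ .func_no = DM_FSYS_GET_DIOINFO, .u_fc.get_dioinfo = myfs_dm_get_dioinfo },
};

/* Answers the get_fsys_vector() call made from dm_query_fsys_for_vector(). */
static int myfs_get_fsys_vector(struct super_block *sb, void *addr)
{
	dm_fcntl_vector_t *vecrq = addr;

	vecrq->code_level = DM_CLVL_XOPEN;
	vecrq->count = ARRAY_SIZE(myfs_dm_vector);
	vecrq->vecp = myfs_dm_vector;
	return 0;
}

static struct filesystem_dmapi_operations myfs_dmapi_ops = {
	.get_fsys_vector = myfs_get_fsys_vector,
	/* .fh_to_inode, .inode_to_fh, .get_fsid and .flushing would be
	 * filled in the same way. */
};

/* At module init time the filesystem announces itself once per fstype:
 *	dmapi_register(&myfs_fs_type, &myfs_dmapi_ops);
 * and undoes this with dmapi_unregister(&myfs_fs_type) on unload.
 */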
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#ifndef _DMAPI_PORT_H -+#define _DMAPI_PORT_H -+ -+#include -+#include "sv.h" -+ -+#include /* preempt needs this */ -+#include -+ -+typedef spinlock_t lock_t; -+ -+#define spinlock_init(lock, name) spin_lock_init(lock) -+#define spinlock_destroy(lock) -+ -+#define mutex_spinlock(lock) ({ spin_lock(lock); 0; }) -+#define mutex_spinunlock(lock, s) spin_unlock(lock) -+#define nested_spinlock(lock) spin_lock(lock) -+#define nested_spinunlock(lock) spin_unlock(lock) -+ -+typedef signed int __int32_t; -+typedef unsigned int __uint32_t; -+typedef signed long long int __int64_t; -+typedef unsigned long long int __uint64_t; -+ -+ -+/* __psint_t is the same size as a pointer */ -+#if (BITS_PER_LONG == 32) -+typedef __int32_t __psint_t; -+typedef __uint32_t __psunsigned_t; -+#elif (BITS_PER_LONG == 64) -+typedef __int64_t __psint_t; -+typedef __uint64_t __psunsigned_t; -+#else -+#error BITS_PER_LONG must be 32 or 64 -+#endif -+ -+static inline void -+assfail(char *a, char *f, int l) -+{ -+ printk("DMAPI assertion failed: %s, file: %s, line: %d\n", a, f, l); -+ BUG(); -+} -+ -+#ifdef DEBUG -+#define doass 1 -+# ifdef lint -+# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */ -+# else -+# define ASSERT(EX) ((!doass||(EX))?((void)0):assfail(#EX, __FILE__, __LINE__)) -+# endif /* lint */ -+#else -+# define ASSERT(x) ((void)0) -+#endif /* DEBUG */ -+ -+#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__)) -+ -+ -+#if defined __i386__ -+ -+/* Side effect free 64 bit mod operation */ -+static inline __u32 dmapi_do_mod(void *a, __u32 b, int n) -+{ -+ switch (n) { -+ case 4: -+ return *(__u32 *)a % b; -+ case 8: -+ { -+ unsigned long __upper, __low, __high, __mod; -+ __u64 c = *(__u64 *)a; -+ __upper = __high = c >> 32; -+ __low = c; -+ if (__high) { -+ __upper = __high % (b); -+ __high = __high / (b); -+ } -+ asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper)); -+ asm("":"=A" (c):"a" (__low),"d" (__high)); -+ return __mod; -+ } -+ } -+ -+ /* NOTREACHED */ -+ return 0; -+} -+#else -+ -+/* Side effect free 64 bit mod operation */ -+static inline __u32 dmapi_do_mod(void *a, __u32 b, int n) -+{ -+ switch (n) { -+ case 4: -+ return *(__u32 *)a % b; -+ case 8: -+ { -+ __u64 c = *(__u64 *)a; -+ return do_div(c, b); -+ } -+ } -+ -+ /* NOTREACHED */ -+ return 0; -+} -+#endif -+ -+#define do_mod(a, b) dmapi_do_mod(&(a), (b), sizeof(a)) -+ -+#endif /* _DMAPI_PORT_H */ ---- /dev/null -+++ b/fs/dmapi/dmapi_private.h -@@ -0,0 +1,619 @@ -+/* -+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. 
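A small usage sketch of the do_mod() helper defined just above (the wrapper function and the values are illustrative, not from the patch): unlike the kernel's do_div(), which divides its argument in place, do_mod() only returns the remainder and leaves the operand untouched, because dmapi_do_mod() operates on a local copy.

/* Illustration of the "side effect free" property of do_mod(). */
static void example_do_mod_usage(void)
{
	__u64 offset = 0x100000007ULL;		/* 2^32 + 7 */
	__u32 rem;

	rem = do_mod(offset, 4096);		/* rem == 7 */
	/* offset is still 0x100000007 here; do_div(offset, 4096) would
	 * instead leave the quotient in offset and return the remainder. */
}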
Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#ifndef _DMAPI_PRIVATE_H -+#define _DMAPI_PRIVATE_H -+ -+#include -+#include "dmapi_port.h" -+#include "sv.h" -+ -+#ifdef CONFIG_PROC_FS -+#define DMAPI_PROCFS "orig/fs/dmapi_v2" /* DMAPI device in /proc. */ -+#define DMAPI_DBG_PROCFS "orig/fs/dmapi_d" /* DMAPI debugging dir */ -+#endif -+ -+extern struct kmem_cache *dm_fsreg_cachep; -+extern struct kmem_cache *dm_tokdata_cachep; -+extern struct kmem_cache *dm_session_cachep; -+extern struct kmem_cache *dm_fsys_map_cachep; -+extern struct kmem_cache *dm_fsys_vptr_cachep; -+ -+typedef struct dm_tokdata { -+ struct dm_tokdata *td_next; -+ struct dm_tokevent *td_tevp; /* pointer to owning tevp */ -+ int td_app_ref; /* # app threads currently active */ -+ dm_right_t td_orig_right; /* original right held when created */ -+ dm_right_t td_right; /* current right held for this handle */ -+ short td_flags; -+ short td_type; /* object type */ -+ int td_vcount; /* # of current application VN_HOLDs */ -+ struct inode *td_ip; /* inode pointer */ -+ dm_handle_t td_handle; /* handle for ip or sb */ -+} dm_tokdata_t; -+ -+/* values for td_type */ -+ -+#define DM_TDT_NONE 0x00 /* td_handle is empty */ -+#define DM_TDT_VFS 0x01 /* td_handle points to a sb */ -+#define DM_TDT_REG 0x02 /* td_handle points to a file */ -+#define DM_TDT_DIR 0x04 /* td_handle points to a directory */ -+#define DM_TDT_LNK 0x08 /* td_handle points to a symlink */ -+#define DM_TDT_OTH 0x10 /* some other object eg. pipe, socket */ -+ -+#define DM_TDT_VNO (DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH) -+#define DM_TDT_ANY (DM_TDT_VFS|DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH) -+ -+/* values for td_flags */ -+ -+#define DM_TDF_ORIG 0x0001 /* part of the original event */ -+#define DM_TDF_EVTREF 0x0002 /* event thread holds inode reference */ -+#define DM_TDF_STHREAD 0x0004 /* only one app can use this handle */ -+#define DM_TDF_RIGHT 0x0008 /* vcount bumped for dm_request_right */ -+#define DM_TDF_HOLD 0x0010 /* vcount bumped for dm_obj_ref_hold */ -+ -+ -+/* Because some events contain __u64 fields, we force te_msg and te_event -+ to always be 8-byte aligned. In order to send more than one message in -+ a single dm_get_events() call, we also ensure that each message is an -+ 8-byte multiple. -+*/ -+ -+typedef struct dm_tokevent { -+ struct dm_tokevent *te_next; -+ struct dm_tokevent *te_hashnext; /* hash chain */ -+ lock_t te_lock; /* lock for all fields but te_*next. -+ * te_next and te_hashnext are -+ * protected by the session lock. 
-+ */ -+ short te_flags; -+ short te_allocsize; /* alloc'ed size of this structure */ -+ sv_t te_evt_queue; /* queue waiting for dm_respond_event */ -+ sv_t te_app_queue; /* queue waiting for handle access */ -+ int te_evt_ref; /* number of event procs using token */ -+ int te_app_ref; /* number of app procs using token */ -+ int te_app_slp; /* number of app procs sleeping */ -+ int te_reply; /* return errno for sync messages */ -+ dm_tokdata_t *te_tdp; /* list of handle/right pairs */ -+ union { -+ __u64 align; /* force alignment of te_msg */ -+ dm_eventmsg_t te_msg; /* user visible part */ -+ } te_u; -+ __u64 te_event; /* start of dm_xxx_event_t message */ -+} dm_tokevent_t; -+ -+#define te_msg te_u.te_msg -+ -+/* values for te_flags */ -+ -+#define DM_TEF_LOCKED 0x0001 /* event "locked" by dm_get_events() */ -+#define DM_TEF_INTERMED 0x0002 /* a dm_pending reply was received */ -+#define DM_TEF_FINAL 0x0004 /* dm_respond_event has been received */ -+#define DM_TEF_HASHED 0x0010 /* event is on hash chain */ -+#define DM_TEF_FLUSH 0x0020 /* flushing threads from queues */ -+ -+ -+#ifdef CONFIG_DMAPI_DEBUG -+#define DM_SHASH_DEBUG -+#endif -+ -+typedef struct dm_sesshash { -+ dm_tokevent_t *h_next; /* ptr to chain of tokevents */ -+#ifdef DM_SHASH_DEBUG -+ int maxlength; -+ int curlength; -+ int num_adds; -+ int num_dels; -+ int dup_hits; -+#endif -+} dm_sesshash_t; -+ -+ -+typedef struct dm_eventq { -+ dm_tokevent_t *eq_head; -+ dm_tokevent_t *eq_tail; -+ int eq_count; /* size of queue */ -+} dm_eventq_t; -+ -+ -+typedef struct dm_session { -+ struct dm_session *sn_next; /* sessions linkage */ -+ dm_sessid_t sn_sessid; /* user-visible session number */ -+ u_int sn_flags; -+ lock_t sn_qlock; /* lock for newq/delq related fields */ -+ sv_t sn_readerq; /* waiting for message on sn_newq */ -+ sv_t sn_writerq; /* waiting for room on sn_newq */ -+ u_int sn_readercnt; /* count of waiting readers */ -+ u_int sn_writercnt; /* count of waiting readers */ -+ dm_eventq_t sn_newq; /* undelivered event queue */ -+ dm_eventq_t sn_delq; /* delivered event queue */ -+ dm_eventq_t sn_evt_writerq; /* events of thrds in sn_writerq */ -+ dm_sesshash_t *sn_sesshash; /* buckets for tokevent hash chains */ -+#ifdef DM_SHASH_DEBUG -+ int sn_buckets_in_use; -+ int sn_max_buckets_in_use; -+#endif -+ char sn_info[DM_SESSION_INFO_LEN]; /* user-supplied info */ -+} dm_session_t; -+ -+/* values for sn_flags */ -+ -+#define DM_SN_WANTMOUNT 0x0001 /* session wants to get mount events */ -+ -+ -+typedef enum { -+ DM_STATE_MOUNTING, -+ DM_STATE_MOUNTED, -+ DM_STATE_UNMOUNTING, -+ DM_STATE_UNMOUNTED -+} dm_fsstate_t; -+ -+ -+typedef struct dm_fsreg { -+ struct dm_fsreg *fr_next; -+ struct super_block *fr_sb; /* filesystem pointer */ -+ dm_tokevent_t *fr_tevp; -+ dm_fsid_t fr_fsid; /* filesystem ID */ -+ void *fr_msg; /* dm_mount_event_t for filesystem */ -+ int fr_msgsize; /* size of dm_mount_event_t */ -+ dm_fsstate_t fr_state; -+ sv_t fr_dispq; -+ int fr_dispcnt; -+ dm_eventq_t fr_evt_dispq; /* events of thrds in fr_dispq */ -+ sv_t fr_queue; /* queue for hdlcnt/sbcnt/unmount */ -+ lock_t fr_lock; -+ int fr_hdlcnt; /* threads blocked during unmount */ -+ int fr_vfscnt; /* threads in VFS_VGET or VFS_ROOT */ -+ int fr_unmount; /* if non-zero, umount is sleeping */ -+ dm_attrname_t fr_rattr; /* dm_set_return_on_destroy attribute */ -+ dm_session_t *fr_sessp [DM_EVENT_MAX]; -+} dm_fsreg_t; -+ -+ -+ -+ -+/* events valid in dm_set_disp() when called with a filesystem handle. 
*/ -+ -+#define DM_VALID_DISP_EVENTS ( \ -+ (1 << DM_EVENT_PREUNMOUNT) | \ -+ (1 << DM_EVENT_UNMOUNT) | \ -+ (1 << DM_EVENT_NOSPACE) | \ -+ (1 << DM_EVENT_DEBUT) | \ -+ (1 << DM_EVENT_CREATE) | \ -+ (1 << DM_EVENT_POSTCREATE) | \ -+ (1 << DM_EVENT_REMOVE) | \ -+ (1 << DM_EVENT_POSTREMOVE) | \ -+ (1 << DM_EVENT_RENAME) | \ -+ (1 << DM_EVENT_POSTRENAME) | \ -+ (1 << DM_EVENT_LINK) | \ -+ (1 << DM_EVENT_POSTLINK) | \ -+ (1 << DM_EVENT_SYMLINK) | \ -+ (1 << DM_EVENT_POSTSYMLINK) | \ -+ (1 << DM_EVENT_READ) | \ -+ (1 << DM_EVENT_WRITE) | \ -+ (1 << DM_EVENT_TRUNCATE) | \ -+ (1 << DM_EVENT_ATTRIBUTE) | \ -+ (1 << DM_EVENT_DESTROY) ) -+ -+ -+/* isolate the read/write/trunc events of a dm_tokevent_t */ -+ -+#define DM_EVENT_RDWRTRUNC(tevp) ( \ -+ ((tevp)->te_msg.ev_type == DM_EVENT_READ) || \ -+ ((tevp)->te_msg.ev_type == DM_EVENT_WRITE) || \ -+ ((tevp)->te_msg.ev_type == DM_EVENT_TRUNCATE) ) -+ -+ -+/* -+ * Global handle hack isolation. -+ */ -+ -+#define DM_GLOBALHAN(hanp, hlen) (((hanp) == DM_GLOBAL_HANP) && \ -+ ((hlen) == DM_GLOBAL_HLEN)) -+ -+ -+#define DM_MAX_MSG_DATA 3960 -+ -+ -+ -+/* Supported filesystem function vector functions. */ -+ -+ -+typedef struct { -+ int code_level; -+ dm_fsys_clear_inherit_t clear_inherit; -+ dm_fsys_create_by_handle_t create_by_handle; -+ dm_fsys_downgrade_right_t downgrade_right; -+ dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp; -+ dm_fsys_get_bulkall_rvp_t get_bulkall_rvp; -+ dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp; -+ dm_fsys_get_config_t get_config; -+ dm_fsys_get_config_events_t get_config_events; -+ dm_fsys_get_destroy_dmattr_t get_destroy_dmattr; -+ dm_fsys_get_dioinfo_t get_dioinfo; -+ dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp; -+ dm_fsys_get_dmattr_t get_dmattr; -+ dm_fsys_get_eventlist_t get_eventlist; -+ dm_fsys_get_fileattr_t get_fileattr; -+ dm_fsys_get_region_t get_region; -+ dm_fsys_getall_dmattr_t getall_dmattr; -+ dm_fsys_getall_inherit_t getall_inherit; -+ dm_fsys_init_attrloc_t init_attrloc; -+ dm_fsys_mkdir_by_handle_t mkdir_by_handle; -+ dm_fsys_probe_hole_t probe_hole; -+ dm_fsys_punch_hole_t punch_hole; -+ dm_fsys_read_invis_rvp_t read_invis_rvp; -+ dm_fsys_release_right_t release_right; -+ dm_fsys_remove_dmattr_t remove_dmattr; -+ dm_fsys_request_right_t request_right; -+ dm_fsys_set_dmattr_t set_dmattr; -+ dm_fsys_set_eventlist_t set_eventlist; -+ dm_fsys_set_fileattr_t set_fileattr; -+ dm_fsys_set_inherit_t set_inherit; -+ dm_fsys_set_region_t set_region; -+ dm_fsys_symlink_by_handle_t symlink_by_handle; -+ dm_fsys_sync_by_handle_t sync_by_handle; -+ dm_fsys_upgrade_right_t upgrade_right; -+ dm_fsys_write_invis_rvp_t write_invis_rvp; -+ dm_fsys_obj_ref_hold_t obj_ref_hold; -+} dm_fsys_vector_t; -+ -+ -+typedef struct { -+ struct list_head ftype_list; /* list of fstypes */ -+ struct list_head sb_list; /* list of sb's per fstype */ -+ struct file_system_type *f_type; -+ struct filesystem_dmapi_operations *dmapiops; -+ dm_fsys_vector_t *vptr; -+ struct super_block *sb; -+} dm_vector_map_t; -+ -+ -+extern dm_session_t *dm_sessions; /* head of session list */ -+extern dm_fsreg_t *dm_registers; -+extern lock_t dm_reg_lock; /* lock for registration list */ -+ -+/* -+ * Kernel only prototypes. 
-+ */ -+ -+int dm_find_session_and_lock( -+ dm_sessid_t sid, -+ dm_session_t **sessionpp, -+ unsigned long *lcp); -+ -+int dm_find_msg_and_lock( -+ dm_sessid_t sid, -+ dm_token_t token, -+ dm_tokevent_t **tevpp, -+ unsigned long *lcp); -+ -+dm_tokevent_t * dm_evt_create_tevp( -+ dm_eventtype_t event, -+ int variable_size, -+ void **msgpp); -+ -+int dm_app_get_tdp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ short types, -+ dm_right_t right, -+ dm_tokdata_t **tdpp); -+ -+int dm_get_config_tdp( -+ void __user *hanp, -+ size_t hlen, -+ dm_tokdata_t **tdpp); -+ -+void dm_app_put_tdp( -+ dm_tokdata_t *tdp); -+ -+void dm_put_tevp( -+ dm_tokevent_t *tevp, -+ dm_tokdata_t *tdp); -+ -+void dm_evt_rele_tevp( -+ dm_tokevent_t *tevp, -+ int droprights); -+ -+int dm_enqueue_normal_event( -+ struct super_block *sbp, -+ dm_tokevent_t **tevpp, -+ int flags); -+ -+int dm_enqueue_mount_event( -+ struct super_block *sbp, -+ dm_tokevent_t *tevp); -+ -+int dm_enqueue_sendmsg_event( -+ dm_sessid_t targetsid, -+ dm_tokevent_t *tevp, -+ int synch); -+ -+int dm_enqueue_user_event( -+ dm_sessid_t sid, -+ dm_tokevent_t *tevp, -+ dm_token_t *tokenp); -+ -+int dm_obj_ref_query_rvp( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ int *rvp); -+ -+int dm_read_invis_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp); -+ -+int dm_write_invis_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ int flags, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp); -+ -+int dm_get_bulkattr_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvp); -+ -+int dm_get_bulkall_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_attrname_t __user *attrnamep, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvp); -+ -+int dm_get_dirattrs_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvp); -+ -+int dm_get_allocinfo_rvp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_off_t __user *offp, -+ u_int nelem, -+ dm_extent_t __user *extentp, -+ u_int __user *nelemp, -+ int *rvp); -+ -+int dm_waitfor_destroy_attrname( -+ struct super_block *sb, -+ dm_attrname_t *attrnamep); -+ -+void dm_clear_fsreg( -+ dm_session_t *s); -+ -+int dm_add_fsys_entry( -+ struct super_block *sb, -+ dm_tokevent_t *tevp); -+ -+void dm_change_fsys_entry( -+ struct super_block *sb, -+ dm_fsstate_t newstate); -+ -+void dm_remove_fsys_entry( -+ struct super_block *sb); -+ -+dm_fsys_vector_t *dm_fsys_vector( -+ struct inode *ip); -+ -+struct filesystem_dmapi_operations *dm_fsys_ops( -+ struct super_block *sb); -+ -+void dm_fsys_ops_release( -+ struct super_block *sb); -+ -+int dm_waitfor_disp_session( -+ struct super_block *sb, -+ dm_tokevent_t *tevp, -+ dm_session_t **sessionpp, -+ unsigned long *lcp); -+ -+struct inode * dm_handle_to_ip ( -+ dm_handle_t *handlep, -+ short *typep); -+ -+int dm_check_dmapi_ip( -+ struct inode *ip); -+ -+dm_tokevent_t * dm_find_mount_tevp_and_lock( -+ dm_fsid_t *fsidp, -+ unsigned 
long *lcp); -+ -+int dm_path_to_hdl( -+ char __user *path, -+ void __user *hanp, -+ size_t __user *hlenp); -+ -+int dm_path_to_fshdl( -+ char __user *path, -+ void __user *hanp, -+ size_t __user *hlenp); -+ -+int dm_fd_to_hdl( -+ int fd, -+ void __user *hanp, -+ size_t __user *hlenp); -+ -+int dm_upgrade_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token); -+ -+int dm_downgrade_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token); -+ -+int dm_request_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int flags, -+ dm_right_t right); -+ -+int dm_release_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token); -+ -+int dm_query_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_right_t __user *rightp); -+ -+ -+int dm_set_eventlist( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_eventset_t __user *eventsetp, -+ u_int maxevent); -+ -+int dm_obj_ref_hold( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen); -+ -+int dm_obj_ref_rele( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen); -+ -+int dm_get_eventlist( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int nelem, -+ dm_eventset_t __user *eventsetp, -+ u_int __user *nelemp); -+ -+ -+int dm_set_disp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_eventset_t __user *eventsetp, -+ u_int maxevent); -+ -+ -+int dm_set_return_on_destroy( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ dm_boolean_t enable); -+ -+ -+int dm_get_mountinfo( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp); -+ -+void dm_link_event( -+ dm_tokevent_t *tevp, -+ dm_eventq_t *queue); -+ -+void dm_unlink_event( -+ dm_tokevent_t *tevp, -+ dm_eventq_t *queue); -+ -+int dm_open_by_handle_rvp( -+ unsigned int fd, -+ void __user *hanp, -+ size_t hlen, -+ int mode, -+ int *rvp); -+ -+int dm_copyin_handle( -+ void __user *hanp, -+ size_t hlen, -+ dm_handle_t *handlep); -+ -+int dm_release_disp_threads( -+ dm_fsid_t *fsid, -+ struct inode *inode, -+ int errno); -+ -+#endif /* _DMAPI_PRIVATE_H */ ---- /dev/null -+++ b/fs/dmapi/dmapi_region.c -@@ -0,0 +1,91 @@ -+/* -+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. 
-+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+int -+dm_get_region( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int nelem, -+ dm_region_t __user *regbufp, -+ u_int __user *nelemp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_region(tdp->td_ip, tdp->td_right, -+ nelem, regbufp, nelemp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+ -+int -+dm_set_region( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int nelem, -+ dm_region_t __user *regbufp, -+ dm_boolean_t __user *exactflagp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->set_region(tdp->td_ip, tdp->td_right, -+ nelem, regbufp, exactflagp); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_register.c -@@ -0,0 +1,1638 @@ -+/* -+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+/* LOOKUP_POSTIVE was removed in Linux 2.6 */ -+#ifndef LOOKUP_POSITIVE -+#define LOOKUP_POSITIVE 0 -+#endif -+ -+dm_fsreg_t *dm_registers; /* head of filesystem registration list */ -+int dm_fsys_cnt; /* number of filesystems on dm_registers list */ -+lock_t dm_reg_lock = SPIN_LOCK_UNLOCKED;/* lock for dm_registers */ -+ -+ -+ -+#ifdef CONFIG_PROC_FS -+static int -+fsreg_read_pfs(char *buffer, char **start, off_t offset, -+ int count, int *eof, void *data) -+{ -+ int len; -+ int i; -+ dm_fsreg_t *fsrp = (dm_fsreg_t*)data; -+ char statebuf[30]; -+ -+#define CHKFULL if(len >= count) break; -+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL; -+ -+ switch (fsrp->fr_state) { -+ case DM_STATE_MOUNTING: sprintf(statebuf, "mounting"); break; -+ case DM_STATE_MOUNTED: sprintf(statebuf, "mounted"); break; -+ case DM_STATE_UNMOUNTING: sprintf(statebuf, "unmounting"); break; -+ case DM_STATE_UNMOUNTED: sprintf(statebuf, "unmounted"); break; -+ default: -+ sprintf(statebuf, "unknown:%d", (int)fsrp->fr_state); -+ break; -+ } -+ -+ len=0; -+ while(1){ -+ ADDBUF("fsrp=0x%p\n", fsrp); -+ ADDBUF("fr_next=0x%p\n", fsrp->fr_next); -+ ADDBUF("fr_sb=0x%p\n", fsrp->fr_sb); -+ ADDBUF("fr_tevp=0x%p\n", fsrp->fr_tevp); -+ ADDBUF("fr_fsid=%c\n", '?'); -+ ADDBUF("fr_msg=0x%p\n", fsrp->fr_msg); -+ ADDBUF("fr_msgsize=%d\n", fsrp->fr_msgsize); -+ ADDBUF("fr_state=%s\n", statebuf); -+ ADDBUF("fr_dispq=%c\n", '?'); -+ ADDBUF("fr_dispcnt=%d\n", fsrp->fr_dispcnt); -+ -+ ADDBUF("fr_evt_dispq.eq_head=0x%p\n", fsrp->fr_evt_dispq.eq_head); -+ ADDBUF("fr_evt_dispq.eq_tail=0x%p\n", fsrp->fr_evt_dispq.eq_tail); -+ ADDBUF("fr_evt_dispq.eq_count=%d\n", fsrp->fr_evt_dispq.eq_count); -+ -+ ADDBUF("fr_queue=%c\n", '?'); -+ ADDBUF("fr_lock=%c\n", '?'); -+ ADDBUF("fr_hdlcnt=%d\n", fsrp->fr_hdlcnt); -+ ADDBUF("fr_vfscnt=%d\n", fsrp->fr_vfscnt); -+ ADDBUF("fr_unmount=%d\n", fsrp->fr_unmount); -+ -+ len += sprintf(buffer + len, "fr_rattr="); -+ CHKFULL; -+ for(i = 0; i <= DM_ATTR_NAME_SIZE; ++i){ -+ ADDBUF("%c", fsrp->fr_rattr.an_chars[i]); -+ } -+ CHKFULL; -+ len += sprintf(buffer + len, "\n"); -+ CHKFULL; -+ -+ for(i = 0; i < DM_EVENT_MAX; i++){ -+ if( fsrp->fr_sessp[i] != NULL ){ -+ ADDBUF("fr_sessp[%d]=", i); -+ ADDBUF("0x%p\n", fsrp->fr_sessp[i]); -+ } -+ } -+ CHKFULL; -+ -+ break; -+ } -+ -+ if (offset >= len) { -+ *start = buffer; -+ *eof = 1; -+ return 0; -+ } -+ *start = buffer + offset; -+ if ((len -= offset) > count) -+ return count; -+ *eof = 1; -+ -+ return len; -+} -+#endif -+ -+ -+/* Returns a pointer to the filesystem structure for the filesystem -+ referenced by fsidp. The caller is responsible for obtaining dm_reg_lock -+ before calling this routine. 
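The ADDBUF/CHKFULL macros in fsreg_read_pfs() above implement a bounded append into the procfs output buffer: each formatted field advances len, and the pseudo-loop is abandoned as soon as the buffer fills. A minimal user-space sketch of that pattern (names and fields here are illustrative, and snprintf stands in for the kernel's sprintf-plus-length-check):

#include <stdio.h>
#include <string.h>

/* Append formatted fields into a fixed buffer, stopping once it is full. */
static size_t emit_fields(char *buf, size_t cap)
{
    size_t len = 0;
    int n;

    n = snprintf(buf + len, cap - len, "state=%s\n", "mounted");
    if (n < 0 || (size_t)n >= cap - len)
        return len;                 /* buffer full, stop early */
    len += (size_t)n;

    n = snprintf(buf + len, cap - len, "hdlcnt=%d\n", 3);
    if (n < 0 || (size_t)n >= cap - len)
        return len;
    len += (size_t)n;

    return len;
}

int main(void)
{
    char buf[64];
    size_t used = emit_fields(buf, sizeof(buf));
    printf("%zu bytes:\n%.*s", used, (int)used, buf);
    return 0;
}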
-+*/ -+ -+static dm_fsreg_t * -+dm_find_fsreg( -+ dm_fsid_t *fsidp) -+{ -+ dm_fsreg_t *fsrp; -+ -+ for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) { -+ if (!memcmp(&fsrp->fr_fsid, fsidp, sizeof(*fsidp))) -+ break; -+ } -+ return(fsrp); -+} -+ -+ -+/* Given a fsid_t, dm_find_fsreg_and_lock() finds the dm_fsreg_t structure -+ for that filesytem if one exists, and returns a pointer to the structure -+ after obtaining its 'fr_lock' so that the caller can safely modify the -+ dm_fsreg_t. The caller is responsible for releasing 'fr_lock'. -+*/ -+ -+static dm_fsreg_t * -+dm_find_fsreg_and_lock( -+ dm_fsid_t *fsidp, -+ unsigned long *lcp) /* address of returned lock cookie */ -+{ -+ dm_fsreg_t *fsrp; -+ -+ for (;;) { -+ *lcp = mutex_spinlock(&dm_reg_lock); -+ -+ if ((fsrp = dm_find_fsreg(fsidp)) == NULL) { -+ mutex_spinunlock(&dm_reg_lock, *lcp); -+ return(NULL); -+ } -+ if (spin_trylock(&fsrp->fr_lock)) { -+ nested_spinunlock(&dm_reg_lock); -+ return(fsrp); /* success */ -+ } -+ -+ /* If the second lock is not available, drop the first and -+ start over. This gives the CPU a chance to process any -+ interrupts, and also allows processes which want a fr_lock -+ for a different filesystem to proceed. -+ */ -+ -+ mutex_spinunlock(&dm_reg_lock, *lcp); -+ } -+} -+ -+ -+/* dm_add_fsys_entry() is called when a DM_EVENT_MOUNT event is about to be -+ sent. It creates a dm_fsreg_t structure for the filesystem and stores a -+ pointer to a copy of the mount event within that structure so that it is -+ available for subsequent dm_get_mountinfo() calls. -+*/ -+ -+int -+dm_add_fsys_entry( -+ struct super_block *sb, -+ dm_tokevent_t *tevp) -+{ -+ dm_fsreg_t *fsrp; -+ int msgsize; -+ void *msg; -+ unsigned long lc; /* lock cookie */ -+ dm_fsid_t fsid; -+ struct filesystem_dmapi_operations *dops; -+ -+ dops = dm_fsys_ops(sb); -+ ASSERT(dops); -+ dops->get_fsid(sb, &fsid); -+ -+ /* Allocate and initialize a dm_fsreg_t structure for the filesystem. */ -+ -+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_event); -+ msg = kmalloc(msgsize, GFP_KERNEL); -+ if (msg == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return -ENOMEM; -+ } -+ memcpy(msg, &tevp->te_event, msgsize); -+ -+ fsrp = kmem_cache_alloc(dm_fsreg_cachep, GFP_KERNEL); -+ if (fsrp == NULL) { -+ kfree(msg); -+ printk("%s/%d: kmem_cache_alloc(dm_fsreg_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return -ENOMEM; -+ } -+ memset(fsrp, 0, sizeof(*fsrp)); -+ -+ fsrp->fr_sb = sb; -+ fsrp->fr_tevp = tevp; -+ memcpy(&fsrp->fr_fsid, &fsid, sizeof(fsid)); -+ fsrp->fr_msg = msg; -+ fsrp->fr_msgsize = msgsize; -+ fsrp->fr_state = DM_STATE_MOUNTING; -+ sv_init(&fsrp->fr_dispq, SV_DEFAULT, "fr_dispq"); -+ sv_init(&fsrp->fr_queue, SV_DEFAULT, "fr_queue"); -+ spinlock_init(&fsrp->fr_lock, "fr_lock"); -+ -+ /* If no other mounted DMAPI filesystem already has this same -+ fsid_t, then add this filesystem to the list. -+ */ -+ -+ lc = mutex_spinlock(&dm_reg_lock); -+ -+ if (!dm_find_fsreg(&fsid)) { -+ fsrp->fr_next = dm_registers; -+ dm_registers = fsrp; -+ dm_fsys_cnt++; -+ mutex_spinunlock(&dm_reg_lock, lc); -+#ifdef CONFIG_PROC_FS -+ { -+ char buf[100]; -+ struct proc_dir_entry *entry; -+ -+ sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp); -+ entry = create_proc_read_entry(buf, 0, NULL, fsreg_read_pfs, fsrp); -+ } -+#endif -+ return(0); -+ } -+ -+ /* A fsid_t collision occurred, so prevent this new filesystem from -+ mounting. 
-+ */ -+ -+ mutex_spinunlock(&dm_reg_lock, lc); -+ -+ sv_destroy(&fsrp->fr_dispq); -+ sv_destroy(&fsrp->fr_queue); -+ spinlock_destroy(&fsrp->fr_lock); -+ kfree(msg); -+ kmem_cache_free(dm_fsreg_cachep, fsrp); -+ return(-EBUSY); -+} -+ -+ -+/* dm_change_fsys_entry() is called whenever a filesystem's mount state is -+ about to change. The state is changed to DM_STATE_MOUNTED after a -+ successful DM_EVENT_MOUNT event or after a failed unmount. It is changed -+ to DM_STATE_UNMOUNTING after a successful DM_EVENT_PREUNMOUNT event. -+ Finally, the state is changed to DM_STATE_UNMOUNTED after a successful -+ unmount. It stays in this state until the DM_EVENT_UNMOUNT event is -+ queued, at which point the filesystem entry is removed. -+*/ -+ -+void -+dm_change_fsys_entry( -+ struct super_block *sb, -+ dm_fsstate_t newstate) -+{ -+ dm_fsreg_t *fsrp; -+ int seq_error; -+ unsigned long lc; /* lock cookie */ -+ dm_fsid_t fsid; -+ struct filesystem_dmapi_operations *dops; -+ -+ /* Find the filesystem referenced by the sb's fsid_t. This should -+ always succeed. -+ */ -+ -+ dops = dm_fsys_ops(sb); -+ ASSERT(dops); -+ dops->get_fsid(sb, &fsid); -+ -+ if ((fsrp = dm_find_fsreg_and_lock(&fsid, &lc)) == NULL) { -+ panic("dm_change_fsys_entry: can't find DMAPI fsrp for " -+ "sb %p\n", sb); -+ } -+ -+ /* Make sure that the new state is acceptable given the current state -+ of the filesystem. Any error here is a major DMAPI/filesystem -+ screwup. -+ */ -+ -+ seq_error = 0; -+ switch (newstate) { -+ case DM_STATE_MOUNTED: -+ if (fsrp->fr_state != DM_STATE_MOUNTING && -+ fsrp->fr_state != DM_STATE_UNMOUNTING) { -+ seq_error++; -+ } -+ break; -+ case DM_STATE_UNMOUNTING: -+ if (fsrp->fr_state != DM_STATE_MOUNTED) -+ seq_error++; -+ break; -+ case DM_STATE_UNMOUNTED: -+ if (fsrp->fr_state != DM_STATE_UNMOUNTING) -+ seq_error++; -+ break; -+ default: -+ seq_error++; -+ break; -+ } -+ if (seq_error) { -+ panic("dm_change_fsys_entry: DMAPI sequence error: old state " -+ "%d, new state %d, fsrp %p\n", fsrp->fr_state, -+ newstate, fsrp); -+ } -+ -+ /* If the old state was DM_STATE_UNMOUNTING, then processes could be -+ sleeping in dm_handle_to_ip() waiting for their DM_NO_TOKEN handles -+ to be translated to inodes. Wake them up so that they either -+ continue (new state is DM_STATE_MOUNTED) or fail (new state is -+ DM_STATE_UNMOUNTED). -+ */ -+ -+ if (fsrp->fr_state == DM_STATE_UNMOUNTING) { -+ if (fsrp->fr_hdlcnt) -+ sv_broadcast(&fsrp->fr_queue); -+ } -+ -+ /* Change the filesystem's mount state to its new value. */ -+ -+ fsrp->fr_state = newstate; -+ fsrp->fr_tevp = NULL; /* not valid after DM_STATE_MOUNTING */ -+ -+ /* If the new state is DM_STATE_UNMOUNTING, wait until any application -+ threads currently in the process of making VFS_VGET and VFS_ROOT -+ calls are done before we let this unmount thread continue the -+ unmount. (We want to make sure that the unmount will see these -+ inode references during its scan.) -+ */ -+ -+ if (newstate == DM_STATE_UNMOUNTING) { -+ while (fsrp->fr_vfscnt) { -+ fsrp->fr_unmount++; -+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc); -+ lc = mutex_spinlock(&fsrp->fr_lock); -+ fsrp->fr_unmount--; -+ } -+ } -+ -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+} -+ -+ -+/* dm_remove_fsys_entry() gets called after a failed mount or after an -+ DM_EVENT_UNMOUNT event has been queued. (The filesystem entry must stay -+ until the DM_EVENT_UNMOUNT reply is queued so that the event can use the -+ 'fr_sessp' list to see which session to send the event to.) 
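The sequence checks in dm_change_fsys_entry() above permit only four transitions: MOUNTING to MOUNTED, UNMOUNTING back to MOUNTED (a failed unmount), MOUNTED to UNMOUNTING, and UNMOUNTING to UNMOUNTED. A stand-alone restatement of that state machine, with illustrative enum names rather than the real dm_fsstate_t values:

#include <stdio.h>

/* Illustrative stand-ins for the dm_fsstate_t values used in the patch. */
enum fs_state { ST_MOUNTING, ST_MOUNTED, ST_UNMOUNTING, ST_UNMOUNTED };

/* Return 1 if the transition matches the checks in dm_change_fsys_entry(). */
static int transition_ok(enum fs_state from, enum fs_state to)
{
    switch (to) {
    case ST_MOUNTED:
        return from == ST_MOUNTING || from == ST_UNMOUNTING;
    case ST_UNMOUNTING:
        return from == ST_MOUNTED;
    case ST_UNMOUNTED:
        return from == ST_UNMOUNTING;
    default:
        return 0;
    }
}

int main(void)
{
    printf("%d\n", transition_ok(ST_MOUNTING, ST_MOUNTED));  /* 1 */
    printf("%d\n", transition_ok(ST_MOUNTED, ST_UNMOUNTED)); /* 0: must unmount via UNMOUNTING */
    return 0;
}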
-+*/ -+ -+void -+dm_remove_fsys_entry( -+ struct super_block *sb) -+{ -+ dm_fsreg_t **fsrpp; -+ dm_fsreg_t *fsrp; -+ unsigned long lc; /* lock cookie */ -+ struct filesystem_dmapi_operations *dops; -+ dm_fsid_t fsid; -+ -+ dops = dm_fsys_ops(sb); -+ ASSERT(dops); -+ dops->get_fsid(sb, &fsid); -+ -+ /* Find the filesystem referenced by the sb's fsid_t and dequeue -+ it after verifying that the fr_state shows a filesystem that is -+ either mounting or unmounted. -+ */ -+ -+ lc = mutex_spinlock(&dm_reg_lock); -+ -+ fsrpp = &dm_registers; -+ while ((fsrp = *fsrpp) != NULL) { -+ if (!memcmp(&fsrp->fr_fsid, &fsid, sizeof(fsrp->fr_fsid))) -+ break; -+ fsrpp = &fsrp->fr_next; -+ } -+ if (fsrp == NULL) { -+ mutex_spinunlock(&dm_reg_lock, lc); -+ panic("dm_remove_fsys_entry: can't find DMAPI fsrp for " -+ "sb %p\n", sb); -+ } -+ -+ nested_spinlock(&fsrp->fr_lock); -+ -+ /* Verify that it makes sense to remove this entry. */ -+ -+ if (fsrp->fr_state != DM_STATE_MOUNTING && -+ fsrp->fr_state != DM_STATE_UNMOUNTED) { -+ nested_spinunlock(&fsrp->fr_lock); -+ mutex_spinunlock(&dm_reg_lock, lc); -+ panic("dm_remove_fsys_entry: DMAPI sequence error: old state " -+ "%d, fsrp %p\n", fsrp->fr_state, fsrp); -+ } -+ -+ *fsrpp = fsrp->fr_next; -+ dm_fsys_cnt--; -+ -+ nested_spinunlock(&dm_reg_lock); -+ -+ /* Since the filesystem is about to finish unmounting, we must be sure -+ that no inodes are being referenced within the filesystem before we -+ let this event thread continue. If the filesystem is currently in -+ state DM_STATE_MOUNTING, then we know by definition that there can't -+ be any references. If the filesystem is DM_STATE_UNMOUNTED, then -+ any application threads referencing handles with DM_NO_TOKEN should -+ have already been awakened by dm_change_fsys_entry and should be -+ long gone by now. Just in case they haven't yet left, sleep here -+ until they are really gone. -+ */ -+ -+ while (fsrp->fr_hdlcnt) { -+ fsrp->fr_unmount++; -+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc); -+ lc = mutex_spinlock(&fsrp->fr_lock); -+ fsrp->fr_unmount--; -+ } -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ -+ /* Release all memory. */ -+ -+#ifdef CONFIG_PROC_FS -+ { -+ char buf[100]; -+ sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp); -+ remove_proc_entry(buf, NULL); -+ } -+#endif -+ dm_fsys_ops_release(sb); -+ sv_destroy(&fsrp->fr_dispq); -+ sv_destroy(&fsrp->fr_queue); -+ spinlock_destroy(&fsrp->fr_lock); -+ kfree(fsrp->fr_msg); -+ kmem_cache_free(dm_fsreg_cachep, fsrp); -+} -+ -+ -+/* Get an inode for the object referenced by handlep. We cannot use -+ altgetvfs() because it fails if the VFS_OFFLINE bit is set, which means -+ that any call to dm_handle_to_ip() while a umount is in progress would -+ return an error, even if the umount can't possibly succeed because users -+ are in the filesystem. The requests would start to fail as soon as the -+ umount begins, even before the application receives the DM_EVENT_PREUNMOUNT -+ event. -+ -+ dm_handle_to_ip() emulates the behavior of lookup() while an unmount is -+ in progress. Any call to dm_handle_to_ip() while the filesystem is in the -+ DM_STATE_UNMOUNTING state will block. If the unmount eventually succeeds, -+ the requests will wake up and fail. If the unmount fails, the requests will -+ wake up and complete normally. -+ -+ While a filesystem is in state DM_STATE_MOUNTING, dm_handle_to_ip() will -+ fail all requests. Per the DMAPI spec, the only handles in the filesystem -+ which are valid during a mount event are the handles within the event -+ itself. 
-+*/ -+ -+struct inode * -+dm_handle_to_ip( -+ dm_handle_t *handlep, -+ short *typep) -+{ -+ dm_fsreg_t *fsrp; -+ short type; -+ unsigned long lc; /* lock cookie */ -+ int error = 0; -+ dm_fid_t *fidp; -+ struct super_block *sb; -+ struct inode *ip; -+ int filetype; -+ struct filesystem_dmapi_operations *dmapiops; -+ -+ if ((fsrp = dm_find_fsreg_and_lock(&handlep->ha_fsid, &lc)) == NULL) -+ return NULL; -+ -+ fidp = (dm_fid_t*)&handlep->ha_fid; -+ /* If mounting, and we are not asking for a filesystem handle, -+ * then fail the request. (dm_fid_len==0 for fshandle) -+ */ -+ if ((fsrp->fr_state == DM_STATE_MOUNTING) && -+ (fidp->dm_fid_len != 0)) { -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ return NULL; -+ } -+ -+ for (;;) { -+ if (fsrp->fr_state == DM_STATE_MOUNTING) -+ break; -+ if (fsrp->fr_state == DM_STATE_MOUNTED) -+ break; -+ if (fsrp->fr_state == DM_STATE_UNMOUNTED) { -+ if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0) -+ sv_broadcast(&fsrp->fr_queue); -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ return NULL; -+ } -+ -+ /* Must be DM_STATE_UNMOUNTING. */ -+ -+ fsrp->fr_hdlcnt++; -+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc); -+ lc = mutex_spinlock(&fsrp->fr_lock); -+ fsrp->fr_hdlcnt--; -+ } -+ -+ fsrp->fr_vfscnt++; -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ -+ /* Now that the mutex is released, wait until we have access to the -+ inode. -+ */ -+ -+ sb = fsrp->fr_sb; -+ error = -ENOSYS; -+ dmapiops = dm_fsys_ops(sb); -+ ASSERT(dmapiops); -+ if (dmapiops->fh_to_inode) -+ error = dmapiops->fh_to_inode(sb, &ip, (void*)fidp); -+ -+ lc = mutex_spinlock(&fsrp->fr_lock); -+ -+ fsrp->fr_vfscnt--; -+ if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0) -+ sv_broadcast(&fsrp->fr_queue); -+ -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ if (error || ip == NULL) -+ return NULL; -+ -+ filetype = ip->i_mode & S_IFMT; -+ if (fidp->dm_fid_len == 0) { -+ type = DM_TDT_VFS; -+ } else if (filetype == S_IFREG) { -+ type = DM_TDT_REG; -+ } else if (filetype == S_IFDIR) { -+ type = DM_TDT_DIR; -+ } else if (filetype == S_IFLNK) { -+ type = DM_TDT_LNK; -+ } else { -+ type = DM_TDT_OTH; -+ } -+ *typep = type; -+ return ip; -+} -+ -+ -+int -+dm_ip_to_handle( -+ struct inode *ip, -+ dm_handle_t *handlep) -+{ -+ int error; -+ dm_fid_t fid; -+ dm_fsid_t fsid; -+ int hsize; -+ struct filesystem_dmapi_operations *dops; -+ -+ dops = dm_fsys_ops(ip->i_sb); -+ ASSERT(dops); -+ -+ error = dops->inode_to_fh(ip, &fid, &fsid); -+ if (error) -+ return error; -+ -+ memcpy(&handlep->ha_fsid, &fsid, sizeof(fsid)); -+ memcpy(&handlep->ha_fid, &fid, fid.dm_fid_len + sizeof fid.dm_fid_len); -+ hsize = DM_HSIZE(*handlep); -+ memset((char *)handlep + hsize, 0, sizeof(*handlep) - hsize); -+ return 0; -+} -+ -+ -+/* Given an inode, check if that inode resides in filesystem that supports -+ DMAPI. Returns zero if the inode is in a DMAPI filesystem, otherwise -+ returns an errno. -+*/ -+ -+int -+dm_check_dmapi_ip( -+ struct inode *ip) -+{ -+ dm_handle_t handle; -+ /* REFERENCED */ -+ dm_fsreg_t *fsrp; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ if ((error = dm_ip_to_handle(ip, &handle)) != 0) -+ return(error); -+ -+ if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) -+ return(-EBADF); -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ return(0); -+} -+ -+ -+/* Return a pointer to the DM_EVENT_MOUNT event while a mount is still in -+ progress. This is only called by dm_get_config and dm_get_config_events -+ which need to access the filesystem during a mount but which don't have -+ a session and token to use. 
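dm_ip_to_handle() above builds a handle as a fixed fsid followed by a variable-length fid, and only the meaningful prefix of the structure is kept; the tail is zeroed so handles later compare cleanly with memcmp(). A user-space sketch of that layout (the struct fields and the size computation are illustrative assumptions, not the patch's exact dm_handle_t or DM_HSIZE definitions):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative handle: fixed filesystem id + variable-length file id. */
struct toy_fid {
    uint16_t fid_len;            /* bytes of fid that follow this field */
    uint16_t fid_pad;
    unsigned char fid_data[20];
};
struct toy_handle {
    uint64_t fsid;
    struct toy_fid fid;
};

/* Meaningful size: fsid + the length field itself + the bytes it counts
   (a stand-in for what DM_HSIZE computes in the patch). */
static size_t toy_hsize(const struct toy_handle *h)
{
    return sizeof(h->fsid) + sizeof(h->fid.fid_len) + h->fid.fid_len;
}

int main(void)
{
    struct toy_handle h;

    memset(&h, 0xff, sizeof(h));            /* simulate stale stack contents */
    h.fsid = 0x1234;
    h.fid.fid_pad = 0;
    h.fid.fid_len = sizeof(h.fid.fid_pad) + 8;   /* pad + 8 payload bytes */
    memset(h.fid.fid_data, 0xab, 8);

    /* Zero everything past the meaningful part, as dm_ip_to_handle() does. */
    size_t hsize = toy_hsize(&h);
    memset((char *)&h + hsize, 0, sizeof(h) - hsize);

    printf("handle uses %zu of %zu bytes\n", hsize, sizeof(h));
    return 0;
}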
-+*/ -+ -+dm_tokevent_t * -+dm_find_mount_tevp_and_lock( -+ dm_fsid_t *fsidp, -+ unsigned long *lcp) /* address of returned lock cookie */ -+{ -+ dm_fsreg_t *fsrp; -+ -+ if ((fsrp = dm_find_fsreg_and_lock(fsidp, lcp)) == NULL) -+ return(NULL); -+ -+ if (!fsrp->fr_tevp || fsrp->fr_state != DM_STATE_MOUNTING) { -+ mutex_spinunlock(&fsrp->fr_lock, *lcp); -+ return(NULL); -+ } -+ nested_spinlock(&fsrp->fr_tevp->te_lock); -+ nested_spinunlock(&fsrp->fr_lock); -+ return(fsrp->fr_tevp); -+} -+ -+ -+/* Wait interruptibly until a session registers disposition for 'event' in -+ filesystem 'sb'. Upon successful exit, both the filesystem's dm_fsreg_t -+ structure and the session's dm_session_t structure are locked. The caller -+ is responsible for unlocking both structures using the returned cookies. -+ -+ Warning: The locks can be dropped in any order, but the 'lc2p' cookie MUST -+ BE USED FOR THE FIRST UNLOCK, and the lc1p cookie must be used for the -+ second unlock. If this is not done, the CPU will be interruptible while -+ holding a mutex, which could deadlock the machine! -+*/ -+ -+static int -+dm_waitfor_disp( -+ struct super_block *sb, -+ dm_tokevent_t *tevp, -+ dm_fsreg_t **fsrpp, -+ unsigned long *lc1p, /* addr of first returned lock cookie */ -+ dm_session_t **sessionpp, -+ unsigned long *lc2p) /* addr of 2nd returned lock cookie */ -+{ -+ dm_eventtype_t event = tevp->te_msg.ev_type; -+ dm_session_t *s; -+ dm_fsreg_t *fsrp; -+ dm_fsid_t fsid; -+ struct filesystem_dmapi_operations *dops; -+ -+ dops = dm_fsys_ops(sb); -+ ASSERT(dops); -+ -+ dops->get_fsid(sb, &fsid); -+ if ((fsrp = dm_find_fsreg_and_lock(&fsid, lc1p)) == NULL) -+ return -ENOENT; -+ -+ /* If no session is registered for this event in the specified -+ filesystem, then sleep interruptibly until one does. -+ */ -+ -+ for (;;) { -+ int rc = 0; -+ -+ /* The dm_find_session_and_lock() call is needed because a -+ session that is in the process of being removed might still -+ be in the dm_fsreg_t structure but won't be in the -+ dm_sessions list. -+ */ -+ -+ if ((s = fsrp->fr_sessp[event]) != NULL && -+ dm_find_session_and_lock(s->sn_sessid, &s, lc2p) == 0) { -+ break; -+ } -+ -+ /* Noone is currently registered. DM_EVENT_UNMOUNT events -+ don't wait for anyone to register because the unmount is -+ already past the point of no return. -+ */ -+ -+ if (event == DM_EVENT_UNMOUNT) { -+ mutex_spinunlock(&fsrp->fr_lock, *lc1p); -+ return -ENOENT; -+ } -+ -+ /* Wait until a session registers for disposition of this -+ event. -+ */ -+ -+ fsrp->fr_dispcnt++; -+ dm_link_event(tevp, &fsrp->fr_evt_dispq); -+ -+ sv_wait_sig(&fsrp->fr_dispq, 1, &fsrp->fr_lock, *lc1p); -+ rc = signal_pending(current); -+ -+ *lc1p = mutex_spinlock(&fsrp->fr_lock); -+ fsrp->fr_dispcnt--; -+ dm_unlink_event(tevp, &fsrp->fr_evt_dispq); -+#ifdef HAVE_DM_QUEUE_FLUSH -+ if (tevp->te_flags & DM_TEF_FLUSH) { -+ mutex_spinunlock(&fsrp->fr_lock, *lc1p); -+ return tevp->te_reply; -+ } -+#endif /* HAVE_DM_QUEUE_FLUSH */ -+ if (rc) { /* if signal was received */ -+ mutex_spinunlock(&fsrp->fr_lock, *lc1p); -+ return -EINTR; -+ } -+ } -+ *sessionpp = s; -+ *fsrpp = fsrp; -+ return 0; -+} -+ -+ -+/* Returns the session pointer for the session registered for an event -+ in the given sb. If successful, the session is locked upon return. The -+ caller is responsible for releasing the lock. If no session is currently -+ registered for the event, dm_waitfor_disp_session() will sleep interruptibly -+ until a registration occurs. 
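The unlock-order warning in dm_waitfor_disp() above exists because each lock cookie records the interrupt state saved when that lock was taken: the cookie from the inner lock (taken with interrupts already off) must be restored first, and the outer cookie, which re-enables interrupts, last; otherwise interrupts come back on while a lock is still held. A toy user-space model of that rule (every name here is illustrative):

#include <stdio.h>

/* Toy model: a cookie is just the interrupt-enable flag saved at lock time. */
static int irqs_on = 1;
static int locks_held = 0;

static int toy_lock(void)
{
    int cookie = irqs_on;     /* remember state at acquisition */
    irqs_on = 0;              /* "spinlocks" run with interrupts off */
    locks_held++;
    return cookie;
}

static void toy_unlock(int cookie)
{
    locks_held--;
    irqs_on = cookie;         /* restore whatever was saved */
    if (irqs_on && locks_held)
        printf("BUG: interrupts enabled with %d lock(s) held\n", locks_held);
}

int main(void)
{
    int lc1 = toy_lock();     /* outer lock: saved state = enabled  */
    int lc2 = toy_lock();     /* inner lock: saved state = disabled */

    /* Correct order: inner cookie first, outer cookie last. */
    toy_unlock(lc2);
    toy_unlock(lc1);
    printf("after correct order: irqs_on=%d locks_held=%d\n", irqs_on, locks_held);

    /* Wrong order re-enables interrupts while a lock is still held. */
    lc1 = toy_lock();
    lc2 = toy_lock();
    toy_unlock(lc1);          /* triggers the BUG message */
    toy_unlock(lc2);
    return 0;
}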
-+*/ -+ -+int -+dm_waitfor_disp_session( -+ struct super_block *sb, -+ dm_tokevent_t *tevp, -+ dm_session_t **sessionpp, -+ unsigned long *lcp) -+{ -+ dm_fsreg_t *fsrp; -+ unsigned long lc2; -+ int error; -+ -+ if (tevp->te_msg.ev_type < 0 || tevp->te_msg.ev_type > DM_EVENT_MAX) -+ return(-EIO); -+ -+ error = dm_waitfor_disp(sb, tevp, &fsrp, lcp, sessionpp, &lc2); -+ if (!error) -+ mutex_spinunlock(&fsrp->fr_lock, lc2); /* rev. cookie order*/ -+ return(error); -+} -+ -+ -+/* Find the session registered for the DM_EVENT_DESTROY event on the specified -+ filesystem, sleeping if necessary until registration occurs. Once found, -+ copy the session's return-on-destroy attribute name, if any, back to the -+ caller. -+*/ -+ -+int -+dm_waitfor_destroy_attrname( -+ struct super_block *sbp, -+ dm_attrname_t *attrnamep) -+{ -+ dm_tokevent_t *tevp; -+ dm_session_t *s; -+ dm_fsreg_t *fsrp; -+ int error; -+ unsigned long lc1; /* first lock cookie */ -+ unsigned long lc2; /* second lock cookie */ -+ void *msgp; -+ -+ tevp = dm_evt_create_tevp(DM_EVENT_DESTROY, 1, (void**)&msgp); -+ error = dm_waitfor_disp(sbp, tevp, &fsrp, &lc1, &s, &lc2); -+ if (!error) { -+ *attrnamep = fsrp->fr_rattr; /* attribute or zeros */ -+ mutex_spinunlock(&s->sn_qlock, lc2); /* rev. cookie order */ -+ mutex_spinunlock(&fsrp->fr_lock, lc1); -+ } -+ dm_evt_rele_tevp(tevp,0); -+ return(error); -+} -+ -+ -+/* Unregisters the session for the disposition of all events on all -+ filesystems. This routine is not called until the session has been -+ dequeued from the session list and its session lock has been dropped, -+ but before the actual structure is freed, so it is safe to grab the -+ 'dm_reg_lock' here. If dm_waitfor_disp_session() happens to be called -+ by another thread, it won't find this session on the session list and -+ will wait until a new session registers. -+*/ -+ -+void -+dm_clear_fsreg( -+ dm_session_t *s) -+{ -+ dm_fsreg_t *fsrp; -+ int event; -+ unsigned long lc; /* lock cookie */ -+ -+ lc = mutex_spinlock(&dm_reg_lock); -+ -+ for (fsrp = dm_registers; fsrp != NULL; fsrp = fsrp->fr_next) { -+ nested_spinlock(&fsrp->fr_lock); -+ for (event = 0; event < DM_EVENT_MAX; event++) { -+ if (fsrp->fr_sessp[event] != s) -+ continue; -+ fsrp->fr_sessp[event] = NULL; -+ if (event == DM_EVENT_DESTROY) -+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr)); -+ } -+ nested_spinunlock(&fsrp->fr_lock); -+ } -+ -+ mutex_spinunlock(&dm_reg_lock, lc); -+} -+ -+ -+/* -+ * Return the handle for the object named by path. -+ */ -+ -+int -+dm_path_to_hdl( -+ char __user *path, /* any path name */ -+ void __user *hanp, /* user's data buffer */ -+ size_t __user *hlenp) /* set to size of data copied */ -+{ -+ /* REFERENCED */ -+ dm_fsreg_t *fsrp; -+ dm_handle_t handle; -+ size_t hlen; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ struct nameidata nd; -+ struct inode *inode; -+ size_t len; -+ char *name; -+ struct filesystem_dmapi_operations *dops; -+ -+ /* XXX get things straightened out so getname() works here? 
*/ -+ if (!(len = strnlen_user(path, PATH_MAX))) -+ return(-EFAULT); -+ if (len == 1) -+ return(-ENOENT); -+ if (len > PATH_MAX) -+ return(-ENAMETOOLONG); -+ name = kmalloc(len, GFP_KERNEL); -+ if (name == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return(-ENOMEM); -+ } -+ if (copy_from_user(name, path, len)) { -+ kfree(name); -+ return(-EFAULT); -+ } -+ -+ error = path_lookup(name, LOOKUP_POSITIVE, &nd); -+ kfree(name); -+ if (error) -+ return error; -+ -+ ASSERT(nd.path.dentry); -+ ASSERT(nd.path.dentry->d_inode); -+ inode = igrab(nd.path.dentry->d_inode); -+ path_put(&nd.path); -+ -+ dops = dm_fsys_ops(inode->i_sb); -+ if (dops == NULL) { -+ /* No longer in a dmapi-capable filesystem...Toto */ -+ iput(inode); -+ return -EINVAL; -+ } -+ -+ /* we need the inode */ -+ error = dm_ip_to_handle(inode, &handle); -+ iput(inode); -+ if (error) -+ return(error); -+ -+ if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) -+ return(-EBADF); -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ -+ hlen = DM_HSIZE(handle); -+ -+ if (copy_to_user(hanp, &handle, (int)hlen)) -+ return(-EFAULT); -+ if (put_user(hlen,hlenp)) -+ return(-EFAULT); -+ return 0; -+} -+ -+ -+/* -+ * Return the handle for the file system containing the object named by path. -+ */ -+ -+int -+dm_path_to_fshdl( -+ char __user *path, /* any path name */ -+ void __user *hanp, /* user's data buffer */ -+ size_t __user *hlenp) /* set to size of data copied */ -+{ -+ /* REFERENCED */ -+ dm_fsreg_t *fsrp; -+ dm_handle_t handle; -+ size_t hlen; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ struct nameidata nd; -+ struct inode *inode; -+ size_t len; -+ char *name; -+ struct filesystem_dmapi_operations *dops; -+ -+ /* XXX get things straightened out so getname() works here? 
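The length checks in dm_path_to_hdl() above (and repeated in dm_path_to_fshdl()) lean on strnlen_user() returning the string length including the terminating NUL, and 0 when the user pointer cannot be read: a result of 1 therefore means an empty path, and anything above PATH_MAX is rejected before the copy. A quick user-space analogue of the same validation (the fault case cannot be modelled here, and errno constants are returned directly for illustration):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <limits.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

/* Length here includes the terminating NUL, like strnlen_user(). */
static int check_path(const char *path)
{
    size_t len = strnlen(path, PATH_MAX) + 1;

    if (len == 1)
        return -ENOENT;          /* empty string */
    if (len > PATH_MAX)
        return -ENAMETOOLONG;    /* no NUL found within PATH_MAX */
    return 0;
}

int main(void)
{
    printf("%d\n", check_path(""));          /* -ENOENT */
    printf("%d\n", check_path("/etc/motd")); /* 0 */
    return 0;
}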
*/ -+ if(!(len = strnlen_user(path, PATH_MAX))) -+ return(-EFAULT); -+ if (len == 1) -+ return(-ENOENT); -+ if (len > PATH_MAX) -+ return(-ENAMETOOLONG); -+ name = kmalloc(len, GFP_KERNEL); -+ if (name == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return(-ENOMEM); -+ } -+ if (copy_from_user(name, path, len)) { -+ kfree(name); -+ return(-EFAULT); -+ } -+ -+ error = path_lookup(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd); -+ kfree(name); -+ if (error) -+ return error; -+ -+ ASSERT(nd.path.dentry); -+ ASSERT(nd.path.dentry->d_inode); -+ -+ inode = igrab(nd.path.dentry->d_inode); -+ path_put(&nd.path); -+ -+ dops = dm_fsys_ops(inode->i_sb); -+ if (dops == NULL) { -+ /* No longer in a dmapi-capable filesystem...Toto */ -+ iput(inode); -+ return -EINVAL; -+ } -+ -+ error = dm_ip_to_handle(inode, &handle); -+ iput(inode); -+ -+ if (error) -+ return(error); -+ -+ if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) -+ return(-EBADF); -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ -+ hlen = DM_FSHSIZE; -+ if(copy_to_user(hanp, &handle, (int)hlen)) -+ return(-EFAULT); -+ if(put_user(hlen,hlenp)) -+ return(-EFAULT); -+ return 0; -+} -+ -+ -+int -+dm_fd_to_hdl( -+ int fd, /* any file descriptor */ -+ void __user *hanp, /* user's data buffer */ -+ size_t __user *hlenp) /* set to size of data copied */ -+{ -+ /* REFERENCED */ -+ dm_fsreg_t *fsrp; -+ dm_handle_t handle; -+ size_t hlen; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ struct file *filep = fget(fd); -+ struct inode *ip = filep->f_dentry->d_inode; -+ -+ if (!filep) -+ return(-EBADF); -+ if ((error = dm_ip_to_handle(ip, &handle)) != 0) -+ return(error); -+ -+ if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) -+ return(-EBADF); -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ -+ hlen = DM_HSIZE(handle); -+ if (copy_to_user(hanp, &handle, (int)hlen)) -+ return(-EFAULT); -+ fput(filep); -+ if(put_user(hlen, hlenp)) -+ return(-EFAULT); -+ return 0; -+} -+ -+ -+/* Enable events on an object. */ -+ -+int -+dm_set_eventlist( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_eventset_t __user *eventsetp, -+ u_int maxevent) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_eventset_t eventset; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ if (copy_from_user(&eventset, eventsetp, sizeof(eventset))) -+ return(-EFAULT); -+ -+ /* Do some minor sanity checking. */ -+ -+ if (maxevent == 0 || maxevent > DM_EVENT_MAX) -+ return(-EINVAL); -+ -+ /* Access the specified object. */ -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_ANY, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->set_eventlist(tdp->td_ip, tdp->td_right, -+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0), -+ &eventset, maxevent); -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+/* Return the list of enabled events for an object. */ -+ -+int -+dm_get_eventlist( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int nelem, -+ dm_eventset_t __user *eventsetp, -+ u_int __user *nelemp) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ dm_eventset_t eventset; -+ u_int elem; -+ int error; -+ -+ if (nelem == 0) -+ return(-EINVAL); -+ -+ /* Access the specified object. */ -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_ANY, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ /* Get the object's event list. 
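One detail worth noting in dm_fd_to_hdl() above: the struct file returned by fget() is dereferenced to reach the inode before the NULL check, and several of the early error returns never reach the fput(). A defensive ordering of that open-coded sequence might look like the sketch below; the toy types and table are stand-ins so the control flow compiles on its own, not a drop-in replacement for the kernel code:

#include <stdio.h>
#include <errno.h>

/* Stand-ins for struct file / fget() / fput(), purely for illustration. */
struct toy_file { int inode; };
static struct toy_file open_table[4] = { { 101 }, { 0 }, { 0 }, { 0 } };

static struct toy_file *toy_fget(int fd)
{
    if (fd < 0 || fd >= 4 || open_table[fd].inode == 0)
        return NULL;
    return &open_table[fd];
}
static void toy_fput(struct toy_file *f) { (void)f; }

/* Check the fget() result before touching it; release it on every path. */
static int fd_to_inode(int fd, int *inode_out)
{
    struct toy_file *filp = toy_fget(fd);

    if (filp == NULL)
        return -EBADF;          /* bad descriptor: nothing to release */

    *inode_out = filp->inode;   /* safe: filp is known to be valid here */
    toy_fput(filp);             /* single release point for the reference */
    return 0;
}

int main(void)
{
    int ino;
    printf("%d\n", fd_to_inode(0, &ino));  /* 0, ino == 101 */
    printf("%d\n", fd_to_inode(2, &ino));  /* -EBADF */
    return 0;
}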
*/ -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->get_eventlist(tdp->td_ip, tdp->td_right, -+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0), -+ nelem, &eventset, &elem); -+ -+ dm_app_put_tdp(tdp); -+ -+ if (error) -+ return(error); -+ -+ if (copy_to_user(eventsetp, &eventset, sizeof(eventset))) -+ return(-EFAULT); -+ if (put_user(nelem, nelemp)) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+/* Register for disposition of events. The handle must either be the -+ global handle or must be the handle of a file system. The list of events -+ is pointed to by eventsetp. -+*/ -+ -+int -+dm_set_disp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_eventset_t __user *eventsetp, -+ u_int maxevent) -+{ -+ dm_session_t *s; -+ dm_fsreg_t *fsrp; -+ dm_tokdata_t *tdp; -+ dm_eventset_t eventset; -+ int error; -+ unsigned long lc1; /* first lock cookie */ -+ unsigned long lc2; /* second lock cookie */ -+ u_int i; -+ -+ /* Copy in and validate the event mask. Only the lower maxevent bits -+ are meaningful, so clear any bits set above maxevent. -+ */ -+ -+ if (maxevent == 0 || maxevent > DM_EVENT_MAX) -+ return(-EINVAL); -+ if (copy_from_user(&eventset, eventsetp, sizeof(eventset))) -+ return(-EFAULT); -+ eventset &= (1 << maxevent) - 1; -+ -+ /* If the caller specified the global handle, then the only valid token -+ is DM_NO_TOKEN, and the only valid event in the event mask is -+ DM_EVENT_MOUNT. If it is set, add the session to the list of -+ sessions that want to receive mount events. If it is clear, remove -+ the session from the list. Since DM_EVENT_MOUNT events never block -+ waiting for a session to register, there is noone to wake up if we -+ do add the session to the list. -+ */ -+ -+ if (DM_GLOBALHAN(hanp, hlen)) { -+ if (token != DM_NO_TOKEN) -+ return(-EINVAL); -+ if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0) -+ return(error); -+ if (eventset == 0) { -+ s->sn_flags &= ~DM_SN_WANTMOUNT; -+ error = 0; -+ } else if (eventset == 1 << DM_EVENT_MOUNT) { -+ s->sn_flags |= DM_SN_WANTMOUNT; -+ error = 0; -+ } else { -+ error = -EINVAL; -+ } -+ mutex_spinunlock(&s->sn_qlock, lc1); -+ return(error); -+ } -+ -+ /* Since it's not the global handle, it had better be a filesystem -+ handle. Verify that the first 'maxevent' events in the event list -+ are all valid for a filesystem handle. -+ */ -+ -+ if (eventset & ~DM_VALID_DISP_EVENTS) -+ return(-EINVAL); -+ -+ /* Verify that the session is valid, that the handle is a filesystem -+ handle, and that the filesystem is capable of sending events. (If -+ a dm_fsreg_t structure exists, then the filesystem can issue events.) -+ */ -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1); -+ if (fsrp == NULL) { -+ dm_app_put_tdp(tdp); -+ return(-EINVAL); -+ } -+ -+ /* Now that we own 'fsrp->fr_lock', get the lock on the session so that -+ it can't disappear while we add it to the filesystem's event mask. -+ */ -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) { -+ mutex_spinunlock(&fsrp->fr_lock, lc1); -+ dm_app_put_tdp(tdp); -+ return(error); -+ } -+ -+ /* Update the event disposition array for this filesystem, adding -+ and/or removing the session as appropriate. 
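The eventset &= (1 << maxevent) - 1 step in dm_set_disp() above simply clears every bit at or above maxevent, so stale high bits supplied by the caller cannot enable events the caller never named. A two-line illustration of the mask, using a 64-bit set and a small bound (safe because the patch guarantees maxevent is at most DM_EVENT_MAX, well below 64):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t eventset = 0xF000000000000007ULL; /* garbage left in high bits */
    unsigned maxevent = 3;                     /* only events 0..2 are meaningful */

    /* Keep only the low 'maxevent' bits. */
    eventset &= (UINT64_C(1) << maxevent) - 1;

    printf("0x%llx\n", (unsigned long long)eventset); /* prints 0x7 */
    return 0;
}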
If this session is -+ dropping registration for DM_EVENT_DESTROY, or is overriding some -+ other session's registration for DM_EVENT_DESTROY, then clear any -+ any attr-on-destroy attribute name also. -+ */ -+ -+ for (i = 0; i < DM_EVENT_MAX; i++) { -+ if (DMEV_ISSET(i, eventset)) { -+ if (i == DM_EVENT_DESTROY && fsrp->fr_sessp[i] != s) -+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr)); -+ fsrp->fr_sessp[i] = s; -+ } else if (fsrp->fr_sessp[i] == s) { -+ if (i == DM_EVENT_DESTROY) -+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr)); -+ fsrp->fr_sessp[i] = NULL; -+ } -+ } -+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */ -+ -+ /* Wake up all processes waiting for a disposition on this filesystem -+ in case any of them happen to be waiting for an event which we just -+ added. -+ */ -+ -+ if (fsrp->fr_dispcnt) -+ sv_broadcast(&fsrp->fr_dispq); -+ -+ mutex_spinunlock(&fsrp->fr_lock, lc1); -+ -+ dm_app_put_tdp(tdp); -+ return(0); -+} -+ -+ -+/* -+ * Register a specific attribute name with a filesystem. The value of -+ * the attribute is to be returned with an asynchronous destroy event. -+ */ -+ -+int -+dm_set_return_on_destroy( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_attrname_t __user *attrnamep, -+ dm_boolean_t enable) -+{ -+ dm_attrname_t attrname; -+ dm_tokdata_t *tdp; -+ dm_fsreg_t *fsrp; -+ dm_session_t *s; -+ int error; -+ unsigned long lc1; /* first lock cookie */ -+ unsigned long lc2; /* second lock cookie */ -+ -+ /* If a dm_attrname_t is provided, copy it in and validate it. */ -+ -+ if (enable && (error = copy_from_user(&attrname, attrnamep, sizeof(attrname))) != 0) -+ return(error); -+ -+ /* Validate the filesystem handle and use it to get the filesystem's -+ disposition structure. -+ */ -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_EXCL, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1); -+ if (fsrp == NULL) { -+ dm_app_put_tdp(tdp); -+ return(-EINVAL); -+ } -+ -+ /* Now that we own 'fsrp->fr_lock', get the lock on the session so that -+ it can't disappear while we add it to the filesystem's event mask. -+ */ -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) { -+ mutex_spinunlock(&fsrp->fr_lock, lc1); -+ dm_app_put_tdp(tdp); -+ return(error); -+ } -+ -+ /* A caller cannot disable return-on-destroy if he is not registered -+ for DM_EVENT_DESTROY. Enabling return-on-destroy is an implicit -+ dm_set_disp() for DM_EVENT_DESTROY; we wake up all processes -+ waiting for a disposition in case any was waiting for a -+ DM_EVENT_DESTROY event. -+ */ -+ -+ error = 0; -+ if (enable) { -+ fsrp->fr_sessp[DM_EVENT_DESTROY] = s; -+ fsrp->fr_rattr = attrname; -+ if (fsrp->fr_dispcnt) -+ sv_broadcast(&fsrp->fr_dispq); -+ } else if (fsrp->fr_sessp[DM_EVENT_DESTROY] != s) { -+ error = -EINVAL; -+ } else { -+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr)); -+ } -+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */ -+ mutex_spinunlock(&fsrp->fr_lock, lc1); -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_get_mountinfo( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_fsreg_t *fsrp; -+ dm_tokdata_t *tdp; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ /* Make sure that the caller's buffer is 8-byte aligned. 
*/ -+ -+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0) -+ return(-EFAULT); -+ -+ /* Verify that the handle is a filesystem handle, and that the -+ filesystem is capable of sending events. If not, return an error. -+ */ -+ -+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS, -+ DM_RIGHT_SHARED, &tdp); -+ if (error != 0) -+ return(error); -+ -+ /* Find the filesystem entry. This should always succeed as the -+ dm_app_get_tdp call created a filesystem reference. Once we find -+ the entry, drop the lock. The mountinfo message is never modified, -+ the filesystem entry can't disappear, and we don't want to hold a -+ spinlock while doing copyout calls. -+ */ -+ -+ fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc); -+ if (fsrp == NULL) { -+ dm_app_put_tdp(tdp); -+ return(-EINVAL); -+ } -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ -+ /* Copy the message into the user's buffer and update his 'rlenp'. */ -+ -+ if (put_user(fsrp->fr_msgsize, rlenp)) { -+ error = -EFAULT; -+ } else if (fsrp->fr_msgsize > buflen) { /* user buffer not big enough */ -+ error = -E2BIG; -+ } else if (copy_to_user(bufp, fsrp->fr_msg, fsrp->fr_msgsize)) { -+ error = -EFAULT; -+ } else { -+ error = 0; -+ } -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_getall_disp( -+ dm_sessid_t sid, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_session_t *s; /* pointer to session given by sid */ -+ unsigned long lc1; /* first lock cookie */ -+ unsigned long lc2; /* second lock cookie */ -+ int totalsize; -+ int msgsize; -+ int fsyscnt; -+ dm_dispinfo_t *prevmsg; -+ dm_fsreg_t *fsrp; -+ int error; -+ char *kbuf; -+ -+ int tmp3; -+ int tmp4; -+ -+ /* Because the dm_getall_disp structure contains a __u64 field, -+ make sure that the buffer provided by the caller is aligned so -+ that he can read such fields successfully. -+ */ -+ -+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0) -+ return(-EFAULT); -+ -+ /* Compute the size of a dm_dispinfo structure, rounding up to an -+ 8-byte boundary so that any subsequent structures will also be -+ aligned. -+ */ -+ -+#if 0 -+ /* XXX ug, what is going on here? */ -+ msgsize = (sizeof(dm_dispinfo_t) + DM_FSHSIZE + sizeof(uint64_t) - 1) & -+ ~(sizeof(uint64_t) - 1); -+#else -+ tmp3 = sizeof(dm_dispinfo_t) + DM_FSHSIZE; -+ tmp3 += sizeof(__u64); -+ tmp3 -= 1; -+ tmp4 = ~((int)sizeof(__u64) - 1); -+ msgsize = tmp3 & tmp4; -+#endif -+ -+ /* Loop until we can get the right amount of temp space, being careful -+ not to hold a mutex during the allocation. Usually only one trip. -+ */ -+ -+ for (;;) { -+ if ((fsyscnt = dm_fsys_cnt) == 0) { -+ /*if (dm_cpoutsizet(rlenp, 0))*/ -+ if (put_user(0,rlenp)) -+ return(-EFAULT); -+ return(0); -+ } -+ kbuf = kmalloc(fsyscnt * msgsize, GFP_KERNEL); -+ if (kbuf == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return -ENOMEM; -+ } -+ -+ lc1 = mutex_spinlock(&dm_reg_lock); -+ if (fsyscnt == dm_fsys_cnt) -+ break; -+ -+ mutex_spinunlock(&dm_reg_lock, lc1); -+ kfree(kbuf); -+ } -+ -+ /* Find the indicated session and lock it. */ -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) { -+ mutex_spinunlock(&dm_reg_lock, lc1); -+ kfree(kbuf); -+ return(error); -+ } -+ -+ /* Create a dm_dispinfo structure for each filesystem in which -+ this session has at least one event selected for disposition. 
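dm_get_mountinfo() and dm_getall_disp() above rely on the same two pieces of pointer arithmetic: rejecting user buffers that are not 8-byte aligned, and rounding a per-filesystem record size up to the next 8-byte boundary (the tmp3/tmp4 dance is just the commented-out one-liner spelled out step by step). A compact restatement of both:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Reject pointers not aligned to an 8-byte (__u64) boundary. */
static int is_u64_aligned(const void *p)
{
    return ((uintptr_t)p & (sizeof(uint64_t) - 1)) == 0;
}

/* Round a size up to the next multiple of 8, the same arithmetic the
   tmp3/tmp4 block performs with sizeof(__u64). */
static size_t round_up_u64(size_t n)
{
    return (n + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1);
}

int main(void)
{
    uint64_t buf[4];
    printf("aligned=%d\n", is_u64_aligned(buf));             /* 1 */
    printf("aligned=%d\n", is_u64_aligned((char *)buf + 4)); /* 0 */
    printf("%zu %zu %zu\n", round_up_u64(41), round_up_u64(48), round_up_u64(1));
    /* prints 48 48 8 */
    return 0;
}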
-+ */ -+ -+ totalsize = 0; /* total bytes to transfer to the user */ -+ prevmsg = NULL; -+ -+ for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) { -+ dm_dispinfo_t *disp; -+ int event; -+ int found; -+ -+ disp = (dm_dispinfo_t *)(kbuf + totalsize); -+ -+ DMEV_ZERO(disp->di_eventset); -+ -+ for (event = 0, found = 0; event < DM_EVENT_MAX; event++) { -+ if (fsrp->fr_sessp[event] != s) -+ continue; -+ DMEV_SET(event, disp->di_eventset); -+ found++; -+ } -+ if (!found) -+ continue; -+ -+ disp->_link = 0; -+ disp->di_fshandle.vd_offset = sizeof(dm_dispinfo_t); -+ disp->di_fshandle.vd_length = DM_FSHSIZE; -+ -+ memcpy((char *)disp + disp->di_fshandle.vd_offset, -+ &fsrp->fr_fsid, disp->di_fshandle.vd_length); -+ -+ if (prevmsg) -+ prevmsg->_link = msgsize; -+ -+ prevmsg = disp; -+ totalsize += msgsize; -+ } -+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */ -+ mutex_spinunlock(&dm_reg_lock, lc1); -+ -+ if (put_user(totalsize, rlenp)) { -+ error = -EFAULT; -+ } else if (totalsize > buflen) { /* no more room */ -+ error = -E2BIG; -+ } else if (totalsize && copy_to_user(bufp, kbuf, totalsize)) { -+ error = -EFAULT; -+ } else { -+ error = 0; -+ } -+ -+ kfree(kbuf); -+ return(error); -+} -+ -+int -+dm_open_by_handle_rvp( -+ unsigned int fd, -+ void __user *hanp, -+ size_t hlen, -+ int flags, -+ int *rvp) -+{ -+ const struct cred *cred = current_cred(); -+ dm_handle_t handle; -+ int error; -+ short td_type; -+ struct dentry *dentry; -+ struct inode *inodep; -+ int new_fd; -+ struct file *mfilp; -+ struct file *filp; -+ -+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0) { -+ return(error); -+ } -+ -+ if ((inodep = dm_handle_to_ip(&handle, &td_type)) == NULL) { -+ return(-EBADF); -+ } -+ if ((td_type == DM_TDT_VFS) || (td_type == DM_TDT_OTH)) { -+ iput(inodep); -+ return(-EBADF); -+ } -+ -+ if ((new_fd = get_unused_fd()) < 0) { -+ iput(inodep); -+ return(-EMFILE); -+ } -+ -+ dentry = d_obtain_alias(inodep); -+ if (dentry == NULL) { -+ iput(inodep); -+ put_unused_fd(new_fd); -+ return(-ENOMEM); -+ } -+ -+ mfilp = fget(fd); -+ if (!mfilp) { -+ dput(dentry); -+ put_unused_fd(new_fd); -+ return(-EBADF); -+ } -+ -+ mntget(mfilp->f_vfsmnt); -+ -+ /* Create file pointer */ -+ filp = dentry_open(dentry, mfilp->f_vfsmnt, flags, cred); -+ if (IS_ERR(filp)) { -+ put_unused_fd(new_fd); -+ fput(mfilp); -+ return PTR_ERR(filp); -+ } -+ -+ if (td_type == DM_TDT_REG) -+ filp->f_mode |= FMODE_NOCMTIME; -+ -+ fd_install(new_fd, filp); -+ fput(mfilp); -+ *rvp = new_fd; -+ return 0; -+} -+ -+ -+#ifdef HAVE_DM_QUEUE_FLUSH -+/* Find the threads that have a reference to our filesystem and force -+ them to return with the specified errno. -+ We look for them in each dm_fsreg_t's fr_evt_dispq. 
-+*/ -+ -+int -+dm_release_disp_threads( -+ dm_fsid_t *fsidp, -+ struct inode *inode, /* may be null */ -+ int errno) -+{ -+ unsigned long lc; -+ dm_fsreg_t *fsrp; -+ dm_tokevent_t *tevp; -+ dm_tokdata_t *tdp; -+ dm_eventq_t *queue; -+ int found_events = 0; -+ -+ if ((fsrp = dm_find_fsreg_and_lock(fsidp, &lc)) == NULL){ -+ return 0; -+ } -+ -+ queue = &fsrp->fr_evt_dispq; -+ for (tevp = queue->eq_head; tevp; tevp = tevp->te_next) { -+ nested_spinlock(&tevp->te_lock); -+ if (inode) { -+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) { -+ if( tdp->td_ip == inode ) { -+ tevp->te_flags |= DM_TEF_FLUSH; -+ tevp->te_reply = errno; -+ found_events = 1; -+ break; -+ } -+ } -+ } -+ else { -+ tevp->te_flags |= DM_TEF_FLUSH; -+ tevp->te_reply = errno; -+ found_events = 1; -+ } -+ nested_spinunlock(&tevp->te_lock); -+ } -+ -+ if (found_events && fsrp->fr_dispcnt) -+ sv_broadcast(&fsrp->fr_dispq); -+ mutex_spinunlock(&fsrp->fr_lock, lc); -+ return 0; -+} -+#endif /* HAVE_DM_QUEUE_FLUSH */ ---- /dev/null -+++ b/fs/dmapi/dmapi_right.c -@@ -0,0 +1,1256 @@ -+/* -+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#include -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+ -+#define DM_FG_STHREAD 0x001 /* keep other threads from using tdp */ -+#define DM_FG_MUSTEXIST 0x002 /* handle must exist in the event */ -+#define DM_FG_DONTADD 0x004 /* don't add handle if not in event */ -+ -+/* Get a handle of the form (void *, size_t) from user space and convert it to -+ a handle_t. Do as much validation of the result as possible; any error -+ other than a bad address should return EBADF per the DMAPI spec. 
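The validation in the dm_copyin_handle() that follows boils down to length bookkeeping: the buffer must cover at least the fsid, must not exceed a full handle, and for anything other than a filesystem handle the embedded dm_fid_len must account for exactly the bytes that follow it, with a zero pad. A stand-alone sketch of that arithmetic; the sizes are illustrative stand-ins for sizeof(ha_fsid), sizeof(dm_fid_len) and sizeof(dm_handle_t):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stddef.h>

#define FSID_SIZE     8u    /* stands in for sizeof(handlep->ha_fsid) */
#define FIDLEN_SIZE   2u    /* stands in for sizeof(fidp->dm_fid_len) */
#define HANDLE_SIZE  32u    /* stands in for sizeof(dm_handle_t)      */

static int validate_handle_len(size_t hlen, uint16_t fid_len, uint16_t fid_pad)
{
    if (hlen < FSID_SIZE || hlen > HANDLE_SIZE)
        return -EBADF;                      /* cannot be a handle at all */

    if (hlen == FSID_SIZE)
        return 0;                           /* filesystem handle, no fid part */

    /* For a file handle, dm_fid_len must cover exactly the bytes that
       follow the length field, and the pad must be zero. */
    if (fid_len != hlen - FSID_SIZE - FIDLEN_SIZE || fid_pad != 0)
        return -EBADF;
    return 0;
}

int main(void)
{
    printf("%d\n", validate_handle_len(8, 0, 0));    /* 0: fs handle     */
    printf("%d\n", validate_handle_len(24, 14, 0));  /* 0: 24 - 8 - 2 = 14 */
    printf("%d\n", validate_handle_len(24, 16, 0));  /* -EBADF           */
    return 0;
}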
-+*/ -+ -+int -+dm_copyin_handle( -+ void __user *hanp, /* input, handle data */ -+ size_t hlen, /* input, size of handle data */ -+ dm_handle_t *handlep) /* output, copy of data */ -+{ -+ u_short len; -+ dm_fid_t *fidp; -+ -+ fidp = (dm_fid_t*)&handlep->ha_fid; -+ -+ if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) -+ return -EBADF; -+ -+ if (copy_from_user(handlep, hanp, hlen)) -+ return -EFAULT; -+ -+ if (hlen < sizeof(*handlep)) -+ memset((char *)handlep + hlen, 0, sizeof(*handlep) - hlen); -+ -+ if (hlen == sizeof(handlep->ha_fsid)) -+ return 0; /* FS handle, nothing more to check */ -+ -+ len = hlen - sizeof(handlep->ha_fsid) - sizeof(fidp->dm_fid_len); -+ -+ if ((fidp->dm_fid_len != len) || fidp->dm_fid_pad) -+ return -EBADF; -+ return 0; -+} -+ -+/* Allocate and initialize a tevp structure. Called from both application and -+ event threads. -+*/ -+ -+static dm_tokevent_t * -+dm_init_tevp( -+ int ev_size, /* size of event structure */ -+ int var_size) /* size of variable-length data */ -+{ -+ dm_tokevent_t *tevp; -+ int msgsize; -+ -+ /* Calculate the size of the event in bytes and allocate memory for it. -+ Zero all but the variable portion of the message, which will be -+ eventually overlaid by the caller with data. -+ */ -+ -+ msgsize = offsetof(dm_tokevent_t, te_event) + ev_size + var_size; -+ tevp = kmalloc(msgsize, GFP_KERNEL); -+ if (tevp == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return NULL; -+ } -+ memset(tevp, 0, msgsize - var_size); -+ -+ /* Now initialize all the non-zero fields. */ -+ -+ spinlock_init(&tevp->te_lock, "te_lock"); -+ sv_init(&tevp->te_evt_queue, SV_DEFAULT, "te_evt_queue"); -+ sv_init(&tevp->te_app_queue, SV_DEFAULT, "te_app_queue"); -+ tevp->te_allocsize = msgsize; -+ tevp->te_msg.ev_type = DM_EVENT_INVALID; -+ tevp->te_flags = 0; -+ -+ return(tevp); -+} -+ -+ -+/* Given the event type and the number of bytes of variable length data that -+ will follow the event, dm_evt_create_tevp() creates a dm_tokevent_t -+ structure to hold the event and initializes all the common event fields. -+ -+ No locking is required for this routine because the caller is an event -+ thread, and is therefore the only thread that can see the event. 
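dm_init_tevp() above sizes its allocation as offsetof(dm_tokevent_t, te_event) + ev_size + var_size, that is, the fixed header up to the embedded event, plus the event structure, plus its variable-length tail, and then zeroes everything except that tail, which the caller overwrites. The same offsetof pattern in a self-contained form, using toy structures rather than the patch's types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

/* Toy message: fixed header followed by an embedded event; the variable
   payload sits in memory immediately after the last field. */
struct toy_event    { int type; };
struct toy_tokevent {
    int refcount;
    int flags;
    struct toy_event event;     /* last field; tail data follows it */
};

static struct toy_tokevent *alloc_tokevent(size_t ev_size, size_t var_size)
{
    size_t msgsize = offsetof(struct toy_tokevent, event) + ev_size + var_size;
    struct toy_tokevent *tevp = malloc(msgsize);

    if (tevp == NULL)
        return NULL;
    /* Zero everything but the variable tail, as dm_init_tevp() does. */
    memset(tevp, 0, msgsize - var_size);
    return tevp;
}

int main(void)
{
    struct toy_tokevent *tevp = alloc_tokevent(sizeof(struct toy_event), 16);
    if (tevp) {
        printf("fixed header ends at %zu bytes\n",
               offsetof(struct toy_tokevent, event));
        free(tevp);
    }
    return 0;
}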
-+*/ -+ -+dm_tokevent_t * -+dm_evt_create_tevp( -+ dm_eventtype_t event, -+ int variable_size, -+ void **msgpp) -+{ -+ dm_tokevent_t *tevp; -+ int evsize; -+ -+ switch (event) { -+ case DM_EVENT_READ: -+ case DM_EVENT_WRITE: -+ case DM_EVENT_TRUNCATE: -+ evsize = sizeof(dm_data_event_t); -+ break; -+ -+ case DM_EVENT_DESTROY: -+ evsize = sizeof(dm_destroy_event_t); -+ break; -+ -+ case DM_EVENT_MOUNT: -+ evsize = sizeof(dm_mount_event_t); -+ break; -+ -+ case DM_EVENT_PREUNMOUNT: -+ case DM_EVENT_UNMOUNT: -+ case DM_EVENT_NOSPACE: -+ case DM_EVENT_CREATE: -+ case DM_EVENT_REMOVE: -+ case DM_EVENT_RENAME: -+ case DM_EVENT_SYMLINK: -+ case DM_EVENT_LINK: -+ case DM_EVENT_POSTCREATE: -+ case DM_EVENT_POSTREMOVE: -+ case DM_EVENT_POSTRENAME: -+ case DM_EVENT_POSTSYMLINK: -+ case DM_EVENT_POSTLINK: -+ case DM_EVENT_ATTRIBUTE: -+ case DM_EVENT_DEBUT: /* currently not supported */ -+ case DM_EVENT_CLOSE: /* currently not supported */ -+ evsize = sizeof(dm_namesp_event_t); -+ break; -+ -+ case DM_EVENT_CANCEL: /* currently not supported */ -+ evsize = sizeof(dm_cancel_event_t); -+ break; -+ -+ case DM_EVENT_USER: -+ evsize = 0; -+ break; -+ -+ default: -+ panic("dm_create_tevp: called with unknown event type %d\n", -+ event); -+ } -+ -+ /* Allocate and initialize an event structure of the correct size. */ -+ -+ tevp = dm_init_tevp(evsize, variable_size); -+ if (tevp == NULL) -+ return NULL; -+ tevp->te_evt_ref = 1; -+ -+ /* Fields ev_token, ev_sequence, and _link are all filled in when the -+ event is queued onto a session. Initialize all other fields here. -+ */ -+ -+ tevp->te_msg.ev_type = event; -+ tevp->te_msg.ev_data.vd_offset = offsetof(dm_tokevent_t, te_event) - -+ offsetof(dm_tokevent_t, te_msg); -+ tevp->te_msg.ev_data.vd_length = evsize + variable_size; -+ -+ /* Give the caller a pointer to the event-specific structure. */ -+ -+ *msgpp = ((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset); -+ return(tevp); -+} -+ -+ -+/* Given a pointer to an event (tevp) and a pointer to a handle_t, look for a -+ tdp structure within the event which contains the handle_t. Either verify -+ that the event contains the tdp, or optionally add the tdp to the -+ event. Called only from application threads. -+ -+ On entry, tevp->te_lock is held; it is dropped prior to return. -+*/ -+ -+static int -+dm_app_lookup_tdp( -+ dm_handle_t *handlep, /* the handle we are looking for */ -+ dm_tokevent_t *tevp, /* the event to search for the handle */ -+ unsigned long *lcp, /* address of active lock cookie */ -+ short types, /* acceptable object types */ -+ dm_right_t right, /* minimum right the object must have */ -+ u_int flags, -+ dm_tokdata_t **tdpp) /* if ! NULL, pointer to matching tdp */ -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ struct inode *ip; -+ int error; -+ -+ /* Bump the tevp application reference counter so that the event -+ can't disappear in case we have to drop the lock for a while. -+ */ -+ -+ tevp->te_app_ref++; -+ *tdpp = NULL; /* assume failure */ -+ -+ for (;;) { -+ /* Look for a matching tdp in the tevp. */ -+ -+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) { -+ if (DM_HANDLE_CMP(&tdp->td_handle, handlep) == 0) -+ break; -+ } -+ -+ /* If the tdp exists, but either we need single-thread access -+ to the handle and can't get it, or some other thread already -+ has single-thread access, then sleep until we can try again. 
-+ */ -+ -+ if (tdp != NULL && tdp->td_app_ref && -+ ((flags & DM_FG_STHREAD) || -+ (tdp->td_flags & DM_TDF_STHREAD))) { -+ tevp->te_app_slp++; -+ sv_wait(&tevp->te_app_queue, 1, -+ &tevp->te_lock, *lcp); -+ *lcp = mutex_spinlock(&tevp->te_lock); -+ tevp->te_app_slp--; -+ continue; -+ } -+ -+ if (tdp != NULL && -+ (tdp->td_vcount > 0 || tdp->td_flags & DM_TDF_EVTREF)) { -+ /* We have an existing tdp with a non-zero inode -+ reference count. If it's the wrong type, return -+ an appropriate errno. -+ */ -+ -+ if (!(tdp->td_type & types)) { -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ dm_put_tevp(tevp, NULL); /* no destroy events */ -+ return(-EOPNOTSUPP); -+ } -+ -+ /* If the current access right isn't high enough, -+ complain. -+ */ -+ -+ if (tdp->td_right < right) { -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ dm_put_tevp(tevp, NULL); /* no destroy events */ -+ return(-EACCES); -+ } -+ -+ /* The handle is acceptable. Increment the tdp -+ application and inode references and mark the tdp -+ as single-threaded if necessary. -+ */ -+ -+ tdp->td_app_ref++; -+ if (flags & DM_FG_STHREAD) -+ tdp->td_flags |= DM_TDF_STHREAD; -+ tdp->td_vcount++; -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ (void)fsys_vector->obj_ref_hold(tdp->td_ip); -+ -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ *tdpp = tdp; -+ return(0); -+ } -+ -+ /* If the tdp is not in the tevp or does not have an inode -+ reference, check to make sure it is okay to add/update it. -+ */ -+ -+ if (flags & DM_FG_MUSTEXIST) { -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ dm_put_tevp(tevp, NULL); /* no destroy events */ -+ return(-EACCES); /* i.e. an insufficient right */ -+ } -+ if (flags & DM_FG_DONTADD) { -+ tevp->te_app_ref--; -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ return(0); -+ } -+ -+ /* If a tdp structure doesn't yet exist, create one and link -+ it into the tevp. Drop the lock while we are doing this as -+ zallocs can go to sleep. Once we have the memory, make -+ sure that another thread didn't simultaneously add the same -+ handle to the same event. If so, toss ours and start over. -+ */ -+ -+ if (tdp == NULL) { -+ dm_tokdata_t *tmp; -+ -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ -+ tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL); -+ if (tdp == NULL){ -+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return(-ENOMEM); -+ } -+ memset(tdp, 0, sizeof(*tdp)); -+ -+ *lcp = mutex_spinlock(&tevp->te_lock); -+ -+ for (tmp = tevp->te_tdp; tmp; tmp = tmp->td_next) { -+ if (DM_HANDLE_CMP(&tmp->td_handle, handlep) == 0) -+ break; -+ } -+ if (tmp) { -+ kmem_cache_free(dm_tokdata_cachep, tdp); -+ continue; -+ } -+ -+ tdp->td_next = tevp->te_tdp; -+ tevp->te_tdp = tdp; -+ tdp->td_tevp = tevp; -+ tdp->td_handle = *handlep; -+ } -+ -+ /* Temporarily single-thread access to the tdp so that other -+ threads don't touch it while we are filling the rest of the -+ fields in. -+ */ -+ -+ tdp->td_app_ref = 1; -+ tdp->td_flags |= DM_TDF_STHREAD; -+ -+ /* Drop the spinlock while we access, validate, and obtain the -+ proper rights to the object. This can take a very long time -+ if the inode is not in memory, if the filesystem is -+ unmounting, or if the request_right() call should block -+ because some other tdp or kernel thread is holding a right. -+ */ -+ -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ -+ if ((ip = dm_handle_to_ip(handlep, &tdp->td_type)) == NULL) { -+ error = -EBADF; -+ } else { -+ tdp->td_vcount = 1; -+ tdp->td_ip = ip; -+ -+ /* The handle is usable. 
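The tdp-insertion path above follows a common shape: drop the spinlock before the allocation (which may sleep), retake it, and rescan the list in case another thread inserted the same handle in the meantime, discarding the freshly allocated node if it lost the race. A condensed user-space version of that shape with a pthread mutex and an illustrative list node, not the real dm_tokdata_t handling:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct node { int key; struct node *next; };

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find key in the list, inserting it if absent. The allocation happens
   with the lock dropped, so the list must be rescanned afterwards. */
static struct node *lookup_or_insert(int key)
{
    struct node *n, *fresh;

    pthread_mutex_lock(&list_lock);
    for (n = head; n; n = n->next)
        if (n->key == key)
            break;
    if (n) {
        pthread_mutex_unlock(&list_lock);
        return n;
    }

    pthread_mutex_unlock(&list_lock);     /* do not allocate under the lock */
    fresh = malloc(sizeof(*fresh));
    if (fresh == NULL)
        return NULL;
    pthread_mutex_lock(&list_lock);

    /* Re-scan: another thread may have inserted 'key' while the lock was
       dropped; if so, toss our node and use the existing one. */
    for (n = head; n; n = n->next)
        if (n->key == key)
            break;
    if (n) {
        free(fresh);
        pthread_mutex_unlock(&list_lock);
        return n;
    }
    fresh->key = key;
    fresh->next = head;
    head = fresh;
    pthread_mutex_unlock(&list_lock);
    return fresh;
}

int main(void)
{
    printf("%p\n", (void *)lookup_or_insert(42));
    printf("%p\n", (void *)lookup_or_insert(42));   /* same node both times */
    return 0;
}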
Check that the type of the -+ object matches one of the types that the caller -+ will accept. -+ */ -+ -+ if (!(types & tdp->td_type)) { -+ error = -EOPNOTSUPP; -+ } else if (right > DM_RIGHT_NULL) { -+ /* Attempt to get the rights required by the -+ caller. If rights can't be obtained, return -+ an error. -+ */ -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->request_right(tdp->td_ip, -+ DM_RIGHT_NULL, -+ (tdp->td_type == DM_TDT_VFS ? -+ DM_FSYS_OBJ : 0), -+ DM_RR_WAIT, right); -+ if (!error) { -+ tdp->td_right = right; -+ } -+ } else { -+ error = 0; -+ } -+ } -+ if (error != 0) { -+ dm_put_tevp(tevp, tdp); /* destroy event risk, although tiny */ -+ return(error); -+ } -+ -+ *lcp = mutex_spinlock(&tevp->te_lock); -+ -+ /* Wake up any threads which may have seen our tdp while we -+ were filling it in. -+ */ -+ -+ if (!(flags & DM_FG_STHREAD)) { -+ tdp->td_flags &= ~DM_TDF_STHREAD; -+ if (tevp->te_app_slp) -+ sv_broadcast(&tevp->te_app_queue); -+ } -+ -+ mutex_spinunlock(&tevp->te_lock, *lcp); -+ *tdpp = tdp; -+ return(0); -+ } -+} -+ -+ -+/* dm_app_get_tdp_by_token() is called whenever the application request -+ contains a session ID and contains a token other than DM_NO_TOKEN. -+ Most of the callers provide a right that is either DM_RIGHT_SHARED or -+ DM_RIGHT_EXCL, but a few of the callers such as dm_obj_ref_hold() may -+ specify a right of DM_RIGHT_NULL. -+*/ -+ -+static int -+dm_app_get_tdp_by_token( -+ dm_sessid_t sid, /* an existing session ID */ -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, /* an existing token */ -+ short types, /* acceptable object types */ -+ dm_right_t right, /* minimum right the object must have */ -+ u_int flags, -+ dm_tokdata_t **tdpp) -+{ -+ dm_tokevent_t *tevp; -+ dm_handle_t handle; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ if (right < DM_RIGHT_NULL || right > DM_RIGHT_EXCL) -+ return(-EINVAL); -+ -+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0) -+ return(error); -+ -+ /* Find and lock the event which corresponds to the specified -+ session/token pair. -+ */ -+ -+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0) -+ return(error); -+ -+ return(dm_app_lookup_tdp(&handle, tevp, &lc, types, -+ right, flags, tdpp)); -+} -+ -+ -+/* Function dm_app_get_tdp() must ONLY be called from routines associated with -+ application calls, e.g. dm_read_invis, dm_set_disp, etc. It must not be -+ called by a thread responsible for generating an event such as -+ dm_send_data_event()! -+ -+ dm_app_get_tdp() is the interface used by all application calls other than -+ dm_get_events, dm_respond_event, dm_get_config, dm_get_config_events, and by -+ the dm_obj_ref_* and dm_*_right families of requests. -+ -+ dm_app_get_tdp() converts a sid/hanp/hlen/token quad into a tdp pointer, -+ increments the number of active application threads in the event, and -+ increments the number of active application threads using the tdp. The -+ 'right' parameter must be either DM_RIGHT_SHARED or DM_RIGHT_EXCL. The -+ token may either be DM_NO_TOKEN, or can be a token received in a synchronous -+ event. 
-+*/ -+ -+int -+dm_app_get_tdp( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ short types, -+ dm_right_t right, /* minimum right */ -+ dm_tokdata_t **tdpp) -+{ -+ dm_session_t *s; -+ dm_handle_t handle; -+ dm_tokevent_t *tevp; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ ASSERT(right >= DM_RIGHT_SHARED); -+ -+ /* If a token other than DM_NO_TOKEN is specified, find the event on -+ this session which owns the token and increment its reference count. -+ */ -+ -+ if (token != DM_NO_TOKEN) { /* look up existing tokevent struct */ -+ return(dm_app_get_tdp_by_token(sid, hanp, hlen, token, types, -+ right, DM_FG_MUSTEXIST, tdpp)); -+ } -+ -+ /* The token is DM_NO_TOKEN. In this case we only want to verify that -+ the session ID is valid, and do not need to continue holding the -+ session lock after we know that to be true. -+ */ -+ -+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0) -+ return(error); -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) -+ return(error); -+ mutex_spinunlock(&s->sn_qlock, lc); -+ -+ /* When DM_NO_TOKEN is used, we simply block until we can obtain the -+ right that we want (since the tevp contains no tdp structures). -+ The blocking when we eventually support it will occur within -+ fsys_vector->request_right(). -+ */ -+ -+ tevp = dm_init_tevp(0, 0); -+ lc = mutex_spinlock(&tevp->te_lock); -+ -+ return(dm_app_lookup_tdp(&handle, tevp, &lc, types, right, 0, tdpp)); -+} -+ -+ -+/* dm_get_config_tdp() is only called by dm_get_config() and -+ dm_get_config_events(), which neither have a session ID nor a token. -+ Both of these calls are supposed to work even if the filesystem is in the -+ process of being mounted, as long as the caller only uses handles within -+ the mount event. -+*/ -+ -+int -+dm_get_config_tdp( -+ void __user *hanp, -+ size_t hlen, -+ dm_tokdata_t **tdpp) -+{ -+ dm_handle_t handle; -+ dm_tokevent_t *tevp; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0) -+ return(error); -+ -+ tevp = dm_init_tevp(0, 0); -+ lc = mutex_spinlock(&tevp->te_lock); -+ -+ /* Try to use the handle provided by the caller and assume DM_NO_TOKEN. -+ This will fail if the filesystem is in the process of being mounted. -+ */ -+ -+ error = dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY, -+ DM_RIGHT_NULL, 0, tdpp); -+ -+ if (!error) { -+ return(0); -+ } -+ -+ /* Perhaps the filesystem is still mounting, in which case we need to -+ see if this is one of the handles in the DM_EVENT_MOUNT tevp. -+ */ -+ -+ if ((tevp = dm_find_mount_tevp_and_lock(&handle.ha_fsid, &lc)) == NULL) -+ return(-EBADF); -+ -+ return(dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY, -+ DM_RIGHT_NULL, DM_FG_MUSTEXIST, tdpp)); -+} -+ -+ -+/* dm_put_tdp() is called to release any right held on the inode, and to -+ VN_RELE() all references held on the inode. It is the caller's -+ responsibility to ensure that no other application threads are using the -+ tdp, and if necessary to unlink the tdp from the tevp before calling -+ this routine and to free the tdp afterwards. -+*/ -+ -+static void -+dm_put_tdp( -+ dm_tokdata_t *tdp) -+{ -+ ASSERT(tdp->td_app_ref <= 1); -+ -+ /* If the application thread is holding a right, or if the event -+ thread had a right but it has disappeared because of a dm_pending -+ or Cntl-C, then we need to release it here. 
-+ */ -+ -+ if (tdp->td_right != DM_RIGHT_NULL) { -+ dm_fsys_vector_t *fsys_vector; -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ (void)fsys_vector->release_right(tdp->td_ip, tdp->td_right, -+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0)); -+ tdp->td_right = DM_RIGHT_NULL; -+ } -+ -+ /* Given that we wouldn't be here if there was still an event thread, -+ this VN_RELE loop has the potential of generating a DM_EVENT_DESTROY -+ event if some other thread has unlinked the file. -+ */ -+ -+ while (tdp->td_vcount > 0) { -+ iput(tdp->td_ip); -+ tdp->td_vcount--; -+ } -+ -+ tdp->td_flags &= ~(DM_TDF_HOLD|DM_TDF_RIGHT); -+ tdp->td_ip = NULL; -+} -+ -+ -+/* Function dm_put_tevp() must ONLY be called from routines associated with -+ application threads, e.g. dm_read_invis, dm_get_events, etc. It must not be -+ called by a thread responsible for generating an event, such as -+ dm_send_data_event. -+ -+ PLEASE NOTE: It is possible for this routine to generate DM_EVENT_DESTROY -+ events, because its calls to dm_put_tdp drop inode references, and another -+ thread may have already unlinked a file whose inode we are de-referencing. -+ This sets the stage for various types of deadlock if the thread calling -+ dm_put_tevp is the same thread that calls dm_respond_event! In particular, -+ the dm_sent_destroy_event routine needs to obtain the dm_reg_lock, -+ dm_session_lock, and sn_qlock in order to queue the destroy event. No -+ caller of dm_put_tevp can hold any of these locks! -+ -+ Other possible deadlocks are that dm_send_destroy_event could block waiting -+ for a thread to register for the event using dm_set_disp() and/or -+ dm_set_return_on_destroy, or it could block because the session's sn_newq -+ is at the dm_max_queued_msgs event limit. The only safe solution -+ (unimplemented) is to have a separate kernel thread for each filesystem -+ whose only job is to do the inode-dereferencing. That way dm_respond_event -+ will not block, so the application can keep calling dm_get_events to read -+ events even if the filesystem thread should block. (If the filesystem -+ thread blocks, so will all subsequent destroy events for the same -+ filesystem.) -+*/ -+ -+void -+dm_put_tevp( -+ dm_tokevent_t *tevp, -+ dm_tokdata_t *tdp) -+{ -+ int free_tdp = 0; -+ unsigned long lc; /* lock cookie */ -+ -+ lc = mutex_spinlock(&tevp->te_lock); -+ -+ if (tdp != NULL) { -+ if (tdp->td_vcount > 1 || (tdp->td_flags & DM_TDF_EVTREF)) { -+ ASSERT(tdp->td_app_ref > 0); -+ -+ iput(tdp->td_ip); -+ tdp->td_vcount--; -+ } else { -+ ASSERT(tdp->td_app_ref == 1); -+ -+ /* The inode reference count is either already at -+ zero (e.g. a failed dm_handle_to_ip() call in -+ dm_app_lookup_tdp()) or is going to zero. We can't -+ hold the lock while we decrement the count because -+ we could potentially end up being busy for a long -+ time in VOP_INACTIVATE. Use single-threading to -+ lock others out while we clean house. -+ */ -+ -+ tdp->td_flags |= DM_TDF_STHREAD; -+ -+ /* WARNING - A destroy event is possible here if we are -+ giving up the last reference on an inode which has -+ been previously unlinked by some other thread! -+ */ -+ -+ mutex_spinunlock(&tevp->te_lock, lc); -+ dm_put_tdp(tdp); -+ lc = mutex_spinlock(&tevp->te_lock); -+ -+ /* If this tdp is not one of the original tdps in the -+ event, then remove it from the tevp. 
-+ */ -+ -+ if (!(tdp->td_flags & DM_TDF_ORIG)) { -+ dm_tokdata_t **tdpp = &tevp->te_tdp; -+ -+ while (*tdpp && *tdpp != tdp) { -+ tdpp = &(*tdpp)->td_next; -+ } -+ if (*tdpp == NULL) { -+ panic("dm_remove_tdp_from_tevp: tdp " -+ "%p not in tevp %p\n", tdp, -+ tevp); -+ } -+ *tdpp = tdp->td_next; -+ free_tdp++; -+ } -+ } -+ -+ /* If this is the last app thread actively using the tdp, clear -+ any single-threading and wake up any other app threads who -+ might be waiting to use this tdp, single-threaded or -+ otherwise. -+ */ -+ -+ if (--tdp->td_app_ref == 0) { -+ if (tdp->td_flags & DM_TDF_STHREAD) { -+ tdp->td_flags &= ~DM_TDF_STHREAD; -+ if (tevp->te_app_slp) -+ sv_broadcast(&tevp->te_app_queue); -+ } -+ } -+ -+ if (free_tdp) { -+ kmem_cache_free(dm_tokdata_cachep, tdp); -+ } -+ } -+ -+ /* If other application threads are using this token/event, they will -+ do the cleanup. -+ */ -+ -+ if (--tevp->te_app_ref > 0) { -+ mutex_spinunlock(&tevp->te_lock, lc); -+ return; -+ } -+ -+ /* If event generation threads are waiting for this thread to go away, -+ wake them up and let them do the cleanup. -+ */ -+ -+ if (tevp->te_evt_ref > 0) { -+ sv_broadcast(&tevp->te_evt_queue); -+ mutex_spinunlock(&tevp->te_lock, lc); -+ return; -+ } -+ -+ /* This thread is the last active thread using the token/event. No -+ lock can be held while we disassemble the tevp because we could -+ potentially end up being busy for a long time in VOP_INACTIVATE. -+ */ -+ -+ mutex_spinunlock(&tevp->te_lock, lc); -+ -+ /* WARNING - One or more destroy events are possible here if we are -+ giving up references on inodes which have been previously unlinked -+ by other kernel threads! -+ */ -+ -+ while ((tdp = tevp->te_tdp) != NULL) { -+ tevp->te_tdp = tdp->td_next; -+ dm_put_tdp(tdp); -+ kmem_cache_free(dm_tokdata_cachep, tdp); -+ } -+ spinlock_destroy(&tevp->te_lock); -+ sv_destroy(&tevp->te_evt_queue); -+ sv_destroy(&tevp->te_app_queue); -+ kfree(tevp); -+} -+ -+ -+/* No caller of dm_app_put_tevp can hold either of the locks dm_reg_lock, -+ dm_session_lock, or any sn_qlock! (See dm_put_tevp for details.) -+*/ -+ -+void -+dm_app_put_tdp( -+ dm_tokdata_t *tdp) -+{ -+ dm_put_tevp(tdp->td_tevp, tdp); -+} -+ -+ -+/* dm_change_right is only called if the event thread is the one doing the -+ cleanup on a completed event. It looks at the current rights of a tdp -+ and compares that with the rights it had on the tdp when the event was -+ created. If different, it reaquires the original rights, then transfers -+ the rights back to being thread-based. -+*/ -+ -+static void -+dm_change_right( -+ dm_tokdata_t *tdp) -+{ -+#ifdef HAVE_DMAPI_RIGHTS -+ dm_fsys_vector_t *fsys_vector; -+ int error; -+ u_int type; -+#endif -+ -+ /* If the event doesn't have an inode reference, if the original right -+ was DM_RIGHT_NULL, or if the rights were never switched from being -+ thread-based to tdp-based, then there is nothing to do. -+ */ -+ -+ if (!(tdp->td_flags & DM_TDF_EVTREF)) -+ return; -+ -+ if (tdp->td_orig_right == DM_RIGHT_NULL) -+ return; -+ -+ /* DEBUG - Need a check here for event-based rights. */ -+ -+#ifdef HAVE_DMAPI_RIGHTS -+ /* The "rights" vectors are stubs now anyway. When they are -+ * implemented then bhv locking will have to be sorted out. -+ */ -+ -+ /* If the current right is not the same as it was when the event was -+ created, first get back the original right. -+ */ -+ -+ if (tdp->td_right != tdp->td_orig_right) { -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ type = (tdp->td_type == DM_TDT_VFS ? 
DM_FSYS_OBJ : 0); -+ -+ switch (tdp->td_orig_right) { -+ case DM_RIGHT_SHARED: -+ if (tdp->td_right == DM_RIGHT_EXCL) { -+ error = fsys_vector->downgrade_right( -+ tdp->td_ip, tdp->td_right, type); -+ if (!error) -+ break; -+ (void)fsys_vector->release_right(tdp->td_ip, -+ tdp->td_right, type); -+ } -+ (void)fsys_vector->request_right(tdp->td_ip, -+ tdp->td_right, type, DM_RR_WAIT, -+ tdp->td_orig_right); -+ break; -+ -+ case DM_RIGHT_EXCL: -+ if (tdp->td_right == DM_RIGHT_SHARED) { -+ error = fsys_vector->upgrade_right(tdp->td_ip, -+ tdp->td_right, type); -+ if (!error) -+ break; -+ (void)fsys_vector->release_right(tdp->td_ip, -+ tdp->td_right, type); -+ } -+ (void)fsys_vector->request_right(tdp->td_ip, -+ tdp->td_right, type, DM_RR_WAIT, -+ tdp->td_orig_right); -+ break; -+ case DM_RIGHT_NULL: -+ break; -+ } -+ } -+#endif -+ -+ /* We now have back the same level of rights as we had when the event -+ was generated. Now transfer the rights from being tdp-based back -+ to thread-based. -+ */ -+ -+ /* DEBUG - Add a call here to transfer rights back to thread-based. */ -+ -+ /* Finally, update the tdp so that we don't mess with the rights when -+ we eventually call dm_put_tdp. -+ */ -+ -+ tdp->td_right = DM_RIGHT_NULL; -+} -+ -+ -+/* This routine is only called by event threads. The calls to dm_put_tdp -+ are not a deadlock risk here because this is an event thread, and it is -+ okay for such a thread to block on an induced destroy event. Okay, maybe -+ there is a slight risk; say that the event contains three inodes all of -+ which have DM_RIGHT_EXCL, and say that we are at the dm_max_queued_msgs -+ limit, and that the first inode is already unlinked. In that case the -+ destroy event will block waiting to be queued, and the application thread -+ could happen to reference one of the other locked inodes. Deadlock. -+*/ -+ -+void -+dm_evt_rele_tevp( -+ dm_tokevent_t *tevp, -+ int droprights) /* non-zero, evt thread loses rights */ -+{ -+ dm_tokdata_t *tdp; -+ unsigned long lc; /* lock cookie */ -+ -+ lc = mutex_spinlock(&tevp->te_lock); -+ -+ /* If we are here without DM_TEF_FINAL set and with at least one -+ application reference still remaining, then one of several -+ possibilities is true: -+ 1. This is an asynchronous event which has been queued but has not -+ yet been delivered, or which is in the process of being delivered. -+ 2. This is an unmount event (pseudo-asynchronous) yet to be -+ delivered or in the process of being delivered. -+ 3. This event had DM_FLAGS_NDELAY specified, and the application -+ has sent a dm_pending() reply for the event. -+ 4. This is a DM_EVENT_READ, DM_EVENT_WRITE, or DM_EVENT_TRUNCATE -+ event and the user typed a Cntl-C. -+ In all of these cases, the correct behavior is to leave the -+ responsibility of releasing any rights to the application threads -+ when they are done. -+ */ -+ -+ if (tevp->te_app_ref > 0 && !(tevp->te_flags & DM_TEF_FINAL)) { -+ tevp->te_evt_ref--; -+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) { -+ if (tdp->td_flags & DM_TDF_EVTREF) { -+ tdp->td_flags &= ~DM_TDF_EVTREF; -+ if (tdp->td_vcount == 0) { -+ tdp->td_ip = NULL; -+ } -+ } -+ } -+ mutex_spinunlock(&tevp->te_lock, lc); -+ return; /* not the last thread */ -+ } -+ -+ /* If the application reference count is non-zero here, that can only -+ mean that dm_respond_event() has been called, but the application -+ still has one or more threads in the kernel that haven't let go of -+ the tevp. 
In these cases, the event thread must wait until all -+ application threads have given up their references, and their -+ rights to handles within the event. -+ */ -+ -+ while (tevp->te_app_ref) { -+ sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc); -+ lc = mutex_spinlock(&tevp->te_lock); -+ } -+ -+ /* This thread is the last active thread using the token/event. Reset -+ the rights of any inode that was part of the original event back -+ to their initial values before returning to the filesystem. The -+ exception is if the event failed (droprights is non-zero), in which -+ case we chose to return to the filesystem with all rights released. -+ Release the rights on any inode that was not part of the original -+ event. Give up all remaining application inode references -+ regardless of whether or not the inode was part of the original -+ event. -+ */ -+ -+ mutex_spinunlock(&tevp->te_lock, lc); -+ -+ while ((tdp = tevp->te_tdp) != NULL) { -+ tevp->te_tdp = tdp->td_next; -+ if ((tdp->td_flags & DM_TDF_ORIG) && -+ (tdp->td_flags & DM_TDF_EVTREF) && -+ (!droprights)) { -+ dm_change_right(tdp); -+ } -+ dm_put_tdp(tdp); -+ kmem_cache_free(dm_tokdata_cachep, tdp); -+ } -+ spinlock_destroy(&tevp->te_lock); -+ sv_destroy(&tevp->te_evt_queue); -+ sv_destroy(&tevp->te_app_queue); -+ kfree(tevp); -+} -+ -+ -+/* dm_obj_ref_hold() is just a fancy way to get an inode reference on an object -+ to hold it in kernel memory. -+*/ -+ -+int -+dm_obj_ref_hold( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_NULL, DM_FG_STHREAD, &tdp); -+ -+ /* The tdp is single-threaded, so no mutex lock needed for update. */ -+ -+ if (error == 0) { -+ if (tdp->td_flags & DM_TDF_HOLD) { /* if already held */ -+ error = -EBUSY; -+ } else { -+ tdp->td_flags |= DM_TDF_HOLD; -+ tdp->td_vcount++; -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ (void)fsys_vector->obj_ref_hold(tdp->td_ip); -+ } -+ dm_app_put_tdp(tdp); -+ } -+ return(error); -+} -+ -+ -+int -+dm_obj_ref_rele( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen) -+{ -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_NULL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp); -+ -+ /* The tdp is single-threaded, so no mutex lock needed for update. */ -+ -+ if (error == 0) { -+ if (!(tdp->td_flags & DM_TDF_HOLD)) { /* if not held */ -+ error = -EACCES; /* use the DM_FG_MUSTEXIST errno */ -+ } else { -+ tdp->td_flags &= ~DM_TDF_HOLD; -+ iput(tdp->td_ip); -+ tdp->td_vcount--; -+ } -+ dm_app_put_tdp(tdp); -+ } -+ return(error); -+} -+ -+ -+int -+dm_obj_ref_query_rvp( -+ dm_sessid_t sid, -+ dm_token_t token, -+ void __user *hanp, -+ size_t hlen, -+ int *rvp) -+{ -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO, -+ DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp); -+ if (error != 0) -+ return(error); -+ -+ /* If the request is valid but the handle just isn't present in the -+ event or the hold flag isn't set, return zero, else return one. 
-+ */ -+ -+ if (tdp) { -+ if (tdp->td_flags & DM_TDF_HOLD) { /* if held */ -+ *rvp = 1; -+ } else { -+ *rvp = 0; -+ } -+ dm_app_put_tdp(tdp); -+ } else { -+ *rvp = 0; -+ } -+ return(0); -+} -+ -+ -+int -+dm_downgrade_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY, -+ DM_RIGHT_EXCL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp); -+ if (error != 0) -+ return(error); -+ -+ /* Attempt the downgrade. Filesystems which support rights but not -+ the downgrading of rights will return ENOSYS. -+ */ -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->downgrade_right(tdp->td_ip, tdp->td_right, -+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0)); -+ -+ /* The tdp is single-threaded, so no mutex lock needed for update. */ -+ -+ if (error == 0) -+ tdp->td_right = DM_RIGHT_SHARED; -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_query_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ dm_right_t __user *rightp) -+{ -+ dm_tokdata_t *tdp; -+ dm_right_t right; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY, -+ DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp); -+ if (error != 0) -+ return(error); -+ -+ /* Get the current right and copy it to the caller. The tdp is -+ single-threaded, so no mutex lock is needed. If the tdp is not in -+ the event we are supposed to return DM_RIGHT_NULL in order to be -+ compatible with Veritas. -+ */ -+ -+ if (tdp) { -+ right = tdp->td_right; -+ dm_app_put_tdp(tdp); -+ } else { -+ right = DM_RIGHT_NULL; -+ } -+ if (copy_to_user(rightp, &right, sizeof(right))) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+int -+dm_release_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY, -+ DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->release_right(tdp->td_ip, tdp->td_right, -+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0)); -+ -+ /* The tdp is single-threaded, so no mutex lock needed for update. */ -+ -+ if (error == 0) { -+ tdp->td_right = DM_RIGHT_NULL; -+ if (tdp->td_flags & DM_TDF_RIGHT) { -+ tdp->td_flags &= ~DM_TDF_RIGHT; -+ iput(tdp->td_ip); -+ tdp->td_vcount--; -+ } -+ } -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_request_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token, -+ u_int flags, -+ dm_right_t right) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY, -+ DM_RIGHT_NULL, DM_FG_STHREAD, &tdp); -+ if (error != 0) -+ return(error); -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->request_right(tdp->td_ip, tdp->td_right, -+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0), flags, right); -+ -+ /* The tdp is single-threaded, so no mutex lock is needed for update. -+ -+ If this is the first dm_request_right call for this inode, then we -+ need to bump the inode reference count for two reasons. 
First of -+ all, it is supposed to be impossible for the file to disappear or -+ for the filesystem to be unmounted while a right is held on a file; -+ bumping the file's inode reference count ensures this. Second, if -+ rights are ever actually implemented, it will most likely be done -+ without changes to the on-disk inode, which means that we can't let -+ the inode become unreferenced while a right on it is held. -+ */ -+ -+ if (error == 0) { -+ if (!(tdp->td_flags & DM_TDF_RIGHT)) { /* if first call */ -+ tdp->td_flags |= DM_TDF_RIGHT; -+ tdp->td_vcount++; -+ (void)fsys_vector->obj_ref_hold(tdp->td_ip); -+ } -+ tdp->td_right = right; -+ } -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} -+ -+ -+int -+dm_upgrade_right( -+ dm_sessid_t sid, -+ void __user *hanp, -+ size_t hlen, -+ dm_token_t token) -+{ -+ dm_fsys_vector_t *fsys_vector; -+ dm_tokdata_t *tdp; -+ int error; -+ -+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY, -+ DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp); -+ if (error != 0) -+ return(error); -+ -+ /* If the object already has the DM_RIGHT_EXCL right, no need to -+ attempt an upgrade. -+ */ -+ -+ if (tdp->td_right == DM_RIGHT_EXCL) { -+ dm_app_put_tdp(tdp); -+ return(0); -+ } -+ -+ /* Attempt the upgrade. Filesystems which support rights but not -+ the upgrading of rights will return ENOSYS. -+ */ -+ -+ fsys_vector = dm_fsys_vector(tdp->td_ip); -+ error = fsys_vector->upgrade_right(tdp->td_ip, tdp->td_right, -+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0)); -+ -+ /* The tdp is single-threaded, so no mutex lock needed for update. */ -+ -+ if (error == 0) -+ tdp->td_right = DM_RIGHT_EXCL; -+ -+ dm_app_put_tdp(tdp); -+ return(error); -+} ---- /dev/null -+++ b/fs/dmapi/dmapi_session.c -@@ -0,0 +1,1824 @@ -+/* -+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
-+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+#include -+#include -+#include -+#ifdef CONFIG_PROC_FS -+#include -+#endif -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+dm_session_t *dm_sessions = NULL; /* head of session list */ -+u_int dm_sessions_active = 0; /* # sessions currently active */ -+dm_sessid_t dm_next_sessid = 1; /* next session ID to use */ -+lock_t dm_session_lock = SPIN_LOCK_UNLOCKED;/* lock for session list */ -+ -+dm_token_t dm_next_token = 1; /* next token ID to use */ -+dm_sequence_t dm_next_sequence = 1; /* next sequence number to use */ -+lock_t dm_token_lock = SPIN_LOCK_UNLOCKED;/* dm_next_token/dm_next_sequence lock */ -+ -+int dm_max_queued_msgs = 2048; /* max # undelivered msgs/session */ -+ -+int dm_hash_buckets = 1009; /* prime -- number of buckets */ -+ -+#define DM_SHASH(sess,inodenum) \ -+ ((sess)->sn_sesshash + do_mod((inodenum), dm_hash_buckets)) -+ -+ -+#ifdef CONFIG_PROC_FS -+static int -+sessions_read_pfs(char *buffer, char **start, off_t offset, -+ int count, int *eof, void *data) -+{ -+ int len; -+ dm_session_t *sessp = (dm_session_t*)data; -+ -+#define CHKFULL if(len >= count) break; -+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL; -+ -+ len=0; -+ while(1){ -+ ADDBUF("sessp=0x%p\n", sessp); -+ ADDBUF("sn_next=0x%p\n", sessp->sn_next); -+ ADDBUF("sn_sessid=%d\n", sessp->sn_sessid); -+ ADDBUF("sn_flags=%x\n", sessp->sn_flags); -+ ADDBUF("sn_qlock=%c\n", '?'); -+ ADDBUF("sn_readerq=%c\n", '?'); -+ ADDBUF("sn_writerq=%c\n", '?'); -+ ADDBUF("sn_readercnt=%u\n", sessp->sn_readercnt); -+ ADDBUF("sn_writercnt=%u\n", sessp->sn_writercnt); -+ -+ ADDBUF("sn_newq.eq_head=0x%p\n", sessp->sn_newq.eq_head); -+ ADDBUF("sn_newq.eq_tail=0x%p\n", sessp->sn_newq.eq_tail); -+ ADDBUF("sn_newq.eq_count=%d\n", sessp->sn_newq.eq_count); -+ -+ ADDBUF("sn_delq.eq_head=0x%p\n", sessp->sn_delq.eq_head); -+ ADDBUF("sn_delq.eq_tail=0x%p\n", sessp->sn_delq.eq_tail); -+ ADDBUF("sn_delq.eq_count=%d\n", sessp->sn_delq.eq_count); -+ -+ ADDBUF("sn_evt_writerq.eq_head=0x%p\n", sessp->sn_evt_writerq.eq_head); -+ ADDBUF("sn_evt_writerq.eq_tail=0x%p\n", sessp->sn_evt_writerq.eq_tail); -+ ADDBUF("sn_evt_writerq.eq_count=%d\n", sessp->sn_evt_writerq.eq_count); -+ -+ ADDBUF("sn_info=\"%s\"\n", sessp->sn_info); -+ -+ break; -+ } -+ -+ if (offset >= len) { -+ *start = buffer; -+ *eof = 1; -+ return 0; -+ } -+ *start = buffer + offset; -+ if ((len -= offset) > count) -+ return count; -+ *eof = 1; -+ -+ return len; -+} -+#endif -+ -+ -+/* Link a session to the end of the session list. New sessions are always -+ added at the end of the list so that dm_enqueue_mount_event() doesn't -+ miss a session. The caller must have obtained dm_session_lock before -+ calling this routine. -+*/ -+ -+static void -+link_session( -+ dm_session_t *s) -+{ -+ dm_session_t *tmp; -+ -+ if ((tmp = dm_sessions) == NULL) { -+ dm_sessions = s; -+ } else { -+ while (tmp->sn_next != NULL) -+ tmp = tmp->sn_next; -+ tmp->sn_next = s; -+ } -+ s->sn_next = NULL; -+ dm_sessions_active++; -+} -+ -+ -+/* Remove a session from the session list. The caller must have obtained -+ dm_session_lock before calling this routine. 
unlink_session() should only -+ be used in situations where the session is known to be on the dm_sessions -+ list; otherwise it panics. -+*/ -+ -+static void -+unlink_session( -+ dm_session_t *s) -+{ -+ dm_session_t *tmp; -+ -+ if (dm_sessions == s) { -+ dm_sessions = dm_sessions->sn_next; -+ } else { -+ for (tmp = dm_sessions; tmp; tmp = tmp->sn_next) { -+ if (tmp->sn_next == s) -+ break; -+ } -+ if (tmp == NULL) { -+ panic("unlink_session: corrupt DMAPI session list, " -+ "dm_sessions %p, session %p\n", -+ dm_sessions, s); -+ } -+ tmp->sn_next = s->sn_next; -+ } -+ s->sn_next = NULL; -+ dm_sessions_active--; -+} -+ -+ -+/* Link an event to the end of an event queue. The caller must have obtained -+ the session's sn_qlock before calling this routine. -+*/ -+ -+void -+dm_link_event( -+ dm_tokevent_t *tevp, -+ dm_eventq_t *queue) -+{ -+ if (queue->eq_tail) { -+ queue->eq_tail->te_next = tevp; -+ queue->eq_tail = tevp; -+ } else { -+ queue->eq_head = queue->eq_tail = tevp; -+ } -+ tevp->te_next = NULL; -+ queue->eq_count++; -+} -+ -+ -+/* Remove an event from an event queue. The caller must have obtained the -+ session's sn_qlock before calling this routine. dm_unlink_event() should -+ only be used in situations where the event is known to be on the queue; -+ otherwise it panics. -+*/ -+ -+void -+dm_unlink_event( -+ dm_tokevent_t *tevp, -+ dm_eventq_t *queue) -+{ -+ dm_tokevent_t *tmp; -+ -+ if (queue->eq_head == tevp) { -+ queue->eq_head = tevp->te_next; -+ if (queue->eq_head == NULL) -+ queue->eq_tail = NULL; -+ } else { -+ tmp = queue->eq_head; -+ while (tmp && tmp->te_next != tevp) -+ tmp = tmp->te_next; -+ if (tmp == NULL) { -+ panic("dm_unlink_event: corrupt DMAPI queue %p, " -+ "tevp %p\n", queue, tevp); -+ } -+ tmp->te_next = tevp->te_next; -+ if (tmp->te_next == NULL) -+ queue->eq_tail = tmp; -+ } -+ tevp->te_next = NULL; -+ queue->eq_count--; -+} -+ -+/* Link a regular file event to a hash bucket. The caller must have obtained -+ the session's sn_qlock before calling this routine. -+ The tokevent must be for a regular file object--DM_TDT_REG. -+*/ -+ -+static void -+hash_event( -+ dm_session_t *s, -+ dm_tokevent_t *tevp) -+{ -+ dm_sesshash_t *sh; -+ dm_ino_t ino; -+ -+ if (s->sn_sesshash == NULL) { -+ s->sn_sesshash = kmalloc(dm_hash_buckets * sizeof(dm_sesshash_t), GFP_KERNEL); -+ if (s->sn_sesshash == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return; -+ } -+ memset(s->sn_sesshash, 0, dm_hash_buckets * sizeof(dm_sesshash_t)); -+ } -+ -+ ino = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino; -+ sh = DM_SHASH(s, ino); -+ -+#ifdef DM_SHASH_DEBUG -+ if (sh->h_next == NULL) { -+ s->sn_buckets_in_use++; -+ if (s->sn_buckets_in_use > s->sn_max_buckets_in_use) -+ s->sn_max_buckets_in_use++; -+ } -+ sh->maxlength++; -+ sh->curlength++; -+ sh->num_adds++; -+#endif -+ -+ tevp->te_flags |= DM_TEF_HASHED; -+ tevp->te_hashnext = sh->h_next; -+ sh->h_next = tevp; -+} -+ -+ -+/* Remove a regular file event from a hash bucket. The caller must have -+ obtained the session's sn_qlock before calling this routine. -+ The tokevent must be for a regular file object--DM_TDT_REG. 
-+*/ -+ -+static void -+unhash_event( -+ dm_session_t *s, -+ dm_tokevent_t *tevp) -+{ -+ dm_sesshash_t *sh; -+ dm_tokevent_t *tmp; -+ dm_ino_t ino; -+ -+ if (s->sn_sesshash == NULL) -+ return; -+ -+ ino = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino; -+ sh = DM_SHASH(s, ino); -+ -+ if (sh->h_next == tevp) { -+ sh->h_next = tevp->te_hashnext; /* leap frog */ -+ } else { -+ tmp = sh->h_next; -+ while (tmp->te_hashnext != tevp) { -+ tmp = tmp->te_hashnext; -+ } -+ tmp->te_hashnext = tevp->te_hashnext; /* leap frog */ -+ } -+ tevp->te_hashnext = NULL; -+ tevp->te_flags &= ~DM_TEF_HASHED; -+ -+#ifdef DM_SHASH_DEBUG -+ if (sh->h_next == NULL) -+ s->sn_buckets_in_use--; -+ sh->curlength--; -+ sh->num_dels++; -+#endif -+} -+ -+ -+/* Determine if this is a repeat event. The caller MUST be holding -+ the session lock. -+ The tokevent must be for a regular file object--DM_TDT_REG. -+ Returns: -+ 0 == match not found -+ 1 == match found -+*/ -+ -+static int -+repeated_event( -+ dm_session_t *s, -+ dm_tokevent_t *tevp) -+{ -+ dm_sesshash_t *sh; -+ dm_data_event_t *d_event1; -+ dm_data_event_t *d_event2; -+ dm_tokevent_t *tevph; -+ dm_ino_t ino1; -+ dm_ino_t ino2; -+ -+ if ((!s->sn_newq.eq_tail) && (!s->sn_delq.eq_tail)) { -+ return(0); -+ } -+ if (s->sn_sesshash == NULL) { -+ return(0); -+ } -+ -+ ino1 = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino; -+ sh = DM_SHASH(s, ino1); -+ -+ if (sh->h_next == NULL) { -+ /* bucket is empty, no match here */ -+ return(0); -+ } -+ -+ d_event1 = (dm_data_event_t *)((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset); -+ tevph = sh->h_next; -+ while (tevph) { -+ /* find something with the same event type and handle type */ -+ if ((tevph->te_msg.ev_type == tevp->te_msg.ev_type) && -+ (tevph->te_tdp->td_type == tevp->te_tdp->td_type)) { -+ -+ ino2 = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino; -+ d_event2 = (dm_data_event_t *)((char *)&tevph->te_msg + tevph->te_msg.ev_data.vd_offset); -+ -+ /* If the two events are operating on the same file, -+ and the same part of that file, then we have a -+ match. -+ */ -+ if ((ino1 == ino2) && -+ (d_event2->de_offset == d_event1->de_offset) && -+ (d_event2->de_length == d_event1->de_length)) { -+ /* found a match */ -+#ifdef DM_SHASH_DEBUG -+ sh->dup_hits++; -+#endif -+ return(1); -+ } -+ } -+ tevph = tevph->te_hashnext; -+ } -+ -+ /* No match found */ -+ return(0); -+} -+ -+ -+/* Return a pointer to a session given its session ID, or EINVAL if no session -+ has the session ID (per the DMAPI spec). The caller must have obtained -+ dm_session_lock before calling this routine. -+*/ -+ -+static int -+dm_find_session( -+ dm_sessid_t sid, -+ dm_session_t **sessionpp) -+{ -+ dm_session_t *s; -+ -+ for (s = dm_sessions; s; s = s->sn_next) { -+ if (s->sn_sessid == sid) { -+ *sessionpp = s; -+ return(0); -+ } -+ } -+ return(-EINVAL); -+} -+ -+ -+/* Return a pointer to a locked session given its session ID. '*lcp' is -+ used to obtain the session's sn_qlock. Caller is responsible for eventually -+ unlocking it. 
-+*/ -+ -+int -+dm_find_session_and_lock( -+ dm_sessid_t sid, -+ dm_session_t **sessionpp, -+ unsigned long *lcp) /* addr of returned lock cookie */ -+{ -+ int error; -+ -+ for (;;) { -+ *lcp = mutex_spinlock(&dm_session_lock); -+ -+ if ((error = dm_find_session(sid, sessionpp)) != 0) { -+ mutex_spinunlock(&dm_session_lock, *lcp); -+ return(error); -+ } -+ if (spin_trylock(&(*sessionpp)->sn_qlock)) { -+ nested_spinunlock(&dm_session_lock); -+ return(0); /* success */ -+ } -+ -+ /* If the second lock is not available, drop the first and -+ start over. This gives the CPU a chance to process any -+ interrupts, and also allows processes which want a sn_qlock -+ for a different session to proceed. -+ */ -+ -+ mutex_spinunlock(&dm_session_lock, *lcp); -+ } -+} -+ -+ -+/* Return a pointer to the event on the specified session's sn_delq which -+ contains the given token. The caller must have obtained the session's -+ sn_qlock before calling this routine. -+*/ -+ -+static int -+dm_find_msg( -+ dm_session_t *s, -+ dm_token_t token, -+ dm_tokevent_t **tevpp) -+{ -+ dm_tokevent_t *tevp; -+ -+ if (token <= DM_INVALID_TOKEN) -+ return(-EINVAL); -+ -+ for (tevp = s->sn_delq.eq_head; tevp; tevp = tevp->te_next) { -+ if (tevp->te_msg.ev_token == token) { -+ *tevpp = tevp; -+ return(0); -+ } -+ } -+ return(-ESRCH); -+} -+ -+ -+/* Given a session ID and token, find the tevp on the specified session's -+ sn_delq which corresponds to that session ID/token pair. If a match is -+ found, lock the tevp's te_lock and return a pointer to the tevp. -+ '*lcp' is used to obtain the tevp's te_lock. The caller is responsible -+ for eventually unlocking it. -+*/ -+ -+int -+dm_find_msg_and_lock( -+ dm_sessid_t sid, -+ dm_token_t token, -+ dm_tokevent_t **tevpp, -+ unsigned long *lcp) /* address of returned lock cookie */ -+{ -+ dm_session_t *s; -+ int error; -+ -+ if ((error = dm_find_session_and_lock(sid, &s, lcp)) != 0) -+ return(error); -+ -+ if ((error = dm_find_msg(s, token, tevpp)) != 0) { -+ mutex_spinunlock(&s->sn_qlock, *lcp); -+ return(error); -+ } -+ nested_spinlock(&(*tevpp)->te_lock); -+ nested_spinunlock(&s->sn_qlock); -+ return(0); -+} -+ -+ -+/* Create a new session, or resume an old session if one is given. 
*/ -+ -+int -+dm_create_session( -+ dm_sessid_t old, -+ char __user *info, -+ dm_sessid_t __user *new) -+{ -+ dm_session_t *s; -+ dm_sessid_t sid; -+ char sessinfo[DM_SESSION_INFO_LEN]; -+ size_t len; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ len = strnlen_user(info, DM_SESSION_INFO_LEN-1); -+ if (copy_from_user(sessinfo, info, len)) -+ return(-EFAULT); -+ lc = mutex_spinlock(&dm_session_lock); -+ sid = dm_next_sessid++; -+ mutex_spinunlock(&dm_session_lock, lc); -+ if (copy_to_user(new, &sid, sizeof(sid))) -+ return(-EFAULT); -+ -+ if (old == DM_NO_SESSION) { -+ s = kmem_cache_alloc(dm_session_cachep, GFP_KERNEL); -+ if (s == NULL) { -+ printk("%s/%d: kmem_cache_alloc(dm_session_cachep) returned NULL\n", __FUNCTION__, __LINE__); -+ return -ENOMEM; -+ } -+ memset(s, 0, sizeof(*s)); -+ -+ sv_init(&s->sn_readerq, SV_DEFAULT, "dmreadq"); -+ sv_init(&s->sn_writerq, SV_DEFAULT, "dmwritq"); -+ spinlock_init(&s->sn_qlock, "sn_qlock"); -+ } else { -+ lc = mutex_spinlock(&dm_session_lock); -+ if ((error = dm_find_session(old, &s)) != 0) { -+ mutex_spinunlock(&dm_session_lock, lc); -+ return(error); -+ } -+ unlink_session(s); -+ mutex_spinunlock(&dm_session_lock, lc); -+#ifdef CONFIG_PROC_FS -+ { -+ char buf[100]; -+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s); -+ remove_proc_entry(buf, NULL); -+ } -+#endif -+ } -+ memcpy(s->sn_info, sessinfo, len); -+ s->sn_info[len-1] = 0; /* if not NULL, then now 'tis */ -+ s->sn_sessid = sid; -+ lc = mutex_spinlock(&dm_session_lock); -+ link_session(s); -+ mutex_spinunlock(&dm_session_lock, lc); -+#ifdef CONFIG_PROC_FS -+ { -+ char buf[100]; -+ struct proc_dir_entry *entry; -+ -+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s); -+ entry = create_proc_read_entry(buf, 0, NULL, sessions_read_pfs, s); -+ } -+#endif -+ return(0); -+} -+ -+ -+int -+dm_destroy_session( -+ dm_sessid_t sid) -+{ -+ dm_session_t *s; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ /* The dm_session_lock must be held until the session is unlinked. */ -+ -+ lc = mutex_spinlock(&dm_session_lock); -+ -+ if ((error = dm_find_session(sid, &s)) != 0) { -+ mutex_spinunlock(&dm_session_lock, lc); -+ return(error); -+ } -+ nested_spinlock(&s->sn_qlock); -+ -+ /* The session exists. Check to see if it is still in use. If any -+ messages still exist on the sn_newq or sn_delq, or if any processes -+ are waiting for messages to arrive on the session, then the session -+ must not be destroyed. -+ */ -+ -+ if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) { -+ nested_spinunlock(&s->sn_qlock); -+ mutex_spinunlock(&dm_session_lock, lc); -+ return(-EBUSY); -+ } -+ -+ /* The session is not in use. Dequeue it from the session chain. */ -+ -+ unlink_session(s); -+ nested_spinunlock(&s->sn_qlock); -+ mutex_spinunlock(&dm_session_lock, lc); -+ -+#ifdef CONFIG_PROC_FS -+ { -+ char buf[100]; -+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s); -+ remove_proc_entry(buf, NULL); -+ } -+#endif -+ -+ /* Now clear the sessions's disposition registration, and then destroy -+ the session structure. -+ */ -+ -+ dm_clear_fsreg(s); -+ -+ spinlock_destroy(&s->sn_qlock); -+ sv_destroy(&s->sn_readerq); -+ sv_destroy(&s->sn_writerq); -+ if (s->sn_sesshash) -+ kfree(s->sn_sesshash); -+ kmem_cache_free(dm_session_cachep, s); -+ return(0); -+} -+ -+ -+/* -+ * Return a list of all active sessions. 
-+ */ -+ -+int -+dm_getall_sessions( -+ u_int nelem, -+ dm_sessid_t __user *sidp, -+ u_int __user *nelemp) -+{ -+ dm_session_t *s; -+ u_int sesscnt; -+ dm_sessid_t *sesslist; -+ unsigned long lc; /* lock cookie */ -+ int error; -+ int i; -+ -+ /* Loop until we can get the right amount of temp space, being careful -+ not to hold a mutex during the allocation. Usually only one trip. -+ */ -+ -+ for (;;) { -+ if ((sesscnt = dm_sessions_active) == 0) { -+ /*if (suword(nelemp, 0))*/ -+ if (put_user(0, nelemp)) -+ return(-EFAULT); -+ return(0); -+ } -+ sesslist = kmalloc(sesscnt * sizeof(*sidp), GFP_KERNEL); -+ if (sesslist == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return -ENOMEM; -+ } -+ -+ lc = mutex_spinlock(&dm_session_lock); -+ if (sesscnt == dm_sessions_active) -+ break; -+ -+ mutex_spinunlock(&dm_session_lock, lc); -+ kfree(sesslist); -+ } -+ -+ /* Make a temp copy of the data, then release the mutex. */ -+ -+ for (i = 0, s = dm_sessions; i < sesscnt; i++, s = s->sn_next) -+ sesslist[i] = s->sn_sessid; -+ -+ mutex_spinunlock(&dm_session_lock, lc); -+ -+ /* Now copy the data to the user. */ -+ -+ if(put_user(sesscnt, nelemp)) { -+ error = -EFAULT; -+ } else if (sesscnt > nelem) { -+ error = -E2BIG; -+ } else if (copy_to_user(sidp, sesslist, sesscnt * sizeof(*sidp))) { -+ error = -EFAULT; -+ } else { -+ error = 0; -+ } -+ kfree(sesslist); -+ return(error); -+} -+ -+ -+/* -+ * Return the descriptive string associated with a session. -+ */ -+ -+int -+dm_query_session( -+ dm_sessid_t sid, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_session_t *s; /* pointer to session given by sid */ -+ int len; /* length of session info string */ -+ int error; -+ char sessinfo[DM_SESSION_INFO_LEN]; -+ unsigned long lc; /* lock cookie */ -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) -+ return(error); -+ -+ len = strlen(s->sn_info) + 1; /* NULL terminated when created */ -+ memcpy(sessinfo, s->sn_info, len); -+ -+ mutex_spinunlock(&s->sn_qlock, lc); -+ -+ /* Now that the mutex is released, copy the sessinfo to the user. */ -+ -+ if (put_user(len, rlenp)) { -+ error = -EFAULT; -+ } else if (len > buflen) { -+ error = -E2BIG; -+ } else if (copy_to_user(bufp, sessinfo, len)) { -+ error = -EFAULT; -+ } else { -+ error = 0; -+ } -+ return(error); -+} -+ -+ -+/* -+ * Return all of the previously delivered tokens (that is, their IDs) -+ * for the given session. -+ */ -+ -+int -+dm_getall_tokens( -+ dm_sessid_t sid, /* session obtaining tokens from */ -+ u_int nelem, /* size of tokenbufp */ -+ dm_token_t __user *tokenbufp,/* buffer to copy token IDs to */ -+ u_int __user *nelemp) /* return number copied to tokenbufp */ -+{ -+ dm_session_t *s; /* pointer to session given by sid */ -+ dm_tokevent_t *tevp; /* event message queue traversal */ -+ unsigned long lc; /* lock cookie */ -+ int tokcnt; -+ dm_token_t *toklist; -+ int error; -+ int i; -+ -+ /* Loop until we can get the right amount of temp space, being careful -+ not to hold a mutex during the allocation. Usually only one trip. 
-+ */ -+ -+ for (;;) { -+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) -+ return(error); -+ tokcnt = s->sn_delq.eq_count; -+ mutex_spinunlock(&s->sn_qlock, lc); -+ -+ if (tokcnt == 0) { -+ /*if (suword(nelemp, 0))*/ -+ if (put_user(0, nelemp)) -+ return(-EFAULT); -+ return(0); -+ } -+ toklist = kmalloc(tokcnt * sizeof(*tokenbufp), GFP_KERNEL); -+ if (toklist == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return -ENOMEM; -+ } -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) { -+ kfree(toklist); -+ return(error); -+ } -+ -+ if (tokcnt == s->sn_delq.eq_count) -+ break; -+ -+ mutex_spinunlock(&s->sn_qlock, lc); -+ kfree(toklist); -+ } -+ -+ /* Make a temp copy of the data, then release the mutex. */ -+ -+ tevp = s->sn_delq.eq_head; -+ for (i = 0; i < tokcnt; i++, tevp = tevp->te_next) -+ toklist[i] = tevp->te_msg.ev_token; -+ -+ mutex_spinunlock(&s->sn_qlock, lc); -+ -+ /* Now copy the data to the user. */ -+ -+ if (put_user(tokcnt, nelemp)) { -+ error = -EFAULT; -+ } else if (tokcnt > nelem) { -+ error = -E2BIG; -+ } else if (copy_to_user(tokenbufp,toklist,tokcnt*sizeof(*tokenbufp))) { -+ error = -EFAULT; -+ } else { -+ error = 0; -+ } -+ kfree(toklist); -+ return(error); -+} -+ -+ -+/* -+ * Return the message identified by token. -+ */ -+ -+int -+dm_find_eventmsg( -+ dm_sessid_t sid, -+ dm_token_t token, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_tokevent_t *tevp; /* message identified by token */ -+ int msgsize; /* size of message to copy out */ -+ void *msg; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ /* Because some of the events (dm_data_event_t in particular) contain -+ __u64 fields, we need to make sure that the buffer provided by the -+ caller is aligned such that he can read those fields successfully. -+ */ -+ -+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0) -+ return(-EFAULT); -+ -+ /* Allocate the right amount of temp space, being careful not to hold -+ a mutex during the allocation. -+ */ -+ -+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0) -+ return(error); -+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg); -+ mutex_spinunlock(&tevp->te_lock, lc); -+ -+ msg = kmalloc(msgsize, GFP_KERNEL); -+ if (msg == NULL) { -+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__); -+ return -ENOMEM; -+ } -+ -+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0) { -+ kfree(msg); -+ return(error); -+ } -+ -+ /* Make a temp copy of the data, then release the mutex. */ -+ -+ memcpy(msg, &tevp->te_msg, msgsize); -+ mutex_spinunlock(&tevp->te_lock, lc); -+ -+ /* Now copy the data to the user. 
*/ -+ -+ if (put_user(msgsize,rlenp)) { -+ error = -EFAULT; -+ } else if (msgsize > buflen) { /* user buffer not big enough */ -+ error = -E2BIG; -+ } else if (copy_to_user( bufp, msg, msgsize )) { -+ error = -EFAULT; -+ } else { -+ error = 0; -+ } -+ kfree(msg); -+ return(error); -+} -+ -+ -+int -+dm_move_event( -+ dm_sessid_t srcsid, -+ dm_token_t token, -+ dm_sessid_t targetsid, -+ dm_token_t __user *rtokenp) -+{ -+ dm_session_t *s1; -+ dm_session_t *s2; -+ dm_tokevent_t *tevp; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ int hash_it = 0; -+ -+ lc = mutex_spinlock(&dm_session_lock); -+ -+ if ((error = dm_find_session(srcsid, &s1)) != 0 || -+ (error = dm_find_session(targetsid, &s2)) != 0 || -+ (error = dm_find_msg(s1, token, &tevp)) != 0) { -+ mutex_spinunlock(&dm_session_lock, lc); -+ return(error); -+ } -+ dm_unlink_event(tevp, &s1->sn_delq); -+ if (tevp->te_flags & DM_TEF_HASHED) { -+ unhash_event(s1, tevp); -+ hash_it = 1; -+ } -+ dm_link_event(tevp, &s2->sn_delq); -+ if (hash_it) -+ hash_event(s2, tevp); -+ mutex_spinunlock(&dm_session_lock, lc); -+ -+ if (copy_to_user(rtokenp, &token, sizeof(token))) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+/* ARGSUSED */ -+int -+dm_pending( -+ dm_sessid_t sid, -+ dm_token_t token, -+ dm_timestruct_t __user *delay) /* unused */ -+{ -+ dm_tokevent_t *tevp; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0) -+ return(error); -+ -+ tevp->te_flags |= DM_TEF_INTERMED; -+ if (tevp->te_evt_ref > 0) /* if event generation threads exist */ -+ sv_broadcast(&tevp->te_evt_queue); -+ -+ mutex_spinunlock(&tevp->te_lock, lc); -+ return(0); -+} -+ -+ -+int -+dm_get_events( -+ dm_sessid_t sid, -+ u_int maxmsgs, -+ u_int flags, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_session_t *s; /* pointer to session given by sid */ -+ dm_tokevent_t *tevp; /* next event message on queue */ -+ int error; -+ unsigned long lc1; /* first lock cookie */ -+ unsigned long lc2 = 0; /* second lock cookie */ -+ int totalsize; -+ int msgsize; -+ dm_eventmsg_t __user *prevmsg; -+ int prev_msgsize = 0; -+ u_int msgcnt; -+ -+ /* Because some of the events (dm_data_event_t in particular) contain -+ __u64 fields, we need to make sure that the buffer provided by the -+ caller is aligned such that he can read those fields successfully. -+ */ -+ -+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0) -+ return(-EFAULT); -+ -+ /* Find the indicated session and lock it. */ -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0) -+ return(error); -+ -+ /* Check for messages on sn_newq. If there aren't any that haven't -+ already been grabbed by another process, and if we are supposed to -+ to wait until one shows up, then go to sleep interruptibly on the -+ sn_readerq semaphore. The session can't disappear out from under -+ us as long as sn_readerq is non-zero. -+ */ -+ -+ for (;;) { -+ int rc; -+ -+ for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) { -+ lc2 = mutex_spinlock(&tevp->te_lock); -+ if (!(tevp->te_flags & DM_TEF_LOCKED)) -+ break; -+ mutex_spinunlock(&tevp->te_lock, lc2); -+ } -+ if (tevp) -+ break; /* got one! 
*/ -+ -+ if (!(flags & DM_EV_WAIT)) { -+ mutex_spinunlock(&s->sn_qlock, lc1); -+ return(-EAGAIN); -+ } -+ s->sn_readercnt++; -+ -+ sv_wait_sig(&s->sn_readerq, 1, &s->sn_qlock, lc1); -+ rc = signal_pending(current); -+ -+ lc1 = mutex_spinlock(&s->sn_qlock); -+ s->sn_readercnt--; -+ if (rc) { /* if signal was received */ -+ mutex_spinunlock(&s->sn_qlock, lc1); -+ return(-EINTR); -+ } -+ } -+ -+ /* At least one message is available for delivery, and we have both the -+ session lock and event lock. Mark the event so that it is not -+ grabbed by other daemons, then drop both locks prior copying the -+ data to the caller's buffer. Leaving the event on the queue in a -+ marked state prevents both the session and the event from -+ disappearing out from under us while we don't have the locks. -+ */ -+ -+ tevp->te_flags |= DM_TEF_LOCKED; -+ mutex_spinunlock(&tevp->te_lock, lc2); /* reverse cookie order */ -+ mutex_spinunlock(&s->sn_qlock, lc1); -+ -+ /* Continue to deliver messages until there are no more, the -+ user's buffer becomes full, or we hit his maxmsgs limit. -+ */ -+ -+ totalsize = 0; /* total bytes transferred to the user */ -+ prevmsg = NULL; -+ msgcnt = 0; -+ -+ while (tevp) { -+ /* Compute the number of bytes to be moved, rounding up to an -+ 8-byte boundary so that any subsequent messages will also be -+ aligned. -+ */ -+ -+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg); -+ msgsize = (msgsize + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1); -+ totalsize += msgsize; -+ -+ /* If it fits, copy the message into the user's buffer and -+ update his 'rlenp'. Update the _link pointer for any -+ previous message. -+ */ -+ -+ if (totalsize > buflen) { /* no more room */ -+ error = -E2BIG; -+ } else if (put_user(totalsize, rlenp)) { -+ error = -EFAULT; -+ } else if (copy_to_user(bufp, &tevp->te_msg, msgsize)) { -+ error = -EFAULT; -+ } else if (prevmsg && put_user(prev_msgsize, &prevmsg->_link)) { -+ error = -EFAULT; -+ } else { -+ error = 0; -+ } -+ -+ /* If an error occurred, just unmark the event and leave it on -+ the queue for someone else. Note that other daemons may -+ have gone to sleep because this event was marked, so wake -+ them up. Also, if at least one message has already been -+ delivered, then an error here is not really an error. -+ */ -+ -+ lc1 = mutex_spinlock(&s->sn_qlock); -+ lc2 = mutex_spinlock(&tevp->te_lock); -+ tevp->te_flags &= ~DM_TEF_LOCKED; /* drop the mark */ -+ -+ if (error) { -+ if (s->sn_readercnt) -+ sv_signal(&s->sn_readerq); -+ -+ mutex_spinunlock(&tevp->te_lock, lc2); /* rev. order */ -+ mutex_spinunlock(&s->sn_qlock, lc1); -+ if (prevmsg) -+ return(0); -+ if (error == -E2BIG && put_user(totalsize,rlenp)) -+ error = -EFAULT; -+ return(error); -+ } -+ -+ /* The message was successfully delivered. Unqueue it. */ -+ -+ dm_unlink_event(tevp, &s->sn_newq); -+ -+ /* Wake up the first of any processes waiting for room on the -+ sn_newq. -+ */ -+ -+ if (s->sn_writercnt) -+ sv_signal(&s->sn_writerq); -+ -+ /* If the message is synchronous, add it to the sn_delq while -+ still holding the lock. If it is asynchronous, free it. -+ */ -+ -+ if (tevp->te_msg.ev_token != DM_INVALID_TOKEN) { /* synch */ -+ dm_link_event(tevp, &s->sn_delq); -+ mutex_spinunlock(&tevp->te_lock, lc2); -+ } else { -+ tevp->te_flags |= DM_TEF_FINAL; -+ if (tevp->te_flags & DM_TEF_HASHED) -+ unhash_event(s, tevp); -+ mutex_spinunlock(&tevp->te_lock, lc2); -+ dm_put_tevp(tevp, NULL);/* can't cause destroy events */ -+ } -+ -+ /* Update our notion of where we are in the user's buffer. 
If -+ he doesn't want any more messages, then stop. -+ */ -+ -+ prevmsg = (dm_eventmsg_t __user *)bufp; -+ prev_msgsize = msgsize; -+ bufp = (char __user *)bufp + msgsize; -+ -+ msgcnt++; -+ if (maxmsgs && msgcnt >= maxmsgs) { -+ mutex_spinunlock(&s->sn_qlock, lc1); -+ break; -+ } -+ -+ /* While still holding the sn_qlock, see if any additional -+ messages are available for delivery. -+ */ -+ -+ for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) { -+ lc2 = mutex_spinlock(&tevp->te_lock); -+ if (!(tevp->te_flags & DM_TEF_LOCKED)) { -+ tevp->te_flags |= DM_TEF_LOCKED; -+ mutex_spinunlock(&tevp->te_lock, lc2); -+ break; -+ } -+ mutex_spinunlock(&tevp->te_lock, lc2); -+ } -+ mutex_spinunlock(&s->sn_qlock, lc1); -+ } -+ return(0); -+} -+ -+ -+/* -+ * Remove an event message from the delivered queue, set the returned -+ * error where the event generator wants it, and wake up the generator. -+ * Also currently have the user side release any locks it holds... -+ */ -+ -+/* ARGSUSED */ -+int -+dm_respond_event( -+ dm_sessid_t sid, -+ dm_token_t token, -+ dm_response_t response, -+ int reterror, -+ size_t buflen, /* unused */ -+ void __user *respbufp) /* unused */ -+{ -+ dm_session_t *s; /* pointer to session given by sid */ -+ dm_tokevent_t *tevp; /* event message queue traversal */ -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ /* Sanity check the input parameters. */ -+ -+ switch (response) { -+ case DM_RESP_CONTINUE: /* continue must have reterror == 0 */ -+ if (reterror != 0) -+ return(-EINVAL); -+ break; -+ case DM_RESP_ABORT: /* abort must have errno set */ -+ if (reterror <= 0) -+ return(-EINVAL); -+ break; -+ case DM_RESP_DONTCARE: -+ reterror = -1; /* to distinguish DM_RESP_DONTCARE */ -+ break; -+ default: -+ return(-EINVAL); -+ } -+ -+ /* Hold session lock until the event is unqueued. */ -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) -+ return(error); -+ -+ if ((error = dm_find_msg(s, token, &tevp)) != 0) { -+ mutex_spinunlock(&s->sn_qlock, lc); -+ return(error); -+ } -+ nested_spinlock(&tevp->te_lock); -+ -+ if ((response == DM_RESP_DONTCARE) && -+ (tevp->te_msg.ev_type != DM_EVENT_MOUNT)) { -+ error = -EINVAL; -+ nested_spinunlock(&tevp->te_lock); -+ mutex_spinunlock(&s->sn_qlock, lc); -+ } else { -+ dm_unlink_event(tevp, &s->sn_delq); -+ if (tevp->te_flags & DM_TEF_HASHED) -+ unhash_event(s, tevp); -+ tevp->te_reply = -reterror; /* linux wants negative errno */ -+ tevp->te_flags |= DM_TEF_FINAL; -+ if (tevp->te_evt_ref) -+ sv_broadcast(&tevp->te_evt_queue); -+ nested_spinunlock(&tevp->te_lock); -+ mutex_spinunlock(&s->sn_qlock, lc); -+ error = 0; -+ -+ /* Absolutely no locks can be held when calling dm_put_tevp! */ -+ -+ dm_put_tevp(tevp, NULL); /* this can generate destroy events */ -+ } -+ return(error); -+} -+ -+/* The caller must hold sn_qlock. -+ This will return the tokevent locked. -+ */ -+static dm_tokevent_t * -+__find_match_event_no_waiters_locked( -+ dm_tokevent_t *tevp1, -+ dm_eventq_t *queue) -+{ -+ dm_tokevent_t *tevp2, *next_tevp; -+ dm_tokdata_t *tdp1 = tevp1->te_tdp; -+ dm_tokdata_t *tdp2; -+ dm_data_event_t *d_event1; -+ dm_data_event_t *d_event2; -+ -+ d_event1 = (dm_data_event_t *)((char *)&tevp1->te_msg + tevp1->te_msg.ev_data.vd_offset); -+ -+ for(tevp2 = queue->eq_head; tevp2; tevp2 = next_tevp) { -+ nested_spinlock(&tevp2->te_lock); -+ next_tevp = tevp2->te_next; -+ -+ /* Just compare the first tdp's in each--there should -+ be just one, if it's the match we want. 
-+ */ -+ tdp2 = tevp2->te_tdp; -+ if ((tevp2->te_msg.ev_type == tevp1->te_msg.ev_type) && -+ (tevp2->te_tdp->td_type == tevp1->te_tdp->td_type) && -+ (tevp2->te_evt_ref == 0) && (tdp2->td_next == NULL) && -+ (memcmp(&tdp1->td_handle, &tdp2->td_handle, -+ sizeof(dm_handle_t)) == 0)) { -+ -+ d_event2 = (dm_data_event_t *)((char *)&tevp2->te_msg + tevp2->te_msg.ev_data.vd_offset); -+ -+ -+ if ((d_event2->de_offset == d_event1->de_offset) && -+ (d_event2->de_length == d_event1->de_length)) { -+ /* Match -- return it locked */ -+ return tevp2; -+ } -+ } -+ nested_spinunlock(&tevp2->te_lock); -+ } -+ return NULL; -+} -+ -+/* The caller must hold the sn_qlock. -+ The returned tokevent will be locked with nested_spinlock. -+ */ -+static dm_tokevent_t * -+find_match_event_no_waiters_locked( -+ dm_session_t *s, -+ dm_tokevent_t *tevp) -+{ -+ dm_tokevent_t *tevp2; -+ -+ if ((!s->sn_newq.eq_tail) && (!s->sn_delq.eq_tail)) -+ return NULL; -+ if (!tevp->te_tdp) -+ return NULL; -+ if (tevp->te_tdp->td_next) { -+ /* If it has multiple tdp's then don't bother trying to -+ find a match. -+ */ -+ return NULL; -+ } -+ tevp2 = __find_match_event_no_waiters_locked(tevp, &s->sn_newq); -+ if (tevp2 == NULL) -+ tevp2 = __find_match_event_no_waiters_locked(tevp, &s->sn_delq); -+ /* returns a locked tokevent */ -+ return tevp2; -+} -+ -+ -+ -+/* Queue the filled in event message pointed to by tevp on the session s, and -+ (if a synchronous event) wait for the reply from the DMAPI application. -+ The caller MUST be holding the session lock before calling this routine! -+ The session lock is always released upon exit. -+ Returns: -+ -1 == don't care -+ 0 == success (or async event) -+ > 0 == errno describing reason for failure -+*/ -+ -+static int -+dm_enqueue( -+ dm_session_t *s, -+ unsigned long lc, /* input lock cookie */ -+ dm_tokevent_t **tevpp, /* in/out parameter */ -+ int sync, -+ int flags, -+ int interruptable) -+{ -+ int is_unmount = 0; -+ int is_hashable = 0; -+ int reply; -+ dm_tokevent_t *tevp = *tevpp; -+ -+ /* If the caller isn't planning to stick around for the result -+ and this request is identical to one that is already on the -+ queues then just give the caller an EAGAIN. Release the -+ session lock before returning. -+ -+ We look only at NDELAY requests with an event type of READ, -+ WRITE, or TRUNCATE on objects that are regular files. -+ */ -+ -+ if ((flags & DM_FLAGS_NDELAY) && DM_EVENT_RDWRTRUNC(tevp) && -+ (tevp->te_tdp->td_type == DM_TDT_REG)) { -+ if (repeated_event(s, tevp)) { -+ mutex_spinunlock(&s->sn_qlock, lc); -+ return -EAGAIN; -+ } -+ is_hashable = 1; -+ } -+ -+ /* If the caller is a sync event then look for a matching sync -+ event. If there is a match and it doesn't currently have -+ event threads waiting on it, then we will drop our own -+ tokevent and jump on the matching event. -+ */ -+ if (((flags & DM_FLAGS_NDELAY) == 0) && DM_EVENT_RDWRTRUNC(tevp) && -+ (tevp->te_tdp->td_type == DM_TDT_REG)) { -+ dm_tokevent_t *tevp2; -+ if ((tevp2 = find_match_event_no_waiters_locked(s, tevp))) { -+ ASSERT(tevp2->te_evt_ref == 0); -+ tevp2->te_evt_ref++; -+ nested_spinunlock(&tevp2->te_lock); -+ nested_spinlock(&tevp->te_lock); -+ tevp->te_evt_ref--; -+ nested_spinunlock(&tevp->te_lock); -+ mutex_spinunlock(&s->sn_qlock, lc); -+ /* All locks have been released */ -+ dm_evt_rele_tevp(tevp, 1); -+ *tevpp = tevp = tevp2; -+ goto wait_on_tevp; -+ } -+ } -+ -+ if (tevp->te_msg.ev_type == DM_EVENT_UNMOUNT) -+ is_unmount = 1; -+ -+ /* Check for room on sn_newq. 
If there is no room for new messages, -+ then go to sleep on the sn_writerq semaphore. The -+ session cannot disappear out from under us as long as sn_writercnt -+ is non-zero. -+ */ -+ -+ while (s->sn_newq.eq_count >= dm_max_queued_msgs) { /* no room */ -+ s->sn_writercnt++; -+ dm_link_event(tevp, &s->sn_evt_writerq); -+ if (interruptable) { -+ sv_wait_sig(&s->sn_writerq, 1, &s->sn_qlock, lc); -+ if (signal_pending(current)) { -+ s->sn_writercnt--; -+ return -EINTR; -+ } -+ } else { -+ sv_wait(&s->sn_writerq, 1, &s->sn_qlock, lc); -+ } -+ lc = mutex_spinlock(&s->sn_qlock); -+ s->sn_writercnt--; -+ dm_unlink_event(tevp, &s->sn_evt_writerq); -+#ifdef HAVE_DM_QUEUE_FLUSH -+ /* We hold the sn_qlock, from here to after we get into -+ * the sn_newq. Any thread going through -+ * dm_release_threads() looking for us is already past us -+ * and has set the DM_TEF_FLUSH flag for us or is blocked on -+ * sn_qlock and will find us in sn_newq after we release -+ * the sn_qlock. -+ * We check for dop->flushing anyway, in case the -+ * dm_release_threads() already completed before we -+ * could enter dmapi. -+ */ -+ if (!sync) { -+ /* async events are forced into the newq */ -+ break; -+ } -+ if (tevp->te_flags & DM_TEF_FLUSH) { -+ mutex_spinunlock(&s->sn_qlock, lc); -+ return tevp->te_reply; -+ } -+ else { -+ struct filesystem_dmapi_operations *dops; -+ dm_tokdata_t *tdp; -+ int errno = 0; -+ -+ nested_spinlock(&tevp->te_lock); -+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) { -+ if (tdp->td_ip) { -+ dops = dm_fsys_ops(tdp->td_ip->i_sb); -+ ASSERT(dops); -+ if (dops->flushing) -+ errno = dops->flushing(tdp->td_ip); -+ if (errno) { -+ nested_spinunlock(&tevp->te_lock); -+ mutex_spinunlock(&s->sn_qlock, lc); -+ return errno; -+ } -+ } -+ } -+ nested_spinunlock(&tevp->te_lock); -+ } -+#endif /* HAVE_DM_QUEUE_FLUSH */ -+ } -+ -+ /* Assign a sequence number and token to the event and bump the -+ application reference count by one. We don't need 'te_lock' here -+ because this thread is still the only thread that can see the event. -+ */ -+ -+ nested_spinlock(&dm_token_lock); -+ tevp->te_msg.ev_sequence = dm_next_sequence++; -+ if (sync) { -+ tevp->te_msg.ev_token = dm_next_token++; -+ } else { -+ tevp->te_msg.ev_token = DM_INVALID_TOKEN; -+ } -+ nested_spinunlock(&dm_token_lock); -+ -+ tevp->te_app_ref++; -+ -+ /* Room exists on the sn_newq queue, so add this request. If the -+ queue was previously empty, wake up the first of any processes -+ that are waiting for an event. -+ */ -+ -+ dm_link_event(tevp, &s->sn_newq); -+ if (is_hashable) -+ hash_event(s, tevp); -+ -+ if (s->sn_readercnt) -+ sv_signal(&s->sn_readerq); -+ -+ mutex_spinunlock(&s->sn_qlock, lc); -+ -+ /* Now that the message is queued, processes issuing asynchronous -+ events or DM_EVENT_UNMOUNT events are ready to continue. -+ */ -+ -+ if (!sync || is_unmount) -+ return 0; -+ -+ /* Synchronous requests wait until a final reply is received. If the -+ caller supplied the DM_FLAGS_NDELAY flag, the process will return -+ EAGAIN if dm_pending() sets DM_TEF_INTERMED. We also let users -+ Cntl-C out of a read, write, and truncate requests. 
-+ */ -+ -+wait_on_tevp: -+ lc = mutex_spinlock(&tevp->te_lock); -+ -+ while (!(tevp->te_flags & DM_TEF_FINAL)) { -+ if ((tevp->te_flags & DM_TEF_INTERMED) && -+ (flags & DM_FLAGS_NDELAY)) { -+ mutex_spinunlock(&tevp->te_lock, lc); -+ return -EAGAIN; -+ } -+ if (tevp->te_msg.ev_type == DM_EVENT_READ || -+ tevp->te_msg.ev_type == DM_EVENT_WRITE || -+ tevp->te_msg.ev_type == DM_EVENT_TRUNCATE) { -+ sv_wait_sig(&tevp->te_evt_queue, 1, &tevp->te_lock, lc); -+ if (signal_pending(current)){ -+ return -EINTR; -+ } -+ } else { -+ sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc); -+ } -+ lc = mutex_spinlock(&tevp->te_lock); -+#ifdef HAVE_DM_QUEUE_FLUSH -+ /* Did we pop out because of queue flushing? */ -+ if (tevp->te_flags & DM_TEF_FLUSH) { -+ mutex_spinunlock(&tevp->te_lock, lc); -+ return tevp->te_reply; -+ } -+#endif /* HAVE_DM_QUEUE_FLUSH */ -+ } -+ -+ /* Return both the tevp and the reply which was stored in the tevp by -+ dm_respond_event. The tevp structure has already been removed from -+ the reply queue by this point in dm_respond_event(). -+ */ -+ -+ reply = tevp->te_reply; -+ mutex_spinunlock(&tevp->te_lock, lc); -+ return reply; -+} -+ -+ -+/* The filesystem is guaranteed to stay mounted while this event is -+ outstanding. -+*/ -+ -+int -+dm_enqueue_normal_event( -+ struct super_block *sb, -+ dm_tokevent_t **tevpp, -+ int flags) -+{ -+ dm_session_t *s; -+ int error; -+ int sync; -+ unsigned long lc; /* lock cookie */ -+ -+ switch ((*tevpp)->te_msg.ev_type) { -+ case DM_EVENT_READ: -+ case DM_EVENT_WRITE: -+ case DM_EVENT_TRUNCATE: -+ case DM_EVENT_PREUNMOUNT: -+ case DM_EVENT_UNMOUNT: -+ case DM_EVENT_NOSPACE: -+ case DM_EVENT_CREATE: -+ case DM_EVENT_REMOVE: -+ case DM_EVENT_RENAME: -+ case DM_EVENT_SYMLINK: -+ case DM_EVENT_LINK: -+ case DM_EVENT_DEBUT: /* not currently supported */ -+ sync = 1; -+ break; -+ -+ case DM_EVENT_DESTROY: -+ case DM_EVENT_POSTCREATE: -+ case DM_EVENT_POSTREMOVE: -+ case DM_EVENT_POSTRENAME: -+ case DM_EVENT_POSTSYMLINK: -+ case DM_EVENT_POSTLINK: -+ case DM_EVENT_ATTRIBUTE: -+ case DM_EVENT_CLOSE: /* not currently supported */ -+ case DM_EVENT_CANCEL: /* not currently supported */ -+ sync = 0; -+ break; -+ -+ default: -+ return(-EIO); /* garbage event number */ -+ } -+ -+ /* Wait until a session selects disposition for the event. The session -+ is locked upon return from dm_waitfor_disp_session(). -+ */ -+ -+ if ((error = dm_waitfor_disp_session(sb, *tevpp, &s, &lc)) != 0) -+ return(error); -+ -+ return(dm_enqueue(s, lc, tevpp, sync, flags, 0)); -+} -+ -+ -+/* Traverse the session list checking for sessions with the WANTMOUNT flag -+ set. When one is found, send it the message. Possible responses to the -+ message are one of DONTCARE, CONTINUE, or ABORT. The action taken in each -+ case is: -+ DONTCARE (-1) - Send the event to the next session with WANTMOUNT set -+ CONTINUE ( 0) - Proceed with the mount, errno zero. -+ ABORT (>0) - Fail the mount, return the returned errno. -+ -+ The mount request is sent to sessions in ascending session ID order. -+ Since the session list can change dramatically while this process is -+ sleeping in dm_enqueue(), this routine must use session IDs rather than -+ session pointers when keeping track of where it is in the list. Since -+ new sessions are always added at the end of the queue, and have increasing -+ session ID values, we don't have to worry about missing any session. 
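The mount-event dispatch implemented below is driven from user space: a session daemon receives the event with dm_get_events() and answers it with dm_respond_event(), where DM_RESP_CONTINUE (reterror 0) lets the mount proceed, DM_RESP_ABORT (positive errno) fails it, and DM_RESP_DONTCARE defers to the next session that has DM_SN_WANTMOUNT set. A minimal user-space sketch of that loop follows; it assumes the usual XDSM library prototypes, and the header path, buffer size and error handling are illustrative rather than taken from this patch.

    #include <errno.h>
    #include <dmapi.h>              /* XDSM user library header (assumed path) */

    /* Answer mount events on an already-created session whose disposition
     * includes DM_EVENT_MOUNT.  One message is fetched per iteration, so no
     * buffer-walking macro is needed.
     */
    static int answer_mount_events(dm_sessid_t sid)
    {
            char            buf[4096];      /* illustrative size */
            size_t          rlen;
            dm_eventmsg_t   *msg;

            for (;;) {
                    if (dm_get_events(sid, 1, DM_EV_WAIT, sizeof(buf), buf, &rlen))
                            return errno;
                    msg = (dm_eventmsg_t *)buf;

                    if (msg->ev_type == DM_EVENT_MOUNT) {
                            /* Accept the mount.  Responding DM_RESP_ABORT with a
                             * positive errno would fail it instead, and
                             * DM_RESP_DONTCARE (legal for mount events only)
                             * would pass it on to the next interested session.
                             */
                            dm_respond_event(sid, msg->ev_token,
                                             DM_RESP_CONTINUE, 0, 0, NULL);
                    } else if (msg->ev_token != DM_INVALID_TOKEN) {
                            /* Any other synchronous event: just let it continue. */
                            dm_respond_event(sid, msg->ev_token,
                                             DM_RESP_CONTINUE, 0, 0, NULL);
                    }
            }
    }
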
-+*/ -+ -+int -+dm_enqueue_mount_event( -+ struct super_block *sb, -+ dm_tokevent_t *tevp) -+{ -+ dm_session_t *s; -+ dm_sessid_t sid; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ /* Make the mounting filesystem visible to other DMAPI calls. */ -+ -+ if ((error = dm_add_fsys_entry(sb, tevp)) != 0){ -+ return(error); -+ } -+ -+ /* Walk through the session list presenting the mount event to each -+ session that is interested until a session accepts or rejects it, -+ or until all sessions ignore it. -+ */ -+ -+ for (sid = DM_NO_SESSION, error = 1; error > 0; sid = s->sn_sessid) { -+ -+ lc = mutex_spinlock(&dm_session_lock); -+ for (s = dm_sessions; s; s = s->sn_next) { -+ if (s->sn_sessid > sid && s->sn_flags & DM_SN_WANTMOUNT) { -+ nested_spinlock(&s->sn_qlock); -+ nested_spinunlock(&dm_session_lock); -+ break; -+ } -+ } -+ if (s == NULL) { -+ mutex_spinunlock(&dm_session_lock, lc); -+ break; /* noone wants it; proceed with mount */ -+ } -+ error = dm_enqueue(s, lc, &tevp, 1, 0, 0); -+ } -+ -+ /* If the mount will be allowed to complete, then update the fsrp entry -+ accordingly. If the mount is to be aborted, remove the fsrp entry. -+ */ -+ -+ if (error >= 0) { -+ dm_change_fsys_entry(sb, DM_STATE_MOUNTED); -+ error = 0; -+ } else { -+ dm_remove_fsys_entry(sb); -+ } -+ return(error); -+} -+ -+int -+dm_enqueue_sendmsg_event( -+ dm_sessid_t targetsid, -+ dm_tokevent_t *tevp, -+ int sync) -+{ -+ dm_session_t *s; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ if ((error = dm_find_session_and_lock(targetsid, &s, &lc)) != 0) -+ return(error); -+ -+ return(dm_enqueue(s, lc, &tevp, sync, 0, 1)); -+} -+ -+ -+dm_token_t -+dm_enqueue_user_event( -+ dm_sessid_t sid, -+ dm_tokevent_t *tevp, -+ dm_token_t *tokenp) -+{ -+ dm_session_t *s; -+ int error; -+ unsigned long lc; /* lock cookie */ -+ -+ /* Atomically find and lock the session whose session id is 'sid'. */ -+ -+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) -+ return(error); -+ -+ /* Assign a sequence number and token to the event, bump the -+ application reference count by one, and decrement the event -+ count because the caller gives up all ownership of the event. -+ We don't need 'te_lock' here because this thread is still the -+ only thread that can see the event. -+ */ -+ -+ nested_spinlock(&dm_token_lock); -+ tevp->te_msg.ev_sequence = dm_next_sequence++; -+ *tokenp = tevp->te_msg.ev_token = dm_next_token++; -+ nested_spinunlock(&dm_token_lock); -+ -+ tevp->te_flags &= ~(DM_TEF_INTERMED|DM_TEF_FINAL); -+ tevp->te_app_ref++; -+ tevp->te_evt_ref--; -+ -+ /* Add the request to the tail of the sn_delq. Now it's visible. */ -+ -+ dm_link_event(tevp, &s->sn_delq); -+ mutex_spinunlock(&s->sn_qlock, lc); -+ -+ return(0); -+} -+ -+#ifdef HAVE_DM_QUEUE_FLUSH -+/* If inode is non-null, find any tdp referencing that inode and flush the -+ * thread waiting on that inode and set DM_TEF_FLUSH for that tokevent. -+ * Otherwise, if inode is null, find any tdp referencing the specified fsid -+ * and flush that thread and set DM_TEF_FLUSH for that tokevent. 
-+ */ -+static int -+dm_flush_events( -+ dm_session_t *s, -+ dm_fsid_t *fsidp, -+ struct inode *inode, /* may be null */ -+ dm_eventq_t *queue, -+ int is_writerq, -+ int errno) -+{ -+ dm_tokevent_t *tevp, *next_tevp; -+ dm_tokdata_t *tdp; -+ int found_events = 0; -+ -+ ASSERT(fsidp); -+ for (tevp = queue->eq_head; tevp; tevp = next_tevp) { -+ nested_spinlock(&tevp->te_lock); -+ next_tevp = tevp->te_next; -+ -+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) { -+ if( inode ) { -+ if( tdp->td_ip == inode ) { -+ break; -+ } -+ } -+ else if(memcmp(fsidp, &tdp->td_handle.ha_fsid, sizeof(*fsidp)) == 0) { -+ break; -+ } -+ } -+ -+ if (tdp != NULL) { -+ /* found a handle reference in this event */ -+ ++found_events; -+ tevp->te_flags |= DM_TEF_FLUSH; -+ -+ /* Set the reply value, unless dm_get_events is -+ already on this one. -+ */ -+ if (! (tevp->te_flags & DM_TEF_LOCKED)) -+ tevp->te_reply = errno; -+ -+ /* If it is on the sn_evt_writerq or is being -+ used by dm_get_events then we're done with it. -+ */ -+ if (is_writerq || (tevp->te_flags & DM_TEF_LOCKED)) { -+ nested_spinunlock(&tevp->te_lock); -+ continue; -+ } -+ -+ /* If there is a thread waiting on a synchronous -+ event then be like dm_respond_event. -+ */ -+ -+ if ((tevp->te_evt_ref) && -+ (tevp->te_msg.ev_token != DM_INVALID_TOKEN)) { -+ -+ tevp->te_flags |= DM_TEF_FINAL; -+ dm_unlink_event(tevp, queue); -+ if (tevp->te_flags & DM_TEF_HASHED) -+ unhash_event(s, tevp); -+ sv_broadcast(&tevp->te_evt_queue); -+ nested_spinunlock(&tevp->te_lock); -+ dm_put_tevp(tevp, NULL); -+ continue; -+ } -+ } -+ nested_spinunlock(&tevp->te_lock); -+ } -+ -+ return(found_events); -+} -+ -+ -+/* If inode is non-null then find any threads that have a reference to that -+ * inode and flush them with the specified errno. -+ * Otherwise,if inode is null, then find any threads that have a reference -+ * to that sb and flush them with the specified errno. -+ * We look for these threads in each session's sn_evt_writerq, sn_newq, -+ * and sn_delq. -+ */ -+int -+dm_release_threads( -+ struct super_block *sb, -+ struct inode *inode, /* may be null */ -+ int errno) -+{ -+ dm_sessid_t sid; -+ dm_session_t *s; -+ unsigned long lc; -+ u_int sesscnt; -+ dm_sessid_t *sidlist; -+ int i; -+ int found_events = 0; -+ dm_fsid_t fsid; -+ struct filesystem_dmapi_operations *dops; -+ -+ ASSERT(sb); -+ dops = dm_fsys_ops(sb); -+ ASSERT(dops); -+ dops->get_fsid(sb, &fsid); -+ dm_release_disp_threads(&fsid, inode, errno); -+ -+ /* Loop until we can get the right amount of temp space, being careful -+ not to hold a mutex during the allocation. Usually only one trip. 
-+ */ -+ -+ for (;;) { -+ lc = mutex_spinlock(&dm_session_lock); -+ sesscnt = dm_sessions_active; -+ mutex_spinunlock(&dm_session_lock, lc); -+ -+ if (sesscnt == 0) -+ return 0; -+ -+ sidlist = kmalloc(sesscnt * sizeof(sid), GFP_KERNEL); -+ -+ lc = mutex_spinlock(&dm_session_lock); -+ if (sesscnt == dm_sessions_active) -+ break; -+ -+ mutex_spinunlock(&dm_session_lock, lc); -+ kfree(sidlist); -+ } -+ -+ for (i = 0, s = dm_sessions; i < sesscnt; i++, s = s->sn_next) -+ sidlist[i] = s->sn_sessid; -+ -+ mutex_spinunlock(&dm_session_lock, lc); -+ -+ -+ for (i = 0; i < sesscnt; i++) { -+ sid = sidlist[i]; -+ if( dm_find_session_and_lock( sid, &s, &lc ) == 0 ){ -+ found_events = dm_flush_events( s, &fsid, inode, -+ &s->sn_evt_writerq, 1, -+ errno ); -+ if (found_events) -+ sv_broadcast(&s->sn_writerq); -+ -+ dm_flush_events(s, &fsid, inode, &s->sn_newq, 0, errno); -+ dm_flush_events(s, &fsid, inode, &s->sn_delq, 0, errno); -+ -+ mutex_spinunlock( &s->sn_qlock, lc ); -+ } -+ } -+ kfree(sidlist); -+ -+ return 0; -+} -+#endif /* HAVE_DM_QUEUE_FLUSH */ ---- /dev/null -+++ b/fs/dmapi/dmapi_sysent.c -@@ -0,0 +1,801 @@ -+/* -+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+ -+/* Data Migration API (DMAPI) -+ */ -+ -+ -+/* We're using MISC_MAJOR / MISC_DYNAMIC_MINOR. 
*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "dmapi.h" -+#include "dmapi_kern.h" -+#include "dmapi_private.h" -+ -+struct kmem_cache *dm_fsreg_cachep = NULL; -+struct kmem_cache *dm_tokdata_cachep = NULL; -+struct kmem_cache *dm_session_cachep = NULL; -+struct kmem_cache *dm_fsys_map_cachep = NULL; -+struct kmem_cache *dm_fsys_vptr_cachep = NULL; -+ -+static int -+dmapi_ioctl(struct inode *inode, struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ sys_dmapi_args_t kargs; -+ sys_dmapi_args_t *uap = &kargs; -+ int error = 0; -+ int rvp = -ENOSYS; -+ int use_rvp = 0; -+ -+ if (!capable(CAP_MKNOD)) -+ return -EPERM; -+ -+ if( copy_from_user( &kargs, (sys_dmapi_args_t __user *)arg, -+ sizeof(sys_dmapi_args_t) ) ) -+ return -EFAULT; -+ -+ unlock_kernel(); -+ -+ switch (cmd) { -+ case DM_CLEAR_INHERIT: -+ error = dm_clear_inherit( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_attrname_t __user *) DM_Parg(uap,5));/* attrnamep */ -+ break; -+ case DM_CREATE_BY_HANDLE: -+ error = dm_create_by_handle( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* dirhanp */ -+ (size_t) DM_Uarg(uap,3), /* dirhlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (void __user *) DM_Parg(uap,5), /* hanp */ -+ (size_t) DM_Uarg(uap,6), /* hlen */ -+ (char __user *) DM_Parg(uap,7));/* cname */ -+ break; -+ case DM_CREATE_SESSION: -+ error = dm_create_session( -+ (dm_sessid_t) DM_Uarg(uap,1), /* oldsid */ -+ (char __user *) DM_Parg(uap,2), /* sessinfop */ -+ (dm_sessid_t __user *) DM_Parg(uap,3));/* newsidp */ -+ break; -+ case DM_CREATE_USEREVENT: -+ error = dm_create_userevent( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (size_t) DM_Uarg(uap,2), /* msglen */ -+ (void __user *) DM_Parg(uap,3), /* msgdatap */ -+ (dm_token_t __user *) DM_Parg(uap,4));/* tokenp */ -+ break; -+ case DM_DESTROY_SESSION: -+ error = dm_destroy_session( -+ (dm_sessid_t) DM_Uarg(uap,1));/* sid */ -+ break; -+ case DM_DOWNGRADE_RIGHT: -+ error = dm_downgrade_right( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4));/* token */ -+ break; -+ case DM_FD_TO_HANDLE: -+ error = dm_fd_to_hdl( -+ (int) DM_Uarg(uap,1), /* fd */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t __user *) DM_Parg(uap,3));/* hlenp */ -+ break; -+ case DM_FIND_EVENTMSG: -+ error = dm_find_eventmsg( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (dm_token_t) DM_Uarg(uap,2), /* token */ -+ (size_t) DM_Uarg(uap,3), /* buflen */ -+ (void __user *) DM_Parg(uap,4), /* bufp */ -+ (size_t __user *) DM_Parg(uap,5));/* rlenp */ -+ break; -+ case DM_GET_ALLOCINFO: -+ use_rvp = 1; -+ error = dm_get_allocinfo_rvp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_off_t __user *) DM_Parg(uap,5), /* offp */ -+ (u_int) DM_Uarg(uap,6), /* nelem */ -+ (dm_extent_t __user *) DM_Parg(uap,7), /* extentp */ -+ (u_int __user *) DM_Parg(uap,8), /* nelemp */ -+ &rvp); -+ break; -+ case DM_GET_BULKALL: -+ use_rvp = 1; -+ error = dm_get_bulkall_rvp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* 
token */ -+ (u_int) DM_Uarg(uap,5), /* mask */ -+ (dm_attrname_t __user *) DM_Parg(uap,6),/* attrnamep */ -+ (dm_attrloc_t __user *) DM_Parg(uap,7),/* locp */ -+ (size_t) DM_Uarg(uap,8), /* buflen */ -+ (void __user *) DM_Parg(uap,9), /* bufp */ -+ (size_t __user *) DM_Parg(uap,10),/* rlenp */ -+ &rvp); -+ break; -+ case DM_GET_BULKATTR: -+ use_rvp = 1; -+ error = dm_get_bulkattr_rvp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* mask */ -+ (dm_attrloc_t __user *)DM_Parg(uap,6), /* locp */ -+ (size_t) DM_Uarg(uap,7), /* buflen */ -+ (void __user *) DM_Parg(uap,8), /* bufp */ -+ (size_t __user *) DM_Parg(uap,9), /* rlenp */ -+ &rvp); -+ break; -+ case DM_GET_CONFIG: -+ error = dm_get_config( -+ (void __user *) DM_Parg(uap,1), /* hanp */ -+ (size_t) DM_Uarg(uap,2), /* hlen */ -+ (dm_config_t) DM_Uarg(uap,3), /* flagname */ -+ (dm_size_t __user *)DM_Parg(uap,4));/* retvalp */ -+ break; -+ case DM_GET_CONFIG_EVENTS: -+ error = dm_get_config_events( -+ (void __user *) DM_Parg(uap,1), /* hanp */ -+ (size_t) DM_Uarg(uap,2), /* hlen */ -+ (u_int) DM_Uarg(uap,3), /* nelem */ -+ (dm_eventset_t __user *) DM_Parg(uap,4),/* eventsetp */ -+ (u_int __user *) DM_Parg(uap,5));/* nelemp */ -+ break; -+ case DM_GET_DIOINFO: -+ error = dm_get_dioinfo( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_dioinfo_t __user *)DM_Parg(uap,5));/* diop */ -+ break; -+ case DM_GET_DIRATTRS: -+ use_rvp = 1; -+ error = dm_get_dirattrs_rvp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* mask */ -+ (dm_attrloc_t __user *)DM_Parg(uap,6), /* locp */ -+ (size_t) DM_Uarg(uap,7), /* buflen */ -+ (void __user *) DM_Parg(uap,8), /* bufp */ -+ (size_t __user *) DM_Parg(uap,9), /* rlenp */ -+ &rvp); -+ break; -+ case DM_GET_DMATTR: -+ error = dm_get_dmattr( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */ -+ (size_t) DM_Uarg(uap,6), /* buflen */ -+ (void __user *) DM_Parg(uap,7), /* bufp */ -+ (size_t __user *) DM_Parg(uap,8));/* rlenp */ -+ -+ break; -+ case DM_GET_EVENTLIST: -+ error = dm_get_eventlist( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* nelem */ -+ (dm_eventset_t __user *) DM_Parg(uap,6),/* eventsetp */ -+ (u_int __user *) DM_Parg(uap,7));/* nelemp */ -+ break; -+ case DM_GET_EVENTS: -+ error = dm_get_events( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (u_int) DM_Uarg(uap,2), /* maxmsgs */ -+ (u_int) DM_Uarg(uap,3), /* flags */ -+ (size_t) DM_Uarg(uap,4), /* buflen */ -+ (void __user *) DM_Parg(uap,5), /* bufp */ -+ (size_t __user *) DM_Parg(uap,6));/* rlenp */ -+ break; -+ case DM_GET_FILEATTR: -+ error = dm_get_fileattr( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* mask */ -+ (dm_stat_t __user *) 
DM_Parg(uap,6));/* statp */ -+ break; -+ case DM_GET_MOUNTINFO: -+ error = dm_get_mountinfo( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (size_t) DM_Uarg(uap,5), /* buflen */ -+ (void __user *) DM_Parg(uap,6), /* bufp */ -+ (size_t __user *) DM_Parg(uap,7));/* rlenp */ -+ break; -+ case DM_GET_REGION: -+ error = dm_get_region( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* nelem */ -+ (dm_region_t __user *) DM_Parg(uap,6), /* regbufp */ -+ (u_int __user *) DM_Parg(uap,7));/* nelemp */ -+ break; -+ case DM_GETALL_DISP: -+ error = dm_getall_disp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (size_t) DM_Uarg(uap,2), /* buflen */ -+ (void __user *) DM_Parg(uap,3), /* bufp */ -+ (size_t __user *) DM_Parg(uap,4));/* rlenp */ -+ break; -+ case DM_GETALL_DMATTR: -+ error = dm_getall_dmattr( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (size_t) DM_Uarg(uap,5), /* buflen */ -+ (void __user *) DM_Parg(uap,6), /* bufp */ -+ (size_t __user *) DM_Parg(uap,7));/* rlenp */ -+ break; -+ case DM_GETALL_INHERIT: -+ error = dm_getall_inherit( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* nelem */ -+ (dm_inherit_t __user *)DM_Parg(uap,6), /* inheritbufp*/ -+ (u_int __user *) DM_Parg(uap,7));/* nelemp */ -+ break; -+ case DM_GETALL_SESSIONS: -+ error = dm_getall_sessions( -+ (u_int) DM_Uarg(uap,1), /* nelem */ -+ (dm_sessid_t __user *) DM_Parg(uap,2), /* sidbufp */ -+ (u_int __user *) DM_Parg(uap,3));/* nelemp */ -+ break; -+ case DM_GETALL_TOKENS: -+ error = dm_getall_tokens( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (u_int) DM_Uarg(uap,2), /* nelem */ -+ (dm_token_t __user *) DM_Parg(uap,3), /* tokenbufp */ -+ (u_int __user *) DM_Parg(uap,4));/* nelemp */ -+ break; -+ case DM_INIT_ATTRLOC: -+ error = dm_init_attrloc( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_attrloc_t __user *) DM_Parg(uap,5));/* locp */ -+ break; -+ case DM_MKDIR_BY_HANDLE: -+ error = dm_mkdir_by_handle( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* dirhanp */ -+ (size_t) DM_Uarg(uap,3), /* dirhlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (void __user *) DM_Parg(uap,5), /* hanp */ -+ (size_t) DM_Uarg(uap,6), /* hlen */ -+ (char __user *) DM_Parg(uap,7));/* cname */ -+ break; -+ case DM_MOVE_EVENT: -+ error = dm_move_event( -+ (dm_sessid_t) DM_Uarg(uap,1), /* srcsid */ -+ (dm_token_t) DM_Uarg(uap,2), /* token */ -+ (dm_sessid_t) DM_Uarg(uap,3), /* targetsid */ -+ (dm_token_t __user *) DM_Parg(uap,4));/* rtokenp */ -+ break; -+ case DM_OBJ_REF_HOLD: -+ error = dm_obj_ref_hold( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (dm_token_t) DM_Uarg(uap,2), /* token */ -+ (void __user *) DM_Parg(uap,3), /* hanp */ -+ (size_t) DM_Uarg(uap,4));/* hlen */ -+ break; -+ case DM_OBJ_REF_QUERY: -+ use_rvp = 1; -+ error = dm_obj_ref_query_rvp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (dm_token_t) 
DM_Uarg(uap,2), /* token */ -+ (void __user *) DM_Parg(uap,3), /* hanp */ -+ (size_t) DM_Uarg(uap,4), /* hlen */ -+ &rvp); -+ break; -+ case DM_OBJ_REF_RELE: -+ error = dm_obj_ref_rele( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (dm_token_t) DM_Uarg(uap,2), /* token */ -+ (void __user *) DM_Parg(uap,3), /* hanp */ -+ (size_t) DM_Uarg(uap,4));/* hlen */ -+ break; -+ case DM_PATH_TO_FSHANDLE: -+ error = dm_path_to_fshdl( -+ (char __user *) DM_Parg(uap,1), /* path */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t __user *) DM_Parg(uap,3));/* hlenp */ -+ break; -+ case DM_PATH_TO_HANDLE: -+ error = dm_path_to_hdl( -+ (char __user *) DM_Parg(uap,1), /* path */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t __user *) DM_Parg(uap,3));/* hlenp */ -+ break; -+ case DM_PENDING: -+ error = dm_pending( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (dm_token_t) DM_Uarg(uap,2), /* token */ -+ (dm_timestruct_t __user *) DM_Parg(uap,3));/* delay */ -+ break; -+ case DM_PROBE_HOLE: -+ error = dm_probe_hole( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_off_t) DM_Uarg(uap,5), /* off */ -+ (dm_size_t) DM_Uarg(uap,6), /* len */ -+ (dm_off_t __user *) DM_Parg(uap,7), /* roffp */ -+ (dm_size_t __user *) DM_Parg(uap,8));/* rlenp */ -+ break; -+ case DM_PUNCH_HOLE: -+ error = dm_punch_hole( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_off_t) DM_Uarg(uap,5), /* off */ -+ (dm_size_t) DM_Uarg(uap,6));/* len */ -+ break; -+ case DM_QUERY_RIGHT: -+ error = dm_query_right( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_right_t __user *) DM_Parg(uap,5));/* rightp */ -+ break; -+ case DM_QUERY_SESSION: -+ error = dm_query_session( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (size_t) DM_Uarg(uap,2), /* buflen */ -+ (void __user *) DM_Parg(uap,3), /* bufp */ -+ (size_t __user *) DM_Parg(uap,4));/* rlenp */ -+ break; -+ case DM_READ_INVIS: -+ use_rvp = 1; -+ error = dm_read_invis_rvp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_off_t) DM_Uarg(uap,5), /* off */ -+ (dm_size_t) DM_Uarg(uap,6), /* len */ -+ (void __user *) DM_Parg(uap,7), /* bufp */ -+ &rvp); -+ break; -+ case DM_RELEASE_RIGHT: -+ error = dm_release_right( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4));/* token */ -+ break; -+ case DM_REMOVE_DMATTR: -+ error = dm_remove_dmattr( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (int) DM_Uarg(uap,5), /* setdtime */ -+ (dm_attrname_t __user *) DM_Parg(uap,6));/* attrnamep */ -+ break; -+ case DM_REQUEST_RIGHT: -+ error = dm_request_right( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* flags */ -+ (dm_right_t) DM_Uarg(uap,6));/* right */ -+ break; -+ case DM_RESPOND_EVENT: -+ error 
= dm_respond_event( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (dm_token_t) DM_Uarg(uap,2), /* token */ -+ (dm_response_t) DM_Uarg(uap,3), /* response */ -+ (int) DM_Uarg(uap,4), /* reterror */ -+ (size_t) DM_Uarg(uap,5), /* buflen */ -+ (void __user *) DM_Parg(uap,6));/* respbufp */ -+ break; -+ case DM_SEND_MSG: -+ error = dm_send_msg( -+ (dm_sessid_t) DM_Uarg(uap,1), /* targetsid */ -+ (dm_msgtype_t) DM_Uarg(uap,2), /* msgtype */ -+ (size_t) DM_Uarg(uap,3), /* buflen */ -+ (void __user *) DM_Parg(uap,4));/* bufp */ -+ break; -+ case DM_SET_DISP: -+ error = dm_set_disp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_eventset_t __user *) DM_Parg(uap,5),/* eventsetp */ -+ (u_int) DM_Uarg(uap,6));/* maxevent */ -+ break; -+ case DM_SET_DMATTR: -+ error = dm_set_dmattr( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */ -+ (int) DM_Uarg(uap,6), /* setdtime */ -+ (size_t) DM_Uarg(uap,7), /* buflen */ -+ (void __user *) DM_Parg(uap,8));/* bufp */ -+ break; -+ case DM_SET_EVENTLIST: -+ error = dm_set_eventlist( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_eventset_t __user *) DM_Parg(uap,5),/* eventsetp */ -+ (u_int) DM_Uarg(uap,6));/* maxevent */ -+ break; -+ case DM_SET_FILEATTR: -+ error = dm_set_fileattr( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* mask */ -+ (dm_fileattr_t __user *)DM_Parg(uap,6));/* attrp */ -+ break; -+ case DM_SET_INHERIT: -+ error = dm_set_inherit( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_attrname_t __user *)DM_Parg(uap,5),/* attrnamep */ -+ (mode_t) DM_Uarg(uap,6));/* mode */ -+ break; -+ case DM_SET_REGION: -+ error = dm_set_region( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (u_int) DM_Uarg(uap,5), /* nelem */ -+ (dm_region_t __user *) DM_Parg(uap,6), /* regbufp */ -+ (dm_boolean_t __user *) DM_Parg(uap,7));/* exactflagp */ -+ break; -+ case DM_SET_RETURN_ON_DESTROY: -+ error = dm_set_return_on_destroy( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */ -+ (dm_boolean_t) DM_Uarg(uap,6));/* enable */ -+ break; -+ case DM_SYMLINK_BY_HANDLE: -+ error = dm_symlink_by_handle( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* dirhanp */ -+ (size_t) DM_Uarg(uap,3), /* dirhlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (void __user *) DM_Parg(uap,5), /* hanp */ -+ (size_t) DM_Uarg(uap,6), /* hlen */ -+ (char __user *) DM_Parg(uap,7), /* cname */ -+ (char __user *) DM_Parg(uap,8));/* path */ -+ break; -+ case DM_SYNC_BY_HANDLE: -+ error = dm_sync_by_handle( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ 
(void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4));/* token */ -+ break; -+ case DM_UPGRADE_RIGHT: -+ error = dm_upgrade_right( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4));/* token */ -+ break; -+ case DM_WRITE_INVIS: -+ use_rvp = 1; -+ error = dm_write_invis_rvp( -+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (dm_token_t) DM_Uarg(uap,4), /* token */ -+ (int) DM_Uarg(uap,5), /* flags */ -+ (dm_off_t) DM_Uarg(uap,6), /* off */ -+ (dm_size_t) DM_Uarg(uap,7), /* len */ -+ (void __user *) DM_Parg(uap,8), /* bufp */ -+ &rvp); -+ break; -+ case DM_OPEN_BY_HANDLE: -+ use_rvp = 1; -+ error = dm_open_by_handle_rvp( -+ (unsigned int) DM_Uarg(uap,1), /* fd */ -+ (void __user *) DM_Parg(uap,2), /* hanp */ -+ (size_t) DM_Uarg(uap,3), /* hlen */ -+ (int) DM_Uarg(uap,4), /* flags */ -+ &rvp); -+ break; -+ default: -+ error = -ENOSYS; -+ break; -+ } -+ -+ lock_kernel(); -+ -+ /* If it was an *_rvp() function, then -+ if error==0, return |rvp| -+ */ -+ if( use_rvp && (error == 0) ) -+ return rvp; -+ else -+ return error; -+} -+ -+ -+ -+static int -+dmapi_open(struct inode *inode, struct file *file) -+{ -+ return 0; -+} -+ -+ -+static int -+dmapi_release(struct inode *inode, struct file *file) -+{ -+ return 0; -+} -+ -+ -+/* say hello, and let me know the device is hooked up */ -+static ssize_t -+dmapi_dump(struct file *file, char __user *buf, size_t count, loff_t *ppos) -+{ -+ char tmp[50]; -+ int len; -+ if( *ppos == 0 ){ -+ len = sprintf( tmp, "# " DM_VER_STR_CONTENTS "\n" ); -+ if( copy_to_user(buf, tmp, len) ) -+ return -EFAULT; -+ *ppos += 1; -+ return len; -+ } -+ return 0; -+} -+ -+static struct file_operations dmapi_fops = { -+ .open = dmapi_open, -+ .ioctl = dmapi_ioctl, -+ .read = dmapi_dump, -+ .release = dmapi_release -+}; -+ -+static struct miscdevice dmapi_dev = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "dmapi", -+ .fops = &dmapi_fops -+}; -+ -+ -+ -+#ifdef CONFIG_PROC_FS -+static int -+dmapi_summary(char *buffer, char **start, off_t offset, -+ int count, int *eof, void *data) -+{ -+ int len; -+ -+ extern u_int dm_sessions_active; -+ extern dm_sessid_t dm_next_sessid; -+ extern dm_token_t dm_next_token; -+ extern dm_sequence_t dm_next_sequence; -+ extern int dm_fsys_cnt; -+ -+#define CHKFULL if(len >= count) break; -+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL; -+ -+ len=0; -+ while(1){ -+ ADDBUF("dm_sessions_active=%u\n", dm_sessions_active); -+ ADDBUF("dm_next_sessid=%d\n", (int)dm_next_sessid); -+ ADDBUF("dm_next_token=%d\n", (int)dm_next_token); -+ ADDBUF("dm_next_sequence=%u\n", (u_int)dm_next_sequence); -+ ADDBUF("dm_fsys_cnt=%d\n", dm_fsys_cnt); -+ -+ break; -+ } -+ -+ if (offset >= len) { -+ *start = buffer; -+ *eof = 1; -+ return 0; -+ } -+ *start = buffer + offset; -+ if ((len -= offset) > count) -+ return count; -+ *eof = 1; -+ -+ return len; -+} -+#endif -+ -+ -+static void __init -+dmapi_init_procfs(int dmapi_minor) -+{ -+#ifdef CONFIG_PROC_FS -+ struct proc_dir_entry *entry; -+ -+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS, NULL)) == NULL ) -+ return; -+ entry->mode = S_IFDIR | S_IRUSR | S_IXUSR; -+ -+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/fsreg", NULL)) == NULL ) -+ return; -+ -+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/sessions", NULL)) == NULL ) -+ return; -+ -+ entry = 
create_proc_read_entry( DMAPI_DBG_PROCFS "/summary", -+ 0, NULL, dmapi_summary, NULL); -+#endif -+} -+ -+#if 0 -+static void __exit -+dmapi_cleanup_procfs(void) -+{ -+#ifdef CONFIG_PROC_FS -+ remove_proc_entry( DMAPI_DBG_PROCFS "/summary", NULL); -+ remove_proc_entry( DMAPI_DBG_PROCFS "/fsreg", NULL); -+ remove_proc_entry( DMAPI_DBG_PROCFS "/sessions", NULL); -+ remove_proc_entry( DMAPI_DBG_PROCFS, NULL); -+#endif -+} -+#endif -+ -+int __init dmapi_init(void) -+{ -+ int ret; -+ -+ dm_tokdata_cachep = kmem_cache_create("dm_tokdata", -+ sizeof(struct dm_tokdata), 0, 0, NULL); -+ if (dm_tokdata_cachep == NULL) -+ goto out; -+ -+ dm_fsreg_cachep = kmem_cache_create("dm_fsreg", -+ sizeof(struct dm_fsreg), 0, 0, NULL); -+ if (dm_fsreg_cachep == NULL) -+ goto out_free_tokdata_cachep; -+ -+ dm_session_cachep = kmem_cache_create("dm_session", -+ sizeof(struct dm_session), 0, 0, NULL); -+ if (dm_session_cachep == NULL) -+ goto out_free_fsreg_cachep; -+ -+ dm_fsys_map_cachep = kmem_cache_create("dm_fsys_map", -+ sizeof(dm_vector_map_t), 0, 0, NULL); -+ if (dm_fsys_map_cachep == NULL) -+ goto out_free_session_cachep; -+ dm_fsys_vptr_cachep = kmem_cache_create("dm_fsys_vptr", -+ sizeof(dm_fsys_vector_t), 0, 0, NULL); -+ if (dm_fsys_vptr_cachep == NULL) -+ goto out_free_fsys_map_cachep; -+ -+ ret = misc_register(&dmapi_dev); -+ if (ret) { -+ printk(KERN_ERR "dmapi_init: misc_register returned %d\n", ret); -+ goto out_free_fsys_vptr_cachep; -+ } -+ -+ dmapi_init_procfs(dmapi_dev.minor); -+ return 0; -+ -+ out_free_fsys_vptr_cachep: -+ kmem_cache_destroy(dm_fsys_vptr_cachep); -+ out_free_fsys_map_cachep: -+ kmem_cache_destroy(dm_fsys_map_cachep); -+ out_free_session_cachep: -+ kmem_cache_destroy(dm_session_cachep); -+ out_free_fsreg_cachep: -+ kmem_cache_destroy(dm_fsreg_cachep); -+ out_free_tokdata_cachep: -+ kmem_cache_destroy(dm_tokdata_cachep); -+ out: -+ return -ENOMEM; -+} -+ -+#if 0 -+void __exit dmapi_uninit(void) -+{ -+ misc_deregister(&dmapi_dev); -+ dmapi_cleanup_procfs(); -+ kmem_cache_destroy(dm_tokdata_cachep); -+ kmem_cache_destroy(dm_fsreg_cachep); -+ kmem_cache_destroy(dm_session_cachep); -+ kmem_cache_destroy(dm_fsys_map_cachep); -+ kmem_cache_destroy(dm_fsys_vptr_cachep); -+} -+#endif -+ -+module_init(dmapi_init); -+/*module_exit(dmapi_uninit);*/ /* Some other day */ -+ -+MODULE_AUTHOR("Silicon Graphics, Inc."); -+MODULE_DESCRIPTION("SGI Data Migration Subsystem"); -+MODULE_LICENSE("GPL"); -+ -+EXPORT_SYMBOL(dm_send_mount_event); -+EXPORT_SYMBOL(dm_send_namesp_event); -+EXPORT_SYMBOL(dm_send_unmount_event); -+EXPORT_SYMBOL(dm_send_data_event); -+EXPORT_SYMBOL(dm_send_destroy_event); -+EXPORT_SYMBOL(dm_ip_to_handle); -+EXPORT_SYMBOL(dmapi_register); -+EXPORT_SYMBOL(dmapi_unregister); -+EXPORT_SYMBOL(dmapi_registered); -+EXPORT_SYMBOL(dm_release_threads); ---- /dev/null -+++ b/fs/dmapi/sv.h -@@ -0,0 +1,89 @@ -+/* -+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ * Further, this software is distributed without any warranty that it is -+ * free of the rightful claim of any third person regarding infringement -+ * or the like. 
Any license provided herein, whether implied or -+ * otherwise, applies only to this software file. Patent licenses, if -+ * any, provided herein do not apply to combinations of this program with -+ * other software, or any other product whatsoever. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write the Free Software Foundation, Inc., 59 -+ * Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -+ * Mountain View, CA 94043, or: -+ * -+ * http://www.sgi.com -+ * -+ * For further information regarding this notice, see: -+ * -+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ -+ */ -+#ifndef __DMAPI_SV_H__ -+#define __DMAPI_SV_H__ -+ -+#include -+#include -+#include -+ -+/* -+ * Synchronisation variables. -+ * -+ * (Parameters "pri", "svf" and "rts" are not implemented) -+ */ -+ -+typedef struct sv_s { -+ wait_queue_head_t waiters; -+} sv_t; -+ -+#define SV_FIFO 0x0 /* sv_t is FIFO type */ -+#define SV_LIFO 0x2 /* sv_t is LIFO type */ -+#define SV_PRIO 0x4 /* sv_t is PRIO type */ -+#define SV_KEYED 0x6 /* sv_t is KEYED type */ -+#define SV_DEFAULT SV_FIFO -+ -+ -+static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state, -+ unsigned long timeout) -+{ -+ DECLARE_WAITQUEUE(wait, current); -+ -+ add_wait_queue_exclusive(&sv->waiters, &wait); -+ __set_current_state(state); -+ spin_unlock(lock); -+ -+ schedule_timeout(timeout); -+ -+ remove_wait_queue(&sv->waiters, &wait); -+} -+ -+#define init_sv(sv,type,name,flag) \ -+ init_waitqueue_head(&(sv)->waiters) -+#define sv_init(sv,flag,name) \ -+ init_waitqueue_head(&(sv)->waiters) -+#define sv_destroy(sv) \ -+ /*NOTHING*/ -+#define sv_wait(sv, pri, lock, s) \ -+ _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) -+#define sv_wait_sig(sv, pri, lock, s) \ -+ _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) -+#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \ -+ _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts)) -+#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \ -+ _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts)) -+#define sv_signal(sv) \ -+ wake_up(&(sv)->waiters) -+#define sv_broadcast(sv) \ -+ wake_up_all(&(sv)->waiters) -+ -+#endif /* __DMAPI_SV_H__ */ diff --git a/patches.suse/xfs-dmapi-xfs-enable b/patches.suse/xfs-dmapi-xfs-enable deleted file mode 100644 index 891e137..0000000 --- a/patches.suse/xfs-dmapi-xfs-enable +++ /dev/null @@ -1,3808 +0,0 @@ -Date: Thu, 09 Oct 2008 17:11:53 +1100 -From: Donald Douwsma -Subject: DMAPI support for xfs -Patch-mainline: Not yet -References: bnc#450658 - -Acked-by: Jan Kara - ---- - fs/xfs/Kconfig | 13 - fs/xfs/Makefile | 5 - fs/xfs/dmapi/Makefile | 28 - fs/xfs/dmapi/xfs_dm.c | 3327 +++++++++++++++++++++++++++++++++++++++++++ - fs/xfs/dmapi/xfs_dm.h | 23 - fs/xfs/linux-2.6/xfs_file.c | 76 - fs/xfs/linux-2.6/xfs_ksyms.c | 92 + - fs/xfs/linux-2.6/xfs_linux.h | 4 - fs/xfs/linux-2.6/xfs_super.c | 13 - fs/xfs/xfs_dmops.c | 20 - fs/xfs/xfs_itable.c | 2 - fs/xfs/xfs_itable.h | 5 - fs/xfs/xfs_mount.h | 1 - fs/xfs/xfs_rw.c | 1 - fs/xfs/xfs_rw.h | 5 - fs/xfs/xfs_vnodeops.c | 2 - 16 files changed, 3609 insertions(+), 8 deletions(-) - ---- a/fs/xfs/Kconfig -+++ b/fs/xfs/Kconfig -@@ -36,6 +36,19 @@ config XFS_QUOTA - with or without the generic quota support enabled (CONFIG_QUOTA) - - they are completely independent subsystems. 
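The hunk below adds the XFS_DMAPI tristate, which pulls in the generic DMAPI layer via "select DMAPI". For reference, enabling it in a kernel .config would be expected to look roughly like the following fragment (a sketch only: the DMAPI symbol comes from the separate fs/dmapi Kconfig and is assumed here to be tristate):

    CONFIG_XFS_FS=m
    # Enabling XFS_DMAPI also turns on the generic DMAPI layer via "select DMAPI"
    CONFIG_XFS_DMAPI=m
    CONFIG_DMAPI=m
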
- -+config XFS_DMAPI -+ tristate "XFS DMAPI support" -+ depends on XFS_FS -+ select DMAPI -+ help -+ The Data Management API is a system interface used to implement -+ the interface defined in the X/Open document: -+ "Systems Management: Data Storage Management (XDSM) API", -+ dated February 1997. This interface is used by hierarchical -+ storage management systems. -+ -+ If unsure, say N. -+ - config XFS_POSIX_ACL - bool "XFS POSIX ACL support" - depends on XFS_FS ---- a/fs/xfs/Makefile -+++ b/fs/xfs/Makefile -@@ -41,6 +41,8 @@ ifeq ($(CONFIG_XFS_QUOTA),y) - xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o - endif - -+obj-$(CONFIG_XFS_DMAPI) += dmapi/ -+ - xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o - xfs-$(CONFIG_XFS_POSIX_ACL) += $(XFS_LINUX)/xfs_acl.o - xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o -@@ -107,7 +109,8 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \ - xfs_iops.o \ - xfs_super.o \ - xfs_sync.o \ -- xfs_xattr.o) -+ xfs_xattr.o \ -+ xfs_ksyms.o) - - # Objects in support/ - xfs-y += $(addprefix support/, \ ---- /dev/null -+++ b/fs/xfs/dmapi/Makefile -@@ -0,0 +1,28 @@ -+# -+# Copyright (c) 2006 Silicon Graphics, Inc. -+# All Rights Reserved. -+# -+# This program is free software; you can redistribute it and/or -+# modify it under the terms of the GNU General Public License as -+# published by the Free Software Foundation. -+# -+# This program is distributed in the hope that it would be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with this program; if not, write the Free Software Foundation, -+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+# -+ -+EXTRA_CFLAGS += -I$(src)/.. -I$(src)/../linux-2.6 -+EXTRA_CFLAGS += -I$(srctree)/fs/dmapi -+ -+ifeq ($(CONFIG_XFS_DEBUG),y) -+ EXTRA_CFLAGS += -g -DDEBUG -+endif -+ -+obj-$(CONFIG_XFS_DMAPI) += xfs_dmapi.o -+ -+xfs_dmapi-y += xfs_dm.o ---- /dev/null -+++ b/fs/xfs/dmapi/xfs_dm.c -@@ -0,0 +1,3327 @@ -+/* -+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+#include "xfs.h" -+#include "xfs_fs.h" -+#include "xfs_types.h" -+#include "xfs_bit.h" -+#include "xfs_log.h" -+#include "xfs_inum.h" -+#include "xfs_trans.h" -+#include "xfs_sb.h" -+#include "xfs_ag.h" -+#include "xfs_dir2.h" -+#include "xfs_alloc.h" -+#include "xfs_dmapi.h" -+#include "xfs_mount.h" -+#include "xfs_da_btree.h" -+#include "xfs_bmap_btree.h" -+#include "xfs_alloc_btree.h" -+#include "xfs_ialloc_btree.h" -+#include "xfs_dir2_sf.h" -+#include "xfs_attr_sf.h" -+#include "xfs_dinode.h" -+#include "xfs_inode.h" -+#include "xfs_btree.h" -+#include "xfs_ialloc.h" -+#include "xfs_itable.h" -+#include "xfs_bmap.h" -+#include "xfs_rw.h" -+#include "xfs_acl.h" -+#include "xfs_attr.h" -+#include "xfs_attr_leaf.h" -+#include "xfs_inode_item.h" -+#include "xfs_vnodeops.h" -+#include -+#include -+#include "xfs_dm.h" -+ -+#include -+ -+#define MAXNAMLEN MAXNAMELEN -+ -+#define MIN_DIO_SIZE(mp) ((mp)->m_sb.sb_sectsize) -+#define MAX_DIO_SIZE(mp) (INT_MAX & ~(MIN_DIO_SIZE(mp) - 1)) -+ -+static void up_rw_sems(struct inode *ip, int flags) -+{ -+ if (flags & DM_FLAGS_IALLOCSEM_WR) -+ up_write(&ip->i_alloc_sem); -+ if (flags & DM_FLAGS_IMUX) -+ mutex_unlock(&ip->i_mutex); -+} -+ -+static void down_rw_sems(struct inode *ip, int flags) -+{ -+ if (flags & DM_FLAGS_IMUX) -+ mutex_lock(&ip->i_mutex); -+ if (flags & DM_FLAGS_IALLOCSEM_WR) -+ down_write(&ip->i_alloc_sem); -+} -+ -+ -+/* Structure used to hold the on-disk version of a dm_attrname_t. All -+ on-disk attribute names start with the 8-byte string "SGI_DMI_". -+*/ -+ -+typedef struct { -+ char dan_chars[DMATTR_PREFIXLEN + DM_ATTR_NAME_SIZE + 1]; -+} dm_dkattrname_t; -+ -+/* Structure used by xfs_dm_get_bulkall(), used as the "private_data" -+ * that we want xfs_bulkstat to send to our formatter. -+ */ -+typedef struct { -+ dm_fsid_t fsid; -+ void __user *laststruct; -+ dm_dkattrname_t attrname; -+} dm_bulkstat_one_t; -+ -+/* In the on-disk inode, DMAPI attribute names consist of the user-provided -+ name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be -+ changed! -+*/ -+ -+static const char dmattr_prefix[DMATTR_PREFIXLEN + 1] = DMATTR_PREFIXSTRING; -+ -+static dm_size_t dm_min_dio_xfer = 0; /* direct I/O disabled for now */ -+ -+ -+/* See xfs_dm_get_dmattr() for a description of why this is needed. */ -+ -+#define XFS_BUG_KLUDGE 256 /* max size of an in-inode attribute value */ -+ -+#define DM_MAX_ATTR_BYTES_ON_DESTROY 256 -+ -+#define DM_STAT_SIZE(dmtype,namelen) \ -+ (sizeof(dmtype) + sizeof(dm_handle_t) + namelen) -+ -+#define DM_STAT_ALIGN (sizeof(__uint64_t)) -+ -+/* DMAPI's E2BIG == EA's ERANGE */ -+#define DM_EA_XLATE_ERR(err) { if (err == ERANGE) err = E2BIG; } -+ -+static inline size_t dm_stat_align(size_t size) -+{ -+ return (size + (DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1); -+} -+ -+static inline size_t dm_stat_size(size_t namelen) -+{ -+ return dm_stat_align(sizeof(dm_stat_t) + sizeof(dm_handle_t) + namelen); -+} -+ -+/* -+ * xfs_dm_send_data_event() -+ * -+ * Send data event to DMAPI. Drop IO lock (if specified) before -+ * the dm_send_data_event() call and reacquire it afterwards. 
-+ */ -+int -+xfs_dm_send_data_event( -+ dm_eventtype_t event, -+ xfs_inode_t *ip, -+ xfs_off_t offset, -+ size_t length, -+ int flags, -+ int *lock_flags) -+{ -+ struct inode *inode = &ip->i_vnode; -+ int error; -+ uint16_t dmstate; -+ -+ /* Returns positive errors to XFS */ -+ -+ do { -+ dmstate = ip->i_d.di_dmstate; -+ if (lock_flags) -+ xfs_iunlock(ip, *lock_flags); -+ -+ up_rw_sems(inode, flags); -+ -+ error = dm_send_data_event(event, inode, DM_RIGHT_NULL, -+ offset, length, flags); -+ error = -error; /* DMAPI returns negative errors */ -+ -+ down_rw_sems(inode, flags); -+ -+ if (lock_flags) -+ xfs_ilock(ip, *lock_flags); -+ } while (!error && (ip->i_d.di_dmstate != dmstate)); -+ -+ return error; -+} -+ -+/* prohibited_mr_events -+ * -+ * Return event bits representing any events which cannot have managed -+ * region events set due to memory mapping of the file. If the maximum -+ * protection allowed in any pregion includes PROT_WRITE, and the region -+ * is shared and not text, then neither READ nor WRITE events can be set. -+ * Otherwise if the file is memory mapped, no READ event can be set. -+ * -+ */ -+STATIC int -+prohibited_mr_events( -+ struct address_space *mapping) -+{ -+ int prohibited = (1 << DM_EVENT_READ); -+ -+ if (!mapping_mapped(mapping)) -+ return 0; -+ -+ spin_lock(&mapping->i_mmap_lock); -+ if (mapping_writably_mapped(mapping)) -+ prohibited |= (1 << DM_EVENT_WRITE); -+ spin_unlock(&mapping->i_mmap_lock); -+ -+ return prohibited; -+} -+ -+#ifdef DEBUG_RIGHTS -+STATIC int -+xfs_vp_to_hexhandle( -+ struct inode *inode, -+ u_int type, -+ char *buffer) -+{ -+ dm_handle_t handle; -+ u_char *ip; -+ int length; -+ int error; -+ int i; -+ -+ /* -+ * XXX: dm_vp_to_handle doesn't exist. -+ * Looks like this debug code is rather dead. -+ */ -+ if ((error = dm_vp_to_handle(inode, &handle))) -+ return(error); -+ -+ if (type == DM_FSYS_OBJ) { /* a filesystem handle */ -+ length = DM_FSHSIZE; -+ } else { -+ length = DM_HSIZE(handle); -+ } -+ for (ip = (u_char *)&handle, i = 0; i < length; i++) { -+ *buffer++ = "0123456789abcdef"[ip[i] >> 4]; -+ *buffer++ = "0123456789abcdef"[ip[i] & 0xf]; -+ } -+ *buffer = '\0'; -+ return(0); -+} -+#endif /* DEBUG_RIGHTS */ -+ -+ -+ -+ -+/* Copy in and validate an attribute name from user space. It should be a -+ string of at least one and at most DM_ATTR_NAME_SIZE characters. Because -+ the dm_attrname_t structure doesn't provide room for the trailing NULL -+ byte, we just copy in one extra character and then zero it if it -+ happens to be non-NULL. 
-+*/ -+ -+STATIC int -+xfs_copyin_attrname( -+ dm_attrname_t __user *from, /* dm_attrname_t in user space */ -+ dm_dkattrname_t *to) /* name buffer in kernel space */ -+{ -+ int error = 0; -+ size_t len; -+ -+ strcpy(to->dan_chars, dmattr_prefix); -+ -+ len = strnlen_user((char __user *)from, DM_ATTR_NAME_SIZE); -+ if (len == 0) -+ error = EFAULT; -+ else { -+ if (copy_from_user(&to->dan_chars[DMATTR_PREFIXLEN], from, len)) -+ to->dan_chars[sizeof(to->dan_chars) - 1] = '\0'; -+ else if (to->dan_chars[DMATTR_PREFIXLEN] == '\0') -+ error = EINVAL; -+ else -+ to->dan_chars[DMATTR_PREFIXLEN + len - 1] = '\0'; -+ } -+ -+ return error; -+} -+ -+ -+/* -+ * Convert the XFS flags into their DMAPI flag equivalent for export -+ */ -+STATIC uint -+_xfs_dic2dmflags( -+ __uint16_t di_flags) -+{ -+ uint flags = 0; -+ -+ if (di_flags & XFS_DIFLAG_ANY) { -+ if (di_flags & XFS_DIFLAG_REALTIME) -+ flags |= DM_XFLAG_REALTIME; -+ if (di_flags & XFS_DIFLAG_PREALLOC) -+ flags |= DM_XFLAG_PREALLOC; -+ if (di_flags & XFS_DIFLAG_IMMUTABLE) -+ flags |= DM_XFLAG_IMMUTABLE; -+ if (di_flags & XFS_DIFLAG_APPEND) -+ flags |= DM_XFLAG_APPEND; -+ if (di_flags & XFS_DIFLAG_SYNC) -+ flags |= DM_XFLAG_SYNC; -+ if (di_flags & XFS_DIFLAG_NOATIME) -+ flags |= DM_XFLAG_NOATIME; -+ if (di_flags & XFS_DIFLAG_NODUMP) -+ flags |= DM_XFLAG_NODUMP; -+ } -+ return flags; -+} -+ -+STATIC uint -+xfs_ip2dmflags( -+ xfs_inode_t *ip) -+{ -+ return _xfs_dic2dmflags(ip->i_d.di_flags) | -+ (XFS_IFORK_Q(ip) ? DM_XFLAG_HASATTR : 0); -+} -+ -+STATIC uint -+xfs_dic2dmflags( -+ xfs_dinode_t *dip) -+{ -+ return _xfs_dic2dmflags(be16_to_cpu(dip->di_flags)) | -+ (XFS_DFORK_Q(dip) ? DM_XFLAG_HASATTR : 0); -+} -+ -+/* -+ * This copies selected fields in an inode into a dm_stat structure. Because -+ * these fields must return the same values as they would in stat(), the -+ * majority of this code was copied directly from xfs_getattr(). Any future -+ * changes to xfs_gettattr() must also be reflected here. -+ */ -+STATIC void -+xfs_dip_to_stat( -+ xfs_mount_t *mp, -+ xfs_ino_t ino, -+ xfs_dinode_t *dip, -+ dm_stat_t *buf) -+{ -+ xfs_dinode_t *dic = dip; -+ -+ /* -+ * The inode format changed when we moved the link count and -+ * made it 32 bits long. If this is an old format inode, -+ * convert it in memory to look like a new one. If it gets -+ * flushed to disk we will convert back before flushing or -+ * logging it. We zero out the new projid field and the old link -+ * count field. We'll handle clearing the pad field (the remains -+ * of the old uuid field) when we actually convert the inode to -+ * the new format. We don't change the version number so that we -+ * can distinguish this from a real new format inode. 
-+ */ -+ if (dic->di_version == 1) { -+ buf->dt_nlink = be16_to_cpu(dic->di_onlink); -+ /*buf->dt_xfs_projid = 0;*/ -+ } else { -+ buf->dt_nlink = be32_to_cpu(dic->di_nlink); -+ /*buf->dt_xfs_projid = be16_to_cpu(dic->di_projid);*/ -+ } -+ buf->dt_ino = ino; -+ buf->dt_dev = new_encode_dev(mp->m_ddev_targp->bt_dev); -+ buf->dt_mode = be16_to_cpu(dic->di_mode); -+ buf->dt_uid = be32_to_cpu(dic->di_uid); -+ buf->dt_gid = be32_to_cpu(dic->di_gid); -+ buf->dt_size = be64_to_cpu(dic->di_size); -+ buf->dt_atime = be32_to_cpu(dic->di_atime.t_sec); -+ buf->dt_mtime = be32_to_cpu(dic->di_mtime.t_sec); -+ buf->dt_ctime = be32_to_cpu(dic->di_ctime.t_sec); -+ buf->dt_xfs_xflags = xfs_dic2dmflags(dip); -+ buf->dt_xfs_extsize = -+ be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; -+ buf->dt_xfs_extents = be32_to_cpu(dic->di_nextents); -+ buf->dt_xfs_aextents = be16_to_cpu(dic->di_anextents); -+ buf->dt_xfs_igen = be32_to_cpu(dic->di_gen); -+ buf->dt_xfs_dmstate = be16_to_cpu(dic->di_dmstate); -+ -+ switch (dic->di_format) { -+ case XFS_DINODE_FMT_DEV: -+ buf->dt_rdev = xfs_dinode_get_rdev(dic); -+ buf->dt_blksize = BLKDEV_IOSIZE; -+ buf->dt_blocks = 0; -+ break; -+ case XFS_DINODE_FMT_LOCAL: -+ case XFS_DINODE_FMT_UUID: -+ buf->dt_rdev = 0; -+ buf->dt_blksize = mp->m_sb.sb_blocksize; -+ buf->dt_blocks = 0; -+ break; -+ case XFS_DINODE_FMT_EXTENTS: -+ case XFS_DINODE_FMT_BTREE: -+ buf->dt_rdev = 0; -+ buf->dt_blksize = mp->m_sb.sb_blocksize; -+ buf->dt_blocks = -+ XFS_FSB_TO_BB(mp, be64_to_cpu(dic->di_nblocks)); -+ break; -+ } -+ -+ memset(&buf->dt_pad1, 0, sizeof(buf->dt_pad1)); -+ memset(&buf->dt_pad2, 0, sizeof(buf->dt_pad2)); -+ memset(&buf->dt_pad3, 0, sizeof(buf->dt_pad3)); -+ -+ /* Finally fill in the DMAPI specific fields */ -+ buf->dt_pers = 0; -+ buf->dt_change = 0; -+ buf->dt_nevents = DM_EVENT_MAX; -+ buf->dt_emask = be32_to_cpu(dic->di_dmevmask); -+ buf->dt_dtime = be32_to_cpu(dic->di_ctime.t_sec); -+ /* Set if one of READ, WRITE or TRUNCATE bits is set in emask */ -+ buf->dt_pmanreg = (DMEV_ISSET(DM_EVENT_READ, buf->dt_emask) || -+ DMEV_ISSET(DM_EVENT_WRITE, buf->dt_emask) || -+ DMEV_ISSET(DM_EVENT_TRUNCATE, buf->dt_emask)) ? 1 : 0; -+} -+ -+/* -+ * Pull out both ondisk and incore fields, incore has preference. -+ * The inode must be kept locked SHARED by the caller. 
-+ */ -+STATIC void -+xfs_ip_to_stat( -+ xfs_mount_t *mp, -+ xfs_ino_t ino, -+ xfs_inode_t *ip, -+ dm_stat_t *buf) -+{ -+ xfs_icdinode_t *dic = &ip->i_d; -+ -+ buf->dt_ino = ino; -+ buf->dt_nlink = dic->di_nlink; -+ /*buf->dt_xfs_projid = dic->di_projid;*/ -+ buf->dt_mode = dic->di_mode; -+ buf->dt_uid = dic->di_uid; -+ buf->dt_gid = dic->di_gid; -+ buf->dt_size = XFS_ISIZE(ip); -+ buf->dt_dev = new_encode_dev(mp->m_ddev_targp->bt_dev); -+ buf->dt_atime = VFS_I(ip)->i_atime.tv_sec; -+ buf->dt_mtime = dic->di_mtime.t_sec; -+ buf->dt_ctime = dic->di_ctime.t_sec; -+ buf->dt_xfs_xflags = xfs_ip2dmflags(ip); -+ buf->dt_xfs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog; -+ buf->dt_xfs_extents = dic->di_nextents; -+ buf->dt_xfs_aextents = dic->di_anextents; -+ buf->dt_xfs_igen = dic->di_gen; -+ buf->dt_xfs_dmstate = dic->di_dmstate; -+ -+ switch (dic->di_format) { -+ case XFS_DINODE_FMT_DEV: -+ buf->dt_rdev = ip->i_df.if_u2.if_rdev; -+ buf->dt_blksize = BLKDEV_IOSIZE; -+ buf->dt_blocks = 0; -+ break; -+ case XFS_DINODE_FMT_LOCAL: -+ case XFS_DINODE_FMT_UUID: -+ buf->dt_rdev = 0; -+ buf->dt_blksize = mp->m_sb.sb_blocksize; -+ buf->dt_blocks = 0; -+ break; -+ case XFS_DINODE_FMT_EXTENTS: -+ case XFS_DINODE_FMT_BTREE: -+ buf->dt_rdev = 0; -+ buf->dt_blksize = mp->m_sb.sb_blocksize; -+ buf->dt_blocks = XFS_FSB_TO_BB(mp, -+ (dic->di_nblocks + ip->i_delayed_blks)); -+ break; -+ } -+ -+ memset(&buf->dt_pad1, 0, sizeof(buf->dt_pad1)); -+ memset(&buf->dt_pad2, 0, sizeof(buf->dt_pad2)); -+ memset(&buf->dt_pad3, 0, sizeof(buf->dt_pad3)); -+ -+ /* Finally fill in the DMAPI specific fields */ -+ buf->dt_pers = 0; -+ buf->dt_change = 0; -+ buf->dt_nevents = DM_EVENT_MAX; -+ buf->dt_emask = dic->di_dmevmask; -+ buf->dt_dtime = dic->di_ctime.t_sec; -+ /* Set if one of READ, WRITE or TRUNCATE bits is set in emask */ -+ buf->dt_pmanreg = (DMEV_ISSET(DM_EVENT_READ, buf->dt_emask) || -+ DMEV_ISSET(DM_EVENT_WRITE, buf->dt_emask) || -+ DMEV_ISSET(DM_EVENT_TRUNCATE, buf->dt_emask)) ? 1 : 0; -+} -+ -+/* -+ * Take the handle and put it at the end of a dm_xstat buffer. -+ * dt_compname is unused in bulkstat - so we zero it out. -+ * Finally, update link in dm_xstat_t to point to next struct. 
-+ */ -+STATIC void -+xfs_dm_handle_to_xstat( -+ dm_xstat_t *xbuf, -+ size_t xstat_sz, -+ dm_handle_t *handle, -+ size_t handle_sz) -+{ -+ dm_stat_t *sbuf = &xbuf->dx_statinfo; -+ -+ memcpy(xbuf + 1, handle, handle_sz); -+ sbuf->dt_handle.vd_offset = (ssize_t) sizeof(dm_xstat_t); -+ sbuf->dt_handle.vd_length = (size_t) DM_HSIZE(*handle); -+ memset(&sbuf->dt_compname, 0, sizeof(dm_vardata_t)); -+ sbuf->_link = xstat_sz; -+} -+ -+STATIC int -+xfs_dm_bulkall_iget_one( -+ xfs_mount_t *mp, -+ xfs_ino_t ino, -+ xfs_daddr_t bno, -+ int *value_lenp, -+ dm_xstat_t *xbuf, -+ u_int *xstat_szp, -+ char *attr_name, -+ caddr_t attr_buf) -+{ -+ xfs_inode_t *ip; -+ dm_handle_t handle; -+ u_int xstat_sz = *xstat_szp; -+ int value_len = *value_lenp; -+ int error; -+ -+ error = xfs_iget(mp, NULL, ino, -+ XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno); -+ if (error) -+ return error; -+ -+ xfs_ip_to_stat(mp, ino, ip, &xbuf->dx_statinfo); -+ dm_ip_to_handle(&ip->i_vnode, &handle); -+ xfs_dm_handle_to_xstat(xbuf, xstat_sz, &handle, sizeof(handle)); -+ -+ /* Drop ILOCK_SHARED for call to xfs_attr_get */ -+ xfs_iunlock(ip, XFS_ILOCK_SHARED); -+ -+ memset(&xbuf->dx_attrdata, 0, sizeof(dm_vardata_t)); -+ error = xfs_attr_get(ip, attr_name, attr_buf, &value_len, ATTR_ROOT); -+ iput(&ip->i_vnode); -+ -+ DM_EA_XLATE_ERR(error); -+ if (error && (error != ENOATTR)) { -+ if (error == E2BIG) -+ error = ENOMEM; -+ return error; -+ } -+ -+ /* How much space was in the attr? */ -+ if (error != ENOATTR) { -+ xbuf->dx_attrdata.vd_offset = xstat_sz; -+ xbuf->dx_attrdata.vd_length = value_len; -+ xstat_sz += (value_len+(DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1); -+ } -+ *xstat_szp = xbuf->dx_statinfo._link = xstat_sz; -+ *value_lenp = value_len; -+ return 0; -+} -+ -+ -+STATIC int -+xfs_dm_inline_attr( -+ xfs_mount_t *mp, -+ xfs_dinode_t *dip, -+ char *attr_name, -+ caddr_t attr_buf, -+ int *value_lenp) -+{ -+ if (dip->di_aformat == XFS_DINODE_FMT_LOCAL) { -+ xfs_attr_shortform_t *sf; -+ xfs_attr_sf_entry_t *sfe; -+ unsigned int namelen = strlen(attr_name); -+ unsigned int valuelen = *value_lenp; -+ int i; -+ -+ sf = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); -+ sfe = &sf->list[0]; -+ for (i = 0; i < sf->hdr.count; -+ sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { -+ if (sfe->namelen != namelen) -+ continue; -+ if (!(sfe->flags & XFS_ATTR_ROOT)) -+ continue; -+ if (memcmp(attr_name, sfe->nameval, namelen) != 0) -+ continue; -+ if (valuelen < sfe->valuelen) -+ return ERANGE; -+ valuelen = sfe->valuelen; -+ memcpy(attr_buf, &sfe->nameval[namelen], valuelen); -+ *value_lenp = valuelen; -+ return 0; -+ } -+ } -+ *value_lenp = 0; -+ return ENOATTR; -+} -+ -+STATIC void -+dm_dip_to_handle( -+ xfs_ino_t ino, -+ xfs_dinode_t *dip, -+ dm_fsid_t *fsid, -+ dm_handle_t *handlep) -+{ -+ dm_fid_t fid; -+ int hsize; -+ -+ fid.dm_fid_len = sizeof(struct dm_fid) - sizeof(fid.dm_fid_len); -+ fid.dm_fid_pad = 0; -+ fid.dm_fid_ino = ino; -+ fid.dm_fid_gen = be32_to_cpu(dip->di_gen); -+ -+ memcpy(&handlep->ha_fsid, fsid, sizeof(*fsid)); -+ memcpy(&handlep->ha_fid, &fid, fid.dm_fid_len + sizeof(fid.dm_fid_len)); -+ hsize = DM_HSIZE(*handlep); -+ memset((char *)handlep + hsize, 0, sizeof(*handlep) - hsize); -+} -+ -+STATIC int -+xfs_dm_bulkall_inline_one( -+ xfs_mount_t *mp, -+ xfs_ino_t ino, -+ xfs_dinode_t *dip, -+ dm_fsid_t *fsid, -+ int *value_lenp, -+ dm_xstat_t *xbuf, -+ u_int *xstat_szp, -+ char *attr_name, -+ caddr_t attr_buf) -+{ -+ dm_handle_t handle; -+ u_int xstat_sz = *xstat_szp; -+ int value_len = *value_lenp; -+ int error; -+ -+ if 
(dip->di_mode == 0) -+ return ENOENT; -+ -+ xfs_dip_to_stat(mp, ino, dip, &xbuf->dx_statinfo); -+ dm_dip_to_handle(ino, dip, fsid, &handle); -+ xfs_dm_handle_to_xstat(xbuf, xstat_sz, &handle, sizeof(handle)); -+ -+ memset(&xbuf->dx_attrdata, 0, sizeof(dm_vardata_t)); -+ error = xfs_dm_inline_attr(mp, dip, attr_name, attr_buf, &value_len); -+ DM_EA_XLATE_ERR(error); -+ if (error && (error != ENOATTR)) { -+ if (error == E2BIG) -+ error = ENOMEM; -+ return error; -+ } -+ -+ /* How much space was in the attr? */ -+ if (error != ENOATTR) { -+ xbuf->dx_attrdata.vd_offset = xstat_sz; -+ xbuf->dx_attrdata.vd_length = value_len; -+ xstat_sz += (value_len+(DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1); -+ } -+ *xstat_szp = xbuf->dx_statinfo._link = xstat_sz; -+ *value_lenp = value_len; -+ return 0; -+} -+ -+/* -+ * This is used by dm_get_bulkall(). -+ * Given a inumber, it igets the inode and fills the given buffer -+ * with the dm_xstat structure for the file. -+ */ -+STATIC int -+xfs_dm_bulkall_one( -+ xfs_mount_t *mp, /* mount point for filesystem */ -+ xfs_ino_t ino, /* inode number to get data for */ -+ void __user *buffer, /* buffer to place output in */ -+ int ubsize, /* size of buffer */ -+ void *private_data, /* my private data */ -+ xfs_daddr_t bno, /* starting block of inode cluster */ -+ int *ubused, /* amount of buffer we used */ -+ void *dibuff, /* on-disk inode buffer */ -+ int *res) /* bulkstat result code */ -+{ -+ dm_xstat_t *xbuf; -+ u_int xstat_sz; -+ int error; -+ int value_len; -+ int kern_buf_sz; -+ int attr_buf_sz; -+ caddr_t attr_buf; -+ void __user *attr_user_buf; -+ dm_bulkstat_one_t *dmb = (dm_bulkstat_one_t*)private_data; -+ -+ /* Returns positive errors to XFS */ -+ -+ *res = BULKSTAT_RV_NOTHING; -+ -+ if (!buffer || xfs_internal_inum(mp, ino)) -+ return EINVAL; -+ -+ xstat_sz = DM_STAT_SIZE(*xbuf, 0); -+ xstat_sz = (xstat_sz + (DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1); -+ if (xstat_sz > ubsize) -+ return ENOMEM; -+ -+ kern_buf_sz = xstat_sz; -+ xbuf = kmem_alloc(kern_buf_sz, KM_SLEEP); -+ -+ /* Determine place to drop attr value, and available space. */ -+ value_len = ubsize - xstat_sz; -+ if (value_len > ATTR_MAX_VALUELEN) -+ value_len = ATTR_MAX_VALUELEN; -+ -+ attr_user_buf = buffer + xstat_sz; -+ attr_buf_sz = value_len; -+ attr_buf = kmem_alloc(attr_buf_sz, KM_SLEEP); -+ -+ if (!dibuff) -+ error = xfs_dm_bulkall_iget_one(mp, ino, bno, -+ &value_len, xbuf, &xstat_sz, -+ dmb->attrname.dan_chars, -+ attr_buf); -+ else -+ error = xfs_dm_bulkall_inline_one(mp, ino, -+ (xfs_dinode_t *)dibuff, -+ &dmb->fsid, -+ &value_len, xbuf, &xstat_sz, -+ dmb->attrname.dan_chars, -+ attr_buf); -+ if (error) -+ goto out_free_buffers; -+ -+ if (copy_to_user(buffer, xbuf, kern_buf_sz)) { -+ error = EFAULT; -+ goto out_free_buffers; -+ } -+ if (copy_to_user(attr_user_buf, attr_buf, value_len)) { -+ error = EFAULT; -+ goto out_free_buffers; -+ } -+ -+ kmem_free(attr_buf); -+ kmem_free(xbuf); -+ -+ *res = BULKSTAT_RV_DIDONE; -+ if (ubused) -+ *ubused = xstat_sz; -+ dmb->laststruct = buffer; -+ return 0; -+ -+ out_free_buffers: -+ kmem_free(attr_buf); -+ kmem_free(xbuf); -+ return error; -+} -+ -+/* -+ * Take the handle and put it at the end of a dm_stat buffer. -+ * dt_compname is unused in bulkstat - so we zero it out. -+ * Finally, update link in dm_stat_t to point to next struct. 
-+ */ -+STATIC void -+xfs_dm_handle_to_stat( -+ dm_stat_t *sbuf, -+ size_t stat_sz, -+ dm_handle_t *handle, -+ size_t handle_sz) -+{ -+ memcpy(sbuf + 1, handle, handle_sz); -+ sbuf->dt_handle.vd_offset = (ssize_t) sizeof(dm_stat_t); -+ sbuf->dt_handle.vd_length = (size_t) DM_HSIZE(*handle); -+ memset(&sbuf->dt_compname, 0, sizeof(dm_vardata_t)); -+ sbuf->_link = stat_sz; -+} -+ -+STATIC int -+xfs_dm_bulkattr_iget_one( -+ xfs_mount_t *mp, -+ xfs_ino_t ino, -+ xfs_daddr_t bno, -+ dm_stat_t *sbuf, -+ u_int stat_sz) -+{ -+ xfs_inode_t *ip; -+ dm_handle_t handle; -+ int error; -+ -+ error = xfs_iget(mp, NULL, ino, -+ XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno); -+ if (error) -+ return error; -+ -+ xfs_ip_to_stat(mp, ino, ip, sbuf); -+ dm_ip_to_handle(&ip->i_vnode, &handle); -+ xfs_dm_handle_to_stat(sbuf, stat_sz, &handle, sizeof(handle)); -+ -+ xfs_iput(ip, XFS_ILOCK_SHARED); -+ return 0; -+} -+ -+STATIC int -+xfs_dm_bulkattr_inline_one( -+ xfs_mount_t *mp, -+ xfs_ino_t ino, -+ xfs_dinode_t *dip, -+ dm_fsid_t *fsid, -+ dm_stat_t *sbuf, -+ u_int stat_sz) -+{ -+ dm_handle_t handle; -+ -+ if (dip->di_mode == 0) -+ return ENOENT; -+ xfs_dip_to_stat(mp, ino, dip, sbuf); -+ dm_dip_to_handle(ino, dip, fsid, &handle); -+ xfs_dm_handle_to_stat(sbuf, stat_sz, &handle, sizeof(handle)); -+ return 0; -+} -+ -+/* -+ * This is used by dm_get_bulkattr(). -+ * Given a inumber, it igets the inode and fills the given buffer -+ * with the dm_stat structure for the file. -+ */ -+STATIC int -+xfs_dm_bulkattr_one( -+ xfs_mount_t *mp, /* mount point for filesystem */ -+ xfs_ino_t ino, /* inode number to get data for */ -+ void __user *buffer, /* buffer to place output in */ -+ int ubsize, /* size of buffer */ -+ void *private_data, /* my private data */ -+ xfs_daddr_t bno, /* starting block of inode cluster */ -+ int *ubused, /* amount of buffer we used */ -+ void *dibuff, /* on-disk inode buffer */ -+ int *res) /* bulkstat result code */ -+{ -+ dm_stat_t *sbuf; -+ u_int stat_sz; -+ int error; -+ dm_bulkstat_one_t *dmb = (dm_bulkstat_one_t*)private_data; -+ -+ /* Returns positive errors to XFS */ -+ -+ *res = BULKSTAT_RV_NOTHING; -+ -+ if (!buffer || xfs_internal_inum(mp, ino)) -+ return EINVAL; -+ -+ stat_sz = DM_STAT_SIZE(*sbuf, 0); -+ stat_sz = (stat_sz+(DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1); -+ if (stat_sz > ubsize) -+ return ENOMEM; -+ -+ sbuf = kmem_alloc(stat_sz, KM_SLEEP); -+ -+ if (!dibuff) -+ error = xfs_dm_bulkattr_iget_one(mp, ino, bno, sbuf, stat_sz); -+ else -+ error = xfs_dm_bulkattr_inline_one(mp, ino, -+ (xfs_dinode_t *)dibuff, -+ &dmb->fsid, sbuf, stat_sz); -+ if (error) -+ goto out_free_buffer; -+ -+ if (copy_to_user(buffer, sbuf, stat_sz)) { -+ error = EFAULT; -+ goto out_free_buffer; -+ } -+ -+ kmem_free(sbuf); -+ *res = BULKSTAT_RV_DIDONE; -+ if (ubused) -+ *ubused = stat_sz; -+ dmb->laststruct = buffer; -+ return 0; -+ -+ out_free_buffer: -+ kmem_free(sbuf); -+ return error; -+} -+ -+/* xfs_dm_f_get_eventlist - return the dm_eventset_t mask for inode ip. */ -+ -+STATIC int -+xfs_dm_f_get_eventlist( -+ xfs_inode_t *ip, -+ dm_right_t right, -+ u_int nelem, -+ dm_eventset_t *eventsetp, /* in kernel space! */ -+ u_int *nelemp) /* in kernel space! */ -+{ -+ dm_eventset_t eventset; -+ -+ if (right < DM_RIGHT_SHARED) -+ return(EACCES); -+ -+ /* Note that we MUST return a regular file's managed region bits as -+ part of the mask because dm_get_eventlist is supposed to return the -+ union of all managed region flags in those bits. 
Since we only
-+	   support one region, we can just return the bits as they are. For
-+	   all other object types, the bits will already be zero. Handy, huh?
-+	*/
-+
-+	eventset = ip->i_d.di_dmevmask;
-+
-+	/* Now copy the event mask and event count back to the caller. We
-+	   return the lesser of nelem and DM_EVENT_MAX.
-+	*/
-+
-+	if (nelem > DM_EVENT_MAX)
-+		nelem = DM_EVENT_MAX;
-+	eventset &= (1 << nelem) - 1;
-+
-+	*eventsetp = eventset;
-+	*nelemp = nelem;
-+	return(0);
-+}
-+
-+
-+/* xfs_dm_f_set_eventlist - update the dm_eventset_t mask in the inode vp. Only the
-+   bits from zero to maxevent-1 are being replaced; higher bits are preserved.
-+*/
-+
-+STATIC int
-+xfs_dm_f_set_eventlist(
-+	xfs_inode_t	*ip,
-+	dm_right_t	right,
-+	dm_eventset_t	*eventsetp,	/* in kernel space! */
-+	u_int		maxevent)
-+{
-+	dm_eventset_t	eventset;
-+	dm_eventset_t	max_mask;
-+	dm_eventset_t	valid_events;
-+	xfs_trans_t	*tp;
-+	xfs_mount_t	*mp;
-+	int		error;
-+
-+	if (right < DM_RIGHT_EXCL)
-+		return(EACCES);
-+
-+	eventset = *eventsetp;
-+	if (maxevent >= sizeof(ip->i_d.di_dmevmask) * NBBY)
-+		return(EINVAL);
-+	max_mask = (1 << maxevent) - 1;
-+
-+	if (S_ISDIR(ip->i_d.di_mode)) {
-+		valid_events = DM_XFS_VALID_DIRECTORY_EVENTS;
-+	} else {	/* file or symlink */
-+		valid_events = DM_XFS_VALID_FILE_EVENTS;
-+	}
-+	if ((eventset & max_mask) & ~valid_events)
-+		return(EINVAL);
-+
-+	/* Adjust the event mask so that the managed region bits will not
-+	   be altered.
-+	*/
-+
-+	max_mask &= ~(1 << DM_EVENT_READ);
-+	max_mask &= ~(1 << DM_EVENT_WRITE);
-+	max_mask &= ~(1 << DM_EVENT_TRUNCATE);
-+
-+	mp = ip->i_mount;
-+	tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
-+	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
-+	if (error) {
-+		xfs_trans_cancel(tp, 0);
-+		return(error);
-+	}
-+	xfs_ilock(ip, XFS_ILOCK_EXCL);
-+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-+
-+	ip->i_d.di_dmevmask = (eventset & max_mask) | (ip->i_d.di_dmevmask & ~max_mask);
-+
-+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-+	igrab(&ip->i_vnode);
-+	xfs_trans_commit(tp, 0);
-+
-+	return(0);
-+}
-+
-+
-+/* xfs_dm_fs_get_eventlist - return the dm_eventset_t mask for filesystem vfsp. */
-+
-+STATIC int
-+xfs_dm_fs_get_eventlist(
-+	xfs_mount_t	*mp,
-+	dm_right_t	right,
-+	u_int		nelem,
-+	dm_eventset_t	*eventsetp,	/* in kernel space! */
-+	u_int		*nelemp)	/* in kernel space! */
-+{
-+	dm_eventset_t	eventset;
-+
-+	if (right < DM_RIGHT_SHARED)
-+		return(EACCES);
-+
-+	eventset = mp->m_dmevmask;
-+
-+	/* Now copy the event mask and event count back to the caller. We
-+	   return the lesser of nelem and DM_EVENT_MAX.
-+	*/
-+
-+	if (nelem > DM_EVENT_MAX)
-+		nelem = DM_EVENT_MAX;
-+	eventset &= (1 << nelem) - 1;
-+
-+	*eventsetp = eventset;
-+	*nelemp = nelem;
-+	return(0);
-+}
-+
-+
-+/* xfs_dm_fs_set_eventlist - update the dm_eventset_t mask in the mount structure for
-+   filesystem vfsp. Only the bits from zero to maxevent-1 are being replaced;
-+   higher bits are preserved.
-+*/
-+
-+STATIC int
-+xfs_dm_fs_set_eventlist(
-+	xfs_mount_t	*mp,
-+	dm_right_t	right,
-+	dm_eventset_t	*eventsetp,	/* in kernel space! */
-+	u_int		maxevent)
-+{
-+	dm_eventset_t	eventset;
-+	dm_eventset_t	max_mask;
-+
-+	if (right < DM_RIGHT_EXCL)
-+		return(EACCES);
-+
-+	eventset = *eventsetp;
-+
-+	if (maxevent >= sizeof(mp->m_dmevmask) * NBBY)
-+		return(EINVAL);
-+	max_mask = (1 << maxevent) - 1;
-+
-+	if ((eventset & max_mask) & ~DM_XFS_VALID_FS_EVENTS)
-+		return(EINVAL);
-+
-+	mp->m_dmevmask = (eventset & max_mask) | (mp->m_dmevmask & ~max_mask);
-+	return(0);
-+}
-+
-+
-+/* Code in this routine must exactly match the logic in xfs_diordwr() in
-+   order for this to work!
-+*/ -+ -+STATIC int -+xfs_dm_direct_ok( -+ xfs_inode_t *ip, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp) -+{ -+ xfs_mount_t *mp; -+ -+ mp = ip->i_mount; -+ -+ /* Realtime files can ONLY do direct I/O. */ -+ -+ if (XFS_IS_REALTIME_INODE(ip)) -+ return(1); -+ -+ /* If direct I/O is disabled, or if the request is too small, use -+ buffered I/O. -+ */ -+ -+ if (!dm_min_dio_xfer || len < dm_min_dio_xfer) -+ return(0); -+ -+#if 0 -+ /* If the request is not well-formed or is too large, use -+ buffered I/O. -+ */ -+ -+ if ((__psint_t)bufp & scache_linemask) /* if buffer not aligned */ -+ return(0); -+ if (off & mp->m_blockmask) /* if file offset not aligned */ -+ return(0); -+ if (len & mp->m_blockmask) /* if xfer length not aligned */ -+ return(0); -+ if (len > ctooff(v.v_maxdmasz - 1)) /* if transfer too large */ -+ return(0); -+ -+ /* A valid direct I/O candidate. */ -+ -+ return(1); -+#else -+ return(0); -+#endif -+} -+ -+ -+/* We need to be able to select various combinations of O_NONBLOCK, -+ O_DIRECT, and O_SYNC, yet we don't have a file descriptor and we don't have -+ the file's pathname. All we have is a handle. -+*/ -+ -+STATIC int -+xfs_dm_rdwr( -+ struct inode *inode, -+ uint fflag, -+ mode_t fmode, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp) -+{ -+ const struct cred *cred = current_cred(); -+ xfs_inode_t *ip = XFS_I(inode); -+ int error; -+ int oflags; -+ ssize_t xfer; -+ struct file *file; -+ struct dentry *dentry; -+ -+ if ((off < 0) || (off > i_size_read(inode)) || !S_ISREG(inode->i_mode)) -+ return EINVAL; -+ -+ if (fmode & FMODE_READ) { -+ oflags = O_RDONLY; -+ } else { -+ oflags = O_WRONLY; -+ } -+ -+ /* -+ * Build file descriptor flags and I/O flags. O_NONBLOCK is needed so -+ * that we don't block on mandatory file locks. This is an invisible IO, -+ * don't change the atime. 
-+ */ -+ -+ oflags |= O_LARGEFILE | O_NONBLOCK | O_NOATIME; -+ if (xfs_dm_direct_ok(ip, off, len, bufp)) -+ oflags |= O_DIRECT; -+ -+ if (fflag & O_SYNC) -+ oflags |= O_SYNC; -+ -+ if (inode->i_fop == NULL) { -+ /* no iput; caller did get, and will do put */ -+ return EINVAL; -+ } -+ -+ igrab(inode); -+ -+ dentry = d_obtain_alias(inode); -+ if (dentry == NULL) { -+ iput(inode); -+ return ENOMEM; -+ } -+ -+ file = dentry_open(dentry, mntget(ip->i_mount->m_vfsmount), oflags, -+ cred); -+ if (IS_ERR(file)) { -+ return -PTR_ERR(file); -+ } -+ file->f_mode |= FMODE_NOCMTIME; -+ -+ if (fmode & FMODE_READ) { -+ xfer = file->f_op->read(file, bufp, len, (loff_t*)&off); -+ } else { -+ xfer = file->f_op->write(file, bufp, len, (loff_t*)&off); -+ } -+ -+ if (xfer >= 0) { -+ *rvp = xfer; -+ error = 0; -+ } else { -+ /* xfs_read/xfs_write return negative error--flip it */ -+ error = -(int)xfer; -+ } -+ -+ fput(file); -+ return error; -+} -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_clear_inherit( -+ struct inode *inode, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep) -+{ -+ return(-ENOSYS); /* Return negative error to DMAPI */ -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_create_by_handle( -+ struct inode *inode, -+ dm_right_t right, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname) -+{ -+ return(-ENOSYS); /* Return negative error to DMAPI */ -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_downgrade_right( -+ struct inode *inode, -+ dm_right_t right, -+ u_int type) /* DM_FSYS_OBJ or zero */ -+{ -+#ifdef DEBUG_RIGHTS -+ char buffer[sizeof(dm_handle_t) * 2 + 1]; -+ -+ if (!xfs_vp_to_hexhandle(inode, type, buffer)) { -+ printf("dm_downgrade_right: old %d new %d type %d handle %s\n", -+ right, DM_RIGHT_SHARED, type, buffer); -+ } else { -+ printf("dm_downgrade_right: old %d new %d type %d handle " -+ "\n", right, DM_RIGHT_SHARED, type); -+ } -+#endif /* DEBUG_RIGHTS */ -+ return(0); -+} -+ -+ -+/* Note: xfs_dm_get_allocinfo() makes no attempt to coalesce two adjacent -+ extents when both are of type DM_EXTENT_RES; this is left to the caller. -+ XFS guarantees that there will never be two adjacent DM_EXTENT_HOLE extents. -+ -+ In order to provide the caller with all extents in a file including -+ those beyond the file's last byte offset, we have to use the xfs_bmapi() -+ interface. -+*/ -+ -+STATIC int -+xfs_dm_get_allocinfo_rvp( -+ struct inode *inode, -+ dm_right_t right, -+ dm_off_t __user *offp, -+ u_int nelem, -+ dm_extent_t __user *extentp, -+ u_int __user *nelemp, -+ int *rvp) -+{ -+ xfs_inode_t *ip = XFS_I(inode); -+ xfs_mount_t *mp; /* file system mount point */ -+ xfs_fileoff_t fsb_offset; -+ xfs_filblks_t fsb_length; -+ dm_off_t startoff; -+ int elem; -+ xfs_bmbt_irec_t *bmp = NULL; -+ u_int bmpcnt = 50; -+ u_int bmpsz = sizeof(xfs_bmbt_irec_t) * bmpcnt; -+ int error = 0; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ if ((inode->i_mode & S_IFMT) != S_IFREG) -+ return(-EINVAL); -+ -+ if (copy_from_user( &startoff, offp, sizeof(startoff))) -+ return(-EFAULT); -+ -+ mp = ip->i_mount; -+ ASSERT(mp); -+ -+ if (startoff > XFS_MAXIOFFSET(mp)) -+ return(-EINVAL); -+ -+ if (nelem == 0) -+ return(-EINVAL); -+ -+ /* Convert the caller's starting offset into filesystem allocation -+ units as required by xfs_bmapi(). Round the offset down so that -+ it is sure to be included in the reply. 
-+ */ -+ -+ fsb_offset = XFS_B_TO_FSBT(mp, startoff); -+ fsb_length = XFS_B_TO_FSB(mp, XFS_MAXIOFFSET(mp)) - fsb_offset; -+ elem = 0; -+ -+ if (fsb_length) -+ bmp = kmem_alloc(bmpsz, KM_SLEEP); -+ -+ while (fsb_length && elem < nelem) { -+ dm_extent_t extent; -+ xfs_filblks_t fsb_bias; -+ dm_size_t bias; -+ int lock; -+ int num; -+ int i; -+ -+ /* Compute how many getbmap structures to use on the xfs_bmapi -+ call. -+ */ -+ -+ num = MIN((u_int)(nelem - elem), bmpcnt); -+ -+ xfs_ilock(ip, XFS_IOLOCK_SHARED); -+ lock = xfs_ilock_map_shared(ip); -+ -+ error = xfs_bmapi(NULL, ip, fsb_offset, fsb_length, -+ XFS_BMAPI_ENTIRE, NULL, 0, bmp, &num, NULL, NULL); -+ -+ xfs_iunlock_map_shared(ip, lock); -+ xfs_iunlock(ip, XFS_IOLOCK_SHARED); -+ -+ if (error) { -+ error = -error; /* Return negative error to DMAPI */ -+ goto finish_out; -+ } -+ -+ /* Fill in the caller's extents, adjusting the bias in the -+ first entry if necessary. -+ */ -+ -+ for (i = 0; i < num; i++, extentp++) { -+ bias = startoff - XFS_FSB_TO_B(mp, bmp[i].br_startoff); -+ extent.ex_offset = startoff; -+ extent.ex_length = -+ XFS_FSB_TO_B(mp, bmp[i].br_blockcount) - bias; -+ if (bmp[i].br_startblock == HOLESTARTBLOCK) { -+ extent.ex_type = DM_EXTENT_HOLE; -+ } else { -+ extent.ex_type = DM_EXTENT_RES; -+ } -+ startoff = extent.ex_offset + extent.ex_length; -+ -+ if (copy_to_user( extentp, &extent, sizeof(extent))) { -+ error = -EFAULT; -+ goto finish_out; -+ } -+ -+ fsb_bias = fsb_offset - bmp[i].br_startoff; -+ fsb_offset += bmp[i].br_blockcount - fsb_bias; -+ fsb_length -= bmp[i].br_blockcount - fsb_bias; -+ elem++; -+ } -+ } -+ -+ if (fsb_length == 0) { -+ startoff = 0; -+ } -+ if (copy_to_user( offp, &startoff, sizeof(startoff))) { -+ error = -EFAULT; -+ goto finish_out; -+ } -+ -+ if (copy_to_user( nelemp, &elem, sizeof(elem))) { -+ error = -EFAULT; -+ goto finish_out; -+ } -+ -+ *rvp = (fsb_length == 0 ? 0 : 1); -+ -+finish_out: -+ if (bmp) -+ kmem_free(bmp); -+ return(error); -+} -+ -+ -+STATIC int -+xfs_dm_zero_xstatinfo_link( -+ dm_xstat_t __user *dxs) -+{ -+ dm_xstat_t *ldxs; -+ int error = 0; -+ -+ if (!dxs) -+ return 0; -+ ldxs = kmalloc(sizeof(*ldxs), GFP_KERNEL); -+ if (!ldxs) -+ return -ENOMEM; -+ if (copy_from_user(ldxs, dxs, sizeof(*dxs))) { -+ error = -EFAULT; -+ } else { -+ ldxs->dx_statinfo._link = 0; -+ if (copy_to_user(dxs, ldxs, sizeof(*dxs))) -+ error = -EFAULT; -+ } -+ kfree(ldxs); -+ return error; -+} -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_get_bulkall_rvp( -+ struct inode *inode, -+ dm_right_t right, -+ u_int mask, -+ dm_attrname_t __user *attrnamep, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, /* address of buffer in user space */ -+ size_t __user *rlenp, /* user space address */ -+ int *rvalp) -+{ -+ int error, done; -+ int nelems; -+ u_int statstruct_sz; -+ dm_attrloc_t loc; -+ xfs_mount_t *mp = XFS_I(inode)->i_mount; -+ dm_attrname_t attrname; -+ dm_bulkstat_one_t dmb; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (copy_from_user(&attrname, attrnamep, sizeof(attrname)) || -+ copy_from_user(&loc, locp, sizeof(loc))) -+ return -EFAULT; -+ -+ if (attrname.an_chars[0] == '\0') -+ return(-EINVAL); -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ /* Because we will write directly to the user's buffer, make sure that -+ the buffer is properly aligned. -+ */ -+ -+ if (((unsigned long)bufp & (DM_STAT_ALIGN - 1)) != 0) -+ return(-EFAULT); -+ -+ /* Size of the handle is constant for this function. 
-+ * If there are no files with attributes, then this will be the -+ * maximum number of inodes we can get. -+ */ -+ -+ statstruct_sz = DM_STAT_SIZE(dm_xstat_t, 0); -+ statstruct_sz = (statstruct_sz+(DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1); -+ -+ nelems = buflen / statstruct_sz; -+ if (nelems < 1) { -+ if (put_user( statstruct_sz, rlenp )) -+ return(-EFAULT); -+ return(-E2BIG); -+ } -+ -+ /* Build the on-disk version of the attribute name. */ -+ strcpy(dmb.attrname.dan_chars, dmattr_prefix); -+ strncpy(&dmb.attrname.dan_chars[DMATTR_PREFIXLEN], -+ attrname.an_chars, DM_ATTR_NAME_SIZE + 1); -+ dmb.attrname.dan_chars[sizeof(dmb.attrname.dan_chars) - 1] = '\0'; -+ -+ /* -+ * fill the buffer with dm_xstat_t's -+ */ -+ -+ dmb.laststruct = NULL; -+ memcpy(&dmb.fsid, mp->m_fixedfsid, sizeof(dm_fsid_t)); -+ error = xfs_bulkstat(mp, (xfs_ino_t *)&loc, &nelems, -+ xfs_dm_bulkall_one, (void*)&dmb, statstruct_sz, -+ bufp, BULKSTAT_FG_INLINE, &done); -+ if (error) -+ return(-error); /* Return negative error to DMAPI */ -+ -+ *rvalp = !done ? 1 : 0; -+ -+ if (put_user( statstruct_sz * nelems, rlenp )) -+ return(-EFAULT); -+ -+ if (copy_to_user( locp, &loc, sizeof(loc))) -+ return(-EFAULT); -+ /* -+ * If we didn't do any, we must not have any more to do. -+ */ -+ if (nelems < 1) -+ return(0); -+ /* -+ * Set _link in the last struct to zero -+ */ -+ return xfs_dm_zero_xstatinfo_link((dm_xstat_t __user *)dmb.laststruct); -+} -+ -+ -+STATIC int -+xfs_dm_zero_statinfo_link( -+ dm_stat_t __user *dxs) -+{ -+ dm_stat_t *ldxs; -+ int error = 0; -+ -+ if (!dxs) -+ return 0; -+ ldxs = kmalloc(sizeof(*ldxs), GFP_KERNEL); -+ if (!ldxs) -+ return -ENOMEM; -+ if (copy_from_user(ldxs, dxs, sizeof(*dxs))) { -+ error = -EFAULT; -+ } else { -+ ldxs->_link = 0; -+ if (copy_to_user(dxs, ldxs, sizeof(*dxs))) -+ error = -EFAULT; -+ } -+ kfree(ldxs); -+ return error; -+} -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_get_bulkattr_rvp( -+ struct inode *inode, -+ dm_right_t right, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvalp) -+{ -+ int error, done; -+ int nelems; -+ u_int statstruct_sz; -+ dm_attrloc_t loc; -+ xfs_mount_t *mp = XFS_I(inode)->i_mount; -+ dm_bulkstat_one_t dmb; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ if (copy_from_user( &loc, locp, sizeof(loc))) -+ return(-EFAULT); -+ -+ /* Because we will write directly to the user's buffer, make sure that -+ the buffer is properly aligned. -+ */ -+ -+ if (((unsigned long)bufp & (DM_STAT_ALIGN - 1)) != 0) -+ return(-EFAULT); -+ -+ /* size of the handle is constant for this function */ -+ -+ statstruct_sz = DM_STAT_SIZE(dm_stat_t, 0); -+ statstruct_sz = (statstruct_sz+(DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1); -+ -+ nelems = buflen / statstruct_sz; -+ if (nelems < 1) { -+ if (put_user( statstruct_sz, rlenp )) -+ return(-EFAULT); -+ return(-E2BIG); -+ } -+ -+ dmb.laststruct = NULL; -+ memcpy(&dmb.fsid, mp->m_fixedfsid, sizeof(dm_fsid_t)); -+ error = xfs_bulkstat(mp, (xfs_ino_t *)&loc, &nelems, -+ xfs_dm_bulkattr_one, (void*)&dmb, -+ statstruct_sz, bufp, BULKSTAT_FG_INLINE, &done); -+ if (error) -+ return(-error); /* Return negative error to DMAPI */ -+ -+ *rvalp = !done ? 1 : 0; -+ -+ if (put_user( statstruct_sz * nelems, rlenp )) -+ return(-EFAULT); -+ -+ if (copy_to_user( locp, &loc, sizeof(loc))) -+ return(-EFAULT); -+ -+ /* -+ * If we didn't do any, we must not have any more to do. 
-+ */ -+ if (nelems < 1) -+ return(0); -+ /* -+ * Set _link in the last struct to zero -+ */ -+ return xfs_dm_zero_statinfo_link((dm_stat_t __user *)dmb.laststruct); -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_get_config( -+ struct inode *inode, -+ dm_right_t right, -+ dm_config_t flagname, -+ dm_size_t __user *retvalp) -+{ -+ dm_size_t retval; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ switch (flagname) { -+ case DM_CONFIG_DTIME_OVERLOAD: -+ case DM_CONFIG_PERS_ATTRIBUTES: -+ case DM_CONFIG_PERS_EVENTS: -+ case DM_CONFIG_PERS_MANAGED_REGIONS: -+ case DM_CONFIG_PUNCH_HOLE: -+ case DM_CONFIG_WILL_RETRY: -+ retval = DM_TRUE; -+ break; -+ -+ case DM_CONFIG_CREATE_BY_HANDLE: /* these will never be done */ -+ case DM_CONFIG_LOCK_UPGRADE: -+ case DM_CONFIG_PERS_INHERIT_ATTRIBS: -+ retval = DM_FALSE; -+ break; -+ -+ case DM_CONFIG_BULKALL: -+ retval = DM_TRUE; -+ break; -+ case DM_CONFIG_MAX_ATTR_ON_DESTROY: -+ retval = DM_MAX_ATTR_BYTES_ON_DESTROY; -+ break; -+ -+ case DM_CONFIG_MAX_ATTRIBUTE_SIZE: -+ retval = ATTR_MAX_VALUELEN; -+ break; -+ -+ case DM_CONFIG_MAX_HANDLE_SIZE: -+ retval = DM_MAX_HANDLE_SIZE; -+ break; -+ -+ case DM_CONFIG_MAX_MANAGED_REGIONS: -+ retval = 1; -+ break; -+ -+ case DM_CONFIG_TOTAL_ATTRIBUTE_SPACE: -+ retval = 0x7fffffff; /* actually it's unlimited */ -+ break; -+ -+ default: -+ return(-EINVAL); -+ } -+ -+ /* Copy the results back to the user. */ -+ -+ if (copy_to_user( retvalp, &retval, sizeof(retval))) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_get_config_events( -+ struct inode *inode, -+ dm_right_t right, -+ u_int nelem, -+ dm_eventset_t __user *eventsetp, -+ u_int __user *nelemp) -+{ -+ dm_eventset_t eventset; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (nelem == 0) -+ return(-EINVAL); -+ -+ eventset = DM_XFS_SUPPORTED_EVENTS; -+ -+ /* Now copy the event mask and event count back to the caller. We -+ return the lesser of nelem and DM_EVENT_MAX. -+ */ -+ -+ if (nelem > DM_EVENT_MAX) -+ nelem = DM_EVENT_MAX; -+ eventset &= (1 << nelem) - 1; -+ -+ if (copy_to_user( eventsetp, &eventset, sizeof(eventset))) -+ return(-EFAULT); -+ -+ if (put_user(nelem, nelemp)) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_get_destroy_dmattr( -+ struct inode *inode, -+ dm_right_t right, -+ dm_attrname_t *attrnamep, -+ char **valuepp, -+ int *vlenp) -+{ -+ dm_dkattrname_t dkattrname; -+ int alloc_size; -+ int value_len; -+ char *value; -+ int error; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ *vlenp = -1; /* assume failure by default */ -+ -+ if (attrnamep->an_chars[0] == '\0') -+ return(-EINVAL); -+ -+ /* Build the on-disk version of the attribute name. */ -+ -+ strcpy(dkattrname.dan_chars, dmattr_prefix); -+ strncpy(&dkattrname.dan_chars[DMATTR_PREFIXLEN], -+ (char *)attrnamep->an_chars, DM_ATTR_NAME_SIZE + 1); -+ dkattrname.dan_chars[sizeof(dkattrname.dan_chars) - 1] = '\0'; -+ -+ /* xfs_attr_get will not return anything if the buffer is too small, -+ and we don't know how big to make the buffer, so this may take -+ two tries to get it right. The initial try must use a buffer of -+ at least XFS_BUG_KLUDGE bytes to prevent buffer overflow because -+ of a bug in XFS. 
-+ */ -+ -+ alloc_size = XFS_BUG_KLUDGE; -+ value = kmalloc(alloc_size, GFP_KERNEL); -+ if (value == NULL) -+ return(-ENOMEM); -+ -+ error = xfs_attr_get(XFS_I(inode), dkattrname.dan_chars, value, -+ &value_len, ATTR_ROOT); -+ if (error == ERANGE) { -+ kfree(value); -+ alloc_size = value_len; -+ value = kmalloc(alloc_size, GFP_KERNEL); -+ if (value == NULL) -+ return(-ENOMEM); -+ -+ error = xfs_attr_get(XFS_I(inode), dkattrname.dan_chars, value, -+ &value_len, ATTR_ROOT); -+ } -+ if (error) { -+ kfree(value); -+ DM_EA_XLATE_ERR(error); -+ return(-error); /* Return negative error to DMAPI */ -+ } -+ -+ /* The attribute exists and has a value. Note that a value_len of -+ zero is valid! -+ */ -+ -+ if (value_len == 0) { -+ kfree(value); -+ *vlenp = 0; -+ return(0); -+ } else if (value_len > DM_MAX_ATTR_BYTES_ON_DESTROY) { -+ char *value2; -+ -+ value2 = kmalloc(DM_MAX_ATTR_BYTES_ON_DESTROY, GFP_KERNEL); -+ if (value2 == NULL) { -+ kfree(value); -+ return(-ENOMEM); -+ } -+ memcpy(value2, value, DM_MAX_ATTR_BYTES_ON_DESTROY); -+ kfree(value); -+ value = value2; -+ value_len = DM_MAX_ATTR_BYTES_ON_DESTROY; -+ } -+ *vlenp = value_len; -+ *valuepp = value; -+ return(0); -+} -+ -+/* This code was taken from xfs_fcntl(F_DIOINFO) and modified slightly because -+ we don't have a flags parameter (no open file). -+ Taken from xfs_ioctl(XFS_IOC_DIOINFO) on Linux. -+*/ -+ -+STATIC int -+xfs_dm_get_dioinfo( -+ struct inode *inode, -+ dm_right_t right, -+ dm_dioinfo_t __user *diop) -+{ -+ dm_dioinfo_t dio; -+ xfs_mount_t *mp; -+ xfs_inode_t *ip = XFS_I(inode); -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ mp = ip->i_mount; -+ -+ dio.d_miniosz = dio.d_mem = MIN_DIO_SIZE(mp); -+ dio.d_maxiosz = MAX_DIO_SIZE(mp); -+ dio.d_dio_only = DM_FALSE; -+ -+ if (copy_to_user(diop, &dio, sizeof(dio))) -+ return(-EFAULT); -+ return(0); -+} -+ -+typedef struct dm_readdir_cb { -+ xfs_mount_t *mp; -+ char __user *ubuf; -+ dm_stat_t __user *lastbuf; -+ size_t spaceleft; -+ size_t nwritten; -+ int error; -+ dm_stat_t kstat; -+} dm_readdir_cb_t; -+ -+STATIC int -+dm_filldir(void *__buf, const char *name, int namelen, loff_t offset, -+ u64 ino, unsigned int d_type) -+{ -+ dm_readdir_cb_t *cb = __buf; -+ dm_stat_t *statp = &cb->kstat; -+ size_t len; -+ int error; -+ int needed; -+ -+ /* -+ * Make sure we have enough space. -+ */ -+ needed = dm_stat_size(namelen + 1); -+ if (cb->spaceleft < needed) { -+ cb->spaceleft = 0; -+ return -ENOSPC; -+ } -+ -+ error = -EINVAL; -+ if (xfs_internal_inum(cb->mp, ino)) -+ goto out_err; -+ -+ memset(statp, 0, dm_stat_size(MAXNAMLEN)); -+ error = -xfs_dm_bulkattr_iget_one(cb->mp, ino, 0, -+ statp, needed); -+ if (error) -+ goto out_err; -+ -+ /* -+ * On return from bulkstat_one(), stap->_link points -+ * at the end of the handle in the stat structure. 
-+ */ -+ statp->dt_compname.vd_offset = statp->_link; -+ statp->dt_compname.vd_length = namelen + 1; -+ -+ len = statp->_link; -+ -+ /* Word-align the record */ -+ statp->_link = dm_stat_align(len + namelen + 1); -+ -+ error = -EFAULT; -+ if (copy_to_user(cb->ubuf, statp, len)) -+ goto out_err; -+ if (copy_to_user(cb->ubuf + len, name, namelen)) -+ goto out_err; -+ if (put_user(0, cb->ubuf + len + namelen)) -+ goto out_err; -+ -+ cb->lastbuf = (dm_stat_t __user *)cb->ubuf; -+ cb->spaceleft -= statp->_link; -+ cb->nwritten += statp->_link; -+ cb->ubuf += statp->_link; -+ -+ return 0; -+ -+ out_err: -+ cb->error = error; -+ return error; -+} -+ -+/* Returns negative errors to DMAPI */ -+STATIC int -+xfs_dm_get_dirattrs_rvp( -+ struct inode *inode, -+ dm_right_t right, -+ u_int mask, -+ dm_attrloc_t __user *locp, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp, -+ int *rvp) -+{ -+ xfs_inode_t *dp = XFS_I(inode); -+ xfs_mount_t *mp = dp->i_mount; -+ dm_readdir_cb_t *cb; -+ dm_attrloc_t loc; -+ int error; -+ -+ if (right < DM_RIGHT_SHARED) -+ return -EACCES; -+ -+ /* -+ * Make sure that the buffer is properly aligned. -+ */ -+ if (((unsigned long)bufp & (DM_STAT_ALIGN - 1)) != 0) -+ return -EFAULT; -+ -+ if (mask & ~(DM_AT_HANDLE|DM_AT_EMASK|DM_AT_PMANR|DM_AT_PATTR| -+ DM_AT_DTIME|DM_AT_CFLAG|DM_AT_STAT)) -+ return -EINVAL; -+ -+ if (!S_ISDIR(inode->i_mode)) -+ return -EINVAL; -+ -+ /* -+ * bufp should be able to fit at least one dm_stat entry including -+ * dt_handle and full size MAXNAMLEN dt_compname. -+ */ -+ if (buflen < dm_stat_size(MAXNAMLEN)) -+ return -ENOMEM; -+ -+ if (copy_from_user(&loc, locp, sizeof(loc))) -+ return -EFAULT; -+ -+ cb = kzalloc(sizeof(*cb) + dm_stat_size(MAXNAMLEN), GFP_KERNEL); -+ if (!cb) -+ return -ENOMEM; -+ -+ cb->mp = mp; -+ cb->spaceleft = buflen; -+ cb->ubuf = bufp; -+ -+ mutex_lock(&inode->i_mutex); -+ error = -ENOENT; -+ if (!IS_DEADDIR(inode)) { -+ error = -xfs_readdir(dp, cb, dp->i_size, -+ (xfs_off_t *)&loc, dm_filldir); -+ } -+ mutex_unlock(&inode->i_mutex); -+ -+ if (error) -+ goto out_kfree; -+ if (cb->error) { -+ error = cb->error; -+ goto out_kfree; -+ } -+ -+ error = -EFAULT; -+ if (cb->lastbuf && put_user(0, &cb->lastbuf->_link)) -+ goto out_kfree; -+ if (put_user(cb->nwritten, rlenp)) -+ goto out_kfree; -+ if (copy_to_user(locp, &loc, sizeof(loc))) -+ goto out_kfree; -+ -+ if (cb->nwritten) -+ *rvp = 1; -+ else -+ *rvp = 0; -+ error = 0; -+ -+ out_kfree: -+ kfree(cb); -+ return error; -+} -+ -+STATIC int -+xfs_dm_get_dmattr( -+ struct inode *inode, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ dm_dkattrname_t name; -+ char *value; -+ int value_len; -+ int alloc_size; -+ int error; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ if ((error = xfs_copyin_attrname(attrnamep, &name)) != 0) -+ return(-error); /* Return negative error to DMAPI */ -+ -+ /* Allocate a buffer to receive the attribute's value. We allocate -+ at least one byte even if the caller specified a buflen of zero. -+ (A buflen of zero is considered valid.) -+ -+ Allocating a minimum of XFS_BUG_KLUDGE bytes temporarily works -+ around a bug within XFS in which in-inode attribute values are not -+ checked to see if they will fit in the buffer before they are -+ copied. 
Since no in-core attribute value can be larger than 256 -+ bytes (an 8-bit size field), we allocate that minimum size here to -+ prevent buffer overrun in both the kernel's and user's buffers. -+ */ -+ -+ alloc_size = buflen; -+ if (alloc_size < XFS_BUG_KLUDGE) -+ alloc_size = XFS_BUG_KLUDGE; -+ if (alloc_size > ATTR_MAX_VALUELEN) -+ alloc_size = ATTR_MAX_VALUELEN; -+ value = kmem_alloc(alloc_size, KM_SLEEP | KM_LARGE); -+ -+ /* Get the attribute's value. */ -+ -+ value_len = alloc_size; /* in/out parameter */ -+ -+ error = xfs_attr_get(XFS_I(inode), name.dan_chars, value, &value_len, -+ ATTR_ROOT); -+ DM_EA_XLATE_ERR(error); -+ -+ /* DMAPI requires an errno of ENOENT if an attribute does not exist, -+ so remap ENOATTR here. -+ */ -+ -+ if (error == ENOATTR) -+ error = ENOENT; -+ if (!error && value_len > buflen) -+ error = E2BIG; -+ if (!error && copy_to_user(bufp, value, value_len)) -+ error = EFAULT; -+ if (!error || error == E2BIG) { -+ if (put_user(value_len, rlenp)) -+ error = EFAULT; -+ } -+ -+ kmem_free(value); -+ return(-error); /* Return negative error to DMAPI */ -+} -+ -+STATIC int -+xfs_dm_get_eventlist( -+ struct inode *inode, -+ dm_right_t right, -+ u_int type, -+ u_int nelem, -+ dm_eventset_t *eventsetp, -+ u_int *nelemp) -+{ -+ int error; -+ xfs_inode_t *ip = XFS_I(inode); -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (type == DM_FSYS_OBJ) { -+ error = xfs_dm_fs_get_eventlist(ip->i_mount, right, nelem, -+ eventsetp, nelemp); -+ } else { -+ error = xfs_dm_f_get_eventlist(ip, right, nelem, -+ eventsetp, nelemp); -+ } -+ return(-error); /* Returns negative error to DMAPI */ -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_get_fileattr( -+ struct inode *inode, -+ dm_right_t right, -+ u_int mask, /* not used; always return everything */ -+ dm_stat_t __user *statp) -+{ -+ dm_stat_t stat; -+ xfs_inode_t *ip = XFS_I(inode); -+ xfs_mount_t *mp; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ /* Find the mount point. */ -+ -+ mp = ip->i_mount; -+ -+ xfs_ilock(ip, XFS_ILOCK_SHARED); -+ xfs_ip_to_stat(mp, ip->i_ino, ip, &stat); -+ xfs_iunlock(ip, XFS_ILOCK_SHARED); -+ -+ if (copy_to_user( statp, &stat, sizeof(stat))) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+/* We currently only support a maximum of one managed region per file, and -+ use the DM_EVENT_READ, DM_EVENT_WRITE, and DM_EVENT_TRUNCATE events in -+ the file's dm_eventset_t event mask to implement the DM_REGION_READ, -+ DM_REGION_WRITE, and DM_REGION_TRUNCATE flags for that single region. -+*/ -+ -+STATIC int -+xfs_dm_get_region( -+ struct inode *inode, -+ dm_right_t right, -+ u_int nelem, -+ dm_region_t __user *regbufp, -+ u_int __user *nelemp) -+{ -+ dm_eventset_t evmask; -+ dm_region_t region; -+ xfs_inode_t *ip = XFS_I(inode); -+ u_int elem; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ evmask = ip->i_d.di_dmevmask; /* read the mask "atomically" */ -+ -+ /* Get the file's current managed region flags out of the -+ dm_eventset_t mask and use them to build a managed region that -+ covers the entire file, i.e. set rg_offset and rg_size to zero. -+ */ -+ -+ memset((char *)®ion, 0, sizeof(region)); -+ -+ if (evmask & (1 << DM_EVENT_READ)) -+ region.rg_flags |= DM_REGION_READ; -+ if (evmask & (1 << DM_EVENT_WRITE)) -+ region.rg_flags |= DM_REGION_WRITE; -+ if (evmask & (1 << DM_EVENT_TRUNCATE)) -+ region.rg_flags |= DM_REGION_TRUNCATE; -+ -+ elem = (region.rg_flags ? 
1 : 0); -+ -+ if (copy_to_user( nelemp, &elem, sizeof(elem))) -+ return(-EFAULT); -+ if (elem > nelem) -+ return(-E2BIG); -+ if (elem && copy_to_user(regbufp, ®ion, sizeof(region))) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+STATIC int -+xfs_dm_getall_dmattr( -+ struct inode *inode, -+ dm_right_t right, -+ size_t buflen, -+ void __user *bufp, -+ size_t __user *rlenp) -+{ -+ attrlist_cursor_kern_t cursor; -+ attrlist_t *attrlist; -+ dm_attrlist_t __user *ulist; -+ int *last_link; -+ int alignment; -+ int total_size; -+ int list_size = 8192; /* should be big enough */ -+ int error; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ /* Verify that the user gave us a buffer that is 4-byte aligned, lock -+ it down, and work directly within that buffer. As a side-effect, -+ values of buflen < sizeof(int) return EINVAL. -+ */ -+ -+ alignment = sizeof(int) - 1; -+ if ((((__psint_t)bufp & alignment) != 0) || -+ !access_ok(VERIFY_WRITE, bufp, buflen)) { -+ return(-EFAULT); -+ } -+ buflen &= ~alignment; /* round down the alignment */ -+ -+ /* Initialize all the structures and variables for the main loop. */ -+ -+ memset(&cursor, 0, sizeof(cursor)); -+ attrlist = (attrlist_t *)kmem_alloc(list_size, KM_SLEEP); -+ total_size = 0; -+ ulist = (dm_attrlist_t *)bufp; -+ last_link = NULL; -+ -+ /* Use vop_attr_list to get the names of DMAPI attributes, and use -+ vop_attr_get to get their values. There is a risk here that the -+ DMAPI attributes could change between the vop_attr_list and -+ vop_attr_get calls. If we can detect it, we return EIO to notify -+ the user. -+ */ -+ -+ do { -+ int i; -+ -+ /* Get a buffer full of attribute names. If there aren't any -+ more or if we encounter an error, then finish up. -+ */ -+ -+ error = xfs_attr_list(XFS_I(inode), (char *)attrlist, list_size, -+ ATTR_ROOT, &cursor); -+ DM_EA_XLATE_ERR(error); -+ -+ if (error || attrlist->al_count == 0) -+ break; -+ -+ for (i = 0; i < attrlist->al_count; i++) { -+ attrlist_ent_t *entry; -+ char *user_name; -+ int size_needed; -+ int value_len; -+ -+ /* Skip over all non-DMAPI attributes. If the -+ attribute name is too long, we assume it is -+ non-DMAPI even if it starts with the correct -+ prefix. -+ */ -+ -+ entry = ATTR_ENTRY(attrlist, i); -+ if (strncmp(entry->a_name, dmattr_prefix, DMATTR_PREFIXLEN)) -+ continue; -+ user_name = &entry->a_name[DMATTR_PREFIXLEN]; -+ if (strlen(user_name) > DM_ATTR_NAME_SIZE) -+ continue; -+ -+ /* We have a valid DMAPI attribute to return. If it -+ won't fit in the user's buffer, we still need to -+ keep track of the number of bytes for the user's -+ next call. -+ */ -+ -+ -+ size_needed = sizeof(*ulist) + entry->a_valuelen; -+ size_needed = (size_needed + alignment) & ~alignment; -+ -+ total_size += size_needed; -+ if (total_size > buflen) -+ continue; -+ -+ /* Start by filling in all the fields in the -+ dm_attrlist_t structure. -+ */ -+ -+ strncpy((char *)ulist->al_name.an_chars, user_name, -+ DM_ATTR_NAME_SIZE); -+ ulist->al_data.vd_offset = sizeof(*ulist); -+ ulist->al_data.vd_length = entry->a_valuelen; -+ ulist->_link = size_needed; -+ last_link = &ulist->_link; -+ -+ /* Next read the attribute's value into its correct -+ location after the dm_attrlist structure. Any sort -+ of error indicates that the data is moving under us, -+ so we return EIO to let the user know. 
-+ */ -+ -+ value_len = entry->a_valuelen; -+ -+ error = xfs_attr_get(XFS_I(inode), entry->a_name, -+ (void *)(ulist + 1), &value_len, -+ ATTR_ROOT); -+ DM_EA_XLATE_ERR(error); -+ -+ if (error || value_len != entry->a_valuelen) { -+ error = EIO; -+ break; -+ } -+ -+ ulist = (dm_attrlist_t *)((char *)ulist + ulist->_link); -+ } -+ } while (!error && attrlist->al_more); -+ if (last_link) -+ *last_link = 0; -+ -+ if (!error && total_size > buflen) -+ error = E2BIG; -+ if (!error || error == E2BIG) { -+ if (put_user(total_size, rlenp)) -+ error = EFAULT; -+ } -+ -+ kmem_free(attrlist); -+ return(-error); /* Return negative error to DMAPI */ -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_getall_inherit( -+ struct inode *inode, -+ dm_right_t right, -+ u_int nelem, -+ dm_inherit_t __user *inheritbufp, -+ u_int __user *nelemp) -+{ -+ return(-ENOSYS); /* Return negative error to DMAPI */ -+} -+ -+ -+/* Initialize location pointer for subsequent dm_get_dirattrs, -+ dm_get_bulkattr, and dm_get_bulkall calls. The same initialization must -+ work for inode-based routines (dm_get_dirattrs) and filesystem-based -+ routines (dm_get_bulkattr and dm_get_bulkall). Filesystem-based functions -+ call this routine using the filesystem's root inode. -+*/ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_init_attrloc( -+ struct inode *inode, -+ dm_right_t right, -+ dm_attrloc_t __user *locp) -+{ -+ dm_attrloc_t loc = 0; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ if (copy_to_user( locp, &loc, sizeof(loc))) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_mkdir_by_handle( -+ struct inode *inode, -+ dm_right_t right, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname) -+{ -+ return(-ENOSYS); /* Return negative error to DMAPI */ -+} -+ -+ -+/* -+ * Probe and Punch -+ * -+ * Hole punching alignment is based on the underlying device base -+ * allocation size. Because it is not defined in the DMAPI spec, we -+ * can align how we choose here. Round inwards (offset up and length -+ * down) to the block, extent or page size whichever is bigger. Our -+ * DMAPI implementation rounds the hole geometry strictly inwards. If -+ * this is not possible, return EINVAL for both for xfs_dm_probe_hole -+ * and xfs_dm_punch_hole which differs from the DMAPI spec. Note that -+ * length = 0 is special - it means "punch to EOF" and at that point -+ * we treat the punch as remove everything past offset (including -+ * preallocation past EOF). -+ */ -+ -+STATIC int -+xfs_dm_round_hole( -+ dm_off_t offset, -+ dm_size_t length, -+ dm_size_t align, -+ xfs_fsize_t filesize, -+ dm_off_t *roff, -+ dm_size_t *rlen) -+{ -+ -+ dm_off_t off = offset; -+ dm_size_t len = length; -+ -+ /* Try to round offset up to the nearest boundary */ -+ *roff = roundup_64(off, align); -+ if ((*roff >= filesize) || (len && (len < align))) -+ return -EINVAL; -+ -+ if ((len == 0) || ((off + len) == filesize)) { -+ /* punch to EOF */ -+ *rlen = 0; -+ } else { -+ /* Round length down to the nearest boundary. 
*/ -+ ASSERT(len >= align); -+ ASSERT(align > (*roff - off)); -+ len -= *roff - off; -+ *rlen = len - do_mod(len, align); -+ if (*rlen == 0) -+ return -EINVAL; /* requested length is too small */ -+ } -+#ifdef CONFIG_DMAPI_DEBUG -+ printk("xfs_dm_round_hole: off %lu, len %ld, align %lu, " -+ "filesize %llu, roff %ld, rlen %ld\n", -+ offset, length, align, filesize, *roff, *rlen); -+#endif -+ return 0; /* hole geometry successfully rounded */ -+} -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_probe_hole( -+ struct inode *inode, -+ dm_right_t right, -+ dm_off_t off, -+ dm_size_t len, -+ dm_off_t __user *roffp, -+ dm_size_t __user *rlenp) -+{ -+ dm_off_t roff; -+ dm_size_t rlen; -+ xfs_inode_t *ip = XFS_I(inode); -+ xfs_mount_t *mp; -+ uint lock_flags; -+ xfs_fsize_t realsize; -+ dm_size_t align; -+ int error; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return -EACCES; -+ -+ if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) -+ return -EINVAL; -+ -+ mp = ip->i_mount; -+ lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; -+ xfs_ilock(ip, lock_flags); -+ realsize = ip->i_size; -+ xfs_iunlock(ip, lock_flags); -+ -+ if ((off + len) > realsize) -+ return -E2BIG; -+ -+ align = 1 << mp->m_sb.sb_blocklog; -+ -+ error = xfs_dm_round_hole(off, len, align, realsize, &roff, &rlen); -+ if (error) -+ return error; -+ -+ if (copy_to_user( roffp, &roff, sizeof(roff))) -+ return -EFAULT; -+ if (copy_to_user( rlenp, &rlen, sizeof(rlen))) -+ return -EFAULT; -+ return(0); -+} -+ -+ -+STATIC int -+xfs_dm_punch_hole( -+ struct inode *inode, -+ dm_right_t right, -+ dm_off_t off, -+ dm_size_t len) -+{ -+ xfs_flock64_t bf; -+ int error = 0; -+ xfs_inode_t *ip = XFS_I(inode); -+ xfs_mount_t *mp; -+ dm_size_t align; -+ xfs_fsize_t realsize; -+ dm_off_t roff; -+ dm_size_t rlen; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_EXCL) -+ return -EACCES; -+ -+ /* Make sure there are no leases. */ -+ error = break_lease(inode, FMODE_WRITE); -+ if (error) -+ return -EBUSY; -+ -+ error = get_write_access(inode); -+ if (error) -+ return -EBUSY; -+ -+ mp = ip->i_mount; -+ -+ down_rw_sems(inode, DM_SEM_FLAG_WR); -+ -+ xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); -+ realsize = ip->i_size; -+ xfs_iunlock(ip, XFS_ILOCK_EXCL); -+ align = xfs_get_extsz_hint(ip); -+ if (align == 0) -+ align = 1; -+ -+ align <<= mp->m_sb.sb_blocklog; -+ -+ if ((off + len) > realsize) { -+ xfs_iunlock(ip, XFS_IOLOCK_EXCL); -+ error = -E2BIG; -+ goto up_and_out; -+ } -+ -+ if ((off + len) == realsize) -+ len = 0; -+ -+ error = xfs_dm_round_hole(off, len, align, realsize, &roff, &rlen); -+ if (error || (off != roff) || (len != rlen)) { -+ xfs_iunlock(ip, XFS_IOLOCK_EXCL); -+ error = -EINVAL; -+ goto up_and_out; -+ } -+ -+ bf.l_type = 0; -+ bf.l_whence = 0; -+ bf.l_start = (xfs_off_t)off; -+ if (len) { -+ bf.l_len = len; -+ } -+ else { -+ /* -+ * When we are punching to EOF, we have to make sure we punch -+ * the last partial block that contains EOF. Round up -+ * the length to make sure we punch the block and not just -+ * zero it. -+ */ -+ bf.l_len = roundup_64((realsize - off), mp->m_sb.sb_blocksize); -+ } -+ -+#ifdef CONFIG_DMAPI_DEBUG -+ printk("xfs_dm_punch_hole: off %lu, len %ld, align %lu\n", -+ off, len, align); -+#endif -+ -+ error = xfs_change_file_space(ip, XFS_IOC_UNRESVSP, &bf, -+ (xfs_off_t)off, XFS_ATTR_DMI|XFS_ATTR_NOLOCK); -+ -+ /* -+ * if punching to end of file, kill any blocks past EOF that -+ * may have been (speculatively) preallocated. 
No point in -+ * leaving them around if we are migrating the file.... -+ */ -+ if (!error && (len == 0)) { -+ error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_NOLOCK); -+ } -+ -+ /* -+ * negate the error for return here as core XFS functions return -+ * positive error numbers -+ */ -+ if (error) -+ error = -error; -+ -+ /* Let threads in send_data_event know we punched the file. */ -+ ip->i_d.di_dmstate++; -+ xfs_iunlock(ip, XFS_IOLOCK_EXCL); -+ -+up_and_out: -+ up_rw_sems(inode, DM_SEM_FLAG_WR); -+ put_write_access(inode); -+ -+ return error; -+} -+ -+ -+STATIC int -+xfs_dm_read_invis_rvp( -+ struct inode *inode, -+ dm_right_t right, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp) -+{ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_SHARED) -+ return(-EACCES); -+ -+ return(-xfs_dm_rdwr(inode, 0, FMODE_READ, off, len, bufp, rvp)); -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_release_right( -+ struct inode *inode, -+ dm_right_t right, -+ u_int type) /* DM_FSYS_OBJ or zero */ -+{ -+#ifdef DEBUG_RIGHTS -+ char buffer[sizeof(dm_handle_t) * 2 + 1]; -+ -+ if (!xfs_vp_to_hexhandle(inode, type, buffer)) { -+ printf("dm_release_right: old %d type %d handle %s\n", -+ right, type, buffer); -+ } else { -+ printf("dm_release_right: old %d type %d handle " -+ " \n", right, type); -+ } -+#endif /* DEBUG_RIGHTS */ -+ return(0); -+} -+ -+ -+STATIC int -+xfs_dm_remove_dmattr( -+ struct inode *inode, -+ dm_right_t right, -+ int setdtime, -+ dm_attrname_t __user *attrnamep) -+{ -+ dm_dkattrname_t name; -+ int error; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_EXCL) -+ return(-EACCES); -+ -+ if ((error = xfs_copyin_attrname(attrnamep, &name)) != 0) -+ return(-error); /* Return negative error to DMAPI */ -+ -+ /* Remove the attribute from the object. */ -+ -+ error = xfs_attr_remove(XFS_I(inode), name.dan_chars, setdtime ? -+ ATTR_ROOT : (ATTR_ROOT|ATTR_KERNOTIME)); -+ DM_EA_XLATE_ERR(error); -+ -+ if (error == ENOATTR) -+ error = ENOENT; -+ return(-error); /* Return negative error to DMAPI */ -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_request_right( -+ struct inode *inode, -+ dm_right_t right, -+ u_int type, /* DM_FSYS_OBJ or zero */ -+ u_int flags, -+ dm_right_t newright) -+{ -+#ifdef DEBUG_RIGHTS -+ char buffer[sizeof(dm_handle_t) * 2 + 1]; -+ -+ if (!xfs_vp_to_hexhandle(inode, type, buffer)) { -+ printf("dm_request_right: old %d new %d type %d flags 0x%x " -+ "handle %s\n", right, newright, type, flags, buffer); -+ } else { -+ printf("dm_request_right: old %d new %d type %d flags 0x%x " -+ "handle \n", right, newright, type, flags); -+ } -+#endif /* DEBUG_RIGHTS */ -+ return(0); -+} -+ -+ -+STATIC int -+xfs_dm_set_dmattr( -+ struct inode *inode, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep, -+ int setdtime, -+ size_t buflen, -+ void __user *bufp) -+{ -+ dm_dkattrname_t name; -+ char *value; -+ int alloc_size; -+ int error; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_EXCL) -+ return(-EACCES); -+ -+ if ((error = xfs_copyin_attrname(attrnamep, &name)) != 0) -+ return(-error); /* Return negative error to DMAPI */ -+ if (buflen > ATTR_MAX_VALUELEN) -+ return(-E2BIG); -+ -+ /* Copy in the attribute's value and store the pair in -+ the object. We allocate a buffer of at least one byte even if the -+ caller specified a buflen of zero. (A buflen of zero is considered -+ valid.) -+ */ -+ -+ alloc_size = (buflen == 0) ? 
1 : buflen; -+ value = kmem_alloc(alloc_size, KM_SLEEP); -+ if (copy_from_user( value, bufp, buflen)) { -+ error = EFAULT; -+ } else { -+ error = xfs_attr_set(XFS_I(inode), name.dan_chars, value, buflen, -+ setdtime ? ATTR_ROOT : -+ (ATTR_ROOT|ATTR_KERNOTIME)); -+ DM_EA_XLATE_ERR(error); -+ } -+ kmem_free(value); -+ return(-error); /* Return negative error to DMAPI */ -+} -+ -+STATIC int -+xfs_dm_set_eventlist( -+ struct inode *inode, -+ dm_right_t right, -+ u_int type, -+ dm_eventset_t *eventsetp, /* in kernel space! */ -+ u_int maxevent) -+{ -+ int error; -+ xfs_inode_t *ip = XFS_I(inode); -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (type == DM_FSYS_OBJ) { -+ error = xfs_dm_fs_set_eventlist(ip->i_mount, right, eventsetp, maxevent); -+ } else { -+ error = xfs_dm_f_set_eventlist(ip, right, eventsetp, maxevent); -+ } -+ return(-error); /* Return negative error to DMAPI */ -+} -+ -+ -+/* -+ * This turned out not XFS-specific, but leave it here with get_fileattr. -+ */ -+ -+STATIC int -+xfs_dm_set_fileattr( -+ struct inode *inode, -+ dm_right_t right, -+ u_int mask, -+ dm_fileattr_t __user *statp) -+{ -+ dm_fileattr_t stat; -+ struct iattr iattr; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_EXCL) -+ return(-EACCES); -+ -+ if (copy_from_user( &stat, statp, sizeof(stat))) -+ return(-EFAULT); -+ -+ iattr.ia_valid = 0; -+ -+ if (mask & DM_AT_MODE) { -+ iattr.ia_valid |= ATTR_MODE; -+ iattr.ia_mode = stat.fa_mode; -+ } -+ if (mask & DM_AT_UID) { -+ iattr.ia_valid |= ATTR_UID; -+ iattr.ia_uid = stat.fa_uid; -+ } -+ if (mask & DM_AT_GID) { -+ iattr.ia_valid |= ATTR_GID; -+ iattr.ia_gid = stat.fa_gid; -+ } -+ if (mask & DM_AT_ATIME) { -+ iattr.ia_valid |= ATTR_ATIME; -+ iattr.ia_atime.tv_sec = stat.fa_atime; -+ iattr.ia_atime.tv_nsec = 0; -+ inode->i_atime.tv_sec = stat.fa_atime; -+ } -+ if (mask & DM_AT_MTIME) { -+ iattr.ia_valid |= ATTR_MTIME; -+ iattr.ia_mtime.tv_sec = stat.fa_mtime; -+ iattr.ia_mtime.tv_nsec = 0; -+ } -+ if (mask & DM_AT_CTIME) { -+ iattr.ia_valid |= ATTR_CTIME; -+ iattr.ia_ctime.tv_sec = stat.fa_ctime; -+ iattr.ia_ctime.tv_nsec = 0; -+ } -+ -+ /* -+ * DM_AT_DTIME only takes effect if DM_AT_CTIME is not specified. We -+ * overload ctime to also act as dtime, i.e. DM_CONFIG_DTIME_OVERLOAD. -+ */ -+ if ((mask & DM_AT_DTIME) && !(mask & DM_AT_CTIME)) { -+ iattr.ia_valid |= ATTR_CTIME; -+ iattr.ia_ctime.tv_sec = stat.fa_dtime; -+ iattr.ia_ctime.tv_nsec = 0; -+ } -+ if (mask & DM_AT_SIZE) { -+ iattr.ia_valid |= ATTR_SIZE; -+ iattr.ia_size = stat.fa_size; -+ } -+ -+ return -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_DMI); -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_set_inherit( -+ struct inode *inode, -+ dm_right_t right, -+ dm_attrname_t __user *attrnamep, -+ mode_t mode) -+{ -+ return(-ENOSYS); /* Return negative error to DMAPI */ -+} -+ -+ -+STATIC int -+xfs_dm_set_region( -+ struct inode *inode, -+ dm_right_t right, -+ u_int nelem, -+ dm_region_t __user *regbufp, -+ dm_boolean_t __user *exactflagp) -+{ -+ xfs_inode_t *ip = XFS_I(inode); -+ xfs_trans_t *tp; -+ xfs_mount_t *mp; -+ dm_region_t region; -+ dm_eventset_t new_mask; -+ dm_eventset_t mr_mask; -+ int error; -+ u_int exactflag; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_EXCL) -+ return(-EACCES); -+ -+ /* If the caller gave us more than one dm_region_t structure, complain. -+ (He has to call dm_get_config() to find out what our limit is.) 
-+ */ -+ -+ if (nelem > 1) -+ return(-E2BIG); -+ -+ /* If the user provided a dm_region_t structure, then copy it in, -+ validate it, and convert its flags to the corresponding bits in a -+ dm_set_eventlist() event mask. A call with zero regions is -+ equivalent to clearing all region flags. -+ */ -+ -+ new_mask = 0; -+ if (nelem == 1) { -+ if (copy_from_user( ®ion, regbufp, sizeof(region))) -+ return(-EFAULT); -+ -+ if (region.rg_flags & ~(DM_REGION_READ|DM_REGION_WRITE|DM_REGION_TRUNCATE)) -+ return(-EINVAL); -+ if (region.rg_flags & DM_REGION_READ) -+ new_mask |= 1 << DM_EVENT_READ; -+ if (region.rg_flags & DM_REGION_WRITE) -+ new_mask |= 1 << DM_EVENT_WRITE; -+ if (region.rg_flags & DM_REGION_TRUNCATE) -+ new_mask |= 1 << DM_EVENT_TRUNCATE; -+ } -+ mr_mask = (1 << DM_EVENT_READ) | (1 << DM_EVENT_WRITE) | (1 << DM_EVENT_TRUNCATE); -+ -+ /* Get the file's existing event mask, clear the old managed region -+ bits, add in the new ones, and update the file's mask. -+ */ -+ -+ if (new_mask & prohibited_mr_events(inode->i_mapping)) { -+ /* If the change is simply to remove the READ -+ * bit, then that's always okay. Otherwise, it's busy. -+ */ -+ dm_eventset_t m1; -+ m1 = ip->i_d.di_dmevmask & ((1 << DM_EVENT_WRITE) | (1 << DM_EVENT_TRUNCATE)); -+ if (m1 != new_mask) { -+ return -EBUSY; -+ } -+ } -+ -+ mp = ip->i_mount; -+ tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS); -+ error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES (mp), 0, 0, 0); -+ if (error) { -+ xfs_trans_cancel(tp, 0); -+ return(-error); /* Return negative error to DMAPI */ -+ } -+ xfs_ilock(ip, XFS_ILOCK_EXCL); -+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); -+ -+ ip->i_d.di_dmevmask = (ip->i_d.di_dmevmask & ~mr_mask) | new_mask; -+ -+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); -+ igrab(inode); -+ xfs_trans_commit(tp, 0); -+ -+ /* Return the proper value for *exactflagp depending upon whether or not -+ we "changed" the user's managed region. In other words, if the user -+ specified a non-zero value for either rg_offset or rg_size, we -+ round each of those values back to zero. -+ */ -+ -+ if (nelem && (region.rg_offset || region.rg_size)) { -+ exactflag = DM_FALSE; /* user region was changed */ -+ } else { -+ exactflag = DM_TRUE; /* user region was unchanged */ -+ } -+ if (copy_to_user( exactflagp, &exactflag, sizeof(exactflag))) -+ return(-EFAULT); -+ return(0); -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_symlink_by_handle( -+ struct inode *inode, -+ dm_right_t right, -+ void __user *hanp, -+ size_t hlen, -+ char __user *cname, -+ char __user *path) -+{ -+ return(-ENOSYS); /* Return negative errors to DMAPI */ -+} -+ -+ -+/* -+ * xfs_dm_sync_by_handle needs to do the same thing as sys_fsync() -+ */ -+STATIC int -+xfs_dm_sync_by_handle( -+ struct inode *inode, -+ dm_right_t right) -+{ -+ int err, ret; -+ xfs_inode_t *ip = XFS_I(inode); -+ -+ /* Returns negative errors to DMAPI */ -+ if (right < DM_RIGHT_EXCL) -+ return(-EACCES); -+ -+ /* We need to protect against concurrent writers.. 
*/ -+ ret = filemap_fdatawrite(inode->i_mapping); -+ down_rw_sems(inode, DM_FLAGS_IMUX); -+ err = -xfs_fsync(ip); -+ if (!ret) -+ ret = err; -+ up_rw_sems(inode, DM_FLAGS_IMUX); -+ err = filemap_fdatawait(inode->i_mapping); -+ if (!ret) -+ ret = err; -+ xfs_iflags_clear(ip, XFS_ITRUNCATED); -+ return ret; -+} -+ -+ -+/* ARGSUSED */ -+STATIC int -+xfs_dm_upgrade_right( -+ struct inode *inode, -+ dm_right_t right, -+ u_int type) /* DM_FSYS_OBJ or zero */ -+{ -+#ifdef DEBUG_RIGHTS -+ char buffer[sizeof(dm_handle_t) * 2 + 1]; -+ -+ if (!xfs_vp_to_hexhandle(inode, type, buffer)) { -+ printf("dm_upgrade_right: old %d new %d type %d handle %s\n", -+ right, DM_RIGHT_EXCL, type, buffer); -+ } else { -+ printf("dm_upgrade_right: old %d new %d type %d handle " -+ "\n", right, DM_RIGHT_EXCL, type); -+ } -+#endif /* DEBUG_RIGHTS */ -+ return(0); -+} -+ -+ -+STATIC int -+xfs_dm_write_invis_rvp( -+ struct inode *inode, -+ dm_right_t right, -+ int flags, -+ dm_off_t off, -+ dm_size_t len, -+ void __user *bufp, -+ int *rvp) -+{ -+ int fflag = 0; -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (right < DM_RIGHT_EXCL) -+ return(-EACCES); -+ -+ if (flags & DM_WRITE_SYNC) -+ fflag |= O_SYNC; -+ return(-xfs_dm_rdwr(inode, fflag, FMODE_WRITE, off, len, bufp, rvp)); -+} -+ -+ -+STATIC void -+xfs_dm_obj_ref_hold( -+ struct inode *inode) -+{ -+ igrab(inode); -+} -+ -+ -+static fsys_function_vector_t xfs_fsys_vector[DM_FSYS_MAX]; -+ -+ -+STATIC int -+xfs_dm_get_dmapiops( -+ struct super_block *sb, -+ void *addr) -+{ -+ static int initialized = 0; -+ dm_fcntl_vector_t *vecrq; -+ fsys_function_vector_t *vecp; -+ int i = 0; -+ -+ vecrq = (dm_fcntl_vector_t *)addr; -+ vecrq->count = -+ sizeof(xfs_fsys_vector) / sizeof(xfs_fsys_vector[0]); -+ vecrq->vecp = xfs_fsys_vector; -+ if (initialized) -+ return(0); -+ vecrq->code_level = DM_CLVL_XOPEN; -+ vecp = xfs_fsys_vector; -+ -+ vecp[i].func_no = DM_FSYS_CLEAR_INHERIT; -+ vecp[i++].u_fc.clear_inherit = xfs_dm_clear_inherit; -+ vecp[i].func_no = DM_FSYS_CREATE_BY_HANDLE; -+ vecp[i++].u_fc.create_by_handle = xfs_dm_create_by_handle; -+ vecp[i].func_no = DM_FSYS_DOWNGRADE_RIGHT; -+ vecp[i++].u_fc.downgrade_right = xfs_dm_downgrade_right; -+ vecp[i].func_no = DM_FSYS_GET_ALLOCINFO_RVP; -+ vecp[i++].u_fc.get_allocinfo_rvp = xfs_dm_get_allocinfo_rvp; -+ vecp[i].func_no = DM_FSYS_GET_BULKALL_RVP; -+ vecp[i++].u_fc.get_bulkall_rvp = xfs_dm_get_bulkall_rvp; -+ vecp[i].func_no = DM_FSYS_GET_BULKATTR_RVP; -+ vecp[i++].u_fc.get_bulkattr_rvp = xfs_dm_get_bulkattr_rvp; -+ vecp[i].func_no = DM_FSYS_GET_CONFIG; -+ vecp[i++].u_fc.get_config = xfs_dm_get_config; -+ vecp[i].func_no = DM_FSYS_GET_CONFIG_EVENTS; -+ vecp[i++].u_fc.get_config_events = xfs_dm_get_config_events; -+ vecp[i].func_no = DM_FSYS_GET_DESTROY_DMATTR; -+ vecp[i++].u_fc.get_destroy_dmattr = xfs_dm_get_destroy_dmattr; -+ vecp[i].func_no = DM_FSYS_GET_DIOINFO; -+ vecp[i++].u_fc.get_dioinfo = xfs_dm_get_dioinfo; -+ vecp[i].func_no = DM_FSYS_GET_DIRATTRS_RVP; -+ vecp[i++].u_fc.get_dirattrs_rvp = xfs_dm_get_dirattrs_rvp; -+ vecp[i].func_no = DM_FSYS_GET_DMATTR; -+ vecp[i++].u_fc.get_dmattr = xfs_dm_get_dmattr; -+ vecp[i].func_no = DM_FSYS_GET_EVENTLIST; -+ vecp[i++].u_fc.get_eventlist = xfs_dm_get_eventlist; -+ vecp[i].func_no = DM_FSYS_GET_FILEATTR; -+ vecp[i++].u_fc.get_fileattr = xfs_dm_get_fileattr; -+ vecp[i].func_no = DM_FSYS_GET_REGION; -+ vecp[i++].u_fc.get_region = xfs_dm_get_region; -+ vecp[i].func_no = DM_FSYS_GETALL_DMATTR; -+ vecp[i++].u_fc.getall_dmattr = xfs_dm_getall_dmattr; -+ vecp[i].func_no = 
DM_FSYS_GETALL_INHERIT; -+ vecp[i++].u_fc.getall_inherit = xfs_dm_getall_inherit; -+ vecp[i].func_no = DM_FSYS_INIT_ATTRLOC; -+ vecp[i++].u_fc.init_attrloc = xfs_dm_init_attrloc; -+ vecp[i].func_no = DM_FSYS_MKDIR_BY_HANDLE; -+ vecp[i++].u_fc.mkdir_by_handle = xfs_dm_mkdir_by_handle; -+ vecp[i].func_no = DM_FSYS_PROBE_HOLE; -+ vecp[i++].u_fc.probe_hole = xfs_dm_probe_hole; -+ vecp[i].func_no = DM_FSYS_PUNCH_HOLE; -+ vecp[i++].u_fc.punch_hole = xfs_dm_punch_hole; -+ vecp[i].func_no = DM_FSYS_READ_INVIS_RVP; -+ vecp[i++].u_fc.read_invis_rvp = xfs_dm_read_invis_rvp; -+ vecp[i].func_no = DM_FSYS_RELEASE_RIGHT; -+ vecp[i++].u_fc.release_right = xfs_dm_release_right; -+ vecp[i].func_no = DM_FSYS_REMOVE_DMATTR; -+ vecp[i++].u_fc.remove_dmattr = xfs_dm_remove_dmattr; -+ vecp[i].func_no = DM_FSYS_REQUEST_RIGHT; -+ vecp[i++].u_fc.request_right = xfs_dm_request_right; -+ vecp[i].func_no = DM_FSYS_SET_DMATTR; -+ vecp[i++].u_fc.set_dmattr = xfs_dm_set_dmattr; -+ vecp[i].func_no = DM_FSYS_SET_EVENTLIST; -+ vecp[i++].u_fc.set_eventlist = xfs_dm_set_eventlist; -+ vecp[i].func_no = DM_FSYS_SET_FILEATTR; -+ vecp[i++].u_fc.set_fileattr = xfs_dm_set_fileattr; -+ vecp[i].func_no = DM_FSYS_SET_INHERIT; -+ vecp[i++].u_fc.set_inherit = xfs_dm_set_inherit; -+ vecp[i].func_no = DM_FSYS_SET_REGION; -+ vecp[i++].u_fc.set_region = xfs_dm_set_region; -+ vecp[i].func_no = DM_FSYS_SYMLINK_BY_HANDLE; -+ vecp[i++].u_fc.symlink_by_handle = xfs_dm_symlink_by_handle; -+ vecp[i].func_no = DM_FSYS_SYNC_BY_HANDLE; -+ vecp[i++].u_fc.sync_by_handle = xfs_dm_sync_by_handle; -+ vecp[i].func_no = DM_FSYS_UPGRADE_RIGHT; -+ vecp[i++].u_fc.upgrade_right = xfs_dm_upgrade_right; -+ vecp[i].func_no = DM_FSYS_WRITE_INVIS_RVP; -+ vecp[i++].u_fc.write_invis_rvp = xfs_dm_write_invis_rvp; -+ vecp[i].func_no = DM_FSYS_OBJ_REF_HOLD; -+ vecp[i++].u_fc.obj_ref_hold = xfs_dm_obj_ref_hold; -+ -+ return(0); -+} -+ -+ -+/* xfs_dm_send_mmap_event - send events needed for memory mapping a file. -+ * -+ * This is a workaround called for files that are about to be -+ * mapped. DMAPI events are not being generated at a low enough level -+ * in the kernel for page reads/writes to generate the correct events. -+ * So for memory-mapped files we generate read or write events for the -+ * whole byte range being mapped. If the mmap call can never cause a -+ * write to the file, then only a read event is sent. -+ * -+ * Code elsewhere prevents adding managed regions to a file while it -+ * is still mapped. -+ */ -+ -+STATIC int -+xfs_dm_send_mmap_event( -+ struct vm_area_struct *vma, -+ unsigned int wantflag) -+{ -+ xfs_inode_t *ip; -+ int error = 0; -+ dm_eventtype_t max_event = DM_EVENT_READ; -+ xfs_fsize_t filesize; -+ xfs_off_t length, end_of_area, evsize, offset; -+ int iolock; -+ -+ if (!vma->vm_file) -+ return 0; -+ -+ ip = XFS_I(vma->vm_file->f_dentry->d_inode); -+ -+ if (!S_ISREG(vma->vm_file->f_dentry->d_inode->i_mode) || -+ !(ip->i_mount->m_flags & XFS_MOUNT_DMAPI)) -+ return 0; -+ -+ /* If they specifically asked for 'read', then give it to them. -+ * Otherwise, see if it's possible to give them 'write'. -+ */ -+ if( wantflag & VM_READ ){ -+ max_event = DM_EVENT_READ; -+ } -+ else if( ! (vma->vm_flags & VM_DENYWRITE) ) { -+ if((wantflag & VM_WRITE) || (vma->vm_flags & VM_WRITE)) -+ max_event = DM_EVENT_WRITE; -+ } -+ -+ if( (wantflag & VM_WRITE) && (max_event != DM_EVENT_WRITE) ){ -+ return -EACCES; -+ } -+ -+ /* Figure out how much of the file is being requested by the user. 
*/ -+ offset = 0; /* beginning of file, for now */ -+ length = 0; /* whole file, for now */ -+ -+ filesize = ip->i_new_size; -+ if (filesize < ip->i_size) { -+ filesize = ip->i_size; -+ } -+ -+ /* Set first byte number beyond the map area. */ -+ -+ if (length) { -+ end_of_area = offset + length; -+ if (end_of_area > filesize) -+ end_of_area = filesize; -+ } else { -+ end_of_area = filesize; -+ } -+ -+ /* Set the real amount being mapped. */ -+ evsize = end_of_area - offset; -+ if (evsize < 0) -+ evsize = 0; -+ -+ if (max_event == DM_EVENT_READ) -+ iolock = XFS_IOLOCK_SHARED; -+ else -+ iolock = XFS_IOLOCK_EXCL; -+ -+ xfs_ilock(ip, iolock); -+ /* If write possible, try a DMAPI write event */ -+ if (max_event == DM_EVENT_WRITE && DM_EVENT_ENABLED(ip, max_event)) { -+ error = xfs_dm_send_data_event(max_event, ip, offset, -+ evsize, 0, &iolock); -+ goto out_unlock; -+ } -+ -+ /* Try a read event if max_event was != DM_EVENT_WRITE or if it -+ * was DM_EVENT_WRITE but the WRITE event was not enabled. -+ */ -+ if (DM_EVENT_ENABLED(ip, DM_EVENT_READ)) { -+ error = xfs_dm_send_data_event(DM_EVENT_READ, ip, offset, -+ evsize, 0, &iolock); -+ } -+out_unlock: -+ xfs_iunlock(ip, iolock); -+ return -error; -+} -+ -+ -+STATIC int -+xfs_dm_send_destroy_event( -+ xfs_inode_t *ip, -+ dm_right_t vp_right) /* always DM_RIGHT_NULL */ -+{ -+ /* Returns positive errors to XFS */ -+ return -dm_send_destroy_event(&ip->i_vnode, vp_right); -+} -+ -+ -+STATIC int -+xfs_dm_send_namesp_event( -+ dm_eventtype_t event, -+ struct xfs_mount *mp, -+ xfs_inode_t *ip1, -+ dm_right_t vp1_right, -+ xfs_inode_t *ip2, -+ dm_right_t vp2_right, -+ const char *name1, -+ const char *name2, -+ mode_t mode, -+ int retcode, -+ int flags) -+{ -+ /* Returns positive errors to XFS */ -+ return -dm_send_namesp_event(event, mp ? mp->m_super : NULL, -+ &ip1->i_vnode, vp1_right, -+ ip2 ? &ip2->i_vnode : NULL, vp2_right, -+ name1, name2, -+ mode, retcode, flags); -+} -+ -+STATIC int -+xfs_dm_send_mount_event( -+ struct xfs_mount *mp, -+ dm_right_t root_right, -+ char *mtpt, -+ char *fsname) -+{ -+ return dm_send_mount_event(mp->m_super, root_right, -+ NULL, DM_RIGHT_NULL, -+ mp->m_rootip ? VFS_I(mp->m_rootip) : NULL, -+ DM_RIGHT_NULL, mtpt, fsname); -+} -+ -+STATIC void -+xfs_dm_send_unmount_event( -+ struct xfs_mount *mp, -+ xfs_inode_t *ip, /* NULL if unmount successful */ -+ dm_right_t vfsp_right, -+ mode_t mode, -+ int retcode, /* errno, if unmount failed */ -+ int flags) -+{ -+ dm_send_unmount_event(mp->m_super, ip ? &ip->i_vnode : NULL, -+ vfsp_right, mode, retcode, flags); -+} -+ -+ -+/* -+ * Data migration operations accessed by the rest of XFS. -+ * When DMAPI support is configured in, this vector is used. 
-+ */ -+ -+xfs_dmops_t xfs_dmcore_xfs = { -+ .xfs_send_data = xfs_dm_send_data_event, -+ .xfs_send_mmap = xfs_dm_send_mmap_event, -+ .xfs_send_destroy = xfs_dm_send_destroy_event, -+ .xfs_send_namesp = xfs_dm_send_namesp_event, -+ .xfs_send_mount = xfs_dm_send_mount_event, -+ .xfs_send_unmount = xfs_dm_send_unmount_event, -+}; -+EXPORT_SYMBOL(xfs_dmcore_xfs); -+ -+STATIC int -+xfs_dm_fh_to_inode( -+ struct super_block *sb, -+ struct inode **inode, -+ dm_fid_t *dmfid) -+{ -+ xfs_mount_t *mp = XFS_M(sb); -+ xfs_inode_t *ip; -+ xfs_ino_t ino; -+ unsigned int igen; -+ int error; -+ -+ *inode = NULL; -+ -+ if (!dmfid->dm_fid_len) { -+ /* filesystem handle */ -+ *inode = igrab(&mp->m_rootip->i_vnode); -+ if (!*inode) -+ return -ENOENT; -+ return 0; -+ } -+ -+ if (dmfid->dm_fid_len != sizeof(*dmfid) - sizeof(dmfid->dm_fid_len)) -+ return -EINVAL; -+ -+ ino = dmfid->dm_fid_ino; -+ igen = dmfid->dm_fid_gen; -+ -+ /* fail requests for ino 0 gracefully. */ -+ if (ino == 0) -+ return -ESTALE; -+ -+ error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); -+ if (error) -+ return -error; -+ if (!ip) -+ return -EIO; -+ -+ if (!ip->i_d.di_mode || ip->i_d.di_gen != igen) { -+ xfs_iput_new(ip, XFS_ILOCK_SHARED); -+ return -ENOENT; -+ } -+ -+ *inode = &ip->i_vnode; -+ xfs_iunlock(ip, XFS_ILOCK_SHARED); -+ return 0; -+} -+ -+STATIC int -+xfs_dm_inode_to_fh( -+ struct inode *inode, -+ dm_fid_t *dmfid, -+ dm_fsid_t *dmfsid) -+{ -+ xfs_inode_t *ip = XFS_I(inode); -+ -+ /* Returns negative errors to DMAPI */ -+ -+ if (ip->i_mount->m_fixedfsid == NULL) -+ return -EINVAL; -+ -+ dmfid->dm_fid_len = sizeof(dm_fid_t) - sizeof(dmfid->dm_fid_len); -+ dmfid->dm_fid_pad = 0; -+ /* -+ * use memcpy because the inode is a long long and there's no -+ * assurance that dmfid->dm_fid_ino is properly aligned. -+ */ -+ memcpy(&dmfid->dm_fid_ino, &ip->i_ino, sizeof(dmfid->dm_fid_ino)); -+ dmfid->dm_fid_gen = ip->i_d.di_gen; -+ -+ memcpy(dmfsid, ip->i_mount->m_fixedfsid, sizeof(*dmfsid)); -+ return 0; -+} -+ -+STATIC void -+xfs_dm_get_fsid( -+ struct super_block *sb, -+ dm_fsid_t *fsid) -+{ -+ memcpy(fsid, XFS_M(sb)->m_fixedfsid, sizeof(*fsid)); -+} -+ -+/* -+ * Filesystem operations accessed by the DMAPI core. -+ */ -+static struct filesystem_dmapi_operations xfs_dmapiops = { -+ .get_fsys_vector = xfs_dm_get_dmapiops, -+ .fh_to_inode = xfs_dm_fh_to_inode, -+ .inode_to_fh = xfs_dm_inode_to_fh, -+ .get_fsid = xfs_dm_get_fsid, -+}; -+ -+static int __init -+xfs_dm_init(void) -+{ -+ printk(KERN_INFO "SGI XFS Data Management API subsystem\n"); -+ -+ dmapi_register(&xfs_fs_type, &xfs_dmapiops); -+ return 0; -+} -+ -+static void __exit -+xfs_dm_exit(void) -+{ -+ dmapi_unregister(&xfs_fs_type); -+} -+ -+MODULE_AUTHOR("Silicon Graphics, Inc."); -+MODULE_DESCRIPTION("SGI XFS dmapi subsystem"); -+MODULE_LICENSE("GPL"); -+ -+module_init(xfs_dm_init); -+module_exit(xfs_dm_exit); ---- /dev/null -+++ b/fs/xfs/dmapi/xfs_dm.h -@@ -0,0 +1,23 @@ -+/* -+ * Copyright (c) 2006 Silicon Graphics, Inc. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+#ifndef __XFS_DM_H__ -+#define __XFS_DM_H__ -+ -+extern struct file_system_type xfs_fs_type; -+ -+#endif /* __XFS_DM_H__ */ ---- a/fs/xfs/linux-2.6/xfs_file.c -+++ b/fs/xfs/linux-2.6/xfs_file.c -@@ -47,6 +47,9 @@ - #include - - static const struct vm_operations_struct xfs_file_vm_ops; -+#ifdef HAVE_DMAPI -+static struct vm_operations_struct xfs_dmapi_file_vm_ops; -+#endif - - /* - * xfs_iozero -@@ -938,6 +941,23 @@ xfs_file_release( - return -xfs_release(XFS_I(inode)); - } - -+#ifdef HAVE_DMAPI -+STATIC int -+xfs_vm_fault( -+ struct vm_area_struct *vma, -+ struct vm_fault *vmf) -+{ -+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode; -+ struct xfs_mount *mp = XFS_M(inode->i_sb); -+ -+ ASSERT_ALWAYS(mp->m_flags & XFS_MOUNT_DMAPI); -+ -+ if (XFS_SEND_MMAP(mp, vma, 0)) -+ return VM_FAULT_SIGBUS; -+ return filemap_fault(vma, vmf); -+} -+#endif /* HAVE_DMAPI */ -+ - STATIC int - xfs_file_readdir( - struct file *filp, -@@ -978,10 +998,56 @@ xfs_file_mmap( - vma->vm_ops = &xfs_file_vm_ops; - vma->vm_flags |= VM_CAN_NONLINEAR; - -+#ifdef HAVE_DMAPI -+ if (XFS_M(filp->f_path.dentry->d_inode->i_sb)->m_flags & XFS_MOUNT_DMAPI) -+ vma->vm_ops = &xfs_dmapi_file_vm_ops; -+#endif /* HAVE_DMAPI */ -+ - file_accessed(filp); - return 0; - } - -+#ifdef HAVE_DMAPI -+#ifdef HAVE_VMOP_MPROTECT -+STATIC int -+xfs_vm_mprotect( -+ struct vm_area_struct *vma, -+ unsigned int newflags) -+{ -+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode; -+ struct xfs_mount *mp = XFS_M(inode->i_sb); -+ int error = 0; -+ -+ if (mp->m_flags & XFS_MOUNT_DMAPI) { -+ if ((vma->vm_flags & VM_MAYSHARE) && -+ (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) -+ error = XFS_SEND_MMAP(mp, vma, VM_WRITE); -+ } -+ return error; -+} -+#endif /* HAVE_VMOP_MPROTECT */ -+#endif /* HAVE_DMAPI */ -+ -+#ifdef HAVE_FOP_OPEN_EXEC -+/* If the user is attempting to execute a file that is offline then -+ * we have to trigger a DMAPI READ event before the file is marked as busy -+ * otherwise the invisible I/O will not be able to write to the file to bring -+ * it back online. -+ */ -+STATIC int -+xfs_file_open_exec( -+ struct inode *inode) -+{ -+ struct xfs_mount *mp = XFS_M(inode->i_sb); -+ struct xfs_inode *ip = XFS_I(inode); -+ -+ if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI) && -+ DM_EVENT_ENABLED(ip, DM_EVENT_READ)) -+ return -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL); -+ return 0; -+} -+#endif /* HAVE_FOP_OPEN_EXEC */ -+ - /* - * mmap()d file has taken write protection fault and is being made - * writable. We can set the page state up correctly for a writable -@@ -1033,3 +1099,13 @@ static const struct vm_operations_struct - .fault = filemap_fault, - .page_mkwrite = xfs_vm_page_mkwrite, - }; -+ -+#ifdef HAVE_DMAPI -+static struct vm_operations_struct xfs_dmapi_file_vm_ops = { -+ .fault = xfs_vm_fault, -+ .page_mkwrite = xfs_vm_page_mkwrite, -+#ifdef HAVE_VMOP_MPROTECT -+ .mprotect = xfs_vm_mprotect, -+#endif -+}; -+#endif /* HAVE_DMAPI */ ---- /dev/null -+++ b/fs/xfs/linux-2.6/xfs_ksyms.c -@@ -0,0 +1,92 @@ -+/* -+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope that it would be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+#include "xfs.h" -+#include "xfs_fs.h" -+#include "xfs_bit.h" -+#include "xfs_buf.h" -+#include "xfs_log.h" -+#include "xfs_inum.h" -+#include "xfs_trans.h" -+#include "xfs_sb.h" -+#include "xfs_ag.h" -+#include "xfs_dir2.h" -+#include "xfs_alloc.h" -+#include "xfs_dmapi.h" -+#include "xfs_quota.h" -+#include "xfs_mount.h" -+#include "xfs_da_btree.h" -+#include "xfs_bmap_btree.h" -+#include "xfs_alloc_btree.h" -+#include "xfs_ialloc_btree.h" -+#include "xfs_dir2_sf.h" -+#include "xfs_attr_sf.h" -+#include "xfs_dinode.h" -+#include "xfs_inode.h" -+#include "xfs_btree.h" -+#include "xfs_ialloc.h" -+#include "xfs_bmap.h" -+#include "xfs_rtalloc.h" -+#include "xfs_error.h" -+#include "xfs_itable.h" -+#include "xfs_rw.h" -+#include "xfs_dir2_data.h" -+#include "xfs_dir2_leaf.h" -+#include "xfs_dir2_block.h" -+#include "xfs_dir2_node.h" -+#include "xfs_acl.h" -+#include "xfs_attr.h" -+#include "xfs_attr_leaf.h" -+#include "xfs_inode_item.h" -+#include "xfs_buf_item.h" -+#include "xfs_extfree_item.h" -+#include "xfs_log_priv.h" -+#include "xfs_trans_priv.h" -+#include "xfs_trans_space.h" -+#include "xfs_utils.h" -+#include "xfs_iomap.h" -+#include "xfs_filestream.h" -+#include "xfs_vnodeops.h" -+ -+EXPORT_SYMBOL(xfs_iunlock); -+EXPORT_SYMBOL(xfs_attr_remove); -+EXPORT_SYMBOL(xfs_iunlock_map_shared); -+EXPORT_SYMBOL(xfs_iget); -+EXPORT_SYMBOL(xfs_bmapi); -+EXPORT_SYMBOL(xfs_internal_inum); -+EXPORT_SYMBOL(xfs_attr_set); -+EXPORT_SYMBOL(xfs_trans_reserve); -+EXPORT_SYMBOL(xfs_trans_ijoin); -+EXPORT_SYMBOL(xfs_free_eofblocks); -+EXPORT_SYMBOL(kmem_free); -+EXPORT_SYMBOL(_xfs_trans_commit); -+EXPORT_SYMBOL(xfs_ilock); -+EXPORT_SYMBOL(xfs_attr_get); -+EXPORT_SYMBOL(xfs_readdir); -+EXPORT_SYMBOL(xfs_setattr); -+EXPORT_SYMBOL(xfs_trans_alloc); -+EXPORT_SYMBOL(xfs_trans_cancel); -+EXPORT_SYMBOL(xfs_fsync); -+EXPORT_SYMBOL(xfs_iput_new); -+EXPORT_SYMBOL(xfs_bulkstat); -+EXPORT_SYMBOL(xfs_ilock_map_shared); -+EXPORT_SYMBOL(xfs_iput); -+EXPORT_SYMBOL(xfs_trans_log_inode); -+EXPORT_SYMBOL(xfs_attr_list); -+EXPORT_SYMBOL(kmem_alloc); -+EXPORT_SYMBOL(xfs_change_file_space); ---- a/fs/xfs/linux-2.6/xfs_linux.h -+++ b/fs/xfs/linux-2.6/xfs_linux.h -@@ -160,6 +160,10 @@ - #define xfs_itruncate_data(ip, off) \ - (-vmtruncate(VFS_I(ip), (off))) - -+#undef HAVE_DMAPI -+#if defined(CONFIG_XFS_DMAPI) || defined(CONFIG_XFS_DMAPI_MODULE) -+#define HAVE_DMAPI -+#endif - - /* Move the kernel do_div definition off to one side */ - ---- a/fs/xfs/linux-2.6/xfs_super.c -+++ b/fs/xfs/linux-2.6/xfs_super.c -@@ -1670,8 +1670,16 @@ xfs_fs_get_sb( - void *data, - struct vfsmount *mnt) - { -- return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super, -+ int error; -+ -+ error = get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super, - mnt); -+ if (!error) { -+ xfs_mount_t *mp = XFS_M(mnt->mnt_sb); -+ mp->m_vfsmount = mnt; -+ } -+ -+ return error; - } - - static const struct super_operations xfs_super_operations = { -@@ -1689,13 +1697,14 @@ static const struct super_operations xfs - .show_options = xfs_fs_show_options, - }; - --static 
struct file_system_type xfs_fs_type = { -+struct file_system_type xfs_fs_type = { - .owner = THIS_MODULE, - .name = "xfs", - .get_sb = xfs_fs_get_sb, - .kill_sb = kill_block_super, - .fs_flags = FS_REQUIRES_DEV, - }; -+EXPORT_SYMBOL(xfs_fs_type); - - STATIC int __init - xfs_init_zones(void) ---- a/fs/xfs/xfs_dmops.c -+++ b/fs/xfs/xfs_dmops.c -@@ -40,9 +40,21 @@ int - xfs_dmops_get(struct xfs_mount *mp) - { - if (mp->m_flags & XFS_MOUNT_DMAPI) { -- cmn_err(CE_WARN, -- "XFS: dmapi support not available in this kernel."); -- return EINVAL; -+ struct xfs_dmops *ops; -+ -+ ops = symbol_get(xfs_dmcore_xfs); -+ if (!ops) { -+ request_module("xfs_dmapi"); -+ ops = symbol_get(xfs_dmcore_xfs); -+ } -+ -+ if (!ops) { -+ cmn_err(CE_WARN, "XFS: no dmapi support available."); -+ return EINVAL; -+ } -+ mp->m_dm_ops = ops; -+ } else { -+ mp->m_dm_ops = &xfs_dmcore_stub; - } - - mp->m_dm_ops = &xfs_dmcore_stub; -@@ -52,4 +64,6 @@ xfs_dmops_get(struct xfs_mount *mp) - void - xfs_dmops_put(struct xfs_mount *mp) - { -+ if (mp->m_dm_ops != &xfs_dmcore_stub) -+ symbol_put(xfs_dmcore_xfs); - } ---- a/fs/xfs/xfs_itable.c -+++ b/fs/xfs/xfs_itable.c -@@ -39,7 +39,7 @@ - #include "xfs_error.h" - #include "xfs_btree.h" - --STATIC int -+int - xfs_internal_inum( - xfs_mount_t *mp, - xfs_ino_t ino) ---- a/fs/xfs/xfs_itable.h -+++ b/fs/xfs/xfs_itable.h -@@ -99,6 +99,11 @@ xfs_bulkstat_one( - void *dibuff, - int *stat); - -+int -+xfs_internal_inum( -+ xfs_mount_t *mp, -+ xfs_ino_t ino); -+ - typedef int (*inumbers_fmt_pf)( - void __user *ubuffer, /* buffer to write to */ - const xfs_inogrp_t *buffer, /* buffer to read from */ ---- a/fs/xfs/xfs_mount.h -+++ b/fs/xfs/xfs_mount.h -@@ -259,6 +259,7 @@ typedef struct xfs_mount { - __int64_t m_update_flags; /* sb flags we need to update - on the next remount,rw */ - struct list_head m_mplist; /* inode shrinker mount list */ -+ struct vfsmount *m_vfsmount; - } xfs_mount_t; - - /* ---- a/fs/xfs/xfs_rw.c -+++ b/fs/xfs/xfs_rw.c -@@ -202,3 +202,4 @@ xfs_get_extsz_hint( - - return extsz; - } -+EXPORT_SYMBOL(xfs_get_extsz_hint); ---- a/fs/xfs/xfs_rw.h -+++ b/fs/xfs/xfs_rw.h -@@ -45,5 +45,10 @@ extern int xfs_read_buf(struct xfs_mount - extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp, - xfs_buf_t *bp, xfs_daddr_t blkno); - extern xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip); -+/* -+ * Prototypes for functions in xfs_vnodeops.c. -+ */ -+extern int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip, -+ int flags); - - #endif /* __XFS_RW_H__ */ ---- a/fs/xfs/xfs_vnodeops.c -+++ b/fs/xfs/xfs_vnodeops.c -@@ -593,7 +593,7 @@ xfs_readlink( - * when the link count isn't zero and by xfs_dm_punch_hole() when - * punching a hole to EOF. - */ --STATIC int -+int - xfs_free_eofblocks( - xfs_mount_t *mp, - xfs_inode_t *ip, diff --git a/patches.suse/xfs-nfsd-dmapi-aware b/patches.suse/xfs-nfsd-dmapi-aware deleted file mode 100644 index 5828e03..0000000 --- a/patches.suse/xfs-nfsd-dmapi-aware +++ /dev/null @@ -1,181 +0,0 @@ -From: Greg Banks -Subject: Make NFSD DMAPI aware -References: 74107, 173874, bnc#450658 -Patch-mainline: obstruction... - -G'day, - -The NFSv3 protocol specifies an error, NFS3ERR_JUKEBOX, which a server -should return when an I/O operation will take a very long time. -This causes a different pattern of retries in clients, and avoids -a number of serious problems associated with I/Os which take longer -than an RPC timeout. 
The Linux knfsd server has code to generate the -jukebox error and many NFS clients are known to have working code to -handle it. - -One scenario in which a server should emit the JUKEBOX error is when -a file data which the client is attempting to access is managed by -an HSM (Hierarchical Storage Manager) and is not present on the disk -and needs to be brought in from tape. Due to the nature of tapes this -operation can take minutes rather than the milliseconds normally seen -for local file data. - -Currently the Linux knfsd handles this situation poorly. A READ NFS -call will cause the nfsd thread handling it to block until the file -is available, without sending a reply to the NFS client. After a -few seconds the client retries, and this second READ call causes -another nfsd to block behind the first one. A few seconds later and -the client's retries have blocked *all* the nfsd threads, and all NFS -service from the server stops until the original file arrives on disk. - -WRITEs and SETATTRs which truncate the file are marginally better, in -that the knfsd dupcache will catch the retries and drop them without -blocking an nfsd (the dupcache *will* catch the retries because the -cache entry remains in RC_INPROG state and is not reused until the -first call finishes). However the first call still blocks, so given -WRITEs to enough offline files the server can still be locked up. - -There are also client-side implications, depending on the client -implementation. For example, on a Linux client an RPC retry loop uses -an RPC request slot, so reads from enough separate offline files can -lock up a mountpoint. - -This patch seeks to remedy the interaction between knfsd and HSMs by -providing mechanisms to allow knfsd to tell an underlying filesystem -(which supports HSMs) not to block for reads, writes and truncates -of offline files. It's a port of a Linux 2.4 patch used in SGI's -ProPack distro for the last 12 months. The patch: - -* provides a new ATTR_NO_BLOCK flag which the kernel can - use to tell a filesystem's inode_ops->setattr() operation not - to block when truncating an offline file. XFS already obeys - this flag (inside a #ifdef) - -* changes knfsd to provide ATTR_NO_BLOCK when it does the VFS - calls to implement the SETATTR NFS call. - -* changes knfsd to supply the O_NONBLOCK flag in the temporary - struct file it uses for VFS reads and writes, in order to ask - the filesystem not to block when reading or writing an offline - file. XFS already obeys this new semantic for O_NONBLOCK - (and in SLES9 so does JFS). - -* adds code to translate the -EAGAIN the filesystem returns when - it would have blocked, to the -ETIMEDOUT that knfsd expects. - - -Signed-off-by: Greg Banks -(SLES9 patch Acked-by: okir@suse.de) -Signed-off-by: NeilBrown -Acked-by: Jan Kara - - fs/nfsd/vfs.c | 32 ++++++++++++++++++++++++++++++-- - fs/xfs/linux-2.6/xfs_iops.c | 7 ++++++- - include/linux/fs.h | 1 + - 3 files changed, 37 insertions(+), 3 deletions(-) - - ---- a/fs/nfsd/vfs.c -+++ b/fs/nfsd/vfs.c -@@ -404,6 +404,15 @@ nfsd_setattr(struct svc_rqst *rqstp, str - put_write_access(inode); - goto out_nfserr; - } -+ -+ /* -+ * Tell a Hierarchical Storage Manager (e.g. via DMAPI) to -+ * return EAGAIN when an action would take minutes instead of -+ * milliseconds so that NFS can reply to the client with -+ * NFSERR_JUKEBOX instead of blocking an nfsd thread. 
-+ */ -+ if (rqstp->rq_vers >= 3) -+ iap->ia_valid |= ATTR_NO_BLOCK; - } - - /* sanitize the mode change */ -@@ -436,6 +445,9 @@ nfsd_setattr(struct svc_rqst *rqstp, str - if (!check_guard || guardtime == inode->i_ctime.tv_sec) { - fh_lock(fhp); - host_err = notify_change(dentry, iap); -+ /* to get NFSERR_JUKEBOX on the wire, need -ETIMEDOUT */ -+ if (host_err == -EAGAIN) -+ host_err = -ETIMEDOUT; - err = nfserrno(host_err); - fh_unlock(fhp); - } -@@ -919,6 +931,10 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st - if (ra && ra->p_set) - file->f_ra = ra->p_ra; - -+ /* Support HSMs -- see comment in nfsd_setattr() */ -+ if (rqstp->rq_vers >= 3) -+ file->f_flags |= O_NONBLOCK; -+ - if (file->f_op->splice_read && rqstp->rq_splice_ok) { - struct splice_desc sd = { - .len = 0, -@@ -951,8 +967,12 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st - *count = host_err; - err = 0; - fsnotify_access(file->f_path.dentry); -- } else -+ } else { -+ /* to get NFSERR_JUKEBOX on the wire, need -ETIMEDOUT */ -+ if (host_err == -EAGAIN) -+ host_err = -ETIMEDOUT; - err = nfserrno(host_err); -+ } - out: - return err; - } -@@ -1053,6 +1073,10 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s - spin_unlock(&file->f_lock); - } - -+ /* Support HSMs -- see comment in nfsd_setattr() */ -+ if (rqstp->rq_vers >= 3) -+ file->f_flags |= O_NONBLOCK; -+ - /* Write the data. */ - oldfs = get_fs(); set_fs(KERNEL_DS); - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); -@@ -1074,8 +1098,12 @@ out_nfserr: - dprintk("nfsd: write complete host_err=%d\n", host_err); - if (host_err >= 0) - err = 0; -- else -+ else { -+ /* to get NFSERR_JUKEBOX on the wire, need -ETIMEDOUT */ -+ if (host_err == -EAGAIN) -+ host_err = -ETIMEDOUT; - err = nfserrno(host_err); -+ } - out: - return err; - } ---- a/fs/xfs/linux-2.6/xfs_iops.c -+++ b/fs/xfs/linux-2.6/xfs_iops.c -@@ -544,7 +544,12 @@ xfs_vn_setattr( - struct dentry *dentry, - struct iattr *iattr) - { -- return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0); -+ int flags = 0; -+#ifdef ATTR_NO_BLOCK -+ if (iattr->ia_valid & ATTR_NO_BLOCK) -+ flags |= O_NONBLOCK; -+#endif -+ return -xfs_setattr(XFS_I(dentry->d_inode), iattr, flags); - } - - /* ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -438,6 +438,7 @@ typedef void (dio_iodone_t)(struct kiocb - #define ATTR_KILL_PRIV (1 << 14) - #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ - #define ATTR_TIMES_SET (1 << 16) -+#define ATTR_NO_BLOCK (1 << 17) /* Return EAGAIN and don't block on long truncates */ - - /* - * This is the Inode Attributes structure, used for notify_change(). It diff --git a/patches.trace/utrace-core b/patches.trace/utrace-core new file mode 100644 index 0000000..be85833 --- /dev/null +++ b/patches.trace/utrace-core @@ -0,0 +1,4101 @@ +From: Roland McGrath +Date: Tue 27 Oct 2009 +Subject: utrace core +References: FATE#304321 +Patch-mainline: no + +This adds the utrace facility, a new modular interface in the kernel for +implementing user thread tracing and debugging. This fits on top of the +tracehook_* layer, so the new code is well-isolated. + +The new interface is in and the DocBook utrace book +describes it. It allows for multiple separate tracing engines to work in +parallel without interfering with each other. Higher-level tracing +facilities can be implemented as loadable kernel modules using this layer. + +The new facility is made optional under CONFIG_UTRACE. +When this is not enabled, no new code is added. 
+It can only be enabled on machines that have all the +prerequisites and select CONFIG_HAVE_ARCH_TRACEHOOK. + +In this initial version, utrace and ptrace do not play together at all. +If ptrace is attached to a thread, the attach calls in the utrace kernel +API return -EBUSY. If utrace is attached to a thread, the PTRACE_ATTACH +or PTRACE_TRACEME request will return EBUSY to userland. The old ptrace +code is otherwise unchanged and nothing using ptrace should be affected +by this patch as long as utrace is not used at the same time. In the +future we can clean up the ptrace implementation and rework it to use +the utrace API. + +Signed-off-by: Roland McGrath +Signed-off-by: Tony Jones +--- + Documentation/DocBook/Makefile | 2 + Documentation/DocBook/utrace.tmpl | 590 +++++++++ + fs/proc/array.c | 3 + include/linux/init_task.h | 1 + include/linux/sched.h | 6 + include/linux/tracehook.h | 66 + + include/linux/utrace.h | 702 +++++++++++ + include/linux/utrace_struct.h | 59 + init/Kconfig | 9 + kernel/Makefile | 1 + kernel/ptrace.c | 14 + kernel/utrace.c | 2340 ++++++++++++++++++++++++++++++++++++++ + 12 files changed, 3791 insertions(+), 2 deletions(-) + +--- a/Documentation/DocBook/Makefile ++++ b/Documentation/DocBook/Makefile +@@ -9,7 +9,7 @@ + DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \ + kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ + writing_usb_driver.xml networking.xml \ +- kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \ ++ kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml utrace.xml \ + gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ + genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ + 80211.xml debugobjects.xml sh.xml regulator.xml \ +--- /dev/null ++++ b/Documentation/DocBook/utrace.tmpl +@@ -0,0 +1,590 @@ ++ ++ ++ ++ ++ ++ The utrace User Debugging Infrastructure ++ ++ ++ ++ ++ utrace concepts ++ ++ Introduction ++ ++ ++ utrace is infrastructure code for tracing ++ and controlling user threads. This is the foundation for writing ++ tracing engines, which can be loadable kernel modules. ++ ++ ++ ++ The basic actors in utrace are the thread ++ and the tracing engine. A tracing engine is some body of code that ++ calls into the <linux/utrace.h> ++ interfaces, represented by a struct ++ utrace_engine_ops. (Usually it's a kernel module, ++ though the legacy ptrace support is a tracing ++ engine that is not in a kernel module.) The interface operates on ++ individual threads (struct task_struct). ++ If an engine wants to treat several threads as a group, that is up ++ to its higher-level code. ++ ++ ++ ++ Tracing begins by attaching an engine to a thread, using ++ utrace_attach_task or ++ utrace_attach_pid. If successful, it returns a ++ pointer that is the handle used in all other calls. ++ ++ ++ ++ ++ Events and Callbacks ++ ++ ++ An attached engine does nothing by default. An engine makes something ++ happen by requesting callbacks via utrace_set_events ++ and poking the thread with utrace_control. ++ The synchronization issues related to these two calls ++ are discussed further below in . ++ ++ ++ ++ Events are specified using the macro ++ UTRACE_EVENT(type). ++ Each event type is associated with a callback in struct ++ utrace_engine_ops. A tracing engine can leave unused ++ callbacks NULL. The only callbacks required ++ are those used by the event flags it sets. ++ ++ ++ ++ Many engines can be attached to each thread. 
When a thread has an ++ event, each engine gets a callback if it has set the event flag for ++ that event type. For most events, engines are called in the order they ++ attached. Engines that attach after the event has occurred do not get ++ callbacks for that event. This includes any new engines just attached ++ by an existing engine's callback function. Once the sequence of ++ callbacks for that one event has completed, such new engines are then ++ eligible in the next sequence that starts when there is another event. ++ ++ ++ ++ Event reporting callbacks have details particular to the event type, ++ but are all called in similar environments and have the same ++ constraints. Callbacks are made from safe points, where no locks ++ are held, no special resources are pinned (usually), and the ++ user-mode state of the thread is accessible. So, callback code has ++ a pretty free hand. But to be a good citizen, callback code should ++ never block for long periods. It is fine to block in ++ kmalloc and the like, but never wait for i/o or ++ for user mode to do something. If you need the thread to wait, use ++ UTRACE_STOP and return from the callback ++ quickly. When your i/o finishes or whatever, you can use ++ utrace_control to resume the thread. ++ ++ ++ ++ The UTRACE_EVENT(SYSCALL_ENTRY) event is a special ++ case. While other events happen in the kernel when it will return to ++ user mode soon, this event happens when entering the kernel before it ++ will proceed with the work requested from user mode. Because of this ++ difference, the report_syscall_entry callback is ++ special in two ways. For this event, engines are called in reverse of ++ the normal order (this includes the report_quiesce ++ call that precedes a report_syscall_entry call). ++ This preserves the semantics that the last engine to attach is called ++ "closest to user mode"--the engine that is first to see a thread's user ++ state when it enters the kernel is also the last to see that state when ++ the thread returns to user mode. For the same reason, if these ++ callbacks use UTRACE_STOP (see the next section), ++ the thread stops immediately after callbacks rather than only when it's ++ ready to return to user mode; when allowed to resume, it will actually ++ attempt the system call indicated by the register values at that time. ++ ++ ++ ++ ++ Stopping Safely ++ ++ Writing well-behaved callbacks ++ ++ ++ Well-behaved callbacks are important to maintain two essential ++ properties of the interface. The first of these is that unrelated ++ tracing engines should not interfere with each other. If your engine's ++ event callback does not return quickly, then another engine won't get ++ the event notification in a timely manner. The second important ++ property is that tracing should be as noninvasive as possible to the ++ normal operation of the system overall and of the traced thread in ++ particular. That is, attached tracing engines should not perturb a ++ thread's behavior, except to the extent that changing its user-visible ++ state is explicitly what you want to do. (Obviously some perturbation ++ is unavoidable, primarily timing changes, ranging from small delays due ++ to the overhead of tracing, to arbitrary pauses in user code execution ++ when a user stops a thread with a debugger for examination.) Even when ++ you explicitly want the perturbation of making the traced thread block, ++ just blocking directly in your callback has more unwanted effects. 
For ++ example, the CLONE event callbacks are called when ++ the new child thread has been created but not yet started running; the ++ child can never be scheduled until the CLONE ++ tracing callbacks return. (This allows engines tracing the parent to ++ attach to the child.) If a CLONE event callback ++ blocks the parent thread, it also prevents the child thread from ++ running (even to process a SIGKILL). If what you ++ want is to make both the parent and child block, then use ++ utrace_attach_task on the child and then use ++ UTRACE_STOP on both threads. A more crucial ++ problem with blocking in callbacks is that it can prevent ++ SIGKILL from working. A thread that is blocking ++ due to UTRACE_STOP will still wake up and die ++ immediately when sent a SIGKILL, as all threads ++ should. Relying on the utrace ++ infrastructure rather than on private synchronization calls in event ++ callbacks is an important way to help keep tracing robustly ++ noninvasive. ++ ++ ++ ++ ++ Using <constant>UTRACE_STOP</constant> ++ ++ ++ To control another thread and access its state, it must be stopped ++ with UTRACE_STOP. This means that it is ++ stopped and won't start running again while we access it. When a ++ thread is not already stopped, utrace_control ++ returns -EINPROGRESS and an engine must wait ++ for an event callback when the thread is ready to stop. The thread ++ may be running on another CPU or may be blocked. When it is ready ++ to be examined, it will make callbacks to engines that set the ++ UTRACE_EVENT(QUIESCE) event bit. To wake up an ++ interruptible wait, use UTRACE_INTERRUPT. ++ ++ ++ ++ As long as some engine has used UTRACE_STOP and ++ not called utrace_control to resume the thread, ++ then the thread will remain stopped. SIGKILL ++ will wake it up, but it will not run user code. When the stop is ++ cleared with utrace_control or a callback ++ return value, the thread starts running again. ++ (See also .) ++ ++ ++ ++ ++ ++ ++ Tear-down Races ++ ++ Primacy of <constant>SIGKILL</constant> ++ ++ Ordinarily synchronization issues for tracing engines are kept fairly ++ straightforward by using UTRACE_STOP. You ask a ++ thread to stop, and then once it makes the ++ report_quiesce callback it cannot do anything else ++ that would result in another callback, until you let it with a ++ utrace_control call. This simple arrangement ++ avoids complex and error-prone code in each one of a tracing engine's ++ event callbacks to keep them serialized with the engine's other ++ operations done on that thread from another thread of control. ++ However, giving tracing engines complete power to keep a traced thread ++ stuck in place runs afoul of a more important kind of simplicity that ++ the kernel overall guarantees: nothing can prevent or delay ++ SIGKILL from making a thread die and release its ++ resources. To preserve this important property of ++ SIGKILL, it as a special case can break ++ UTRACE_STOP like nothing else normally can. This ++ includes both explicit SIGKILL signals and the ++ implicit SIGKILL sent to each other thread in the ++ same thread group by a thread doing an exec, or processing a fatal ++ signal, or making an exit_group system call. A ++ tracing engine can prevent a thread from beginning the exit or exec or ++ dying by signal (other than SIGKILL) if it is ++ attached to that thread, but once the operation begins, no tracing ++ engine can prevent or delay all other threads in the same thread group ++ dying. 
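A minimal sketch may help tie the attach, event-mask and UTRACE_STOP pieces above together before the tear-down details that follow. It is a hypothetical illustration, not part of this patch: utrace_attach_task, utrace_set_events, utrace_control, utrace_engine_put, UTRACE_ATTACH_CREATE and the UTRACE_EVENT() macro are all named in the text above, but the exact prototypes, the report_quiesce parameter list and the UTRACE_RESUME return value are assumptions that should be checked against the <linux/utrace.h> added by this patch.

#include <linux/utrace.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * Hypothetical callback: runs at a safe point with no locks held.
 * Do quick work only; to make the thread wait, return UTRACE_STOP
 * and resume it later with utrace_control().  The parameter list
 * here is assumed from the descriptions above.
 */
static u32 demo_report_quiesce(enum utrace_resume_action action,
			       struct utrace_engine *engine,
			       struct task_struct *task,
			       unsigned long event)
{
	return UTRACE_RESUME;	/* assumed "just keep going" action */
}

static const struct utrace_engine_ops demo_ops = {
	.report_quiesce	= demo_report_quiesce,
};

/* Attach an engine to @task and ask for QUIESCE callbacks. */
static int demo_attach(struct task_struct *task)
{
	struct utrace_engine *engine;
	int ret;

	engine = utrace_attach_task(task, UTRACE_ATTACH_CREATE,
				    &demo_ops, NULL);
	if (IS_ERR(engine))	/* e.g. -EBUSY while ptrace is attached */
		return PTR_ERR(engine);

	ret = utrace_set_events(task, engine, UTRACE_EVENT(QUIESCE));

	/* The attachment keeps its own reference; drop the one we own. */
	utrace_engine_put(engine);
	return ret;
}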
++ ++ ++ ++ Final callbacks ++ ++ The report_reap callback is always the final event ++ in the life cycle of a traced thread. Tracing engines can use this as ++ the trigger to clean up their own data structures. The ++ report_death callback is always the penultimate ++ event a tracing engine might see; it's seen unless the thread was ++ already in the midst of dying when the engine attached. Many tracing ++ engines will have no interest in when a parent reaps a dead process, ++ and nothing they want to do with a zombie thread once it dies; for ++ them, the report_death callback is the natural ++ place to clean up data structures and detach. To facilitate writing ++ such engines robustly, given the asynchrony of ++ SIGKILL, and without error-prone manual ++ implementation of synchronization schemes, the ++ utrace infrastructure provides some special ++ guarantees about the report_death and ++ report_reap callbacks. It still takes some care ++ to be sure your tracing engine is robust to tear-down races, but these ++ rules make it reasonably straightforward and concise to handle a lot of ++ corner cases correctly. ++ ++ ++ ++ Engine and task pointers ++ ++ The first sort of guarantee concerns the core data structures ++ themselves. struct utrace_engine is ++ a reference-counted data structure. While you hold a reference, an ++ engine pointer will always stay valid so that you can safely pass it to ++ any utrace call. Each call to ++ utrace_attach_task or ++ utrace_attach_pid returns an engine pointer with a ++ reference belonging to the caller. You own that reference until you ++ drop it using utrace_engine_put. There is an ++ implicit reference on the engine while it is attached. So if you drop ++ your only reference, and then use ++ utrace_attach_task without ++ UTRACE_ATTACH_CREATE to look up that same engine, ++ you will get the same pointer with a new reference to replace the one ++ you dropped, just like calling utrace_engine_get. ++ When an engine has been detached, either explicitly with ++ UTRACE_DETACH or implicitly after ++ report_reap, then any references you hold are all ++ that keep the old engine pointer alive. ++ ++ ++ ++ There is nothing a kernel module can do to keep a struct ++ task_struct alive outside of ++ rcu_read_lock. When the task dies and is reaped ++ by its parent (or itself), that structure can be freed so that any ++ dangling pointers you have stored become invalid. ++ utrace will not prevent this, but it can ++ help you detect it safely. By definition, a task that has been reaped ++ has had all its engines detached. All ++ utrace calls can be safely called on a ++ detached engine if the caller holds a reference on that engine pointer, ++ even if the task pointer passed in the call is invalid. All calls ++ return -ESRCH for a detached engine, which tells ++ you that the task pointer you passed could be invalid now. Since ++ utrace_control and ++ utrace_set_events do not block, you can call those ++ inside a rcu_read_lock section and be sure after ++ they don't return -ESRCH that the task pointer is ++ still valid until rcu_read_unlock. The ++ infrastructure never holds task references of its own. Though neither ++ rcu_read_lock nor any other lock is held while ++ making a callback, it's always guaranteed that the struct ++ task_struct and the struct ++ utrace_engine passed as arguments remain valid ++ until the callback function returns. 
++ ++ ++ ++ The common means for safely holding task pointers that is available to ++ kernel modules is to use struct pid, which ++ permits put_pid from kernel modules. When using ++ that, the calls utrace_attach_pid, ++ utrace_control_pid, ++ utrace_set_events_pid, and ++ utrace_barrier_pid are available. ++ ++ ++ ++ ++ ++ Serialization of <constant>DEATH</constant> and <constant>REAP</constant> ++ ++ ++ The second guarantee is the serialization of ++ DEATH and REAP event ++ callbacks for a given thread. The actual reaping by the parent ++ (release_task call) can occur simultaneously ++ while the thread is still doing the final steps of dying, including ++ the report_death callback. If a tracing engine ++ has requested both DEATH and ++ REAP event reports, it's guaranteed that the ++ report_reap callback will not be made until ++ after the report_death callback has returned. ++ If the report_death callback itself detaches ++ from the thread, then the report_reap callback ++ will never be made. Thus it is safe for a ++ report_death callback to clean up data ++ structures and detach. ++ ++ ++ ++ Interlock with final callbacks ++ ++ The final sort of guarantee is that a tracing engine will know for sure ++ whether or not the report_death and/or ++ report_reap callbacks will be made for a certain ++ thread. These tear-down races are disambiguated by the error return ++ values of utrace_set_events and ++ utrace_control. Normally ++ utrace_control called with ++ UTRACE_DETACH returns zero, and this means that no ++ more callbacks will be made. If the thread is in the midst of dying, ++ it returns -EALREADY to indicate that the ++ report_death callback may already be in progress; ++ when you get this error, you know that any cleanup your ++ report_death callback does is about to happen or ++ has just happened--note that if the report_death ++ callback does not detach, the engine remains attached until the thread ++ gets reaped. If the thread is in the midst of being reaped, ++ utrace_control returns -ESRCH ++ to indicate that the report_reap callback may ++ already be in progress; this means the engine is implicitly detached ++ when the callback completes. This makes it possible for a tracing ++ engine that has decided asynchronously to detach from a thread to ++ safely clean up its data structures, knowing that no ++ report_death or report_reap ++ callback will try to do the same. utrace_detach ++ returns -ESRCH when the struct ++ utrace_engine has already been detached, but is ++ still a valid pointer because of its reference count. A tracing engine ++ can use this to safely synchronize its own independent multiple threads ++ of control with each other and with its event callbacks that detach. ++ ++ ++ ++ In the same vein, utrace_set_events normally ++ returns zero; if the target thread was stopped before the call, then ++ after a successful call, no event callbacks not requested in the new ++ flags will be made. It fails with -EALREADY if ++ you try to clear UTRACE_EVENT(DEATH) when the ++ report_death callback may already have begun, if ++ you try to clear UTRACE_EVENT(REAP) when the ++ report_reap callback may already have begun, or if ++ you try to newly set UTRACE_EVENT(DEATH) or ++ UTRACE_EVENT(QUIESCE) when the target is already ++ dead or dying. Like utrace_control, it returns ++ -ESRCH when the thread has already been detached ++ (including forcible detach on reaping). 
This lets the tracing engine ++ know for sure which event callbacks it will or won't see after ++ utrace_set_events has returned. By checking for ++ errors, it can know whether to clean up its data structures immediately ++ or to let its callbacks do the work. ++ ++ ++ ++ Using <function>utrace_barrier</function> ++ ++ When a thread is safely stopped, calling ++ utrace_control with UTRACE_DETACH ++ or calling utrace_set_events to disable some events ++ ensures synchronously that your engine won't get any more of the callbacks ++ that have been disabled (none at all when detaching). But these can also ++ be used while the thread is not stopped, when it might be simultaneously ++ making a callback to your engine. For this situation, these calls return ++ -EINPROGRESS when it's possible a callback is in ++ progress. If you are not prepared to have your old callbacks still run, ++ then you can synchronize to be sure all the old callbacks are finished, ++ using utrace_barrier. This is necessary if the ++ kernel module containing your callback code is going to be unloaded. ++ ++ ++ After using UTRACE_DETACH once, further calls to ++ utrace_control with the same engine pointer will ++ return -ESRCH. In contrast, after getting ++ -EINPROGRESS from ++ utrace_set_events, you can call ++ utrace_set_events again later and if it returns zero ++ then know the old callbacks have finished. ++ ++ ++ Unlike all other calls, utrace_barrier (and ++ utrace_barrier_pid) will accept any engine pointer you ++ hold a reference on, even if UTRACE_DETACH has already ++ been used. After any utrace_control or ++ utrace_set_events call (these do not block), you can ++ call utrace_barrier to block until callbacks have ++ finished. This returns -ESRCH only if the engine is ++ completely detached (finished all callbacks). Otherwise it waits ++ until the thread is definitely not in the midst of a callback to this ++ engine and then returns zero, but can return ++ -ERESTARTSYS if its wait is interrupted. ++ ++ ++ ++ ++ ++ ++ ++utrace core API ++ ++ ++ The utrace API is declared in <linux/utrace.h>. ++ ++ ++!Iinclude/linux/utrace.h ++!Ekernel/utrace.c ++ ++ ++ ++Machine State ++ ++ ++ The task_current_syscall function can be used on any ++ valid struct task_struct at any time, and does ++ not even require that utrace_attach_task was used at all. ++ ++ ++ ++ The other ways to access the registers and other machine-dependent state of ++ a task can only be used on a task that is at a known safe point. The safe ++ points are all the places where utrace_set_events can ++ request callbacks (except for the DEATH and ++ REAP events). So at any event callback, it is safe to ++ examine current. ++ ++ ++ ++ One task can examine another only after a callback in the target task that ++ returns UTRACE_STOP so that task will not return to user ++ mode after the safe point. This guarantees that the task will not resume ++ until the same engine uses utrace_control, unless the ++ task dies suddenly. To examine safely, one must use a pair of calls to ++ utrace_prepare_examine and ++ utrace_finish_examine surrounding the calls to ++ struct user_regset functions or direct examination ++ of task data structures. utrace_prepare_examine returns ++ an error if the task is not properly stopped and not dead. After a ++ successful examination, the paired utrace_finish_examine ++ call returns an error if the task ever woke up during the examination. If ++ so, any data gathered may be scrambled and should be discarded. 
This means ++ there was a spurious wake-up (which should not happen), or a sudden death. ++ ++ ++<structname>struct user_regset</structname> ++ ++ ++ The struct user_regset API ++ is declared in <linux/regset.h>. ++ ++ ++!Finclude/linux/regset.h ++ ++ ++ ++ ++ <filename>System Call Information</filename> ++ ++ ++ This function is declared in <linux/ptrace.h>. ++ ++ ++!Elib/syscall.c ++ ++ ++ ++<filename>System Call Tracing</filename> ++ ++ ++ The arch API for system call information is declared in ++ <asm/syscall.h>. ++ Each of these calls can be used only at system call entry tracing, ++ or can be used only at system call exit and the subsequent safe points ++ before returning to user mode. ++ At system call entry tracing means either during a ++ report_syscall_entry callback, ++ or any time after that callback has returned UTRACE_STOP. ++ ++ ++!Finclude/asm-generic/syscall.h ++ ++ ++ ++ ++ ++Kernel Internals ++ ++ ++ This chapter covers the interface to the tracing infrastructure ++ from the core of the kernel and the architecture-specific code. ++ This is for maintainers of the kernel and arch code, and not relevant ++ to using the tracing facilities described in preceding chapters. ++ ++ ++Core Calls In ++ ++ ++ These calls are declared in <linux/tracehook.h>. ++ The core kernel calls these functions at various important places. ++ ++ ++!Finclude/linux/tracehook.h ++ ++ ++ ++Architecture Calls Out ++ ++ ++ An arch that has done all these things sets ++ CONFIG_HAVE_ARCH_TRACEHOOK. ++ This is required to enable the utrace code. ++ ++ ++<filename><asm/ptrace.h></filename> ++ ++ ++ An arch defines these in <asm/ptrace.h> ++ if it supports hardware single-step or block-step features. ++ ++ ++!Finclude/linux/ptrace.h arch_has_single_step arch_has_block_step ++!Finclude/linux/ptrace.h user_enable_single_step user_enable_block_step ++!Finclude/linux/ptrace.h user_disable_single_step ++ ++ ++ ++ ++ <filename><asm/syscall.h></filename> ++ ++ ++ An arch provides <asm/syscall.h> that ++ defines these as inlines, or declares them as exported functions. ++ These interfaces are described in . ++ ++ ++ ++ ++ ++ <filename><linux/tracehook.h></filename> ++ ++ ++ An arch must define TIF_NOTIFY_RESUME ++ and TIF_SYSCALL_TRACE ++ in its <asm/thread_info.h>. 
++ The arch code must call the following functions, all declared ++ in <linux/tracehook.h> and ++ described in : ++ ++ ++ ++ tracehook_notify_resume ++ ++ ++ tracehook_report_syscall_entry ++ ++ ++ tracehook_report_syscall_exit ++ ++ ++ tracehook_signal_handler ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -81,6 +81,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -192,6 +193,8 @@ static inline void task_state(struct seq + cred->uid, cred->euid, cred->suid, cred->fsuid, + cred->gid, cred->egid, cred->sgid, cred->fsgid); + ++ task_utrace_proc_status(m, p); ++ + task_lock(p); + if (p->files) + fdt = files_fdtable(p->files); +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h +@@ -165,6 +165,7 @@ extern struct cred init_cred; + }, \ + .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ + .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ ++ INIT_UTRACE(tsk) \ + INIT_IDS \ + INIT_PERF_EVENTS(tsk) \ + INIT_TRACE_IRQFLAGS \ +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -61,6 +61,7 @@ struct sched_param { + #include + #include + #include ++#include + + #include + #include +@@ -1339,6 +1340,11 @@ struct task_struct { + #endif + seccomp_t seccomp; + ++#ifdef CONFIG_UTRACE ++ struct utrace utrace; ++ unsigned long utrace_flags; ++#endif ++ + /* Thread group tracking */ + u32 parent_exec_id; + u32 self_exec_id; +--- a/include/linux/tracehook.h ++++ b/include/linux/tracehook.h +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + struct linux_binprm; + + /** +@@ -63,6 +64,8 @@ struct linux_binprm; + */ + static inline int tracehook_expect_breakpoints(struct task_struct *task) + { ++ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_CORE))) ++ return 1; + return (task_ptrace(task) & PT_PTRACED) != 0; + } + +@@ -111,6 +114,9 @@ static inline void ptrace_report_syscall + static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) + { ++ if ((task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_ENTRY)) && ++ utrace_report_syscall_entry(regs)) ++ return 1; + ptrace_report_syscall(regs); + return 0; + } +@@ -141,6 +147,8 @@ static inline void tracehook_report_sysc + return; + } + ++ if (task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_EXIT)) ++ utrace_report_syscall_exit(regs); + ptrace_report_syscall(regs); + } + +@@ -201,6 +209,8 @@ static inline void tracehook_report_exec + struct linux_binprm *bprm, + struct pt_regs *regs) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXEC))) ++ utrace_report_exec(fmt, bprm, regs); + if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && + unlikely(task_ptrace(current) & PT_PTRACED)) + send_sig(SIGTRAP, current, 0); +@@ -218,6 +228,8 @@ static inline void tracehook_report_exec + */ + static inline void tracehook_report_exit(long *exit_code) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXIT))) ++ utrace_report_exit(exit_code); + ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); + } + +@@ -261,6 +273,7 @@ static inline int tracehook_prepare_clon + static inline void tracehook_finish_clone(struct task_struct *child, + unsigned long clone_flags, int trace) + { ++ utrace_init_task(child); + ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); + } + +@@ -285,6 +298,8 @@ static inline void tracehook_report_clon + unsigned long clone_flags, + pid_t pid, struct task_struct *child) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE))) ++ utrace_report_clone(clone_flags, 
child); + if (unlikely(task_ptrace(child))) { + /* + * It doesn't matter who attached/attaching to this +@@ -317,6 +332,9 @@ static inline void tracehook_report_clon + pid_t pid, + struct task_struct *child) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)) && ++ (clone_flags & CLONE_VFORK)) ++ utrace_finish_vfork(current); + if (unlikely(trace)) + ptrace_event(0, trace, pid); + } +@@ -351,6 +369,10 @@ static inline void tracehook_report_vfor + */ + static inline void tracehook_prepare_release_task(struct task_struct *task) + { ++ /* see utrace_add_engine() about this barrier */ ++ smp_mb(); ++ if (task_utrace_flags(task)) ++ utrace_release_task(task); + } + + /** +@@ -365,6 +387,7 @@ static inline void tracehook_prepare_rel + static inline void tracehook_finish_release_task(struct task_struct *task) + { + ptrace_release_task(task); ++ BUG_ON(task->exit_state != EXIT_DEAD); + } + + /** +@@ -386,6 +409,8 @@ static inline void tracehook_signal_hand + const struct k_sigaction *ka, + struct pt_regs *regs, int stepping) + { ++ if (task_utrace_flags(current)) ++ utrace_signal_handler(current, stepping); + if (stepping) + ptrace_notify(SIGTRAP); + } +@@ -403,6 +428,8 @@ static inline void tracehook_signal_hand + static inline int tracehook_consider_ignored_signal(struct task_struct *task, + int sig) + { ++ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_IGN))) ++ return 1; + return (task_ptrace(task) & PT_PTRACED) != 0; + } + +@@ -422,6 +449,9 @@ static inline int tracehook_consider_ign + static inline int tracehook_consider_fatal_signal(struct task_struct *task, + int sig) + { ++ if (unlikely(task_utrace_flags(task) & (UTRACE_EVENT(SIGNAL_TERM) | ++ UTRACE_EVENT(SIGNAL_CORE)))) ++ return 1; + return (task_ptrace(task) & PT_PTRACED) != 0; + } + +@@ -436,6 +466,8 @@ static inline int tracehook_consider_fat + */ + static inline int tracehook_force_sigpending(void) + { ++ if (unlikely(task_utrace_flags(current))) ++ return utrace_interrupt_pending(); + return 0; + } + +@@ -465,6 +497,8 @@ static inline int tracehook_get_signal(s + siginfo_t *info, + struct k_sigaction *return_ka) + { ++ if (unlikely(task_utrace_flags(task))) ++ return utrace_get_signal(task, regs, info, return_ka); + return 0; + } + +@@ -492,6 +526,8 @@ static inline int tracehook_get_signal(s + */ + static inline int tracehook_notify_jctl(int notify, int why) + { ++ if (task_utrace_flags(current) & UTRACE_EVENT(JCTL)) ++ utrace_report_jctl(notify, why); + return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; + } + +@@ -502,6 +538,8 @@ static inline int tracehook_notify_jctl( + */ + static inline void tracehook_finish_jctl(void) + { ++ if (task_utrace_flags(current)) ++ utrace_finish_jctl(); + } + + #define DEATH_REAP -1 +@@ -524,6 +562,8 @@ static inline void tracehook_finish_jctl + static inline int tracehook_notify_death(struct task_struct *task, + void **death_cookie, int group_dead) + { ++ *death_cookie = task_utrace_struct(task); ++ + if (task_detached(task)) + return task->ptrace ? SIGCHLD : DEATH_REAP; + +@@ -560,6 +600,20 @@ static inline void tracehook_report_deat + int signal, void *death_cookie, + int group_dead) + { ++ /* ++ * This barrier ensures that our caller's setting of ++ * @task->exit_state precedes checking @task->utrace_flags here. ++ * If utrace_set_events() was just called to enable ++ * UTRACE_EVENT(DEATH), then we are obliged to call ++ * utrace_report_death() and not miss it. 
utrace_set_events() ++ * uses tasklist_lock to synchronize enabling the bit with the ++ * actual change to @task->exit_state, but we need this barrier ++ * to be sure we see a flags change made just before our caller ++ * took the tasklist_lock. ++ */ ++ smp_mb(); ++ if (task_utrace_flags(task) & _UTRACE_DEATH_EVENTS) ++ utrace_report_death(task, death_cookie, group_dead, signal); + } + + #ifdef TIF_NOTIFY_RESUME +@@ -589,10 +643,20 @@ static inline void set_notify_resume(str + * asynchronously, this will be called again before we return to + * user mode. + * +- * Called without locks. ++ * Called without locks. However, on some machines this may be ++ * called with interrupts disabled. + */ + static inline void tracehook_notify_resume(struct pt_regs *regs) + { ++ struct task_struct *task = current; ++ /* ++ * This pairs with the barrier implicit in set_notify_resume(). ++ * It ensures that we read the nonzero utrace_flags set before ++ * set_notify_resume() was called by utrace setup. ++ */ ++ smp_rmb(); ++ if (task_utrace_flags(task)) ++ utrace_resume(task, regs); + } + #endif /* TIF_NOTIFY_RESUME */ + +--- /dev/null ++++ b/include/linux/utrace.h +@@ -0,0 +1,702 @@ ++/* ++ * utrace infrastructure interface for debugging user processes ++ * ++ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU General Public License v.2. ++ * ++ * Red Hat Author: Roland McGrath. ++ * ++ * This interface allows for notification of interesting events in a ++ * thread. It also mediates access to thread state such as registers. ++ * Multiple unrelated users can be associated with a single thread. ++ * We call each of these a tracing engine. ++ * ++ * A tracing engine starts by calling utrace_attach_task() or ++ * utrace_attach_pid() on the chosen thread, passing in a set of hooks ++ * (&struct utrace_engine_ops), and some associated data. This produces a ++ * &struct utrace_engine, which is the handle used for all other ++ * operations. An attached engine has its ops vector, its data, and an ++ * event mask controlled by utrace_set_events(). ++ * ++ * For each event bit that is set, that engine will get the ++ * appropriate ops->report_*() callback when the event occurs. The ++ * &struct utrace_engine_ops need not provide callbacks for an event ++ * unless the engine sets one of the associated event bits. ++ */ ++ ++#ifndef _LINUX_UTRACE_H ++#define _LINUX_UTRACE_H 1 ++ ++#include ++#include ++#include ++#include ++ ++struct linux_binprm; ++struct pt_regs; ++struct utrace; ++struct user_regset; ++struct user_regset_view; ++ ++/* ++ * Event bits passed to utrace_set_events(). ++ * These appear in &struct task_struct.@utrace_flags ++ * and &struct utrace_engine.@flags. ++ */ ++enum utrace_events { ++ _UTRACE_EVENT_QUIESCE, /* Thread is available for examination. */ ++ _UTRACE_EVENT_REAP, /* Zombie reaped, no more tracing possible. */ ++ _UTRACE_EVENT_CLONE, /* Successful clone/fork/vfork just done. */ ++ _UTRACE_EVENT_EXEC, /* Successful execve just completed. */ ++ _UTRACE_EVENT_EXIT, /* Thread exit in progress. */ ++ _UTRACE_EVENT_DEATH, /* Thread has died. */ ++ _UTRACE_EVENT_SYSCALL_ENTRY, /* User entered kernel for system call. */ ++ _UTRACE_EVENT_SYSCALL_EXIT, /* Returning to user after system call. */ ++ _UTRACE_EVENT_SIGNAL, /* Signal delivery will run a user handler. 
*/ ++ _UTRACE_EVENT_SIGNAL_IGN, /* No-op signal to be delivered. */ ++ _UTRACE_EVENT_SIGNAL_STOP, /* Signal delivery will suspend. */ ++ _UTRACE_EVENT_SIGNAL_TERM, /* Signal delivery will terminate. */ ++ _UTRACE_EVENT_SIGNAL_CORE, /* Signal delivery will dump core. */ ++ _UTRACE_EVENT_JCTL, /* Job control stop or continue completed. */ ++ _UTRACE_NEVENTS ++}; ++#define UTRACE_EVENT(type) (1UL << _UTRACE_EVENT_##type) ++ ++/* ++ * All the kinds of signal events. ++ * These all use the @report_signal() callback. ++ */ ++#define UTRACE_EVENT_SIGNAL_ALL (UTRACE_EVENT(SIGNAL) \ ++ | UTRACE_EVENT(SIGNAL_IGN) \ ++ | UTRACE_EVENT(SIGNAL_STOP) \ ++ | UTRACE_EVENT(SIGNAL_TERM) \ ++ | UTRACE_EVENT(SIGNAL_CORE)) ++/* ++ * Both kinds of syscall events; these call the @report_syscall_entry() ++ * and @report_syscall_exit() callbacks, respectively. ++ */ ++#define UTRACE_EVENT_SYSCALL \ ++ (UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT)) ++ ++/* ++ * The event reports triggered synchronously by task death. ++ */ ++#define _UTRACE_DEATH_EVENTS (UTRACE_EVENT(DEATH) | UTRACE_EVENT(QUIESCE)) ++ ++/* ++ * Hooks in call these entry points to the ++ * utrace dispatch. They are weak references here only so ++ * tracehook.h doesn't need to #ifndef CONFIG_UTRACE them to ++ * avoid external references in case of unoptimized compilation. ++ */ ++bool utrace_interrupt_pending(void) ++ __attribute__((weak)); ++void utrace_resume(struct task_struct *, struct pt_regs *) ++ __attribute__((weak)); ++int utrace_get_signal(struct task_struct *, struct pt_regs *, ++ siginfo_t *, struct k_sigaction *) ++ __attribute__((weak)); ++void utrace_report_clone(unsigned long, struct task_struct *) ++ __attribute__((weak)); ++void utrace_finish_vfork(struct task_struct *) ++ __attribute__((weak)); ++void utrace_report_exit(long *exit_code) ++ __attribute__((weak)); ++void utrace_report_death(struct task_struct *, struct utrace *, bool, int) ++ __attribute__((weak)); ++void utrace_report_jctl(int notify, int type) ++ __attribute__((weak)); ++void utrace_finish_jctl(void) ++ __attribute__((weak)); ++void utrace_report_exec(struct linux_binfmt *, struct linux_binprm *, ++ struct pt_regs *regs) ++ __attribute__((weak)); ++bool utrace_report_syscall_entry(struct pt_regs *) ++ __attribute__((weak)); ++void utrace_report_syscall_exit(struct pt_regs *) ++ __attribute__((weak)); ++void utrace_signal_handler(struct task_struct *, int) ++ __attribute__((weak)); ++ ++#ifndef CONFIG_UTRACE ++ ++/* ++ * uses these accessors to avoid #ifdef CONFIG_UTRACE. 
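++ * For example, the tracehook_report_exit() hunk above can test
++ * task_utrace_flags(current) unconditionally: with CONFIG_UTRACE off,
++ * the stub below always returns 0 and the whole check compiles away.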
++ */ ++static inline unsigned long task_utrace_flags(struct task_struct *task) ++{ ++ return 0; ++} ++static inline struct utrace *task_utrace_struct(struct task_struct *task) ++{ ++ return NULL; ++} ++static inline void utrace_init_task(struct task_struct *child) ++{ ++} ++static inline void utrace_release_task(struct task_struct *task) ++{ ++} ++ ++static inline void task_utrace_proc_status(struct seq_file *m, ++ struct task_struct *p) ++{ ++} ++ ++#else /* CONFIG_UTRACE */ ++ ++static inline unsigned long task_utrace_flags(struct task_struct *task) ++{ ++ return task->utrace_flags; ++} ++ ++static inline struct utrace *task_utrace_struct(struct task_struct *task) ++{ ++ return &task->utrace; ++} ++ ++static inline void utrace_init_task(struct task_struct *task) ++{ ++ task->utrace_flags = 0; ++ memset(&task->utrace, 0, sizeof(task->utrace)); ++ INIT_LIST_HEAD(&task->utrace.attached); ++ INIT_LIST_HEAD(&task->utrace.attaching); ++ spin_lock_init(&task->utrace.lock); ++} ++ ++void utrace_release_task(struct task_struct *); ++void task_utrace_proc_status(struct seq_file *m, struct task_struct *p); ++ ++ ++/* ++ * Version number of the API defined in this file. This will change ++ * whenever a tracing engine's code would need some updates to keep ++ * working. We maintain this here for the benefit of tracing engine code ++ * that is developed concurrently with utrace API improvements before they ++ * are merged into the kernel, making LINUX_VERSION_CODE checks unwieldy. ++ */ ++#define UTRACE_API_VERSION 20090416 ++ ++/** ++ * enum utrace_resume_action - engine's choice of action for a traced task ++ * @UTRACE_STOP: Stay quiescent after callbacks. ++ * @UTRACE_INTERRUPT: Make @report_signal() callback soon. ++ * @UTRACE_REPORT: Make some callback soon. ++ * @UTRACE_SINGLESTEP: Resume in user mode for one instruction. ++ * @UTRACE_BLOCKSTEP: Resume in user mode until next branch. ++ * @UTRACE_RESUME: Resume normally in user mode. ++ * @UTRACE_DETACH: Detach my engine (implies %UTRACE_RESUME). ++ * ++ * See utrace_control() for detailed descriptions of each action. This is ++ * encoded in the @action argument and the return value for every callback ++ * with a &u32 return value. ++ * ++ * The order of these is important. When there is more than one engine, ++ * each supplies its choice and the smallest value prevails. ++ */ ++enum utrace_resume_action { ++ UTRACE_STOP, ++ UTRACE_INTERRUPT, ++ UTRACE_REPORT, ++ UTRACE_SINGLESTEP, ++ UTRACE_BLOCKSTEP, ++ UTRACE_RESUME, ++ UTRACE_DETACH ++}; ++#define UTRACE_RESUME_MASK 0x0f ++ ++/** ++ * utrace_resume_action - &enum utrace_resume_action from callback action ++ * @action: &u32 callback @action argument or return value ++ * ++ * This extracts the &enum utrace_resume_action from @action, ++ * which is the @action argument to a &struct utrace_engine_ops ++ * callback or the return value from one. ++ */ ++static inline enum utrace_resume_action utrace_resume_action(u32 action) ++{ ++ return action & UTRACE_RESUME_MASK; ++} ++ ++/** ++ * enum utrace_signal_action - disposition of signal ++ * @UTRACE_SIGNAL_DELIVER: Deliver according to sigaction. ++ * @UTRACE_SIGNAL_IGN: Ignore the signal. ++ * @UTRACE_SIGNAL_TERM: Terminate the process. ++ * @UTRACE_SIGNAL_CORE: Terminate with core dump. ++ * @UTRACE_SIGNAL_STOP: Deliver as absolute stop. ++ * @UTRACE_SIGNAL_TSTP: Deliver as job control stop. ++ * @UTRACE_SIGNAL_REPORT: Reporting before pending signals. ++ * @UTRACE_SIGNAL_HANDLER: Reporting after signal handler setup. 
++ * ++ * This is encoded in the @action argument and the return value for ++ * a @report_signal() callback. It says what will happen to the ++ * signal described by the &siginfo_t parameter to the callback. ++ * ++ * The %UTRACE_SIGNAL_REPORT value is used in an @action argument when ++ * a tracing report is being made before dequeuing any pending signal. ++ * If this is immediately after a signal handler has been set up, then ++ * %UTRACE_SIGNAL_HANDLER is used instead. A @report_signal callback ++ * that uses %UTRACE_SIGNAL_DELIVER|%UTRACE_SINGLESTEP will ensure ++ * it sees a %UTRACE_SIGNAL_HANDLER report. ++ */ ++enum utrace_signal_action { ++ UTRACE_SIGNAL_DELIVER = 0x00, ++ UTRACE_SIGNAL_IGN = 0x10, ++ UTRACE_SIGNAL_TERM = 0x20, ++ UTRACE_SIGNAL_CORE = 0x30, ++ UTRACE_SIGNAL_STOP = 0x40, ++ UTRACE_SIGNAL_TSTP = 0x50, ++ UTRACE_SIGNAL_REPORT = 0x60, ++ UTRACE_SIGNAL_HANDLER = 0x70 ++}; ++#define UTRACE_SIGNAL_MASK 0xf0 ++#define UTRACE_SIGNAL_HOLD 0x100 /* Flag, push signal back on queue. */ ++ ++/** ++ * utrace_signal_action - &enum utrace_signal_action from callback action ++ * @action: @report_signal callback @action argument or return value ++ * ++ * This extracts the &enum utrace_signal_action from @action, which ++ * is the @action argument to a @report_signal callback or the ++ * return value from one. ++ */ ++static inline enum utrace_signal_action utrace_signal_action(u32 action) ++{ ++ return action & UTRACE_SIGNAL_MASK; ++} ++ ++/** ++ * enum utrace_syscall_action - disposition of system call attempt ++ * @UTRACE_SYSCALL_RUN: Run the system call. ++ * @UTRACE_SYSCALL_ABORT: Don't run the system call. ++ * ++ * This is encoded in the @action argument and the return value for ++ * a @report_syscall_entry callback. ++ */ ++enum utrace_syscall_action { ++ UTRACE_SYSCALL_RUN = 0x00, ++ UTRACE_SYSCALL_ABORT = 0x10 ++}; ++#define UTRACE_SYSCALL_MASK 0xf0 ++ ++/** ++ * utrace_syscall_action - &enum utrace_syscall_action from callback action ++ * @action: @report_syscall_entry callback @action or return value ++ * ++ * This extracts the &enum utrace_syscall_action from @action, which ++ * is the @action argument to a @report_syscall_entry callback or the ++ * return value from one. ++ */ ++static inline enum utrace_syscall_action utrace_syscall_action(u32 action) ++{ ++ return action & UTRACE_SYSCALL_MASK; ++} ++ ++/* ++ * Flags for utrace_attach_task() and utrace_attach_pid(). ++ */ ++#define UTRACE_ATTACH_CREATE 0x0010 /* Attach a new engine. */ ++#define UTRACE_ATTACH_EXCLUSIVE 0x0020 /* Refuse if existing match. */ ++#define UTRACE_ATTACH_MATCH_OPS 0x0001 /* Match engines on ops. */ ++#define UTRACE_ATTACH_MATCH_DATA 0x0002 /* Match engines on data. */ ++#define UTRACE_ATTACH_MATCH_MASK 0x000f ++ ++/** ++ * struct utrace_engine - per-engine structure ++ * @ops: &struct utrace_engine_ops pointer passed to utrace_attach_task() ++ * @data: engine-private &void * passed to utrace_attach_task() ++ * @flags: event mask set by utrace_set_events() plus internal flag bits ++ * ++ * The task itself never has to worry about engines detaching while ++ * it's doing event callbacks. These structures are removed from the ++ * task's active list only when it's stopped, or by the task itself. ++ * ++ * utrace_engine_get() and utrace_engine_put() maintain a reference count. ++ * When it drops to zero, the structure is freed. One reference is held ++ * implicitly while the engine is attached to its task. 
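++ *
++ * Purely as an illustrative sketch (my_ops is a hypothetical ops table):
++ * a caller that only looks up an already-attached engine drops its
++ * reference once it is done with the pointer:
++ *
++ *	engine = utrace_attach_task(task, UTRACE_ATTACH_MATCH_OPS,
++ *				    &my_ops, NULL);
++ *	if (!IS_ERR(engine)) {
++ *		... examine or reconfigure the engine ...
++ *		utrace_engine_put(engine);
++ *	}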
++ */ ++struct utrace_engine { ++/* private: */ ++ struct kref kref; ++ void (*release)(void *); ++ struct list_head entry; ++ ++/* public: */ ++ const struct utrace_engine_ops *ops; ++ void *data; ++ ++ unsigned long flags; ++}; ++ ++/** ++ * utrace_engine_get - acquire a reference on a &struct utrace_engine ++ * @engine: &struct utrace_engine pointer ++ * ++ * You must hold a reference on @engine, and you get another. ++ */ ++static inline void utrace_engine_get(struct utrace_engine *engine) ++{ ++ kref_get(&engine->kref); ++} ++ ++void __utrace_engine_release(struct kref *); ++ ++/** ++ * utrace_engine_put - release a reference on a &struct utrace_engine ++ * @engine: &struct utrace_engine pointer ++ * ++ * You must hold a reference on @engine, and you lose that reference. ++ * If it was the last one, @engine becomes an invalid pointer. ++ */ ++static inline void utrace_engine_put(struct utrace_engine *engine) ++{ ++ kref_put(&engine->kref, __utrace_engine_release); ++} ++ ++/** ++ * struct utrace_engine_ops - tracing engine callbacks ++ * ++ * Each @report_*() callback corresponds to an %UTRACE_EVENT(*) bit. ++ * utrace_set_events() calls on @engine choose which callbacks will be made ++ * to @engine from @task. ++ * ++ * Most callbacks take an @action argument, giving the resume action ++ * chosen by other tracing engines. All callbacks take an @engine ++ * argument, and a @task argument, which is always equal to @current. ++ * For some calls, @action also includes bits specific to that event ++ * and utrace_resume_action() is used to extract the resume action. ++ * This shows what would happen if @engine wasn't there, or will if ++ * the callback's return value uses %UTRACE_RESUME. This always ++ * starts as %UTRACE_RESUME when no other tracing is being done on ++ * this task. ++ * ++ * All return values contain &enum utrace_resume_action bits. For ++ * some calls, other bits specific to that kind of event are added to ++ * the resume action bits with OR. These are the same bits used in ++ * the @action argument. The resume action returned by a callback ++ * does not override previous engines' choices, it only says what ++ * @engine wants done. What @task actually does is the action that's ++ * most constrained among the choices made by all attached engines. ++ * See utrace_control() for more information on the actions. ++ * ++ * When %UTRACE_STOP is used in @report_syscall_entry, then @task ++ * stops before attempting the system call. In other cases, the ++ * resume action does not take effect until @task is ready to check ++ * for signals and return to user mode. If there are more callbacks ++ * to be made, the last round of calls determines the final action. ++ * A @report_quiesce callback with @event zero, or a @report_signal ++ * callback, will always be the last one made before @task resumes. ++ * Only %UTRACE_STOP is "sticky"--if @engine returned %UTRACE_STOP ++ * then @task stays stopped unless @engine returns different from a ++ * following callback. ++ * ++ * The report_death() and report_reap() callbacks do not take @action ++ * arguments, and only %UTRACE_DETACH is meaningful in the return value ++ * from a report_death() callback. None of the resume actions applies ++ * to a dead thread. ++ * ++ * All @report_*() hooks are called with no locks held, in a generally ++ * safe environment when we will be returning to user mode soon (or just ++ * entered the kernel). 
It is fine to block for memory allocation and ++ * the like, but all hooks are asynchronous and must not block on ++ * external events! If you want the thread to block, use %UTRACE_STOP ++ * in your hook's return value; then later wake it up with utrace_control(). ++ * ++ * @report_quiesce: ++ * Requested by %UTRACE_EVENT(%QUIESCE). ++ * This does not indicate any event, but just that @task (the current ++ * thread) is in a safe place for examination. This call is made ++ * before each specific event callback, except for @report_reap. ++ * The @event argument gives the %UTRACE_EVENT(@which) value for ++ * the event occurring. This callback might be made for events @engine ++ * has not requested, if some other engine is tracing the event; ++ * calling utrace_set_events() call here can request the immediate ++ * callback for this occurrence of @event. @event is zero when there ++ * is no other event, @task is now ready to check for signals and ++ * return to user mode, and some engine has used %UTRACE_REPORT or ++ * %UTRACE_INTERRUPT to request this callback. For this case, ++ * if @report_signal is not %NULL, the @report_quiesce callback ++ * may be replaced with a @report_signal callback passing ++ * %UTRACE_SIGNAL_REPORT in its @action argument, whenever @task is ++ * entering the signal-check path anyway. ++ * ++ * @report_signal: ++ * Requested by %UTRACE_EVENT(%SIGNAL_*) or %UTRACE_EVENT(%QUIESCE). ++ * Use utrace_signal_action() and utrace_resume_action() on @action. ++ * The signal action is %UTRACE_SIGNAL_REPORT when some engine has ++ * used %UTRACE_REPORT or %UTRACE_INTERRUPT; the callback can choose ++ * to stop or to deliver an artificial signal, before pending signals. ++ * It's %UTRACE_SIGNAL_HANDLER instead when signal handler setup just ++ * finished (after a previous %UTRACE_SIGNAL_DELIVER return); this ++ * serves in lieu of any %UTRACE_SIGNAL_REPORT callback requested by ++ * %UTRACE_REPORT or %UTRACE_INTERRUPT, and is also implicitly ++ * requested by %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP into the ++ * signal delivery. The other signal actions indicate a signal about ++ * to be delivered; the previous engine's return value sets the signal ++ * action seen by the the following engine's callback. The @info data ++ * can be changed at will, including @info->si_signo. The settings in ++ * @return_ka determines what %UTRACE_SIGNAL_DELIVER does. @orig_ka ++ * is what was in force before other tracing engines intervened, and ++ * it's %NULL when this report began as %UTRACE_SIGNAL_REPORT or ++ * %UTRACE_SIGNAL_HANDLER. For a report without a new signal, @info ++ * is left uninitialized and must be set completely by an engine that ++ * chooses to deliver a signal; if there was a previous @report_signal ++ * callback ending in %UTRACE_STOP and it was just resumed using ++ * %UTRACE_REPORT or %UTRACE_INTERRUPT, then @info is left unchanged ++ * from the previous callback. In this way, the original signal can ++ * be left in @info while returning %UTRACE_STOP|%UTRACE_SIGNAL_IGN ++ * and then found again when resuming @task with %UTRACE_INTERRUPT. ++ * The %UTRACE_SIGNAL_HOLD flag bit can be OR'd into the return value, ++ * and might be in @action if the previous engine returned it. This ++ * flag asks that the signal in @info be pushed back on @task's queue ++ * so that it will be seen again after whatever action is taken now. ++ * ++ * @report_clone: ++ * Requested by %UTRACE_EVENT(%CLONE). ++ * Event reported for parent, before the new task @child might run. 
++ * @clone_flags gives the flags used in the clone system call, ++ * or equivalent flags for a fork() or vfork() system call. ++ * This function can use utrace_attach_task() on @child. It's guaranteed ++ * that asynchronous utrace_attach_task() calls will be ordered after ++ * any calls in @report_clone callbacks for the parent. Thus ++ * when using %UTRACE_ATTACH_EXCLUSIVE in the asynchronous calls, ++ * you can be sure that the parent's @report_clone callback has ++ * already attached to @child or chosen not to. Passing %UTRACE_STOP ++ * to utrace_control() on @child here keeps the child stopped before ++ * it ever runs in user mode, %UTRACE_REPORT or %UTRACE_INTERRUPT ++ * ensures a callback from @child before it starts in user mode. ++ * ++ * @report_jctl: ++ * Requested by %UTRACE_EVENT(%JCTL). ++ * Job control event; @type is %CLD_STOPPED or %CLD_CONTINUED, ++ * indicating whether we are stopping or resuming now. If @notify ++ * is nonzero, @task is the last thread to stop and so will send ++ * %SIGCHLD to its parent after this callback; @notify reflects ++ * what the parent's %SIGCHLD has in @si_code, which can sometimes ++ * be %CLD_STOPPED even when @type is %CLD_CONTINUED. ++ * ++ * @report_exec: ++ * Requested by %UTRACE_EVENT(%EXEC). ++ * An execve system call has succeeded and the new program is about to ++ * start running. The initial user register state is handy to be tweaked ++ * directly in @regs. @fmt and @bprm gives the details of this exec. ++ * ++ * @report_syscall_entry: ++ * Requested by %UTRACE_EVENT(%SYSCALL_ENTRY). ++ * Thread has entered the kernel to request a system call. ++ * The user register state is handy to be tweaked directly in @regs. ++ * The @action argument contains an &enum utrace_syscall_action, ++ * use utrace_syscall_action() to extract it. The return value ++ * overrides the last engine's action for the system call. ++ * If the final action is %UTRACE_SYSCALL_ABORT, no system call ++ * is made. The details of the system call being attempted can ++ * be fetched here with syscall_get_nr() and syscall_get_arguments(). ++ * The parameter registers can be changed with syscall_set_arguments(). ++ * ++ * @report_syscall_exit: ++ * Requested by %UTRACE_EVENT(%SYSCALL_EXIT). ++ * Thread is about to leave the kernel after a system call request. ++ * The user register state is handy to be tweaked directly in @regs. ++ * The results of the system call attempt can be examined here using ++ * syscall_get_error() and syscall_get_return_value(). It is safe ++ * here to call syscall_set_return_value() or syscall_rollback(). ++ * ++ * @report_exit: ++ * Requested by %UTRACE_EVENT(%EXIT). ++ * Thread is exiting and cannot be prevented from doing so, ++ * but all its state is still live. The @code value will be ++ * the wait result seen by the parent, and can be changed by ++ * this engine or others. The @orig_code value is the real ++ * status, not changed by any tracing engine. Returning %UTRACE_STOP ++ * here keeps @task stopped before it cleans up its state and dies, ++ * so it can be examined by other processes. When @task is allowed ++ * to run, it will die and get to the @report_death callback. ++ * ++ * @report_death: ++ * Requested by %UTRACE_EVENT(%DEATH). ++ * Thread is really dead now. It might be reaped by its parent at ++ * any time, or self-reap immediately. Though the actual reaping ++ * may happen in parallel, a report_reap() callback will always be ++ * ordered after a report_death() callback. 
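++ *	As a purely illustrative sketch (hypothetical engine code), a
++ *	callback that only wants to note the death and then go away
++ *	could be:
++ *
++ *		static u32 my_report_death(struct utrace_engine *engine,
++ *					   struct task_struct *task,
++ *					   bool group_dead, int signal)
++ *		{
++ *			pr_debug("%d died (group_dead=%d)\n",
++ *				 task_pid_nr(task), group_dead);
++ *			return UTRACE_DETACH;
++ *		}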
++ * ++ * @report_reap: ++ * Requested by %UTRACE_EVENT(%REAP). ++ * Called when someone reaps the dead task (parent, init, or self). ++ * This means the parent called wait, or else this was a detached ++ * thread or a process whose parent ignores SIGCHLD. ++ * No more callbacks are made after this one. ++ * The engine is always detached. ++ * There is nothing more a tracing engine can do about this thread. ++ * After this callback, the @engine pointer will become invalid. ++ * The @task pointer may become invalid if get_task_struct() hasn't ++ * been used to keep it alive. ++ * An engine should always request this callback if it stores the ++ * @engine pointer or stores any pointer in @engine->data, so it ++ * can clean up its data structures. ++ * Unlike other callbacks, this can be called from the parent's context ++ * rather than from the traced thread itself--it must not delay the ++ * parent by blocking. ++ * ++ * @release: ++ * If not %NULL, this is called after the last utrace_engine_put() ++ * call for a &struct utrace_engine, which could be implicit after ++ * a %UTRACE_DETACH return from another callback. Its argument is ++ * the engine's @data member. ++ */ ++struct utrace_engine_ops { ++ u32 (*report_quiesce)(enum utrace_resume_action action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ unsigned long event); ++ u32 (*report_signal)(u32 action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ struct pt_regs *regs, ++ siginfo_t *info, ++ const struct k_sigaction *orig_ka, ++ struct k_sigaction *return_ka); ++ u32 (*report_clone)(enum utrace_resume_action action, ++ struct utrace_engine *engine, ++ struct task_struct *parent, ++ unsigned long clone_flags, ++ struct task_struct *child); ++ u32 (*report_jctl)(enum utrace_resume_action action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ int type, int notify); ++ u32 (*report_exec)(enum utrace_resume_action action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ const struct linux_binfmt *fmt, ++ const struct linux_binprm *bprm, ++ struct pt_regs *regs); ++ u32 (*report_syscall_entry)(u32 action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ struct pt_regs *regs); ++ u32 (*report_syscall_exit)(enum utrace_resume_action action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ struct pt_regs *regs); ++ u32 (*report_exit)(enum utrace_resume_action action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ long orig_code, long *code); ++ u32 (*report_death)(struct utrace_engine *engine, ++ struct task_struct *task, ++ bool group_dead, int signal); ++ void (*report_reap)(struct utrace_engine *engine, ++ struct task_struct *task); ++ void (*release)(void *data); ++}; ++ ++/** ++ * struct utrace_examiner - private state for using utrace_prepare_examine() ++ * ++ * The members of &struct utrace_examiner are private to the implementation. ++ * This data type holds the state from a call to utrace_prepare_examine() ++ * to be used by a call to utrace_finish_examine(). ++ */ ++struct utrace_examiner { ++/* private: */ ++ long state; ++ unsigned long ncsw; ++}; ++ ++/* ++ * These are the exported entry points for tracing engines to use. ++ * See kernel/utrace.c for their kerneldoc comments with interface details. 
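++ *
++ * A minimal engine, shown only as a hypothetical sketch (my_ops and
++ * my_report_syscall_entry are invented names, error handling trimmed):
++ *
++ *	static u32 my_report_syscall_entry(u32 action,
++ *					   struct utrace_engine *engine,
++ *					   struct task_struct *task,
++ *					   struct pt_regs *regs)
++ *	{
++ *		pr_debug("%d enters syscall %d\n", task_pid_nr(task),
++ *			 syscall_get_nr(task, regs));
++ *		return utrace_syscall_action(action) | UTRACE_RESUME;
++ *	}
++ *
++ *	static const struct utrace_engine_ops my_ops = {
++ *		.report_syscall_entry = my_report_syscall_entry,
++ *	};
++ *
++ *	engine = utrace_attach_task(task, UTRACE_ATTACH_CREATE,
++ *				    &my_ops, NULL);
++ *	if (!IS_ERR(engine) &&
++ *	    utrace_set_events(task, engine, UTRACE_EVENT(SYSCALL_ENTRY)))
++ *		... handle the error (see utrace_set_events()) ...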
++ */ ++struct utrace_engine *utrace_attach_task(struct task_struct *, int, ++ const struct utrace_engine_ops *, ++ void *); ++struct utrace_engine *utrace_attach_pid(struct pid *, int, ++ const struct utrace_engine_ops *, ++ void *); ++int __must_check utrace_control(struct task_struct *, ++ struct utrace_engine *, ++ enum utrace_resume_action); ++int __must_check utrace_set_events(struct task_struct *, ++ struct utrace_engine *, ++ unsigned long eventmask); ++int __must_check utrace_barrier(struct task_struct *, ++ struct utrace_engine *); ++int __must_check utrace_prepare_examine(struct task_struct *, ++ struct utrace_engine *, ++ struct utrace_examiner *); ++int __must_check utrace_finish_examine(struct task_struct *, ++ struct utrace_engine *, ++ struct utrace_examiner *); ++ ++/** ++ * utrace_control_pid - control a thread being traced by a tracing engine ++ * @pid: thread to affect ++ * @engine: attached engine to affect ++ * @action: &enum utrace_resume_action for thread to do ++ * ++ * This is the same as utrace_control(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. ++ */ ++static inline __must_check int utrace_control_pid( ++ struct pid *pid, struct utrace_engine *engine, ++ enum utrace_resume_action action) ++{ ++ /* ++ * We don't bother with rcu_read_lock() here to protect the ++ * task_struct pointer, because utrace_control will return ++ * -ESRCH without looking at that pointer if the engine is ++ * already detached. A task_struct pointer can't die before ++ * all the engines are detached in release_task() first. ++ */ ++ struct task_struct *task = pid_task(pid, PIDTYPE_PID); ++ return unlikely(!task) ? -ESRCH : utrace_control(task, engine, action); ++} ++ ++/** ++ * utrace_set_events_pid - choose which event reports a tracing engine gets ++ * @pid: thread to affect ++ * @engine: attached engine to affect ++ * @eventmask: new event mask ++ * ++ * This is the same as utrace_set_events(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. ++ */ ++static inline __must_check int utrace_set_events_pid( ++ struct pid *pid, struct utrace_engine *engine, unsigned long eventmask) ++{ ++ struct task_struct *task = pid_task(pid, PIDTYPE_PID); ++ return unlikely(!task) ? -ESRCH : ++ utrace_set_events(task, engine, eventmask); ++} ++ ++/** ++ * utrace_barrier_pid - synchronize with simultaneous tracing callbacks ++ * @pid: thread to affect ++ * @engine: engine to affect (can be detached) ++ * ++ * This is the same as utrace_barrier(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. ++ */ ++static inline __must_check int utrace_barrier_pid(struct pid *pid, ++ struct utrace_engine *engine) ++{ ++ struct task_struct *task = pid_task(pid, PIDTYPE_PID); ++ return unlikely(!task) ? 
-ESRCH : utrace_barrier(task, engine); ++} ++ ++#endif /* CONFIG_UTRACE */ ++ ++#endif /* linux/utrace.h */ +--- /dev/null ++++ b/include/linux/utrace_struct.h +@@ -0,0 +1,59 @@ ++/* ++ * 'struct utrace' data structure for kernel/utrace.c private use. ++ * ++ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU General Public License v.2. ++ */ ++ ++#ifndef _LINUX_UTRACE_STRUCT_H ++#define _LINUX_UTRACE_STRUCT_H 1 ++ ++#ifdef CONFIG_UTRACE ++ ++#include ++#include ++ ++/* ++ * Per-thread structure private to utrace implementation. This properly ++ * belongs in kernel/utrace.c and its use is entirely private to the code ++ * there. It is only defined in a header file so that it can be embedded ++ * in the struct task_struct layout. It is here rather than in utrace.h ++ * to avoid header nesting order issues getting too complex. ++ * ++ */ ++struct utrace { ++ struct task_struct *cloning; ++ ++ struct list_head attached, attaching; ++ spinlock_t lock; ++ ++ struct utrace_engine *reporting; ++ ++ unsigned int stopped:1; ++ unsigned int report:1; ++ unsigned int interrupt:1; ++ unsigned int signal_handler:1; ++ unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */ ++ unsigned int death:1; /* in utrace_report_death() now */ ++ unsigned int reap:1; /* release_task() has run */ ++ unsigned int pending_attach:1; /* need splice_attaching() */ ++}; ++ ++# define INIT_UTRACE(tsk) \ ++ .utrace_flags = 0, \ ++ .utrace = { \ ++ .lock = __SPIN_LOCK_UNLOCKED(tsk.utrace.lock), \ ++ .attached = LIST_HEAD_INIT(tsk.utrace.attached), \ ++ .attaching = LIST_HEAD_INIT(tsk.utrace.attaching), \ ++ }, ++ ++#else ++ ++# define INIT_UTRACE(tsk) /* Nothing. */ ++ ++#endif /* CONFIG_UTRACE */ ++ ++#endif /* linux/utrace_struct.h */ +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1315,6 +1315,15 @@ config STOP_MACHINE + help + Need stop_machine() primitive. + ++menuconfig UTRACE ++ bool "Infrastructure for tracing and debugging user processes" ++ depends on EXPERIMENTAL ++ depends on HAVE_ARCH_TRACEHOOK ++ help ++ Enable the utrace process tracing interface. This is an internal ++ kernel interface exported to kernel modules, to track events in ++ user threads, extract and change user thread state. ++ + source "block/Kconfig" + + config PREEMPT_NOTIFIERS +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -71,6 +71,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o + obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o + obj-$(CONFIG_SMP) += stop_machine.o + obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o ++obj-$(CONFIG_UTRACE) += utrace.o + obj-$(CONFIG_AUDIT) += audit.o auditfilter.o + obj-$(CONFIG_AUDITSYSCALL) += auditsc.o + obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -163,6 +164,14 @@ bool ptrace_may_access(struct task_struc + return !err; + } + ++/* ++ * For experimental use of utrace, exclude ptrace on the same task. 
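++ * Any attached engine leaves ->utrace_flags nonzero, so the checks in
++ * ptrace_attach() and ptrace_traceme() below then fail with -EBUSY.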
++ */ ++static inline bool exclude_ptrace(struct task_struct *task) ++{ ++ return unlikely(!!task_utrace_flags(task)); ++} ++ + static int ptrace_attach(struct task_struct *task) + { + int retval; +@@ -186,6 +195,8 @@ int ptrace_attach(struct task_struct *ta + + task_lock(task); + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); ++ if (!retval && exclude_ptrace(task)) ++ retval = -EBUSY; + task_unlock(task); + if (retval) + goto unlock_creds; +@@ -223,6 +234,9 @@ int ptrace_traceme(void) + { + int ret = -EPERM; + ++ if (exclude_ptrace(current)) /* XXX locking */ ++ return -EBUSY; ++ + write_lock_irq(&tasklist_lock); + /* Are we already being traced? */ + if (!current->ptrace) { +--- /dev/null ++++ b/kernel/utrace.c +@@ -0,0 +1,2340 @@ ++/* ++ * utrace infrastructure interface for debugging user processes ++ * ++ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU General Public License v.2. ++ * ++ * Red Hat Author: Roland McGrath. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++/* ++ * Rules for 'struct utrace', defined in ++ * but used entirely privately in this file. ++ * ++ * The common event reporting loops are done by the task making the ++ * report without ever taking any locks. To facilitate this, the two ++ * lists @attached and @attaching work together for smooth asynchronous ++ * attaching with low overhead. Modifying either list requires @lock. ++ * The @attaching list can be modified any time while holding @lock. ++ * New engines being attached always go on this list. ++ * ++ * The @attached list is what the task itself uses for its reporting ++ * loops. When the task itself is not quiescent, it can use the ++ * @attached list without taking any lock. Nobody may modify the list ++ * when the task is not quiescent. When it is quiescent, that means ++ * that it won't run again without taking @lock itself before using ++ * the list. ++ * ++ * At each place where we know the task is quiescent (or it's current), ++ * while holding @lock, we call splice_attaching(), below. This moves ++ * the @attaching list members on to the end of the @attached list. ++ * Since this happens at the start of any reporting pass, any new ++ * engines attached asynchronously go on the stable @attached list ++ * in time to have their callbacks seen. ++ */ ++ ++static struct kmem_cache *utrace_engine_cachep; ++static const struct utrace_engine_ops utrace_detached_ops; /* forward decl */ ++ ++static int __init utrace_init(void) ++{ ++ utrace_engine_cachep = KMEM_CACHE(utrace_engine, SLAB_PANIC); ++ return 0; ++} ++module_init(utrace_init); ++ ++/* ++ * This is called with @utrace->lock held when the task is safely ++ * quiescent, i.e. it won't consult utrace->attached without the lock. ++ * Move any engines attached asynchronously from @utrace->attaching ++ * onto the @utrace->attached list. ++ */ ++static void splice_attaching(struct utrace *utrace) ++{ ++ list_splice_tail_init(&utrace->attaching, &utrace->attached); ++ utrace->pending_attach = 0; ++} ++ ++/* ++ * This is the exported function used by the utrace_engine_put() inline. 
++ */ ++void __utrace_engine_release(struct kref *kref) ++{ ++ struct utrace_engine *engine = container_of(kref, struct utrace_engine, ++ kref); ++ BUG_ON(!list_empty(&engine->entry)); ++ if (engine->release) ++ (*engine->release)(engine->data); ++ kmem_cache_free(utrace_engine_cachep, engine); ++} ++EXPORT_SYMBOL_GPL(__utrace_engine_release); ++ ++static bool engine_matches(struct utrace_engine *engine, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ if ((flags & UTRACE_ATTACH_MATCH_OPS) && engine->ops != ops) ++ return false; ++ if ((flags & UTRACE_ATTACH_MATCH_DATA) && engine->data != data) ++ return false; ++ return engine->ops && engine->ops != &utrace_detached_ops; ++} ++ ++static struct utrace_engine *matching_engine( ++ struct utrace *utrace, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ struct utrace_engine *engine; ++ list_for_each_entry(engine, &utrace->attached, entry) ++ if (engine_matches(engine, flags, ops, data)) ++ return engine; ++ list_for_each_entry(engine, &utrace->attaching, entry) ++ if (engine_matches(engine, flags, ops, data)) ++ return engine; ++ return NULL; ++} ++ ++/* ++ * Called without locks, when we might be the first utrace engine to attach. ++ * If this is a newborn thread and we are not the creator, we have to wait ++ * for it. The creator gets the first chance to attach. The PF_STARTING ++ * flag is cleared after its report_clone hook has had a chance to run. ++ */ ++static inline int utrace_attach_delay(struct task_struct *target) ++{ ++ if ((target->flags & PF_STARTING) && ++ current->utrace.cloning != target) ++ do { ++ schedule_timeout_interruptible(1); ++ if (signal_pending(current)) ++ return -ERESTARTNOINTR; ++ } while (target->flags & PF_STARTING); ++ ++ return 0; ++} ++ ++/* ++ * Enqueue @engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE. ++ */ ++static int utrace_add_engine(struct task_struct *target, ++ struct utrace *utrace, ++ struct utrace_engine *engine, ++ int flags, ++ const struct utrace_engine_ops *ops, ++ void *data) ++{ ++ int ret; ++ ++ spin_lock(&utrace->lock); ++ ++ ret = -EEXIST; ++ if ((flags & UTRACE_ATTACH_EXCLUSIVE) && ++ unlikely(matching_engine(utrace, flags, ops, data))) ++ goto unlock; ++ ++ /* ++ * In case we had no engines before, make sure that ++ * utrace_flags is not zero. ++ */ ++ ret = -ESRCH; ++ if (!target->utrace_flags) { ++ target->utrace_flags = UTRACE_EVENT(REAP); ++ /* ++ * If we race with tracehook_prepare_release_task() ++ * make sure that either it sees utrace_flags != 0 ++ * or we see exit_state == EXIT_DEAD. ++ */ ++ smp_mb(); ++ if (unlikely(target->exit_state == EXIT_DEAD)) { ++ target->utrace_flags = 0; ++ goto unlock; ++ } ++ } ++ ++ /* ++ * Put the new engine on the pending ->attaching list. ++ * Make sure it gets onto the ->attached list by the next ++ * time it's examined. Setting ->pending_attach ensures ++ * that start_report() takes the lock and splices the lists ++ * before the next new reporting pass. ++ * ++ * When target == current, it would be safe just to call ++ * splice_attaching() right here. But if we're inside a ++ * callback, that would mean the new engine also gets ++ * notified about the event that precipitated its own ++ * creation. This is not what the user wants. 
++ */ ++ list_add_tail(&engine->entry, &utrace->attaching); ++ utrace->pending_attach = 1; ++ ret = 0; ++unlock: ++ spin_unlock(&utrace->lock); ++ ++ return ret; ++} ++ ++/** ++ * utrace_attach_task - attach new engine, or look up an attached engine ++ * @target: thread to attach to ++ * @flags: flag bits combined with OR, see below ++ * @ops: callback table for new engine ++ * @data: engine private data pointer ++ * ++ * The caller must ensure that the @target thread does not get freed, ++ * i.e. hold a ref or be its parent. It is always safe to call this ++ * on @current, or on the @child pointer in a @report_clone callback. ++ * For most other cases, it's easier to use utrace_attach_pid() instead. ++ * ++ * UTRACE_ATTACH_CREATE: ++ * Create a new engine. If %UTRACE_ATTACH_CREATE is not specified, you ++ * only look up an existing engine already attached to the thread. ++ * ++ * UTRACE_ATTACH_EXCLUSIVE: ++ * Attempting to attach a second (matching) engine fails with -%EEXIST. ++ * ++ * UTRACE_ATTACH_MATCH_OPS: Only consider engines matching @ops. ++ * UTRACE_ATTACH_MATCH_DATA: Only consider engines matching @data. ++ * ++ * Calls with neither %UTRACE_ATTACH_MATCH_OPS nor %UTRACE_ATTACH_MATCH_DATA ++ * match the first among any engines attached to @target. That means that ++ * %UTRACE_ATTACH_EXCLUSIVE in such a call fails with -%EEXIST if there ++ * are any engines on @target at all. ++ */ ++struct utrace_engine *utrace_attach_task( ++ struct task_struct *target, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ struct utrace *utrace = task_utrace_struct(target); ++ struct utrace_engine *engine; ++ int ret; ++ ++ if (!(flags & UTRACE_ATTACH_CREATE)) { ++ spin_lock(&utrace->lock); ++ engine = matching_engine(utrace, flags, ops, data); ++ if (engine) ++ utrace_engine_get(engine); ++ spin_unlock(&utrace->lock); ++ return engine ?: ERR_PTR(-ENOENT); ++ } ++ ++ if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops)) ++ return ERR_PTR(-EINVAL); ++ ++ if (unlikely(target->flags & PF_KTHREAD)) ++ /* ++ * Silly kernel, utrace is for users! ++ */ ++ return ERR_PTR(-EPERM); ++ ++ engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL); ++ if (unlikely(!engine)) ++ return ERR_PTR(-ENOMEM); ++ ++ /* ++ * Initialize the new engine structure. It starts out with two ++ * refs: one ref to return, and one ref for being attached. ++ */ ++ kref_init(&engine->kref); ++ kref_get(&engine->kref); ++ engine->flags = 0; ++ engine->ops = ops; ++ engine->data = data; ++ engine->release = ops->release; ++ ++ ret = utrace_attach_delay(target); ++ if (likely(!ret)) ++ ret = utrace_add_engine(target, utrace, engine, ++ flags, ops, data); ++ ++ if (unlikely(ret)) { ++ kmem_cache_free(utrace_engine_cachep, engine); ++ engine = ERR_PTR(ret); ++ } ++ ++ return engine; ++} ++EXPORT_SYMBOL_GPL(utrace_attach_task); ++ ++/** ++ * utrace_attach_pid - attach new engine, or look up an attached engine ++ * @pid: &struct pid pointer representing thread to attach to ++ * @flags: flag bits combined with OR, see utrace_attach_task() ++ * @ops: callback table for new engine ++ * @data: engine private data pointer ++ * ++ * This is the same as utrace_attach_task(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. 
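++ *
++ * Illustrative use only (nr and my_ops are hypothetical names):
++ *
++ *	struct pid *pid = find_get_pid(nr);
++ *	if (pid) {
++ *		engine = utrace_attach_pid(pid, UTRACE_ATTACH_CREATE,
++ *					   &my_ops, NULL);
++ *		put_pid(pid);
++ *	}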
++ */ ++struct utrace_engine *utrace_attach_pid( ++ struct pid *pid, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ struct utrace_engine *engine = ERR_PTR(-ESRCH); ++ struct task_struct *task = get_pid_task(pid, PIDTYPE_PID); ++ if (task) { ++ engine = utrace_attach_task(task, flags, ops, data); ++ put_task_struct(task); ++ } ++ return engine; ++} ++EXPORT_SYMBOL_GPL(utrace_attach_pid); ++ ++/* ++ * When an engine is detached, the target thread may still see it and ++ * make callbacks until it quiesces. We install a special ops vector ++ * with these two callbacks. When the target thread quiesces, it can ++ * safely free the engine itself. For any event we will always get ++ * the report_quiesce() callback first, so we only need this one ++ * pointer to be set. The only exception is report_reap(), so we ++ * supply that callback too. ++ */ ++static u32 utrace_detached_quiesce(enum utrace_resume_action action, ++ struct utrace_engine *engine, ++ struct task_struct *task, ++ unsigned long event) ++{ ++ return UTRACE_DETACH; ++} ++ ++static void utrace_detached_reap(struct utrace_engine *engine, ++ struct task_struct *task) ++{ ++} ++ ++static const struct utrace_engine_ops utrace_detached_ops = { ++ .report_quiesce = &utrace_detached_quiesce, ++ .report_reap = &utrace_detached_reap ++}; ++ ++/* ++ * The caller has to hold a ref on the engine. If the attached flag is ++ * true (all but utrace_barrier() calls), the engine is supposed to be ++ * attached. If the attached flag is false (utrace_barrier() only), ++ * then return -ERESTARTSYS for an engine marked for detach but not yet ++ * fully detached. The task pointer can be invalid if the engine is ++ * detached. ++ * ++ * Get the utrace lock for the target task. ++ * Returns the struct if locked, or ERR_PTR(-errno). ++ * ++ * This has to be robust against races with: ++ * utrace_control(target, UTRACE_DETACH) calls ++ * UTRACE_DETACH after reports ++ * utrace_report_death ++ * utrace_release_task ++ */ ++static struct utrace *get_utrace_lock(struct task_struct *target, ++ struct utrace_engine *engine, ++ bool attached) ++ __acquires(utrace->lock) ++{ ++ struct utrace *utrace; ++ ++ rcu_read_lock(); ++ ++ /* ++ * If this engine was already detached, bail out before we look at ++ * the task_struct pointer at all. If it's detached after this ++ * check, then RCU is still keeping this task_struct pointer valid. ++ * ++ * The ops pointer is NULL when the engine is fully detached. ++ * It's &utrace_detached_ops when it's marked detached but still ++ * on the list. In the latter case, utrace_barrier() still works, ++ * since the target might be in the middle of an old callback. ++ */ ++ if (unlikely(!engine->ops)) { ++ rcu_read_unlock(); ++ return ERR_PTR(-ESRCH); ++ } ++ ++ if (unlikely(engine->ops == &utrace_detached_ops)) { ++ rcu_read_unlock(); ++ return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS); ++ } ++ ++ utrace = &target->utrace; ++ spin_lock(&utrace->lock); ++ if (unlikely(!engine->ops) || ++ unlikely(engine->ops == &utrace_detached_ops)) { ++ /* ++ * By the time we got the utrace lock, ++ * it had been reaped or detached already. ++ */ ++ spin_unlock(&utrace->lock); ++ utrace = ERR_PTR(-ESRCH); ++ if (!attached && engine->ops == &utrace_detached_ops) ++ utrace = ERR_PTR(-ERESTARTSYS); ++ } ++ rcu_read_unlock(); ++ ++ return utrace; ++} ++ ++/* ++ * Now that we don't hold any locks, run through any ++ * detached engines and free their references. Each ++ * engine had one implicit ref while it was attached. 
++ */ ++static void put_detached_list(struct list_head *list) ++{ ++ struct utrace_engine *engine, *next; ++ list_for_each_entry_safe(engine, next, list, entry) { ++ list_del_init(&engine->entry); ++ utrace_engine_put(engine); ++ } ++} ++ ++/* ++ * Called with utrace->lock held and utrace->reap set. ++ * Notify and clean up all engines, then free utrace. ++ */ ++static void utrace_reap(struct task_struct *target, struct utrace *utrace) ++ __releases(utrace->lock) ++{ ++ struct utrace_engine *engine, *next; ++ ++ /* utrace_add_engine() checks ->utrace_flags != 0 */ ++ target->utrace_flags = 0; ++ splice_attaching(utrace); ++ ++ /* ++ * Since we were called with @utrace->reap set, nobody can ++ * set/clear UTRACE_EVENT(REAP) in @engine->flags or change ++ * @engine->ops, and nobody can change @utrace->attached. ++ */ ++ spin_unlock(&utrace->lock); ++ ++ list_for_each_entry_safe(engine, next, &utrace->attached, entry) { ++ if (engine->flags & UTRACE_EVENT(REAP)) ++ engine->ops->report_reap(engine, target); ++ ++ engine->ops = NULL; ++ engine->flags = 0; ++ list_del_init(&engine->entry); ++ ++ utrace_engine_put(engine); ++ } ++} ++ ++ ++/* ++ * Called by release_task. After this, target->utrace must be cleared. ++ */ ++void utrace_release_task(struct task_struct *target) ++{ ++ struct utrace *utrace; ++ ++ utrace = &target->utrace; ++ ++ spin_lock(&utrace->lock); ++ ++ utrace->reap = 1; ++ ++ /* ++ * If the target will do some final callbacks but hasn't ++ * finished them yet, we know because it clears these event ++ * bits after it's done. Instead of cleaning up here and ++ * requiring utrace_report_death() to cope with it, we delay ++ * the REAP report and the teardown until after the target ++ * finishes its death reports. ++ */ ++ ++ if (target->utrace_flags & _UTRACE_DEATH_EVENTS) ++ spin_unlock(&utrace->lock); ++ else ++ utrace_reap(target, utrace); /* Unlocks. */ ++} ++ ++/* ++ * We use an extra bit in utrace_engine.flags past the event bits, ++ * to record whether the engine is keeping the target thread stopped. ++ * ++ * This bit is set in task_struct.utrace_flags whenever it is set in any ++ * engine's flags. Only utrace_reset() resets it in utrace_flags. ++ */ ++#define ENGINE_STOP (1UL << _UTRACE_NEVENTS) ++ ++static void mark_engine_wants_stop(struct task_struct *task, ++ struct utrace_engine *engine) ++{ ++ engine->flags |= ENGINE_STOP; ++ task->utrace_flags |= ENGINE_STOP; ++} ++ ++static void clear_engine_wants_stop(struct utrace_engine *engine) ++{ ++ engine->flags &= ~ENGINE_STOP; ++} ++ ++static bool engine_wants_stop(struct utrace_engine *engine) ++{ ++ return (engine->flags & ENGINE_STOP) != 0; ++} ++ ++/** ++ * utrace_set_events - choose which event reports a tracing engine gets ++ * @target: thread to affect ++ * @engine: attached engine to affect ++ * @events: new event mask ++ * ++ * This changes the set of events for which @engine wants callbacks made. ++ * ++ * This fails with -%EALREADY and does nothing if you try to clear ++ * %UTRACE_EVENT(%DEATH) when the @report_death callback may already have ++ * begun, if you try to clear %UTRACE_EVENT(%REAP) when the @report_reap ++ * callback may already have begun, or if you try to newly set ++ * %UTRACE_EVENT(%DEATH) or %UTRACE_EVENT(%QUIESCE) when @target is ++ * already dead or dying. ++ * ++ * This can fail with -%ESRCH when @target has already been detached, ++ * including forcible detach on reaping. 
++ * ++ * If @target was stopped before the call, then after a successful call, ++ * no event callbacks not requested in @events will be made; if ++ * %UTRACE_EVENT(%QUIESCE) is included in @events, then a ++ * @report_quiesce callback will be made when @target resumes. ++ * ++ * If @target was not stopped and @events excludes some bits that were ++ * set before, this can return -%EINPROGRESS to indicate that @target ++ * may have been making some callback to @engine. When this returns ++ * zero, you can be sure that no event callbacks you've disabled in ++ * @events can be made. If @events only sets new bits that were not set ++ * before on @engine, then -%EINPROGRESS will never be returned. ++ * ++ * To synchronize after an -%EINPROGRESS return, see utrace_barrier(). ++ * ++ * When @target is @current, -%EINPROGRESS is not returned. But note ++ * that a newly-created engine will not receive any callbacks related to ++ * an event notification already in progress. This call enables @events ++ * callbacks to be made as soon as @engine becomes eligible for any ++ * callbacks, see utrace_attach_task(). ++ * ++ * These rules provide for coherent synchronization based on %UTRACE_STOP, ++ * even when %SIGKILL is breaking its normal simple rules. ++ */ ++int utrace_set_events(struct task_struct *target, ++ struct utrace_engine *engine, ++ unsigned long events) ++{ ++ struct utrace *utrace; ++ unsigned long old_flags, old_utrace_flags, set_utrace_flags; ++ int ret; ++ ++ utrace = get_utrace_lock(target, engine, true); ++ if (unlikely(IS_ERR(utrace))) ++ return PTR_ERR(utrace); ++ ++ old_utrace_flags = target->utrace_flags; ++ set_utrace_flags = events; ++ old_flags = engine->flags & ~ENGINE_STOP; ++ ++ if (target->exit_state && ++ (((events & ~old_flags) & _UTRACE_DEATH_EVENTS) || ++ (utrace->death && ++ ((old_flags & ~events) & _UTRACE_DEATH_EVENTS)) || ++ (utrace->reap && ((old_flags & ~events) & UTRACE_EVENT(REAP))))) { ++ spin_unlock(&utrace->lock); ++ return -EALREADY; ++ } ++ ++ /* ++ * When setting these flags, it's essential that we really ++ * synchronize with exit_notify(). They cannot be set after ++ * exit_notify() takes the tasklist_lock. By holding the read ++ * lock here while setting the flags, we ensure that the calls ++ * to tracehook_notify_death() and tracehook_report_death() will ++ * see the new flags. This ensures that utrace_release_task() ++ * knows positively that utrace_report_death() will be called or ++ * that it won't. ++ */ ++ if ((set_utrace_flags & ~old_utrace_flags) & _UTRACE_DEATH_EVENTS) { ++ read_lock(&tasklist_lock); ++ if (unlikely(target->exit_state)) { ++ read_unlock(&tasklist_lock); ++ spin_unlock(&utrace->lock); ++ return -EALREADY; ++ } ++ target->utrace_flags |= set_utrace_flags; ++ read_unlock(&tasklist_lock); ++ } ++ ++ engine->flags = events | (engine->flags & ENGINE_STOP); ++ target->utrace_flags |= set_utrace_flags; ++ ++ if ((set_utrace_flags & UTRACE_EVENT_SYSCALL) && ++ !(old_utrace_flags & UTRACE_EVENT_SYSCALL)) ++ set_tsk_thread_flag(target, TIF_SYSCALL_TRACE); ++ ++ ret = 0; ++ if ((old_flags & ~events) && ++ !utrace->stopped && target != current && !target->exit_state) { ++ /* ++ * This barrier ensures that our engine->flags changes ++ * have hit before we examine utrace->reporting, ++ * pairing with the barrier in start_callback(). If ++ * @target has not yet hit finish_callback() to clear ++ * utrace->reporting, we might be in the middle of a ++ * callback to @engine. 
++ */ ++ smp_mb(); ++ if (utrace->reporting == engine) ++ ret = -EINPROGRESS; ++ } ++ ++ spin_unlock(&utrace->lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_set_events); ++ ++/* ++ * Asynchronously mark an engine as being detached. ++ * ++ * This must work while the target thread races with us doing ++ * start_callback(), defined below. It uses smp_rmb() between checking ++ * @engine->flags and using @engine->ops. Here we change @engine->ops ++ * first, then use smp_wmb() before changing @engine->flags. This ensures ++ * it can check the old flags before using the old ops, or check the old ++ * flags before using the new ops, or check the new flags before using the ++ * new ops, but can never check the new flags before using the old ops. ++ * Hence, utrace_detached_ops might be used with any old flags in place. ++ * It has report_quiesce() and report_reap() callbacks to handle all cases. ++ */ ++static void mark_engine_detached(struct utrace_engine *engine) ++{ ++ engine->ops = &utrace_detached_ops; ++ smp_wmb(); ++ engine->flags = UTRACE_EVENT(QUIESCE); ++} ++ ++/* ++ * Get @target to stop and return true if it is already stopped now. ++ * If we return false, it will make some event callback soonish. ++ * Called with @utrace locked. ++ */ ++static bool utrace_do_stop(struct task_struct *target, struct utrace *utrace) ++{ ++ bool stopped = false; ++ ++ if (task_is_stopped(target)) { ++ /* ++ * Stopped is considered quiescent; when it wakes up, it will ++ * go through utrace_finish_jctl() before doing anything else. ++ */ ++ spin_lock_irq(&target->sighand->siglock); ++ if (likely(task_is_stopped(target))) { ++ __set_task_state(target, TASK_TRACED); ++ utrace->stopped = stopped = true; ++ } ++ spin_unlock_irq(&target->sighand->siglock); ++ } else if (!utrace->report && !utrace->interrupt) { ++ utrace->report = 1; ++ set_notify_resume(target); ++ } ++ ++ return stopped; ++} ++ ++/* ++ * If the target is not dead it should not be in tracing ++ * stop any more. Wake it unless it's in job control stop. ++ * ++ * Called with @utrace->lock held and @utrace->stopped set. ++ */ ++static void utrace_wakeup(struct task_struct *target, struct utrace *utrace) ++{ ++ utrace->stopped = 0; ++ ++ /* The task must be either TASK_TRACED or killed */ ++ spin_lock_irq(&target->sighand->siglock); ++ if (target->signal->flags & SIGNAL_STOP_STOPPED || ++ target->signal->group_stop_count) ++ target->state = TASK_STOPPED; ++ else ++ wake_up_state(target, __TASK_TRACED); ++ spin_unlock_irq(&target->sighand->siglock); ++} ++ ++/* ++ * This is called when there might be some detached engines on the list or ++ * some stale bits in @task->utrace_flags. Clean them up and recompute the ++ * flags. Returns true if we're now fully detached. ++ * ++ * Called with @utrace->lock held, returns with it released. ++ * After this returns, @utrace might be freed if everything detached. ++ */ ++static bool utrace_reset(struct task_struct *task, struct utrace *utrace) ++ __releases(utrace->lock) ++{ ++ struct utrace_engine *engine, *next; ++ unsigned long flags = 0; ++ LIST_HEAD(detached); ++ ++ splice_attaching(utrace); ++ ++ /* ++ * Update the set of events of interest from the union ++ * of the interests of the remaining tracing engines. ++ * For any engine marked detached, remove it from the list. ++ * We'll collect them on the detached list. 
++ */ ++ list_for_each_entry_safe(engine, next, &utrace->attached, entry) { ++ if (engine->ops == &utrace_detached_ops) { ++ engine->ops = NULL; ++ list_move(&engine->entry, &detached); ++ } else { ++ flags |= engine->flags | UTRACE_EVENT(REAP); ++ } ++ } ++ ++ if (task->exit_state) { ++ /* ++ * Once it's already dead, we never install any flags ++ * except REAP. When ->exit_state is set and events ++ * like DEATH are not set, then they never can be set. ++ * This ensures that utrace_release_task() knows ++ * positively that utrace_report_death() can never run. ++ */ ++ BUG_ON(utrace->death); ++ flags &= UTRACE_EVENT(REAP); ++ } else if (!(flags & UTRACE_EVENT_SYSCALL) && ++ test_tsk_thread_flag(task, TIF_SYSCALL_TRACE)) { ++ clear_tsk_thread_flag(task, TIF_SYSCALL_TRACE); ++ } ++ ++ if (!flags) ++ /* ++ * No more engines, cleared out the utrace. ++ */ ++ utrace->interrupt = utrace->report = utrace->signal_handler = 0; ++ ++ if (!(flags & ENGINE_STOP) && utrace->stopped) ++ /* ++ * No more engines want it stopped. Wake it up. ++ */ ++ utrace_wakeup(task, utrace); ++ ++ /* ++ * In theory spin_lock() doesn't imply rcu_read_lock(). ++ * Once we clear ->utrace_flags this task_struct can go away ++ * because tracehook_prepare_release_task() path does not take ++ * utrace->lock when ->utrace_flags == 0. ++ */ ++ rcu_read_lock(); ++ task->utrace_flags = flags; ++ spin_unlock(&utrace->lock); ++ rcu_read_unlock(); ++ ++ put_detached_list(&detached); ++ ++ return !flags; ++} ++ ++/* ++ * Perform %UTRACE_STOP, i.e. block in TASK_TRACED until woken up. ++ * @task == current, @utrace == current->utrace, which is not locked. ++ * Return true if we were woken up by SIGKILL even though some utrace ++ * engine may still want us to stay stopped. ++ */ ++static void utrace_stop(struct task_struct *task, struct utrace *utrace, ++ enum utrace_resume_action action) ++{ ++ /* ++ * @utrace->stopped is the flag that says we are safely ++ * inside this function. It should never be set on entry. ++ */ ++ BUG_ON(utrace->stopped); ++relock: ++ spin_lock(&utrace->lock); ++ ++ if (action == UTRACE_INTERRUPT) { ++ /* ++ * Ensure a %UTRACE_SIGNAL_REPORT reporting pass when we're ++ * resumed. The recalc_sigpending() call below will see ++ * this flag and set TIF_SIGPENDING. ++ */ ++ utrace->interrupt = 1; ++ } else if (action < UTRACE_RESUME) { ++ /* ++ * Ensure a reporting pass when we're resumed. ++ */ ++ utrace->report = 1; ++ set_thread_flag(TIF_NOTIFY_RESUME); ++ } ++ ++ /* ++ * If the ENGINE_STOP bit is clear in utrace_flags, that means ++ * utrace_reset() ran after we processed some UTRACE_STOP return ++ * values from callbacks to get here. If all engines have detached ++ * or resumed us, we don't stop. This check doesn't require ++ * siglock, but it should follow the interrupt/report bookkeeping ++ * steps (this can matter for UTRACE_RESUME but not UTRACE_DETACH). ++ */ ++ if (unlikely(!(task->utrace_flags & ENGINE_STOP))) { ++ utrace_reset(task, utrace); ++ if (task->utrace_flags & ENGINE_STOP) ++ goto relock; ++ return; ++ } ++ ++ /* ++ * The siglock protects us against signals. As well as SIGKILL ++ * waking us up, we must synchronize with the signal bookkeeping ++ * for stop signals and SIGCONT. 
++ */ ++ spin_lock_irq(&task->sighand->siglock); ++ ++ if (unlikely(__fatal_signal_pending(task))) { ++ spin_unlock_irq(&task->sighand->siglock); ++ spin_unlock(&utrace->lock); ++ return; ++ } ++ ++ utrace->stopped = 1; ++ __set_current_state(TASK_TRACED); ++ ++ /* ++ * If there is a group stop in progress, ++ * we must participate in the bookkeeping. ++ */ ++ if (unlikely(task->signal->group_stop_count) && ++ !--task->signal->group_stop_count) ++ task->signal->flags = SIGNAL_STOP_STOPPED; ++ ++ spin_unlock_irq(&task->sighand->siglock); ++ spin_unlock(&utrace->lock); ++ ++ schedule(); ++ ++ /* ++ * While in TASK_TRACED, we were considered "frozen enough". ++ * Now that we woke up, it's crucial if we're supposed to be ++ * frozen that we freeze now before running anything substantial. ++ */ ++ try_to_freeze(); ++ ++ /* ++ * utrace_wakeup() clears @utrace->stopped before waking us up. ++ * We're officially awake if it's clear. ++ */ ++ if (unlikely(utrace->stopped)) { ++ /* ++ * If we're here with it still set, it must have been ++ * signal_wake_up() instead, waking us up for a SIGKILL. ++ */ ++ WARN_ON(!__fatal_signal_pending(task)); ++ spin_lock(&utrace->lock); ++ utrace->stopped = 0; ++ spin_unlock(&utrace->lock); ++ } ++ ++ /* ++ * While we were in TASK_TRACED, complete_signal() considered ++ * us "uninterested" in signal wakeups. Now make sure our ++ * TIF_SIGPENDING state is correct for normal running. ++ */ ++ spin_lock_irq(&task->sighand->siglock); ++ recalc_sigpending(); ++ spin_unlock_irq(&task->sighand->siglock); ++} ++ ++/* ++ * You can't do anything to a dead task but detach it. ++ * If release_task() has been called, you can't do that. ++ * ++ * On the exit path, DEATH and QUIESCE event bits are set only ++ * before utrace_report_death() has taken the lock. At that point, ++ * the death report will come soon, so disallow detach until it's ++ * done. This prevents us from racing with it detaching itself. ++ * ++ * Called with utrace->lock held, when @target->exit_state is nonzero. ++ */ ++static inline int utrace_control_dead(struct task_struct *target, ++ struct utrace *utrace, ++ enum utrace_resume_action action) ++{ ++ if (action != UTRACE_DETACH || unlikely(utrace->reap)) ++ return -ESRCH; ++ ++ if (unlikely(utrace->death)) ++ /* ++ * We have already started the death report. We can't ++ * prevent the report_death and report_reap callbacks, ++ * so tell the caller they will happen. ++ */ ++ return -EALREADY; ++ ++ return 0; ++} ++ ++/** ++ * utrace_control - control a thread being traced by a tracing engine ++ * @target: thread to affect ++ * @engine: attached engine to affect ++ * @action: &enum utrace_resume_action for thread to do ++ * ++ * This is how a tracing engine asks a traced thread to do something. ++ * This call is controlled by the @action argument, which has the ++ * same meaning as the &enum utrace_resume_action value returned by ++ * event reporting callbacks. ++ * ++ * If @target is already dead (@target->exit_state nonzero), ++ * all actions except %UTRACE_DETACH fail with -%ESRCH. ++ * ++ * The following sections describe each option for the @action argument. ++ * ++ * UTRACE_DETACH: ++ * ++ * After this, the @engine data structure is no longer accessible, ++ * and the thread might be reaped. The thread will start running ++ * again if it was stopped and no longer has any attached engines ++ * that want it stopped. ++ * ++ * If the @report_reap callback may already have begun, this fails ++ * with -%ESRCH. 
If the @report_death callback may already have ++ * begun, this fails with -%EALREADY. ++ * ++ * If @target is not already stopped, then a callback to this engine ++ * might be in progress or about to start on another CPU. If so, ++ * then this returns -%EINPROGRESS; the detach happens as soon as ++ * the pending callback is finished. To synchronize after an ++ * -%EINPROGRESS return, see utrace_barrier(). ++ * ++ * If @target is properly stopped before utrace_control() is called, ++ * then after successful return it's guaranteed that no more callbacks ++ * to the @engine->ops vector will be made. ++ * ++ * The only exception is %SIGKILL (and exec or group-exit by another ++ * thread in the group), which can cause asynchronous @report_death ++ * and/or @report_reap callbacks even when %UTRACE_STOP was used. ++ * (In that event, this fails with -%ESRCH or -%EALREADY, see above.) ++ * ++ * UTRACE_STOP: ++ * This asks that @target stop running. This returns 0 only if ++ * @target is already stopped, either for tracing or for job ++ * control. Then @target will remain stopped until another ++ * utrace_control() call is made on @engine; @target can be woken ++ * only by %SIGKILL (or equivalent, such as exec or termination by ++ * another thread in the same thread group). ++ * ++ * This returns -%EINPROGRESS if @target is not already stopped. ++ * Then the effect is like %UTRACE_REPORT. A @report_quiesce or ++ * @report_signal callback will be made soon. Your callback can ++ * then return %UTRACE_STOP to keep @target stopped. ++ * ++ * This does not interrupt system calls in progress, including ones ++ * that sleep for a long time. For that, use %UTRACE_INTERRUPT. ++ * To interrupt system calls and then keep @target stopped, your ++ * @report_signal callback can return %UTRACE_STOP. ++ * ++ * UTRACE_RESUME: ++ * ++ * Just let @target continue running normally, reversing the effect ++ * of a previous %UTRACE_STOP. If another engine is keeping @target ++ * stopped, then it remains stopped until all engines let it resume. ++ * If @target was not stopped, this has no effect. ++ * ++ * UTRACE_REPORT: ++ * ++ * This is like %UTRACE_RESUME, but also ensures that there will be ++ * a @report_quiesce or @report_signal callback made soon. If ++ * @target had been stopped, then there will be a callback before it ++ * resumes running normally. If another engine is keeping @target ++ * stopped, then there might be no callbacks until all engines let ++ * it resume. ++ * ++ * Since this is meaningless unless @report_quiesce callbacks will ++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). ++ * ++ * UTRACE_INTERRUPT: ++ * ++ * This is like %UTRACE_REPORT, but ensures that @target will make a ++ * @report_signal callback before it resumes or delivers signals. ++ * If @target was in a system call or about to enter one, work in ++ * progress will be interrupted as if by %SIGSTOP. If another ++ * engine is keeping @target stopped, then there might be no ++ * callbacks until all engines let it resume. ++ * ++ * This gives @engine an opportunity to introduce a forced signal ++ * disposition via its @report_signal callback. ++ * ++ * UTRACE_SINGLESTEP: ++ * ++ * It's invalid to use this unless arch_has_single_step() returned true. ++ * This is like %UTRACE_RESUME, but resumes for one user instruction ++ * only. It's invalid to use this in utrace_control() unless @target ++ * had been stopped by @engine previously. 
++ * ++ * Note that passing %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP to ++ * utrace_control() or returning it from an event callback alone does ++ * not necessarily ensure that stepping will be enabled. If there are ++ * more callbacks made to any engine before returning to user mode, ++ * then the resume action is chosen only by the last set of callbacks. ++ * To be sure, enable %UTRACE_EVENT(%QUIESCE) and look for the ++ * @report_quiesce callback with a zero event mask, or the ++ * @report_signal callback with %UTRACE_SIGNAL_REPORT. ++ * ++ * Since this is not robust unless @report_quiesce callbacks will ++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). ++ * ++ * UTRACE_BLOCKSTEP: ++ * ++ * It's invalid to use this unless arch_has_block_step() returned true. ++ * This is like %UTRACE_SINGLESTEP, but resumes for one whole basic ++ * block of user instructions. ++ * ++ * Since this is not robust unless @report_quiesce callbacks will ++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). ++ * ++ * %UTRACE_BLOCKSTEP devolves to %UTRACE_SINGLESTEP when another ++ * tracing engine is using %UTRACE_SINGLESTEP at the same time. ++ */ ++int utrace_control(struct task_struct *target, ++ struct utrace_engine *engine, ++ enum utrace_resume_action action) ++{ ++ struct utrace *utrace; ++ bool reset; ++ int ret; ++ ++ if (unlikely(action > UTRACE_DETACH)) ++ return -EINVAL; ++ ++ /* ++ * This is a sanity check for a programming error in the caller. ++ * Their request can only work properly in all cases by relying on ++ * a follow-up callback, but they didn't set one up! This check ++ * doesn't do locking, but it shouldn't matter. The caller has to ++ * be synchronously sure the callback is set up to be operating the ++ * interface properly. ++ */ ++ if (action >= UTRACE_REPORT && action < UTRACE_RESUME && ++ unlikely(!(engine->flags & UTRACE_EVENT(QUIESCE)))) ++ return -EINVAL; ++ ++ utrace = get_utrace_lock(target, engine, true); ++ if (unlikely(IS_ERR(utrace))) ++ return PTR_ERR(utrace); ++ ++ reset = utrace->stopped; ++ ret = 0; ++ ++ /* ++ * ->exit_state can change under us, this doesn't matter. ++ * We do not care about ->exit_state in fact, but we do ++ * care about ->reap and ->death. If either flag is set, ++ * we must also see ->exit_state != 0. ++ */ ++ if (unlikely(target->exit_state)) { ++ ret = utrace_control_dead(target, utrace, action); ++ if (ret) { ++ spin_unlock(&utrace->lock); ++ return ret; ++ } ++ reset = true; ++ } ++ ++ switch (action) { ++ case UTRACE_STOP: ++ mark_engine_wants_stop(target, engine); ++ if (!reset && !utrace_do_stop(target, utrace)) ++ ret = -EINPROGRESS; ++ reset = false; ++ break; ++ ++ case UTRACE_DETACH: ++ if (engine_wants_stop(engine)) ++ target->utrace_flags &= ~ENGINE_STOP; ++ mark_engine_detached(engine); ++ reset = reset || utrace_do_stop(target, utrace); ++ if (!reset) { ++ /* ++ * As in utrace_set_events(), this barrier ensures ++ * that our engine->flags changes have hit before we ++ * examine utrace->reporting, pairing with the barrier ++ * in start_callback(). If @target has not yet hit ++ * finish_callback() to clear utrace->reporting, we ++ * might be in the middle of a callback to @engine. ++ */ ++ smp_mb(); ++ if (utrace->reporting == engine) ++ ret = -EINPROGRESS; ++ } ++ break; ++ ++ case UTRACE_RESUME: ++ /* ++ * This and all other cases imply resuming if stopped. ++ * There might not be another report before it just ++ * resumes, so make sure single-step is not left set. 
++ */ ++ clear_engine_wants_stop(engine); ++ if (likely(reset)) ++ user_disable_single_step(target); ++ break; ++ ++ case UTRACE_REPORT: ++ /* ++ * Make the thread call tracehook_notify_resume() soon. ++ * But don't bother if it's already been interrupted. ++ * In that case, utrace_get_signal() will be reporting soon. ++ */ ++ clear_engine_wants_stop(engine); ++ if (!utrace->report && !utrace->interrupt) { ++ utrace->report = 1; ++ set_notify_resume(target); ++ } ++ break; ++ ++ case UTRACE_INTERRUPT: ++ /* ++ * Make the thread call tracehook_get_signal() soon. ++ */ ++ clear_engine_wants_stop(engine); ++ if (utrace->interrupt) ++ break; ++ utrace->interrupt = 1; ++ ++ /* ++ * If it's not already stopped, interrupt it now. ++ * We need the siglock here in case it calls ++ * recalc_sigpending() and clears its own ++ * TIF_SIGPENDING. By taking the lock, we've ++ * serialized any later recalc_sigpending() after ++ * our setting of utrace->interrupt to force it on. ++ */ ++ if (reset) { ++ /* ++ * This is really just to keep the invariant ++ * that TIF_SIGPENDING is set with utrace->interrupt. ++ * When it's stopped, we know it's always going ++ * through utrace_get_signal and will recalculate. ++ */ ++ set_tsk_thread_flag(target, TIF_SIGPENDING); ++ } else { ++ struct sighand_struct *sighand; ++ unsigned long irqflags; ++ sighand = lock_task_sighand(target, &irqflags); ++ if (likely(sighand)) { ++ signal_wake_up(target, 0); ++ unlock_task_sighand(target, &irqflags); ++ } ++ } ++ break; ++ ++ case UTRACE_BLOCKSTEP: ++ /* ++ * Resume from stopped, step one block. ++ */ ++ clear_engine_wants_stop(engine); ++ if (unlikely(!arch_has_block_step())) { ++ WARN_ON(1); ++ /* Fall through to treat it as SINGLESTEP. */ ++ } else if (likely(reset)) { ++ user_enable_block_step(target); ++ break; ++ } ++ ++ case UTRACE_SINGLESTEP: ++ /* ++ * Resume from stopped, step one instruction. ++ */ ++ clear_engine_wants_stop(engine); ++ if (unlikely(!arch_has_single_step())) { ++ WARN_ON(1); ++ reset = false; ++ ret = -EOPNOTSUPP; ++ break; ++ } ++ ++ if (likely(reset)) ++ user_enable_single_step(target); ++ else ++ /* ++ * You were supposed to stop it before asking ++ * it to step. ++ */ ++ ret = -EAGAIN; ++ break; ++ } ++ ++ /* ++ * Let the thread resume running. If it's not stopped now, ++ * there is nothing more we need to do. ++ */ ++ if (reset) ++ utrace_reset(target, utrace); ++ else ++ spin_unlock(&utrace->lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_control); ++ ++/** ++ * utrace_barrier - synchronize with simultaneous tracing callbacks ++ * @target: thread to affect ++ * @engine: engine to affect (can be detached) ++ * ++ * This blocks while @target might be in the midst of making a callback to ++ * @engine. It can be interrupted by signals and will return -%ERESTARTSYS. ++ * A return value of zero means no callback from @target to @engine was ++ * in progress. Any effect of its return value (such as %UTRACE_STOP) has ++ * already been applied to @engine. ++ * ++ * It's not necessary to keep the @target pointer alive for this call. ++ * It's only necessary to hold a ref on @engine. This will return ++ * safely even if @target has been reaped and has no task refs. ++ * ++ * A successful return from utrace_barrier() guarantees its ordering ++ * with respect to utrace_set_events() and utrace_control() calls. 
If ++ * @target was not properly stopped, event callbacks just disabled might ++ * still be in progress; utrace_barrier() waits until there is no chance ++ * an unwanted callback can be in progress. ++ */ ++int utrace_barrier(struct task_struct *target, struct utrace_engine *engine) ++{ ++ struct utrace *utrace; ++ int ret = -ERESTARTSYS; ++ ++ if (unlikely(target == current)) ++ return 0; ++ ++ do { ++ utrace = get_utrace_lock(target, engine, false); ++ if (unlikely(IS_ERR(utrace))) { ++ ret = PTR_ERR(utrace); ++ if (ret != -ERESTARTSYS) ++ break; ++ } else { ++ /* ++ * All engine state changes are done while ++ * holding the lock, i.e. before we get here. ++ * Since we have the lock, we only need to ++ * worry about @target making a callback. ++ * When it has entered start_callback() but ++ * not yet gotten to finish_callback(), we ++ * will see utrace->reporting == @engine. ++ * When @target doesn't take the lock, it uses ++ * barriers to order setting utrace->reporting ++ * before it examines the engine state. ++ */ ++ if (utrace->reporting != engine) ++ ret = 0; ++ spin_unlock(&utrace->lock); ++ if (!ret) ++ break; ++ } ++ schedule_timeout_interruptible(1); ++ } while (!signal_pending(current)); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_barrier); ++ ++/* ++ * This is local state used for reporting loops, perhaps optimized away. ++ */ ++struct utrace_report { ++ u32 result; ++ enum utrace_resume_action action; ++ enum utrace_resume_action resume_action; ++ bool detaches; ++ bool takers; ++}; ++ ++#define INIT_REPORT(var) \ ++ struct utrace_report var = { \ ++ .action = UTRACE_RESUME, \ ++ .resume_action = UTRACE_RESUME \ ++ } ++ ++/* ++ * We are now making the report, so clear the flag saying we need one. ++ * When there is a new attach, ->pending_attach is set just so we will ++ * know to do splice_attaching() here before the callback loop. ++ */ ++static void start_report(struct utrace *utrace) ++{ ++ BUG_ON(utrace->stopped); ++ if (utrace->report || utrace->pending_attach) { ++ spin_lock(&utrace->lock); ++ splice_attaching(utrace); ++ utrace->report = 0; ++ spin_unlock(&utrace->lock); ++ } ++} ++ ++static inline void finish_report_reset(struct task_struct *task, ++ struct utrace *utrace, ++ struct utrace_report *report) ++{ ++ if (unlikely(!report->takers || report->detaches)) { ++ spin_lock(&utrace->lock); ++ if (utrace_reset(task, utrace)) ++ report->action = UTRACE_RESUME; ++ } ++} ++ ++/* ++ * Complete a normal reporting pass, pairing with a start_report() call. ++ * This handles any UTRACE_DETACH or UTRACE_REPORT or UTRACE_INTERRUPT ++ * returns from engine callbacks. If any engine's last callback used ++ * UTRACE_STOP, we do UTRACE_REPORT here to ensure we stop before user ++ * mode. If there were no callbacks made, it will recompute ++ * @task->utrace_flags to avoid another false-positive. 
++ */ ++static void finish_report(struct utrace_report *report, ++ struct task_struct *task, struct utrace *utrace) ++{ ++ if (report->action <= UTRACE_REPORT && !utrace->interrupt && ++ (report->action == UTRACE_INTERRUPT || !utrace->report)) { ++ spin_lock(&utrace->lock); ++ if (report->action == UTRACE_INTERRUPT) { ++ utrace->interrupt = 1; ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++ } else { ++ utrace->report = 1; ++ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME); ++ } ++ spin_unlock(&utrace->lock); ++ } ++ ++ finish_report_reset(task, utrace, report); ++} ++ ++static inline void finish_callback_report(struct task_struct *task, ++ struct utrace *utrace, ++ struct utrace_report *report, ++ struct utrace_engine *engine, ++ enum utrace_resume_action action) ++{ ++ /* ++ * If utrace_control() was used, treat that like UTRACE_DETACH here. ++ */ ++ if (action == UTRACE_DETACH || engine->ops == &utrace_detached_ops) { ++ engine->ops = &utrace_detached_ops; ++ report->detaches = true; ++ return; ++ } ++ ++ if (action < report->action) ++ report->action = action; ++ ++ if (action != UTRACE_STOP) { ++ if (action < report->resume_action) ++ report->resume_action = action; ++ ++ if (engine_wants_stop(engine)) { ++ spin_lock(&utrace->lock); ++ clear_engine_wants_stop(engine); ++ spin_unlock(&utrace->lock); ++ } ++ ++ return; ++ } ++ ++ if (!engine_wants_stop(engine)) { ++ spin_lock(&utrace->lock); ++ /* ++ * If utrace_control() came in and detached us ++ * before we got the lock, we must not stop now. ++ */ ++ if (unlikely(engine->ops == &utrace_detached_ops)) ++ report->detaches = true; ++ else ++ mark_engine_wants_stop(task, engine); ++ spin_unlock(&utrace->lock); ++ } ++} ++ ++/* ++ * Apply the return value of one engine callback to @report. ++ * Returns true if @engine detached and should not get any more callbacks. ++ */ ++static bool finish_callback(struct task_struct *task, struct utrace *utrace, ++ struct utrace_report *report, ++ struct utrace_engine *engine, ++ u32 ret) ++{ ++ report->result = ret & ~UTRACE_RESUME_MASK; ++ finish_callback_report(task, utrace, report, engine, ++ utrace_resume_action(ret)); ++ ++ /* ++ * Now that we have applied the effect of the return value, ++ * clear this so that utrace_barrier() can stop waiting. ++ * A subsequent utrace_control() can stop or resume @engine ++ * and know this was ordered after its callback's action. ++ * ++ * We don't need any barriers here because utrace_barrier() ++ * takes utrace->lock. If we touched engine->flags above, ++ * the lock guaranteed this change was before utrace_barrier() ++ * examined utrace->reporting. ++ */ ++ utrace->reporting = NULL; ++ ++ /* ++ * This is a good place to make sure tracing engines don't ++ * introduce too much latency under voluntary preemption. ++ */ ++ if (need_resched()) ++ cond_resched(); ++ ++ return engine->ops == &utrace_detached_ops; ++} ++ ++/* ++ * Start the callbacks for @engine to consider @event (a bit mask). ++ * This makes the report_quiesce() callback first. If @engine wants ++ * a specific callback for @event, we return the ops vector to use. ++ * If not, we return NULL. The return value from the ops->callback ++ * function called should be passed to finish_callback(). 
++ */ ++static const struct utrace_engine_ops *start_callback( ++ struct utrace *utrace, struct utrace_report *report, ++ struct utrace_engine *engine, struct task_struct *task, ++ unsigned long event) ++{ ++ const struct utrace_engine_ops *ops; ++ unsigned long want; ++ ++ /* ++ * This barrier ensures that we've set utrace->reporting before ++ * we examine engine->flags or engine->ops. utrace_barrier() ++ * relies on this ordering to indicate that the effect of any ++ * utrace_control() and utrace_set_events() calls is in place ++ * by the time utrace->reporting can be seen to be NULL. ++ */ ++ utrace->reporting = engine; ++ smp_mb(); ++ ++ /* ++ * This pairs with the barrier in mark_engine_detached(). ++ * It makes sure that we never see the old ops vector with ++ * the new flags, in case the original vector had no report_quiesce. ++ */ ++ want = engine->flags; ++ smp_rmb(); ++ ops = engine->ops; ++ ++ if (want & UTRACE_EVENT(QUIESCE)) { ++ if (finish_callback(task, utrace, report, engine, ++ (*ops->report_quiesce)(report->action, ++ engine, task, ++ event))) ++ return NULL; ++ ++ /* ++ * finish_callback() reset utrace->reporting after the ++ * quiesce callback. Now we set it again (as above) ++ * before re-examining engine->flags, which could have ++ * been changed synchronously by ->report_quiesce or ++ * asynchronously by utrace_control() or utrace_set_events(). ++ */ ++ utrace->reporting = engine; ++ smp_mb(); ++ want = engine->flags; ++ } ++ ++ if (want & ENGINE_STOP) ++ report->action = UTRACE_STOP; ++ ++ if (want & event) { ++ report->takers = true; ++ return ops; ++ } ++ ++ utrace->reporting = NULL; ++ return NULL; ++} ++ ++/* ++ * Do a normal reporting pass for engines interested in @event. ++ * @callback is the name of the member in the ops vector, and remaining ++ * args are the extras it takes after the standard three args. ++ */ ++#define REPORT(task, utrace, report, event, callback, ...) \ ++ do { \ ++ start_report(utrace); \ ++ REPORT_CALLBACKS(, task, utrace, report, event, callback, \ ++ (report)->action, engine, current, \ ++ ## __VA_ARGS__); \ ++ finish_report(report, task, utrace); \ ++ } while (0) ++#define REPORT_CALLBACKS(rev, task, utrace, report, event, callback, ...) \ ++ do { \ ++ struct utrace_engine *engine; \ ++ const struct utrace_engine_ops *ops; \ ++ list_for_each_entry##rev(engine, &utrace->attached, entry) { \ ++ ops = start_callback(utrace, report, engine, task, \ ++ event); \ ++ if (!ops) \ ++ continue; \ ++ finish_callback(task, utrace, report, engine, \ ++ (*ops->callback)(__VA_ARGS__)); \ ++ } \ ++ } while (0) ++ ++/* ++ * Called iff UTRACE_EVENT(EXEC) flag is set. ++ */ ++void utrace_report_exec(struct linux_binfmt *fmt, struct linux_binprm *bprm, ++ struct pt_regs *regs) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(EXEC), ++ report_exec, fmt, bprm, regs); ++} ++ ++/* ++ * Called iff UTRACE_EVENT(SYSCALL_ENTRY) flag is set. ++ * Return true to prevent the system call. 
++ */ ++bool utrace_report_syscall_entry(struct pt_regs *regs) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ start_report(utrace); ++ REPORT_CALLBACKS(_reverse, task, utrace, &report, ++ UTRACE_EVENT(SYSCALL_ENTRY), report_syscall_entry, ++ report.result | report.action, engine, current, regs); ++ finish_report(&report, task, utrace); ++ ++ if (report.action == UTRACE_STOP) { ++ utrace_stop(task, utrace, report.resume_action); ++ if (fatal_signal_pending(task)) ++ /* ++ * We are continuing despite UTRACE_STOP because of a ++ * SIGKILL. Don't let the system call actually proceed. ++ */ ++ return true; ++ } ++ ++ return report.result == UTRACE_SYSCALL_ABORT; ++} ++ ++/* ++ * Called iff UTRACE_EVENT(SYSCALL_EXIT) flag is set. ++ */ ++void utrace_report_syscall_exit(struct pt_regs *regs) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT), ++ report_syscall_exit, regs); ++} ++ ++/* ++ * Called iff UTRACE_EVENT(CLONE) flag is set. ++ * This notification call blocks the wake_up_new_task call on the child. ++ * So we must not quiesce here. tracehook_report_clone_complete will do ++ * a quiescence check momentarily. ++ */ ++void utrace_report_clone(unsigned long clone_flags, struct task_struct *child) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ /* ++ * We don't use the REPORT() macro here, because we need ++ * to clear utrace->cloning before finish_report(). ++ * After finish_report(), utrace can be a stale pointer ++ * in cases when report.action is still UTRACE_RESUME. ++ */ ++ start_report(utrace); ++ utrace->cloning = child; ++ ++ REPORT_CALLBACKS(, task, utrace, &report, ++ UTRACE_EVENT(CLONE), report_clone, ++ report.action, engine, task, clone_flags, child); ++ ++ utrace->cloning = NULL; ++ finish_report(&report, task, utrace); ++ ++ /* ++ * For a vfork, we will go into an uninterruptible block waiting ++ * for the child. We need UTRACE_STOP to happen before this, not ++ * after. For CLONE_VFORK, utrace_finish_vfork() will be called. ++ */ ++ if (report.action == UTRACE_STOP && (clone_flags & CLONE_VFORK)) { ++ spin_lock(&utrace->lock); ++ utrace->vfork_stop = 1; ++ spin_unlock(&utrace->lock); ++ } ++} ++ ++/* ++ * We're called after utrace_report_clone() for a CLONE_VFORK. ++ * If UTRACE_STOP was left from the clone report, we stop here. ++ * After this, we'll enter the uninterruptible wait_for_completion() ++ * waiting for the child. ++ */ ++void utrace_finish_vfork(struct task_struct *task) ++{ ++ struct utrace *utrace = task_utrace_struct(task); ++ ++ if (utrace->vfork_stop) { ++ spin_lock(&utrace->lock); ++ utrace->vfork_stop = 0; ++ spin_unlock(&utrace->lock); ++ utrace_stop(task, utrace, UTRACE_RESUME); /* XXX */ ++ } ++} ++ ++/* ++ * Called iff UTRACE_EVENT(JCTL) flag is set. ++ * ++ * Called with siglock held. ++ */ ++void utrace_report_jctl(int notify, int what) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ spin_unlock_irq(&task->sighand->siglock); ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(JCTL), ++ report_jctl, what, notify); ++ ++ spin_lock_irq(&task->sighand->siglock); ++} ++ ++/* ++ * Called without locks. 
++ */ ++void utrace_finish_jctl(void) ++{ ++ struct utrace *utrace = task_utrace_struct(current); ++ /* ++ * While in TASK_STOPPED, we can be considered safely stopped by ++ * utrace_do_stop(). Clear ->stopped if we were woken by SIGKILL. ++ */ ++ if (utrace->stopped) { ++ spin_lock(&utrace->lock); ++ utrace->stopped = false; ++ spin_unlock(&utrace->lock); ++ } ++} ++ ++/* ++ * Called iff UTRACE_EVENT(EXIT) flag is set. ++ */ ++void utrace_report_exit(long *exit_code) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ long orig_code = *exit_code; ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(EXIT), ++ report_exit, orig_code, exit_code); ++ ++ if (report.action == UTRACE_STOP) ++ utrace_stop(task, utrace, report.resume_action); ++} ++ ++/* ++ * Called iff UTRACE_EVENT(DEATH) or UTRACE_EVENT(QUIESCE) flag is set. ++ * ++ * It is always possible that we are racing with utrace_release_task here. ++ * For this reason, utrace_release_task checks for the event bits that get ++ * us here, and delays its cleanup for us to do. ++ */ ++void utrace_report_death(struct task_struct *task, struct utrace *utrace, ++ bool group_dead, int signal) ++{ ++ INIT_REPORT(report); ++ ++ BUG_ON(!task->exit_state); ++ ++ /* ++ * We are presently considered "quiescent"--which is accurate ++ * inasmuch as we won't run any more user instructions ever again. ++ * But for utrace_control and utrace_set_events to be robust, they ++ * must be sure whether or not we will run any more callbacks. If ++ * a call comes in before we do, taking the lock here synchronizes ++ * us so we don't run any callbacks just disabled. Calls that come ++ * in while we're running the callbacks will see the exit.death ++ * flag and know that we are not yet fully quiescent for purposes ++ * of detach bookkeeping. ++ */ ++ spin_lock(&utrace->lock); ++ BUG_ON(utrace->death); ++ utrace->death = 1; ++ utrace->report = 0; ++ utrace->interrupt = 0; ++ splice_attaching(utrace); ++ spin_unlock(&utrace->lock); ++ ++ REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH), ++ report_death, engine, task, group_dead, signal); ++ ++ spin_lock(&utrace->lock); ++ ++ /* ++ * After we unlock (possibly inside utrace_reap for callbacks) with ++ * this flag clear, competing utrace_control/utrace_set_events calls ++ * know that we've finished our callbacks and any detach bookkeeping. ++ */ ++ utrace->death = 0; ++ ++ if (utrace->reap) ++ /* ++ * utrace_release_task() was already called in parallel. ++ * We must complete its work now. ++ */ ++ utrace_reap(task, utrace); ++ else ++ utrace_reset(task, utrace); ++} ++ ++/* ++ * Finish the last reporting pass before returning to user mode. ++ */ ++static void finish_resume_report(struct utrace_report *report, ++ struct task_struct *task, ++ struct utrace *utrace) ++{ ++ finish_report_reset(task, utrace, report); ++ ++ switch (report->action) { ++ case UTRACE_STOP: ++ utrace_stop(task, utrace, report->resume_action); ++ break; ++ ++ case UTRACE_INTERRUPT: ++ if (!signal_pending(task)) ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++ break; ++ ++ case UTRACE_BLOCKSTEP: ++ if (likely(arch_has_block_step())) { ++ user_enable_block_step(task); ++ break; ++ } ++ ++ /* ++ * This means some callback is to blame for failing ++ * to check arch_has_block_step() itself. Warn and ++ * then fall through to treat it as SINGLESTEP. 
++ */ ++ WARN_ON(1); ++ ++ case UTRACE_SINGLESTEP: ++ if (likely(arch_has_single_step())) ++ user_enable_single_step(task); ++ else ++ /* ++ * This means some callback is to blame for failing ++ * to check arch_has_single_step() itself. Spew ++ * about it so the loser will fix his module. ++ */ ++ WARN_ON(1); ++ break; ++ ++ case UTRACE_REPORT: ++ case UTRACE_RESUME: ++ default: ++ user_disable_single_step(task); ++ break; ++ } ++} ++ ++/* ++ * This is called when TIF_NOTIFY_RESUME had been set (and is now clear). ++ * We are close to user mode, and this is the place to report or stop. ++ * When we return, we're going to user mode or into the signals code. ++ */ ++void utrace_resume(struct task_struct *task, struct pt_regs *regs) ++{ ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ struct utrace_engine *engine; ++ ++ /* ++ * Some machines get here with interrupts disabled. The same arch ++ * code path leads to calling into get_signal_to_deliver(), which ++ * implicitly reenables them by virtue of spin_unlock_irq. ++ */ ++ local_irq_enable(); ++ ++ /* ++ * If this flag is still set it's because there was a signal ++ * handler setup done but no report_signal following it. Clear ++ * the flag before we get to user so it doesn't confuse us later. ++ */ ++ if (unlikely(utrace->signal_handler)) { ++ int skip; ++ spin_lock(&utrace->lock); ++ utrace->signal_handler = 0; ++ skip = !utrace->report; ++ spin_unlock(&utrace->lock); ++ if (skip) ++ return; ++ } ++ ++ /* ++ * If UTRACE_INTERRUPT was just used, we don't bother with a report ++ * here. We will report and stop in utrace_get_signal(). In case ++ * of a race with utrace_control(), make sure we don't momentarily ++ * return to user mode because TIF_SIGPENDING was not set yet. ++ */ ++ if (unlikely(utrace->interrupt)) { ++ set_thread_flag(TIF_SIGPENDING); ++ return; ++ } ++ ++ /* ++ * Update our bookkeeping even if there are no callbacks made here. ++ */ ++ start_report(utrace); ++ ++ if (likely(task->utrace_flags & UTRACE_EVENT(QUIESCE))) { ++ /* ++ * Do a simple reporting pass, with no specific ++ * callback after report_quiesce. ++ */ ++ list_for_each_entry(engine, &utrace->attached, entry) ++ start_callback(utrace, &report, engine, task, 0); ++ } ++ ++ /* ++ * Finish the report and either stop or get ready to resume. ++ */ ++ finish_resume_report(&report, task, utrace); ++} ++ ++/* ++ * Return true if current has forced signal_pending(). ++ * ++ * This is called only when current->utrace_flags is nonzero, so we know ++ * that current->utrace must be set. It's not inlined in tracehook.h ++ * just so that struct utrace can stay opaque outside this file. ++ */ ++bool utrace_interrupt_pending(void) ++{ ++ return task_utrace_struct(current)->interrupt; ++} ++ ++/* ++ * Take the siglock and push @info back on our queue. ++ * Returns with @task->sighand->siglock held. ++ */ ++static void push_back_signal(struct task_struct *task, siginfo_t *info) ++ __acquires(task->sighand->siglock) ++{ ++ struct sigqueue *q; ++ ++ if (unlikely(!info->si_signo)) { /* Oh, a wise guy! 
*/ ++ spin_lock_irq(&task->sighand->siglock); ++ return; ++ } ++ ++ q = sigqueue_alloc(); ++ if (likely(q)) { ++ q->flags = 0; ++ copy_siginfo(&q->info, info); ++ } ++ ++ spin_lock_irq(&task->sighand->siglock); ++ ++ sigaddset(&task->pending.signal, info->si_signo); ++ if (likely(q)) ++ list_add(&q->list, &task->pending.list); ++ ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++} ++ ++/* ++ * This is the hook from the signals code, called with the siglock held. ++ * Here is the ideal place to stop. We also dequeue and intercept signals. ++ */ ++int utrace_get_signal(struct task_struct *task, struct pt_regs *regs, ++ siginfo_t *info, struct k_sigaction *return_ka) ++ __releases(task->sighand->siglock) ++ __acquires(task->sighand->siglock) ++{ ++ struct utrace *utrace; ++ struct k_sigaction *ka; ++ INIT_REPORT(report); ++ struct utrace_engine *engine; ++ const struct utrace_engine_ops *ops; ++ unsigned long event, want; ++ u32 ret; ++ int signr; ++ ++ utrace = &task->utrace; ++ if (utrace->interrupt || utrace->report || utrace->signal_handler) { ++ /* ++ * We've been asked for an explicit report before we ++ * even check for pending signals. ++ */ ++ ++ spin_unlock_irq(&task->sighand->siglock); ++ ++ spin_lock(&utrace->lock); ++ ++ splice_attaching(utrace); ++ ++ if (unlikely(!utrace->interrupt) && unlikely(!utrace->report)) ++ report.result = UTRACE_SIGNAL_IGN; ++ else if (utrace->signal_handler) ++ report.result = UTRACE_SIGNAL_HANDLER; ++ else ++ report.result = UTRACE_SIGNAL_REPORT; ++ ++ /* ++ * We are now making the report and it's on the ++ * interrupt path, so clear the flags asking for those. ++ */ ++ utrace->interrupt = utrace->report = utrace->signal_handler = 0; ++ /* ++ * Make sure signal_pending() only returns true ++ * if there are real signals pending. ++ */ ++ if (signal_pending(task)) { ++ spin_lock_irq(&task->sighand->siglock); ++ recalc_sigpending(); ++ spin_unlock_irq(&task->sighand->siglock); ++ } ++ ++ spin_unlock(&utrace->lock); ++ ++ if (!(task->utrace_flags & UTRACE_EVENT(QUIESCE)) || ++ unlikely(report.result == UTRACE_SIGNAL_IGN)) ++ /* ++ * We only got here to clear utrace->signal_handler. ++ */ ++ return -1; ++ ++ /* ++ * Do a reporting pass for no signal, just for EVENT(QUIESCE). ++ * The engine callbacks can fill in *info and *return_ka. ++ * We'll pass NULL for the @orig_ka argument to indicate ++ * that there was no original signal. ++ */ ++ event = 0; ++ ka = NULL; ++ memset(return_ka, 0, sizeof *return_ka); ++ } else if (!(task->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) || ++ unlikely(task->signal->group_stop_count)) { ++ /* ++ * If no engine is interested in intercepting signals or ++ * we must stop, let the caller just dequeue them normally ++ * or participate in group-stop. ++ */ ++ return 0; ++ } else { ++ /* ++ * Steal the next signal so we can let tracing engines ++ * examine it. From the signal number and sigaction, ++ * determine what normal delivery would do. If no ++ * engine perturbs it, we'll do that by returning the ++ * signal number after setting *return_ka. ++ */ ++ signr = dequeue_signal(task, &task->blocked, info); ++ if (signr == 0) ++ return signr; ++ BUG_ON(signr != info->si_signo); ++ ++ ka = &task->sighand->action[signr - 1]; ++ *return_ka = *ka; ++ ++ /* ++ * We are never allowed to interfere with SIGKILL. ++ * Just punt after filling in *return_ka for our caller. 
++ */ ++ if (signr == SIGKILL) ++ return signr; ++ ++ if (ka->sa.sa_handler == SIG_IGN) { ++ event = UTRACE_EVENT(SIGNAL_IGN); ++ report.result = UTRACE_SIGNAL_IGN; ++ } else if (ka->sa.sa_handler != SIG_DFL) { ++ event = UTRACE_EVENT(SIGNAL); ++ report.result = UTRACE_SIGNAL_DELIVER; ++ } else if (sig_kernel_coredump(signr)) { ++ event = UTRACE_EVENT(SIGNAL_CORE); ++ report.result = UTRACE_SIGNAL_CORE; ++ } else if (sig_kernel_ignore(signr)) { ++ event = UTRACE_EVENT(SIGNAL_IGN); ++ report.result = UTRACE_SIGNAL_IGN; ++ } else if (signr == SIGSTOP) { ++ event = UTRACE_EVENT(SIGNAL_STOP); ++ report.result = UTRACE_SIGNAL_STOP; ++ } else if (sig_kernel_stop(signr)) { ++ event = UTRACE_EVENT(SIGNAL_STOP); ++ report.result = UTRACE_SIGNAL_TSTP; ++ } else { ++ event = UTRACE_EVENT(SIGNAL_TERM); ++ report.result = UTRACE_SIGNAL_TERM; ++ } ++ ++ /* ++ * Now that we know what event type this signal is, we ++ * can short-circuit if no engines care about those. ++ */ ++ if ((task->utrace_flags & (event | UTRACE_EVENT(QUIESCE))) == 0) ++ return signr; ++ ++ /* ++ * We have some interested engines, so tell them about ++ * the signal and let them change its disposition. ++ */ ++ spin_unlock_irq(&task->sighand->siglock); ++ } ++ ++ /* ++ * This reporting pass chooses what signal disposition we'll act on. ++ */ ++ list_for_each_entry(engine, &utrace->attached, entry) { ++ /* ++ * See start_callback() comment about this barrier. ++ */ ++ utrace->reporting = engine; ++ smp_mb(); ++ ++ /* ++ * This pairs with the barrier in mark_engine_detached(), ++ * see start_callback() comments. ++ */ ++ want = engine->flags; ++ smp_rmb(); ++ ops = engine->ops; ++ ++ if ((want & (event | UTRACE_EVENT(QUIESCE))) == 0) { ++ utrace->reporting = NULL; ++ continue; ++ } ++ ++ if (ops->report_signal) ++ ret = (*ops->report_signal)( ++ report.result | report.action, engine, task, ++ regs, info, ka, return_ka); ++ else ++ ret = (report.result | (*ops->report_quiesce)( ++ report.action, engine, task, event)); ++ ++ /* ++ * Avoid a tight loop reporting again and again if some ++ * engine is too stupid. ++ */ ++ switch (utrace_resume_action(ret)) { ++ default: ++ break; ++ case UTRACE_INTERRUPT: ++ case UTRACE_REPORT: ++ ret = (ret & ~UTRACE_RESUME_MASK) | UTRACE_RESUME; ++ break; ++ } ++ ++ finish_callback(task, utrace, &report, engine, ret); ++ } ++ ++ /* ++ * We express the chosen action to the signals code in terms ++ * of a representative signal whose default action does it. ++ * Our caller uses our return value (signr) to decide what to ++ * do, but uses info->si_signo as the signal number to report. ++ */ ++ switch (utrace_signal_action(report.result)) { ++ case UTRACE_SIGNAL_TERM: ++ signr = SIGTERM; ++ break; ++ ++ case UTRACE_SIGNAL_CORE: ++ signr = SIGQUIT; ++ break; ++ ++ case UTRACE_SIGNAL_STOP: ++ signr = SIGSTOP; ++ break; ++ ++ case UTRACE_SIGNAL_TSTP: ++ signr = SIGTSTP; ++ break; ++ ++ case UTRACE_SIGNAL_DELIVER: ++ signr = info->si_signo; ++ ++ if (return_ka->sa.sa_handler == SIG_DFL) { ++ /* ++ * We'll do signr's normal default action. ++ * For ignore, we'll fall through below. ++ * For stop/death, break locks and returns it. ++ */ ++ if (likely(signr) && !sig_kernel_ignore(signr)) ++ break; ++ } else if (return_ka->sa.sa_handler != SIG_IGN && ++ likely(signr)) { ++ /* ++ * Complete the bookkeeping after the report. ++ * The handler will run. If an engine wanted to ++ * stop or step, then make sure we do another ++ * report after signal handler setup. 
++ */ ++ if (report.action != UTRACE_RESUME) ++ report.action = UTRACE_INTERRUPT; ++ finish_report(&report, task, utrace); ++ ++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) ++ push_back_signal(task, info); ++ else ++ spin_lock_irq(&task->sighand->siglock); ++ ++ /* ++ * We do the SA_ONESHOT work here since the ++ * normal path will only touch *return_ka now. ++ */ ++ if (unlikely(return_ka->sa.sa_flags & SA_ONESHOT)) { ++ return_ka->sa.sa_flags &= ~SA_ONESHOT; ++ if (likely(valid_signal(signr))) { ++ ka = &task->sighand->action[signr - 1]; ++ ka->sa.sa_handler = SIG_DFL; ++ } ++ } ++ ++ return signr; ++ } ++ ++ /* Fall through for an ignored signal. */ ++ ++ case UTRACE_SIGNAL_IGN: ++ case UTRACE_SIGNAL_REPORT: ++ default: ++ /* ++ * If the signal is being ignored, then we are on the way ++ * directly back to user mode. We can stop here, or step, ++ * as in utrace_resume(), above. After we've dealt with that, ++ * our caller will relock and come back through here. ++ */ ++ finish_resume_report(&report, task, utrace); ++ ++ if (unlikely(fatal_signal_pending(task))) { ++ /* ++ * The only reason we woke up now was because of a ++ * SIGKILL. Don't do normal dequeuing in case it ++ * might get a signal other than SIGKILL. That would ++ * perturb the death state so it might differ from ++ * what the debugger would have allowed to happen. ++ * Instead, pluck out just the SIGKILL to be sure ++ * we'll die immediately with nothing else different ++ * from the quiescent state the debugger wanted us in. ++ */ ++ sigset_t sigkill_only; ++ siginitsetinv(&sigkill_only, sigmask(SIGKILL)); ++ spin_lock_irq(&task->sighand->siglock); ++ signr = dequeue_signal(task, &sigkill_only, info); ++ BUG_ON(signr != SIGKILL); ++ *return_ka = task->sighand->action[SIGKILL - 1]; ++ return signr; ++ } ++ ++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) { ++ push_back_signal(task, info); ++ spin_unlock_irq(&task->sighand->siglock); ++ } ++ ++ return -1; ++ } ++ ++ /* ++ * Complete the bookkeeping after the report. ++ * This sets utrace->report if UTRACE_STOP was used. ++ */ ++ finish_report(&report, task, utrace); ++ ++ return_ka->sa.sa_handler = SIG_DFL; ++ ++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) ++ push_back_signal(task, info); ++ else ++ spin_lock_irq(&task->sighand->siglock); ++ ++ if (sig_kernel_stop(signr)) ++ task->signal->flags |= SIGNAL_STOP_DEQUEUED; ++ ++ return signr; ++} ++ ++/* ++ * This gets called after a signal handler has been set up. ++ * We set a flag so the next report knows it happened. ++ * If we're already stepping, make sure we do a report_signal. ++ * If not, make sure we get into utrace_resume() where we can ++ * clear the signal_handler flag before resuming. ++ */ ++void utrace_signal_handler(struct task_struct *task, int stepping) ++{ ++ struct utrace *utrace = task_utrace_struct(task); ++ ++ spin_lock(&utrace->lock); ++ ++ utrace->signal_handler = 1; ++ if (stepping) { ++ utrace->interrupt = 1; ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++ } else { ++ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME); ++ } ++ ++ spin_unlock(&utrace->lock); ++} ++ ++/** ++ * utrace_prepare_examine - prepare to examine thread state ++ * @target: thread of interest, a &struct task_struct pointer ++ * @engine: engine pointer returned by utrace_attach_task() ++ * @exam: temporary state, a &struct utrace_examiner pointer ++ * ++ * This call prepares to safely examine the thread @target using ++ * &struct user_regset calls, or direct access to thread-synchronous fields. 
++ * ++ * When @target is current, this call is superfluous. When @target is ++ * another thread, it must held stopped via %UTRACE_STOP by @engine. ++ * ++ * This call may block the caller until @target stays stopped, so it must ++ * be called only after the caller is sure @target is about to unschedule. ++ * This means a zero return from a utrace_control() call on @engine giving ++ * %UTRACE_STOP, or a report_quiesce() or report_signal() callback to ++ * @engine that used %UTRACE_STOP in its return value. ++ * ++ * Returns -%ESRCH if @target is dead or -%EINVAL if %UTRACE_STOP was ++ * not used. If @target has started running again despite %UTRACE_STOP ++ * (for %SIGKILL or a spurious wakeup), this call returns -%EAGAIN. ++ * ++ * When this call returns zero, it's safe to use &struct user_regset ++ * calls and task_user_regset_view() on @target and to examine some of ++ * its fields directly. When the examination is complete, a ++ * utrace_finish_examine() call must follow to check whether it was ++ * completed safely. ++ */ ++int utrace_prepare_examine(struct task_struct *target, ++ struct utrace_engine *engine, ++ struct utrace_examiner *exam) ++{ ++ int ret = 0; ++ ++ if (unlikely(target == current)) ++ return 0; ++ ++ rcu_read_lock(); ++ if (unlikely(!engine_wants_stop(engine))) ++ ret = -EINVAL; ++ else if (unlikely(target->exit_state)) ++ ret = -ESRCH; ++ else { ++ exam->state = target->state; ++ if (unlikely(exam->state == TASK_RUNNING)) ++ ret = -EAGAIN; ++ else ++ get_task_struct(target); ++ } ++ rcu_read_unlock(); ++ ++ if (likely(!ret)) { ++ exam->ncsw = wait_task_inactive(target, exam->state); ++ put_task_struct(target); ++ if (unlikely(!exam->ncsw)) ++ ret = -EAGAIN; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_prepare_examine); ++ ++/** ++ * utrace_finish_examine - complete an examination of thread state ++ * @target: thread of interest, a &struct task_struct pointer ++ * @engine: engine pointer returned by utrace_attach_task() ++ * @exam: pointer passed to utrace_prepare_examine() call ++ * ++ * This call completes an examination on the thread @target begun by a ++ * paired utrace_prepare_examine() call with the same arguments that ++ * returned success (zero). ++ * ++ * When @target is current, this call is superfluous. When @target is ++ * another thread, this returns zero if @target has remained unscheduled ++ * since the paired utrace_prepare_examine() call returned zero. ++ * ++ * When this returns an error, any examination done since the paired ++ * utrace_prepare_examine() call is unreliable and the data extracted ++ * should be discarded. The error is -%EINVAL if @engine is not ++ * keeping @target stopped, or -%EAGAIN if @target woke up unexpectedly. ++ */ ++int utrace_finish_examine(struct task_struct *target, ++ struct utrace_engine *engine, ++ struct utrace_examiner *exam) ++{ ++ int ret = 0; ++ ++ if (unlikely(target == current)) ++ return 0; ++ ++ rcu_read_lock(); ++ if (unlikely(!engine_wants_stop(engine))) ++ ret = -EINVAL; ++ else if (unlikely(target->state != exam->state)) ++ ret = -EAGAIN; ++ else ++ get_task_struct(target); ++ rcu_read_unlock(); ++ ++ if (likely(!ret)) { ++ unsigned long ncsw = wait_task_inactive(target, exam->state); ++ if (unlikely(ncsw != exam->ncsw)) ++ ret = -EAGAIN; ++ put_task_struct(target); ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_finish_examine); ++ ++/* ++ * This is declared in linux/regset.h and defined in machine-dependent ++ * code. We put the export here to ensure no machine forgets it. 
++ */ ++EXPORT_SYMBOL_GPL(task_user_regset_view); ++ ++/* ++ * Called with rcu_read_lock() held. ++ */ ++void task_utrace_proc_status(struct seq_file *m, struct task_struct *p) ++{ ++ struct utrace *utrace = &p->utrace; ++ seq_printf(m, "Utrace:\t%lx%s%s%s\n", ++ p->utrace_flags, ++ utrace->stopped ? " (stopped)" : "", ++ utrace->report ? " (report)" : "", ++ utrace->interrupt ? " (interrupt)" : ""); ++} diff --git a/patches.xen/add-console-use-vt b/patches.xen/add-console-use-vt index fdd91c4..aeba6c6 100644 --- a/patches.xen/add-console-use-vt +++ b/patches.xen/add-console-use-vt @@ -4,18 +4,18 @@ Patch-mainline: no $subject says all ---- head-2010-05-25.orig/drivers/char/tty_io.c 2010-05-25 09:12:10.000000000 +0200 -+++ head-2010-05-25/drivers/char/tty_io.c 2010-05-25 09:13:34.000000000 +0200 -@@ -136,6 +136,8 @@ LIST_HEAD(tty_drivers); /* linked list - DEFINE_MUTEX(tty_mutex); - EXPORT_SYMBOL(tty_mutex); +--- head-2011-02-08.orig/drivers/tty/tty_io.c 2011-02-08 09:51:53.000000000 +0100 ++++ head-2011-02-08/drivers/tty/tty_io.c 2011-01-31 14:30:58.000000000 +0100 +@@ -140,6 +140,8 @@ EXPORT_SYMBOL(tty_mutex); + /* Spinlock to protect the tty->tty_files list */ + DEFINE_SPINLOCK(tty_files_lock); +int console_use_vt = 1; + static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); ssize_t redirected_tty_write(struct file *, const char __user *, -@@ -1778,7 +1780,7 @@ retry_open: +@@ -1834,7 +1836,7 @@ retry_open: goto got_driver; } #ifdef CONFIG_VT @@ -24,8 +24,8 @@ $subject says all extern struct tty_driver *console_driver; driver = tty_driver_kref_get(console_driver); index = fg_console; -@@ -3160,7 +3162,8 @@ static int __init tty_init(void) - "console"); +@@ -3309,7 +3311,8 @@ int __init tty_init(void) + WARN_ON(device_create_file(consdev, &dev_attr_active) < 0); #ifdef CONFIG_VT - vty_init(&console_fops); @@ -34,9 +34,9 @@ $subject says all #endif return 0; } ---- head-2010-05-25.orig/include/linux/console.h 2010-05-25 09:12:10.000000000 +0200 -+++ head-2010-05-25/include/linux/console.h 2010-01-19 14:51:01.000000000 +0100 -@@ -63,6 +63,7 @@ extern const struct consw dummy_con; /* +--- head-2011-02-08.orig/include/linux/console.h 2011-02-08 09:51:53.000000000 +0100 ++++ head-2011-02-08/include/linux/console.h 2011-01-31 14:30:58.000000000 +0100 +@@ -73,6 +73,7 @@ extern const struct consw dummy_con; /* extern const struct consw vga_con; /* VGA text console */ extern const struct consw newport_con; /* SGI Newport console */ extern const struct consw prom_con; /* SPARC PROM console */ diff --git a/patches.xen/ipv6-no-autoconf b/patches.xen/ipv6-no-autoconf index 106936f..b966811 100644 --- a/patches.xen/ipv6-no-autoconf +++ b/patches.xen/ipv6-no-autoconf @@ -15,21 +15,21 @@ This patch makes autoconf (DAD and router discovery) depend on the interface's ability to do multicast. Turning off multicast for an interface before bringing it up will suppress autoconfiguration. 
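For illustration only (an editorial sketch, not part of this patch or of the
upstream series): once the hunks below are applied, userspace can opt a single
interface out of IPv6 autoconfiguration simply by clearing IFF_MULTICAST
before setting IFF_UP. The helper name and the use of the standard
SIOCGIFFLAGS/SIOCSIFFLAGS ioctls are assumptions made for the sketch; an
equivalent netlink-based sequence would work just as well.

#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Bring @ifname up with IFF_MULTICAST cleared, so a kernel carrying this
 * patch skips DAD and router solicitation on that interface. */
static int up_without_ipv6_autoconf(const char *ifname)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
		goto fail;
	ifr.ifr_flags &= ~IFF_MULTICAST;	/* disable multicast first... */
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
		goto fail;
	ifr.ifr_flags |= IFF_UP;		/* ...then bring the link up */
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}

Calling up_without_ipv6_autoconf("eth0") (the interface name is just an
example) leaves the interface up with autoconfiguration -- DAD and router
discovery -- suppressed, matching the behaviour described above.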
---- head-2010-04-15.orig/net/ipv6/addrconf.c 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/net/ipv6/addrconf.c 2010-04-15 09:39:07.000000000 +0200 -@@ -2832,6 +2832,7 @@ static void addrconf_dad_start(struct in +--- head-2011-02-08.orig/net/ipv6/addrconf.c 2011-02-08 09:51:53.000000000 +0100 ++++ head-2011-02-08/net/ipv6/addrconf.c 2011-02-08 10:00:00.000000000 +0100 +@@ -2848,6 +2848,7 @@ static void addrconf_dad_start(struct in + goto out; - spin_lock(&ifp->lock); if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || + !(dev->flags&IFF_MULTICAST) || idev->cnf.accept_dad < 1 || !(ifp->flags&IFA_F_TENTATIVE) || ifp->flags & IFA_F_NODAD) { -@@ -2925,6 +2926,7 @@ static void addrconf_dad_completed(struc - if (ifp->idev->cnf.forwarding == 0 && +@@ -2951,6 +2952,7 @@ static void addrconf_dad_completed(struc + ifp->idev->cnf.forwarding == 2) && ifp->idev->cnf.rtr_solicits > 0 && (dev->flags&IFF_LOOPBACK) == 0 && -+ (dev->flags & IFF_MULTICAST) && ++ (dev->flags&IFF_MULTICAST) && (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) { /* * If a host as already performed a random delay diff --git a/patches.xen/pci-guestdev b/patches.xen/pci-guestdev index 096966b..5b75dce 100644 --- a/patches.xen/pci-guestdev +++ b/patches.xen/pci-guestdev @@ -1,14 +1,16 @@ Subject: xen/dom0: Reserve devices for guest use -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 898:ca12928cdafe) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1023:85ca9742b8b9) Patch-mainline: n/a jb: Added support for reassign_resources=all (bnc#574224). jb: Used kzalloc() instead of all kmalloc()+memset() pairs. +jb: Added support for guestiomuldev=all. +jb: split /dev/xen/pci_iomul driver to be separate (so it can be a module) Acked-by: jbeulich@novell.com ---- head-2010-04-29.orig/Documentation/kernel-parameters.txt 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/Documentation/kernel-parameters.txt 2010-04-29 09:30:30.000000000 +0200 -@@ -834,6 +834,24 @@ and is between 256 and 4096 characters. +--- head-2011-03-11.orig/Documentation/kernel-parameters.txt 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/Documentation/kernel-parameters.txt 2011-03-11 10:49:08.000000000 +0100 +@@ -815,6 +815,24 @@ bytes respectively. Such letter suffixes gpt [EFI] Forces disk with valid GPT signature but invalid Protective MBR to be treated as GPT. @@ -30,10 +32,10 @@ Acked-by: jbeulich@novell.com + Note: function shouldn't be specified. + Specifies PCI device for IO port multiplexing driver. + - gvp11= [HW,SCSI] - hashdist= [KNL,NUMA] Large hashes allocated during boot -@@ -2183,6 +2201,10 @@ and is between 256 and 4096 characters. + are distributed across NUMA nodes. Defaults on + for 64bit NUMA, off otherwise. +@@ -2162,6 +2180,10 @@ bytes respectively. Such letter suffixes Run specified binary instead of /init from the ramdisk, used for early userspace startup. See initrd. 
@@ -44,9 +46,9 @@ Acked-by: jbeulich@novell.com reboot= [BUGS=X86-32,BUGS=ARM,BUGS=IA-64] Rebooting mode Format: [,[,...]] See arch/*/kernel/reboot.c or arch/*/kernel/process.c ---- head-2010-04-29.orig/drivers/acpi/pci_root.c 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/drivers/acpi/pci_root.c 2010-04-15 09:39:25.000000000 +0200 -@@ -419,6 +419,40 @@ out: +--- head-2011-03-11.orig/drivers/acpi/pci_root.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/pci_root.c 2011-01-31 14:31:27.000000000 +0100 +@@ -448,6 +448,41 @@ out: } EXPORT_SYMBOL(acpi_pci_osc_control_set); @@ -77,7 +79,8 @@ Acked-by: jbeulich@novell.com + struct acpi_pci_root *root; + root = list_entry(entry, struct acpi_pci_root, node); + if (&root->device->dev == dev) -+ return sprintf(buf, "%02x\n", root->bus_nr); ++ return sprintf(buf, "%02x\n", ++ (unsigned int)root->secondary.start); + } + return 0; +} @@ -87,9 +90,9 @@ Acked-by: jbeulich@novell.com static int __devinit acpi_pci_root_add(struct acpi_device *device) { unsigned long long segment, bus; -@@ -530,6 +564,13 @@ static int __devinit acpi_pci_root_add(s - if (flags != base_flags) - acpi_pci_osc_support(root, flags); +@@ -599,6 +634,13 @@ static int __devinit acpi_pci_root_add(s + "ACPI _OSC request failed (code %d)\n", status); + } +#ifdef CONFIG_PCI_GUESTDEV + if (device_create_file(&device->dev, &dev_attr_seg)) @@ -101,7 +104,7 @@ Acked-by: jbeulich@novell.com pci_acpi_add_bus_pm_notifier(device, root->bus); if (device->wakeup.flags.run_wake) device_set_run_wake(root->bus->bridge, true); -@@ -575,3 +616,31 @@ static int __init acpi_pci_root_init(voi +@@ -646,3 +688,31 @@ static int __init acpi_pci_root_init(voi } subsys_initcall(acpi_pci_root_init); @@ -127,15 +130,15 @@ Acked-by: jbeulich@novell.com + } + + *seg = (int)root->segment; -+ *bbn = (int)root->bus_nr; ++ *bbn = (int)root->secondary.start; + return TRUE; + } + return FALSE; +} +#endif ---- head-2010-04-29.orig/drivers/acpi/scan.c 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/drivers/acpi/scan.c 2010-04-15 09:39:27.000000000 +0200 -@@ -170,6 +170,16 @@ acpi_device_hid_show(struct device *dev, +--- head-2011-03-11.orig/drivers/acpi/scan.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/scan.c 2011-01-31 14:31:27.000000000 +0100 +@@ -175,6 +175,16 @@ acpi_device_hid_show(struct device *dev, } static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); @@ -152,9 +155,9 @@ Acked-by: jbeulich@novell.com static ssize_t acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); -@@ -210,6 +220,13 @@ static int acpi_device_setup_files(struc - if (result) - goto end; +@@ -217,6 +227,13 @@ static int acpi_device_setup_files(struc + goto end; + } +#ifdef CONFIG_PCI_GUESTDEV + if(dev->pnp.unique_id) { @@ -166,7 +169,7 @@ Acked-by: jbeulich@novell.com /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. 
-@@ -273,6 +290,9 @@ static void acpi_free_ids(struct acpi_de +@@ -280,6 +297,9 @@ static void acpi_free_ids(struct acpi_de kfree(id->id); kfree(id); } @@ -176,7 +179,7 @@ Acked-by: jbeulich@novell.com } static void acpi_device_release(struct device *dev) -@@ -1096,6 +1116,11 @@ static void acpi_device_set_id(struct ac +@@ -1131,6 +1151,11 @@ static void acpi_device_set_id(struct ac for (i = 0; i < cid_list->count; i++) acpi_add_id(device, cid_list->ids[i].string); } @@ -188,8 +191,8 @@ Acked-by: jbeulich@novell.com if (info->valid & ACPI_VALID_ADR) { device->pnp.bus_address = info->address; device->flags.bus_address = 1; ---- head-2010-04-29.orig/drivers/pci/Kconfig 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/drivers/pci/Kconfig 2010-03-24 13:55:21.000000000 +0100 +--- head-2011-03-11.orig/drivers/pci/Kconfig 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/pci/Kconfig 2011-01-31 14:31:27.000000000 +0100 @@ -31,6 +31,20 @@ config PCI_DEBUG When in doubt, say N. @@ -202,7 +205,7 @@ Acked-by: jbeulich@novell.com + Say Y here if you want to reserve PCI device for passthrough. + +config PCI_IOMULTI -+ bool "PCI Device IO Multiplex for Passthrough" ++ tristate "PCI Device IO Multiplex for Passthrough" + depends on PCI && ACPI && XEN + default y + help @@ -211,20 +214,22 @@ Acked-by: jbeulich@novell.com config PCI_STUB tristate "PCI Stub driver" depends on PCI ---- head-2010-04-29.orig/drivers/pci/Makefile 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/drivers/pci/Makefile 2010-03-24 13:55:21.000000000 +0100 -@@ -7,6 +7,8 @@ obj-y += access.o bus.o probe.o remove. +--- head-2011-03-11.orig/drivers/pci/Makefile 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/pci/Makefile 2011-01-31 14:31:28.000000000 +0100 +@@ -7,6 +7,10 @@ obj-y += access.o bus.o probe.o remove. irq.o vpd.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSFS) += slot.o +obj-$(CONFIG_PCI_GUESTDEV) += guestdev.o -+obj-$(CONFIG_PCI_IOMULTI) += iomulti.o ++obj-$(CONFIG_PCI_IOMULTI) += pci-iomul.o ++iomul-$(CONFIG_PCI_IOMULTI) := iomulti.o ++obj-y += $(iomul-y) $(iomul-m) obj-$(CONFIG_PCI_QUIRKS) += quirks.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/pci/guestdev.c 2010-04-28 15:57:49.000000000 +0200 -@@ -0,0 +1,887 @@ ++++ head-2011-03-11/drivers/pci/guestdev.c 2011-01-31 14:31:28.000000000 +0100 +@@ -0,0 +1,880 @@ +/* + * Copyright (c) 2008, 2009 NEC Corporation. + * Copyright (c) 2009 Isaku Yamahata @@ -308,7 +313,7 @@ Acked-by: jbeulich@novell.com + struct pcidev_sbdf_node *child; +}; + -+static char guestdev_param[COMMAND_LINE_SIZE]; ++static char __initdata guestdev_param[COMMAND_LINE_SIZE]; +static LIST_HEAD(guestdev_list); + +/* Get hid and uid */ @@ -420,9 +425,8 @@ Acked-by: jbeulich@novell.com + return TRUE; + +format_err_end: -+ printk(KERN_ERR -+ "PCI: The format of the guestdev parameter is illegal. [%s]\n", -+ str); ++ pr_err("PCI: The format of the guestdev parameter is illegal. 
[%s]\n", ++ str); + return FALSE; +} + @@ -531,7 +535,7 @@ Acked-by: jbeulich@novell.com +allocate_err_end: + if (gdev) + pci_free_guestdev(gdev); -+ printk(KERN_ERR "PCI: Failed to allocate memory.\n"); ++ pr_err("PCI: failed to allocate memory\n"); + return NULL; +} + @@ -601,9 +605,8 @@ Acked-by: jbeulich@novell.com + } else + gdev->u.devicepath.child = node; + } else if (gdev) { -+ printk(KERN_ERR -+ "PCI: Can't obtain dev# and #func# from %s.\n", -+ sp); ++ pr_err("PCI: Can't obtain dev# and #func# from %s.\n", ++ sp); + ret_val = -EINVAL; + if (gdev == gdev_org) + goto end; @@ -643,14 +646,13 @@ Acked-by: jbeulich@novell.com + goto end; + +format_err_end: -+ printk(KERN_ERR -+ "PCI: The format of the guestdev parameter is illegal. [%s]\n", -+ path_str); ++ pr_err("PCI: The format of the guestdev parameter is illegal. [%s]\n", ++ path_str); + ret_val = -EINVAL; + goto end; + +allocate_err_end: -+ printk(KERN_ERR "PCI: Failed to allocate memory.\n"); ++ pr_err("PCI: failed to allocate memory\n"); + ret_val = -ENOMEM; + goto end; + @@ -674,7 +676,7 @@ Acked-by: jbeulich@novell.com + } + gdev = kmalloc(sizeof(*gdev), GFP_KERNEL); + if (!gdev) { -+ printk(KERN_ERR "PCI: Failed to allocate memory.\n"); ++ pr_err("PCI: failed to allocate memory\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&gdev->root_list); @@ -754,9 +756,8 @@ Acked-by: jbeulich@novell.com + list_for_each(head, &guestdev_list) { + gdev = list_entry(head, struct guestdev, root_list); + pci_make_guestdev_str(gdev, path_str, GUESTDEV_STR_MAX); -+ printk(KERN_DEBUG -+ "PCI: %s has been reserved for guest domain.\n", -+ path_str); ++ pr_debug("PCI: %s has been reserved for guest domain.\n", ++ path_str); + } + return 0; +} @@ -856,7 +857,7 @@ Acked-by: jbeulich@novell.com + for(;;) { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { -+ printk(KERN_ERR "PCI: Failed to allocate memory.\n"); ++ pr_err("PCI: failed to allocate memory\n"); + goto err_end; + } + node->dev = PCI_SLOT(dev->devfn); @@ -987,7 +988,7 @@ Acked-by: jbeulich@novell.com + return FALSE; +} + -+#ifdef CONFIG_PCI_IOMULTI ++#if defined(CONFIG_PCI_IOMULTI) || defined(CONFIG_PCI_IOMULTI_MODULE) +static int pci_iomul_node_match(const struct devicepath_node *gdev_node, + const struct pcidev_sbdf_node *sbdf_node, + int options) @@ -1070,21 +1071,19 @@ Acked-by: jbeulich@novell.com + } else { + pci_make_guestdev_str(gdev, + path_str, GUESTDEV_STR_MAX); -+ printk(KERN_INFO -+ "PCI: Device does not exist. %s\n", -+ path_str); ++ pr_info("PCI: " ++ "device %s does not exist\n", ++ path_str); + continue; + } + } + + bus = pci_find_bus(gdev->u.devicepath.seg, + gdev->u.devicepath.bbn); -+ if (!bus || -+ !pci_check_devicepath_exists(gdev, bus)) { ++ if (!bus || !pci_check_devicepath_exists(gdev, bus)) { + pci_make_guestdev_str(gdev, path_str, + GUESTDEV_STR_MAX); -+ printk(KERN_INFO -+ "PCI: Device does not exist. %s\n", ++ pr_info("PCI: device %s does not exist\n", + path_str); + } + break; @@ -1100,8 +1099,7 @@ Acked-by: jbeulich@novell.com + } + } + pci_make_guestdev_str(gdev, path_str, GUESTDEV_STR_MAX); -+ printk(KERN_INFO "PCI: Device does not exist. 
%s\n", -+ path_str); ++ pr_info("PCI: device %s does not exist\n", path_str); + break; + default: + BUG(); @@ -1113,8 +1111,8 @@ Acked-by: jbeulich@novell.com +fs_initcall(pci_check_guestdev_exists); + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/pci/iomulti.c 2010-03-24 13:55:21.000000000 +0100 -@@ -0,0 +1,1415 @@ ++++ head-2011-03-11/drivers/pci/iomulti.c 2011-01-31 14:31:28.000000000 +0100 +@@ -0,0 +1,898 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by @@ -1132,27 +1130,16 @@ Acked-by: jbeulich@novell.com + * + * Copyright (c) 2009 Isaku Yamahata + * VA Linux Systems Japan K.K. -+ * + */ + -+#include -+#include -+#include -+#include -+#include ++#include "iomulti.h" ++#include "pci.h" ++#include +#include -+ +#include -+#include + -+#include "pci.h" -+#include "iomulti.h" -+ -+#define PCI_NUM_BARS 6 +#define PCI_BUS_MAX 255 +#define PCI_DEV_MAX 31 -+#define PCI_FUNC_MAX 7 -+#define PCI_NUM_FUNC 8 + +/* see pci_resource_len */ +static inline resource_size_t pci_iomul_len(const struct resource* r) @@ -1209,73 +1196,15 @@ Acked-by: jbeulich@novell.com + return pdev->bus->number; +} + -+struct pci_iomul_func { -+ int segment; -+ uint8_t bus; -+ uint8_t devfn; -+ -+ /* only start and end are used */ -+ unsigned long io_size; -+ uint8_t io_bar; -+ struct resource resource[PCI_NUM_BARS]; -+ struct resource dummy_parent; -+}; -+ -+struct pci_iomul_switch { -+ struct list_head list; /* bus_list_lock protects */ -+ -+ /* -+ * This lock the following entry and following -+ * pci_iomul_slot/pci_iomul_func. -+ */ -+ struct mutex lock; -+ struct kref kref; -+ -+ struct resource io_resource; -+ struct resource *io_region; -+ unsigned int count; -+ struct pci_dev *current_pdev; -+ -+ int segment; -+ uint8_t bus; -+ -+ uint32_t io_base; -+ uint32_t io_limit; -+ -+ /* func which has the largeset io size*/ -+ struct pci_iomul_func *func; -+ -+ struct list_head slots; -+}; -+ -+struct pci_iomul_slot { -+ struct list_head sibling; -+ struct kref kref; -+ /* -+ * busnr -+ * when pcie, the primary busnr of the PCI-PCI bridge on which -+ * this devices sits. -+ */ -+ uint8_t switch_busnr; -+ struct resource dummy_parent[PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES]; -+ -+ /* device */ -+ int segment; -+ uint8_t bus; -+ uint8_t dev; -+ -+ struct pci_iomul_func *func[PCI_NUM_FUNC]; -+}; -+ +static LIST_HEAD(switch_list); +static DEFINE_MUTEX(switch_list_lock); + +/*****************************************************************************/ -+static int inline pci_iomul_switch_io_allocated( -+ const struct pci_iomul_switch *sw) ++int pci_iomul_switch_io_allocated(const struct pci_iomul_switch *sw) +{ + return !(sw->io_base == 0 || sw->io_base > sw->io_limit); +} ++EXPORT_SYMBOL_GPL(pci_iomul_switch_io_allocated); + +static struct pci_iomul_switch *pci_iomul_find_switch_locked(int segment, + uint8_t bus) @@ -1307,9 +1236,9 @@ Acked-by: jbeulich@novell.com +/* on successfull exit, sw->lock is locked for use slot and + * refrence count of sw is incremented. 
+ */ -+static void pci_iomul_get_lock_switch(struct pci_dev *pdev, -+ struct pci_iomul_switch **swp, -+ struct pci_iomul_slot **slot) ++void pci_iomul_get_lock_switch(struct pci_dev *pdev, ++ struct pci_iomul_switch **swp, ++ struct pci_iomul_slot **slot) +{ + mutex_lock(&switch_list_lock); + @@ -1332,6 +1261,7 @@ Acked-by: jbeulich@novell.com +out: + mutex_unlock(&switch_list_lock); +} ++EXPORT_SYMBOL_GPL(pci_iomul_get_lock_switch); + +static struct pci_iomul_switch *pci_iomul_switch_alloc(int segment, + uint8_t bus) @@ -1363,7 +1293,7 @@ Acked-by: jbeulich@novell.com + list_add(&sw->list, &switch_list); +} + -+#ifdef CONFIG_HOTPLUG_PCI ++#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) +static void pci_iomul_switch_del_locked(struct pci_iomul_switch *sw) +{ + BUG_ON(!mutex_is_locked(&switch_list_lock)); @@ -1371,24 +1301,6 @@ Acked-by: jbeulich@novell.com +} +#endif + -+static void pci_iomul_switch_get(struct pci_iomul_switch *sw) -+{ -+ kref_get(&sw->kref); -+} -+ -+static void pci_iomul_switch_release(struct kref *kref) -+{ -+ struct pci_iomul_switch *sw = container_of(kref, -+ struct pci_iomul_switch, -+ kref); -+ kfree(sw); -+} -+ -+static void pci_iomul_switch_put(struct pci_iomul_switch *sw) -+{ -+ kref_put(&sw->kref, &pci_iomul_switch_release); -+} -+ +static int __devinit pci_iomul_slot_init(struct pci_dev *pdev, + struct pci_iomul_slot *slot) +{ @@ -1398,29 +1310,26 @@ Acked-by: jbeulich@novell.com + rpcap = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (!rpcap) { + /* pci device isn't supported */ -+ printk(KERN_INFO -+ "PCI: sharing io port of non PCIe device %s " -+ "isn't supported. ignoring.\n", -+ pci_name(pdev)); ++ pr_info("PCI: sharing io port of non PCIe device %s " ++ "isn't supported. ignoring.\n", ++ pci_name(pdev)); + return -ENOSYS; + } + -+ pci_read_config_word(pdev, rpcap + PCI_CAP_FLAGS, &cap); ++ pci_read_config_word(pdev, rpcap + PCI_CAP_FLAGS, &cap); + switch ((cap & PCI_EXP_FLAGS_TYPE) >> 4) { + case PCI_EXP_TYPE_RC_END: -+ printk(KERN_INFO -+ "PCI: io port sharing of root complex integrated " -+ "endpoint %s isn't supported. ignoring.\n", -+ pci_name(pdev)); ++ pr_info("PCI: io port sharing of root complex integrated " ++ "endpoint %s isn't supported. ignoring.\n", ++ pci_name(pdev)); + return -ENOSYS; + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_LEG_END: + break; + default: -+ printk(KERN_INFO -+ "PCI: io port sharing of non endpoint %s " -+ "doesn't make sense. ignoring.\n", -+ pci_name(pdev)); ++ pr_info("PCI: io port sharing of non endpoint %s " ++ "doesn't make sense. 
ignoring.\n", ++ pci_name(pdev)); + return -EINVAL; + } + @@ -1456,7 +1365,7 @@ Acked-by: jbeulich@novell.com + list_add(&slot->sibling, &sw->slots); +} + -+#ifdef CONFIG_HOTPLUG_PCI ++#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) +static void pci_iomul_slot_del_locked(struct pci_iomul_switch *sw, + struct pci_iomul_slot *slot) +{ @@ -1465,23 +1374,6 @@ Acked-by: jbeulich@novell.com +} +#endif + -+static void pci_iomul_slot_get(struct pci_iomul_slot *slot) -+{ -+ kref_get(&slot->kref); -+} -+ -+static void pci_iomul_slot_release(struct kref *kref) -+{ -+ struct pci_iomul_slot *slot = container_of(kref, struct pci_iomul_slot, -+ kref); -+ kfree(slot); -+} -+ -+static void pci_iomul_slot_put(struct pci_iomul_slot *slot) -+{ -+ kref_put(&slot->kref, &pci_iomul_slot_release); -+} -+ +/*****************************************************************************/ +static int pci_get_sbd(const char *str, + int *segment__, uint8_t *bus__, uint8_t *dev__) @@ -1514,10 +1406,12 @@ Acked-by: jbeulich@novell.com +#define TOKEN_MAX 10 /* SSSS:BB:DD length is 10 */ +static int pci_is_iomul_dev_param(struct pci_dev *pdev) +{ -+ int len; -+ char *p; ++ int len; ++ char *p; + char *next_str; + ++ if (!strcmp(iomul_param, "all")) ++ return 1; + for (p = &iomul_param[0]; *p != '\0'; p = next_str + 1) { + next_str = strchr(p, ','); + if (next_str != NULL) @@ -1542,7 +1436,7 @@ Acked-by: jbeulich@novell.com + break; + } + -+ /* check guestcev=+iomul option */ ++ /* check guestdev=+iomul option */ + return pci_is_iomuldev(pdev); +} + @@ -1576,12 +1470,12 @@ Acked-by: jbeulich@novell.com + upper16 = ((io_base & 0xffff00) >> 8) | + (((io_limit & 0xffff00) >> 8) << 16); + -+ /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ -+ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); -+ /* Update lower 16 bits of I/O base/limit. */ -+ pci_write_config_word(bridge, PCI_IO_BASE, l); -+ /* Update upper 16 bits of I/O base/limit. */ -+ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, upper16); ++ /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ ++ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); ++ /* Update lower 16 bits of I/O base/limit. */ ++ pci_write_config_word(bridge, PCI_IO_BASE, l); ++ /* Update upper 16 bits of I/O base/limit. */ ++ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, upper16); +} + +static void __devinit pci_disable_bridge_io_window(struct pci_dev *bridge) @@ -1663,8 +1557,8 @@ Acked-by: jbeulich@novell.com + uint8_t i; + struct resource *r; + -+ printk(KERN_INFO "PCI: deallocating io resource[%s]. io size 0x%lx\n", -+ pci_name(pdev), func->io_size); ++ pr_info("PCI: deallocating io resource[%s]. 
io size 0x%lx\n", ++ pci_name(pdev), func->io_size); + for (i = 0; i < PCI_NUM_BARS; i++) { + r = &pdev->resource[i]; + if (!(func->io_bar & (1 << i))) @@ -1759,9 +1653,9 @@ Acked-by: jbeulich@novell.com + pci_dev_switch_busnr(pdev)); + if (sw == NULL) { + mutex_unlock(&switch_list_lock); -+ printk(KERN_WARNING -+ "PCI: can't allocate memory " -+ "for sw of IO mulplexing %s", pci_name(pdev)); ++ pr_warn("PCI: can't allocate memory" ++ "for sw of IO multiplexing %s", ++ pci_name(pdev)); + return; + } + pci_iomul_switch_add_locked(sw); @@ -1777,15 +1671,15 @@ Acked-by: jbeulich@novell.com + if (slot == NULL) { + mutex_unlock(&sw->lock); + pci_iomul_switch_put(sw); -+ printk(KERN_WARNING "PCI: can't allocate memory " -+ "for IO mulplexing %s", pci_name(pdev)); ++ pr_warn("PCI: can't allocate memory " ++ "for IO multiplexing %s", pci_name(pdev)); + return; + } + pci_iomul_slot_add_locked(sw, slot); + } + -+ printk(KERN_INFO "PCI: disable device and release io resource[%s].\n", -+ pci_name(pdev)); ++ pr_info("PCI: disable device and release io resource[%s].\n", ++ pci_name(pdev)); + pci_disable_device(pdev); + + __quirk_iomul_dealloc_ioresource(sw, pdev, slot); @@ -1831,8 +1725,8 @@ Acked-by: jbeulich@novell.com + sw->io_limit = io_limit; + + pci_dev_put(pdev); -+ printk(KERN_INFO "PCI: bridge %s base 0x%x limit 0x%x\n", -+ pci_name(bridge), sw->io_base, sw->io_limit); ++ pr_info("PCI: bridge %s base 0x%x limit 0x%x\n", ++ pci_name(bridge), sw->io_base, sw->io_limit); +} + +static void __devinit pci_iomul_setup_brige(struct pci_dev *bridge, @@ -1850,9 +1744,8 @@ Acked-by: jbeulich@novell.com + pci_read_config_word(bridge, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_IO)) { + cmd |= PCI_COMMAND_IO; -+ printk(KERN_INFO "PCI: Forcibly Enabling IO %s\n", -+ pci_name(bridge)); -+ pci_write_config_word(bridge, PCI_COMMAND, cmd); ++ pr_info("PCI: forcibly enabling IO %s\n", pci_name(bridge)); ++ pci_write_config_word(bridge, PCI_COMMAND, cmd); + } +} + @@ -1878,8 +1771,8 @@ Acked-by: jbeulich@novell.com + uint8_t num_bars = 0; + struct resource *r; + -+ printk(KERN_INFO "PCI: Forcibly assign IO %s from 0x%x\n", -+ pci_name(pdev), io_base); ++ pr_info("PCI: Forcibly assign IO %s from 0x%x\n", ++ pci_name(pdev), io_base); + + for (i = 0; i < PCI_NUM_BARS; i++) { + if (!(f->io_bar & (1 << i))) @@ -1952,8 +1845,7 @@ Acked-by: jbeulich@novell.com + + if (request_resource(parent, + &sw->io_resource)) -+ printk(KERN_ERR -+ "PCI IOMul: can't allocate " ++ pr_err("PCI IOMul: can't allocate " + "resource. 
[0x%x, 0x%x]", + sw->io_base, sw->io_limit); + } @@ -1996,8 +1888,7 @@ Acked-by: jbeulich@novell.com + } + + BUG_ON(f->io_size > sw->io_limit - sw->io_base + 1); -+ if (/* f == sf */ -+ sf != NULL && ++ if (/* f == sf */ sf != NULL && + pci_domain_nr(pdev->bus) == sf->segment && + pdev->bus->number == sf->bus && + pdev->devfn == sf->devfn) @@ -2014,7 +1905,7 @@ Acked-by: jbeulich@novell.com + quirk_iomul_reassign_ioresource); + +/*****************************************************************************/ -+#ifdef CONFIG_HOTPLUG_PCI ++#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) +static int __devinit __pci_iomul_notifier_del_device(struct pci_dev *pdev) +{ + struct pci_iomul_switch *sw; @@ -2080,9 +1971,9 @@ Acked-by: jbeulich@novell.com + ret = __pci_iomul_notifier_del_switch(pdev); + break; + default: -+ printk(KERN_WARNING "PCI IOMUL: " -+ "device %s has unknown header type %02x, ignoring.\n", -+ pci_name(pdev), pdev->hdr_type); ++ pr_warn("PCI IOMUL: device %s has unknown " ++ "header type %02x, ignoring.\n", ++ pci_name(pdev), pdev->hdr_type); + ret = -EIO; + break; + } @@ -2092,10 +1983,10 @@ Acked-by: jbeulich@novell.com +static int __devinit pci_iomul_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ -+ struct device *dev = data; -+ struct pci_dev *pdev = to_pci_dev(dev); ++ struct device *dev = data; ++ struct pci_dev *pdev = to_pci_dev(dev); + -+ switch (action) { ++ switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + quirk_iomul_reassign_ioresource(pdev); + break; @@ -2109,20 +2000,201 @@ Acked-by: jbeulich@novell.com + return 0; +} + -+static struct notifier_block pci_iomul_nb = { -+ .notifier_call = pci_iomul_notifier, ++static struct notifier_block __devinitdata pci_iomul_nb = { ++ .notifier_call = pci_iomul_notifier, +}; + +static int __init pci_iomul_hotplug_init(void) +{ -+ bus_register_notifier(&pci_bus_type, &pci_iomul_nb); ++ bus_register_notifier(&pci_bus_type, &pci_iomul_nb); + return 0; +} -+ +late_initcall(pci_iomul_hotplug_init); +#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/drivers/pci/iomulti.h 2011-01-31 14:31:28.000000000 +0100 +@@ -0,0 +1,122 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ++ * ++ * Copyright (c) 2009 Isaku Yamahata ++ * VA Linux Systems Japan K.K. ++ */ ++ ++#include ++#include ++#include ++ ++#define PCI_NUM_BARS 6 ++#define PCI_NUM_FUNC 8 ++ ++struct pci_iomul_func { ++ int segment; ++ uint8_t bus; ++ uint8_t devfn; ++ ++ /* only start and end are used */ ++ unsigned long io_size; ++ uint8_t io_bar; ++ struct resource resource[PCI_NUM_BARS]; ++ struct resource dummy_parent; ++}; ++ ++struct pci_iomul_switch { ++ struct list_head list; /* bus_list_lock protects */ ++ ++ /* ++ * This lock the following entry and following ++ * pci_iomul_slot/pci_iomul_func. 
++ */ ++ struct mutex lock; ++ struct kref kref; ++ ++ struct resource io_resource; ++ struct resource *io_region; ++ unsigned int count; ++ struct pci_dev *current_pdev; ++ ++ int segment; ++ uint8_t bus; ++ ++ uint32_t io_base; ++ uint32_t io_limit; ++ ++ /* func which has the largeset io size*/ ++ struct pci_iomul_func *func; ++ ++ struct list_head slots; ++}; ++ ++static inline void pci_iomul_switch_get(struct pci_iomul_switch *sw) ++{ ++ kref_get(&sw->kref); ++} ++ ++static inline void pci_iomul_switch_release(struct kref *kref) ++{ ++ struct pci_iomul_switch *sw = container_of(kref, ++ struct pci_iomul_switch, ++ kref); ++ kfree(sw); ++} ++ ++static inline void pci_iomul_switch_put(struct pci_iomul_switch *sw) ++{ ++ kref_put(&sw->kref, &pci_iomul_switch_release); ++} ++ ++struct pci_iomul_slot { ++ struct list_head sibling; ++ struct kref kref; ++ /* ++ * busnr ++ * when pcie, the primary busnr of the PCI-PCI bridge on which ++ * this devices sits. ++ */ ++ uint8_t switch_busnr; ++ struct resource dummy_parent[PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES]; ++ ++ /* device */ ++ int segment; ++ uint8_t bus; ++ uint8_t dev; ++ ++ struct pci_iomul_func *func[PCI_NUM_FUNC]; ++}; ++ ++static inline void pci_iomul_slot_get(struct pci_iomul_slot *slot) ++{ ++ kref_get(&slot->kref); ++} ++ ++static inline void pci_iomul_slot_release(struct kref *kref) ++{ ++ struct pci_iomul_slot *slot = container_of(kref, struct pci_iomul_slot, ++ kref); ++ kfree(slot); ++} ++ ++static inline void pci_iomul_slot_put(struct pci_iomul_slot *slot) ++{ ++ kref_put(&slot->kref, &pci_iomul_slot_release); ++} ++ ++int pci_iomul_switch_io_allocated(const struct pci_iomul_switch *); ++void pci_iomul_get_lock_switch(struct pci_dev *, struct pci_iomul_switch **, ++ struct pci_iomul_slot **); +--- head-2011-03-11.orig/drivers/pci/pci.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/pci/pci.c 2011-01-31 14:31:28.000000000 +0100 +@@ -2984,6 +2984,13 @@ resource_size_t pci_specified_resource_a + */ + int pci_is_reassigndev(struct pci_dev *dev) + { ++#ifdef CONFIG_PCI_GUESTDEV ++ int result; ++ ++ result = pci_is_guestdev_to_reassign(dev); ++ if (result) ++ return result; ++#endif /* CONFIG_PCI_GUESTDEV */ + return (pci_specified_resource_alignment(dev) != 0); + } + +--- head-2011-03-11.orig/drivers/pci/pci.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/pci/pci.h 2011-01-31 14:31:28.000000000 +0100 +@@ -350,4 +350,11 @@ static inline int pci_dev_specific_reset + } + #endif + ++#ifdef CONFIG_PCI_GUESTDEV ++extern int pci_is_guestdev_to_reassign(struct pci_dev *dev); ++extern int pci_is_iomuldev(struct pci_dev *dev); ++#else ++#define pci_is_iomuldev(dev) 0 ++#endif ++ + #endif /* DRIVERS_PCI_H */ +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/drivers/pci/pci-iomul.c 2011-01-31 14:31:28.000000000 +0100 +@@ -0,0 +1,437 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ++ * ++ * Copyright (c) 2009 Isaku Yamahata ++ * VA Linux Systems Japan K.K. ++ */ ++ ++#include "iomulti.h" ++#include ++#include ++#include ++#include ++#include + -+/*****************************************************************************/ +struct pci_iomul_data { + struct mutex lock; + @@ -2193,7 +2265,7 @@ Acked-by: jbeulich@novell.com + iomul->func = NULL; + filp->private_data = (void*)iomul; + -+ return 0; ++ return nonseekable_open(inode, filp); +} + +static int pci_iomul_release(struct inode *inode, struct file *filp) @@ -2490,7 +2562,7 @@ Acked-by: jbeulich@novell.com +static const struct file_operations pci_iomul_fops = { + .owner = THIS_MODULE, + -+ .open = pci_iomul_open, /* nonseekable_open */ ++ .open = pci_iomul_open, + .release = pci_iomul_release, + + .unlocked_ioctl = pci_iomul_ioctl, @@ -2499,26 +2571,29 @@ Acked-by: jbeulich@novell.com +static struct miscdevice pci_iomul_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "pci_iomul", ++ .nodename = "xen/pci_iomul", + .fops = &pci_iomul_fops, +}; + -+static int pci_iomul_init(void) ++static int __init pci_iomul_init(void) +{ + int error; ++ + error = misc_register(&pci_iomul_miscdev); + if (error != 0) { -+ printk(KERN_ALERT "Couldn't register /dev/misc/pci_iomul"); ++ pr_alert("Couldn't register /dev/xen/pci_iomul"); + return error; + } -+ printk("PCI IO multiplexer device installed.\n"); ++ pr_info("PCI IO multiplexer device installed\n"); + return 0; +} + -+#if 0 -+static void pci_iomul_cleanup(void) ++#ifdef MODULE ++static void __exit pci_iomul_cleanup(void) +{ + misc_deregister(&pci_iomul_miscdev); +} ++module_exit(pci_iomul_cleanup); +#endif + +/* @@ -2527,14 +2602,50 @@ Acked-by: jbeulich@novell.com + */ +late_initcall(pci_iomul_init); + ++MODULE_ALIAS("devname:xen/pci_iomul"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Isaku Yamahata "); +MODULE_DESCRIPTION("PCI IO space multiplexing driver"); +--- head-2011-03-11.orig/include/linux/acpi.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/acpi.h 2011-01-31 14:31:28.000000000 +0100 +@@ -248,6 +248,8 @@ int acpi_check_region(resource_size_t st + + int acpi_resources_are_enforced(void); + ++int acpi_pci_get_root_seg_bbn(char *hid, char *uid, int *seg, int *bbn); ++ + #ifdef CONFIG_PM_SLEEP + void __init acpi_no_s4_hw_signature(void); + void __init acpi_old_suspend_ordering(void); +--- head-2011-03-11.orig/include/linux/pci.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/pci.h 2011-01-31 14:31:28.000000000 +0100 +@@ -1538,5 +1538,11 @@ int pci_vpd_find_tag(const u8 *buf, unsi + int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, + unsigned int len, const char *kw); + ++#ifdef CONFIG_PCI_GUESTDEV ++int pci_is_guestdev(struct pci_dev *dev); ++#else ++#define pci_is_guestdev(dev) 0 ++#endif ++ + #endif /* __KERNEL__ */ + #endif /* LINUX_PCI_H */ +--- head-2011-03-11.orig/include/xen/Kbuild 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/xen/Kbuild 2011-01-31 14:31:28.000000000 +0100 +@@ -1,2 +1,3 @@ + header-y += evtchn.h + header-y += privcmd.h ++header-y += public/ +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/include/xen/public/Kbuild 2011-01-31 14:31:28.000000000 +0100 +@@ -0,0 +1 @@ ++header-y += iomulti.h --- /dev/null 1970-01-01 
00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/pci/iomulti.h 2010-03-24 13:55:21.000000000 +0100 -@@ -0,0 +1,51 @@ -+#ifndef PCI_IOMULTI_H -+#define PCI_IOMULTI_H ++++ head-2011-03-11/include/xen/public/iomulti.h 2011-01-31 14:31:28.000000000 +0100 +@@ -0,0 +1,50 @@ ++#ifndef __LINUX_PUBLIC_IOMULTI_H__ ++#define __LINUX_PUBLIC_IOMULTI_H__ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by @@ -2552,7 +2663,6 @@ Acked-by: jbeulich@novell.com + * + * Copyright (c) 2009 Isaku Yamahata + * VA Linux Systems Japan K.K. -+ * + */ + +struct pci_iomul_setup { @@ -2583,59 +2693,4 @@ Acked-by: jbeulich@novell.com +#define PCI_IOMUL_IN _IOWR('P', 2, struct pci_iomul_in) +#define PCI_IOMUL_OUT _IOW ('P', 3, struct pci_iomul_out) + -+#endif /* PCI_IOMULTI_H */ ---- head-2010-04-29.orig/drivers/pci/pci.c 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/drivers/pci/pci.c 2010-04-29 09:30:41.000000000 +0200 -@@ -2901,6 +2901,13 @@ resource_size_t pci_specified_resource_a - */ - int pci_is_reassigndev(struct pci_dev *dev) - { -+#ifdef CONFIG_PCI_GUESTDEV -+ int result; -+ -+ result = pci_is_guestdev_to_reassign(dev); -+ if (result) -+ return result; -+#endif /* CONFIG_PCI_GUESTDEV */ - return (pci_specified_resource_alignment(dev) != 0); - } - ---- head-2010-04-29.orig/drivers/pci/pci.h 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/drivers/pci/pci.h 2010-03-24 13:55:21.000000000 +0100 -@@ -337,4 +337,11 @@ static inline int pci_dev_specific_reset - } - #endif - -+#ifdef CONFIG_PCI_GUESTDEV -+extern int pci_is_guestdev_to_reassign(struct pci_dev *dev); -+extern int pci_is_iomuldev(struct pci_dev *dev); -+#else -+#define pci_is_iomuldev(dev) 0 -+#endif -+ - #endif /* DRIVERS_PCI_H */ ---- head-2010-04-29.orig/include/linux/acpi.h 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/include/linux/acpi.h 2010-03-24 13:55:21.000000000 +0100 -@@ -247,6 +247,8 @@ int acpi_check_region(resource_size_t st - int acpi_check_mem_region(resource_size_t start, resource_size_t n, - const char *name); - -+int acpi_pci_get_root_seg_bbn(char *hid, char *uid, int *seg, int *bbn); -+ - #ifdef CONFIG_PM_SLEEP - void __init acpi_no_s4_hw_signature(void); - void __init acpi_old_suspend_ordering(void); ---- head-2010-04-29.orig/include/linux/pci.h 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/include/linux/pci.h 2010-03-24 13:55:21.000000000 +0100 -@@ -1504,5 +1504,11 @@ int pci_vpd_find_tag(const u8 *buf, unsi - int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, - unsigned int len, const char *kw); - -+#ifdef CONFIG_PCI_GUESTDEV -+int pci_is_guestdev(struct pci_dev *dev); -+#else -+#define pci_is_guestdev(dev) 0 -+#endif -+ - #endif /* __KERNEL__ */ - #endif /* LINUX_PCI_H */ ++#endif /* __LINUX_PUBLIC_IOMULTI_H__ */ diff --git a/patches.xen/pci-reserve b/patches.xen/pci-reserve index 4a9cccb..a4cae76 100644 --- a/patches.xen/pci-reserve +++ b/patches.xen/pci-reserve @@ -8,9 +8,9 @@ by PCI hotplug. Signed-off-by: Isaku Yamahata Acked-by: jbeulich@novell.com ---- head-2010-04-29.orig/Documentation/kernel-parameters.txt 2010-04-29 09:30:30.000000000 +0200 -+++ head-2010-04-29/Documentation/kernel-parameters.txt 2010-04-29 09:30:50.000000000 +0200 -@@ -2032,6 +2032,13 @@ and is between 256 and 4096 characters. 
+--- head-2011-03-11.orig/Documentation/kernel-parameters.txt 2011-03-11 10:49:08.000000000 +0100 ++++ head-2011-03-11/Documentation/kernel-parameters.txt 2011-03-11 10:49:17.000000000 +0100 +@@ -2010,6 +2010,13 @@ bytes respectively. Such letter suffixes off: Turn ECRC off on: Turn ECRC on. @@ -24,8 +24,8 @@ Acked-by: jbeulich@novell.com pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power Management. off Disable ASPM. ---- head-2010-04-29.orig/drivers/pci/Kconfig 2010-03-24 13:55:21.000000000 +0100 -+++ head-2010-04-29/drivers/pci/Kconfig 2010-03-24 14:00:05.000000000 +0100 +--- head-2011-03-11.orig/drivers/pci/Kconfig 2011-01-31 14:31:27.000000000 +0100 ++++ head-2011-03-11/drivers/pci/Kconfig 2011-01-31 14:32:40.000000000 +0100 @@ -45,6 +45,13 @@ config PCI_IOMULTI help Say Y here if you need io multiplexing. @@ -40,19 +40,19 @@ Acked-by: jbeulich@novell.com config PCI_STUB tristate "PCI Stub driver" depends on PCI ---- head-2010-04-29.orig/drivers/pci/Makefile 2010-03-24 13:55:21.000000000 +0100 -+++ head-2010-04-29/drivers/pci/Makefile 2010-03-24 14:00:05.000000000 +0100 -@@ -9,6 +9,7 @@ obj-$(CONFIG_PROC_FS) += proc.o - obj-$(CONFIG_SYSFS) += slot.o - obj-$(CONFIG_PCI_GUESTDEV) += guestdev.o - obj-$(CONFIG_PCI_IOMULTI) += iomulti.o +--- head-2011-03-11.orig/drivers/pci/Makefile 2011-01-31 14:31:28.000000000 +0100 ++++ head-2011-03-11/drivers/pci/Makefile 2011-01-31 14:32:40.000000000 +0100 +@@ -11,6 +11,7 @@ obj-$(CONFIG_PCI_GUESTDEV) += guestdev.o + obj-$(CONFIG_PCI_IOMULTI) += pci-iomul.o + iomul-$(CONFIG_PCI_IOMULTI) := iomulti.o + obj-y += $(iomul-y) $(iomul-m) +obj-$(CONFIG_PCI_RESERVE) += reserve.o obj-$(CONFIG_PCI_QUIRKS) += quirks.o ---- head-2010-04-29.orig/drivers/pci/pci.h 2010-03-24 13:55:21.000000000 +0100 -+++ head-2010-04-29/drivers/pci/pci.h 2010-03-24 14:00:05.000000000 +0100 -@@ -344,4 +344,19 @@ extern int pci_is_iomuldev(struct pci_de +--- head-2011-03-11.orig/drivers/pci/pci.h 2011-01-31 14:31:28.000000000 +0100 ++++ head-2011-03-11/drivers/pci/pci.h 2011-01-31 14:32:40.000000000 +0100 +@@ -357,4 +357,19 @@ extern int pci_is_iomuldev(struct pci_de #define pci_is_iomuldev(dev) 0 #endif @@ -73,8 +73,8 @@ Acked-by: jbeulich@novell.com + #endif /* DRIVERS_PCI_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/pci/reserve.c 2010-03-24 14:00:05.000000000 +0100 -@@ -0,0 +1,138 @@ ++++ head-2011-03-11/drivers/pci/reserve.c 2011-01-31 14:32:40.000000000 +0100 +@@ -0,0 +1,137 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by @@ -92,7 +92,6 @@ Acked-by: jbeulich@novell.com + * + * Copyright (c) 2009 Isaku Yamahata + * VA Linux Systems Japan K.K. 
-+ * + */ + +#include @@ -213,8 +212,8 @@ Acked-by: jbeulich@novell.com + return 1; +} +__setup("pci_reserve=", pci_reserve_setup); ---- head-2010-04-29.orig/drivers/pci/setup-bus.c 2010-04-29 09:29:51.000000000 +0200 -+++ head-2010-04-29/drivers/pci/setup-bus.c 2010-03-24 14:09:20.000000000 +0100 +--- head-2011-03-11.orig/drivers/pci/setup-bus.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/pci/setup-bus.c 2011-01-31 14:32:40.000000000 +0100 @@ -448,6 +448,9 @@ static void pbus_size_io(struct pci_bus size = ALIGN(size + size1, 4096); if (size < old_size) diff --git a/patches.xen/pcifront-claim.patch b/patches.xen/pcifront-claim.patch deleted file mode 100644 index 4a6aaae..0000000 --- a/patches.xen/pcifront-claim.patch +++ /dev/null @@ -1,55 +0,0 @@ -http://git.kernel.org/?p=linux/kernel/git/konrad/xen.git;a=commit;h=621d869f36b215d63bb99e7ecd7a11f029821b85 -xen-pcifront: Claim PCI resources before going live. -author Konrad Rzeszutek Wilk - Fri, 18 Jun 2010 19:31:47 +0000 (15:31 -0400) -committer Konrad Rzeszutek Wilk - Fri, 18 Jun 2010 19:40:37 +0000 (15:40 -0400) -We were missing the important step of claiming (and setting the -parent of IO and MEM regions to 'PCI IO' and 'PCI mem' respectivly) -of the BARs. This meant that during hot inserts we would get: - -igb 0000:01:00.1: device not available (can't reserve [mem 0xfb840000-0xfb85ffff]) - -even thought the memory region had been reserved before. - -Signed-off-by: Konrad Rzeszutek Wilk ---- linux-2.6.34.1/drivers/xen/pcifront/pci_op.c.orig2 2010-09-29 16:31:58.702675503 +0200 -+++ linux-2.6.34.1/drivers/xen/pcifront/pci_op.c 2010-09-29 16:38:47.260675349 +0200 -@@ -426,7 +426,7 @@ static int pcifront_claim_resource(struc - r = &dev->resource[i]; - - if (!r->parent && r->start && r->flags) { -- dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n", -+ dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n", - pci_name(dev), i); - pci_claim_resource(dev, i); - } -@@ -516,14 +516,15 @@ int __devinit pcifront_scan_root(struct - - list_add(&bus_entry->list, &pdev->root_buses); - -+ /* pci_scan_bus_parented skips devices which do not have a have -+ * devfn==0. The pcifront_scan_bus enumerates all devfn. */ -+ err = pcifront_scan_bus(pdev, domain, bus, b); -+ - /* Claim resources before going "live" with our devices */ - pci_walk_bus(b, pcifront_claim_resource, pdev); - - pci_bus_add_devices(b); - -- /* pci_scan_bus_parented skips devices which do not have a have -- * devfn==0. The pcifront_scan_bus enumerates all devfn. */ -- err = pcifront_scan_bus(pdev, domain, bus, b); - - return err; - -@@ -560,6 +561,9 @@ int __devinit pcifront_rescan_root(struc - - err = pcifront_scan_bus(pdev, domain, bus, b); - -+ /* Claim resources before going "live" with our devices */ -+ pci_walk_bus(b, pcifront_claim_resource, pdev); -+ - return err; - } - diff --git a/patches.xen/pcifront-dont-race-udev.patch b/patches.xen/pcifront-dont-race-udev.patch deleted file mode 100644 index 886a7c1..0000000 --- a/patches.xen/pcifront-dont-race-udev.patch +++ /dev/null @@ -1,56 +0,0 @@ -http://git.kernel.org/?p=linux/kernel/git/konrad/xen.git;a=commitdiff;h=4a65de894fc0af05397eedca180d0ea7d8c6caba#patch1 -git/pub/scm / linux/kernel/git/konrad/xen.git / commitdiff -? search: re -summary | shortlog | log | commit | commitdiff | tree -raw | patch (parent: 621d869) -xen-pcifront: Don't race with udev when discovering new devices. 
-author Konrad Rzeszutek Wilk - Fri, 23 Jul 2010 14:35:57 +0000 (10:35 -0400) -committer Konrad Rzeszutek Wilk - Fri, 23 Jul 2010 15:15:56 +0000 (11:15 -0400) -We inadvertly would call 'pci_bus_add_device' right after discovering -the device, but before claiming the BARs. This ended up firing off -a uevent and udev loading the module and the modules failing to -request_region as they were not claimed. We fix this by holding off -going live by calling 'pci_bus_add_devices' at the end. - -Signed-off-by: Konrad Rzeszutek Wilk ---- linux-2.6.34.1/drivers/xen/pcifront/pci_op.c.orig3 2010-09-29 16:32:08.324675371 +0200 -+++ linux-2.6.34.1/drivers/xen/pcifront/pci_op.c 2010-09-29 16:37:23.215674973 +0200 -@@ -456,17 +456,10 @@ int __devinit pcifront_scan_bus(struct p - } - - d = pci_scan_single_device(b, devfn); -- if (d) { -+ if (d) - dev_info(&pdev->xdev->dev, "New device on " - "%04x:%02x:%02x.%02x found.\n", domain, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); -- err = pci_bus_add_device(d); -- if (err) { -- dev_err(&pdev->xdev->dev, "Failed to add " -- " device to bus.\n"); -- return err; -- } -- } - } - - return 0; -@@ -523,6 +516,7 @@ int __devinit pcifront_scan_root(struct - /* Claim resources before going "live" with our devices */ - pci_walk_bus(b, pcifront_claim_resource, pdev); - -+ /* Create SysFS and notify udev of the devices. Aka: "going live" */ - pci_bus_add_devices(b); - - -@@ -564,6 +558,9 @@ int __devinit pcifront_rescan_root(struc - /* Claim resources before going "live" with our devices */ - pci_walk_bus(b, pcifront_claim_resource, pdev); - -+ /* Create SysFS and notify udev of the devices. Aka: "going live" */ -+ pci_bus_add_devices(b); -+ - return err; - } - diff --git a/patches.xen/pcifront-enforce-scan.patch b/patches.xen/pcifront-enforce-scan.patch deleted file mode 100644 index 5217491..0000000 --- a/patches.xen/pcifront-enforce-scan.patch +++ /dev/null @@ -1,127 +0,0 @@ -http://git.kernel.org/?p=linux/kernel/git/konrad/xen.git;a=commitdiff;h=978b7df39be386f9a875bb14fcd84145e8ad0ee2#patch1 -git/pub/scm / linux/kernel/git/konrad/xen.git / commitdiff -? search: re -summary | shortlog | log | commit | commitdiff | tree -raw | patch (parent: 28a4d3a) -xen-pcifront: Enforce scanning of device functions on initial execution. -author Konrad Rzeszutek Wilk - Tue, 8 Jun 2010 16:59:41 +0000 (12:59 -0400) -committer Konrad Rzeszutek Wilk - Fri, 18 Jun 2010 19:40:27 +0000 (15:40 -0400) -'pci_scan_slot' abondons scanning of functions above 0 if a device with -function has not been detected. We need to be able to scan functions -above 0 in case the user has passed in devices without the function 0 -for the slot/bus. To that end we are reusing the code that existed in -the rescan code path and make usage of it in the initial execution -path. - -Signed-off-by: Konrad Rzeszutek Wilk ---- linux-2.6.34.1/drivers/xen/pcifront/pci_op.c.orig 2010-09-29 16:31:32.330675478 +0200 -+++ linux-2.6.34.1/drivers/xen/pcifront/pci_op.c 2010-09-29 16:39:23.197674096 +0200 -@@ -435,6 +435,43 @@ static int pcifront_claim_resource(struc - return 0; - } - -+int __devinit pcifront_scan_bus(struct pcifront_device *pdev, -+ unsigned int domain, unsigned int bus, -+ struct pci_bus *b) -+{ -+ struct pci_dev *d; -+ unsigned int devfn; -+ int err; -+ -+ /* Scan the bus for functions and add. -+ * We omit handling of PCI bridge attachment because pciback prevents -+ * bridges from being exported. 
-+ */ -+ for (devfn = 0; devfn < 0x100; devfn++) { -+ d = pci_get_slot(b, devfn); -+ if (d) { -+ /* Device is already known. */ -+ pci_dev_put(d); -+ continue; -+ } -+ -+ d = pci_scan_single_device(b, devfn); -+ if (d) { -+ dev_info(&pdev->xdev->dev, "New device on " -+ "%04x:%02x:%02x.%02x found.\n", domain, bus, -+ PCI_SLOT(devfn), PCI_FUNC(devfn)); -+ err = pci_bus_add_device(d); -+ if (err) { -+ dev_err(&pdev->xdev->dev, "Failed to add " -+ " device to bus.\n"); -+ return err; -+ } -+ } -+ } -+ -+ return 0; -+} -+ - int __devinit pcifront_scan_root(struct pcifront_device *pdev, - unsigned int domain, unsigned int bus) - { -@@ -484,7 +521,11 @@ int __devinit pcifront_scan_root(struct - - pci_bus_add_devices(b); - -- return 0; -+ /* pci_scan_bus_parented skips devices which do not have a have -+ * devfn==0. The pcifront_scan_bus enumerates all devfn. */ -+ err = pcifront_scan_bus(pdev, domain, bus, b); -+ -+ return err; - - err_out: - kfree(bus_entry); -@@ -496,10 +537,9 @@ int __devinit pcifront_scan_root(struct - int __devinit pcifront_rescan_root(struct pcifront_device *pdev, - unsigned int domain, unsigned int bus) - { -+ int err; - struct pci_bus *b; -- struct pci_dev *d; -- unsigned int devfn; -- -+ - #ifndef CONFIG_PCI_DOMAINS - if (domain != 0) { - dev_err(&pdev->xdev->dev, -@@ -518,34 +558,9 @@ int __devinit pcifront_rescan_root(struc - /* If the bus is unknown, create it. */ - return pcifront_scan_root(pdev, domain, bus); - -- /* Rescan the bus for newly attached functions and add. -- * We omit handling of PCI bridge attachment because pciback prevents -- * bridges from being exported. -- */ -- for (devfn = 0; devfn < 0x100; devfn++) { -- d = pci_get_slot(b, devfn); -- if(d) { -- /* Device is already known. */ -- pci_dev_put(d); -- continue; -- } -- -- d = pci_scan_single_device(b, devfn); -- if (d) { -- int err; -- -- dev_info(&pdev->xdev->dev, "New device on " -- "%04x:%02x:%02x.%02x found.\n", domain, bus, -- PCI_SLOT(devfn), PCI_FUNC(devfn)); -- err = pci_bus_add_device(d); -- if (err) -- dev_err(&pdev->xdev->dev, -- "error %d adding device, continuing.\n", -- err); -- } -- } -+ err = pcifront_scan_bus(pdev, domain, bus, b); - -- return 0; -+ return err; - } - - static void free_root_bus_devs(struct pci_bus *bus) diff --git a/patches.xen/pcifront-irq-not-evtchn.patch b/patches.xen/pcifront-irq-not-evtchn.patch deleted file mode 100644 index 3d44a0d..0000000 --- a/patches.xen/pcifront-irq-not-evtchn.patch +++ /dev/null @@ -1,14 +0,0 @@ -unbind_from_irqhandler takes irq, not evtchn, as its first argument. 
- -Signed-off-by: Rafal Wojtczuk ---- linux-2.6.34.1/drivers/xen/pcifront/xenbus.c.orig 2010-09-29 16:47:39.961674359 +0200 -+++ linux-2.6.34.1/drivers/xen/pcifront/xenbus.c 2010-09-29 16:47:49.458675391 +0200 -@@ -61,7 +61,7 @@ static void free_pdev(struct pcifront_de - - /*For PCIE_AER error handling job*/ - flush_scheduled_work(); -- unbind_from_irqhandler(pdev->evtchn, pdev); -+ unbind_from_irqhandler(irq_from_evtchn(pdev->evtchn), pdev); - - if (pdev->evtchn != INVALID_EVTCHN) - xenbus_free_evtchn(pdev->xdev, pdev->evtchn); diff --git a/patches.xen/tmem b/patches.xen/tmem index 9cb58df..9bf577f 100644 --- a/patches.xen/tmem +++ b/patches.xen/tmem @@ -30,8 +30,33 @@ http://oss.oracle.com/projects/tmem Signed-off-by: Dan Magenheimer Acked-by: jbeulich@novell.com ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/Documentation/transcendent-memory.txt 2010-03-24 14:09:47.000000000 +0100 +--- + Documentation/transcendent-memory.txt | 176 ++++++++++++++++++++++++++++++++ + fs/btrfs/extent_io.c | 9 + + fs/btrfs/super.c | 2 + fs/buffer.c | 6 + + fs/ext3/super.c | 2 + fs/ext4/super.c | 3 + fs/mpage.c | 8 + + fs/ocfs2/super.c | 2 + fs/super.c | 5 + include/linux/fs.h | 3 + include/linux/precache.h | 55 ++++++++++ + include/linux/swap.h | 53 +++++++++ + kernel/sysctl.c | 11 ++ + mm/Kconfig | 28 +++++ + mm/Makefile | 3 + mm/filemap.c | 11 ++ + mm/page_io.c | 13 ++ + mm/precache.c | 138 +++++++++++++++++++++++++ + mm/preswap.c | 182 ++++++++++++++++++++++++++++++++++ + mm/swapfile.c | 143 +++++++++++++++++++++++++- + mm/tmem.h | 84 +++++++++++++++ + mm/truncate.c | 10 + + 22 files changed, 943 insertions(+), 4 deletions(-) + +--- /dev/null ++++ b/Documentation/transcendent-memory.txt @@ -0,0 +1,176 @@ +Normal memory is directly addressable by the kernel, of a known +normally-fixed size, synchronously accessible, and persistent (though @@ -209,8 +234,8 @@ Acked-by: jbeulich@novell.com +- A tmem implementation provides no serialization guarantees (e.g. to + an SMP Linux). So if different Linux threads are putting and flushing + the same page, the results are indeterminate. 
---- head-2010-05-12.orig/fs/btrfs/extent_io.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/btrfs/extent_io.c 2010-04-15 09:41:13.000000000 +0200 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c @@ -10,6 +10,7 @@ #include #include @@ -219,7 +244,7 @@ Acked-by: jbeulich@novell.com #include "extent_io.h" #include "extent_map.h" #include "compat.h" -@@ -2030,6 +2031,13 @@ static int __extent_read_full_page(struc +@@ -1990,6 +1991,13 @@ static int __extent_read_full_page(struc set_page_extent_mapped(page); @@ -231,9 +256,9 @@ Acked-by: jbeulich@novell.com + } + end = page_end; - lock_extent(tree, start, end, GFP_NOFS); - -@@ -2146,6 +2154,7 @@ static int __extent_read_full_page(struc + while (1) { + lock_extent(tree, start, end, GFP_NOFS); +@@ -2117,6 +2125,7 @@ static int __extent_read_full_page(struc cur = cur + iosize; page_offset += iosize; } @@ -241,8 +266,8 @@ Acked-by: jbeulich@novell.com if (!nr) { if (!PageError(page)) SetPageUptodate(page); ---- head-2010-05-12.orig/fs/btrfs/super.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/btrfs/super.c 2010-04-15 09:41:20.000000000 +0200 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c @@ -39,6 +39,7 @@ #include #include @@ -251,7 +276,7 @@ Acked-by: jbeulich@novell.com #include "compat.h" #include "ctree.h" #include "disk-io.h" -@@ -477,6 +478,7 @@ static int btrfs_fill_super(struct super +@@ -607,6 +608,7 @@ static int btrfs_fill_super(struct super sb->s_root = root_dentry; save_mount_options(sb, data); @@ -259,8 +284,8 @@ Acked-by: jbeulich@novell.com return 0; fail_close: ---- head-2010-05-12.orig/fs/buffer.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/buffer.c 2010-03-24 14:09:47.000000000 +0100 +--- a/fs/buffer.c ++++ b/fs/buffer.c @@ -41,6 +41,7 @@ #include #include @@ -269,9 +294,9 @@ Acked-by: jbeulich@novell.com static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); -@@ -276,6 +277,11 @@ void invalidate_bdev(struct block_device - +@@ -277,6 +278,11 @@ void invalidate_bdev(struct block_device invalidate_bh_lrus(); + lru_add_drain_all(); /* make sure all lru add caches are flushed */ invalidate_mapping_pages(mapping, 0, -1); + + /* 99% of the time, we don't need to flush the precache on the bdev. 
@@ -281,17 +306,17 @@ Acked-by: jbeulich@novell.com } EXPORT_SYMBOL(invalidate_bdev); ---- head-2010-05-12.orig/fs/ext3/super.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/ext3/super.c 2010-03-24 14:09:47.000000000 +0100 -@@ -38,6 +38,7 @@ +--- a/fs/ext3/super.c ++++ b/fs/ext3/super.c +@@ -36,6 +36,7 @@ + #include #include - #include #include +#include #include -@@ -1370,6 +1371,7 @@ static int ext3_setup_super(struct super +@@ -1367,6 +1368,7 @@ static int ext3_setup_super(struct super } else { ext3_msg(sb, KERN_INFO, "using internal journal"); } @@ -299,27 +324,27 @@ Acked-by: jbeulich@novell.com return res; } ---- head-2010-05-12.orig/fs/ext4/super.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/ext4/super.c 2010-04-15 09:41:30.000000000 +0200 -@@ -39,6 +39,7 @@ +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -38,6 +38,7 @@ #include #include #include +#include #include - #include "ext4.h" -@@ -1784,6 +1785,8 @@ static int ext4_setup_super(struct super + #include +@@ -1941,6 +1942,8 @@ static int ext4_setup_super(struct super EXT4_INODES_PER_GROUP(sb), - sbi->s_mount_opt); + sbi->s_mount_opt, sbi->s_mount_opt2); + precache_init(sb); + return res; } ---- head-2010-05-12.orig/fs/mpage.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/mpage.c 2010-04-15 09:41:38.000000000 +0200 +--- a/fs/mpage.c ++++ b/fs/mpage.c @@ -27,6 +27,7 @@ #include #include @@ -328,7 +353,7 @@ Acked-by: jbeulich@novell.com /* * I/O completion handler for multipage BIOs. -@@ -286,6 +287,13 @@ do_mpage_readpage(struct bio *bio, struc +@@ -271,6 +272,13 @@ do_mpage_readpage(struct bio *bio, struc SetPageMappedToDisk(page); } @@ -342,17 +367,17 @@ Acked-by: jbeulich@novell.com /* * This page will go to BIO. Do we need to send this BIO off first? */ ---- head-2010-05-12.orig/fs/ocfs2/super.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/ocfs2/super.c 2010-03-24 14:09:47.000000000 +0100 +--- a/fs/ocfs2/super.c ++++ b/fs/ocfs2/super.c @@ -41,6 +41,7 @@ #include #include #include +#include - #include #define MLOG_MASK_PREFIX ML_SUPER -@@ -2260,6 +2261,7 @@ static int ocfs2_initialize_super(struct + #include +@@ -2385,6 +2386,7 @@ static int ocfs2_initialize_super(struct mlog_errno(status); goto bail; } @@ -360,18 +385,18 @@ Acked-by: jbeulich@novell.com bail: mlog_exit(status); ---- head-2010-05-12.orig/fs/super.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/fs/super.c 2010-05-12 08:57:07.000000000 +0200 -@@ -38,6 +38,7 @@ +--- a/fs/super.c ++++ b/fs/super.c +@@ -31,6 +31,7 @@ #include - #include #include + #include +#include - #include #include "internal.h" -@@ -105,6 +106,9 @@ static struct super_block *alloc_super(s - s->s_qcop = sb_quotactl_ops; + +@@ -112,6 +113,9 @@ static struct super_block *alloc_super(s + s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; s->s_time_gran = 1000000000; +#ifdef CONFIG_PRECACHE @@ -380,25 +405,17 @@ Acked-by: jbeulich@novell.com } out: return s; -@@ -195,6 +199,7 @@ void deactivate_super(struct super_block - vfs_dq_off(s, 0); - down_write(&s->s_umount); - fs->kill_sb(s); -+ precache_flush_filesystem(s); - put_filesystem(fs); - put_super(s); - } -@@ -221,6 +226,7 @@ void deactivate_locked_super(struct supe - spin_unlock(&sb_lock); - vfs_dq_off(s, 0); - fs->kill_sb(s); +@@ -183,6 +187,7 @@ void deactivate_locked_super(struct supe + * inodes are flushed before we release the fs module. 
+ */ + rcu_barrier(); + precache_flush_filesystem(s); put_filesystem(fs); put_super(s); } else { ---- head-2010-05-12.orig/include/linux/fs.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/linux/fs.h 2010-03-24 14:09:47.000000000 +0100 -@@ -1377,6 +1377,9 @@ struct super_block { +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -1426,6 +1426,9 @@ struct super_block { /* Granularity of c/m/atime in ns. Cannot be worse than a second */ u32 s_time_gran; @@ -408,8 +425,8 @@ Acked-by: jbeulich@novell.com /* * The next field is for VFS *only*. No filesystems have any business ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/linux/precache.h 2010-03-24 14:09:47.000000000 +0100 +--- /dev/null ++++ b/include/linux/precache.h @@ -0,0 +1,55 @@ +#ifndef _LINUX_PRECACHE_H + @@ -466,9 +483,9 @@ Acked-by: jbeulich@novell.com + +#define _LINUX_PRECACHE_H +#endif /* _LINUX_PRECACHE_H */ ---- head-2010-05-12.orig/include/linux/swap.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/linux/swap.h 2010-03-24 14:09:47.000000000 +0100 -@@ -183,8 +183,61 @@ struct swap_info_struct { +--- a/include/linux/swap.h ++++ b/include/linux/swap.h +@@ -186,8 +186,61 @@ struct swap_info_struct { struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ @@ -530,11 +547,11 @@ Acked-by: jbeulich@novell.com struct swap_list_t { int head; /* head of priority-ordered swapfile list */ int next; /* swapfile to be used next */ ---- head-2010-05-12.orig/kernel/sysctl.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/kernel/sysctl.c 2010-03-24 14:09:47.000000000 +0100 -@@ -1274,6 +1274,17 @@ static struct ctl_table vm_table[] = { +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -1330,6 +1330,17 @@ static struct ctl_table vm_table[] = { .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = scan_unevictable_handler, }, +#ifdef CONFIG_PRESWAP + { @@ -550,12 +567,12 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_MEMORY_FAILURE { .procname = "memory_failure_early_kill", ---- head-2010-05-12.orig/mm/Kconfig 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/mm/Kconfig 2010-03-24 14:09:47.000000000 +0100 -@@ -287,3 +287,31 @@ config NOMMU_INITIAL_TRIM_EXCESS - of 1 says that all excess pages should be trimmed. - - See Documentation/nommu-mmap.txt for more information. +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -347,3 +347,31 @@ config NEED_PER_CPU_KM + depends on !SMP + bool + default y + +# +# support for transcendent memory @@ -584,9 +601,9 @@ Acked-by: jbeulich@novell.com + Allows the transcendent memory pool to be used as a pseudo-swap + device which, under some circumstances, will greatly reduce + swapping and thus improve performance. If unsure, say Y. ---- head-2010-05-12.orig/mm/Makefile 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/mm/Makefile 2010-03-24 14:09:47.000000000 +0100 -@@ -17,6 +17,9 @@ obj-y += init-mm.o +--- a/mm/Makefile ++++ b/mm/Makefile +@@ -19,6 +19,9 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock. 
obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o @@ -596,8 +613,8 @@ Acked-by: jbeulich@novell.com obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o ---- head-2010-05-12.orig/mm/filemap.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/mm/filemap.c 2010-03-24 14:09:47.000000000 +0100 +--- a/mm/filemap.c ++++ b/mm/filemap.c @@ -33,6 +33,7 @@ #include #include /* for BUG_ON(!in_atomic()) only */ @@ -606,7 +623,7 @@ Acked-by: jbeulich@novell.com #include /* for page_is_file_cache() */ #include "internal.h" -@@ -119,6 +120,16 @@ void __remove_from_page_cache(struct pag +@@ -116,6 +117,16 @@ void __remove_from_page_cache(struct pag { struct address_space *mapping = page->mapping; @@ -623,8 +640,8 @@ Acked-by: jbeulich@novell.com radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; mapping->nrpages--; ---- head-2010-05-12.orig/mm/page_io.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/mm/page_io.c 2010-04-15 09:41:45.000000000 +0200 +--- a/mm/page_io.c ++++ b/mm/page_io.c @@ -111,6 +111,13 @@ int swap_writepage(struct page *page, st return ret; } @@ -652,9 +669,9 @@ Acked-by: jbeulich@novell.com bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); if (bio == NULL) { unlock_page(page); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/mm/precache.c 2010-03-24 14:09:47.000000000 +0100 -@@ -0,0 +1,140 @@ +--- /dev/null ++++ b/mm/precache.c +@@ -0,0 +1,138 @@ +/* + * linux/mm/precache.c + * @@ -694,7 +711,7 @@ Acked-by: jbeulich@novell.com +static int precache_auto_allocate; /* set to 1 to auto_allocate */ + +int precache_put(struct address_space *mapping, unsigned long index, -+ struct page *page) ++ struct page *page) +{ + u32 tmem_pool = mapping->host->i_sb->precache_poolid; + u64 obj = (unsigned long) mapping->host->i_ino; @@ -709,8 +726,7 @@ Acked-by: jbeulich@novell.com + ret = tmem_new_pool(0, 0, 0); + if (ret < 0) + return 0; -+ printk(KERN_INFO -+ "Mapping superblock for s_id=%s to precache_id=%d\n", ++ pr_info("Mapping superblock for s_id=%s to precache_id=%d\n", + mapping->host->i_sb->s_id, tmem_pool); + mapping->host->i_sb->precache_poolid = tmem_pool; + } @@ -721,7 +737,7 @@ Acked-by: jbeulich@novell.com +} + +int precache_get(struct address_space *mapping, unsigned long index, -+ struct page *empty_page) ++ struct page *empty_page) +{ + u32 tmem_pool = mapping->host->i_sb->precache_poolid; + u64 obj = (unsigned long) mapping->host->i_ino; @@ -774,8 +790,7 @@ Acked-by: jbeulich@novell.com + ret = tmem_destroy_pool(tmem_pool); + if (!ret) + return 0; -+ printk(KERN_INFO -+ "Unmapping superblock for s_id=%s from precache_id=%d\n", ++ pr_info("Unmapping superblock for s_id=%s from precache_id=%d\n", + sb->s_id, ret); + sb->precache_poolid = 0; + return 1; @@ -795,8 +810,8 @@ Acked-by: jbeulich@novell.com + sb->precache_poolid = tmem_new_pool(uuid_lo, uuid_hi, TMEM_POOL_SHARED); +} +EXPORT_SYMBOL(shared_precache_init); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/mm/preswap.c 2010-03-24 14:09:47.000000000 +0100 +--- /dev/null ++++ b/mm/preswap.c @@ -0,0 +1,182 @@ +/* + * linux/mm/preswap.c @@ -980,17 +995,17 @@ Acked-by: jbeulich@novell.com + return; + preswap_poolid = tmem_new_pool(0, 0, TMEM_POOL_PERSIST); +} ---- head-2010-05-12.orig/mm/swapfile.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/mm/swapfile.c 2010-03-24 14:09:47.000000000 +0100 -@@ -587,6 +587,7 @@ static unsigned 
char swap_entry_free(str +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -588,6 +588,7 @@ static unsigned char swap_entry_free(str swap_list.next = p->type; nr_swap_pages++; p->inuse_pages--; + preswap_flush(p->type, offset); - } - - return usage; -@@ -1029,7 +1030,7 @@ static int unuse_mm(struct mm_struct *mm + if (p->flags & SWP_BLKDEV) { + struct gendisk *disk = p->bdev->bd_disk; + if (disk->fops->swap_slot_free_notify) +@@ -1055,7 +1056,7 @@ static int unuse_mm(struct mm_struct *mm * Recycle to start on reaching the end, returning 0 when empty. */ static unsigned int find_next_to_unuse(struct swap_info_struct *si, @@ -999,7 +1014,7 @@ Acked-by: jbeulich@novell.com { unsigned int max = si->max; unsigned int i = prev; -@@ -1055,6 +1056,12 @@ static unsigned int find_next_to_unuse(s +@@ -1081,6 +1082,12 @@ static unsigned int find_next_to_unuse(s prev = 0; i = 1; } @@ -1012,7 +1027,7 @@ Acked-by: jbeulich@novell.com count = si->swap_map[i]; if (count && swap_count(count) != SWAP_MAP_BAD) break; -@@ -1066,8 +1073,12 @@ static unsigned int find_next_to_unuse(s +@@ -1092,8 +1099,12 @@ static unsigned int find_next_to_unuse(s * We completely avoid races by reading each swap page in advance, * and then search for the process using it. All the necessary * page table adjustments can then be made atomically. @@ -1026,7 +1041,7 @@ Acked-by: jbeulich@novell.com { struct swap_info_struct *si = swap_info[type]; struct mm_struct *start_mm; -@@ -1100,7 +1111,7 @@ static int try_to_unuse(unsigned int typ +@@ -1126,7 +1137,7 @@ static int try_to_unuse(unsigned int typ * one pass through swap_map is enough, but not necessarily: * there are races when an instance of an entry might be missed. */ @@ -1035,7 +1050,7 @@ Acked-by: jbeulich@novell.com if (signal_pending(current)) { retval = -EINTR; break; -@@ -1267,6 +1278,8 @@ static int try_to_unuse(unsigned int typ +@@ -1293,6 +1304,8 @@ static int try_to_unuse(unsigned int typ * interactive performance. 
*/ cond_resched(); @@ -1044,7 +1059,7 @@ Acked-by: jbeulich@novell.com } mmput(start_mm); -@@ -1611,7 +1624,7 @@ SYSCALL_DEFINE1(swapoff, const char __us +@@ -1637,7 +1650,7 @@ SYSCALL_DEFINE1(swapoff, const char __us spin_unlock(&swap_lock); current->flags |= PF_OOM_ORIGIN; @@ -1053,7 +1068,7 @@ Acked-by: jbeulich@novell.com current->flags &= ~PF_OOM_ORIGIN; if (err) { -@@ -1663,9 +1676,14 @@ SYSCALL_DEFINE1(swapoff, const char __us +@@ -1689,9 +1702,14 @@ SYSCALL_DEFINE1(swapoff, const char __us swap_map = p->swap_map; p->swap_map = NULL; p->flags = 0; @@ -1068,7 +1083,7 @@ Acked-by: jbeulich@novell.com /* Destroy swap account informatin */ swap_cgroup_swapoff(type); -@@ -1821,6 +1839,7 @@ SYSCALL_DEFINE2(swapon, const char __use +@@ -1886,6 +1904,7 @@ SYSCALL_DEFINE2(swapon, const char __use unsigned long maxpages; unsigned long swapfilepages; unsigned char *swap_map = NULL; @@ -1076,7 +1091,7 @@ Acked-by: jbeulich@novell.com struct page *page = NULL; struct inode *inode = NULL; int did_down = 0; -@@ -2021,6 +2040,12 @@ SYSCALL_DEFINE2(swapon, const char __use +@@ -2088,6 +2107,12 @@ SYSCALL_DEFINE2(swapon, const char __use } } @@ -1089,7 +1104,7 @@ Acked-by: jbeulich@novell.com error = swap_cgroup_swapon(type, maxpages); if (error) goto bad_swap; -@@ -2059,6 +2084,9 @@ SYSCALL_DEFINE2(swapon, const char __use +@@ -2126,6 +2151,9 @@ SYSCALL_DEFINE2(swapon, const char __use else p->prio = --least_priority; p->swap_map = swap_map; @@ -1099,23 +1114,23 @@ Acked-by: jbeulich@novell.com p->flags |= SWP_WRITEOK; nr_swap_pages += nr_good_pages; total_swap_pages += nr_good_pages; -@@ -2082,6 +2110,7 @@ SYSCALL_DEFINE2(swapon, const char __use +@@ -2149,6 +2177,7 @@ SYSCALL_DEFINE2(swapon, const char __use swap_list.head = swap_list.next = type; else swap_info[prev]->next = type; + preswap_init(type); spin_unlock(&swap_lock); mutex_unlock(&swapon_mutex); - error = 0; -@@ -2098,6 +2127,7 @@ bad_swap_2: + atomic_inc(&proc_poll_event); +@@ -2168,6 +2197,7 @@ bad_swap_2: p->swap_file = NULL; p->flags = 0; spin_unlock(&swap_lock); + vfree(preswap_map); vfree(swap_map); - if (swap_file) - filp_close(swap_file, NULL); -@@ -2316,6 +2346,10 @@ int valid_swaphandles(swp_entry_t entry, + if (swap_file) { + if (did_down) { +@@ -2373,6 +2403,10 @@ int valid_swaphandles(swp_entry_t entry, base++; spin_lock(&swap_lock); @@ -1126,7 +1141,7 @@ Acked-by: jbeulich@novell.com if (end > si->max) /* don't go beyond end of map */ end = si->max; -@@ -2326,6 +2360,9 @@ int valid_swaphandles(swp_entry_t entry, +@@ -2383,6 +2417,9 @@ int valid_swaphandles(swp_entry_t entry, break; if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) break; @@ -1136,7 +1151,7 @@ Acked-by: jbeulich@novell.com } /* Count contiguous allocated slots below our target */ for (toff = target; --toff >= base; nr_pages++) { -@@ -2334,6 +2371,9 @@ int valid_swaphandles(swp_entry_t entry, +@@ -2391,6 +2428,9 @@ int valid_swaphandles(swp_entry_t entry, break; if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) break; @@ -1146,7 +1161,7 @@ Acked-by: jbeulich@novell.com } spin_unlock(&swap_lock); -@@ -2560,3 +2600,98 @@ static void free_swap_count_continuation +@@ -2617,3 +2657,98 @@ static void free_swap_count_continuation } } } @@ -1245,8 +1260,8 @@ Acked-by: jbeulich@novell.com +} +#endif +#endif /* CONFIG_PRESWAP */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/mm/tmem.h 2010-03-24 14:09:47.000000000 +0100 +--- /dev/null ++++ b/mm/tmem.h @@ -0,0 +1,84 @@ +/* + * linux/mm/tmem.h @@ -1332,8 +1347,8 @@ Acked-by: 
jbeulich@novell.com + } u; +}; +#endif ---- head-2010-05-12.orig/mm/truncate.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/mm/truncate.c 2010-04-15 09:41:48.000000000 +0200 +--- a/mm/truncate.c ++++ b/mm/truncate.c @@ -16,6 +16,7 @@ #include #include @@ -1369,7 +1384,7 @@ Acked-by: jbeulich@novell.com if (mapping->nrpages == 0) return; -@@ -290,6 +297,7 @@ void truncate_inode_pages_range(struct a +@@ -292,6 +299,7 @@ void truncate_inode_pages_range(struct a pagevec_release(&pvec); mem_cgroup_uncharge_end(); } @@ -1377,7 +1392,7 @@ Acked-by: jbeulich@novell.com } EXPORT_SYMBOL(truncate_inode_pages_range); -@@ -428,6 +436,7 @@ int invalidate_inode_pages2_range(struct +@@ -434,6 +442,7 @@ int invalidate_inode_pages2_range(struct int did_range_unmap = 0; int wrapped = 0; @@ -1385,7 +1400,7 @@ Acked-by: jbeulich@novell.com pagevec_init(&pvec, 0); next = start; while (next <= end && !wrapped && -@@ -486,6 +495,7 @@ int invalidate_inode_pages2_range(struct +@@ -492,6 +501,7 @@ int invalidate_inode_pages2_range(struct mem_cgroup_uncharge_end(); cond_resched(); } diff --git a/patches.xen/xen-balloon-max-target b/patches.xen/xen-balloon-max-target index 63ba1f3..79af8b5 100644 --- a/patches.xen/xen-balloon-max-target +++ b/patches.xen/xen-balloon-max-target @@ -5,9 +5,9 @@ References: 152667, 184727 jb: Also added this to the sysfs representation. ---- head-2010-02-03.orig/drivers/xen/balloon/balloon.c 2010-02-03 11:51:26.000000000 +0100 -+++ head-2010-02-03/drivers/xen/balloon/balloon.c 2010-02-03 11:56:18.000000000 +0100 -@@ -239,7 +239,7 @@ static unsigned long current_target(void +--- head-2010-11-25.orig/drivers/xen/balloon/balloon.c 2010-11-25 11:55:54.000000000 +0100 ++++ head-2010-11-25/drivers/xen/balloon/balloon.c 2010-11-25 13:47:01.000000000 +0100 +@@ -236,7 +236,7 @@ static unsigned long current_target(void return target; } @@ -16,7 +16,7 @@ jb: Also added this to the sysfs representation. { #ifndef CONFIG_XEN #define max_pfn num_physpages -@@ -461,7 +461,7 @@ static void balloon_process(struct work_ +@@ -458,7 +458,7 @@ static void balloon_process(struct work_ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ @@ -25,7 +25,7 @@ jb: Also added this to the sysfs representation. schedule_work(&balloon_worker); } -@@ -536,10 +536,13 @@ static int balloon_read(char *page, char +@@ -533,10 +533,13 @@ static int balloon_read(char *page, char page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" @@ -39,8 +39,8 @@ jb: Also added this to the sysfs representation. PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); ---- head-2010-02-03.orig/drivers/xen/balloon/common.h 2009-06-09 15:01:37.000000000 +0200 -+++ head-2010-02-03/drivers/xen/balloon/common.h 2009-08-19 10:36:49.000000000 +0200 +--- head-2010-11-25.orig/drivers/xen/balloon/common.h 2009-06-09 15:01:37.000000000 +0200 ++++ head-2010-11-25/drivers/xen/balloon/common.h 2009-08-19 10:36:49.000000000 +0200 @@ -52,5 +52,6 @@ int balloon_sysfs_init(void); void balloon_sysfs_exit(void); @@ -48,8 +48,8 @@ jb: Also added this to the sysfs representation. 
+unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ ---- head-2010-02-03.orig/drivers/xen/balloon/sysfs.c 2010-01-28 10:34:35.000000000 +0100 -+++ head-2010-02-03/drivers/xen/balloon/sysfs.c 2009-08-19 10:36:47.000000000 +0200 +--- head-2010-11-25.orig/drivers/xen/balloon/sysfs.c 2010-11-22 13:06:57.000000000 +0100 ++++ head-2010-11-25/drivers/xen/balloon/sysfs.c 2009-08-19 10:36:47.000000000 +0200 @@ -31,6 +31,7 @@ #include #include diff --git a/patches.xen/xen-blkback-bimodal-suse b/patches.xen/xen-blkback-bimodal-suse index 7b37197..9f7c848 100644 --- a/patches.xen/xen-blkback-bimodal-suse +++ b/patches.xen/xen-blkback-bimodal-suse @@ -7,9 +7,9 @@ Patch-mainline: obsolete linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c | 6 ++++++ 2 files changed, 12 insertions(+) ---- head-2010-04-29.orig/drivers/xen/blkback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/xenbus.c 2010-03-25 14:37:55.000000000 +0100 -@@ -500,6 +500,12 @@ static int connect_ring(struct backend_i +--- head-2010-11-25.orig/drivers/xen/blkback/xenbus.c 2010-11-22 13:10:22.000000000 +0100 ++++ head-2010-11-25/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:03.000000000 +0100 +@@ -506,6 +506,12 @@ static int connect_ring(struct backend_i be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; @@ -22,9 +22,9 @@ Patch-mainline: obsolete else { xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); return -1; ---- head-2010-04-29.orig/drivers/xen/blktap/xenbus.c 2010-04-29 09:52:52.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap/xenbus.c 2010-04-29 10:15:25.000000000 +0200 -@@ -441,6 +441,12 @@ static int connect_ring(struct backend_i +--- head-2010-11-25.orig/drivers/xen/blktap/xenbus.c 2010-11-25 10:28:23.000000000 +0100 ++++ head-2010-11-25/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:00.000000000 +0100 +@@ -457,6 +457,12 @@ static int connect_ring(struct backend_i be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; diff --git a/patches.xen/xen-blkback-cdrom b/patches.xen/xen-blkback-cdrom index 36d9a6c..99277e3 100644 --- a/patches.xen/xen-blkback-cdrom +++ b/patches.xen/xen-blkback-cdrom @@ -3,8 +3,8 @@ From: plc@novell.com Patch-mainline: obsolete References: 159907 ---- head-2010-03-24.orig/drivers/xen/blkback/Makefile 2009-06-09 15:01:37.000000000 +0200 -+++ head-2010-03-24/drivers/xen/blkback/Makefile 2010-03-25 14:38:02.000000000 +0100 +--- head-2011-01-30.orig/drivers/xen/blkback/Makefile 2009-06-09 15:01:37.000000000 +0200 ++++ head-2011-01-30/drivers/xen/blkback/Makefile 2010-03-25 14:38:02.000000000 +0100 @@ -1,4 +1,4 @@ obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o obj-$(CONFIG_XEN_BLKBACK_PAGEMAP) += blkback-pagemap.o @@ -12,7 +12,7 @@ References: 159907 -blkbk-y := blkback.o xenbus.o interface.o vbd.o +blkbk-y := blkback.o xenbus.o interface.o vbd.o cdrom.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-03-24/drivers/xen/blkback/cdrom.c 2010-03-25 14:38:02.000000000 +0100 ++++ head-2011-01-30/drivers/xen/blkback/cdrom.c 2010-10-11 10:34:44.000000000 +0200 @@ -0,0 +1,162 @@ +/****************************************************************************** + * blkback/cdrom.c @@ -51,9 +51,9 @@ References: 159907 +#include "common.h" + +#undef DPRINTK -+#define DPRINTK(_f, _a...) 
\ -+ printk("(%s() file=%s, line=%d) " _f "\n", \ -+ __PRETTY_FUNCTION__, __FILE__ , __LINE__ , ##_a ) ++#define DPRINTK(_f, _a...) \ ++ printk(KERN_DEBUG "(%s() file=%s, line=%d) " _f "\n", \ ++ __func__, __FILE__ , __LINE__ , ##_a ) + + +#define MEDIA_PRESENT "media-present" @@ -176,9 +176,9 @@ References: 159907 + } + } +} ---- head-2010-03-24.orig/drivers/xen/blkback/common.h 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-03-24/drivers/xen/blkback/common.h 2010-03-25 14:38:02.000000000 +0100 -@@ -106,6 +106,7 @@ struct backend_info +--- head-2011-01-30.orig/drivers/xen/blkback/common.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-01-30/drivers/xen/blkback/common.h 2010-09-23 16:58:21.000000000 +0200 +@@ -103,6 +103,7 @@ struct backend_info struct xenbus_device *dev; blkif_t *blkif; struct xenbus_watch backend_watch; @@ -186,7 +186,7 @@ References: 159907 unsigned major; unsigned minor; char *mode; -@@ -152,4 +153,7 @@ int blkif_schedule(void *arg); +@@ -149,4 +150,7 @@ int blkif_schedule(void *arg); int blkback_barrier(struct xenbus_transaction xbt, struct backend_info *be, int state); @@ -194,9 +194,9 @@ References: 159907 +void cdrom_add_media_watch(struct backend_info *be); + #endif /* __BLKIF__BACKEND__COMMON_H__ */ ---- head-2010-03-24.orig/drivers/xen/blkback/vbd.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-03-24/drivers/xen/blkback/vbd.c 2010-03-25 14:38:02.000000000 +0100 -@@ -108,6 +108,9 @@ int vbd_translate(struct phys_req *req, +--- head-2011-01-30.orig/drivers/xen/blkback/vbd.c 2011-02-07 14:04:20.000000000 +0100 ++++ head-2011-01-30/drivers/xen/blkback/vbd.c 2011-02-07 14:17:36.000000000 +0100 +@@ -111,6 +111,9 @@ int vbd_translate(struct phys_req *req, if ((operation != READ) && vbd->readonly) goto out; @@ -206,8 +206,8 @@ References: 159907 if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd))) goto out; ---- head-2010-03-24.orig/drivers/xen/blkback/xenbus.c 2010-03-25 14:37:59.000000000 +0100 -+++ head-2010-03-24/drivers/xen/blkback/xenbus.c 2010-03-25 14:38:02.000000000 +0100 +--- head-2011-01-30.orig/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:11.000000000 +0100 ++++ head-2011-01-30/drivers/xen/blkback/xenbus.c 2010-03-25 14:38:02.000000000 +0100 @@ -187,6 +187,12 @@ static int blkback_remove(struct xenbus_ be->backend_watch.node = NULL; } diff --git a/patches.xen/xen-blkfront-cdrom b/patches.xen/xen-blkfront-cdrom index 1b231b4..a01fcf9 100644 --- a/patches.xen/xen-blkfront-cdrom +++ b/patches.xen/xen-blkfront-cdrom @@ -3,8 +3,8 @@ Subject: implement forwarding of CD-ROM specific commands Patch-mainline: obsolete References: fate#300964 ---- head-2010-04-15.orig/drivers/cdrom/Makefile 2010-04-28 15:44:04.000000000 +0200 -+++ head-2010-04-15/drivers/cdrom/Makefile 2010-03-25 14:38:07.000000000 +0100 +--- head-2010-12-06.orig/drivers/cdrom/Makefile 2010-12-06 14:17:48.000000000 +0100 ++++ head-2010-12-06/drivers/cdrom/Makefile 2010-11-23 15:06:54.000000000 +0100 @@ -9,6 +9,7 @@ obj-$(CONFIG_BLK_DEV_IDECD) += obj-$(CONFIG_BLK_DEV_SR) += cdrom.o obj-$(CONFIG_PARIDE_PCD) += cdrom.o @@ -13,8 +13,8 @@ References: fate#300964 obj-$(CONFIG_VIOCD) += viocd.o cdrom.o obj-$(CONFIG_GDROM) += gdrom.o cdrom.o ---- head-2010-04-15.orig/drivers/xen/blkfront/Makefile 2007-06-12 13:13:44.000000000 +0200 -+++ head-2010-04-15/drivers/xen/blkfront/Makefile 2010-03-25 14:38:07.000000000 +0100 +--- head-2010-12-06.orig/drivers/xen/blkfront/Makefile 2007-06-12 13:13:44.000000000 +0200 ++++ head-2010-12-06/drivers/xen/blkfront/Makefile 2010-11-23 
15:06:54.000000000 +0100 @@ -1,5 +1,5 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o @@ -22,9 +22,9 @@ References: fate#300964 -xenblk-objs := blkfront.o vbd.o +xenblk-objs := blkfront.o vbd.o vcd.o ---- head-2010-04-15.orig/drivers/xen/blkfront/blkfront.c 2010-03-25 14:38:05.000000000 +0100 -+++ head-2010-04-15/drivers/xen/blkfront/blkfront.c 2010-03-25 14:38:07.000000000 +0100 -@@ -395,6 +395,8 @@ static void connect(struct blkfront_info +--- head-2010-12-06.orig/drivers/xen/blkfront/blkfront.c 2010-12-06 15:01:01.000000000 +0100 ++++ head-2010-12-06/drivers/xen/blkfront/blkfront.c 2010-12-06 15:01:15.000000000 +0100 +@@ -412,6 +412,8 @@ static void connect(struct blkfront_info add_disk(info->gd); info->is_ready = 1; @@ -33,7 +33,7 @@ References: fate#300964 } /** -@@ -424,6 +426,8 @@ static void blkfront_closing(struct blkf +@@ -441,6 +443,8 @@ static void blkfront_closing(struct blkf xlvbd_sysfs_delif(info); @@ -42,8 +42,8 @@ References: fate#300964 xlvbd_del(info); out: ---- head-2010-04-15.orig/drivers/xen/blkfront/block.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-15/drivers/xen/blkfront/block.h 2010-03-25 14:38:07.000000000 +0100 +--- head-2010-12-06.orig/drivers/xen/blkfront/block.h 2010-12-06 14:37:35.000000000 +0100 ++++ head-2010-12-06/drivers/xen/blkfront/block.h 2010-11-23 15:06:54.000000000 +0100 @@ -163,4 +163,8 @@ static inline void xlvbd_sysfs_delif(str } #endif @@ -53,9 +53,9 @@ References: fate#300964 +extern void unregister_vcd(struct blkfront_info *info); + #endif /* __XEN_DRIVERS_BLOCK_H__ */ ---- head-2010-04-15.orig/drivers/xen/blkfront/vbd.c 2010-03-25 16:41:12.000000000 +0100 -+++ head-2010-04-15/drivers/xen/blkfront/vbd.c 2010-03-25 14:38:07.000000000 +0100 -@@ -369,7 +369,8 @@ xlvbd_add(blkif_sector_t capacity, int v +--- head-2010-12-06.orig/drivers/xen/blkfront/vbd.c 2010-11-23 16:11:19.000000000 +0100 ++++ head-2010-12-06/drivers/xen/blkfront/vbd.c 2010-11-23 15:06:54.000000000 +0100 +@@ -367,7 +367,8 @@ xlvbd_add(blkif_sector_t capacity, int v goto out; info->mi = mi; @@ -65,7 +65,7 @@ References: fate#300964 nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor, nr_minors); -@@ -383,7 +384,7 @@ xlvbd_add(blkif_sector_t capacity, int v +@@ -381,7 +382,7 @@ xlvbd_add(blkif_sector_t capacity, int v offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); @@ -75,8 +75,8 @@ References: fate#300964 sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-15/drivers/xen/blkfront/vcd.c 2010-04-28 16:17:50.000000000 +0200 -@@ -0,0 +1,505 @@ ++++ head-2010-12-06/drivers/xen/blkfront/vcd.c 2010-11-23 15:06:54.000000000 +0100 +@@ -0,0 +1,507 @@ +/******************************************************************************* + * vcd.c + * @@ -174,13 +174,13 @@ References: fate#300964 + struct vcd_generic_command *vgc; + + if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { -+ printk(KERN_WARNING "%s() Packet buffer length is to large \n", __func__); ++ pr_warning("%s() Packet buffer length is to large \n", __func__); + return -EIO; + } + + page = alloc_page(GFP_NOIO|__GFP_ZERO); + if (!page) { -+ printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); ++ pr_crit("%s() Unable to allocate page\n", __func__); + return -ENOMEM; + } + @@ -242,7 +242,7 @@ References: fate#300964 + + page = alloc_page(GFP_NOIO|__GFP_ZERO); + if (!page) { -+ printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); ++ pr_crit("%s() Unable to 
allocate page\n", __func__); + return -ENOMEM; + } + @@ -283,7 +283,7 @@ References: fate#300964 + + page = alloc_page(GFP_NOIO|__GFP_ZERO); + if (!page) { -+ printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); ++ pr_crit("%s() Unable to allocate page\n", __func__); + return -ENOMEM; + } + @@ -356,7 +356,7 @@ References: fate#300964 + + page = alloc_page(GFP_NOIO|__GFP_ZERO); + if (!page) { -+ printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); ++ pr_crit("%s() Unable to allocate page\n", __func__); + return -ENOMEM; + } + @@ -476,13 +476,15 @@ References: fate#300964 + ret = submit_cdrom_cmd(info, (struct packet_command *)arg); + break; + default: -+ /* Not supported, augment supported above if necessary */ -+ printk("%s():%d Unsupported IOCTL:%x \n", __func__, __LINE__, cmd); -+ ret = -ENOTTY; -+ break; ++out: ++ spin_unlock(&vcd->vcd_cdrom_info_lock); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ++ return blkif_ioctl(inode, file, cmd, arg); ++#else ++ return blkif_ioctl(bd, mode, cmd, arg); ++#endif + } + spin_unlock(&vcd->vcd_cdrom_info_lock); -+out: + return ret; +} + @@ -533,7 +535,7 @@ References: fate#300964 + /* Create new vcd_disk and fill in cdrom_info */ + vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL); + if (!vcd) { -+ printk(KERN_INFO "%s(): Unable to allocate vcd struct!\n", __func__); ++ pr_info("%s(): Unable to allocate vcd struct!\n", __func__); + goto out; + } + spin_lock_init(&vcd->vcd_cdrom_info_lock); @@ -548,8 +550,8 @@ References: fate#300964 + CDC_MRW | CDC_MRW_W | CDC_RAM); + + if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { -+ printk(KERN_WARNING "%s() Cannot register blkdev as a cdrom %d!\n", __func__, -+ gd->major); ++ pr_warning("%s() Cannot register blkdev as a cdrom %d!\n", ++ __func__, gd->major); + goto err_out; + } + gd->fops = &xencdrom_bdops; @@ -583,7 +585,7 @@ References: fate#300964 +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-15/include/xen/interface/io/cdromif.h 2010-03-25 14:38:07.000000000 +0100 ++++ head-2010-12-06/include/xen/interface/io/cdromif.h 2010-11-23 15:06:54.000000000 +0100 @@ -0,0 +1,120 @@ +/****************************************************************************** + * cdromif.h diff --git a/patches.xen/xen-blkif-protocol-fallback-hack b/patches.xen/xen-blkif-protocol-fallback-hack index ef35e69..2fa8016 100644 --- a/patches.xen/xen-blkif-protocol-fallback-hack +++ b/patches.xen/xen-blkif-protocol-fallback-hack @@ -5,9 +5,9 @@ Patch-mainline: never. See the comment below. Oh well. ---- head-2010-04-29.orig/drivers/xen/Kconfig 2010-03-31 14:08:31.000000000 +0200 -+++ head-2010-04-29/drivers/xen/Kconfig 2010-03-31 14:08:50.000000000 +0200 -@@ -30,6 +30,9 @@ config XEN_PRIVCMD +--- head-2011-02-17.orig/drivers/xen/Kconfig 2011-02-02 17:03:22.000000000 +0100 ++++ head-2011-02-17/drivers/xen/Kconfig 2011-02-24 15:23:15.000000000 +0100 +@@ -26,6 +26,9 @@ config XEN_PRIVCMD def_bool y depends on PROC_FS @@ -17,24 +17,24 @@ See the comment below. Oh well. 
config XEN_XENBUS_DEV def_bool y depends on PROC_FS -@@ -49,6 +52,7 @@ config XEN_BLKDEV_BACKEND +@@ -45,6 +48,7 @@ config XEN_BLKDEV_BACKEND tristate "Block-device backend driver" - depends on XEN_BACKEND + depends on BLOCK && XEN_BACKEND default XEN_BACKEND + select XEN_DOMCTL help The block-device backend driver allows the kernel to export its block devices to other guests via a high-performance shared-memory -@@ -58,6 +62,7 @@ config XEN_BLKDEV_TAP +@@ -54,6 +58,7 @@ config XEN_BLKDEV_TAP tristate "Block-device tap backend driver" - depends on XEN_BACKEND + depends on BLOCK && XEN_BACKEND default XEN_BACKEND + select XEN_DOMCTL help The block tap driver is an alternative to the block back driver and allows VM block requests to be redirected to userspace through ---- head-2010-04-29.orig/drivers/xen/blkback/xenbus.c 2010-03-25 14:37:55.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/xenbus.c 2010-03-25 14:37:59.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:03.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:11.000000000 +0100 @@ -21,6 +21,7 @@ #include #include @@ -43,7 +43,7 @@ See the comment below. Oh well. #undef DPRINTK #define DPRINTK(fmt, args...) \ -@@ -492,8 +493,10 @@ static int connect_ring(struct backend_i +@@ -498,8 +499,10 @@ static int connect_ring(struct backend_i be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", "%63s", protocol, NULL); @@ -56,8 +56,8 @@ See the comment below. Oh well. else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) ---- head-2010-04-29.orig/drivers/xen/blktap/xenbus.c 2010-04-29 10:15:25.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap/xenbus.c 2010-04-29 10:15:31.000000000 +0200 +--- head-2011-02-17.orig/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:00.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:14.000000000 +0100 @@ -39,6 +39,7 @@ #include #include @@ -66,7 +66,7 @@ See the comment below. Oh well. struct backend_info -@@ -433,8 +434,10 @@ static int connect_ring(struct backend_i +@@ -449,8 +450,10 @@ static int connect_ring(struct backend_i be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", "%63s", protocol, NULL); @@ -79,8 +79,8 @@ See the comment below. Oh well. else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) ---- head-2010-04-29.orig/drivers/xen/core/Makefile 2010-04-19 14:52:49.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/Makefile 2010-04-19 14:55:02.000000000 +0200 +--- head-2011-02-17.orig/drivers/xen/core/Makefile 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/Makefile 2010-04-19 14:55:02.000000000 +0200 @@ -12,3 +12,6 @@ obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o obj-$(CONFIG_SMP) += spinlock.o @@ -89,7 +89,7 @@ See the comment below. Oh well. 
+CFLAGS_domctl.o := -D__XEN_PUBLIC_XEN_H__ -D__XEN_PUBLIC_GRANT_TABLE_H__ +CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/domctl.c 2010-05-07 12:12:03.000000000 +0200 ++++ head-2011-02-17/drivers/xen/core/domctl.c 2010-10-11 10:31:06.000000000 +0200 @@ -0,0 +1,127 @@ +/* + * !!! dirty hack alert !!! @@ -177,8 +177,8 @@ See the comment below. Oh well. + domctl.v##ver.domain = domid; \ + ret = hypervisor_domctl(&domctl) ?: domctl.v##ver.address_size.size; \ + if (ret == 32 || ret == 64) { \ -+ printk("v" #ver " domctl worked ok: dom%d is %d-bit\n", \ -+ domid, ret); \ ++ pr_info("v" #ver " domctl worked ok: dom%d is %d-bit\n",\ ++ domid, ret); \ + return ret; \ + } \ +} while (0) @@ -196,8 +196,8 @@ See the comment below. Oh well. +#endif + + ret = BITS_PER_LONG; -+ printk("v%d...7 domctls failed, assuming dom%d is native: %d\n", -+ low, domid, ret); ++ pr_warning("v%d...%d domctls failed, assuming dom%d is native: %d\n", ++ low, XEN_DOMCTL_INTERFACE_VERSION, domid, ret); + + return ret; +} @@ -219,7 +219,7 @@ See the comment below. Oh well. + +MODULE_LICENSE("GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/domctl.h 2010-03-25 14:37:59.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/domctl.h 2010-03-25 14:37:59.000000000 +0100 @@ -0,0 +1,2 @@ +int xen_guest_address_size(int domid); +int xen_guest_blkif_protocol(int domid); diff --git a/patches.xen/xen-blktap-modular b/patches.xen/xen-blktap-modular new file mode 100644 index 0000000..7c39b0e --- /dev/null +++ b/patches.xen/xen-blktap-modular @@ -0,0 +1,47 @@ +From: ccoffing@novell.com +Subject: Retain backwards-compatible module name with CONFIG_XEN_BLKDEV_TAP=m +Patch-mainline: obsolete + +--- head-2011-02-17.orig/drivers/xen/blktap/Makefile 2007-06-12 13:13:44.000000000 +0200 ++++ head-2011-02-17/drivers/xen/blktap/Makefile 2009-05-29 12:39:04.000000000 +0200 +@@ -1,5 +1,5 @@ + LINUXINCLUDE += -I../xen/include/public/io + +-obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o ++obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o + +-xenblktap-y := xenbus.o interface.o blktap.o ++blktap-y := xenbus.o interface.o blocktap.o +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-02-17/drivers/xen/blktap/blocktap.c 2009-05-29 12:39:04.000000000 +0200 +@@ -0,0 +1 @@ ++#include "blktap.c" +--- head-2011-02-17.orig/drivers/xen/blktap2/Makefile 2009-05-29 10:25:53.000000000 +0200 ++++ head-2011-02-17/drivers/xen/blktap2/Makefile 2009-05-29 12:39:04.000000000 +0200 +@@ -1,3 +1,4 @@ +-obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap.o ++obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap2.o + +-blktap-objs := control.o ring.o wait_queue.o device.o request.o sysfs.o ++blktap2-y := control.o ring.o wait_queue.o device.o request.o ++blktap2-$(CONFIG_SYSFS) += sysfs.o +--- head-2011-02-17.orig/drivers/xen/blktap2/blktap.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap2/blktap.h 2011-02-24 15:24:27.000000000 +0100 +@@ -216,10 +216,17 @@ int blktap_ring_pause(struct blktap *); + int blktap_ring_resume(struct blktap *); + void blktap_ring_kick_user(struct blktap *); + ++#ifdef CONFIG_SYSFS + int blktap_sysfs_init(void); + void blktap_sysfs_free(void); + int blktap_sysfs_create(struct blktap *); + int blktap_sysfs_destroy(struct blktap *); ++#else ++static inline int blktap_sysfs_init(void) { return 0; } ++static inline void blktap_sysfs_exit(void) {} ++static inline int blktap_sysfs_create(struct 
blktap *tapdev) { return 0; } ++static inline int blktap_sysfs_destroy(struct blktap *tapdev) { return 0; } ++#endif + + int blktap_device_init(int *); + void blktap_device_free(void); diff --git a/patches.xen/xen-blktap-write-barriers b/patches.xen/xen-blktap-write-barriers index a774c26..9f7a6fc 100644 --- a/patches.xen/xen-blktap-write-barriers +++ b/patches.xen/xen-blktap-write-barriers @@ -2,9 +2,9 @@ From: kwolf@suse.de Subject: blktap: Write Barriers Patch-mainline: obsolete ---- head-2010-04-29.orig/drivers/xen/blktap/blktap.c 2010-04-29 09:52:39.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap/blktap.c 2010-04-29 10:16:10.000000000 +0200 -@@ -1388,6 +1388,9 @@ static int do_block_io_op(blkif_t *blkif +--- head-2011-02-17.orig/drivers/xen/blktap/blktap.c 2011-02-28 14:15:27.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap/blktap.c 2011-02-28 14:21:49.000000000 +0100 +@@ -1374,6 +1374,9 @@ static int do_block_io_op(blkif_t *blkif dispatch_rw_block_io(blkif, &req, pending_req); break; @@ -14,67 +14,41 @@ Patch-mainline: obsolete case BLKIF_OP_WRITE: blkif->st_wr_req++; dispatch_rw_block_io(blkif, &req, pending_req); -@@ -1419,7 +1422,7 @@ static void dispatch_rw_block_io(blkif_t - pending_req_t *pending_req) - { - extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); -- int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ; -+ int op, operation; - struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; - unsigned int nseg; - int ret, i, nr_sects = 0; -@@ -1431,6 +1434,21 @@ static void dispatch_rw_block_io(blkif_t - struct mm_struct *mm; - struct vm_area_struct *vma = NULL; +@@ -1432,7 +1435,7 @@ static void dispatch_rw_block_io(blkif_t + /* Check that number of segments is sane. */ + nseg = req->nr_segments; +- if ( unlikely(nseg == 0) || ++ if (unlikely(nseg == 0 && req->operation != BLKIF_OP_WRITE_BARRIER) || + unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) { + WPRINTK("Bad number of segments in request (%d)\n", nseg); + goto fail_response; +@@ -1458,8 +1461,13 @@ static void dispatch_rw_block_io(blkif_t + pending_req->nr_pages = nseg; + + flags = GNTMAP_host_map; +- if (req->operation == BLKIF_OP_WRITE) + switch (req->operation) { -+ case BLKIF_OP_READ: -+ operation = READ; -+ break; + case BLKIF_OP_WRITE: -+ operation = WRITE; -+ break; + case BLKIF_OP_WRITE_BARRIER: -+ operation = WRITE_BARRIER; + flags |= GNTMAP_readonly; + break; -+ default: -+ operation = 0; /* make gcc happy */ -+ BUG(); + } + - if (blkif->dev_num < 0 || blkif->dev_num >= MAX_TAP_DEV) - goto fail_response; - -@@ -1470,7 +1488,7 @@ static void dispatch_rw_block_io(blkif_t - - pending_req->blkif = blkif; - pending_req->id = req->id; -- pending_req->operation = operation; -+ pending_req->operation = req->operation; - pending_req->status = BLKIF_RSP_OKAY; - pending_req->nr_pages = nseg; op = 0; -@@ -1487,7 +1505,7 @@ static void dispatch_rw_block_io(blkif_t - kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i); - - flags = GNTMAP_host_map; -- if (operation == WRITE) -+ if (operation != READ) - flags |= GNTMAP_readonly; - gnttab_set_map_op(&map[op], kvaddr, flags, - req->seg[i].gref, blkif->domid); -@@ -1504,7 +1522,7 @@ static void dispatch_rw_block_io(blkif_t - - flags = GNTMAP_host_map | GNTMAP_application_map - | GNTMAP_contains_pte; -- if (operation == WRITE) -+ if (operation != READ) - flags |= GNTMAP_readonly; - gnttab_set_map_op(&map[op], ptep, flags, - req->seg[i].gref, blkif->domid); ---- head-2010-04-29.orig/drivers/xen/blktap/xenbus.c 2010-04-29 
10:15:31.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap/xenbus.c 2010-04-29 10:16:08.000000000 +0200 -@@ -402,7 +402,28 @@ static void connect(struct backend_info + mm = info->mm; + if (!xen_feature(XENFEAT_auto_translated_physmap)) +@@ -1622,6 +1630,7 @@ static void dispatch_rw_block_io(blkif_t + blkif->st_rd_sect += nr_sects; + break; + case BLKIF_OP_WRITE: ++ case BLKIF_OP_WRITE_BARRIER: + blkif->st_wr_sect += nr_sects; + break; + } +--- head-2011-02-17.orig/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:14.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:21.000000000 +0100 +@@ -418,7 +418,28 @@ static void connect(struct backend_info int err; struct xenbus_device *dev = be->dev; diff --git a/patches.xen/xen-blktap2-use-after-free b/patches.xen/xen-blktap2-use-after-free new file mode 100644 index 0000000..ed6d01c --- /dev/null +++ b/patches.xen/xen-blktap2-use-after-free @@ -0,0 +1,27 @@ +From: Dominic Curran +Subject: blktap: Fix reference to freed struct request +Patch-mainline: tbd + +The request will be freed by the call to __blktap_end_rq(), so rq->q +is invalid before spin_unlock_irq(). + +Signed-off-by: Dominic Curran +Acked-by: Daniel Stodden +Acked-by: jbeulich@novell.com + +--- head-2011-03-11.orig/drivers/xen/blktap2-new/device.c 2011-02-24 16:31:17.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2-new/device.c 2011-03-11 00:00:00.000000000 +0100 +@@ -135,9 +135,11 @@ __blktap_end_rq(struct request *rq, int + static inline void + blktap_end_rq(struct request *rq, int err) + { +- spin_lock_irq(rq->q->queue_lock); ++ struct request_queue *q = rq->q; ++ ++ spin_lock_irq(q->queue_lock); + __blktap_end_rq(rq, err); +- spin_unlock_irq(rq->q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + } + + void diff --git a/patches.xen/xen-block-backends-cleanup b/patches.xen/xen-block-backends-cleanup new file mode 100644 index 0000000..8a29c7c --- /dev/null +++ b/patches.xen/xen-block-backends-cleanup @@ -0,0 +1,242 @@ +From: jbeulich@novell.com +Subject: cleanup to blkback and blktap +Patch-mainline: n/a + +Remove unused/unneccessary fields of their pending_req_t structures, +and reduce the width of those structures' nr_pages field. + +Move loop-invariant grant table flags calculation out of loops (also +in scsiback). + +--- head-2011-02-17.orig/drivers/xen/blkback/blkback.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkback/blkback.c 2011-02-28 14:15:32.000000000 +0100 +@@ -73,10 +73,9 @@ module_param(debug_lvl, int, 0644); + typedef struct { + blkif_t *blkif; + u64 id; +- int nr_pages; + atomic_t pendcnt; ++ unsigned short nr_pages; + unsigned short operation; +- int status; + struct list_head free_list; + } pending_req_t; + +@@ -255,22 +254,24 @@ int blkif_schedule(void *arg) + + static void __end_block_io_op(pending_req_t *pending_req, int error) + { ++ int status = BLKIF_RSP_OKAY; ++ + /* An error fails the entire request. 
*/ + if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && + (error == -EOPNOTSUPP)) { + DPRINTK("blkback: write barrier op failed, not supported\n"); + blkback_barrier(XBT_NIL, pending_req->blkif->be, 0); +- pending_req->status = BLKIF_RSP_EOPNOTSUPP; ++ status = BLKIF_RSP_EOPNOTSUPP; + } else if (error) { + DPRINTK("Buffer not up-to-date at end of operation, " + "error=%d\n", error); +- pending_req->status = BLKIF_RSP_ERROR; ++ status = BLKIF_RSP_ERROR; + } + + if (atomic_dec_and_test(&pending_req->pendcnt)) { + fast_flush_area(pending_req); + make_response(pending_req->blkif, pending_req->id, +- pending_req->operation, pending_req->status); ++ pending_req->operation, status); + blkif_put(pending_req->blkif); + free_req(pending_req); + } +@@ -387,7 +388,6 @@ static void dispatch_rw_block_io(blkif_t + blkif_request_t *req, + pending_req_t *pending_req) + { +- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); + struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct phys_req preq; + struct { +@@ -395,6 +395,7 @@ static void dispatch_rw_block_io(blkif_t + } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + unsigned int nseg; + struct bio *bio = NULL; ++ uint32_t flags; + int ret, i; + int operation; + +@@ -428,12 +429,13 @@ static void dispatch_rw_block_io(blkif_t + pending_req->blkif = blkif; + pending_req->id = req->id; + pending_req->operation = req->operation; +- pending_req->status = BLKIF_RSP_OKAY; + pending_req->nr_pages = nseg; + +- for (i = 0; i < nseg; i++) { +- uint32_t flags; ++ flags = GNTMAP_host_map; ++ if (operation != READ) ++ flags |= GNTMAP_readonly; + ++ for (i = 0; i < nseg; i++) { + seg[i].nsec = req->seg[i].last_sect - + req->seg[i].first_sect + 1; + +@@ -442,9 +444,6 @@ static void dispatch_rw_block_io(blkif_t + goto fail_response; + preq.nr_sects += seg[i].nsec; + +- flags = GNTMAP_host_map; +- if (operation != READ) +- flags |= GNTMAP_readonly; + gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, + req->seg[i].gref, blkif->domid); + } +--- head-2011-02-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:19:26.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap/blktap.c 2011-02-28 14:15:27.000000000 +0100 +@@ -134,20 +134,14 @@ module_param(debug_lvl, int, 0644); + + /* + * Each outstanding request that we've passed to the lower device layers has a +- * 'pending_req' allocated to it. Each buffer_head that completes decrements +- * the pendcnt towards zero. When it hits zero, the specified domain has a +- * response queued for it, with the saved 'id' passed back. ++ * 'pending_req' allocated to it. 
+ */ + typedef struct { + blkif_t *blkif; + u64 id; + unsigned short mem_idx; +- int nr_pages; +- atomic_t pendcnt; +- unsigned short operation; +- int status; ++ unsigned short nr_pages; + struct list_head free_list; +- int inuse; + } pending_req_t; + + static pending_req_t *pending_reqs[MAX_PENDING_REQS]; +@@ -994,10 +988,8 @@ static pending_req_t* alloc_req(void) + list_del(&req->free_list); + } + +- if (req) { +- req->inuse = 1; ++ if (req) + alloc_pending_reqs++; +- } + spin_unlock_irqrestore(&pending_free_lock, flags); + + return req; +@@ -1011,7 +1003,6 @@ static void free_req(pending_req_t *req) + spin_lock_irqsave(&pending_free_lock, flags); + + alloc_pending_reqs--; +- req->inuse = 0; + if (mmap_lock && (req->mem_idx == mmap_alloc-1)) { + mmap_inuse--; + if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1); +@@ -1413,16 +1404,15 @@ static void dispatch_rw_block_io(blkif_t + blkif_request_t *req, + pending_req_t *pending_req) + { +- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); +- int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ; + struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; + unsigned int nseg; +- int ret, i, nr_sects = 0; ++ int ret, i, op, nr_sects = 0; + tap_blkif_t *info; + blkif_request_t *target; + unsigned int mmap_idx = pending_req->mem_idx; + unsigned int pending_idx = RTN_PEND_IDX(pending_req, mmap_idx); + unsigned int usr_idx; ++ uint32_t flags; + struct mm_struct *mm; + struct vm_area_struct *vma = NULL; + +@@ -1465,9 +1455,11 @@ static void dispatch_rw_block_io(blkif_t + + pending_req->blkif = blkif; + pending_req->id = req->id; +- pending_req->operation = operation; +- pending_req->status = BLKIF_RSP_OKAY; + pending_req->nr_pages = nseg; ++ ++ flags = GNTMAP_host_map; ++ if (req->operation == BLKIF_OP_WRITE) ++ flags |= GNTMAP_readonly; + op = 0; + mm = info->mm; + if (!xen_feature(XENFEAT_auto_translated_physmap)) +@@ -1476,14 +1468,10 @@ static void dispatch_rw_block_io(blkif_t + unsigned long uvaddr; + unsigned long kvaddr; + uint64_t ptep; +- uint32_t flags; + + uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i); + kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i); + +- flags = GNTMAP_host_map; +- if (operation == WRITE) +- flags |= GNTMAP_readonly; + gnttab_set_map_op(&map[op], kvaddr, flags, + req->seg[i].gref, blkif->domid); + op++; +@@ -1497,11 +1485,9 @@ static void dispatch_rw_block_io(blkif_t + goto fail_flush; + } + +- flags = GNTMAP_host_map | GNTMAP_application_map +- | GNTMAP_contains_pte; +- if (operation == WRITE) +- flags |= GNTMAP_readonly; +- gnttab_set_map_op(&map[op], ptep, flags, ++ gnttab_set_map_op(&map[op], ptep, ++ flags | GNTMAP_application_map ++ | GNTMAP_contains_pte, + req->seg[i].gref, blkif->domid); + op++; + } +@@ -1631,10 +1617,14 @@ static void dispatch_rw_block_io(blkif_t + wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */ + info->ufe_ring.req_prod_pvt++; + +- if (operation == READ) ++ switch (req->operation) { ++ case BLKIF_OP_READ: + blkif->st_rd_sect += nr_sects; +- else if (operation == WRITE) ++ break; ++ case BLKIF_OP_WRITE: + blkif->st_wr_sect += nr_sects; ++ break; ++ } + + return; + +--- head-2011-02-17.orig/drivers/xen/scsiback/scsiback.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-02-17/drivers/xen/scsiback/scsiback.c 2011-02-28 14:51:23.000000000 +0100 +@@ -272,14 +272,14 @@ static int scsiback_gnttab_data_map(vscs + + sg_init_table(pending_req->sgl, nr_segments); + +- for (i = 0; i < nr_segments; i++) { +- flags = GNTMAP_host_map; +- if 
(write) +- flags |= GNTMAP_readonly; ++ flags = GNTMAP_host_map; ++ if (write) ++ flags |= GNTMAP_readonly; ++ ++ for (i = 0; i < nr_segments; i++) + gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, + ring_req->seg[i].gref, + info->domid); +- } + + err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments); + BUG_ON(err); diff --git a/patches.xen/xen-clockevents b/patches.xen/xen-clockevents index 3e1de62..25652ae 100644 --- a/patches.xen/xen-clockevents +++ b/patches.xen/xen-clockevents @@ -4,9 +4,9 @@ Patch-mainline: n/a Once validated this could be merged into the 2.6.?? patch. ---- head-2010-05-12.orig/arch/x86/Kconfig 2010-03-25 16:41:03.000000000 +0100 -+++ head-2010-05-12/arch/x86/Kconfig 2010-03-25 14:39:15.000000000 +0100 -@@ -80,7 +80,6 @@ config CLOCKSOURCE_WATCHDOG +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-02-01 16:43:32.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-02-02 15:09:52.000000000 +0100 +@@ -90,7 +90,6 @@ config CLOCKSOURCE_WATCHDOG config GENERIC_CLOCKEVENTS def_bool y @@ -14,19 +14,65 @@ Once validated this could be merged into the 2.6.?? patch. config GENERIC_CLOCKEVENTS_BROADCAST def_bool y ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2009-11-23 10:45:08.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:39:15.000000000 +0100 -@@ -72,8 +72,6 @@ extern start_info_t *xen_start_info; - #define init_hypervisor(c) ((void)((c)->x86_hyper_vendor = X86_HYPER_VENDOR_XEN)) +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-02 15:09:52.000000000 +0100 +@@ -74,7 +74,6 @@ extern start_info_t *xen_start_info; #define init_hypervisor_platform() init_hypervisor(&boot_cpu_data) + DECLARE_PER_CPU(struct vcpu_runstate_info, runstate); -struct vcpu_runstate_info *setup_runstate_area(unsigned int cpu); -- + #define vcpu_running(cpu) (per_cpu(runstate.state, cpu) == RUNSTATE_running) + /* arch/xen/kernel/evtchn.c */ - /* Force a proper event-channel callback from Xen. */ - void force_evtchn_callback(void); ---- head-2010-05-12.orig/arch/x86/kernel/time-xen.c 2010-05-12 09:14:09.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/time-xen.c 2010-05-12 09:14:39.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-02 15:09:52.000000000 +0100 +@@ -4,6 +4,8 @@ + #include + + #ifndef __ASSEMBLY__ ++#include ++#include + /* + * The use of 'barrier' in the following reflects their use as local-lock + * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following +@@ -43,10 +45,6 @@ do { \ + force_evtchn_callback(); \ + } while (0) + +-void xen_safe_halt(void); +- +-void xen_halt(void); +- + #define arch_local_save_flags() xen_save_fl() + + #define arch_local_irq_restore(flags) xen_restore_fl(flags) +@@ -59,19 +57,16 @@ void xen_halt(void); + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +-static inline void arch_safe_halt(void) +-{ +- xen_safe_halt(); +-} ++#define arch_safe_halt HYPERVISOR_block + + /* + * Used when interrupts are already enabled or to + * shutdown the processor: + */ +-static inline void halt(void) +-{ +- xen_halt(); +-} ++#define halt() VOID(irqs_disabled() \ ++ ? 
HYPERVISOR_vcpu_op(VCPUOP_down, \ ++ smp_processor_id(), NULL) \ ++ : 0) + + /* + * For spinlocks, etc: +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2010-10-05 16:57:34.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-02-02 15:09:52.000000000 +0100 @@ -25,7 +25,7 @@ #include #include @@ -50,8 +96,8 @@ Once validated this could be merged into the 2.6.?? patch. +static u64 jiffies_bias, system_time_bias; /* Current runstate of each CPU (updated automatically by the hypervisor). */ - static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); -@@ -69,10 +63,6 @@ static DEFINE_PER_CPU(struct vcpu_runsta + DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); +@@ -69,16 +63,6 @@ DEFINE_PER_CPU(struct vcpu_runstate_info /* Must be signed, as it's compared with s64 quantities which can be -ve. */ #define NS_PER_TICK (1000000000LL/HZ) @@ -59,10 +105,16 @@ Once validated this could be merged into the 2.6.?? patch. - .period_ns = NS_PER_TICK -}; - - static void __clock_was_set(struct work_struct *unused) - { - clock_was_set(); -@@ -205,6 +195,11 @@ static u64 get_nsec_offset(struct shadow +-/* +- * GCC 4.3 can turn loops over an induction variable into division. We do +- * not support arbitrary 64-bit division, and so must break the induction. +- */ +-#define clobber_induction_variable(v) asm ( "" : "+r" (v) ) +- + /* Does this guest OS track Xen time, or set its wall clock independently? */ + static int independent_wallclock = 0; + static int __init __independent_wallclock(char *str) +@@ -185,6 +169,11 @@ static u64 get_nsec_offset(struct shadow return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); } @@ -71,46 +123,42 @@ Once validated this could be merged into the 2.6.?? patch. + return (jiffies_64 - jiffies_bias) * NS_PER_TICK + system_time_bias; +} + - static void __update_wallclock(time_t sec, long nsec) + static void update_wallclock(void) { - long wtm_nsec, xtime_nsec; -@@ -212,7 +207,7 @@ static void __update_wallclock(time_t se - u64 tmp, wc_nsec; - - /* Adjust wall-clock time base. */ -- wc_nsec = processed_system_time; -+ wc_nsec = processed_system_time(); - wc_nsec += sec * (u64)NSEC_PER_SEC; - wc_nsec += nsec; - -@@ -244,6 +239,17 @@ static void update_wallclock(void) - __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec); + static DEFINE_MUTEX(uwc_mutex); +@@ -201,7 +190,7 @@ static void update_wallclock(void) + } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version)); + + if (!independent_wallclock) { +- u64 tmp = processed_system_time; ++ u64 tmp = processed_system_time(); + long nsec = do_div(tmp, NSEC_PER_SEC); + struct timespec tv; + +@@ -219,6 +208,13 @@ static void _update_wallclock(struct wor } + static DECLARE_WORK(update_wallclock_work, _update_wallclock); +void xen_check_wallclock_update(void) +{ -+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) { -+ write_seqlock(&xtime_lock); -+ update_wallclock(); -+ write_sequnlock(&xtime_lock); -+ if (keventd_up()) -+ schedule_work(&clock_was_set_work); -+ } ++ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version ++ && keventd_up()) ++ schedule_work(&update_wallclock_work); +} + /* * Reads a consistent set of time-base values from Xen, into a shadow data * area. 
-@@ -309,7 +315,7 @@ static void sync_xen_wallclock(unsigned +@@ -285,7 +281,7 @@ static void sync_xen_wallclock(unsigned op.cmd = XENPF_settime; - op.u.settime.secs = sec; - op.u.settime.nsecs = nsec; + op.u.settime.secs = now.tv_sec; + op.u.settime.nsecs = now.tv_nsec; - op.u.settime.system_time = processed_system_time; + op.u.settime.system_time = processed_system_time(); WARN_ON(HYPERVISOR_platform_op(&op)); update_wallclock(); -@@ -320,7 +326,7 @@ static void sync_xen_wallclock(unsigned +@@ -294,7 +290,7 @@ static void sync_xen_wallclock(unsigned mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ); } @@ -119,7 +167,7 @@ Once validated this could be merged into the 2.6.?? patch. { unsigned int cpu = get_cpu(); struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); -@@ -344,7 +350,7 @@ static unsigned long long local_clock(vo +@@ -318,7 +314,7 @@ static unsigned long long local_clock(vo /* * Runstate accounting */ @@ -128,7 +176,7 @@ Once validated this could be merged into the 2.6.?? patch. { u64 state_time; struct vcpu_runstate_info *state; -@@ -380,7 +386,7 @@ unsigned long long sched_clock(void) +@@ -354,7 +350,7 @@ unsigned long long sched_clock(void) */ preempt_disable(); @@ -137,7 +185,7 @@ Once validated this could be merged into the 2.6.?? patch. get_runstate_snapshot(&runstate); -@@ -423,140 +429,6 @@ unsigned long profile_pc(struct pt_regs +@@ -397,138 +393,6 @@ unsigned long profile_pc(struct pt_regs } EXPORT_SYMBOL(profile_pc); @@ -202,14 +250,12 @@ Once validated this could be merged into the 2.6.?? patch. - do_timer(delta); - } - -- if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) { -- update_wallclock(); -- if (keventd_up()) -- schedule_work(&clock_was_set_work); -- } -- - write_sequnlock(&xtime_lock); - +- if (shadow_tv_version != HYPERVISOR_shared_info->wc_version +- && keventd_up()) +- schedule_work(&update_wallclock_work); +- - /* - * Account stolen ticks. - * ensures that the ticks are accounted as stolen. @@ -278,7 +324,7 @@ Once validated this could be merged into the 2.6.?? patch. void mark_tsc_unstable(char *reason) { #ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */ -@@ -565,24 +437,13 @@ void mark_tsc_unstable(char *reason) +@@ -537,24 +401,13 @@ void mark_tsc_unstable(char *reason) } EXPORT_SYMBOL_GPL(mark_tsc_unstable); @@ -304,7 +350,7 @@ Once validated this could be merged into the 2.6.?? patch. if (unlikely((s64)(ret - last) < 0)) { if (last - ret > permitted_clock_jitter -@@ -608,37 +469,28 @@ static cycle_t xen_clocksource_read(stru +@@ -580,37 +433,28 @@ static cycle_t xen_clocksource_read(stru last = cur; } #else @@ -351,7 +397,7 @@ Once validated this could be merged into the 2.6.?? patch. } static struct clocksource clocksource_xen = { -@@ -683,7 +535,7 @@ void xen_read_persistent_clock(struct ti +@@ -655,7 +499,7 @@ void xen_read_persistent_clock(struct ti rmb(); } while ((s->wc_version & 1) | (version ^ s->wc_version)); @@ -360,7 +406,7 @@ Once validated this could be merged into the 2.6.?? patch. do_div(delta, NSEC_PER_SEC); ts->tv_sec = delta; -@@ -698,42 +550,17 @@ int xen_update_persistent_clock(void) +@@ -670,24 +514,10 @@ int xen_update_persistent_clock(void) return 0; } @@ -378,9 +424,15 @@ Once validated this could be merged into the 2.6.?? patch. 
- BUG_ON(timer_irq < 0); -} - - void __init time_init(void) + static void __init _late_time_init(void) { - init_cpu_khz(); + update_wallclock(); +- setup_cpu0_timer_irq(); ++ xen_clockevents_init(); + } + + void __init time_init(void) +@@ -696,22 +526,11 @@ void __init time_init(void) printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000); @@ -406,16 +458,7 @@ Once validated this could be merged into the 2.6.?? patch. clocksource_register(&clocksource_xen); -@@ -742,7 +569,7 @@ void __init time_init(void) - use_tsc_delay(); - - /* Cannot request_irq() until kmem is initialised. */ -- late_time_init = setup_cpu0_timer_irq; -+ late_time_init = xen_clockevents_init; - } - - /* Convert jiffies to system time. */ -@@ -758,13 +585,13 @@ u64 jiffies_to_st(unsigned long j) +@@ -737,13 +556,13 @@ u64 jiffies_to_st(unsigned long j) if (delta < 1) { /* Triggers in some wrap-around cases, but that's okay: * we just end up with a shorter timeout. */ @@ -431,7 +474,7 @@ Once validated this could be merged into the 2.6.?? patch. } } while (read_seqretry(&xtime_lock, seq)); -@@ -772,73 +599,10 @@ u64 jiffies_to_st(unsigned long j) +@@ -751,122 +570,6 @@ u64 jiffies_to_st(unsigned long j) } EXPORT_SYMBOL(jiffies_to_st); @@ -496,19 +539,20 @@ Once validated this could be merged into the 2.6.?? patch. - cpumask_clear_cpu(cpu, nohz_cpu_mask); -} - - void xen_safe_halt(void) - { +-void xen_safe_halt(void) +-{ - stop_hz_timer(); - /* Blocking includes an implicit local_irq_enable(). */ - HYPERVISOR_block(); +- /* Blocking includes an implicit local_irq_enable(). */ +- HYPERVISOR_block(); - start_hz_timer(); - } - EXPORT_SYMBOL(xen_safe_halt); - -@@ -849,47 +613,6 @@ void xen_halt(void) - } - EXPORT_SYMBOL(xen_halt); - +-} +- +-void xen_halt(void) +-{ +- if (irqs_disabled()) +- VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL)); +-} +- -#ifdef CONFIG_SMP -int __cpuinit local_setup_timer(unsigned int cpu) -{ @@ -553,11 +597,11 @@ Once validated this could be merged into the 2.6.?? patch. #ifdef CONFIG_CPU_FREQ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) ---- head-2010-05-12.orig/drivers/xen/Kconfig 2010-03-31 14:10:55.000000000 +0200 -+++ head-2010-05-12/drivers/xen/Kconfig 2010-03-31 14:11:27.000000000 +0200 -@@ -355,9 +355,6 @@ config HAVE_IRQ_IGNORE_UNHANDLED - config IRQ_PER_CPU - bool +--- head-2011-03-11.orig/drivers/xen/Kconfig 2011-02-03 14:49:15.000000000 +0100 ++++ head-2011-03-11/drivers/xen/Kconfig 2011-02-17 10:32:19.000000000 +0100 +@@ -356,9 +356,6 @@ endmenu + config HAVE_IRQ_IGNORE_UNHANDLED + def_bool y -config NO_IDLE_HZ - def_bool y @@ -565,8 +609,8 @@ Once validated this could be merged into the 2.6.?? patch. config ARCH_HAS_WALK_MEMORY def_bool y depends on X86 ---- head-2010-05-12.orig/drivers/xen/core/Makefile 2010-04-19 14:55:02.000000000 +0200 -+++ head-2010-05-12/drivers/xen/core/Makefile 2010-03-25 14:39:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/Makefile 2010-04-19 14:55:02.000000000 +0200 ++++ head-2011-03-11/drivers/xen/core/Makefile 2011-02-02 15:09:52.000000000 +0100 @@ -12,6 +12,7 @@ obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o obj-$(CONFIG_SMP) += spinlock.o @@ -576,7 +620,7 @@ Once validated this could be merged into the 2.6.?? patch. 
CFLAGS_domctl.o := -D__XEN_PUBLIC_XEN_H__ -D__XEN_PUBLIC_GRANT_TABLE_H__ CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/drivers/xen/core/clockevents.c 2010-03-25 14:39:15.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/clockevents.c 2011-02-02 15:09:52.000000000 +0100 @@ -0,0 +1,298 @@ +/* + * Xen clockevent functions @@ -610,7 +654,7 @@ Once validated this could be merged into the 2.6.?? patch. +#include +#include +#include -+#include ++#include +#include +#include +#include @@ -769,7 +813,7 @@ Once validated this could be merged into the 2.6.?? patch. + + if (stolen >= NS_PER_TICK) + account_steal_ticks(div_u64_rem(stolen, NS_PER_TICK, -+ &__get_cpu_var(xen_residual_stolen))); ++ &__get_cpu_var(xen_residual_stolen))); + else + percpu_write(xen_residual_stolen, stolen > 0 ? stolen : 0); + @@ -778,7 +822,7 @@ Once validated this could be merged into the 2.6.?? patch. + + if (blocked >= NS_PER_TICK) + account_idle_ticks(div_u64_rem(blocked, NS_PER_TICK, -+ &__get_cpu_var(xen_residual_blocked))); ++ &__get_cpu_var(xen_residual_blocked))); + else + percpu_write(xen_residual_blocked, blocked > 0 ? blocked : 0); + @@ -876,8 +920,26 @@ Once validated this could be merged into the 2.6.?? patch. + + xen_setup_cpu_clockevents(); +} ---- head-2010-05-12.orig/drivers/xen/core/machine_reboot.c 2009-12-18 14:19:13.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/machine_reboot.c 2010-03-25 14:39:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2011-02-16 08:29:06.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-02-02 15:09:52.000000000 +0100 +@@ -382,6 +382,7 @@ asmlinkage void __irq_entry evtchn_do_up + wmb(); + #endif + ++#ifndef CONFIG_NO_HZ + /* + * Handle timer interrupts before all others, so that all + * hardirq handlers see an up-to-date system time even if we +@@ -407,6 +408,7 @@ asmlinkage void __irq_entry evtchn_do_up + BUG(); + } + } ++#endif /* CONFIG_NO_HZ */ + + l1 = xchg(&vcpu_info->evtchn_pending_sel, 0); + +--- head-2011-03-11.orig/drivers/xen/core/machine_reboot.c 2011-01-13 16:21:42.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/machine_reboot.c 2011-02-02 15:09:52.000000000 +0100 @@ -13,6 +13,7 @@ #include #include @@ -886,7 +948,7 @@ Once validated this could be merged into the 2.6.?? patch. #include #include #include -@@ -163,10 +164,12 @@ static int take_machine_down(void *_susp +@@ -158,10 +159,12 @@ static int take_machine_down(void *_susp } else BUG_ON(suspend_cancelled > 0); suspend->resume_notifier(suspend_cancelled); @@ -901,9 +963,9 @@ Once validated this could be merged into the 2.6.?? patch. if (!suspend_cancelled) { #ifdef __x86_64__ /* ---- head-2010-05-12.orig/drivers/xen/core/smpboot.c 2010-03-19 15:20:27.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/smpboot.c 2010-03-25 14:39:15.000000000 +0100 -@@ -19,6 +19,7 @@ +--- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-03-03 16:14:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-02-07 12:28:20.000000000 +0100 +@@ -18,6 +18,7 @@ #include #include #include @@ -911,26 +973,26 @@ Once validated this could be merged into the 2.6.?? patch. 
#include #include #include -@@ -206,6 +207,7 @@ static void __cpuinit cpu_bringup(void) - identify_secondary_cpu(¤t_cpu_data); +@@ -146,6 +147,7 @@ static void __cpuinit cpu_bringup(void) + identify_secondary_cpu(__this_cpu_ptr(&cpu_info)); touch_softlockup_watchdog(); preempt_disable(); + xen_setup_cpu_clockevents(); local_irq_enable(); } ---- head-2010-05-12.orig/drivers/xen/core/spinlock.c 2010-02-24 12:38:54.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/spinlock.c 2010-03-25 14:39:15.000000000 +0100 -@@ -10,6 +10,7 @@ +--- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:18:17.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:18:37.000000000 +0100 +@@ -13,6 +13,7 @@ #include - #include #include + #include +#include #include - #ifdef TICKET_SHIFT + struct spinning { --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/clock.h 2010-03-25 14:39:15.000000000 +0100 ++++ head-2011-03-11/include/xen/clock.h 2011-02-02 15:09:52.000000000 +0100 @@ -0,0 +1,19 @@ +#ifndef __XEN_CPU_CLOCK_H__ +#define __XEN_CPU_CLOCK_H__ @@ -951,25 +1013,3 @@ Once validated this could be merged into the 2.6.?? patch. +#endif + +#endif /* __XEN_CPU_CLOCK_H__ */ ---- head-2010-05-12.orig/kernel/hrtimer.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/kernel/hrtimer.c 2010-03-25 14:39:15.000000000 +0100 -@@ -1108,7 +1108,7 @@ ktime_t hrtimer_get_remaining(const stru - } - EXPORT_SYMBOL_GPL(hrtimer_get_remaining); - --#if defined(CONFIG_NO_HZ) || defined(CONFIG_NO_IDLE_HZ) -+#ifdef CONFIG_NO_HZ - /** - * hrtimer_get_next_event - get the time until next expiry event - * ---- head-2010-05-12.orig/kernel/timer.c 2010-04-15 10:05:03.000000000 +0200 -+++ head-2010-05-12/kernel/timer.c 2010-04-15 11:42:54.000000000 +0200 -@@ -1044,7 +1044,7 @@ static inline void __run_timers(struct t - spin_unlock_irq(&base->lock); - } - --#if defined(CONFIG_NO_HZ) || defined(CONFIG_NO_IDLE_HZ) -+#ifdef CONFIG_NO_HZ - /* - * Find out when the next timer event is due to happen. This - * is used on S/390 to stop all activity when a CPU is idle. diff --git a/patches.xen/xen-configurable-guest-devices b/patches.xen/xen-configurable-guest-devices index e4784de..65a2e81 100644 --- a/patches.xen/xen-configurable-guest-devices +++ b/patches.xen/xen-configurable-guest-devices @@ -7,20 +7,23 @@ value). Similarly, allow the number of simultaneous transmits in netback to be configurable. 
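The interrupt-side hunks below replace the hard-coded budget of 256 dynamic IRQs with one derived from the new CONFIG_XEN_NR_GUEST_DEVICES option: 64 + CONFIG_XEN_NR_GUEST_DEVICES without SPARSE_IRQ (CPU_VECTOR_LIMIT + CONFIG_XEN_NR_GUEST_DEVICES with it), and arch_probe_nr_irqs() now starts from the same base. A minimal stand-alone sketch of that arithmetic follows; 256 is only an assumed example value for the option, not its Kconfig default.

/* Sketch only: mirrors the NR_DYNIRQS arithmetic in the hunks below.
 * 256 is an assumed example for CONFIG_XEN_NR_GUEST_DEVICES. */
#include <stdio.h>

#define EXAMPLE_NR_GUEST_DEVICES 256

int main(void)
{
	/* non-SPARSE_IRQ case in irq_vectors.h: previously a fixed 256 */
	unsigned int nr_dynirqs = 64 + EXAMPLE_NR_GUEST_DEVICES;

	/* arch_probe_nr_irqs() in evtchn.c seeds its count from the same expression */
	printf("NR_DYNIRQS = %u\n", nr_dynirqs);
	return 0;
}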
---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-30 17:15:14.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-04-15 11:44:09.000000000 +0200 -@@ -95,7 +95,7 @@ extern int nr_pirqs; - #endif +--- head-2011-02-08.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:50:13.000000000 +0100 ++++ head-2011-02-08/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-16 08:29:39.000000000 +0100 +@@ -97,9 +97,9 @@ extern int nr_pirqs; #define DYNIRQ_BASE (PIRQ_BASE + nr_pirqs) + #ifdef CONFIG_SPARSE_IRQ +-#define NR_DYNIRQS CPU_VECTOR_LIMIT ++#define NR_DYNIRQS (CPU_VECTOR_LIMIT + CONFIG_XEN_NR_GUEST_DEVICES) + #else -#define NR_DYNIRQS 256 +#define NR_DYNIRQS (64 + CONFIG_XEN_NR_GUEST_DEVICES) + #endif #define NR_IRQS (NR_PIRQS + NR_DYNIRQS) - ---- head-2010-04-15.orig/drivers/xen/Kconfig 2010-03-31 14:11:27.000000000 +0200 -+++ head-2010-04-15/drivers/xen/Kconfig 2010-03-31 14:11:36.000000000 +0200 -@@ -98,6 +98,15 @@ config XEN_NETDEV_BACKEND +--- head-2011-02-08.orig/drivers/xen/Kconfig 2011-02-03 14:49:25.000000000 +0100 ++++ head-2011-02-08/drivers/xen/Kconfig 2010-11-26 13:38:08.000000000 +0100 +@@ -94,6 +94,15 @@ config XEN_NETDEV_BACKEND network devices to other guests via a high-performance shared-memory interface. @@ -36,7 +39,7 @@ configurable. config XEN_NETDEV_PIPELINED_TRANSMITTER bool "Pipelined transmitter (DANGEROUS)" depends on XEN_NETDEV_BACKEND -@@ -309,6 +318,16 @@ config XEN_SYSFS +@@ -305,6 +314,16 @@ config XEN_SYSFS help Xen hypervisor attributes will show up under /sys/hypervisor/. @@ -53,9 +56,20 @@ configurable. choice prompt "Xen version compatibility" default XEN_COMPAT_030002_AND_LATER ---- head-2010-04-15.orig/drivers/xen/netback/netback.c 2010-01-04 13:31:26.000000000 +0100 -+++ head-2010-04-15/drivers/xen/netback/netback.c 2010-03-25 14:39:26.000000000 +0100 -@@ -71,7 +71,7 @@ static DECLARE_TASKLET(net_rx_tasklet, n +--- head-2011-02-08.orig/drivers/xen/core/evtchn.c 2011-02-02 15:09:52.000000000 +0100 ++++ head-2011-02-08/drivers/xen/core/evtchn.c 2011-02-16 08:29:29.000000000 +0100 +@@ -1766,7 +1766,7 @@ EXPORT_SYMBOL_GPL(nr_pirqs); + + int __init arch_probe_nr_irqs(void) + { +- int nr = 256, nr_irqs_gsi; ++ int nr = 64 + CONFIG_XEN_NR_GUEST_DEVICES, nr_irqs_gsi; + + if (is_initial_xendomain()) { + nr_irqs_gsi = NR_IRQS_LEGACY; +--- head-2011-02-08.orig/drivers/xen/netback/netback.c 2011-02-09 16:06:37.000000000 +0100 ++++ head-2011-02-08/drivers/xen/netback/netback.c 2011-01-03 13:29:58.000000000 +0100 +@@ -74,7 +74,7 @@ static DECLARE_TASKLET(net_rx_tasklet, n static struct timer_list net_timer; static struct timer_list netbk_tx_pending_timer; @@ -64,7 +78,7 @@ configurable. 
static struct sk_buff_head rx_queue; -@@ -1265,6 +1265,7 @@ static void net_tx_action(unsigned long +@@ -1263,6 +1263,7 @@ static void net_tx_action(unsigned long net_tx_action_dealloc(); mop = tx_map_ops; diff --git a/patches.xen/xen-cpufreq-report b/patches.xen/xen-cpufreq-report index daaf962..aca69d2 100644 --- a/patches.xen/xen-cpufreq-report +++ b/patches.xen/xen-cpufreq-report @@ -2,12 +2,12 @@ From: jbeulich@novell.com Subject: make /proc/cpuinfo track CPU speed Patch-mainline: obsolete ---- head-2010-03-15.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-03-16 10:37:21.000000000 +0100 -+++ head-2010-03-15/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-03-16 10:42:20.000000000 +0100 -@@ -206,3 +206,14 @@ void arch_acpi_processor_init_extcntl(co - *ops = &xen_extcntl_ops; +--- head-2010-10-05.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-10-06 17:02:47.000000000 +0200 ++++ head-2010-10-05/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-10-06 17:04:43.000000000 +0200 +@@ -208,3 +208,14 @@ static int __init init_extcntl( + return 0; } - EXPORT_SYMBOL(arch_acpi_processor_init_extcntl); + arch_initcall(init_extcntl); + +unsigned int cpufreq_quick_get(unsigned int cpu) +{ @@ -19,9 +19,9 @@ Patch-mainline: obsolete + + return HYPERVISOR_platform_op(&op) == 0 ? op.u.get_cpu_freq.freq : 0; +} ---- head-2010-03-15.orig/include/linux/cpufreq.h 2010-03-16 10:34:57.000000000 +0100 -+++ head-2010-03-15/include/linux/cpufreq.h 2010-01-25 13:46:23.000000000 +0100 -@@ -303,7 +303,7 @@ static inline unsigned int cpufreq_get(u +--- head-2010-10-05.orig/include/linux/cpufreq.h 2010-10-06 16:54:08.000000000 +0200 ++++ head-2010-10-05/include/linux/cpufreq.h 2010-08-25 14:41:45.000000000 +0200 +@@ -328,7 +328,7 @@ static inline unsigned int cpufreq_get(u #endif /* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ @@ -30,8 +30,8 @@ Patch-mainline: obsolete unsigned int cpufreq_quick_get(unsigned int cpu); #else static inline unsigned int cpufreq_quick_get(unsigned int cpu) ---- head-2010-03-15.orig/include/xen/interface/platform.h 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-03-15/include/xen/interface/platform.h 2010-01-25 13:46:23.000000000 +0100 +--- head-2010-10-05.orig/include/xen/interface/platform.h 2010-01-04 11:56:34.000000000 +0100 ++++ head-2010-10-05/include/xen/interface/platform.h 2010-06-22 15:48:58.000000000 +0200 @@ -355,6 +355,14 @@ struct xenpf_mem_hotadd uint32_t flags; }; diff --git a/patches.xen/xen-cxgb3 b/patches.xen/xen-cxgb3 new file mode 100644 index 0000000..4f777f9 --- /dev/null +++ b/patches.xen/xen-cxgb3 @@ -0,0 +1,151 @@ +From: http://xenbits.xen.org/XCP/linux-2.6.32.pq.hg?rev/20e4634f7b7b +Subject: apply xen specific patch to the Chelsio ethernet drivers +as a result of their feedback from the Cowly Beta +Patch-mainline: n/a + +* Disable LRO by default. The kernel.org driver does enable it, but it +does not play very well with the bridging layer. (Please note that the +kernel.org driver does now implement GRO) + +* Allocate SKBs instead of pages for incoming data. Using pages causes +traffic to stall when the VMs use large MTUs. + +* Disable lazy completion to Tx buffers. cxgb3 completion mechanism +coalesces TX completion notifications, but this breaks the VM's +behavior: The VMs networking stacks rely on skb to be freed in the +hypervisor to open the Tx buffer. 
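The "lazy completion" being disabled here is visible in the sge.c hunk further down: the stock driver requests a TX work-request completion only when bit 3 of q->unacked is set, i.e. roughly once per eight descriptors, whereas the Xen build forces F_WR_COMPL on every descriptor so the guest's skbs (and hence its socket send-buffer space) are released promptly. A small stand-alone illustration of that bit trick; S_WR_COMPL's real definition lives in the cxgb3 headers, and the value 21 below is only an assumed stand-in so the sketch compiles on its own.

/* Illustration of the completion-request policy changed in the sge.c hunk below.
 * S_WR_COMPL / F_WR_COMPL are real cxgb3 macros; 21 is assumed here only to
 * keep the example self-contained. */
#include <stdio.h>

#define S_WR_COMPL 21
#define F_WR_COMPL (1U << S_WR_COMPL)

int main(void)
{
	unsigned int unacked = 0;
	int desc;

	for (desc = 0; desc < 20; desc++) {
		unacked += 1;		/* one descriptor queued per iteration */
		/* upstream: bit 3 of unacked is shifted into the WR_COMPL position,
		 * so the flag is requested only on every eighth descriptor */
		unsigned int lazy = (unacked & 8) << (S_WR_COMPL - 3);
		unacked &= 7;
		printf("desc %2d: lazy=%s xen=COMPL\n", desc,
		       lazy == F_WR_COMPL ? "COMPL" : "-");
	}
	return 0;
}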
+ +Acked-by: bphilips@suse.de + +--- head-2011-01-30.orig/drivers/net/cxgb3/cxgb3_main.c 2011-01-31 12:42:17.000000000 +0100 ++++ head-2011-01-30/drivers/net/cxgb3/cxgb3_main.c 2011-02-03 14:45:48.000000000 +0100 +@@ -1923,7 +1923,11 @@ static int set_rx_csum(struct net_device + } else { + int i; + ++#ifndef CONFIG_XEN + p->rx_offload &= ~(T3_RX_CSUM | T3_LRO); ++#else ++ p->rx_offload &= ~(T3_RX_CSUM); ++#endif + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) + set_qset_lro(dev, i, 0); + } +@@ -3298,7 +3302,11 @@ static int __devinit init_one(struct pci + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; ++#ifndef CONFIG_XEN + pi->rx_offload = T3_RX_CSUM | T3_LRO; ++#else ++ pi->rx_offload = T3_RX_CSUM; ++#endif + pi->port_id = i; + netif_carrier_off(netdev); + netdev->irq = pdev->irq; +--- head-2011-01-30.orig/drivers/net/cxgb3/sge.c 2011-01-05 01:50:19.000000000 +0100 ++++ head-2011-01-30/drivers/net/cxgb3/sge.c 2011-02-03 14:45:48.000000000 +0100 +@@ -58,11 +58,24 @@ + * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs + * directly. + */ ++#ifndef CONFIG_XEN + #define FL0_PG_CHUNK_SIZE 2048 ++#else ++/* Use skbuffs for XEN kernels. LRO is already disabled */ ++#define FL0_PG_CHUNK_SIZE 0 ++#endif ++ + #define FL0_PG_ORDER 0 + #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) ++ ++#ifndef CONFIG_XEN + #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) + #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) ++#else ++#define FL1_PG_CHUNK_SIZE 0 ++#define FL1_PG_ORDER 0 ++#endif ++ + #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) + + #define SGE_RX_DROP_THRES 16 +@@ -1267,7 +1280,27 @@ netdev_tx_t t3_eth_xmit(struct sk_buff * + + gen = q->gen; + q->unacked += ndesc; ++#ifdef CONFIG_XEN ++ /* ++ * Some Guest OS clients get terrible performance when they have bad ++ * message size / socket send buffer space parameters. For instance, ++ * if an application selects an 8KB message size and an 8KB send ++ * socket buffer size. This forces the application into a single ++ * packet stop-and-go mode where it's only willing to have a single ++ * message outstanding. The next message is only sent when the ++ * previous message is noted as having been sent. Until we issue a ++ * kfree_skb() against the TX skb, the skb is charged against the ++ * application's send buffer space. We only free up TX skbs when we ++ * get a TX credit return from the hardware / firmware which is fairly ++ * lazy about this. So we request a TX WR Completion Notification on ++ * every TX descriptor in order to accellerate TX credit returns. See ++ * also the change in handle_rsp_cntrl_info() to free up TX skb's when ++ * we receive the TX WR Completion Notifications ... ++ */ ++ compl = F_WR_COMPL; ++#else + compl = (q->unacked & 8) << (S_WR_COMPL - 3); ++#endif + q->unacked &= 7; + pidx = q->pidx; + q->pidx += ndesc; +@@ -2176,8 +2209,35 @@ static inline void handle_rsp_cntrl_info + #endif + + credits = G_RSPD_TXQ0_CR(flags); +- if (credits) ++ if (credits) { + qs->txq[TXQ_ETH].processed += credits; ++#ifdef CONFIG_XEN ++ /* ++ * In the normal Linux driver t3_eth_xmit() routine, we call ++ * skb_orphan() on unshared TX skb. This results in a call to ++ * the destructor for the skb which frees up the send buffer ++ * space it was holding down. This, in turn, allows the ++ * application to make forward progress generating more data ++ * which is important at 10Gb/s. 
For Virtual Machine Guest ++ * Operating Systems this doesn't work since the send buffer ++ * space is being held down in the Virtual Machine. Thus we ++ * need to get the TX skb's freed up as soon as possible in ++ * order to prevent applications from stalling. ++ * ++ * This code is largely copied from the corresponding code in ++ * sge_timer_tx() and should probably be kept in sync with any ++ * changes there. ++ */ ++ if (__netif_tx_trylock(qs->tx_q)) { ++ struct port_info *pi = netdev_priv(qs->netdev); ++ struct adapter *adap = pi->adapter; ++ ++ reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], ++ TX_RECLAIM_CHUNK); ++ __netif_tx_unlock(qs->tx_q); ++ } ++#endif ++ } + + credits = G_RSPD_TXQ2_CR(flags); + if (credits) +--- head-2011-01-30.orig/drivers/net/cxgb3/version.h 2010-10-20 22:30:22.000000000 +0200 ++++ head-2011-01-30/drivers/net/cxgb3/version.h 2011-02-03 14:45:48.000000000 +0100 +@@ -35,7 +35,11 @@ + #define DRV_DESC "Chelsio T3 Network Driver" + #define DRV_NAME "cxgb3" + /* Driver version */ ++#ifndef CONFIG_XEN + #define DRV_VERSION "1.1.4-ko" ++#else ++#define DRV_VERSION "1.1.4-xen-ko" ++#endif + + /* Firmware version */ + #define FW_VERSION_MAJOR 7 diff --git a/patches.xen/xen-dcdbas b/patches.xen/xen-dcdbas index 9e9d059..ea526ca 100644 --- a/patches.xen/xen-dcdbas +++ b/patches.xen/xen-dcdbas @@ -4,9 +4,16 @@ Patch-mainline: n/a The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. ---- head-2010-04-15.orig/drivers/firmware/Kconfig 2010-03-25 14:39:33.000000000 +0100 -+++ head-2010-04-15/drivers/firmware/Kconfig 2009-10-21 12:05:13.000000000 +0200 -@@ -90,6 +90,7 @@ config DELL_RBU +--- + drivers/firmware/Kconfig | 1 + drivers/firmware/dcdbas.c | 28 ++++++++- + drivers/xen/core/domctl.c | 141 ++++++++++++++++++++++++++++++++++++++++++++++ + drivers/xen/core/domctl.h | 1 + 4 files changed, 169 insertions(+), 2 deletions(-) + +--- a/drivers/firmware/Kconfig ++++ b/drivers/firmware/Kconfig +@@ -91,6 +91,7 @@ config DELL_RBU config DCDBAS tristate "Dell Systems Management Base Driver" depends on X86 @@ -14,8 +21,8 @@ The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. help The Dell Systems Management Base Driver provides a sysfs interface for systems management software to perform System Management ---- head-2010-04-15.orig/drivers/firmware/dcdbas.c 2010-04-15 09:37:45.000000000 +0200 -+++ head-2010-04-15/drivers/firmware/dcdbas.c 2010-04-15 11:45:50.000000000 +0200 +--- a/drivers/firmware/dcdbas.c ++++ b/drivers/firmware/dcdbas.c @@ -37,6 +37,10 @@ #include #include @@ -68,8 +75,8 @@ The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. +#endif /* generate SMI */ - asm volatile ( -@@ -278,9 +293,13 @@ int dcdbas_smi_request(struct smi_cmd *s + /* inb to force posted write through and make SMI happen now */ +@@ -280,9 +295,13 @@ int dcdbas_smi_request(struct smi_cmd *s : "memory" ); @@ -83,7 +90,7 @@ The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. return ret; } -@@ -320,7 +339,7 @@ static ssize_t smi_request_store(struct +@@ -322,7 +341,7 @@ static ssize_t smi_request_store(struct break; case 1: /* Calling Interface SMI */ @@ -92,7 +99,7 @@ The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. 
ret = dcdbas_smi_request(smi_cmd); if (!ret) ret = count; -@@ -601,6 +620,11 @@ static int __init dcdbas_init(void) +@@ -603,6 +622,11 @@ static int __init dcdbas_init(void) { int error; @@ -104,8 +111,8 @@ The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. error = platform_driver_register(&dcdbas_driver); if (error) return error; ---- head-2010-04-15.orig/drivers/xen/core/domctl.c 2010-03-25 14:37:59.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/domctl.c 2010-05-07 12:14:29.000000000 +0200 +--- a/drivers/xen/core/domctl.c ++++ b/drivers/xen/core/domctl.c @@ -20,6 +20,8 @@ #undef __XEN_TOOLS__ #include @@ -280,8 +287,8 @@ The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. +#endif /* CONFIG_X86 */ + MODULE_LICENSE("GPL"); ---- head-2010-04-15.orig/drivers/xen/core/domctl.h 2010-03-25 14:37:59.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/domctl.h 2009-10-21 13:24:42.000000000 +0200 +--- a/drivers/xen/core/domctl.h ++++ b/drivers/xen/core/domctl.h @@ -1,2 +1,3 @@ int xen_guest_address_size(int domid); int xen_guest_blkif_protocol(int domid); diff --git a/patches.xen/xen-ipi-per-cpu-irq b/patches.xen/xen-ipi-per-cpu-irq index 2caef38..f521183 100644 --- a/patches.xen/xen-ipi-per-cpu-irq +++ b/patches.xen/xen-ipi-per-cpu-irq @@ -2,9 +2,36 @@ From: jbeulich@novell.com Subject: fold IPIs onto a single IRQ each Patch-mainline: n/a ---- head-2010-04-15.orig/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/apic/ipi-xen.c 2010-01-25 13:46:29.000000000 +0100 -@@ -21,31 +21,22 @@ +--- head-2011-02-17.orig/arch/x86/include/asm/hw_irq.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/asm/hw_irq.h 2011-02-02 15:09:42.000000000 +0100 +@@ -132,7 +132,6 @@ extern void smp_error_interrupt(struct p + extern asmlinkage void smp_irq_move_cleanup_interrupt(void); + #endif + #ifdef CONFIG_SMP +-#ifndef CONFIG_XEN + extern void smp_reschedule_interrupt(struct pt_regs *); + extern void smp_call_function_interrupt(struct pt_regs *); + extern void smp_call_function_single_interrupt(struct pt_regs *); +@@ -141,13 +140,9 @@ extern void smp_invalidate_interrupt(str + #else + extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *); + #endif +-#else +-#include +-extern irqreturn_t smp_reschedule_interrupt(int, void *); +-extern irqreturn_t smp_call_function_interrupt(int, void *); +-extern irqreturn_t smp_call_function_single_interrupt(int, void *); +-extern irqreturn_t smp_reboot_interrupt(int, void *); +-extern irqreturn_t smp_irq_work_interrupt(int, void *); ++extern void smp_irq_work_interrupt(struct pt_regs *); ++#ifdef CONFIG_XEN ++extern void smp_reboot_interrupt(struct pt_regs *); + #endif + #endif + +--- head-2011-02-17.orig/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:57:40.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:58:00.000000000 +0100 +@@ -6,25 +6,6 @@ #include @@ -13,62 +40,119 @@ Patch-mainline: n/a -static inline void __send_IPI_one(unsigned int cpu, int vector) -{ - int irq = per_cpu(ipi_to_irq, cpu)[vector]; +- +- if (vector == NMI_VECTOR) { +- static int __read_mostly printed; +- int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL); +- +- if (rc && !printed) +- pr_warning("Unable (%d) to send NMI to CPU#%u\n", +- printed = rc, cpu); +- return; +- } - BUG_ON(irq < 0); - notify_remote_via_irq(irq); -} - - static void __send_IPI_shortcut(unsigned int shortcut, int vector) + void xen_send_IPI_mask_allbutself(const 
struct cpumask *cpumask, int vector) { - unsigned int cpu; - - switch (shortcut) { - case APIC_DEST_SELF: -- __send_IPI_one(smp_processor_id(), vector); -+ notify_remote_via_ipi(vector, smp_processor_id()); - break; - case APIC_DEST_ALLBUT: - for_each_online_cpu(cpu) - if (cpu != smp_processor_id()) -- __send_IPI_one(cpu, vector); -+ notify_remote_via_ipi(vector, cpu); - break; - case APIC_DEST_ALLINC: - for_each_online_cpu(cpu) -- __send_IPI_one(cpu, vector); -+ notify_remote_via_ipi(vector, cpu); - break; - default: - printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut, -@@ -63,7 +54,7 @@ void xen_send_IPI_mask_allbutself(const + unsigned int cpu, this_cpu = smp_processor_id(); +@@ -32,7 +13,7 @@ void xen_send_IPI_mask_allbutself(const WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); for_each_cpu_and(cpu, cpumask, cpu_online_mask) - if (cpu != smp_processor_id()) + if (cpu != this_cpu) - __send_IPI_one(cpu, vector); + notify_remote_via_ipi(vector, cpu); - local_irq_restore(flags); } -@@ -75,7 +66,7 @@ void xen_send_IPI_mask(const struct cpum - local_irq_save(flags); + void xen_send_IPI_mask(const struct cpumask *cpumask, int vector) +@@ -41,7 +22,7 @@ void xen_send_IPI_mask(const struct cpum + WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); for_each_cpu_and(cpu, cpumask, cpu_online_mask) - __send_IPI_one(cpu, vector); + notify_remote_via_ipi(vector, cpu); - local_irq_restore(flags); } ---- head-2010-04-15.orig/arch/x86/kernel/irq-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/irq-xen.c 2010-01-25 13:46:29.000000000 +0100 -@@ -312,6 +312,7 @@ void fixup_irqs(void) + void xen_send_IPI_allbutself(int vector) +@@ -56,5 +37,5 @@ void xen_send_IPI_all(int vector) - affinity = desc->affinity; + void xen_send_IPI_self(int vector) + { +- __send_IPI_one(smp_processor_id(), vector); ++ notify_remote_via_ipi(vector, smp_processor_id()); + } +--- head-2011-02-17.orig/arch/x86/kernel/irq-xen.c 2011-02-18 15:17:23.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/irq-xen.c 2011-02-02 15:09:43.000000000 +0100 +@@ -331,6 +331,7 @@ void fixup_irqs(void) + data = &desc->irq_data; + affinity = data->affinity; if (!irq_has_action(irq) || + (desc->status & IRQ_PER_CPU) || - cpumask_equal(affinity, cpu_online_mask)) { + cpumask_subset(affinity, cpu_online_mask)) { raw_spin_unlock(&desc->lock); continue; ---- head-2010-04-15.orig/drivers/xen/Kconfig 2010-03-31 14:09:58.000000000 +0200 -+++ head-2010-04-15/drivers/xen/Kconfig 2010-03-31 14:10:55.000000000 +0200 +--- head-2011-02-17.orig/arch/x86/kernel/irq_work-xen.c 2011-02-03 11:19:35.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/irq_work-xen.c 2011-02-03 13:56:43.000000000 +0100 +@@ -8,12 +8,10 @@ + #include + + #ifdef CONFIG_SMP +-irqreturn_t smp_irq_work_interrupt(int irq, void *dev_id) ++void smp_irq_work_interrupt(struct pt_regs *regs) + { + inc_irq_stat(apic_irq_work_irqs); + irq_work_run(); +- +- return IRQ_HANDLED; + } + + void arch_irq_work_raise(void) +--- head-2011-02-17.orig/arch/x86/kernel/smp-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/smp-xen.c 2011-02-02 15:09:43.000000000 +0100 +@@ -136,11 +136,9 @@ void xen_send_call_func_ipi(const struct + * this function calls the 'stop' function on all other CPUs in the system. 
+ */ + +-irqreturn_t smp_reboot_interrupt(int irq, void *dev_id) ++void smp_reboot_interrupt(struct pt_regs *regs) + { + stop_this_cpu(NULL); +- +- return IRQ_HANDLED; + } + + void xen_stop_other_cpus(int wait) +@@ -179,24 +177,19 @@ void xen_stop_other_cpus(int wait) + * all the work is done automatically when + * we return from the interrupt. + */ +-irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) ++void smp_reschedule_interrupt(struct pt_regs *regs) + { + inc_irq_stat(irq_resched_count); +- return IRQ_HANDLED; + } + +-irqreturn_t smp_call_function_interrupt(int irq, void *dev_id) ++void smp_call_function_interrupt(struct pt_regs *regs) + { + generic_smp_call_function_interrupt(); + inc_irq_stat(irq_call_count); +- +- return IRQ_HANDLED; + } + +-irqreturn_t smp_call_function_single_interrupt(int irq, void *dev_id) ++void smp_call_function_single_interrupt(struct pt_regs *regs) + { + generic_smp_call_function_single_interrupt(); + inc_irq_stat(irq_call_count); +- +- return IRQ_HANDLED; + } +--- head-2011-02-17.orig/drivers/xen/Kconfig 2011-02-03 14:48:57.000000000 +0100 ++++ head-2011-02-17/drivers/xen/Kconfig 2011-02-03 14:49:15.000000000 +0100 @@ -4,6 +4,7 @@ config XEN @@ -77,172 +161,208 @@ Patch-mainline: n/a if XEN config XEN_INTERFACE_VERSION -@@ -351,6 +352,9 @@ endmenu - config HAVE_IRQ_IGNORE_UNHANDLED - def_bool y - -+config IRQ_PER_CPU -+ bool -+ - config NO_IDLE_HZ - def_bool y - ---- head-2010-04-15.orig/drivers/xen/core/evtchn.c 2010-04-23 15:20:28.000000000 +0200 -+++ head-2010-04-15/drivers/xen/core/evtchn.c 2010-04-23 15:20:31.000000000 +0200 -@@ -59,6 +59,22 @@ static DEFINE_SPINLOCK(irq_mapping_updat +--- head-2011-02-17.orig/drivers/xen/core/evtchn.c 2011-02-10 16:18:00.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/evtchn.c 2011-02-15 17:52:39.000000000 +0100 +@@ -59,6 +59,20 @@ static DEFINE_SPINLOCK(irq_mapping_updat static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... NR_EVENT_CHANNELS-1] = -1 }; +/* IRQ <-> IPI mapping. */ -+#ifndef NR_IPIS -+#define NR_IPIS 1 -+#endif +#if defined(CONFIG_SMP) && defined(CONFIG_X86) -+static int ipi_to_irq[NR_IPIS] __read_mostly = {[0 ... NR_IPIS-1] = -1}; -+static DEFINE_PER_CPU(int[NR_IPIS], ipi_to_evtchn); ++static int __read_mostly ipi_irq = -1; ++DEFINE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending); ++static DEFINE_PER_CPU_READ_MOSTLY(evtchn_port_t, ipi_evtchn); +#else +#define PER_CPU_IPI_IRQ +#endif +#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ) -+#define BUG_IF_IPI(irq) BUG_ON(type_from_irq(irq) == IRQT_IPI) ++#define BUG_IF_IPI(irq_cfg) BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_IPI) +#else -+#define BUG_IF_IPI(irq) ((void)(irq)) ++#define BUG_IF_IPI(irq_cfg) ((void)0) +#endif + /* Binding types. */ enum { IRQT_UNBOUND, -@@ -117,12 +133,14 @@ static inline u32 mk_irq_info(u32 type, - * Accessors for packed IRQ information. - */ - -+#ifdef PER_CPU_IPI_IRQ - static inline unsigned int evtchn_from_irq(int irq) - { - const struct irq_cfg *cfg = irq_cfg(irq); +@@ -108,7 +122,9 @@ static inline u32 mk_irq_info(u32 type, - return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0; - } + BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS)); + BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS)); ++#if defined(PER_CPU_IPI_IRQ) && defined(NR_IPIS) + BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS)); +#endif + BUG_ON(index >> _INDEX_BITS); + + BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS)); +@@ -120,25 +136,6 @@ static inline u32 mk_irq_info(u32 type, + * Accessors for packed IRQ information. 
+ */ - static inline unsigned int index_from_irq(int irq) +-static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg) +-{ +- return cfg->info & ((1U << _EVTCHN_BITS) - 1); +-} +- +-static inline unsigned int evtchn_from_irq_data(struct irq_data *data) +-{ +- const struct irq_cfg *cfg = irq_data_cfg(data); +- +- return cfg ? evtchn_from_irq_cfg(cfg) : 0; +-} +- +-static inline unsigned int evtchn_from_irq(int irq) +-{ +- struct irq_data *data = irq_get_irq_data(irq); +- +- return data ? evtchn_from_irq_data(data) : 0; +-} +- + static inline unsigned int index_from_irq_cfg(const struct irq_cfg *cfg) { -@@ -139,6 +157,25 @@ static inline unsigned int type_from_irq - return cfg ? cfg->info >> (32 - _IRQT_BITS) : IRQT_UNBOUND; + return (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1); +@@ -163,6 +160,38 @@ static inline unsigned int type_from_irq + return cfg ? type_from_irq_cfg(cfg) : IRQT_UNBOUND; } +#ifndef PER_CPU_IPI_IRQ -+static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq, -+ unsigned int cpu) ++static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg, ++ unsigned int cpu) +{ -+ BUG_ON(type_from_irq(irq) != IRQT_IPI); -+ return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)]; ++ BUG_ON(type_from_irq_cfg(cfg) != IRQT_IPI); ++ return per_cpu(ipi_evtchn, cpu); +} ++#endif + -+static inline unsigned int evtchn_from_irq(unsigned int irq) ++static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg) +{ -+ if (type_from_irq(irq) != IRQT_IPI) { -+ const struct irq_cfg *cfg = irq_cfg(irq); ++#ifndef PER_CPU_IPI_IRQ ++ if (type_from_irq_cfg(cfg) == IRQT_IPI) ++ return evtchn_from_per_cpu_irq(cfg, smp_processor_id()); ++#endif ++ return cfg->info & ((1U << _EVTCHN_BITS) - 1); ++} + -+ return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0; -+ } -+ return evtchn_from_per_cpu_irq(irq, smp_processor_id()); ++static inline unsigned int evtchn_from_irq_data(struct irq_data *data) ++{ ++ const struct irq_cfg *cfg = irq_data_cfg(data); ++ ++ return cfg ? evtchn_from_irq_cfg(cfg) : 0; ++} ++ ++static inline unsigned int evtchn_from_irq(int irq) ++{ ++ struct irq_data *data = irq_get_irq_data(irq); ++ ++ return data ? evtchn_from_irq_data(data) : 0; +} -+#endif + unsigned int irq_from_evtchn(unsigned int port) { return evtchn_to_irq[port]; -@@ -148,11 +185,10 @@ EXPORT_SYMBOL_GPL(irq_from_evtchn); +@@ -172,11 +201,13 @@ EXPORT_SYMBOL_GPL(irq_from_evtchn); /* IRQ <-> VIRQ mapping. */ DEFINE_PER_CPU(int[NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; +#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ) /* IRQ <-> IPI mapping. */ --#ifndef NR_IPIS --#define NR_IPIS 1 --#endif + #ifndef NR_IPIS + #define NR_IPIS 1 + #endif DEFINE_PER_CPU(int[NR_IPIS], ipi_to_irq) = {[0 ... 
NR_IPIS-1] = -1}; +#endif #ifdef CONFIG_SMP -@@ -176,8 +212,14 @@ static void bind_evtchn_to_cpu(unsigned +@@ -204,8 +235,14 @@ static void bind_evtchn_to_cpu(unsigned BUG_ON(!test_bit(chn, s->evtchn_mask)); - if (irq != -1) -- cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); +- cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu)); + if (irq != -1) { + struct irq_desc *desc = irq_to_desc(irq); + + if (!(desc->status & IRQ_PER_CPU)) -+ cpumask_copy(desc->affinity, cpumask_of(cpu)); ++ cpumask_copy(desc->irq_data.affinity, cpumask_of(cpu)); + else -+ cpumask_set_cpu(cpu, desc->affinity); ++ cpumask_set_cpu(cpu, desc->irq_data.affinity); + } clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn])); set_bit(chn, per_cpu(cpu_evtchn_mask, cpu)); -@@ -350,7 +392,7 @@ asmlinkage void __irq_entry evtchn_do_up - - static struct irq_chip dynirq_chip; +@@ -370,7 +407,10 @@ asmlinkage void __irq_entry evtchn_do_up + port = (l1i * BITS_PER_LONG) + l2i; + mask_evtchn(port); + if ((irq = evtchn_to_irq[port]) != -1) { +- clear_evtchn(port); ++#ifndef PER_CPU_IPI_IRQ ++ if (port != percpu_read(ipi_evtchn)) ++#endif ++ clear_evtchn(port); + handled = handle_irq(irq, regs); + } + if (!handled && printk_ratelimit()) +@@ -404,7 +444,7 @@ asmlinkage void __irq_entry evtchn_do_up + } --static int find_unbound_irq(unsigned int cpu) -+static int find_unbound_irq(unsigned int cpu, bool percpu) + static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg, +- struct irq_chip *chip) ++ struct irq_chip *chip, bool percpu) { static int warned; int irq; -@@ -360,10 +402,19 @@ static int find_unbound_irq(unsigned int - struct irq_cfg *cfg = desc->chip_data; +@@ -420,11 +460,20 @@ static int find_unbound_irq(unsigned int + continue; if (!cfg->bindcount) { + irq_flow_handler_t handle; + const char *name; + + *pcfg = cfg; desc->status |= IRQ_NOPROBE; + if (!percpu) { -+ handle = handle_level_irq; -+ name = "level"; ++ handle = handle_fasteoi_irq; ++ name = "fasteoi"; + } else { + handle = handle_percpu_irq; + name = "percpu"; + } - set_irq_chip_and_handler_name(irq, &dynirq_chip, -- handle_level_irq, -- "level"); + set_irq_chip_and_handler_name(irq, chip, +- handle_fasteoi_irq, +- "fasteoi"); + handle, name); return irq; } } -@@ -384,7 +435,7 @@ static int bind_caller_port_to_irq(unsig - spin_lock(&irq_mapping_update_lock); +@@ -449,7 +498,7 @@ static int bind_caller_port_to_irq(unsig if ((irq = evtchn_to_irq[caller_port]) == -1) { -- if ((irq = find_unbound_irq(smp_processor_id())) < 0) -+ if ((irq = find_unbound_irq(smp_processor_id(), false)) < 0) + if ((irq = find_unbound_irq(numa_node_id(), &cfg, +- &dynirq_chip)) < 0) ++ &dynirq_chip, false)) < 0) goto out; evtchn_to_irq[caller_port] = irq; -@@ -407,7 +458,7 @@ static int bind_local_port_to_irq(unsign +@@ -473,7 +522,8 @@ static int bind_local_port_to_irq(unsign BUG_ON(evtchn_to_irq[local_port] != -1); -- if ((irq = find_unbound_irq(smp_processor_id())) < 0) { -+ if ((irq = find_unbound_irq(smp_processor_id(), false)) < 0) { - struct evtchn_close close = { .port = local_port }; - if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) +- if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip)) < 0) { ++ if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip, ++ false)) < 0) { + if (close_evtchn(local_port)) BUG(); -@@ -460,7 +511,7 @@ static int bind_virq_to_irq(unsigned int - spin_lock(&irq_mapping_update_lock); + goto out; +@@ -527,7 +577,7 @@ static int bind_virq_to_irq(unsigned int if ((irq = per_cpu(virq_to_irq, 
cpu)[virq]) == -1) { -- if ((irq = find_unbound_irq(cpu)) < 0) -+ if ((irq = find_unbound_irq(cpu, false)) < 0) + if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, +- &dynirq_chip)) < 0) ++ &dynirq_chip, false)) < 0) goto out; bind_virq.virq = virq; -@@ -485,6 +536,7 @@ static int bind_virq_to_irq(unsigned int +@@ -553,6 +603,7 @@ static int bind_virq_to_irq(unsigned int return irq; } @@ -250,16 +370,16 @@ Patch-mainline: n/a static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; -@@ -493,7 +545,7 @@ static int bind_ipi_to_irq(unsigned int - spin_lock(&irq_mapping_update_lock); +@@ -563,7 +614,7 @@ static int bind_ipi_to_irq(unsigned int if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) { -- if ((irq = find_unbound_irq(cpu)) < 0) -+ if ((irq = find_unbound_irq(cpu, false)) < 0) + if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, +- &dynirq_chip)) < 0) ++ &dynirq_chip, false)) < 0) goto out; bind_ipi.vcpu = cpu; -@@ -516,6 +568,7 @@ static int bind_ipi_to_irq(unsigned int +@@ -587,6 +638,7 @@ static int bind_ipi_to_irq(unsigned int spin_unlock(&irq_mapping_update_lock); return irq; } @@ -267,55 +387,55 @@ Patch-mainline: n/a static void unbind_from_irq(unsigned int irq) { -@@ -523,6 +576,7 @@ static void unbind_from_irq(unsigned int - unsigned int cpu; - int evtchn = evtchn_from_irq(irq); +@@ -594,6 +646,7 @@ static void unbind_from_irq(unsigned int + struct irq_cfg *cfg = irq_cfg(irq); + int evtchn = evtchn_from_irq_cfg(cfg); -+ BUG_IF_IPI(irq); ++ BUG_IF_IPI(cfg); spin_lock(&irq_mapping_update_lock); - if (!--irq_cfg(irq)->bindcount && VALID_EVTCHN(evtchn)) { -@@ -536,10 +590,12 @@ static void unbind_from_irq(unsigned int + if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) { +@@ -606,10 +659,12 @@ static void unbind_from_irq(unsigned int per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) - [index_from_irq(irq)] = -1; + [index_from_irq_cfg(cfg)] = -1; break; +#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ) case IRQT_IPI: per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) - [index_from_irq(irq)] = -1; + [index_from_irq_cfg(cfg)] = -1; break; +#endif default: break; } -@@ -562,6 +618,46 @@ static void unbind_from_irq(unsigned int +@@ -636,6 +691,46 @@ static void unbind_from_irq(unsigned int spin_unlock(&irq_mapping_update_lock); } -+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ) ++#ifndef PER_CPU_IPI_IRQ +void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu) +{ + struct evtchn_close close; -+ int evtchn = evtchn_from_per_cpu_irq(irq, cpu); ++ struct irq_data *data = irq_get_irq_data(irq); ++ struct irq_cfg *cfg = irq_data_cfg(data); ++ int evtchn = evtchn_from_per_cpu_irq(cfg, cpu); + + spin_lock(&irq_mapping_update_lock); + + if (VALID_EVTCHN(evtchn)) { -+ struct irq_desc *desc = irq_to_desc(irq); -+ + mask_evtchn(evtchn); + -+ BUG_ON(irq_cfg(irq)->bindcount <= 1); -+ irq_cfg(irq)->bindcount--; -+ cpumask_clear_cpu(cpu, desc->affinity); ++ BUG_ON(cfg->bindcount <= 1); ++ cfg->bindcount--; ++ cpumask_clear_cpu(cpu, data->affinity); + + close.port = evtchn; + if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) + BUG(); + -+ switch (type_from_irq(irq)) { ++ switch (type_from_irq_cfg(cfg)) { + case IRQT_IPI: -+ per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = 0; ++ per_cpu(ipi_evtchn, cpu) = 0; + break; + default: + BUG(); @@ -330,12 +450,12 @@ Patch-mainline: n/a + + spin_unlock(&irq_mapping_update_lock); +} -+#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */ ++#endif /* !PER_CPU_IPI_IRQ */ + int bind_caller_port_to_irqhandler( unsigned 
int caller_port, irq_handler_t handler, -@@ -656,6 +752,8 @@ int bind_virq_to_irqhandler( +@@ -730,6 +825,8 @@ int bind_virq_to_irqhandler( } EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); @@ -344,53 +464,53 @@ Patch-mainline: n/a int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, -@@ -679,7 +777,71 @@ int bind_ipi_to_irqhandler( +@@ -753,7 +850,71 @@ int bind_ipi_to_irqhandler( return irq; } -EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler); +#else +int __cpuinit bind_ipi_to_irqaction( -+ unsigned int ipi, + unsigned int cpu, + struct irqaction *action) +{ + struct evtchn_bind_ipi bind_ipi; -+ int evtchn, irq, retval = 0; ++ struct irq_cfg *cfg; ++ int evtchn, retval = 0; + + spin_lock(&irq_mapping_update_lock); + -+ if (VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi])) { ++ if (VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) { + spin_unlock(&irq_mapping_update_lock); + return -EBUSY; + } + -+ if ((irq = ipi_to_irq[ipi]) == -1) { -+ if ((irq = find_unbound_irq(cpu, true)) < 0) { ++ if (ipi_irq < 0) { ++ if ((ipi_irq = find_unbound_irq(cpu_to_node(cpu), &cfg, ++ &dynirq_chip, true)) < 0) { + spin_unlock(&irq_mapping_update_lock); -+ return irq; ++ return ipi_irq; + } + + /* Extra reference so count will never drop to zero. */ -+ irq_cfg(irq)->bindcount++; ++ cfg->bindcount++; + -+ ipi_to_irq[ipi] = irq; -+ irq_cfg(irq)->info = mk_irq_info(IRQT_IPI, ipi, 0); ++ cfg->info = mk_irq_info(IRQT_IPI, 0, 0); + retval = 1; -+ } ++ } else ++ cfg = irq_cfg(ipi_irq); + + bind_ipi.vcpu = cpu; -+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, -+ &bind_ipi) != 0) ++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi)) + BUG(); + + evtchn = bind_ipi.port; -+ evtchn_to_irq[evtchn] = irq; -+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn; ++ evtchn_to_irq[evtchn] = ipi_irq; ++ per_cpu(ipi_evtchn, cpu) = evtchn; + + bind_evtchn_to_cpu(evtchn, cpu); + -+ irq_cfg(irq)->bindcount++; ++ cfg->bindcount++; + + spin_unlock(&irq_mapping_update_lock); + @@ -402,100 +522,158 @@ Patch-mainline: n/a + local_irq_restore(flags); + } else { + action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND; -+ retval = setup_irq(irq, action); ++ retval = setup_irq(ipi_irq, action); + if (retval) { -+ unbind_from_per_cpu_irq(irq, cpu); ++ unbind_from_per_cpu_irq(ipi_irq, cpu); + BUG_ON(retval > 0); -+ irq = retval; ++ ipi_irq = retval; + } + } + -+ return irq; ++ return ipi_irq; +} +#endif /* PER_CPU_IPI_IRQ */ +#endif /* CONFIG_SMP */ void unbind_from_irqhandler(unsigned int irq, void *dev_id) { -@@ -705,6 +867,7 @@ static void rebind_irq_to_cpu(unsigned i +@@ -777,8 +938,10 @@ void rebind_evtchn_to_cpu(int port, unsi + + static void rebind_irq_to_cpu(struct irq_data *data, unsigned int tcpu) { - int evtchn = evtchn_from_irq(irq); +- int evtchn = evtchn_from_irq_data(data); ++ const struct irq_cfg *cfg = irq_data_cfg(data); ++ int evtchn = evtchn_from_irq_cfg(cfg); -+ BUG_IF_IPI(irq); ++ BUG_IF_IPI(cfg); if (VALID_EVTCHN(evtchn)) rebind_evtchn_to_cpu(evtchn, tcpu); } -@@ -965,10 +1128,21 @@ int irq_ignore_unhandled(unsigned int ir +@@ -1031,10 +1194,47 @@ int irq_ignore_unhandled(unsigned int ir return !!(irq_status.flags & XENIRQSTAT_shared); } +#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ) +void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu) +{ -+ int evtchn = evtchn_from_per_cpu_irq(ipi_to_irq[ipi], cpu); ++ int evtchn = per_cpu(ipi_evtchn, cpu); ++ ++#ifdef NMI_VECTOR ++ if (ipi == NMI_VECTOR) { ++ static int __read_mostly printed; ++ int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL); ++ ++ if (rc && !printed) 
++ pr_warning("Unable (%d) to send NMI to CPU#%u\n", ++ printed = rc, cpu); ++ return; ++ } ++#endif + -+ if (VALID_EVTCHN(evtchn)) ++ if (VALID_EVTCHN(evtchn) ++ && !test_and_set_bit(ipi, per_cpu(ipi_pending, cpu)) ++ && !test_evtchn(evtchn)) + notify_remote_via_evtchn(evtchn); +} ++ ++void clear_ipi_evtchn(void) ++{ ++ int evtchn = percpu_read(ipi_evtchn); ++ ++ BUG_ON(!VALID_EVTCHN(evtchn)); ++ clear_evtchn(evtchn); ++} +#endif + void notify_remote_via_irq(int irq) { - int evtchn = evtchn_from_irq(irq); - -+ BUG_IF_IPI(irq); +- int evtchn = evtchn_from_irq(irq); ++ const struct irq_cfg *cfg = irq_cfg(irq); ++ int evtchn; + ++ if (WARN_ON_ONCE(!cfg)) ++ return; ++ BUG_IF_IPI(cfg); ++ evtchn = evtchn_from_irq_cfg(cfg); if (VALID_EVTCHN(evtchn)) notify_remote_via_evtchn(evtchn); } -@@ -976,6 +1150,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq) +@@ -1042,7 +1242,12 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq) int irq_to_evtchn_port(int irq) { -+ BUG_IF_IPI(irq); - return evtchn_from_irq(irq); +- return evtchn_from_irq(irq); ++ const struct irq_cfg *cfg = irq_cfg(irq); ++ ++ if (!cfg) ++ return 0; ++ BUG_IF_IPI(cfg); ++ return evtchn_from_irq_cfg(cfg); } EXPORT_SYMBOL_GPL(irq_to_evtchn_port); -@@ -1091,11 +1266,17 @@ static void restore_cpu_virqs(unsigned i + +@@ -1130,12 +1335,22 @@ static void restore_cpu_virqs(unsigned i static void restore_cpu_ipis(unsigned int cpu) { +#ifdef CONFIG_SMP struct evtchn_bind_ipi bind_ipi; - int ipi, irq, evtchn; +- int ipi, irq, evtchn; ++ int evtchn; ++#ifdef PER_CPU_IPI_IRQ ++ int ipi, irq; for (ipi = 0; ipi < NR_IPIS; ipi++) { -+#ifdef PER_CPU_IPI_IRQ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) + continue; +#else -+ if ((irq = ipi_to_irq[ipi]) == -1 -+ || !VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi])) ++#define ipi 0 ++#define irq ipi_irq ++ if (irq == -1 ++ || !VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) ++ return; +#endif - continue; BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_IPI, ipi, 0)); -@@ -1109,13 +1290,18 @@ static void restore_cpu_ipis(unsigned in + +@@ -1148,13 +1363,23 @@ static void restore_cpu_ipis(unsigned in /* Record the new mapping. */ evtchn_to_irq[evtchn] = irq; +#ifdef PER_CPU_IPI_IRQ irq_cfg(irq)->info = mk_irq_info(IRQT_IPI, ipi, evtchn); +#else -+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn; ++ per_cpu(ipi_evtchn, cpu) = evtchn; +#endif bind_evtchn_to_cpu(evtchn, cpu); /* Ready for use. 
*/ if (!(irq_to_desc(irq)->status & IRQ_DISABLED)) unmask_evtchn(evtchn); ++#ifdef PER_CPU_IPI_IRQ } ++#else ++#undef irq ++#undef ipi +#endif ++#endif /* CONFIG_SMP */ } static int evtchn_resume(struct sys_device *dev) ---- head-2010-04-15.orig/drivers/xen/core/smpboot.c 2010-03-19 15:20:15.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/smpboot.c 2010-03-19 15:20:24.000000000 +0100 -@@ -40,14 +40,10 @@ cpumask_var_t vcpu_initialized_mask; - DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info); +@@ -1358,7 +1583,8 @@ int evtchn_map_pirq(int irq, int xen_pir + struct irq_cfg *cfg; + + spin_lock(&irq_mapping_update_lock); +- irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip); ++ irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip, ++ false); + if (irq >= 0) { + BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND); + cfg->bindcount++; +--- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-03-03 16:13:04.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-03-03 16:14:20.000000000 +0100 +@@ -36,19 +36,7 @@ cpumask_var_t vcpu_initialized_mask; + DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); -static DEFINE_PER_CPU(int, resched_irq); @@ -506,37 +684,34 @@ Patch-mainline: n/a -static char callfunc_name[NR_CPUS][15]; -static char call1func_name[NR_CPUS][15]; -static char reboot_name[NR_CPUS][15]; -+static int __read_mostly resched_irq = -1; -+static int __read_mostly callfunc_irq = -1; -+static int __read_mostly call1func_irq = -1; -+static int __read_mostly reboot_irq = -1; - - #ifdef CONFIG_X86_LOCAL_APIC - #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid)) -@@ -109,58 +105,68 @@ remove_siblinginfo(unsigned int cpu) +- +-#ifdef CONFIG_IRQ_WORK +-static DEFINE_PER_CPU(int, irq_work_irq); +-static char irq_work_name[NR_CPUS][15]; +-#endif ++static int __read_mostly ipi_irq = -1; - static int __cpuinit xen_smp_intr_init(unsigned int cpu) + void __init prefill_possible_map(void) { -+ static struct irqaction resched_action = { -+ .handler = smp_reschedule_interrupt, -+ .flags = IRQF_DISABLED, -+ .name = "resched" -+ }, callfunc_action = { -+ .handler = smp_call_function_interrupt, -+ .flags = IRQF_DISABLED, -+ .name = "callfunc" -+ }, call1func_action = { -+ .handler = smp_call_function_single_interrupt, -+ .flags = IRQF_DISABLED, -+ .name = "call1func" -+ }, reboot_action = { -+ .handler = smp_reboot_interrupt, -+ .flags = IRQF_DISABLED, -+ .name = "reboot" -+ }; - int rc; +@@ -75,76 +63,59 @@ void __init prefill_possible_map(void) + ++total_cpus; + } +-static int __cpuinit xen_smp_intr_init(unsigned int cpu) ++static irqreturn_t ipi_interrupt(int irq, void *dev_id) + { +- int rc; +- - per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = ++ static void (*const handlers[])(struct pt_regs *) = { ++ [RESCHEDULE_VECTOR] = smp_reschedule_interrupt, ++ [CALL_FUNCTION_VECTOR] = smp_call_function_interrupt, ++ [CALL_FUNC_SINGLE_VECTOR] = smp_call_function_single_interrupt, ++ [REBOOT_VECTOR] = smp_reboot_interrupt, + #ifdef CONFIG_IRQ_WORK +- per_cpu(irq_work_irq, cpu) = ++ [IRQ_WORK_VECTOR] = smp_irq_work_interrupt, + #endif - per_cpu(call1func_irq, cpu) = per_cpu(reboot_irq, cpu) = -1; - - sprintf(resched_name[cpu], "resched%u", cpu); @@ -546,10 +721,7 @@ Patch-mainline: n/a - IRQF_DISABLED|IRQF_NOBALANCING, - resched_name[cpu], - NULL); -+ rc = bind_ipi_to_irqaction(RESCHEDULE_VECTOR, -+ cpu, -+ &resched_action); - if (rc < 0) +- if (rc < 0) - goto fail; - per_cpu(resched_irq, cpu) = rc; - @@ -560,16 
+732,7 @@ Patch-mainline: n/a - IRQF_DISABLED|IRQF_NOBALANCING, - callfunc_name[cpu], - NULL); -+ return rc; -+ if (resched_irq < 0) -+ resched_irq = rc; -+ else -+ BUG_ON(resched_irq != rc); -+ -+ rc = bind_ipi_to_irqaction(CALL_FUNCTION_VECTOR, -+ cpu, -+ &callfunc_action); - if (rc < 0) +- if (rc < 0) - goto fail; - per_cpu(callfunc_irq, cpu) = rc; - @@ -580,19 +743,32 @@ Patch-mainline: n/a - IRQF_DISABLED|IRQF_NOBALANCING, - call1func_name[cpu], - NULL); -+ goto unbind_resched; -+ if (callfunc_irq < 0) -+ callfunc_irq = rc; -+ else -+ BUG_ON(callfunc_irq != rc); -+ -+ rc = bind_ipi_to_irqaction(CALL_FUNC_SINGLE_VECTOR, -+ cpu, -+ &call1func_action); - if (rc < 0) +- if (rc < 0) - goto fail; - per_cpu(call1func_irq, cpu) = rc; -- ++ }; ++ unsigned long *pending = __get_cpu_var(ipi_pending); ++ struct pt_regs *regs = get_irq_regs(); ++ irqreturn_t ret = IRQ_NONE; ++ ++ for (;;) { ++ unsigned int ipi = find_first_bit(pending, NR_IPIS); ++ ++ if (ipi >= NR_IPIS) { ++ clear_ipi_evtchn(); ++ ipi = find_first_bit(pending, NR_IPIS); ++ } ++ if (ipi >= NR_IPIS) ++ return ret; ++ ret = IRQ_HANDLED; ++ do { ++ clear_bit(ipi, pending); ++ handlers[ipi](regs); ++ ipi = find_next_bit(pending, NR_IPIS, ipi); ++ } while (ipi < NR_IPIS); ++ } ++} + - sprintf(reboot_name[cpu], "reboot%u", cpu); - rc = bind_ipi_to_irqhandler(REBOOT_VECTOR, - cpu, @@ -600,32 +776,45 @@ Patch-mainline: n/a - IRQF_DISABLED|IRQF_NOBALANCING, - reboot_name[cpu], - NULL); -+ goto unbind_call; -+ if (call1func_irq < 0) -+ call1func_irq = rc; -+ else -+ BUG_ON(call1func_irq != rc); -+ -+ rc = bind_ipi_to_irqaction(REBOOT_VECTOR, -+ cpu, -+ &reboot_action); - if (rc < 0) +- if (rc < 0) - goto fail; - per_cpu(reboot_irq, cpu) = rc; -+ goto unbind_call1; -+ if (reboot_irq < 0) -+ reboot_irq = rc; ++static int __cpuinit xen_smp_intr_init(unsigned int cpu) ++{ ++ static struct irqaction ipi_action = { ++ .handler = ipi_interrupt, ++ .flags = IRQF_DISABLED, ++ .name = "ipi" ++ }; ++ int rc; + +-#ifdef CONFIG_IRQ_WORK +- sprintf(irq_work_name[cpu], "irqwork%u", cpu); +- rc = bind_ipi_to_irqhandler(IRQ_WORK_VECTOR, +- cpu, +- smp_irq_work_interrupt, +- IRQF_DISABLED|IRQF_NOBALANCING, +- irq_work_name[cpu], +- NULL); ++ rc = bind_ipi_to_irqaction(cpu, &ipi_action); + if (rc < 0) +- goto fail; +- per_cpu(irq_work_irq, cpu) = rc; +-#endif ++ return rc; ++ if (ipi_irq < 0) ++ ipi_irq = rc; + else -+ BUG_ON(reboot_irq != rc); ++ BUG_ON(ipi_irq != rc); rc = xen_spinlock_init(cpu); if (rc < 0) - goto fail; -+ goto unbind_reboot; ++ goto unbind_ipi; if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0)) goto fail; -@@ -168,15 +174,15 @@ static int __cpuinit xen_smp_intr_init(u +@@ -152,19 +123,9 @@ static int __cpuinit xen_smp_intr_init(u return 0; fail: @@ -637,19 +826,17 @@ Patch-mainline: n/a - unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); - if (per_cpu(reboot_irq, cpu) >= 0) - unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL); +-#ifdef CONFIG_IRQ_WORK +- if (per_cpu(irq_work_irq, cpu) >= 0) +- unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL); +-#endif xen_spinlock_cleanup(cpu); -+ unbind_reboot: -+ unbind_from_per_cpu_irq(reboot_irq, cpu); -+ unbind_call1: -+ unbind_from_per_cpu_irq(call1func_irq, cpu); -+ unbind_call: -+ unbind_from_per_cpu_irq(callfunc_irq, cpu); -+ unbind_resched: -+ unbind_from_per_cpu_irq(resched_irq, cpu); ++ unbind_ipi: ++ unbind_from_per_cpu_irq(ipi_irq, cpu); return rc; } -@@ -186,10 +192,10 @@ static void __cpuinit xen_smp_intr_exit( +@@ -174,13 +135,7 @@ static void __cpuinit 
xen_smp_intr_exit( if (cpu != 0) local_teardown_timer(cpu); @@ -657,90 +844,16 @@ Patch-mainline: n/a - unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL); -+ unbind_from_per_cpu_irq(resched_irq, cpu); -+ unbind_from_per_cpu_irq(callfunc_irq, cpu); -+ unbind_from_per_cpu_irq(call1func_irq, cpu); -+ unbind_from_per_cpu_irq(reboot_irq, cpu); +-#ifdef CONFIG_IRQ_WORK +- unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL); +-#endif ++ unbind_from_per_cpu_irq(ipi_irq, cpu); xen_spinlock_cleanup(cpu); } #endif ---- head-2010-04-15.orig/drivers/xen/core/spinlock.c 2010-04-15 10:14:50.000000000 +0200 -+++ head-2010-04-15/drivers/xen/core/spinlock.c 2010-02-24 12:38:00.000000000 +0100 -@@ -14,8 +14,7 @@ - - #ifdef TICKET_SHIFT - --static DEFINE_PER_CPU(int, spinlock_irq) = -1; --static char spinlock_name[NR_CPUS][15]; -+static int __read_mostly spinlock_irq = -1; - - struct spinning { - arch_spinlock_t *lock; -@@ -32,29 +31,31 @@ static DEFINE_PER_CPU(arch_rwlock_t, spi - - int __cpuinit xen_spinlock_init(unsigned int cpu) - { -+ static struct irqaction spinlock_action = { -+ .handler = smp_reschedule_interrupt, -+ .flags = IRQF_DISABLED, -+ .name = "spinlock" -+ }; - int rc; - -- sprintf(spinlock_name[cpu], "spinlock%u", cpu); -- rc = bind_ipi_to_irqhandler(SPIN_UNLOCK_VECTOR, -- cpu, -- smp_reschedule_interrupt, -- IRQF_DISABLED|IRQF_NOBALANCING, -- spinlock_name[cpu], -- NULL); -+ rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR, -+ cpu, -+ &spinlock_action); - if (rc < 0) - return rc; - -- disable_irq(rc); /* make sure it's never delivered */ -- per_cpu(spinlock_irq, cpu) = rc; -+ if (spinlock_irq < 0) { -+ disable_irq(rc); /* make sure it's never delivered */ -+ spinlock_irq = rc; -+ } else -+ BUG_ON(spinlock_irq != rc); - - return 0; - } - - void __cpuinit xen_spinlock_cleanup(unsigned int cpu) - { -- if (per_cpu(spinlock_irq, cpu) >= 0) -- unbind_from_irqhandler(per_cpu(spinlock_irq, cpu), NULL); -- per_cpu(spinlock_irq, cpu) = -1; -+ unbind_from_per_cpu_irq(spinlock_irq, cpu); - } - - static unsigned int spin_adjust(struct spinning *spinning, -@@ -84,7 +85,7 @@ unsigned int xen_spin_adjust(const arch_ - bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, - unsigned int flags) - { -- int irq = percpu_read(spinlock_irq); -+ int irq = spinlock_irq; - bool rc; - typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask; - arch_rwlock_t *rm_lock; -@@ -240,7 +241,7 @@ void xen_spin_kick(arch_spinlock_t *lock - raw_local_irq_restore(flags); - - if (unlikely(spinning)) { -- notify_remote_via_irq(per_cpu(spinlock_irq, cpu)); -+ notify_remote_via_ipi(SPIN_UNLOCK_VECTOR, cpu); - return; - } - } ---- head-2010-04-15.orig/include/xen/evtchn.h 2010-03-31 14:10:36.000000000 +0200 -+++ head-2010-04-15/include/xen/evtchn.h 2010-03-31 14:41:42.000000000 +0200 -@@ -93,6 +93,8 @@ int bind_virq_to_irqhandler( +--- head-2011-02-17.orig/include/xen/evtchn.h 2010-11-23 15:07:01.000000000 +0100 ++++ head-2011-02-17/include/xen/evtchn.h 2011-02-02 15:09:43.000000000 +0100 +@@ -94,6 +94,8 @@ int bind_virq_to_irqhandler( unsigned long irqflags, const char *devname, void *dev_id); @@ -749,21 +862,21 @@ Patch-mainline: n/a int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, -@@ -100,6 +102,13 @@ int bind_ipi_to_irqhandler( +@@ -101,6 +103,13 @@ int bind_ipi_to_irqhandler( unsigned long irqflags, const char *devname, void *dev_id); +#else +int bind_ipi_to_irqaction( -+ unsigned 
int ipi, + unsigned int cpu, + struct irqaction *action); ++DECLARE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending); +#endif +#endif /* * Common unbind function for all event sources. Takes IRQ to unbind from. -@@ -108,6 +117,11 @@ int bind_ipi_to_irqhandler( +@@ -109,6 +118,11 @@ int bind_ipi_to_irqhandler( */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); @@ -775,12 +888,13 @@ Patch-mainline: n/a #ifndef CONFIG_XEN void irq_resume(void); #endif -@@ -183,5 +197,9 @@ void xen_poll_irq(int irq); +@@ -180,5 +194,10 @@ int xen_test_irq_pending(int irq); void notify_remote_via_irq(int irq); int irq_to_evtchn_port(int irq); +#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) +void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu); ++void clear_ipi_evtchn(void); +#endif + #endif /* __ASM_EVTCHN_H__ */ diff --git a/patches.xen/xen-kconfig-compat b/patches.xen/xen-kconfig-compat index 24a38e5..d2d9ae8 100644 --- a/patches.xen/xen-kconfig-compat +++ b/patches.xen/xen-kconfig-compat @@ -2,9 +2,9 @@ From: jbeulich@novell.com Subject: add backward-compatibility configure options Patch-mainline: n/a ---- head-2010-03-24.orig/drivers/xen/Kconfig 2010-03-31 14:08:50.000000000 +0200 -+++ head-2010-03-24/drivers/xen/Kconfig 2010-03-31 14:09:58.000000000 +0200 -@@ -321,6 +321,15 @@ choice +--- head-2011-01-30.orig/drivers/xen/Kconfig 2010-11-26 13:37:36.000000000 +0100 ++++ head-2011-01-30/drivers/xen/Kconfig 2011-02-03 14:48:57.000000000 +0100 +@@ -317,6 +317,21 @@ choice config XEN_COMPAT_030100_AND_LATER bool "3.1.0 and later" @@ -16,14 +16,22 @@ Patch-mainline: n/a + + config XEN_COMPAT_030400_AND_LATER + bool "3.4.0 and later" ++ ++ config XEN_COMPAT_040000_AND_LATER ++ bool "4.0.0 and later" ++ ++ config XEN_COMPAT_040100_AND_LATER ++ bool "4.1.0 and later" + config XEN_COMPAT_LATEST_ONLY bool "no compatibility code" -@@ -329,6 +338,9 @@ endchoice +@@ -325,6 +340,11 @@ endchoice config XEN_COMPAT hex default 0xffffff if XEN_COMPAT_LATEST_ONLY ++ default 0x040100 if XEN_COMPAT_040100_AND_LATER ++ default 0x040000 if XEN_COMPAT_040000_AND_LATER + default 0x030400 if XEN_COMPAT_030400_AND_LATER + default 0x030300 if XEN_COMPAT_030300_AND_LATER + default 0x030200 if XEN_COMPAT_030200_AND_LATER diff --git a/patches.xen/xen-kzalloc b/patches.xen/xen-kzalloc index 351199d..1d5587c 100644 --- a/patches.xen/xen-kzalloc +++ b/patches.xen/xen-kzalloc @@ -4,9 +4,31 @@ Patch-mainline: n/a Also use clear_page() in favor of memset(, 0, PAGE_SIZE). 
---- head-2010-04-29.orig/drivers/xen/blkback/blkback.c 2010-03-25 14:38:05.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/blkback.c 2010-04-28 16:32:16.000000000 +0200 -@@ -671,7 +671,7 @@ static int __init blkif_init(void) +--- head-2011-02-17.orig/arch/x86/mm/init_32-xen.c 2011-02-02 15:07:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/init_32-xen.c 2011-02-02 15:10:16.000000000 +0100 +@@ -724,7 +724,7 @@ unsigned long __init extend_init_mapping + if (pmd_none(*pmd)) { + unsigned long pa = start_pfn++ << PAGE_SHIFT; + +- memset(__va(pa), 0, PAGE_SIZE); ++ clear_page(__va(pa)); + make_lowmem_page_readonly(__va(pa), + XENFEAT_writable_page_tables); + xen_l2_entry_update(pmd, __pmd(pa | _KERNPG_TABLE)); +--- head-2011-02-17.orig/arch/x86/mm/init_64-xen.c 2011-02-02 15:07:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/init_64-xen.c 2011-02-02 15:10:16.000000000 +0100 +@@ -213,7 +213,7 @@ static __ref void *spp_getpage(void) + else if (e820_table_end < e820_table_top) { + ptr = __va(e820_table_end << PAGE_SHIFT); + e820_table_end++; +- memset(ptr, 0, PAGE_SIZE); ++ clear_page(ptr); + } else + ptr = alloc_bootmem_pages(PAGE_SIZE); + +--- head-2011-02-17.orig/drivers/xen/blkback/blkback.c 2011-02-28 14:23:53.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkback/blkback.c 2011-02-28 14:26:29.000000000 +0100 +@@ -639,7 +639,7 @@ static int __init blkif_init(void) mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; @@ -15,7 +37,7 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). blkif_reqs, GFP_KERNEL); pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) * mmap_pages, GFP_KERNEL); -@@ -688,7 +688,6 @@ static int __init blkif_init(void) +@@ -656,7 +656,6 @@ static int __init blkif_init(void) blkif_interface_init(); @@ -23,39 +45,20 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). 
INIT_LIST_HEAD(&pending_free); for (i = 0; i < blkif_reqs; i++) ---- head-2010-04-29.orig/drivers/xen/blkback/interface.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/interface.c 2010-04-28 16:37:43.000000000 +0200 -@@ -41,11 +41,10 @@ blkif_t *blkif_alloc(domid_t domid) - { - blkif_t *blkif; - -- blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL); -+ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL|__GFP_ZERO); - if (!blkif) - return ERR_PTR(-ENOMEM); - -- memset(blkif, 0, sizeof(*blkif)); - blkif->domid = domid; - spin_lock_init(&blkif->blk_ring_lock); - atomic_set(&blkif->refcnt, 1); ---- head-2010-04-29.orig/drivers/xen/blktap/interface.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blktap/interface.c 2010-04-28 16:38:55.000000000 +0200 -@@ -41,11 +41,10 @@ blkif_t *tap_alloc_blkif(domid_t domid) - { - blkif_t *blkif; - -- blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL); -+ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL|__GFP_ZERO); - if (!blkif) - return ERR_PTR(-ENOMEM); - -- memset(blkif, 0, sizeof(*blkif)); - blkif->domid = domid; - spin_lock_init(&blkif->blk_ring_lock); - atomic_set(&blkif->refcnt, 1); ---- head-2010-04-29.orig/drivers/xen/core/machine_reboot.c 2010-03-25 14:39:15.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/machine_reboot.c 2010-04-28 17:04:28.000000000 +0200 -@@ -102,7 +102,7 @@ static void post_suspend(int suspend_can +--- head-2011-02-17.orig/drivers/xen/core/gnttab.c 2010-09-23 17:06:35.000000000 +0200 ++++ head-2011-02-17/drivers/xen/core/gnttab.c 2011-02-02 15:10:16.000000000 +0100 +@@ -546,7 +546,7 @@ int gnttab_copy_grant_page(grant_ref_t r + + new_addr = page_address(new_page); + addr = page_address(page); +- memcpy(new_addr, addr, PAGE_SIZE); ++ copy_page(new_addr, addr); + + pfn = page_to_pfn(page); + mfn = pfn_to_mfn(pfn); +--- head-2011-02-17.orig/drivers/xen/core/machine_reboot.c 2011-02-02 15:09:52.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/machine_reboot.c 2011-02-02 15:10:16.000000000 +0100 +@@ -97,7 +97,7 @@ static void post_suspend(int suspend_can BUG(); HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO); @@ -64,9 +67,9 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). fpp = PAGE_SIZE/sizeof(unsigned long); for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) { ---- head-2010-04-29.orig/drivers/xen/core/smpboot.c 2010-04-15 11:43:29.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/smpboot.c 2010-04-28 16:44:14.000000000 +0200 -@@ -218,17 +218,12 @@ static void __cpuinit cpu_initialize_con +--- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-02-07 12:28:20.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-02-02 15:10:16.000000000 +0100 +@@ -176,17 +176,12 @@ static void __cpuinit cpu_initialize_con ctxt.flags = VGCF_IN_KERNEL; ctxt.user_regs.ds = __USER_DS; ctxt.user_regs.es = __USER_DS; @@ -84,9 +87,9 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). 
ctxt.gdt_frames[0] = arbitrary_virt_to_mfn(get_cpu_gdt_table(cpu)); ctxt.gdt_ents = GDT_SIZE / 8; ---- head-2010-04-29.orig/drivers/xen/netback/interface.c 2010-04-30 10:42:29.000000000 +0200 -+++ head-2010-04-29/drivers/xen/netback/interface.c 2010-04-30 10:49:15.000000000 +0200 -@@ -227,7 +227,6 @@ netif_t *netif_alloc(struct device *pare +--- head-2011-02-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:34:28.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/interface.c 2011-02-17 10:36:11.000000000 +0100 +@@ -270,7 +270,6 @@ netif_t *netif_alloc(struct device *pare SET_NETDEV_DEV(dev, parent); netif = netdev_priv(dev); @@ -94,15 +97,15 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). netif->domid = domid; netif->group = UINT_MAX; netif->handle = handle; ---- head-2010-04-29.orig/drivers/xen/scsiback/emulate.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/scsiback/emulate.c 2010-04-28 16:51:05.000000000 +0200 -@@ -240,13 +240,11 @@ static void __report_luns(pending_req_t +--- head-2011-02-17.orig/drivers/xen/scsiback/emulate.c 2011-02-08 10:04:09.000000000 +0100 ++++ head-2011-02-17/drivers/xen/scsiback/emulate.c 2011-02-08 10:45:57.000000000 +0100 +@@ -243,13 +243,11 @@ static void __report_luns(pending_req_t alloc_len = sizeof(struct scsi_lun) * alloc_luns + VSCSI_REPORT_LUNS_HEADER; retry: - if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) { + if ((buff = kzalloc(alloc_len, GFP_KERNEL)) == NULL) { - printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__); + pr_err("scsiback:%s kmalloc err\n", __FUNCTION__); goto fail; } @@ -111,24 +114,9 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). one_lun = (struct scsi_lun *) &buff[8]; spin_lock_irqsave(&info->v2p_lock, flags); list_for_each_entry(entry, head, l) { ---- head-2010-04-29.orig/drivers/xen/scsiback/interface.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/drivers/xen/scsiback/interface.c 2010-04-28 16:51:29.000000000 +0200 -@@ -46,11 +46,10 @@ struct vscsibk_info *vscsibk_info_alloc( - { - struct vscsibk_info *info; - -- info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL); -+ info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL|__GFP_ZERO); - if (!info) - return ERR_PTR(-ENOMEM); - -- memset(info, 0, sizeof(*info)); - info->domid = domid; - spin_lock_init(&info->ring_lock); - atomic_set(&info->nr_unreplied_reqs, 0); ---- head-2010-04-29.orig/drivers/xen/scsiback/scsiback.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-04-29/drivers/xen/scsiback/scsiback.c 2010-04-28 16:52:02.000000000 +0200 -@@ -676,7 +676,7 @@ static int __init scsiback_init(void) +--- head-2011-02-17.orig/drivers/xen/scsiback/scsiback.c 2011-02-28 14:51:23.000000000 +0100 ++++ head-2011-02-17/drivers/xen/scsiback/scsiback.c 2011-02-28 14:53:21.000000000 +0100 +@@ -668,7 +668,7 @@ static int __init scsiback_init(void) mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE; @@ -137,7 +125,7 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). vscsiif_reqs, GFP_KERNEL); pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) * mmap_pages, GFP_KERNEL); -@@ -691,7 +691,6 @@ static int __init scsiback_init(void) +@@ -683,7 +683,6 @@ static int __init scsiback_init(void) if (scsiback_interface_init() < 0) goto out_of_kmem; @@ -145,8 +133,8 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). 
INIT_LIST_HEAD(&pending_free); for (i = 0; i < vscsiif_reqs; i++) ---- head-2010-04-29.orig/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2010-04-15 11:11:11.000000000 +0200 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2010-04-28 16:54:07.000000000 +0200 +--- head-2011-02-17.orig/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-02-17/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2011-02-02 15:10:16.000000000 +0100 @@ -77,7 +77,7 @@ int cuckoo_hash_init(cuckoo_hash_table * BUG_ON(length_bits >= sizeof(unsigned) * 8); BUG_ON(key_length > sizeof(cuckoo_hash_key)); @@ -166,22 +154,9 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). return 0; } EXPORT_SYMBOL_GPL(cuckoo_hash_init); ---- head-2010-04-29.orig/drivers/xen/tpmback/interface.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/drivers/xen/tpmback/interface.c 2010-04-28 16:55:39.000000000 +0200 -@@ -26,11 +26,10 @@ static tpmif_t *alloc_tpmif(domid_t domi - { - tpmif_t *tpmif; - -- tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL); -+ tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL|__GFP_ZERO); - if (tpmif == NULL) - goto out_of_memory; - -- memset(tpmif, 0, sizeof (*tpmif)); - tpmif->domid = domid; - tpmif->status = DISCONNECTED; - tpmif->bi = bi; -@@ -131,7 +130,7 @@ int tpmif_map(tpmif_t *tpmif, unsigned l +--- head-2011-02-17.orig/drivers/xen/tpmback/interface.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-02-17/drivers/xen/tpmback/interface.c 2011-02-02 15:10:16.000000000 +0100 +@@ -128,7 +128,7 @@ int tpmif_map(tpmif_t *tpmif, unsigned l } tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr; @@ -190,9 +165,9 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). err = bind_interdomain_evtchn_to_irqhandler( tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif); ---- head-2010-04-29.orig/drivers/xen/usbback/usbback.c 2010-04-15 17:36:18.000000000 +0200 -+++ head-2010-04-29/drivers/xen/usbback/usbback.c 2010-04-28 16:56:36.000000000 +0200 -@@ -1149,7 +1149,7 @@ static int __init usbback_init(void) +--- head-2011-02-17.orig/drivers/xen/usbback/usbback.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-02-17/drivers/xen/usbback/usbback.c 2011-02-02 15:10:16.000000000 +0100 +@@ -1140,7 +1140,7 @@ static int __init usbback_init(void) return -ENODEV; mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST; @@ -201,7 +176,7 @@ Also use clear_page() in favor of memset(, 0, PAGE_SIZE). usbif_reqs, GFP_KERNEL); pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) * mmap_pages, GFP_KERNEL); -@@ -1163,7 +1163,6 @@ static int __init usbback_init(void) +@@ -1154,7 +1154,6 @@ static int __init usbback_init(void) for (i = 0; i < mmap_pages; i++) pending_grant_handles[i] = USBBACK_INVALID_HANDLE; diff --git a/patches.xen/xen-mem-hotplug b/patches.xen/xen-mem-hotplug new file mode 100644 index 0000000..07fd402 --- /dev/null +++ b/patches.xen/xen-mem-hotplug @@ -0,0 +1,285 @@ +From: Jiang, Yunhong +Subject: xen/acpi: Add memory hotadd to pvops dom0 +References: bnc#651066 +Patch-mainline: n/a + +When memory hotadd event happen, a Xen hook will be called, to notify +hypervisor of the new added memory. + +Because xen hypervisor will use the new memory to setup frametable/m2p +table, so dom0 will always return success to acpi bios, and notify xen +hypervisor later. + +It add a hook in driver/acpi/acpi_memhotplug.c, but that change is quite +small, not sure if it is acceptable. 
Other method is to provide a xen +specific acpi_memory_device_driver, but I'm not sure if it worth to add +so much changes, to simply avoid two hooks. + +jb: Integrate into base module; cleanup. +Acked-by: jbeulich@novell.com + +--- head-2011-01-30.orig/drivers/acpi/Kconfig 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-01-30/drivers/acpi/Kconfig 2011-02-02 15:10:05.000000000 +0100 +@@ -349,7 +349,7 @@ config ACPI_CONTAINER + + config ACPI_HOTPLUG_MEMORY + tristate "Memory Hotplug" +- depends on MEMORY_HOTPLUG ++ depends on MEMORY_HOTPLUG || XEN_PRIVILEGED_GUEST + default n + help + This driver supports ACPI memory hotplug. The driver +--- head-2011-01-30.orig/drivers/acpi/acpi_memhotplug.c 2010-05-16 23:17:36.000000000 +0200 ++++ head-2011-01-30/drivers/acpi/acpi_memhotplug.c 2011-02-02 15:10:06.000000000 +0100 +@@ -88,6 +88,14 @@ struct acpi_memory_device { + + static int acpi_hotmem_initialized; + ++#ifdef CONFIG_XEN ++#include "../xen/core/acpi_memhotplug.c" ++#define memory_add_physaddr_to_nid(start) 0 ++#else ++static inline int xen_hotadd_mem_init(void) { return 0; } ++static inline void xen_hotadd_mem_exit(void) {} ++#endif ++ + static acpi_status + acpi_memory_get_resource(struct acpi_resource *resource, void *context) + { +@@ -229,6 +237,10 @@ static int acpi_memory_enable_device(str + return result; + } + ++#ifdef CONFIG_XEN ++ return xen_hotadd_memory(mem_device); ++#endif ++ + node = acpi_get_node(mem_device->device->handle); + /* + * Tell the VM there is more memory here... +@@ -312,6 +324,10 @@ static int acpi_memory_disable_device(st + struct acpi_memory_info *info, *n; + + ++#ifdef CONFIG_XEN ++ return -EOPNOTSUPP; ++#endif ++ + /* + * Ask the VM to offline this memory range. + * Note: Assume that this function returns zero on success +@@ -531,6 +547,10 @@ static int __init acpi_memory_device_ini + acpi_status status; + + ++ result = xen_hotadd_mem_init(); ++ if (result < 0) ++ return result; ++ + result = acpi_bus_register_driver(&acpi_memory_device_driver); + + if (result < 0) +@@ -570,6 +590,8 @@ static void __exit acpi_memory_device_ex + + acpi_bus_unregister_driver(&acpi_memory_device_driver); + ++ xen_hotadd_mem_exit(); ++ + return; + } + +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-01-30/drivers/xen/core/acpi_memhotplug.c 2011-02-02 15:10:06.000000000 +0100 +@@ -0,0 +1,192 @@ ++/* ++ * xen_acpi_memhotplug.c - interface to notify Xen on memory device hotadd ++ * ++ * Copyright (C) 2008, Intel corporation ++ * ++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or (at ++ * your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
++ * ++ */ ++ ++#include ++#include ++ ++struct xen_hotmem_entry { ++ struct list_head hotmem_list; ++ uint64_t start; ++ uint64_t end; ++ uint32_t flags; ++ uint32_t pxm; ++}; ++ ++struct xen_hotmem_list { ++ struct list_head list; ++ unsigned int entry_nr; ++}; ++ ++static struct xen_hotmem_list xen_hotmem = { ++ .list = LIST_HEAD_INIT(xen_hotmem.list) ++}; ++static DEFINE_SPINLOCK(xen_hotmem_lock); ++ ++static int xen_hyper_addmem(struct xen_hotmem_entry *entry) ++{ ++ xen_platform_op_t op = { ++ .cmd = XENPF_mem_hotadd, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ }; ++ ++ op.u.mem_add.spfn = entry->start >> PAGE_SHIFT; ++ op.u.mem_add.epfn = entry->end >> PAGE_SHIFT; ++ op.u.mem_add.flags = entry->flags; ++ op.u.mem_add.pxm = entry->pxm; ++ ++ return HYPERVISOR_platform_op(&op); ++} ++ ++static int add_hotmem_entry(int pxm, uint64_t start, ++ uint64_t length, uint32_t flags) ++{ ++ struct xen_hotmem_entry *entry; ++ ++ if (pxm < 0 || !length) ++ return -EINVAL; ++ ++ entry = kzalloc(sizeof(struct xen_hotmem_entry), GFP_ATOMIC); ++ if (!entry) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&entry->hotmem_list); ++ entry->start = start; ++ entry->end = start + length; ++ entry->flags = flags; ++ entry->pxm = pxm; ++ ++ spin_lock(&xen_hotmem_lock); ++ ++ list_add_tail(&entry->hotmem_list, &xen_hotmem.list); ++ xen_hotmem.entry_nr++; ++ ++ spin_unlock(&xen_hotmem_lock); ++ ++ return 0; ++} ++ ++static int free_hotmem_entry(struct xen_hotmem_entry *entry) ++{ ++ list_del(&entry->hotmem_list); ++ kfree(entry); ++ ++ return 0; ++} ++ ++static void xen_hotadd_mem_dpc(struct work_struct *work) ++{ ++ struct list_head *elem, *tmp; ++ struct xen_hotmem_entry *entry; ++ unsigned long flags; ++ int ret; ++ ++ spin_lock_irqsave(&xen_hotmem_lock, flags); ++ list_for_each_safe(elem, tmp, &xen_hotmem.list) { ++ entry = list_entry(elem, struct xen_hotmem_entry, hotmem_list); ++ ret = xen_hyper_addmem(entry); ++ if (ret) ++ pr_warning("xen addmem failed with %x\n", ret); ++ free_hotmem_entry(entry); ++ xen_hotmem.entry_nr--; ++ } ++ spin_unlock_irqrestore(&xen_hotmem_lock, flags); ++} ++ ++static DECLARE_WORK(xen_hotadd_mem_work, xen_hotadd_mem_dpc); ++ ++static int xen_acpi_get_pxm(acpi_handle h) ++{ ++ unsigned long long pxm; ++ acpi_status status; ++ acpi_handle handle; ++ acpi_handle phandle = h; ++ ++ do { ++ handle = phandle; ++ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); ++ if (ACPI_SUCCESS(status)) ++ return pxm; ++ status = acpi_get_parent(handle, &phandle); ++ } while (ACPI_SUCCESS(status)); ++ ++ return -1; ++} ++ ++static int xen_hotadd_memory(struct acpi_memory_device *mem_device) ++{ ++ int pxm, result; ++ int num_enabled = 0; ++ struct acpi_memory_info *info; ++ ++ if (!mem_device) ++ return -EINVAL; ++ ++ pxm = xen_acpi_get_pxm(mem_device->device->handle); ++ ++ if (pxm < 0) ++ return -EINVAL; ++ ++ /* ++ * Always return success to ACPI driver, and notify hypervisor later ++ * because hypervisor will utilize the memory in memory hotadd hypercall ++ */ ++ list_for_each_entry(info, &mem_device->res_list, list) { ++ if (info->enabled) { /* just sanity check...*/ ++ num_enabled++; ++ continue; ++ } ++ /* ++ * If the memory block size is zero, please ignore it. ++ * Don't try to do the following memory hotplug flowchart. 
++ */ ++ if (!info->length) ++ continue; ++ ++ result = add_hotmem_entry(pxm, info->start_addr, ++ info->length, 0); ++ if (result) ++ continue; ++ info->enabled = 1; ++ num_enabled++; ++ } ++ ++ if (!num_enabled) ++ return -EINVAL; ++ ++ schedule_work(&xen_hotadd_mem_work); ++ ++ return 0; ++} ++ ++static int xen_hotadd_mem_init(void) ++{ ++ if (!is_initial_xendomain()) ++ return -ENODEV; ++ ++ return 0; ++} ++ ++static void xen_hotadd_mem_exit(void) ++{ ++ flush_scheduled_work(); ++} diff --git a/patches.xen/xen-modular-blktap b/patches.xen/xen-modular-blktap deleted file mode 100644 index cbc56a1..0000000 --- a/patches.xen/xen-modular-blktap +++ /dev/null @@ -1,27 +0,0 @@ -From: ccoffing@novell.com -Subject: Retain backwards-compatible module name with CONFIG_XEN_BLKDEV_TAP=m -Patch-mainline: obsolete - ---- head-2009-05-29.orig/drivers/xen/blktap/Makefile 2007-06-12 13:13:44.000000000 +0200 -+++ head-2009-05-29/drivers/xen/blktap/Makefile 2009-05-29 12:39:04.000000000 +0200 -@@ -1,5 +1,5 @@ - LINUXINCLUDE += -I../xen/include/public/io - --obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o -+obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o - --xenblktap-y := xenbus.o interface.o blktap.o -+blktap-y := xenbus.o interface.o blocktap.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-05-29/drivers/xen/blktap/blocktap.c 2009-05-29 12:39:04.000000000 +0200 -@@ -0,0 +1 @@ -+#include "blktap.c" ---- head-2009-05-29.orig/drivers/xen/blktap2/Makefile 2009-05-29 10:25:53.000000000 +0200 -+++ head-2009-05-29/drivers/xen/blktap2/Makefile 2009-05-29 12:39:04.000000000 +0200 -@@ -1,3 +1,4 @@ --obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap.o -+obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap2.o - --blktap-objs := control.o ring.o wait_queue.o device.o request.o sysfs.o -+blktap2-y := control.o ring.o wait_queue.o device.o request.o -+blktap2-$(CONFIG_SYSFS) += sysfs.o diff --git a/patches.xen/xen-netback-generalize b/patches.xen/xen-netback-generalize index 857f50f..6f84c6d 100644 --- a/patches.xen/xen-netback-generalize +++ b/patches.xen/xen-netback-generalize @@ -11,10 +11,10 @@ Signed-off-by: Dongxiao Xu jb: various cleanups Acked-by: jbeulich@novell.com ---- head-2010-04-29.orig/drivers/xen/netback/common.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/common.h 2010-04-30 11:11:33.000000000 +0200 -@@ -217,4 +217,74 @@ static inline int netbk_can_sg(struct ne - return netif->features & NETIF_F_SG; +--- head-2011-02-17.orig/drivers/xen/netback/common.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/common.h 2011-02-17 10:33:48.000000000 +0100 +@@ -219,4 +219,74 @@ static inline int netbk_can_sg(struct ne + return netif->can_sg; } +struct pending_tx_info { @@ -53,9 +53,9 @@ Acked-by: jbeulich@novell.com + pending_ring_idx_t dealloc_cons; + + struct list_head pending_inuse_head; -+ struct list_head net_schedule_list; ++ struct list_head schedule_list; + -+ spinlock_t net_schedule_list_lock; ++ spinlock_t schedule_list_lock; + spinlock_t release_lock; + + struct page **mmap_pages; @@ -88,15 +88,17 @@ Acked-by: jbeulich@novell.com + unsigned long mfn_list[MAX_MFN_ALLOC]; +}; #endif /* __NETIF__BACKEND__COMMON_H__ */ ---- head-2010-04-29.orig/drivers/xen/netback/netback.c 2010-01-04 13:31:57.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/netback.c 2010-04-30 11:49:12.000000000 +0200 -@@ -35,23 +35,18 @@ - */ +--- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-01-03 13:30:15.000000000 +0100 ++++ 
head-2011-02-17/drivers/xen/netback/netback.c 2011-03-01 11:53:28.000000000 +0100 +@@ -36,6 +36,7 @@ #include "common.h" + #include +#include + #include #include #include +@@ -43,18 +44,12 @@ /*define NETBE_DEBUG_INTERRUPT*/ @@ -106,7 +108,7 @@ Acked-by: jbeulich@novell.com - u8 copy:1; -}; +static struct xen_netbk *__read_mostly xen_netbk; -+static const unsigned int __read_mostly netbk_nr_groups = 1; ++static const unsigned int netbk_nr_groups = 1; -struct netbk_tx_pending_inuse { - struct list_head list; @@ -119,7 +121,7 @@ Acked-by: jbeulich@novell.com static void make_tx_response(netif_t *netif, netif_tx_request_t *txp, s8 st); -@@ -62,81 +57,67 @@ static netif_rx_response_t *make_rx_resp +@@ -65,47 +60,56 @@ static netif_rx_response_t *make_rx_resp u16 size, u16 flags); @@ -201,7 +203,10 @@ Acked-by: jbeulich@novell.com + return ext.e.idx; } - #define PKT_PROT_LEN 64 + /* +@@ -117,36 +121,13 @@ static inline int netif_page_index(struc + sizeof(struct iphdr) + MAX_IPOPTLEN + \ + sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) -static struct pending_tx_info { - netif_tx_request_t req; @@ -212,11 +217,11 @@ Acked-by: jbeulich@novell.com #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1)) -static PEND_RING_IDX pending_prod, pending_cons; -#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons) - +- -/* Freed TX SKBs get batched on this ring before return to pending_ring. */ -static u16 dealloc_ring[MAX_PENDING_REQS]; -static PEND_RING_IDX dealloc_prod, dealloc_cons; -- + -/* Doubly-linked list of in-use pending entries. */ -static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS]; -static LIST_HEAD(pending_inuse_head); @@ -241,7 +246,7 @@ Acked-by: jbeulich@novell.com /* Setting this allows the safe use of this driver without netloop. 
*/ static int MODPARM_copy_skb = 1; -@@ -148,13 +129,13 @@ MODULE_PARM_DESC(permute_returns, "Rando +@@ -158,13 +139,13 @@ MODULE_PARM_DESC(permute_returns, "Rando int netbk_copy_skb_mode; @@ -259,7 +264,7 @@ Acked-by: jbeulich@novell.com { struct xen_memory_reservation reservation = { .extent_order = 0, -@@ -162,24 +143,27 @@ static int check_mfn(int nr) +@@ -172,24 +153,27 @@ static int check_mfn(int nr) }; int rc; @@ -291,12 +296,12 @@ Acked-by: jbeulich@novell.com - !list_empty(&net_schedule_list)) - tasklet_schedule(&net_tx_tasklet); + if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && -+ !list_empty(&netbk->net_schedule_list)) ++ !list_empty(&netbk->schedule_list)) + tasklet_schedule(&netbk->net_tx_tasklet); } static struct sk_buff *netbk_copy_skb(struct sk_buff *skb) -@@ -288,6 +272,7 @@ static void tx_queue_callback(unsigned l +@@ -298,6 +282,7 @@ static void tx_queue_callback(unsigned l int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev) { netif_t *netif = netdev_priv(dev); @@ -304,7 +309,7 @@ Acked-by: jbeulich@novell.com BUG_ON(skb->dev != dev); -@@ -337,8 +322,9 @@ int netif_be_start_xmit(struct sk_buff * +@@ -346,8 +331,9 @@ int netif_be_start_xmit(struct sk_buff * } } @@ -316,7 +321,7 @@ Acked-by: jbeulich@novell.com return NETDEV_TX_OK; -@@ -393,19 +379,29 @@ static u16 netbk_gop_frag(netif_t *netif +@@ -402,19 +388,29 @@ static u16 netbk_gop_frag(netif_t *netif multicall_entry_t *mcl; netif_rx_request_t *req; unsigned long old_mfn, new_mfn; @@ -349,7 +354,7 @@ Acked-by: jbeulich@novell.com copy_gop->source.domid = src_pend->netif->domid; copy_gop->source.u.ref = src_pend->req.gref; copy_gop->flags |= GNTCOPY_source_gref; -@@ -421,7 +417,7 @@ static u16 netbk_gop_frag(netif_t *netif +@@ -430,7 +426,7 @@ static u16 netbk_gop_frag(netif_t *netif } else { meta->copy = 0; if (!xen_feature(XENFEAT_auto_translated_physmap)) { @@ -358,7 +363,7 @@ Acked-by: jbeulich@novell.com /* * Set the new P2M table entry before -@@ -566,7 +562,7 @@ static void netbk_add_frag_responses(net +@@ -570,7 +566,7 @@ static void netbk_add_frag_responses(net } } @@ -367,16 +372,10 @@ Acked-by: jbeulich@novell.com { netif_t *netif = NULL; s8 status; -@@ -576,52 +572,37 @@ static void net_rx_action(unsigned long - struct sk_buff_head rxq; - struct sk_buff *skb; - int notify_nr = 0; -- int ret; -+ int ret, eagain; +@@ -584,47 +580,33 @@ static void net_rx_action(unsigned long int nr_frags; int count; unsigned long offset; -- int eagain; - - /* - * Putting hundreds of bytes on the stack is considered rude. 
@@ -432,7 +431,7 @@ Acked-by: jbeulich@novell.com break; } -@@ -636,39 +617,39 @@ static void net_rx_action(unsigned long +@@ -639,39 +621,39 @@ static void net_rx_action(unsigned long break; } @@ -479,7 +478,7 @@ Acked-by: jbeulich@novell.com mcl->args[2] = npo.copy_prod; } -@@ -676,7 +657,7 @@ static void net_rx_action(unsigned long +@@ -679,7 +661,7 @@ static void net_rx_action(unsigned long if (!npo.mcl_prod) return; @@ -488,7 +487,7 @@ Acked-by: jbeulich@novell.com ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod); BUG_ON(ret != 0); -@@ -707,7 +688,7 @@ static void net_rx_action(unsigned long +@@ -705,13 +687,13 @@ static void net_rx_action(unsigned long atomic_set(&(skb_shinfo(skb)->dataref), 1); skb_shinfo(skb)->frag_list = NULL; skb_shinfo(skb)->nr_frags = 0; @@ -496,19 +495,17 @@ Acked-by: jbeulich@novell.com + netbk_free_pages(nr_frags, netbk->meta + npo.meta_cons + 1); } - if(!eagain) -@@ -716,7 +697,7 @@ static void net_rx_action(unsigned long - netif->stats.tx_packets++; - } + skb->dev->stats.tx_bytes += skb->len; + skb->dev->stats.tx_packets++; - id = meta[npo.meta_cons].id; + id = netbk->meta[npo.meta_cons].id; flags = nr_frags ? NETRXF_more_data : 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ -@@ -724,14 +705,14 @@ static void net_rx_action(unsigned long - else if (skb->proto_data_valid) /* remote but checksummed? */ - flags |= NETRXF_data_validated; + switch (skb->ip_summed) { +@@ -723,14 +705,14 @@ static void net_rx_action(unsigned long + break; + } - if (meta[npo.meta_cons].copy) + if (netbk->meta[npo.meta_cons].copy) @@ -523,7 +520,7 @@ Acked-by: jbeulich@novell.com struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_RESPONSE(&netif->rx, -@@ -739,7 +720,7 @@ static void net_rx_action(unsigned long +@@ -738,7 +720,7 @@ static void net_rx_action(unsigned long resp->flags |= NETRXF_extra_info; @@ -532,7 +529,7 @@ Acked-by: jbeulich@novell.com gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; -@@ -749,13 +730,13 @@ static void net_rx_action(unsigned long +@@ -748,13 +730,13 @@ static void net_rx_action(unsigned long } netbk_add_frag_responses(netif, status, @@ -549,15 +546,7 @@ Acked-by: jbeulich@novell.com if (netif_queue_stopped(netif->dev) && netif_schedulable(netif) && -@@ -772,45 +753,46 @@ static void net_rx_action(unsigned long - { - netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 + - !!skb_shinfo(skb)->gso_size; -- skb_queue_head(&rx_queue, skb); -+ skb_queue_head(&netbk->rx_queue, skb); - } - - npo.meta_cons += nr_frags + 1; +@@ -768,38 +750,39 @@ static void net_rx_action(unsigned long } if (notify_nr == 1) { @@ -608,45 +597,61 @@ Acked-by: jbeulich@novell.com + tasklet_schedule(&xen_netbk[group].net_tx_tasklet); } - struct net_device_stats *netif_be_get_stats(struct net_device *dev) -@@ -826,27 +808,31 @@ static int __on_net_schedule_list(netif_ + static int __on_net_schedule_list(netif_t *netif) +@@ -807,7 +790,7 @@ static int __on_net_schedule_list(netif_ + return netif->list.next != NULL; + } +-/* Must be called with net_schedule_list_lock held. */ ++/* Must be called with netbk->schedule_list_lock held. 
*/ static void remove_from_net_schedule_list(netif_t *netif) { -- spin_lock_irq(&net_schedule_list_lock); -+ struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; -+ -+ spin_lock_irq(&netbk->net_schedule_list_lock); if (likely(__on_net_schedule_list(netif))) { - list_del(&netif->list); - netif->list.next = NULL; - netif_put(netif); +@@ -817,34 +800,35 @@ static void remove_from_net_schedule_lis + } + } + +-static netif_t *poll_net_schedule_list(void) ++static netif_t *poll_net_schedule_list(struct xen_netbk *netbk) + { + netif_t *netif = NULL; + +- spin_lock_irq(&net_schedule_list_lock); +- if (!list_empty(&net_schedule_list)) { +- netif = list_first_entry(&net_schedule_list, netif_t, list); ++ spin_lock_irq(&netbk->schedule_list_lock); ++ if (!list_empty(&netbk->schedule_list)) { ++ netif = list_first_entry(&netbk->schedule_list, netif_t, list); + netif_get(netif); + remove_from_net_schedule_list(netif); } - spin_unlock_irq(&net_schedule_list_lock); -+ spin_unlock_irq(&netbk->net_schedule_list_lock); ++ spin_unlock_irq(&netbk->schedule_list_lock); + return netif; } static void add_to_net_schedule_list_tail(netif_t *netif) { + struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; -+ + unsigned long flags; + if (__on_net_schedule_list(netif)) return; -- spin_lock_irq(&net_schedule_list_lock); -+ spin_lock_irq(&netbk->net_schedule_list_lock); +- spin_lock_irqsave(&net_schedule_list_lock, flags); ++ spin_lock_irqsave(&netbk->schedule_list_lock, flags); if (!__on_net_schedule_list(netif) && likely(netif_schedulable(netif))) { - list_add_tail(&netif->list, &net_schedule_list); -+ list_add_tail(&netif->list, &netbk->net_schedule_list); ++ list_add_tail(&netif->list, &netbk->schedule_list); netif_get(netif); } -- spin_unlock_irq(&net_schedule_list_lock); -+ spin_unlock_irq(&netbk->net_schedule_list_lock); +- spin_unlock_irqrestore(&net_schedule_list_lock, flags); ++ spin_unlock_irqrestore(&netbk->schedule_list_lock, flags); } /* -@@ -869,7 +855,7 @@ void netif_schedule_work(netif_t *netif) +@@ -867,15 +851,17 @@ void netif_schedule_work(netif_t *netif) if (more_to_do) { add_to_net_schedule_list_tail(netif); @@ -655,6 +660,18 @@ Acked-by: jbeulich@novell.com } } + void netif_deschedule_work(netif_t *netif) + { +- spin_lock_irq(&net_schedule_list_lock); ++ struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; ++ ++ spin_lock_irq(&netbk->schedule_list_lock); + remove_from_net_schedule_list(netif); +- spin_unlock_irq(&net_schedule_list_lock); ++ spin_unlock_irq(&netbk->schedule_list_lock); + } + + @@ -906,17 +892,19 @@ static void tx_credit_callback(unsigned netif_schedule_work(netif); } @@ -680,7 +697,7 @@ Acked-by: jbeulich@novell.com u16 tmp; while (dc != dp) { -@@ -931,62 +919,67 @@ static void permute_dealloc_ring(PEND_RI +@@ -931,67 +919,73 @@ static void permute_dealloc_ring(PEND_RI } } @@ -693,7 +710,6 @@ Acked-by: jbeulich@novell.com - PEND_RING_IDX dc, dp; + pending_ring_idx_t dc, dp; netif_t *netif; - int ret; LIST_HEAD(list); - dc = dealloc_cons; @@ -738,13 +754,23 @@ Acked-by: jbeulich@novell.com gop++; } - if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB || -- list_empty(&pending_inuse_head)) -+ list_empty(&netbk->pending_inuse_head)) - break; +- } while (dp != dealloc_prod); ++ } while (dp != netbk->dealloc_prod); - /* Copy any entries that have been pending for too long. 
*/ +- dealloc_cons = dc; ++ netbk->dealloc_cons = dc; + + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, +- tx_unmap_ops, gop - tx_unmap_ops)) ++ netbk->tx_unmap_ops, ++ gop - netbk->tx_unmap_ops)) + BUG(); + + /* Copy any entries that have been pending for too long. */ + if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB && +- !list_empty(&pending_inuse_head)) { - list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) { ++ !list_empty(&netbk->pending_inuse_head)) { + list_for_each_entry_safe(inuse, n, &netbk->pending_inuse_head, list) { + struct pending_tx_info *pending_tx_info + = netbk->pending_tx_info; @@ -762,21 +788,8 @@ Acked-by: jbeulich@novell.com case 0: list_move_tail(&inuse->list, &list); continue; -@@ -999,26 +992,30 @@ inline static void net_tx_action_dealloc - - break; - } -- } while (dp != dealloc_prod); -+ } while (dp != netbk->dealloc_prod); - -- dealloc_cons = dc; -+ netbk->dealloc_cons = dc; - - ret = HYPERVISOR_grant_table_op( -- GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops); -+ GNTTABOP_unmap_grant_ref, netbk->tx_unmap_ops, -+ gop - netbk->tx_unmap_ops); - BUG_ON(ret); +@@ -1007,17 +1001,20 @@ inline static void net_tx_action_dealloc + } list_for_each_entry_safe(inuse, n, &list, list) { - pending_idx = inuse - pending_inuse; @@ -799,7 +812,7 @@ Acked-by: jbeulich@novell.com netif_put(netif); -@@ -1095,9 +1092,14 @@ static gnttab_map_grant_ref_t *netbk_get +@@ -1094,9 +1091,14 @@ static gnttab_map_grant_ref_t *netbk_get start = ((unsigned long)shinfo->frags[0].page == pending_idx); for (i = start; i < shinfo->nr_frags; i++, txp++) { @@ -808,15 +821,15 @@ Acked-by: jbeulich@novell.com + pending_ring_idx_t index = MASK_PEND_IDX(netbk->pending_cons++); + struct pending_tx_info *pending_tx_info = + netbk->pending_tx_info; -+ -+ pending_idx = netbk->pending_ring[index]; - gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx), ++ pending_idx = netbk->pending_ring[index]; ++ + gnttab_set_map_op(mop++, idx_to_kaddr(netbk, pending_idx), GNTMAP_host_map | GNTMAP_readonly, txp->gref, netif->domid); -@@ -1110,11 +1112,12 @@ static gnttab_map_grant_ref_t *netbk_get +@@ -1109,11 +1111,12 @@ static gnttab_map_grant_ref_t *netbk_get return mop; } @@ -831,10 +844,10 @@ Acked-by: jbeulich@novell.com netif_t *netif = pending_tx_info[pending_idx].netif; netif_tx_request_t *txp; struct skb_shared_info *shinfo = skb_shinfo(skb); -@@ -1124,14 +1127,16 @@ static int netbk_tx_check_mop(struct sk_ +@@ -1123,14 +1126,16 @@ static int netbk_tx_check_mop(struct sk_ /* Check status of header. */ err = mop->status; - if (unlikely(err)) { + if (unlikely(err != GNTST_okay)) { + pending_ring_idx_t index = MASK_PEND_IDX(netbk->pending_prod++); + txp = &pending_tx_info[pending_idx].req; @@ -851,7 +864,7 @@ Acked-by: jbeulich@novell.com } /* Skip first skb fragment if it is on same page as header fragment. */ -@@ -1139,25 +1144,27 @@ static int netbk_tx_check_mop(struct sk_ +@@ -1138,25 +1143,27 @@ static int netbk_tx_check_mop(struct sk_ for (i = start; i < nr_frags; i++) { int j, newerr; @@ -861,14 +874,14 @@ Acked-by: jbeulich@novell.com /* Check error status: if okay then remember grant handle. */ newerr = (++mop)->status; - if (likely(!newerr)) { + if (likely(newerr == GNTST_okay)) { - set_phys_to_machine(idx_to_pfn(pending_idx), + set_phys_to_machine(idx_to_pfn(netbk, pending_idx), FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT)); - grant_tx_handle[pending_idx] = mop->handle; + netbk->grant_tx_handle[pending_idx] = mop->handle; /* Had a previous error? 
Invalidate this fragment. */ - if (unlikely(err)) + if (unlikely(err != GNTST_okay)) - netif_idx_release(pending_idx); + netif_idx_release(netbk, pending_idx); continue; @@ -883,7 +896,7 @@ Acked-by: jbeulich@novell.com netif_put(netif); /* Not the first error? Preceding frags already invalidated. */ -@@ -1166,10 +1173,10 @@ static int netbk_tx_check_mop(struct sk_ +@@ -1165,10 +1172,10 @@ static int netbk_tx_check_mop(struct sk_ /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); @@ -896,7 +909,7 @@ Acked-by: jbeulich@novell.com } /* Remember the error: invalidate all subsequent fragments. */ -@@ -1180,7 +1187,7 @@ static int netbk_tx_check_mop(struct sk_ +@@ -1179,7 +1186,7 @@ static int netbk_tx_check_mop(struct sk_ return err; } @@ -905,7 +918,7 @@ Acked-by: jbeulich@novell.com { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; -@@ -1193,12 +1200,12 @@ static void netbk_fill_frags(struct sk_b +@@ -1192,12 +1199,12 @@ static void netbk_fill_frags(struct sk_b pending_idx = (unsigned long)frag->page; @@ -923,19 +936,18 @@ Acked-by: jbeulich@novell.com frag->size = txp->size; frag->page_offset = txp->offset; -@@ -1260,9 +1267,9 @@ static int netbk_set_skb_gso(struct sk_b +@@ -1259,8 +1266,9 @@ static int netbk_set_skb_gso(struct sk_b } /* Called after netfront has transmitted */ -static void net_tx_action(unsigned long unused) +static void net_tx_action(unsigned long group) { -- struct list_head *ent; + struct xen_netbk *netbk = &xen_netbk[group]; struct sk_buff *skb; netif_t *netif; netif_tx_request_t txreq; -@@ -1274,15 +1281,15 @@ static void net_tx_action(unsigned long +@@ -1272,14 +1280,14 @@ static void net_tx_action(unsigned long unsigned int data_len; int ret, work_to_do; @@ -948,16 +960,14 @@ Acked-by: jbeulich@novell.com - while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && - !list_empty(&net_schedule_list)) { + while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && -+ !list_empty(&netbk->net_schedule_list)) { ++ !list_empty(&netbk->schedule_list)) { /* Get a netif from the list with work to do. */ -- ent = net_schedule_list.next; -- netif = list_entry(ent, netif_t, list); -+ netif = list_first_entry(&netbk->net_schedule_list, -+ netif_t, list); - netif_get(netif); - remove_from_net_schedule_list(netif); +- netif = poll_net_schedule_list(); ++ netif = poll_net_schedule_list(netbk); + if (!netif) + continue; -@@ -1364,7 +1371,7 @@ static void net_tx_action(unsigned long +@@ -1361,7 +1369,7 @@ static void net_tx_action(unsigned long continue; } @@ -966,7 +976,7 @@ Acked-by: jbeulich@novell.com data_len = (txreq.size > PKT_PROT_LEN && ret < MAX_SKB_FRAGS) ? -@@ -1392,14 +1399,14 @@ static void net_tx_action(unsigned long +@@ -1389,14 +1397,14 @@ static void net_tx_action(unsigned long } } @@ -984,7 +994,7 @@ Acked-by: jbeulich@novell.com *((u16 *)skb->data) = pending_idx; __skb_put(skb, data_len); -@@ -1414,20 +1421,20 @@ static void net_tx_action(unsigned long +@@ -1411,20 +1419,20 @@ static void net_tx_action(unsigned long skb_shinfo(skb)->frags[0].page = (void *)~0UL; } @@ -1009,7 +1019,7 @@ Acked-by: jbeulich@novell.com goto out; /* NOTE: some maps may fail with GNTST_eagain, which could be successfully -@@ -1435,20 +1442,21 @@ static void net_tx_action(unsigned long +@@ -1432,22 +1440,23 @@ static void net_tx_action(unsigned long * req and let the frontend resend the relevant packet again. 
This is fine * because it is unlikely that a network buffer will be paged out or shared, * and therefore it is unlikely to fail with GNTST_eagain. */ @@ -1024,13 +1034,15 @@ Acked-by: jbeulich@novell.com - while ((skb = __skb_dequeue(&tx_queue)) != NULL) { + mop = netbk->tx_map_ops; + while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { + struct net_device *dev; netif_tx_request_t *txp; pending_idx = *((u16 *)skb->data); - netif = pending_tx_info[pending_idx].netif; ++ netif = netbk->pending_tx_info[pending_idx].netif; + dev = netif->dev; - txp = &pending_tx_info[pending_idx].req; -+ netif = netbk->pending_tx_info[pending_idx].netif; -+ txp = &netbk->pending_tx_info[pending_idx].req; ++ txp = &netbk->pending_tx_info[pending_idx].req; /* Check the remap error code. */ - if (unlikely(netbk_tx_check_mop(skb, &mop))) { @@ -1038,7 +1050,7 @@ Acked-by: jbeulich@novell.com DPRINTK("netback grant failed.\n"); skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); -@@ -1457,7 +1465,7 @@ static void net_tx_action(unsigned long +@@ -1457,7 +1466,7 @@ static void net_tx_action(unsigned long data_len = skb->len; memcpy(skb->data, @@ -1047,7 +1059,7 @@ Acked-by: jbeulich@novell.com data_len); if (data_len < txp->size) { /* Append the packet payload as a fragment. */ -@@ -1465,7 +1473,7 @@ static void net_tx_action(unsigned long +@@ -1465,7 +1474,7 @@ static void net_tx_action(unsigned long txp->size -= data_len; } else { /* Schedule a response immediately. */ @@ -1055,17 +1067,17 @@ Acked-by: jbeulich@novell.com + netif_idx_release(netbk, pending_idx); } - /* -@@ -1481,7 +1489,7 @@ static void net_tx_action(unsigned long - } - skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank); + if (txp->flags & NETTXF_csum_blank) +@@ -1475,7 +1484,7 @@ static void net_tx_action(unsigned long + else + skb->ip_summed = CHECKSUM_NONE; - netbk_fill_frags(skb); + netbk_fill_frags(netbk, skb); - skb->dev = netif->dev; - skb->protocol = eth_type_trans(skb, skb->dev); -@@ -1502,36 +1510,39 @@ static void net_tx_action(unsigned long + /* + * If the initial fragment was < PKT_PROT_LEN then +@@ -1511,36 +1520,39 @@ static void net_tx_action(unsigned long out: if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB && @@ -1118,7 +1130,7 @@ Acked-by: jbeulich@novell.com } irqreturn_t netif_be_int(int irq, void *dev_id) -@@ -1539,7 +1550,7 @@ irqreturn_t netif_be_int(int irq, void * +@@ -1548,7 +1560,7 @@ irqreturn_t netif_be_int(int irq, void * netif_t *netif = dev_id; add_to_net_schedule_list_tail(netif); @@ -1127,61 +1139,67 @@ Acked-by: jbeulich@novell.com if (netif_schedulable(netif) && !netbk_queue_full(netif)) netif_wake_queue(netif->dev); -@@ -1605,29 +1616,35 @@ static irqreturn_t netif_be_dbg(int irq, +@@ -1612,33 +1624,38 @@ static netif_rx_response_t *make_rx_resp + #ifdef NETBE_DEBUG_INTERRUPT + static irqreturn_t netif_be_dbg(int irq, void *dev_id) { - struct list_head *ent; +- struct list_head *ent; netif_t *netif; - int i = 0; + unsigned int i = 0, group; - printk(KERN_ALERT "netif_schedule_list:\n"); + pr_alert("netif_schedule_list:\n"); - spin_lock_irq(&net_schedule_list_lock); - list_for_each (ent, &net_schedule_list) { - netif = list_entry(ent, netif_t, list); -- printk(KERN_ALERT " %d: private(rx_req_cons=%08x " -- "rx_resp_prod=%08x\n", -- i, netif->rx.req_cons, netif->rx.rsp_prod_pvt); -- printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n", -- netif->tx.req_cons, netif->tx.rsp_prod_pvt); -- printk(KERN_ALERT " shared(rx_req_prod=%08x " -- "rx_resp_prod=%08x\n", -- netif->rx.sring->req_prod, 
netif->rx.sring->rsp_prod); -- printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n", -- netif->rx.sring->rsp_event, netif->tx.sring->req_prod); -- printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n", -- netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event); +- pr_alert(" %d: private(rx_req_cons=%08x " +- "rx_resp_prod=%08x\n", +- i, netif->rx.req_cons, netif->rx.rsp_prod_pvt); +- pr_alert(" tx_req_cons=%08x tx_resp_prod=%08x)\n", +- netif->tx.req_cons, netif->tx.rsp_prod_pvt); +- pr_alert(" shared(rx_req_prod=%08x " +- "rx_resp_prod=%08x\n", +- netif->rx.sring->req_prod, netif->rx.sring->rsp_prod); +- pr_alert(" rx_event=%08x tx_req_prod=%08x\n", +- netif->rx.sring->rsp_event, +- netif->tx.sring->req_prod); +- pr_alert(" tx_resp_prod=%08x, tx_event=%08x)\n", +- netif->tx.sring->rsp_prod, +- netif->tx.sring->rsp_event); - i++; + for (group = 0; group < netbk_nr_groups; ++group) { + struct xen_netbk *netbk = &xen_netbk[group]; + -+ spin_lock_irq(&netbk->net_schedule_list_lock); ++ spin_lock_irq(&netbk->schedule_list_lock); + -+ list_for_each(ent, &netbk->net_schedule_list) { -+ netif = list_entry(ent, netif_t, list); -+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x " -+ "rx_resp_prod=%08x\n", -+ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt); -+ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n", -+ netif->tx.req_cons, netif->tx.rsp_prod_pvt); -+ printk(KERN_ALERT " shared(rx_req_prod=%08x " -+ "rx_resp_prod=%08x\n", -+ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod); -+ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n", -+ netif->rx.sring->rsp_event, netif->tx.sring->req_prod); -+ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n", -+ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event); ++ list_for_each_entry(netif, &netbk->schedule_list, list) { ++ pr_alert(" %d: private(rx_req_cons=%08x " ++ "rx_resp_prod=%08x\n", i, ++ netif->rx.req_cons, netif->rx.rsp_prod_pvt); ++ pr_alert(" tx_req_cons=%08x tx_resp_prod=%08x)\n", ++ netif->tx.req_cons, netif->tx.rsp_prod_pvt); ++ pr_alert(" shared(rx_req_prod=%08x " ++ "rx_resp_prod=%08x\n", ++ netif->rx.sring->req_prod, ++ netif->rx.sring->rsp_prod); ++ pr_alert(" rx_event=%08x tx_req_prod=%08x\n", ++ netif->rx.sring->rsp_event, ++ netif->tx.sring->req_prod); ++ pr_alert(" tx_resp_prod=%08x, tx_event=%08x)\n", ++ netif->tx.sring->rsp_prod, ++ netif->tx.sring->rsp_event); + i++; + } + -+ spin_unlock_irq(&netbk->netbk->net_schedule_list_lock); ++ spin_unlock_irq(&netbk->netbk->schedule_list_lock); } - spin_unlock_irq(&net_schedule_list_lock); - printk(KERN_ALERT " ** End of netif_schedule_list **\n"); + pr_alert(" ** End of netif_schedule_list **\n"); return IRQ_HANDLED; -@@ -1642,7 +1659,8 @@ static struct irqaction netif_be_dbg_act +@@ -1653,46 +1670,66 @@ static struct irqaction netif_be_dbg_act static int __init netback_init(void) { @@ -1191,13 +1209,25 @@ Acked-by: jbeulich@novell.com struct page *page; if (!is_running_on_xen()) -@@ -1651,37 +1669,55 @@ static int __init netback_init(void) + return -ENODEV; + ++ xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), ++ GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, ++ PAGE_KERNEL); ++ if (!xen_netbk) { ++ pr_err("%s: out of memory\n", __func__); ++ return -ENOMEM; ++ } ++ /* We can increase reservation by this much in net_rx_action(). 
*/ - balloon_update_driver_allowance(NET_RX_RING_SIZE); +- balloon_update_driver_allowance(NET_RX_RING_SIZE); ++ balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE); - skb_queue_head_init(&rx_queue); - skb_queue_head_init(&tx_queue); -- ++ for (group = 0; group < netbk_nr_groups; group++) { ++ struct xen_netbk *netbk = &xen_netbk[group]; + - init_timer(&net_timer); - net_timer.data = 0; - net_timer.function = net_alarm; @@ -1208,13 +1238,11 @@ Acked-by: jbeulich@novell.com - - mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS); - if (mmap_pages == NULL) { -- printk("%s: out of memory\n", __FUNCTION__); -+ xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), -+ GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, PAGE_KERNEL); -+ if (!xen_netbk) { -+ printk(KERN_ALERT "%s: out of memory\n", __func__); - return -ENOMEM; - } +- pr_err("%s: out of memory\n", __FUNCTION__); +- return -ENOMEM; +- } ++ tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group); ++ tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group); - for (i = 0; i < MAX_PENDING_REQS; i++) { - page = mmap_pages[i]; @@ -1222,29 +1250,9 @@ Acked-by: jbeulich@novell.com - netif_set_page_index(page, i); - INIT_LIST_HEAD(&pending_inuse[i].list); - } -+ for (group = 0; group < netbk_nr_groups; group++) { -+ struct xen_netbk *netbk = &xen_netbk[group]; - -- pending_cons = 0; -- pending_prod = MAX_PENDING_REQS; -- for (i = 0; i < MAX_PENDING_REQS; i++) -- pending_ring[i] = i; -+ tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group); -+ tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group); - -- spin_lock_init(&net_schedule_list_lock); -- INIT_LIST_HEAD(&net_schedule_list); + skb_queue_head_init(&netbk->rx_queue); + skb_queue_head_init(&netbk->tx_queue); + -+ netbk->mmap_pages = -+ alloc_empty_pages_and_pagevec(MAX_PENDING_REQS); -+ if (netbk->mmap_pages == NULL) { -+ printk(KERN_ALERT "%s: out of memory\n", __func__); -+ rc = -ENOMEM; -+ goto failed_init; -+ } -+ + init_timer(&netbk->net_timer); + netbk->net_timer.data = group; + netbk->net_timer.function = net_alarm; @@ -1255,13 +1263,27 @@ Acked-by: jbeulich@novell.com + netbk_tx_pending_timeout; + + netbk->pending_prod = MAX_PENDING_REQS; -+ + +- pending_cons = 0; +- pending_prod = MAX_PENDING_REQS; +- for (i = 0; i < MAX_PENDING_REQS; i++) +- pending_ring[i] = i; + INIT_LIST_HEAD(&netbk->pending_inuse_head); -+ INIT_LIST_HEAD(&netbk->net_schedule_list); -+ -+ spin_lock_init(&netbk->net_schedule_list_lock); ++ INIT_LIST_HEAD(&netbk->schedule_list); + +- spin_lock_init(&net_schedule_list_lock); +- INIT_LIST_HEAD(&net_schedule_list); ++ spin_lock_init(&netbk->schedule_list_lock); + spin_lock_init(&netbk->release_lock); + ++ netbk->mmap_pages = ++ alloc_empty_pages_and_pagevec(MAX_PENDING_REQS); ++ if (netbk->mmap_pages == NULL) { ++ pr_err("%s: out of memory\n", __func__); ++ rc = -ENOMEM; ++ goto failed_init; ++ } ++ + for (i = 0; i < MAX_PENDING_REQS; i++) { + page = netbk->mmap_pages[i]; + SetPageForeign(page, netif_page_release); @@ -1273,7 +1295,7 @@ Acked-by: jbeulich@novell.com netbk_copy_skb_mode = NETBK_DONT_COPY_SKB; if (MODPARM_copy_skb) { -@@ -1703,6 +1739,19 @@ static int __init netback_init(void) +@@ -1714,6 +1751,19 @@ static int __init netback_init(void) #endif return 0; @@ -1284,10 +1306,10 @@ Acked-by: jbeulich@novell.com + + free_empty_pages_and_pagevec(netbk->mmap_pages, + MAX_PENDING_REQS); -+ del_timer(&netbk->tx_pending_timer); -+ del_timer(&netbk->net_timer); + } + vfree(xen_netbk); ++ 
balloon_update_driver_allowance(-(long)netbk_nr_groups ++ * NET_RX_RING_SIZE); + + return rc; } diff --git a/patches.xen/xen-netback-kernel-threads b/patches.xen/xen-netback-kernel-threads index 319814e..0284cbc 100644 --- a/patches.xen/xen-netback-kernel-threads +++ b/patches.xen/xen-netback-kernel-threads @@ -22,13 +22,20 @@ Signed-off-by: Ian Campbell Cc: Jeremy Fitzhardinge # Cc: "Xu, Dongxiao" +Subject: Add a missing test to tx_work_todo. + +Add a test so that, when netback is using worker threads, net_tx_action() +gets called in a timely manner when the pending_inuse list is populated. + +Signed-off-by: Paul Durrant + jb: changed write_seq{,un}lock_irq() to write_seq{,un}lock_bh(), and made the use of kernel threads optional (but default) Acked-by: jbeulich@novell.com ---- head-2010-04-29.orig/drivers/xen/core/gnttab.c 2010-04-15 11:42:34.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/gnttab.c 2010-04-15 11:44:26.000000000 +0200 -@@ -554,14 +554,14 @@ int gnttab_copy_grant_page(grant_ref_t r +--- head-2011-02-17.orig/drivers/xen/core/gnttab.c 2011-01-14 15:13:58.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/gnttab.c 2010-09-23 17:06:35.000000000 +0200 +@@ -552,14 +552,14 @@ int gnttab_copy_grant_page(grant_ref_t r mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); @@ -45,18 +52,18 @@ Acked-by: jbeulich@novell.com put_page(new_page); err = -EBUSY; goto out; -@@ -578,7 +578,7 @@ int gnttab_copy_grant_page(grant_ref_t r +@@ -576,7 +576,7 @@ int gnttab_copy_grant_page(grant_ref_t r BUG_ON(err); - BUG_ON(unmap.status); + BUG_ON(unmap.status != GNTST_okay); - write_sequnlock(&gnttab_dma_lock); + write_sequnlock_bh(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); ---- head-2010-04-29.orig/drivers/xen/netback/common.h 2010-04-30 11:32:08.000000000 +0200 -+++ head-2010-04-29/drivers/xen/netback/common.h 2010-04-30 11:32:26.000000000 +0200 -@@ -239,8 +239,16 @@ struct netbk_tx_pending_inuse { +--- head-2011-02-17.orig/drivers/xen/netback/common.h 2011-02-09 16:21:50.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/common.h 2011-02-17 10:34:35.000000000 +0100 +@@ -241,8 +241,16 @@ struct netbk_tx_pending_inuse { #define MAX_MFN_ALLOC 64 struct xen_netbk { @@ -75,17 +82,17 @@ Acked-by: jbeulich@novell.com struct sk_buff_head rx_queue; struct sk_buff_head tx_queue; ---- head-2010-04-29.orig/drivers/xen/netback/netback.c 2010-04-30 11:49:27.000000000 +0200 -+++ head-2010-04-29/drivers/xen/netback/netback.c 2010-04-30 11:49:32.000000000 +0200 -@@ -35,6 +35,7 @@ - */ +--- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-03-01 11:53:33.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/netback.c 2011-03-02 13:33:15.000000000 +0100 +@@ -36,6 +36,7 @@ #include "common.h" + #include +#include #include + #include #include - #include -@@ -43,6 +44,8 @@ +@@ -46,6 +47,8 @@ struct xen_netbk *__read_mostly xen_netbk; unsigned int __read_mostly netbk_nr_groups; @@ -94,7 +101,7 @@ Acked-by: jbeulich@novell.com #define GET_GROUP_INDEX(netif) ((netif)->group) -@@ -127,7 +130,11 @@ static int MODPARM_permute_returns = 0; +@@ -137,7 +140,11 @@ static int MODPARM_permute_returns = 0; module_param_named(permute_returns, MODPARM_permute_returns, bool, S_IRUSR|S_IWUSR); MODULE_PARM_DESC(permute_returns, "Randomly permute the order in which TX responses are sent to the frontend"); module_param_named(groups, netbk_nr_groups, uint, 0); @@ -107,94 +114,91 @@ Acked-by: jbeulich@novell.com int 
netbk_copy_skb_mode; -@@ -164,8 +171,12 @@ static inline void maybe_schedule_tx_act +@@ -168,6 +175,19 @@ static int check_mfn(struct xen_netbk *n + return netbk->alloc_index >= nr ? 0 : -ENOMEM; + } ++static void netbk_schedule(struct xen_netbk *netbk) ++{ ++ if (use_kthreads) ++ wake_up(&netbk->netbk_action_wq); ++ else ++ tasklet_schedule(&netbk->net_tx_tasklet); ++} ++ ++static void netbk_schedule_group(unsigned long group) ++{ ++ netbk_schedule(&xen_netbk[group]); ++} ++ + static inline void maybe_schedule_tx_action(unsigned int group) + { + struct xen_netbk *netbk = &xen_netbk[group]; +@@ -175,7 +195,7 @@ static inline void maybe_schedule_tx_act smp_mb(); if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && -- !list_empty(&netbk->net_schedule_list)) + !list_empty(&netbk->schedule_list)) - tasklet_schedule(&netbk->net_tx_tasklet); -+ !list_empty(&netbk->net_schedule_list)) { -+ if (use_kthreads) -+ wake_up(&netbk->netbk_action_wq); -+ else -+ tasklet_schedule(&netbk->net_tx_tasklet); -+ } ++ netbk_schedule(netbk); } static struct sk_buff *netbk_copy_skb(struct sk_buff *skb) -@@ -326,7 +337,10 @@ int netif_be_start_xmit(struct sk_buff * +@@ -335,7 +355,7 @@ int netif_be_start_xmit(struct sk_buff * netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; skb_queue_tail(&netbk->rx_queue, skb); - tasklet_schedule(&netbk->net_rx_tasklet); -+ if (use_kthreads) -+ wake_up(&netbk->netbk_action_wq); -+ else -+ tasklet_schedule(&netbk->net_rx_tasklet); ++ netbk_schedule(netbk); return NETDEV_TX_OK; -@@ -779,8 +793,12 @@ static void net_rx_action(unsigned long - +@@ -772,23 +792,13 @@ static void net_rx_action(unsigned long /* More work to do? */ if (!skb_queue_empty(&netbk->rx_queue) && -- !timer_pending(&netbk->net_timer)) + !timer_pending(&netbk->net_timer)) - tasklet_schedule(&netbk->net_rx_tasklet); -+ !timer_pending(&netbk->net_timer)) { -+ if (use_kthreads) -+ wake_up(&netbk->netbk_action_wq); -+ else -+ tasklet_schedule(&netbk->net_rx_tasklet); -+ } ++ netbk_schedule(netbk); #if 0 else xen_network_done_notify(); -@@ -789,12 +807,18 @@ static void net_rx_action(unsigned long - - static void net_alarm(unsigned long group) - { -- tasklet_schedule(&xen_netbk[group].net_rx_tasklet); -+ if (use_kthreads) -+ wake_up(&xen_netbk[group].netbk_action_wq); -+ else -+ tasklet_schedule(&xen_netbk[group].net_rx_tasklet); + #endif } - static void netbk_tx_pending_timeout(unsigned long group) - { +-static void net_alarm(unsigned long group) +-{ +- tasklet_schedule(&xen_netbk[group].net_rx_tasklet); +-} +- +-static void netbk_tx_pending_timeout(unsigned long group) +-{ - tasklet_schedule(&xen_netbk[group].net_tx_tasklet); -+ if (use_kthreads) -+ wake_up(&xen_netbk[group].netbk_action_wq); -+ else -+ tasklet_schedule(&xen_netbk[group].net_tx_tasklet); - } - - struct net_device_stats *netif_be_get_stats(struct net_device *dev) -@@ -1506,7 +1530,10 @@ static void net_tx_action(unsigned long - continue; - } +-} +- + static int __on_net_schedule_list(netif_t *netif) + { + return netif->list.next != NULL; +@@ -1519,7 +1529,10 @@ static void net_tx_action(unsigned long + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; - netif_rx(skb); + if (use_kthreads) + netif_rx_ni(skb); + else + netif_rx(skb); - netif->dev->last_rx = jiffies; } -@@ -1532,7 +1559,10 @@ static void netif_idx_release(struct xen + out: +@@ -1544,7 +1557,7 @@ static void netif_idx_release(struct xen netbk->dealloc_prod++; spin_unlock_irqrestore(&netbk->release_lock, flags); - tasklet_schedule(&netbk->net_tx_tasklet); -+ if (use_kthreads) -+ 
wake_up(&netbk->netbk_action_wq); -+ else -+ tasklet_schedule(&netbk->net_tx_tasklet); ++ netbk_schedule(netbk); } static void netif_page_release(struct page *page, unsigned int order) -@@ -1670,6 +1700,46 @@ static struct irqaction netif_be_dbg_act +@@ -1683,6 +1696,50 @@ static struct irqaction netif_be_dbg_act }; #endif @@ -208,8 +212,12 @@ Acked-by: jbeulich@novell.com + if (netbk->dealloc_cons != netbk->dealloc_prod) + return 1; + ++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB && ++ !list_empty(&netbk->pending_inuse_head)) ++ return 1; ++ + if (nr_pending_reqs(netbk) + MAX_SKB_FRAGS < MAX_PENDING_REQS && -+ !list_empty(&netbk->net_schedule_list)) ++ !list_empty(&netbk->schedule_list)) + return 1; + + return 0; @@ -241,46 +249,72 @@ Acked-by: jbeulich@novell.com static int __init netback_init(void) { unsigned int i, group; -@@ -1697,8 +1767,26 @@ static int __init netback_init(void) +@@ -1717,20 +1774,16 @@ static int __init netback_init(void) for (group = 0; group < netbk_nr_groups; group++) { struct xen_netbk *netbk = &xen_netbk[group]; - tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group); - tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group); +- + skb_queue_head_init(&netbk->rx_queue); + skb_queue_head_init(&netbk->tx_queue); + + init_timer(&netbk->net_timer); + netbk->net_timer.data = group; +- netbk->net_timer.function = net_alarm; ++ netbk->net_timer.function = netbk_schedule_group; + + init_timer(&netbk->tx_pending_timer); + netbk->tx_pending_timer.data = group; +- netbk->tx_pending_timer.function = +- netbk_tx_pending_timeout; ++ netbk->tx_pending_timer.function = netbk_schedule_group; + + netbk->pending_prod = MAX_PENDING_REQS; + +@@ -1755,6 +1808,25 @@ static int __init netback_init(void) + netbk->pending_ring[i] = i; + INIT_LIST_HEAD(&netbk->pending_inuse[i].list); + } ++ + if (use_kthreads) { + init_waitqueue_head(&netbk->netbk_action_wq); + netbk->task = kthread_create(netbk_action_thread, + (void *)(long)group, + "netback/%u", group); + -+ if (!IS_ERR(netbk->task)) { -+ if (bind_threads) -+ kthread_bind(netbk->task, group); -+ wake_up_process(netbk->task); -+ } else { -+ printk(KERN_ALERT -+ "kthread_create() fails at netback\n"); ++ if (IS_ERR(netbk->task)) { ++ pr_alert("netback: kthread_create() failed\n"); + rc = PTR_ERR(netbk->task); + goto failed_init; + } ++ if (bind_threads) ++ kthread_bind(netbk->task, group); ++ wake_up_process(netbk->task); + } else { + tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group); + tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group); + } + } - skb_queue_head_init(&netbk->rx_queue); - skb_queue_head_init(&netbk->tx_queue); -@@ -1762,8 +1850,11 @@ failed_init: - while (group-- > 0) { + netbk_copy_skb_mode = NETBK_DONT_COPY_SKB; +@@ -1779,12 +1851,15 @@ static int __init netback_init(void) + return 0; + + failed_init: +- while (group-- > 0) { ++ do { struct xen_netbk *netbk = &xen_netbk[group]; - free_empty_pages_and_pagevec(netbk->mmap_pages, - MAX_PENDING_REQS); +- } + if (use_kthreads && netbk->task && !IS_ERR(netbk->task)) + kthread_stop(netbk->task); + if (netbk->mmap_pages) + free_empty_pages_and_pagevec(netbk->mmap_pages, + MAX_PENDING_REQS); - del_timer(&netbk->tx_pending_timer); - del_timer(&netbk->net_timer); - } ++ } while (group--); + vfree(xen_netbk); + balloon_update_driver_allowance(-(long)netbk_nr_groups + * NET_RX_RING_SIZE); diff --git a/patches.xen/xen-netback-multiple-tasklets b/patches.xen/xen-netback-multiple-tasklets index d13758f..16acbd1 100644 --- 
a/patches.xen/xen-netback-multiple-tasklets +++ b/patches.xen/xen-netback-multiple-tasklets @@ -17,9 +17,9 @@ Signed-off-by: Dongxiao Xu jb: some cleanups Acked-by: jbeulich@novell.com ---- head-2010-04-29.orig/drivers/xen/netback/common.h 2010-04-30 11:11:33.000000000 +0200 -+++ head-2010-04-29/drivers/xen/netback/common.h 2010-04-30 11:32:08.000000000 +0200 -@@ -58,6 +58,7 @@ +--- head-2011-02-17.orig/drivers/xen/netback/common.h 2011-02-17 10:33:48.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/common.h 2011-02-09 16:21:50.000000000 +0100 +@@ -55,6 +55,7 @@ typedef struct netif_st { /* Unique identifier for this interface. */ domid_t domid; @@ -27,7 +27,7 @@ Acked-by: jbeulich@novell.com unsigned int handle; u8 fe_dev_addr[6]; -@@ -260,6 +261,7 @@ struct xen_netbk { +@@ -262,6 +263,7 @@ struct xen_netbk { struct page **mmap_pages; @@ -35,7 +35,7 @@ Acked-by: jbeulich@novell.com unsigned int alloc_index; struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; -@@ -287,4 +289,8 @@ struct xen_netbk { +@@ -289,4 +291,8 @@ struct xen_netbk { unsigned long mfn_list[MAX_MFN_ALLOC]; }; @@ -44,8 +44,8 @@ Acked-by: jbeulich@novell.com +extern unsigned int netbk_nr_groups; + #endif /* __NETIF__BACKEND__COMMON_H__ */ ---- head-2010-04-29.orig/drivers/xen/netback/interface.c 2010-01-26 09:03:24.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/interface.c 2010-04-30 10:42:29.000000000 +0200 +--- head-2011-02-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:33:17.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/interface.c 2011-02-17 10:34:28.000000000 +0100 @@ -54,14 +54,36 @@ module_param_named(queue_length, netbk_q static void __netif_up(netif_t *netif) @@ -83,22 +83,22 @@ Acked-by: jbeulich@novell.com } static int net_open(struct net_device *dev) -@@ -207,6 +229,7 @@ netif_t *netif_alloc(struct device *pare +@@ -250,6 +272,7 @@ netif_t *netif_alloc(struct device *pare netif = netdev_priv(dev); memset(netif, 0, sizeof(*netif)); netif->domid = domid; + netif->group = UINT_MAX; netif->handle = handle; - atomic_set(&netif->refcnt, 1); - init_waitqueue_head(&netif->waiting_to_free); ---- head-2010-04-29.orig/drivers/xen/netback/netback.c 2010-04-30 11:49:12.000000000 +0200 -+++ head-2010-04-29/drivers/xen/netback/netback.c 2010-04-30 11:49:27.000000000 +0200 -@@ -41,10 +41,10 @@ + netif->can_sg = 1; + netif->csum = 1; +--- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-03-01 11:53:28.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/netback.c 2011-03-01 11:53:33.000000000 +0100 +@@ -44,10 +44,10 @@ /*define NETBE_DEBUG_INTERRUPT*/ -static struct xen_netbk *__read_mostly xen_netbk; --static const unsigned int __read_mostly netbk_nr_groups = 1; +-static const unsigned int netbk_nr_groups = 1; +struct xen_netbk *__read_mostly xen_netbk; +unsigned int __read_mostly netbk_nr_groups; @@ -107,7 +107,7 @@ Acked-by: jbeulich@novell.com static void netif_idx_release(struct xen_netbk *, u16 pending_idx); static void make_tx_response(netif_t *netif, -@@ -126,6 +126,8 @@ MODULE_PARM_DESC(copy_skb, "Copy data re +@@ -136,6 +136,8 @@ MODULE_PARM_DESC(copy_skb, "Copy data re static int MODPARM_permute_returns = 0; module_param_named(permute_returns, MODPARM_permute_returns, bool, S_IRUSR|S_IWUSR); MODULE_PARM_DESC(permute_returns, "Randomly permute the order in which TX responses are sent to the frontend"); @@ -116,7 +116,22 @@ Acked-by: jbeulich@novell.com int netbk_copy_skb_mode; -@@ -1548,9 +1550,20 @@ static void netif_page_release(struct pa +@@ -406,11 +408,13 @@ 
static u16 netbk_gop_frag(netif_t *netif + (idx = netif_page_index(page)) < MAX_PENDING_REQS && + (group = netif_page_group(page)) < netbk_nr_groups) { + struct pending_tx_info *src_pend; ++ unsigned int grp; + + netbk = &xen_netbk[group]; + BUG_ON(netbk->mmap_pages[idx] != page); + src_pend = &netbk->pending_tx_info[idx]; +- BUG_ON(group != GET_GROUP_INDEX(src_pend->netif)); ++ grp = GET_GROUP_INDEX(src_pend->netif); ++ BUG_ON(group != grp && grp != UINT_MAX); + copy_gop->source.domid = src_pend->netif->domid; + copy_gop->source.u.ref = src_pend->req.gref; + copy_gop->flags |= GNTCOPY_source_gref; +@@ -1558,9 +1562,20 @@ static void netif_page_release(struct pa irqreturn_t netif_be_int(int irq, void *dev_id) { netif_t *netif = dev_id; @@ -138,18 +153,31 @@ Acked-by: jbeulich@novell.com if (netif_schedulable(netif) && !netbk_queue_full(netif)) netif_wake_queue(netif->dev); -@@ -1666,8 +1679,13 @@ static int __init netback_init(void) +@@ -1677,13 +1692,24 @@ static int __init netback_init(void) if (!is_running_on_xen()) return -ENODEV; +- xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), +- GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, +- PAGE_KERNEL); ++ group = netbk_nr_groups; + if (!netbk_nr_groups) + netbk_nr_groups = (num_online_cpus() + 1) / 2; + if (netbk_nr_groups > MAX_GROUPS) + netbk_nr_groups = MAX_GROUPS; + - /* We can increase reservation by this much in net_rx_action(). */ -- balloon_update_driver_allowance(NET_RX_RING_SIZE); -+ balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE); ++ do { ++ xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), ++ GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, ++ PAGE_KERNEL); ++ } while (!xen_netbk && (netbk_nr_groups >>= 1)); + if (!xen_netbk) { + pr_err("%s: out of memory\n", __func__); + return -ENOMEM; + } ++ if (group && netbk_nr_groups != group) ++ pr_warning("netback: only using %u (instead of %u) groups\n", ++ netbk_nr_groups, group); - xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), - GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, PAGE_KERNEL); + /* We can increase reservation by this much in net_rx_action(). */ + balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE); diff --git a/patches.xen/xen-netback-notify-multi b/patches.xen/xen-netback-notify-multi index 8dc539a..a716954 100644 --- a/patches.xen/xen-netback-notify-multi +++ b/patches.xen/xen-netback-notify-multi @@ -2,22 +2,27 @@ From: jbeulich@novell.com Subject: netback: use multicall for send multiple notifications Patch-mainline: obsolete -This also does a small fairness improvement since now notifications +This also yields a small fairness improvement since now notifications get sent in the order requests came in rather than in the inverse one. 
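For reference, a minimal sketch of how the batching described above is meant to be used — not the patch's actual net_rx_action() code. The helper multi_notify_remote_via_irq() is the one this patch adds below; the function name, the notify_list/notify_nr arguments and the caller-supplied mcl[] scratch array are illustrative placeholders, and error handling is reduced to the usual BUG() idiom.

	/*
	 * Illustrative sketch: fill one multicall_entry_t per pending
	 * notification, in the order the requests came in, then issue a
	 * single HYPERVISOR_multicall() instead of one event-channel
	 * hypercall per IRQ.
	 */
	static void flush_notifications(const int *notify_list,
					unsigned int notify_nr,
					multicall_entry_t *mcl)
	{
		unsigned int i, used = 0;

		for (i = 0; i < notify_nr; ++i)
			if (multi_notify_remote_via_irq(&mcl[used],
							notify_list[i]) == 0)
				++used;		/* entry filled in request order */

		if (used && HYPERVISOR_multicall(mcl, used))
			BUG();
	}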
---- head-2010-04-15.orig/drivers/xen/core/evtchn.c 2010-04-23 15:20:36.000000000 +0200 -+++ head-2010-04-15/drivers/xen/core/evtchn.c 2010-04-23 15:20:52.000000000 +0200 -@@ -1336,6 +1336,21 @@ void notify_remote_via_irq(int irq) +--- head-2011-02-08.orig/drivers/xen/core/evtchn.c 2011-02-16 08:29:29.000000000 +0100 ++++ head-2011-02-08/drivers/xen/core/evtchn.c 2011-02-16 08:29:50.000000000 +0100 +@@ -1473,6 +1473,27 @@ void notify_remote_via_irq(int irq) } EXPORT_SYMBOL_GPL(notify_remote_via_irq); ++#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) +int multi_notify_remote_via_irq(multicall_entry_t *mcl, int irq) +{ -+ int evtchn = evtchn_from_irq(irq); ++ const struct irq_cfg *cfg = irq_cfg(irq); ++ int evtchn; + -+ BUG_ON(type_from_irq(irq) == IRQT_VIRQ); -+ BUG_IF_IPI(irq); ++ if (WARN_ON_ONCE(!cfg)) ++ return -EINVAL; ++ BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ); ++ BUG_IF_IPI(cfg); + ++ evtchn = evtchn_from_irq_cfg(cfg); + if (!VALID_EVTCHN(evtchn)) + return -EINVAL; + @@ -25,13 +30,14 @@ get sent in the order requests came in rather than in the inverse one. + return 0; +} +EXPORT_SYMBOL_GPL(multi_notify_remote_via_irq); ++#endif + int irq_to_evtchn_port(int irq) { - BUG_IF_VIRQ_PER_CPU(irq); ---- head-2010-04-15.orig/drivers/xen/netback/netback.c 2010-01-04 13:31:44.000000000 +0100 -+++ head-2010-04-15/drivers/xen/netback/netback.c 2010-01-04 13:31:57.000000000 +0100 -@@ -778,10 +778,20 @@ static void net_rx_action(unsigned long + const struct irq_cfg *cfg = irq_cfg(irq); +--- head-2011-02-08.orig/drivers/xen/netback/netback.c 2011-01-03 13:30:08.000000000 +0100 ++++ head-2011-02-08/drivers/xen/netback/netback.c 2011-01-03 13:30:15.000000000 +0100 +@@ -767,10 +767,20 @@ static void net_rx_action(unsigned long npo.meta_cons += nr_frags + 1; } @@ -54,9 +60,9 @@ get sent in the order requests came in rather than in the inverse one. } /* More work to do? */ ---- head-2010-04-15.orig/include/xen/evtchn.h 2010-03-31 14:11:09.000000000 +0200 -+++ head-2010-04-15/include/xen/evtchn.h 2010-03-31 14:42:45.000000000 +0200 -@@ -192,6 +192,18 @@ static inline void notify_remote_via_evt +--- head-2011-02-08.orig/include/xen/evtchn.h 2010-11-23 16:18:23.000000000 +0100 ++++ head-2011-02-08/include/xen/evtchn.h 2010-11-23 16:20:08.000000000 +0100 +@@ -193,6 +193,18 @@ static inline void notify_remote_via_evt VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } @@ -72,10 +78,10 @@ get sent in the order requests came in rather than in the inverse one. + mcl->args[1] = (unsigned long)send; +} + - /* Clear an irq's pending state, in preparation for polling on it. */ - void xen_clear_irq_pending(int irq); - -@@ -210,6 +222,7 @@ void xen_poll_irq(int irq); + static inline int close_evtchn(int port) + { + struct evtchn_close close = { .port = port }; +@@ -207,6 +219,7 @@ int xen_test_irq_pending(int irq); * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); diff --git a/patches.xen/xen-netback-nr-irqs b/patches.xen/xen-netback-nr-irqs index 5714739..c7b5464 100644 --- a/patches.xen/xen-netback-nr-irqs +++ b/patches.xen/xen-netback-nr-irqs @@ -8,9 +8,9 @@ be pretty unbounded. Also, store the dynirq rather than the raw irq to push up the limit where the type of notify_list needs to become 'int' rather than 'u16'. 
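A hedged sketch of the space saving referred to above (names are illustrative and the offset constant DYNIRQ_BASE is an assumption borrowed from the classic Xen event-channel code; the real patch keeps its own conversion): if only dynamically-bound IRQs ever land in notify_list[], storing the dynirq offset rather than the raw IRQ number keeps every entry within u16 range even when the IRQ space grows well past 65535.

	/* dynirq offsets, not raw IRQ numbers */
	static u16 notify_list[NET_RX_RING_SIZE];

	static inline void remember_notify(unsigned int nr, int irq)
	{
		notify_list[nr] = irq - DYNIRQ_BASE;	/* assumed base constant */
	}

	static inline void send_notify(unsigned int nr)
	{
		notify_remote_via_irq(notify_list[nr] + DYNIRQ_BASE);
	}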
---- head-2010-01-19.orig/drivers/xen/netback/interface.c 2010-01-26 08:58:19.000000000 +0100 -+++ head-2010-01-19/drivers/xen/netback/interface.c 2010-01-26 09:03:24.000000000 +0100 -@@ -343,6 +343,7 @@ int netif_map(netif_t *netif, unsigned l +--- head-2011-02-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:18:52.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/interface.c 2011-02-17 10:33:17.000000000 +0100 +@@ -381,6 +381,7 @@ int netif_map(netif_t *netif, unsigned l netif->dev->name, netif); if (err < 0) goto err_hypervisor; @@ -18,9 +18,9 @@ to push up the limit where the type of notify_list needs to become netif->irq = err; disable_irq(netif->irq); ---- head-2010-01-19.orig/drivers/xen/netback/netback.c 2010-01-04 13:31:38.000000000 +0100 -+++ head-2010-01-19/drivers/xen/netback/netback.c 2010-01-04 13:31:44.000000000 +0100 -@@ -590,8 +590,12 @@ static void net_rx_action(unsigned long +--- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-01-03 13:29:58.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/netback.c 2011-01-03 13:30:08.000000000 +0100 +@@ -593,8 +593,12 @@ static void net_rx_action(unsigned long static mmu_update_t rx_mmu[NET_RX_RING_SIZE]; static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE]; static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE]; @@ -34,7 +34,7 @@ to push up the limit where the type of notify_list needs to become static struct netbk_rx_meta meta[NET_RX_RING_SIZE]; struct netrx_pending_operations npo = { -@@ -749,11 +753,9 @@ static void net_rx_action(unsigned long +@@ -748,11 +752,9 @@ static void net_rx_action(unsigned long nr_frags); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret); @@ -48,7 +48,7 @@ to push up the limit where the type of notify_list needs to become if (netif_queue_stopped(netif->dev) && netif_schedulable(netif) && -@@ -778,8 +780,8 @@ static void net_rx_action(unsigned long +@@ -767,8 +769,8 @@ static void net_rx_action(unsigned long while (notify_nr != 0) { irq = notify_list[--notify_nr]; diff --git a/patches.xen/xen-netfront-ethtool b/patches.xen/xen-netfront-ethtool deleted file mode 100644 index 368971c..0000000 --- a/patches.xen/xen-netfront-ethtool +++ /dev/null @@ -1,31 +0,0 @@ -From: ksrinivasan@novell.com -Subject: netfront: ethtool -i does not return info about xennet driver -Patch-mainline: n/a -References: bnc#591179 - -Signed-off-by: K. Y. 
Srinivasan - ---- linux.orig/drivers/xen/netfront/netfront.c 2010-02-08 06:59:48.000000000 -0700 -+++ linux/drivers/xen/netfront/netfront.c 2010-03-26 17:18:30.000000000 -0600 -@@ -1766,6 +1766,13 @@ static void xennet_set_features(struct n - xennet_set_tso(dev, 1); - } - -+static void netfront_get_drvinfo(struct net_device *dev, -+ struct ethtool_drvinfo *info) -+{ -+ strcpy(info->driver, "netfront"); -+ strcpy(info->bus_info, dev_name(dev->dev.parent)); -+} -+ - static int network_connect(struct net_device *dev) - { - struct netfront_info *np = netdev_priv(dev); -@@ -1874,6 +1881,7 @@ static void netif_uninit(struct net_devi - - static const struct ethtool_ops network_ethtool_ops = - { -+ .get_drvinfo = netfront_get_drvinfo, - .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, diff --git a/patches.xen/xen-no-reboot-vector b/patches.xen/xen-no-reboot-vector deleted file mode 100644 index 1c73cb9..0000000 --- a/patches.xen/xen-no-reboot-vector +++ /dev/null @@ -1,148 +0,0 @@ -From: jbeulich@novell.com -Subject: eliminate REBOOT_VECTOR -Patch-mainline: n/a - -We can do without it, and can that way save one event channel per CPU -(i.e. a significant number when having many CPUs, given that there are -only 1024/4096 of them on 32-/64-bit). - ---- head-2010-04-15.orig/arch/x86/include/asm/hw_irq.h 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/asm/hw_irq.h 2010-03-30 17:14:57.000000000 +0200 -@@ -137,7 +137,6 @@ extern asmlinkage void smp_invalidate_in - extern irqreturn_t smp_reschedule_interrupt(int, void *); - extern irqreturn_t smp_call_function_interrupt(int, void *); - extern irqreturn_t smp_call_function_single_interrupt(int, void *); --extern irqreturn_t smp_reboot_interrupt(int, void *); - #endif - #endif - ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-29 18:11:31.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-30 17:15:14.000000000 +0200 -@@ -12,8 +12,7 @@ - #define CALL_FUNCTION_VECTOR 1 - #define CALL_FUNC_SINGLE_VECTOR 2 - #define SPIN_UNLOCK_VECTOR 3 --#define REBOOT_VECTOR 4 --#define NR_IPIS 5 -+#define NR_IPIS 4 - - /* - * The maximum number of vectors supported by i386 processors ---- head-2010-04-15.orig/arch/x86/kernel/smp-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/smp-xen.c 2010-04-15 11:43:22.000000000 +0200 -@@ -29,6 +29,9 @@ - #include - #include - #include -+ -+static unsigned int __read_mostly reboot = NR_CPUS; -+ - /* - * Some notes on x86 processor bugs affecting SMP operation: - * -@@ -132,19 +135,6 @@ void xen_send_call_func_ipi(const struct - xen_send_IPI_mask_allbutself(mask, CALL_FUNCTION_VECTOR); - } - --/* -- * this function calls the 'stop' function on all other CPUs in the system. 
-- */ -- --irqreturn_t smp_reboot_interrupt(int irq, void *dev_id) --{ -- irq_enter(); -- stop_this_cpu(NULL); -- irq_exit(); -- -- return IRQ_HANDLED; --} -- - void xen_smp_send_stop(void) - { - unsigned long flags; -@@ -159,8 +149,10 @@ void xen_smp_send_stop(void) - * (this implies we cannot stop CPUs spinning with irq off - * currently) - */ -+ reboot = raw_smp_processor_id(); -+ wmb(); - if (num_online_cpus() > 1) { -- xen_send_IPI_allbutself(REBOOT_VECTOR); -+ xen_send_IPI_allbutself(RESCHEDULE_VECTOR); - - /* Don't wait longer than a second */ - wait = USEC_PER_SEC; -@@ -180,7 +172,13 @@ void xen_smp_send_stop(void) - */ - irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) - { -- inc_irq_stat(irq_resched_count); -+ if (likely(reboot >= NR_CPUS) || reboot == raw_smp_processor_id()) -+ inc_irq_stat(irq_resched_count); -+ else { -+ irq_enter(); -+ stop_this_cpu(NULL); -+ irq_exit(); -+ } - return IRQ_HANDLED; - } - ---- head-2010-04-15.orig/drivers/xen/core/smpboot.c 2010-03-25 14:39:15.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/smpboot.c 2010-04-15 11:43:29.000000000 +0200 -@@ -44,7 +44,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); - static int __read_mostly resched_irq = -1; - static int __read_mostly callfunc_irq = -1; - static int __read_mostly call1func_irq = -1; --static int __read_mostly reboot_irq = -1; - - #ifdef CONFIG_X86_LOCAL_APIC - #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid)) -@@ -118,10 +117,6 @@ static int __cpuinit xen_smp_intr_init(u - .handler = smp_call_function_single_interrupt, - .flags = IRQF_DISABLED, - .name = "call1func" -- }, reboot_action = { -- .handler = smp_reboot_interrupt, -- .flags = IRQF_DISABLED, -- .name = "reboot" - }; - int rc; - -@@ -155,19 +150,9 @@ static int __cpuinit xen_smp_intr_init(u - else - BUG_ON(call1func_irq != rc); - -- rc = bind_ipi_to_irqaction(REBOOT_VECTOR, -- cpu, -- &reboot_action); -- if (rc < 0) -- goto unbind_call1; -- if (reboot_irq < 0) -- reboot_irq = rc; -- else -- BUG_ON(reboot_irq != rc); -- - rc = xen_spinlock_init(cpu); - if (rc < 0) -- goto unbind_reboot; -+ goto unbind_call1; - - if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0)) - goto fail; -@@ -176,8 +161,6 @@ static int __cpuinit xen_smp_intr_init(u - - fail: - xen_spinlock_cleanup(cpu); -- unbind_reboot: -- unbind_from_per_cpu_irq(reboot_irq, cpu, NULL); - unbind_call1: - unbind_from_per_cpu_irq(call1func_irq, cpu, NULL); - unbind_call: -@@ -196,7 +179,6 @@ static void __cpuinit xen_smp_intr_exit( - unbind_from_per_cpu_irq(resched_irq, cpu, NULL); - unbind_from_per_cpu_irq(callfunc_irq, cpu, NULL); - unbind_from_per_cpu_irq(call1func_irq, cpu, NULL); -- unbind_from_per_cpu_irq(reboot_irq, cpu, NULL); - xen_spinlock_cleanup(cpu); - } - #endif diff --git a/patches.xen/xen-op-packet b/patches.xen/xen-op-packet index b53c70c..f0f616e 100644 --- a/patches.xen/xen-op-packet +++ b/patches.xen/xen-op-packet @@ -3,9 +3,9 @@ Subject: add support for new operation type BLKIF_OP_PACKET Patch-mainline: n/a References: fate#300964 ---- head-2010-04-29.orig/drivers/xen/blkback/blkback.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/blkback.c 2010-03-25 14:38:05.000000000 +0100 -@@ -195,13 +195,15 @@ static void fast_flush_area(pending_req_ +--- head-2011-03-17.orig/drivers/xen/blkback/blkback.c 2011-02-28 14:15:32.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/blkback.c 2011-02-28 14:23:53.000000000 +0100 +@@ -194,13 +194,15 @@ static void fast_flush_area(pending_req_ static void 
print_stats(blkif_t *blkif) { @@ -23,9 +23,9 @@ References: fate#300964 } int blkif_schedule(void *arg) -@@ -374,6 +376,13 @@ handle_request: +@@ -365,6 +367,13 @@ static int do_block_io_op(blkif_t *blkif blkif->st_wr_req++; - ret = dispatch_rw_block_io(blkif, &req, pending_req); + dispatch_rw_block_io(blkif, &req, pending_req); break; + case BLKIF_OP_PACKET: + DPRINTK("error: block operation BLKIF_OP_PACKET not implemented\n"); @@ -37,9 +37,9 @@ References: fate#300964 default: /* A good sign something is wrong: sleep for a while to * avoid excessive CPU consumption by a bad guest. */ ---- head-2010-04-29.orig/drivers/xen/blkback/common.h 2010-03-25 14:38:02.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/common.h 2010-03-25 14:38:05.000000000 +0100 -@@ -92,6 +92,7 @@ typedef struct blkif_st { +--- head-2011-03-17.orig/drivers/xen/blkback/common.h 2010-09-23 16:58:21.000000000 +0200 ++++ head-2011-03-17/drivers/xen/blkback/common.h 2010-11-23 15:06:50.000000000 +0100 +@@ -89,6 +89,7 @@ typedef struct blkif_st { int st_wr_req; int st_oo_req; int st_br_req; @@ -47,27 +47,28 @@ References: fate#300964 int st_rd_sect; int st_wr_sect; ---- head-2010-04-29.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkfront/blkfront.c 2010-03-25 14:38:05.000000000 +0100 -@@ -671,6 +671,8 @@ static int blkif_queue_request(struct re - BLKIF_OP_WRITE : BLKIF_OP_READ; - if (blk_barrier_rq(req)) +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-02-03 12:37:02.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2010-12-06 15:01:01.000000000 +0100 +@@ -708,6 +708,8 @@ static int blkif_queue_request(struct re + if (req->cmd_flags & REQ_HARDBARRIER) + #endif ring_req->operation = BLKIF_OP_WRITE_BARRIER; -+ if (blk_pc_request(req)) ++ if (req->cmd_type == REQ_TYPE_BLOCK_PC) + ring_req->operation = BLKIF_OP_PACKET; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); -@@ -728,7 +730,7 @@ void do_blkif_request(struct request_que +@@ -765,7 +767,8 @@ void do_blkif_request(struct request_que blk_start_request(req); -- if (!blk_fs_request(req)) { -+ if (!blk_fs_request(req) && !blk_pc_request(req)) { +- if (req->cmd_type != REQ_TYPE_FS) { ++ if (req->cmd_type != REQ_TYPE_FS ++ && req->cmd_type != REQ_TYPE_BLOCK_PC) { __blk_end_request_all(req, -EIO); continue; } -@@ -799,6 +801,7 @@ static irqreturn_t blkif_int(int irq, vo +@@ -852,6 +855,7 @@ static irqreturn_t blkif_int(int irq, vo /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: @@ -75,9 +76,9 @@ References: fate#300964 if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ---- head-2010-04-29.orig/drivers/xen/blktap/blktap.c 2010-04-29 10:16:10.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap/blktap.c 2010-04-29 10:16:17.000000000 +0200 -@@ -1148,13 +1148,14 @@ static void fast_flush_area(pending_req_ +--- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-02-28 14:21:49.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-28 14:25:05.000000000 +0100 +@@ -1129,13 +1129,14 @@ static void fast_flush_area(pending_req_ static void print_stats(blkif_t *blkif) { @@ -94,7 +95,7 @@ References: fate#300964 } int tap_blkif_schedule(void *arg) -@@ -1396,6 +1397,11 @@ static int do_block_io_op(blkif_t *blkif +@@ -1382,6 +1383,11 @@ static int do_block_io_op(blkif_t *blkif dispatch_rw_block_io(blkif, &req, 
pending_req); break; @@ -106,18 +107,9 @@ References: fate#300964 default: /* A good sign something is wrong: sleep for a while to * avoid excessive CPU consumption by a bad guest. */ -@@ -1435,6 +1441,8 @@ static void dispatch_rw_block_io(blkif_t - struct vm_area_struct *vma = NULL; - - switch (req->operation) { -+ case BLKIF_OP_PACKET: -+ /* Fall through */ - case BLKIF_OP_READ: - operation = READ; - break; ---- head-2010-04-29.orig/drivers/xen/blktap/common.h 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blktap/common.h 2010-03-25 14:38:05.000000000 +0100 -@@ -75,6 +75,7 @@ typedef struct blkif_st { +--- head-2011-03-17.orig/drivers/xen/blktap/common.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/common.h 2010-11-23 15:06:50.000000000 +0100 +@@ -74,6 +74,7 @@ typedef struct blkif_st { int st_rd_req; int st_wr_req; int st_oo_req; @@ -125,9 +117,9 @@ References: fate#300964 int st_rd_sect; int st_wr_sect; ---- head-2010-04-29.orig/drivers/xen/blktap2/blktap.h 2010-04-15 11:24:08.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap2/blktap.h 2010-04-15 11:42:01.000000000 +0200 -@@ -138,6 +138,7 @@ struct blktap_statistics { +--- head-2011-03-17.orig/drivers/xen/blktap2/blktap.h 2011-02-24 15:24:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/blktap.h 2010-11-23 15:06:50.000000000 +0100 +@@ -140,6 +140,7 @@ struct blktap_statistics { int st_rd_req; int st_wr_req; int st_oo_req; @@ -135,9 +127,9 @@ References: fate#300964 int st_rd_sect; int st_wr_sect; s64 st_rd_cnt; ---- head-2010-04-29.orig/drivers/xen/blktap2/device.c 2010-04-19 14:54:02.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap2/device.c 2010-04-19 14:55:13.000000000 +0200 -@@ -369,7 +369,8 @@ blktap_device_fail_pending_requests(stru +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-07 14:13:37.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2010-11-23 15:06:50.000000000 +0100 +@@ -366,7 +366,8 @@ blktap_device_fail_pending_requests(stru BTERR("%u:%u: failing pending %s of %d pages\n", blktap_device_major, tap->minor, @@ -147,7 +139,7 @@ References: fate#300964 "read" : "write"), request->nr_pages); blktap_unmap(tap, request); -@@ -410,6 +411,7 @@ blktap_device_finish_request(struct blkt +@@ -407,6 +408,7 @@ blktap_device_finish_request(struct blkt switch (request->operation) { case BLKIF_OP_READ: case BLKIF_OP_WRITE: @@ -155,36 +147,141 @@ References: fate#300964 if (unlikely(res->status != BLKIF_RSP_OKAY)) BTERR("Bad return from device data " "request: %x\n", res->status); -@@ -649,6 +651,8 @@ blktap_device_process_request(struct blk +@@ -644,6 +646,8 @@ blktap_device_process_request(struct blk blkif_req.handle = 0; blkif_req.operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; -+ if (unlikely(blk_pc_request(req))) ++ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) + blkif_req.operation = BLKIF_OP_PACKET; request->id = (unsigned long)req; request->operation = blkif_req.operation; -@@ -714,7 +718,9 @@ blktap_device_process_request(struct blk +@@ -709,7 +713,9 @@ blktap_device_process_request(struct blk wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */ ring->ring.req_prod_pvt++; - if (rq_data_dir(req)) { -+ if (unlikely(blk_pc_request(req))) ++ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) + tap->stats.st_pk_req++; + else if (rq_data_dir(req)) { tap->stats.st_wr_sect += nr_sects; tap->stats.st_wr_req++; } else { ---- head-2010-04-29.orig/include/xen/interface/io/blkif.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-04-29/include/xen/interface/io/blkif.h 2010-03-25 14:38:05.000000000 +0100 -@@ -76,6 +76,10 @@ - * "feature-flush-cache" node! +--- head-2011-03-17.orig/drivers/xen/blktap2-new/blktap.h 2011-02-24 15:00:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/blktap.h 2011-02-24 15:27:07.000000000 +0100 +@@ -113,6 +113,7 @@ struct blktap_statistics { + int st_rd_req; + int st_wr_req; + int st_oo_req; ++ int st_pk_req; + int st_rd_sect; + int st_wr_sect; + s64 st_rd_cnt; +--- head-2011-03-17.orig/drivers/xen/blktap2-new/device.c 2011-03-11 00:00:00.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/device.c 2011-03-11 11:09:10.000000000 +0100 +@@ -189,6 +189,8 @@ blktap_device_make_request(struct blktap + + request->rq = rq; + request->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ; ++ if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) ++ request->operation = BLKIF_OP_PACKET; + + err = blktap_request_get_pages(tap, request, nsegs); + if (err) +--- head-2011-03-17.orig/drivers/xen/blktap2-new/ring.c 2011-02-24 15:10:15.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/ring.c 2011-02-24 15:39:28.000000000 +0100 +@@ -153,11 +153,11 @@ blktap_ring_map_request(struct blktap *t + int seg, err = 0; + int write; + +- write = request->operation == BLKIF_OP_WRITE; ++ write = request->operation != BLKIF_OP_READ; + + for (seg = 0; seg < request->nr_pages; seg++) { + if (write) +- blktap_request_bounce(tap, request, seg, write); ++ blktap_request_bounce(tap, request, seg, 1); + + err = blktap_ring_map_segment(tap, request, seg); + if (err) +@@ -181,11 +181,11 @@ blktap_ring_unmap_request(struct blktap + + uaddr = MMAP_VADDR(ring->user_vstart, request->usr_idx, 0); + size = request->nr_pages << PAGE_SHIFT; +- read = request->operation == BLKIF_OP_READ; ++ read = request->operation != BLKIF_OP_WRITE; + + if (read) + for (seg = 0; seg < request->nr_pages; seg++) +- blktap_request_bounce(tap, request, seg, !read); ++ blktap_request_bounce(tap, request, seg, 0); + + zap_page_range(ring->vma, uaddr, size, NULL); + } +@@ -269,14 +269,20 @@ blktap_ring_submit_request(struct blktap + do_gettimeofday(&request->time); + + +- if (request->operation == BLKIF_OP_WRITE) { ++ switch (request->operation) { ++ case BLKIF_OP_WRITE: + tap->stats.st_wr_sect += nsecs; + tap->stats.st_wr_req++; +- } ++ break; + +- if (request->operation == BLKIF_OP_READ) { ++ case BLKIF_OP_READ: + tap->stats.st_rd_sect += nsecs; + tap->stats.st_rd_req++; ++ break; ++ ++ case BLKIF_OP_PACKET: ++ tap->stats.st_pk_req++; ++ break; + } + } + +@@ -483,20 +489,24 @@ blktap_ring_debug(struct blktap *tap, ch + for (usr_idx = 0; usr_idx < MAX_PENDING_REQS; usr_idx++) { + struct blktap_request *request; + struct timeval *time; +- int 
write; ++ char op = '?'; + + request = ring->pending[usr_idx]; + if (!request) + continue; + +- write = request->operation == BLKIF_OP_WRITE; ++ switch (request->operation) { ++ case BLKIF_OP_WRITE: op = 'W'; break; ++ case BLKIF_OP_READ: op = 'R'; break; ++ case BLKIF_OP_PACKET: op = 'P'; break; ++ } + time = &request->time; + + s += snprintf(s, end - s, + "%02d: usr_idx:%02d " + "op:%c nr_pages:%02d time:%lu.%09lu\n", + usr_idx, request->usr_idx, +- write ? 'W' : 'R', request->nr_pages, ++ op, request->nr_pages, + time->tv_sec, time->tv_usec); + } + +--- head-2011-03-17.orig/include/xen/interface/io/blkif.h 2011-03-17 13:50:24.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/blkif.h 2011-03-17 14:34:04.000000000 +0100 +@@ -77,10 +77,9 @@ */ #define BLKIF_OP_FLUSH_DISKCACHE 3 -+/* + /* +- * Used in SLES sources for device specific command packet +- * contained within the request. Reserved for that purpose. + * Device specific command packet contained within the request -+ */ + */ +-#define BLKIF_OP_RESERVED_1 4 +#define BLKIF_OP_PACKET 4 - /* - * Maximum scatter/gather segments per request. + * Recognised only if "feature-trim" is present in backend xenbus info. + * The "feature-trim" node contains a boolean indicating whether trim diff --git a/patches.xen/xen-pcpu-hotplug b/patches.xen/xen-pcpu-hotplug new file mode 100644 index 0000000..b1ef1c6 --- /dev/null +++ b/patches.xen/xen-pcpu-hotplug @@ -0,0 +1,644 @@ +From: Jiang, Yunhong +Subject: xen/acpi: Export host physical CPU information to dom0 +References: bnc#651066 +Patch-mainline: n/a + +This patch expose host's physical CPU information to dom0 in sysfs, so +that dom0's management tools can control the physical CPU if needed. + +It also provides interface in sysfs to logical online/offline a +physical CPU. + +Notice: The information in dom0 is synced with xen hypervisor +asynchronously. + +From: Jiang, Yunhong +Subject: Add cpu hotplug support for 2.6.32 branch + +Add physical CPU hotplug support to origin/xen/next-2.6.32 branch. +Please notice that, even with this change, the acpi_processor->id is +still always -1. This is because several workaround in PM side depends +on acpi_processor->id == -1. As the CPU hotplug logic does not depends +on acpi_processor->id, I'd still keep it no changes. + +But we need change the acpi_processor->id in the future. + +Signed-off-by: Jiang, Yunhong + +jb: ported over glue logic; retry loops around XENPF_get_cpuinfo; + cleanup. 
+Acked-by: jbeulich@novell.com + +--- head-2011-01-30.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-02-02 15:09:40.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-02-02 15:09:57.000000000 +0100 +@@ -181,9 +181,69 @@ static int xen_tx_notifier(struct acpi_p + { + return -EINVAL; + } ++ + static int xen_hotplug_notifier(struct acpi_processor *pr, int event) + { +- return -EINVAL; ++ int ret = -EINVAL; ++#ifdef CONFIG_ACPI_HOTPLUG_CPU ++ acpi_status status = 0; ++ acpi_object_type type; ++ uint32_t apic_id; ++ int device_decl = 0; ++ unsigned long long pxm; ++ xen_platform_op_t op = { ++ .interface_version = XENPF_INTERFACE_VERSION, ++ }; ++ ++ status = acpi_get_type(pr->handle, &type); ++ if (ACPI_FAILURE(status)) { ++ pr_warning("can't get object type for acpi_id %#x\n", ++ pr->acpi_id); ++ return -ENXIO; ++ } ++ ++ switch (type) { ++ case ACPI_TYPE_PROCESSOR: ++ break; ++ case ACPI_TYPE_DEVICE: ++ device_decl = 1; ++ break; ++ default: ++ pr_warning("unsupported object type %#x for acpi_id %#x\n", ++ type, pr->acpi_id); ++ return -EOPNOTSUPP; ++ } ++ ++ apic_id = acpi_get_cpuid(pr->handle, ~device_decl, pr->acpi_id); ++ if (apic_id < 0) { ++ pr_warning("can't get apic_id for acpi_id %#x\n", ++ pr->acpi_id); ++ return -ENODATA; ++ } ++ ++ status = acpi_evaluate_integer(pr->handle, "_PXM", NULL, &pxm); ++ if (ACPI_FAILURE(status)) { ++ pr_warning("can't get pxm for acpi_id %#x\n", ++ pr->acpi_id); ++ return -ENODATA; ++ } ++ ++ switch (event) { ++ case HOTPLUG_TYPE_ADD: ++ op.cmd = XENPF_cpu_hotadd; ++ op.u.cpu_add.apic_id = apic_id; ++ op.u.cpu_add.acpi_id = pr->acpi_id; ++ op.u.cpu_add.pxm = pxm; ++ ret = HYPERVISOR_platform_op(&op); ++ break; ++ case HOTPLUG_TYPE_REMOVE: ++ pr_warning("Xen doesn't support CPU hot remove\n"); ++ ret = -EOPNOTSUPP; ++ break; ++ } ++#endif ++ ++ return ret; + } + + static struct processor_extcntl_ops xen_extcntl_ops = { +@@ -194,8 +254,10 @@ static int __init init_extcntl(void) + { + unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8; + ++#ifndef CONFIG_ACPI_HOTPLUG_CPU + if (!pmbits) + return 0; ++#endif + if (pmbits & XEN_PROCESSOR_PM_CX) + xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier; + if (pmbits & XEN_PROCESSOR_PM_PX) +--- head-2011-01-30.orig/drivers/acpi/processor_driver.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-01-30/drivers/acpi/processor_driver.c 2011-02-02 15:09:57.000000000 +0100 +@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL"); + static int acpi_processor_add(struct acpi_device *device); + static int acpi_processor_remove(struct acpi_device *device, int type); + static void acpi_processor_notify(struct acpi_device *device, u32 event); +-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); ++static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr); + static int acpi_processor_handle_eject(struct acpi_processor *pr); + + +@@ -324,8 +324,7 @@ static int acpi_processor_get_info(struc + * they are physically not present. 
+ */ + if (pr->id == -1) { +- if (ACPI_FAILURE +- (acpi_processor_hotadd_init(pr->handle, &pr->id)) && ++ if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)) && + acpi_get_cpuid(pr->handle, ~device_declaration, + pr->acpi_id) < 0) { + return -ENODEV; +@@ -789,13 +788,26 @@ processor_walk_namespace_cb(acpi_handle + return (AE_OK); + } + +-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) ++static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) + { ++ acpi_handle handle = pr->handle; ++ int *p_cpu = &pr->id; ++ ++#ifdef CONFIG_XEN ++ if (xen_pcpu_index(pr->acpi_id, 1) != -1) ++ return AE_OK; ++#endif + + if (!is_processor_present(handle)) { + return AE_ERROR; + } + ++ if (processor_cntl_external()) { ++ processor_notify_external(pr, PROCESSOR_HOTPLUG, ++ HOTPLUG_TYPE_ADD); ++ return AE_OK; ++ } ++ + if (acpi_map_lsapic(handle, p_cpu)) + return AE_ERROR; + +@@ -809,10 +821,11 @@ static acpi_status acpi_processor_hotadd + + static int acpi_processor_handle_eject(struct acpi_processor *pr) + { +-#ifdef CONFIG_XEN +- if (pr->id == -1) ++ if (processor_cntl_external()) { ++ processor_notify_external(pr, PROCESSOR_HOTPLUG, ++ HOTPLUG_TYPE_REMOVE); + return (0); +-#endif ++ } + + if (cpu_online(pr->id)) + cpu_down(pr->id); +@@ -822,7 +835,7 @@ static int acpi_processor_handle_eject(s + return (0); + } + #else +-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) ++static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) + { + return AE_ERROR; + } +--- head-2011-01-30.orig/drivers/acpi/processor_extcntl.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-01-30/drivers/acpi/processor_extcntl.c 2011-02-02 15:09:57.000000000 +0100 +@@ -83,10 +83,13 @@ int processor_notify_external(struct acp + + ret = processor_extcntl_ops->pm_ops[type](pr, event); + break; ++#ifdef CONFIG_ACPI_HOTPLUG_CPU + case PROCESSOR_HOTPLUG: + if (processor_extcntl_ops->hotplug) + ret = processor_extcntl_ops->hotplug(pr, type); ++ xen_pcpu_hotplug(type); + break; ++#endif + default: + pr_err("Unsupported processor event %d.\n", event); + break; +--- head-2011-01-30.orig/drivers/xen/core/Makefile 2011-02-02 15:09:52.000000000 +0100 ++++ head-2011-01-30/drivers/xen/core/Makefile 2011-02-02 15:09:57.000000000 +0100 +@@ -5,6 +5,7 @@ + obj-y := evtchn.o gnttab.o reboot.o machine_reboot.o firmware.o + + obj-$(CONFIG_PCI) += pci.o ++obj-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o + obj-$(CONFIG_PROC_FS) += xen_proc.o + obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o + obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-01-30/drivers/xen/core/pcpu.c 2011-02-02 15:09:57.000000000 +0100 +@@ -0,0 +1,416 @@ ++/* ++ * pcpu.c - management physical cpu in dom0 environment ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct pcpu { ++ struct list_head pcpu_list; ++ struct sys_device sysdev; ++ uint32_t xen_id; ++ uint32_t apic_id; ++ uint32_t acpi_id; ++ uint32_t flags; ++}; ++ ++static inline int xen_pcpu_online(uint32_t flags) ++{ ++ return !!(flags & XEN_PCPU_FLAGS_ONLINE); ++} ++ ++static DEFINE_MUTEX(xen_pcpu_lock); ++ ++/* No need for irq disable since hotplug notify is in workqueue context */ ++#define get_pcpu_lock() mutex_lock(&xen_pcpu_lock); ++#define put_pcpu_lock() mutex_unlock(&xen_pcpu_lock); ++ ++static LIST_HEAD(xen_pcpus); ++ ++static int xen_pcpu_down(uint32_t xen_id) ++{ ++ xen_platform_op_t op = { ++ 
.cmd = XENPF_cpu_offline, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ .u.cpu_ol.cpuid = xen_id, ++ }; ++ ++ return HYPERVISOR_platform_op(&op); ++} ++ ++static int xen_pcpu_up(uint32_t xen_id) ++{ ++ xen_platform_op_t op = { ++ .cmd = XENPF_cpu_online, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ .u.cpu_ol.cpuid = xen_id, ++ }; ++ ++ return HYPERVISOR_platform_op(&op); ++} ++ ++static ssize_t show_online(struct sys_device *dev, ++ struct sysdev_attribute *attr, ++ char *buf) ++{ ++ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); ++ ++ return sprintf(buf, "%d\n", xen_pcpu_online(cpu->flags)); ++} ++ ++static ssize_t store_online(struct sys_device *dev, ++ struct sysdev_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); ++ ssize_t ret; ++ ++ switch (buf[0]) { ++ case '0': ++ ret = xen_pcpu_down(cpu->xen_id); ++ break; ++ case '1': ++ ret = xen_pcpu_up(cpu->xen_id); ++ break; ++ default: ++ ret = -EINVAL; ++ } ++ ++ if (ret >= 0) ++ ret = count; ++ return ret; ++} ++ ++static SYSDEV_ATTR(online, 0644, show_online, store_online); ++ ++static ssize_t show_apicid(struct sys_device *dev, ++ struct sysdev_attribute *attr, ++ char *buf) ++{ ++ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); ++ ++ return sprintf(buf, "%#x\n", cpu->apic_id); ++} ++static SYSDEV_ATTR(apic_id, 0444, show_apicid, NULL); ++ ++static ssize_t show_acpiid(struct sys_device *dev, ++ struct sysdev_attribute *attr, ++ char *buf) ++{ ++ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); ++ ++ return sprintf(buf, "%#x\n", cpu->acpi_id); ++} ++static SYSDEV_ATTR(acpi_id, 0444, show_acpiid, NULL); ++ ++static struct sysdev_class xen_pcpu_sysdev_class = { ++ .name = "xen_pcpu", ++}; ++ ++static int xen_pcpu_free(struct pcpu *pcpu) ++{ ++ if (!pcpu) ++ return 0; ++ ++ sysdev_remove_file(&pcpu->sysdev, &attr_online); ++ sysdev_remove_file(&pcpu->sysdev, &attr_apic_id); ++ sysdev_remove_file(&pcpu->sysdev, &attr_acpi_id); ++ sysdev_unregister(&pcpu->sysdev); ++ list_del(&pcpu->pcpu_list); ++ kfree(pcpu); ++ ++ return 0; ++} ++ ++static inline int same_pcpu(struct xenpf_pcpuinfo *info, ++ struct pcpu *pcpu) ++{ ++ return (pcpu->apic_id == info->apic_id) && ++ (pcpu->xen_id == info->xen_cpuid); ++} ++ ++/* ++ * Return 1 if online status changed ++ */ ++static int xen_pcpu_online_check(struct xenpf_pcpuinfo *info, ++ struct pcpu *pcpu) ++{ ++ int result = 0; ++ ++ if (info->xen_cpuid != pcpu->xen_id) ++ return 0; ++ ++ if (xen_pcpu_online(info->flags) && !xen_pcpu_online(pcpu->flags)) { ++ /* the pcpu is onlined */ ++ pcpu->flags |= XEN_PCPU_FLAGS_ONLINE; ++ kobject_uevent(&pcpu->sysdev.kobj, KOBJ_ONLINE); ++ result = 1; ++ } else if (!xen_pcpu_online(info->flags) && ++ xen_pcpu_online(pcpu->flags)) { ++ /* The pcpu is offlined now */ ++ pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE; ++ kobject_uevent(&pcpu->sysdev.kobj, KOBJ_OFFLINE); ++ result = 1; ++ } ++ ++ return result; ++} ++ ++static int pcpu_sysdev_init(struct pcpu *cpu) ++{ ++ int error; ++ ++ error = sysdev_register(&cpu->sysdev); ++ if (error) { ++ pr_warning("xen_pcpu_add: Failed to register pcpu\n"); ++ kfree(cpu); ++ return -1; ++ } ++ sysdev_create_file(&cpu->sysdev, &attr_online); ++ sysdev_create_file(&cpu->sysdev, &attr_apic_id); ++ sysdev_create_file(&cpu->sysdev, &attr_acpi_id); ++ return 0; ++} ++ ++static struct pcpu *get_pcpu(unsigned int xen_id) ++{ ++ struct pcpu *pcpu; ++ ++ list_for_each_entry(pcpu, &xen_pcpus, pcpu_list) ++ if (pcpu->xen_id == xen_id) ++ 
return pcpu; ++ ++ return NULL; ++} ++ ++static struct pcpu *init_pcpu(struct xenpf_pcpuinfo *info) ++{ ++ struct pcpu *pcpu; ++ ++ if (info->flags & XEN_PCPU_FLAGS_INVALID) ++ return NULL; ++ ++ /* The PCPU is just added */ ++ pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL); ++ if (!pcpu) ++ return NULL; ++ ++ INIT_LIST_HEAD(&pcpu->pcpu_list); ++ pcpu->xen_id = info->xen_cpuid; ++ pcpu->apic_id = info->apic_id; ++ pcpu->acpi_id = info->acpi_id; ++ pcpu->flags = info->flags; ++ ++ pcpu->sysdev.cls = &xen_pcpu_sysdev_class; ++ pcpu->sysdev.id = info->xen_cpuid; ++ ++ if (pcpu_sysdev_init(pcpu)) { ++ kfree(pcpu); ++ return NULL; ++ } ++ ++ list_add_tail(&pcpu->pcpu_list, &xen_pcpus); ++ return pcpu; ++} ++ ++#define PCPU_NO_CHANGE 0 ++#define PCPU_ADDED 1 ++#define PCPU_ONLINE_OFFLINE 2 ++#define PCPU_REMOVED 3 ++/* ++ * Caller should hold the pcpu lock ++ * < 0: Something wrong ++ * 0: No changes ++ * > 0: State changed ++ */ ++static struct pcpu *_sync_pcpu(unsigned int cpu_num, unsigned int *max_id, ++ int *result) ++{ ++ struct pcpu *pcpu; ++ struct xenpf_pcpuinfo *info; ++ xen_platform_op_t op = { ++ .cmd = XENPF_get_cpuinfo, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ }; ++ int ret; ++ ++ *result = -1; ++ ++ info = &op.u.pcpu_info; ++ info->xen_cpuid = cpu_num; ++ ++ do { ++ ret = HYPERVISOR_platform_op(&op); ++ } while (ret == -EBUSY); ++ if (ret) ++ return NULL; ++ ++ if (max_id) ++ *max_id = op.u.pcpu_info.max_present; ++ ++ pcpu = get_pcpu(cpu_num); ++ ++ if (info->flags & XEN_PCPU_FLAGS_INVALID) { ++ /* The pcpu has been removed */ ++ *result = PCPU_NO_CHANGE; ++ if (pcpu) { ++ xen_pcpu_free(pcpu); ++ *result = PCPU_REMOVED; ++ } ++ return NULL; ++ } ++ ++ ++ if (!pcpu) { ++ *result = PCPU_ADDED; ++ pcpu = init_pcpu(info); ++ if (pcpu == NULL) { ++ pr_warning("Failed to init pcpu %x\n", ++ info->xen_cpuid); ++ *result = -1; ++ } ++ } else { ++ *result = PCPU_NO_CHANGE; ++ /* ++ * Old PCPU is replaced with a new pcpu, this means ++ * several virq is missed, will it happen? 
++ */ ++ if (!same_pcpu(info, pcpu)) { ++ pr_warning("Pcpu %x changed!\n", pcpu->xen_id); ++ pcpu->apic_id = info->apic_id; ++ pcpu->acpi_id = info->acpi_id; ++ } ++ if (xen_pcpu_online_check(info, pcpu)) ++ *result = PCPU_ONLINE_OFFLINE; ++ } ++ return pcpu; ++} ++ ++/* ++ * Sync dom0's pcpu information with xen hypervisor's ++ */ ++static int xen_sync_pcpus(void) ++{ ++ /* ++ * Boot cpu always have cpu_id 0 in xen ++ */ ++ unsigned int cpu_num = 0, max_id = 0; ++ int result = 0; ++ struct pcpu *pcpu; ++ ++ get_pcpu_lock(); ++ ++ while ((result >= 0) && (cpu_num <= max_id)) { ++ pcpu = _sync_pcpu(cpu_num, &max_id, &result); ++ ++ switch (result) { ++ case PCPU_NO_CHANGE: ++ case PCPU_ADDED: ++ case PCPU_ONLINE_OFFLINE: ++ case PCPU_REMOVED: ++ break; ++ default: ++ pr_warning("Failed to sync pcpu %x\n", cpu_num); ++ break; ++ } ++ cpu_num++; ++ } ++ ++ if (result < 0) { ++ struct pcpu *tmp; ++ ++ list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, pcpu_list) ++ xen_pcpu_free(pcpu); ++ } ++ ++ put_pcpu_lock(); ++ ++ return 0; ++} ++ ++static void xen_pcpu_dpc(struct work_struct *work) ++{ ++ if (xen_sync_pcpus() < 0) ++ pr_warning("xen_pcpu_dpc: Failed to sync pcpu information\n"); ++} ++static DECLARE_WORK(xen_pcpu_work, xen_pcpu_dpc); ++ ++static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id) ++{ ++ schedule_work(&xen_pcpu_work); ++ ++ return IRQ_HANDLED; ++} ++ ++int xen_pcpu_hotplug(int type) ++{ ++ schedule_work(&xen_pcpu_work); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(xen_pcpu_hotplug); ++ ++int xen_pcpu_index(uint32_t id, bool is_acpiid) ++{ ++ unsigned int cpu_num, max_id; ++ xen_platform_op_t op = { ++ .cmd = XENPF_get_cpuinfo, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ }; ++ struct xenpf_pcpuinfo *info = &op.u.pcpu_info; ++ ++ for (max_id = cpu_num = 0; cpu_num <= max_id; ++cpu_num) { ++ int ret; ++ ++ info->xen_cpuid = cpu_num; ++ do { ++ ret = HYPERVISOR_platform_op(&op); ++ } while (ret == -EBUSY); ++ if (ret) ++ continue; ++ ++ if (info->max_present > max_id) ++ max_id = info->max_present; ++ if (id == (is_acpiid ? 
info->acpi_id : info->apic_id)) ++ return cpu_num; ++ } ++ ++ return -1; ++} ++EXPORT_SYMBOL_GPL(xen_pcpu_index); ++ ++static int __init xen_pcpu_init(void) ++{ ++ int err; ++ ++ if (!is_initial_xendomain()) ++ return 0; ++ ++ err = sysdev_class_register(&xen_pcpu_sysdev_class); ++ if (err) { ++ pr_warning("xen_pcpu_init: " ++ "Failed to register sysdev class (%d)\n", err); ++ return err; ++ } ++ ++ xen_sync_pcpus(); ++ ++ if (!list_empty(&xen_pcpus)) ++ err = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0, ++ xen_pcpu_interrupt, 0, ++ "pcpu", NULL); ++ if (err < 0) ++ pr_warning("xen_pcpu_init: " ++ "Failed to bind pcpu_state virq (%d)\n", err); ++ ++ return err; ++} ++subsys_initcall(xen_pcpu_init); +--- head-2011-01-30.orig/include/acpi/processor.h 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-01-30/include/acpi/processor.h 2011-02-02 15:09:57.000000000 +0100 +@@ -509,6 +509,8 @@ static inline void xen_convert_psd_pack( + xpsd->num_processors = apsd->num_processors; + } + ++extern int xen_pcpu_hotplug(int type); ++extern int xen_pcpu_index(uint32_t id, bool is_acpiid); + #endif /* CONFIG_XEN */ + + #endif diff --git a/patches.xen/xen-sections b/patches.xen/xen-sections index a571e74..44cd67e 100644 --- a/patches.xen/xen-sections +++ b/patches.xen/xen-sections @@ -2,9 +2,9 @@ From: jbeulich@novell.com Subject: fix placement of some routines/data Patch-mainline: obsolete ---- head-2010-05-12.orig/arch/x86/kernel/time-xen.c 2010-05-12 09:03:15.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/time-xen.c 2010-05-12 09:13:55.000000000 +0200 -@@ -676,7 +676,7 @@ int xen_update_persistent_clock(void) +--- head-2011-01-22.orig/arch/x86/kernel/time-xen.c 2010-11-22 13:21:13.000000000 +0100 ++++ head-2011-01-22/arch/x86/kernel/time-xen.c 2010-09-16 16:49:59.000000000 +0200 +@@ -648,7 +648,7 @@ int xen_update_persistent_clock(void) /* Dynamically-mapped IRQ. 
*/ DEFINE_PER_CPU(int, timer_irq); @@ -13,27 +13,18 @@ Patch-mainline: obsolete { per_cpu(timer_irq, 0) = bind_virq_to_irqhandler( -@@ -901,7 +901,7 @@ int __cpuinit local_setup_timer(unsigned - return 0; - } - --void __cpuexit local_teardown_timer(unsigned int cpu) -+void __cpuinit local_teardown_timer(unsigned int cpu) - { - BUG_ON(cpu == 0); - unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL); ---- head-2010-05-12.orig/drivers/xen/core/cpu_hotplug.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/cpu_hotplug.c 2010-01-25 13:45:39.000000000 +0100 -@@ -24,7 +24,7 @@ static int local_cpu_hotplug_request(voi +--- head-2011-01-22.orig/drivers/xen/core/cpu_hotplug.c 2011-01-24 12:14:25.000000000 +0100 ++++ head-2011-01-22/drivers/xen/core/cpu_hotplug.c 2011-01-24 12:15:51.000000000 +0100 +@@ -25,7 +25,7 @@ static int local_cpu_hotplug_request(voi return (current->mm != NULL); } --static void vcpu_hotplug(unsigned int cpu) -+static void __cpuinit vcpu_hotplug(unsigned int cpu) +-static void vcpu_hotplug(unsigned int cpu, struct sys_device *dev) ++static void __cpuinit vcpu_hotplug(unsigned int cpu, struct sys_device *dev) { int err; char dir[32], state[32]; -@@ -51,7 +51,7 @@ static void vcpu_hotplug(unsigned int cp +@@ -54,7 +54,7 @@ static void vcpu_hotplug(unsigned int cp } } @@ -42,7 +33,7 @@ Patch-mainline: obsolete struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned int cpu; -@@ -80,12 +80,12 @@ static int smpboot_cpu_notify(struct not +@@ -83,12 +83,12 @@ static int smpboot_cpu_notify(struct not return NOTIFY_OK; } @@ -58,7 +49,7 @@ Patch-mainline: obsolete .node = "cpu", .callback = handle_vcpu_hotplug_event, .flags = XBWF_new_thread }; -@@ -105,7 +105,7 @@ static int __init setup_vcpu_hotplug_eve +@@ -107,7 +107,7 @@ static int __init setup_vcpu_hotplug_eve { static struct notifier_block hotplug_cpu = { .notifier_call = smpboot_cpu_notify }; @@ -67,7 +58,7 @@ Patch-mainline: obsolete .notifier_call = setup_cpu_watcher }; if (!is_running_on_xen()) -@@ -119,7 +119,7 @@ static int __init setup_vcpu_hotplug_eve +@@ -121,7 +121,7 @@ static int __init setup_vcpu_hotplug_eve arch_initcall(setup_vcpu_hotplug_event); @@ -76,7 +67,7 @@ Patch-mainline: obsolete { unsigned int cpu; int err; -@@ -140,7 +140,7 @@ int smp_suspend(void) +@@ -141,7 +141,7 @@ int smp_suspend(void) return 0; } @@ -85,23 +76,30 @@ Patch-mainline: obsolete { unsigned int cpu; ---- head-2010-05-12.orig/drivers/xen/core/smpboot.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/smpboot.c 2010-03-19 15:20:15.000000000 +0100 -@@ -181,7 +181,7 @@ static int __cpuinit xen_smp_intr_init(u +--- head-2011-01-22.orig/drivers/xen/core/gnttab.c 2010-11-22 13:21:03.000000000 +0100 ++++ head-2011-01-22/drivers/xen/core/gnttab.c 2011-01-14 15:00:13.000000000 +0100 +@@ -794,7 +794,12 @@ static int gnttab_expand(unsigned int re + return rc; } - #ifdef CONFIG_HOTPLUG_CPU --static void __cpuexit xen_smp_intr_exit(unsigned int cpu) -+static void __cpuinit xen_smp_intr_exit(unsigned int cpu) +-int __devinit gnttab_init(void) ++#ifdef CONFIG_XEN ++static int __init ++#else ++int __devinit ++#endif ++gnttab_init(void) { - if (cpu != 0) - local_teardown_timer(cpu); -@@ -400,7 +400,7 @@ int __cpuexit __cpu_disable(void) - return 0; - } + int i; + unsigned int max_nr_glist_frames, nr_glist_frames; +--- head-2011-01-22.orig/drivers/xen/pcifront/pci_op.c 2010-11-22 13:10:22.000000000 +0100 ++++ head-2011-01-22/drivers/xen/pcifront/pci_op.c 2010-10-04 11:10:07.000000000 
+0200 +@@ -416,7 +416,7 @@ void pci_frontend_disable_msi(struct pci + #endif /* CONFIG_PCI_MSI */ --void __cpuexit __cpu_die(unsigned int cpu) -+void __cpuinit __cpu_die(unsigned int cpu) + /* Claim resources for the PCI frontend as-is, backend won't allow changes */ +-static int pcifront_claim_resource(struct pci_dev *dev, void *data) ++static int __devinit pcifront_claim_resource(struct pci_dev *dev, void *data) { - while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { - current->state = TASK_UNINTERRUPTIBLE; + struct pcifront_device *pdev = data; + int i; diff --git a/patches.xen/xen-setup-gsi b/patches.xen/xen-setup-gsi new file mode 100644 index 0000000..5813f6e --- /dev/null +++ b/patches.xen/xen-setup-gsi @@ -0,0 +1,158 @@ +From: jbeulich@novell.com +Subject: pass trigger mode and polarity information to Xen for all interrupts +Patch-mainline: n/a + +For Xen to be able to use non-legacy IRQs e.g. for its serial console, +it needs to know trigger mode and polarity for them regardless of +whether the kernel is also going to (try to) use those interrupts. + +--- head-2011-01-30.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 16:05:51.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/apic/io_apic-xen.c 2011-02-02 15:10:38.000000000 +0100 +@@ -1400,6 +1400,10 @@ static int setup_ioapic_entry(int apic_i + return 0; + } + ++static struct { ++ DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); ++} mp_ioapic_routing[MAX_IO_APICS]; ++ + static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, + struct irq_cfg *cfg, int trigger, int polarity) + { +@@ -1416,6 +1420,42 @@ static void setup_ioapic_irq(int apic_id + */ + if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) + apic->vector_allocation_domain(0, cfg->domain); ++#else ++ /* ++ * For legacy IRQs we may get here before trigger mode and polarity ++ * get obtained, but Xen refuses to set those through ++ * PHYSDEVOP_setup_gsi more than once (perhaps even at all). 
++ */ ++ if (irq >= legacy_pic->nr_legacy_irqs ++ || test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { ++ struct physdev_setup_gsi setup_gsi = { ++ .gsi = irq, ++ .triggering = trigger, ++ .polarity = polarity ++ }; ++ struct physdev_map_pirq map_pirq = { ++ .domid = DOMID_SELF, ++ .type = MAP_PIRQ_TYPE_GSI, ++ .index = irq, ++ .pirq = irq ++ }; ++ ++ switch (HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, ++ &setup_gsi)) { ++ case -EEXIST: ++ if (irq < legacy_pic->nr_legacy_irqs) ++ break; ++ /* fall through */ ++ case 0: ++ evtchn_register_pirq(irq); ++ if (HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, ++ &map_pirq) == 0) { ++ /* fake (for init_IO_APIC_traps()): */ ++ cfg->vector = irq; ++ return; ++ } ++ } ++ } + #endif + + if (assign_irq_vector(irq, cfg, apic->target_cpus())) +@@ -1451,10 +1491,6 @@ static void setup_ioapic_irq(int apic_id + ioapic_write_entry(apic_id, pin, entry); + } + +-static struct { +- DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); +-} mp_ioapic_routing[MAX_IO_APICS]; +- + static void __init setup_IO_APIC_irqs(void) + { + int apic_id, pin, idx, irq, notcon = 0; +--- head-2011-01-30.orig/drivers/acpi/pci_irq.c 2011-01-05 01:50:19.000000000 +0100 ++++ head-2011-01-30/drivers/acpi/pci_irq.c 2011-02-02 15:10:38.000000000 +0100 +@@ -469,3 +469,80 @@ void acpi_pci_irq_disable(struct pci_dev + dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); + acpi_unregister_gsi(gsi); + } ++ ++#if defined(CONFIG_XEN) && defined(CONFIG_PCI) ++static int __init xen_setup_gsi(void) ++{ ++ struct pci_dev *dev = NULL; ++ ++ if (acpi_noirq) ++ return 0; ++ ++ /* Loop body is a clone of acpi_pci_irq_enable(). */ ++ for_each_pci_dev(dev) { ++ const struct acpi_prt_entry *entry; ++ int gsi; ++ int triggering = ACPI_LEVEL_SENSITIVE; ++ int polarity = ACPI_ACTIVE_LOW; ++ struct physdev_setup_gsi setup_gsi; ++ ++ if (!dev->pin) ++ continue; ++ ++ entry = acpi_pci_irq_lookup(dev, dev->pin); ++ if (!entry) { ++ /* ++ * IDE legacy mode controller IRQs are magic. Why do ++ * compat extensions always make such a nasty mess. ++ */ ++ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && ++ (dev->class & 0x05) == 0) ++ continue; ++ } ++ ++ gsi = entry ++ ? entry->link ++ ? acpi_pci_link_allocate_irq(entry->link, ++ entry->index, ++ &triggering, &polarity, ++ NULL) ++ : entry->index ++ : -1; ++ ++ if (gsi >= 0) { ++ setup_gsi.gsi = gsi; ++ setup_gsi.triggering ++ = (triggering == ACPI_LEVEL_SENSITIVE); ++ setup_gsi.polarity = (polarity == ACPI_ACTIVE_LOW); ++ if (HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, ++ &setup_gsi) < 0) ++ continue; ++ ++ dev_info(&dev->dev, "GSI%d: %s-%s\n", gsi, ++ triggering == ACPI_LEVEL_SENSITIVE ? "level" ++ : "edge", ++ polarity == ACPI_ACTIVE_LOW ? "low" : "high"); ++ } else { ++ /* ++ * No IRQ known to the ACPI subsystem - maybe the ++ * BIOS / driver reported one, then use it. 
++ */ ++ dev_warn(&dev->dev, "PCI INT %c: no GSI", ++ pin_name(dev->pin)); ++ /* Interrupt Line values above 0xF are forbidden */ ++ if (dev->irq > 0 && (dev->irq <= 0xF)) { ++ pr_cont(" - using IRQ %d\n", dev->irq); ++ setup_gsi.gsi = dev->irq; ++ setup_gsi.triggering = 1; ++ setup_gsi.polarity = 1; ++ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, ++ &setup_gsi)); ++ } else ++ pr_cont("\n"); ++ } ++ } ++ ++ return 0; ++} ++subsys_initcall(xen_setup_gsi); ++#endif diff --git a/patches.xen/xen-spinlock-poll-early b/patches.xen/xen-spinlock-poll-early index 1defb82..fb22288 100644 --- a/patches.xen/xen-spinlock-poll-early +++ b/patches.xen/xen-spinlock-poll-early @@ -6,20 +6,24 @@ This could be merged into the original ticket spinlock code once validated, if there wasn't the dependency on smp-processor-id.h, which only gets introduced in the 2.6.32 merge. ---- head-2010-03-15.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-02-24 12:25:27.000000000 +0100 -+++ head-2010-03-15/arch/x86/include/mach-xen/asm/spinlock.h 2010-02-24 12:39:22.000000000 +0100 -@@ -41,6 +41,10 @@ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock.h 2011-01-18 15:47:44.000000000 +0100 +@@ -41,11 +41,12 @@ #ifdef TICKET_SHIFT #include +#include -+#include -+ -+DECLARE_PER_CPU(struct vcpu_runstate_info, runstate); int xen_spinlock_init(unsigned int cpu); void xen_spinlock_cleanup(unsigned int cpu); -@@ -113,6 +117,9 @@ static __always_inline int __ticket_spin +-bool xen_spin_wait(arch_spinlock_t *, unsigned int *token, +- unsigned int flags); ++unsigned int xen_spin_wait(arch_spinlock_t *, unsigned int *token, ++ unsigned int flags); + unsigned int xen_spin_adjust(const arch_spinlock_t *, unsigned int token); + void xen_spin_kick(arch_spinlock_t *, unsigned int token); + +@@ -113,6 +114,9 @@ static __always_inline int __ticket_spin : : "memory", "cc"); @@ -29,7 +33,7 @@ only gets introduced in the 2.6.32 merge. return tmp; } #elif TICKET_SHIFT == 16 -@@ -179,10 +186,17 @@ static __always_inline int __ticket_spin +@@ -179,10 +183,15 @@ static __always_inline int __ticket_spin : : "memory", "cc"); @@ -40,32 +44,30 @@ only gets introduced in the 2.6.32 merge. } #endif -+#define __ticket_spin_count(lock) \ -+ (per_cpu(runstate.state, (lock)->owner) == RUNSTATE_running \ -+ ? 1 << 10 : 1) ++#define __ticket_spin_count(lock) (vcpu_running((lock)->owner) ? 1 << 10 : 1) + static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); -@@ -204,16 +218,18 @@ static __always_inline void __ticket_spi +@@ -204,16 +213,18 @@ static __always_inline void __ticket_spi bool free; __ticket_spin_lock_preamble; - if (likely(free)) { + if (likely(free)) -+ raw_local_irq_restore(flags); + arch_local_irq_restore(flags); +- return; + else { + token = xen_spin_adjust(lock, token); - raw_local_irq_restore(flags); -- return; ++ arch_local_irq_restore(flags); ++ count = __ticket_spin_count(lock); + do { -+ count = __ticket_spin_count(lock); + __ticket_spin_lock_body; + } while (unlikely(!count) -+ && !xen_spin_wait(lock, &token, flags)); ++ && (count = xen_spin_wait(lock, &token, flags))); } - token = xen_spin_adjust(lock, token); -- raw_local_irq_restore(flags); +- arch_local_irq_restore(flags); - do { - count = 1 << 10; - __ticket_spin_lock_body; @@ -74,7 +76,7 @@ only gets introduced in the 2.6.32 merge. 
} static __always_inline void __ticket_spin_lock_flags(arch_spinlock_t *lock, -@@ -223,13 +239,15 @@ static __always_inline void __ticket_spi +@@ -223,13 +234,15 @@ static __always_inline void __ticket_spi bool free; __ticket_spin_lock_preamble; @@ -87,17 +89,17 @@ only gets introduced in the 2.6.32 merge. - } while (unlikely(!count) && !xen_spin_wait(lock, &token, flags)); + if (unlikely(!free)) { + token = xen_spin_adjust(lock, token); ++ count = __ticket_spin_count(lock); + do { -+ count = __ticket_spin_count(lock); + __ticket_spin_lock_body; + } while (unlikely(!count) -+ && !xen_spin_wait(lock, &token, flags)); ++ && (count = xen_spin_wait(lock, &token, flags))); + } + lock->owner = raw_smp_processor_id(); } static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) -@@ -246,6 +264,7 @@ static __always_inline void __ticket_spi +@@ -246,6 +259,7 @@ static __always_inline void __ticket_spi #undef __ticket_spin_lock_preamble #undef __ticket_spin_lock_body #undef __ticket_spin_unlock_body @@ -105,8 +107,8 @@ only gets introduced in the 2.6.32 merge. #endif #define __arch_spin(n) __ticket_spin_##n ---- head-2010-03-15.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-01-28 10:38:23.000000000 +0100 -+++ head-2010-03-15/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-01-26 11:27:24.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-01-26 11:27:24.000000000 +0100 @@ -26,6 +26,11 @@ typedef union { # define TICKET_SHIFT 16 u16 cur, seq; @@ -119,46 +121,42 @@ only gets introduced in the 2.6.32 merge. #else /* * This differs from the pre-2.6.24 spinlock by always using xchgb ---- head-2010-03-15.orig/arch/x86/kernel/time-xen.c 2010-03-02 10:20:07.000000000 +0100 -+++ head-2010-03-15/arch/x86/kernel/time-xen.c 2010-03-02 10:20:42.000000000 +0100 -@@ -58,7 +58,7 @@ static u32 shadow_tv_version; - static u64 jiffies_bias, system_time_bias; - - /* Current runstate of each CPU (updated automatically by the hypervisor). */ --static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); -+DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); - - /* Must be signed, as it's compared with s64 quantities which can be -ve. 
*/ - #define NS_PER_TICK (1000000000LL/HZ) ---- head-2010-03-15.orig/drivers/xen/core/spinlock.c 2010-02-26 15:34:33.000000000 +0100 -+++ head-2010-03-15/drivers/xen/core/spinlock.c 2010-03-19 08:48:35.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:18:37.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:19:26.000000000 +0100 @@ -39,6 +39,8 @@ int __cpuinit xen_spinlock_init(unsigned - }; + struct evtchn_bind_ipi bind_ipi; int rc; + setup_runstate_area(cpu); + - rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR, - cpu, - &spinlock_action); -@@ -86,6 +88,7 @@ unsigned int xen_spin_adjust(const arch_ - bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, - unsigned int flags) + WARN_ON(per_cpu(poll_evtchn, cpu)); + bind_ipi.vcpu = cpu; + rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi); +@@ -124,18 +126,17 @@ unsigned int xen_spin_adjust(const arch_ + return spin_adjust(percpu_read(_spinning), lock, token); + } + +-bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, +- unsigned int flags) ++unsigned int xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, ++ unsigned int flags) { -+ unsigned int cpu = raw_smp_processor_id(); - int irq = spinlock_irq; ++ unsigned int rm_idx, cpu = raw_smp_processor_id(); bool rc; typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask; -@@ -93,7 +96,7 @@ bool xen_spin_wait(arch_spinlock_t *lock +- unsigned int rm_idx; struct spinning spinning, *other; /* If kicker interrupt not initialized yet, just spin. */ -- if (unlikely(irq < 0) || unlikely(!cpu_online(raw_smp_processor_id()))) -+ if (unlikely(irq < 0) || unlikely(!cpu_online(cpu))) - return false; +- if (unlikely(!cpu_online(raw_smp_processor_id())) +- || unlikely(!percpu_read(poll_evtchn))) +- return false; ++ if (unlikely(!cpu_online(cpu)) || unlikely(!percpu_read(poll_evtchn))) ++ return UINT_MAX; /* announce we're spinning */ -@@ -114,6 +117,7 @@ bool xen_spin_wait(arch_spinlock_t *lock + spinning.ticket = *ptok >> TICKET_SHIFT; +@@ -155,6 +156,7 @@ bool xen_spin_wait(arch_spinlock_t *lock * we weren't looking. */ if (lock->cur == spinning.ticket) { @@ -166,12 +164,21 @@ only gets introduced in the 2.6.32 merge. /* * If we interrupted another spinlock while it was * blocking, make sure it doesn't block (again) -@@ -207,6 +211,8 @@ bool xen_spin_wait(arch_spinlock_t *lock +@@ -251,6 +253,8 @@ bool xen_spin_wait(arch_spinlock_t *lock if (!free) token = spin_adjust(other->prev, lock, token); other->ticket = token >> TICKET_SHIFT; + if (lock->cur == other->ticket) + lock->owner = cpu; - } - raw_local_irq_restore(upcall_mask); + } while ((other = other->prev) != NULL); + lock = spinning.lock; + } +@@ -261,7 +265,7 @@ bool xen_spin_wait(arch_spinlock_t *lock + arch_local_irq_restore(upcall_mask); + *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); + +- return rc; ++ return rc ? 
0 : __ticket_spin_count(lock); + } + void xen_spin_kick(arch_spinlock_t *lock, unsigned int token) diff --git a/patches.xen/xen-staging-build b/patches.xen/xen-staging-build deleted file mode 100644 index 54ddfbe..0000000 --- a/patches.xen/xen-staging-build +++ /dev/null @@ -1,40 +0,0 @@ -From: jbeulich@novell.com -Subject: fix issue with Windows-style types used in drivers/staging/ -Patch-mainline: obsolete - ---- head-2010-03-24.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 16:41:12.000000000 +0100 -+++ head-2010-03-24/arch/x86/include/mach-xen/asm/hypervisor.h 2009-11-23 10:45:08.000000000 +0100 -@@ -354,4 +354,9 @@ MULTI_grant_table_op(multicall_entry_t * - - #define uvm_multi(cpumask) ((unsigned long)cpumask_bits(cpumask) | UVMF_MULTI) - -+#ifdef LINUX -+/* drivers/staging/ use Windows-style types, including VOID */ -+#undef VOID -+#endif -+ - #endif /* __HYPERVISOR_H__ */ ---- head-2010-03-24.orig/drivers/staging/vt6655/ttype.h 2010-03-25 16:41:12.000000000 +0100 -+++ head-2010-03-24/drivers/staging/vt6655/ttype.h 2009-10-13 17:02:12.000000000 +0200 -@@ -30,6 +30,9 @@ - #ifndef __TTYPE_H__ - #define __TTYPE_H__ - -+#ifdef CONFIG_XEN -+#include -+#endif - - /******* Common definitions and typedefs ***********************************/ - ---- head-2010-03-24.orig/drivers/staging/vt6656/ttype.h 2010-03-25 16:41:12.000000000 +0100 -+++ head-2010-03-24/drivers/staging/vt6656/ttype.h 2009-10-13 17:02:12.000000000 +0200 -@@ -30,6 +30,9 @@ - #ifndef __TTYPE_H__ - #define __TTYPE_H__ - -+#ifdef CONFIG_XEN -+#include -+#endif - - /******* Common definitions and typedefs ***********************************/ - diff --git a/patches.xen/xen-swiotlb-heuristics b/patches.xen/xen-swiotlb-heuristics index 3fe022e..bd087af 100644 --- a/patches.xen/xen-swiotlb-heuristics +++ b/patches.xen/xen-swiotlb-heuristics @@ -2,9 +2,9 @@ From: jbeulich@novell.com Subject: adjust Xen's swiotlb default size setting Patch-mainline: obsolete ---- head-2010-04-15.orig/lib/swiotlb-xen.c 2010-04-15 10:54:48.000000000 +0200 -+++ head-2010-04-15/lib/swiotlb-xen.c 2010-04-15 11:42:17.000000000 +0200 -@@ -216,8 +216,8 @@ swiotlb_init_with_default_size(size_t de +--- head-2010-08-24.orig/lib/swiotlb-xen.c 2010-08-24 16:49:11.000000000 +0200 ++++ head-2010-08-24/lib/swiotlb-xen.c 2010-08-25 14:41:33.000000000 +0200 +@@ -228,8 +228,8 @@ swiotlb_init_with_default_size(size_t de void __init swiotlb_init(int verbose) { @@ -15,7 +15,7 @@ Patch-mainline: obsolete if (swiotlb_force == 1) { swiotlb = 1; -@@ -226,8 +226,12 @@ swiotlb_init(int verbose) +@@ -238,8 +238,12 @@ swiotlb_init(int verbose) is_initial_xendomain()) { /* Domain 0 always has a swiotlb. */ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL); diff --git a/patches.xen/xen-sysdev-suspend b/patches.xen/xen-sysdev-suspend index 0b298a8..1b81c68 100644 --- a/patches.xen/xen-sysdev-suspend +++ b/patches.xen/xen-sysdev-suspend @@ -1,12 +1,12 @@ From: jbeulich@novell.com Subject: use base kernel suspend/resume infrastructure -Patch-mainline: obsolete +Patch-mainline: n/a ... rather than calling just a few functions explicitly. 
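(For orientation only, not part of the patch: the "base kernel suspend/resume infrastructure" referred to here is the old sysdev class suspend/resume hook, which the hunks below use to register evtchn, gnttab and spinlock resume handlers and which machine_reboot.c then drives via sysdev_suspend()/sysdev_resume(). A minimal sketch of that registration pattern follows; the foo_* names are placeholders, not taken from the patch.)

/*
 * Illustrative sketch of the 2.6.x sysdev suspend/resume pattern this
 * patch converts to; foo_* names are placeholders, not from the patch.
 */
#include <linux/sysdev.h>
#include <linux/init.h>

static int foo_suspend(struct sys_device *dev, pm_message_t state)
{
	/* quiesce per-subsystem state before the hypervisor suspend call */
	return 0;
}

static int foo_resume(struct sys_device *dev)
{
	/* rebuild state (event channels, grant tables, timers, ...) afterwards */
	return 0;
}

static struct sysdev_class foo_sysclass = {
	.name    = "foo",
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct sys_device device_foo = {
	.id  = 0,
	.cls = &foo_sysclass,
};

static int __init foo_register(void)
{
	int rc = sysdev_class_register(&foo_sysclass);

	if (!rc)
		rc = sysdev_register(&device_foo);
	return rc;
}
core_initcall(foo_register);

(Once registered this way, the callbacks are invoked automatically by sysdev_suspend()/sysdev_resume(), which is why the explicit irq_resume()/time_resume()/gnttab_suspend() calls can be dropped from the suspend path below.)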
---- head-2010-05-12.orig/arch/x86/kernel/time-xen.c 2010-05-12 09:13:55.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/time-xen.c 2010-05-12 09:14:03.000000000 +0200 -@@ -69,6 +69,10 @@ static DEFINE_PER_CPU(struct vcpu_runsta +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2010-09-16 16:49:59.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2010-11-23 15:07:01.000000000 +0100 +@@ -69,6 +69,10 @@ DEFINE_PER_CPU(struct vcpu_runstate_info /* Must be signed, as it's compared with s64 quantities which can be -ve. */ #define NS_PER_TICK (1000000000LL/HZ) @@ -14,10 +14,10 @@ Patch-mainline: obsolete + .period_ns = NS_PER_TICK +}; + - static void __clock_was_set(struct work_struct *unused) - { - clock_was_set(); -@@ -561,6 +565,17 @@ void mark_tsc_unstable(char *reason) + /* + * GCC 4.3 can turn loops over an induction variable into division. We do + * not support arbitrary 64-bit division, and so must break the induction. +@@ -533,6 +537,17 @@ void mark_tsc_unstable(char *reason) } EXPORT_SYMBOL_GPL(mark_tsc_unstable); @@ -35,7 +35,7 @@ Patch-mainline: obsolete static cycle_t cs_last; static cycle_t xen_clocksource_read(struct clocksource *cs) -@@ -597,11 +612,32 @@ static cycle_t xen_clocksource_read(stru +@@ -569,11 +584,32 @@ static cycle_t xen_clocksource_read(stru #endif } @@ -70,7 +70,7 @@ Patch-mainline: obsolete cs_last = local_clock(); } -@@ -633,17 +669,6 @@ struct vcpu_runstate_info *setup_runstat +@@ -605,17 +641,6 @@ struct vcpu_runstate_info *setup_runstat return rs; } @@ -88,7 +88,7 @@ Patch-mainline: obsolete void xen_read_persistent_clock(struct timespec *ts) { const shared_info_t *s = HYPERVISOR_shared_info; -@@ -689,10 +714,6 @@ static void __init setup_cpu0_timer_irq( +@@ -661,10 +686,6 @@ static void __init setup_cpu0_timer_irq( BUG_ON(per_cpu(timer_irq, 0) < 0); } @@ -96,12 +96,12 @@ Patch-mainline: obsolete - .period_ns = NS_PER_TICK -}; - - void __init time_init(void) + static void __init _late_time_init(void) { - init_cpu_khz(); -@@ -830,35 +851,6 @@ void xen_halt(void) + update_wallclock(); +@@ -807,35 +828,6 @@ void xen_halt(void) + VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL)); } - EXPORT_SYMBOL(xen_halt); -/* No locking required. Interrupts are disabled on all CPUs. */ -void time_resume(void) @@ -135,17 +135,18 @@ Patch-mainline: obsolete #ifdef CONFIG_SMP static char timer_name[NR_CPUS][15]; ---- head-2010-05-12.orig/drivers/xen/core/evtchn.c 2010-04-15 11:03:28.000000000 +0200 -+++ head-2010-05-12/drivers/xen/core/evtchn.c 2010-04-23 15:20:28.000000000 +0200 -@@ -36,6 +36,7 @@ - #include - #include - #include +--- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2011-02-10 16:24:57.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-02-10 16:18:00.000000000 +0100 +@@ -1097,6 +1097,8 @@ int xen_test_irq_pending(int irq) + } + + #ifdef CONFIG_PM_SLEEP +#include - #include - #include - #include -@@ -1117,10 +1118,21 @@ static void restore_cpu_ipis(unsigned in ++ + static void restore_cpu_virqs(unsigned int cpu) + { + struct evtchn_bind_virq bind_virq; +@@ -1155,9 +1157,20 @@ static void restore_cpu_ipis(unsigned in } } @@ -153,7 +154,6 @@ Patch-mainline: obsolete +static int evtchn_resume(struct sys_device *dev) { unsigned int cpu, irq, evtchn; - struct irq_cfg *cfg; + struct evtchn_status status; + + /* Avoid doing anything in the 'suspend cancelled' case. 
*/ @@ -168,7 +168,7 @@ Patch-mainline: obsolete init_evtchn_cpu_bindings(); -@@ -1156,7 +1168,32 @@ void irq_resume(void) +@@ -1198,7 +1211,32 @@ void irq_resume(void) restore_cpu_ipis(cpu); } @@ -176,13 +176,13 @@ Patch-mainline: obsolete +} + +static struct sysdev_class evtchn_sysclass = { -+ .name = "evtchn", -+ .resume = evtchn_resume, ++ .name = "evtchn", ++ .resume = evtchn_resume, +}; + +static struct sys_device device_evtchn = { -+ .id = 0, -+ .cls = &evtchn_sysclass, ++ .id = 0, ++ .cls = &evtchn_sysclass, +}; + +static int __init evtchn_register(void) @@ -201,21 +201,14 @@ Patch-mainline: obsolete #endif int __init arch_early_irq_init(void) ---- head-2010-05-12.orig/drivers/xen/core/gnttab.c 2010-04-15 11:04:07.000000000 +0200 -+++ head-2010-05-12/drivers/xen/core/gnttab.c 2010-04-15 11:42:34.000000000 +0200 -@@ -36,6 +36,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -708,23 +709,37 @@ EXPORT_SYMBOL(gnttab_post_map_adjust); +--- head-2011-03-11.orig/drivers/xen/core/gnttab.c 2011-01-14 15:00:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/gnttab.c 2011-01-14 15:13:58.000000000 +0100 +@@ -707,23 +707,40 @@ EXPORT_SYMBOL(gnttab_post_map_adjust); #endif /* __HAVE_ARCH_PTE_SPECIAL */ -int gnttab_resume(void) ++struct sys_device; +static int gnttab_resume(struct sys_device *dev) { if (max_nr_grant_frames() < nr_grant_frames) @@ -227,6 +220,8 @@ Patch-mainline: obsolete #ifdef CONFIG_PM_SLEEP -int gnttab_suspend(void) -{ ++#include ++ #ifdef CONFIG_X86 +static int gnttab_suspend(struct sys_device *dev, pm_message_t state) +{ @@ -253,7 +248,7 @@ Patch-mainline: obsolete #endif #else /* !CONFIG_XEN */ -@@ -804,6 +819,17 @@ int __devinit gnttab_init(void) +@@ -808,6 +825,17 @@ gnttab_init(void) if (!is_running_on_xen()) return -ENODEV; @@ -271,8 +266,8 @@ Patch-mainline: obsolete nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); ---- head-2010-05-12.orig/drivers/xen/core/machine_reboot.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/machine_reboot.c 2009-12-18 14:19:13.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/machine_reboot.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/machine_reboot.c 2011-01-13 16:21:42.000000000 +0100 @@ -17,6 +17,7 @@ #include #include @@ -281,7 +276,7 @@ Patch-mainline: obsolete #if defined(__i386__) || defined(__x86_64__) #include -@@ -145,47 +146,28 @@ struct suspend { +@@ -140,50 +141,28 @@ struct suspend { static int take_machine_down(void *_suspend) { struct suspend *suspend = _suspend; @@ -341,11 +336,14 @@ Patch-mainline: obsolete + sysdev_resume(); + } if (!suspend_cancelled) { +- extern void spinlock_resume(void); +- +- spinlock_resume(); - irq_resume(); #ifdef __x86_64__ /* * Older versions of Xen do not save/restore the user %cr3. 
-@@ -197,10 +179,6 @@ static int take_machine_down(void *_susp +@@ -195,10 +174,6 @@ static int take_machine_down(void *_susp current->active_mm->pgd))); #endif } @@ -356,7 +354,7 @@ Patch-mainline: obsolete return suspend_cancelled; } -@@ -208,8 +186,14 @@ static int take_machine_down(void *_susp +@@ -206,8 +181,14 @@ static int take_machine_down(void *_susp int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled; @@ -371,26 +369,13 @@ Patch-mainline: obsolete BUG_ON(smp_processor_id() != 0); BUG_ON(in_interrupt()); -@@ -225,41 +209,91 @@ int __xen_suspend(int fast_suspend, void - if (num_possible_cpus() == 1) - fast_suspend = 0; - -- if (fast_suspend) { -- err = stop_machine_create(); -- if (err) -- return err; -+ if (fast_suspend && _check(stop_machine_create)) { -+ printk(KERN_ERR "%s() failed: %d\n", what, err); -+ return err; - } - +@@ -225,30 +206,77 @@ int __xen_suspend(int fast_suspend, void suspend.fast_suspend = fast_suspend; suspend.resume_notifier = resume_notifier; + if (_check(dpm_suspend_start, PMSG_SUSPEND)) { -+ if (fast_suspend) -+ stop_machine_destroy(); -+ printk(KERN_ERR "%s() failed: %d\n", what, err); ++ dpm_resume_end(PMSG_RESUME); ++ pr_err("%s() failed: %d\n", what, err); + return err; + } + @@ -400,8 +385,7 @@ Patch-mainline: obsolete + if (_check(dpm_suspend_noirq, PMSG_SUSPEND)) { + xenbus_suspend_cancel(); + dpm_resume_end(PMSG_RESUME); -+ stop_machine_destroy(); -+ printk(KERN_ERR "%s() failed: %d\n", what, err); ++ pr_err("%s() failed: %d\n", what, err); + return err; + } + @@ -421,8 +405,7 @@ Patch-mainline: obsolete + if (err) { + xenbus_suspend_cancel(); + dpm_resume_end(PMSG_RESUME); -+ printk(KERN_ERR "%s() failed: %d\n", -+ what, err); ++ pr_err("%s() failed: %d\n", what, err); + return err; + } + @@ -469,19 +452,62 @@ Patch-mainline: obsolete - if (!fast_suspend) - smp_resume(); -- else + dpm_resume_end(PMSG_RESUME); -+ -+ if (fast_suspend) - stop_machine_destroy(); - return 0; + return err; } #endif ---- head-2010-05-12.orig/include/xen/evtchn.h 2010-03-31 14:02:34.000000000 +0200 -+++ head-2010-05-12/include/xen/evtchn.h 2010-03-31 14:10:36.000000000 +0200 -@@ -108,7 +108,9 @@ int bind_ipi_to_irqhandler( +--- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:17:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:18:17.000000000 +0100 +@@ -60,7 +60,9 @@ void __cpuinit xen_spinlock_cleanup(unsi + } + + #ifdef CONFIG_PM_SLEEP +-void __cpuinit spinlock_resume(void) ++#include ++ ++static int __cpuinit spinlock_resume(struct sys_device *dev) + { + unsigned int cpu; + +@@ -68,7 +70,33 @@ void __cpuinit spinlock_resume(void) + per_cpu(poll_evtchn, cpu) = 0; + xen_spinlock_init(cpu); + } ++ ++ return 0; ++} ++ ++static struct sysdev_class __cpuinitdata spinlock_sysclass = { ++ .name = "spinlock", ++ .resume = spinlock_resume ++}; ++ ++static struct sys_device __cpuinitdata device_spinlock = { ++ .id = 0, ++ .cls = &spinlock_sysclass ++}; ++ ++static int __init spinlock_register(void) ++{ ++ int rc; ++ ++ if (is_initial_xendomain()) ++ return 0; ++ ++ rc = sysdev_class_register(&spinlock_sysclass); ++ if (!rc) ++ rc = sysdev_register(&device_spinlock); ++ return rc; + } ++core_initcall(spinlock_register); + #endif + + static unsigned int spin_adjust(struct spinning *spinning, +--- head-2011-03-11.orig/include/xen/evtchn.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-11/include/xen/evtchn.h 2010-11-23 15:07:01.000000000 +0100 +@@ -109,7 +109,9 @@ int 
bind_ipi_to_irqhandler( */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); @@ -491,9 +517,9 @@ Patch-mainline: obsolete /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); ---- head-2010-05-12.orig/include/xen/gnttab.h 2008-11-04 11:13:10.000000000 +0100 -+++ head-2010-05-12/include/xen/gnttab.h 2009-11-06 11:10:15.000000000 +0100 -@@ -110,8 +110,9 @@ static inline void __gnttab_dma_unmap_pa +--- head-2011-03-11.orig/include/xen/gnttab.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/include/xen/gnttab.h 2010-11-23 15:07:01.000000000 +0100 +@@ -111,8 +111,9 @@ static inline void __gnttab_dma_unmap_pa void gnttab_reset_grant_page(struct page *page); diff --git a/patches.xen/xen-tmem-v1 b/patches.xen/xen-tmem-v1 new file mode 100644 index 0000000..48fad80 --- /dev/null +++ b/patches.xen/xen-tmem-v1 @@ -0,0 +1,348 @@ +From: jbeulich@novell.com +Subject: update tmem interface to v1 +Patch-mainline: n/a + +--- head-2010-10-05.orig/include/xen/interface/tmem.h 2010-01-04 11:56:34.000000000 +0100 ++++ head-2010-10-05/include/xen/interface/tmem.h 2010-10-06 12:12:59.000000000 +0200 +@@ -29,6 +29,9 @@ + + #include "xen.h" + ++/* version of ABI */ ++#define TMEM_SPEC_VERSION 1 ++ + /* Commands to HYPERVISOR_tmem_op() */ + #define TMEM_CONTROL 0 + #define TMEM_NEW_POOL 1 +@@ -75,10 +78,12 @@ + /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ + #define TMEM_POOL_PERSIST 1 + #define TMEM_POOL_SHARED 2 ++#define TMEM_POOL_PRECOMPRESSED 4 + #define TMEM_POOL_PAGESIZE_SHIFT 4 + #define TMEM_POOL_PAGESIZE_MASK 0xf + #define TMEM_POOL_VERSION_SHIFT 24 + #define TMEM_POOL_VERSION_MASK 0xff ++#define TMEM_POOL_RESERVED_BITS 0x00ffff00 + + /* Bits for client flags (save/restore) */ + #define TMEM_CLIENT_COMPRESS 1 +@@ -106,12 +111,12 @@ struct tmem_op { + uint32_t cli_id; + uint32_t arg1; + uint32_t arg2; +- uint64_t arg3; ++ uint64_t oid[3]; + tmem_cli_va_t buf; + } ctrl; /* for cmd == TMEM_CONTROL */ + struct { + +- uint64_t object; ++ uint64_t oid[3]; + uint32_t index; + uint32_t tmem_offset; + uint32_t pfn_offset; +@@ -126,9 +131,8 @@ DEFINE_XEN_GUEST_HANDLE(tmem_op_t); + struct tmem_handle { + uint32_t pool_id; + uint32_t index; +- uint64_t oid; ++ uint64_t oid[3]; + }; +- + #endif + + #endif /* __XEN_PUBLIC_TMEM_H__ */ +--- head-2010-10-05.orig/mm/precache.c 2010-10-08 16:25:45.000000000 +0200 ++++ head-2010-10-05/mm/precache.c 2010-10-11 10:58:42.000000000 +0200 +@@ -31,16 +31,57 @@ + */ + + #include ++#include + #include + #include "tmem.h" + + static int precache_auto_allocate; /* set to 1 to auto_allocate */ + ++union precache_filekey { ++ struct tmem_oid oid; ++ u32 fh[0]; ++}; ++ ++/* ++ * If the filesystem uses exportable filehandles, use the filehandle as ++ * the key, else use the inode number. 
++ */ ++static int precache_get_key(struct inode *inode, union precache_filekey *key) ++{ ++#define PRECACHE_KEY_MAX (sizeof(key->oid) / sizeof(*key->fh)) ++ struct super_block *sb = inode->i_sb; ++ ++ memset(key, 0, sizeof(key)); ++ if (sb->s_export_op) { ++ int (*fhfn)(struct dentry *, __u32 *fh, int *, int); ++ ++ fhfn = sb->s_export_op->encode_fh; ++ if (fhfn) { ++ struct dentry *d; ++ int ret, maxlen = PRECACHE_KEY_MAX; ++ ++ d = list_first_entry(&inode->i_dentry, ++ struct dentry, d_alias); ++ ret = fhfn(d, key->fh, &maxlen, 0); ++ if (ret < 0) ++ return ret; ++ if (ret >= 255 || maxlen > PRECACHE_KEY_MAX) ++ return -EPERM; ++ if (maxlen > 0) ++ return 0; ++ } ++ } ++ key->oid.oid[0] = inode->i_ino; ++ key->oid.oid[1] = inode->i_generation; ++ return 0; ++#undef PRECACHE_KEY_MAX ++} ++ + int precache_put(struct address_space *mapping, unsigned long index, + struct page *page) + { + u32 tmem_pool = mapping->host->i_sb->precache_poolid; +- u64 obj = (unsigned long) mapping->host->i_ino; ++ union precache_filekey key; + u32 ind = (u32) index; + unsigned long mfn = pfn_to_mfn(page_to_pfn(page)); + int ret; +@@ -56,53 +97,53 @@ int precache_put(struct address_space *m + mapping->host->i_sb->s_id, tmem_pool); + mapping->host->i_sb->precache_poolid = tmem_pool; + } +- if (ind != index) ++ if (ind != index || precache_get_key(mapping->host, &key)) + return 0; + mb(); /* ensure page is quiescent; tmem may address it with an alias */ +- return tmem_put_page(tmem_pool, obj, ind, mfn); ++ return tmem_put_page(tmem_pool, key.oid, ind, mfn); + } + + int precache_get(struct address_space *mapping, unsigned long index, + struct page *empty_page) + { + u32 tmem_pool = mapping->host->i_sb->precache_poolid; +- u64 obj = (unsigned long) mapping->host->i_ino; ++ union precache_filekey key; + u32 ind = (u32) index; + unsigned long mfn = pfn_to_mfn(page_to_pfn(empty_page)); + + if ((s32)tmem_pool < 0) + return 0; +- if (ind != index) ++ if (ind != index || precache_get_key(mapping->host, &key)) + return 0; + +- return tmem_get_page(tmem_pool, obj, ind, mfn); ++ return tmem_get_page(tmem_pool, key.oid, ind, mfn); + } + EXPORT_SYMBOL(precache_get); + + int precache_flush(struct address_space *mapping, unsigned long index) + { + u32 tmem_pool = mapping->host->i_sb->precache_poolid; +- u64 obj = (unsigned long) mapping->host->i_ino; ++ union precache_filekey key; + u32 ind = (u32) index; + + if ((s32)tmem_pool < 0) + return 0; +- if (ind != index) ++ if (ind != index || precache_get_key(mapping->host, &key)) + return 0; + +- return tmem_flush_page(tmem_pool, obj, ind); ++ return tmem_flush_page(tmem_pool, key.oid, ind); + } + EXPORT_SYMBOL(precache_flush); + + int precache_flush_inode(struct address_space *mapping) + { + u32 tmem_pool = mapping->host->i_sb->precache_poolid; +- u64 obj = (unsigned long) mapping->host->i_ino; ++ union precache_filekey key; + +- if ((s32)tmem_pool < 0) ++ if ((s32)tmem_pool < 0 || precache_get_key(mapping->host, &key)) + return 0; + +- return tmem_flush_object(tmem_pool, obj); ++ return tmem_flush_object(tmem_pool, key.oid); + } + EXPORT_SYMBOL(precache_flush_inode); + +--- head-2010-10-05.orig/mm/preswap.c 2010-08-24 11:19:44.000000000 +0200 ++++ head-2010-10-05/mm/preswap.c 2010-10-06 13:08:11.000000000 +0200 +@@ -46,7 +46,8 @@ const unsigned long preswap_zero = 0, pr + */ + #define SWIZ_BITS 4 + #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) +-#define oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK)) ++#define oswiz(_type, _ind) ((struct tmem_oid){ \ ++ .oid[0] = (_type 
<< SWIZ_BITS) | (_ind & SWIZ_MASK) }) + #define iswiz(_ind) (_ind >> SWIZ_BITS) + + /* +--- head-2010-10-05.orig/mm/tmem.h 2010-08-24 11:19:44.000000000 +0200 ++++ head-2010-10-05/mm/tmem.h 2010-10-06 14:27:10.000000000 +0200 +@@ -14,71 +14,58 @@ + #define TMEM_POOL_MIN_PAGESHIFT 12 + #define TMEM_POOL_PAGEORDER (PAGE_SHIFT - TMEM_POOL_MIN_PAGESHIFT) + +-extern int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, u64 object, u32 index, ++struct tmem_pool_uuid { ++ u64 lo; ++ u64 hi; ++}; ++ ++struct tmem_oid { ++ u64 oid[3]; ++}; ++ ++extern int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid, u32 index, + unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len); +-extern int xen_tmem_new_pool(u32 tmem_cmd, u64 uuid_lo, u64 uuid_hi, u32 flags); ++extern int xen_tmem_new_pool(struct tmem_pool_uuid, u32 flags); + +-static inline int tmem_put_page(u32 pool_id, u64 object, u32 index, ++static inline int tmem_put_page(u32 pool_id, struct tmem_oid oid, u32 index, + unsigned long gmfn) + { +- return xen_tmem_op(TMEM_PUT_PAGE, pool_id, object, index, ++ return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, + gmfn, 0, 0, 0); + } + +-static inline int tmem_get_page(u32 pool_id, u64 object, u32 index, ++static inline int tmem_get_page(u32 pool_id, struct tmem_oid oid, u32 index, + unsigned long gmfn) + { +- return xen_tmem_op(TMEM_GET_PAGE, pool_id, object, index, ++ return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, + gmfn, 0, 0, 0); + } + +-static inline int tmem_flush_page(u32 pool_id, u64 object, u32 index) ++static inline int tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index) + { +- return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, object, index, ++ return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index, + 0, 0, 0, 0); + } + +-static inline int tmem_flush_object(u32 pool_id, u64 object) ++static inline int tmem_flush_object(u32 pool_id, struct tmem_oid oid) + { +- return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, object, 0, 0, 0, 0, 0); ++ return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); + } + + static inline int tmem_new_pool(u64 uuid_lo, u64 uuid_hi, u32 flags) + { ++ struct tmem_pool_uuid uuid = { .lo = uuid_lo, .hi = uuid_hi }; ++ + BUILD_BUG_ON((TMEM_POOL_PAGEORDER < 0) || + (TMEM_POOL_PAGEORDER >= TMEM_POOL_PAGESIZE_MASK)); + flags |= TMEM_POOL_PAGEORDER << TMEM_POOL_PAGESIZE_SHIFT; +- return xen_tmem_new_pool(TMEM_NEW_POOL, uuid_lo, uuid_hi, flags); ++ return xen_tmem_new_pool(uuid, flags); + } + + static inline int tmem_destroy_pool(u32 pool_id) + { +- return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, 0, 0, 0, 0, 0, 0); ++ static const struct tmem_oid oid = {}; ++ ++ return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0); + } +-#else +-struct tmem_op { +- u32 cmd; +- s32 pool_id; /* private > 0; shared < 0; 0 is invalid */ +- union { +- struct { /* for cmd == TMEM_NEW_POOL */ +- u64 uuid[2]; +- u32 flags; +- } new; +- struct { /* for cmd == TMEM_CONTROL */ +- u32 subop; +- u32 cli_id; +- u32 arg1; +- u32 arg2; +- void *buf; +- } ctrl; +- struct { +- u64 object; +- u32 index; +- u32 tmem_offset; +- u32 pfn_offset; +- u32 len; +- unsigned long pfn; /* page frame */ +- } gen; +- } u; +-}; + #endif +--- head-2010-10-05.orig/mm/tmem-xen.c 2009-06-23 09:28:21.000000000 +0200 ++++ head-2010-10-05/mm/tmem-xen.c 2010-10-06 14:27:25.000000000 +0200 +@@ -7,8 +7,9 @@ + #include + #include + #include ++#include "tmem.h" + +-int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, u64 object, u32 index, ++int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid, 
u32 index, + unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len) + { + struct tmem_op op; +@@ -16,7 +17,8 @@ int xen_tmem_op(u32 tmem_cmd, u32 tmem_p + + op.cmd = tmem_cmd; + op.pool_id = tmem_pool; +- op.u.gen.object = object; ++ BUILD_BUG_ON(sizeof(op.u.gen.oid) != sizeof(oid.oid)); ++ memcpy(op.u.gen.oid, oid.oid, sizeof(op.u.gen.oid)); + op.u.gen.index = index; + op.u.gen.tmem_offset = tmem_offset; + op.u.gen.pfn_offset = pfn_offset; +@@ -26,15 +28,27 @@ int xen_tmem_op(u32 tmem_cmd, u32 tmem_p + return rc; + } + +-int xen_tmem_new_pool(uint32_t tmem_cmd, uint64_t uuid_lo, +- uint64_t uuid_hi, uint32_t flags) ++int xen_tmem_new_pool(struct tmem_pool_uuid uuid, uint32_t flags) + { + struct tmem_op op; + int rc = 0; + +- op.cmd = tmem_cmd; +- op.u.new.uuid[0] = uuid_lo; +- op.u.new.uuid[1] = uuid_hi; ++ op.cmd = TMEM_NEW_POOL; ++ op.u.new.uuid[0] = uuid.lo; ++ op.u.new.uuid[1] = uuid.hi; ++#ifdef TMEM_SPEC_VERSION ++ switch (flags >> TMEM_POOL_VERSION_SHIFT) { ++ case 0: ++ flags |= TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT; ++ break; ++ case TMEM_SPEC_VERSION: ++ break; ++ default: ++ WARN(1, "TMEM: Bogus version %u, expecting %u\n", ++ flags >> TMEM_POOL_VERSION_SHIFT, TMEM_SPEC_VERSION); ++ return -ENOSYS; ++ } ++#endif + op.u.new.flags = flags; + rc = HYPERVISOR_tmem_op(&op); + return rc; diff --git a/patches.xen/xen-unpriv-build b/patches.xen/xen-unpriv-build index eb729e3..72e133e 100644 --- a/patches.xen/xen-unpriv-build +++ b/patches.xen/xen-unpriv-build @@ -2,17 +2,17 @@ From: jbeulich@novell.com Subject: no need to build certain bits when building non-privileged kernel Patch-mainline: n/a ---- head-2010-05-12.orig/arch/x86/Kconfig 2010-03-25 14:39:15.000000000 +0100 -+++ head-2010-05-12/arch/x86/Kconfig 2010-05-06 16:05:14.000000000 +0200 -@@ -737,6 +737,7 @@ config APB_TIMER +--- head-2011-02-08.orig/arch/x86/Kconfig 2011-02-02 15:09:52.000000000 +0100 ++++ head-2011-02-08/arch/x86/Kconfig 2011-02-02 15:10:34.000000000 +0100 +@@ -665,6 +665,7 @@ config APB_TIMER config DMI default y - bool "Enable DMI scanning" if EMBEDDED + bool "Enable DMI scanning" if EXPERT + depends on !XEN_UNPRIVILEGED_GUEST ---help--- Enabled scanning of DMI to identify machine quirks. Say Y here unless you have verified that your setup is not -@@ -817,6 +818,7 @@ config AMD_IOMMU_STATS +@@ -745,6 +746,7 @@ config AMD_IOMMU_STATS # need this always selected by IOMMU for the VIA workaround config SWIOTLB def_bool y if X86_64 || XEN @@ -20,7 +20,7 @@ Patch-mainline: n/a ---help--- Support for software bounce buffers used on x86-64 systems which don't have a hardware IOMMU (e.g. 
the current generation -@@ -2021,13 +2023,15 @@ config PCI_GOBIOS +@@ -1968,13 +1970,15 @@ config PCI_GOBIOS config PCI_GOMMCONFIG bool "MMConfig" @@ -31,13 +31,13 @@ Patch-mainline: n/a + depends on !XEN_UNPRIVILEGED_GUEST config PCI_GOOLPC - bool "OLPC" + bool "OLPC XO-1" - depends on OLPC + depends on OLPC && !XEN_UNPRIVILEGED_GUEST config PCI_GOXEN_FE bool "Xen PCI Frontend" -@@ -2038,6 +2042,7 @@ config PCI_GOXEN_FE +@@ -1985,6 +1989,7 @@ config PCI_GOXEN_FE config PCI_GOANY bool "Any" @@ -45,25 +45,16 @@ Patch-mainline: n/a endchoice -@@ -2068,7 +2073,7 @@ config PCI_MMCONFIG +@@ -2185,7 +2190,7 @@ endif # X86_32 - config XEN_PCIDEV_FRONTEND - def_bool y -- prompt "Xen PCI Frontend" if X86_64 -+ prompt "Xen PCI Frontend" if X86_64 && !XEN_UNPRIVILEGED_GUEST - depends on PCI && XEN && (PCI_GOXEN_FE || PCI_GOANY || X86_64) - select HOTPLUG - help -@@ -2213,7 +2218,7 @@ endif # X86_32 - - config K8_NB + config AMD_NB def_bool y - depends on CPU_SUP_AMD && PCI + depends on CPU_SUP_AMD && PCI && !XEN_UNPRIVILEGED_GUEST source "drivers/pcmcia/Kconfig" -@@ -2264,7 +2269,9 @@ source "net/Kconfig" +@@ -2240,7 +2245,9 @@ source "net/Kconfig" source "drivers/Kconfig" @@ -73,22 +64,20 @@ Patch-mainline: n/a source "fs/Kconfig" ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/swiotlb.h 2010-03-25 14:39:33.000000000 +0100 -@@ -1,6 +1,10 @@ +--- head-2011-02-08.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-02-08/arch/x86/include/mach-xen/asm/swiotlb.h 2011-02-02 15:10:34.000000000 +0100 +@@ -1,4 +1,8 @@ #include_next -+#ifdef CONFIG_SWIOTLB - #define pci_swiotlb_detect() 1 -+#else ++#ifndef CONFIG_SWIOTLB +#define swiotlb_init(verbose) ((void)(verbose)) +#endif - ++ dma_addr_t swiotlb_map_single_phys(struct device *, phys_addr_t, size_t size, int dir); ---- head-2010-05-12.orig/drivers/firmware/Kconfig 2009-10-21 12:05:13.000000000 +0200 -+++ head-2010-05-12/drivers/firmware/Kconfig 2010-04-28 17:21:34.000000000 +0200 -@@ -115,7 +115,7 @@ config DMIID +--- head-2011-02-08.orig/drivers/firmware/Kconfig 2010-11-23 16:20:20.000000000 +0100 ++++ head-2011-02-08/drivers/firmware/Kconfig 2011-02-02 15:10:34.000000000 +0100 +@@ -116,7 +116,7 @@ config DMIID config ISCSI_IBFT_FIND bool "iSCSI Boot Firmware Table Attributes" @@ -97,9 +86,30 @@ Patch-mainline: n/a default n help This option enables the kernel to find the region of memory ---- head-2010-05-12.orig/drivers/xen/Kconfig 2010-03-31 14:11:36.000000000 +0200 -+++ head-2010-05-12/drivers/xen/Kconfig 2010-03-31 14:12:07.000000000 +0200 -@@ -275,6 +275,7 @@ config XEN_USB_FRONTEND_HCD_PM +--- head-2011-02-08.orig/drivers/pci/Kconfig 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-02-08/drivers/pci/Kconfig 2011-02-02 15:10:34.000000000 +0100 +@@ -74,7 +74,7 @@ config PARAVIRT_XEN_PCIDEV_FRONTEND + + config XEN_PCIDEV_FRONTEND + def_bool y +- prompt "Xen PCI Frontend" if X86_64 ++ prompt "Xen PCI Frontend" if X86_64 && !XEN_UNPRIVILEGED_GUEST + depends on PCI && XEN && (PCI_GOXEN_FE || PCI_GOANY || X86_64) + select HOTPLUG + help +--- head-2011-02-08.orig/drivers/xen/Kconfig 2010-11-26 13:38:08.000000000 +0100 ++++ head-2011-02-08/drivers/xen/Kconfig 2011-02-09 16:23:14.000000000 +0100 +@@ -19,7 +19,8 @@ config XEN_PRIVILEGED_GUEST + Support for privileged operation (domain 0) + + config XEN_UNPRIVILEGED_GUEST +- def_bool !XEN_PRIVILEGED_GUEST ++ def_bool y ++ depends on !XEN_PRIVILEGED_GUEST + 
select PM + select SUSPEND + +@@ -271,6 +272,7 @@ config XEN_USB_FRONTEND_HCD_PM config XEN_GRANT_DEV tristate "User-space granted page access driver" @@ -107,9 +117,9 @@ Patch-mainline: n/a default XEN_PRIVILEGED_GUEST help Device for accessing (in user-space) pages that have been granted ---- head-2010-05-12.orig/drivers/xen/balloon/balloon.c 2010-02-03 11:56:18.000000000 +0100 -+++ head-2010-05-12/drivers/xen/balloon/balloon.c 2010-04-15 11:44:37.000000000 +0200 -@@ -663,6 +663,9 @@ void balloon_update_driver_allowance(lon +--- head-2011-02-08.orig/drivers/xen/balloon/balloon.c 2010-11-25 13:47:01.000000000 +0100 ++++ head-2011-02-08/drivers/xen/balloon/balloon.c 2011-02-02 15:10:34.000000000 +0100 +@@ -660,6 +660,9 @@ void balloon_update_driver_allowance(lon bs.driver_pages += delta; balloon_unlock(flags); } @@ -119,7 +129,7 @@ Patch-mainline: n/a #ifdef CONFIG_XEN static int dealloc_pte_fn( -@@ -771,6 +774,7 @@ struct page **alloc_empty_pages_and_page +@@ -768,6 +771,7 @@ struct page **alloc_empty_pages_and_page pagevec = NULL; goto out; } @@ -127,7 +137,7 @@ Patch-mainline: n/a void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { -@@ -791,6 +795,9 @@ void free_empty_pages_and_pagevec(struct +@@ -788,6 +792,9 @@ void free_empty_pages_and_pagevec(struct schedule_work(&balloon_worker); } @@ -137,7 +147,7 @@ Patch-mainline: n/a void balloon_release_driver_page(struct page *page) { -@@ -804,10 +811,6 @@ void balloon_release_driver_page(struct +@@ -801,10 +808,6 @@ void balloon_release_driver_page(struct schedule_work(&balloon_worker); } @@ -148,9 +158,53 @@ Patch-mainline: n/a EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); ---- head-2010-05-12.orig/drivers/xen/core/Makefile 2010-03-25 14:39:15.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/Makefile 2010-03-25 14:39:33.000000000 +0100 -@@ -2,9 +2,10 @@ +--- head-2011-02-08.orig/drivers/xen/console/console.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-02-08/drivers/xen/console/console.c 2011-02-02 15:10:34.000000000 +0100 +@@ -47,7 +47,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -244,6 +243,7 @@ static int __init xen_console_init(void) + } + console_initcall(xen_console_init); + ++#ifdef CONFIG_XEN_PRIVILEGED_GUEST + /*** Useful function for console debugging -- goes straight to Xen. ***/ + asmlinkage int xprintk(const char *fmt, ...) + { +@@ -261,6 +261,7 @@ asmlinkage int xprintk(const char *fmt, + + return 0; + } ++#endif + + /*** Forcibly flush console data before dying. ***/ + void xencons_force_flush(void) +@@ -285,6 +286,9 @@ void xencons_force_flush(void) + } + + ++#ifdef CONFIG_XEN_PRIVILEGED_GUEST ++#include ++ + void __init dom0_init_screen_info(const struct dom0_vga_console_info *info, size_t size) + { + /* This is drawn from a dump from vgacon:startup in +@@ -340,6 +344,7 @@ void __init dom0_init_screen_info(const + break; + } + } ++#endif + + + /******************** User-space console driver (/dev/console) ************/ +--- head-2011-02-08.orig/drivers/xen/core/Makefile 2011-02-02 15:09:57.000000000 +0100 ++++ head-2011-02-08/drivers/xen/core/Makefile 2011-02-02 15:10:34.000000000 +0100 +@@ -2,10 +2,11 @@ # Makefile for the linux kernel. 
# @@ -158,14 +212,34 @@ Patch-mainline: n/a +obj-y := evtchn.o gnttab.o reboot.o machine_reboot.o -obj-$(CONFIG_PCI) += pci.o +-obj-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o +priv-$(CONFIG_PCI) += pci.o ++priv-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o +obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += firmware.o $(priv-y) obj-$(CONFIG_PROC_FS) += xen_proc.o obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o ---- head-2010-05-12.orig/drivers/xen/core/gnttab.c 2010-04-15 11:44:26.000000000 +0200 -+++ head-2010-05-12/drivers/xen/core/gnttab.c 2010-04-15 11:44:35.000000000 +0200 -@@ -438,8 +438,6 @@ static inline unsigned int max_nr_grant_ +--- head-2011-02-08.orig/drivers/xen/core/evtchn.c 2011-02-16 08:29:50.000000000 +0100 ++++ head-2011-02-08/drivers/xen/core/evtchn.c 2011-02-16 08:30:09.000000000 +0100 +@@ -1854,6 +1854,7 @@ void evtchn_register_pirq(int irq) + "fasteoi"); + } + ++#ifdef CONFIG_PCI_MSI + int evtchn_map_pirq(int irq, int xen_pirq) + { + if (irq < 0) { +@@ -1928,6 +1929,7 @@ int evtchn_map_pirq(int irq, int xen_pir + } + return index_from_irq(irq) ? irq : -EINVAL; + } ++#endif + + int evtchn_get_xen_pirq(int irq) + { +--- head-2011-02-08.orig/drivers/xen/core/gnttab.c 2011-02-02 15:10:16.000000000 +0100 ++++ head-2011-02-08/drivers/xen/core/gnttab.c 2011-02-02 15:10:34.000000000 +0100 +@@ -436,8 +436,6 @@ static inline unsigned int max_nr_grant_ #ifdef CONFIG_XEN @@ -174,7 +248,7 @@ Patch-mainline: n/a #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) -@@ -509,6 +507,10 @@ static int gnttab_map(unsigned int start +@@ -507,6 +505,10 @@ static int gnttab_map(unsigned int start return 0; } @@ -185,7 +259,7 @@ Patch-mainline: n/a static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); -@@ -640,6 +642,8 @@ void __gnttab_dma_map_page(struct page * +@@ -638,6 +640,8 @@ void __gnttab_dma_map_page(struct page * } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } @@ -194,8 +268,8 @@ Patch-mainline: n/a #ifdef __HAVE_ARCH_PTE_SPECIAL static unsigned int GNTMAP_pte_special; ---- head-2010-05-12.orig/drivers/xen/privcmd/Makefile 2007-07-10 09:42:30.000000000 +0200 -+++ head-2010-05-12/drivers/xen/privcmd/Makefile 2010-03-25 14:39:33.000000000 +0100 +--- head-2011-02-08.orig/drivers/xen/privcmd/Makefile 2007-07-10 09:42:30.000000000 +0200 ++++ head-2011-02-08/drivers/xen/privcmd/Makefile 2011-02-02 15:10:34.000000000 +0100 @@ -1,3 +1,3 @@ - -obj-y += privcmd.o @@ -203,9 +277,9 @@ Patch-mainline: n/a +priv-$(CONFIG_COMPAT) := compat_privcmd.o +obj-y := privcmd.o +obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += $(priv-y) ---- head-2010-05-12.orig/drivers/xen/privcmd/privcmd.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-12/drivers/xen/privcmd/privcmd.c 2010-03-25 14:39:33.000000000 +0100 -@@ -33,6 +33,9 @@ +--- head-2011-02-08.orig/drivers/xen/privcmd/privcmd.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-02-08/drivers/xen/privcmd/privcmd.c 2011-02-02 15:10:34.000000000 +0100 +@@ -32,6 +32,9 @@ static struct proc_dir_entry *privcmd_intf; static struct proc_dir_entry *capabilities_intf; @@ -215,7 +289,7 @@ Patch-mainline: n/a #ifndef HAVE_ARCH_PRIVCMD_MMAP static int enforce_singleshot_mapping_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) -@@ -57,12 +60,14 @@ static long privcmd_ioctl(struct file *f +@@ -56,12 +59,14 @@ static long privcmd_ioctl(struct file *f { long ret; void __user *udata = (void __user *) data; @@ -230,7 +304,7 @@ Patch-mainline: 
n/a switch (cmd) { case IOCTL_PRIVCMD_HYPERCALL: { -@@ -87,6 +92,8 @@ static long privcmd_ioctl(struct file *f +@@ -86,6 +91,8 @@ static long privcmd_ioctl(struct file *f } break; @@ -239,7 +313,7 @@ Patch-mainline: n/a case IOCTL_PRIVCMD_MMAP: { #define MMAP_NR_PER_PAGE \ (unsigned long)((PAGE_SIZE - sizeof(*l)) / sizeof(*msg)) -@@ -392,6 +399,8 @@ static long privcmd_ioctl(struct file *f +@@ -391,6 +398,8 @@ static long privcmd_ioctl(struct file *f } break; @@ -248,9 +322,9 @@ Patch-mainline: n/a default: ret = -EINVAL; break; -@@ -427,7 +436,9 @@ static int privcmd_mmap(struct file * fi - - static const struct file_operations privcmd_file_ops = { +@@ -429,7 +438,9 @@ static const struct file_operations priv + .open = nonseekable_open, + .llseek = no_llseek, .unlocked_ioctl = privcmd_ioctl, +#ifdef CONFIG_XEN_PRIVILEGED_GUEST .mmap = privcmd_mmap, @@ -258,9 +332,9 @@ Patch-mainline: n/a }; static int capabilities_read(char *page, char **start, off_t off, ---- head-2010-05-12.orig/fs/compat_ioctl.c 2010-05-12 09:02:56.000000000 +0200 -+++ head-2010-05-12/fs/compat_ioctl.c 2010-05-12 09:15:02.000000000 +0200 -@@ -1602,7 +1602,7 @@ static long do_ioctl_trans(int fd, unsig +--- head-2011-02-08.orig/fs/compat_ioctl.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-02-08/fs/compat_ioctl.c 2011-02-02 15:10:34.000000000 +0100 +@@ -1481,7 +1481,7 @@ static long do_ioctl_trans(int fd, unsig return do_video_stillpicture(fd, cmd, argp); case VIDEO_SET_SPU_PALETTE: return do_video_set_spu_palette(fd, cmd, argp); @@ -269,8 +343,8 @@ Patch-mainline: n/a case IOCTL_PRIVCMD_MMAP_32: case IOCTL_PRIVCMD_MMAPBATCH_32: case IOCTL_PRIVCMD_MMAPBATCH_V2_32: ---- head-2010-05-12.orig/include/xen/firmware.h 2007-07-02 08:16:19.000000000 +0200 -+++ head-2010-05-12/include/xen/firmware.h 2010-03-25 14:39:33.000000000 +0100 +--- head-2011-02-08.orig/include/xen/firmware.h 2007-07-02 08:16:19.000000000 +0200 ++++ head-2011-02-08/include/xen/firmware.h 2011-02-02 15:10:34.000000000 +0100 @@ -5,6 +5,10 @@ void copy_edd(void); #endif @@ -282,9 +356,9 @@ Patch-mainline: n/a +#endif #endif /* __XEN_FIRMWARE_H__ */ ---- head-2010-05-12.orig/include/xen/gnttab.h 2009-11-06 11:10:15.000000000 +0100 -+++ head-2010-05-12/include/xen/gnttab.h 2010-03-25 14:39:33.000000000 +0100 -@@ -103,7 +103,11 @@ void gnttab_grant_foreign_transfer_ref(g +--- head-2011-02-08.orig/include/xen/gnttab.h 2010-11-23 15:07:01.000000000 +0100 ++++ head-2011-02-08/include/xen/gnttab.h 2011-02-02 15:10:34.000000000 +0100 +@@ -104,7 +104,11 @@ void gnttab_grant_foreign_transfer_ref(g unsigned long pfn); int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); diff --git a/patches.xen/xen-virq-per-cpu-irq b/patches.xen/xen-virq-per-cpu-irq index 7e3279d..0175588 100644 --- a/patches.xen/xen-virq-per-cpu-irq +++ b/patches.xen/xen-virq-per-cpu-irq @@ -1,10 +1,10 @@ From: jbeulich@novell.com Subject: fold per-CPU VIRQs onto a single IRQ each -Patch-mainline: obsolete +Patch-mainline: n/a ---- head-2010-05-12.orig/arch/x86/kernel/time-xen.c 2010-05-12 09:14:03.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/time-xen.c 2010-05-12 09:14:09.000000000 +0200 -@@ -699,19 +699,17 @@ int xen_update_persistent_clock(void) +--- head-2011-02-17.orig/arch/x86/kernel/time-xen.c 2010-11-23 15:07:01.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/time-xen.c 2010-10-05 16:57:34.000000000 +0200 +@@ -671,19 +671,17 @@ int xen_update_persistent_clock(void) } /* Dynamically-mapped IRQ. 
*/ @@ -31,9 +31,9 @@ Patch-mainline: obsolete + BUG_ON(timer_irq < 0); } - void __init time_init(void) -@@ -852,8 +850,6 @@ void xen_halt(void) - EXPORT_SYMBOL(xen_halt); + static void __init _late_time_init(void) +@@ -829,8 +827,6 @@ void xen_halt(void) + } #ifdef CONFIG_SMP -static char timer_name[NR_CPUS][15]; @@ -41,7 +41,7 @@ Patch-mainline: obsolete int __cpuinit local_setup_timer(unsigned int cpu) { int seq, irq; -@@ -879,16 +875,10 @@ int __cpuinit local_setup_timer(unsigned +@@ -856,16 +852,10 @@ int __cpuinit local_setup_timer(unsigned init_missing_ticks_accounting(cpu); } while (read_seqretry(&xtime_lock, seq)); @@ -60,7 +60,7 @@ Patch-mainline: obsolete return 0; } -@@ -896,7 +886,7 @@ int __cpuinit local_setup_timer(unsigned +@@ -873,7 +863,7 @@ int __cpuinit local_setup_timer(unsigned void __cpuinit local_teardown_timer(unsigned int cpu) { BUG_ON(cpu == 0); @@ -69,97 +69,97 @@ Patch-mainline: obsolete } #endif ---- head-2010-05-12.orig/drivers/xen/core/evtchn.c 2010-04-23 15:20:31.000000000 +0200 -+++ head-2010-05-12/drivers/xen/core/evtchn.c 2010-04-23 15:20:36.000000000 +0200 +--- head-2011-02-17.orig/drivers/xen/core/evtchn.c 2011-02-15 17:52:39.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/evtchn.c 2011-02-16 08:29:06.000000000 +0100 @@ -59,6 +59,23 @@ static DEFINE_SPINLOCK(irq_mapping_updat static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... NR_EVENT_CHANNELS-1] = -1 }; +#if defined(CONFIG_SMP) && defined(CONFIG_X86) -+static struct per_cpu_irqaction { ++static struct percpu_irqaction { + struct irqaction action; /* must be first */ -+ struct per_cpu_irqaction *next; -+ cpumask_t cpus; ++ struct percpu_irqaction *next; ++ cpumask_var_t cpus; +} *virq_actions[NR_VIRQS]; +/* IRQ <-> VIRQ mapping. */ +static DECLARE_BITMAP(virq_per_cpu, NR_VIRQS) __read_mostly; -+static DEFINE_PER_CPU(int[NR_VIRQS], virq_to_evtchn); -+#define BUG_IF_VIRQ_PER_CPU(irq) \ -+ BUG_ON(type_from_irq(irq) == IRQT_VIRQ \ -+ && test_bit(index_from_irq(irq), virq_per_cpu)) ++static DEFINE_PER_CPU_READ_MOSTLY(int[NR_VIRQS], virq_to_evtchn); ++#define BUG_IF_VIRQ_PER_CPU(irq_cfg) \ ++ BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_VIRQ \ ++ && test_bit(index_from_irq_cfg(irq_cfg), virq_per_cpu)) +#else -+#define BUG_IF_VIRQ_PER_CPU(irq) ((void)(irq)) ++#define BUG_IF_VIRQ_PER_CPU(irq_cfg) ((void)0) +#define PER_CPU_VIRQ_IRQ +#endif + /* IRQ <-> IPI mapping. */ - #ifndef NR_IPIS - #define NR_IPIS 1 -@@ -133,15 +150,6 @@ static inline u32 mk_irq_info(u32 type, - * Accessors for packed IRQ information. - */ - --#ifdef PER_CPU_IPI_IRQ --static inline unsigned int evtchn_from_irq(int irq) --{ -- const struct irq_cfg *cfg = irq_cfg(irq); -- -- return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0; --} --#endif -- - static inline unsigned int index_from_irq(int irq) - { - const struct irq_cfg *cfg = irq_cfg(irq); -@@ -157,24 +165,39 @@ static inline unsigned int type_from_irq - return cfg ? cfg->info >> (32 - _IRQT_BITS) : IRQT_UNBOUND; + #if defined(CONFIG_SMP) && defined(CONFIG_X86) + static int __read_mostly ipi_irq = -1; +@@ -160,21 +177,34 @@ static inline unsigned int type_from_irq + return cfg ? 
type_from_irq_cfg(cfg) : IRQT_UNBOUND; } -#ifndef PER_CPU_IPI_IRQ - static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq, - unsigned int cpu) + static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg, + unsigned int cpu) { -- BUG_ON(type_from_irq(irq) != IRQT_IPI); -- return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)]; -+ switch (type_from_irq(irq)) { +- BUG_ON(type_from_irq_cfg(cfg) != IRQT_IPI); +- return per_cpu(ipi_evtchn, cpu); +-} ++ switch (type_from_irq_cfg(cfg)) { +#ifndef PER_CPU_VIRQ_IRQ + case IRQT_VIRQ: -+ return per_cpu(virq_to_evtchn, cpu)[index_from_irq(irq)]; ++ return per_cpu(virq_to_evtchn, cpu)[index_from_irq_cfg(cfg)]; +#endif +#ifndef PER_CPU_IPI_IRQ + case IRQT_IPI: -+ return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)]; -+#endif ++ return per_cpu(ipi_evtchn, cpu); + #endif + } + BUG(); + return 0; - } ++} - static inline unsigned int evtchn_from_irq(unsigned int irq) + static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg) { -- if (type_from_irq(irq) != IRQT_IPI) { -- const struct irq_cfg *cfg = irq_cfg(irq); -+ const struct irq_cfg *cfg; - -- return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0; -+ switch (type_from_irq(irq)) { ++ switch (type_from_irq_cfg(cfg)) { +#ifndef PER_CPU_VIRQ_IRQ + case IRQT_VIRQ: +#endif -+#ifndef PER_CPU_IPI_IRQ + #ifndef PER_CPU_IPI_IRQ +- if (type_from_irq_cfg(cfg) == IRQT_IPI) +- return evtchn_from_per_cpu_irq(cfg, smp_processor_id()); + case IRQT_IPI: -+#endif -+ return evtchn_from_per_cpu_irq(irq, smp_processor_id()); - } -- return evtchn_from_per_cpu_irq(irq, smp_processor_id()); -+ cfg = irq_cfg(irq); -+ return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0; + #endif ++ return evtchn_from_per_cpu_irq(cfg, smp_processor_id()); ++ } + return cfg->info & ((1U << _EVTCHN_BITS) - 1); } --#endif - unsigned int irq_from_evtchn(unsigned int port) - { -@@ -522,6 +545,14 @@ static int bind_virq_to_irq(unsigned int +@@ -357,13 +387,22 @@ asmlinkage void __irq_entry evtchn_do_up + * hardirq handlers see an up-to-date system time even if we + * have just woken from a long idle period. 
+ */ ++#ifdef PER_CPU_VIRQ_IRQ + if ((irq = percpu_read(virq_to_irq[VIRQ_TIMER])) != -1) { + port = evtchn_from_irq(irq); ++#else ++ port = percpu_read(virq_to_evtchn[VIRQ_TIMER]); ++ if (VALID_EVTCHN(port)) { ++#endif + l1i = port / BITS_PER_LONG; + l2i = port % BITS_PER_LONG; + if (active_evtchns(l1i) & (1ul<info = mk_irq_info(IRQT_VIRQ, virq, evtchn); + cfg->info = mk_irq_info(IRQT_VIRQ, virq, evtchn); per_cpu(virq_to_irq, cpu)[virq] = irq; -@@ -576,7 +607,9 @@ static void unbind_from_irq(unsigned int - unsigned int cpu; - int evtchn = evtchn_from_irq(irq); +@@ -646,7 +693,9 @@ static void unbind_from_irq(unsigned int + struct irq_cfg *cfg = irq_cfg(irq); + int evtchn = evtchn_from_irq_cfg(cfg); -+ BUG_IF_VIRQ_PER_CPU(irq); - BUG_IF_IPI(irq); ++ BUG_IF_VIRQ_PER_CPU(cfg); + BUG_IF_IPI(cfg); + spin_lock(&irq_mapping_update_lock); - if (!--irq_cfg(irq)->bindcount && VALID_EVTCHN(evtchn)) { -@@ -589,6 +622,11 @@ static void unbind_from_irq(unsigned int + if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) { +@@ -658,6 +707,11 @@ static void unbind_from_irq(unsigned int case IRQT_VIRQ: per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) - [index_from_irq(irq)] = -1; + [index_from_irq_cfg(cfg)] = -1; +#ifndef PER_CPU_VIRQ_IRQ + for_each_possible_cpu(cpu) + per_cpu(virq_to_evtchn, cpu) -+ [index_from_irq(irq)] = 0; ++ [index_from_irq_cfg(cfg)] = 0; +#endif break; #if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ) case IRQT_IPI: -@@ -618,11 +656,13 @@ static void unbind_from_irq(unsigned int +@@ -691,13 +745,34 @@ static void unbind_from_irq(unsigned int spin_unlock(&irq_mapping_update_lock); } --#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ) +-#ifndef PER_CPU_IPI_IRQ -void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu) -+#if defined(CONFIG_SMP) && (!defined(PER_CPU_IPI_IRQ) || !defined(PER_CPU_VIRQ_IRQ)) ++#if !defined(PER_CPU_IPI_IRQ) || !defined(PER_CPU_VIRQ_IRQ) ++static inline struct percpu_irqaction *alloc_percpu_irqaction(gfp_t gfp) ++{ ++ struct percpu_irqaction *new = kzalloc(sizeof(*new), GFP_ATOMIC); ++ ++ if (new && !zalloc_cpumask_var(&new->cpus, gfp)) { ++ kfree(new); ++ new = NULL; ++ } ++ return new; ++} ++ ++static inline void free_percpu_irqaction(struct percpu_irqaction *action) ++{ ++ if (!action) ++ return; ++ free_cpumask_var(action->cpus); ++ kfree(action); ++} ++ +void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu, + struct irqaction *action) { struct evtchn_close close; - int evtchn = evtchn_from_per_cpu_irq(irq, cpu); -+ struct irqaction *free_action = NULL; + struct irq_data *data = irq_get_irq_data(irq); + struct irq_cfg *cfg = irq_data_cfg(data); + int evtchn = evtchn_from_per_cpu_irq(cfg, cpu); ++ struct percpu_irqaction *free_action = NULL; spin_lock(&irq_mapping_update_lock); -@@ -633,6 +673,32 @@ void unbind_from_per_cpu_irq(unsigned in +@@ -706,6 +781,34 @@ void unbind_from_per_cpu_irq(unsigned in - BUG_ON(irq_cfg(irq)->bindcount <= 1); - irq_cfg(irq)->bindcount--; + BUG_ON(cfg->bindcount <= 1); + cfg->bindcount--; + +#ifndef PER_CPU_VIRQ_IRQ -+ if (type_from_irq(irq) == IRQT_VIRQ) { -+ unsigned int virq = index_from_irq(irq); -+ struct per_cpu_irqaction *cur, *prev = NULL; ++ if (type_from_irq_cfg(cfg) == IRQT_VIRQ) { ++ unsigned int virq = index_from_irq_cfg(cfg); ++ struct percpu_irqaction *cur, *prev = NULL; + + cur = virq_actions[virq]; + while (cur) { + if (cur->action.dev_id == action) { -+ cpu_clear(cpu, cur->cpus); -+ if (cpus_empty(cur->cpus)) { ++ cpumask_clear_cpu(cpu, cur->cpus); ++ if (cpumask_empty(cur->cpus)) { ++ 
WARN_ON(free_action); + if (prev) + prev->next = cur->next; + else -+ virq_actions[virq] = cur->next; -+ free_action = action; ++ virq_actions[virq] ++ = cur->next; ++ free_action = cur; + } -+ } else if (cpu_isset(cpu, cur->cpus)) ++ } else if (cpumask_test_cpu(cpu, cur->cpus)) + evtchn = 0; + cur = (prev = cur)->next; + } @@ -242,27 +265,28 @@ Patch-mainline: obsolete + } +#endif + - cpumask_clear_cpu(cpu, desc->affinity); + cpumask_clear_cpu(cpu, data->affinity); close.port = evtchn; -@@ -640,9 +706,16 @@ void unbind_from_per_cpu_irq(unsigned in +@@ -713,9 +816,17 @@ void unbind_from_per_cpu_irq(unsigned in BUG(); - switch (type_from_irq(irq)) { + switch (type_from_irq_cfg(cfg)) { +#ifndef PER_CPU_VIRQ_IRQ + case IRQT_VIRQ: -+ per_cpu(virq_to_evtchn, cpu)[index_from_irq(irq)] = 0; ++ per_cpu(virq_to_evtchn, cpu) ++ [index_from_irq_cfg(cfg)] = 0; + break; +#endif +#ifndef PER_CPU_IPI_IRQ case IRQT_IPI: - per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = 0; + per_cpu(ipi_evtchn, cpu) = 0; break; +#endif default: BUG(); break; -@@ -654,9 +727,16 @@ void unbind_from_per_cpu_irq(unsigned in +@@ -727,9 +838,18 @@ void unbind_from_per_cpu_irq(unsigned in evtchn_to_irq[evtchn] = -1; } @@ -271,25 +295,29 @@ Patch-mainline: obsolete +#endif spin_unlock(&irq_mapping_update_lock); + -+ if (free_action) -+ free_irq(irq, free_action); ++ if (free_action) { ++ free_irq(irq, free_action->action.dev_id); ++ free_percpu_irqaction(free_action); ++ } } --#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */ +-#endif /* !PER_CPU_IPI_IRQ */ +EXPORT_SYMBOL_GPL(unbind_from_per_cpu_irq); -+#endif /* CONFIG_SMP && (!PER_CPU_IPI_IRQ || !PER_CPU_VIRQ_IRQ) */ ++#endif /* !PER_CPU_IPI_IRQ || !PER_CPU_VIRQ_IRQ */ int bind_caller_port_to_irqhandler( unsigned int caller_port, -@@ -738,6 +818,8 @@ int bind_virq_to_irqhandler( +@@ -811,6 +931,10 @@ int bind_virq_to_irqhandler( { int irq, retval; -+ BUG_IF_VIRQ_PER_CPU(virq); ++#ifndef PER_CPU_VIRQ_IRQ ++ BUG_ON(test_bit(virq, virq_per_cpu)); ++#endif + irq = bind_virq_to_irq(virq, cpu); if (irq < 0) return irq; -@@ -753,6 +835,108 @@ int bind_virq_to_irqhandler( +@@ -826,6 +950,109 @@ int bind_virq_to_irqhandler( EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); #ifdef CONFIG_SMP @@ -300,15 +328,16 @@ Patch-mainline: obsolete + struct irqaction *action) +{ + struct evtchn_bind_virq bind_virq; ++ struct irq_cfg *cfg; + int evtchn, irq, retval = 0; -+ struct per_cpu_irqaction *cur = NULL, *new; ++ struct percpu_irqaction *cur = NULL, *new; + + BUG_ON(!test_bit(virq, virq_per_cpu)); + + if (action->dev_id) + return -EINVAL; + -+ new = kzalloc(sizeof(*new), GFP_ATOMIC); ++ new = alloc_percpu_irqaction(GFP_ATOMIC); + if (new) { + new->action = *action; + new->action.dev_id = action; @@ -326,9 +355,10 @@ Patch-mainline: obsolete + } + new->next = virq_actions[virq]; + virq_actions[virq] = cur = new; ++ new = NULL; + retval = 1; + } -+ cpu_set(cpu, cur->cpus); ++ cpumask_set_cpu(cpu, cur->cpus); + action = &cur->action; + + if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) { @@ -336,22 +366,22 @@ Patch-mainline: obsolete + + BUG_ON(!retval); + -+ if ((irq = find_unbound_irq(cpu, true)) < 0) { -+ if (cur) -+ virq_actions[virq] = cur->next; ++ if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, ++ &dynirq_chip, true)) < 0) { ++ virq_actions[virq] = cur->next; + spin_unlock(&irq_mapping_update_lock); -+ if (cur != new) -+ kfree(new); ++ free_percpu_irqaction(new); + return irq; + } + + /* Extra reference so count will never drop to zero. 
*/ -+ irq_cfg(irq)->bindcount++; ++ cfg->bindcount++; + + for_each_possible_cpu(nr) + per_cpu(virq_to_irq, nr)[virq] = irq; -+ irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq, 0); -+ } ++ cfg->info = mk_irq_info(IRQT_VIRQ, virq, 0); ++ } else ++ cfg = irq_cfg(irq); + + evtchn = per_cpu(virq_to_evtchn, cpu)[virq]; + if (!VALID_EVTCHN(evtchn)) { @@ -367,12 +397,11 @@ Patch-mainline: obsolete + bind_evtchn_to_cpu(evtchn, cpu); + } + -+ irq_cfg(irq)->bindcount++; ++ cfg->bindcount++; + + spin_unlock(&irq_mapping_update_lock); + -+ if (cur != new) -+ kfree(new); ++ free_percpu_irqaction(new); + + if (retval == 0) { + unsigned long flags; @@ -384,7 +413,7 @@ Patch-mainline: obsolete + action->flags |= IRQF_PERCPU; + retval = setup_irq(irq, action); + if (retval) { -+ unbind_from_per_cpu_irq(irq, cpu, cur->action.dev_id); ++ unbind_from_per_cpu_irq(irq, cpu, action); + BUG_ON(retval > 0); + irq = retval; + } @@ -398,44 +427,44 @@ Patch-mainline: obsolete #ifdef PER_CPU_IPI_IRQ int bind_ipi_to_irqhandler( unsigned int ipi, -@@ -832,7 +1016,7 @@ int __cpuinit bind_ipi_to_irqaction( +@@ -905,7 +1132,7 @@ int __cpuinit bind_ipi_to_irqaction( action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND; - retval = setup_irq(irq, action); + retval = setup_irq(ipi_irq, action); if (retval) { -- unbind_from_per_cpu_irq(irq, cpu); -+ unbind_from_per_cpu_irq(irq, cpu, NULL); +- unbind_from_per_cpu_irq(ipi_irq, cpu); ++ unbind_from_per_cpu_irq(ipi_irq, cpu, NULL); BUG_ON(retval > 0); - irq = retval; + ipi_irq = retval; } -@@ -867,7 +1051,9 @@ static void rebind_irq_to_cpu(unsigned i - { - int evtchn = evtchn_from_irq(irq); +@@ -941,7 +1168,9 @@ static void rebind_irq_to_cpu(struct irq + const struct irq_cfg *cfg = irq_data_cfg(data); + int evtchn = evtchn_from_irq_cfg(cfg); -+ BUG_IF_VIRQ_PER_CPU(irq); - BUG_IF_IPI(irq); ++ BUG_IF_VIRQ_PER_CPU(cfg); + BUG_IF_IPI(cfg); + if (VALID_EVTCHN(evtchn)) rebind_evtchn_to_cpu(evtchn, tcpu); } -@@ -1142,7 +1328,9 @@ void notify_remote_via_irq(int irq) - { - int evtchn = evtchn_from_irq(irq); +@@ -1233,7 +1462,9 @@ void notify_remote_via_irq(int irq) -+ BUG_ON(type_from_irq(irq) == IRQT_VIRQ); - BUG_IF_IPI(irq); + if (WARN_ON_ONCE(!cfg)) + return; ++ BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ); + BUG_IF_IPI(cfg); + + evtchn = evtchn_from_irq_cfg(cfg); if (VALID_EVTCHN(evtchn)) notify_remote_via_evtchn(evtchn); - } -@@ -1150,6 +1338,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq) +@@ -1246,6 +1477,7 @@ int irq_to_evtchn_port(int irq) - int irq_to_evtchn_port(int irq) - { -+ BUG_IF_VIRQ_PER_CPU(irq); - BUG_IF_IPI(irq); - return evtchn_from_irq(irq); + if (!cfg) + return 0; ++ BUG_IF_VIRQ_PER_CPU(cfg); + BUG_IF_IPI(cfg); + return evtchn_from_irq_cfg(cfg); } -@@ -1244,6 +1433,12 @@ static void restore_cpu_virqs(unsigned i +@@ -1313,6 +1545,12 @@ static void restore_cpu_virqs(unsigned i if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; @@ -448,7 +477,7 @@ Patch-mainline: obsolete BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_VIRQ, virq, 0)); /* Get a new binding from Xen. */ -@@ -1256,7 +1451,20 @@ static void restore_cpu_virqs(unsigned i +@@ -1325,7 +1563,20 @@ static void restore_cpu_virqs(unsigned i /* Record the new mapping. */ evtchn_to_irq[evtchn] = irq; @@ -469,7 +498,7 @@ Patch-mainline: obsolete bind_evtchn_to_cpu(evtchn, cpu); /* Ready for use. */ -@@ -1312,7 +1520,11 @@ static int evtchn_resume(struct sys_devi +@@ -1389,7 +1640,11 @@ static int evtchn_resume(struct sys_devi /* Avoid doing anything in the 'suspend cancelled' case. 
*/ status.dom = DOMID_SELF; @@ -481,7 +510,7 @@ Patch-mainline: obsolete if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status)) BUG(); if (status.status == EVTCHNSTAT_virq -@@ -1541,6 +1753,15 @@ void __init xen_init_IRQ(void) +@@ -1666,6 +1921,15 @@ void __init xen_init_IRQ(void) unsigned int i; struct physdev_pirq_eoi_gmfn eoi_gmfn; @@ -496,56 +525,30 @@ Patch-mainline: obsolete + init_evtchn_cpu_bindings(); - i = get_order(sizeof(unsigned long) * BITS_TO_LONGS(nr_pirqs)); ---- head-2010-05-12.orig/drivers/xen/core/smpboot.c 2010-03-19 15:20:24.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/smpboot.c 2010-03-19 15:20:27.000000000 +0100 -@@ -176,13 +176,13 @@ static int __cpuinit xen_smp_intr_init(u + #ifdef CONFIG_SPARSE_IRQ +--- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-03-03 16:14:20.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-03-03 16:14:51.000000000 +0100 +@@ -125,7 +125,7 @@ static int __cpuinit xen_smp_intr_init(u fail: xen_spinlock_cleanup(cpu); - unbind_reboot: -- unbind_from_per_cpu_irq(reboot_irq, cpu); -+ unbind_from_per_cpu_irq(reboot_irq, cpu, NULL); - unbind_call1: -- unbind_from_per_cpu_irq(call1func_irq, cpu); -+ unbind_from_per_cpu_irq(call1func_irq, cpu, NULL); - unbind_call: -- unbind_from_per_cpu_irq(callfunc_irq, cpu); -+ unbind_from_per_cpu_irq(callfunc_irq, cpu, NULL); - unbind_resched: -- unbind_from_per_cpu_irq(resched_irq, cpu); -+ unbind_from_per_cpu_irq(resched_irq, cpu, NULL); + unbind_ipi: +- unbind_from_per_cpu_irq(ipi_irq, cpu); ++ unbind_from_per_cpu_irq(ipi_irq, cpu, NULL); return rc; } -@@ -192,10 +192,10 @@ static void __cpuinit xen_smp_intr_exit( +@@ -135,7 +135,7 @@ static void __cpuinit xen_smp_intr_exit( if (cpu != 0) local_teardown_timer(cpu); -- unbind_from_per_cpu_irq(resched_irq, cpu); -- unbind_from_per_cpu_irq(callfunc_irq, cpu); -- unbind_from_per_cpu_irq(call1func_irq, cpu); -- unbind_from_per_cpu_irq(reboot_irq, cpu); -+ unbind_from_per_cpu_irq(resched_irq, cpu, NULL); -+ unbind_from_per_cpu_irq(callfunc_irq, cpu, NULL); -+ unbind_from_per_cpu_irq(call1func_irq, cpu, NULL); -+ unbind_from_per_cpu_irq(reboot_irq, cpu, NULL); +- unbind_from_per_cpu_irq(ipi_irq, cpu); ++ unbind_from_per_cpu_irq(ipi_irq, cpu, NULL); xen_spinlock_cleanup(cpu); } #endif ---- head-2010-05-12.orig/drivers/xen/core/spinlock.c 2010-02-24 12:38:00.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/spinlock.c 2010-02-24 12:38:54.000000000 +0100 -@@ -55,7 +55,7 @@ int __cpuinit xen_spinlock_init(unsigned - - void __cpuinit xen_spinlock_cleanup(unsigned int cpu) - { -- unbind_from_per_cpu_irq(spinlock_irq, cpu); -+ unbind_from_per_cpu_irq(spinlock_irq, cpu, NULL); - } - - static unsigned int spin_adjust(struct spinning *spinning, ---- head-2010-05-12.orig/drivers/xen/netback/netback.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/drivers/xen/netback/netback.c 2010-01-04 13:31:26.000000000 +0100 -@@ -1619,6 +1619,12 @@ static irqreturn_t netif_be_dbg(int irq, +--- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-03-01 11:52:43.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/netback.c 2011-03-01 11:53:15.000000000 +0100 +@@ -1630,6 +1630,12 @@ static irqreturn_t netif_be_dbg(int irq, return IRQ_HANDLED; } @@ -558,7 +561,7 @@ Patch-mainline: obsolete #endif static int __init netback_init(void) -@@ -1678,12 +1684,9 @@ static int __init netback_init(void) +@@ -1689,12 +1695,9 @@ static int __init netback_init(void) netif_xenbus_init(); #ifdef NETBE_DEBUG_INTERRUPT @@ -574,9 +577,9 @@ Patch-mainline: 
obsolete #endif return 0; ---- head-2010-05-12.orig/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/drivers/xen/xenoprof/xenoprofile.c 2010-01-07 11:04:10.000000000 +0100 -@@ -210,6 +210,11 @@ static irqreturn_t xenoprof_ovf_interrup +--- head-2011-02-17.orig/drivers/xen/xenoprof/xenoprofile.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-02-17/drivers/xen/xenoprof/xenoprofile.c 2010-09-09 16:53:30.000000000 +0200 +@@ -209,6 +209,11 @@ static irqreturn_t xenoprof_ovf_interrup return IRQ_HANDLED; } @@ -588,7 +591,7 @@ Patch-mainline: obsolete static void unbind_virq(void) { -@@ -217,7 +222,7 @@ static void unbind_virq(void) +@@ -216,7 +221,7 @@ static void unbind_virq(void) for_each_online_cpu(i) { if (ovf_irq[i] >= 0) { @@ -597,7 +600,7 @@ Patch-mainline: obsolete ovf_irq[i] = -1; } } -@@ -230,12 +235,7 @@ static int bind_virq(void) +@@ -229,12 +234,7 @@ static int bind_virq(void) int result; for_each_online_cpu(i) { @@ -611,9 +614,9 @@ Patch-mainline: obsolete if (result < 0) { unbind_virq(); ---- head-2010-05-12.orig/include/xen/evtchn.h 2010-03-31 14:41:42.000000000 +0200 -+++ head-2010-05-12/include/xen/evtchn.h 2010-03-31 14:11:09.000000000 +0200 -@@ -93,6 +93,17 @@ int bind_virq_to_irqhandler( +--- head-2011-02-17.orig/include/xen/evtchn.h 2011-02-02 15:09:43.000000000 +0100 ++++ head-2011-02-17/include/xen/evtchn.h 2010-11-23 16:18:23.000000000 +0100 +@@ -94,6 +94,17 @@ int bind_virq_to_irqhandler( unsigned long irqflags, const char *devname, void *dev_id); @@ -631,7 +634,7 @@ Patch-mainline: obsolete #if defined(CONFIG_SMP) && !defined(MODULE) #ifndef CONFIG_X86 int bind_ipi_to_irqhandler( -@@ -117,9 +128,13 @@ int bind_ipi_to_irqaction( +@@ -118,9 +129,13 @@ DECLARE_PER_CPU(DECLARE_BITMAP(, NR_IPIS */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); diff --git a/patches.xen/xen-watchdog b/patches.xen/xen-watchdog new file mode 100644 index 0000000..ee6c113 --- /dev/null +++ b/patches.xen/xen-watchdog @@ -0,0 +1,398 @@ +From: jbeulich@novell.com +Subject: Xen: para-virtual watchdog driver +Patch-mainline: n/a + +--- head-2011-01-30.orig/drivers/watchdog/Kconfig 2011-01-31 12:42:35.000000000 +0100 ++++ head-2011-01-30/drivers/watchdog/Kconfig 2011-02-02 15:10:41.000000000 +0100 +@@ -1119,6 +1119,16 @@ config WATCHDOG_RIO + + # XTENSA Architecture + ++# Xen Architecture ++ ++config XEN_WDT ++ tristate "Xen Watchdog support" ++ depends on PARAVIRT_XEN || XEN ++ help ++ Say Y here to support the hypervisor watchdog capability provided ++ by Xen 4.0 and newer. The watchdog timeout period is normally one ++ minute but can be changed with a boot-time parameter. ++ + # + # ISA-based Watchdog Cards + # +--- head-2011-01-30.orig/drivers/watchdog/Makefile 2011-01-31 12:42:35.000000000 +0100 ++++ head-2011-01-30/drivers/watchdog/Makefile 2011-02-02 15:10:41.000000000 +0100 +@@ -148,6 +148,9 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o + + # XTENSA Architecture + ++# Xen ++obj-$(CONFIG_XEN_WDT) += xen_wdt.o ++ + # Architecture Independant + obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o + obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-01-30/drivers/watchdog/xen_wdt.c 2011-02-02 15:10:41.000000000 +0100 +@@ -0,0 +1,360 @@ ++/* ++ * Xen Watchdog Driver ++ * ++ * (c) Copyright 2010 Novell, Inc. 
++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ */ ++ ++#define DRV_NAME "wdt" ++#define DRV_VERSION "0.01" ++#define PFX DRV_NAME ": " ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef CONFIG_PARAVIRT_XEN ++#include ++#include ++#endif ++#include ++ ++static struct platform_device *platform_device; ++static DEFINE_SPINLOCK(wdt_lock); ++static struct sched_watchdog wdt; ++static __kernel_time_t wdt_expires; ++static bool is_active, expect_release; ++ ++#define WATCHDOG_TIMEOUT 60 /* in seconds */ ++static unsigned int timeout = WATCHDOG_TIMEOUT; ++module_param(timeout, uint, S_IRUGO); ++MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds " ++ "(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); ++ ++static bool nowayout = WATCHDOG_NOWAYOUT; ++module_param(nowayout, bool, S_IRUGO); ++MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " ++ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); ++ ++static inline __kernel_time_t set_timeout(void) ++{ ++ wdt.timeout = timeout; ++ return ktime_to_timespec(ktime_get()).tv_sec + timeout; ++} ++ ++static int xen_wdt_start(void) ++{ ++ __kernel_time_t expires; ++ int err; ++ ++ spin_lock(&wdt_lock); ++ ++ expires = set_timeout(); ++ if (!wdt.id) ++ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); ++ else ++ err = -EBUSY; ++ if (err > 0) { ++ wdt.id = err; ++ wdt_expires = expires; ++ err = 0; ++ } else ++ BUG_ON(!err); ++ ++ spin_unlock(&wdt_lock); ++ ++ return err; ++} ++ ++static int xen_wdt_stop(void) ++{ ++ int err = 0; ++ ++ spin_lock(&wdt_lock); ++ ++ wdt.timeout = 0; ++ if (wdt.id) ++ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); ++ if (!err) ++ wdt.id = 0; ++ ++ spin_unlock(&wdt_lock); ++ ++ return err; ++} ++ ++static int xen_wdt_kick(void) ++{ ++ __kernel_time_t expires; ++ int err; ++ ++ spin_lock(&wdt_lock); ++ ++ expires = set_timeout(); ++ if (wdt.id) ++ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); ++ else ++ err = -ENXIO; ++ if (!err) ++ wdt_expires = expires; ++ ++ spin_unlock(&wdt_lock); ++ ++ return err; ++} ++ ++static int xen_wdt_open(struct inode *inode, struct file *file) ++{ ++ int err; ++ ++ /* /dev/watchdog can only be opened once */ ++ if (xchg(&is_active, true)) ++ return -EBUSY; ++ ++ err = xen_wdt_start(); ++ if (err == -EBUSY) ++ err = xen_wdt_kick(); ++ return err ?: nonseekable_open(inode, file); ++} ++ ++static int xen_wdt_release(struct inode *inode, struct file *file) ++{ ++ if (expect_release) ++ xen_wdt_stop(); ++ else { ++ pr_crit(PFX "unexpected close, not stopping watchdog!\n"); ++ xen_wdt_kick(); ++ } ++ is_active = false; ++ expect_release = false; ++ return 0; ++} ++ ++static ssize_t xen_wdt_write(struct file *file, const char __user *data, ++ size_t len, loff_t *ppos) ++{ ++ /* See if we got the magic character 'V' and reload the timer */ ++ if (len) { ++ if (!nowayout) { ++ size_t i; ++ ++ /* in case it was set long ago */ ++ expect_release = false; ++ ++ /* scan to see whether or not we got the magic ++ character */ ++ for (i = 0; i != len; i++) { ++ char c; ++ if (get_user(c, data + i)) ++ return -EFAULT; ++ if (c == 'V') ++ expect_release = true; ++ } ++ } ++ ++ /* someone wrote to us, we should reload the timer */ ++ xen_wdt_kick(); ++ } ++ 
return len; ++} ++ ++static long xen_wdt_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ int new_options, retval = -EINVAL; ++ int new_timeout; ++ int __user *argp = (void __user *)arg; ++ static const struct watchdog_info ident = { ++ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, ++ .firmware_version = 0, ++ .identity = DRV_NAME, ++ }; ++ ++ switch (cmd) { ++ case WDIOC_GETSUPPORT: ++ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; ++ ++ case WDIOC_GETSTATUS: ++ case WDIOC_GETBOOTSTATUS: ++ return put_user(0, argp); ++ ++ case WDIOC_SETOPTIONS: ++ if (get_user(new_options, argp)) ++ return -EFAULT; ++ ++ if (new_options & WDIOS_DISABLECARD) ++ retval = xen_wdt_stop(); ++ if (new_options & WDIOS_ENABLECARD) { ++ retval = xen_wdt_start(); ++ if (retval == -EBUSY) ++ retval = xen_wdt_kick(); ++ } ++ return retval; ++ ++ case WDIOC_KEEPALIVE: ++ xen_wdt_kick(); ++ return 0; ++ ++ case WDIOC_SETTIMEOUT: ++ if (get_user(new_timeout, argp)) ++ return -EFAULT; ++ if (!new_timeout) ++ return -EINVAL; ++ timeout = new_timeout; ++ xen_wdt_kick(); ++ /* fall through */ ++ case WDIOC_GETTIMEOUT: ++ return put_user(timeout, argp); ++ ++ case WDIOC_GETTIMELEFT: ++ retval = wdt_expires - ktime_to_timespec(ktime_get()).tv_sec; ++ return put_user(retval, argp); ++ } ++ ++ return -ENOTTY; ++} ++ ++static const struct file_operations xen_wdt_fops = { ++ .owner = THIS_MODULE, ++ .llseek = no_llseek, ++ .write = xen_wdt_write, ++ .unlocked_ioctl = xen_wdt_ioctl, ++ .open = xen_wdt_open, ++ .release = xen_wdt_release, ++}; ++ ++static struct miscdevice xen_wdt_miscdev = { ++ .minor = WATCHDOG_MINOR, ++ .name = "watchdog", ++ .fops = &xen_wdt_fops, ++}; ++ ++static int __devinit xen_wdt_probe(struct platform_device *dev) ++{ ++ struct sched_watchdog wd = { .id = ~0 }; ++ int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd); ++ ++ switch (ret) { ++ case -EINVAL: ++ if (!timeout) { ++ timeout = WATCHDOG_TIMEOUT; ++ pr_info(PFX "timeout value invalid, using %d\n", ++ timeout); ++ } ++ ++ ret = misc_register(&xen_wdt_miscdev); ++ if (ret) { ++ pr_err(PFX "can't register miscdev on minor=%d (%d)\n", ++ WATCHDOG_MINOR, ret); ++ break; ++ } ++ ++ pr_info(PFX "initialized (timeout=%ds, nowayout=%d)\n", ++ timeout, nowayout); ++ break; ++ ++ case -ENOSYS: ++ pr_info(PFX "not supported\n"); ++ ret = -ENODEV; ++ break; ++ ++ default: ++ pr_warning(PFX "bogus return value %d\n", ret); ++ break; ++ } ++ ++ return ret; ++} ++ ++static int __devexit xen_wdt_remove(struct platform_device *dev) ++{ ++ /* Stop the timer before we leave */ ++ if (!nowayout) ++ xen_wdt_stop(); ++ ++ misc_deregister(&xen_wdt_miscdev); ++ ++ return 0; ++} ++ ++static void xen_wdt_shutdown(struct platform_device *dev) ++{ ++ xen_wdt_stop(); ++} ++ ++static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state) ++{ ++ return xen_wdt_stop(); ++} ++ ++static int xen_wdt_resume(struct platform_device *dev) ++{ ++ return xen_wdt_start(); ++} ++ ++static struct platform_driver xen_wdt_driver = { ++ .probe = xen_wdt_probe, ++ .remove = __devexit_p(xen_wdt_remove), ++ .shutdown = xen_wdt_shutdown, ++ .suspend = xen_wdt_suspend, ++ .resume = xen_wdt_resume, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRV_NAME, ++ }, ++}; ++ ++static int __init xen_wdt_init_module(void) ++{ ++ int err; ++ ++#ifdef CONFIG_PARAVIRT_XEN ++ if (!xen_domain()) ++ return -ENODEV; ++#endif ++ ++ pr_info(PFX "Xen WatchDog Timer Driver v%s\n", DRV_VERSION); ++ ++ err = platform_driver_register(&xen_wdt_driver); ++ if (err) 
++ return err; ++ ++ platform_device = platform_device_register_simple(DRV_NAME, ++ -1, NULL, 0); ++ if (IS_ERR(platform_device)) { ++ err = PTR_ERR(platform_device); ++ platform_driver_unregister(&xen_wdt_driver); ++ } ++ ++ return err; ++} ++ ++static void __exit xen_wdt_cleanup_module(void) ++{ ++ platform_device_unregister(platform_device); ++ platform_driver_unregister(&xen_wdt_driver); ++ pr_info(PFX "module unloaded\n"); ++} ++ ++module_init(xen_wdt_init_module); ++module_exit(xen_wdt_cleanup_module); ++ ++MODULE_AUTHOR("Jan Beulich "); ++MODULE_DESCRIPTION("Xen WatchDog Timer Driver"); ++MODULE_VERSION(DRV_VERSION); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/patches.xen/xen-x86-bigmem b/patches.xen/xen-x86-bigmem index 62674fd..c74929e 100644 --- a/patches.xen/xen-x86-bigmem +++ b/patches.xen/xen-x86-bigmem @@ -3,9 +3,9 @@ Subject: fix issues with the assignment of huge amounts of memory Patch-mainline: obsolete References: bnc#482614, bnc#537435 ---- head-2010-04-15.orig/arch/x86/kernel/e820-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/e820-xen.c 2010-04-15 11:48:01.000000000 +0200 -@@ -1093,6 +1093,26 @@ static int __init parse_memopt(char *p) +--- head-2011-01-30.orig/arch/x86/kernel/e820-xen.c 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/e820-xen.c 2011-02-03 14:42:11.000000000 +0100 +@@ -951,6 +951,26 @@ static int __init parse_memopt(char *p) userdef = 1; mem_size = memparse(p, &p); @@ -21,18 +21,18 @@ References: bnc#482614, bnc#537435 + if ((mem_size >> (PAGE_SHIFT + 5)) > xen_start_info->nr_pages) { + u64 size = (u64)xen_start_info->nr_pages << 5; + -+ printk(KERN_WARNING "mem=%Luk is invalid for an initial" -+ " allocation of %luk, using %Luk\n", -+ (unsigned long long)mem_size >> 10, -+ xen_start_info->nr_pages << (PAGE_SHIFT - 10), -+ (unsigned long long)size << (PAGE_SHIFT - 10)); ++ pr_warning("mem=%Luk is invalid for an initial" ++ " allocation of %luk, using %Luk\n", ++ (unsigned long long)mem_size >> 10, ++ xen_start_info->nr_pages << (PAGE_SHIFT - 10), ++ (unsigned long long)size << (PAGE_SHIFT - 10)); + mem_size = size << PAGE_SHIFT; + } +#endif e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); i = e820.nr_map - 1; -@@ -1291,6 +1311,7 @@ void __init e820_reserve_resources_late( +@@ -1149,6 +1169,7 @@ void __init e820_reserve_resources_late( char *__init default_machine_specific_memory_setup(void) { int rc, nr_map; @@ -40,7 +40,7 @@ References: bnc#482614, bnc#537435 struct xen_memory_map memmap; static struct e820entry __initdata map[E820MAX]; -@@ -1316,6 +1337,22 @@ char *__init default_machine_specific_me +@@ -1174,6 +1195,22 @@ char *__init default_machine_specific_me BUG(); #ifdef CONFIG_XEN @@ -51,11 +51,11 @@ References: bnc#482614, bnc#537435 + if ((maxmem >> (PAGE_SHIFT + 5)) > xen_start_info->nr_pages) { + unsigned long long size = (u64)xen_start_info->nr_pages << 5; + -+ printk(KERN_WARNING "maxmem of %LuM is invalid for an initial" -+ " allocation of %luM, using %LuM\n", -+ maxmem >> 20, -+ xen_start_info->nr_pages >> (20 - PAGE_SHIFT), -+ size >> (20 - PAGE_SHIFT)); ++ pr_warning("maxmem of %LuM is invalid for an initial" ++ " allocation of %luM, using %LuM\n", ++ maxmem >> 20, ++ xen_start_info->nr_pages >> (20 - PAGE_SHIFT), ++ size >> (20 - PAGE_SHIFT)); + size <<= PAGE_SHIFT; + e820_remove_range(size, ULLONG_MAX - size, E820_RAM, 1); + } @@ -63,9 +63,9 @@ References: bnc#482614, bnc#537435 if (is_initial_xendomain()) { memmap.nr_entries 
= E820MAX; set_xen_guest_handle(memmap.buffer, machine_e820.map); ---- head-2010-04-15.orig/arch/x86/kernel/setup-xen.c 2010-04-15 11:46:02.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/setup-xen.c 2010-04-15 11:48:03.000000000 +0200 -@@ -130,12 +130,7 @@ static struct notifier_block xen_panic_b +--- head-2011-01-30.orig/arch/x86/kernel/setup-xen.c 2011-01-03 14:07:52.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:11.000000000 +0100 +@@ -132,12 +132,7 @@ static struct notifier_block xen_panic_b unsigned long *phys_to_machine_mapping; EXPORT_SYMBOL(phys_to_machine_mapping); @@ -79,7 +79,7 @@ References: bnc#482614, bnc#537435 /* Raw start-of-day parameters from the hypervisor. */ start_info_t *xen_start_info; -@@ -1167,17 +1162,17 @@ void __init setup_arch(char **cmdline_p) +@@ -1188,17 +1183,17 @@ void __init setup_arch(char **cmdline_p) p2m_pages = xen_start_info->nr_pages; if (!xen_feature(XENFEAT_auto_translated_physmap)) { @@ -100,7 +100,7 @@ References: bnc#482614, bnc#537435 free_bootmem( __pa(xen_start_info->mfn_list), PFN_PHYS(PFN_UP(xen_start_info->nr_pages * -@@ -1187,15 +1182,26 @@ void __init setup_arch(char **cmdline_p) +@@ -1208,15 +1203,26 @@ void __init setup_arch(char **cmdline_p) * Initialise the list of the frames that specify the list of * frames that make up the p2m table. Used by save/restore. */ @@ -130,9 +130,9 @@ References: bnc#482614, bnc#537435 pfn_to_mfn_frame_list[k] = alloc_bootmem_pages(PAGE_SIZE); pfn_to_mfn_frame_list_list[k] = ---- head-2010-04-15.orig/drivers/xen/core/machine_reboot.c 2010-03-25 14:39:15.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/machine_reboot.c 2010-03-25 14:41:06.000000000 +0100 -@@ -80,7 +80,7 @@ static void post_suspend(int suspend_can +--- head-2011-01-30.orig/drivers/xen/core/machine_reboot.c 2011-02-02 15:10:16.000000000 +0100 ++++ head-2011-01-30/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:11.000000000 +0100 +@@ -75,7 +75,7 @@ static void post_suspend(int suspend_can unsigned long shinfo_mfn; extern unsigned long max_pfn; extern unsigned long *pfn_to_mfn_frame_list_list; diff --git a/patches.xen/xen-x86-consistent-nmi b/patches.xen/xen-x86-consistent-nmi deleted file mode 100644 index 1c9c204..0000000 --- a/patches.xen/xen-x86-consistent-nmi +++ /dev/null @@ -1,247 +0,0 @@ -From: jbeulich@novell.com -Subject: make i386 and x86 NMI code consistent, disable all APIC-related stuff -Patch-mainline: obsolete -References: 191115 - ---- head-2010-04-15.orig/arch/x86/include/asm/irq.h 2010-04-15 09:37:45.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/asm/irq.h 2010-03-25 14:40:02.000000000 +0100 -@@ -15,7 +15,7 @@ static inline int irq_canonicalize(int i - return ((irq == 2) ? 
9 : irq); - } - --#ifdef CONFIG_X86_LOCAL_APIC -+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) - # define ARCH_HAS_NMI_WATCHDOG - #endif - ---- head-2010-04-15.orig/arch/x86/include/asm/nmi.h 2010-04-15 09:37:45.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/asm/nmi.h 2010-03-25 14:40:02.000000000 +0100 -@@ -5,8 +5,6 @@ - #include - #include - --#ifdef ARCH_HAS_NMI_WATCHDOG -- - /** - * do_nmi_callback - * -@@ -16,6 +14,11 @@ - int do_nmi_callback(struct pt_regs *regs, int cpu); - - extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); -+ -+extern int unknown_nmi_panic; -+ -+#ifdef ARCH_HAS_NMI_WATCHDOG -+ - extern int check_nmi_watchdog(void); - extern int nmi_watchdog_enabled; - extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); -@@ -41,7 +44,6 @@ extern unsigned int nmi_watchdog; - struct ctl_table; - extern int proc_nmi_enabled(struct ctl_table *, int , - void __user *, size_t *, loff_t *); --extern int unknown_nmi_panic; - - void arch_trigger_all_cpu_backtrace(void); - #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace -@@ -64,7 +66,6 @@ static inline int nmi_watchdog_active(vo - */ - return nmi_watchdog & (NMI_LOCAL_APIC | NMI_IO_APIC); - } --#endif - - void lapic_watchdog_stop(void); - int lapic_watchdog_init(unsigned nmi_hz); -@@ -72,6 +73,9 @@ int lapic_wd_event(unsigned nmi_hz); - unsigned lapic_adjust_nmi_hz(unsigned hz); - void disable_lapic_nmi_watchdog(void); - void enable_lapic_nmi_watchdog(void); -+ -+#endif -+ - void stop_nmi(void); - void restart_nmi(void); - ---- head-2010-04-15.orig/arch/x86/kernel/apic/Makefile 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/apic/Makefile 2010-03-25 14:40:02.000000000 +0100 -@@ -18,8 +18,6 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o - obj-$(CONFIG_X86_ES7000) += es7000_32.o - obj-$(CONFIG_X86_SUMMIT) += summit_32.o - --obj-$(CONFIG_XEN) += nmi.o -- - probe_64-$(CONFIG_XEN) := probe_32.o - - disabled-obj-$(CONFIG_XEN) := apic_flat_$(BITS).o apic_noop.o ---- head-2010-04-15.orig/arch/x86/kernel/apic/nmi.c 2010-04-15 10:05:32.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/apic/nmi.c 2010-04-15 11:46:23.000000000 +0200 -@@ -28,8 +28,10 @@ - #include - #include - --#ifndef CONFIG_XEN -+#ifdef ARCH_HAS_NMI_WATCHDOG - #include -+#else -+#include - #endif - #include - #include -@@ -40,6 +42,9 @@ - #include - - int unknown_nmi_panic; -+ -+#ifdef ARCH_HAS_NMI_WATCHDOG -+ - int nmi_watchdog_enabled; - - /* For reliability, we're prepared to waste bits here. 
*/ -@@ -178,13 +183,11 @@ int __init check_nmi_watchdog(void) - kfree(prev_nmi_count); - return 0; - error: --#ifndef CONFIG_XEN - if (nmi_watchdog == NMI_IO_APIC) { - if (!timer_through_8259) - legacy_pic->chip->mask(0); - on_each_cpu(__acpi_nmi_disable, NULL, 1); - } --#endif - - #ifdef CONFIG_X86_32 - timer_ack = 0; -@@ -474,8 +477,11 @@ nmi_watchdog_tick(struct pt_regs *regs, - return rc; - } - -+#endif /* ARCH_HAS_NMI_WATCHDOG */ -+ - #ifdef CONFIG_SYSCTL - -+#ifdef ARCH_HAS_NMI_WATCHDOG - static void enable_ioapic_nmi_watchdog_single(void *unused) - { - __get_cpu_var(wd_enabled) = 1; -@@ -493,6 +499,7 @@ static void disable_ioapic_nmi_watchdog( - { - on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); - } -+#endif - - static int __init setup_unknown_nmi_panic(char *str) - { -@@ -511,6 +518,7 @@ static int unknown_nmi_panic_callback(st - return 0; - } - -+#ifdef ARCH_HAS_NMI_WATCHDOG - /* - * proc handler for /proc/sys/kernel/nmi - */ -@@ -548,6 +556,7 @@ int proc_nmi_enabled(struct ctl_table *t - } - return 0; - } -+#endif - - #endif /* CONFIG_SYSCTL */ - -@@ -560,6 +569,7 @@ int do_nmi_callback(struct pt_regs *regs - return 0; - } - -+#ifdef ARCH_HAS_NMI_WATCHDOG - void arch_trigger_all_cpu_backtrace(void) - { - int i; -@@ -576,3 +586,4 @@ void arch_trigger_all_cpu_backtrace(void - mdelay(1); - } - } -+#endif ---- head-2010-04-15.orig/arch/x86/kernel/cpu/Makefile 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/cpu/Makefile 2010-03-25 14:40:02.000000000 +0100 -@@ -34,7 +34,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ - - obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o - --disabled-obj-$(CONFIG_XEN) := hypervisor.o perf_event.o sched.o vmware.o -+disabled-obj-$(CONFIG_XEN) := hypervisor.o perfctr-watchdog.o perf_event.o sched.o vmware.o - - quiet_cmd_mkcapflags = MKCAP $@ - cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ ---- head-2010-04-15.orig/arch/x86/kernel/head-xen.c 2010-04-15 10:13:18.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/head-xen.c 2010-04-15 11:46:18.000000000 +0200 -@@ -183,12 +183,10 @@ void __init xen_arch_setup(void) - .address = CALLBACK_ADDR(system_call) - }; - #endif --#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) - static const struct callback_register __initconst nmi_cb = { - .type = CALLBACKTYPE_nmi, - .address = CALLBACK_ADDR(nmi) - }; --#endif - - ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event); - if (ret == 0) -@@ -212,7 +210,6 @@ void __init xen_arch_setup(void) - #endif - BUG_ON(ret); - --#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) - ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb); - #if CONFIG_XEN_COMPAT <= 0x030002 - if (ret == -ENOSYS) { -@@ -223,6 +220,5 @@ void __init xen_arch_setup(void) - HYPERVISOR_nmi_op(XENNMI_register_callback, &cb); - } - #endif --#endif - } - #endif /* CONFIG_XEN */ ---- head-2010-04-15.orig/arch/x86/kernel/traps-xen.c 2010-03-25 16:41:03.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/traps-xen.c 2010-03-25 14:40:02.000000000 +0100 -@@ -51,6 +51,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -394,12 +395,14 @@ static notrace __kprobes void default_do - == NOTIFY_STOP) - return; - #ifdef CONFIG_X86_LOCAL_APIC -+#ifdef ARCH_HAS_NMI_WATCHDOG - /* - * Ok, so this is none of the documented NMI sources, - * so it must be the NMI watchdog. 
- */ - if (nmi_watchdog_tick(regs, reason)) - return; -+#endif - if (!do_nmi_callback(regs, cpu)) - unknown_nmi_error(reason, regs); - #else ---- head-2010-04-15.orig/kernel/sysctl.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-04-15/kernel/sysctl.c 2010-03-25 14:40:02.000000000 +0100 -@@ -699,6 +699,7 @@ static struct ctl_table kern_table[] = { - .mode = 0644, - .proc_handler = proc_dointvec, - }, -+#ifdef ARCH_HAS_NMI_WATCHDOG - { - .procname = "nmi_watchdog", - .data = &nmi_watchdog_enabled, -@@ -707,6 +708,7 @@ static struct ctl_table kern_table[] = { - .proc_handler = proc_nmi_enabled, - }, - #endif -+#endif - #if defined(CONFIG_X86) - { - .procname = "panic_on_unrecovered_nmi", diff --git a/patches.xen/xen-x86-dcr-fallback b/patches.xen/xen-x86-dcr-fallback index e24ff5b..5c9a6fe 100644 --- a/patches.xen/xen-x86-dcr-fallback +++ b/patches.xen/xen-x86-dcr-fallback @@ -6,8 +6,8 @@ References: 181869 This avoids losing precious special memory in places where any memory can be used. ---- head-2010-04-15.orig/arch/x86/mm/hypervisor.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-04-15/arch/x86/mm/hypervisor.c 2009-06-09 15:52:17.000000000 +0200 +--- head-2010-12-08.orig/arch/x86/mm/hypervisor.c 2010-12-08 10:38:40.000000000 +0100 ++++ head-2010-12-08/arch/x86/mm/hypervisor.c 2010-12-08 10:45:24.000000000 +0100 @@ -43,6 +43,7 @@ #include #include @@ -16,7 +16,7 @@ used. #include #include -@@ -719,6 +720,83 @@ void xen_destroy_contiguous_region(unsig +@@ -717,6 +718,83 @@ void xen_destroy_contiguous_region(unsig BUG(); balloon_unlock(flags); @@ -33,9 +33,9 @@ used. + unsigned int j = 0; + + if (!page) { -+ printk(KERN_WARNING "Xen and kernel out of memory " -+ "while trying to release an order %u " -+ "contiguous region\n", order); ++ pr_warning("Xen and kernel out of memory" ++ " while trying to release an order" ++ " %u contiguous region\n", order); + break; + } + pfn = page_to_pfn(page); @@ -100,9 +100,9 @@ used. } EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); ---- head-2010-04-15.orig/drivers/xen/balloon/balloon.c 2010-04-15 11:44:37.000000000 +0200 -+++ head-2010-04-15/drivers/xen/balloon/balloon.c 2010-04-15 11:46:09.000000000 +0200 -@@ -776,7 +776,11 @@ struct page **alloc_empty_pages_and_page +--- head-2010-12-08.orig/drivers/xen/balloon/balloon.c 2010-11-25 13:47:53.000000000 +0100 ++++ head-2010-12-08/drivers/xen/balloon/balloon.c 2010-11-25 13:48:02.000000000 +0100 +@@ -773,7 +773,11 @@ struct page **alloc_empty_pages_and_page } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); @@ -115,7 +115,7 @@ used. { unsigned long flags; int i; -@@ -787,17 +791,33 @@ void free_empty_pages_and_pagevec(struct +@@ -784,17 +788,33 @@ void free_empty_pages_and_pagevec(struct balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); @@ -153,8 +153,8 @@ used. 
void balloon_release_driver_page(struct page *page) { ---- head-2010-04-15.orig/include/xen/balloon.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-15/include/xen/balloon.h 2009-06-09 15:52:17.000000000 +0200 +--- head-2010-12-08.orig/include/xen/balloon.h 2010-11-22 12:57:58.000000000 +0100 ++++ head-2010-12-08/include/xen/balloon.h 2009-06-09 15:52:17.000000000 +0200 @@ -47,6 +47,10 @@ void balloon_update_driver_allowance(lon struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); diff --git a/patches.xen/xen-x86-machphys-prediction b/patches.xen/xen-x86-machphys-prediction deleted file mode 100644 index b633164..0000000 --- a/patches.xen/xen-x86-machphys-prediction +++ /dev/null @@ -1,204 +0,0 @@ -From: jbeulich@novell.com -Subject: properly predict phys<->mach translations -Patch-mainline: obsolete - ---- head-2009-07-28.orig/arch/x86/include/mach-xen/asm/maddr_32.h 2009-07-28 12:14:16.000000000 +0200 -+++ head-2009-07-28/arch/x86/include/mach-xen/asm/maddr_32.h 2009-07-29 10:56:35.000000000 +0200 -@@ -30,17 +30,19 @@ extern unsigned int machine_to_phys_or - - static inline unsigned long pfn_to_mfn(unsigned long pfn) - { -- if (xen_feature(XENFEAT_auto_translated_physmap)) -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return pfn; -- BUG_ON(max_mapnr && pfn >= max_mapnr); -+ if (likely(max_mapnr)) -+ BUG_ON(pfn >= max_mapnr); - return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; - } - - static inline int phys_to_machine_mapping_valid(unsigned long pfn) - { -- if (xen_feature(XENFEAT_auto_translated_physmap)) -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return 1; -- BUG_ON(max_mapnr && pfn >= max_mapnr); -+ if (likely(max_mapnr)) -+ BUG_ON(pfn >= max_mapnr); - return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); - } - -@@ -48,7 +50,7 @@ static inline unsigned long mfn_to_pfn(u - { - unsigned long pfn; - -- if (xen_feature(XENFEAT_auto_translated_physmap)) -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return mfn; - - if (unlikely((mfn >> machine_to_phys_order) != 0)) -@@ -95,17 +97,18 @@ static inline unsigned long mfn_to_pfn(u - static inline unsigned long mfn_to_local_pfn(unsigned long mfn) - { - unsigned long pfn = mfn_to_pfn(mfn); -- if ((pfn < max_mapnr) -- && !xen_feature(XENFEAT_auto_translated_physmap) -- && (phys_to_machine_mapping[pfn] != mfn)) -+ if (likely(pfn < max_mapnr) -+ && likely(!xen_feature(XENFEAT_auto_translated_physmap)) -+ && unlikely(phys_to_machine_mapping[pfn] != mfn)) - return max_mapnr; /* force !pfn_valid() */ - return pfn; - } - - static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) - { -- BUG_ON(max_mapnr && pfn >= max_mapnr); -- if (xen_feature(XENFEAT_auto_translated_physmap)) { -+ if (likely(max_mapnr)) -+ BUG_ON(pfn >= max_mapnr); -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return; - } ---- head-2009-07-28.orig/arch/x86/include/mach-xen/asm/maddr_64.h 2009-07-28 12:14:16.000000000 +0200 -+++ head-2009-07-28/arch/x86/include/mach-xen/asm/maddr_64.h 2009-07-29 10:56:35.000000000 +0200 -@@ -25,17 +25,19 @@ extern unsigned int machine_to_phys_or - - static inline unsigned long pfn_to_mfn(unsigned long pfn) - { -- if (xen_feature(XENFEAT_auto_translated_physmap)) -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return pfn; -- BUG_ON(max_mapnr && pfn >= max_mapnr); -+ if (likely(max_mapnr)) -+ 
BUG_ON(pfn >= max_mapnr); - return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; - } - - static inline int phys_to_machine_mapping_valid(unsigned long pfn) - { -- if (xen_feature(XENFEAT_auto_translated_physmap)) -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return 1; -- BUG_ON(max_mapnr && pfn >= max_mapnr); -+ if (likely(max_mapnr)) -+ BUG_ON(pfn >= max_mapnr); - return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); - } - -@@ -43,7 +45,7 @@ static inline unsigned long mfn_to_pfn(u - { - unsigned long pfn; - -- if (xen_feature(XENFEAT_auto_translated_physmap)) -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return mfn; - - if (unlikely((mfn >> machine_to_phys_order) != 0)) -@@ -90,17 +92,18 @@ static inline unsigned long mfn_to_pfn(u - static inline unsigned long mfn_to_local_pfn(unsigned long mfn) - { - unsigned long pfn = mfn_to_pfn(mfn); -- if ((pfn < max_mapnr) -- && !xen_feature(XENFEAT_auto_translated_physmap) -- && (phys_to_machine_mapping[pfn] != mfn)) -+ if (likely(pfn < max_mapnr) -+ && likely(!xen_feature(XENFEAT_auto_translated_physmap)) -+ && unlikely(phys_to_machine_mapping[pfn] != mfn)) - return max_mapnr; /* force !pfn_valid() */ - return pfn; - } - - static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) - { -- BUG_ON(max_mapnr && pfn >= max_mapnr); -- if (xen_feature(XENFEAT_auto_translated_physmap)) { -+ if (likely(max_mapnr)) -+ BUG_ON(pfn >= max_mapnr); -+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return; - } ---- head-2009-07-28.orig/arch/x86/include/mach-xen/asm/pgtable_types.h 2009-07-28 13:14:11.000000000 +0200 -+++ head-2009-07-28/arch/x86/include/mach-xen/asm/pgtable_types.h 2009-07-29 10:56:35.000000000 +0200 -@@ -207,7 +207,7 @@ typedef struct { pgdval_t pgd; } pgd_t; - #define __pgd_ma(x) ((pgd_t) { (x) } ) - static inline pgd_t xen_make_pgd(pgdval_t val) - { -- if (val & _PAGE_PRESENT) -+ if (likely(val & _PAGE_PRESENT)) - val = pte_phys_to_machine(val); - return (pgd_t) { val }; - } -@@ -217,10 +217,10 @@ static inline pgdval_t xen_pgd_val(pgd_t - { - pgdval_t ret = __pgd_val(pgd); - #if PAGETABLE_LEVELS == 2 && CONFIG_XEN_COMPAT <= 0x030002 -- if (ret) -+ if (likely(ret)) - ret = machine_to_phys(ret) | _PAGE_PRESENT; - #else -- if (ret & _PAGE_PRESENT) -+ if (likely(ret & _PAGE_PRESENT)) - ret = pte_machine_to_phys(ret); - #endif - return ret; -@@ -237,7 +237,7 @@ typedef struct { pudval_t pud; } pud_t; - #define __pud_ma(x) ((pud_t) { (x) } ) - static inline pud_t xen_make_pud(pudval_t val) - { -- if (val & _PAGE_PRESENT) -+ if (likely(val & _PAGE_PRESENT)) - val = pte_phys_to_machine(val); - return (pud_t) { val }; - } -@@ -246,7 +246,7 @@ static inline pud_t xen_make_pud(pudval_ - static inline pudval_t xen_pud_val(pud_t pud) - { - pudval_t ret = __pud_val(pud); -- if (ret & _PAGE_PRESENT) -+ if (likely(ret & _PAGE_PRESENT)) - ret = pte_machine_to_phys(ret); - return ret; - } -@@ -266,7 +266,7 @@ typedef struct { pmdval_t pmd; } pmd_t; - #define __pmd_ma(x) ((pmd_t) { (x) } ) - static inline pmd_t xen_make_pmd(pmdval_t val) - { -- if (val & _PAGE_PRESENT) -+ if (likely(val & _PAGE_PRESENT)) - val = pte_phys_to_machine(val); - return (pmd_t) { val }; - } -@@ -276,10 +276,10 @@ static inline pmdval_t xen_pmd_val(pmd_t - { - pmdval_t ret = __pmd_val(pmd); - #if CONFIG_XEN_COMPAT <= 0x030002 -- if (ret) -+ if (likely(ret)) - ret = pte_machine_to_phys(ret) | _PAGE_PRESENT; - #else -- if (ret & _PAGE_PRESENT) -+ if 
(likely(ret & _PAGE_PRESENT)) - ret = pte_machine_to_phys(ret); - #endif - return ret; -@@ -308,7 +308,7 @@ static inline pmdval_t pmd_flags(pmd_t p - #define __pte_ma(x) ((pte_t) { .pte = (x) } ) - static inline pte_t xen_make_pte(pteval_t val) - { -- if ((val & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT) -+ if (likely((val & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT)) - val = pte_phys_to_machine(val); - return (pte_t) { .pte = val }; - } -@@ -317,7 +317,7 @@ static inline pte_t xen_make_pte(pteval_ - static inline pteval_t xen_pte_val(pte_t pte) - { - pteval_t ret = __pte_val(pte); -- if ((pte.pte_low & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT) -+ if (likely((pte.pte_low & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT)) - ret = pte_machine_to_phys(ret); - return ret; - } diff --git a/patches.xen/xen-x86-msr-on-pcpu b/patches.xen/xen-x86-msr-on-pcpu new file mode 100644 index 0000000..a7f135a --- /dev/null +++ b/patches.xen/xen-x86-msr-on-pcpu @@ -0,0 +1,822 @@ +From: jbeulich@novell.com +Subject: introduce {rd,wr}msr_safe_on_pcpu() and add/enable users +Patch-mainline: n/a + +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-02-02 15:10:34.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-02-03 14:42:26.000000000 +0100 +@@ -1048,6 +1048,7 @@ config MICROCODE_OLD_INTERFACE + + config X86_MSR + tristate "/dev/cpu/*/msr - Model-specific register support" ++ select XEN_DOMCTL if XEN_PRIVILEGED_GUEST + ---help--- + This device gives privileged processes access to the x86 + Model-Specific Registers (MSRs). It is a character device with +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/arch/x86/kernel/msr-xen.c 2011-02-03 14:42:26.000000000 +0100 +@@ -0,0 +1,339 @@ ++#ifndef CONFIG_XEN_PRIVILEGED_GUEST ++#include "msr.c" ++#else ++/* ----------------------------------------------------------------------- * ++ * ++ * Copyright 2010 Novell, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, ++ * USA; either version 2 of the License, or (at your option) any later ++ * version; incorporated herein by reference. ++ * ++ * ----------------------------------------------------------------------- */ ++ ++/* ++ * x86 MSR access device ++ * ++ * This device is accessed by lseek() to the appropriate register number ++ * and then read/write in chunks of 8 bytes. A larger size means multiple ++ * reads or writes of the same register. ++ * ++ * This driver uses /dev/xen/cpu/%d/msr where %d correlates to the minor ++ * number, and on an SMP box will direct the access to pCPU %d. ++ */ ++ ++static int msr_init(void); ++static void msr_exit(void); ++ ++#define msr_init(args...) _msr_init(args) ++#define msr_exit(args...) 
_msr_exit(args) ++#include "msr.c" ++#undef msr_exit ++#undef msr_init ++ ++#include ++#include ++ ++static struct class *pmsr_class; ++static unsigned int minor_bias = 10; ++static unsigned int nr_xen_cpu_ids; ++static unsigned long *xen_cpu_online_map; ++ ++#define PMSR_DEV(cpu) MKDEV(MSR_MAJOR, (cpu) + minor_bias) ++ ++static unsigned int pmsr_minor(struct inode *inode) ++{ ++ return iminor(inode) - minor_bias; ++} ++ ++static ssize_t pmsr_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ u32 __user *tmp = (u32 __user *) buf; ++ u32 data[2]; ++ u32 reg = *ppos; ++ unsigned int cpu = pmsr_minor(file->f_path.dentry->d_inode); ++ int err = 0; ++ ssize_t bytes = 0; ++ ++ if (count % 8) ++ return -EINVAL; /* Invalid chunk size */ ++ ++ for (; count; count -= 8) { ++ err = rdmsr_safe_on_pcpu(cpu, reg, &data[0], &data[1]); ++ if (err) ++ break; ++ if (copy_to_user(tmp, &data, 8)) { ++ err = -EFAULT; ++ break; ++ } ++ tmp += 2; ++ bytes += 8; ++ } ++ ++ return bytes ? bytes : err; ++} ++ ++static ssize_t pmsr_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ const u32 __user *tmp = (const u32 __user *)buf; ++ u32 data[2]; ++ u32 reg = *ppos; ++ unsigned int cpu = pmsr_minor(file->f_path.dentry->d_inode); ++ int err = 0; ++ ssize_t bytes = 0; ++ ++ if (count % 8) ++ return -EINVAL; /* Invalid chunk size */ ++ ++ for (; count; count -= 8) { ++ if (copy_from_user(&data, tmp, 8)) { ++ err = -EFAULT; ++ break; ++ } ++ err = wrmsr_safe_on_pcpu(cpu, reg, data[0], data[1]); ++ if (err) ++ break; ++ tmp += 2; ++ bytes += 8; ++ } ++ ++ return bytes ? bytes : err; ++} ++ ++static long pmsr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) ++{ ++ u32 __user *uregs = (u32 __user *)arg; ++ u32 regs[8]; ++ unsigned int cpu = pmsr_minor(file->f_path.dentry->d_inode); ++ int err; ++ ++ switch (ioc) { ++ case X86_IOC_RDMSR_REGS: ++ if (!(file->f_mode & FMODE_READ)) { ++ err = -EBADF; ++ break; ++ } ++ if (copy_from_user(®s, uregs, sizeof regs)) { ++ err = -EFAULT; ++ break; ++ } ++ err = rdmsr_safe_regs_on_pcpu(cpu, regs); ++ if (err) ++ break; ++ if (copy_to_user(uregs, ®s, sizeof regs)) ++ err = -EFAULT; ++ break; ++ ++ case X86_IOC_WRMSR_REGS: ++ if (!(file->f_mode & FMODE_WRITE)) { ++ err = -EBADF; ++ break; ++ } ++ if (copy_from_user(®s, uregs, sizeof regs)) { ++ err = -EFAULT; ++ break; ++ } ++ err = wrmsr_safe_regs_on_pcpu(cpu, regs); ++ if (err) ++ break; ++ if (copy_to_user(uregs, ®s, sizeof regs)) ++ err = -EFAULT; ++ break; ++ ++ default: ++ err = -ENOTTY; ++ break; ++ } ++ ++ return err; ++} ++ ++static int pmsr_open(struct inode *inode, struct file *file) ++{ ++ unsigned int cpu; ++ ++ cpu = pmsr_minor(file->f_path.dentry->d_inode); ++ if (cpu >= nr_xen_cpu_ids || !test_bit(cpu, xen_cpu_online_map)) ++ return -ENXIO; /* No such CPU */ ++ ++ return 0; ++} ++ ++/* ++ * File operations we support ++ */ ++static const struct file_operations pmsr_fops = { ++ .owner = THIS_MODULE, ++ .llseek = msr_seek, ++ .read = pmsr_read, ++ .write = pmsr_write, ++ .open = pmsr_open, ++ .unlocked_ioctl = pmsr_ioctl, ++ .compat_ioctl = pmsr_ioctl, ++}; ++ ++static int pmsr_device_create(unsigned int cpu) ++{ ++ struct device *dev; ++ ++ if (cpu >= nr_xen_cpu_ids) { ++ static bool warned; ++ unsigned long *map; ++ ++ if ((minor_bias + cpu) >> MINORBITS) { ++ if (!warned) { ++ warned = true; ++ pr_warning("Physical MSRs of CPUs beyond %u" ++ " will not be accessible\n", ++ MINORMASK - minor_bias); ++ } ++ return -EDOM; ++ } ++ ++ map = 
kzalloc(BITS_TO_LONGS(cpu + 1) * sizeof(*map), ++ GFP_KERNEL); ++ if (!map) { ++ if (!warned) { ++ warned = true; ++ pr_warning("Physical MSRs of CPUs beyond %u" ++ " may not be accessible\n", ++ nr_xen_cpu_ids - 1); ++ } ++ return -ENOMEM; ++ } ++ ++ memcpy(map, xen_cpu_online_map, ++ BITS_TO_LONGS(nr_xen_cpu_ids) ++ * sizeof(*xen_cpu_online_map)); ++ nr_xen_cpu_ids = min_t(unsigned int, ++ BITS_TO_LONGS(cpu + 1) * BITS_PER_LONG, ++ MINORMASK + 1 - minor_bias); ++ kfree(xchg(&xen_cpu_online_map, map)); ++ } ++ set_bit(cpu, xen_cpu_online_map); ++ dev = device_create(pmsr_class, NULL, PMSR_DEV(cpu), NULL, ++ "pmsr%d", cpu); ++ return IS_ERR(dev) ? PTR_ERR(dev) : 0; ++} ++ ++static void pmsr_device_destroy(unsigned int cpu) ++{ ++ clear_bit(cpu, xen_cpu_online_map); ++ device_destroy(pmsr_class, PMSR_DEV(cpu)); ++} ++ ++static int pmsr_cpu_callback(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ unsigned int cpu = (unsigned long)hcpu; ++ ++ switch (action) { ++ case CPU_ONLINE: ++ pmsr_device_create(cpu); ++ break; ++ case CPU_DEAD: ++ pmsr_device_destroy(cpu); ++ break; ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block pmsr_cpu_notifier = { ++ .notifier_call = pmsr_cpu_callback, ++}; ++ ++static char *pmsr_devnode(struct device *dev, mode_t *mode) ++{ ++ return kasprintf(GFP_KERNEL, "xen/cpu/%u/msr", ++ MINOR(dev->devt) - minor_bias); ++} ++ ++static int __init msr_init(void) ++{ ++ int err; ++ xen_platform_op_t op = { ++ .cmd = XENPF_get_cpuinfo, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ .u.pcpu_info.xen_cpuid = 0 ++ }; ++ ++ err = _msr_init(); ++ if (err || !is_initial_xendomain()) ++ return err; ++ ++ do { ++ err = HYPERVISOR_platform_op(&op); ++ } while (err == -EBUSY); ++ if (err) ++ goto out; ++ nr_xen_cpu_ids = BITS_TO_LONGS(op.u.pcpu_info.max_present + 1) ++ * BITS_PER_LONG; ++ ++ while (minor_bias < NR_CPUS) ++ minor_bias *= 10; ++ if ((minor_bias + nr_xen_cpu_ids - 1) >> MINORBITS) ++ minor_bias = NR_CPUS; ++ if ((minor_bias + nr_xen_cpu_ids - 1) >> MINORBITS) ++ nr_xen_cpu_ids = MINORMASK + 1 - NR_CPUS; ++ ++ xen_cpu_online_map = kzalloc(BITS_TO_LONGS(nr_xen_cpu_ids) ++ * sizeof(*xen_cpu_online_map), ++ GFP_KERNEL); ++ if (!xen_cpu_online_map) { ++ err = -ENOMEM; ++ goto out; ++ } ++ ++ if (__register_chrdev(MSR_MAJOR, minor_bias, ++ MINORMASK + 1 - minor_bias, ++ "pcpu/msr", &pmsr_fops)) { ++ pr_err("msr: unable to get minors for pmsr\n"); ++ goto out; ++ } ++ pmsr_class = class_create(THIS_MODULE, "pmsr"); ++ if (IS_ERR(pmsr_class)) { ++ err = PTR_ERR(pmsr_class); ++ goto out_chrdev; ++ } ++ pmsr_class->devnode = pmsr_devnode; ++ err = register_pcpu_notifier(&pmsr_cpu_notifier); ++ ++ if (!err && !nr_xen_cpu_ids) ++ err = -ENODEV; ++ if (!err) ++ return 0; ++ ++ class_destroy(pmsr_class); ++ ++out_chrdev: ++ __unregister_chrdev(MSR_MAJOR, minor_bias, ++ MINORMASK + 1 - minor_bias, "pcpu/msr"); ++out: ++ if (err) ++ pr_warning("msr: can't initialize physical MSR access (%d)\n", ++ err); ++ nr_xen_cpu_ids = 0; ++ kfree(xen_cpu_online_map); ++ return 0; ++} ++ ++static void __exit msr_exit(void) ++{ ++ if (nr_xen_cpu_ids) { ++ unsigned int cpu = 0; ++ ++ unregister_pcpu_notifier(&pmsr_cpu_notifier); ++ for_each_set_bit(cpu, xen_cpu_online_map, nr_xen_cpu_ids) ++ msr_device_destroy(cpu); ++ class_destroy(pmsr_class); ++ __unregister_chrdev(MSR_MAJOR, minor_bias, ++ MINORMASK + 1 - minor_bias, "pcpu/msr"); ++ kfree(xen_cpu_online_map); ++ } ++ _msr_exit(); ++} ++#endif /* CONFIG_XEN_PRIVILEGED_GUEST */ +--- 
head-2011-03-11.orig/drivers/hwmon/Kconfig 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-11/drivers/hwmon/Kconfig 2011-03-11 11:17:24.000000000 +0100 +@@ -392,7 +392,8 @@ config SENSORS_GPIO_FAN + + config SENSORS_CORETEMP + tristate "Intel Core/Core2/Atom temperature sensor" +- depends on X86 && PCI && !XEN && EXPERIMENTAL ++ depends on X86 && PCI && !XEN_UNPRIVILEGED_GUEST && EXPERIMENTAL ++ select XEN_DOMCTL if XEN + help + If you say yes here you get support for the temperature + sensor inside your CPU. Most of the family 6 CPUs +@@ -400,7 +401,8 @@ config SENSORS_CORETEMP + + config SENSORS_PKGTEMP + tristate "Intel processor package temperature sensor" +- depends on X86 && !XEN && EXPERIMENTAL ++ depends on X86 && !XEN_UNPRIVILEGED_GUEST && EXPERIMENTAL ++ select XEN_DOMCTL if XEN + help + If you say yes here you get support for the package level temperature + sensor inside your CPU. Check documentation/driver for details. +@@ -943,7 +945,8 @@ config SENSORS_TMP421 + + config SENSORS_VIA_CPUTEMP + tristate "VIA CPU temperature sensor" +- depends on X86 && !XEN ++ depends on X86 && !XEN_UNPRIVILEGED_GUEST ++ select XEN_DOMCTL if XEN + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of +--- head-2011-03-11.orig/drivers/xen/core/Makefile 2011-02-02 15:10:34.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/Makefile 2011-02-03 14:42:26.000000000 +0100 +@@ -5,8 +5,7 @@ + obj-y := evtchn.o gnttab.o reboot.o machine_reboot.o + + priv-$(CONFIG_PCI) += pci.o +-priv-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o +-obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += firmware.o $(priv-y) ++obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += firmware.o pcpu.o $(priv-y) + obj-$(CONFIG_PROC_FS) += xen_proc.o + obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o + obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o +@@ -17,4 +16,4 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o + obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o + obj-$(CONFIG_XEN_DOMCTL) += domctl.o + CFLAGS_domctl.o := -D__XEN_PUBLIC_XEN_H__ -D__XEN_PUBLIC_GRANT_TABLE_H__ +-CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h ++CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h -imacros xen/interface/sysctl.h +--- head-2011-03-11.orig/drivers/xen/core/domctl.c 2010-11-23 16:20:20.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/domctl.c 2011-02-03 14:42:26.000000000 +0100 +@@ -92,6 +92,110 @@ union xen_domctl { + } v5, v6, v7; + }; + ++struct xen_sysctl_physinfo_v6 { ++ uint32_t threads_per_core; ++ uint32_t cores_per_socket; ++ uint32_t nr_cpus; ++ uint32_t nr_nodes; ++ uint32_t cpu_khz; ++ uint64_aligned_t total_pages; ++ uint64_aligned_t free_pages; ++ uint64_aligned_t scrub_pages; ++ uint32_t hw_cap[8]; ++ uint32_t max_cpu_id; ++ union { ++ XEN_GUEST_HANDLE(uint32) cpu_to_node; ++ uint64_aligned_t _ctn_align; ++ }; ++ uint32_t capabilities; ++}; ++ ++struct xen_sysctl_physinfo_v7 { ++ uint32_t threads_per_core; ++ uint32_t cores_per_socket; ++ uint32_t nr_cpus; ++ uint32_t max_node_id; ++ uint32_t cpu_khz; ++ uint64_aligned_t total_pages; ++ uint64_aligned_t free_pages; ++ uint64_aligned_t scrub_pages; ++ uint32_t hw_cap[8]; ++ uint32_t max_cpu_id; ++ union { ++ XEN_GUEST_HANDLE(uint32) cpu_to_node; ++ uint64_aligned_t _ctn_align; ++ }; ++ uint32_t capabilities; ++}; ++ ++#define XEN_SYSCTL_pm_op_get_cputopo 0x20 ++struct xen_get_cputopo_v6 { ++ uint32_t max_cpus; ++ union { ++ XEN_GUEST_HANDLE(uint32) cpu_to_core; ++ uint64_aligned_t _ctc_align; ++ }; ++ 
union { ++ XEN_GUEST_HANDLE(uint32) cpu_to_socket; ++ uint64_aligned_t _cts_align; ++ }; ++ uint32_t nr_cpus; ++}; ++ ++struct xen_sysctl_pm_op_v6 { ++ uint32_t cmd; ++ uint32_t cpuid; ++ union { ++ struct xen_get_cputopo_v6 get_topo; ++ }; ++}; ++#define xen_sysctl_pm_op_v7 xen_sysctl_pm_op_v6 ++ ++struct xen_sysctl_topologyinfo_v8 { ++ uint32_t max_cpu_index; ++ union { ++ XEN_GUEST_HANDLE(uint32) cpu_to_core; ++ uint64_aligned_t _ctc_align; ++ }; ++ union { ++ XEN_GUEST_HANDLE(uint32) cpu_to_socket; ++ uint64_aligned_t _cts_align; ++ }; ++ union { ++ XEN_GUEST_HANDLE(uint32) cpu_to_node; ++ uint64_aligned_t _ctn_align; ++ }; ++}; ++ ++union xen_sysctl { ++ /* v6: Xen 3.4.x */ ++ struct { ++ uint32_t cmd; ++ uint32_t interface_version; ++ union { ++ struct xen_sysctl_physinfo_v6 physinfo; ++ struct xen_sysctl_pm_op_v6 pm_op; ++ }; ++ } v6; ++ /* v7: Xen 4.0.x */ ++ struct { ++ uint32_t cmd; ++ uint32_t interface_version; ++ union { ++ struct xen_sysctl_physinfo_v7 physinfo; ++ struct xen_sysctl_pm_op_v7 pm_op; ++ }; ++ } v7; ++ /* v8: Xen 4.1+ */ ++ struct { ++ uint32_t cmd; ++ uint32_t interface_version; ++ union { ++ struct xen_sysctl_topologyinfo_v8 topologyinfo; ++ }; ++ } v8; ++}; ++ + /* The actual code comes here */ + + static inline int hypervisor_domctl(void *domctl) +@@ -99,6 +203,11 @@ static inline int hypervisor_domctl(void + return _hypercall1(int, domctl, domctl); + } + ++static inline int hypervisor_sysctl(void *sysctl) ++{ ++ return _hypercall1(int, sysctl, sysctl); ++} ++ + int xen_guest_address_size(int domid) + { + union xen_domctl domctl; +@@ -263,6 +372,172 @@ int xen_set_physical_cpu_affinity(int pc + } + EXPORT_SYMBOL_GPL(xen_set_physical_cpu_affinity); + ++int xen_get_topology_info(unsigned int cpu, u32 *core, u32 *sock, u32 *node) ++{ ++ union xen_sysctl sysctl; ++ uint32_t *cores = NULL, *socks = NULL, *nodes = NULL; ++ unsigned int nr; ++ int rc; ++ ++ if (core) ++ cores = kmalloc((cpu + 1) * sizeof(*cores), GFP_KERNEL); ++ if (sock) ++ socks = kmalloc((cpu + 1) * sizeof(*socks), GFP_KERNEL); ++ if (node) ++ nodes = kmalloc((cpu + 1) * sizeof(*nodes), GFP_KERNEL); ++ if ((core && !cores) || (sock && !socks) || (node && !nodes)) { ++ kfree(cores); ++ kfree(socks); ++ kfree(nodes); ++ return -ENOMEM; ++ } ++ ++#define topologyinfo(ver) do { \ ++ memset(&sysctl, 0, sizeof(sysctl)); \ ++ sysctl.v##ver.cmd = XEN_SYSCTL_topologyinfo; \ ++ sysctl.v##ver.interface_version = ver; \ ++ sysctl.v##ver.topologyinfo.max_cpu_index = cpu; \ ++ set_xen_guest_handle(sysctl.v##ver.topologyinfo.cpu_to_core, \ ++ cores); \ ++ set_xen_guest_handle(sysctl.v##ver.topologyinfo.cpu_to_socket, \ ++ socks); \ ++ set_xen_guest_handle(sysctl.v##ver.topologyinfo.cpu_to_node, \ ++ nodes); \ ++ rc = hypervisor_sysctl(&sysctl); \ ++ nr = sysctl.v##ver.topologyinfo.max_cpu_index + 1; \ ++} while (0) ++ ++ BUILD_BUG_ON(XEN_SYSCTL_INTERFACE_VERSION > 8); ++ topologyinfo(8); ++ ++#if CONFIG_XEN_COMPAT < 0x040100 ++#define pm_op_cputopo(ver) do { \ ++ memset(&sysctl, 0, sizeof(sysctl)); \ ++ sysctl.v##ver.cmd = XEN_SYSCTL_pm_op; \ ++ sysctl.v##ver.interface_version = ver; \ ++ sysctl.v##ver.pm_op.cmd = XEN_SYSCTL_pm_op_get_cputopo; \ ++ sysctl.v##ver.pm_op.cpuid = 0; \ ++ sysctl.v##ver.pm_op.get_topo.max_cpus = cpu + 1; \ ++ set_xen_guest_handle(sysctl.v##ver.pm_op.get_topo.cpu_to_core, \ ++ cores); \ ++ set_xen_guest_handle(sysctl.v##ver.pm_op.get_topo.cpu_to_socket,\ ++ socks); \ ++ rc = hypervisor_sysctl(&sysctl); \ ++ memset(&sysctl, 0, sizeof(sysctl)); \ ++ sysctl.v##ver.cmd = 
XEN_SYSCTL_physinfo; \ ++ sysctl.v##ver.interface_version = ver; \ ++ sysctl.v##ver.physinfo.max_cpu_id = cpu; \ ++ set_xen_guest_handle(sysctl.v##ver.physinfo.cpu_to_node, nodes);\ ++ rc = hypervisor_sysctl(&sysctl) ?: rc; \ ++ nr = sysctl.v##ver.physinfo.max_cpu_id + 1; \ ++} while (0) ++ ++ if (rc) ++ pm_op_cputopo(7); ++#endif ++#if CONFIG_XEN_COMPAT < 0x040000 ++ if (rc) ++ pm_op_cputopo(6); ++#endif ++ ++ if (!rc && cpu >= nr) ++ rc = -EDOM; ++ ++ if (!rc && core && (*core = cores[cpu]) == INVALID_TOPOLOGY_ID) ++ rc = -ENOENT; ++ kfree(cores); ++ ++ if (!rc && sock && (*sock = socks[cpu]) == INVALID_TOPOLOGY_ID) ++ rc = -ENOENT; ++ kfree(socks); ++ ++ if (!rc && node && (*node = nodes[cpu]) == INVALID_TOPOLOGY_ID) ++ rc = -ENOENT; ++ kfree(nodes); ++ ++ return rc; ++} ++EXPORT_SYMBOL_GPL(xen_get_topology_info); ++ ++#include ++#include ++ ++int rdmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, u32 *l, u32 *h) ++{ ++ int err = xen_set_physical_cpu_affinity(pcpu); ++ ++ switch (err) { ++ case 0: ++ err = rdmsr_safe(msr_no, l, h); ++ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); ++ break; ++ case -EINVAL: ++ /* Fall back in case this is due to dom0_vcpus_pinned. */ ++ err = rdmsr_safe_on_cpu(pcpu, msr_no, l, h) ?: 1; ++ break; ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(rdmsr_safe_on_pcpu); ++ ++int wrmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, u32 l, u32 h) ++{ ++ int err = xen_set_physical_cpu_affinity(pcpu); ++ ++ switch (err) { ++ case 0: ++ err = wrmsr_safe(msr_no, l, h); ++ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); ++ break; ++ case -EINVAL: ++ /* Fall back in case this is due to dom0_vcpus_pinned. */ ++ err = wrmsr_safe_on_cpu(pcpu, msr_no, l, h) ?: 1; ++ break; ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(wrmsr_safe_on_pcpu); ++ ++int rdmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs) ++{ ++ int err = xen_set_physical_cpu_affinity(pcpu); ++ ++ switch (err) { ++ case 0: ++ err = rdmsr_safe_regs(regs); ++ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); ++ break; ++ case -EINVAL: ++ /* Fall back in case this is due to dom0_vcpus_pinned. */ ++ err = rdmsr_safe_regs_on_cpu(pcpu, regs) ?: 1; ++ break; ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(rdmsr_safe_regs_on_pcpu); ++ ++int wrmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs) ++{ ++ int err = xen_set_physical_cpu_affinity(pcpu); ++ ++ switch (err) { ++ case 0: ++ err = wrmsr_safe_regs(regs); ++ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); ++ break; ++ case -EINVAL: ++ /* Fall back in case this is due to dom0_vcpus_pinned. 
*/ ++ err = wrmsr_safe_regs_on_cpu(pcpu, regs) ?: 1; ++ break; ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(wrmsr_safe_regs_on_pcpu); ++ + #endif /* CONFIG_X86 */ + + MODULE_LICENSE("GPL"); +--- head-2011-03-11.orig/drivers/xen/core/domctl.h 2010-11-23 16:20:20.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/domctl.h 2011-02-03 14:42:26.000000000 +0100 +@@ -1,3 +1,4 @@ + int xen_guest_address_size(int domid); + int xen_guest_blkif_protocol(int domid); + int xen_set_physical_cpu_affinity(int pcpu); ++int xen_get_topology_info(unsigned int cpu, u32 *core, u32 *socket, u32 *node); +--- head-2011-03-11.orig/drivers/xen/core/pcpu.c 2011-02-02 15:09:57.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/pcpu.c 2011-02-03 14:42:26.000000000 +0100 +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + + struct pcpu { +@@ -35,6 +36,44 @@ static DEFINE_MUTEX(xen_pcpu_lock); + + static LIST_HEAD(xen_pcpus); + ++static BLOCKING_NOTIFIER_HEAD(pcpu_chain); ++ ++static inline void *notifier_param(const struct pcpu *pcpu) ++{ ++ return (void *)(unsigned long)pcpu->xen_id; ++} ++ ++int register_pcpu_notifier(struct notifier_block *nb) ++{ ++ int err; ++ ++ get_pcpu_lock(); ++ ++ err = blocking_notifier_chain_register(&pcpu_chain, nb); ++ ++ if (!err) { ++ struct pcpu *pcpu; ++ ++ list_for_each_entry(pcpu, &xen_pcpus, pcpu_list) ++ if (xen_pcpu_online(pcpu->flags)) ++ nb->notifier_call(nb, CPU_ONLINE, ++ notifier_param(pcpu)); ++ } ++ ++ put_pcpu_lock(); ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(register_pcpu_notifier); ++ ++void unregister_pcpu_notifier(struct notifier_block *nb) ++{ ++ get_pcpu_lock(); ++ blocking_notifier_chain_unregister(&pcpu_chain, nb); ++ put_pcpu_lock(); ++} ++EXPORT_SYMBOL_GPL(unregister_pcpu_notifier); ++ + static int xen_pcpu_down(uint32_t xen_id) + { + xen_platform_op_t op = { +@@ -151,12 +190,16 @@ static int xen_pcpu_online_check(struct + if (xen_pcpu_online(info->flags) && !xen_pcpu_online(pcpu->flags)) { + /* the pcpu is onlined */ + pcpu->flags |= XEN_PCPU_FLAGS_ONLINE; ++ blocking_notifier_call_chain(&pcpu_chain, CPU_ONLINE, ++ notifier_param(pcpu)); + kobject_uevent(&pcpu->sysdev.kobj, KOBJ_ONLINE); + result = 1; + } else if (!xen_pcpu_online(info->flags) && + xen_pcpu_online(pcpu->flags)) { + /* The pcpu is offlined now */ + pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE; ++ blocking_notifier_call_chain(&pcpu_chain, CPU_DEAD, ++ notifier_param(pcpu)); + kobject_uevent(&pcpu->sysdev.kobj, KOBJ_OFFLINE); + result = 1; + } +@@ -350,6 +393,8 @@ static irqreturn_t xen_pcpu_interrupt(in + return IRQ_HANDLED; + } + ++#ifdef CONFIG_ACPI_HOTPLUG_CPU ++ + int xen_pcpu_hotplug(int type) + { + schedule_work(&xen_pcpu_work); +@@ -387,6 +432,8 @@ int xen_pcpu_index(uint32_t id, bool is_ + } + EXPORT_SYMBOL_GPL(xen_pcpu_index); + ++#endif /* CONFIG_ACPI_HOTPLUG_CPU */ ++ + static int __init xen_pcpu_init(void) + { + int err; +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/include/xen/pcpu.h 2011-02-03 14:42:26.000000000 +0100 +@@ -0,0 +1,18 @@ ++#ifndef _XEN_SYSCTL_H ++#define _XEN_SYSCTL_H ++ ++#include ++ ++int register_pcpu_notifier(struct notifier_block *); ++void unregister_pcpu_notifier(struct notifier_block *); ++ ++#ifdef CONFIG_X86 ++int __must_check rdmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, ++ u32 *l, u32 *h); ++int __must_check wrmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, ++ u32 l, u32 h); ++int __must_check rdmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs); ++int __must_check wrmsr_safe_regs_on_pcpu(unsigned int 
pcpu, u32 *regs); ++#endif ++ ++#endif /* _XEN_SYSCTL_H */ diff --git a/patches.xen/xen-x86-no-lapic b/patches.xen/xen-x86-no-lapic index a4d477d..a7a538f 100644 --- a/patches.xen/xen-x86-no-lapic +++ b/patches.xen/xen-x86-no-lapic @@ -3,8 +3,8 @@ Subject: Disallow all accesses to the local APIC page Patch-mainline: n/a References: bnc#191115 ---- head-2010-05-12.orig/arch/x86/include/asm/apic.h 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/asm/apic.h 2010-03-25 14:40:58.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/asm/apic.h 2011-02-17 10:23:17.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/apic.h 2011-02-17 10:36:33.000000000 +0100 @@ -10,7 +10,9 @@ #include #include @@ -32,7 +32,7 @@ References: bnc#191115 #ifdef CONFIG_X86_X2APIC /* * Make previous memory operations globally visible before -@@ -367,6 +372,8 @@ struct apic { +@@ -371,6 +376,8 @@ struct apic { */ extern struct apic *apic; @@ -41,7 +41,7 @@ References: bnc#191115 /* * APIC functionality to boot other CPUs - only used on SMP: */ -@@ -460,6 +467,8 @@ static inline void default_wait_for_init +@@ -473,6 +480,8 @@ static inline void default_wait_for_init extern void generic_bigsmp_probe(void); @@ -50,7 +50,7 @@ References: bnc#191115 #ifdef CONFIG_X86_LOCAL_APIC -@@ -479,6 +488,8 @@ static inline const struct cpumask *defa +@@ -492,6 +501,8 @@ static inline const struct cpumask *defa DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); @@ -59,7 +59,7 @@ References: bnc#191115 static inline unsigned int read_apic_id(void) { unsigned int reg; -@@ -587,6 +598,8 @@ extern int default_cpu_present_to_apicid +@@ -600,6 +611,8 @@ extern int default_cpu_present_to_apicid extern int default_check_phys_apicid_present(int phys_apicid); #endif @@ -68,8 +68,8 @@ References: bnc#191115 #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_32 ---- head-2010-05-12.orig/arch/x86/include/asm/apicdef.h 2010-05-12 08:55:22.000000000 +0200 -+++ head-2010-05-12/arch/x86/include/asm/apicdef.h 2010-03-25 14:40:58.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/asm/apicdef.h 2011-03-11 10:41:53.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/apicdef.h 2011-02-03 14:34:58.000000000 +0100 @@ -17,6 +17,8 @@ */ #define IO_APIC_SLOT_SIZE 1024 @@ -79,7 +79,7 @@ References: bnc#191115 #define APIC_ID 0x20 #define APIC_LVR 0x30 -@@ -142,6 +144,16 @@ +@@ -143,6 +145,16 @@ #define APIC_BASE_MSR 0x800 #define X2APIC_ENABLE (1UL << 10) @@ -95,8 +95,8 @@ References: bnc#191115 + #ifdef CONFIG_X86_32 # define MAX_IO_APICS 64 - #else -@@ -149,6 +161,8 @@ + # define MAX_LOCAL_APIC 256 +@@ -151,6 +163,8 @@ # define MAX_LOCAL_APIC 32768 #endif @@ -105,7 +105,7 @@ References: bnc#191115 /* * All x86-64 systems are xAPIC compatible. * In the following, "apicid" is a physical APIC ID. 
-@@ -419,6 +433,8 @@ struct local_apic { +@@ -421,6 +435,8 @@ struct local_apic { #undef u32 @@ -114,8 +114,8 @@ References: bnc#191115 #ifdef CONFIG_X86_32 #define BAD_APICID 0xFFu #else ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-04-15 10:29:09.000000000 +0200 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/fixmap.h 2010-04-15 11:47:12.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-03 14:34:58.000000000 +0100 @@ -17,7 +17,6 @@ #ifndef __ASSEMBLY__ #include @@ -136,8 +136,8 @@ References: bnc#191115 #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/smp.h 2010-04-26 11:32:06.000000000 +0200 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/smp.h 2010-04-28 17:21:52.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:12:54.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:50:42.000000000 +0100 @@ -15,7 +15,7 @@ # include # endif @@ -146,8 +146,8 @@ References: bnc#191115 +#include #include - extern int smp_num_siblings; -@@ -181,7 +181,7 @@ extern unsigned disabled_cpus __cpuinitd + extern unsigned int num_processors; +@@ -190,7 +190,7 @@ extern unsigned disabled_cpus __cpuinitd #include @@ -156,14 +156,15 @@ References: bnc#191115 #ifndef CONFIG_X86_64 static inline int logical_smp_processor_id(void) ---- head-2010-05-12.orig/arch/x86/kernel/acpi/boot.c 2010-04-15 10:07:05.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/acpi/boot.c 2010-04-15 11:47:20.000000000 +0200 -@@ -74,13 +74,13 @@ int acpi_sci_override_gsi __initdata; - #ifndef CONFIG_XEN +--- head-2011-03-11.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 11:06:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/boot.c 2011-03-11 11:17:06.000000000 +0100 +@@ -74,14 +74,14 @@ int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; + int acpi_fix_pin2_polarity __initdata; -#else -#define acpi_skip_timer_override 0 +-#define acpi_fix_pin2_polarity 0 -#endif #ifdef CONFIG_X86_LOCAL_APIC @@ -171,11 +172,12 @@ References: bnc#191115 #endif +#else +#define acpi_skip_timer_override 0 ++#define acpi_fix_pin2_polarity 0 +#endif #ifndef __HAVE_ARCH_CMPXCHG #warning ACPI uses CMPXCHG, i486 and later hardware -@@ -139,6 +139,7 @@ static int __init acpi_parse_madt(struct +@@ -187,6 +187,7 @@ static int __init acpi_parse_madt(struct return -ENODEV; } @@ -183,7 +185,7 @@ References: bnc#191115 if (madt->address) { acpi_lapic_addr = (u64) madt->address; -@@ -146,7 +147,6 @@ static int __init acpi_parse_madt(struct +@@ -194,7 +195,6 @@ static int __init acpi_parse_madt(struct madt->address); } @@ -191,7 +193,7 @@ References: bnc#191115 default_acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); #endif -@@ -247,6 +247,7 @@ static int __init +@@ -300,6 +300,7 @@ static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end) { @@ -199,7 +201,7 @@ References: bnc#191115 struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; -@@ -255,6 +256,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_su +@@ -308,6 +309,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_su return -EINVAL; acpi_lapic_addr = 
lapic_addr_ovr->address; @@ -207,57 +209,9 @@ References: bnc#191115 return 0; } -@@ -1094,7 +1096,7 @@ int mp_register_gsi(struct device *dev, - - ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); - --#ifdef CONFIG_X86_32 -+#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) - if (ioapic_renumber_irq) - gsi = ioapic_renumber_irq(ioapic, gsi); - #endif ---- head-2010-05-12.orig/arch/x86/kernel/apic/io_apic-xen.c 2010-05-12 09:09:25.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/apic/io_apic-xen.c 2010-05-12 09:15:16.000000000 +0200 -@@ -1071,7 +1071,9 @@ static inline int irq_trigger(int idx) - return MPBIOS_trigger(idx); - } - -+#ifndef CONFIG_XEN - int (*ioapic_renumber_irq)(int ioapic, int irq); -+#endif - static int pin_2_irq(int idx, int apic, int pin) - { - int irq, i; -@@ -1093,11 +1095,13 @@ static int pin_2_irq(int idx, int apic, - while (i < apic) - irq += nr_ioapic_registers[i++]; - irq += pin; -+#ifndef CONFIG_XEN - /* - * For MPS mode, so far only needed by ES7000 platform - */ - if (ioapic_renumber_irq) - irq = ioapic_renumber_irq(apic, irq); -+#endif - } - - #ifdef CONFIG_X86_32 -@@ -4106,10 +4110,12 @@ int io_apic_set_pci_routing(struct devic - u8 __init io_apic_unique_id(u8 id) - { - #ifdef CONFIG_X86_32 -+#ifndef CONFIG_XEN - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && - !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - return io_apic_get_unique_id(nr_ioapics, id); - else -+#endif - return id; - #else - int i; ---- head-2010-05-12.orig/arch/x86/kernel/irq-xen.c 2010-01-25 13:46:29.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/irq-xen.c 2010-03-25 14:40:58.000000000 +0100 -@@ -15,9 +15,9 @@ +--- head-2011-03-11.orig/arch/x86/kernel/irq-xen.c 2011-02-02 15:09:43.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/irq-xen.c 2011-02-03 14:34:58.000000000 +0100 +@@ -16,9 +16,9 @@ #include #include @@ -268,7 +222,7 @@ References: bnc#191115 /* Function pointer for generic interrupt vector handling */ void (*x86_platform_ipi_callback)(void) = NULL; #endif -@@ -57,7 +57,7 @@ static int show_other_interrupts(struct +@@ -58,7 +58,7 @@ static int show_other_interrupts(struct for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); seq_printf(p, " Non-maskable interrupts\n"); @@ -277,7 +231,7 @@ References: bnc#191115 seq_printf(p, "%*s: ", prec, "LOC"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); -@@ -122,10 +122,12 @@ static int show_other_interrupts(struct +@@ -128,10 +128,12 @@ static int show_other_interrupts(struct seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); seq_printf(p, " Machine check polls\n"); #endif @@ -290,7 +244,7 @@ References: bnc#191115 return 0; } -@@ -221,12 +223,16 @@ u64 arch_irq_stat_cpu(unsigned int cpu) +@@ -229,12 +231,16 @@ u64 arch_irq_stat_cpu(unsigned int cpu) u64 arch_irq_stat(void) { @@ -307,63 +261,9 @@ References: bnc#191115 } ---- head-2010-05-12.orig/arch/x86/kernel/mpparse-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/mpparse-xen.c 2010-03-25 14:40:58.000000000 +0100 -@@ -288,7 +288,9 @@ static int __init smp_check_mpc(struct m - - printk(KERN_INFO "MPTABLE: Product ID: %s\n", str); - -+#ifndef CONFIG_XEN - printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic); -+#endif - - return 1; - } -@@ -320,12 +322,14 @@ static int __init smp_read_mpc(struct mp - if (!smp_check_mpc(mpc, oem, str)) - return 0; - --#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) -+#ifndef CONFIG_XEN -+#ifdef CONFIG_X86_32 - generic_mps_oem_check(mpc, oem, 
str); - #endif - /* save the local APIC address, it might be non-default */ - if (!acpi_lapic) - mp_lapic_addr = mpc->lapic; -+#endif - - if (early) - return 1; -@@ -512,10 +516,12 @@ static inline void __init construct_defa - int linttypes[2] = { mp_ExtINT, mp_NMI }; - int i; - -+#ifndef CONFIG_XEN - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; -+#endif - - /* - * 2 CPUs, numbered 0 & 1. -@@ -648,10 +654,12 @@ void __init default_get_smp_config(unsig - */ - if (mpf->feature1 != 0) { - if (early) { -+#ifndef CONFIG_XEN - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; -+#endif - return; - } - ---- head-2010-05-12.orig/drivers/xen/core/smpboot.c 2010-04-28 16:44:14.000000000 +0200 -+++ head-2010-05-12/drivers/xen/core/smpboot.c 2010-04-28 17:21:59.000000000 +0200 -@@ -341,7 +341,7 @@ void __init smp_prepare_cpus(unsigned in +--- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-02-02 15:10:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-03-03 16:50:49.000000000 +0100 +@@ -283,7 +283,7 @@ void __init smp_prepare_cpus(unsigned in * Here we can be sure that there is an IO-APIC in the system. Let's * go and set it up: */ diff --git a/patches.xen/xen-x86-panic-no-reboot b/patches.xen/xen-x86-panic-no-reboot index 78bcc0b..b80124a 100644 --- a/patches.xen/xen-x86-panic-no-reboot +++ b/patches.xen/xen-x86-panic-no-reboot @@ -4,9 +4,9 @@ Patch-mainline: obsolete $subject says it all. ---- head-2010-04-15.orig/arch/x86/kernel/setup-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/setup-xen.c 2010-04-15 11:46:02.000000000 +0200 -@@ -791,15 +791,17 @@ void __init setup_arch(char **cmdline_p) +--- head-2011-01-03.orig/arch/x86/kernel/setup-xen.c 2011-01-03 13:29:09.000000000 +0100 ++++ head-2011-01-03/arch/x86/kernel/setup-xen.c 2011-01-03 14:07:52.000000000 +0100 +@@ -784,15 +784,17 @@ void __init setup_arch(char **cmdline_p) unsigned long p2m_pages; struct physdev_set_iopl set_iopl; @@ -27,6 +27,6 @@ $subject says it all. + /* Register a call for panic conditions. */ + atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); + } - #endif /* CONFIG_XEN */ - #ifdef CONFIG_X86_32 + set_iopl.iopl = 1; + WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl)); diff --git a/patches.xen/xen-x86-per-cpu-vcpu-info b/patches.xen/xen-x86-per-cpu-vcpu-info index 71de338..76c0768 100644 --- a/patches.xen/xen-x86-per-cpu-vcpu-info +++ b/patches.xen/xen-x86-per-cpu-vcpu-info @@ -5,18 +5,20 @@ Patch-mainline: obsolete ... reducing access code size and latency, as well as being the prerequisite for removing the limitation on 32 vCPU-s per guest. ---- head-2010-04-15.orig/arch/x86/include/asm/percpu.h 2010-04-28 15:44:01.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/asm/percpu.h 2010-03-25 14:42:40.000000000 +0100 -@@ -190,6 +190,38 @@ do { \ - pfo_ret__; \ +--- head-2011-03-17.orig/arch/x86/include/asm/percpu.h 2011-03-17 14:35:41.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/percpu.h 2011-02-07 11:41:40.000000000 +0100 +@@ -309,6 +309,40 @@ do { \ + pxo_ret__; \ }) -+#define percpu_xchg_op(op, var, val) \ ++#define percpu_exchange_op(op, var, val) \ +({ \ + typedef typeof(var) pxo_T__; \ + pxo_T__ pxo_ret__; \ -+ if (0) \ ++ if (0) { \ + pxo_ret__ = (val); \ ++ (void)pxo_ret__; \ ++ } \ + switch (sizeof(var)) { \ + case 1: \ + asm(op "b %0,"__percpu_arg(1) \ @@ -44,22 +46,22 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. 
+}) + /* - * percpu_read() makes gcc load the percpu variable every time it is - * accessed while percpu_read_stable() allows the value to be cached. -@@ -207,6 +239,10 @@ do { \ - #define percpu_and(var, val) percpu_to_op("and", var, val) + * cmpxchg has no such implied lock semantics as a result it is much + * more efficient for cpu local operations. +@@ -366,6 +400,10 @@ do { \ #define percpu_or(var, val) percpu_to_op("or", var, val) #define percpu_xor(var, val) percpu_to_op("xor", var, val) -+#define percpu_xchg(var, val) percpu_xchg_op("xchg", var, val) -+#if defined(CONFIG_X86_XADD) || defined(CONFIG_X86_64) -+#define percpu_xadd(var, val) percpu_xchg_op("xadd", var, val) + #define percpu_inc(var) percpu_unary_op("inc", var) ++#define percpu_xchg(var, val) percpu_exchange_op("xchg", var, val) ++#ifdef CONFIG_X86_XADD ++#define percpu_xadd(var, val) percpu_exchange_op("xadd", var, val) +#endif #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:41:00.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:41:15.000000000 +0100 -@@ -50,12 +50,26 @@ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:41:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:42:15.000000000 +0100 +@@ -51,12 +51,26 @@ extern shared_info_t *HYPERVISOR_shared_info; @@ -86,9 +88,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-25 14:41:15.000000000 +0100 -@@ -12,7 +12,7 @@ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-02 15:09:52.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-03 14:42:15.000000000 +0100 +@@ -14,7 +14,7 @@ * includes these barriers, for example. */ @@ -97,7 +99,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #define xen_restore_fl(f) \ do { \ -@@ -28,7 +28,7 @@ do { \ +@@ -30,7 +30,7 @@ do { \ #define xen_irq_disable() \ do { \ @@ -106,7 +108,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. barrier(); \ } while (0) -@@ -90,8 +90,6 @@ static inline void halt(void) +@@ -85,8 +85,6 @@ do { \ #define evtchn_upcall_pending /* 0 */ #define evtchn_upcall_mask 1 @@ -115,7 +117,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #ifdef CONFIG_X86_64 # define __REG_si %rsi # define __CPU_num PER_CPU_VAR(cpu_number) -@@ -100,6 +98,22 @@ static inline void halt(void) +@@ -95,6 +93,22 @@ do { \ # define __CPU_num TI_cpu(%ebp) #endif @@ -138,7 +140,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #ifdef CONFIG_SMP #define GET_VCPU_INFO movl __CPU_num,%esi ; \ shl $sizeof_vcpu_shift,%esi ; \ -@@ -116,15 +130,21 @@ static inline void halt(void) +@@ -111,15 +125,21 @@ do { \ #define ENABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \ __ENABLE_INTERRUPTS @@ -163,20 +165,20 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. 
TRACE_IRQS_OFF ; \ sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \ mov $__KERNEL_PERCPU, %ecx ; \ ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-25 14:41:00.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-25 14:41:15.000000000 +0100 -@@ -117,6 +117,8 @@ static inline void xen_set_pgd(pgd_t *pg +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:41:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:42:15.000000000 +0100 +@@ -128,6 +128,8 @@ static inline void xen_set_pgd(pgd_t *pg #define __pte_mfn(_pte) (((_pte).pte & PTE_PFN_MASK) >> PAGE_SHIFT) +extern unsigned long early_arbitrary_virt_to_mfn(void *va); + + extern void sync_global_pgds(unsigned long start, unsigned long end); + /* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/system.h 2010-01-25 13:43:44.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/system.h 2010-03-25 14:41:15.000000000 +0100 -@@ -245,8 +245,8 @@ static inline void xen_write_cr0(unsigne +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:13:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:48:41.000000000 +0100 +@@ -247,8 +247,8 @@ static inline void xen_write_cr0(unsigne asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); } @@ -187,9 +189,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. static inline unsigned long xen_read_cr3(void) { ---- head-2010-04-15.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/cpu/common-xen.c 2010-03-25 14:41:15.000000000 +0100 -@@ -335,8 +335,16 @@ static const char *__cpuinit table_looku +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:43:14.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:44:07.000000000 +0100 +@@ -346,8 +346,16 @@ static const char *__cpuinit table_looku __u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; __u32 cpu_caps_set[NCAPINTS] __cpuinitdata; @@ -207,9 +209,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #ifdef CONFIG_X86_32 loadsegment(fs, __KERNEL_PERCPU); #else ---- head-2010-04-15.orig/arch/x86/kernel/entry_32-xen.S 2010-01-25 15:45:18.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/entry_32-xen.S 2010-03-25 14:41:15.000000000 +0100 -@@ -471,6 +471,9 @@ sysenter_exit: +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-02 15:07:22.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-03 14:42:15.000000000 +0100 +@@ -439,6 +439,9 @@ sysenter_exit: movl PT_EIP(%esp), %edx movl PT_OLDESP(%esp), %ecx xorl %ebp,%ebp @@ -219,7 +221,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. TRACE_IRQS_ON 1: mov PT_FS(%esp), %fs PTGS_TO_GS -@@ -1036,7 +1039,9 @@ critical_region_fixup: +@@ -997,7 +1000,9 @@ critical_region_fixup: .section .rodata,"a" critical_fixup_table: @@ -230,7 +232,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. 
.byte -1,-1 # jnz 14f .byte 0 # pop %ebx .byte 1 # pop %ecx -@@ -1055,7 +1060,9 @@ critical_fixup_table: +@@ -1016,7 +1021,9 @@ critical_fixup_table: .byte 10,10,10 # add $8,%esp #endif .byte 12 # iret @@ -241,9 +243,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. .previous # Hypervisor uses this for application faults while it executes. ---- head-2010-04-15.orig/arch/x86/kernel/head-xen.c 2010-04-15 11:46:18.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/head-xen.c 2010-04-28 17:22:58.000000000 +0200 -@@ -153,6 +153,8 @@ void __init xen_start_kernel(void) +--- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-03 14:42:15.000000000 +0100 +@@ -144,6 +144,8 @@ void __init xen_start_kernel(void) HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO); clear_page(empty_zero_page); @@ -252,9 +254,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. /* Set up mapping of lowest 1MB of physical memory. */ for (i = 0; i < NR_FIX_ISAMAPS; i++) if (is_initial_xendomain()) ---- head-2010-04-15.orig/arch/x86/kernel/time-xen.c 2010-03-02 10:20:42.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/time-xen.c 2010-03-25 14:41:15.000000000 +0100 -@@ -282,16 +282,10 @@ static void get_time_values_from_xen(uns +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-02-02 15:09:52.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-02-03 14:42:15.000000000 +0100 +@@ -247,16 +247,10 @@ static void get_time_values_from_xen(uns local_irq_restore(flags); } @@ -273,7 +275,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. } static void sync_xen_wallclock(unsigned long dummy); -@@ -337,7 +331,7 @@ unsigned long long xen_local_clock(void) +@@ -301,7 +295,7 @@ unsigned long long xen_local_clock(void) local_time_version = shadow->version; rdtsc_barrier(); time = shadow->system_timestamp + get_nsec_offset(shadow); @@ -282,8 +284,8 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. get_time_values_from_xen(cpu); barrier(); } while (local_time_version != shadow->version); ---- head-2010-04-15.orig/arch/x86/mm/hypervisor.c 2010-03-25 14:41:00.000000000 +0100 -+++ head-2010-04-15/arch/x86/mm/hypervisor.c 2010-03-25 14:41:15.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2011-02-03 14:41:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-03 14:42:15.000000000 +0100 @@ -41,6 +41,7 @@ #include #include @@ -292,7 +294,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #include #include #include -@@ -50,7 +51,104 @@ +@@ -50,7 +51,103 @@ EXPORT_SYMBOL(hypercall_page); shared_info_t *__read_mostly HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; @@ -353,9 +355,8 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. + lpfn = mfn_to_local_pfn(lmfn); + rpfn = mfn_to_local_pfn(rmfn); + -+ printk(KERN_INFO -+ "Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n", -+ lpfn, rpfn, lmfn, rmfn); ++ pr_info("Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n", ++ lpfn, rpfn, lmfn, rmfn); + + xen_l1_entry_update(lpte, pfn_pte_ma(rmfn, pte_pgprot(*lpte))); + xen_l1_entry_update(rpte, pfn_pte_ma(lmfn, pte_pgprot(*rpte))); @@ -397,8 +398,8 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. 
#define NR_MC BITS_PER_LONG #define NR_MMU BITS_PER_LONG ---- head-2010-04-15.orig/arch/x86/mm/init_64-xen.c 2010-04-15 11:47:48.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/init_64-xen.c 2010-04-15 11:48:38.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-03 14:41:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:15.000000000 +0100 @@ -118,6 +118,26 @@ void __meminit early_make_page_readonly( BUG(); } @@ -426,9 +427,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #ifndef CONFIG_XEN static int __init parse_direct_gbpages_off(char *arg) { ---- head-2010-04-15.orig/drivers/xen/Kconfig 2010-03-31 14:12:07.000000000 +0200 -+++ head-2010-04-15/drivers/xen/Kconfig 2010-03-31 14:12:47.000000000 +0200 -@@ -367,6 +367,18 @@ config XEN_COMPAT +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-09 16:23:14.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-09 16:23:27.000000000 +0100 +@@ -372,6 +372,18 @@ config XEN_COMPAT default 0x030002 if XEN_COMPAT_030002_AND_LATER default 0 @@ -447,9 +448,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. endmenu config HAVE_IRQ_IGNORE_UNHANDLED ---- head-2010-04-15.orig/drivers/xen/core/evtchn.c 2010-04-23 15:20:52.000000000 +0200 -+++ head-2010-04-15/drivers/xen/core/evtchn.c 2010-04-23 15:21:14.000000000 +0200 -@@ -323,6 +323,24 @@ static DEFINE_PER_CPU(unsigned int, upca +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-16 08:30:09.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-16 08:30:33.000000000 +0100 +@@ -355,6 +355,24 @@ static DEFINE_PER_CPU(unsigned int, upca static DEFINE_PER_CPU(unsigned int, current_l1i); static DEFINE_PER_CPU(unsigned int, current_l2i); @@ -474,7 +475,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. /* NB. Interrupts are disabled on entry. */ asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs) { -@@ -331,25 +349,25 @@ asmlinkage void __irq_entry evtchn_do_up +@@ -363,23 +381,23 @@ asmlinkage void __irq_entry evtchn_do_up unsigned long masked_l1, masked_l2; unsigned int l1i, l2i, start_l1i, start_l2i, port, count, i; int irq; @@ -500,12 +501,18 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. +#else + barrier(); #endif + + #ifndef CONFIG_NO_HZ +@@ -410,7 +428,7 @@ asmlinkage void __irq_entry evtchn_do_up + } + #endif /* CONFIG_NO_HZ */ + - l1 = xchg(&vcpu_info->evtchn_pending_sel, 0); + l1 = vcpu_info_xchg(evtchn_pending_sel, 0); start_l1i = l1i = percpu_read(current_l1i); start_l2i = percpu_read(current_l2i); -@@ -1370,7 +1388,6 @@ void unmask_evtchn(int port) +@@ -1517,7 +1535,6 @@ void unmask_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; unsigned int cpu = smp_processor_id(); @@ -513,7 +520,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. BUG_ON(!irqs_disabled()); -@@ -1384,10 +1401,13 @@ void unmask_evtchn(int port) +@@ -1531,10 +1548,13 @@ void unmask_evtchn(int port) synch_clear_bit(port, s->evtchn_mask); /* Did we miss an interrupt 'edge'? Re-fire if so. */ @@ -531,9 +538,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. 
} EXPORT_SYMBOL_GPL(unmask_evtchn); ---- head-2010-04-15.orig/drivers/xen/core/machine_reboot.c 2010-03-25 14:41:06.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/machine_reboot.c 2010-03-25 14:41:15.000000000 +0100 -@@ -74,7 +74,7 @@ static void pre_suspend(void) +--- head-2011-03-17.orig/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:11.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:15.000000000 +0100 +@@ -69,7 +69,7 @@ static void pre_suspend(void) mfn_to_pfn(xen_start_info->console.domU.mfn); } @@ -542,7 +549,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. { int i, j, k, fpp; unsigned long shinfo_mfn; -@@ -91,8 +91,21 @@ static void post_suspend(int suspend_can +@@ -86,8 +86,21 @@ static void post_suspend(int suspend_can #ifdef CONFIG_SMP cpumask_copy(vcpu_initialized_mask, cpu_online_mask); #endif @@ -565,7 +572,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. } shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT; -@@ -134,7 +147,7 @@ static void post_suspend(int suspend_can +@@ -129,7 +142,7 @@ static void post_suspend(int suspend_can #define switch_idle_mm() ((void)0) #define mm_pin_all() ((void)0) #define pre_suspend() xen_pre_suspend() @@ -574,7 +581,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. #endif -@@ -165,7 +178,7 @@ static int take_machine_down(void *_susp +@@ -160,7 +173,7 @@ static int take_machine_down(void *_susp BUG_ON(suspend_cancelled > 0); suspend->resume_notifier(suspend_cancelled); if (suspend_cancelled >= 0) @@ -583,9 +590,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. if (!suspend_cancelled) xen_clockevents_resume(); if (suspend_cancelled >= 0) ---- head-2010-04-15.orig/drivers/xen/core/smpboot.c 2010-04-28 17:21:59.000000000 +0200 -+++ head-2010-04-15/drivers/xen/core/smpboot.c 2010-04-28 17:22:15.000000000 +0200 -@@ -348,8 +348,13 @@ void __init smp_prepare_cpus(unsigned in +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-03-03 16:50:49.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-02-03 14:42:15.000000000 +0100 +@@ -290,8 +290,13 @@ void __init smp_prepare_cpus(unsigned in void __init smp_prepare_boot_cpu(void) { @@ -599,9 +606,9 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. } #ifdef CONFIG_HOTPLUG_CPU ---- head-2010-04-15.orig/drivers/xen/core/spinlock.c 2010-03-19 08:48:35.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/spinlock.c 2010-03-25 14:41:15.000000000 +0100 -@@ -105,7 +105,7 @@ bool xen_spin_wait(arch_spinlock_t *lock +--- head-2011-03-17.orig/drivers/xen/core/spinlock.c 2011-03-15 16:19:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/spinlock.c 2011-03-15 16:19:57.000000000 +0100 +@@ -144,7 +144,7 @@ unsigned int xen_spin_wait(arch_spinlock spinning.prev = percpu_read(_spinning); smp_wmb(); percpu_write(_spinning, &spinning); @@ -610,7 +617,7 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. do { bool nested = false; -@@ -171,12 +171,12 @@ bool xen_spin_wait(arch_spinlock_t *lock +@@ -210,13 +210,13 @@ unsigned int xen_spin_wait(arch_spinlock * intended event processing will happen with the poll * call. */ @@ -619,10 +626,11 @@ prerequisite for removing the limitation on 32 vCPU-s per guest. + vcpu_info_write(evtchn_upcall_mask, + nested ? 
upcall_mask : flags); - xen_poll_irq(irq); + if (HYPERVISOR_poll_no_timeout(&__get_cpu_var(poll_evtchn), 1)) + BUG(); - current_vcpu_info()->evtchn_upcall_mask = upcall_mask; + vcpu_info_write(evtchn_upcall_mask, upcall_mask); - rc = !xen_test_irq_pending(irq); + rc = !test_evtchn(percpu_read(poll_evtchn)); if (!rc) diff --git a/patches.xen/xen-x86-pmd-handling b/patches.xen/xen-x86-pmd-handling index 7336d1a..ce0aad4 100644 --- a/patches.xen/xen-x86-pmd-handling +++ b/patches.xen/xen-x86-pmd-handling @@ -2,9 +2,9 @@ From: jbeulich@novell.com Subject: consolidate pmd/pud/pgd entry handling Patch-mainline: obsolete ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:39:15.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:41:00.000000000 +0100 -@@ -97,10 +97,12 @@ void xen_invlpg(unsigned long ptr); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-02 15:09:52.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:41:13.000000000 +0100 +@@ -101,10 +101,12 @@ void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ @@ -18,7 +18,7 @@ Patch-mainline: obsolete void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP -@@ -333,6 +335,18 @@ MULTI_update_va_mapping( +@@ -337,6 +339,18 @@ MULTI_update_va_mapping( } static inline void @@ -37,8 +37,8 @@ Patch-mainline: obsolete MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-25 16:41:03.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-25 14:41:00.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-03 14:41:13.000000000 +0100 @@ -75,20 +75,16 @@ static inline void pmd_populate(struct m struct page *pte) { @@ -118,8 +118,19 @@ Patch-mainline: obsolete } static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-25 14:41:00.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 15:55:04.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-07 12:14:31.000000000 +0100 +@@ -621,7 +621,7 @@ static inline pmd_t xen_local_pmdp_get_a + { + pmd_t res = *pmdp; + +- xen_pmd_clear(pmdp); ++ xen_set_pmd(pmdp, __pmd(0)); + return res; + } + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 15:47:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-03 14:41:13.000000000 +0100 @@ -61,12 +61,15 @@ static inline void __xen_pte_clear(pte_t ptep->pte_high = 0; } @@ -156,9 +167,9 @@ Patch-mainline: obsolete #ifdef CONFIG_SMP static inline pte_t xen_ptep_get_and_clear(pte_t *ptep, pte_t res) { ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-25 16:41:03.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-25 14:41:00.000000000 +0100 -@@ -79,33 +79,41 @@ static inline void xen_set_pmd(pmd_t 
*pm +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:39:36.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:41:13.000000000 +0100 +@@ -70,10 +70,13 @@ static inline void xen_set_pmd(pmd_t *pm xen_l2_entry_update(pmdp, pmd); } @@ -174,8 +185,9 @@ Patch-mainline: obsolete + : (void)(*__pmdp = xen_make_pmd(0)); \ +}) - static inline void xen_set_pud(pud_t *pudp, pud_t pud) - { + #ifdef CONFIG_SMP + static inline pte_t xen_ptep_get_and_clear(pte_t *xp, pte_t ret) +@@ -100,23 +103,28 @@ static inline void xen_set_pud(pud_t *pu xen_l3_entry_update(pudp, pud); } @@ -214,9 +226,9 @@ Patch-mainline: obsolete #define __pte_mfn(_pte) (((_pte).pte & PTE_PFN_MASK) >> PAGE_SHIFT) ---- head-2010-04-15.orig/arch/x86/mm/hypervisor.c 2009-06-09 15:52:17.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/hypervisor.c 2010-03-25 14:41:00.000000000 +0100 -@@ -360,31 +360,91 @@ void xen_l1_entry_update(pte_t *ptr, pte +--- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2010-12-08 10:45:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-03 14:41:13.000000000 +0100 +@@ -358,31 +358,91 @@ void xen_l1_entry_update(pte_t *ptr, pte } EXPORT_SYMBOL_GPL(xen_l1_entry_update); @@ -315,9 +327,9 @@ Patch-mainline: obsolete } #endif /* CONFIG_X86_64 */ ---- head-2010-04-15.orig/arch/x86/mm/init_32-xen.c 2010-03-25 14:37:41.000000000 +0100 -+++ head-2010-04-15/arch/x86/mm/init_32-xen.c 2010-03-25 14:41:00.000000000 +0100 -@@ -750,6 +750,8 @@ static void __init zone_sizes_init(void) +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-02 15:10:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-03 14:41:13.000000000 +0100 +@@ -689,6 +689,8 @@ static void __init zone_sizes_init(void) #endif free_area_init_nodes(max_zone_pfns); @@ -325,19 +337,19 @@ Patch-mainline: obsolete + xen_init_pgd_pin(); } - #ifndef CONFIG_NO_BOOTMEM -@@ -1028,8 +1030,6 @@ void __init mem_init(void) + void __init setup_bootmem_allocator(void) +@@ -908,8 +910,6 @@ void __init mem_init(void) - save_pg_dir(); - zap_low_mappings(true); + if (boot_cpu_data.wp_works_ok < 0) + test_wp_bit(); - - SetPagePinned(virt_to_page(init_mm.pgd)); } #ifdef CONFIG_MEMORY_HOTPLUG ---- head-2010-04-15.orig/arch/x86/mm/init_64-xen.c 2010-04-15 11:41:27.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/init_64-xen.c 2010-04-15 11:47:48.000000000 +0200 -@@ -194,8 +194,11 @@ static pud_t *fill_pud(pgd_t *pgd, unsig +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-02 15:10:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-03 14:41:13.000000000 +0100 +@@ -231,8 +231,11 @@ static pud_t *fill_pud(pgd_t *pgd, unsig { if (pgd_none(*pgd)) { pud_t *pud = (pud_t *)spp_getpage(); @@ -351,7 +363,7 @@ Patch-mainline: obsolete if (pud != pud_offset(pgd, 0)) printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", pud, pud_offset(pgd, 0)); -@@ -207,8 +210,11 @@ static pmd_t *fill_pmd(pud_t *pud, unsig +@@ -244,8 +247,11 @@ static pmd_t *fill_pmd(pud_t *pud, unsig { if (pud_none(*pud)) { pmd_t *pmd = (pmd_t *) spp_getpage(); @@ -365,7 +377,7 @@ Patch-mainline: obsolete if (pmd != pmd_offset(pud, 0)) printk(KERN_ERR "PAGETABLE BUG #01! 
%p <-> %p\n", pmd, pmd_offset(pud, 0)); -@@ -541,7 +547,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned +@@ -578,7 +584,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned XENFEAT_writable_page_tables); *pmd = __pmd(pte_phys | _PAGE_TABLE); } else { @@ -373,7 +385,7 @@ Patch-mainline: obsolete spin_lock(&init_mm.page_table_lock); pmd_populate_kernel(&init_mm, pmd, __va(pte_phys)); spin_unlock(&init_mm.page_table_lock); -@@ -630,7 +635,6 @@ phys_pud_init(pud_t *pud_page, unsigned +@@ -667,7 +672,6 @@ phys_pud_init(pud_t *pud_page, unsigned else *pud = __pud(pmd_phys | _PAGE_TABLE); } else { @@ -381,7 +393,7 @@ Patch-mainline: obsolete spin_lock(&init_mm.page_table_lock); pud_populate(&init_mm, pud, __va(pmd_phys)); spin_unlock(&init_mm.page_table_lock); -@@ -804,7 +808,6 @@ kernel_physical_mapping_init(unsigned lo +@@ -843,7 +847,6 @@ kernel_physical_mapping_init(unsigned lo XENFEAT_writable_page_tables); xen_l4_entry_update(pgd, __pgd(pud_phys | _PAGE_TABLE)); } else { @@ -389,7 +401,7 @@ Patch-mainline: obsolete spin_lock(&init_mm.page_table_lock); pgd_populate(&init_mm, pgd, __va(pud_phys)); spin_unlock(&init_mm.page_table_lock); -@@ -869,7 +872,7 @@ void __init paging_init(void) +@@ -892,7 +895,7 @@ void __init paging_init(void) free_area_init_nodes(max_zone_pfns); @@ -398,8 +410,8 @@ Patch-mainline: obsolete } /* ---- head-2010-04-15.orig/arch/x86/mm/pgtable-xen.c 2010-04-15 10:53:40.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/pgtable-xen.c 2010-04-15 11:47:53.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-03-17 14:26:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-03-17 14:34:34.000000000 +0100 @@ -66,16 +66,16 @@ early_param("userpte", setup_userpte); void __pte_free(pgtable_t pte) { @@ -556,7 +568,7 @@ Patch-mainline: obsolete static void __pgd_pin(pgd_t *pgd) { pgd_walk(pgd, PAGE_KERNEL_RO); -@@ -498,21 +553,18 @@ static void pgd_dtor(pgd_t *pgd) +@@ -506,21 +561,18 @@ static void pgd_dtor(pgd_t *pgd) void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) { @@ -588,7 +600,7 @@ Patch-mainline: obsolete /* * According to Intel App note "TLBs, Paging-Structure Caches, -@@ -607,13 +659,10 @@ static void pgd_prepopulate_pmd(struct m +@@ -615,13 +667,10 @@ static void pgd_prepopulate_pmd(struct m i++, pud++, addr += PUD_SIZE) { pmd_t *pmd = pmds[i]; diff --git a/patches.xen/xen-x86_64-dump-user-pgt b/patches.xen/xen-x86_64-dump-user-pgt index 4028a75..e31273b 100644 --- a/patches.xen/xen-x86_64-dump-user-pgt +++ b/patches.xen/xen-x86_64-dump-user-pgt @@ -2,9 +2,9 @@ From: jbeulich@novell.com Subject: dump the correct page tables for user mode faults Patch-mainline: obsolete ---- head-2010-03-15.orig/arch/x86/mm/fault-xen.c 2010-01-28 10:38:23.000000000 +0100 -+++ head-2010-03-15/arch/x86/mm/fault-xen.c 2010-01-25 14:00:59.000000000 +0100 -@@ -329,6 +329,7 @@ static void dump_pagetable(unsigned long +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-03-17 14:22:21.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-03-17 14:35:18.000000000 +0100 +@@ -345,6 +345,7 @@ static void dump_pagetable(unsigned long out: printk(KERN_CONT "\n"); } @@ -12,7 +12,7 @@ Patch-mainline: obsolete #else /* CONFIG_X86_64: */ -@@ -453,7 +454,7 @@ static int bad_address(void *p) +@@ -449,7 +450,7 @@ static int bad_address(void *p) return probe_kernel_address((unsigned long *)p, dummy); } @@ -21,7 +21,7 @@ Patch-mainline: obsolete { pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); pgd_t *pgd = base + pgd_index(address); -@@ -461,6 
+462,9 @@ static void dump_pagetable(unsigned long +@@ -457,6 +458,9 @@ static void dump_pagetable(unsigned long pmd_t *pmd; pte_t *pte; @@ -31,7 +31,7 @@ Patch-mainline: obsolete if (bad_address(pgd)) goto bad; -@@ -599,7 +603,7 @@ show_fault_oops(struct pt_regs *regs, un +@@ -595,7 +599,7 @@ show_fault_oops(struct pt_regs *regs, un printk(KERN_ALERT "IP:"); printk_address(regs->ip, 1); @@ -40,7 +40,7 @@ Patch-mainline: obsolete } static noinline void -@@ -616,7 +620,7 @@ pgtable_bad(struct pt_regs *regs, unsign +@@ -612,7 +616,7 @@ pgtable_bad(struct pt_regs *regs, unsign printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", tsk->comm, address); diff --git a/patches.xen/xen-x86_64-note-init-p2m b/patches.xen/xen-x86_64-note-init-p2m index ba095f9..cee1511 100644 --- a/patches.xen/xen-x86_64-note-init-p2m +++ b/patches.xen/xen-x86_64-note-init-p2m @@ -18,25 +18,25 @@ shouldn't be as expensive (and hence can be viewed as an optimization avoiding the spurious page fault on the local CPU), but is required when the functions are used before the page fault handler gets set up. ---- head-2010-05-12.orig/arch/x86/kernel/head64-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/head64-xen.c 2010-03-25 14:48:29.000000000 +0100 -@@ -121,6 +121,14 @@ void __init x86_64_start_reservations(ch +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-03 14:42:41.000000000 +0100 +@@ -124,6 +124,14 @@ void __init x86_64_start_reservations(ch - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + if (xen_feature(XENFEAT_auto_translated_physmap)) + xen_start_info->mfn_list = ~0UL; + else if (xen_start_info->mfn_list < __START_KERNEL_map) -+ reserve_early(xen_start_info->first_p2m_pfn << PAGE_SHIFT, -+ (xen_start_info->first_p2m_pfn -+ + xen_start_info->nr_p2m_frames) << PAGE_SHIFT, -+ "INITP2M"); ++ memblock_x86_reserve_range(xen_start_info->first_p2m_pfn << PAGE_SHIFT, ++ (xen_start_info->first_p2m_pfn ++ + xen_start_info->nr_p2m_frames) << PAGE_SHIFT, ++ "INITP2M"); + /* * At this point everything still needed from the boot loader * or BIOS or kernel text should be early reserved or marked not ---- head-2010-05-12.orig/arch/x86/kernel/head_64-xen.S 2010-03-25 14:46:03.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/head_64-xen.S 2010-03-25 14:48:29.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:36.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:41.000000000 +0100 @@ -17,6 +17,7 @@ #include #include @@ -50,12 +50,12 @@ when the functions are used before the page fault handler gets set up. 
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad hypercall_page) ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT, _PAGE_PRESENT) + ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad VMEMMAP_START) - ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel") + ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel") ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1) ---- head-2010-05-12.orig/arch/x86/kernel/setup-xen.c 2010-04-15 11:48:03.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/setup-xen.c 2010-04-15 11:49:47.000000000 +0200 -@@ -1152,7 +1152,7 @@ void __init setup_arch(char **cmdline_p) +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:11.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:41.000000000 +0100 +@@ -1173,7 +1173,7 @@ void __init setup_arch(char **cmdline_p) difference = xen_start_info->nr_pages - max_pfn; set_xen_guest_handle(reservation.extent_start, @@ -64,7 +64,7 @@ when the functions are used before the page fault handler gets set up. reservation.nr_extents = difference; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); -@@ -1169,14 +1169,86 @@ void __init setup_arch(char **cmdline_p) +@@ -1190,14 +1190,86 @@ void __init setup_arch(char **cmdline_p) phys_to_machine_mapping = alloc_bootmem_pages( max_pfn * sizeof(unsigned long)); memcpy(phys_to_machine_mapping, @@ -156,9 +156,9 @@ when the functions are used before the page fault handler gets set up. /* * Initialise the list of the frames that specify the list of ---- head-2010-05-12.orig/arch/x86/mm/init-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-05-12/arch/x86/mm/init-xen.c 2010-04-15 11:49:33.000000000 +0200 -@@ -339,9 +339,22 @@ unsigned long __init_refok init_memory_m +--- head-2011-03-17.orig/arch/x86/mm/init-xen.c 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-03 14:42:41.000000000 +0100 +@@ -340,9 +340,22 @@ unsigned long __init_refok init_memory_m __flush_tlb_all(); @@ -168,25 +168,25 @@ when the functions are used before the page fault handler gets set up. 
+ if (xen_start_info->mfn_list < __START_KERNEL_map + && e820_table_start <= xen_start_info->first_p2m_pfn + && e820_table_top > xen_start_info->first_p2m_pfn) { -+ reserve_early(e820_table_start << PAGE_SHIFT, -+ xen_start_info->first_p2m_pfn -+ << PAGE_SHIFT, -+ "PGTABLE"); ++ memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, ++ xen_start_info->first_p2m_pfn ++ << PAGE_SHIFT, ++ "PGTABLE"); + e820_table_start = xen_start_info->first_p2m_pfn + + xen_start_info->nr_p2m_frames; + } +#endif - reserve_early(e820_table_start << PAGE_SHIFT, + memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT, "PGTABLE"); + } if (!after_bootmem) early_memtest(start, end); ---- head-2010-05-12.orig/arch/x86/mm/init_64-xen.c 2010-04-15 11:49:18.000000000 +0200 -+++ head-2010-05-12/arch/x86/mm/init_64-xen.c 2010-04-15 11:49:32.000000000 +0200 -@@ -183,6 +183,17 @@ static int __init nonx32_setup(char *str +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:36.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:41.000000000 +0100 +@@ -220,6 +220,17 @@ void sync_global_pgds(unsigned long star + } } - __setup("noexec32=", nonx32_setup); +static __init unsigned long get_table_end(void) +{ @@ -202,17 +202,17 @@ when the functions are used before the page fault handler gets set up. /* * NOTE: This function is marked __ref because it calls __init function * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. -@@ -194,8 +205,7 @@ static __ref void *spp_getpage(void) +@@ -231,8 +242,7 @@ static __ref void *spp_getpage(void) if (after_bootmem) ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); else if (e820_table_end < e820_table_top) { - ptr = __va(e820_table_end << PAGE_SHIFT); - e820_table_end++; + ptr = __va(get_table_end() << PAGE_SHIFT); - memset(ptr, 0, PAGE_SIZE); + clear_page(ptr); } else ptr = alloc_bootmem_pages(PAGE_SIZE); -@@ -390,8 +400,7 @@ static __ref void *alloc_low_page(unsign +@@ -427,8 +437,7 @@ static __ref void *alloc_low_page(unsign return adr; } @@ -222,7 +222,7 @@ when the functions are used before the page fault handler gets set up. if (pfn >= e820_table_top) panic("alloc_low_page: ran out of memory"); -@@ -417,14 +426,29 @@ static inline int __meminit make_readonl +@@ -454,14 +463,29 @@ static inline int __meminit make_readonl /* Make new page tables read-only on the first pass. */ if (!xen_feature(XENFEAT_writable_page_tables) && !max_pfn_mapped @@ -255,7 +255,7 @@ when the functions are used before the page fault handler gets set up. /* * No need for writable mapping of kernel image. This also ensures that -@@ -724,6 +748,12 @@ void __init xen_init_pt(void) +@@ -761,6 +785,12 @@ void __init xen_init_pt(void) (PTRS_PER_PUD - pud_index(__START_KERNEL_map)) * sizeof(*level3_kernel_pgt)); @@ -268,7 +268,7 @@ when the functions are used before the page fault handler gets set up. /* Do an early initialization of the fixmap area. */ addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE); if (pud_present(level3_kernel_pgt[pud_index(addr)])) { -@@ -755,22 +785,27 @@ void __init xen_init_pt(void) +@@ -792,22 +822,27 @@ void __init xen_init_pt(void) void __init xen_finish_init_mapping(void) { unsigned long start, end; @@ -300,9 +300,9 @@ when the functions are used before the page fault handler gets set up. /* Destroy the Xen-created mappings beyond the kernel image. 
*/ start = PAGE_ALIGN(_brk_end); end = __START_KERNEL_map + (e820_table_start << PAGE_SHIFT); ---- head-2010-05-12.orig/arch/x86/mm/pageattr-xen.c 2010-03-25 14:37:41.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/pageattr-xen.c 2010-03-25 14:48:29.000000000 +0100 -@@ -1465,7 +1465,7 @@ static void __make_page_writable(unsigne +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-03-17 14:33:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-03-17 14:35:24.000000000 +0100 +@@ -1500,7 +1500,7 @@ static void __make_page_writable(unsigne pte = lookup_address(va, &level); BUG_ON(!pte || level != PG_LEVEL_4K); @@ -311,8 +311,8 @@ when the functions are used before the page fault handler gets set up. BUG(); if (in_secondary_range(va)) { unsigned long pfn = pte_pfn(*pte); ---- head-2010-05-12.orig/arch/x86/mm/pgtable-xen.c 2010-04-15 11:49:15.000000000 +0200 -+++ head-2010-05-12/arch/x86/mm/pgtable-xen.c 2010-04-15 11:49:41.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-03-17 14:35:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-02-03 14:42:41.000000000 +0100 @@ -344,7 +344,7 @@ void __init xen_init_pgd_pin(void) if (PTRS_PER_PUD > 1) /* not folded */ SetPagePinned(virt_to_page(pud)); @@ -331,9 +331,9 @@ when the functions are used before the page fault handler gets set up. continue; SetPagePinned(pmd_page(*pmd)); } ---- head-2010-05-12.orig/arch/x86/mm/pgtable_32-xen.c 2010-05-12 09:09:25.000000000 +0200 -+++ head-2010-05-12/arch/x86/mm/pgtable_32-xen.c 2010-05-12 09:15:36.000000000 +0200 -@@ -175,6 +175,6 @@ void make_lowmem_page_writable(void *va, +--- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-02-03 14:42:41.000000000 +0100 +@@ -174,6 +174,6 @@ void make_lowmem_page_writable(void *va, pte = lookup_address((unsigned long)va, &level); BUG_ON(!pte || level != PG_LEVEL_4K || !pte_present(*pte)); rc = HYPERVISOR_update_va_mapping( diff --git a/patches.xen/xen-x86_64-pgd-alloc-order b/patches.xen/xen-x86_64-pgd-alloc-order index 4c1d699..fb5a46e 100644 --- a/patches.xen/xen-x86_64-pgd-alloc-order +++ b/patches.xen/xen-x86_64-pgd-alloc-order @@ -4,9 +4,9 @@ Patch-mainline: n/a At the same time remove the useless user mode pair of init_level4_pgt. ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:45:56.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:46:03.000000000 +0100 -@@ -102,8 +102,8 @@ void do_hypervisor_callback(struct pt_re +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-11-23 16:31:40.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:42:36.000000000 +0100 +@@ -106,8 +106,8 @@ void do_hypervisor_callback(struct pt_re * be MACHINE addresses. */ @@ -17,7 +17,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); -@@ -111,7 +111,7 @@ void xen_invlpg(unsigned long ptr); +@@ -115,7 +115,7 @@ void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ @@ -26,8 +26,8 @@ At the same time remove the useless user mode pair of init_level4_pgt. 
void xen_pgd_pin(pgd_t *); void xen_pgd_unpin(pgd_t *); ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-25 14:46:03.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-08 10:25:49.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-08 10:46:27.000000000 +0100 @@ -82,6 +82,9 @@ static inline void switch_mm(struct mm_s { unsigned cpu = smp_processor_id(); @@ -38,7 +38,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. if (likely(prev != next)) { BUG_ON(!xen_feature(XENFEAT_writable_page_tables) && -@@ -100,10 +103,11 @@ static inline void switch_mm(struct mm_s +@@ -98,10 +101,11 @@ static inline void switch_mm(struct mm_s op->arg1.mfn = virt_to_mfn(next->pgd); op++; @@ -52,7 +52,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. op++; #endif -@@ -131,7 +135,7 @@ static inline void switch_mm(struct mm_s +@@ -132,7 +136,7 @@ static inline void switch_mm(struct mm_s * to make sure to use no freed page tables. */ load_cr3(next->pgd); @@ -61,8 +61,8 @@ At the same time remove the useless user mode pair of init_level4_pgt. load_LDT_nolock(&next->context); } } ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-25 14:41:00.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-25 14:46:03.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-03 14:41:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-03 14:42:36.000000000 +0100 @@ -123,15 +123,13 @@ static inline void pud_populate(struct m #endif /* CONFIG_X86_PAE */ @@ -80,9 +80,9 @@ At the same time remove the useless user mode pair of init_level4_pgt. else *__user_pgd(pgd) = *pgd = ent; } ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-25 14:41:15.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-25 14:46:03.000000000 +0100 -@@ -100,18 +100,25 @@ static inline void xen_set_pud(pud_t *pu +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:42:15.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:42:36.000000000 +0100 +@@ -111,18 +111,25 @@ static inline void xen_set_pud(pud_t *pu : (void)(*__pudp = xen_make_pud(0)); \ }) @@ -92,7 +92,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. + if (unlikely(((unsigned long)pgd & PAGE_MASK) + == (unsigned long)init_level4_pgt)) + return NULL; -+ return (pgd_t *)(virt_to_page(pgd)->index ++ return (pgd_t *)(virt_to_page(pgd)->private + + ((unsigned long)pgd & ~PAGE_MASK)); +} @@ -111,9 +111,9 @@ At the same time remove the useless user mode pair of init_level4_pgt. 
: (void)(*__user_pgd(__pgdp) = *__pgdp = xen_make_pgd(0)); \ }) ---- head-2010-04-15.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-25 14:41:15.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/cpu/common-xen.c 2010-03-25 14:46:03.000000000 +0100 -@@ -1037,8 +1037,7 @@ DEFINE_PER_CPU_FIRST(union irq_stack_uni +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:44:07.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:44:15.000000000 +0100 +@@ -1064,8 +1064,7 @@ DEFINE_PER_CPU_FIRST(union irq_stack_uni void xen_switch_pt(void) { #ifdef CONFIG_XEN @@ -123,8 +123,8 @@ At the same time remove the useless user mode pair of init_level4_pgt. #endif } ---- head-2010-04-15.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-04-15/arch/x86/kernel/head_64-xen.S 2010-03-25 14:46:03.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:36.000000000 +0100 @@ -56,14 +56,6 @@ ENTRY(name) __PAGE_ALIGNED_BSS NEXT_PAGE(init_level4_pgt) @@ -140,9 +140,9 @@ At the same time remove the useless user mode pair of init_level4_pgt. NEXT_PAGE(level3_kernel_pgt) .fill 512,8,0 ---- head-2010-04-15.orig/arch/x86/mm/hypervisor.c 2010-03-25 17:55:14.000000000 +0100 -+++ head-2010-04-15/arch/x86/mm/hypervisor.c 2010-03-25 17:55:21.000000000 +0100 -@@ -524,7 +524,7 @@ void xen_l3_entry_update(pud_t *ptr, pud +--- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2010-12-08 10:45:40.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-03 14:42:36.000000000 +0100 +@@ -521,7 +521,7 @@ void xen_l3_entry_update(pud_t *ptr, pud #endif #ifdef CONFIG_X86_64 @@ -151,7 +151,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. { mmu_update_t u[2]; struct page *page = NULL; -@@ -537,8 +537,11 @@ void xen_l4_entry_update(pgd_t *ptr, int +@@ -534,8 +534,11 @@ void xen_l4_entry_update(pgd_t *ptr, int } u[0].ptr = virt_to_machine(ptr); u[0].val = __pgd_val(val); @@ -165,7 +165,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. u[1].val = __pgd_val(val); do_lN_entry_update(u, 2, page); } else -@@ -546,21 +549,25 @@ void xen_l4_entry_update(pgd_t *ptr, int +@@ -543,21 +546,25 @@ void xen_l4_entry_update(pgd_t *ptr, int } #endif /* CONFIG_X86_64 */ @@ -195,7 +195,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. void xen_tlb_flush(void) { -@@ -637,7 +644,14 @@ void xen_pgd_pin(pgd_t *pgd) +@@ -634,7 +641,14 @@ void xen_pgd_pin(pgd_t *pgd) op[0].arg1.mfn = virt_to_mfn(pgd); #ifdef CONFIG_X86_64 op[1].cmd = op[0].cmd = MMUEXT_PIN_L4_TABLE; @@ -211,7 +211,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. #endif if (HYPERVISOR_mmuext_op(op, NR_PGD_PIN_OPS, NULL, DOMID_SELF) < 0) BUG(); -@@ -650,8 +664,10 @@ void xen_pgd_unpin(pgd_t *pgd) +@@ -647,8 +661,10 @@ void xen_pgd_unpin(pgd_t *pgd) op[0].cmd = MMUEXT_UNPIN_TABLE; op[0].arg1.mfn = virt_to_mfn(pgd); #ifdef CONFIG_X86_64 @@ -223,9 +223,9 @@ At the same time remove the useless user mode pair of init_level4_pgt. 
#endif if (HYPERVISOR_mmuext_op(op, NR_PGD_PIN_OPS, NULL, DOMID_SELF) < 0) BUG(); ---- head-2010-04-15.orig/arch/x86/mm/init_64-xen.c 2010-04-15 11:49:06.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/init_64-xen.c 2010-04-15 11:49:18.000000000 +0200 -@@ -724,9 +724,6 @@ void __init xen_init_pt(void) +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2010-11-23 16:31:40.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:36.000000000 +0100 +@@ -761,9 +761,6 @@ void __init xen_init_pt(void) (PTRS_PER_PUD - pud_index(__START_KERNEL_map)) * sizeof(*level3_kernel_pgt)); @@ -235,7 +235,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. /* Do an early initialization of the fixmap area. */ addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE); if (pud_present(level3_kernel_pgt[pud_index(addr)])) { -@@ -742,8 +739,6 @@ void __init xen_init_pt(void) +@@ -779,8 +776,6 @@ void __init xen_init_pt(void) early_make_page_readonly(init_level4_pgt, XENFEAT_writable_page_tables); @@ -244,8 +244,8 @@ At the same time remove the useless user mode pair of init_level4_pgt. early_make_page_readonly(level3_kernel_pgt, XENFEAT_writable_page_tables); early_make_page_readonly(level3_user_pgt, ---- head-2010-04-15.orig/arch/x86/mm/pgtable-xen.c 2010-04-15 11:49:08.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/pgtable-xen.c 2010-04-15 11:49:15.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2010-11-23 16:31:40.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-03-17 14:35:10.000000000 +0100 @@ -291,9 +291,11 @@ static void pgd_walk(pgd_t *pgd_base, pg BUG(); seq = 0; @@ -260,7 +260,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. 0); MULTI_update_va_mapping(mcl + seq + 1, (unsigned long)pgd_base, -@@ -681,12 +683,29 @@ static void pgd_prepopulate_pmd(struct m +@@ -689,19 +691,37 @@ static void pgd_prepopulate_pmd(struct m } } @@ -275,7 +275,8 @@ At the same time remove the useless user mode pair of init_level4_pgt. + pgd_t *upgd = (void *)__get_free_page(PGALLOC_GFP); + + if (upgd) -+ virt_to_page(pgd)->index = (long)upgd; ++ set_page_private(virt_to_page(pgd), ++ (unsigned long)upgd); + else { + free_page((unsigned long)pgd); + pgd = NULL; @@ -288,22 +289,21 @@ At the same time remove the useless user mode pair of init_level4_pgt. +static inline void user_pgd_free(pgd_t *pgd) +{ +#ifdef CONFIG_X86_64 -+ free_page(virt_to_page(pgd)->index); ++ free_page(page_private(virt_to_page(pgd))); #endif +} pgd_t *pgd_alloc(struct mm_struct *mm) { -@@ -694,7 +713,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + pgd_t *pgd; pmd_t *pmds[PREALLOCATED_PMDS]; - unsigned long flags; - pgd = (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ORDER); + pgd = user_pgd_alloc((void *)__get_free_page(PGALLOC_GFP)); if (pgd == NULL) goto out; -@@ -733,7 +752,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm) +@@ -740,7 +760,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm) out_free_pmds: free_pmds(pmds, mm, !xen_feature(XENFEAT_pae_pgdir_above_4gb)); out_free_pgd: @@ -313,7 +313,7 @@ At the same time remove the useless user mode pair of init_level4_pgt. out: return NULL; } -@@ -752,7 +772,8 @@ void pgd_free(struct mm_struct *mm, pgd_ +@@ -759,7 +780,8 @@ void pgd_free(struct mm_struct *mm, pgd_ pgd_mop_up_pmds(mm, pgd); paravirt_pgd_free(mm, pgd); @@ -323,9 +323,9 @@ At the same time remove the useless user mode pair of init_level4_pgt. 
} /* blktap and gntdev need this, as otherwise they would implicitly (and ---- head-2010-04-15.orig/drivers/xen/core/machine_reboot.c 2010-03-25 14:41:15.000000000 +0100 -+++ head-2010-04-15/drivers/xen/core/machine_reboot.c 2010-03-25 14:46:03.000000000 +0100 -@@ -191,8 +191,7 @@ static int take_machine_down(void *_susp +--- head-2011-03-17.orig/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:15.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:36.000000000 +0100 +@@ -186,8 +186,7 @@ static int take_machine_down(void *_susp * in fast-suspend mode as that implies a new enough Xen. */ if (!suspend->fast_suspend) diff --git a/patches.xen/xen-x86_64-pgd-pin b/patches.xen/xen-x86_64-pgd-pin index 221ddb2..b025956 100644 --- a/patches.xen/xen-x86_64-pgd-pin +++ b/patches.xen/xen-x86_64-pgd-pin @@ -2,9 +2,9 @@ From: jbeulich@novell.com Subject: make pinning of pgd pairs transparent to callers Patch-mainline: obsolete ---- head-2010-04-15.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:41:15.000000000 +0100 -+++ head-2010-04-15/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-25 14:45:56.000000000 +0100 -@@ -112,8 +112,8 @@ void xen_l1_entry_update(pte_t *ptr, pte +--- head-2010-12-08.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-11-23 16:30:41.000000000 +0100 ++++ head-2010-12-08/arch/x86/include/mach-xen/asm/hypervisor.h 2010-11-23 16:31:40.000000000 +0100 +@@ -116,8 +116,8 @@ void xen_l1_entry_update(pte_t *ptr, pte void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val); /* x86_64 only */ @@ -15,9 +15,9 @@ Patch-mainline: obsolete void xen_init_pgd_pin(void); ---- head-2010-04-15.orig/arch/x86/mm/hypervisor.c 2010-03-25 14:41:15.000000000 +0100 -+++ head-2010-04-15/arch/x86/mm/hypervisor.c 2010-03-25 17:55:14.000000000 +0100 -@@ -623,26 +623,38 @@ EXPORT_SYMBOL_GPL(xen_invlpg_mask); +--- head-2010-12-08.orig/arch/x86/mm/hypervisor.c 2010-11-23 16:30:41.000000000 +0100 ++++ head-2010-12-08/arch/x86/mm/hypervisor.c 2010-12-08 10:45:40.000000000 +0100 +@@ -620,26 +620,38 @@ EXPORT_SYMBOL_GPL(xen_invlpg_mask); #endif /* CONFIG_SMP */ @@ -70,9 +70,9 @@ Patch-mainline: obsolete } void xen_set_ldt(const void *ptr, unsigned int ents) ---- head-2010-04-15.orig/arch/x86/mm/init_64-xen.c 2010-04-15 11:48:38.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/init_64-xen.c 2010-04-15 11:49:06.000000000 +0200 -@@ -753,10 +753,8 @@ void __init xen_init_pt(void) +--- head-2010-12-08.orig/arch/x86/mm/init_64-xen.c 2010-11-23 16:30:41.000000000 +0100 ++++ head-2010-12-08/arch/x86/mm/init_64-xen.c 2010-11-23 16:31:40.000000000 +0100 +@@ -790,10 +790,8 @@ void __init xen_init_pt(void) early_make_page_readonly(level1_fixmap_pgt, XENFEAT_writable_page_tables); @@ -85,8 +85,8 @@ Patch-mainline: obsolete } void __init xen_finish_init_mapping(void) ---- head-2010-04-15.orig/arch/x86/mm/pgtable-xen.c 2010-04-15 11:48:29.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/pgtable-xen.c 2010-04-15 11:49:08.000000000 +0200 +--- head-2010-12-08.orig/arch/x86/mm/pgtable-xen.c 2010-04-15 11:48:29.000000000 +0200 ++++ head-2010-12-08/arch/x86/mm/pgtable-xen.c 2010-11-23 16:31:40.000000000 +0100 @@ -368,19 +368,13 @@ static void __pgd_pin(pgd_t *pgd) { pgd_walk(pgd, PAGE_KERNEL_RO); diff --git a/patches.xen/xen-x86_64-unmapped-initrd b/patches.xen/xen-x86_64-unmapped-initrd new file mode 100644 index 0000000..94cc7b8 --- /dev/null +++ 
b/patches.xen/xen-x86_64-unmapped-initrd @@ -0,0 +1,252 @@ +From: jbeulich@novell.com +Subject: eliminate scalability issues from initrd handling +Patch-mainline: n/a + +Size restrictions native kernels wouldn't have resulted from the initrd +getting mapped into the initial mapping. The kernel doesn't really need +the initrd to be mapped, so use new infrastructure available in 4.1+ +Xen to avoid the mapping and hence the restriction. + +--- head-2011-01-30.orig/arch/x86/include/mach-xen/asm/setup.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-01-30/arch/x86/include/mach-xen/asm/setup.h 2011-02-03 14:42:45.000000000 +0100 +@@ -3,6 +3,13 @@ + void xen_start_kernel(void); + void xen_arch_setup(void); + ++#ifdef CONFIG_X86_64 ++void reserve_pfn_range(unsigned long pfn, unsigned long nr, char *); ++void reserve_pgtable_low(void); ++#endif ++ ++extern unsigned long xen_initrd_start; ++ + #endif + + #include_next +--- head-2011-01-30.orig/arch/x86/kernel/head-xen.c 2011-02-03 14:42:15.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/head-xen.c 2011-02-03 14:42:45.000000000 +0100 +@@ -75,6 +75,8 @@ extern void nmi(void); + #define CALLBACK_ADDR(fn) { __KERNEL_CS, (unsigned long)(fn) } + #endif + ++unsigned long __initdata xen_initrd_start; ++ + unsigned long *__read_mostly machine_to_phys_mapping = + (void *)MACH2PHYS_VIRT_START; + EXPORT_SYMBOL(machine_to_phys_mapping); +--- head-2011-01-30.orig/arch/x86/kernel/head32-xen.c 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/head32-xen.c 2011-02-03 14:42:45.000000000 +0100 +@@ -89,6 +89,11 @@ void __init i386_start_kernel(void) + break; + } + #else ++#ifdef CONFIG_BLK_DEV_INITRD ++ BUG_ON(xen_start_info->flags & SIF_MOD_START_PFN); ++ if (xen_start_info->mod_start) ++ xen_initrd_start = __pa(xen_start_info->mod_start); ++#endif + { + int max_cmdline; + +--- head-2011-01-30.orig/arch/x86/kernel/head64-xen.c 2011-02-03 14:42:41.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/head64-xen.c 2011-02-03 14:42:45.000000000 +0100 +@@ -124,13 +124,23 @@ void __init x86_64_start_reservations(ch + + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + ++#ifdef CONFIG_BLK_DEV_INITRD ++ /* Reserve INITRD if needed. 
*/ ++ if (xen_start_info->flags & SIF_MOD_START_PFN) { ++ reserve_pfn_range(xen_start_info->mod_start, ++ PFN_UP(xen_start_info->mod_len), ++ "RAMDISK"); ++ xen_initrd_start = xen_start_info->mod_start << PAGE_SHIFT; ++ } else if (xen_start_info->mod_start) ++ xen_initrd_start = __pa(xen_start_info->mod_start); ++#endif ++ + if (xen_feature(XENFEAT_auto_translated_physmap)) + xen_start_info->mfn_list = ~0UL; + else if (xen_start_info->mfn_list < __START_KERNEL_map) +- memblock_x86_reserve_range(xen_start_info->first_p2m_pfn << PAGE_SHIFT, +- (xen_start_info->first_p2m_pfn +- + xen_start_info->nr_p2m_frames) << PAGE_SHIFT, +- "INITP2M"); ++ reserve_pfn_range(xen_start_info->first_p2m_pfn, ++ xen_start_info->nr_p2m_frames, ++ "INITP2M"); + + /* + * At this point everything still needed from the boot loader +--- head-2011-01-30.orig/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:41.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:45.000000000 +0100 +@@ -147,6 +147,7 @@ ENTRY(empty_zero_page) + ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad startup_64) + ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad hypercall_page) + ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT, _PAGE_PRESENT) ++ ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN, .long 1) + ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad VMEMMAP_START) + ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel") + ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") +--- head-2011-01-30.orig/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:41.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:45.000000000 +0100 +@@ -406,7 +406,7 @@ static void __init relocate_initrd(void) + #else + printk(KERN_ERR "initrd extends beyond end of memory " + "(0x%08lx > 0x%08lx)\ndisabling initrd\n", +- __pa(xen_start_info->mod_start) + xen_start_info->mod_len, ++ xen_initrd_start + xen_start_info->mod_len, + max_low_pfn_mapped << PAGE_SHIFT); + initrd_start = 0; + #endif +@@ -425,7 +425,7 @@ static void __init reserve_initrd(void) + !ramdisk_image || !ramdisk_size) + return; /* No initrd provided by bootloader */ + #else +- unsigned long ramdisk_image = __pa(xen_start_info->mod_start); ++ unsigned long ramdisk_image = xen_initrd_start; + unsigned long ramdisk_size = xen_start_info->mod_len; + unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); + unsigned long end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; +--- head-2011-01-30.orig/arch/x86/mm/init-xen.c 2011-02-03 14:42:41.000000000 +0100 ++++ head-2011-01-30/arch/x86/mm/init-xen.c 2011-02-03 14:42:45.000000000 +0100 +@@ -342,16 +342,7 @@ unsigned long __init_refok init_memory_m + + if (!after_bootmem && e820_table_top > e820_table_start) { + #ifdef CONFIG_X86_64 +- if (xen_start_info->mfn_list < __START_KERNEL_map +- && e820_table_start <= xen_start_info->first_p2m_pfn +- && e820_table_top > xen_start_info->first_p2m_pfn) { +- memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, +- xen_start_info->first_p2m_pfn +- << PAGE_SHIFT, +- "PGTABLE"); +- e820_table_start = xen_start_info->first_p2m_pfn +- + xen_start_info->nr_p2m_frames; +- } ++ reserve_pgtable_low(); + #endif + memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, + e820_table_top << PAGE_SHIFT, "PGTABLE"); +--- head-2011-01-30.orig/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:41.000000000 +0100 ++++ head-2011-01-30/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:45.000000000 +0100 +@@ -220,13 +220,73 
@@ void sync_global_pgds(unsigned long star + } + } + ++static struct reserved_pfn_range { ++ unsigned long pfn, nr; ++} reserved_pfn_ranges[3] __meminitdata; ++ ++void __init reserve_pfn_range(unsigned long pfn, unsigned long nr, char *name) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { ++ struct reserved_pfn_range *range = reserved_pfn_ranges + i; ++ ++ if (!range->nr) { ++ range->pfn = pfn; ++ range->nr = nr; ++ break; ++ } ++ BUG_ON(range->pfn < pfn + nr && pfn < range->pfn + range->nr); ++ if (range->pfn > pfn) { ++ i = ARRAY_SIZE(reserved_pfn_ranges) - 1; ++ if (reserved_pfn_ranges[i].nr) ++ continue; ++ for (; reserved_pfn_ranges + i > range; --i) ++ reserved_pfn_ranges[i] ++ = reserved_pfn_ranges[i - 1]; ++ range->pfn = pfn; ++ range->nr = nr; ++ break; ++ } ++ } ++ BUG_ON(i >= ARRAY_SIZE(reserved_pfn_ranges)); ++ memblock_x86_reserve_range(pfn << PAGE_SHIFT, ++ (pfn + nr) << PAGE_SHIFT, name); ++} ++ ++void __init reserve_pgtable_low(void) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { ++ struct reserved_pfn_range *range = reserved_pfn_ranges + i; ++ ++ if (!range->nr) ++ break; ++ if (e820_table_start <= range->pfn ++ && e820_table_top > range->pfn) { ++ memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, ++ range->pfn << PAGE_SHIFT, ++ "PGTABLE"); ++ e820_table_start = range->pfn + range->nr; ++ } ++ } ++} ++ + static __init unsigned long get_table_end(void) + { ++ unsigned int i; ++ + BUG_ON(!e820_table_end); +- if (xen_start_info->mfn_list < __START_KERNEL_map +- && e820_table_end == xen_start_info->first_p2m_pfn) { +- e820_table_end += xen_start_info->nr_p2m_frames; +- e820_table_top += xen_start_info->nr_p2m_frames; ++ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { ++ struct reserved_pfn_range *range = reserved_pfn_ranges + i; ++ ++ if (!range->nr) ++ break; ++ if (e820_table_end == range->pfn) { ++ e820_table_end += range->nr; ++ e820_table_top += range->nr; ++ } + } + return e820_table_end++; + } +@@ -465,14 +525,25 @@ static inline int __meminit make_readonl + && !max_pfn_mapped + && (paddr >= (e820_table_start << PAGE_SHIFT))) { + unsigned long top = e820_table_top; ++ unsigned int i; ++ ++ /* Account for the ranges get_table_end() skips. */ ++ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { ++ const struct reserved_pfn_range *range; + +- /* Account for the range get_table_end() skips. */ +- if (xen_start_info->mfn_list < __START_KERNEL_map +- && e820_table_end <= xen_start_info->first_p2m_pfn +- && top > xen_start_info->first_p2m_pfn) +- top += xen_start_info->nr_p2m_frames; ++ range = reserved_pfn_ranges + i; ++ if (!range->nr) ++ continue; ++ if (e820_table_end <= range->pfn && top > range->pfn) { ++ if (paddr > (range->pfn << PAGE_SHIFT) ++ && paddr < ((range->pfn + range->nr) ++ << PAGE_SHIFT)) ++ break; ++ top += range->nr; ++ } ++ } + if (paddr < (top << PAGE_SHIFT)) +- readonly = 1; ++ readonly = (i >= ARRAY_SIZE(reserved_pfn_ranges)); + } + /* Make old page tables read-only. */ + if (!xen_feature(XENFEAT_writable_page_tables) +@@ -833,9 +904,6 @@ void __init xen_finish_init_mapping(void + && xen_start_info->mfn_list >= __START_KERNEL_map) + phys_to_machine_mapping = + __va(__pa(xen_start_info->mfn_list)); +- if (xen_start_info->mod_start) +- xen_start_info->mod_start = (unsigned long) +- __va(__pa(xen_start_info->mod_start)); + + /* Unpin the no longer used Xen provided page tables. 
*/ + mmuext.cmd = MMUEXT_UNPIN_TABLE; diff --git a/patches.xen/xen3-acpi_processor_check_maxcpus.patch b/patches.xen/xen3-acpi_processor_check_maxcpus.patch deleted file mode 100644 index 50c7be2..0000000 --- a/patches.xen/xen3-acpi_processor_check_maxcpus.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Thomas Renninger -Subject: Do not try to set up acpi processor stuff on cores exceeding maxcpus= -References: bnc#601520 -Patch-Mainline: Not yet - -Signed-off-by: Thomas Renninger - -Automatically created from "patches.fixes/acpi_processor_check_maxcpus.patch" by xen-port-patches.py - ---- head-2010-05-25.orig/drivers/acpi/processor_driver.c 2010-05-25 09:25:03.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_driver.c 2010-05-25 09:51:32.000000000 +0200 -@@ -448,6 +448,11 @@ static int acpi_processor_get_info(struc - return -ENODEV; - } - } -+#if defined(CONFIG_SMP) && defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) -+ if (pr->id >= setup_max_cpus && pr->id > 0) -+ pr->id = -1; -+#endif -+ - /* - * On some boxes several processors use the same processor bus id. - * But they are located in different scope. For example: -@@ -597,8 +602,11 @@ static int __cpuinit acpi_processor_add( - } - - #ifdef CONFIG_SMP -- if (pr->id >= setup_max_cpus && pr->id != 0) -- return 0; -+ if (pr->id >= setup_max_cpus && pr->id != 0) { -+ if (!processor_cntl_external()) -+ return 0; -+ WARN_ON(pr->id != -1); -+ } - #endif - - BUG_ON(!processor_cntl_external() && diff --git a/patches.xen/xen3-auto-arch-i386.diff b/patches.xen/xen3-auto-arch-i386.diff index cb7ffdd..4c0744d 100644 --- a/patches.xen/xen3-auto-arch-i386.diff +++ b/patches.xen/xen3-auto-arch-i386.diff @@ -1,10 +1,10 @@ Subject: xen3 arch-i386 -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com ---- head-2010-01-19.orig/arch/x86/kernel/asm-offsets_32.c 2009-09-10 00:13:59.000000000 +0200 -+++ head-2010-01-19/arch/x86/kernel/asm-offsets_32.c 2010-01-19 16:00:16.000000000 +0100 +--- head-2011-01-30.orig/arch/x86/kernel/asm-offsets_32.c 2011-02-01 13:47:44.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/asm-offsets_32.c 2011-01-31 14:54:00.000000000 +0100 @@ -93,9 +93,14 @@ void foo(void) OFFSET(pbe_orig_address, pbe, orig_address); OFFSET(pbe_next, pbe, next); @@ -21,9 +21,9 @@ Acked-by: jbeulich@novell.com DEFINE(PAGE_SIZE_asm, PAGE_SIZE); DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); ---- head-2010-01-19.orig/arch/x86/kernel/entry_32.S 2010-01-19 13:27:24.000000000 +0100 -+++ head-2010-01-19/arch/x86/kernel/entry_32.S 2010-01-19 16:00:16.000000000 +0100 -@@ -401,7 +401,7 @@ ENTRY(ia32_sysenter_target) +--- head-2011-01-30.orig/arch/x86/kernel/entry_32.S 2011-02-01 13:57:16.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/entry_32.S 2011-02-01 14:10:27.000000000 +0100 +@@ -375,7 +375,7 @@ ENTRY(ia32_sysenter_target) CFI_SIGNAL_FRAME CFI_DEF_CFA esp, 0 CFI_REGISTER esp, ebp @@ -32,7 +32,7 @@ Acked-by: jbeulich@novell.com sysenter_past_esp: /* * Interrupts are disabled here, but we can't trace it until -@@ -1381,7 +1381,7 @@ END(page_fault) +@@ -1307,7 +1307,7 @@ END(page_fault) * that sets up the real kernel stack. Check here, since we can't * allow the wrong stack to be used. * @@ -41,7 +41,7 @@ Acked-by: jbeulich@novell.com * already pushed 3 words if it hits on the sysenter instruction: * eflags, cs and eip. 
* -@@ -1393,7 +1393,7 @@ END(page_fault) +@@ -1319,7 +1319,7 @@ END(page_fault) cmpw $__KERNEL_CS, 4(%esp) jne \ok \label: @@ -49,9 +49,9 @@ Acked-by: jbeulich@novell.com + movl SYSENTER_stack_sp0 + \offset(%esp), %esp CFI_DEF_CFA esp, 0 CFI_UNDEFINED eip - pushfl ---- head-2010-01-19.orig/arch/x86/kernel/machine_kexec_32.c 2010-01-19 14:51:07.000000000 +0100 -+++ head-2010-01-19/arch/x86/kernel/machine_kexec_32.c 2010-01-19 16:00:16.000000000 +0100 + pushfl_cfi +--- head-2011-01-30.orig/arch/x86/kernel/machine_kexec_32.c 2010-01-19 14:51:07.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/machine_kexec_32.c 2011-01-31 14:54:00.000000000 +0100 @@ -27,6 +27,10 @@ #include #include @@ -135,8 +135,8 @@ Acked-by: jbeulich@novell.com void arch_crash_save_vmcoreinfo(void) { ---- head-2010-01-19.orig/arch/x86/kernel/vm86_32.c 2010-01-19 13:26:11.000000000 +0100 -+++ head-2010-01-19/arch/x86/kernel/vm86_32.c 2010-01-19 16:00:16.000000000 +0100 +--- head-2011-01-30.orig/arch/x86/kernel/vm86_32.c 2011-02-01 13:47:44.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/vm86_32.c 2011-01-31 14:54:00.000000000 +0100 @@ -125,7 +125,9 @@ static int copy_vm86_regs_from_user(stru struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) @@ -164,7 +164,7 @@ Acked-by: jbeulich@novell.com ret = KVM86->regs32; -@@ -279,7 +285,9 @@ out: +@@ -280,7 +286,9 @@ out: static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) { @@ -174,7 +174,7 @@ Acked-by: jbeulich@novell.com /* * make sure the vm86() system call doesn't try to do anything silly */ -@@ -323,12 +331,16 @@ static void do_sys_vm86(struct kernel_vm +@@ -324,12 +332,16 @@ static void do_sys_vm86(struct kernel_vm tsk->thread.saved_fs = info->regs32->fs; tsk->thread.saved_gs = get_user_gs(info->regs32); diff --git a/patches.xen/xen3-auto-arch-x86.diff b/patches.xen/xen3-auto-arch-x86.diff index ebface2..dc067b0 100644 --- a/patches.xen/xen3-auto-arch-x86.diff +++ b/patches.xen/xen3-auto-arch-x86.diff @@ -1,5 +1,5 @@ Subject: xen3 arch-x86 -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com @@ -9,9 +9,33 @@ take the forward porting patches: 2.6.26/arch/x86/kernel/crash.c 2.6.30/arch/x86/kernel/acpi/boot.c ---- head-2010-04-15.orig/arch/x86/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/Makefile 2010-03-24 15:01:37.000000000 +0100 -@@ -111,6 +111,10 @@ endif +--- + arch/x86/Makefile | 24 +++++++++++++++++++++++- + arch/x86/boot/Makefile | 9 +++++++++ + arch/x86/include/asm/acpi.h | 27 +++++++++++++++++++++++++++ + arch/x86/include/asm/apic.h | 2 ++ + arch/x86/include/asm/kexec.h | 13 +++++++++++++ + arch/x86/include/asm/topology.h | 2 +- + arch/x86/include/asm/types.h | 2 +- + arch/x86/kernel/Makefile | 9 +++++++++ + arch/x86/kernel/acpi/Makefile | 4 ++++ + arch/x86/kernel/cpu/mcheck/Makefile | 1 + + arch/x86/kernel/cpu/mcheck/mce.c | 21 +++++++++++++++++++++ + arch/x86/kernel/cpu/mtrr/Makefile | 1 + + arch/x86/lib/Makefile | 2 ++ + arch/x86/mm/Makefile | 2 ++ + arch/x86/oprofile/Makefile | 7 +++++++ + arch/x86/pci/Makefile | 3 +++ + arch/x86/power/cpu.c | 4 ++++ + arch/x86/vdso/Makefile | 2 ++ + arch/x86/vdso/vdso32-setup.c | 34 ++++++++++++++++++++++++++++++++++ + 19 files changed, 166 insertions(+), 3 deletions(-) + +Index: linux-2.6.38-master/arch/x86/Makefile +=================================================================== +--- 
linux-2.6.38-master.orig/arch/x86/Makefile ++++ linux-2.6.38-master/arch/x86/Makefile +@@ -116,6 +116,10 @@ endif # prevent gcc from generating any FP code by mistake KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) @@ -22,7 +46,7 @@ take the forward porting patches: KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) -@@ -149,9 +153,26 @@ boot := arch/x86/boot +@@ -151,9 +155,26 @@ boot := arch/x86/boot BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage @@ -50,7 +74,7 @@ take the forward porting patches: all: bzImage # KBUILD_IMAGE specify target image being built -@@ -167,6 +188,7 @@ endif +@@ -169,6 +190,7 @@ endif $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) $@ @@ -58,8 +82,10 @@ take the forward porting patches: PHONY += install install: ---- head-2010-04-15.orig/arch/x86/boot/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/boot/Makefile 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/boot/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/boot/Makefile ++++ linux-2.6.38-master/arch/x86/boot/Makefile @@ -23,6 +23,7 @@ ROOT_DEV := CURRENT SVGA_MODE := -DSVGA_MODE=NORMAL_VGA @@ -83,9 +109,11 @@ take the forward porting patches: install: sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ System.map "$(INSTALL_PATH)" ---- head-2010-04-15.orig/arch/x86/kernel/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/Makefile 2010-03-24 15:01:37.000000000 +0100 -@@ -117,9 +117,12 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) +Index: linux-2.6.38-master/arch/x86/kernel/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/kernel/Makefile ++++ linux-2.6.38-master/arch/x86/kernel/Makefile +@@ -110,9 +110,12 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o @@ -95,10 +123,10 @@ take the forward porting patches: # 64 bit specific files ifeq ($(CONFIG_X86_64),y) + obj-$(CONFIG_X86_XEN_GENAPIC) += genapic_xen_64.o - obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o - obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o obj-$(CONFIG_AUDIT) += audit_64.o -@@ -130,4 +133,10 @@ ifeq ($(CONFIG_X86_64),y) + + obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o +@@ -121,4 +124,10 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o @@ -109,8 +137,10 @@ take the forward porting patches: + +disabled-obj-$(CONFIG_XEN) := i8259_$(BITS).o reboot.o smpboot_$(BITS).o +%/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := ---- head-2010-04-15.orig/arch/x86/kernel/acpi/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/acpi/Makefile 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/kernel/acpi/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/kernel/acpi/Makefile ++++ linux-2.6.38-master/arch/x86/kernel/acpi/Makefile @@ -5,6 +5,9 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wake ifneq ($(CONFIG_ACPI_PROCESSOR),) @@ -126,19 +156,23 @@ take the forward porting patches: $(Q)$(MAKE) $(build)=$(obj)/realmode +disabled-obj-$(CONFIG_XEN) := cstate.o wakeup_$(BITS).o ---- head-2010-04-15.orig/arch/x86/kernel/cpu/mcheck/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/cpu/mcheck/Makefile 
2010-03-24 15:01:37.000000000 +0100 -@@ -4,6 +4,7 @@ obj-$(CONFIG_X86_ANCIENT_MCE) += winchip +Index: linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/kernel/cpu/mcheck/Makefile ++++ linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/Makefile +@@ -3,6 +3,7 @@ obj-y = mce.o mce-severity.o + obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o - obj-$(CONFIG_X86_MCE_XEON75XX) += mce-xeon75xx.o obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o +obj-$(CONFIG_X86_XEN_MCE) += mce_dom0.o obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o ---- head-2010-04-15.orig/arch/x86/kernel/cpu/mcheck/mce.c 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/cpu/mcheck/mce.c 2010-04-15 09:44:40.000000000 +0200 -@@ -1149,8 +1149,15 @@ void mce_log_therm_throt_event(__u64 sta +Index: linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/mce.c +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/kernel/cpu/mcheck/mce.c ++++ linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/mce.c +@@ -1146,8 +1146,15 @@ void mce_log_therm_throt_event(__u64 sta * Periodic polling timer for "silent" machine check errors. If the * poller finds an MCE, poll 2x faster. When the poller finds no more * errors, poll 2x slower (up to check_interval seconds). @@ -154,7 +188,7 @@ take the forward porting patches: static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ static DEFINE_PER_CPU(struct timer_list, mce_timer); -@@ -1315,6 +1322,7 @@ static int __cpuinit __mcheck_cpu_apply_ +@@ -1312,6 +1319,7 @@ static int __cpuinit __mcheck_cpu_apply_ /* This should be disabled by the BIOS, but isn't always */ if (c->x86_vendor == X86_VENDOR_AMD) { @@ -162,7 +196,7 @@ take the forward porting patches: if (c->x86 == 15 && banks > 4) { /* * disable GART TBL walk error reporting, which -@@ -1323,6 +1331,7 @@ static int __cpuinit __mcheck_cpu_apply_ +@@ -1320,6 +1328,7 @@ static int __cpuinit __mcheck_cpu_apply_ */ clear_bit(10, (unsigned long *)&mce_banks[4].ctl); } @@ -170,7 +204,7 @@ take the forward porting patches: if (c->x86 <= 17 && mce_bootlog < 0) { /* * Lots of broken BIOS around that don't clear them -@@ -1390,6 +1399,7 @@ static void __cpuinit __mcheck_cpu_ancie +@@ -1387,6 +1396,7 @@ static void __cpuinit __mcheck_cpu_ancie static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) { @@ -178,7 +212,7 @@ take the forward porting patches: switch (c->x86_vendor) { case X86_VENDOR_INTEL: mce_intel_feature_init(c); -@@ -1400,6 +1410,7 @@ static void __mcheck_cpu_init_vendor(str +@@ -1397,6 +1407,7 @@ static void __mcheck_cpu_init_vendor(str default: break; } @@ -186,7 +220,7 @@ take the forward porting patches: } static void __mcheck_cpu_init_timer(void) -@@ -2096,6 +2107,16 @@ static __init int mcheck_init_device(voi +@@ -2142,6 +2153,16 @@ static __init int mcheck_init_device(voi register_hotcpu_notifier(&mce_cpu_notifier); misc_register(&mce_log_device); @@ -203,32 +237,42 @@ take the forward porting patches: return err; } ---- head-2010-04-15.orig/arch/x86/kernel/cpu/mtrr/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/cpu/mtrr/Makefile 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/kernel/cpu/mtrr/Makefile +=================================================================== +--- 
linux-2.6.38-master.orig/arch/x86/kernel/cpu/mtrr/Makefile ++++ linux-2.6.38-master/arch/x86/kernel/cpu/mtrr/Makefile @@ -1,3 +1,4 @@ obj-y := main.o if.o generic.o cleanup.o obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o +obj-$(CONFIG_XEN) := main.o if.o ---- head-2010-04-15.orig/arch/x86/lib/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/lib/Makefile 2010-03-24 15:01:37.000000000 +0100 -@@ -41,3 +41,5 @@ else +Index: linux-2.6.38-master/arch/x86/lib/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/lib/Makefile ++++ linux-2.6.38-master/arch/x86/lib/Makefile +@@ -43,3 +43,5 @@ else lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o endif + +lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o ---- head-2010-04-15.orig/arch/x86/mm/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/Makefile 2010-03-24 15:01:37.000000000 +0100 -@@ -25,4 +25,6 @@ obj-$(CONFIG_NUMA) += numa.o numa_$(BIT - obj-$(CONFIG_K8_NUMA) += k8topology_64.o +Index: linux-2.6.38-master/arch/x86/mm/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/mm/Makefile ++++ linux-2.6.38-master/arch/x86/mm/Makefile +@@ -26,6 +26,8 @@ obj-$(CONFIG_NUMA) += numa.o numa_$(BIT + obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o +obj-$(CONFIG_XEN) += hypervisor.o + + obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o + obj-$(CONFIG_MEMTEST) += memtest.o ---- head-2010-04-15.orig/arch/x86/oprofile/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/oprofile/Makefile 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/oprofile/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/oprofile/Makefile ++++ linux-2.6.38-master/arch/x86/oprofile/Makefile @@ -6,7 +6,14 @@ DRIVER_OBJS = $(addprefix ../../../drive oprofilefs.o oprofile_stats.o \ timer_int.o ) @@ -244,21 +288,25 @@ take the forward porting patches: op_model_ppro.o op_model_p4.o oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o +endif ---- head-2010-04-15.orig/arch/x86/pci/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/pci/Makefile 2010-03-24 15:01:37.000000000 +0100 -@@ -4,6 +4,9 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o - obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o +Index: linux-2.6.38-master/arch/x86/pci/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/pci/Makefile ++++ linux-2.6.38-master/arch/x86/pci/Makefile +@@ -5,6 +5,9 @@ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$ obj-$(CONFIG_PCI_DIRECT) += direct.o obj-$(CONFIG_PCI_OLPC) += olpc.o + obj-$(CONFIG_PCI_XEN) += xen.o +# pcifront should be after mmconfig.o and direct.o as it should only +# take over if direct access to the PCI bus is unavailable +obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o obj-y += fixup.o - obj-$(CONFIG_ACPI) += acpi.o ---- head-2010-04-15.orig/arch/x86/power/cpu.c 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/power/cpu.c 2010-03-24 15:01:37.000000000 +0100 -@@ -126,6 +126,7 @@ static void do_fpu_end(void) + obj-$(CONFIG_X86_INTEL_CE) += ce4100.o +Index: linux-2.6.38-master/arch/x86/power/cpu.c +=================================================================== +--- 
linux-2.6.38-master.orig/arch/x86/power/cpu.c ++++ linux-2.6.38-master/arch/x86/power/cpu.c +@@ -129,6 +129,7 @@ static void do_fpu_end(void) static void fix_processor_context(void) { @@ -266,7 +314,7 @@ take the forward porting patches: int cpu = smp_processor_id(); struct tss_struct *t = &per_cpu(init_tss, cpu); -@@ -138,7 +139,10 @@ static void fix_processor_context(void) +@@ -141,7 +142,10 @@ static void fix_processor_context(void) #ifdef CONFIG_X86_64 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; @@ -277,8 +325,10 @@ take the forward porting patches: syscall_init(); /* This sets MSR_*STAR and related */ #endif load_TR_desc(); /* This does ltr */ ---- head-2010-04-15.orig/arch/x86/include/asm/acpi.h 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/asm/acpi.h 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/include/asm/acpi.h +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/include/asm/acpi.h ++++ linux-2.6.38-master/arch/x86/include/asm/acpi.h @@ -30,6 +30,10 @@ #include #include @@ -290,7 +340,7 @@ take the forward porting patches: #define COMPILER_DEPENDENT_INT64 long long #define COMPILER_DEPENDENT_UINT64 unsigned long long -@@ -120,6 +124,27 @@ extern unsigned long acpi_wakeup_address +@@ -122,6 +126,27 @@ extern unsigned long acpi_wakeup_address /* early initialization routine */ extern void acpi_reserve_wakeup_memory(void); @@ -318,7 +368,7 @@ take the forward porting patches: /* * Check if the CPU can handle C2 and deeper */ -@@ -178,7 +203,9 @@ static inline void disable_acpi(void) { +@@ -180,7 +205,9 @@ static inline void disable_acpi(void) { #endif /* !CONFIG_ACPI */ @@ -328,8 +378,10 @@ take the forward porting patches: struct bootnode; ---- head-2010-04-15.orig/arch/x86/include/asm/apic.h 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/asm/apic.h 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/include/asm/apic.h +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/include/asm/apic.h ++++ linux-2.6.38-master/arch/x86/include/asm/apic.h @@ -15,7 +15,9 @@ #include #include @@ -340,8 +392,10 @@ take the forward porting patches: /* * Debugging macros ---- head-2010-04-15.orig/arch/x86/include/asm/kexec.h 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/asm/kexec.h 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/include/asm/kexec.h +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/include/asm/kexec.h ++++ linux-2.6.38-master/arch/x86/include/asm/kexec.h @@ -163,6 +163,19 @@ struct kimage_arch { }; #endif @@ -362,8 +416,23 @@ take the forward porting patches: #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_KEXEC_H */ ---- head-2010-04-15.orig/arch/x86/include/asm/types.h 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/include/asm/types.h 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/include/asm/topology.h +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/include/asm/topology.h ++++ linux-2.6.38-master/arch/x86/include/asm/topology.h +@@ -30,7 +30,7 @@ + # define ENABLE_TOPO_DEFINES + # endif + #else +-# ifdef CONFIG_SMP ++# if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + # define ENABLE_TOPO_DEFINES + # endif + #endif +Index: 
linux-2.6.38-master/arch/x86/include/asm/types.h +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/include/asm/types.h ++++ linux-2.6.38-master/arch/x86/include/asm/types.h @@ -9,7 +9,7 @@ #ifndef __ASSEMBLY__ @@ -373,8 +442,10 @@ take the forward porting patches: /* DMA addresses come in 32-bit and 64-bit flavours. */ typedef u64 dma_addr_t; #else ---- head-2010-04-15.orig/arch/x86/vdso/Makefile 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/vdso/Makefile 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/vdso/Makefile +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/vdso/Makefile ++++ linux-2.6.38-master/arch/x86/vdso/Makefile @@ -65,6 +65,8 @@ obj-$(VDSO32-y) += vdso32-syms.lds vdso32.so-$(VDSO32-y) += int80 vdso32.so-$(CONFIG_COMPAT) += syscall @@ -384,8 +455,10 @@ take the forward porting patches: vdso32-images = $(vdso32.so-y:%=vdso32-%.so) ---- head-2010-04-15.orig/arch/x86/vdso/vdso32-setup.c 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/vdso/vdso32-setup.c 2010-03-24 15:01:37.000000000 +0100 +Index: linux-2.6.38-master/arch/x86/vdso/vdso32-setup.c +=================================================================== +--- linux-2.6.38-master.orig/arch/x86/vdso/vdso32-setup.c ++++ linux-2.6.38-master/arch/x86/vdso/vdso32-setup.c @@ -26,6 +26,10 @@ #include #include diff --git a/patches.xen/xen3-auto-arch-x86_64.diff b/patches.xen/xen3-auto-arch-x86_64.diff index 0a3af92..bc6dfa4 100644 --- a/patches.xen/xen3-auto-arch-x86_64.diff +++ b/patches.xen/xen3-auto-arch-x86_64.diff @@ -1,5 +1,5 @@ Subject: xen3 arch-x86_64 -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com diff --git a/patches.xen/xen3-auto-blktap2-pvops.diff b/patches.xen/xen3-auto-blktap2-pvops.diff new file mode 100644 index 0000000..ee89650 --- /dev/null +++ b/patches.xen/xen3-auto-blktap2-pvops.diff @@ -0,0 +1,2373 @@ +Subject: pv-ops blktap2 +From: https://git.kernel.org/?p=linux/kernel/git/jeremy/xen.git (commit 892d2f052e979cf1916647c752b94cf62ec1c6dc) +Patch-mainline: n/a +Acked-by: jbeulich@novell.com + +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/xen/blktap2-new/Makefile 2011-02-24 13:49:49.000000000 +0100 +@@ -0,0 +1,3 @@ ++obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o ++ ++blktap-objs := control.o ring.o device.o request.o sysfs.o +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/xen/blktap2-new/blktap.h 2011-02-24 13:49:49.000000000 +0100 +@@ -0,0 +1,209 @@ ++#ifndef _BLKTAP_H_ ++#define _BLKTAP_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern int blktap_debug_level; ++extern int blktap_ring_major; ++extern int blktap_device_major; ++ ++#define BTPRINTK(level, tag, force, _f, _a...) \ ++ do { \ ++ if (blktap_debug_level > level && \ ++ (force || printk_ratelimit())) \ ++ printk(tag "%s: " _f, __func__, ##_a); \ ++ } while (0) ++ ++#define BTDBG(_f, _a...) BTPRINTK(8, KERN_DEBUG, 1, _f, ##_a) ++#define BTINFO(_f, _a...) BTPRINTK(0, KERN_INFO, 0, _f, ##_a) ++#define BTWARN(_f, _a...) BTPRINTK(0, KERN_WARNING, 0, _f, ##_a) ++#define BTERR(_f, _a...) 
BTPRINTK(0, KERN_ERR, 0, _f, ##_a) ++ ++#define MAX_BLKTAP_DEVICE 1024 ++ ++#define BLKTAP_DEVICE 4 ++#define BLKTAP_DEVICE_CLOSED 5 ++#define BLKTAP_SHUTDOWN_REQUESTED 8 ++ ++/* blktap IOCTLs: */ ++#define BLKTAP2_IOCTL_KICK_FE 1 ++#define BLKTAP2_IOCTL_ALLOC_TAP 200 ++#define BLKTAP2_IOCTL_FREE_TAP 201 ++#define BLKTAP2_IOCTL_CREATE_DEVICE 202 ++#define BLKTAP2_IOCTL_REMOVE_DEVICE 207 ++ ++#define BLKTAP2_MAX_MESSAGE_LEN 256 ++ ++#define BLKTAP2_RING_MESSAGE_CLOSE 3 ++ ++#define BLKTAP_REQUEST_FREE 0 ++#define BLKTAP_REQUEST_PENDING 1 ++ ++/* ++ * The maximum number of requests that can be outstanding at any time ++ * is determined by ++ * ++ * [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST] ++ * ++ * where mmap_alloc < MAX_DYNAMIC_MEM. ++ * ++ * TODO: ++ * mmap_alloc is initialised to 2 and should be adjustable on the fly via ++ * sysfs. ++ */ ++#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) ++#define MAX_DYNAMIC_MEM BLK_RING_SIZE ++#define MAX_PENDING_REQS BLK_RING_SIZE ++#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST) ++#define MMAP_VADDR(_start, _req, _seg) \ ++ (_start + \ ++ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \ ++ ((_seg) * PAGE_SIZE)) ++ ++struct grant_handle_pair { ++ grant_handle_t kernel; ++ grant_handle_t user; ++}; ++#define INVALID_GRANT_HANDLE 0xFFFF ++ ++struct blktap_handle { ++ unsigned int ring; ++ unsigned int device; ++ unsigned int minor; ++}; ++ ++struct blktap_params { ++ char name[BLKTAP2_MAX_MESSAGE_LEN]; ++ unsigned long long capacity; ++ unsigned long sector_size; ++}; ++ ++struct blktap_device { ++ spinlock_t lock; ++ struct gendisk *gd; ++}; ++ ++struct blktap_ring { ++ struct task_struct *task; ++ ++ struct vm_area_struct *vma; ++ struct blkif_front_ring ring; ++ unsigned long ring_vstart; ++ unsigned long user_vstart; ++ ++ int n_pending; ++ struct blktap_request *pending[MAX_PENDING_REQS]; ++ ++ wait_queue_head_t poll_wait; ++ ++ dev_t devno; ++ struct device *dev; ++}; ++ ++struct blktap_statistics { ++ unsigned long st_print; ++ int st_rd_req; ++ int st_wr_req; ++ int st_oo_req; ++ int st_rd_sect; ++ int st_wr_sect; ++ s64 st_rd_cnt; ++ s64 st_rd_sum_usecs; ++ s64 st_rd_max_usecs; ++ s64 st_wr_cnt; ++ s64 st_wr_sum_usecs; ++ s64 st_wr_max_usecs; ++}; ++ ++struct blktap_request { ++ struct blktap *tap; ++ struct request *rq; ++ int usr_idx; ++ ++ int operation; ++ struct timeval time; ++ ++ struct scatterlist sg_table[BLKIF_MAX_SEGMENTS_PER_REQUEST]; ++ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; ++ int nr_pages; ++}; ++ ++#define blktap_for_each_sg(_sg, _req, _i) \ ++ for (_sg = (_req)->sg_table, _i = 0; \ ++ _i < (_req)->nr_pages; \ ++ (_sg)++, (_i)++) ++ ++struct blktap { ++ int minor; ++ unsigned long dev_inuse; ++ ++ struct blktap_ring ring; ++ struct blktap_device device; ++ struct blktap_page_pool *pool; ++ ++ wait_queue_head_t remove_wait; ++ struct work_struct remove_work; ++ char name[BLKTAP2_MAX_MESSAGE_LEN]; ++ ++ struct blktap_statistics stats; ++}; ++ ++struct blktap_page_pool { ++ struct mempool_s *bufs; ++ spinlock_t lock; ++ struct kobject kobj; ++ wait_queue_head_t wait; ++}; ++ ++extern struct mutex blktap_lock; ++extern struct blktap **blktaps; ++extern int blktap_max_minor; ++ ++int blktap_control_destroy_tap(struct blktap *); ++size_t blktap_control_debug(struct blktap *, char *, size_t); ++ ++int blktap_ring_init(void); ++void blktap_ring_exit(void); ++size_t blktap_ring_debug(struct blktap *, char *, size_t); ++int blktap_ring_create(struct 
blktap *); ++int blktap_ring_destroy(struct blktap *); ++struct blktap_request *blktap_ring_make_request(struct blktap *); ++void blktap_ring_free_request(struct blktap *,struct blktap_request *); ++void blktap_ring_submit_request(struct blktap *, struct blktap_request *); ++int blktap_ring_map_request_segment(struct blktap *, struct blktap_request *, int); ++int blktap_ring_map_request(struct blktap *, struct blktap_request *); ++void blktap_ring_unmap_request(struct blktap *, struct blktap_request *); ++void blktap_ring_set_message(struct blktap *, int); ++void blktap_ring_kick_user(struct blktap *); ++ ++int blktap_sysfs_init(void); ++void blktap_sysfs_exit(void); ++int blktap_sysfs_create(struct blktap *); ++void blktap_sysfs_destroy(struct blktap *); ++ ++int blktap_device_init(void); ++void blktap_device_exit(void); ++size_t blktap_device_debug(struct blktap *, char *, size_t); ++int blktap_device_create(struct blktap *, struct blktap_params *); ++int blktap_device_destroy(struct blktap *); ++void blktap_device_destroy_sync(struct blktap *); ++void blktap_device_run_queue(struct blktap *); ++void blktap_device_end_request(struct blktap *, struct blktap_request *, int); ++ ++int blktap_page_pool_init(struct kobject *); ++void blktap_page_pool_exit(void); ++struct blktap_page_pool *blktap_page_pool_get(const char *); ++ ++size_t blktap_request_debug(struct blktap *, char *, size_t); ++struct blktap_request *blktap_request_alloc(struct blktap *); ++int blktap_request_get_pages(struct blktap *, struct blktap_request *, int); ++void blktap_request_free(struct blktap *, struct blktap_request *); ++void blktap_request_bounce(struct blktap *, struct blktap_request *, int, int); ++ ++ ++#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/xen/blktap2-new/control.c 2011-02-24 13:49:49.000000000 +0100 +@@ -0,0 +1,315 @@ ++#include ++#include ++#include ++#include ++#include ++ ++#include "blktap.h" ++ ++DEFINE_MUTEX(blktap_lock); ++ ++struct blktap **blktaps; ++int blktap_max_minor; ++static struct blktap_page_pool *default_pool; ++ ++static struct blktap * ++blktap_control_get_minor(void) ++{ ++ int minor; ++ struct blktap *tap; ++ ++ tap = kzalloc(sizeof(*tap), GFP_KERNEL); ++ if (unlikely(!tap)) ++ return NULL; ++ ++ mutex_lock(&blktap_lock); ++ ++ for (minor = 0; minor < blktap_max_minor; minor++) ++ if (!blktaps[minor]) ++ break; ++ ++ if (minor == MAX_BLKTAP_DEVICE) ++ goto fail; ++ ++ if (minor == blktap_max_minor) { ++ void *p; ++ int n; ++ ++ n = min(2 * blktap_max_minor, MAX_BLKTAP_DEVICE); ++ p = krealloc(blktaps, n * sizeof(blktaps[0]), GFP_KERNEL); ++ if (!p) ++ goto fail; ++ ++ blktaps = p; ++ minor = blktap_max_minor; ++ blktap_max_minor = n; ++ ++ memset(&blktaps[minor], 0, (n - minor) * sizeof(blktaps[0])); ++ } ++ ++ tap->minor = minor; ++ blktaps[minor] = tap; ++ ++ __module_get(THIS_MODULE); ++out: ++ mutex_unlock(&blktap_lock); ++ return tap; ++ ++fail: ++ mutex_unlock(&blktap_lock); ++ kfree(tap); ++ tap = NULL; ++ goto out; ++} ++ ++static void ++blktap_control_put_minor(struct blktap* tap) ++{ ++ blktaps[tap->minor] = NULL; ++ kfree(tap); ++ ++ module_put(THIS_MODULE); ++} ++ ++static struct blktap* ++blktap_control_create_tap(void) ++{ ++ struct blktap *tap; ++ int err; ++ ++ tap = blktap_control_get_minor(); ++ if (!tap) ++ return NULL; ++ ++ kobject_get(&default_pool->kobj); ++ tap->pool = default_pool; ++ ++ err = blktap_ring_create(tap); ++ if (err) ++ goto fail_tap; ++ ++ err = blktap_sysfs_create(tap); ++ if (err) ++ goto fail_ring; ++ ++ 
return tap; ++ ++fail_ring: ++ blktap_ring_destroy(tap); ++fail_tap: ++ blktap_control_put_minor(tap); ++ ++ return NULL; ++} ++ ++int ++blktap_control_destroy_tap(struct blktap *tap) ++{ ++ int err; ++ ++ err = blktap_ring_destroy(tap); ++ if (err) ++ return err; ++ ++ kobject_put(&tap->pool->kobj); ++ ++ blktap_sysfs_destroy(tap); ++ ++ blktap_control_put_minor(tap); ++ ++ return 0; ++} ++ ++static int ++blktap_control_ioctl(struct inode *inode, struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ struct blktap *tap; ++ ++ switch (cmd) { ++ case BLKTAP2_IOCTL_ALLOC_TAP: { ++ struct blktap_handle h; ++ void __user *ptr = (void __user*)arg; ++ ++ tap = blktap_control_create_tap(); ++ if (!tap) ++ return -ENOMEM; ++ ++ h.ring = blktap_ring_major; ++ h.device = blktap_device_major; ++ h.minor = tap->minor; ++ ++ if (copy_to_user(ptr, &h, sizeof(h))) { ++ blktap_control_destroy_tap(tap); ++ return -EFAULT; ++ } ++ ++ return 0; ++ } ++ ++ case BLKTAP2_IOCTL_FREE_TAP: { ++ int minor = arg; ++ ++ if (minor > MAX_BLKTAP_DEVICE) ++ return -EINVAL; ++ ++ tap = blktaps[minor]; ++ if (!tap) ++ return -ENODEV; ++ ++ return blktap_control_destroy_tap(tap); ++ } ++ } ++ ++ return -ENOIOCTLCMD; ++} ++ ++static struct file_operations blktap_control_file_operations = { ++ .owner = THIS_MODULE, ++ .ioctl = blktap_control_ioctl, ++}; ++ ++static struct miscdevice blktap_control = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "blktap-control", ++ .fops = &blktap_control_file_operations, ++}; ++ ++static struct device *control_device; ++ ++static ssize_t ++blktap_control_show_default_pool(struct device *device, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ return sprintf(buf, "%s", kobject_name(&default_pool->kobj)); ++} ++ ++static ssize_t ++blktap_control_store_default_pool(struct device *device, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct blktap_page_pool *pool, *tmp = default_pool; ++ ++ pool = blktap_page_pool_get(buf); ++ if (IS_ERR(pool)) ++ return PTR_ERR(pool); ++ ++ default_pool = pool; ++ kobject_put(&tmp->kobj); ++ ++ return size; ++} ++ ++static DEVICE_ATTR(default_pool, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH, ++ blktap_control_show_default_pool, ++ blktap_control_store_default_pool); ++ ++size_t ++blktap_control_debug(struct blktap *tap, char *buf, size_t size) ++{ ++ char *s = buf, *end = buf + size; ++ ++ s += snprintf(s, end - s, ++ "tap %u:%u name:'%s' flags:%#08lx\n", ++ MAJOR(tap->ring.devno), MINOR(tap->ring.devno), ++ tap->name, tap->dev_inuse); ++ ++ return s - buf; ++} ++ ++static int __init ++blktap_control_init(void) ++{ ++ int err; ++ ++ err = misc_register(&blktap_control); ++ if (err) ++ return err; ++ ++ control_device = blktap_control.this_device; ++ ++ blktap_max_minor = min(64, MAX_BLKTAP_DEVICE); ++ blktaps = kzalloc(blktap_max_minor * sizeof(blktaps[0]), GFP_KERNEL); ++ if (!blktaps) { ++ BTERR("failed to allocate blktap minor map"); ++ return -ENOMEM; ++ } ++ ++ err = blktap_page_pool_init(&control_device->kobj); ++ if (err) ++ return err; ++ ++ default_pool = blktap_page_pool_get("default"); ++ if (!default_pool) ++ return -ENOMEM; ++ ++ err = device_create_file(control_device, &dev_attr_default_pool); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++static void ++blktap_control_exit(void) ++{ ++ if (default_pool) { ++ kobject_put(&default_pool->kobj); ++ default_pool = NULL; ++ } ++ ++ blktap_page_pool_exit(); ++ ++ if (blktaps) { ++ kfree(blktaps); ++ blktaps = NULL; ++ } ++ ++ if (control_device) { ++ 
misc_deregister(&blktap_control); ++ control_device = NULL; ++ } ++} ++ ++static void ++blktap_exit(void) ++{ ++ blktap_control_exit(); ++ blktap_ring_exit(); ++ blktap_sysfs_exit(); ++ blktap_device_exit(); ++} ++ ++static int __init ++blktap_init(void) ++{ ++ int err; ++ ++ err = blktap_device_init(); ++ if (err) ++ goto fail; ++ ++ err = blktap_ring_init(); ++ if (err) ++ goto fail; ++ ++ err = blktap_sysfs_init(); ++ if (err) ++ goto fail; ++ ++ err = blktap_control_init(); ++ if (err) ++ goto fail; ++ ++ return 0; ++ ++fail: ++ blktap_exit(); ++ return err; ++} ++ ++module_init(blktap_init); ++module_exit(blktap_exit); ++MODULE_LICENSE("Dual BSD/GPL"); +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/xen/blktap2-new/device.c 2011-02-24 13:49:49.000000000 +0100 +@@ -0,0 +1,564 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "blktap.h" ++ ++int blktap_device_major; ++ ++#define dev_to_blktap(_dev) container_of(_dev, struct blktap, device) ++ ++static int ++blktap_device_open(struct block_device *bdev, fmode_t mode) ++{ ++ struct gendisk *disk = bdev->bd_disk; ++ struct blktap_device *tapdev = disk->private_data; ++ ++ if (!tapdev) ++ return -ENXIO; ++ ++ /* NB. we might have bounced a bd trylock by tapdisk. when ++ * failing for reasons not !tapdev, make sure to kick tapdisk ++ * out of destroy wait state again. */ ++ ++ return 0; ++} ++ ++static int ++blktap_device_release(struct gendisk *disk, fmode_t mode) ++{ ++ struct blktap_device *tapdev = disk->private_data; ++ struct block_device *bdev = bdget_disk(disk, 0); ++ struct blktap *tap = dev_to_blktap(tapdev); ++ ++ bdput(bdev); ++ ++ if (!bdev->bd_openers) { ++ set_bit(BLKTAP_DEVICE_CLOSED, &tap->dev_inuse); ++ blktap_ring_kick_user(tap); ++ } ++ ++ return 0; ++} ++ ++static int ++blktap_device_getgeo(struct block_device *bd, struct hd_geometry *hg) ++{ ++ /* We don't have real geometry info, but let's at least return ++ values consistent with the size of the device */ ++ sector_t nsect = get_capacity(bd->bd_disk); ++ sector_t cylinders = nsect; ++ ++ hg->heads = 0xff; ++ hg->sectors = 0x3f; ++ sector_div(cylinders, hg->heads * hg->sectors); ++ hg->cylinders = cylinders; ++ if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) ++ hg->cylinders = 0xffff; ++ return 0; ++} ++ ++static int ++blktap_device_ioctl(struct block_device *bd, fmode_t mode, ++ unsigned command, unsigned long argument) ++{ ++ int i; ++ ++ switch (command) { ++ case CDROMMULTISESSION: ++ BTDBG("FIXME: support multisession CDs later\n"); ++ for (i = 0; i < sizeof(struct cdrom_multisession); i++) ++ if (put_user(0, (char __user *)(argument + i))) ++ return -EFAULT; ++ return 0; ++ ++ case SCSI_IOCTL_GET_IDLUN: ++ if (!access_ok(VERIFY_WRITE, argument, ++ sizeof(struct scsi_idlun))) ++ return -EFAULT; ++ ++ /* return 0 for now. */ ++ __put_user(0, &((struct scsi_idlun __user *)argument)->dev_id); ++ __put_user(0, ++ &((struct scsi_idlun __user *)argument)->host_unique_id); ++ return 0; ++ ++ default: ++ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", ++ command);*/ ++ return -EINVAL; /* same return as native Linux */ ++ } ++ ++ return 0; ++} ++ ++static struct block_device_operations blktap_device_file_operations = { ++ .owner = THIS_MODULE, ++ .open = blktap_device_open, ++ .release = blktap_device_release, ++ .ioctl = blktap_device_ioctl, ++ .getgeo = blktap_device_getgeo ++}; ++ ++/* NB. 
__blktap holding the queue lock; blktap where unlocked */ ++ ++static inline struct request* ++__blktap_next_queued_rq(struct request_queue *q) ++{ ++ return blk_peek_request(q); ++} ++ ++static inline void ++__blktap_dequeue_rq(struct request *rq) ++{ ++ blk_start_request(rq); ++} ++ ++/* NB. err == 0 indicates success, failures < 0 */ ++ ++static inline void ++__blktap_end_queued_rq(struct request *rq, int err) ++{ ++ blk_start_request(rq); ++ __blk_end_request(rq, err, blk_rq_bytes(rq)); ++} ++ ++static inline void ++__blktap_end_rq(struct request *rq, int err) ++{ ++ __blk_end_request(rq, err, blk_rq_bytes(rq)); ++} ++ ++static inline void ++blktap_end_rq(struct request *rq, int err) ++{ ++ spin_lock_irq(rq->q->queue_lock); ++ __blktap_end_rq(rq, err); ++ spin_unlock_irq(rq->q->queue_lock); ++} ++ ++void ++blktap_device_end_request(struct blktap *tap, ++ struct blktap_request *request, ++ int error) ++{ ++ struct blktap_device *tapdev = &tap->device; ++ struct request *rq = request->rq; ++ ++ blktap_ring_unmap_request(tap, request); ++ ++ blktap_ring_free_request(tap, request); ++ ++ dev_dbg(disk_to_dev(tapdev->gd), ++ "end_request: op=%d error=%d bytes=%d\n", ++ rq_data_dir(rq), error, blk_rq_bytes(rq)); ++ ++ blktap_end_rq(rq, error); ++} ++ ++int ++blktap_device_make_request(struct blktap *tap, struct request *rq) ++{ ++ struct blktap_device *tapdev = &tap->device; ++ struct blktap_request *request; ++ int write, nsegs; ++ int err; ++ ++ request = blktap_ring_make_request(tap); ++ if (IS_ERR(request)) { ++ err = PTR_ERR(request); ++ request = NULL; ++ ++ if (err == -ENOSPC || err == -ENOMEM) ++ goto stop; ++ ++ goto fail; ++ } ++ ++ write = rq_data_dir(rq) == WRITE; ++ nsegs = blk_rq_map_sg(rq->q, rq, request->sg_table); ++ ++ dev_dbg(disk_to_dev(tapdev->gd), ++ "make_request: op=%c bytes=%d nsegs=%d\n", ++ write ? 'w' : 'r', blk_rq_bytes(rq), nsegs); ++ ++ request->rq = rq; ++ request->operation = write ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; ++ ++ err = blktap_request_get_pages(tap, request, nsegs); ++ if (err) ++ goto stop; ++ ++ err = blktap_ring_map_request(tap, request); ++ if (err) ++ goto fail; ++ ++ blktap_ring_submit_request(tap, request); ++ ++ return 0; ++ ++stop: ++ tap->stats.st_oo_req++; ++ err = -EBUSY; ++ ++_out: ++ if (request) ++ blktap_ring_free_request(tap, request); ++ ++ return err; ++fail: ++ if (printk_ratelimit()) ++ dev_warn(disk_to_dev(tapdev->gd), ++ "make request: %d, failing\n", err); ++ goto _out; ++} ++ ++/* ++ * called from tapdisk context ++ */ ++void ++blktap_device_run_queue(struct blktap *tap) ++{ ++ struct blktap_device *tapdev = &tap->device; ++ struct request_queue *q; ++ struct request *rq; ++ int err; ++ ++ if (!tapdev->gd) ++ return; ++ ++ q = tapdev->gd->queue; ++ ++ spin_lock_irq(&tapdev->lock); ++ queue_flag_clear(QUEUE_FLAG_STOPPED, q); ++ ++ do { ++ rq = __blktap_next_queued_rq(q); ++ if (!rq) ++ break; ++ ++ if (!blk_fs_request(rq)) { ++ __blktap_end_queued_rq(rq, -EOPNOTSUPP); ++ continue; ++ } ++ ++ spin_unlock_irq(&tapdev->lock); ++ ++ err = blktap_device_make_request(tap, rq); ++ ++ spin_lock_irq(&tapdev->lock); ++ ++ if (err == -EBUSY) { ++ blk_stop_queue(q); ++ break; ++ } ++ ++ __blktap_dequeue_rq(rq); ++ ++ if (unlikely(err)) ++ __blktap_end_rq(rq, err); ++ } while (1); ++ ++ spin_unlock_irq(&tapdev->lock); ++} ++ ++static void ++blktap_device_do_request(struct request_queue *rq) ++{ ++ struct blktap_device *tapdev = rq->queuedata; ++ struct blktap *tap = dev_to_blktap(tapdev); ++ ++ blktap_ring_kick_user(tap); ++} ++ ++static void ++blktap_device_configure(struct blktap *tap, ++ struct blktap_params *params) ++{ ++ struct request_queue *rq; ++ struct blktap_device *dev = &tap->device; ++ ++ dev = &tap->device; ++ rq = dev->gd->queue; ++ ++ spin_lock_irq(&dev->lock); ++ ++ set_capacity(dev->gd, params->capacity); ++ ++ /* Hard sector size and max sectors impersonate the equiv. hardware. */ ++ blk_queue_logical_block_size(rq, params->sector_size); ++ blk_queue_max_sectors(rq, 512); ++ ++ /* Each segment in a request is up to an aligned page in size. */ ++ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); ++ blk_queue_max_segment_size(rq, PAGE_SIZE); ++ ++ /* Ensure a merged request will fit in a single I/O ring slot. */ ++ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); ++ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); ++ ++ /* Make sure buffer addresses are sector-aligned. */ ++ blk_queue_dma_alignment(rq, 511); ++ ++ /* We are reordering, but cacheless. 
*/ ++ blk_queue_ordered(rq, QUEUE_ORDERED_DRAIN, NULL); ++ ++ spin_unlock_irq(&dev->lock); ++} ++ ++static int ++blktap_device_validate_params(struct blktap *tap, ++ struct blktap_params *params) ++{ ++ struct device *dev = tap->ring.dev; ++ int sector_order, name_sz; ++ ++ sector_order = ffs(params->sector_size) - 1; ++ ++ if (sector_order < 9 || ++ sector_order > 12 || ++ params->sector_size != 1U<capacity || ++ (params->capacity > ULLONG_MAX >> sector_order)) ++ goto fail; ++ ++ name_sz = min(sizeof(params->name), sizeof(tap->name)); ++ if (strnlen(params->name, name_sz) >= name_sz) ++ goto fail; ++ ++ return 0; ++ ++fail: ++ params->name[name_sz-1] = 0; ++ dev_err(dev, "capacity: %llu, sector-size: %lu, name: %s\n", ++ params->capacity, params->sector_size, params->name); ++ return -EINVAL; ++} ++ ++int ++blktap_device_destroy(struct blktap *tap) ++{ ++ struct blktap_device *tapdev = &tap->device; ++ struct block_device *bdev; ++ struct gendisk *gd; ++ int err; ++ ++ gd = tapdev->gd; ++ if (!gd) ++ return 0; ++ ++ bdev = bdget_disk(gd, 0); ++ ++ err = !mutex_trylock(&bdev->bd_mutex); ++ if (err) { ++ /* NB. avoid a deadlock. the last opener syncs the ++ * bdev holding bd_mutex. */ ++ err = -EBUSY; ++ goto out_nolock; ++ } ++ ++ if (bdev->bd_openers) { ++ err = -EBUSY; ++ goto out; ++ } ++ ++ del_gendisk(gd); ++ gd->private_data = NULL; ++ ++ blk_cleanup_queue(gd->queue); ++ ++ put_disk(gd); ++ tapdev->gd = NULL; ++ ++ clear_bit(BLKTAP_DEVICE, &tap->dev_inuse); ++ err = 0; ++out: ++ mutex_unlock(&bdev->bd_mutex); ++out_nolock: ++ bdput(bdev); ++ ++ return err; ++} ++ ++static void ++blktap_device_fail_queue(struct blktap *tap) ++{ ++ struct blktap_device *tapdev = &tap->device; ++ struct request_queue *q = tapdev->gd->queue; ++ ++ spin_lock_irq(&tapdev->lock); ++ queue_flag_clear(QUEUE_FLAG_STOPPED, q); ++ ++ do { ++ struct request *rq = __blktap_next_queued_rq(q); ++ if (!rq) ++ break; ++ ++ __blktap_end_queued_rq(rq, -EIO); ++ } while (1); ++ ++ spin_unlock_irq(&tapdev->lock); ++} ++ ++static int ++blktap_device_try_destroy(struct blktap *tap) ++{ ++ int err; ++ ++ err = blktap_device_destroy(tap); ++ if (err) ++ blktap_device_fail_queue(tap); ++ ++ return err; ++} ++ ++void ++blktap_device_destroy_sync(struct blktap *tap) ++{ ++ wait_event(tap->ring.poll_wait, ++ !blktap_device_try_destroy(tap)); ++} ++ ++int ++blktap_device_create(struct blktap *tap, struct blktap_params *params) ++{ ++ int minor, err; ++ struct gendisk *gd; ++ struct request_queue *rq; ++ struct blktap_device *tapdev; ++ ++ gd = NULL; ++ rq = NULL; ++ tapdev = &tap->device; ++ minor = tap->minor; ++ ++ if (test_bit(BLKTAP_DEVICE, &tap->dev_inuse)) ++ return -EEXIST; ++ ++ if (blktap_device_validate_params(tap, params)) ++ return -EINVAL; ++ ++ gd = alloc_disk(1); ++ if (!gd) { ++ err = -ENOMEM; ++ goto fail; ++ } ++ ++ if (minor < 26) { ++ sprintf(gd->disk_name, "td%c", 'a' + minor % 26); ++ } else if (minor < (26 + 1) * 26) { ++ sprintf(gd->disk_name, "td%c%c", ++ 'a' + minor / 26 - 1,'a' + minor % 26); ++ } else { ++ const unsigned int m1 = (minor / 26 - 1) / 26 - 1; ++ const unsigned int m2 = (minor / 26 - 1) % 26; ++ const unsigned int m3 = minor % 26; ++ sprintf(gd->disk_name, "td%c%c%c", ++ 'a' + m1, 'a' + m2, 'a' + m3); ++ } ++ ++ gd->major = blktap_device_major; ++ gd->first_minor = minor; ++ gd->fops = &blktap_device_file_operations; ++ gd->private_data = tapdev; ++ ++ spin_lock_init(&tapdev->lock); ++ rq = blk_init_queue(blktap_device_do_request, &tapdev->lock); ++ if (!rq) { ++ err = -ENOMEM; ++ goto 
fail; ++ } ++ elevator_init(rq, "noop"); ++ ++ gd->queue = rq; ++ rq->queuedata = tapdev; ++ tapdev->gd = gd; ++ ++ blktap_device_configure(tap, params); ++ add_disk(gd); ++ ++ if (params->name[0]) ++ strncpy(tap->name, params->name, sizeof(tap->name)-1); ++ ++ set_bit(BLKTAP_DEVICE, &tap->dev_inuse); ++ ++ dev_info(disk_to_dev(gd), "sector-size: %u capacity: %llu\n", ++ queue_logical_block_size(rq), ++ (unsigned long long)get_capacity(gd)); ++ ++ return 0; ++ ++fail: ++ if (gd) ++ del_gendisk(gd); ++ if (rq) ++ blk_cleanup_queue(rq); ++ ++ return err; ++} ++ ++size_t ++blktap_device_debug(struct blktap *tap, char *buf, size_t size) ++{ ++ struct gendisk *disk = tap->device.gd; ++ struct request_queue *q; ++ struct block_device *bdev; ++ char *s = buf, *end = buf + size; ++ ++ if (!disk) ++ return 0; ++ ++ q = disk->queue; ++ ++ s += snprintf(s, end - s, ++ "disk capacity:%llu sector size:%u\n", ++ (unsigned long long)get_capacity(disk), ++ queue_logical_block_size(q)); ++ ++ s += snprintf(s, end - s, ++ "queue flags:%#lx plugged:%d stopped:%d empty:%d\n", ++ q->queue_flags, ++ blk_queue_plugged(q), blk_queue_stopped(q), ++ elv_queue_empty(q)); ++ ++ bdev = bdget_disk(disk, 0); ++ if (bdev) { ++ s += snprintf(s, end - s, ++ "bdev openers:%d closed:%d\n", ++ bdev->bd_openers, ++ test_bit(BLKTAP_DEVICE_CLOSED, &tap->dev_inuse)); ++ bdput(bdev); ++ } ++ ++ return s - buf; ++} ++ ++int __init ++blktap_device_init() ++{ ++ int major; ++ ++ /* Dynamically allocate a major for this device */ ++ major = register_blkdev(0, "tapdev"); ++ if (major < 0) { ++ BTERR("Couldn't register blktap device\n"); ++ return -ENOMEM; ++ } ++ ++ blktap_device_major = major; ++ BTINFO("blktap device major %d\n", major); ++ ++ return 0; ++} ++ ++void ++blktap_device_exit(void) ++{ ++ if (blktap_device_major) ++ unregister_blkdev(blktap_device_major, "tapdev"); ++} +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/xen/blktap2-new/request.c 2011-02-24 13:49:49.000000000 +0100 +@@ -0,0 +1,418 @@ ++#include ++#include ++#include ++#include ++#include ++ ++#include "blktap.h" ++ ++/* max pages per shared pool. just to prevent accidental dos. */ ++#define POOL_MAX_PAGES (256*BLKIF_MAX_SEGMENTS_PER_REQUEST) ++ ++/* default page pool size. when considering to shrink a shared pool, ++ * note that paused tapdisks may grab a whole lot of pages for a long ++ * time. */ ++#define POOL_DEFAULT_PAGES (2 * MMAP_PAGES) ++ ++/* max number of pages allocatable per request. */ ++#define POOL_MAX_REQUEST_PAGES BLKIF_MAX_SEGMENTS_PER_REQUEST ++ ++/* min request structs per pool. These grow dynamically. */ ++#define POOL_MIN_REQS BLK_RING_SIZE ++ ++static struct kset *pool_set; ++ ++#define kobj_to_pool(_kobj) \ ++ container_of(_kobj, struct blktap_page_pool, kobj) ++ ++static struct kmem_cache *request_cache; ++static mempool_t *request_pool; ++ ++static void ++__page_pool_wake(struct blktap_page_pool *pool) ++{ ++ mempool_t *mem = pool->bufs; ++ ++ /* ++ NB. slightly wasteful to always wait for a full segment ++ set. but this ensures the next disk makes ++ progress. presently, the repeated request struct ++ alloc/release cycles would otherwise keep everyone spinning. 
++ */ ++ ++ if (mem->curr_nr >= POOL_MAX_REQUEST_PAGES) ++ wake_up(&pool->wait); ++} ++ ++int ++blktap_request_get_pages(struct blktap *tap, ++ struct blktap_request *request, int nr_pages) ++{ ++ struct blktap_page_pool *pool = tap->pool; ++ mempool_t *mem = pool->bufs; ++ struct page *page; ++ ++ BUG_ON(request->nr_pages != 0); ++ BUG_ON(nr_pages > POOL_MAX_REQUEST_PAGES); ++ ++ if (mem->curr_nr < nr_pages) ++ return -ENOMEM; ++ ++ /* NB. avoid thundering herds of tapdisks colliding. */ ++ spin_lock(&pool->lock); ++ ++ if (mem->curr_nr < nr_pages) { ++ spin_unlock(&pool->lock); ++ return -ENOMEM; ++ } ++ ++ while (request->nr_pages < nr_pages) { ++ page = mempool_alloc(mem, GFP_NOWAIT); ++ BUG_ON(!page); ++ request->pages[request->nr_pages++] = page; ++ } ++ ++ spin_unlock(&pool->lock); ++ ++ return 0; ++} ++ ++static void ++blktap_request_put_pages(struct blktap *tap, ++ struct blktap_request *request) ++{ ++ struct blktap_page_pool *pool = tap->pool; ++ struct page *page; ++ ++ while (request->nr_pages) { ++ page = request->pages[--request->nr_pages]; ++ mempool_free(page, pool->bufs); ++ } ++} ++ ++size_t ++blktap_request_debug(struct blktap *tap, char *buf, size_t size) ++{ ++ struct blktap_page_pool *pool = tap->pool; ++ mempool_t *mem = pool->bufs; ++ char *s = buf, *end = buf + size; ++ ++ s += snprintf(buf, end - s, ++ "pool:%s pages:%d free:%d\n", ++ kobject_name(&pool->kobj), ++ mem->min_nr, mem->curr_nr); ++ ++ return s - buf; ++} ++ ++struct blktap_request* ++blktap_request_alloc(struct blktap *tap) ++{ ++ struct blktap_request *request; ++ ++ request = mempool_alloc(request_pool, GFP_NOWAIT); ++ if (request) ++ request->tap = tap; ++ ++ return request; ++} ++ ++void ++blktap_request_free(struct blktap *tap, ++ struct blktap_request *request) ++{ ++ blktap_request_put_pages(tap, request); ++ ++ mempool_free(request, request_pool); ++ ++ __page_pool_wake(tap->pool); ++} ++ ++void ++blktap_request_bounce(struct blktap *tap, ++ struct blktap_request *request, ++ int seg, int write) ++{ ++ struct scatterlist *sg = &request->sg_table[seg]; ++ void *s, *p; ++ ++ BUG_ON(seg >= request->nr_pages); ++ ++ s = sg_virt(sg); ++ p = page_address(request->pages[seg]) + sg->offset; ++ ++ if (write) ++ memcpy(p, s, sg->length); ++ else ++ memcpy(s, p, sg->length); ++} ++ ++static void ++blktap_request_ctor(void *obj) ++{ ++ struct blktap_request *request = obj; ++ ++ memset(request, 0, sizeof(*request)); ++ sg_init_table(request->sg_table, ARRAY_SIZE(request->sg_table)); ++} ++ ++static int ++blktap_page_pool_resize(struct blktap_page_pool *pool, int target) ++{ ++ mempool_t *bufs = pool->bufs; ++ int err; ++ ++ /* NB. mempool asserts min_nr >= 1 */ ++ target = max(1, target); ++ ++ err = mempool_resize(bufs, target, GFP_KERNEL); ++ if (err) ++ return err; ++ ++ __page_pool_wake(pool); ++ ++ return 0; ++} ++ ++struct pool_attribute { ++ struct attribute attr; ++ ++ ssize_t (*show)(struct blktap_page_pool *pool, ++ char *buf); ++ ++ ssize_t (*store)(struct blktap_page_pool *pool, ++ const char *buf, size_t count); ++}; ++ ++#define kattr_to_pool_attr(_kattr) \ ++ container_of(_kattr, struct pool_attribute, attr) ++ ++static ssize_t ++blktap_page_pool_show_size(struct blktap_page_pool *pool, ++ char *buf) ++{ ++ mempool_t *mem = pool->bufs; ++ return sprintf(buf, "%d", mem->min_nr); ++} ++ ++static ssize_t ++blktap_page_pool_store_size(struct blktap_page_pool *pool, ++ const char *buf, size_t size) ++{ ++ int target; ++ ++ /* ++ * NB. target fixup to avoid undesired results. 
less than a ++ * full segment set can wedge the disk. much more than a ++ * couple times the physical queue depth is rarely useful. ++ */ ++ ++ target = simple_strtoul(buf, NULL, 0); ++ target = max(POOL_MAX_REQUEST_PAGES, target); ++ target = min(target, POOL_MAX_PAGES); ++ ++ return blktap_page_pool_resize(pool, target) ? : size; ++} ++ ++static struct pool_attribute blktap_page_pool_attr_size = ++ __ATTR(size, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH, ++ blktap_page_pool_show_size, ++ blktap_page_pool_store_size); ++ ++static ssize_t ++blktap_page_pool_show_free(struct blktap_page_pool *pool, ++ char *buf) ++{ ++ mempool_t *mem = pool->bufs; ++ return sprintf(buf, "%d", mem->curr_nr); ++} ++ ++static struct pool_attribute blktap_page_pool_attr_free = ++ __ATTR(free, S_IRUSR|S_IRGRP|S_IROTH, ++ blktap_page_pool_show_free, ++ NULL); ++ ++static struct attribute *blktap_page_pool_attrs[] = { ++ &blktap_page_pool_attr_size.attr, ++ &blktap_page_pool_attr_free.attr, ++ NULL, ++}; ++ ++static inline struct kobject* ++__blktap_kset_find_obj(struct kset *kset, const char *name) ++{ ++ struct kobject *k; ++ struct kobject *ret = NULL; ++ ++ spin_lock(&kset->list_lock); ++ list_for_each_entry(k, &kset->list, entry) { ++ if (kobject_name(k) && !strcmp(kobject_name(k), name)) { ++ ret = kobject_get(k); ++ break; ++ } ++ } ++ spin_unlock(&kset->list_lock); ++ return ret; ++} ++ ++static ssize_t ++blktap_page_pool_show_attr(struct kobject *kobj, struct attribute *kattr, ++ char *buf) ++{ ++ struct blktap_page_pool *pool = kobj_to_pool(kobj); ++ struct pool_attribute *attr = kattr_to_pool_attr(kattr); ++ ++ if (attr->show) ++ return attr->show(pool, buf); ++ ++ return -EIO; ++} ++ ++static ssize_t ++blktap_page_pool_store_attr(struct kobject *kobj, struct attribute *kattr, ++ const char *buf, size_t size) ++{ ++ struct blktap_page_pool *pool = kobj_to_pool(kobj); ++ struct pool_attribute *attr = kattr_to_pool_attr(kattr); ++ ++ if (attr->show) ++ return attr->store(pool, buf, size); ++ ++ return -EIO; ++} ++ ++static struct sysfs_ops blktap_page_pool_sysfs_ops = { ++ .show = blktap_page_pool_show_attr, ++ .store = blktap_page_pool_store_attr, ++}; ++ ++static void ++blktap_page_pool_release(struct kobject *kobj) ++{ ++ struct blktap_page_pool *pool = kobj_to_pool(kobj); ++ mempool_destroy(pool->bufs); ++ kfree(pool); ++} ++ ++struct kobj_type blktap_page_pool_ktype = { ++ .release = blktap_page_pool_release, ++ .sysfs_ops = &blktap_page_pool_sysfs_ops, ++ .default_attrs = blktap_page_pool_attrs, ++}; ++ ++static void* ++__mempool_page_alloc(gfp_t gfp_mask, void *pool_data) ++{ ++ struct page *page; ++ ++ if (!(gfp_mask & __GFP_WAIT)) ++ return NULL; ++ ++ page = alloc_page(gfp_mask); ++ if (page) ++ SetPageReserved(page); ++ ++ return page; ++} ++ ++static void ++__mempool_page_free(void *element, void *pool_data) ++{ ++ struct page *page = element; ++ ++ ClearPageReserved(page); ++ put_page(page); ++} ++ ++static struct kobject* ++blktap_page_pool_create(const char *name, int nr_pages) ++{ ++ struct blktap_page_pool *pool; ++ int err; ++ ++ pool = kzalloc(sizeof(*pool), GFP_KERNEL); ++ if (!pool) ++ goto fail; ++ ++ spin_lock_init(&pool->lock); ++ init_waitqueue_head(&pool->wait); ++ ++ pool->bufs = mempool_create(nr_pages, ++ __mempool_page_alloc, __mempool_page_free, ++ pool); ++ if (!pool->bufs) ++ goto fail_pool; ++ ++ kobject_init(&pool->kobj, &blktap_page_pool_ktype); ++ pool->kobj.kset = pool_set; ++ err = kobject_add(&pool->kobj, &pool_set->kobj, "%s", name); ++ if (err) ++ goto fail_bufs; ++ ++ 
return &pool->kobj; ++ ++ kobject_del(&pool->kobj); ++fail_bufs: ++ mempool_destroy(pool->bufs); ++fail_pool: ++ kfree(pool); ++fail: ++ return NULL; ++} ++ ++struct blktap_page_pool* ++blktap_page_pool_get(const char *name) ++{ ++ struct kobject *kobj; ++ ++ kobj = __blktap_kset_find_obj(pool_set, name); ++ if (!kobj) ++ kobj = blktap_page_pool_create(name, ++ POOL_DEFAULT_PAGES); ++ if (!kobj) ++ return ERR_PTR(-ENOMEM); ++ ++ return kobj_to_pool(kobj); ++} ++ ++int __init ++blktap_page_pool_init(struct kobject *parent) ++{ ++ request_cache = ++ kmem_cache_create("blktap-request", ++ sizeof(struct blktap_request), 0, ++ 0, blktap_request_ctor); ++ if (!request_cache) ++ return -ENOMEM; ++ ++ request_pool = ++ mempool_create_slab_pool(POOL_MIN_REQS, request_cache); ++ if (!request_pool) ++ return -ENOMEM; ++ ++ pool_set = kset_create_and_add("pools", NULL, parent); ++ if (!pool_set) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++void ++blktap_page_pool_exit(void) ++{ ++ if (pool_set) { ++ BUG_ON(!list_empty(&pool_set->list)); ++ kset_unregister(pool_set); ++ pool_set = NULL; ++ } ++ ++ if (request_pool) { ++ mempool_destroy(request_pool); ++ request_pool = NULL; ++ } ++ ++ if (request_cache) { ++ kmem_cache_destroy(request_cache); ++ request_cache = NULL; ++ } ++} +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/xen/blktap2-new/ring.c 2011-02-24 13:49:49.000000000 +0100 +@@ -0,0 +1,550 @@ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "blktap.h" ++ ++int blktap_ring_major; ++static struct cdev blktap_ring_cdev; ++ ++ /* ++ * BLKTAP - immediately before the mmap area, ++ * we have a bunch of pages reserved for shared memory rings. ++ */ ++#define RING_PAGES 1 ++ ++static void ++blktap_ring_read_response(struct blktap *tap, ++ const struct blkif_response *rsp) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ struct blktap_request *request; ++ int usr_idx, err; ++ ++ request = NULL; ++ ++ usr_idx = rsp->id; ++ if (usr_idx < 0 || usr_idx >= MAX_PENDING_REQS) { ++ err = -ERANGE; ++ goto invalid; ++ } ++ ++ request = ring->pending[usr_idx]; ++ ++ if (!request) { ++ err = -ESRCH; ++ goto invalid; ++ } ++ ++ if (rsp->operation != request->operation) { ++ err = -EINVAL; ++ goto invalid; ++ } ++ ++ dev_dbg(ring->dev, ++ "request %d [%p] response: %d\n", ++ request->usr_idx, request, rsp->status); ++ ++ err = rsp->status == BLKIF_RSP_OKAY ? 
0 : -EIO; ++end_request: ++ blktap_device_end_request(tap, request, err); ++ return; ++ ++invalid: ++ dev_warn(ring->dev, ++ "invalid response, idx:%d status:%d op:%d/%d: err %d\n", ++ usr_idx, rsp->status, ++ rsp->operation, request->operation, ++ err); ++ if (request) ++ goto end_request; ++} ++ ++static void ++blktap_read_ring(struct blktap *tap) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ struct blkif_response rsp; ++ RING_IDX rc, rp; ++ ++ down_read(¤t->mm->mmap_sem); ++ if (!ring->vma) { ++ up_read(¤t->mm->mmap_sem); ++ return; ++ } ++ ++ /* for each outstanding message on the ring */ ++ rp = ring->ring.sring->rsp_prod; ++ rmb(); ++ ++ for (rc = ring->ring.rsp_cons; rc != rp; rc++) { ++ memcpy(&rsp, RING_GET_RESPONSE(&ring->ring, rc), sizeof(rsp)); ++ blktap_ring_read_response(tap, &rsp); ++ } ++ ++ ring->ring.rsp_cons = rc; ++ ++ up_read(¤t->mm->mmap_sem); ++} ++ ++static int blktap_ring_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ return VM_FAULT_SIGBUS; ++} ++ ++static void ++blktap_ring_fail_pending(struct blktap *tap) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ struct blktap_request *request; ++ int usr_idx; ++ ++ for (usr_idx = 0; usr_idx < MAX_PENDING_REQS; usr_idx++) { ++ request = ring->pending[usr_idx]; ++ if (!request) ++ continue; ++ ++ blktap_device_end_request(tap, request, -EIO); ++ } ++} ++ ++static void ++blktap_ring_vm_close(struct vm_area_struct *vma) ++{ ++ struct blktap *tap = vma->vm_private_data; ++ struct blktap_ring *ring = &tap->ring; ++ struct page *page = virt_to_page(ring->ring.sring); ++ ++ blktap_ring_fail_pending(tap); ++ ++ zap_page_range(vma, vma->vm_start, PAGE_SIZE, NULL); ++ ClearPageReserved(page); ++ __free_page(page); ++ ++ ring->vma = NULL; ++ ++ if (test_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) ++ blktap_control_destroy_tap(tap); ++} ++ ++static struct vm_operations_struct blktap_ring_vm_operations = { ++ .close = blktap_ring_vm_close, ++ .fault = blktap_ring_fault, ++}; ++ ++int ++blktap_ring_map_segment(struct blktap *tap, ++ struct blktap_request *request, ++ int seg) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ unsigned long uaddr; ++ ++ uaddr = MMAP_VADDR(ring->user_vstart, request->usr_idx, seg); ++ return vm_insert_page(ring->vma, uaddr, request->pages[seg]); ++} ++ ++int ++blktap_ring_map_request(struct blktap *tap, ++ struct blktap_request *request) ++{ ++ int seg, err = 0; ++ int write; ++ ++ write = request->operation == BLKIF_OP_WRITE; ++ ++ for (seg = 0; seg < request->nr_pages; seg++) { ++ if (write) ++ blktap_request_bounce(tap, request, seg, write); ++ ++ err = blktap_ring_map_segment(tap, request, seg); ++ if (err) ++ break; ++ } ++ ++ if (err) ++ blktap_ring_unmap_request(tap, request); ++ ++ return err; ++} ++ ++void ++blktap_ring_unmap_request(struct blktap *tap, ++ struct blktap_request *request) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ unsigned long uaddr; ++ unsigned size; ++ int seg, read; ++ ++ uaddr = MMAP_VADDR(ring->user_vstart, request->usr_idx, 0); ++ size = request->nr_pages << PAGE_SHIFT; ++ read = request->operation == BLKIF_OP_READ; ++ ++ if (read) ++ for (seg = 0; seg < request->nr_pages; seg++) ++ blktap_request_bounce(tap, request, seg, !read); ++ ++ zap_page_range(ring->vma, uaddr, size, NULL); ++} ++ ++void ++blktap_ring_free_request(struct blktap *tap, ++ struct blktap_request *request) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ ++ ring->pending[request->usr_idx] = NULL; ++ ring->n_pending--; ++ ++ blktap_request_free(tap, request); ++} ++ ++struct 
blktap_request* ++blktap_ring_make_request(struct blktap *tap) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ struct blktap_request *request; ++ int usr_idx; ++ ++ if (RING_FULL(&ring->ring)) ++ return ERR_PTR(-ENOSPC); ++ ++ request = blktap_request_alloc(tap); ++ if (!request) ++ return ERR_PTR(-ENOMEM); ++ ++ for (usr_idx = 0; usr_idx < BLK_RING_SIZE; usr_idx++) ++ if (!ring->pending[usr_idx]) ++ break; ++ ++ BUG_ON(usr_idx >= BLK_RING_SIZE); ++ ++ request->tap = tap; ++ request->usr_idx = usr_idx; ++ ++ ring->pending[usr_idx] = request; ++ ring->n_pending++; ++ ++ return request; ++} ++ ++void ++blktap_ring_submit_request(struct blktap *tap, ++ struct blktap_request *request) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ struct blkif_request *breq; ++ struct scatterlist *sg; ++ int i, nsecs = 0; ++ ++ dev_dbg(ring->dev, ++ "request %d [%p] submit\n", request->usr_idx, request); ++ ++ breq = RING_GET_REQUEST(&ring->ring, ring->ring.req_prod_pvt); ++ ++ breq->id = request->usr_idx; ++ breq->sector_number = blk_rq_pos(request->rq); ++ breq->handle = 0; ++ breq->operation = request->operation; ++ breq->nr_segments = request->nr_pages; ++ ++ blktap_for_each_sg(sg, request, i) { ++ struct blkif_request_segment *seg = &breq->seg[i]; ++ int first, count; ++ ++ count = sg->length >> 9; ++ first = sg->offset >> 9; ++ ++ seg->first_sect = first; ++ seg->last_sect = first + count - 1; ++ ++ nsecs += count; ++ } ++ ++ ring->ring.req_prod_pvt++; ++ ++ do_gettimeofday(&request->time); ++ ++ ++ if (request->operation == BLKIF_OP_WRITE) { ++ tap->stats.st_wr_sect += nsecs; ++ tap->stats.st_wr_req++; ++ } ++ ++ if (request->operation == BLKIF_OP_READ) { ++ tap->stats.st_rd_sect += nsecs; ++ tap->stats.st_rd_req++; ++ } ++} ++ ++static int ++blktap_ring_open(struct inode *inode, struct file *filp) ++{ ++ struct blktap *tap = NULL; ++ int minor; ++ ++ minor = iminor(inode); ++ ++ if (minor < blktap_max_minor) ++ tap = blktaps[minor]; ++ ++ if (!tap) ++ return -ENXIO; ++ ++ if (test_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) ++ return -ENXIO; ++ ++ if (tap->ring.task) ++ return -EBUSY; ++ ++ filp->private_data = tap; ++ tap->ring.task = current; ++ ++ return 0; ++} ++ ++static int ++blktap_ring_release(struct inode *inode, struct file *filp) ++{ ++ struct blktap *tap = filp->private_data; ++ ++ blktap_device_destroy_sync(tap); ++ ++ tap->ring.task = NULL; ++ ++ if (test_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) ++ blktap_control_destroy_tap(tap); ++ ++ return 0; ++} ++ ++static int ++blktap_ring_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct blktap *tap = filp->private_data; ++ struct blktap_ring *ring = &tap->ring; ++ struct blkif_sring *sring; ++ struct page *page = NULL; ++ int err; ++ ++ if (ring->vma) ++ return -EBUSY; ++ ++ page = alloc_page(GFP_KERNEL|__GFP_ZERO); ++ if (!page) ++ return -ENOMEM; ++ ++ SetPageReserved(page); ++ ++ err = vm_insert_page(vma, vma->vm_start, page); ++ if (err) ++ goto fail; ++ ++ sring = page_address(page); ++ SHARED_RING_INIT(sring); ++ FRONT_RING_INIT(&ring->ring, sring, PAGE_SIZE); ++ ++ ring->ring_vstart = vma->vm_start; ++ ring->user_vstart = ring->ring_vstart + PAGE_SIZE; ++ ++ vma->vm_private_data = tap; ++ ++ vma->vm_flags |= VM_DONTCOPY; ++ vma->vm_flags |= VM_RESERVED; ++ ++ vma->vm_ops = &blktap_ring_vm_operations; ++ ++ ring->vma = vma; ++ return 0; ++ ++fail: ++ if (page) { ++ zap_page_range(vma, vma->vm_start, PAGE_SIZE, NULL); ++ ClearPageReserved(page); ++ __free_page(page); ++ } ++ ++ return err; ++} ++ ++static int 
++blktap_ring_ioctl(struct inode *inode, struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ struct blktap *tap = filp->private_data; ++ struct blktap_ring *ring = &tap->ring; ++ ++ BTDBG("%d: cmd: %u, arg: %lu\n", tap->minor, cmd, arg); ++ ++ if (!ring->vma || ring->vma->vm_mm != current->mm) ++ return -EACCES; ++ ++ switch(cmd) { ++ case BLKTAP2_IOCTL_KICK_FE: ++ ++ blktap_read_ring(tap); ++ return 0; ++ ++ case BLKTAP2_IOCTL_CREATE_DEVICE: { ++ struct blktap_params params; ++ void __user *ptr = (void *)arg; ++ ++ if (!arg) ++ return -EINVAL; ++ ++ if (copy_from_user(¶ms, ptr, sizeof(params))) ++ return -EFAULT; ++ ++ return blktap_device_create(tap, ¶ms); ++ } ++ ++ case BLKTAP2_IOCTL_REMOVE_DEVICE: ++ ++ return blktap_device_destroy(tap); ++ } ++ ++ return -ENOIOCTLCMD; ++} ++ ++static unsigned int blktap_ring_poll(struct file *filp, poll_table *wait) ++{ ++ struct blktap *tap = filp->private_data; ++ struct blktap_ring *ring = &tap->ring; ++ int work; ++ ++ poll_wait(filp, &tap->pool->wait, wait); ++ poll_wait(filp, &ring->poll_wait, wait); ++ ++ down_read(¤t->mm->mmap_sem); ++ if (ring->vma && tap->device.gd) ++ blktap_device_run_queue(tap); ++ up_read(¤t->mm->mmap_sem); ++ ++ work = ring->ring.req_prod_pvt - ring->ring.sring->req_prod; ++ RING_PUSH_REQUESTS(&ring->ring); ++ ++ if (work || ++ ring->ring.sring->private.tapif_user.msg || ++ test_and_clear_bit(BLKTAP_DEVICE_CLOSED, &tap->dev_inuse)) ++ return POLLIN | POLLRDNORM; ++ ++ return 0; ++} ++ ++static struct file_operations blktap_ring_file_operations = { ++ .owner = THIS_MODULE, ++ .open = blktap_ring_open, ++ .release = blktap_ring_release, ++ .ioctl = blktap_ring_ioctl, ++ .mmap = blktap_ring_mmap, ++ .poll = blktap_ring_poll, ++}; ++ ++void ++blktap_ring_kick_user(struct blktap *tap) ++{ ++ wake_up(&tap->ring.poll_wait); ++} ++ ++int ++blktap_ring_destroy(struct blktap *tap) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ ++ if (ring->task || ring->vma) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++int ++blktap_ring_create(struct blktap *tap) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ ++ init_waitqueue_head(&ring->poll_wait); ++ ring->devno = MKDEV(blktap_ring_major, tap->minor); ++ ++ return 0; ++} ++ ++size_t ++blktap_ring_debug(struct blktap *tap, char *buf, size_t size) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ char *s = buf, *end = buf + size; ++ int usr_idx; ++ ++ s += snprintf(s, end - s, ++ "begin pending:%d\n", ring->n_pending); ++ ++ for (usr_idx = 0; usr_idx < MAX_PENDING_REQS; usr_idx++) { ++ struct blktap_request *request; ++ struct timeval *time; ++ int write; ++ ++ request = ring->pending[usr_idx]; ++ if (!request) ++ continue; ++ ++ write = request->operation == BLKIF_OP_WRITE; ++ time = &request->time; ++ ++ s += snprintf(s, end - s, ++ "%02d: usr_idx:%02d " ++ "op:%c nr_pages:%02d time:%lu.%09lu\n", ++ usr_idx, request->usr_idx, ++ write ? 
'W' : 'R', request->nr_pages, ++ time->tv_sec, time->tv_usec); ++ } ++ ++ s += snprintf(s, end - s, "end pending\n"); ++ ++ return s - buf; ++} ++ ++ ++int __init ++blktap_ring_init(void) ++{ ++ dev_t dev = 0; ++ int err; ++ ++ cdev_init(&blktap_ring_cdev, &blktap_ring_file_operations); ++ blktap_ring_cdev.owner = THIS_MODULE; ++ ++ err = alloc_chrdev_region(&dev, 0, MAX_BLKTAP_DEVICE, "blktap2"); ++ if (err < 0) { ++ BTERR("error registering ring devices: %d\n", err); ++ return err; ++ } ++ ++ err = cdev_add(&blktap_ring_cdev, dev, MAX_BLKTAP_DEVICE); ++ if (err) { ++ BTERR("error adding ring device: %d\n", err); ++ unregister_chrdev_region(dev, MAX_BLKTAP_DEVICE); ++ return err; ++ } ++ ++ blktap_ring_major = MAJOR(dev); ++ BTINFO("blktap ring major: %d\n", blktap_ring_major); ++ ++ return 0; ++} ++ ++void ++blktap_ring_exit(void) ++{ ++ if (!blktap_ring_major) ++ return; ++ ++ cdev_del(&blktap_ring_cdev); ++ unregister_chrdev_region(MKDEV(blktap_ring_major, 0), ++ MAX_BLKTAP_DEVICE); ++ ++ blktap_ring_major = 0; ++} +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/xen/blktap2-new/sysfs.c 2011-02-24 13:49:49.000000000 +0100 +@@ -0,0 +1,288 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "blktap.h" ++ ++int blktap_debug_level = 1; ++ ++static struct class *class; ++ ++static ssize_t ++blktap_sysfs_set_name(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) ++{ ++ struct blktap *tap; ++ ++ tap = dev_get_drvdata(dev); ++ if (!tap) ++ return 0; ++ ++ if (size >= BLKTAP2_MAX_MESSAGE_LEN) ++ return -ENAMETOOLONG; ++ ++ if (strnlen(buf, size) != size) ++ return -EINVAL; ++ ++ strcpy(tap->name, buf); ++ ++ return size; ++} ++ ++static ssize_t ++blktap_sysfs_get_name(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct blktap *tap; ++ ssize_t size; ++ ++ tap = dev_get_drvdata(dev); ++ if (!tap) ++ return 0; ++ ++ if (tap->name[0]) ++ size = sprintf(buf, "%s\n", tap->name); ++ else ++ size = sprintf(buf, "%d\n", tap->minor); ++ ++ return size; ++} ++static DEVICE_ATTR(name, S_IRUGO|S_IWUSR, ++ blktap_sysfs_get_name, blktap_sysfs_set_name); ++ ++static void ++blktap_sysfs_remove_work(struct work_struct *work) ++{ ++ struct blktap *tap ++ = container_of(work, struct blktap, remove_work); ++ blktap_control_destroy_tap(tap); ++} ++ ++static ssize_t ++blktap_sysfs_remove_device(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct blktap *tap; ++ int err; ++ ++ tap = dev_get_drvdata(dev); ++ if (!tap) ++ return size; ++ ++ if (test_and_set_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) ++ goto wait; ++ ++ if (tap->ring.vma) { ++ struct blkif_sring *sring = tap->ring.ring.sring; ++ sring->private.tapif_user.msg = BLKTAP2_RING_MESSAGE_CLOSE; ++ blktap_ring_kick_user(tap); ++ } else { ++ INIT_WORK(&tap->remove_work, blktap_sysfs_remove_work); ++ schedule_work(&tap->remove_work); ++ } ++wait: ++ err = wait_event_interruptible(tap->remove_wait, ++ !dev_get_drvdata(dev)); ++ if (err) ++ return err; ++ ++ return size; ++} ++static DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); ++ ++static ssize_t ++blktap_sysfs_debug_device(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct blktap *tap; ++ char *s = buf, *end = buf + PAGE_SIZE; ++ ++ tap = dev_get_drvdata(dev); ++ if (!tap) ++ return 0; ++ ++ s += blktap_control_debug(tap, s, end - s); ++ ++ s += blktap_request_debug(tap, s, end - s); ++ ++ s += 
blktap_device_debug(tap, s, end - s); ++ ++ s += blktap_ring_debug(tap, s, end - s); ++ ++ return s - buf; ++} ++static DEVICE_ATTR(debug, S_IRUGO, blktap_sysfs_debug_device, NULL); ++ ++static ssize_t ++blktap_sysfs_show_task(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct blktap *tap; ++ ssize_t rv = 0; ++ ++ tap = dev_get_drvdata(dev); ++ if (!tap) ++ return 0; ++ ++ if (tap->ring.task) ++ rv = sprintf(buf, "%d\n", tap->ring.task->pid); ++ ++ return rv; ++} ++static DEVICE_ATTR(task, S_IRUGO, blktap_sysfs_show_task, NULL); ++ ++static ssize_t ++blktap_sysfs_show_pool(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct blktap *tap = dev_get_drvdata(dev); ++ return sprintf(buf, "%s", kobject_name(&tap->pool->kobj)); ++} ++ ++static ssize_t ++blktap_sysfs_store_pool(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct blktap *tap = dev_get_drvdata(dev); ++ struct blktap_page_pool *pool, *tmp = tap->pool; ++ ++ if (tap->device.gd) ++ return -EBUSY; ++ ++ pool = blktap_page_pool_get(buf); ++ if (IS_ERR(pool)) ++ return PTR_ERR(pool); ++ ++ tap->pool = pool; ++ kobject_put(&tmp->kobj); ++ ++ return size; ++} ++DEVICE_ATTR(pool, S_IRUSR|S_IWUSR, ++ blktap_sysfs_show_pool, blktap_sysfs_store_pool); ++ ++int ++blktap_sysfs_create(struct blktap *tap) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ struct device *dev; ++ int err = 0; ++ ++ init_waitqueue_head(&tap->remove_wait); ++ ++ dev = device_create(class, NULL, ring->devno, ++ tap, "blktap%d", tap->minor); ++ if (IS_ERR(dev)) ++ err = PTR_ERR(dev); ++ if (!err) ++ err = device_create_file(dev, &dev_attr_name); ++ if (!err) ++ err = device_create_file(dev, &dev_attr_remove); ++ if (!err) ++ err = device_create_file(dev, &dev_attr_debug); ++ if (!err) ++ err = device_create_file(dev, &dev_attr_task); ++ if (!err) ++ err = device_create_file(dev, &dev_attr_pool); ++ if (!err) ++ ring->dev = dev; ++ else ++ device_unregister(dev); ++ ++ return err; ++} ++ ++void ++blktap_sysfs_destroy(struct blktap *tap) ++{ ++ struct blktap_ring *ring = &tap->ring; ++ struct device *dev; ++ ++ dev = ring->dev; ++ ++ if (!dev) ++ return; ++ ++ dev_set_drvdata(dev, NULL); ++ wake_up(&tap->remove_wait); ++ ++ device_unregister(dev); ++ ring->dev = NULL; ++} ++ ++static ssize_t ++blktap_sysfs_show_verbosity(struct class *class, char *buf) ++{ ++ return sprintf(buf, "%d\n", blktap_debug_level); ++} ++ ++static ssize_t ++blktap_sysfs_set_verbosity(struct class *class, const char *buf, size_t size) ++{ ++ int level; ++ ++ if (sscanf(buf, "%d", &level) == 1) { ++ blktap_debug_level = level; ++ return size; ++ } ++ ++ return -EINVAL; ++} ++static CLASS_ATTR(verbosity, S_IRUGO|S_IWUSR, ++ blktap_sysfs_show_verbosity, blktap_sysfs_set_verbosity); ++ ++static ssize_t ++blktap_sysfs_show_devices(struct class *class, char *buf) ++{ ++ int i, ret; ++ struct blktap *tap; ++ ++ mutex_lock(&blktap_lock); ++ ++ ret = 0; ++ for (i = 0; i < blktap_max_minor; i++) { ++ tap = blktaps[i]; ++ if (!tap) ++ continue; ++ ++ if (!test_bit(BLKTAP_DEVICE, &tap->dev_inuse)) ++ continue; ++ ++ ret += sprintf(buf + ret, "%d %s\n", tap->minor, tap->name); ++ } ++ ++ mutex_unlock(&blktap_lock); ++ ++ return ret; ++} ++static CLASS_ATTR(devices, S_IRUGO, blktap_sysfs_show_devices, NULL); ++ ++void ++blktap_sysfs_exit(void) ++{ ++ if (class) ++ class_destroy(class); ++} ++ ++int __init ++blktap_sysfs_init(void) ++{ ++ struct class *cls; ++ int err = 0; ++ ++ cls = class_create(THIS_MODULE, 
"blktap2"); ++ if (IS_ERR(cls)) ++ err = PTR_ERR(cls); ++ if (!err) ++ err = class_create_file(cls, &class_attr_verbosity); ++ if (!err) ++ err = class_create_file(cls, &class_attr_devices); ++ if (!err) ++ class = cls; ++ else ++ class_destroy(cls); ++ ++ return err; ++} diff --git a/patches.xen/xen3-auto-common.diff b/patches.xen/xen3-auto-common.diff index 5c616ef..abfdd0d 100644 --- a/patches.xen/xen3-auto-common.diff +++ b/patches.xen/xen3-auto-common.diff @@ -1,30 +1,36 @@ Subject: xen3 common -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com List of files that don't require modification anymore (and hence got removed from this patch), for reference and in case upstream wants to take the forward porting patches: +2.6.19/include/linux/skbuff.h +2.6.19/net/core/dev.c +2.6.19/net/core/skbuff.c +2.6.19/net/ipv4/netfilter/nf_nat_proto_tcp.c +2.6.19/net/ipv4/netfilter/nf_nat_proto_udp.c +2.6.19/net/ipv4/xfrm4_output.c 2.6.22/include/linux/sched.h 2.6.22/kernel/softlockup.c 2.6.22/kernel/timer.c 2.6.25/mm/highmem.c 2.6.30/include/linux/pci_regs.h ---- head-2010-05-25.orig/drivers/Makefile 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/Makefile 2010-05-12 08:58:07.000000000 +0200 -@@ -34,6 +34,7 @@ obj-$(CONFIG_PARPORT) += parport/ - obj-y += base/ block/ misc/ mfd/ +--- head-2011-03-11.orig/drivers/Makefile 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/Makefile 2011-01-31 14:53:38.000000000 +0100 +@@ -35,6 +35,7 @@ obj-$(CONFIG_PARPORT) += parport/ + obj-y += base/ block/ misc/ mfd/ nfc/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ +obj-$(CONFIG_XEN) += xen/ obj-$(CONFIG_IDE) += ide/ obj-$(CONFIG_SCSI) += scsi/ obj-$(CONFIG_ATA) += ata/ ---- head-2010-05-25.orig/drivers/acpi/Makefile 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/Makefile 2010-03-24 14:53:41.000000000 +0100 -@@ -64,5 +64,8 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_ +--- head-2011-03-11.orig/drivers/acpi/Makefile 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/Makefile 2011-01-31 14:53:38.000000000 +0100 +@@ -67,6 +67,9 @@ obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys. 
processor-y := processor_driver.o processor_throttling.o processor-y += processor_idle.o processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o @@ -33,8 +39,9 @@ take the forward porting patches: +endif obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o ---- head-2010-05-25.orig/drivers/acpi/acpica/hwsleep.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/acpica/hwsleep.c 2010-03-24 14:53:41.000000000 +0100 + obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o +--- head-2011-03-11.orig/drivers/acpi/acpica/hwsleep.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/acpica/hwsleep.c 2011-01-31 14:53:38.000000000 +0100 @@ -236,7 +236,11 @@ acpi_status asmlinkage acpi_enter_sleep_ u32 pm1b_control; struct acpi_bit_register_info *sleep_type_reg_info; @@ -72,9 +79,9 @@ take the forward porting patches: return_ACPI_STATUS(AE_OK); } ---- head-2010-05-25.orig/drivers/acpi/processor_driver.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_driver.c 2010-05-25 09:19:10.000000000 +0200 -@@ -443,7 +443,8 @@ static int acpi_processor_get_info(struc +--- head-2011-03-11.orig/drivers/acpi/processor_driver.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_driver.c 2011-01-31 14:53:38.000000000 +0100 +@@ -325,7 +325,8 @@ static int acpi_processor_get_info(struc */ if (pr->id == -1) { if (ACPI_FAILURE @@ -84,7 +91,7 @@ take the forward porting patches: return -ENODEV; } } -@@ -494,7 +495,11 @@ static int acpi_processor_get_info(struc +@@ -376,7 +377,11 @@ static int acpi_processor_get_info(struc return 0; } @@ -96,7 +103,7 @@ take the forward porting patches: static void acpi_processor_notify(struct acpi_device *device, u32 event) { -@@ -575,8 +580,11 @@ static int __cpuinit acpi_processor_add( +@@ -462,8 +467,11 @@ static int __cpuinit acpi_processor_add( strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); device->driver_data = pr; @@ -109,7 +116,7 @@ take the forward porting patches: /* Processor is physically not present */ return 0; } -@@ -586,23 +594,36 @@ static int __cpuinit acpi_processor_add( +@@ -473,23 +481,36 @@ static int __cpuinit acpi_processor_add( return 0; #endif @@ -145,10 +152,10 @@ take the forward porting patches: + per_cpu(processors, pr->id) = pr; +#endif - result = acpi_processor_add_fs(device); - if (result) -@@ -614,15 +635,27 @@ static int __cpuinit acpi_processor_add( - goto err_remove_fs; + sysdev = get_cpu_sysdev(pr->id); + if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { +@@ -497,16 +518,28 @@ static int __cpuinit acpi_processor_add( + goto err_free_cpumask; } -#ifdef CONFIG_CPU_FREQ @@ -169,7 +176,8 @@ take the forward porting patches: + acpi_processor_get_limit_info(pr); + } - acpi_processor_power_init(pr, device); + if (cpuidle_get_driver() == &acpi_idle_driver) + acpi_processor_power_init(pr, device); + result = processor_extcntl_prepare(pr); + if (result) @@ -178,7 +186,7 @@ take the forward porting patches: pr->cdev = thermal_cooling_device_register("Processor", device, &processor_cooling_ops); if (IS_ERR(pr->cdev)) { -@@ -674,7 +707,7 @@ static int acpi_processor_remove(struct +@@ -556,7 +589,7 @@ static int acpi_processor_remove(struct pr = acpi_driver_data(device); @@ -187,7 +195,7 @@ take the forward porting patches: goto free; if (type == ACPI_BUS_REMOVAL_EJECT) { -@@ -695,8 +728,14 @@ static int acpi_processor_remove(struct +@@ -575,8 +608,14 @@ static int acpi_processor_remove(struct pr->cdev = NULL; } @@ -202,7 +210,7 @@ take 
the forward porting patches: free: free_cpumask_var(pr->throttling.shared_cpu_map); -@@ -752,6 +791,10 @@ int acpi_processor_device_add(acpi_handl +@@ -632,6 +671,10 @@ int acpi_processor_device_add(acpi_handl return -ENODEV; } @@ -213,7 +221,7 @@ take the forward porting patches: return 0; } -@@ -781,6 +824,10 @@ static void __ref acpi_processor_hotplug +@@ -661,6 +704,10 @@ static void __ref acpi_processor_hotplug "Unable to add the device\n"); break; } @@ -224,7 +232,7 @@ take the forward porting patches: break; case ACPI_NOTIFY_EJECT_REQUEST: ACPI_DEBUG_PRINT((ACPI_DB_INFO, -@@ -797,6 +844,9 @@ static void __ref acpi_processor_hotplug +@@ -677,6 +724,9 @@ static void __ref acpi_processor_hotplug "Driver data is NULL, dropping EJECT\n"); return; } @@ -234,7 +242,7 @@ take the forward porting patches: break; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, -@@ -861,6 +911,11 @@ static acpi_status acpi_processor_hotadd +@@ -741,6 +791,11 @@ static acpi_status acpi_processor_hotadd static int acpi_processor_handle_eject(struct acpi_processor *pr) { @@ -247,7 +255,7 @@ take the forward porting patches: cpu_down(pr->id); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/drivers/acpi/processor_extcntl.c 2010-03-24 14:53:41.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_extcntl.c 2011-01-31 14:53:38.000000000 +0100 @@ -0,0 +1,241 @@ +/* + * processor_extcntl.c - channel to external control logic @@ -490,9 +498,9 @@ take the forward porting patches: + kfree(perf); + return ret; +} ---- head-2010-05-25.orig/drivers/acpi/processor_idle.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_idle.c 2010-04-15 09:43:01.000000000 +0200 -@@ -456,7 +456,8 @@ static int acpi_processor_get_power_info +--- head-2011-03-11.orig/drivers/acpi/processor_idle.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_idle.c 2011-01-31 14:53:38.000000000 +0100 +@@ -458,7 +458,8 @@ static int acpi_processor_get_power_info */ cx.entry_method = ACPI_CSTATE_HALT; snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); @@ -502,7 +510,7 @@ take the forward porting patches: continue; } if (cx.type == ACPI_STATE_C1 && -@@ -495,6 +496,12 @@ static int acpi_processor_get_power_info +@@ -497,6 +498,12 @@ static int acpi_processor_get_power_info cx.power = obj->integer.value; @@ -515,10 +523,10 @@ take the forward porting patches: current_count++; memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); -@@ -1229,6 +1236,11 @@ int __cpuinit acpi_processor_power_init( - if (!entry) - return -EIO; - #endif +@@ -1130,6 +1137,11 @@ int __cpuinit acpi_processor_power_init( + if (cpuidle_register_device(&pr->power.dev)) + return -EIO; + } + + if (processor_pm_external()) + processor_notify_external(pr, @@ -527,8 +535,8 @@ take the forward porting patches: return 0; } ---- head-2010-05-25.orig/drivers/acpi/processor_perflib.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_perflib.c 2010-04-15 09:43:05.000000000 +0200 +--- head-2011-03-11.orig/drivers/acpi/processor_perflib.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_perflib.c 2011-01-31 14:53:38.000000000 +0100 @@ -79,6 +79,7 @@ MODULE_PARM_DESC(ignore_ppc, "If the fre static int acpi_processor_ppc_status; @@ -608,8 +616,8 @@ take the forward porting patches: { int result = 0; acpi_status status = AE_OK; ---- head-2010-05-25.orig/drivers/acpi/sleep.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/sleep.c 2010-03-24 
14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/acpi/sleep.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/sleep.c 2011-01-31 14:53:38.000000000 +0100 @@ -60,6 +60,7 @@ static struct notifier_block tts_notifie static int acpi_sleep_prepare(u32 acpi_state) { @@ -624,9 +632,9 @@ take the forward porting patches: } +#endif ACPI_FLUSH_CPU_CACHE(); - acpi_enable_wakeup_device_prep(acpi_state); #endif -@@ -249,7 +251,14 @@ static int acpi_suspend_enter(suspend_st + printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n", +@@ -264,7 +266,14 @@ static int acpi_suspend_enter(suspend_st break; case ACPI_STATE_S3: @@ -641,9 +649,9 @@ take the forward porting patches: break; } ---- head-2010-05-25.orig/drivers/char/agp/intel-agp.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/intel-agp.c 2010-04-15 09:43:13.000000000 +0200 -@@ -443,6 +443,13 @@ static struct page *i8xx_alloc_pages(voi +--- head-2011-03-11.orig/drivers/char/agp/intel-gtt.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/char/agp/intel-gtt.c 2011-03-11 10:51:50.000000000 +0100 +@@ -147,6 +147,13 @@ static struct page *i8xx_alloc_pages(voi if (page == NULL) return NULL; @@ -657,7 +665,7 @@ take the forward porting patches: if (set_pages_uc(page, 4) < 0) { set_pages_wb(page, 4); __free_pages(page, 2); -@@ -459,6 +466,9 @@ static void i8xx_destroy_pages(struct pa +@@ -163,6 +170,9 @@ static void i8xx_destroy_pages(struct pa return; set_pages_wb(page, 4); @@ -667,8 +675,8 @@ take the forward porting patches: put_page(page); __free_pages(page, 2); atomic_dec(&agp_bridge->current_memory_agp); ---- head-2010-05-25.orig/drivers/char/mem.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/char/mem.c 2010-04-15 09:43:24.000000000 +0200 +--- head-2011-03-11.orig/drivers/char/mem.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/char/mem.c 2011-01-31 14:53:38.000000000 +0100 @@ -89,6 +89,7 @@ void __weak unxlate_dev_mem_ptr(unsigned { } @@ -727,16 +735,16 @@ take the forward porting patches: #ifdef CONFIG_DEVKMEM static const struct file_operations kmem_fops = { ---- head-2010-05-25.orig/drivers/char/tpm/Makefile 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/char/tpm/Makefile 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/char/tpm/Makefile 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/Makefile 2011-01-31 14:53:38.000000000 +0100 @@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o obj-$(CONFIG_TCG_NSC) += tpm_nsc.o obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o +obj-$(CONFIG_TCG_XEN) += tpm_xenu.o +tpm_xenu-y = tpm_xen.o tpm_vtpm.o ---- head-2010-05-25.orig/drivers/char/tpm/tpm.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/char/tpm/tpm.h 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/char/tpm/tpm.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm.h 2011-03-11 10:51:58.000000000 +0100 @@ -108,6 +108,9 @@ struct tpm_chip { struct dentry **bios_dir; @@ -747,7 +755,7 @@ take the forward porting patches: void (*release) (struct device *); }; -@@ -266,6 +269,18 @@ struct tpm_cmd_t { +@@ -272,6 +275,18 @@ struct tpm_cmd_t { ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *); @@ -767,7 +775,7 @@ take the forward porting patches: extern void tpm_gen_interrupt(struct tpm_chip *); extern void tpm_continue_selftest(struct tpm_chip *); --- 
/dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/drivers/char/tpm/tpm_vtpm.c 2010-03-24 14:53:41.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_vtpm.c 2011-01-31 14:53:38.000000000 +0100 @@ -0,0 +1,542 @@ +/* + * Copyright (C) 2006 IBM Corporation @@ -1312,7 +1320,7 @@ take the forward porting patches: + kfree(vtpms); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/drivers/char/tpm/tpm_vtpm.h 2010-03-24 14:53:41.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_vtpm.h 2011-01-31 14:53:38.000000000 +0100 @@ -0,0 +1,55 @@ +#ifndef TPM_VTPM_H +#define TPM_VTPM_H @@ -1370,7 +1378,7 @@ take the forward porting patches: + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/drivers/char/tpm/tpm_xen.c 2010-03-24 14:53:41.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_xen.c 2011-01-31 14:53:38.000000000 +0100 @@ -0,0 +1,722 @@ +/* + * Copyright (c) 2005, IBM Corporation @@ -2094,9 +2102,9 @@ take the forward porting patches: +module_init(tpmif_init); + +MODULE_LICENSE("Dual BSD/GPL"); ---- head-2010-05-25.orig/drivers/edac/edac_mc.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/edac/edac_mc.c 2010-03-24 14:53:41.000000000 +0100 -@@ -602,6 +602,10 @@ static void edac_mc_scrub_block(unsigned +--- head-2011-03-11.orig/drivers/edac/edac_mc.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/edac/edac_mc.c 2011-01-31 14:53:38.000000000 +0100 +@@ -615,6 +615,10 @@ static void edac_mc_scrub_block(unsigned debugf3("%s()\n", __func__); @@ -2107,8 +2115,8 @@ take the forward porting patches: /* ECC error page was not in our memory. Ignore it. */ if (!pfn_valid(page)) return; ---- head-2010-05-25.orig/drivers/firmware/dell_rbu.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/firmware/dell_rbu.c 2010-04-15 09:43:35.000000000 +0200 +--- head-2011-03-11.orig/drivers/firmware/dell_rbu.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/firmware/dell_rbu.c 2011-01-31 14:53:38.000000000 +0100 @@ -170,9 +170,28 @@ static int create_packet(void *data, siz spin_lock(&rbu_data.lock); goto out_alloc_packet_array; @@ -2215,8 +2223,8 @@ take the forward porting patches: spin_lock_init(&rbu_data.lock); init_packet_head(); ---- head-2010-05-25.orig/drivers/ide/ide-lib.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/ide/ide-lib.c 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/ide/ide-lib.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/ide/ide-lib.c 2011-01-31 14:53:38.000000000 +0100 @@ -18,12 +18,12 @@ void ide_toggle_bounce(ide_drive_t *driv { u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ @@ -2234,8 +2242,8 @@ take the forward porting patches: addr = *dev->dma_mask; } ---- head-2010-05-25.orig/drivers/oprofile/buffer_sync.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/buffer_sync.c 2010-04-15 09:43:44.000000000 +0200 +--- head-2011-03-11.orig/drivers/oprofile/buffer_sync.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/oprofile/buffer_sync.c 2011-01-31 14:53:38.000000000 +0100 @@ -8,6 +8,10 @@ * @author Barry Kasindorf * @author Robert Richter @@ -2264,7 +2272,7 @@ take the forward porting patches: /* The task is on its way out. A sync of the buffer means we can catch * any remaining samples for this task. 
*/ -@@ -154,6 +159,11 @@ static void end_sync(void) +@@ -144,6 +149,11 @@ static struct notifier_block module_load int sync_start(void) { int err; @@ -2276,7 +2284,7 @@ take the forward porting patches: if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) return -ENOMEM; -@@ -285,13 +295,29 @@ static void add_cpu_switch(int i) +@@ -286,13 +296,29 @@ static void add_cpu_switch(int i) last_cookie = INVALID_COOKIE; } @@ -2310,7 +2318,7 @@ take the forward porting patches: } static void -@@ -372,12 +398,12 @@ static inline void add_sample_entry(unsi +@@ -373,12 +399,12 @@ static inline void add_sample_entry(unsi * for later lookup from userspace. Return 0 on failure. */ static int @@ -2325,7 +2333,7 @@ take the forward porting patches: add_sample_entry(s->eip, s->event); return 1; } -@@ -502,9 +528,10 @@ void sync_buffer(int cpu) +@@ -503,9 +529,10 @@ void sync_buffer(int cpu) unsigned long val; struct task_struct *new; unsigned long cookie = 0; @@ -2337,7 +2345,7 @@ take the forward porting patches: unsigned long available; unsigned long flags; struct op_entry entry; -@@ -514,6 +541,11 @@ void sync_buffer(int cpu) +@@ -515,6 +542,11 @@ void sync_buffer(int cpu) add_cpu_switch(cpu); @@ -2349,7 +2357,7 @@ take the forward porting patches: op_cpu_buffer_reset(cpu); available = op_cpu_buffer_entries(cpu); -@@ -522,6 +554,13 @@ void sync_buffer(int cpu) +@@ -523,6 +555,13 @@ void sync_buffer(int cpu) if (!sample) break; @@ -2363,7 +2371,7 @@ take the forward porting patches: if (is_code(sample->eip)) { flags = sample->event; if (flags & TRACE_BEGIN) { -@@ -530,10 +569,10 @@ void sync_buffer(int cpu) +@@ -531,10 +570,10 @@ void sync_buffer(int cpu) } if (flags & KERNEL_CTX_SWITCH) { /* kernel/userspace switch */ @@ -2376,7 +2384,7 @@ take the forward porting patches: } if (flags & USER_CTX_SWITCH && op_cpu_buffer_get_data(&entry, &val)) { -@@ -546,16 +585,23 @@ void sync_buffer(int cpu) +@@ -547,16 +586,23 @@ void sync_buffer(int cpu) cookie = get_exec_dcookie(mm); add_user_ctx_switch(new, cookie); } @@ -2401,7 +2409,7 @@ take the forward porting patches: continue; /* ignore backtraces if failed to add a sample */ -@@ -566,6 +612,10 @@ void sync_buffer(int cpu) +@@ -567,6 +613,10 @@ void sync_buffer(int cpu) } release_mm(mm); @@ -2412,8 +2420,8 @@ take the forward porting patches: mark_done(cpu); mutex_unlock(&buffer_mutex); ---- head-2010-05-25.orig/drivers/oprofile/cpu_buffer.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/cpu_buffer.c 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/oprofile/cpu_buffer.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/oprofile/cpu_buffer.c 2011-01-31 14:53:38.000000000 +0100 @@ -8,6 +8,10 @@ * @author Barry Kasindorf * @author Robert Richter @@ -2425,7 +2433,7 @@ take the forward porting patches: * Each CPU has a local buffer that stores PC value/event * pairs. We also log context switches when we notice them. 
* Eventually each CPU's buffer is processed into the global -@@ -54,6 +58,8 @@ static void wq_sync_buffer(struct work_s +@@ -38,6 +42,8 @@ static void wq_sync_buffer(struct work_s #define DEFAULT_TIMER_EXPIRE (HZ / 10) static int work_enabled; @@ -2434,7 +2442,7 @@ take the forward porting patches: unsigned long oprofile_get_cpu_buffer_size(void) { return oprofile_cpu_buffer_size; -@@ -97,7 +103,7 @@ int alloc_cpu_buffers(void) +@@ -75,7 +81,7 @@ int alloc_cpu_buffers(void) struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); b->last_task = NULL; @@ -2443,7 +2451,7 @@ take the forward porting patches: b->tracing = 0; b->buffer_size = buffer_size; b->sample_received = 0; -@@ -215,7 +221,7 @@ unsigned long op_cpu_buffer_entries(int +@@ -180,7 +186,7 @@ unsigned long op_cpu_buffer_entries(int static int op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, @@ -2452,7 +2460,7 @@ take the forward porting patches: { struct op_entry entry; struct op_sample *sample; -@@ -228,16 +234,15 @@ op_add_code(struct oprofile_cpu_buffer * +@@ -193,16 +199,15 @@ op_add_code(struct oprofile_cpu_buffer * flags |= TRACE_BEGIN; /* notice a switch from user->kernel or vice versa */ @@ -2475,7 +2483,7 @@ take the forward porting patches: cpu_buf->last_task = task; flags |= USER_CTX_SWITCH; } -@@ -286,14 +291,14 @@ op_add_sample(struct oprofile_cpu_buffer +@@ -251,14 +256,14 @@ op_add_sample(struct oprofile_cpu_buffer /* * This must be safe from any context. * @@ -2494,7 +2502,7 @@ take the forward porting patches: { cpu_buf->sample_received++; -@@ -302,7 +307,7 @@ log_sample(struct oprofile_cpu_buffer *c +@@ -267,7 +272,7 @@ log_sample(struct oprofile_cpu_buffer *c return 0; } @@ -2503,7 +2511,7 @@ take the forward porting patches: goto fail; if (op_add_sample(cpu_buf, pc, event)) -@@ -457,6 +462,25 @@ fail: +@@ -430,6 +435,25 @@ fail: return; } @@ -2529,9 +2537,9 @@ take the forward porting patches: /* * This serves to avoid cpu buffer overflow, and makes sure * the task mortuary progresses ---- head-2010-05-25.orig/drivers/oprofile/cpu_buffer.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/cpu_buffer.h 2010-03-24 14:53:41.000000000 +0100 -@@ -40,7 +40,7 @@ struct op_entry; +--- head-2011-03-11.orig/drivers/oprofile/cpu_buffer.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/oprofile/cpu_buffer.h 2011-01-31 14:53:38.000000000 +0100 +@@ -41,7 +41,7 @@ struct op_entry; struct oprofile_cpu_buffer { unsigned long buffer_size; struct task_struct *last_task; @@ -2540,7 +2548,7 @@ take the forward porting patches: int tracing; unsigned long sample_received; unsigned long sample_lost_overflow; -@@ -62,7 +62,7 @@ static inline void op_cpu_buffer_reset(i +@@ -63,7 +63,7 @@ static inline void op_cpu_buffer_reset(i { struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); @@ -2549,7 +2557,7 @@ take the forward porting patches: cpu_buf->last_task = NULL; } -@@ -112,9 +112,13 @@ int op_cpu_buffer_get_data(struct op_ent +@@ -113,9 +113,13 @@ int op_cpu_buffer_get_data(struct op_ent } /* extra data flags */ @@ -2565,8 +2573,8 @@ take the forward porting patches: +#define DOMAIN_SWITCH (1UL << 5) #endif /* OPROFILE_CPU_BUFFER_H */ ---- head-2010-05-25.orig/drivers/oprofile/event_buffer.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/event_buffer.h 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/oprofile/event_buffer.h 2011-03-11 10:41:54.000000000 +0100 ++++ 
head-2011-03-11/drivers/oprofile/event_buffer.h 2011-01-31 14:53:38.000000000 +0100 @@ -30,6 +30,9 @@ void wake_up_buffer_waiter(void); #define INVALID_COOKIE ~0UL #define NO_COOKIE 0UL @@ -2577,8 +2585,8 @@ take the forward porting patches: extern const struct file_operations event_buffer_fops; /* mutex between sync_cpu_buffers() and the ---- head-2010-05-25.orig/drivers/oprofile/oprof.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/oprof.c 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/oprofile/oprof.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/oprofile/oprof.c 2011-01-31 14:53:38.000000000 +0100 @@ -5,6 +5,10 @@ * @remark Read the file COPYING * @@ -2623,18 +2631,18 @@ take the forward porting patches: int oprofile_setup(void) { int err; ---- head-2010-05-25.orig/drivers/oprofile/oprof.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/oprof.h 2010-03-24 14:53:41.000000000 +0100 -@@ -39,4 +39,7 @@ void oprofile_timer_init(struct oprofile - int oprofile_set_backtrace(unsigned long depth); +--- head-2011-03-11.orig/drivers/oprofile/oprof.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/oprofile/oprof.h 2011-01-31 14:53:38.000000000 +0100 +@@ -40,4 +40,7 @@ void oprofile_timer_exit(void); + int oprofile_set_ulong(unsigned long *addr, unsigned long val); int oprofile_set_timeout(unsigned long time); +int oprofile_set_active(int active_domains[], unsigned int adomains); +int oprofile_set_passive(int passive_domains[], unsigned int pdomains); + #endif /* OPROF_H */ ---- head-2010-05-25.orig/drivers/oprofile/oprofile_files.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/oprofile_files.c 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/drivers/oprofile/oprofile_files.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/oprofile/oprofile_files.c 2011-01-31 14:53:38.000000000 +0100 @@ -5,11 +5,17 @@ * @remark Read the file COPYING * @@ -2653,8 +2661,8 @@ take the forward porting patches: #include "event_buffer.h" #include "oprofile_stats.h" -@@ -165,6 +171,195 @@ static const struct file_operations dump - .write = dump_write, +@@ -174,6 +180,195 @@ static const struct file_operations dump + .llseek = noop_llseek, }; +#define TMPBUFSIZE 512 @@ -2849,7 +2857,7 @@ take the forward porting patches: void oprofile_create_files(struct super_block *sb, struct dentry *root) { /* reinitialize default values */ -@@ -175,6 +370,8 @@ void oprofile_create_files(struct super_ +@@ -184,6 +379,8 @@ void oprofile_create_files(struct super_ oprofilefs_create_file(sb, root, "enable", &enable_fops); oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); @@ -2858,9 +2866,9 @@ take the forward porting patches: oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size); oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed); ---- head-2010-05-25.orig/fs/aio.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/fs/aio.c 2010-03-24 14:53:41.000000000 +0100 -@@ -40,6 +40,11 @@ +--- head-2011-03-11.orig/fs/aio.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/fs/aio.c 2011-03-11 10:52:11.000000000 +0100 +@@ -41,6 +41,11 @@ #include #include @@ -2872,7 +2880,7 @@ take the forward porting patches: #if DEBUG > 1 #define dprintk printk #else -@@ -997,6 +1002,11 @@ put_rq: +@@ -1002,6 +1007,11 @@ put_rq: if 
(waitqueue_active(&ctx->wait)) wake_up(&ctx->wait); @@ -2884,7 +2892,7 @@ take the forward porting patches: spin_unlock_irqrestore(&ctx->ctx_lock, flags); return ret; } -@@ -1005,6 +1015,8 @@ EXPORT_SYMBOL(aio_complete); +@@ -1010,6 +1020,8 @@ EXPORT_SYMBOL(aio_complete); /* aio_read_evt * Pull an event off of the ioctx's event ring. Returns the number of * events fetched (0 or 1 ;-) @@ -2893,7 +2901,7 @@ take the forward porting patches: * FIXME: make this use cmpxchg. * TODO: make the ringbuffer user mmap()able (requires FIXME). */ -@@ -1027,13 +1039,18 @@ static int aio_read_evt(struct kioctx *i +@@ -1032,13 +1044,18 @@ static int aio_read_evt(struct kioctx *i head = ring->head % info->nr; if (head != ring->tail) { @@ -2919,7 +2927,7 @@ take the forward porting patches: } spin_unlock(&info->ring_lock); -@@ -1218,6 +1235,13 @@ static void io_destroy(struct kioctx *io +@@ -1223,6 +1240,13 @@ static void io_destroy(struct kioctx *io aio_cancel_all(ioctx); wait_for_all_aios(ioctx); @@ -2933,7 +2941,7 @@ take the forward porting patches: /* * Wake up any waiters. The setting of ctx->dead must be seen -@@ -1228,6 +1252,67 @@ static void io_destroy(struct kioctx *io +@@ -1233,6 +1257,67 @@ static void io_destroy(struct kioctx *io put_ioctx(ioctx); /* once for the lookup */ } @@ -3001,7 +3009,7 @@ take the forward porting patches: /* sys_io_setup: * Create an aio_context capable of receiving at least nr_events. * ctxp must not point to an aio_context that already exists, and -@@ -1240,18 +1325,30 @@ static void io_destroy(struct kioctx *io +@@ -1245,18 +1330,30 @@ static void io_destroy(struct kioctx *io * resources are available. May fail with -EFAULT if an invalid * pointer is passed for ctxp. Will fail with -ENOSYS if not * implemented. @@ -3032,7 +3040,7 @@ take the forward porting patches: if (unlikely(ctx || nr_events == 0)) { pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", ctx, nr_events); -@@ -1262,8 +1359,12 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_e +@@ -1267,8 +1364,12 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_e ret = PTR_ERR(ioctx); if (!IS_ERR(ioctx)) { ret = put_user(ioctx->user_id, ctxp); @@ -3047,9 +3055,9 @@ take the forward porting patches: get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */ io_destroy(ioctx); ---- head-2010-05-25.orig/fs/compat_ioctl.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/fs/compat_ioctl.c 2010-05-12 08:57:55.000000000 +0200 -@@ -116,6 +116,13 @@ +--- head-2011-03-11.orig/fs/compat_ioctl.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/fs/compat_ioctl.c 2011-01-31 14:53:38.000000000 +0100 +@@ -114,6 +114,13 @@ #include #endif @@ -3063,7 +3071,7 @@ take the forward porting patches: static int w_long(unsigned int fd, unsigned int cmd, compat_ulong_t __user *argp) { -@@ -1518,6 +1525,19 @@ IGNORE_IOCTL(FBIOGETCMAP32) +@@ -1408,6 +1415,19 @@ IGNORE_IOCTL(FBIOGETCMAP32) IGNORE_IOCTL(FBIOSCURSOR32) IGNORE_IOCTL(FBIOGCURSOR32) #endif @@ -3083,8 +3091,8 @@ take the forward porting patches: }; /* ---- head-2010-05-25.orig/include/acpi/processor.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/acpi/processor.h 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/include/acpi/processor.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/acpi/processor.h 2011-01-31 14:53:38.000000000 +0100 @@ -17,6 +17,12 @@ #define ACPI_PROCESSOR_MAX_THROTTLE 250 /* 25% */ #define ACPI_PROCESSOR_MAX_DUTY_WIDTH 4 @@ -3116,20 +3124,20 @@ take the forward porting patches: struct acpi_power_register { u8 
descriptor; u16 length; -@@ -74,6 +91,12 @@ struct acpi_processor_cx { - u32 power; +@@ -64,6 +81,12 @@ struct acpi_processor_cx { u32 usage; u64 time; + u8 bm_sts_skip; +#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL + /* Require raw information for external control logic */ + struct acpi_power_register reg; + u32 csd_count; + struct acpi_csd_package *domain_info; +#endif - struct acpi_processor_cx_policy promotion; - struct acpi_processor_cx_policy demotion; char desc[ACPI_CX_DESC_LEN]; -@@ -300,6 +323,9 @@ static inline void acpi_processor_ppc_ex + }; + +@@ -288,6 +311,9 @@ static inline void acpi_processor_ppc_ex { return; } @@ -3139,7 +3147,7 @@ take the forward porting patches: static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) { -@@ -313,6 +339,7 @@ static inline int acpi_processor_ppc_has +@@ -301,6 +327,7 @@ static inline int acpi_processor_ppc_has } return 0; } @@ -3147,7 +3155,7 @@ take the forward porting patches: static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) { return -ENODEV; -@@ -366,4 +393,120 @@ static inline void acpi_thermal_cpufreq_ +@@ -359,4 +386,120 @@ static inline void acpi_thermal_cpufreq_ } #endif @@ -3268,21 +3276,21 @@ take the forward porting patches: +#endif /* CONFIG_XEN */ + #endif ---- head-2010-05-25.orig/include/asm-generic/pgtable.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/asm-generic/pgtable.h 2010-03-24 14:53:41.000000000 +0100 -@@ -99,6 +99,10 @@ static inline void ptep_set_wrprotect(st - } +--- head-2011-03-11.orig/include/asm-generic/pgtable.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/asm-generic/pgtable.h 2011-03-11 10:52:21.000000000 +0100 +@@ -156,6 +156,10 @@ static inline void pmdp_set_wrprotect(st + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef arch_change_pte_range +#define arch_change_pte_range(mm, pmd, addr, end, newprot) 0 +#endif + - #ifndef __HAVE_ARCH_PTE_SAME - #define pte_same(A,B) (pte_val(A) == pte_val(B)) - #endif ---- head-2010-05-25.orig/include/linux/aio.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/aio.h 2010-03-24 14:53:41.000000000 +0100 + #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH + extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, +--- head-2011-03-11.orig/include/linux/aio.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/aio.h 2011-01-31 14:53:38.000000000 +0100 @@ -199,6 +199,12 @@ struct kioctx { struct delayed_work wq; @@ -3296,9 +3304,9 @@ take the forward porting patches: struct rcu_head rcu_head; }; ---- head-2010-05-25.orig/include/linux/highmem.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/highmem.h 2010-03-24 14:53:41.000000000 +0100 -@@ -136,12 +136,14 @@ alloc_zeroed_user_highpage_movable(struc +--- head-2011-03-11.orig/include/linux/highmem.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/highmem.h 2011-01-31 14:53:38.000000000 +0100 +@@ -178,12 +178,14 @@ alloc_zeroed_user_highpage_movable(struc return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); } @@ -3313,7 +3321,7 @@ take the forward porting patches: static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, -@@ -195,6 +197,8 @@ static inline void copy_user_highpage(st +@@ -237,6 +239,8 @@ static inline void copy_user_highpage(st #endif @@ -3322,16 +3330,16 @@ take the forward porting patches: static inline void copy_highpage(struct page *to, struct page 
*from) { char *vfrom, *vto; -@@ -206,4 +210,6 @@ static inline void copy_highpage(struct - kunmap_atomic(vto, KM_USER1); +@@ -248,4 +252,6 @@ static inline void copy_highpage(struct + kunmap_atomic(vfrom, KM_USER0); } +#endif + #endif /* _LINUX_HIGHMEM_H */ ---- head-2010-05-25.orig/include/linux/interrupt.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/interrupt.h 2010-03-24 14:53:41.000000000 +0100 -@@ -317,6 +317,12 @@ static inline int disable_irq_wake(unsig +--- head-2011-03-11.orig/include/linux/interrupt.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/interrupt.h 2011-01-31 14:53:38.000000000 +0100 +@@ -353,6 +353,12 @@ static inline int disable_irq_wake(unsig } #endif /* CONFIG_GENERIC_HARDIRQS */ @@ -3344,8 +3352,8 @@ take the forward porting patches: #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) (local_softirq_pending() = (x)) #define or_softirq_pending(x) (local_softirq_pending() |= (x)) ---- head-2010-05-25.orig/include/linux/kexec.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/kexec.h 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/include/linux/kexec.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/kexec.h 2011-01-31 14:53:38.000000000 +0100 @@ -46,6 +46,13 @@ KEXEC_CORE_NOTE_NAME_BYTES + \ KEXEC_CORE_NOTE_DESC_BYTES ) @@ -3373,9 +3381,9 @@ take the forward porting patches: extern asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments, ---- head-2010-05-25.orig/include/linux/mm.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/mm.h 2010-03-24 14:53:41.000000000 +0100 -@@ -103,7 +103,12 @@ extern unsigned int kobjsize(const void +--- head-2011-03-11.orig/include/linux/mm.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/mm.h 2011-01-31 14:53:38.000000000 +0100 +@@ -113,7 +113,12 @@ extern unsigned int kobjsize(const void #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ @@ -3388,7 +3396,7 @@ take the forward porting patches: #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ -@@ -128,6 +133,12 @@ extern unsigned int kobjsize(const void +@@ -141,6 +146,12 @@ extern unsigned int kobjsize(const void */ #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) @@ -3401,7 +3409,7 @@ take the forward porting patches: /* * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. 
-@@ -199,6 +210,15 @@ struct vm_operations_struct { +@@ -210,6 +221,15 @@ struct vm_operations_struct { */ int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); @@ -3417,18 +3425,18 @@ take the forward porting patches: #ifdef CONFIG_NUMA /* * set_policy() op must add a reference to any non-NULL @new mempolicy ---- head-2010-05-25.orig/include/linux/oprofile.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/oprofile.h 2010-03-24 14:53:41.000000000 +0100 -@@ -16,6 +16,8 @@ - #include - #include +--- head-2011-03-11.orig/include/linux/oprofile.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/oprofile.h 2011-02-17 10:05:41.000000000 +0100 +@@ -19,6 +19,8 @@ + #include + #include #include + +#include /* Each escaped entry is prefixed by ESCAPE_CODE * then one of the following codes, then the -@@ -28,14 +30,18 @@ +@@ -31,14 +33,18 @@ #define CPU_SWITCH_CODE 2 #define COOKIE_SWITCH_CODE 3 #define KERNEL_ENTER_SWITCH_CODE 4 @@ -3448,7 +3456,7 @@ take the forward porting patches: #define IBS_FETCH_CODE 13 #define IBS_OP_CODE 14 -@@ -49,6 +55,11 @@ struct oprofile_operations { +@@ -52,6 +58,11 @@ struct oprofile_operations { /* create any necessary configuration files in the oprofile fs. * Optional. */ int (*create_files)(struct super_block * sb, struct dentry * root); @@ -3460,7 +3468,7 @@ take the forward porting patches: /* Do any necessary interrupt setup. Optional. */ int (*setup)(void); /* Do any necessary interrupt shutdown. Optional. */ -@@ -110,6 +121,9 @@ void oprofile_add_pc(unsigned long pc, i +@@ -113,6 +124,9 @@ void oprofile_add_pc(unsigned long pc, i /* add a backtrace entry, to be called from the ->backtrace callback */ void oprofile_add_trace(unsigned long eip); @@ -3470,11 +3478,11 @@ take the forward porting patches: /** * Create a file of the given name as a child of the given root, with ---- head-2010-05-25.orig/include/linux/page-flags.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/page-flags.h 2010-03-24 14:53:41.000000000 +0100 -@@ -109,6 +109,11 @@ enum pageflags { - #ifdef CONFIG_MEMORY_FAILURE - PG_hwpoison, /* hardware poisoned page. Don't touch */ +--- head-2011-03-11.orig/include/linux/page-flags.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/page-flags.h 2011-01-31 14:53:38.000000000 +0100 +@@ -108,6 +108,11 @@ enum pageflags { + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + PG_compound_lock, #endif +#ifdef CONFIG_XEN + PG_foreign, /* Page is owned by foreign allocator. */ @@ -3484,7 +3492,7 @@ take the forward porting patches: __NR_PAGEFLAGS, /* Filesystems */ -@@ -337,6 +342,27 @@ static inline void SetPageUptodate(struc +@@ -333,6 +338,27 @@ static inline void SetPageUptodate(struc CLEARPAGEFLAG(Uptodate, uptodate) @@ -3512,8 +3520,8 @@ take the forward porting patches: extern void cancel_dirty_page(struct page *page, unsigned int account_size); int test_clear_page_writeback(struct page *page); -@@ -413,6 +439,14 @@ PAGEFLAG_FALSE(MemError) - #define __PG_MLOCKED 0 +@@ -463,6 +489,14 @@ static inline int PageTransCompound(stru + #define __PG_COMPOUND_LOCK 0 #endif +#if !defined(CONFIG_XEN) @@ -3527,18 +3535,18 @@ take the forward porting patches: /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. It they are, there is a problem. 
-@@ -422,7 +456,7 @@ PAGEFLAG_FALSE(MemError) - 1 << PG_private | 1 << PG_private_2 | \ - 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ +@@ -473,7 +507,7 @@ static inline int PageTransCompound(stru + 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ -- 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON) -+ 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | __PG_XEN) + 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ +- __PG_COMPOUND_LOCK) ++ __PG_COMPOUND_LOCK | __PG_XEN) /* * Flags checked when a page is prepped for return by the page allocator. ---- head-2010-05-25.orig/include/linux/pci.h 2010-03-24 13:55:21.000000000 +0100 -+++ head-2010-05-25/include/linux/pci.h 2010-03-24 14:53:41.000000000 +0100 -@@ -962,6 +962,11 @@ static inline int pci_msi_enabled(void) +--- head-2011-03-11.orig/include/linux/pci.h 2011-01-31 14:31:28.000000000 +0100 ++++ head-2011-03-11/include/linux/pci.h 2011-01-31 14:53:38.000000000 +0100 +@@ -979,6 +979,11 @@ static inline int pci_msi_enabled(void) { return 0; } @@ -3550,7 +3558,7 @@ take the forward porting patches: #else extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); extern void pci_msi_shutdown(struct pci_dev *dev); -@@ -974,6 +979,10 @@ extern void pci_disable_msix(struct pci_ +@@ -991,6 +996,10 @@ extern void pci_disable_msix(struct pci_ extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); extern void pci_restore_msi_state(struct pci_dev *dev); extern int pci_msi_enabled(void); @@ -3560,35 +3568,9 @@ take the forward porting patches: +#endif #endif - #ifndef CONFIG_PCIEASPM ---- head-2010-05-25.orig/include/linux/skbuff.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/skbuff.h 2010-04-15 09:43:55.000000000 +0200 -@@ -272,6 +272,8 @@ typedef unsigned char *sk_buff_data_t; - * @local_df: allow local fragmentation - * @cloned: Head may be cloned (check refcnt to be sure) - * @nohdr: Payload reference only, must not modify header -+ * @proto_data_valid: Protocol data validated since arriving at localhost -+ * @proto_csum_blank: Protocol csum must be added before leaving localhost - * @pkt_type: Packet class - * @fclone: skbuff clone status - * @ip_summed: Driver fed us an IP checksum -@@ -377,9 +379,13 @@ struct sk_buff { - #ifdef CONFIG_NETVM - __u8 emergency:1; - #endif -+#ifdef CONFIG_XEN -+ __u8 proto_data_valid:1, -+ proto_csum_blank:1; -+#endif - kmemcheck_bitfield_end(flags2); - -- /* 0/14 bit hole */ -+ /* 0/9...15 bit hole */ - - #ifdef CONFIG_NET_DMA - dma_cookie_t dma_cookie; ---- head-2010-05-25.orig/include/linux/vermagic.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/vermagic.h 2010-03-24 14:53:41.000000000 +0100 + #ifdef CONFIG_PCIEPORTBUS +--- head-2011-03-11.orig/include/linux/vermagic.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/include/linux/vermagic.h 2011-01-31 14:53:38.000000000 +0100 @@ -22,6 +22,11 @@ #else #define MODULE_VERMAGIC_MODVERSIONS "" @@ -3608,9 +3590,9 @@ take the forward porting patches: - MODULE_ARCH_VERMAGIC + MODULE_VERMAGIC_XEN MODULE_ARCH_VERMAGIC ---- head-2010-05-25.orig/kernel/irq/spurious.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/kernel/irq/spurious.c 2010-03-24 14:53:41.000000000 +0100 -@@ -233,7 +233,7 @@ void note_interrupt(unsigned int irq, st +--- head-2011-03-11.orig/kernel/irq/spurious.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/kernel/irq/spurious.c 2011-01-31 14:53:38.000000000 +0100 +@@ -227,7 +227,7 @@ void 
note_interrupt(unsigned int irq, st */ if (time_after(jiffies, desc->last_unhandled + HZ/10)) desc->irqs_unhandled = 1; @@ -3619,9 +3601,9 @@ take the forward porting patches: desc->irqs_unhandled++; desc->last_unhandled = jiffies; if (unlikely(action_ret != IRQ_NONE)) ---- head-2010-05-25.orig/kernel/kexec.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/kernel/kexec.c 2010-03-24 14:53:41.000000000 +0100 -@@ -360,13 +360,26 @@ static int kimage_is_destination_range(s +--- head-2011-03-11.orig/kernel/kexec.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/kernel/kexec.c 2011-01-31 14:53:38.000000000 +0100 +@@ -356,13 +356,26 @@ static int kimage_is_destination_range(s return 0; } @@ -3649,7 +3631,7 @@ take the forward porting patches: pages->mapping = NULL; set_page_private(pages, order); count = 1 << order; -@@ -430,10 +443,10 @@ static struct page *kimage_alloc_normal_ +@@ -426,10 +439,10 @@ static struct page *kimage_alloc_normal_ do { unsigned long pfn, epfn, addr, eaddr; @@ -3662,7 +3644,7 @@ take the forward porting patches: epfn = pfn + count; addr = pfn << PAGE_SHIFT; eaddr = epfn << PAGE_SHIFT; -@@ -467,6 +480,7 @@ static struct page *kimage_alloc_normal_ +@@ -463,6 +476,7 @@ static struct page *kimage_alloc_normal_ return pages; } @@ -3670,7 +3652,7 @@ take the forward porting patches: static struct page *kimage_alloc_crash_control_pages(struct kimage *image, unsigned int order) { -@@ -520,7 +534,7 @@ static struct page *kimage_alloc_crash_c +@@ -516,7 +530,7 @@ static struct page *kimage_alloc_crash_c } /* If I don't overlap any segments I have found my hole! */ if (i == image->nr_segments) { @@ -3679,7 +3661,7 @@ take the forward porting patches: break; } } -@@ -547,6 +561,13 @@ struct page *kimage_alloc_control_pages( +@@ -543,6 +557,13 @@ struct page *kimage_alloc_control_pages( return pages; } @@ -3693,7 +3675,7 @@ take the forward porting patches: static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) { -@@ -562,7 +583,7 @@ static int kimage_add_entry(struct kimag +@@ -558,7 +579,7 @@ static int kimage_add_entry(struct kimag return -ENOMEM; ind_page = page_address(page); @@ -3702,7 +3684,7 @@ take the forward porting patches: image->entry = ind_page; image->last_entry = ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); -@@ -621,13 +642,13 @@ static void kimage_terminate(struct kima +@@ -617,13 +638,13 @@ static void kimage_terminate(struct kima #define for_each_kimage_entry(image, ptr, entry) \ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ ptr = (entry & IND_INDIRECTION)? \ @@ -3718,7 +3700,7 @@ take the forward porting patches: kimage_free_pages(page); } -@@ -639,6 +660,10 @@ static void kimage_free(struct kimage *i +@@ -635,6 +656,10 @@ static void kimage_free(struct kimage *i if (!image) return; @@ -3729,7 +3711,7 @@ take the forward porting patches: kimage_free_extra_pages(image); for_each_kimage_entry(image, ptr, entry) { if (entry & IND_INDIRECTION) { -@@ -714,7 +739,7 @@ static struct page *kimage_alloc_page(st +@@ -710,7 +735,7 @@ static struct page *kimage_alloc_page(st * have a match. 
*/ list_for_each_entry(page, &image->dest_pages, lru) { @@ -3738,7 +3720,7 @@ take the forward porting patches: if (addr == destination) { list_del(&page->lru); return page; -@@ -725,16 +750,16 @@ static struct page *kimage_alloc_page(st +@@ -721,16 +746,16 @@ static struct page *kimage_alloc_page(st kimage_entry_t *old; /* Allocate a page, if we run out of memory give up */ @@ -3758,7 +3740,7 @@ take the forward porting patches: /* If it is the destination page we want use it */ if (addr == destination) -@@ -757,7 +782,7 @@ static struct page *kimage_alloc_page(st +@@ -753,7 +778,7 @@ static struct page *kimage_alloc_page(st struct page *old_page; old_addr = *old & PAGE_MASK; @@ -3767,7 +3749,7 @@ take the forward porting patches: copy_highpage(page, old_page); *old = addr | (*old & ~PAGE_MASK); -@@ -813,7 +838,7 @@ static int kimage_load_normal_segment(st +@@ -809,7 +834,7 @@ static int kimage_load_normal_segment(st result = -ENOMEM; goto out; } @@ -3776,7 +3758,7 @@ take the forward porting patches: << PAGE_SHIFT); if (result < 0) goto out; -@@ -845,6 +870,7 @@ out: +@@ -841,6 +866,7 @@ out: return result; } @@ -3784,7 +3766,7 @@ take the forward porting patches: static int kimage_load_crash_segment(struct kimage *image, struct kexec_segment *segment) { -@@ -867,7 +893,7 @@ static int kimage_load_crash_segment(str +@@ -863,7 +889,7 @@ static int kimage_load_crash_segment(str char *ptr; size_t uchunk, mchunk; @@ -3793,7 +3775,7 @@ take the forward porting patches: if (!page) { result = -ENOMEM; goto out; -@@ -916,6 +942,13 @@ static int kimage_load_segment(struct ki +@@ -912,6 +938,13 @@ static int kimage_load_segment(struct ki return result; } @@ -3807,7 +3789,7 @@ take the forward porting patches: /* * Exec Kernel system call: for obvious reasons only root may call it. 
-@@ -1019,6 +1052,13 @@ SYSCALL_DEFINE4(kexec_load, unsigned lon +@@ -1015,6 +1048,13 @@ SYSCALL_DEFINE4(kexec_load, unsigned lon } kimage_terminate(image); } @@ -3821,9 +3803,9 @@ take the forward porting patches: /* Install the new kernel, and Uninstall the old */ image = xchg(dest_image, image); ---- head-2010-05-25.orig/kernel/sysctl.c 2010-03-24 14:09:47.000000000 +0100 -+++ head-2010-05-25/kernel/sysctl.c 2010-03-24 14:53:41.000000000 +0100 -@@ -776,7 +776,7 @@ static struct ctl_table kern_table[] = { +--- head-2011-03-11.orig/kernel/sysctl.c 2011-02-08 10:00:13.000000000 +0100 ++++ head-2011-03-11/kernel/sysctl.c 2011-02-08 10:02:12.000000000 +0100 +@@ -846,7 +846,7 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif @@ -3832,9 +3814,9 @@ take the forward porting patches: { .procname = "acpi_video_flags", .data = &acpi_realmode_flags, ---- head-2010-05-25.orig/mm/memory.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/mm/memory.c 2010-04-15 09:44:04.000000000 +0200 -@@ -599,6 +599,12 @@ struct page *vm_normal_page(struct vm_ar +--- head-2011-03-11.orig/mm/memory.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/mm/memory.c 2011-01-31 14:53:38.000000000 +0100 +@@ -604,6 +604,12 @@ struct page *vm_normal_page(struct vm_ar { unsigned long pfn = pte_pfn(pte); @@ -3847,7 +3829,7 @@ take the forward porting patches: if (HAVE_PTE_SPECIAL) { if (likely(!pte_special(pte))) goto check_pfn; -@@ -630,6 +636,9 @@ struct page *vm_normal_page(struct vm_ar +@@ -635,6 +641,9 @@ struct page *vm_normal_page(struct vm_ar return NULL; check_pfn: if (unlikely(pfn > highest_memmap_pfn)) { @@ -3857,7 +3839,7 @@ take the forward porting patches: print_bad_pte(vma, addr, pte, NULL); return NULL; } -@@ -935,8 +944,12 @@ static unsigned long zap_pte_range(struc +@@ -951,8 +960,12 @@ static unsigned long zap_pte_range(struc page->index > details->last_index)) continue; } @@ -3872,7 +3854,7 @@ take the forward porting patches: tlb_remove_tlb_entry(tlb, pte, addr); if (unlikely(!page)) continue; -@@ -1203,6 +1216,7 @@ unsigned long zap_page_range(struct vm_a +@@ -1229,6 +1242,7 @@ unsigned long zap_page_range(struct vm_a tlb_finish_mmu(tlb, address, end); return end; } @@ -3880,7 +3862,7 @@ take the forward porting patches: /** * zap_vma_ptes - remove ptes mapping the vma -@@ -1399,6 +1413,28 @@ int __get_user_pages(struct task_struct +@@ -1489,6 +1503,28 @@ int __get_user_pages(struct task_struct continue; } @@ -3909,9 +3891,9 @@ take the forward porting patches: if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) ---- head-2010-05-25.orig/mm/mmap.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/mm/mmap.c 2010-04-29 09:42:36.000000000 +0200 -@@ -1944,6 +1944,12 @@ static void unmap_region(struct mm_struc +--- head-2011-03-11.orig/mm/mmap.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/mm/mmap.c 2011-01-31 14:53:38.000000000 +0100 +@@ -1926,6 +1926,12 @@ static void unmap_region(struct mm_struc tlb_finish_mmu(tlb, start, end); } @@ -3924,15 +3906,15 @@ take the forward porting patches: /* * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. -@@ -1959,6 +1965,7 @@ detach_vmas_to_be_unmapped(struct mm_str - insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); +@@ -1942,6 +1948,7 @@ detach_vmas_to_be_unmapped(struct mm_str + vma->vm_prev = NULL; do { rb_erase(&vma->vm_rb, &mm->mm_rb); + unmap_vma(vma); mm->map_count--; tail_vma = vma; vma = vma->vm_next; -@@ -2297,6 +2304,9 @@ void exit_mmap(struct mm_struct *mm) +@@ -2284,6 +2291,9 @@ void exit_mmap(struct mm_struct *mm) arch_exit_mmap(mm); @@ -3942,22 +3924,22 @@ take the forward porting patches: vma = mm->mmap; if (!vma) /* Can happen if dup_mmap() received an OOM */ return; ---- head-2010-05-25.orig/mm/mprotect.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/mm/mprotect.c 2010-04-15 09:44:14.000000000 +0200 -@@ -90,6 +90,8 @@ static inline void change_pmd_range(stru - next = pmd_addr_end(addr, end); +--- head-2011-03-11.orig/mm/mprotect.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/mm/mprotect.c 2011-01-31 14:53:38.000000000 +0100 +@@ -97,6 +97,8 @@ static inline void change_pmd_range(stru + } if (pmd_none_or_clear_bad(pmd)) continue; + if (arch_change_pte_range(mm, pmd, addr, next, newprot)) + continue; - change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable); + change_pte_range(vma->vm_mm, pmd, addr, next, newprot, + dirty_accountable); } while (pmd++, addr = next, addr != end); - } ---- head-2010-05-25.orig/mm/page_alloc.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/mm/page_alloc.c 2010-03-24 14:59:37.000000000 +0100 -@@ -609,6 +609,13 @@ static void __free_pages_ok(struct page +--- head-2011-03-11.orig/mm/page_alloc.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/mm/page_alloc.c 2011-01-31 14:53:38.000000000 +0100 +@@ -647,6 +647,13 @@ static bool free_pages_prepare(struct pa + int i; int bad = 0; - int wasMlocked = __TestClearPageMlocked(page); +#ifdef CONFIG_XEN + if (PageForeign(page)) { @@ -3969,163 +3951,8 @@ take the forward porting patches: trace_mm_page_free_direct(page, order); kmemcheck_free_shadow(page, order); -@@ -1110,6 +1117,13 @@ void free_hot_cold_page(struct page *pag - int migratetype; - int wasMlocked = __TestClearPageMlocked(page); - -+#ifdef CONFIG_XEN -+ if (PageForeign(page)) { -+ PageForeignDestructor(page, 0); -+ return; -+ } -+#endif -+ - trace_mm_page_free_direct(page, 0); - kmemcheck_free_shadow(page, 0); - ---- head-2010-05-25.orig/net/core/dev.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/net/core/dev.c 2010-05-25 09:19:25.000000000 +0200 -@@ -139,6 +139,12 @@ - /* This should be increased if a protocol with a bigger head is added. */ - #define GRO_MAX_HEAD (MAX_HEADER + 128) - -+#ifdef CONFIG_XEN -+#include -+#include -+#include -+#endif -+ - /* - * The list of packet types we will receive (as opposed to discard) - * and the routines to invoke. 
-@@ -2005,6 +2011,43 @@ static struct netdev_queue *dev_pick_tx( - return netdev_get_tx_queue(dev, queue_index); - } - -+#ifdef CONFIG_XEN -+inline int skb_checksum_setup(struct sk_buff *skb) -+{ -+ if (skb->proto_csum_blank) { -+ if (skb->protocol != htons(ETH_P_IP)) -+ goto out; -+ skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl; -+ if (skb->h.raw >= skb->tail) -+ goto out; -+ switch (skb->nh.iph->protocol) { -+ case IPPROTO_TCP: -+ skb->csum = offsetof(struct tcphdr, check); -+ break; -+ case IPPROTO_UDP: -+ skb->csum = offsetof(struct udphdr, check); -+ break; -+ default: -+ if (net_ratelimit()) -+ printk(KERN_ERR "Attempting to checksum a non-" -+ "TCP/UDP packet, dropping a protocol" -+ " %d packet", skb->nh.iph->protocol); -+ goto out; -+ } -+ if ((skb->h.raw + skb->csum + 2) > skb->tail) -+ goto out; -+ skb->ip_summed = CHECKSUM_HW; -+ skb->proto_csum_blank = 0; -+ } -+ return 0; -+out: -+ return -EPROTO; -+} -+#else -+inline int skb_checksum_setup(struct sk_buff *skb) { return 0; } -+#endif -+EXPORT_SYMBOL(skb_checksum_setup); -+ - static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, - struct net_device *dev, - struct netdev_queue *txq) -@@ -2086,6 +2129,12 @@ int dev_queue_xmit(struct sk_buff *skb) - struct Qdisc *q; - int rc = -ENOMEM; - -+ /* If a checksum-deferred packet is forwarded to a device that needs a -+ * checksum, correct the pointers and force checksumming. -+ */ -+ if (skb_checksum_setup(skb)) -+ goto out_kfree_skb; -+ - /* GSO will handle the following emulations directly. */ - if (netif_needs_gso(dev, skb)) - goto gso; -@@ -2574,6 +2623,19 @@ int netif_receive_skb(struct sk_buff *sk - } - #endif - -+#ifdef CONFIG_XEN -+ switch (skb->ip_summed) { -+ case CHECKSUM_UNNECESSARY: -+ skb->proto_data_valid = 1; -+ break; -+ case CHECKSUM_HW: -+ /* XXX Implement me. */ -+ default: -+ skb->proto_data_valid = 0; -+ break; -+ } -+#endif -+ - if (skb_emergency(skb)) - goto skip_taps; - ---- head-2010-05-25.orig/net/core/skbuff.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/net/core/skbuff.c 2010-03-24 14:53:41.000000000 +0100 -@@ -645,6 +645,10 @@ static struct sk_buff *__skb_clone(struc - n->hdr_len = skb->nohdr ? 
skb_headroom(skb) : skb->hdr_len; - n->cloned = 1; - n->nohdr = 0; -+#ifdef CONFIG_XEN -+ C(proto_data_valid); -+ C(proto_csum_blank); -+#endif - n->destructor = NULL; - C(tail); - C(end); ---- head-2010-05-25.orig/net/ipv4/netfilter/nf_nat_proto_tcp.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/net/ipv4/netfilter/nf_nat_proto_tcp.c 2010-03-24 14:53:41.000000000 +0100 -@@ -75,6 +75,9 @@ tcp_manip_pkt(struct sk_buff *skb, - if (hdrsize < sizeof(*hdr)) - return true; - -+ if (skb_checksum_setup(skb)) -+ return false; -+ - inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); - inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0); - return true; ---- head-2010-05-25.orig/net/ipv4/netfilter/nf_nat_proto_udp.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/net/ipv4/netfilter/nf_nat_proto_udp.c 2010-03-24 14:53:41.000000000 +0100 -@@ -60,6 +60,10 @@ udp_manip_pkt(struct sk_buff *skb, - newport = tuple->dst.u.udp.port; - portptr = &hdr->dest; - } -+ -+ if (skb_checksum_setup(skb)) -+ return false; -+ - if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) { - inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); - inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, ---- head-2010-05-25.orig/net/ipv4/xfrm4_output.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/net/ipv4/xfrm4_output.c 2010-03-24 14:53:41.000000000 +0100 -@@ -81,7 +81,7 @@ static int xfrm4_output_finish(struct sk - #endif - - skb->protocol = htons(ETH_P_IP); -- return xfrm_output(skb); -+ return skb_checksum_setup(skb) ?: xfrm_output(skb); - } - - int xfrm4_output(struct sk_buff *skb) ---- head-2010-05-25.orig/scripts/Makefile.build 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/scripts/Makefile.build 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/scripts/Makefile.build 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/scripts/Makefile.build 2011-01-31 14:53:38.000000000 +0100 @@ -76,6 +76,21 @@ ifndef obj $(warning kbuild: Makefile.build is included improperly) endif @@ -4148,8 +3975,8 @@ take the forward porting patches: # =========================================================================== ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),) ---- head-2010-05-25.orig/scripts/Makefile.lib 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/scripts/Makefile.lib 2010-03-24 14:53:41.000000000 +0100 +--- head-2011-03-11.orig/scripts/Makefile.lib 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/scripts/Makefile.lib 2011-01-31 14:53:38.000000000 +0100 @@ -22,6 +22,12 @@ obj-m := $(filter-out $(obj-y),$(obj-m)) lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m))) diff --git a/patches.xen/xen3-auto-include-xen-interface.diff b/patches.xen/xen3-auto-include-xen-interface.diff index b377fae..fd4e673 100644 --- a/patches.xen/xen3-auto-include-xen-interface.diff +++ b/patches.xen/xen3-auto-include-xen-interface.diff @@ -1,10 +1,10 @@ Subject: xen3 include-xen-interface -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/COPYING 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/interface/COPYING 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,38 @@ +XEN NOTICE +========== @@ -45,7 +45,7 @@ Acked-by: jbeulich@novell.com +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86/cpuid.h 2008-01-21 11:15:27.000000000 +0100 ++++ b/include/xen/interface/arch-x86/cpuid.h 2011-03-17 13:50:24.000000000 +0100 @@ -0,0 +1,68 @@ +/****************************************************************************** + * arch-x86/cpuid.h @@ -73,7 +73,7 @@ Acked-by: jbeulich@novell.com + * Copyright (c) 2007 Citrix Systems, Inc. + * + * Authors: -+ * Keir Fraser ++ * Keir Fraser + */ + +#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ @@ -116,8 +116,8 @@ Acked-by: jbeulich@novell.com + +#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86/hvm/save.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,439 @@ ++++ b/include/xen/interface/arch-x86/hvm/save.h 2011-03-17 13:50:24.000000000 +0100 +@@ -0,0 +1,463 @@ +/* + * Structure definitions for HVM state that is held by Xen and must + * be saved along with the domain's memory and device-model state. @@ -385,6 +385,7 @@ Acked-by: jbeulich@novell.com + uint64_t apic_base_msr; + uint32_t disabled; /* VLAPIC_xx_DISABLED */ + uint32_t timer_divisor; ++ uint64_t tdt_msr; +}; + +DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); @@ -551,15 +552,38 @@ Acked-by: jbeulich@novell.com + +DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context); + ++ ++/* ++ * The save area of XSAVE/XRSTOR. ++ */ ++ ++struct hvm_hw_cpu_xsave { ++ uint64_t xfeature_mask; ++ uint64_t xcr0; /* Updated by XSETBV */ ++ uint64_t xcr0_accum; /* Updated by XSETBV */ ++ struct { ++ struct { char x[512]; } fpu_sse; ++ ++ struct { ++ uint64_t xstate_bv; /* Updated by XRSTOR */ ++ uint64_t reserved[7]; ++ } xsave_hdr; /* The 64-byte header */ ++ ++ struct { char x[0]; } ymm; /* YMM */ ++ } save_area; ++} __attribute__((packed)); ++ ++#define CPU_XSAVE_CODE 16 ++ +/* + * Largest type-code in use + */ -+#define HVM_SAVE_CODE_MAX 15 ++#define HVM_SAVE_CODE_MAX 16 + +#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86/xen-mca.h 2010-05-07 11:10:48.000000000 +0200 -@@ -0,0 +1,423 @@ ++++ b/include/xen/interface/arch-x86/xen-mca.h 2010-08-31 09:24:21.000000000 +0200 +@@ -0,0 +1,440 @@ +/****************************************************************************** + * arch-x86/mca.h + * @@ -966,6 +990,20 @@ Acked-by: jbeulich@novell.com + unsigned int mceinj_cpunr; /* target processor id */ +}; + ++#if defined(__XEN__) || defined(__XEN_TOOLS__) ++#define XEN_MC_inject_v2 6 ++#define XEN_MC_INJECT_TYPE_MASK 0x7 ++#define XEN_MC_INJECT_TYPE_MCE 0x0 ++#define XEN_MC_INJECT_TYPE_CMCI 0x1 ++ ++#define XEN_MC_INJECT_CPU_BROADCAST 0x8 ++ ++struct xen_mc_inject_v2 { ++ uint32_t flags; ++ struct xenctl_cpumap cpumap; ++}; ++#endif ++ +struct xen_mc { + uint32_t cmd; + uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ @@ -975,6 +1013,9 @@ Acked-by: jbeulich@novell.com + struct xen_mc_physcpuinfo mc_physcpuinfo; + struct xen_mc_msrinject mc_msrinject; + struct xen_mc_mceinject mc_mceinject; ++#if defined(__XEN__) || defined(__XEN_TOOLS__) ++ struct xen_mc_inject_v2 mc_inject_v2; ++#endif + } u; +}; +typedef struct xen_mc xen_mc_t; @@ -984,7 +1025,7 @@ Acked-by: jbeulich@novell.com + +#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86/xen-x86_32.h 2008-07-21 
11:00:33.000000000 +0200 ++++ b/include/xen/interface/arch-x86/xen-x86_32.h 2011-03-17 13:50:24.000000000 +0100 @@ -0,0 +1,180 @@ +/****************************************************************************** + * xen-x86_32.h @@ -1096,8 +1137,8 @@ Acked-by: jbeulich@novell.com + __guest_handle_ ## name; \ + typedef struct { union { type *p; uint64_aligned_t q; }; } \ + __guest_handle_64_ ## name -+#undef set_xen_guest_handle -+#define set_xen_guest_handle(hnd, val) \ ++#undef set_xen_guest_handle_raw ++#define set_xen_guest_handle_raw(hnd, val) \ + do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ + (hnd).p = val; \ + } while ( 0 ) @@ -1167,7 +1208,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86/xen-x86_64.h 2008-04-02 12:34:02.000000000 +0200 ++++ b/include/xen/interface/arch-x86/xen-x86_64.h 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,212 @@ +/****************************************************************************** + * xen-x86_64.h @@ -1382,8 +1423,8 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86/xen.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,200 @@ ++++ b/include/xen/interface/arch-x86/xen.h 2011-03-17 13:50:24.000000000 +0100 +@@ -0,0 +1,201 @@ +/****************************************************************************** + * arch-x86/xen.h + * @@ -1430,10 +1471,11 @@ Acked-by: jbeulich@novell.com +#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) +#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name +#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) -+#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) ++#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0) +#ifdef __XEN_TOOLS__ +#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) +#endif ++#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) + +#if defined(__i386__) +#include "xen-x86_32.h" @@ -1585,7 +1627,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86_32.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/interface/arch-x86_32.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,27 @@ +/****************************************************************************** + * arch-x86_32.h @@ -1615,7 +1657,7 @@ Acked-by: jbeulich@novell.com + +#include "arch-x86/xen.h" --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/arch-x86_64.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/interface/arch-x86_64.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,27 @@ +/****************************************************************************** + * arch-x86_64.h @@ -1645,7 +1687,7 @@ Acked-by: jbeulich@novell.com + +#include "arch-x86/xen.h" --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/dom0_ops.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/interface/dom0_ops.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,120 @@ +/****************************************************************************** + * dom0_ops.h @@ -1768,8 +1810,8 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/domctl.h 2010-05-07 
11:10:48.000000000 +0200 -@@ -0,0 +1,919 @@ ++++ b/include/xen/interface/domctl.h 2011-03-17 13:50:24.000000000 +0100 +@@ -0,0 +1,968 @@ +/****************************************************************************** + * domctl.h + * @@ -1809,11 +1851,6 @@ Acked-by: jbeulich@novell.com + +#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007 + -+struct xenctl_cpumap { -+ XEN_GUEST_HANDLE_64(uint8) bitmap; -+ uint32_t nr_cpus; -+}; -+ +/* + * NB. xen_domctl.domain is an IN/OUT parameter for this operation. + * If it is specified as zero, an id is auto-allocated and returned. @@ -2077,6 +2114,7 @@ Acked-by: jbeulich@novell.com +#define XEN_SCHEDULER_SEDF 4 +#define XEN_SCHEDULER_CREDIT 5 +#define XEN_SCHEDULER_CREDIT2 6 ++#define XEN_SCHEDULER_ARINC653 7 +/* Set or get info? */ +#define XEN_DOMCTL_SCHEDOP_putinfo 0 +#define XEN_DOMCTL_SCHEDOP_getinfo 1 @@ -2490,7 +2528,7 @@ Acked-by: jbeulich@novell.com +/* + * Page memory in and out. + */ -+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING (1 << 0) ++#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1 + +/* Domain memory paging */ +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 0 @@ -2498,6 +2536,19 @@ Acked-by: jbeulich@novell.com +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 2 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 3 + ++/* ++ * Access permissions. ++ * ++ * There are HVM hypercalls to set the per-page access permissions of every ++ * page in a domain. When one of these permissions--independent, read, ++ * write, and execute--is violated, the VCPU is paused and a memory event ++ * is sent with what happened. (See public/mem_event.h) The memory event ++ * handler can then resume the VCPU and redo the access with an ++ * ACCESS_RESUME mode for the following domctl. ++ */ ++#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2 ++#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 0 ++ +struct xen_domctl_mem_event_op { + uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ + uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */ @@ -2558,6 +2609,41 @@ Acked-by: jbeulich@novell.com +typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t); + ++#if defined(__i386__) || defined(__x86_64__) ++/* XEN_DOMCTL_setvcpuextstate */ ++/* XEN_DOMCTL_getvcpuextstate */ ++struct xen_domctl_vcpuextstate { ++ /* IN: VCPU that this call applies to. */ ++ uint32_t vcpu; ++ /* ++ * SET: xfeature support mask of struct (IN) ++ * GET: xfeature support mask of struct (IN/OUT) ++ * xfeature mask is served as identifications of the saving format ++ * so that compatible CPUs can have a check on format to decide ++ * whether it can restore. 
++ */ ++ uint64_aligned_t xfeature_mask; ++ /* ++ * SET: Size of struct (IN) ++ * GET: Size of struct (IN/OUT) ++ */ ++ uint64_aligned_t size; ++ XEN_GUEST_HANDLE_64(uint64) buffer; ++}; ++typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t; ++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t); ++#endif ++ ++/* XEN_DOMCTL_set_access_required: sets whether a memory event listener ++ * must be present to handle page access events: if false, the page ++ * access will revert to full permissions if no one is listening; ++ * */ ++struct xen_domctl_set_access_required { ++ uint8_t access_required; ++}; ++typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t; ++DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t); ++ +struct xen_domctl { + uint32_t cmd; +#define XEN_DOMCTL_createdomain 1 @@ -2618,6 +2704,9 @@ Acked-by: jbeulich@novell.com +#define XEN_DOMCTL_gettscinfo 59 +#define XEN_DOMCTL_settscinfo 60 +#define XEN_DOMCTL_getpageframeinfo3 61 ++#define XEN_DOMCTL_setvcpuextstate 62 ++#define XEN_DOMCTL_getvcpuextstate 63 ++#define XEN_DOMCTL_set_access_required 64 +#define XEN_DOMCTL_gdbsx_guestmemio 1000 +#define XEN_DOMCTL_gdbsx_pausevcpu 1001 +#define XEN_DOMCTL_gdbsx_unpausevcpu 1002 @@ -2668,7 +2757,9 @@ Acked-by: jbeulich@novell.com + struct xen_domctl_mem_sharing_op mem_sharing_op; +#if defined(__i386__) || defined(__x86_64__) + struct xen_domctl_cpuid cpuid; ++ struct xen_domctl_vcpuextstate vcpuextstate; +#endif ++ struct xen_domctl_set_access_required access_required; + struct xen_domctl_gdbsx_memio gdbsx_guest_memio; + struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu; + struct xen_domctl_gdbsx_domstatus gdbsx_domstatus; @@ -2690,7 +2781,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/hvm/e820.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/interface/hvm/e820.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,34 @@ + +/* @@ -2727,7 +2818,7 @@ Acked-by: jbeulich@novell.com + +#endif /* __XEN_PUBLIC_HVM_E820_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/hvm/hvm_info_table.h 2010-05-07 11:10:48.000000000 +0200 ++++ b/include/xen/interface/hvm/hvm_info_table.h 2010-05-07 11:10:48.000000000 +0200 @@ -0,0 +1,75 @@ +/****************************************************************************** + * hvm/hvm_info_table.h @@ -2805,144 +2896,8 @@ Acked-by: jbeulich@novell.com + +#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/hvm/hvm_op.h 2009-06-23 09:28:21.000000000 +0200 -@@ -0,0 +1,133 @@ -+/* -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. 
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ -+#define __XEN_PUBLIC_HVM_HVM_OP_H__ -+ -+#include "../xen.h" -+ -+/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ -+#define HVMOP_set_param 0 -+#define HVMOP_get_param 1 -+struct xen_hvm_param { -+ domid_t domid; /* IN */ -+ uint32_t index; /* IN */ -+ uint64_t value; /* IN/OUT */ -+}; -+typedef struct xen_hvm_param xen_hvm_param_t; -+DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); -+ -+/* Set the logical level of one of a domain's PCI INTx wires. */ -+#define HVMOP_set_pci_intx_level 2 -+struct xen_hvm_set_pci_intx_level { -+ /* Domain to be updated. */ -+ domid_t domid; -+ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ -+ uint8_t domain, bus, device, intx; -+ /* Assertion level (0 = unasserted, 1 = asserted). */ -+ uint8_t level; -+}; -+typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; -+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); -+ -+/* Set the logical level of one of a domain's ISA IRQ wires. */ -+#define HVMOP_set_isa_irq_level 3 -+struct xen_hvm_set_isa_irq_level { -+ /* Domain to be updated. */ -+ domid_t domid; -+ /* ISA device identification, by ISA IRQ (0-15). */ -+ uint8_t isa_irq; -+ /* Assertion level (0 = unasserted, 1 = asserted). */ -+ uint8_t level; -+}; -+typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; -+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); -+ -+#define HVMOP_set_pci_link_route 4 -+struct xen_hvm_set_pci_link_route { -+ /* Domain to be updated. */ -+ domid_t domid; -+ /* PCI link identifier (0-3). */ -+ uint8_t link; -+ /* ISA IRQ (1-15), or 0 (disable link). */ -+ uint8_t isa_irq; -+}; -+typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; -+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); -+ -+/* Flushes all VCPU TLBs: @arg must be NULL. */ -+#define HVMOP_flush_tlbs 5 -+ -+/* Following tools-only interfaces may change in future. */ -+#if defined(__XEN__) || defined(__XEN_TOOLS__) -+ -+/* Track dirty VRAM. */ -+#define HVMOP_track_dirty_vram 6 -+struct xen_hvm_track_dirty_vram { -+ /* Domain to be tracked. */ -+ domid_t domid; -+ /* First pfn to track. */ -+ uint64_aligned_t first_pfn; -+ /* Number of pages to track. */ -+ uint64_aligned_t nr; -+ /* OUT variable. */ -+ /* Dirty bitmap buffer. */ -+ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; -+}; -+typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; -+DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); -+ -+/* Notify that some pages got modified by the Device Model. */ -+#define HVMOP_modified_memory 7 -+struct xen_hvm_modified_memory { -+ /* Domain to be updated. */ -+ domid_t domid; -+ /* First pfn. */ -+ uint64_aligned_t first_pfn; -+ /* Number of pages. 
*/ -+ uint64_aligned_t nr; -+}; -+typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; -+DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); -+ -+#define HVMOP_set_mem_type 8 -+typedef enum { -+ HVMMEM_ram_rw, /* Normal read/write guest RAM */ -+ HVMMEM_ram_ro, /* Read-only; writes are discarded */ -+ HVMMEM_mmio_dm, /* Reads and write go to the device model */ -+} hvmmem_type_t; -+/* Notify that a region of memory is to be treated in a specific way. */ -+struct xen_hvm_set_mem_type { -+ /* Domain to be updated. */ -+ domid_t domid; -+ /* Memory type */ -+ hvmmem_type_t hvmmem_type; -+ /* First pfn. */ -+ uint64_aligned_t first_pfn; -+ /* Number of pages. */ -+ uint64_aligned_t nr; -+}; -+typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; -+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); -+ -+ -+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ -+ -+#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/hvm/ioreq.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,119 @@ ++++ b/include/xen/interface/hvm/ioreq.h 2011-03-17 13:50:24.000000000 +0100 +@@ -0,0 +1,140 @@ +/* + * ioreq.h: I/O request definitions for device models + * Copyright (c) 2004, Intel Corporation. @@ -3045,11 +3000,32 @@ Acked-by: jbeulich@novell.com +}; +#endif /* defined(__ia64__) */ + -+#define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 -+#define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) -+#define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) -+#define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20) -+#define ACPI_GPE0_BLK_LEN 0x08 ++/* ++ * ACPI Control/Event register locations. Location is controlled by a ++ * version number in HVM_PARAM_ACPI_IOPORTS_LOCATION. ++ */ ++ ++/* Version 0 (default): Traditional Xen locations. */ ++#define ACPI_PM1A_EVT_BLK_ADDRESS_V0 0x1f40 ++#define ACPI_PM1A_CNT_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x04) ++#define ACPI_PM_TMR_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x08) ++#define ACPI_GPE0_BLK_ADDRESS_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0 + 0x20) ++#define ACPI_GPE0_BLK_LEN_V0 0x08 ++ ++/* Version 1: Locations preferred by modern Qemu. */ ++#define ACPI_PM1A_EVT_BLK_ADDRESS_V1 0xb000 ++#define ACPI_PM1A_CNT_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x04) ++#define ACPI_PM_TMR_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x08) ++#define ACPI_GPE0_BLK_ADDRESS_V1 0xafe0 ++#define ACPI_GPE0_BLK_LEN_V1 0x04 ++ ++/* Compatibility definitions for the default location (version 0). 
*/ ++#define ACPI_PM1A_EVT_BLK_ADDRESS ACPI_PM1A_EVT_BLK_ADDRESS_V0 ++#define ACPI_PM1A_CNT_BLK_ADDRESS ACPI_PM1A_CNT_BLK_ADDRESS_V0 ++#define ACPI_PM_TMR_BLK_ADDRESS ACPI_PM_TMR_BLK_ADDRESS_V0 ++#define ACPI_GPE0_BLK_ADDRESS ACPI_GPE0_BLK_ADDRESS_V0 ++#define ACPI_GPE0_BLK_LEN ACPI_GPE0_BLK_LEN_V0 ++ + +#endif /* _IOREQ_H_ */ + @@ -3063,121 +3039,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/hvm/params.h 2009-04-07 13:58:49.000000000 +0200 -@@ -0,0 +1,111 @@ -+/* -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef __XEN_PUBLIC_HVM_PARAMS_H__ -+#define __XEN_PUBLIC_HVM_PARAMS_H__ -+ -+#include "hvm_op.h" -+ -+/* -+ * Parameter space for HVMOP_{set,get}_param. -+ */ -+ -+/* -+ * How should CPU0 event-channel notifications be delivered? -+ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). -+ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: -+ * Domain = val[47:32], Bus = val[31:16], -+ * DevFn = val[15: 8], IntX = val[ 1: 0] -+ * If val == 0 then CPU0 event-channel notifications are not delivered. -+ */ -+#define HVM_PARAM_CALLBACK_IRQ 0 -+ -+/* -+ * These are not used by Xen. They are here for convenience of HVM-guest -+ * xenbus implementations. -+ */ -+#define HVM_PARAM_STORE_PFN 1 -+#define HVM_PARAM_STORE_EVTCHN 2 -+ -+#define HVM_PARAM_PAE_ENABLED 4 -+ -+#define HVM_PARAM_IOREQ_PFN 5 -+ -+#define HVM_PARAM_BUFIOREQ_PFN 6 -+ -+#ifdef __ia64__ -+ -+#define HVM_PARAM_NVRAM_FD 7 -+#define HVM_PARAM_VHPT_SIZE 8 -+#define HVM_PARAM_BUFPIOREQ_PFN 9 -+ -+#elif defined(__i386__) || defined(__x86_64__) -+ -+/* Expose Viridian interfaces to this HVM guest? */ -+#define HVM_PARAM_VIRIDIAN 9 -+ -+#endif -+ -+/* -+ * Set mode for virtual timers (currently x86 only): -+ * delay_for_missed_ticks (default): -+ * Do not advance a vcpu's time beyond the correct delivery time for -+ * interrupts that have been missed due to preemption. Deliver missed -+ * interrupts when the vcpu is rescheduled and advance the vcpu's virtual -+ * time stepwise for each one. -+ * no_delay_for_missed_ticks: -+ * As above, missed interrupts are delivered, but guest time always tracks -+ * wallclock (i.e., real) time while doing so. -+ * no_missed_ticks_pending: -+ * No missed interrupts are held pending. 
Instead, to ensure ticks are -+ * delivered at some non-zero rate, if we detect missed ticks then the -+ * internal tick alarm is not disabled if the VCPU is preempted during the -+ * next tick period. -+ * one_missed_tick_pending: -+ * Missed interrupts are collapsed together and delivered as one 'late tick'. -+ * Guest time always tracks wallclock (i.e., real) time. -+ */ -+#define HVM_PARAM_TIMER_MODE 10 -+#define HVMPTM_delay_for_missed_ticks 0 -+#define HVMPTM_no_delay_for_missed_ticks 1 -+#define HVMPTM_no_missed_ticks_pending 2 -+#define HVMPTM_one_missed_tick_pending 3 -+ -+/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ -+#define HVM_PARAM_HPET_ENABLED 11 -+ -+/* Identity-map page directory used by Intel EPT when CR0.PG=0. */ -+#define HVM_PARAM_IDENT_PT 12 -+ -+/* Device Model domain, defaults to 0. */ -+#define HVM_PARAM_DM_DOMAIN 13 -+ -+/* ACPI S state: currently support S0 and S3 on x86. */ -+#define HVM_PARAM_ACPI_S_STATE 14 -+ -+/* TSS used on Intel when CR0.PE=0. */ -+#define HVM_PARAM_VM86_TSS 15 -+ -+/* Boolean: Enable aligning all periodic vpts to reduce interrupts */ -+#define HVM_PARAM_VPT_ALIGN 16 -+ -+#define HVM_NR_PARAMS 17 -+ -+#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/hvm/save.h 2008-04-02 12:34:02.000000000 +0200 ++++ b/include/xen/interface/hvm/save.h 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,88 @@ +/* + * hvm/save.h @@ -3268,7 +3130,7 @@ Acked-by: jbeulich@novell.com + +#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/io/fsif.h 2009-06-23 09:28:21.000000000 +0200 ++++ b/include/xen/interface/io/fsif.h 2009-06-23 09:28:21.000000000 +0200 @@ -0,0 +1,192 @@ +/****************************************************************************** + * fsif.h @@ -3463,134 +3325,7 @@ Acked-by: jbeulich@novell.com + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/io/pciif.h 2009-04-07 13:58:49.000000000 +0200 -@@ -0,0 +1,124 @@ -+/* -+ * PCI Backend/Frontend Common Data Structures & Macros -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Author: Ryan Wilson -+ */ -+#ifndef __XEN_PCI_COMMON_H__ -+#define __XEN_PCI_COMMON_H__ -+ -+/* Be sure to bump this number if you change this file */ -+#define XEN_PCI_MAGIC "7" -+ -+/* xen_pci_sharedinfo flags */ -+#define _XEN_PCIF_active (0) -+#define XEN_PCIF_active (1<<_XEN_PCIF_active) -+#define _XEN_PCIB_AERHANDLER (1) -+#define XEN_PCIB_AERHANDLER (1<<_XEN_PCIB_AERHANDLER) -+#define _XEN_PCIB_active (2) -+#define XEN_PCIB_active (1<<_XEN_PCIB_active) -+ -+/* xen_pci_op commands */ -+#define XEN_PCI_OP_conf_read (0) -+#define XEN_PCI_OP_conf_write (1) -+#define XEN_PCI_OP_enable_msi (2) -+#define XEN_PCI_OP_disable_msi (3) -+#define XEN_PCI_OP_enable_msix (4) -+#define XEN_PCI_OP_disable_msix (5) -+#define XEN_PCI_OP_aer_detected (6) -+#define XEN_PCI_OP_aer_resume (7) -+#define XEN_PCI_OP_aer_mmio (8) -+#define XEN_PCI_OP_aer_slotreset (9) -+ -+/* xen_pci_op error numbers */ -+#define XEN_PCI_ERR_success (0) -+#define XEN_PCI_ERR_dev_not_found (-1) -+#define XEN_PCI_ERR_invalid_offset (-2) -+#define XEN_PCI_ERR_access_denied (-3) -+#define XEN_PCI_ERR_not_implemented (-4) -+/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ -+#define XEN_PCI_ERR_op_failed (-5) -+ -+/* -+ * it should be PAGE_SIZE-sizeof(struct xen_pci_op))/sizeof(struct msix_entry)) -+ * Should not exceed 128 -+ */ -+#define SH_INFO_MAX_VEC 128 -+ -+struct xen_msix_entry { -+ uint16_t vector; -+ uint16_t entry; -+}; -+struct xen_pci_op { -+ /* IN: what action to perform: XEN_PCI_OP_* */ -+ uint32_t cmd; -+ -+ /* OUT: will contain an error number (if any) from errno.h */ -+ int32_t err; -+ -+ /* IN: which device to touch */ -+ uint32_t domain; /* PCI Domain/Segment */ -+ uint32_t bus; -+ uint32_t devfn; -+ -+ /* IN: which configuration registers to touch */ -+ int32_t offset; -+ int32_t size; -+ -+ /* IN/OUT: Contains the result after a READ or the value to WRITE */ -+ uint32_t value; -+ /* IN: Contains extra infor for this operation */ -+ uint32_t info; -+ /*IN: param for msi-x */ -+ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC]; -+}; -+ -+/*used for pcie aer handling*/ -+struct xen_pcie_aer_op -+{ -+ -+ /* IN: what action to perform: XEN_PCI_OP_* */ -+ uint32_t cmd; -+ /*IN/OUT: return aer_op result or carry error_detected state as input*/ -+ int32_t err; -+ -+ /* IN: which device to touch */ -+ uint32_t domain; /* PCI Domain/Segment*/ -+ uint32_t bus; -+ uint32_t devfn; -+}; -+struct xen_pci_sharedinfo { -+ /* flags - XEN_PCIF_* */ -+ uint32_t flags; -+ struct xen_pci_op op; -+ struct xen_pcie_aer_op aer_op; -+}; -+ -+#endif /* __XEN_PCI_COMMON_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/io/tpmif.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/interface/io/tpmif.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,77 @@ +/****************************************************************************** + * tpmif.h @@ -3670,7 +3405,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/io/usbif.h 2010-02-24 13:13:46.000000000 +0100 ++++ b/include/xen/interface/io/usbif.h 2010-02-24 13:13:46.000000000 +0100 @@ -0,0 +1,151 @@ +/* + * usbif.h @@ -3824,7 +3559,7 @@ Acked-by: jbeulich@novell.com + +#endif /* __XEN_PUBLIC_IO_USBIF_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2010-05-12/include/xen/interface/io/vscsiif.h 2008-07-21 11:00:33.000000000 +0200 ++++ b/include/xen/interface/io/vscsiif.h 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,105 @@ +/****************************************************************************** + * vscsiif.h @@ -3932,7 +3667,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/kexec.h 2008-11-25 12:22:34.000000000 +0100 ++++ b/include/xen/interface/kexec.h 2008-11-25 12:22:34.000000000 +0100 @@ -0,0 +1,168 @@ +/****************************************************************************** + * kexec.h - Public portion @@ -4103,8 +3838,8 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/mem_event.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,69 @@ ++++ b/include/xen/interface/mem_event.h 2011-03-17 13:50:24.000000000 +0100 +@@ -0,0 +1,82 @@ +/****************************************************************************** + * mem_event.h + * @@ -4130,40 +3865,53 @@ Acked-by: jbeulich@novell.com +#ifndef _XEN_PUBLIC_MEM_EVENT_H +#define _XEN_PUBLIC_MEM_EVENT_H + -+ +#include "xen.h" +#include "io/ring.h" + -+ -+/* Memory event notification modes */ -+#define MEM_EVENT_MODE_ASYNC 0 -+#define MEM_EVENT_MODE_SYNC (1 << 0) -+#define MEM_EVENT_MODE_SYNC_ALL (1 << 1) ++/* Memory event type */ ++#define MEM_EVENT_TYPE_SHARED 0 ++#define MEM_EVENT_TYPE_PAGING 1 ++#define MEM_EVENT_TYPE_ACCESS 2 + +/* Memory event flags */ +#define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0) -+#define MEM_EVENT_FLAG_DOM_PAUSED (1 << 1) -+#define MEM_EVENT_FLAG_OUT_OF_MEM (1 << 2) ++#define MEM_EVENT_FLAG_DROP_PAGE (1 << 1) + ++/* Reasons for the memory event request */ ++#define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */ ++#define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */ ++#define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is CR0 value */ ++#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */ ++#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */ ++#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */ + +typedef struct mem_event_shared_page { -+ int port; ++ uint32_t port; +} mem_event_shared_page_t; + +typedef struct mem_event_st { -+ unsigned long gfn; -+ unsigned long offset; -+ unsigned long p2mt; -+ int vcpu_id; -+ uint64_t flags; -+} mem_event_request_t, mem_event_response_t; ++ uint16_t type; ++ uint16_t flags; ++ uint32_t vcpu_id; + ++ uint64_t gfn; ++ uint64_t offset; ++ uint64_t gla; /* if gla_valid */ + -+DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t); ++ uint32_t p2mt; + ++ uint16_t access_r:1; ++ uint16_t access_w:1; ++ uint16_t access_x:1; ++ uint16_t gla_valid:1; ++ uint16_t available:12; + -+#endif ++ uint16_t reason; ++} mem_event_request_t, mem_event_response_t; ++ ++DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t); + ++#endif + +/* + * Local variables: @@ -4175,7 +3923,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/nmi.h 2009-06-23 09:28:21.000000000 +0200 ++++ b/include/xen/interface/nmi.h 2009-06-23 09:28:21.000000000 +0200 @@ -0,0 +1,80 @@ +/****************************************************************************** + * nmi.h @@ -4258,7 +4006,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 
-+++ head-2010-05-12/include/xen/interface/platform.h 2010-01-04 11:56:34.000000000 +0100 ++++ b/include/xen/interface/platform.h 2010-01-04 11:56:34.000000000 +0100 @@ -0,0 +1,393 @@ +/****************************************************************************** + * platform.h @@ -4654,8 +4402,8 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/sysctl.h 2010-05-07 11:10:48.000000000 +0200 -@@ -0,0 +1,607 @@ ++++ b/include/xen/interface/sysctl.h 2011-03-17 13:50:24.000000000 +0100 +@@ -0,0 +1,637 @@ +/****************************************************************************** + * sysctl.h + * @@ -4751,8 +4499,10 @@ Acked-by: jbeulich@novell.com +struct xen_sysctl_physinfo { + uint32_t threads_per_core; + uint32_t cores_per_socket; -+ uint32_t nr_cpus, max_cpu_id; -+ uint32_t nr_nodes, max_node_id; ++ uint32_t nr_cpus; /* # CPUs currently online */ ++ uint32_t max_cpu_id; /* Largest possible CPU ID on this host */ ++ uint32_t nr_nodes; /* # nodes currently online */ ++ uint32_t max_node_id; /* Largest possible node ID on this host */ + uint32_t cpu_khz; + uint64_aligned_t total_pages; + uint64_aligned_t free_pages; @@ -4881,6 +4631,11 @@ Acked-by: jbeulich@novell.com + uint64_aligned_t idle_time; /* idle time from boot */ + XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */ + XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */ ++ uint64_aligned_t pc3; ++ uint64_aligned_t pc6; ++ uint64_aligned_t pc7; ++ uint64_aligned_t cc3; ++ uint64_aligned_t cc6; +}; + +struct xen_sysctl_get_pmstat { @@ -4904,21 +4659,12 @@ Acked-by: jbeulich@novell.com +typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t; +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t); + -+/* -+ * Status codes. Must be greater than 0 to avoid confusing -+ * sysctl callers that see 0 as a plain successful return. -+ */ -+#define XEN_CPU_HOTPLUG_STATUS_OFFLINE 1 -+#define XEN_CPU_HOTPLUG_STATUS_ONLINE 2 -+#define XEN_CPU_HOTPLUG_STATUS_NEW 3 -+ +/* XEN_SYSCTL_cpu_hotplug */ +struct xen_sysctl_cpu_hotplug { + /* IN variables */ + uint32_t cpu; /* Physical cpu. */ +#define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0 +#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1 -+#define XEN_SYSCTL_CPU_HOTPLUG_STATUS 2 + uint32_t op; /* hotplug opcode */ +}; +typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t; @@ -5121,17 +4867,17 @@ Acked-by: jbeulich@novell.com + * IN: maximum addressable entry in the caller-provided arrays. + * OUT: largest cpu identifier in the system. + * If OUT is greater than IN then the arrays are truncated! ++ * If OUT is leass than IN then the array tails are not written by sysctl. + */ + uint32_t max_cpu_index; + + /* -+ * If not NULL, this array is filled with core/socket/node identifier for -+ * each cpu. ++ * If not NULL, these arrays are filled with core/socket/node identifier ++ * for each cpu. + * If a cpu has no core/socket/node information (e.g., cpu not present) -+ * then the sentinel value ~0u is written. -+ * The size of this array is specified by the caller in @max_cpu_index. -+ * If the actual @max_cpu_index is smaller than the array then the trailing -+ * elements of the array will not be written by the sysctl. ++ * then the sentinel value ~0u is written to each array. 
++ * The number of array elements written by the sysctl is: ++ * min(@max_cpu_index_IN,@max_cpu_index_OUT)+1 + */ + XEN_GUEST_HANDLE_64(uint32) cpu_to_core; + XEN_GUEST_HANDLE_64(uint32) cpu_to_socket; @@ -5193,14 +4939,46 @@ Acked-by: jbeulich@novell.com +typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t); + ++#define ARINC653_MAX_DOMAINS_PER_SCHEDULE 64 ++/* ++ * This structure is used to pass a new ARINC653 schedule from a ++ * privileged domain (ie dom0) to Xen. ++ */ ++struct xen_sysctl_arinc653_schedule { ++ /* major_frame holds the time for the new schedule's major frame ++ * in nanoseconds. */ ++ uint64_aligned_t major_frame; ++ /* num_sched_entries holds how many of the entries in the ++ * sched_entries[] array are valid. */ ++ uint8_t num_sched_entries; ++ /* The sched_entries array holds the actual schedule entries. */ ++ struct { ++ /* dom_handle must match a domain's UUID */ ++ xen_domain_handle_t dom_handle; ++ /* If a domain has multiple VCPUs, vcpu_id specifies which one ++ * this schedule entry applies to. It should be set to 0 if ++ * there is only one VCPU for the domain. */ ++ unsigned int vcpu_id; ++ /* runtime specifies the amount of time that should be allocated ++ * to this VCPU per major frame. It is specified in nanoseconds */ ++ uint64_aligned_t runtime; ++ } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE]; ++}; ++typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t; ++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t); ++ +/* XEN_SYSCTL_scheduler_op */ +/* Set or get info? */ +#define XEN_SYSCTL_SCHEDOP_putinfo 0 +#define XEN_SYSCTL_SCHEDOP_getinfo 1 +struct xen_sysctl_scheduler_op { -+ uint32_t sched_id; /* XEN_SCHEDULER_* (domctl.h) */ -+ uint32_t cmd; /* XEN_SYSCTL_SCHEDOP_* */ ++ uint32_t cpupool_id; /* Cpupool whose scheduler is to be targetted. 
*/ ++ uint32_t sched_id; /* XEN_SCHEDULER_* (domctl.h) */ ++ uint32_t cmd; /* XEN_SYSCTL_SCHEDOP_* */ + union { ++ struct xen_sysctl_sched_arinc653 { ++ XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule; ++ } sched_arinc653; + } u; +}; +typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t; @@ -5264,7 +5042,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/tmem.h 2010-01-04 11:56:34.000000000 +0100 ++++ b/include/xen/interface/tmem.h 2010-01-04 11:56:34.000000000 +0100 @@ -0,0 +1,144 @@ +/****************************************************************************** + * tmem.h @@ -5411,8 +5189,8 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/trace.h 2010-05-07 11:10:48.000000000 +0200 -@@ -0,0 +1,227 @@ ++++ b/include/xen/interface/trace.h 2010-08-31 09:24:21.000000000 +0200 +@@ -0,0 +1,230 @@ +/****************************************************************************** + * include/public/trace.h + * @@ -5454,6 +5232,7 @@ Acked-by: jbeulich@novell.com +#define TRC_PV 0x0020f000 /* Xen PV traces */ +#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ +#define TRC_PM 0x0080f000 /* Xen power management trace */ ++#define TRC_GUEST 0x0800f000 /* Guest-generated traces */ +#define TRC_ALL 0x0ffff000 +#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) +#define TRC_HD_CYCLE_FLAG (1UL<<31) @@ -5494,6 +5273,7 @@ Acked-by: jbeulich@novell.com +#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13) +#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14) +#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15) ++#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16) + +#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) +#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) @@ -5570,6 +5350,7 @@ Acked-by: jbeulich@novell.com +#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18) +#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) +#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19) ++#define TRC_HVM_RDTSC (TRC_HVM_HANDLER + 0x1a) +#define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20) +#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21) + @@ -5641,7 +5422,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/xen-compat.h 2010-01-04 11:56:34.000000000 +0100 ++++ b/include/xen/interface/xen-compat.h 2010-01-04 11:56:34.000000000 +0100 @@ -0,0 +1,44 @@ +/****************************************************************************** + * xen-compat.h @@ -5688,8 +5469,8 @@ Acked-by: jbeulich@novell.com + +#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/xenoprof.h 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,138 @@ ++++ b/include/xen/interface/xenoprof.h 2010-08-31 09:24:21.000000000 +0200 +@@ -0,0 +1,152 @@ +/****************************************************************************** + * xenoprof.h + * @@ -5742,7 +5523,11 @@ Acked-by: jbeulich@novell.com +#define XENOPROF_shutdown 13 +#define XENOPROF_get_buffer 14 +#define XENOPROF_set_backtrace 15 -+#define XENOPROF_last_op 15 ++ ++/* AMD IBS support */ ++#define XENOPROF_get_ibs_caps 16 ++#define XENOPROF_ibs_counter 17 ++#define XENOPROF_last_op 17 + +#define MAX_OPROF_EVENTS 32 +#define MAX_OPROF_DOMAINS 25 @@ -5816,6 +5601,16 @@ Acked-by: jbeulich@novell.com +} xenoprof_passive_t; 
+DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); + ++struct xenoprof_ibs_counter { ++ uint64_t op_enabled; ++ uint64_t fetch_enabled; ++ uint64_t max_cnt_fetch; ++ uint64_t max_cnt_op; ++ uint64_t rand_en; ++ uint64_t dispatched_ops; ++}; ++typedef struct xenoprof_ibs_counter xenoprof_ibs_counter_t; ++DEFINE_XEN_GUEST_HANDLE(xenoprof_ibs_counter_t); + +#endif /* __XEN_PUBLIC_XENOPROF_H__ */ + @@ -5829,7 +5624,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/xsm/acm.h 2010-01-04 11:56:34.000000000 +0100 ++++ b/include/xen/interface/xsm/acm.h 2010-01-04 11:56:34.000000000 +0100 @@ -0,0 +1,223 @@ +/* + * acm.h: Xen access control module interface defintions @@ -6055,7 +5850,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/xsm/acm_ops.h 2007-10-22 13:39:15.000000000 +0200 ++++ b/include/xen/interface/xsm/acm_ops.h 2007-10-22 13:39:15.000000000 +0200 @@ -0,0 +1,159 @@ +/* + * acm_ops.h: Xen access control module hypervisor commands @@ -6217,7 +6012,7 @@ Acked-by: jbeulich@novell.com + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-12/include/xen/interface/xsm/flask_op.h 2010-01-04 11:56:34.000000000 +0100 ++++ b/include/xen/interface/xsm/flask_op.h 2010-01-04 11:56:34.000000000 +0100 @@ -0,0 +1,47 @@ +/* + * This file contains the flask_op hypercall commands and definitions. diff --git a/patches.xen/xen3-auto-xen-arch.diff b/patches.xen/xen3-auto-xen-arch.diff index 0f096ae..ae988bd 100644 --- a/patches.xen/xen3-auto-xen-arch.diff +++ b/patches.xen/xen3-auto-xen-arch.diff @@ -1,5 +1,5 @@ Subject: xen3 xen-arch -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com @@ -11,6 +11,7 @@ places): +++ linux/arch/x86/kernel/apic/io_apic-xen.c +++ linux/arch/x86/kernel/apic/ipi-xen.c +++ linux/arch/x86/kernel/apic/probe_32-xen.c ++++ linux/arch/x86/kernel/apic-xen.c +++ linux/arch/x86/kernel/cpu/common_64-xen.c +++ linux/arch/x86/kernel/e820-xen.c +++ linux/arch/x86/kernel/head-xen.c @@ -19,9 +20,11 @@ places): +++ linux/arch/x86/kernel/io_apic-xen.c +++ linux/arch/x86/kernel/ipi-xen.c +++ linux/arch/x86/kernel/irq-xen.c ++++ linux/arch/x86/kernel/irq_work-xen.c +++ linux/arch/x86/kernel/ldt-xen.c +++ linux/arch/x86/kernel/microcode_core-xen.c +++ linux/arch/x86/kernel/mpparse-xen.c ++++ linux/arch/x86/kernel/msr-xen.c +++ linux/arch/x86/kernel/pci-nommu-xen.c +++ linux/arch/x86/kernel/process-xen.c +++ linux/arch/x86/kernel/setup-xen.c @@ -29,15 +32,18 @@ places): +++ linux/arch/x86/kernel/traps-xen.c +++ linux/arch/x86/kernel/x86_init-xen.c +++ linux/arch/x86/lib/cache-smp-xen.c ++++ linux/arch/x86/mm/dump_pagetables-xen.c +++ linux/arch/x86/mm/fault-xen.c +++ linux/arch/x86/mm/init-xen.c +++ linux/arch/x86/mm/iomap_32-xen.c -+++ linux/arch/x86/mm/ioremap-xen.c +++ linux/arch/x86/mm/pageattr-xen.c +++ linux/arch/x86/mm/pat-xen.c +++ linux/arch/x86/mm/pgtable-xen.c +++ linux/arch/x86/vdso/vdso32-setup-xen.c +++ linux/drivers/char/mem-xen.c ++++ linux/drivers/hwmon/coretemp-xen.c ++++ linux/drivers/hwmon/pkgtemp-xen.c ++++ linux/drivers/hwmon/via-cputemp-xen.c +++ linux/arch/x86/include/mach-xen/asm/desc.h +++ linux/arch/x86/include/mach-xen/asm/dma-mapping.h +++ linux/arch/x86/include/mach-xen/asm/fixmap.h @@ -47,6 +53,7 @@ places): +++ 
linux/arch/x86/include/mach-xen/asm/irqflags.h +++ linux/arch/x86/include/mach-xen/asm/mmu_context.h +++ linux/arch/x86/include/mach-xen/asm/pci.h ++++ linux/arch/x86/include/mach-xen/asm/perf_event.h +++ linux/arch/x86/include/mach-xen/asm/pgalloc.h +++ linux/arch/x86/include/mach-xen/asm/pgtable.h +++ linux/arch/x86/include/mach-xen/asm/pgtable-3level_types.h @@ -64,6 +71,7 @@ places): List of files folded into their native counterparts (and hence removed from this patch for xen-port-patches.py to not needlessly pick them up; for reference, prefixed with the version the removal occured): +2.6.18/arch/x86/kernel/quirks-xen.c 2.6.18/arch/x86/include/mach-xen/asm/pgtable-2level.h 2.6.18/arch/x86/include/mach-xen/asm/pgtable-2level-defs.h 2.6.19/arch/x86/include/mach-xen/asm/ptrace.h @@ -122,7 +130,7 @@ for reference, prefixed with the version the removal occured): 2.6.33/arch/x86/kernel/irq_64-xen.c --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-03-22 12:00:53.000000000 +0100 ++++ b/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-03-22 12:00:53.000000000 +0100 @@ -0,0 +1,208 @@ +/* + * processor_extcntl_xen.c - interface to notify Xen @@ -333,7 +341,7 @@ for reference, prefixed with the version the removal occured): +} +EXPORT_SYMBOL(arch_acpi_processor_init_extcntl); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200 ++++ b/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200 @@ -0,0 +1,113 @@ +/* + * sleep.c - x86-specific ACPI sleep support. @@ -449,7 +457,7 @@ for reference, prefixed with the version the removal occured): +core_initcall(acpisleep_dmi_init); +#endif /* CONFIG_ACPI_PV_SLEEP */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/apic/apic-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ b/arch/x86/kernel/apic/apic-xen.c 2007-06-12 13:12:48.000000000 +0200 @@ -0,0 +1,155 @@ +/* + * Local APIC handling, local APIC timers @@ -607,7 +615,7 @@ for reference, prefixed with the version the removal occured): + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/cpu/common-xen.c 2009-05-19 09:16:41.000000000 +0200 ++++ b/arch/x86/kernel/cpu/common-xen.c 2009-05-19 09:16:41.000000000 +0200 @@ -0,0 +1,745 @@ +#include +#include @@ -1355,7 +1363,7 @@ for reference, prefixed with the version the removal occured): +} +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2009-10-01 11:00:47.000000000 +0200 ++++ b/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2009-10-01 11:00:47.000000000 +0200 @@ -0,0 +1,134 @@ +#include +#include @@ -1492,7 +1500,7 @@ for reference, prefixed with the version the removal occured): +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/cpu/mtrr/main-xen.c 2008-01-28 12:24:18.000000000 +0100 ++++ b/arch/x86/kernel/cpu/mtrr/main-xen.c 2008-01-28 12:24:18.000000000 +0100 @@ -0,0 +1,198 @@ +#include +#include @@ -1693,7 +1701,7 @@ for reference, prefixed with the version the removal occured): + +subsys_initcall(mtrr_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/entry_32-xen.S 2009-05-19 09:16:41.000000000 +0200 ++++ b/arch/x86/kernel/entry_32-xen.S 2009-05-19 09:16:41.000000000 +0200 @@ -0,0 +1,1242 @@ +/* + * linux/arch/i386/entry.S @@ -2938,7 +2946,7 @@ for 
reference, prefixed with the version the removal occured): + +syscall_table_size=(.-sys_call_table) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/fixup.c 2008-01-28 12:24:18.000000000 +0100 ++++ b/arch/x86/kernel/fixup.c 2008-01-28 12:24:18.000000000 +0100 @@ -0,0 +1,88 @@ +/****************************************************************************** + * fixup.c @@ -3029,7 +3037,7 @@ for reference, prefixed with the version the removal occured): +} +__initcall(fixup_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/head_32-xen.S 2007-06-12 13:12:48.000000000 +0200 ++++ b/arch/x86/kernel/head_32-xen.S 2007-06-12 13:12:48.000000000 +0200 @@ -0,0 +1,207 @@ + + @@ -3239,7 +3247,7 @@ for reference, prefixed with the version the removal occured): + ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic") + ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/io_apic_32-xen.c 2009-03-18 10:39:31.000000000 +0100 ++++ b/arch/x86/kernel/io_apic_32-xen.c 2009-03-18 10:39:31.000000000 +0100 @@ -0,0 +1,2786 @@ +/* + * Intel IO-APIC support for multi-Pentium hosts. @@ -6028,7 +6036,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* CONFIG_ACPI */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/ioport_32-xen.c 2008-01-28 12:24:19.000000000 +0100 ++++ b/arch/x86/kernel/ioport_32-xen.c 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,123 @@ +/* + * linux/arch/i386/kernel/ioport.c @@ -6154,7 +6162,7 @@ for reference, prefixed with the version the removal occured): + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/ldt_32-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ b/arch/x86/kernel/ldt_32-xen.c 2007-06-12 13:12:48.000000000 +0200 @@ -0,0 +1,270 @@ +/* + * linux/kernel/ldt.c @@ -6427,7 +6435,7 @@ for reference, prefixed with the version the removal occured): + return ret; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/microcode-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ b/arch/x86/kernel/microcode-xen.c 2007-06-12 13:12:48.000000000 +0200 @@ -0,0 +1,144 @@ +/* + * Intel CPU Microcode Update Driver for Linux @@ -6574,7 +6582,7 @@ for reference, prefixed with the version the removal occured): +module_exit(microcode_exit) +MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/mpparse_32-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ b/arch/x86/kernel/mpparse_32-xen.c 2007-06-12 13:12:48.000000000 +0200 @@ -0,0 +1,1185 @@ +/* + * Intel Multiprocessor Specification 1.1 and 1.4 @@ -7762,7 +7770,7 @@ for reference, prefixed with the version the removal occured): +#endif /* CONFIG_X86_IO_APIC */ +#endif /* CONFIG_ACPI */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/pci-dma-xen.c 2009-11-06 10:23:23.000000000 +0100 ++++ b/arch/x86/kernel/pci-dma-xen.c 2009-11-06 10:23:23.000000000 +0100 @@ -0,0 +1,406 @@ +/* + * Dynamic DMA mapping support. 
@@ -8171,7 +8179,7 @@ for reference, prefixed with the version the removal occured): +} +EXPORT_SYMBOL(dma_sync_single_for_device); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/process_32-xen.c 2008-07-21 11:00:32.000000000 +0200 ++++ b/arch/x86/kernel/process_32-xen.c 2008-07-21 11:00:32.000000000 +0200 @@ -0,0 +1,877 @@ +/* + * linux/arch/i386/kernel/process.c @@ -9051,57 +9059,7 @@ for reference, prefixed with the version the removal occured): + return sp & ~0xf; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/quirks-xen.c 2008-01-28 12:24:19.000000000 +0100 -@@ -0,0 +1,47 @@ -+/* -+ * This file contains work-arounds for x86 and x86_64 platform bugs. -+ */ -+#include -+#include -+ -+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI) -+ -+static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) -+{ -+ u8 config, rev; -+ u32 word; -+ -+ /* BIOS may enable hardware IRQ balancing for -+ * E7520/E7320/E7525(revision ID 0x9 and below) -+ * based platforms. -+ * Disable SW irqbalance/affinity on those platforms. -+ */ -+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); -+ if (rev > 0x9) -+ return; -+ -+ printk(KERN_INFO "Intel E7520/7320/7525 detected."); -+ -+ /* enable access to config space*/ -+ pci_read_config_byte(dev, 0xf4, &config); -+ pci_write_config_byte(dev, 0xf4, config|0x2); -+ -+ /* read xTPR register */ -+ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word); -+ -+ if (!(word & (1 << 13))) { -+ struct xen_platform_op op; -+ printk(KERN_INFO "Disabling irq balancing and affinity\n"); -+ op.cmd = XENPF_platform_quirk; -+ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING; -+ WARN_ON(HYPERVISOR_platform_op(&op)); -+ } -+ -+ /* put back the original value for config space*/ -+ if (!(config & 0x2)) -+ pci_write_config_byte(dev, 0xf4, config); -+} -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/setup_32-xen.c 2008-04-22 15:41:51.000000000 +0200 ++++ b/arch/x86/kernel/setup_32-xen.c 2008-04-22 15:41:51.000000000 +0200 @@ -0,0 +1,1919 @@ +/* + * linux/arch/i386/kernel/setup.c @@ -11023,7 +10981,7 @@ for reference, prefixed with the version the removal occured): + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/smp_32-xen.c 2007-12-10 08:47:31.000000000 +0100 ++++ b/arch/x86/kernel/smp_32-xen.c 2007-12-10 08:47:31.000000000 +0100 @@ -0,0 +1,605 @@ +/* + * Intel SMP support routines. 
@@ -11631,8 +11589,8 @@ for reference, prefixed with the version the removal occured): +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/time-xen.c 2010-02-24 11:50:47.000000000 +0100 -@@ -0,0 +1,1224 @@ ++++ b/arch/x86/kernel/time-xen.c 2010-08-31 09:24:21.000000000 +0200 +@@ -0,0 +1,1242 @@ +/* + * linux/arch/i386/kernel/time.c + * @@ -11749,9 +11707,6 @@ for reference, prefixed with the version the removal occured): +static struct timespec shadow_tv; +static u32 shadow_tv_version; + -+static struct timeval monotonic_tv; -+static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED; -+ +/* Keep track of last time we did processing/updating of jiffies and xtime. */ +static u64 processed_system_time; /* System time (ns) at last processing. */ +static DEFINE_PER_CPU(u64, processed_system_time); @@ -12012,6 +11967,12 @@ for reference, prefixed with the version the removal occured): +} +EXPORT_SYMBOL(rtc_cmos_write); + ++static struct { ++ spinlock_t lock; ++ struct timeval tv; ++ u32 version; ++} monotonic = { .lock = SPIN_LOCK_UNLOCKED }; ++ +/* + * This version of gettimeofday has microsecond resolution + * and better than microsecond precision on fast x86 machines with TSC. @@ -12024,7 +11985,7 @@ for reference, prefixed with the version the removal occured): + s64 nsec; + unsigned int cpu; + struct shadow_time_info *shadow; -+ u32 local_time_version; ++ u32 local_time_version, monotonic_version; + + cpu = get_cpu(); + shadow = &per_cpu(shadow_time, cpu); @@ -12048,6 +12009,8 @@ for reference, prefixed with the version the removal occured): + __normalize_time(&sec, &nsec); + usec += (long)nsec / NSEC_PER_USEC; + ++ monotonic_version = monotonic.version; ++ + if (unlikely(!time_values_up_to_date(cpu))) { + /* + * We may have blocked for a long time, @@ -12069,17 +12032,16 @@ for reference, prefixed with the version the removal occured): + sec++; + } + -+ spin_lock_irqsave(&monotonic_lock, flags); -+ if ((sec > monotonic_tv.tv_sec) || -+ ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec))) -+ { -+ monotonic_tv.tv_sec = sec; -+ monotonic_tv.tv_usec = usec; -+ } else { -+ sec = monotonic_tv.tv_sec; -+ usec = monotonic_tv.tv_usec; ++ spin_lock_irqsave(&monotonic.lock, flags); ++ if (unlikely(sec < monotonic.tv.tv_sec) || ++ (sec == monotonic.tv.tv_sec && usec <= monotonic.tv.tv_usec)) { ++ sec = monotonic.tv.tv_sec; ++ usec = monotonic.tv.tv_usec; ++ } else if (likely(monotonic_version == monotonic.version)) { ++ monotonic.tv.tv_sec = sec; ++ monotonic.tv.tv_usec = usec; + } -+ spin_unlock_irqrestore(&monotonic_lock, flags); ++ spin_unlock_irqrestore(&monotonic.lock, flags); + + tv->tv_sec = sec; + tv->tv_usec = usec; @@ -12087,6 +12049,16 @@ for reference, prefixed with the version the removal occured): + +EXPORT_SYMBOL(do_gettimeofday); + ++/* Reset monotonic gettimeofday() timeval. 
*/ ++static inline void monotonic_reset(void) ++{ ++ spin_lock(&monotonic.lock); ++ monotonic.tv.tv_sec = 0; ++ monotonic.tv.tv_usec = 0; ++ ++monotonic.version; ++ spin_unlock(&monotonic.lock); ++} ++ +int do_settimeofday(struct timespec *tv) +{ + time_t sec; @@ -12095,6 +12067,11 @@ for reference, prefixed with the version the removal occured): + struct shadow_time_info *shadow; + struct xen_platform_op op; + ++ if (unlikely(!tv)) { ++ monotonic_reset(); ++ return 0; ++ } ++ + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + @@ -12134,11 +12111,7 @@ for reference, prefixed with the version the removal occured): + } + ntp_clear(); + -+ /* Reset monotonic gettimeofday() timeval. */ -+ spin_lock(&monotonic_lock); -+ monotonic_tv.tv_sec = 0; -+ monotonic_tv.tv_usec = 0; -+ spin_unlock(&monotonic_lock); ++ monotonic_reset(); + + write_sequnlock_irq(&xtime_lock); + @@ -12274,6 +12247,7 @@ for reference, prefixed with the version the removal occured): + s64 delta, delta_cpu, stolen, blocked; + u64 sched_time; + unsigned int i, cpu = smp_processor_id(); ++ int schedule_clock_was_set_work = 0; + struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); + struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu); + @@ -12335,12 +12309,14 @@ for reference, prefixed with the version the removal occured): + + if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) { + update_wallclock(); -+ if (keventd_up()) -+ schedule_work(&clock_was_set_work); ++ schedule_clock_was_set_work = 1; + } + + write_sequnlock(&xtime_lock); + ++ if (schedule_clock_was_set_work && keventd_up()) ++ schedule_work(&clock_was_set_work); ++ + /* + * Account stolen ticks. + * HACK: Passing NULL to account_steal_time() @@ -12858,7 +12834,7 @@ for reference, prefixed with the version the removal occured): +} +__initcall(xen_sysctl_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/traps_32-xen.c 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/kernel/traps_32-xen.c 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,1190 @@ +/* + * linux/arch/i386/traps.c @@ -14051,7 +14027,7 @@ for reference, prefixed with the version the removal occured): +__setup("call_trace=", call_trace_setup); +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mach-xen/Makefile 2007-06-12 13:12:48.000000000 +0200 ++++ b/arch/x86/mach-xen/Makefile 2007-06-12 13:12:48.000000000 +0200 @@ -0,0 +1,5 @@ +# +# Makefile for the linux kernel. 
@@ -14059,7 +14035,7 @@ for reference, prefixed with the version the removal occured): + +obj-y := setup.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mach-xen/setup.c 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/mach-xen/setup.c 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,158 @@ +/* + * Machine specific setup for generic @@ -14220,7 +14196,7 @@ for reference, prefixed with the version the removal occured): +#endif +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/lib/scrub.c 2008-02-08 12:30:51.000000000 +0100 ++++ b/arch/x86/lib/scrub.c 2008-02-08 12:30:51.000000000 +0100 @@ -0,0 +1,21 @@ +#include +#include @@ -14244,8 +14220,8 @@ for reference, prefixed with the version the removal occured): + clear_page(v); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/fault_32-xen.c 2007-12-10 08:47:31.000000000 +0100 -@@ -0,0 +1,779 @@ ++++ b/arch/x86/mm/fault_32-xen.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,792 @@ +/* + * linux/arch/i386/mm/fault.c + * @@ -15010,12 +14986,25 @@ for reference, prefixed with the version the removal occured): + return; + } + for (page = pgd_list; page; page = -+ (struct page *)page->index) -+ if (!vmalloc_sync_one(page_address(page), -+ address)) { ++ (struct page *)page->index) { ++ spinlock_t *lock = page->mapping ++ ? &((struct mm_struct *)page->mapping) ++ ->page_table_lock ++ : NULL; ++ pmd_t *pmd; ++ ++ if (lock) ++ spin_lock(lock); ++ pmd = vmalloc_sync_one(page_address(page), ++ address); ++ if (lock) ++ spin_unlock(lock); ++ ++ if (!pmd) { + BUG_ON(page != pgd_list); + break; + } ++ } + spin_unlock_irqrestore(&pgd_lock, flags); + if (!page) + set_bit(sync_index(address), insync); @@ -15026,7 +15015,7 @@ for reference, prefixed with the version the removal occured): +} +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/highmem_32-xen.c 2008-10-29 09:55:56.000000000 +0100 ++++ b/arch/x86/mm/highmem_32-xen.c 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,183 @@ +#include +#include @@ -15212,7 +15201,7 @@ for reference, prefixed with the version the removal occured): +EXPORT_SYMBOL(clear_highpage); +EXPORT_SYMBOL(copy_highpage); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/hypervisor.c 2009-06-09 15:01:37.000000000 +0200 ++++ b/arch/x86/mm/hypervisor.c 2009-06-09 15:01:37.000000000 +0200 @@ -0,0 +1,579 @@ +/****************************************************************************** + * mm/hypervisor.c @@ -15794,7 +15783,7 @@ for reference, prefixed with the version the removal occured): + return !rc; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/init_32-xen.c 2008-10-29 09:55:56.000000000 +0100 ++++ b/arch/x86/mm/init_32-xen.c 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,840 @@ +/* + * linux/arch/i386/mm/init.c @@ -16637,8 +16626,8 @@ for reference, prefixed with the version the removal occured): +#endif + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/ioremap_32-xen.c 2008-04-02 12:34:02.000000000 +0200 -@@ -0,0 +1,443 @@ ++++ b/arch/x86/mm/ioremap-xen.c 2010-09-16 13:31:46.000000000 +0200 +@@ -0,0 +1,439 @@ +/* + * arch/i386/mm/ioremap.c + * @@ -16686,7 +16675,7 @@ for reference, prefixed with the version the removal occured): + pgprot_t prot, + domid_t domid) +{ -+ int rc; ++ int rc = 0; + unsigned long i, start_address; + mmu_update_t *u, *v, *w; + @@ -16706,8 +16695,8 @@ for 
reference, prefixed with the version the removal occured): + direct_remap_area_pte_fn, &w); + if (rc) + goto out; -+ rc = -EFAULT; -+ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0) ++ rc = HYPERVISOR_mmu_update(u, v - u, NULL, domid); ++ if (rc < 0) + goto out; + v = w = u; + start_address = address; @@ -16731,13 +16720,9 @@ for reference, prefixed with the version the removal occured): + direct_remap_area_pte_fn, &w); + if (rc) + goto out; -+ rc = -EFAULT; -+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)) -+ goto out; ++ rc = HYPERVISOR_mmu_update(u, v - u, NULL, domid); + } + -+ rc = 0; -+ + out: + flush_tlb_all(); + @@ -17083,8 +17068,8 @@ for reference, prefixed with the version the removal occured): + } +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/pgtable_32-xen.c 2009-03-18 10:39:31.000000000 +0100 -@@ -0,0 +1,731 @@ ++++ b/arch/x86/mm/pgtable_32-xen.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,738 @@ +/* + * linux/arch/i386/mm/pgtable.c + */ @@ -17319,6 +17304,7 @@ for reference, prefixed with the version the removal occured): + *pprev = next; + if (next) + set_page_private(next, (unsigned long)pprev); ++ page->mapping = NULL; +} + +void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) @@ -17360,9 +17346,15 @@ for reference, prefixed with the version the removal occured): + pmd_t **pmd; + unsigned long flags; + ++ if (!pgd) ++ return NULL; ++ + pgd_test_and_unpin(pgd); + -+ if (PTRS_PER_PMD == 1 || !pgd) ++ /* Store a back link for vmalloc_sync_all(). */ ++ virt_to_page(pgd)->mapping = (void *)mm; ++ ++ if (PTRS_PER_PMD == 1) + return pgd; + + if (HAVE_SHARED_KERNEL_PMD) { @@ -17817,7 +17809,7 @@ for reference, prefixed with the version the removal occured): + mm_unpin(mm); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/oprofile/xenoprof.c 2008-01-28 12:24:19.000000000 +0100 ++++ b/arch/x86/oprofile/xenoprof.c 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,179 @@ +/** + * @file xenoprof.c @@ -17999,7 +17991,7 @@ for reference, prefixed with the version the removal occured): + xenoprofile_exit(); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/pci/irq-xen.c 2008-03-06 08:54:32.000000000 +0100 ++++ b/arch/x86/pci/irq-xen.c 2008-03-06 08:54:32.000000000 +0100 @@ -0,0 +1,1211 @@ +/* + * Low-Level PCI Support for PC -- Routing of Interrupts @@ -19213,7 +19205,7 @@ for reference, prefixed with the version the removal occured): + return count; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/pci/pcifront.c 2009-03-18 10:39:31.000000000 +0100 ++++ b/arch/x86/pci/pcifront.c 2009-03-18 10:39:31.000000000 +0100 @@ -0,0 +1,57 @@ +/* + * PCI Frontend Stub - puts some "dummy" functions in to the Linux x86 PCI core @@ -19273,7 +19265,7 @@ for reference, prefixed with the version the removal occured): + +arch_initcall(pcifront_x86_stub_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/ia32/ia32entry-xen.S 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/ia32/ia32entry-xen.S 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,666 @@ +/* + * Compatibility mode system call entry point for x86-64. 
@@ -19942,7 +19934,7 @@ for reference, prefixed with the version the removal occured): + .quad compat_sys_move_pages +ia32_syscall_end: --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/acpi/sleep_64-xen.c 2008-04-15 09:29:41.000000000 +0200 ++++ b/arch/x86/kernel/acpi/sleep_64-xen.c 2008-04-15 09:29:41.000000000 +0200 @@ -0,0 +1,146 @@ +/* + * acpi.c - Architecture-Specific Low-Level ACPI Support @@ -20091,7 +20083,7 @@ for reference, prefixed with the version the removal occured): +{ +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/e820_64-xen.c 2009-12-04 08:45:56.000000000 +0100 ++++ b/arch/x86/kernel/e820_64-xen.c 2009-12-04 08:45:56.000000000 +0100 @@ -0,0 +1,800 @@ +/* + * Handle the memory map. @@ -20894,7 +20886,7 @@ for reference, prefixed with the version the removal occured): + pci_mem_start, gapstart, gapsize); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/early_printk-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ b/arch/x86/kernel/early_printk-xen.c 2007-06-12 13:13:01.000000000 +0200 @@ -0,0 +1,302 @@ +#include +#include @@ -21199,7 +21191,7 @@ for reference, prefixed with the version the removal occured): + +__setup("earlyprintk=", setup_early_printk); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/entry_64-xen.S 2009-06-23 09:28:21.000000000 +0200 ++++ b/arch/x86/kernel/entry_64-xen.S 2009-06-23 09:28:21.000000000 +0200 @@ -0,0 +1,1317 @@ +/* + * linux/arch/x86_64/entry.S @@ -22519,7 +22511,7 @@ for reference, prefixed with the version the removal occured): +ENDPROC(arch_unwind_init_running) +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/head_64-xen.S 2009-06-23 09:28:21.000000000 +0200 ++++ b/arch/x86/kernel/head_64-xen.S 2010-11-08 17:27:03.000000000 +0100 @@ -0,0 +1,211 @@ +/* + * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit @@ -22729,11 +22721,11 @@ for reference, prefixed with the version the removal occured): + ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64) + ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page) + ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT) -+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel") ++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel") + ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic") + ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/head64-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ b/arch/x86/kernel/head64-xen.c 2007-06-12 13:13:01.000000000 +0200 @@ -0,0 +1,162 @@ +/* + * linux/arch/x86_64/kernel/head64.c -- prepare to run common code @@ -22898,7 +22890,7 @@ for reference, prefixed with the version the removal occured): + start_kernel(); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/io_apic_64-xen.c 2009-03-18 10:39:31.000000000 +0100 ++++ b/arch/x86/kernel/io_apic_64-xen.c 2009-03-18 10:39:31.000000000 +0100 @@ -0,0 +1,2270 @@ +/* + * Intel IO-APIC support for multi-Pentium hosts. 
@@ -25171,7 +25163,7 @@ for reference, prefixed with the version the removal occured): +#endif +#endif /* !CONFIG_XEN */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/ioport_64-xen.c 2008-01-28 12:24:19.000000000 +0100 ++++ b/arch/x86/kernel/ioport_64-xen.c 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,100 @@ +/* + * linux/arch/x86_64/kernel/ioport.c @@ -25274,7 +25266,7 @@ for reference, prefixed with the version the removal occured): + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/ldt_64-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ b/arch/x86/kernel/ldt_64-xen.c 2007-06-12 13:13:01.000000000 +0200 @@ -0,0 +1,282 @@ +/* + * linux/arch/x86_64/kernel/ldt.c @@ -25559,7 +25551,7 @@ for reference, prefixed with the version the removal occured): + return ret; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/mpparse_64-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ b/arch/x86/kernel/mpparse_64-xen.c 2007-06-12 13:13:01.000000000 +0200 @@ -0,0 +1,1011 @@ +/* + * Intel Multiprocessor Specification 1.1 and 1.4 @@ -26573,7 +26565,7 @@ for reference, prefixed with the version the removal occured): +#endif /*CONFIG_X86_IO_APIC*/ +#endif /*CONFIG_ACPI*/ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/process_64-xen.c 2008-08-07 12:44:36.000000000 +0200 ++++ b/arch/x86/kernel/process_64-xen.c 2008-08-07 12:44:36.000000000 +0200 @@ -0,0 +1,848 @@ +/* + * linux/arch/x86-64/kernel/process.c @@ -27424,7 +27416,7 @@ for reference, prefixed with the version the removal occured): + return sp & ~0xf; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/setup_64-xen.c 2009-06-23 09:28:21.000000000 +0200 ++++ b/arch/x86/kernel/setup_64-xen.c 2010-10-05 09:58:12.000000000 +0200 @@ -0,0 +1,1656 @@ +/* + * linux/arch/x86-64/kernel/setup.c @@ -28474,7 +28466,7 @@ for reference, prefixed with the version the removal occured): + */ +static void __init amd_detect_cmp(struct cpuinfo_x86 *c) +{ -+#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + unsigned bits; +#ifdef CONFIG_NUMA + int cpu = smp_processor_id(); @@ -28588,7 +28580,7 @@ for reference, prefixed with the version the removal occured): + +static void __cpuinit detect_ht(struct cpuinfo_x86 *c) +{ -+#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + u32 eax, ebx, ecx, edx; + int index_msb, core_bits; + @@ -28773,7 +28765,7 @@ for reference, prefixed with the version the removal occured): + c->x86 = 4; + } + -+#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; +#endif +} @@ -28981,7 +28973,7 @@ for reference, prefixed with the version the removal occured): + if (c->x86_cache_size >= 0) + seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + -+#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + if (smp_num_siblings * c->x86_max_cores > 1) { + int cpu = c - cpu_data; + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); @@ -29083,7 +29075,7 @@ for reference, prefixed with the version the removal occured): +device_initcall(add_pcspkr); +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/setup64-xen.c 2008-01-28 12:24:19.000000000 +0100 ++++ b/arch/x86/kernel/setup64-xen.c 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,367 @@ +/* + * X86-64 specific CPU setup. 
@@ -29453,7 +29445,7 @@ for reference, prefixed with the version the removal occured): + raw_local_save_flags(kernel_eflags); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/smp_64-xen.c 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/kernel/smp_64-xen.c 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,575 @@ +/* + * Intel SMP support routines. @@ -30031,7 +30023,7 @@ for reference, prefixed with the version the removal occured): +#endif +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/traps_64-xen.c 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/kernel/traps_64-xen.c 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,1173 @@ +/* + * linux/arch/x86-64/traps.c @@ -31207,7 +31199,7 @@ for reference, prefixed with the version the removal occured): +__setup("call_trace=", call_trace_setup); +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/vsyscall_64-xen.c 2007-06-18 08:38:13.000000000 +0200 ++++ b/arch/x86/kernel/vsyscall_64-xen.c 2007-06-18 08:38:13.000000000 +0200 @@ -0,0 +1,227 @@ +/* + * linux/arch/x86_64/kernel/vsyscall.c @@ -31437,7 +31429,7 @@ for reference, prefixed with the version the removal occured): + +__initcall(vsyscall_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/xen_entry_64.S 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/kernel/xen_entry_64.S 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,36 @@ +/* + * Copied from arch/xen/i386/kernel/entry.S @@ -31476,8 +31468,8 @@ for reference, prefixed with the version the removal occured): + XEN_PUT_VCPU_INFO(reg) +#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/fault_64-xen.c 2007-11-02 17:34:23.000000000 +0100 -@@ -0,0 +1,724 @@ ++++ b/arch/x86/mm/fault_64-xen.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,730 @@ +/* + * linux/arch/x86-64/mm/fault.c + * @@ -32157,6 +32149,9 @@ for reference, prefixed with the version the removal occured): +DEFINE_SPINLOCK(pgd_lock); +struct page *pgd_list; + ++#define pgd_page_table(what, pg) \ ++ spin_##what(&((struct mm_struct *)(pg)->mapping)->page_table_lock) ++ +void vmalloc_sync_all(void) +{ + /* Note that races in the updates of insync and start aren't @@ -32179,10 +32174,13 @@ for reference, prefixed with the version the removal occured): + page = (struct page *)page->index) { + pgd_t *pgd; + pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ ++ pgd_page_table(lock, page); + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else + BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref)); ++ pgd_page_table(unlock, page); + } + spin_unlock(&pgd_lock); + set_bit(pgd_index(address), insync); @@ -32203,7 +32201,7 @@ for reference, prefixed with the version the removal occured): +} +__setup("pagefaulttrace", enable_pagefaulttrace); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/init_64-xen.c 2010-04-29 09:34:47.000000000 +0200 ++++ b/arch/x86/mm/init_64-xen.c 2010-04-29 09:34:47.000000000 +0200 @@ -0,0 +1,1244 @@ +/* + * linux/arch/x86_64/mm/init.c @@ -33450,7 +33448,7 @@ for reference, prefixed with the version the removal occured): + return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/pageattr_64-xen.c 2009-03-18 10:39:31.000000000 +0100 ++++ b/arch/x86/mm/pageattr_64-xen.c 2009-03-18 
10:39:31.000000000 +0100 @@ -0,0 +1,508 @@ +/* + * Copyright 2002 Andi Kleen, SuSE Labs. @@ -33961,7 +33959,7 @@ for reference, prefixed with the version the removal occured): +EXPORT_SYMBOL(change_page_attr); +EXPORT_SYMBOL(global_flush_tlb); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/pci/msi-xen.c 2009-12-04 08:45:56.000000000 +0100 ++++ b/drivers/pci/msi-xen.c 2009-12-04 08:45:56.000000000 +0100 @@ -0,0 +1,910 @@ +/* + * File: msi.c @@ -34874,7 +34872,7 @@ for reference, prefixed with the version the removal occured): +#endif + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/agp.h 2007-06-22 09:08:06.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/agp.h 2007-06-22 09:08:06.000000000 +0200 @@ -0,0 +1,44 @@ +#ifndef AGP_H +#define AGP_H 1 @@ -34921,7 +34919,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/desc_32.h 2008-01-28 12:24:19.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/desc_32.h 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,166 @@ +#ifndef __ARCH_DESC_H +#define __ARCH_DESC_H @@ -35090,7 +35088,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/fixmap_32.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,155 @@ +/* + * fixmap.h: compile-time virtual memory allocation @@ -35248,7 +35246,7 @@ for reference, prefixed with the version the removal occured): +#endif /* !__ASSEMBLY__ */ +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/gnttab_dma.h 2007-08-06 15:10:49.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/gnttab_dma.h 2007-08-06 15:10:49.000000000 +0200 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2007 Herbert Xu @@ -35292,7 +35290,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _ASM_I386_GNTTAB_DMA_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/highmem.h 2008-10-29 09:55:56.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/highmem.h 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,97 @@ +/* + * highmem.h: virtual kernel memory mappings for high memory @@ -35392,7 +35390,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _ASM_HIGHMEM_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/hypercall_32.h 2009-06-23 09:28:21.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/hypercall_32.h 2009-06-23 09:28:21.000000000 +0200 @@ -0,0 +1,415 @@ +/****************************************************************************** + * hypercall.h @@ -35810,7 +35808,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __HYPERCALL_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/hypervisor.h 2009-07-13 14:25:35.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/hypervisor.h 2009-07-13 14:25:35.000000000 +0200 @@ -0,0 +1,263 @@ +/****************************************************************************** + * hypervisor.h @@ -36076,7 +36074,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __HYPERVISOR_H__ 
*/ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/irqflags_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/irqflags_32.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,127 @@ +/* + * include/asm-i386/irqflags.h @@ -36206,7 +36204,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/maddr_32.h 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/maddr_32.h 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,193 @@ +#ifndef _I386_MADDR_H +#define _I386_MADDR_H @@ -36402,7 +36400,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _I386_MADDR_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/mmu_context_32.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,108 @@ +#ifndef __I386_SCHED_H +#define __I386_SCHED_H @@ -36513,7 +36511,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgalloc_32.h 2008-07-21 11:00:33.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/pgalloc_32.h 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,59 @@ +#ifndef _I386_PGALLOC_H +#define _I386_PGALLOC_H @@ -36575,7 +36573,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _I386_PGALLOC_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,24 @@ +#ifndef _I386_PGTABLE_3LEVEL_DEFS_H +#define _I386_PGTABLE_3LEVEL_DEFS_H @@ -36602,7 +36600,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable-3level.h 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/pgtable-3level.h 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,211 @@ +#ifndef _I386_PGTABLE_3LEVEL_H +#define _I386_PGTABLE_3LEVEL_H @@ -36816,7 +36814,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _I386_PGTABLE_3LEVEL_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_32.h 2009-03-18 10:39:32.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/pgtable_32.h 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,550 @@ +#ifndef _I386_PGTABLE_H +#define _I386_PGTABLE_H @@ -37369,7 +37367,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _I386_PGTABLE_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor_32.h 2008-01-28 12:24:19.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/processor_32.h 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,743 @@ +/* + * include/asm-i386/processor.h @@ -38115,7 +38113,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __ASM_I386_PROCESSOR_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp_32.h 2007-06-12 13:14:02.000000000 +0200 
++++ b/arch/x86/include/mach-xen/asm/smp_32.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,103 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H @@ -38221,7 +38219,7 @@ for reference, prefixed with the version the removal occured): +#endif +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/synch_bitops.h 2008-04-02 12:34:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/synch_bitops.h 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,126 @@ +#ifndef __XEN_SYNCH_BITOPS_H__ +#define __XEN_SYNCH_BITOPS_H__ @@ -38350,7 +38348,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __XEN_SYNCH_BITOPS_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/system_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/system_32.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,488 @@ +#ifndef __ASM_SYSTEM_H +#define __ASM_SYSTEM_H @@ -38841,7 +38839,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/tlbflush_32.h 2007-11-26 16:59:25.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/tlbflush_32.h 2007-11-26 16:59:25.000000000 +0100 @@ -0,0 +1,101 @@ +#ifndef _I386_TLBFLUSH_H +#define _I386_TLBFLUSH_H @@ -38945,7 +38943,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _I386_TLBFLUSH_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/vga.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/vga.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,20 @@ +/* + * Access to VGA videoram @@ -38968,7 +38966,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/xenoprof.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/xenoprof.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,48 @@ +/****************************************************************************** + * asm-i386/mach-xen/asm/xenoprof.h @@ -39019,7 +39017,7 @@ for reference, prefixed with the version the removal occured): +#endif /* CONFIG_XEN */ +#endif /* __ASM_XENOPROF_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/irq_vectors.h 2008-09-25 13:55:32.000000000 +0200 ++++ b/arch/x86/include/mach-xen/irq_vectors.h 2008-09-25 13:55:32.000000000 +0200 @@ -0,0 +1,125 @@ +/* + * This file should contain #defines for all of the interrupt vector @@ -39147,7 +39145,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _ASM_IRQ_VECTORS_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mach_traps.h 2007-06-12 13:14:02.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/mach_traps.h 2007-06-12 13:14:02.000000000 +0200 @@ -0,0 +1,33 @@ +/* + * include/asm-xen/asm-i386/mach-xen/mach_traps.h @@ -39183,7 +39181,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* !_MACH_TRAPS_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/desc_64.h 2008-01-28 12:24:19.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/desc_64.h 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,265 @@ +/* Written 2000 by Andi Kleen */ 
+#ifndef __ARCH_DESC_H @@ -39451,7 +39449,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/fixmap_64.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,112 @@ +/* + * fixmap.h: compile-time virtual memory allocation @@ -39566,7 +39564,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/hypercall_64.h 2009-06-23 09:28:21.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/hypercall_64.h 2009-06-23 09:28:21.000000000 +0200 @@ -0,0 +1,422 @@ +/****************************************************************************** + * hypercall.h @@ -39991,7 +39989,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __HYPERCALL_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/irqflags_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/irqflags_64.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,139 @@ +/* + * include/asm-x86_64/irqflags.h @@ -40133,7 +40131,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/maddr_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/maddr_64.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,161 @@ +#ifndef _X86_64_MADDR_H +#define _X86_64_MADDR_H @@ -40297,7 +40295,7 @@ for reference, prefixed with the version the removal occured): +#endif /* _X86_64_MADDR_H */ + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/mmu_context_64.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,136 @@ +#ifndef __X86_64_MMU_CONTEXT_H +#define __X86_64_MMU_CONTEXT_H @@ -40436,8 +40434,8 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgalloc_64.h 2007-06-18 08:38:13.000000000 +0200 -@@ -0,0 +1,204 @@ ++++ b/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,210 @@ +#ifndef _X86_64_PGALLOC_H +#define _X86_64_PGALLOC_H + @@ -40535,10 +40533,13 @@ for reference, prefixed with the version the removal occured): + pte_free(virt_to_page(pud)); +} + -+static inline void pgd_list_add(pgd_t *pgd) ++static inline void pgd_list_add(pgd_t *pgd, void *mm) +{ + struct page *page = virt_to_page(pgd); + ++ /* Store a back link for vmalloc_sync_all(). */ ++ page->mapping = mm; ++ + spin_lock(&pgd_lock); + page->index = (pgoff_t)pgd_list; + if (pgd_list) @@ -40559,6 +40560,8 @@ for reference, prefixed with the version the removal occured): + if (next) + next->private = (unsigned long)pprev; + spin_unlock(&pgd_lock); ++ ++ page->mapping = NULL; +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) @@ -40567,22 +40570,22 @@ for reference, prefixed with the version the removal occured): + * We allocate two contiguous pages for kernel and user. 
+ */ + unsigned boundary; -+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1); ++ pgd_t *pgd; ++ ++ pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 1); + if (!pgd) + return NULL; -+ pgd_list_add(pgd); ++ pgd_list_add(pgd, mm); + /* + * Copy kernel pointers in from init. + * Could keep a freelist or slab cache of those because the kernel + * part never changes. + */ + boundary = pgd_index(__PAGE_OFFSET); -+ memset(pgd, 0, boundary * sizeof(pgd_t)); + memcpy(pgd + boundary, + init_level4_pgt + boundary, + (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); + -+ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */ + /* + * Set level3_user_pgt for vsyscall area + */ @@ -40595,6 +40598,8 @@ for reference, prefixed with the version the removal occured): +{ + pte_t *ptep = virt_to_ptep(pgd); + ++ pgd_list_del(pgd); ++ + if (!pte_write(*ptep)) { + xen_pgd_unpin(__pa(pgd)); + BUG_ON(HYPERVISOR_update_va_mapping( @@ -40614,7 +40619,6 @@ for reference, prefixed with the version the removal occured): + 0)); + } + -+ pgd_list_del(pgd); + free_pages((unsigned long)pgd, 1); +} + @@ -40643,7 +40647,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _X86_64_PGALLOC_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_64.h 2009-06-23 09:28:21.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/pgtable_64.h 2009-06-23 09:28:21.000000000 +0200 @@ -0,0 +1,585 @@ +#ifndef _X86_64_PGTABLE_H +#define _X86_64_PGTABLE_H @@ -41231,7 +41235,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _X86_64_PGTABLE_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor_64.h 2008-03-06 08:54:32.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/processor_64.h 2008-03-06 08:54:32.000000000 +0100 @@ -0,0 +1,502 @@ +/* + * include/asm-x86_64/processor.h @@ -41736,7 +41740,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __ASM_X86_64_PROCESSOR_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/smp_64.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,150 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H @@ -41889,7 +41893,7 @@ for reference, prefixed with the version the removal occured): +#endif + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/system_64.h 2007-11-26 16:59:25.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/system_64.h 2007-11-26 16:59:25.000000000 +0100 @@ -0,0 +1,256 @@ +#ifndef __ASM_SYSTEM_H +#define __ASM_SYSTEM_H @@ -42148,7 +42152,7 @@ for reference, prefixed with the version the removal occured): + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/tlbflush_64.h 2007-11-26 16:59:25.000000000 +0100 ++++ b/arch/x86/include/mach-xen/asm/tlbflush_64.h 2007-11-26 16:59:25.000000000 +0100 @@ -0,0 +1,103 @@ +#ifndef _X8664_TLBFLUSH_H +#define _X8664_TLBFLUSH_H @@ -42254,7 +42258,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _X8664_TLBFLUSH_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/xor_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/asm/xor_64.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,328 @@ 
+/* + * x86-64 changes / gcc fixes from Andi Kleen. @@ -42585,7 +42589,7 @@ for reference, prefixed with the version the removal occured): + deals with a load to a line that is being prefetched. */ +#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/setup_arch_post.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/setup_arch_post.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,63 @@ +/** + * machine_specific_* - Hooks for machine specific setup. @@ -42651,7 +42655,7 @@ for reference, prefixed with the version the removal occured): +#endif +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/setup_arch_pre.h 2007-06-12 13:14:13.000000000 +0200 ++++ b/arch/x86/include/mach-xen/setup_arch_pre.h 2007-06-12 13:14:13.000000000 +0200 @@ -0,0 +1,5 @@ +/* Hook to call BIOS initialisation function */ + @@ -42659,7 +42663,7 @@ for reference, prefixed with the version the removal occured): + +static void __init machine_specific_arch_setup(void); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/balloon.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/balloon.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,57 @@ +/****************************************************************************** + * balloon.h @@ -42719,7 +42723,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __ASM_BALLOON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/blkif.h 2008-07-21 11:00:33.000000000 +0200 ++++ b/include/xen/blkif.h 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,123 @@ +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy @@ -42845,7 +42849,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __XEN_BLKIF_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/compat_ioctl.h 2010-01-18 15:23:12.000000000 +0100 ++++ b/include/xen/compat_ioctl.h 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,75 @@ +/* + * This program is free software; you can redistribute it and/or @@ -42923,7 +42927,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __LINUX_XEN_COMPAT_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/cpu_hotplug.h 2007-08-16 18:07:01.000000000 +0200 ++++ b/include/xen/cpu_hotplug.h 2007-08-16 18:07:01.000000000 +0200 @@ -0,0 +1,41 @@ +#ifndef __XEN_CPU_HOTPLUG_H__ +#define __XEN_CPU_HOTPLUG_H__ @@ -42967,7 +42971,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __XEN_CPU_HOTPLUG_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/driver_util.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/driver_util.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,14 @@ + +#ifndef __ASM_XEN_DRIVER_UTIL_H__ @@ -42984,7 +42988,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __ASM_XEN_DRIVER_UTIL_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/firmware.h 2007-07-02 08:16:19.000000000 +0200 ++++ b/include/xen/firmware.h 2007-07-02 08:16:19.000000000 +0200 @@ -0,0 +1,10 @@ +#ifndef __XEN_FIRMWARE_H__ +#define __XEN_FIRMWARE_H__ @@ -42997,8 +43001,8 @@ for reference, prefixed with the version the removal occured): + +#endif /* __XEN_FIRMWARE_H__ */ 
--- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/gnttab.h 2008-11-04 11:13:10.000000000 +0100 -@@ -0,0 +1,164 @@ ++++ b/include/xen/gnttab.h 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,202 @@ +/****************************************************************************** + * gnttab.h + * @@ -43041,6 +43045,7 @@ for reference, prefixed with the version the removal occured): +#include +#include /* maddr_t */ +#include ++#include +#include +#include + @@ -43162,35 +43167,46 @@ for reference, prefixed with the version the removal occured): + unmap->handle = handle; +} + -+#endif /* __ASM_GNTTAB_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/hvm.h 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,23 @@ -+/* Simple wrappers around HVM functions */ -+#ifndef XEN_HVM_H__ -+#define XEN_HVM_H__ -+ -+#include -+ -+static inline unsigned long hvm_get_parameter(int idx) -+{ -+ struct xen_hvm_param xhv; -+ int r; -+ -+ xhv.domid = DOMID_SELF; -+ xhv.index = idx; -+ r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); -+ if (r < 0) { -+ printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", -+ idx, r); -+ return 0; -+ } -+ return xhv.value; ++#define gnttab_check_GNTST_eagain_while(__HCop, __HCarg_p) \ ++{ \ ++ u8 __hc_delay = 1; \ ++ int __ret; \ ++ while (unlikely((__HCarg_p)->status == GNTST_eagain && __hc_delay)) { \ ++ msleep(__hc_delay++); \ ++ __ret = HYPERVISOR_grant_table_op(__HCop, (__HCarg_p), 1); \ ++ BUG_ON(__ret); \ ++ } \ ++ if (__hc_delay == 0) { \ ++ printk(KERN_ERR "%s: %s gnt busy\n", __func__, current->comm); \ ++ (__HCarg_p)->status = GNTST_bad_page; \ ++ } \ ++ if ((__HCarg_p)->status != GNTST_okay) \ ++ printk(KERN_ERR "%s: %s gnt status %x\n", \ ++ __func__, current->comm, (__HCarg_p)->status); \ ++} ++ ++#define gnttab_check_GNTST_eagain_do_while(__HCop, __HCarg_p) \ ++{ \ ++ u8 __hc_delay = 1; \ ++ int __ret; \ ++ do { \ ++ __ret = HYPERVISOR_grant_table_op(__HCop, (__HCarg_p), 1); \ ++ BUG_ON(__ret); \ ++ if ((__HCarg_p)->status == GNTST_eagain) \ ++ msleep(__hc_delay++); \ ++ } while ((__HCarg_p)->status == GNTST_eagain && __hc_delay); \ ++ if (__hc_delay == 0) { \ ++ printk(KERN_ERR "%s: %s gnt busy\n", __func__, current->comm); \ ++ (__HCarg_p)->status = GNTST_bad_page; \ ++ } \ ++ if ((__HCarg_p)->status != GNTST_okay) \ ++ printk(KERN_ERR "%s: %s gnt status %x\n", \ ++ __func__, current->comm, (__HCarg_p)->status); \ +} + -+#endif /* XEN_HVM_H__ */ ++#endif /* __ASM_GNTTAB_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/hypercall.h 2008-01-28 12:24:19.000000000 +0100 ++++ b/include/xen/hypercall.h 2008-01-28 12:24:19.000000000 +0100 @@ -0,0 +1,30 @@ +#ifndef __XEN_HYPERCALL_H__ +#define __XEN_HYPERCALL_H__ @@ -43223,7 +43239,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __XEN_HYPERCALL_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/hypervisor_sysfs.h 2007-06-22 09:08:06.000000000 +0200 ++++ b/include/xen/hypervisor_sysfs.h 2007-06-22 09:08:06.000000000 +0200 @@ -0,0 +1,30 @@ +/* + * copyright (c) 2006 IBM Corporation @@ -43256,7 +43272,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* _HYP_SYSFS_H_ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/pcifront.h 2007-06-18 08:38:13.000000000 +0200 ++++ b/include/xen/pcifront.h 2007-06-18 08:38:13.000000000 +0200 @@ -0,0 +1,83 @@ +/* + * PCI Frontend - arch-dependendent 
declarations @@ -43342,7 +43358,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __XEN_ASM_PCIFRONT_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/public/evtchn.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/public/evtchn.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,88 @@ +/****************************************************************************** + * evtchn.h @@ -43433,7 +43449,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __LINUX_PUBLIC_EVTCHN_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/public/gntdev.h 2008-04-02 12:34:02.000000000 +0200 ++++ b/include/xen/public/gntdev.h 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,119 @@ +/****************************************************************************** + * gntdev.h @@ -43555,7 +43571,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __LINUX_PUBLIC_GNTDEV_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/public/privcmd.h 2010-01-18 15:23:12.000000000 +0100 ++++ b/include/xen/public/privcmd.h 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,89 @@ +/****************************************************************************** + * privcmd.h @@ -43647,7 +43663,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/public/xenbus.h 2009-05-29 10:25:53.000000000 +0200 ++++ b/include/xen/public/xenbus.h 2009-05-29 10:25:53.000000000 +0200 @@ -0,0 +1,56 @@ +/****************************************************************************** + * xenbus.h @@ -43706,7 +43722,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __LINUX_PUBLIC_XENBUS_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/xen_proc.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/xen_proc.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,12 @@ + +#ifndef __ASM_XEN_PROC_H__ @@ -43721,7 +43737,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __ASM_XEN_PROC_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/xencons.h 2007-10-15 09:39:38.000000000 +0200 ++++ b/include/xen/xencons.h 2007-10-15 09:39:38.000000000 +0200 @@ -0,0 +1,17 @@ +#ifndef __ASM_XENCONS_H__ +#define __ASM_XENCONS_H__ @@ -43741,7 +43757,7 @@ for reference, prefixed with the version the removal occured): + +#endif /* __ASM_XENCONS_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/include/xen/xenoprof.h 2007-06-12 13:14:19.000000000 +0200 ++++ b/include/xen/xenoprof.h 2007-06-12 13:14:19.000000000 +0200 @@ -0,0 +1,42 @@ +/****************************************************************************** + * xen/xenoprof.h @@ -43786,8 +43802,8 @@ for reference, prefixed with the version the removal occured): +#endif /* CONFIG_XEN */ +#endif /* __XEN_XENOPROF_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/lib/swiotlb-xen.c 2009-04-07 13:58:49.000000000 +0200 -@@ -0,0 +1,739 @@ ++++ b/lib/swiotlb-xen.c 2010-09-16 13:31:46.000000000 +0200 +@@ -0,0 +1,765 @@ +/* + * Dynamic DMA mapping support. 
+ * @@ -43837,6 +43853,14 @@ for reference, prefixed with the version the removal occured): + */ +#define IO_TLB_SHIFT 11 + ++/* ++ * Enumeration for sync targets ++ */ ++enum dma_sync_target { ++ SYNC_FOR_CPU = 0, ++ SYNC_FOR_DEVICE = 1, ++}; ++ +int swiotlb_force; + +static char *iotlb_virt_start; @@ -44230,11 +44254,26 @@ for reference, prefixed with the version the removal occured): +} + +static void -+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir) ++sync_single(struct device *hwdev, char *dma_addr, size_t size, ++ int dir, int target) +{ + struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr); -+ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE)); -+ __sync_single(buffer, dma_addr, size, dir); ++ switch (target) { ++ case SYNC_FOR_CPU: ++ if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) ++ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE); ++ else ++ BUG_ON(dir != DMA_TO_DEVICE); ++ break; ++ case SYNC_FOR_DEVICE: ++ if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) ++ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE); ++ else ++ BUG_ON(dir != DMA_FROM_DEVICE); ++ break; ++ default: ++ BUG(); ++ } +} + +static void @@ -44329,22 +44368,27 @@ for reference, prefixed with the version the removal occured): + * address back to the card, you must first perform a + * swiotlb_dma_sync_for_device, and then the device again owns the buffer + */ ++static inline void ++swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, ++ size_t size, int dir, int target) ++{ ++ BUG_ON(dir == DMA_NONE); ++ if (in_swiotlb_aperture(dev_addr)) ++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir, target); ++} ++ +void +swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir) +{ -+ BUG_ON(dir == DMA_NONE); -+ if (in_swiotlb_aperture(dev_addr)) -+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir); ++ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); +} + +void +swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir) +{ -+ BUG_ON(dir == DMA_NONE); -+ if (in_swiotlb_aperture(dev_addr)) -+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir); ++ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); +} + +/* @@ -44428,9 +44472,9 @@ for reference, prefixed with the version the removal occured): + * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules + * and usage. 
+ */ -+void -+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, -+ int nelems, int dir) ++static inline void ++swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, ++ int nelems, int dir, int target) +{ + int i; + @@ -44438,24 +44482,22 @@ for reference, prefixed with the version the removal occured): + + for (i = 0; i < nelems; i++, sg++) + if (in_swiotlb_aperture(sg->dma_address)) -+ sync_single(hwdev, -+ (void *)bus_to_virt(sg->dma_address), -+ sg->dma_length, dir); ++ sync_single(hwdev, bus_to_virt(sg->dma_address), ++ sg->dma_length, dir, target); ++} ++ ++void ++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, ++ int nelems, int dir) ++{ ++ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); +} + +void +swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, + int nelems, int dir) +{ -+ int i; -+ -+ BUG_ON(dir == DMA_NONE); -+ -+ for (i = 0; i < nelems; i++, sg++) -+ if (in_swiotlb_aperture(sg->dma_address)) -+ sync_single(hwdev, -+ (void *)bus_to_virt(sg->dma_address), -+ sg->dma_length, dir); ++ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); +} + +#ifdef CONFIG_HIGHMEM @@ -44528,7 +44570,7 @@ for reference, prefixed with the version the removal occured): +EXPORT_SYMBOL(swiotlb_dma_mapping_error); +EXPORT_SYMBOL(swiotlb_dma_supported); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/mm/tmem-xen.c 2009-06-23 09:28:21.000000000 +0200 ++++ b/mm/tmem-xen.c 2009-06-23 09:28:21.000000000 +0200 @@ -0,0 +1,41 @@ +/* + * Xen implementation for transcendent memory (tmem) @@ -44572,7 +44614,7 @@ for reference, prefixed with the version the removal occured): + return rc; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/scripts/Makefile.xen.awk 2007-08-06 15:10:49.000000000 +0200 ++++ b/scripts/Makefile.xen.awk 2007-08-06 15:10:49.000000000 +0200 @@ -0,0 +1,34 @@ +BEGIN { + is_rule = 0 diff --git a/patches.xen/xen3-auto-xen-drivers.diff b/patches.xen/xen3-auto-xen-drivers.diff index bb0cdfe..7ba630d 100644 --- a/patches.xen/xen3-auto-xen-drivers.diff +++ b/patches.xen/xen3-auto-xen-drivers.diff @@ -1,5 +1,5 @@ Subject: xen3 xen-drivers -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com @@ -12,12 +12,12 @@ and in case upstream wants to take the forward porting patches: 2.6.31/drivers/xen/evtchn/evtchn.c --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/balloon/Makefile 2007-06-12 13:13:44.000000000 +0200 ++++ b/drivers/xen/balloon/Makefile 2007-06-12 13:13:44.000000000 +0200 @@ -0,0 +1,2 @@ + +obj-y := balloon.o sysfs.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/balloon/balloon.c 2010-03-31 09:56:02.000000000 +0200 ++++ b/drivers/xen/balloon/balloon.c 2010-03-31 09:56:02.000000000 +0200 @@ -0,0 +1,757 @@ +/****************************************************************************** + * balloon.c @@ -777,7 +777,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/balloon/common.h 2009-06-09 15:01:37.000000000 +0200 ++++ b/drivers/xen/balloon/common.h 2009-06-09 15:01:37.000000000 +0200 @@ -0,0 +1,56 @@ +/****************************************************************************** + * balloon/common.h @@ 
-836,7 +836,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __XEN_BALLOON_COMMON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/balloon/sysfs.c 2009-06-09 15:01:37.000000000 +0200 ++++ b/drivers/xen/balloon/sysfs.c 2009-06-09 15:01:37.000000000 +0200 @@ -0,0 +1,167 @@ +/****************************************************************************** + * balloon/sysfs.c @@ -1006,15 +1006,15 @@ and in case upstream wants to take the forward porting patches: + unregister_balloon(&balloon_sysdev); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/Makefile 2009-06-09 15:01:37.000000000 +0200 ++++ b/drivers/xen/blkback/Makefile 2009-06-09 15:01:37.000000000 +0200 @@ -0,0 +1,4 @@ +obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o +obj-$(CONFIG_XEN_BLKBACK_PAGEMAP) += blkback-pagemap.o + +blkbk-y := blkback.o xenbus.o interface.o vbd.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/blkback.c 2010-03-22 12:00:53.000000000 +0100 -@@ -0,0 +1,703 @@ ++++ b/drivers/xen/blkback/blkback.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,672 @@ +/****************************************************************************** + * arch/xen/drivers/blkif/backend/main.c + * @@ -1124,7 +1124,7 @@ and in case upstream wants to take the forward porting patches: + + +static int do_block_io_op(blkif_t *blkif); -+static int dispatch_rw_block_io(blkif_t *blkif, ++static void dispatch_rw_block_io(blkif_t *blkif, + blkif_request_t *req, + pending_req_t *pending_req); +static void make_response(blkif_t *blkif, u64 id, @@ -1329,13 +1329,13 @@ and in case upstream wants to take the forward porting patches: + blkif_request_t req; + pending_req_t *pending_req; + RING_IDX rc, rp; -+ int more_to_do = 0, ret; ++ int more_to_do = 0; + + rc = blk_rings->common.req_cons; + rp = blk_rings->common.sring->req_prod; + rmb(); /* Ensure we see queued requests up to 'rp'. */ + -+ while ((rc != rp) || (blkif->is_suspended_req)) { ++ while ((rc != rp)) { + + if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) + break; @@ -1352,14 +1352,6 @@ and in case upstream wants to take the forward porting patches: + break; + } + -+ /* Handle the suspended request first, if one exists */ -+ if(blkif->is_suspended_req) -+ { -+ memcpy(&req, &blkif->suspended_req, sizeof(req)); -+ blkif->is_suspended_req = 0; -+ goto handle_request; -+ } -+ + switch (blkif->blk_protocol) { + case BLKIF_PROTOCOL_NATIVE: + memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req)); @@ -1378,19 +1370,17 @@ and in case upstream wants to take the forward porting patches: + /* Apply all sanity checks to /private copy/ of request. 
*/ + barrier(); + -+handle_request: -+ ret = 0; + switch (req.operation) { + case BLKIF_OP_READ: + blkif->st_rd_req++; -+ ret = dispatch_rw_block_io(blkif, &req, pending_req); ++ dispatch_rw_block_io(blkif, &req, pending_req); + break; + case BLKIF_OP_WRITE_BARRIER: + blkif->st_br_req++; + /* fall through */ + case BLKIF_OP_WRITE: + blkif->st_wr_req++; -+ ret = dispatch_rw_block_io(blkif, &req, pending_req); ++ dispatch_rw_block_io(blkif, &req, pending_req); + break; + default: + /* A good sign something is wrong: sleep for a while to @@ -1403,17 +1393,6 @@ and in case upstream wants to take the forward porting patches: + free_req(pending_req); + break; + } -+ BUG_ON(ret != 0 && ret != -EAGAIN); -+ /* If we can't handle the request at the moment, save it, and break the -+ * loop */ -+ if(ret == -EAGAIN) -+ { -+ memcpy(&blkif->suspended_req, &req, sizeof(req)); -+ blkif->is_suspended_req = 1; -+ /* Return "no more work pending", restart will be handled 'out of -+ * band' */ -+ return 0; -+ } + + /* Yield point for this unbounded loop. */ + cond_resched(); @@ -1422,7 +1401,7 @@ and in case upstream wants to take the forward porting patches: + return more_to_do; +} + -+static int dispatch_rw_block_io(blkif_t *blkif, ++static void dispatch_rw_block_io(blkif_t *blkif, + blkif_request_t *req, + pending_req_t *pending_req) +{ @@ -1491,15 +1470,13 @@ and in case upstream wants to take the forward porting patches: + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg); + BUG_ON(ret); + -+#define GENERAL_ERR (1<<0) -+#define EAGAIN_ERR (1<<1) + for (i = 0; i < nseg; i++) { -+ if (unlikely(map[i].status != 0)) { ++ if (unlikely(map[i].status == GNTST_eagain)) ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &map[i]) ++ if (unlikely(map[i].status != GNTST_okay)) { + DPRINTK("invalid buffer -- could not remap it\n"); + map[i].handle = BLKBACK_INVALID_HANDLE; -+ ret |= GENERAL_ERR; -+ if(map[i].status == GNTST_eagain) -+ ret |= EAGAIN_ERR; ++ ret = 1; + } else { + blkback_pagemap_set(vaddr_pagenr(pending_req, i), + pending_page(pending_req, i), @@ -1519,14 +1496,6 @@ and in case upstream wants to take the forward porting patches: + (req->seg[i].first_sect << 9); + } + -+ /* If any of grant maps failed with GNTST_eagain, suspend and retry later */ -+ if(ret & EAGAIN_ERR) -+ { -+ fast_flush_area(pending_req); -+ free_req(pending_req); -+ return -EAGAIN; -+ } -+ + if (ret) + goto fail_flush; + @@ -1592,7 +1561,7 @@ and in case upstream wants to take the forward porting patches: + else if (operation == WRITE || operation == WRITE_BARRIER) + blkif->st_wr_sect += preq.nr_sects; + -+ return 0; ++ return; + + fail_flush: + fast_flush_area(pending_req); @@ -1600,7 +1569,7 @@ and in case upstream wants to take the forward porting patches: + make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); + free_req(pending_req); + msleep(1); /* back off a bit */ -+ return 0; ++ return; + + fail_put_bio: + __end_block_io_op(pending_req, -EINVAL); @@ -1608,7 +1577,7 @@ and in case upstream wants to take the forward porting patches: + bio_put(bio); + unplug_queue(blkif); + msleep(1); /* back off a bit */ -+ return 0; ++ return; +} + + @@ -1719,7 +1688,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/blkback-pagemap.c 2009-06-09 15:01:37.000000000 +0200 ++++ b/drivers/xen/blkback/blkback-pagemap.c 2009-06-09 15:01:37.000000000 +0200 @@ -0,0 +1,96 
@@ +#include +#include "blkback-pagemap.h" @@ -1818,7 +1787,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/blkback-pagemap.h 2009-06-09 15:01:37.000000000 +0200 ++++ b/drivers/xen/blkback/blkback-pagemap.h 2009-06-09 15:01:37.000000000 +0200 @@ -0,0 +1,37 @@ +#ifndef _BLKBACK_PAGEMAP_H_ +#define _BLKBACK_PAGEMAP_H_ @@ -1858,8 +1827,8 @@ and in case upstream wants to take the forward porting patches: + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/common.h 2010-03-22 12:00:53.000000000 +0100 -@@ -0,0 +1,155 @@ ++++ b/drivers/xen/blkback/common.h 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,153 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 @@ -1945,8 +1914,6 @@ and in case upstream wants to take the forward porting patches: + struct task_struct *xenblkd; + unsigned int waiting_reqs; + request_queue_t *plug; -+ int is_suspended_req; -+ blkif_request_t suspended_req; + + /* statistics */ + unsigned long st_print; @@ -2016,8 +1983,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __BLKIF__BACKEND__COMMON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/interface.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,185 @@ ++++ b/drivers/xen/blkback/interface.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,183 @@ +/****************************************************************************** + * arch/xen/drivers/blkif/backend/interface.c + * @@ -2079,25 +2046,23 @@ and in case upstream wants to take the forward porting patches: +static int map_frontend_page(blkif_t *blkif, unsigned long shared_page) +{ + struct gnttab_map_grant_ref op; ++ int ret; + + gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr, + GNTMAP_host_map, shared_page, blkif->domid); + -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(100); -+ } while(op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ if (op.status) { -+ DPRINTK(" Grant table operation failure !\n"); -+ return op.status; ++ if (op.status == GNTST_okay) { ++ blkif->shmem_ref = shared_page; ++ blkif->shmem_handle = op.handle; ++ ret = 0; ++ } else { ++ DPRINTK(" Grant table operation failure %d!\n", (int)op.status); ++ ret = -EINVAL; + } + -+ blkif->shmem_ref = shared_page; -+ blkif->shmem_handle = op.handle; -+ -+ return 0; ++ return ret; +} + +static void unmap_frontend_page(blkif_t *blkif) @@ -2204,7 +2169,7 @@ and in case upstream wants to take the forward porting patches: + 0, 0, NULL, NULL); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/vbd.c 2010-03-22 12:00:53.000000000 +0100 ++++ b/drivers/xen/blkback/vbd.c 2010-03-22 12:00:53.000000000 +0100 @@ -0,0 +1,161 @@ +/****************************************************************************** + * blkback/vbd.c @@ -2368,8 +2333,8 @@ and in case upstream wants to take the forward porting patches: + xenbus_transaction_end(xbt, 1); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkback/xenbus.c 2010-03-22 12:00:53.000000000 +0100 -@@ -0,0 +1,551 @@ ++++ b/drivers/xen/blkback/xenbus.c 2010-11-25 09:36:37.000000000 +0100 +@@ -0,0 +1,557 @@ +/* 
Xenbus code for blkif backend + Copyright (C) 2005 Rusty Russell + Copyright (C) 2005 XenSource Ltd @@ -2752,6 +2717,11 @@ and in case upstream wants to take the forward porting patches: + if (dev->state == XenbusStateConnected) + break; + ++ /* Enforce precondition before potential leak point. ++ * blkif_disconnect() is idempotent. ++ */ ++ blkif_disconnect(be->blkif); ++ + err = connect_ring(be); + if (err) + break; @@ -2769,6 +2739,7 @@ and in case upstream wants to take the forward porting patches: + break; + /* fall through if not online */ + case XenbusStateUnknown: ++ /* implies blkif_disconnect() via blkback_remove() */ + device_unregister(&dev->dev); + break; + @@ -2922,7 +2893,7 @@ and in case upstream wants to take the forward porting patches: + xenbus_register_backend(&blkback); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkfront/Makefile 2007-06-12 13:13:44.000000000 +0200 ++++ b/drivers/xen/blkfront/Makefile 2007-06-12 13:13:44.000000000 +0200 @@ -0,0 +1,5 @@ + +obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o @@ -2930,8 +2901,8 @@ and in case upstream wants to take the forward porting patches: +xenblk-objs := blkfront.o vbd.o + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkfront/blkfront.c 2010-03-22 12:00:53.000000000 +0100 -@@ -0,0 +1,957 @@ ++++ b/drivers/xen/blkfront/blkfront.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,967 @@ +/****************************************************************************** + * blkfront.c + * @@ -3466,6 +3437,7 @@ and in case upstream wants to take the forward porting patches: +int blkif_ioctl(struct inode *inode, struct file *filep, + unsigned command, unsigned long argument) +{ ++ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; + int i; + + DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", @@ -3501,14 +3473,23 @@ and in case upstream wants to take the forward porting patches: + return 0; + + case CDROM_GET_CAPABILITY: { -+ struct blkfront_info *info = -+ inode->i_bdev->bd_disk->private_data; + struct gendisk *gd = info->gd; + if (gd->flags & GENHD_FL_CD) + return 0; + return -EINVAL; + } + default: ++ if (info->mi && info->gd) { ++ switch (info->mi->major) { ++ case SCSI_DISK0_MAJOR: ++ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: ++ case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: ++ case SCSI_CDROM_MAJOR: ++ return scsi_cmd_ioctl(filep, info->gd, command, ++ (void __user *)argument); ++ } ++ } ++ + /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", + command);*/ + return -EINVAL; /* same return as native Linux */ @@ -3890,7 +3871,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkfront/block.h 2010-02-24 13:13:46.000000000 +0100 ++++ b/drivers/xen/blkfront/block.h 2010-02-24 13:13:46.000000000 +0100 @@ -0,0 +1,160 @@ +/****************************************************************************** + * block.h @@ -4053,7 +4034,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __XEN_DRIVERS_BLOCK_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blkfront/vbd.c 2010-01-18 15:23:12.000000000 +0100 ++++ b/drivers/xen/blkfront/vbd.c 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,553 @@ +/****************************************************************************** + * vbd.c @@ -4609,7 +4590,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* CONFIG_SYSFS */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap/Makefile 2007-06-12 13:13:44.000000000 +0200 ++++ b/drivers/xen/blktap/Makefile 2007-06-12 13:13:44.000000000 +0200 @@ -0,0 +1,5 @@ +LINUXINCLUDE += -I../xen/include/public/io + @@ -4617,8 +4598,8 @@ and in case upstream wants to take the forward porting patches: + +xenblktap-y := xenbus.o interface.o blktap.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap/blktap.c 2010-04-29 09:34:47.000000000 +0200 -@@ -0,0 +1,1779 @@ ++++ b/drivers/xen/blktap/blktap.c 2011-02-17 09:58:10.000000000 +0100 +@@ -0,0 +1,1761 @@ +/****************************************************************************** + * drivers/xen/blktap/blktap.c + * @@ -4699,7 +4680,6 @@ and in case upstream wants to take the forward porting patches: + (_start + \ + ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \ + ((_seg) * PAGE_SIZE)) -+static int blkif_reqs = MAX_PENDING_REQS; +static int mmap_pages = MMAP_PAGES; + +#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we @@ -4733,7 +4713,9 @@ and in case upstream wants to take the forward porting patches: + pid_t pid; /*tapdisk process id */ + enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace + shutdown */ -+ unsigned long *idx_map; /*Record the user ring id to kern ++ struct idx_map { ++ u16 mem, req; ++ } *idx_map; /*Record the user ring id to kern + [req id, idx] tuple */ + blkif_t *blkif; /*Associate blkif with tapdev */ + struct domid_translate_ext trans; /*Translation from domid to bus. 
*/ @@ -4743,7 +4725,6 @@ and in case upstream wants to take the forward porting patches: +static struct tap_blkif *tapfds[MAX_TAP_DEV]; +static int blktap_next_minor; + -+module_param(blkif_reqs, int, 0); +/* Run-time switchable: /sys/module/blktap/parameters/ */ +static unsigned int log_stats = 0; +static unsigned int debug_lvl = 0; @@ -4774,12 +4755,6 @@ and in case upstream wants to take the forward porting patches: +static DECLARE_WAIT_QUEUE_HEAD (pending_free_wq); +static int alloc_pending_reqs; + -+typedef unsigned int PEND_RING_IDX; -+ -+static inline int MASK_PEND_IDX(int i) { -+ return (i & (MAX_PENDING_REQS-1)); -+} -+ +static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx) { + return (req - pending_reqs[idx]); +} @@ -4869,40 +4844,26 @@ and in case upstream wants to take the forward porting patches: + * ring ID. + */ + -+static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx) -+{ -+ return ((fe_dom << 16) | MASK_PEND_IDX(idx)); -+} -+ -+extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) -+{ -+ return (PEND_RING_IDX)(id & 0x0000ffff); -+} -+ -+extern inline int ID_TO_MIDX(unsigned long id) -+{ -+ return (int)(id >> 16); -+} -+ -+#define INVALID_REQ 0xdead0000 ++#define INVALID_MIDX 0xdead + +/*TODO: Convert to a free list*/ -+static inline int GET_NEXT_REQ(unsigned long *idx_map) ++static inline unsigned int GET_NEXT_REQ(const struct idx_map *idx_map) +{ -+ int i; ++ unsigned int i; ++ + for (i = 0; i < MAX_PENDING_REQS; i++) -+ if (idx_map[i] == INVALID_REQ) -+ return i; ++ if (idx_map[i].mem == INVALID_MIDX) ++ break; + -+ return INVALID_REQ; ++ return i; +} + -+static inline int OFFSET_TO_USR_IDX(int offset) ++static inline unsigned int OFFSET_TO_USR_IDX(unsigned long offset) +{ + return offset / BLKIF_MAX_SEGMENTS_PER_REQUEST; +} + -+static inline int OFFSET_TO_SEG(int offset) ++static inline unsigned int OFFSET_TO_SEG(unsigned long offset) +{ + return offset % BLKIF_MAX_SEGMENTS_PER_REQUEST; +} @@ -4939,13 +4900,11 @@ and in case upstream wants to take the forward porting patches: +{ + pte_t copy; + tap_blkif_t *info = NULL; -+ int offset, seg, usr_idx, pending_idx, mmap_idx; -+ unsigned long uvstart = 0; -+ unsigned long kvaddr; ++ unsigned int seg, usr_idx, pending_idx, mmap_idx, count = 0; ++ unsigned long offset, uvstart = 0; + struct page *pg; + struct grant_handle_pair *khandle; + struct gnttab_unmap_grant_ref unmap[2]; -+ int count = 0; + + /* + * If the address is before the start of the grant mapped region or @@ -4963,14 +4922,13 @@ and in case upstream wants to take the forward porting patches: + BUG_ON(!info); + BUG_ON(!info->idx_map); + -+ offset = (int) ((uvaddr - uvstart) >> PAGE_SHIFT); ++ offset = (uvaddr - uvstart) >> PAGE_SHIFT; + usr_idx = OFFSET_TO_USR_IDX(offset); + seg = OFFSET_TO_SEG(offset); + -+ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx])); -+ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]); ++ pending_idx = info->idx_map[usr_idx].req; ++ mmap_idx = info->idx_map[usr_idx].mem; + -+ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg); + pg = idx_to_page(mmap_idx, pending_idx, seg); + ClearPageReserved(pg); + info->foreign_map.map[offset + RING_PAGES] = NULL; @@ -4978,12 +4936,14 @@ and in case upstream wants to take the forward porting patches: + khandle = &pending_handle(mmap_idx, pending_idx, seg); + + if (khandle->kernel != INVALID_GRANT_HANDLE) { -+ gnttab_set_unmap_op(&unmap[count], kvaddr, ++ unsigned long pfn = page_to_pfn(pg); ++ ++ gnttab_set_unmap_op(&unmap[count], ++ (unsigned 
long)pfn_to_kaddr(pfn), + GNTMAP_host_map, khandle->kernel); + count++; + -+ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, -+ INVALID_P2M_ENTRY); ++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } + + if (khandle->user != INVALID_GRANT_HANDLE) { @@ -5242,7 +5202,7 @@ and in case upstream wants to take the forward porting patches: + filp->private_data = info; + info->mm = NULL; + -+ info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS, ++ info->idx_map = kmalloc(sizeof(*info->idx_map) * MAX_PENDING_REQS, + GFP_KERNEL); + + if (info->idx_map == NULL) @@ -5250,8 +5210,10 @@ and in case upstream wants to take the forward porting patches: + + if (idx > 0) { + init_waitqueue_head(&info->wait); -+ for (i = 0; i < MAX_PENDING_REQS; i++) -+ info->idx_map[i] = INVALID_REQ; ++ for (i = 0; i < MAX_PENDING_REQS; i++) { ++ info->idx_map[i].mem = INVALID_MIDX; ++ info->idx_map[i].req = ~0; ++ } + } + + DPRINTK("Tap open: device /dev/xen/blktap%d\n",idx); @@ -5508,11 +5470,9 @@ and in case upstream wants to take the forward porting patches: + return blktap_major; + + case BLKTAP_QUERY_ALLOC_REQS: -+ { -+ WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n", -+ alloc_pending_reqs, blkif_reqs); -+ return (alloc_pending_reqs/blkif_reqs) * 100; -+ } ++ WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%lu\n", ++ alloc_pending_reqs, MAX_PENDING_REQS); ++ return (alloc_pending_reqs/MAX_PENDING_REQS) * 100; + } + return -ENOIOCTLCMD; +} @@ -5567,14 +5527,14 @@ and in case upstream wants to take the forward porting patches: + return -EINVAL; + + pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t) -+ * blkif_reqs, GFP_KERNEL); ++ * MAX_PENDING_REQS, GFP_KERNEL); + foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages); + + if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc]) + goto out_of_memory; + -+ DPRINTK("%s: reqs=%d, pages=%d\n", -+ __FUNCTION__, blkif_reqs, mmap_pages); ++ DPRINTK("%s: reqs=%lu, pages=%d\n", ++ __FUNCTION__, MAX_PENDING_REQS, mmap_pages); + + for (i = 0; i < MAX_PENDING_REQS; i++) { + list_add_tail(&pending_reqs[mmap_alloc][i].free_list, @@ -5674,14 +5634,14 @@ and in case upstream wants to take the forward porting patches: + } +} + -+static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx, -+ int tapidx) ++static void fast_flush_area(pending_req_t *req, unsigned int k_idx, ++ unsigned int u_idx, int tapidx) +{ + struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; -+ unsigned int i, invcount = 0, locked = 0; ++ unsigned int i, mmap_idx, invcount = 0, locked = 0; + struct grant_handle_pair *khandle; + uint64_t ptep; -+ int ret, mmap_idx; ++ int ret; + unsigned long uvaddr; + tap_blkif_t *info; + struct mm_struct *mm; @@ -5836,7 +5796,7 @@ and in case upstream wants to take the forward porting patches: + RING_IDX i, j, rp; + blkif_response_t *resp; + blkif_t *blkif=NULL; -+ int pending_idx, usr_idx, mmap_idx; ++ unsigned int pending_idx, usr_idx, mmap_idx; + pending_req_t *pending_req; + + if (!info) @@ -5858,18 +5818,23 @@ and in case upstream wants to take the forward porting patches: + ++info->ufe_ring.rsp_cons; + + /*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/ -+ usr_idx = (int)res.id; -+ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx])); -+ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]); -+ -+ if ( (mmap_idx >= mmap_alloc) || -+ (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS) ) -+ WPRINTK("Incorrect req map" -+ "[%d], internal map [%d,%d (%d)]\n", -+ usr_idx, mmap_idx, -+ 
ID_TO_IDX(info->idx_map[usr_idx]), -+ MASK_PEND_IDX( -+ ID_TO_IDX(info->idx_map[usr_idx]))); ++ if (res.id >= MAX_PENDING_REQS) { ++ WPRINTK("incorrect req map [%llx]\n", ++ (unsigned long long)res.id); ++ continue; ++ } ++ ++ usr_idx = (unsigned int)res.id; ++ pending_idx = info->idx_map[usr_idx].req; ++ mmap_idx = info->idx_map[usr_idx].mem; ++ ++ if (mmap_idx >= mmap_alloc || ++ pending_idx >= MAX_PENDING_REQS) { ++ WPRINTK("incorrect req map [%d]," ++ " internal map [%d,%d]\n", ++ usr_idx, mmap_idx, pending_idx); ++ continue; ++ } + + pending_req = &pending_reqs[mmap_idx][pending_idx]; + blkif = pending_req->blkif; @@ -5888,7 +5853,7 @@ and in case upstream wants to take the forward porting patches: + info->foreign_map.map[offset] = NULL; + } + fast_flush_area(pending_req, pending_idx, usr_idx, info->minor); -+ info->idx_map[usr_idx] = INVALID_REQ; ++ info->idx_map[usr_idx].mem = INVALID_MIDX; + make_response(blkif, pending_req->id, res.operation, + res.status); + blkif_put(pending_req->blkif); @@ -6044,9 +6009,9 @@ and in case upstream wants to take the forward porting patches: + int ret, i, nr_sects = 0; + tap_blkif_t *info; + blkif_request_t *target; -+ int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx); -+ int usr_idx; -+ uint16_t mmap_idx = pending_req->mem_idx; ++ unsigned int mmap_idx = pending_req->mem_idx; ++ unsigned int pending_idx = RTN_PEND_IDX(pending_req, mmap_idx); ++ unsigned int usr_idx; + struct mm_struct *mm; + struct vm_area_struct *vma = NULL; + @@ -6059,8 +6024,8 @@ and in case upstream wants to take the forward porting patches: + + /* Check we have space on user ring - should never fail. */ + usr_idx = GET_NEXT_REQ(info->idx_map); -+ if (usr_idx == INVALID_REQ) { -+ BUG(); ++ if (usr_idx >= MAX_PENDING_REQS) { ++ WARN_ON(1); + goto fail_response; + } + @@ -6147,19 +6112,17 @@ and in case upstream wants to take the forward porting patches: + + uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2); + -+ if (unlikely(map[i].status != 0)) { -+ WPRINTK("invalid kernel buffer -- " -+ "could not remap it\n"); -+ if(map[i].status == GNTST_eagain) -+ WPRINTK("grant GNTST_eagain: please use blktap2\n"); -+ ret |= 1; ++ gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]); ++ ++ if (unlikely(map[i].status != GNTST_okay)) { ++ WPRINTK("invalid kernel buffer -- could not remap it\n"); ++ ret = 1; + map[i].handle = INVALID_GRANT_HANDLE; + } + -+ if (unlikely(map[i+1].status != 0)) { -+ WPRINTK("invalid user buffer -- " -+ "could not remap it\n"); -+ ret |= 1; ++ if (unlikely(map[i+1].status != GNTST_okay)) { ++ WPRINTK("invalid kernel buffer -- could not remap it\n"); ++ ret = 1; + map[i+1].handle = INVALID_GRANT_HANDLE; + } + @@ -6186,12 +6149,11 @@ and in case upstream wants to take the forward porting patches: + + uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i); + -+ if (unlikely(map[i].status != 0)) { -+ WPRINTK("invalid kernel buffer -- " -+ "could not remap it\n"); -+ if(map[i].status == GNTST_eagain) -+ WPRINTK("grant GNTST_eagain: please use blktap2\n"); -+ ret |= 1; ++ gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]); ++ ++ if (unlikely(map[i].status != GNTST_okay)) { ++ WPRINTK("invalid kernel buffer -- could not remap it\n"); ++ ret = 1; + map[i].handle = INVALID_GRANT_HANDLE; + } + @@ -6246,7 +6208,8 @@ and in case upstream wants to take the forward porting patches: + up_write(&mm->mmap_sem); + + /*record [mmap_idx,pending_idx] to [usr_idx] mapping*/ -+ info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx); ++ 
info->idx_map[usr_idx].mem = mmap_idx; ++ info->idx_map[usr_idx].req = pending_idx; + + blkif_get(blkif); + /* Finally, write the request message to the user ring. */ @@ -6399,7 +6362,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap/common.h 2008-09-15 13:40:15.000000000 +0200 ++++ b/drivers/xen/blktap/common.h 2008-09-15 13:40:15.000000000 +0200 @@ -0,0 +1,122 @@ +/* + * This program is free software; you can redistribute it and/or @@ -6524,8 +6487,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __BLKIF__BACKEND__COMMON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap/interface.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,185 @@ ++++ b/drivers/xen/blktap/interface.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,183 @@ +/****************************************************************************** + * drivers/xen/blktap/interface.c + * @@ -6587,25 +6550,23 @@ and in case upstream wants to take the forward porting patches: +static int map_frontend_page(blkif_t *blkif, unsigned long shared_page) +{ + struct gnttab_map_grant_ref op; ++ int ret; + + gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr, + GNTMAP_host_map, shared_page, blkif->domid); + -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while(op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ if (op.status) { -+ DPRINTK(" Grant table operation failure !\n"); -+ return op.status; ++ if (op.status == GNTST_okay) { ++ blkif->shmem_ref = shared_page; ++ blkif->shmem_handle = op.handle; ++ ret = 0; ++ } else { ++ DPRINTK("Grant table operation failure %d!\n", (int)op.status); ++ ret = -EINVAL; + } + -+ blkif->shmem_ref = shared_page; -+ blkif->shmem_handle = op.handle; -+ -+ return 0; ++ return ret; +} + +static void unmap_frontend_page(blkif_t *blkif) @@ -6712,8 +6673,8 @@ and in case upstream wants to take the forward porting patches: + 0, 0, NULL, NULL); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap/xenbus.c 2010-04-29 09:34:47.000000000 +0200 -@@ -0,0 +1,492 @@ ++++ b/drivers/xen/blktap/xenbus.c 2010-11-25 09:36:37.000000000 +0100 +@@ -0,0 +1,508 @@ +/* drivers/xen/blktap/xenbus.c + * + * Xenbus code for blktap @@ -7055,6 +7016,18 @@ and in case upstream wants to take the forward porting patches: + tap_update_blkif_status(be->blkif); +} + ++ ++static void blkif_disconnect(blkif_t *blkif) ++{ ++ if (blkif->xenblkd) { ++ kthread_stop(blkif->xenblkd); ++ blkif->xenblkd = NULL; ++ } ++ ++ /* idempotent */ ++ tap_blkif_free(blkif); ++} ++ +/** + * Callback received when the frontend's state changes. + */ @@ -7083,6 +7056,11 @@ and in case upstream wants to take the forward porting patches: + if (dev->state == XenbusStateConnected) + break; + ++ /* Enforce precondition before potential leak point. ++ * blkif_disconnect() is idempotent. 
++ */ ++ blkif_disconnect(be->blkif); ++ + err = connect_ring(be); + if (err) + break; @@ -7090,11 +7068,7 @@ and in case upstream wants to take the forward porting patches: + break; + + case XenbusStateClosing: -+ if (be->blkif->xenblkd) { -+ kthread_stop(be->blkif->xenblkd); -+ be->blkif->xenblkd = NULL; -+ } -+ tap_blkif_free(be->blkif); ++ blkif_disconnect(be->blkif); + xenbus_switch_state(dev, XenbusStateClosing); + break; + @@ -7104,6 +7078,9 @@ and in case upstream wants to take the forward porting patches: + break; + /* fall through if not online */ + case XenbusStateUnknown: ++ /* Implies the effects of blkif_disconnect() via ++ * blktap_remove(). ++ */ + device_unregister(&dev->dev); + break; + @@ -7207,13 +7184,13 @@ and in case upstream wants to take the forward porting patches: + xenbus_register_backend(&blktap); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap2/Makefile 2009-05-29 10:25:53.000000000 +0200 ++++ b/drivers/xen/blktap2/Makefile 2009-05-29 10:25:53.000000000 +0200 @@ -0,0 +1,3 @@ +obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap.o + +blktap-objs := control.o ring.o wait_queue.o device.o request.o sysfs.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap2/blktap.h 2010-02-24 13:13:46.000000000 +0100 ++++ b/drivers/xen/blktap2/blktap.h 2010-02-24 13:13:46.000000000 +0100 @@ -0,0 +1,254 @@ +#ifndef _BLKTAP_H_ +#define _BLKTAP_H_ @@ -7470,7 +7447,7 @@ and in case upstream wants to take the forward porting patches: + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap2/control.c 2010-04-29 09:34:47.000000000 +0200 ++++ b/drivers/xen/blktap2/control.c 2010-04-29 09:34:47.000000000 +0200 @@ -0,0 +1,277 @@ +#include +#include @@ -7750,8 +7727,8 @@ and in case upstream wants to take the forward porting patches: +module_exit(blktap_exit); +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap2/device.c 2010-03-02 09:56:10.000000000 +0100 -@@ -0,0 +1,1194 @@ ++++ b/drivers/xen/blktap2/device.c 2010-11-25 09:36:37.000000000 +0100 +@@ -0,0 +1,1191 @@ +#include +#include +#include @@ -8035,7 +8012,6 @@ and in case upstream wants to take the forward porting patches: + + page = map[offset]; + if (page) { -+ ClearPageReserved(map[offset]); + if (PageBlkback(page)) { + ClearPageBlkback(page); + set_page_private(page, 0); @@ -8260,10 +8236,10 @@ and in case upstream wants to take the forward porting patches: + + uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i); + -+ if (unlikely(table->grants[grant].status)) { ++ if (unlikely(table->grants[grant].status != GNTST_okay)) { + BTERR("invalid kernel buffer: could not remap it\n"); -+ /* This should never happen: blkback should handle eagain first */ -+ BUG_ON(table->grants[grant].status == GNTST_eagain); ++ /* This should never happen: blkback should handle eagain first */ ++ BUG_ON(table->grants[grant].status == GNTST_eagain); + err |= 1; + table->grants[grant].handle = INVALID_GRANT_HANDLE; + } @@ -8272,19 +8248,18 @@ and in case upstream wants to take the forward porting patches: + foreign_mfn = table->grants[grant].dev_bus_addr >> PAGE_SHIFT; + grant++; + -+ if (xen_feature(XENFEAT_auto_translated_physmap)) -+ goto done; -+ -+ if (unlikely(table->grants[grant].status)) { -+ BTERR("invalid user buffer: could not remap it\n"); -+ err |= 1; -+ table->grants[grant].handle = INVALID_GRANT_HANDLE; ++ if (!xen_feature(XENFEAT_auto_translated_physmap)) { 
++ if (unlikely(table->grants[grant].status != GNTST_okay)) { ++ /* This should never happen: blkback should handle eagain first */ ++ WARN_ON(table->grants[grant].status == GNTST_eagain); ++ BTERR("invalid user buffer: could not remap it\n"); ++ err |= 1; ++ table->grants[grant].handle = INVALID_GRANT_HANDLE; ++ } ++ request->handles[i].user = table->grants[grant].handle; ++ grant++; + } + -+ request->handles[i].user = table->grants[grant].handle; -+ grant++; -+ -+ done: + if (err) + continue; + @@ -8357,11 +8332,10 @@ and in case upstream wants to take the forward porting patches: + set_page_private(tap_page, page_private(page)); + SetPageBlkback(tap_page); + -+ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, -+ &map, 1); -+ BUG_ON(err); -+ /* We are not expecting the grant op to fail */ -+ BUG_ON(map.status != GNTST_okay); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &map); ++ ++ /* We are not expecting the grant op to fail */ ++ BUG_ON(map.status != GNTST_okay); + + err = vm_insert_page(ring->vma, uvaddr, tap_page); + if (err) { @@ -8645,7 +8619,7 @@ and in case upstream wants to take the forward porting patches: + blkdev_dequeue_request(req); + + spin_unlock_irq(&dev->lock); -+ down_read(&tap->tap_sem); ++ down_write(&tap->tap_sem); + + err = blktap_device_process_request(tap, request, req); + if (!err) @@ -8655,7 +8629,7 @@ and in case upstream wants to take the forward porting patches: + blktap_request_free(tap, request); + } + -+ up_read(&tap->tap_sem); ++ up_write(&tap->tap_sem); + spin_lock_irq(&dev->lock); + } + @@ -8947,7 +8921,7 @@ and in case upstream wants to take the forward porting patches: + BTERR("blktap device unregister failed\n"); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap2/request.c 2010-01-04 11:56:34.000000000 +0100 ++++ b/drivers/xen/blktap2/request.c 2010-01-04 11:56:34.000000000 +0100 @@ -0,0 +1,296 @@ +#include +#include @@ -9246,8 +9220,8 @@ and in case upstream wants to take the forward porting patches: + return err; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap2/ring.c 2010-04-29 09:34:47.000000000 +0200 -@@ -0,0 +1,613 @@ ++++ b/drivers/xen/blktap2/ring.c 2010-08-31 09:24:21.000000000 +0200 +@@ -0,0 +1,610 @@ +#include +#include + @@ -9362,12 +9336,9 @@ and in case upstream wants to take the forward porting patches: + + offset = (int)((uvaddr - vma->vm_start) >> PAGE_SHIFT); + page = map[offset]; -+ if (page) { -+ ClearPageReserved(page); -+ if (PageBlkback(page)) { -+ ClearPageBlkback(page); -+ set_page_private(page, 0); -+ } ++ if (page && PageBlkback(page)) { ++ ClearPageBlkback(page); ++ set_page_private(page, 0); + } + map[offset] = NULL; + @@ -9613,7 +9584,7 @@ and in case upstream wants to take the forward porting patches: + + down_read(&tap->tap_sem); + if (ring->ring.sring) -+ ring->ring.sring->pad[0] = msg; ++ ring->ring.sring->private.tapif_user.msg = msg; + up_read(&tap->tap_sem); +} + @@ -9723,7 +9694,7 @@ and in case upstream wants to take the forward porting patches: + struct blktap_ring *ring = &tap->ring; + + poll_wait(filp, &ring->poll_wait, wait); -+ if (ring->ring.sring->pad[0] != 0 || ++ if (ring->ring.sring->private.tapif_user.msg || + ring->ring.req_prod_pvt != ring->ring.sring->req_prod) { + RING_PUSH_REQUESTS(&ring->ring); + return POLLIN | POLLRDNORM; @@ -9862,7 +9833,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2010-04-29/drivers/xen/blktap2/sysfs.c 2009-12-16 11:43:21.000000000 +0100 ++++ b/drivers/xen/blktap2/sysfs.c 2011-03-02 12:00:16.000000000 +0100 @@ -0,0 +1,425 @@ +#include +#include @@ -9903,9 +9874,9 @@ and in case upstream wants to take the forward porting patches: +} + +static ssize_t blktap_sysfs_pause_device(struct class_device *, const char *, size_t); -+CLASS_DEVICE_ATTR(pause, S_IWUSR, NULL, blktap_sysfs_pause_device); ++static CLASS_DEVICE_ATTR(pause, S_IWUSR, NULL, blktap_sysfs_pause_device); +static ssize_t blktap_sysfs_resume_device(struct class_device *, const char *, size_t); -+CLASS_DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); ++static CLASS_DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); + +static ssize_t +blktap_sysfs_set_name(struct class_device *dev, const char *buf, size_t size) @@ -9963,8 +9934,8 @@ and in case upstream wants to take the forward porting patches: + + return size; +} -+CLASS_DEVICE_ATTR(name, S_IRUSR | S_IWUSR, -+ blktap_sysfs_get_name, blktap_sysfs_set_name); ++static CLASS_DEVICE_ATTR(name, S_IRUSR | S_IWUSR, ++ blktap_sysfs_get_name, blktap_sysfs_set_name); + +static ssize_t +blktap_sysfs_remove_device(struct class_device *dev, @@ -9983,7 +9954,7 @@ and in case upstream wants to take the forward porting patches: + + return (err ? : size); +} -+CLASS_DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); ++static CLASS_DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); + +static ssize_t +blktap_sysfs_pause_device(struct class_device *dev, @@ -10151,7 +10122,7 @@ and in case upstream wants to take the forward porting patches: + + return ret; +} -+CLASS_DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); ++static CLASS_DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); + +int +blktap_sysfs_create(struct blktap *tap) @@ -10231,8 +10202,8 @@ and in case upstream wants to take the forward porting patches: + + return -EINVAL; +} -+CLASS_ATTR(verbosity, S_IRUSR | S_IWUSR, -+ blktap_sysfs_show_verbosity, blktap_sysfs_set_verbosity); ++static CLASS_ATTR(verbosity, S_IRUSR | S_IWUSR, ++ blktap_sysfs_show_verbosity, blktap_sysfs_set_verbosity); + +static ssize_t +blktap_sysfs_show_devices(struct class *class, char *buf) @@ -10257,7 +10228,7 @@ and in case upstream wants to take the forward porting patches: + + return ret; +} -+CLASS_ATTR(devices, S_IRUSR, blktap_sysfs_show_devices, NULL); ++static CLASS_ATTR(devices, S_IRUSR, blktap_sysfs_show_devices, NULL); + +void +blktap_sysfs_free(void) @@ -10290,7 +10261,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/blktap2/wait_queue.c 2009-05-29 10:25:53.000000000 +0200 ++++ b/drivers/xen/blktap2/wait_queue.c 2009-05-29 10:25:53.000000000 +0200 @@ -0,0 +1,40 @@ +#include +#include @@ -10333,11 +10304,11 @@ and in case upstream wants to take the forward porting patches: + spin_unlock_irqrestore(&deferred_work_lock, flags); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/char/Makefile 2007-07-10 09:42:30.000000000 +0200 ++++ b/drivers/xen/char/Makefile 2007-07-10 09:42:30.000000000 +0200 @@ -0,0 +1 @@ +obj-$(CONFIG_XEN_DEVMEM) := mem.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/char/mem.c 2007-08-06 15:10:49.000000000 +0200 ++++ b/drivers/xen/char/mem.c 2007-08-06 15:10:49.000000000 +0200 @@ -0,0 +1,190 @@ +/* + * Originally from 
linux/drivers/char/mem.c @@ -10530,12 +10501,12 @@ and in case upstream wants to take the forward porting patches: + .open = open_mem, +}; --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/console/Makefile 2007-06-12 13:13:44.000000000 +0200 ++++ b/drivers/xen/console/Makefile 2007-06-12 13:13:44.000000000 +0200 @@ -0,0 +1,2 @@ + +obj-y := console.o xencons_ring.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/console/console.c 2009-03-18 10:39:31.000000000 +0100 ++++ b/drivers/xen/console/console.c 2009-03-18 10:39:31.000000000 +0100 @@ -0,0 +1,753 @@ +/****************************************************************************** + * console.c @@ -11291,7 +11262,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/console/xencons_ring.c 2007-06-12 13:13:44.000000000 +0200 ++++ b/drivers/xen/console/xencons_ring.c 2007-06-12 13:13:44.000000000 +0200 @@ -0,0 +1,143 @@ +/* + * This program is free software; you can redistribute it and/or @@ -11437,7 +11408,7 @@ and in case upstream wants to take the forward porting patches: + (void)xencons_ring_init(); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/Makefile 2008-07-21 11:00:33.000000000 +0200 ++++ b/drivers/xen/core/Makefile 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,14 @@ +# +# Makefile for the linux kernel. @@ -11454,11 +11425,12 @@ and in case upstream wants to take the forward porting patches: +obj-$(CONFIG_KEXEC) += machine_kexec.o +obj-$(CONFIG_XEN_XENCOMM) += xencomm.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/cpu_hotplug.c 2009-04-07 13:58:48.000000000 +0200 -@@ -0,0 +1,176 @@ ++++ b/drivers/xen/core/cpu_hotplug.c 2011-01-24 12:06:05.000000000 +0100 +@@ -0,0 +1,179 @@ +#include +#include +#include ++#include +#include +#include +#include @@ -11482,7 +11454,7 @@ and in case upstream wants to take the forward porting patches: + return (current->mm != NULL); +} + -+static void vcpu_hotplug(unsigned int cpu) ++static void vcpu_hotplug(unsigned int cpu, struct sys_device *dev) +{ + int err; + char dir[32], state[32]; @@ -11499,10 +11471,12 @@ and in case upstream wants to take the forward porting patches: + + if (strcmp(state, "online") == 0) { + cpu_set(cpu, xenbus_allowed_cpumask); -+ (void)cpu_up(cpu); ++ if (!cpu_up(cpu) && dev) ++ kobject_uevent(&dev->kobj, KOBJ_ONLINE); + } else if (strcmp(state, "offline") == 0) { + cpu_clear(cpu, xenbus_allowed_cpumask); -+ (void)cpu_down(cpu); ++ if (!cpu_down(cpu) && dev) ++ kobject_uevent(&dev->kobj, KOBJ_OFFLINE); + } else { + printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", + state, cpu); @@ -11518,7 +11492,7 @@ and in case upstream wants to take the forward porting patches: + + if ((cpustr = strstr(node, "cpu/")) != NULL) { + sscanf(cpustr, "cpu/%u", &cpu); -+ vcpu_hotplug(cpu); ++ vcpu_hotplug(cpu, get_cpu_sysdev(cpu)); + } +} + @@ -11551,7 +11525,7 @@ and in case upstream wants to take the forward porting patches: + + if (!is_initial_xendomain()) { + for_each_possible_cpu(i) -+ vcpu_hotplug(i); ++ vcpu_hotplug(i, get_cpu_sysdev(i)); + printk(KERN_INFO "Brought up %ld CPUs\n", + (long)num_online_cpus()); + } @@ -11590,7 +11564,7 @@ and in case upstream wants to take the forward porting patches: + printk(KERN_CRIT "Failed to take all CPUs " + "down: %d.\n", err); + for_each_possible_cpu(cpu) 
-+ vcpu_hotplug(cpu); ++ vcpu_hotplug(cpu, NULL); + return err; + } + } @@ -11605,7 +11579,7 @@ and in case upstream wants to take the forward porting patches: + for_each_possible_cpu(cpu) { + if (cpu == 0) + continue; -+ vcpu_hotplug(cpu); ++ vcpu_hotplug(cpu, NULL); + } +} + @@ -11633,8 +11607,8 @@ and in case upstream wants to take the forward porting patches: + xenbus_allowed_cpumask = cpu_present_map; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/evtchn.c 2010-02-24 11:50:47.000000000 +0100 -@@ -0,0 +1,1187 @@ ++++ b/drivers/xen/core/evtchn.c 2010-11-25 09:36:37.000000000 +0100 +@@ -0,0 +1,1204 @@ +/****************************************************************************** + * evtchn.c + * @@ -11798,7 +11772,10 @@ and in case upstream wants to take the forward porting patches: + set_native_irq_info(i, cpumask_of_cpu(0)); + + memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); -+ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); ++ for_each_possible_cpu(i) ++ memset(cpu_evtchn_mask[i], ++ (i == 0) ? ~0 : 0, ++ sizeof(cpu_evtchn_mask[i])); +} + +static inline unsigned int cpu_from_evtchn(unsigned int evtchn) @@ -11896,6 +11873,20 @@ and in case upstream wants to take the forward porting patches: + /* Clear master flag /before/ clearing selector flag. */ + wmb(); +#endif ++ ++ /* ++ * Handle timer interrupts before all others, so that all ++ * hardirq handlers see an up-to-date system time even if we ++ * have just woken from a long idle period. ++ */ ++ if ((irq = __get_cpu_var(virq_to_irq)[VIRQ_TIMER]) != -1) { ++ port = evtchn_from_irq(irq); ++ l1i = port / BITS_PER_LONG; ++ l2i = port % BITS_PER_LONG; ++ if (active_evtchns(cpu, s, l1i) & (1ul<evtchn_pending_sel, 0); + + start_l1i = l1i = per_cpu(current_l1i, cpu); @@ -12823,7 +12814,7 @@ and in case upstream wants to take the forward porting patches: + } +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/firmware.c 2007-06-22 09:08:06.000000000 +0200 ++++ b/drivers/xen/core/firmware.c 2007-06-22 09:08:06.000000000 +0200 @@ -0,0 +1,74 @@ +#include +#include @@ -12900,7 +12891,7 @@ and in case upstream wants to take the forward porting patches: +#endif +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/gnttab.c 2009-03-18 10:39:31.000000000 +0100 ++++ b/drivers/xen/core/gnttab.c 2010-09-23 15:39:04.000000000 +0200 @@ -0,0 +1,773 @@ +/****************************************************************************** + * gnttab.c @@ -13391,7 +13382,7 @@ and in case upstream wants to take the forward porting patches: + return -ENOSYS; + } + -+ BUG_ON(rc || setup.status); ++ BUG_ON(rc || setup.status != GNTST_okay); + + if (shared == NULL) + shared = arch_gnttab_alloc_shared(frames); @@ -13475,7 +13466,7 @@ and in case upstream wants to take the forward porting patches: + err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, + &unmap, 1); + BUG_ON(err); -+ BUG_ON(unmap.status); ++ BUG_ON(unmap.status != GNTST_okay); + + write_sequnlock(&gnttab_dma_lock); + @@ -13676,7 +13667,7 @@ and in case upstream wants to take the forward porting patches: +core_initcall(gnttab_init); +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/hypervisor_sysfs.c 2007-07-10 09:42:30.000000000 +0200 ++++ b/drivers/xen/core/hypervisor_sysfs.c 2007-07-10 09:42:30.000000000 +0200 @@ -0,0 +1,57 @@ +/* + * copyright (c) 2006 IBM Corporation @@ -13736,7 +13727,7 @@ and in case upstream wants to 
take the forward porting patches: + +device_initcall(hypervisor_subsys_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/machine_kexec.c 2009-07-13 14:25:35.000000000 +0200 ++++ b/drivers/xen/core/machine_kexec.c 2009-07-13 14:25:35.000000000 +0200 @@ -0,0 +1,230 @@ +/* + * drivers/xen/core/machine_kexec.c @@ -13969,8 +13960,8 @@ and in case upstream wants to take the forward porting patches: + * End: + */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/machine_reboot.c 2008-09-01 12:07:31.000000000 +0200 -@@ -0,0 +1,247 @@ ++++ b/drivers/xen/core/machine_reboot.c 2011-01-24 12:06:05.000000000 +0100 +@@ -0,0 +1,242 @@ +#include +#include +#include @@ -14025,11 +14016,6 @@ and in case upstream wants to take the forward porting patches: + HYPERVISOR_shutdown(SHUTDOWN_poweroff); +} + -+int reboot_thru_bios = 0; /* for dmi_scan.c */ -+EXPORT_SYMBOL(machine_restart); -+EXPORT_SYMBOL(machine_halt); -+EXPORT_SYMBOL(machine_power_off); -+ +static void pre_suspend(void) +{ + HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; @@ -14219,7 +14205,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/pci.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/core/pci.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,83 @@ +/* + * vim:shiftwidth=8:noexpandtab @@ -14305,8 +14291,8 @@ and in case upstream wants to take the forward porting patches: + +core_initcall(hook_pci_bus); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/reboot.c 2008-08-07 12:44:36.000000000 +0200 -@@ -0,0 +1,335 @@ ++++ b/drivers/xen/core/reboot.c 2010-11-25 09:36:37.000000000 +0100 +@@ -0,0 +1,338 @@ +#define __KERNEL_SYSCALLS__ +#include +#include @@ -14586,6 +14572,15 @@ and in case upstream wants to take the forward porting patches: +{ + int err; + ++ err = register_xenbus_watch(&sysrq_watch); ++ if (err) { ++ printk(KERN_ERR "Failed to set sysrq watcher\n"); ++ return err; ++ } ++ ++ if (is_initial_xendomain()) ++ return 0; ++ + xenbus_scanf(XBT_NIL, "control", + "platform-feature-multiprocessor-suspend", + "%d", &fast_suspend); @@ -14596,12 +14591,6 @@ and in case upstream wants to take the forward porting patches: + return err; + } + -+ err = register_xenbus_watch(&sysrq_watch); -+ if (err) { -+ printk(KERN_ERR "Failed to set sysrq watcher\n"); -+ return err; -+ } -+ + /* suspend event channel */ + err = setup_suspend_evtchn(); + if (err) { @@ -14643,8 +14632,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* !defined(CONFIG_XEN) */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/smpboot.c 2009-05-19 09:16:41.000000000 +0200 -@@ -0,0 +1,460 @@ ++++ b/drivers/xen/core/smpboot.c 2010-11-08 17:27:03.000000000 +0100 +@@ -0,0 +1,456 @@ +/* + * Xen SMP booting functions + * @@ -14700,8 +14689,6 @@ and in case upstream wants to take the forward porting patches: +static char resched_name[NR_CPUS][15]; +static char callfunc_name[NR_CPUS][15]; + -+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... 
NR_CPUS-1] = BAD_APICID }; -+ +cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; +cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; +EXPORT_SYMBOL(cpu_core_map); @@ -14921,7 +14908,6 @@ and in case upstream wants to take the forward porting patches: + boot_cpu_data.apicid = apicid; + cpu_data[0] = boot_cpu_data; + -+ cpu_2_logical_apicid[0] = apicid; + x86_cpu_to_apicid[0] = apicid; + + current_thread_info()->cpu = 0; @@ -14978,7 +14964,6 @@ and in case upstream wants to take the forward porting patches: + cpu_data[cpu] = boot_cpu_data; + cpu_data[cpu].apicid = apicid; + -+ cpu_2_logical_apicid[cpu] = apicid; + x86_cpu_to_apicid[cpu] = apicid; + + idle = fork_idle(cpu); @@ -15106,7 +15091,7 @@ and in case upstream wants to take the forward porting patches: +} +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/xen_proc.c 2007-06-12 13:13:44.000000000 +0200 ++++ b/drivers/xen/core/xen_proc.c 2007-06-12 13:13:44.000000000 +0200 @@ -0,0 +1,23 @@ + +#include @@ -15132,7 +15117,7 @@ and in case upstream wants to take the forward porting patches: + +EXPORT_SYMBOL_GPL(remove_xen_proc_entry); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/xen_sysfs.c 2009-05-29 10:25:53.000000000 +0200 ++++ b/drivers/xen/core/xen_sysfs.c 2009-05-29 10:25:53.000000000 +0200 @@ -0,0 +1,427 @@ +/* + * copyright (c) 2006 IBM Corporation @@ -15562,13 +15547,13 @@ and in case upstream wants to take the forward porting patches: +module_init(hyper_sysfs_init); +module_exit(hyper_sysfs_exit); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/fbfront/Makefile 2007-06-12 13:13:45.000000000 +0200 ++++ b/drivers/xen/fbfront/Makefile 2007-06-12 13:13:45.000000000 +0200 @@ -0,0 +1,2 @@ +obj-$(CONFIG_XEN_FRAMEBUFFER) := xenfb.o +obj-$(CONFIG_XEN_KEYBOARD) += xenkbd.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/fbfront/xenfb.c 2009-12-04 08:45:56.000000000 +0100 -@@ -0,0 +1,888 @@ ++++ b/drivers/xen/fbfront/xenfb.c 2011-03-02 12:00:16.000000000 +0100 +@@ -0,0 +1,890 @@ +/* + * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device + * @@ -16306,30 +16291,29 @@ and in case upstream wants to take the forward porting patches: +static int xenfb_connect_backend(struct xenbus_device *dev, + struct xenfb_info *info) +{ -+ int ret; ++ int ret, irq; + struct xenbus_transaction xbt; + -+ ret = bind_listening_port_to_irqhandler( ++ irq = bind_listening_port_to_irqhandler( + dev->otherend_id, xenfb_event_handler, 0, "xenfb", info); -+ if (ret < 0) { -+ xenbus_dev_fatal(dev, ret, ++ if (irq < 0) { ++ xenbus_dev_fatal(dev, irq, + "bind_listening_port_to_irqhandler"); -+ return ret; ++ return irq; + } -+ info->irq = ret; + + again: + ret = xenbus_transaction_start(&xbt); + if (ret) { + xenbus_dev_fatal(dev, ret, "starting transaction"); -+ return ret; ++ goto unbind_irq; + } + ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", + virt_to_mfn(info->page)); + if (ret) + goto error_xenbus; + ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", -+ irq_to_evtchn_port(info->irq)); ++ irq_to_evtchn_port(irq)); + if (ret) + goto error_xenbus; + ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s", @@ -16344,15 +16328,18 @@ and in case upstream wants to take the forward porting patches: + if (ret == -EAGAIN) + goto again; + xenbus_dev_fatal(dev, ret, "completing transaction"); -+ return ret; ++ goto unbind_irq; + } + ++ info->irq = irq; + 
xenbus_switch_state(dev, XenbusStateInitialised); + return 0; + + error_xenbus: + xenbus_transaction_end(xbt, 1); + xenbus_dev_fatal(dev, ret, "writing xenstore"); ++ unbind_irq: ++ unbind_from_irqhandler(irq, info); + return ret; +} + @@ -16458,7 +16445,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/fbfront/xenkbd.c 2008-04-02 12:34:02.000000000 +0200 ++++ b/drivers/xen/fbfront/xenkbd.c 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,354 @@ +/* + * linux/drivers/input/keyboard/xenkbd.c -- Xen para-virtual input device @@ -16815,12 +16802,12 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/gntdev/Makefile 2008-01-07 13:19:18.000000000 +0100 ++++ b/drivers/xen/gntdev/Makefile 2008-01-07 13:19:18.000000000 +0100 @@ -0,0 +1 @@ +obj-$(CONFIG_XEN_GRANT_DEV) := gntdev.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/gntdev/gntdev.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,1083 @@ ++++ b/drivers/xen/gntdev/gntdev.c 2011-01-03 12:43:21.000000000 +0100 +@@ -0,0 +1,1038 @@ +/****************************************************************************** + * gntdev.c + * @@ -17026,19 +17013,12 @@ and in case upstream wants to take the forward porting patches: + * value of *offset to the offset that should be mmap()-ed in order to map the + * grant reference. + */ -+static int add_grant_reference(struct file *flip, ++static int add_grant_reference(gntdev_file_private_data_t *private_data, + struct ioctl_gntdev_grant_ref *op, + uint64_t *offset) +{ -+ gntdev_file_private_data_t *private_data -+ = (gntdev_file_private_data_t *) flip->private_data; -+ + uint32_t slot_index; + -+ if (unlikely(private_data->free_list_size == 0)) { -+ return -ENOMEM; -+ } -+ + slot_index = private_data->free_list[--private_data->free_list_size]; + private_data->free_list[private_data->free_list_size] + = GNTDEV_FREE_LIST_INVALID; @@ -17062,19 +17042,17 @@ and in case upstream wants to take the forward porting patches: + * previous invocation of find_contiguous_free_range(), during the same + * invocation of the driver. + */ -+static int add_grant_references(struct file *flip, -+ int count, ++static int add_grant_references(gntdev_file_private_data_t *private_data, ++ uint32_t count, + struct ioctl_gntdev_grant_ref *ops, + uint32_t first_slot) +{ -+ gntdev_file_private_data_t *private_data -+ = (gntdev_file_private_data_t *) flip->private_data; -+ int i; ++ uint32_t i; + + for (i = 0; i < count; ++i) { + + /* First, mark the slot's entry in the free list as invalid. */ -+ int free_list_index = ++ uint32_t free_list_index = + private_data->grants[first_slot+i].u.free_list_index; + private_data->free_list[free_list_index] = + GNTDEV_FREE_LIST_INVALID; @@ -17094,16 +17072,16 @@ and in case upstream wants to take the forward porting patches: + * GNTDEV_SLOT_INVALID. This will reduce the recorded size of the free list to + * the number of valid entries. 
+ */ -+static void compress_free_list(struct file *flip) ++static void compress_free_list(gntdev_file_private_data_t *private_data) +{ -+ gntdev_file_private_data_t *private_data -+ = (gntdev_file_private_data_t *) flip->private_data; -+ int i, j = 0, old_size, slot_index; ++ uint32_t i, j = 0, old_size; + + old_size = private_data->free_list_size; + for (i = 0; i < old_size; ++i) { + if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) { + if (i > j) { ++ int32_t slot_index; ++ + slot_index = private_data->free_list[i]; + private_data->free_list[j] = slot_index; + private_data->grants[slot_index].u @@ -17123,19 +17101,11 @@ and in case upstream wants to take the forward porting patches: + * + * Returns the index of the first slot if a range is found, otherwise -ENOMEM. + */ -+static int find_contiguous_free_range(struct file *flip, ++static int find_contiguous_free_range(gntdev_file_private_data_t *private_data, + uint32_t num_slots) +{ -+ gntdev_file_private_data_t *private_data -+ = (gntdev_file_private_data_t *) flip->private_data; -+ -+ int i; -+ int start_index = private_data->next_fit_index; -+ int range_start = 0, range_length; -+ -+ if (private_data->free_list_size < num_slots) { -+ return -ENOMEM; -+ } ++ uint32_t i, start_index = private_data->next_fit_index; ++ uint32_t range_start = 0, range_length; + + /* First search from the start_index to the end of the array. */ + range_length = 0; @@ -17401,7 +17371,7 @@ and in case upstream wants to take the forward porting patches: + vma->vm_mm->context.has_foreign_mappings = 1; +#endif + -+ exit_ret = -ENOMEM; ++ exit_ret = -ENOMEM; + for (i = 0; i < size; ++i) { + + flags = GNTMAP_host_map; @@ -17422,8 +17392,8 @@ and in case upstream wants to take the forward porting patches: + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, + &op, 1); + BUG_ON(ret); -+ if (op.status) { -+ if(op.status != GNTST_eagain) ++ if (op.status != GNTST_okay) { ++ if (op.status != GNTST_eagain) + printk(KERN_ERR "Error mapping the grant reference " + "into the kernel (%d). domid = %d; ref = %d\n", + op.status, @@ -17431,9 +17401,9 @@ and in case upstream wants to take the forward porting patches: + .u.valid.domid, + private_data->grants[slot_index+i] + .u.valid.ref); -+ else -+ /* Propagate eagain instead of trying to fix it up */ -+ exit_ret = -EAGAIN; ++ else ++ /* Propagate eagain instead of trying to fix it up */ ++ exit_ret = -EAGAIN; + goto undo_map_out; + } + @@ -17502,7 +17472,7 @@ and in case upstream wants to take the forward porting patches: + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, + &op, 1); + BUG_ON(ret); -+ if (op.status) { ++ if (op.status != GNTST_okay) { + printk(KERN_ERR "Error mapping the grant " + "reference into user space (%d). domid " + "= %d; ref = %d\n", op.status, @@ -17510,9 +17480,9 @@ and in case upstream wants to take the forward porting patches: + .valid.domid, + private_data->grants[slot_index+i].u + .valid.ref); -+ /* This should never happen after we've mapped into -+ * the kernel space. */ -+ BUG_ON(op.status == GNTST_eagain); ++ /* This should never happen after we've mapped into ++ * the kernel space. 
*/ ++ BUG_ON(op.status == GNTST_eagain); + goto undo_map_out; + } + @@ -17536,7 +17506,7 @@ and in case upstream wants to take the forward porting patches: + } + + } -+ exit_ret = 0; ++ exit_ret = 0; + + up_write(&private_data->grants_sem); + return exit_ret; @@ -17608,7 +17578,7 @@ and in case upstream wants to take the forward porting patches: + ret = HYPERVISOR_grant_table_op( + GNTTABOP_unmap_grant_ref, &op, 1); + BUG_ON(ret); -+ if (op.status) ++ if (op.status != GNTST_okay) + printk("User unmap grant status = %d\n", + op.status); + } else { @@ -17625,7 +17595,7 @@ and in case upstream wants to take the forward porting patches: + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + &op, 1); + BUG_ON(ret); -+ if (op.status) ++ if (op.status != GNTST_okay) + printk("Kernel unmap grant status = %d\n", op.status); + + @@ -17695,89 +17665,82 @@ and in case upstream wants to take the forward porting patches: + case IOCTL_GNTDEV_MAP_GRANT_REF: + { + struct ioctl_gntdev_map_grant_ref op; ++ struct ioctl_gntdev_grant_ref *refs = NULL; ++ ++ if (copy_from_user(&op, (void __user *)arg, sizeof(op))) ++ return -EFAULT; ++ if (unlikely(op.count <= 0)) ++ return -EINVAL; ++ ++ if (op.count > 1 && op.count <= private_data->grants_size) { ++ struct ioctl_gntdev_grant_ref *u; ++ ++ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL); ++ if (!refs) ++ return -ENOMEM; ++ u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs; ++ if (copy_from_user(refs, (void __user *)u, ++ sizeof(*refs) * op.count)) { ++ kfree(refs); ++ return -EFAULT; ++ } ++ } ++ + down_write(&private_data->grants_sem); + down_write(&private_data->free_list_sem); + -+ if ((rc = copy_from_user(&op, (void __user *) arg, -+ sizeof(op)))) { -+ rc = -EFAULT; -+ goto map_out; -+ } -+ if (unlikely(op.count <= 0)) { -+ rc = -EINVAL; ++ if (unlikely(op.count > private_data->free_list_size)) { ++ rc = -ENOMEM; + goto map_out; + } + + if (op.count == 1) { -+ if ((rc = add_grant_reference(flip, &op.refs[0], ++ if ((rc = add_grant_reference(private_data, op.refs, + &op.index)) < 0) { + printk(KERN_ERR "Adding grant reference " + "failed (%d).\n", rc); + goto map_out; + } + } else { -+ struct ioctl_gntdev_grant_ref *refs, *u; -+ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL); -+ if (!refs) { -+ rc = -ENOMEM; -+ goto map_out; -+ } -+ u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs; -+ if ((rc = copy_from_user(refs, -+ (void __user *)u, -+ sizeof(*refs) * op.count))) { -+ printk(KERN_ERR "Copying refs from user failed" -+ " (%d).\n", rc); -+ rc = -EINVAL; -+ goto map_out; -+ } -+ if ((rc = find_contiguous_free_range(flip, op.count)) -+ < 0) { ++ if ((rc = find_contiguous_free_range(private_data, ++ op.count)) < 0) { + printk(KERN_ERR "Finding contiguous range " + "failed (%d).\n", rc); -+ kfree(refs); + goto map_out; + } + op.index = rc << PAGE_SHIFT; -+ if ((rc = add_grant_references(flip, op.count, ++ if ((rc = add_grant_references(private_data, op.count, + refs, rc))) { + printk(KERN_ERR "Adding grant references " + "failed (%d).\n", rc); -+ kfree(refs); + goto map_out; + } -+ compress_free_list(flip); -+ kfree(refs); -+ } -+ if ((rc = copy_to_user((void __user *) arg, -+ &op, -+ sizeof(op)))) { -+ printk(KERN_ERR "Copying result back to user failed " -+ "(%d)\n", rc); -+ rc = -EFAULT; -+ goto map_out; ++ compress_free_list(private_data); + } ++ + map_out: -+ up_write(&private_data->grants_sem); + up_write(&private_data->free_list_sem); ++ up_write(&private_data->grants_sem); ++ ++ kfree(refs); ++ ++ if (!rc && copy_to_user((void 
__user *)arg, &op, sizeof(op))) ++ rc = -EFAULT; + return rc; + } + case IOCTL_GNTDEV_UNMAP_GRANT_REF: + { + struct ioctl_gntdev_unmap_grant_ref op; -+ int i, start_index; -+ -+ down_write(&private_data->grants_sem); -+ down_write(&private_data->free_list_sem); ++ uint32_t i, start_index; + -+ if ((rc = copy_from_user(&op, -+ (void __user *) arg, -+ sizeof(op)))) { -+ rc = -EFAULT; -+ goto unmap_out; -+ } ++ if (copy_from_user(&op, (void __user *)arg, sizeof(op))) ++ return -EFAULT; + + start_index = op.index >> PAGE_SHIFT; ++ if (start_index + op.count > private_data->grants_size) ++ return -EINVAL; ++ ++ down_write(&private_data->grants_sem); + + /* First, check that all pages are in the NOT_YET_MAPPED + * state. @@ -17807,6 +17770,8 @@ and in case upstream wants to take the forward porting patches: + } + } + ++ down_write(&private_data->free_list_sem); ++ + /* Unmap pages and add them to the free list. + */ + for (i = 0; i < op.count; ++i) { @@ -17819,9 +17784,9 @@ and in case upstream wants to take the forward porting patches: + ++private_data->free_list_size; + } + ++ up_write(&private_data->free_list_sem); + unmap_out: + up_write(&private_data->grants_sem); -+ up_write(&private_data->free_list_sem); + return rc; + } + case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR: @@ -17830,25 +17795,16 @@ and in case upstream wants to take the forward porting patches: + struct vm_area_struct *vma; + unsigned long vaddr; + -+ if ((rc = copy_from_user(&op, -+ (void __user *) arg, -+ sizeof(op)))) { -+ rc = -EFAULT; -+ goto get_offset_out; -+ } ++ if (copy_from_user(&op, (void __user *)arg, sizeof(op))) ++ return -EFAULT; ++ + vaddr = (unsigned long)op.vaddr; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, vaddr); -+ if (vma == NULL) { -+ rc = -EFAULT; -+ goto get_offset_unlock_out; -+ } -+ if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) { -+ printk(KERN_ERR "The vaddr specified does not belong " -+ "to a gntdev instance: %#lx\n", vaddr); ++ if (!vma || vma->vm_ops != &gntdev_vmops) { + rc = -EFAULT; -+ goto get_offset_unlock_out; ++ goto get_offset_out; + } + if (vma->vm_start != vaddr) { + printk(KERN_ERR "The vaddr specified in an " @@ -17857,45 +17813,31 @@ and in case upstream wants to take the forward porting patches: + "%#lx; vaddr = %#lx\n", + vma->vm_start, vaddr); + rc = -EFAULT; -+ goto get_offset_unlock_out; ++ goto get_offset_out; + } + op.offset = vma->vm_pgoff << PAGE_SHIFT; + op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; ++ get_offset_out: + up_read(¤t->mm->mmap_sem); -+ if ((rc = copy_to_user((void __user *) arg, -+ &op, -+ sizeof(op)))) { ++ if (!rc && copy_to_user((void __user *)arg, &op, sizeof(op))) + rc = -EFAULT; -+ goto get_offset_out; -+ } -+ goto get_offset_out; -+ get_offset_unlock_out: -+ up_read(¤t->mm->mmap_sem); -+ get_offset_out: + return rc; + } + case IOCTL_GNTDEV_SET_MAX_GRANTS: + { + struct ioctl_gntdev_set_max_grants op; -+ if ((rc = copy_from_user(&op, -+ (void __user *) arg, -+ sizeof(op)))) { -+ rc = -EFAULT; -+ goto set_max_out; -+ } ++ ++ if (copy_from_user(&op, (void __user *)arg, sizeof(op))) ++ return -EFAULT; ++ if (op.count > MAX_GRANTS_LIMIT) ++ return -EINVAL; ++ + down_write(&private_data->grants_sem); -+ if (private_data->grants) { ++ if (unlikely(private_data->grants)) + rc = -EBUSY; -+ goto set_max_unlock_out; -+ } -+ if (op.count > MAX_GRANTS_LIMIT) { -+ rc = -EINVAL; -+ goto set_max_unlock_out; -+ } -+ rc = init_private_data(private_data, op.count); -+ set_max_unlock_out: ++ else ++ rc = init_private_data(private_data, 
op.count); + up_write(&private_data->grants_sem); -+ set_max_out: + return rc; + } + default: @@ -17905,7 +17847,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netback/Makefile 2007-07-12 08:54:23.000000000 +0200 ++++ b/drivers/xen/netback/Makefile 2007-07-12 08:54:23.000000000 +0200 @@ -0,0 +1,5 @@ +obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o +obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o @@ -17913,7 +17855,7 @@ and in case upstream wants to take the forward porting patches: +netbk-y := netback.o xenbus.o interface.o accel.o +netloop-y := loopback.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netback/accel.c 2008-01-07 13:19:18.000000000 +0100 ++++ b/drivers/xen/netback/accel.c 2008-01-07 13:19:18.000000000 +0100 @@ -0,0 +1,269 @@ +/****************************************************************************** + * drivers/xen/netback/accel.c @@ -18185,8 +18127,8 @@ and in case upstream wants to take the forward porting patches: + INIT_LIST_HEAD(&accelerators_list); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netback/common.h 2010-02-24 13:13:46.000000000 +0100 -@@ -0,0 +1,220 @@ ++++ b/drivers/xen/netback/common.h 2011-02-17 09:58:10.000000000 +0100 +@@ -0,0 +1,226 @@ +/****************************************************************************** + * arch/xen/drivers/netif/backend/common.h + * @@ -18264,8 +18206,13 @@ and in case upstream wants to take the forward porting patches: + struct vm_struct *tx_comms_area; + struct vm_struct *rx_comms_area; + -+ /* Set of features that can be turned on in dev->features. */ -+ int features; ++ /* Flags that must not be set in dev->features */ ++ int features_disabled; ++ ++ /* Frontend feature information. */ ++ u8 can_sg:1; ++ u8 gso:1; ++ u8 csum:1; + + /* Internal feature information. */ + u8 can_queue:1; /* can queue packets for receiver? 
*/ @@ -18371,6 +18318,7 @@ and in case upstream wants to take the forward porting patches: + +void netif_disconnect(netif_t *netif); + ++void netif_set_features(netif_t *netif); +netif_t *netif_alloc(struct device *parent, domid_t domid, unsigned int handle); +int netif_map(netif_t *netif, unsigned long tx_ring_ref, + unsigned long rx_ring_ref, unsigned int evtchn); @@ -18403,13 +18351,13 @@ and in case upstream wants to take the forward porting patches: +static inline int netbk_can_sg(struct net_device *dev) +{ + netif_t *netif = netdev_priv(dev); -+ return netif->features & NETIF_F_SG; ++ return netif->can_sg; +} + +#endif /* __NETIF__BACKEND__COMMON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netback/interface.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,398 @@ ++++ b/drivers/xen/netback/interface.c 2011-02-17 09:58:10.000000000 +0100 +@@ -0,0 +1,434 @@ +/****************************************************************************** + * arch/xen/drivers/netif/backend/interface.c + * @@ -18505,28 +18453,69 @@ and in case upstream wants to take the forward porting patches: + return 0; +} + -+static int netbk_set_sg(struct net_device *dev, u32 data) ++void netif_set_features(netif_t *netif) ++{ ++ struct net_device *dev = netif->dev; ++ int features = dev->features; ++ ++ if (netif->can_sg) ++ features |= NETIF_F_SG; ++ if (netif->gso) ++ features |= NETIF_F_TSO; ++ if (netif->csum) ++ features |= NETIF_F_IP_CSUM; ++ ++ features &= ~(netif->features_disabled); ++ ++ if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) ++ dev->mtu = ETH_DATA_LEN; ++ ++ dev->features = features; ++} ++ ++static int netbk_set_tx_csum(struct net_device *dev, u32 data) +{ ++ netif_t *netif = netdev_priv(dev); + if (data) { -+ netif_t *netif = netdev_priv(dev); ++ if (!netif->csum) ++ return -ENOSYS; ++ netif->features_disabled &= ~NETIF_F_IP_CSUM; ++ } else { ++ netif->features_disabled |= NETIF_F_IP_CSUM; ++ } ++ ++ netif_set_features(netif); ++ return 0; ++} + -+ if (!(netif->features & NETIF_F_SG)) ++static int netbk_set_sg(struct net_device *dev, u32 data) ++{ ++ netif_t *netif = netdev_priv(dev); ++ if (data) { ++ if (!netif->can_sg) + return -ENOSYS; ++ netif->features_disabled &= ~NETIF_F_SG; ++ } else { ++ netif->features_disabled |= NETIF_F_SG; + } + -+ return ethtool_op_set_sg(dev, data); ++ netif_set_features(netif); ++ return 0; +} + +static int netbk_set_tso(struct net_device *dev, u32 data) +{ ++ netif_t *netif = netdev_priv(dev); + if (data) { -+ netif_t *netif = netdev_priv(dev); -+ -+ if (!(netif->features & NETIF_F_TSO)) ++ if (!netif->gso) + return -ENOSYS; ++ netif->features_disabled &= ~NETIF_F_TSO; ++ } else { ++ netif->features_disabled |= NETIF_F_TSO; + } + -+ return ethtool_op_set_tso(dev, data); ++ netif_set_features(netif); ++ return 0; +} + +static void netbk_get_drvinfo(struct net_device *dev, @@ -18576,7 +18565,7 @@ and in case upstream wants to take the forward porting patches: + .get_drvinfo = netbk_get_drvinfo, + + .get_tx_csum = ethtool_op_get_tx_csum, -+ .set_tx_csum = ethtool_op_set_tx_csum, ++ .set_tx_csum = netbk_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = netbk_set_sg, + .get_tso = ethtool_op_get_tso, @@ -18608,6 +18597,8 @@ and in case upstream wants to take the forward porting patches: + memset(netif, 0, sizeof(*netif)); + netif->domid = domid; + netif->handle = handle; ++ netif->can_sg = 1; ++ netif->csum = 1; + atomic_set(&netif->refcnt, 1); + init_waitqueue_head(&netif->waiting_to_free); + netif->dev = 
dev; @@ -18627,7 +18618,8 @@ and in case upstream wants to take the forward porting patches: + dev->open = net_open; + dev->stop = net_close; + dev->change_mtu = netbk_change_mtu; -+ dev->features = NETIF_F_IP_CSUM; ++ ++ netif_set_features(netif); + + SET_ETHTOOL_OPS(dev, &network_ethtool_ops); + @@ -18663,15 +18655,11 @@ and in case upstream wants to take the forward porting patches: + + gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr, + GNTMAP_host_map, tx_ring_ref, netif->domid); -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while(op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ if (op.status) { -+ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n"); -+ return op.status; ++ if (op.status != GNTST_okay) { ++ DPRINTK(" Gnttab failure mapping tx_ring_ref %d!\n", (int)op.status); ++ return -EINVAL; + } + + netif->tx_shmem_ref = tx_ring_ref; @@ -18679,13 +18667,9 @@ and in case upstream wants to take the forward porting patches: + + gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr, + GNTMAP_host_map, rx_ring_ref, netif->domid); -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while(op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ if (op.status) { ++ if (op.status != GNTST_okay) { + struct gnttab_unmap_grant_ref unop; + + gnttab_set_unmap_op(&unop, @@ -18693,8 +18677,8 @@ and in case upstream wants to take the forward porting patches: + GNTMAP_host_map, netif->tx_shmem_handle); + VOID(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + &unop, 1)); -+ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n"); -+ return op.status; ++ DPRINTK(" Gnttab failure mapping rx_ring_ref %d!\n", (int)op.status); ++ return -EINVAL; + } + + netif->rx_shmem_ref = rx_ring_ref; @@ -18809,8 +18793,8 @@ and in case upstream wants to take the forward porting patches: + free_netdev(netif->dev); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netback/loopback.c 2007-08-06 15:10:49.000000000 +0200 -@@ -0,0 +1,324 @@ ++++ b/drivers/xen/netback/loopback.c 2011-01-03 12:43:21.000000000 +0100 +@@ -0,0 +1,309 @@ +/****************************************************************************** + * netback/loopback.c + * @@ -18875,6 +18859,7 @@ and in case upstream wants to take the forward porting patches: +struct net_private { + struct net_device *loopback_dev; + struct net_device_stats stats; ++ int loop_idx; +}; + +static int loopback_open(struct net_device *dev) @@ -18994,8 +18979,17 @@ and in case upstream wants to take the forward porting patches: + return &np->stats; +} + ++static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) ++{ ++ strcpy(info->driver, "netloop"); ++ snprintf(info->bus_info, ETHTOOL_BUSINFO_LEN, "vif-0-%d", ++ ((struct net_private *)netdev_priv(dev))->loop_idx); ++} ++ +static struct ethtool_ops network_ethtool_ops = +{ ++ .get_drvinfo = get_drvinfo, ++ + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, + .get_sg = ethtool_op_get_sg, @@ -19013,11 +19007,13 @@ and in case upstream wants to take the forward porting patches: +{ +} + -+static void loopback_construct(struct net_device *dev, struct net_device *lo) ++static void loopback_construct(struct net_device *dev, struct net_device *lo, ++ int loop_idx) +{ + struct net_private *np = netdev_priv(dev); + + 
np->loopback_dev = lo; ++ np->loop_idx = loop_idx; + + dev->open = loopback_open; + dev->stop = loopback_close; @@ -19063,8 +19059,8 @@ and in case upstream wants to take the forward porting patches: + if (!dev2) + goto fail_netdev2; + -+ loopback_construct(dev1, dev2); -+ loopback_construct(dev2, dev1); ++ loopback_construct(dev1, dev2, i); ++ loopback_construct(dev2, dev1, i); + + /* + * Initialise a dummy MAC address for the 'dummy backend' interface. We @@ -19091,23 +19087,6 @@ and in case upstream wants to take the forward porting patches: + return err; +} + -+static void __exit clean_loopback(int i) -+{ -+ struct net_device *dev1, *dev2; -+ char dev_name[IFNAMSIZ]; -+ -+ sprintf(dev_name, "vif0.%d", i); -+ dev1 = dev_get_by_name(dev_name); -+ sprintf(dev_name, "veth%d", i); -+ dev2 = dev_get_by_name(dev_name); -+ if (dev1 && dev2) { -+ unregister_netdev(dev2); -+ unregister_netdev(dev1); -+ free_netdev(dev2); -+ free_netdev(dev1); -+ } -+} -+ +static int __init loopback_init(void) +{ + int i, err = 0; @@ -19124,20 +19103,10 @@ and in case upstream wants to take the forward porting patches: + +module_init(loopback_init); + -+static void __exit loopback_exit(void) -+{ -+ int i; -+ -+ for (i = nloopbacks; i-- > 0; ) -+ clean_loopback(i); -+} -+ -+module_exit(loopback_exit); -+ +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netback/netback.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,1691 @@ ++++ b/drivers/xen/netback/netback.c 2011-02-17 09:58:10.000000000 +0100 +@@ -0,0 +1,1700 @@ +/****************************************************************************** + * drivers/xen/netback/netback.c + * @@ -19175,6 +19144,8 @@ and in case upstream wants to take the forward porting patches: + */ + +#include "common.h" ++#include ++#include +#include +#include + @@ -19245,7 +19216,14 @@ and in case upstream wants to take the forward porting patches: + return idx; +} + -+#define PKT_PROT_LEN 64 ++/* ++ * This is the amount of packet we copy rather than map, so that the ++ * guest can't fiddle with the contents of the headers while we do ++ * packet processing on them (netfilter, routing, etc). ++ */ ++#define PKT_PROT_LEN (ETH_HLEN + VLAN_HLEN + \ ++ sizeof(struct iphdr) + MAX_IPOPTLEN + \ ++ sizeof(struct tcphdr) + 40 /* MAX_TCP_OPTION_SPACE */) + +static struct pending_tx_info { + netif_tx_request_t req; @@ -19400,7 +19378,7 @@ and in case upstream wants to take the forward porting patches: + +static inline int netbk_max_required_rx_slots(netif_t *netif) +{ -+ if (netif->features & (NETIF_F_SG|NETIF_F_TSO)) ++ if (netif->can_sg || netif->gso) + return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */ + return 1; /* all in one */ +} @@ -19634,8 +19612,7 @@ and in case upstream wants to take the forward porting patches: + used to set up the operations on the top of + netrx_pending_operations, which have since been done. Check that + they didn't give any errors and advance over them. 
*/ -+static int netbk_check_gop(int nr_frags, domid_t domid, -+ struct netrx_pending_operations *npo, int *eagain) ++static int netbk_check_gop(int nr_frags, domid_t domid, struct netrx_pending_operations *npo) +{ + multicall_entry_t *mcl; + gnttab_transfer_t *gop; @@ -19643,17 +19620,15 @@ and in case upstream wants to take the forward porting patches: + int status = NETIF_RSP_OKAY; + int i; + -+ *eagain = 0; -+ + for (i = 0; i <= nr_frags; i++) { + if (npo->meta[npo->meta_cons + i].copy) { + copy_op = npo->copy + npo->copy_cons++; -+ if (copy_op->status != GNTST_okay) { ++ if (unlikely(copy_op->status == GNTST_eagain)) ++ gnttab_check_GNTST_eagain_while(GNTTABOP_copy, copy_op); ++ if (unlikely(copy_op->status != GNTST_okay)) { + DPRINTK("Bad status %d from copy to DOM%d.\n", + copy_op->status, domid); + status = NETIF_RSP_ERROR; -+ if(copy_op->status == GNTST_eagain) -+ *eagain = 1; + } + } else { + if (!xen_feature(XENFEAT_auto_translated_physmap)) { @@ -19664,7 +19639,7 @@ and in case upstream wants to take the forward porting patches: + + gop = npo->trans + npo->trans_cons++; + /* Check the reassignment error code. */ -+ if (gop->status != 0) { ++ if (unlikely(gop->status != GNTST_okay)) { + DPRINTK("Bad status %d from grant transfer to DOM%u\n", + gop->status, domid); + /* @@ -19673,8 +19648,6 @@ and in case upstream wants to take the forward porting patches: + * a fatal error anyway. + */ + BUG_ON(gop->status == GNTST_bad_page); -+ if(gop->status == GNTST_eagain) -+ *eagain = 1; + status = NETIF_RSP_ERROR; + } + } @@ -19716,7 +19689,6 @@ and in case upstream wants to take the forward porting patches: + int nr_frags; + int count; + unsigned long offset; -+ int eagain; + + /* + * Putting hundreds of bytes on the stack is considered rude. @@ -19820,7 +19792,7 @@ and in case upstream wants to take the forward porting patches: + + netif = netdev_priv(skb->dev); + -+ status = netbk_check_gop(nr_frags, netif->domid, &npo, &eagain); ++ status = netbk_check_gop(nr_frags, netif->domid, &npo); + + /* We can't rely on skb_release_data to release the + pages used by fragments for us, since it tries to @@ -19831,22 +19803,14 @@ and in case upstream wants to take the forward porting patches: + /* (Freeing the fragments is safe since we copy + non-linear skbs destined for flipping interfaces) */ + if (!netif->copying_receiver) { -+ /* -+ * Cannot handle failed grant transfers at the moment (because -+ * mmu_updates likely completed) -+ */ -+ BUG_ON(eagain); + atomic_set(&(skb_shinfo(skb)->dataref), 1); + skb_shinfo(skb)->frag_list = NULL; + skb_shinfo(skb)->nr_frags = 0; + netbk_free_pages(nr_frags, meta + npo.meta_cons + 1); + } + -+ if(!eagain) -+ { -+ netif->stats.tx_bytes += skb->len; -+ netif->stats.tx_packets++; -+ } ++ netif->stats.tx_bytes += skb->len; ++ netif->stats.tx_packets++; + + id = meta[npo.meta_cons].id; + flags = nr_frags ? 
NETRXF_more_data : 0; @@ -19896,18 +19860,8 @@ and in case upstream wants to take the forward porting patches: + !netbk_queue_full(netif)) + netif_wake_queue(netif->dev); + -+ if(!eagain || netbk_queue_full(netif)) -+ { -+ netif_put(netif); -+ dev_kfree_skb(skb); -+ netif->stats.tx_dropped += !!eagain; -+ } -+ else -+ { -+ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 + -+ !!skb_shinfo(skb)->gso_size; -+ skb_queue_head(&rx_queue, skb); -+ } ++ netif_put(netif); ++ dev_kfree_skb(skb); + + npo.meta_cons += nr_frags + 1; + } @@ -19948,29 +19902,44 @@ and in case upstream wants to take the forward porting patches: + return netif->list.next != NULL; +} + ++/* Must be called with net_schedule_list_lock held. */ +static void remove_from_net_schedule_list(netif_t *netif) +{ -+ spin_lock_irq(&net_schedule_list_lock); + if (likely(__on_net_schedule_list(netif))) { + list_del(&netif->list); + netif->list.next = NULL; + netif_put(netif); + } ++} ++ ++static netif_t *poll_net_schedule_list(void) ++{ ++ netif_t *netif = NULL; ++ ++ spin_lock_irq(&net_schedule_list_lock); ++ if (!list_empty(&net_schedule_list)) { ++ netif = list_first_entry(&net_schedule_list, netif_t, list); ++ netif_get(netif); ++ remove_from_net_schedule_list(netif); ++ } + spin_unlock_irq(&net_schedule_list_lock); ++ return netif; +} + +static void add_to_net_schedule_list_tail(netif_t *netif) +{ ++ unsigned long flags; ++ + if (__on_net_schedule_list(netif)) + return; + -+ spin_lock_irq(&net_schedule_list_lock); ++ spin_lock_irqsave(&net_schedule_list_lock, flags); + if (!__on_net_schedule_list(netif) && + likely(netif_schedulable(netif))) { + list_add_tail(&netif->list, &net_schedule_list); + netif_get(netif); + } -+ spin_unlock_irq(&net_schedule_list_lock); ++ spin_unlock_irqrestore(&net_schedule_list_lock, flags); +} + +/* @@ -19999,7 +19968,9 @@ and in case upstream wants to take the forward porting patches: + +void netif_deschedule_work(netif_t *netif) +{ ++ spin_lock_irq(&net_schedule_list_lock); + remove_from_net_schedule_list(netif); ++ spin_unlock_irq(&net_schedule_list_lock); +} + + @@ -20062,7 +20033,6 @@ and in case upstream wants to take the forward porting patches: + u16 pending_idx; + PEND_RING_IDX dc, dp; + netif_t *netif; -+ int ret; + LIST_HEAD(list); + + dc = dealloc_cons; @@ -20097,11 +20067,17 @@ and in case upstream wants to take the forward porting patches: + gop++; + } + -+ if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB || -+ list_empty(&pending_inuse_head)) -+ break; ++ } while (dp != dealloc_prod); + -+ /* Copy any entries that have been pending for too long. */ ++ dealloc_cons = dc; ++ ++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ++ tx_unmap_ops, gop - tx_unmap_ops)) ++ BUG(); ++ ++ /* Copy any entries that have been pending for too long. */ ++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB && ++ !list_empty(&pending_inuse_head)) { + list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) { + if (time_after(inuse->alloc_time + HZ / 2, jiffies)) + break; @@ -20123,13 +20099,7 @@ and in case upstream wants to take the forward porting patches: + + break; + } -+ } while (dp != dealloc_prod); -+ -+ dealloc_cons = dc; -+ -+ ret = HYPERVISOR_grant_table_op( -+ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops); -+ BUG_ON(ret); ++ } + + list_for_each_entry_safe(inuse, n, &list, list) { + pending_idx = inuse - pending_inuse; @@ -20247,7 +20217,7 @@ and in case upstream wants to take the forward porting patches: + + /* Check status of header. 
*/ + err = mop->status; -+ if (unlikely(err)) { ++ if (unlikely(err != GNTST_okay)) { + txp = &pending_tx_info[pending_idx].req; + make_tx_response(netif, txp, NETIF_RSP_ERROR); + pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; @@ -20268,12 +20238,12 @@ and in case upstream wants to take the forward porting patches: + + /* Check error status: if okay then remember grant handle. */ + newerr = (++mop)->status; -+ if (likely(!newerr)) { ++ if (likely(newerr == GNTST_okay)) { + set_phys_to_machine(idx_to_pfn(pending_idx), + FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT)); + grant_tx_handle[pending_idx] = mop->handle; + /* Had a previous error? Invalidate this fragment. */ -+ if (unlikely(err)) ++ if (unlikely(err != GNTST_okay)) + netif_idx_release(pending_idx); + continue; + } @@ -20285,7 +20255,7 @@ and in case upstream wants to take the forward porting patches: + netif_put(netif); + + /* Not the first error? Preceding frags already invalidated. */ -+ if (err) ++ if (err != GNTST_okay) + continue; + + /* First error: invalidate header and preceding fragments. */ @@ -20386,7 +20356,6 @@ and in case upstream wants to take the forward porting patches: +/* Called after netfront has transmitted */ +static void net_tx_action(unsigned long unused) +{ -+ struct list_head *ent; + struct sk_buff *skb; + netif_t *netif; + netif_tx_request_t txreq; @@ -20404,10 +20373,9 @@ and in case upstream wants to take the forward porting patches: + while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && + !list_empty(&net_schedule_list)) { + /* Get a netif from the list with work to do. */ -+ ent = net_schedule_list.next; -+ netif = list_entry(ent, netif_t, list); -+ netif_get(netif); -+ remove_from_net_schedule_list(netif); ++ netif = poll_net_schedule_list(); ++ if (!netif) ++ continue; + + RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do); + if (!work_to_do) { @@ -20607,6 +20575,16 @@ and in case upstream wants to take the forward porting patches: + + netbk_fill_frags(skb); + ++ /* ++ * If the initial fragment was < PKT_PROT_LEN then ++ * pull through some bytes from the other fragments to ++ * increase the linear region to PKT_PROT_LEN bytes. 
++ */ ++ if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) { ++ int target = min_t(int, skb->len, PKT_PROT_LEN); ++ __pskb_pull_tail(skb, target - skb_headlen(skb)); ++ } ++ + skb->dev = netif->dev; + skb->protocol = eth_type_trans(skb, skb->dev); + @@ -20830,8 +20808,8 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netback/xenbus.c 2009-03-18 10:39:32.000000000 +0100 -@@ -0,0 +1,454 @@ ++++ b/drivers/xen/netback/xenbus.c 2011-02-17 09:58:10.000000000 +0100 +@@ -0,0 +1,455 @@ +/* Xenbus code for netif backend + Copyright (C) 2005 Rusty Russell + Copyright (C) 2005 XenSource Ltd @@ -20866,6 +20844,7 @@ and in case upstream wants to take the forward porting patches: +static int connect_rings(struct backend_info *); +static void connect(struct backend_info *); +static void backend_create_netif(struct backend_info *be); ++static void netback_disconnect(struct device *); + +static int netback_remove(struct xenbus_device *dev) +{ @@ -20873,16 +20852,22 @@ and in case upstream wants to take the forward porting patches: + + netback_remove_accelerators(be, dev); + -+ if (be->netif) { -+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); -+ netif_disconnect(be->netif); -+ be->netif = NULL; -+ } ++ netback_disconnect(&dev->dev); + kfree(be); + dev->dev.driver_data = NULL; + return 0; +} + ++static void netback_disconnect(struct device *xbdev_dev) ++{ ++ struct backend_info *be = xbdev_dev->driver_data; ++ ++ if (be->netif) { ++ kobject_uevent(&xbdev_dev->kobj, KOBJ_OFFLINE); ++ netif_disconnect(be->netif); ++ be->netif = NULL; ++ } ++} + +/** + * Entry point to this code when a new device is created. Allocate the basic @@ -21068,17 +21053,15 @@ and in case upstream wants to take the forward porting patches: + case XenbusStateConnected: + if (dev->state == XenbusStateConnected) + break; ++ ++ /* backend_create_netif() is idempotent */ + backend_create_netif(be); + if (be->netif) + connect(be); + break; + + case XenbusStateClosing: -+ if (be->netif) { -+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); -+ netif_disconnect(be->netif); -+ be->netif = NULL; -+ } ++ netback_disconnect(&dev->dev); + xenbus_switch_state(dev, XenbusStateClosing); + break; + @@ -21088,6 +21071,7 @@ and in case upstream wants to take the forward porting patches: + break; + /* fall through if not online */ + case XenbusStateUnknown: ++ /* implies netback_disconnect() via netback_remove() */ + device_unregister(&dev->dev); + break; + @@ -21184,6 +21168,7 @@ and in case upstream wants to take the forward porting patches: + +static int connect_rings(struct backend_info *be) +{ ++ netif_t *netif = be->netif; + struct xenbus_device *dev = be->dev; + unsigned long tx_ring_ref, rx_ring_ref; + unsigned int evtchn, rx_copy; @@ -21214,44 +21199,38 @@ and in case upstream wants to take the forward porting patches: + dev->otherend); + return err; + } -+ be->netif->copying_receiver = !!rx_copy; ++ netif->copying_receiver = !!rx_copy; + -+ if (be->netif->dev->tx_queue_len != 0) { ++ if (netif->dev->tx_queue_len != 0) { + if (xenbus_scanf(XBT_NIL, dev->otherend, + "feature-rx-notify", "%d", &val) < 0) + val = 0; + if (val) -+ be->netif->can_queue = 1; ++ netif->can_queue = 1; + else + /* Must be non-zero for pfifo_fast to work. 
*/ -+ be->netif->dev->tx_queue_len = 1; ++ netif->dev->tx_queue_len = 1; + } + + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0) + val = 0; -+ if (val) { -+ be->netif->features |= NETIF_F_SG; -+ be->netif->dev->features |= NETIF_F_SG; -+ } ++ netif->can_sg = !!val; + + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d", + &val) < 0) + val = 0; -+ if (val) { -+ be->netif->features |= NETIF_F_TSO; -+ be->netif->dev->features |= NETIF_F_TSO; -+ } ++ netif->gso = !!val; + + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", + "%d", &val) < 0) + val = 0; -+ if (val) { -+ be->netif->features &= ~NETIF_F_IP_CSUM; -+ be->netif->dev->features &= ~NETIF_F_IP_CSUM; -+ } ++ netif->csum = !val; ++ ++ /* Set dev->features */ ++ netif_set_features(netif); + + /* Map the shared frame, irq etc. */ -+ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn); ++ err = netif_map(netif, tx_ring_ref, rx_ring_ref, evtchn); + if (err) { + xenbus_dev_fatal(dev, err, + "mapping shared-frames %lu/%lu port %u", @@ -21287,14 +21266,14 @@ and in case upstream wants to take the forward porting patches: + xenbus_register_backend(&netback); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netfront/Makefile 2007-07-12 08:54:23.000000000 +0200 ++++ b/drivers/xen/netfront/Makefile 2007-07-12 08:54:23.000000000 +0200 @@ -0,0 +1,4 @@ + +obj-$(CONFIG_XEN_NETDEV_FRONTEND) := xennet.o + +xennet-objs := netfront.o accel.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netfront/accel.c 2009-05-04 10:01:03.000000000 +0200 ++++ b/drivers/xen/netfront/accel.c 2009-05-04 10:01:03.000000000 +0200 @@ -0,0 +1,827 @@ +/****************************************************************************** + * Virtual network driver for conversing with remote driver backends. @@ -22124,8 +22103,8 @@ and in case upstream wants to take the forward porting patches: +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netfront/netfront.c 2009-04-07 13:58:48.000000000 +0200 -@@ -0,0 +1,2247 @@ ++++ b/drivers/xen/netfront/netfront.c 2010-11-25 09:36:37.000000000 +0100 +@@ -0,0 +1,2255 @@ +/****************************************************************************** + * Virtual network driver for conversing with remote driver backends. + * @@ -23900,6 +23879,13 @@ and in case upstream wants to take the forward porting patches: + xennet_set_tso(dev, 1); +} + ++static void netfront_get_drvinfo(struct net_device *dev, ++ struct ethtool_drvinfo *info) ++{ ++ strcpy(info->driver, "netfront"); ++ strcpy(info->bus_info, dev->class_dev.dev->bus_id); ++} ++ +static int network_connect(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); @@ -24008,6 +23994,7 @@ and in case upstream wants to take the forward porting patches: + +static struct ethtool_ops network_ethtool_ops = +{ ++ .get_drvinfo = netfront_get_drvinfo, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, + .get_sg = ethtool_op_get_sg, @@ -24339,7 +24326,7 @@ and in case upstream wants to take the forward porting patches: + } + + if (!MODPARM_rx_flip && !MODPARM_rx_copy) -+ MODPARM_rx_flip = 1; /* Default is to flip. */ ++ MODPARM_rx_copy = 1; /* Default is to copy. 
*/ +#endif + + netif_init_accel(); @@ -24374,7 +24361,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/netfront/netfront.h 2010-02-24 13:13:46.000000000 +0100 ++++ b/drivers/xen/netfront/netfront.h 2010-02-24 13:13:46.000000000 +0100 @@ -0,0 +1,274 @@ +/****************************************************************************** + * Virtual network driver for conversing with remote driver backends. @@ -24651,7 +24638,7 @@ and in case upstream wants to take the forward porting patches: + struct xenbus_device *dev); +#endif /* NETFRONT_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/Makefile 2008-07-21 11:00:33.000000000 +0200 ++++ b/drivers/xen/pciback/Makefile 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,17 @@ +obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o + @@ -24671,7 +24658,7 @@ and in case upstream wants to take the forward porting patches: +EXTRA_CFLAGS += -DDEBUG +endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space.c 2009-05-04 10:01:03.000000000 +0200 ++++ b/drivers/xen/pciback/conf_space.c 2009-05-04 10:01:03.000000000 +0200 @@ -0,0 +1,435 @@ +/* + * PCI Backend - Functions for creating a virtual configuration space for @@ -25109,7 +25096,7 @@ and in case upstream wants to take the forward porting patches: + return pciback_config_capability_init(); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space.h 2008-10-29 09:55:56.000000000 +0100 ++++ b/drivers/xen/pciback/conf_space.h 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,126 @@ +/* + * PCI Backend - Common data structures for overriding the configuration space @@ -25238,7 +25225,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_capability.c 2008-10-29 09:55:56.000000000 +0100 ++++ b/drivers/xen/pciback/conf_space_capability.c 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,69 @@ +/* + * PCI Backend - Handles the virtual fields found on the capability lists @@ -25310,7 +25297,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_capability.h 2008-10-29 09:55:56.000000000 +0100 ++++ b/drivers/xen/pciback/conf_space_capability.h 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,23 @@ +/* + * PCI Backend - Data structures for special overlays for structures on @@ -25336,7 +25323,7 @@ and in case upstream wants to take the forward porting patches: + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_capability_msi.c 2008-09-15 13:40:15.000000000 +0200 ++++ b/drivers/xen/pciback/conf_space_capability_msi.c 2008-09-15 13:40:15.000000000 +0200 @@ -0,0 +1,79 @@ +/* + * PCI Backend -- Configuration overlay for MSI capability @@ -25418,7 +25405,7 @@ and in case upstream wants to take the forward porting patches: +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_capability_pm.c 2008-10-29 09:55:56.000000000 +0100 ++++ b/drivers/xen/pciback/conf_space_capability_pm.c 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,126 @@ +/* + * PCI Backend - 
Configuration space overlay for power management @@ -25547,7 +25534,7 @@ and in case upstream wants to take the forward porting patches: + .fields = caplist_pm, +}; --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_capability_vpd.c 2008-10-29 09:55:56.000000000 +0100 ++++ b/drivers/xen/pciback/conf_space_capability_vpd.c 2008-10-29 09:55:56.000000000 +0100 @@ -0,0 +1,40 @@ +/* + * PCI Backend - Configuration space overlay for Vital Product Data @@ -25590,7 +25577,7 @@ and in case upstream wants to take the forward porting patches: + .fields = caplist_vpd, +}; --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_header.c 2010-03-02 09:56:10.000000000 +0100 ++++ b/drivers/xen/pciback/conf_space_header.c 2010-03-02 09:56:10.000000000 +0100 @@ -0,0 +1,378 @@ +/* + * PCI Backend - Handles the virtual fields in the configuration space headers. @@ -25971,7 +25958,7 @@ and in case upstream wants to take the forward porting patches: + return err; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_quirks.c 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/pciback/conf_space_quirks.c 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,138 @@ +/* + * PCI Backend - Handle special overlays for broken devices. @@ -26112,7 +26099,7 @@ and in case upstream wants to take the forward porting patches: + return ret; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/conf_space_quirks.h 2007-06-12 13:13:45.000000000 +0200 ++++ b/drivers/xen/pciback/conf_space_quirks.h 2007-06-12 13:13:45.000000000 +0200 @@ -0,0 +1,35 @@ +/* + * PCI Backend - Data structures for special overlays for broken devices. @@ -26150,7 +26137,7 @@ and in case upstream wants to take the forward porting patches: + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/controller.c 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/pciback/controller.c 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,443 @@ +/* + * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. 
@@ -26596,7 +26583,7 @@ and in case upstream wants to take the forward porting patches: +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/passthrough.c 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/pciback/passthrough.c 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,176 @@ +/* + * PCI Backend - Provides restricted access to the real PCI bus topology @@ -26775,8 +26762,8 @@ and in case upstream wants to take the forward porting patches: + return 1; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/pci_stub.c 2009-06-09 15:01:37.000000000 +0200 -@@ -0,0 +1,1316 @@ ++++ b/drivers/xen/pciback/pci_stub.c 2011-03-02 12:00:16.000000000 +0100 +@@ -0,0 +1,1311 @@ +/* + * PCI Stub Driver - Grabs devices in backend to be exported later + * @@ -27774,8 +27761,7 @@ and in case upstream wants to take the forward porting patches: + err = count; + return err; +} -+ -+DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add); ++static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add); + +static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf, + size_t count) @@ -27794,8 +27780,7 @@ and in case upstream wants to take the forward porting patches: + err = count; + return err; +} -+ -+DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove); ++static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove); + +static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf) +{ @@ -27818,8 +27803,7 @@ and in case upstream wants to take the forward porting patches: + + return count; +} -+ -+DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL); ++static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL); + +static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf, + size_t count) @@ -27883,8 +27867,7 @@ and in case upstream wants to take the forward porting patches: + + return count; +} -+ -+DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add); ++static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add); + +static ssize_t permissive_add(struct device_driver *drv, const char *buf, + size_t count) @@ -27949,8 +27932,7 @@ and in case upstream wants to take the forward porting patches: + spin_unlock_irqrestore(&pcistub_devices_lock, flags); + return count; +} -+ -+DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add); ++static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add); + +#ifdef CONFIG_PCI_MSI + @@ -28094,7 +28076,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/pciback.h 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/pciback/pciback.h 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,126 @@ +/* + * PCI Backend Common Data Structures & Function Declarations @@ -28223,8 +28205,8 @@ and in case upstream wants to take the forward porting patches: +#endif + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/pciback_ops.c 2009-03-18 10:39:32.000000000 +0100 -@@ -0,0 +1,134 @@ ++++ b/drivers/xen/pciback/pciback_ops.c 2011-02-17 09:58:10.000000000 +0100 +@@ -0,0 +1,142 @@ +/* + * PCI Backend Operations - respond to PCI requests from Frontend + * @@ -28249,6 +28231,14 @@ and in case upstream wants to take the forward porting patches: + + /* Disable devices (but not bridges) 
*/ + if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) { ++#ifdef CONFIG_PCI_MSI ++ /* The guest could have been abruptly killed without ++ * disabling MSI/MSI-X interrupts.*/ ++ if (dev->msix_enabled) ++ pci_disable_msix(dev); ++ if (dev->msi_enabled) ++ pci_disable_msi(dev); ++#endif + pci_disable_device(dev); + + pci_write_config_word(dev, PCI_COMMAND, 0); @@ -28360,7 +28350,7 @@ and in case upstream wants to take the forward porting patches: + return IRQ_HANDLED; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/slot.c 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/pciback/slot.c 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,187 @@ +/* + * PCI Backend - Provides a Virtual PCI bus (with real devices) @@ -28550,7 +28540,7 @@ and in case upstream wants to take the forward porting patches: + +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/vpci.c 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/pciback/vpci.c 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,242 @@ +/* + * PCI Backend - Provides a Virtual PCI bus (with real devices) @@ -28795,7 +28785,7 @@ and in case upstream wants to take the forward porting patches: + return found; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pciback/xenbus.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/pciback/xenbus.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,710 @@ +/* + * PCI Backend Xenbus Setup - handles setup with frontend and xend @@ -29508,7 +29498,7 @@ and in case upstream wants to take the forward porting patches: + xenbus_unregister_driver(&xenbus_pciback_driver); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pcifront/Makefile 2007-06-12 13:13:45.000000000 +0200 ++++ b/drivers/xen/pcifront/Makefile 2007-06-12 13:13:45.000000000 +0200 @@ -0,0 +1,7 @@ +obj-y += pcifront.o + @@ -29518,7 +29508,7 @@ and in case upstream wants to take the forward porting patches: +EXTRA_CFLAGS += -DDEBUG +endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pcifront/pci.c 2007-06-12 13:13:45.000000000 +0200 ++++ b/drivers/xen/pcifront/pci.c 2007-06-12 13:13:45.000000000 +0200 @@ -0,0 +1,46 @@ +/* + * PCI Frontend Operations - ensure only one PCI frontend runs at a time @@ -29567,8 +29557,8 @@ and in case upstream wants to take the forward porting patches: + spin_unlock(&pcifront_dev_lock); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pcifront/pci_op.c 2009-03-18 10:39:32.000000000 +0100 -@@ -0,0 +1,666 @@ ++++ b/drivers/xen/pcifront/pci_op.c 2010-11-25 09:36:37.000000000 +0100 +@@ -0,0 +1,670 @@ +/* + * PCI Frontend Operations - Communicates with frontend + * @@ -30100,14 +30090,18 @@ and in case upstream wants to take the forward porting patches: + } + + d = pci_scan_single_device(b, devfn); -+ if (d) { ++ if (d) + dev_info(&pdev->xdev->dev, "New device on " + "%04x:%02x:%02x.%02x found.\n", domain, bus, + PCI_SLOT(devfn), PCI_FUNC(devfn)); -+ pci_bus_add_device(d); -+ } + } + ++ /* Claim resources before going "live" with our devices */ ++ pci_walk_bus(b, pcifront_claim_resource, pdev); ++ ++ /* Create SysFS and notify udev of the devices. 
Aka: "going live" */ ++ pci_bus_add_devices(b); ++ + return 0; +} + @@ -30159,9 +30153,9 @@ and in case upstream wants to take the forward porting patches: + result = PCI_ERS_RESULT_NONE; + + pcidev = pci_get_bus_and_slot(bus, devfn); -+ if (!pcidev || !pcidev->driver){ -+ dev_err(&pcidev->dev, -+ "device or driver is NULL\n"); ++ if (!pcidev || !pcidev->driver) { ++ pci_dev_put(pcidev); ++ dev_err(&pdev->xdev->dev, "AER device or driver is NULL\n"); + return result; + } + pdrv = pcidev->driver; @@ -30236,8 +30230,8 @@ and in case upstream wants to take the forward porting patches: + return IRQ_HANDLED; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pcifront/pcifront.h 2009-03-18 10:39:32.000000000 +0100 -@@ -0,0 +1,55 @@ ++++ b/drivers/xen/pcifront/pcifront.h 2010-10-05 09:58:12.000000000 +0200 +@@ -0,0 +1,56 @@ +/* + * PCI Frontend - Common data structures & function declarations + * @@ -30270,6 +30264,7 @@ and in case upstream wants to take the forward porting patches: + + int evtchn; + int gnt_ref; ++ int irq; + + /* Lock this when doing any operations in sh_info */ + spinlock_t sh_info_lock; @@ -30294,8 +30289,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __XEN_PCIFRONT_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/pcifront/xenbus.c 2009-04-07 13:58:48.000000000 +0200 -@@ -0,0 +1,468 @@ ++++ b/drivers/xen/pcifront/xenbus.c 2010-10-05 09:58:12.000000000 +0200 +@@ -0,0 +1,483 @@ +/* + * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn) + * @@ -30346,6 +30341,7 @@ and in case upstream wants to take the forward porting patches: + + pdev->evtchn = INVALID_EVTCHN; + pdev->gnt_ref = INVALID_GRANT_REF; ++ pdev->irq = -1; + + INIT_WORK(&pdev->op_work, pcifront_do_aer, pdev); + @@ -30363,7 +30359,9 @@ and in case upstream wants to take the forward porting patches: + + /*For PCIE_AER error handling job*/ + flush_scheduled_work(); -+ unbind_from_irqhandler(pdev->evtchn, pdev); ++ ++ if (pdev->irq > 0) ++ unbind_from_irqhandler(pdev->irq, pdev); + + if (pdev->evtchn != INVALID_EVTCHN) + xenbus_free_evtchn(pdev->xdev, pdev->evtchn); @@ -30371,6 +30369,8 @@ and in case upstream wants to take the forward porting patches: + if (pdev->gnt_ref != INVALID_GRANT_REF) + gnttab_end_foreign_access(pdev->gnt_ref, + (unsigned long)pdev->sh_info); ++ else ++ free_page((unsigned long)pdev->sh_info); + + pdev->xdev->dev.driver_data = NULL; + @@ -30392,8 +30392,16 @@ and in case upstream wants to take the forward porting patches: + if (err) + goto out; + -+ bind_caller_port_to_irqhandler(pdev->evtchn, pcifront_handler_aer, -+ SA_SAMPLE_RANDOM, "pcifront", pdev); ++ err = bind_caller_port_to_irqhandler(pdev->evtchn, ++ pcifront_handler_aer, ++ SA_SAMPLE_RANDOM, ++ "pcifront", pdev); ++ if (err < 0) { ++ xenbus_dev_fatal(pdev->xdev, err, ++ "Failed to bind event channel"); ++ goto out; ++ } ++ pdev->irq = err; + + do_publish: + err = xenbus_transaction_start(&trans); @@ -30726,6 +30734,8 @@ and in case upstream wants to take the forward porting patches: + } + + err = pcifront_publish_info(pdev); ++ if (err) ++ free_pdev(pdev); + + out: + return err; @@ -30765,13 +30775,13 @@ and in case upstream wants to take the forward porting patches: +/* Initialize after the Xen PCI Frontend Stub is initialized */ +subsys_initcall(pcifront_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/privcmd/Makefile 2007-07-10 09:42:30.000000000 +0200 ++++ 
b/drivers/xen/privcmd/Makefile 2007-07-10 09:42:30.000000000 +0200 @@ -0,0 +1,3 @@ + +obj-y += privcmd.o +obj-$(CONFIG_COMPAT) += compat_privcmd.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/privcmd/compat_privcmd.c 2010-01-27 14:01:48.000000000 +0100 ++++ b/drivers/xen/privcmd/compat_privcmd.c 2010-01-27 14:01:48.000000000 +0100 @@ -0,0 +1,144 @@ +/* + * This program is free software; you can redistribute it and/or modify @@ -30918,7 +30928,7 @@ and in case upstream wants to take the forward porting patches: + return ret; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/privcmd/privcmd.c 2010-01-27 14:01:48.000000000 +0100 ++++ b/drivers/xen/privcmd/privcmd.c 2010-01-27 14:01:48.000000000 +0100 @@ -0,0 +1,491 @@ +/****************************************************************************** + * privcmd.c @@ -31412,14 +31422,14 @@ and in case upstream wants to take the forward porting patches: + +__initcall(privcmd_init); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsiback/Makefile 2008-07-21 11:00:33.000000000 +0200 ++++ b/drivers/xen/scsiback/Makefile 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,4 @@ +obj-$(CONFIG_XEN_SCSI_BACKEND) := xen-scsibk.o + +xen-scsibk-y := interface.o scsiback.o xenbus.o translate.o emulate.o + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsiback/common.h 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/scsiback/common.h 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2008, FUJITSU Limited @@ -31608,8 +31618,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __SCSIIF__BACKEND__COMMON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsiback/emulate.c 2009-03-18 10:39:32.000000000 +0100 -@@ -0,0 +1,474 @@ ++++ b/drivers/xen/scsiback/emulate.c 2011-02-02 12:19:11.000000000 +0100 +@@ -0,0 +1,484 @@ +/* + * Xen SCSI backend driver + * @@ -31640,6 +31650,11 @@ and in case upstream wants to take the forward porting patches: + * IN THE SOFTWARE. + */ + ++/* ++* Patched to support >2TB drives + allow tape & autoloader operations ++* 2010, Samuel Kvasnica, IMS Nanofabrication AG ++*/ ++ +#include +#include +#include @@ -31996,85 +32011,90 @@ and in case upstream wants to take the forward porting patches: + /* + Following commands do not require emulation. 
+ */ -+ NO_EMULATE(TEST_UNIT_READY); /*0x00*/ -+ NO_EMULATE(REZERO_UNIT); /*0x01*/ ++ NO_EMULATE(TEST_UNIT_READY); /*0x00*/ /* sd,st */ ++ NO_EMULATE(REZERO_UNIT); /*0x01*/ /* st */ + NO_EMULATE(REQUEST_SENSE); /*0x03*/ + NO_EMULATE(FORMAT_UNIT); /*0x04*/ -+ NO_EMULATE(READ_BLOCK_LIMITS); /*0x05*/ ++ NO_EMULATE(READ_BLOCK_LIMITS); /*0x05*/ /* st */ + /*NO_EMULATE(REASSIGN_BLOCKS); *//*0x07*/ -+ /*NO_EMULATE(INITIALIZE_ELEMENT_STATUS); *//*0x07*/ -+ NO_EMULATE(READ_6); /*0x08*/ -+ NO_EMULATE(WRITE_6); /*0x0a*/ -+ /*NO_EMULATE(SEEK_6); *//*0x0b*/ ++ NO_EMULATE(INITIALIZE_ELEMENT_STATUS); /*0x07*/ /* ch */ ++ NO_EMULATE(READ_6); /*0x08*/ /* sd,st */ ++ NO_EMULATE(WRITE_6); /*0x0a*/ /* sd,st */ ++ NO_EMULATE(SEEK_6); /*0x0b*/ + /*NO_EMULATE(READ_REVERSE); *//*0x0f*/ -+ NO_EMULATE(WRITE_FILEMARKS); /*0x10*/ -+ NO_EMULATE(SPACE); /*0x11*/ ++ NO_EMULATE(WRITE_FILEMARKS); /*0x10*/ /* st */ ++ NO_EMULATE(SPACE); /*0x11*/ /* st */ + NO_EMULATE(INQUIRY); /*0x12*/ + /*NO_EMULATE(RECOVER_BUFFERED_DATA); *//*0x14*/ -+ /*NO_EMULATE(MODE_SELECT); *//*0x15*/ ++ NO_EMULATE(MODE_SELECT); /*0x15*/ /* st */ + /*NO_EMULATE(RESERVE); *//*0x16*/ + /*NO_EMULATE(RELEASE); *//*0x17*/ + /*NO_EMULATE(COPY); *//*0x18*/ -+ NO_EMULATE(ERASE); /*0x19*/ -+ NO_EMULATE(MODE_SENSE); /*0x1a*/ -+ /*NO_EMULATE(START_STOP); *//*0x1b*/ -+ /*NO_EMULATE(RECEIVE_DIAGNOSTIC); *//*0x1c*/ ++ NO_EMULATE(ERASE); /*0x19*/ /* st */ ++ NO_EMULATE(MODE_SENSE); /*0x1a*/ /* st */ ++ NO_EMULATE(START_STOP); /*0x1b*/ /* sd,st */ ++ NO_EMULATE(RECEIVE_DIAGNOSTIC); /*0x1c*/ + NO_EMULATE(SEND_DIAGNOSTIC); /*0x1d*/ -+ /*NO_EMULATE(ALLOW_MEDIUM_REMOVAL); *//*0x1e*/ ++ NO_EMULATE(ALLOW_MEDIUM_REMOVAL); /*0x1e*/ + + /*NO_EMULATE(SET_WINDOW); *//*0x24*/ -+ NO_EMULATE(READ_CAPACITY); /*0x25*/ -+ NO_EMULATE(READ_10); /*0x28*/ -+ NO_EMULATE(WRITE_10); /*0x2a*/ -+ /*NO_EMULATE(SEEK_10); *//*0x2b*/ -+ /*NO_EMULATE(POSITION_TO_ELEMENT); *//*0x2b*/ ++ NO_EMULATE(READ_CAPACITY); /*0x25*/ /* sd */ ++ NO_EMULATE(READ_10); /*0x28*/ /* sd */ ++ NO_EMULATE(WRITE_10); /*0x2a*/ /* sd */ ++ NO_EMULATE(SEEK_10); /*0x2b*/ /* st */ ++ NO_EMULATE(POSITION_TO_ELEMENT); /*0x2b*/ /* ch */ + /*NO_EMULATE(WRITE_VERIFY); *//*0x2e*/ + /*NO_EMULATE(VERIFY); *//*0x2f*/ + /*NO_EMULATE(SEARCH_HIGH); *//*0x30*/ + /*NO_EMULATE(SEARCH_EQUAL); *//*0x31*/ + /*NO_EMULATE(SEARCH_LOW); *//*0x32*/ -+ /*NO_EMULATE(SET_LIMITS); *//*0x33*/ -+ /*NO_EMULATE(PRE_FETCH); *//*0x34*/ -+ /*NO_EMULATE(READ_POSITION); *//*0x34*/ -+ /*NO_EMULATE(SYNCHRONIZE_CACHE); *//*0x35*/ -+ /*NO_EMULATE(LOCK_UNLOCK_CACHE); *//*0x36*/ -+ /*NO_EMULATE(READ_DEFECT_DATA); *//*0x37*/ -+ /*NO_EMULATE(MEDIUM_SCAN); *//*0x38*/ ++ NO_EMULATE(SET_LIMITS); /*0x33*/ ++ NO_EMULATE(PRE_FETCH); /*0x34*/ /* st! 
*/ ++ NO_EMULATE(READ_POSITION); /*0x34*/ /* st */ ++ NO_EMULATE(SYNCHRONIZE_CACHE); /*0x35*/ /* sd */ ++ NO_EMULATE(LOCK_UNLOCK_CACHE); /*0x36*/ ++ NO_EMULATE(READ_DEFECT_DATA); /*0x37*/ ++ NO_EMULATE(MEDIUM_SCAN); /*0x38*/ + /*NO_EMULATE(COMPARE); *//*0x39*/ + /*NO_EMULATE(COPY_VERIFY); *//*0x3a*/ -+ /*NO_EMULATE(WRITE_BUFFER); *//*0x3b*/ -+ /*NO_EMULATE(READ_BUFFER); *//*0x3c*/ ++ NO_EMULATE(WRITE_BUFFER); /*0x3b*/ ++ NO_EMULATE(READ_BUFFER); /*0x3c*/ /* osst */ + /*NO_EMULATE(UPDATE_BLOCK); *//*0x3d*/ + /*NO_EMULATE(READ_LONG); *//*0x3e*/ + /*NO_EMULATE(WRITE_LONG); *//*0x3f*/ + /*NO_EMULATE(CHANGE_DEFINITION); *//*0x40*/ + /*NO_EMULATE(WRITE_SAME); *//*0x41*/ -+ /*NO_EMULATE(READ_TOC); *//*0x43*/ -+ /*NO_EMULATE(LOG_SELECT); *//*0x4c*/ -+ /*NO_EMULATE(LOG_SENSE); *//*0x4d*/ ++ NO_EMULATE(READ_TOC); /*0x43*/ /* sr */ ++ NO_EMULATE(LOG_SELECT); /*0x4c*/ ++ NO_EMULATE(LOG_SENSE); /*0x4d*/ /* st! */ + /*NO_EMULATE(MODE_SELECT_10); *//*0x55*/ + /*NO_EMULATE(RESERVE_10); *//*0x56*/ + /*NO_EMULATE(RELEASE_10); *//*0x57*/ -+ /*NO_EMULATE(MODE_SENSE_10); *//*0x5a*/ ++ NO_EMULATE(MODE_SENSE_10); /*0x5a*/ /* scsi_lib */ + /*NO_EMULATE(PERSISTENT_RESERVE_IN); *//*0x5e*/ + /*NO_EMULATE(PERSISTENT_RESERVE_OUT); *//*0x5f*/ + /* REPORT_LUNS *//*0xa0*//*Full emulaiton*/ -+ /*NO_EMULATE(MOVE_MEDIUM); *//*0xa5*/ -+ /*NO_EMULATE(EXCHANGE_MEDIUM); *//*0xa6*/ ++#ifdef MAINTENANCE_IN ++ NO_EMULATE(MAINTENANCE_IN); /*0xa3*/ /* IFT alua */ ++ NO_EMULATE(MAINTENANCE_OUT); /*0xa4*/ /* IFT alua */ ++#endif ++ NO_EMULATE(MOVE_MEDIUM); /*0xa5*/ /* ch */ ++ NO_EMULATE(EXCHANGE_MEDIUM); /*0xa6*/ /* ch */ + /*NO_EMULATE(READ_12); *//*0xa8*/ + /*NO_EMULATE(WRITE_12); *//*0xaa*/ + /*NO_EMULATE(WRITE_VERIFY_12); *//*0xae*/ + /*NO_EMULATE(SEARCH_HIGH_12); *//*0xb0*/ + /*NO_EMULATE(SEARCH_EQUAL_12); *//*0xb1*/ + /*NO_EMULATE(SEARCH_LOW_12); *//*0xb2*/ -+ /*NO_EMULATE(READ_ELEMENT_STATUS); *//*0xb8*/ -+ /*NO_EMULATE(SEND_VOLUME_TAG); *//*0xb6*/ ++ NO_EMULATE(READ_ELEMENT_STATUS); /*0xb8*/ /* ch */ ++ NO_EMULATE(SEND_VOLUME_TAG); /*0xb6*/ /* ch */ + /*NO_EMULATE(WRITE_LONG_2); *//*0xea*/ -+ /*NO_EMULATE(READ_16); *//*0x88*/ -+ /*NO_EMULATE(WRITE_16); *//*0x8a*/ -+ /*NO_EMULATE(VERIFY_16); *//*0x8f*/ -+ /*NO_EMULATE(SERVICE_ACTION_IN); *//*0x9e*/ ++ NO_EMULATE(READ_16); /*0x88*/ /* sd >2TB */ ++ NO_EMULATE(WRITE_16); /*0x8a*/ /* sd >2TB */ ++ NO_EMULATE(VERIFY_16); /*0x8f*/ ++ NO_EMULATE(SERVICE_ACTION_IN); /*0x9e*/ /* sd >2TB */ + ++/* st: QFA_REQUEST_BLOCK, QFA_SEEK_BLOCK might be needed ? */ + /* + Following commands require emulation. + */ @@ -32085,8 +32105,8 @@ and in case upstream wants to take the forward porting patches: + return; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsiback/interface.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,186 @@ ++++ b/drivers/xen/scsiback/interface.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,182 @@ +/* + * interface management. 
+ * @@ -32153,27 +32173,23 @@ and in case upstream wants to take the forward porting patches: + unsigned long ring_ref) +{ + struct gnttab_map_grant_ref op; -+ int err; ++ int ret; + + gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr, + GNTMAP_host_map, ring_ref, + info->domid); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ do { -+ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1); -+ BUG_ON(err); -+ msleep(10); -+ } while(op.status == GNTST_eagain); -+ -+ if (op.status) { -+ printk(KERN_ERR "scsiback: Grant table operation failure !\n"); -+ return op.status; ++ if (op.status != GNTST_okay) { ++ printk(KERN_ERR "scsiback: Grant table operation failure %d!\n", (int)op.status); ++ ret = -EINVAL; ++ } else { ++ info->shmem_ref = ring_ref; ++ info->shmem_handle = op.handle; ++ ret = 0; + } + -+ info->shmem_ref = ring_ref; -+ info->shmem_handle = op.handle; -+ -+ return (GNTST_okay); ++ return ret; +} + +static void unmap_frontend_page(struct vscsibk_info *info) @@ -32274,8 +32290,8 @@ and in case upstream wants to take the forward porting patches: + kmem_cache_destroy(scsiback_cachep); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsiback/scsiback.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,755 @@ ++++ b/drivers/xen/scsiback/scsiback.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,747 @@ +/* + * Xen SCSI backend driver + * @@ -32557,22 +32573,14 @@ and in case upstream wants to take the forward porting patches: + + err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments); + BUG_ON(err); -+ /* Retry maps with GNTST_eagain */ -+ for(i=0; i < nr_segments; i++) { -+ while(unlikely(map[i].status == GNTST_eagain)) -+ { -+ msleep(10); -+ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, -+ &map[i], -+ 1); -+ BUG_ON(err); -+ } -+ } + + for (i = 0; i < nr_segments; i++) { + struct page *pg; + -+ if (unlikely(map[i].status != 0)) { ++ /* Retry maps with GNTST_eagain */ ++ if (unlikely(map[i].status == GNTST_eagain)) ++ gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]); ++ if (unlikely(map[i].status != GNTST_okay)) { + printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n"); + map[i].handle = SCSIBACK_INVALID_HANDLE; + err |= 1; @@ -33032,7 +33040,7 @@ and in case upstream wants to take the forward porting patches: +MODULE_DESCRIPTION("Xen SCSI backend driver"); +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsiback/translate.c 2008-07-21 11:00:33.000000000 +0200 ++++ b/drivers/xen/scsiback/translate.c 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,168 @@ +/* + * Xen SCSI backend driver @@ -33203,7 +33211,7 @@ and in case upstream wants to take the forward porting patches: + +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsiback/xenbus.c 2009-03-18 10:39:32.000000000 +0100 ++++ b/drivers/xen/scsiback/xenbus.c 2009-03-18 10:39:32.000000000 +0100 @@ -0,0 +1,378 @@ +/* + * Xen SCSI backend driver @@ -33584,13 +33592,13 @@ and in case upstream wants to take the forward porting patches: + xenbus_unregister_driver(&scsiback); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsifront/Makefile 2008-07-21 11:00:33.000000000 +0200 ++++ b/drivers/xen/scsifront/Makefile 2008-07-21 11:00:33.000000000 +0200 @@ -0,0 +1,3 @@ + +obj-$(CONFIG_XEN_SCSI_FRONTEND) := xenscsi.o +xenscsi-objs := scsifront.o xenbus.o --- /dev/null 
1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsifront/common.h 2010-02-24 13:13:46.000000000 +0100 ++++ b/drivers/xen/scsifront/common.h 2010-02-24 13:13:46.000000000 +0100 @@ -0,0 +1,135 @@ +/* + * Xen SCSI frontend driver @@ -33728,8 +33736,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsifront/scsifront.c 2008-07-21 11:00:33.000000000 +0200 -@@ -0,0 +1,511 @@ ++++ b/drivers/xen/scsifront/scsifront.c 2011-02-02 12:19:11.000000000 +0100 +@@ -0,0 +1,516 @@ +/* + * Xen SCSI frontend driver + * @@ -34089,6 +34097,11 @@ and in case upstream wants to take the forward porting patches: + int ref_cnt; + uint16_t rqid; + ++/* debug printk to identify more missing scsi commands ++ printk(KERN_INFO "scsicmd: len=%i, 0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x",sc->cmd_len, ++ sc->cmnd[0],sc->cmnd[1],sc->cmnd[2],sc->cmnd[3],sc->cmnd[4], ++ sc->cmnd[5],sc->cmnd[6],sc->cmnd[7],sc->cmnd[8],sc->cmnd[9]); ++*/ + if (RING_FULL(&info->ring)) { + goto out_host_busy; + } @@ -34242,8 +34255,8 @@ and in case upstream wants to take the forward porting patches: +MODULE_DESCRIPTION("Xen SCSI frontend driver"); +MODULE_LICENSE("GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/scsifront/xenbus.c 2010-03-31 09:56:02.000000000 +0200 -@@ -0,0 +1,421 @@ ++++ b/drivers/xen/scsifront/xenbus.c 2011-02-02 12:19:11.000000000 +0100 +@@ -0,0 +1,426 @@ +/* + * Xen SCSI frontend driver + * @@ -34273,7 +34286,11 @@ and in case upstream wants to take the forward porting patches: + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ -+ ++ ++/* ++* Patched to support >2TB drives ++* 2010, Samuel Kvasnica, IMS Nanofabrication AG ++*/ + +#include +#include "common.h" @@ -34465,6 +34482,7 @@ and in case upstream wants to take the forward porting patches: + host->max_channel = 0; + host->max_lun = VSCSIIF_MAX_LUN; + host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; ++ host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE; + + err = scsi_add_host(host, &dev->dev); + if (err) { @@ -34666,7 +34684,7 @@ and in case upstream wants to take the forward porting patches: +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/Makefile 2010-01-18 15:23:12.000000000 +0100 ++++ b/drivers/xen/sfc_netback/Makefile 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,12 @@ +EXTRA_CFLAGS += -Idrivers/xen/sfc_netback -Idrivers/xen/sfc_netutil -Idrivers/xen/netback -Idrivers/net/sfc -Idrivers/net/sfc/sfc_resource +EXTRA_CFLAGS += -D__ci_driver__ @@ -34681,7 +34699,7 @@ and in case upstream wants to take the forward porting patches: + +sfc_netback-objs := accel.o accel_fwd.o accel_msg.o accel_solarflare.o accel_xenbus.o accel_debugfs.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/accel.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netback/accel.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,147 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -34831,7 +34849,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/accel.h 2010-01-18 15:23:12.000000000 +0100 ++++ b/drivers/xen/sfc_netback/accel.h 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,391 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -35225,7 +35243,7 @@ and in case upstream wants to take the forward porting patches: + + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/accel_debugfs.c 2008-02-26 10:54:11.000000000 +0100 ++++ b/drivers/xen/sfc_netback/accel_debugfs.c 2008-02-26 10:54:11.000000000 +0100 @@ -0,0 +1,148 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -35376,7 +35394,7 @@ and in case upstream wants to take the forward porting patches: + + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/accel_fwd.c 2008-04-02 12:34:02.000000000 +0200 ++++ b/drivers/xen/sfc_netback/accel_fwd.c 2008-04-02 12:34:02.000000000 +0200 @@ -0,0 +1,420 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -35799,7 +35817,7 @@ and in case upstream wants to take the forward porting patches: + return; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/accel_msg.c 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/accel_msg.c 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,392 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -36194,7 +36212,7 @@ and in case upstream wants to take the forward porting patches: + return; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2010-04-29/drivers/xen/sfc_netback/accel_solarflare.c 2010-01-18 15:23:12.000000000 +0100 ++++ b/drivers/xen/sfc_netback/accel_solarflare.c 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,1293 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -37490,7 +37508,7 @@ and in case upstream wants to take the forward porting patches: + spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/accel_solarflare.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/accel_solarflare.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,88 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -37581,7 +37599,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* NETBACK_ACCEL_SOLARFLARE_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/accel_xenbus.c 2010-01-04 11:56:34.000000000 +0100 ++++ b/drivers/xen/sfc_netback/accel_xenbus.c 2010-01-04 11:56:34.000000000 +0100 @@ -0,0 +1,833 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -38417,7 +38435,7 @@ and in case upstream wants to take the forward porting patches: + XenbusStateClosing); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,53 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -38473,7 +38491,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat/gcc.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat/gcc.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,158 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -38634,7 +38652,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat/gcc_x86.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat/gcc_x86.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,115 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -38752,7 +38770,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat/primitive.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat/primitive.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,77 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -38832,7 +38850,7 @@ and in case upstream wants to take the forward porting patches: + +/*! 
\cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat/sysdep.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat/sysdep.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,166 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -39001,7 +39019,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat/utils.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat/utils.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,269 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -39273,7 +39291,7 @@ and in case upstream wants to take the forward porting patches: +#endif /* __CI_COMPAT_UTILS_H__ */ +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat/x86.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat/x86.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,48 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -39324,7 +39342,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/compat/x86_64.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/compat/x86_64.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,54 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -39381,7 +39399,7 @@ and in case upstream wants to take the forward porting patches: +#endif /* __CI_COMPAT_X86_64_H__ */ +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/tools/config.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/tools/config.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,49 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -39433,7 +39451,7 @@ and in case upstream wants to take the forward porting patches: +#endif /* __CI_TOOLS_CONFIG_H__ */ +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/tools/debug.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/tools/debug.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,336 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -39772,7 +39790,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/tools/log.h 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netback/ci/tools/log.h 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,269 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -40044,7 +40062,7 @@ and in case upstream wants to take the forward porting patches: +#endif /* __CI_TOOLS_LOG_H__ */ +/*! 
\cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,370 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -40417,7 +40435,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,362 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -40782,7 +40800,7 @@ and in case upstream wants to take the forward porting patches: +#endif /* __CI_TOOLS_LINUX_KERNEL_H__ */ +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netback/ci/tools/sysdep.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netback/ci/tools/sysdep.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,132 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -40917,7 +40935,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/Makefile 2008-02-26 10:54:11.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/Makefile 2008-02-26 10:54:11.000000000 +0100 @@ -0,0 +1,11 @@ +EXTRA_CFLAGS += -Idrivers/xen/sfc_netfront -Idrivers/xen/sfc_netutil -Idrivers/xen/netfront +EXTRA_CFLAGS += -D__ci_driver__ @@ -40931,7 +40949,7 @@ and in case upstream wants to take the forward porting patches: + +sfc_netfront-objs := accel_msg.o accel_bufs.o accel_netfront.o accel_vi.o accel_xenbus.o accel_tso.o accel_ssr.o accel_debugfs.o falcon_event.o falcon_vi.o pt_tx.o vi_init.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel.h 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/accel.h 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,495 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -41429,7 +41447,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* NETFRONT_ACCEL_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_bufs.c 2008-02-26 10:54:12.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_bufs.c 2008-02-26 10:54:12.000000000 +0100 @@ -0,0 +1,393 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -41825,7 +41843,7 @@ and in case upstream wants to take the forward porting patches: + return was_empty; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_bufs.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_bufs.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,181 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -42009,7 
+42027,7 @@ and in case upstream wants to take the forward porting patches: +#endif /* NETFRONT_ACCEL_BUFS_H */ + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_debugfs.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/accel_debugfs.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,227 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -42239,7 +42257,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_msg.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/accel_msg.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,564 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -42806,7 +42824,7 @@ and in case upstream wants to take the forward porting patches: + &lock_state, vnic->msg_channel_irq); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_netfront.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/accel_netfront.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,328 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -43137,7 +43155,7 @@ and in case upstream wants to take the forward porting patches: +MODULE_LICENSE("GPL"); + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_ssr.c 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_ssr.c 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,308 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -43448,7 +43466,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_ssr.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_ssr.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,88 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -43539,7 +43557,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* NETFRONT_ACCEL_SSR_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_tso.c 2008-02-26 10:54:12.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_tso.c 2008-02-26 10:54:12.000000000 +0100 @@ -0,0 +1,511 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -44053,7 +44071,7 @@ and in case upstream wants to take the forward porting patches: + + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_tso.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_tso.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,57 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -44113,7 +44131,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* NETFRONT_ACCEL_TSO_H */ --- /dev/null 1970-01-01 
00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_vi.c 2010-01-18 15:23:12.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_vi.c 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,1202 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -45318,7 +45336,7 @@ and in case upstream wants to take the forward porting patches: + return 1; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_xenbus.c 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/accel_xenbus.c 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,776 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -46097,7 +46115,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/ef_vi_falcon.h 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/ef_vi_falcon.h 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,172 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -46272,7 +46290,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __EF_VI_FALCON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/ef_vi_falcon_core.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/ef_vi_falcon_core.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,1075 @@ + +#define EFVI_FALCON_EXTENDED_P_BAR 1 @@ -47350,7 +47368,7 @@ and in case upstream wants to take the forward porting patches: + #define EE_VPD_CYC_DAT_LBN 0 + #define EE_VPD_CYC_DAT_WIDTH 32 --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,43 @@ +//////////////---- Descriptors C Headers ----////////////// +// Receive Kernel IP Descriptor @@ -47396,7 +47414,7 @@ and in case upstream wants to take the forward porting patches: + #define TX_USR_BYTE_OFS_LBN 0 + #define TX_USR_BYTE_OFS_WIDTH 13 --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/ef_vi_falcon_event.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/ef_vi_falcon_event.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,123 @@ +//////////////---- Events Format C Header ----////////////// +//////////////---- Event entry ----////////////// @@ -47522,7 +47540,7 @@ and in case upstream wants to take the forward porting patches: + #define DRV_GEN_EV_DATA_LBN 0 + #define DRV_GEN_EV_DATA_WIDTH 60 --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/ef_vi_internal.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/ef_vi_internal.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,256 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. 
@@ -47781,7 +47799,7 @@ and in case upstream wants to take the forward porting patches: +#endif /* __CI_EF_VI_INTERNAL_H__ */ + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/etherfabric/ef_vi.h 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/etherfabric/ef_vi.h 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,647 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -48431,7 +48449,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __EFAB_EF_VI_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/falcon_event.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/falcon_event.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,346 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -48780,7 +48798,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/falcon_vi.c 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/falcon_vi.c 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,473 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -49256,7 +49274,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/pt_tx.c 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/pt_tx.c 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,91 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -49350,7 +49368,7 @@ and in case upstream wants to take the forward porting patches: + +/*! \cidoxg_end */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/sysdep.h 2009-04-07 13:58:48.000000000 +0200 ++++ b/drivers/xen/sfc_netfront/sysdep.h 2009-04-07 13:58:48.000000000 +0200 @@ -0,0 +1,185 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. @@ -49538,7 +49556,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __CI_CIUL_SYSDEP_LINUX_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netfront/vi_init.c 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netfront/vi_init.c 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,183 @@ +/**************************************************************************** + * Copyright 2002-2005: Level 5 Networks Inc. 
@@ -49724,7 +49742,7 @@ and in case upstream wants to take the forward porting patches: + vm->tx_bell = (char*) io_mmap + (TX_DESC_UPD_REG_KER_OFST & 4095); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/Makefile 2008-02-26 10:54:12.000000000 +0100 ++++ b/drivers/xen/sfc_netutil/Makefile 2008-02-26 10:54:12.000000000 +0100 @@ -0,0 +1,11 @@ +EXTRA_CFLAGS += -Idrivers/xen/sfc_netutil +EXTRA_CFLAGS += -Werror @@ -49738,7 +49756,7 @@ and in case upstream wants to take the forward porting patches: +sfc_netutil-objs := accel_cuckoo_hash.o accel_msg_iface.o accel_util.o + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,651 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -50392,7 +50410,7 @@ and in case upstream wants to take the forward porting patches: +EXPORT_SYMBOL_GPL(cuckoo_hash_dump); +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_cuckoo_hash.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netutil/accel_cuckoo_hash.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,227 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -50622,7 +50640,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* NET_ACCEL_CUCKOO_HASH_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_msg_iface.c 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netutil/accel_msg_iface.c 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,301 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -50926,7 +50944,7 @@ and in case upstream wants to take the forward porting patches: +} +EXPORT_SYMBOL_GPL(net_accel_msg_complete_send_notify); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_msg_iface.h 2010-01-18 15:23:12.000000000 +0100 ++++ b/drivers/xen/sfc_netutil/accel_msg_iface.h 2010-01-18 15:23:12.000000000 +0100 @@ -0,0 +1,415 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -51344,7 +51362,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* NET_ACCEL_MSG_IFACE_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_shared_fifo.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netutil/accel_shared_fifo.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,127 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -51474,8 +51492,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* NET_ACCEL_SHARED_FIFO_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_util.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,355 @@ ++++ b/drivers/xen/sfc_netutil/accel_util.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,336 @@ +/**************************************************************************** + * 
Solarflare driver for Xen network acceleration + * @@ -51549,24 +51567,27 @@ and in case upstream wants to take the forward porting patches: + u64 *dev_bus_addr, unsigned flags) +{ + struct gnttab_map_grant_ref op; ++ int ret; + + gnttab_set_map_op(&op, (unsigned long)vaddr, flags, + gnt_ref, dev->otherend_id); + -+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + + if (op.status != GNTST_okay) { + xenbus_dev_error + (dev, op.status, + "failed mapping in shared page %d from domain %d\n", + gnt_ref, dev->otherend_id); ++ ret = -EINVAL; + } else { + *handle = op.handle; + if (dev_bus_addr) + *dev_bus_addr = op.dev_bus_addr; ++ ret = 0; + } + -+ return op.status; ++ return ret; +} + + @@ -51590,7 +51611,7 @@ and in case upstream wants to take the forward porting patches: + "failed unmapping page at handle %d error %d\n", + handle, op.status); + -+ return op.status; ++ return op.status == GNTST_okay ? 0 : -EINVAL; +} + + @@ -51622,7 +51643,7 @@ and in case upstream wants to take the forward porting patches: +/* Map a series of grants into a contiguous virtual area */ +static void *net_accel_map_grants_valloc(struct xenbus_device *dev, + unsigned *grants, int npages, -+ unsigned flags, void **priv, int *errno) ++ unsigned flags, void **priv) +{ + struct net_accel_valloc_grant_mapping *map; + struct vm_struct *vm; @@ -51650,16 +51671,12 @@ and in case upstream wants to take the forward porting patches: + + /* Do the actual mapping */ + addr = vm->addr; -+ if(errno != NULL) *errno = 0; ++ + for (i = 0; i < npages; i++) { + rc = net_accel_map_grant(dev, grants[i], map->grant_handles + i, + addr, NULL, flags); -+ if (rc != 0) -+ { -+ if(errno != NULL) -+ *errno = (rc == GNTST_eagain ? 
-EAGAIN : -EINVAL); ++ if (rc < 0) + goto undo; -+ } + addr = (void*)((unsigned long)addr + PAGE_SIZE); + } + @@ -51708,16 +51725,7 @@ and in case upstream wants to take the forward porting patches: + unsigned *grants, int npages, + void **priv) +{ -+ int errno; -+ void *ret; -+ -+ do { -+ ret = net_accel_map_grants_valloc(dev, grants, npages, -+ GNTMAP_host_map, priv, &errno); -+ if(errno) msleep(10); -+ } while(errno == -EAGAIN); -+ -+ return ret; ++ return net_accel_map_grants_valloc(dev, grants, npages, GNTMAP_host_map, priv); +} +EXPORT_SYMBOL(net_accel_map_grants_contig); + @@ -51733,16 +51741,7 @@ and in case upstream wants to take the forward porting patches: +void *net_accel_map_iomem_page(struct xenbus_device *dev, int gnt_ref, + void **priv) +{ -+ int errno; -+ void *ret; -+ -+ do { -+ ret = net_accel_map_grants_valloc(dev, &gnt_ref, 1, -+ GNTMAP_host_map, priv, &errno); -+ if(errno) msleep(10); -+ } while(errno == -EAGAIN); -+ -+ return ret; ++ return net_accel_map_grants_valloc(dev, &gnt_ref, 1, GNTMAP_host_map, priv); +} +EXPORT_SYMBOL(net_accel_map_iomem_page); + @@ -51832,7 +51831,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/sfc_netutil/accel_util.h 2008-02-20 09:32:49.000000000 +0100 ++++ b/drivers/xen/sfc_netutil/accel_util.h 2008-02-20 09:32:49.000000000 +0100 @@ -0,0 +1,127 @@ +/**************************************************************************** + * Solarflare driver for Xen network acceleration @@ -51962,14 +51961,14 @@ and in case upstream wants to take the forward porting patches: + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/tpmback/Makefile 2007-06-12 13:13:45.000000000 +0200 ++++ b/drivers/xen/tpmback/Makefile 2007-06-12 13:13:45.000000000 +0200 @@ -0,0 +1,4 @@ + +obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmbk.o + +tpmbk-y += tpmback.o interface.o xenbus.o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/tpmback/common.h 2007-06-12 13:13:45.000000000 +0200 ++++ b/drivers/xen/tpmback/common.h 2007-06-12 13:13:45.000000000 +0200 @@ -0,0 +1,85 @@ +/****************************************************************************** + * drivers/xen/tpmback/common.h @@ -52057,8 +52056,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __TPMIF__BACKEND__COMMON_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/tpmback/interface.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,172 @@ ++++ b/drivers/xen/tpmback/interface.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,170 @@ + /***************************************************************************** + * drivers/xen/tpmback/interface.c + * @@ -52142,25 +52141,23 @@ and in case upstream wants to take the forward porting patches: +static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page) +{ + struct gnttab_map_grant_ref op; ++ int ret; + + gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr, + GNTMAP_host_map, shared_page, tpmif->domid); + -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while(op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ if (op.status) { -+ DPRINTK(" Grant table operation failure !\n"); -+ return op.status; ++ if (op.status != GNTST_okay) { ++ DPRINTK(" Grant table operation failure %d!\n", 
(int)op.status); ++ ret = -EINVAL; ++ } else { ++ tpmif->shmem_ref = shared_page; ++ tpmif->shmem_handle = op.handle; ++ ret = 0; + } + -+ tpmif->shmem_ref = shared_page; -+ tpmif->shmem_handle = op.handle; -+ -+ return 0; ++ return ret; +} + +static void unmap_frontend_page(tpmif_t *tpmif) @@ -52232,8 +52229,8 @@ and in case upstream wants to take the forward porting patches: + kmem_cache_destroy(tpmif_cachep); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/tpmback/tpmback.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,949 @@ ++++ b/drivers/xen/tpmback/tpmback.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,939 @@ +/****************************************************************************** + * drivers/xen/tpmback/tpmback.c + * @@ -52493,20 +52490,15 @@ and in case upstream wants to take the forward porting patches: + gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i), + GNTMAP_host_map, tx->ref, tpmif->domid); + -+ do { -+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, -+ &map_op, 1))) -+ BUG(); -+ if(map_op.status) msleep(10); -+ } while(map_op.status == GNTST_eagain); -+ -+ handle = map_op.handle; ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &map_op); + -+ if (map_op.status) { ++ if (map_op.status != GNTST_okay) { + DPRINTK(" Grant table operation failure !\n"); + return 0; + } + ++ handle = map_op.handle; ++ + tocopy = min_t(size_t, size - offset, PAGE_SIZE); + + if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) | @@ -52633,14 +52625,9 @@ and in case upstream wants to take the forward porting patches: + gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i), + GNTMAP_host_map, tx->ref, tpmif->domid); + -+ do { -+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, -+ &map_op, 1))) -+ BUG(); -+ if(map_op.status) msleep(10); -+ } while(map_op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &map_op); + -+ if (map_op.status) { ++ if (map_op.status != GNTST_okay) { + DPRINTK(" Grant table operation failure !\n"); + return -EFAULT; + } @@ -53184,7 +53171,7 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/tpmback/xenbus.c 2008-03-06 08:54:32.000000000 +0100 ++++ b/drivers/xen/tpmback/xenbus.c 2008-03-06 08:54:32.000000000 +0100 @@ -0,0 +1,289 @@ +/* Xenbus code for tpmif backend + Copyright (C) 2005 IBM Corporation @@ -53476,15 +53463,15 @@ and in case upstream wants to take the forward porting patches: + xenbus_unregister_driver(&tpmback); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbback/Makefile 2009-04-07 13:58:49.000000000 +0200 ++++ b/drivers/xen/usbback/Makefile 2009-04-07 13:58:49.000000000 +0200 @@ -0,0 +1,4 @@ +obj-$(CONFIG_XEN_USB_BACKEND) := usbbk.o + +usbbk-y := usbstub.o xenbus.o interface.o usbback.o + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbback/interface.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,257 @@ ++++ b/drivers/xen/usbback/interface.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,248 @@ +/* + * interface.c + * @@ -53597,16 +53584,11 @@ and in case upstream wants to take the forward porting patches: + gnttab_set_map_op(&op, (unsigned long)usbif->urb_ring_area->addr, + GNTMAP_host_map, urb_ring_ref, usbif->domid); + ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ do { -+ if 
(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while (op.status == GNTST_eagain); -+ -+ if (op.status) { -+ printk(KERN_ERR "grant table failure mapping urb_ring_ref\n"); -+ return op.status; ++ if (op.status != GNTST_okay) { ++ printk(KERN_ERR "grant table failure mapping urb_ring_ref %d\n", (int)op.status); ++ return -EINVAL; + } + + usbif->urb_shmem_ref = urb_ring_ref; @@ -53615,21 +53597,17 @@ and in case upstream wants to take the forward porting patches: + gnttab_set_map_op(&op, (unsigned long)usbif->conn_ring_area->addr, + GNTMAP_host_map, conn_ring_ref, usbif->domid); + -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while (op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + -+ if (op.status) { ++ if (op.status != GNTST_okay) { + struct gnttab_unmap_grant_ref unop; + gnttab_set_unmap_op(&unop, + (unsigned long) usbif->urb_ring_area->addr, + GNTMAP_host_map, usbif->urb_shmem_handle); + VOID(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, + 1)); -+ printk(KERN_ERR "grant table failure mapping conn_ring_ref\n"); -+ return op.status; ++ printk(KERN_ERR "grant table failure mapping conn_ring_ref %d\n", (int)op.status); ++ return -EINVAL; + } + + usbif->conn_shmem_ref = conn_ring_ref; @@ -53743,8 +53721,8 @@ and in case upstream wants to take the forward porting patches: + kfree(usbif); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbback/usbback.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,1165 @@ ++++ b/drivers/xen/usbback/usbback.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,1159 @@ +/* + * usbback.c + * @@ -54139,19 +54117,13 @@ and in case upstream wants to take the forward porting patches: + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, + map, nr_segs); + BUG_ON(ret); -+ /* Make sure than none of the map ops failed with GNTST_eagain */ -+ for( i = 0; i < nr_segs; i++) { -+ while(map[i].status == GNTST_eagain) { -+ msleep(10); -+ ret = HYPERVISOR_grant_table_op( -+ GNTTABOP_map_grant_ref, -+ &map[i], 1); -+ BUG_ON(ret); -+ } -+ } + + for (i = 0; i < nr_segs; i++) { -+ if (unlikely(map[i].status != 0)) { ++ /* Make sure than none of the map ops failed with GNTST_eagain */ ++ if (unlikely(map[i].status == GNTST_eagain)) ++ gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]); ++ ++ if (unlikely(map[i].status != GNTST_okay)) { + printk(KERN_ERR "usbback: invalid buffer -- could not remap it\n"); + map[i].handle = USBBACK_INVALID_HANDLE; + ret |= 1; @@ -54911,7 +54883,7 @@ and in case upstream wants to take the forward porting patches: +MODULE_DESCRIPTION("Xen USB backend driver (usbback)"); +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbback/usbback.h 2009-11-06 10:23:23.000000000 +0100 ++++ b/drivers/xen/usbback/usbback.h 2009-11-06 10:23:23.000000000 +0100 @@ -0,0 +1,173 @@ +/* + * usbback.h @@ -55087,8 +55059,8 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __XEN_USBBACK_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbback/usbstub.c 2009-11-06 10:23:23.000000000 +0100 -@@ -0,0 +1,325 @@ ++++ b/drivers/xen/usbback/usbstub.c 2011-03-02 12:00:16.000000000 +0100 +@@ -0,0 +1,324 @@ +/* + * usbstub.c + * @@ -55371,8 +55343,7 @@ and in case upstream wants to take the forward porting patches: + + return count; +} -+ 
-+DRIVER_ATTR(port_ids, S_IRUSR, usbstub_show_portids, NULL); ++static DRIVER_ATTR(port_ids, S_IRUSR, usbstub_show_portids, NULL); + +/* table of devices that matches any usbdevice */ +static struct usb_device_id usbstub_table[] = { @@ -55415,7 +55386,7 @@ and in case upstream wants to take the forward porting patches: + usb_deregister(&usbback_usb_driver); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbback/xenbus.c 2009-11-06 10:23:23.000000000 +0100 ++++ b/drivers/xen/usbback/xenbus.c 2009-11-06 10:23:23.000000000 +0100 @@ -0,0 +1,338 @@ +/* + * xenbus.c @@ -55756,7 +55727,7 @@ and in case upstream wants to take the forward porting patches: + xenbus_unregister_driver(&usbback_driver); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbfront/Makefile 2009-10-15 11:45:41.000000000 +0200 ++++ b/drivers/xen/usbfront/Makefile 2009-10-15 11:45:41.000000000 +0200 @@ -0,0 +1,11 @@ +obj-$(CONFIG_XEN_USB_FRONTEND) := xen-hcd.o + @@ -55770,7 +55741,7 @@ and in case upstream wants to take the forward porting patches: +EXTRA_CFLAGS += -DXENHCD_PM +endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbfront/usbfront-dbg.c 2009-10-15 11:45:41.000000000 +0200 ++++ b/drivers/xen/usbfront/usbfront-dbg.c 2009-10-15 11:45:41.000000000 +0200 @@ -0,0 +1,100 @@ +/* + * usbfront-dbg.c @@ -55873,7 +55844,7 @@ and in case upstream wants to take the forward porting patches: + class_device_remove_file(cldev, &class_device_attr_statistics); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbfront/usbfront-hcd.c 2009-10-15 11:45:41.000000000 +0200 ++++ b/drivers/xen/usbfront/usbfront-hcd.c 2009-10-15 11:45:41.000000000 +0200 @@ -0,0 +1,231 @@ +/* + * usbfront-hcd.c @@ -56107,7 +56078,7 @@ and in case upstream wants to take the forward porting patches: +#endif +}; --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbfront/usbfront-hub.c 2009-10-15 11:45:41.000000000 +0200 ++++ b/drivers/xen/usbfront/usbfront-hub.c 2009-10-15 11:45:41.000000000 +0200 @@ -0,0 +1,471 @@ +/* + * usbfront-hub.c @@ -56581,7 +56552,7 @@ and in case upstream wants to take the forward porting patches: + return ret; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbfront/usbfront-q.c 2009-10-15 11:45:41.000000000 +0200 ++++ b/drivers/xen/usbfront/usbfront-q.c 2009-10-15 11:45:41.000000000 +0200 @@ -0,0 +1,541 @@ +/* + * usbfront-q.c @@ -57125,7 +57096,7 @@ and in case upstream wants to take the forward porting patches: + return IRQ_HANDLED; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbfront/usbfront.h 2009-10-15 11:45:41.000000000 +0200 ++++ b/drivers/xen/usbfront/usbfront.h 2009-10-15 11:45:41.000000000 +0200 @@ -0,0 +1,203 @@ +/* + * usbfront.h @@ -57331,7 +57302,7 @@ and in case upstream wants to take the forward porting patches: + +#endif /* __XEN_USBFRONT_H__ */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/usbfront/xenbus.c 2010-03-31 09:56:02.000000000 +0200 ++++ b/drivers/xen/usbfront/xenbus.c 2010-03-31 09:56:02.000000000 +0200 @@ -0,0 +1,417 @@ +/* + * xenbus.c @@ -57751,7 +57722,7 @@ and in case upstream wants to take the forward porting patches: +MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (usbfront)"); +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2010-04-29/drivers/xen/util.c 2007-07-10 09:42:30.000000000 +0200 ++++ b/drivers/xen/util.c 2007-07-10 09:42:30.000000000 +0200 @@ -0,0 +1,65 @@ +#include +#include @@ -57819,8 +57790,8 @@ and in case upstream wants to take the forward porting patches: +EXPORT_SYMBOL_GPL(free_vm_area); +#endif /* CONFIG_X86 */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_backend_client.c 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,154 @@ ++++ b/drivers/xen/xenbus/xenbus_backend_client.c 2010-09-23 15:39:04.000000000 +0200 +@@ -0,0 +1,151 @@ +/****************************************************************************** + * Backend-client-facing interface for the Xenbus driver. In other words, the + * interface between the Xenbus and the device-specific code in the backend @@ -57872,11 +57843,7 @@ and in case upstream wants to take the forward porting patches: + gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, + gnt_ref, dev->otherend_id); + -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while(op.status == GNTST_eagain); ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + + if (op.status != GNTST_okay) { + free_vm_area(area); @@ -57884,7 +57851,7 @@ and in case upstream wants to take the forward porting patches: + "mapping in shared page %d from domain %d", + gnt_ref, dev->otherend_id); + BUG_ON(!IS_ERR(ERR_PTR(op.status))); -+ return ERR_PTR(op.status); ++ return ERR_PTR(-EINVAL); + } + + /* Stuff the handle in an unused field */ @@ -57899,23 +57866,24 @@ and in case upstream wants to take the forward porting patches: + grant_handle_t *handle, void *vaddr) +{ + struct gnttab_map_grant_ref op; ++ int ret; + + gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, + gnt_ref, dev->otherend_id); -+ do { -+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) -+ BUG(); -+ msleep(10); -+ } while(op.status == GNTST_eagain); ++ ++ gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + + if (op.status != GNTST_okay) { + xenbus_dev_fatal(dev, op.status, + "mapping in shared page %d from domain %d", + gnt_ref, dev->otherend_id); -+ } else ++ ret = -EINVAL; ++ } else { + *handle = op.handle; ++ ret = 0; ++ } + -+ return op.status; ++ return ret; +} +EXPORT_SYMBOL_GPL(xenbus_map_ring); + @@ -57938,7 +57906,7 @@ and in case upstream wants to take the forward porting patches: + "unmapping page at handle %d error %d", + (int16_t)area->phys_addr, op.status); + -+ return op.status; ++ return op.status == GNTST_okay ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); + @@ -57958,7 +57926,7 @@ and in case upstream wants to take the forward porting patches: + "unmapping page at handle %d error %d", + handle, op.status); + -+ return op.status; ++ return op.status == GNTST_okay ? 
0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(xenbus_unmap_ring); + @@ -57976,8 +57944,8 @@ and in case upstream wants to take the forward porting patches: + +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_dev.c 2009-05-29 10:25:53.000000000 +0200 -@@ -0,0 +1,460 @@ ++++ b/drivers/xen/xenbus/xenbus_dev.c 2011-01-03 12:43:21.000000000 +0100 +@@ -0,0 +1,468 @@ +/* + * xenbus_dev.c + * @@ -58084,6 +58052,9 @@ and in case upstream wants to take the forward porting patches: + mutex_lock(&u->reply_mutex); + while (list_empty(&u->read_buffers)) { + mutex_unlock(&u->reply_mutex); ++ if (filp->f_flags & O_NONBLOCK) ++ return -EAGAIN; ++ + ret = wait_event_interruptible(u->read_waitq, + !list_empty(&u->read_buffers)); + if (ret) @@ -58336,6 +58307,7 @@ and in case upstream wants to take the forward porting patches: + struct xenbus_dev_data *u = filp->private_data; + struct xenbus_dev_transaction *trans, *tmp; + struct watch_adapter *watch, *tmp_watch; ++ struct read_buffer *rb, *tmp_rb; + + list_for_each_entry_safe(trans, tmp, &u->transactions, list) { + xenbus_transaction_end(trans->handle, 1); @@ -58349,6 +58321,10 @@ and in case upstream wants to take the forward porting patches: + free_watch_adapter(watch); + } + ++ list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { ++ list_del(&rb->list); ++ kfree(rb); ++ } + kfree(u); + + return 0; @@ -58439,302 +58415,7 @@ and in case upstream wants to take the forward porting patches: + return 0; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe_backend.c 2008-01-21 11:15:26.000000000 +0100 -@@ -0,0 +1,292 @@ -+/****************************************************************************** -+ * Talks to Xen Store to figure out what devices we have (backend half). -+ * -+ * Copyright (C) 2005 Rusty Russell, IBM Corporation -+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard -+ * Copyright (C) 2005, 2006 XenSource Ltd -+ * Copyright (C) 2007 Solarflare Communications, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License version 2 -+ * as published by the Free Software Foundation; or, when distributed -+ * separately from the Linux kernel or incorporated into other -+ * software packages, subject to the following license: -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this source file (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, copy, modify, -+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, -+ * and to permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. -+ */ -+ -+#define DPRINTK(fmt, args...) 
\ -+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ -+ __FUNCTION__, __LINE__, ##args) -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "xenbus_comms.h" -+#include "xenbus_probe.h" -+ -+#ifdef HAVE_XEN_PLATFORM_COMPAT_H -+#include -+#endif -+ -+static int xenbus_uevent_backend(struct device *dev, char **envp, -+ int num_envp, char *buffer, int buffer_size); -+static int xenbus_probe_backend(const char *type, const char *domid); -+ -+extern int read_otherend_details(struct xenbus_device *xendev, -+ char *id_node, char *path_node); -+ -+static int read_frontend_details(struct xenbus_device *xendev) -+{ -+ return read_otherend_details(xendev, "frontend-id", "frontend"); -+} -+ -+/* backend/// => -- */ -+static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) -+{ -+ int domid, err; -+ const char *devid, *type, *frontend; -+ unsigned int typelen; -+ -+ type = strchr(nodename, '/'); -+ if (!type) -+ return -EINVAL; -+ type++; -+ typelen = strcspn(type, "/"); -+ if (!typelen || type[typelen] != '/') -+ return -EINVAL; -+ -+ devid = strrchr(nodename, '/') + 1; -+ -+ err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, -+ "frontend", NULL, &frontend, -+ NULL); -+ if (err) -+ return err; -+ if (strlen(frontend) == 0) -+ err = -ERANGE; -+ if (!err && !xenbus_exists(XBT_NIL, frontend, "")) -+ err = -ENOENT; -+ kfree(frontend); -+ -+ if (err) -+ return err; -+ -+ if (snprintf(bus_id, BUS_ID_SIZE, -+ "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE) -+ return -ENOSPC; -+ return 0; -+} -+ -+static struct xen_bus_type xenbus_backend = { -+ .root = "backend", -+ .levels = 3, /* backend/type// */ -+ .get_bus_id = backend_bus_id, -+ .probe = xenbus_probe_backend, -+ .error = -ENODEV, -+ .bus = { -+ .name = "xen-backend", -+ .match = xenbus_match, -+ .probe = xenbus_dev_probe, -+ .remove = xenbus_dev_remove, -+// .shutdown = xenbus_dev_shutdown, -+ .uevent = xenbus_uevent_backend, -+ }, -+ .dev = { -+ .bus_id = "xen-backend", -+ }, -+}; -+ -+static int xenbus_uevent_backend(struct device *dev, char **envp, -+ int num_envp, char *buffer, int buffer_size) -+{ -+ struct xenbus_device *xdev; -+ struct xenbus_driver *drv; -+ int i = 0; -+ int length = 0; -+ -+ DPRINTK(""); -+ -+ if (dev == NULL) -+ return -ENODEV; -+ -+ xdev = to_xenbus_device(dev); -+ if (xdev == NULL) -+ return -ENODEV; -+ -+ /* stuff we want to pass to /sbin/hotplug */ -+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -+ "XENBUS_TYPE=%s", xdev->devicetype); -+ -+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -+ "XENBUS_PATH=%s", xdev->nodename); -+ -+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -+ "XENBUS_BASE_PATH=%s", xenbus_backend.root); -+ -+ /* terminate, set to next free slot, shrink available space */ -+ envp[i] = NULL; -+ envp = &envp[i]; -+ num_envp -= i; -+ buffer = &buffer[length]; -+ buffer_size -= length; -+ -+ if (dev->driver) { -+ drv = to_xenbus_driver(dev->driver); -+ if (drv && drv->uevent) -+ return drv->uevent(xdev, envp, num_envp, buffer, -+ buffer_size); -+ } -+ -+ return 0; -+} -+ -+int xenbus_register_backend(struct xenbus_driver *drv) -+{ -+ drv->read_otherend_details = read_frontend_details; -+ -+ return xenbus_register_driver_common(drv, &xenbus_backend); -+} -+EXPORT_SYMBOL_GPL(xenbus_register_backend); -+ -+/* backend/// */ -+static int xenbus_probe_backend_unit(const char 
*dir, -+ const char *type, -+ const char *name) -+{ -+ char *nodename; -+ int err; -+ -+ nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); -+ if (!nodename) -+ return -ENOMEM; -+ -+ DPRINTK("%s\n", nodename); -+ -+ err = xenbus_probe_node(&xenbus_backend, type, nodename); -+ kfree(nodename); -+ return err; -+} -+ -+/* backend// */ -+static int xenbus_probe_backend(const char *type, const char *domid) -+{ -+ char *nodename; -+ int err = 0; -+ char **dir; -+ unsigned int i, dir_n = 0; -+ -+ DPRINTK(""); -+ -+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); -+ if (!nodename) -+ return -ENOMEM; -+ -+ dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); -+ if (IS_ERR(dir)) { -+ kfree(nodename); -+ return PTR_ERR(dir); -+ } -+ -+ for (i = 0; i < dir_n; i++) { -+ err = xenbus_probe_backend_unit(nodename, type, dir[i]); -+ if (err) -+ break; -+ } -+ kfree(dir); -+ kfree(nodename); -+ return err; -+} -+ -+static void backend_changed(struct xenbus_watch *watch, -+ const char **vec, unsigned int len) -+{ -+ DPRINTK(""); -+ -+ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); -+} -+ -+static struct xenbus_watch be_watch = { -+ .node = "backend", -+ .callback = backend_changed, -+}; -+ -+void xenbus_backend_suspend(int (*fn)(struct device *, void *)) -+{ -+ DPRINTK(""); -+ if (!xenbus_backend.error) -+ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); -+} -+ -+void xenbus_backend_resume(int (*fn)(struct device *, void *)) -+{ -+ DPRINTK(""); -+ if (!xenbus_backend.error) -+ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); -+} -+ -+void xenbus_backend_probe_and_watch(void) -+{ -+ xenbus_probe_devices(&xenbus_backend); -+ register_xenbus_watch(&be_watch); -+} -+ -+void xenbus_backend_bus_register(void) -+{ -+ xenbus_backend.error = bus_register(&xenbus_backend.bus); -+ if (xenbus_backend.error) -+ printk(KERN_WARNING -+ "XENBUS: Error registering backend bus: %i\n", -+ xenbus_backend.error); -+} -+ -+void xenbus_backend_device_register(void) -+{ -+ if (xenbus_backend.error) -+ return; -+ -+ xenbus_backend.error = device_register(&xenbus_backend.dev); -+ if (xenbus_backend.error) { -+ bus_unregister(&xenbus_backend.bus); -+ printk(KERN_WARNING -+ "XENBUS: Error registering backend device: %i\n", -+ xenbus_backend.error); -+ } -+} -+ -+int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) -+{ -+ return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); -+} -+EXPORT_SYMBOL_GPL(xenbus_for_each_backend); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/xenoprof/xenoprofile.c 2010-01-07 09:38:29.000000000 +0100 ++++ b/drivers/xen/xenoprof/xenoprofile.c 2010-01-07 09:38:29.000000000 +0100 @@ -0,0 +1,587 @@ +/** + * @file xenoprofile.c diff --git a/patches.xen/xen3-auto-xen-kconfig.diff b/patches.xen/xen3-auto-xen-kconfig.diff index 6aa73c7..c67101e 100644 --- a/patches.xen/xen3-auto-xen-kconfig.diff +++ b/patches.xen/xen3-auto-xen-kconfig.diff @@ -1,31 +1,29 @@ Subject: xen3 xen-kconfig -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com ---- head-2010-03-24.orig/arch/x86/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/arch/x86/Kconfig 2010-03-24 14:37:43.000000000 +0100 -@@ -70,6 +70,7 @@ config ARCH_DEFCONFIG +--- head-2011-01-30.orig/arch/x86/Kconfig 2011-01-31 12:42:58.000000000 +0100 ++++ head-2011-01-30/arch/x86/Kconfig 
2011-01-31 14:42:03.000000000 +0100 +@@ -83,6 +83,7 @@ config ARCH_DEFCONFIG - config GENERIC_TIME + config GENERIC_CMOS_UPDATE def_bool y + depends on !X86_XEN - config GENERIC_CMOS_UPDATE + config CLOCKSOURCE_WATCHDOG def_bool y -@@ -226,12 +227,23 @@ config X86_64_SMP +@@ -215,11 +216,22 @@ config X86_64_SMP config X86_HT - bool + def_bool y - depends on SMP + depends on SMP && !XEN - default y config X86_TRAMPOLINE - bool + def_bool y depends on SMP || (64BIT && ACPI_SLEEP) + depends on !XEN -+ default y + +config X86_NO_TSS + bool @@ -35,10 +33,11 @@ Acked-by: jbeulich@novell.com +config X86_NO_IDT + bool + depends on X86_XEN || X86_64_XEN - default y ++ default y config X86_32_LAZY_GS -@@ -311,6 +323,17 @@ config X86_MPPARSE + def_bool y +@@ -291,6 +303,17 @@ config X86_MPPARSE For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it @@ -56,7 +55,7 @@ Acked-by: jbeulich@novell.com config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" depends on X86_32 && SMP -@@ -340,6 +363,13 @@ config X86_EXTENDED_PLATFORM +@@ -320,6 +343,13 @@ config X86_EXTENDED_PLATFORM generic distribution kernel, say Y here - otherwise say N. endif @@ -70,7 +69,7 @@ Acked-by: jbeulich@novell.com if X86_64 config X86_EXTENDED_PLATFORM bool "Support for extended (non-PC) x86 platforms" -@@ -669,6 +699,7 @@ source "arch/x86/Kconfig.cpu" +@@ -597,6 +627,7 @@ source "arch/x86/Kconfig.cpu" config HPET_TIMER def_bool X86_64 prompt "HPET Timer Support" if X86_32 @@ -78,16 +77,16 @@ Acked-by: jbeulich@novell.com ---help--- Use the IA-PC HPET (High Precision Event Timer) to manage time in preference to the PIT and RTC, if a HPET is -@@ -714,7 +745,7 @@ config GART_IOMMU - bool "GART IOMMU support" if EMBEDDED +@@ -642,7 +673,7 @@ config GART_IOMMU + bool "GART IOMMU support" if EXPERT default y select SWIOTLB -- depends on X86_64 && PCI && K8_NB -+ depends on X86_64 && PCI && K8_NB && !X86_64_XEN +- depends on X86_64 && PCI && AMD_NB ++ depends on X86_64 && PCI && AMD_NB && !X86_64_XEN ---help--- Support for full DMA access of devices with 32bit memory access only on systems with more than 3GB. This is usually needed for USB, -@@ -729,7 +760,7 @@ config GART_IOMMU +@@ -657,7 +688,7 @@ config GART_IOMMU config CALGARY_IOMMU bool "IBM Calgary IOMMU support" select SWIOTLB @@ -96,7 +95,7 @@ Acked-by: jbeulich@novell.com ---help--- Support for hardware IOMMUs in IBM's xSeries x366 and x460 systems. Needed to run systems with more than 3GB of memory -@@ -813,6 +844,7 @@ config NR_CPUS +@@ -740,6 +771,7 @@ config NR_CPUS default "1" if !SMP default "4096" if MAXSMP default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000) @@ -104,7 +103,7 @@ Acked-by: jbeulich@novell.com default "8" if SMP ---help--- This allows you to specify the maximum number of CPUs which this -@@ -844,7 +876,7 @@ source "kernel/Kconfig.preempt" +@@ -782,7 +814,7 @@ source "kernel/Kconfig.preempt" config X86_UP_APIC bool "Local APIC support on uniprocessors" @@ -113,7 +112,7 @@ Acked-by: jbeulich@novell.com ---help--- A local APIC (Advanced Programmable Interrupt Controller) is an integrated interrupt controller in the CPU. 
If you have a single-CPU -@@ -870,15 +902,22 @@ config X86_UP_IOAPIC +@@ -808,15 +840,22 @@ config X86_UP_IOAPIC config X86_LOCAL_APIC def_bool y depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC @@ -135,8 +134,8 @@ Acked-by: jbeulich@novell.com + config X86_REROUTE_FOR_BROKEN_BOOT_IRQS bool "Reroute for broken boot IRQs" - default n -@@ -905,6 +944,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS + depends on X86_IO_APIC +@@ -842,6 +881,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS config X86_MCE bool "Machine Check / overheating reporting" @@ -144,7 +143,7 @@ Acked-by: jbeulich@novell.com ---help--- Machine Check support allows the processor to notify the kernel if it detects a problem (e.g. overheating, data corruption). -@@ -914,7 +954,7 @@ config X86_MCE +@@ -851,7 +891,7 @@ config X86_MCE config X86_MCE_INTEL def_bool y prompt "Intel MCE features" @@ -153,7 +152,7 @@ Acked-by: jbeulich@novell.com ---help--- Additional support for intel specific MCE features such as the thermal monitor. -@@ -930,7 +970,7 @@ config X86_MCE_XEON75XX +@@ -867,7 +907,7 @@ config X86_MCE_XEON75XX config X86_MCE_AMD def_bool y prompt "AMD MCE features" @@ -162,7 +161,7 @@ Acked-by: jbeulich@novell.com ---help--- Additional support for AMD specific MCE features such as the DRAM Error Threshold. -@@ -957,6 +997,10 @@ config X86_MCE_INJECT +@@ -892,6 +932,10 @@ config X86_MCE_INJECT If you don't know what a machine check is and you don't do kernel QA it is safe to say n. @@ -173,7 +172,7 @@ Acked-by: jbeulich@novell.com config X86_THERMAL_VECTOR def_bool y depends on X86_MCE_INTEL -@@ -1009,7 +1053,7 @@ config I8K +@@ -944,7 +988,7 @@ config I8K config X86_REBOOTFIXUPS bool "Enable X86 board specific fixups for reboot" @@ -182,7 +181,7 @@ Acked-by: jbeulich@novell.com ---help--- This enables chipset and/or board specific fixups to be done in order to get reboot to work correctly. This is only needed on -@@ -1026,6 +1070,7 @@ config X86_REBOOTFIXUPS +@@ -961,6 +1005,7 @@ config X86_REBOOTFIXUPS config MICROCODE tristate "/dev/cpu/microcode - microcode support" @@ -190,7 +189,7 @@ Acked-by: jbeulich@novell.com select FW_LOADER ---help--- If you say Y here, you will be able to update the microcode on -@@ -1216,7 +1261,7 @@ config DIRECT_GBPAGES +@@ -1154,7 +1199,7 @@ config DIRECT_GBPAGES # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" @@ -199,7 +198,7 @@ Acked-by: jbeulich@novell.com depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL) default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP) ---help--- -@@ -1325,6 +1370,7 @@ config ARCH_SPARSEMEM_DEFAULT +@@ -1263,6 +1308,7 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SPARSEMEM_ENABLE def_bool y depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD @@ -207,7 +206,7 @@ Acked-by: jbeulich@novell.com select SPARSEMEM_STATIC if X86_32 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 -@@ -1405,6 +1451,7 @@ config X86_RESERVE_LOW_64K +@@ -1352,6 +1398,7 @@ config X86_RESERVE_LOW config MATH_EMULATION bool prompt "Math emulation" if X86_32 @@ -215,15 +214,15 @@ Acked-by: jbeulich@novell.com ---help--- Linux can emulate a math coprocessor (used for floating point operations) if you don't have one. 
486DX and Pentium processors have -@@ -1432,6 +1479,7 @@ config MTRR - bool - default y - prompt "MTRR (Memory Type Range Register) support" if EMBEDDED +@@ -1378,6 +1425,7 @@ config MATH_EMULATION + config MTRR + def_bool y + prompt "MTRR (Memory Type Range Register) support" if EXPERT + depends on !XEN_UNPRIVILEGED_GUEST ---help--- On Intel P6 family processors (Pentium Pro, Pentium II and later) the Memory Type Range Registers (MTRRs) may be used to control -@@ -1517,7 +1565,7 @@ config ARCH_USES_PG_UNCACHED +@@ -1462,7 +1510,7 @@ config ARCH_USES_PG_UNCACHED config EFI bool "EFI runtime service support" @@ -232,7 +231,7 @@ Acked-by: jbeulich@novell.com ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). -@@ -1577,6 +1625,7 @@ source kernel/Kconfig.hz +@@ -1521,6 +1569,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" @@ -240,7 +239,7 @@ Acked-by: jbeulich@novell.com ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot -@@ -1594,6 +1643,7 @@ config KEXEC +@@ -1538,6 +1587,7 @@ config KEXEC config CRASH_DUMP bool "kernel crash dumps" depends on X86_64 || (X86_32 && HIGHMEM) @@ -248,7 +247,7 @@ Acked-by: jbeulich@novell.com ---help--- Generate crash dump after being started by kexec. This should be normally only set in special crash dump kernels -@@ -1714,6 +1764,7 @@ config COMPAT_VDSO +@@ -1657,6 +1707,7 @@ config COMPAT_VDSO def_bool y prompt "Compat VDSO support" depends on X86_32 || IA32_EMULATION @@ -256,7 +255,7 @@ Acked-by: jbeulich@novell.com ---help--- Map the 32-bit VDSO to the predictable old-style address too. -@@ -1783,6 +1834,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID +@@ -1728,6 +1779,7 @@ config USE_PERCPU_NUMA_NODE_ID depends on NUMA menu "Power management and ACPI options" @@ -264,7 +263,7 @@ Acked-by: jbeulich@novell.com config ARCH_HIBERNATION_HEADER def_bool y -@@ -1801,7 +1853,7 @@ config X86_APM_BOOT +@@ -1745,7 +1797,7 @@ config X86_APM_BOOT menuconfig APM tristate "APM (Advanced Power Management) BIOS support" @@ -273,7 +272,7 @@ Acked-by: jbeulich@novell.com ---help--- APM is a BIOS specification for saving power using several different techniques. This is mostly useful for battery powered laptops with -@@ -1962,6 +2014,7 @@ choice +@@ -1906,6 +1958,7 @@ choice config PCI_GOBIOS bool "BIOS" @@ -281,8 +280,8 @@ Acked-by: jbeulich@novell.com config PCI_GOMMCONFIG bool "MMConfig" -@@ -1973,6 +2026,13 @@ config PCI_GOOLPC - bool "OLPC" +@@ -1917,6 +1970,13 @@ config PCI_GOOLPC + bool "OLPC XO-1" depends on OLPC +config PCI_GOXEN_FE @@ -295,7 +294,7 @@ Acked-by: jbeulich@novell.com config PCI_GOANY bool "Any" -@@ -1980,7 +2040,7 @@ endchoice +@@ -1924,7 +1984,7 @@ endchoice config PCI_BIOS def_bool y @@ -304,9 +303,9 @@ Acked-by: jbeulich@novell.com # x86-64 doesn't support PCI BIOS access from long mode so always go direct. config PCI_DIRECT -@@ -2003,6 +2063,22 @@ config PCI_MMCONFIG - bool "Support mmconfig PCI config space access" - depends on X86_64 && PCI && ACPI +@@ -1966,6 +2026,22 @@ config PCI_CNB20LE_QUIRK + + You should say N unless you know you need this. 
+config XEN_PCIDEV_FRONTEND + bool "Xen PCI Frontend" if X86_64 @@ -327,7 +326,7 @@ Acked-by: jbeulich@novell.com config DMAR bool "Support for DMA Remapping Devices (EXPERIMENTAL)" depends on PCI_MSI && ACPI && EXPERIMENTAL -@@ -2065,6 +2141,7 @@ if X86_32 +@@ -2027,6 +2103,7 @@ if X86_32 config ISA bool "ISA support" @@ -335,7 +334,7 @@ Acked-by: jbeulich@novell.com ---help--- Find out whether you have ISA slots on your motherboard. ISA is the name of a bus system, i.e. the way the CPU talks to the other stuff -@@ -2092,6 +2169,7 @@ source "drivers/eisa/Kconfig" +@@ -2054,6 +2131,7 @@ source "drivers/eisa/Kconfig" config MCA bool "MCA support" @@ -343,25 +342,25 @@ Acked-by: jbeulich@novell.com ---help--- MicroChannel Architecture is found in some IBM PS/2 machines and laptops. It is a bus system similar to PCI or ISA. See -@@ -2196,4 +2274,6 @@ source "crypto/Kconfig" +@@ -2184,4 +2262,6 @@ source "crypto/Kconfig" source "arch/x86/kvm/Kconfig" +source "drivers/xen/Kconfig" + source "lib/Kconfig" ---- head-2010-03-24.orig/arch/x86/Kconfig.cpu 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/arch/x86/Kconfig.cpu 2010-03-24 14:36:44.000000000 +0100 -@@ -336,7 +336,7 @@ config X86_PPRO_FENCE +--- head-2011-01-30.orig/arch/x86/Kconfig.cpu 2011-01-31 12:42:52.000000000 +0100 ++++ head-2011-01-30/arch/x86/Kconfig.cpu 2011-01-31 14:42:03.000000000 +0100 +@@ -339,7 +339,7 @@ config X86_PPRO_FENCE config X86_F00F_BUG def_bool y - depends on M586MMX || M586TSC || M586 || M486 || M386 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT - config X86_WP_WORKS_OK + config X86_INVD_BUG def_bool y -@@ -393,6 +393,7 @@ config X86_P6_NOP +@@ -400,6 +400,7 @@ config X86_P6_NOP config X86_TSC def_bool y depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64 @@ -369,30 +368,30 @@ Acked-by: jbeulich@novell.com config X86_CMPXCHG64 def_bool y ---- head-2010-03-24.orig/arch/x86/Kconfig.debug 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/arch/x86/Kconfig.debug 2010-03-24 14:36:44.000000000 +0100 -@@ -136,7 +136,7 @@ config 4KSTACKS +--- head-2011-01-30.orig/arch/x86/Kconfig.debug 2011-01-31 12:42:52.000000000 +0100 ++++ head-2011-01-30/arch/x86/Kconfig.debug 2011-01-31 14:42:03.000000000 +0100 +@@ -139,7 +139,7 @@ config DEBUG_NX_TEST config DOUBLEFAULT default y - bool "Enable doublefault exception handler" if EMBEDDED + bool "Enable doublefault exception handler" if EXPERT - depends on X86_32 + depends on X86_32 && !X86_NO_TSS ---help--- This option allows trapping of rare doublefault exceptions that would otherwise cause a system to silently reboot. 
Disabling this ---- head-2010-03-24.orig/drivers/acpi/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/drivers/acpi/Kconfig 2010-03-24 14:36:44.000000000 +0100 -@@ -307,6 +307,7 @@ config ACPI_PCI_SLOT +--- head-2011-01-30.orig/drivers/acpi/Kconfig 2011-01-31 12:42:53.000000000 +0100 ++++ head-2011-01-30/drivers/acpi/Kconfig 2011-01-31 14:42:03.000000000 +0100 +@@ -320,6 +320,7 @@ config ACPI_PCI_SLOT config X86_PM_TIMER - bool "Power Management Timer Support" if EMBEDDED + bool "Power Management Timer Support" if EXPERT depends on X86 + depends on !XEN default y help The Power Management Timer is available on all ACPI-capable, -@@ -360,4 +361,13 @@ config ACPI_SBS - To compile this driver as a module, choose M here: - the modules will be called sbs and sbshc. +@@ -383,4 +384,13 @@ config ACPI_HED + + source "drivers/acpi/apei/Kconfig" +config ACPI_PV_SLEEP + bool @@ -404,9 +403,9 @@ Acked-by: jbeulich@novell.com + depends on (X86 || IA64) && XEN + default y endif # ACPI ---- head-2010-03-24.orig/drivers/char/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/drivers/char/Kconfig 2010-03-24 14:36:44.000000000 +0100 -@@ -1047,7 +1047,7 @@ config MAX_RAW_DEVS +--- head-2011-01-30.orig/drivers/char/Kconfig 2011-01-31 12:42:59.000000000 +0100 ++++ head-2011-01-30/drivers/char/Kconfig 2011-01-31 14:42:03.000000000 +0100 +@@ -1070,7 +1070,7 @@ config MAX_RAW_DEVS config HPET bool "HPET - High Precision Event Timer" if (X86 || IA64) default n @@ -415,11 +414,11 @@ Acked-by: jbeulich@novell.com help If you say Y here, you will have a miscdevice named "/dev/hpet/". Each open selects one of the timers supported by the HPET. The timers are ---- head-2010-03-24.orig/drivers/char/tpm/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/drivers/char/tpm/Kconfig 2010-03-24 14:36:44.000000000 +0100 -@@ -58,4 +58,13 @@ config TCG_INFINEON +--- head-2011-01-30.orig/drivers/char/tpm/Kconfig 2011-01-05 01:50:19.000000000 +0100 ++++ head-2011-01-30/drivers/char/tpm/Kconfig 2011-01-31 14:42:03.000000000 +0100 +@@ -60,4 +60,13 @@ config TCG_INFINEON Further information on this driver and the supported hardware - can be found at http://www.prosec.rub.de/tpm + can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ +config TCG_XEN + tristate "XEN TPM Interface" @@ -431,8 +430,8 @@ Acked-by: jbeulich@novell.com + will be called tpm_xenu. + endif # TCG_TPM ---- head-2010-03-24.orig/drivers/cpufreq/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/drivers/cpufreq/Kconfig 2010-03-24 14:36:44.000000000 +0100 +--- head-2011-01-30.orig/drivers/cpufreq/Kconfig 2011-01-31 12:42:53.000000000 +0100 ++++ head-2011-01-30/drivers/cpufreq/Kconfig 2011-01-31 14:42:03.000000000 +0100 @@ -1,5 +1,6 @@ config CPU_FREQ bool "CPU Frequency scaling" @@ -440,8 +439,8 @@ Acked-by: jbeulich@novell.com help CPU Frequency scaling allows you to change the clock speed of CPUs on the fly. 
This is a nice method to save power, because ---- head-2010-03-24.orig/drivers/serial/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/drivers/serial/Kconfig 2010-03-24 14:36:44.000000000 +0100 +--- head-2011-01-30.orig/drivers/tty/serial/Kconfig 2011-01-31 12:42:54.000000000 +0100 ++++ head-2011-01-30/drivers/tty/serial/Kconfig 2011-01-31 14:42:03.000000000 +0100 @@ -9,6 +9,7 @@ menu "Serial drivers" # The new 8250/16550 serial drivers config SERIAL_8250 @@ -450,8 +449,8 @@ Acked-by: jbeulich@novell.com select SERIAL_CORE ---help--- This selects whether you want to include the driver for the standard ---- head-2010-03-24.orig/drivers/xen/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/drivers/xen/Kconfig 2010-03-24 14:42:42.000000000 +0100 +--- head-2011-01-30.orig/drivers/xen/Kconfig 2011-01-31 12:42:35.000000000 +0100 ++++ head-2011-01-30/drivers/xen/Kconfig 2011-01-31 14:42:03.000000000 +0100 @@ -1,8 +1,357 @@ +# +# This Kconfig describe xen options @@ -835,9 +834,9 @@ Acked-by: jbeulich@novell.com If in doubt, say yes. config XEN_DEV_EVTCHN ---- head-2010-03-24.orig/fs/Kconfig 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/fs/Kconfig 2010-03-24 14:36:44.000000000 +0100 -@@ -160,6 +160,7 @@ config HUGETLBFS +--- head-2011-01-30.orig/fs/Kconfig 2011-01-31 12:42:58.000000000 +0100 ++++ head-2011-01-30/fs/Kconfig 2011-01-31 14:42:03.000000000 +0100 +@@ -141,6 +141,7 @@ config HUGETLBFS bool "HugeTLB file system support" depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \ SYS_SUPPORTS_HUGETLBFS || BROKEN @@ -845,8 +844,8 @@ Acked-by: jbeulich@novell.com help hugetlbfs is a filesystem backing for HugeTLB pages, based on ramfs. For architectures that support it, say Y here and read ---- head-2010-03-24.orig/kernel/Kconfig.preempt 2010-03-24 14:42:50.000000000 +0100 -+++ head-2010-03-24/kernel/Kconfig.preempt 2010-03-24 14:36:44.000000000 +0100 +--- head-2011-01-30.orig/kernel/Kconfig.preempt 2011-01-31 12:42:57.000000000 +0100 ++++ head-2011-01-30/kernel/Kconfig.preempt 2011-01-31 14:42:03.000000000 +0100 @@ -36,6 +36,7 @@ config PREEMPT_VOLUNTARY config PREEMPT diff --git a/patches.xen/xen3-fixup-arch-x86 b/patches.xen/xen3-fixup-arch-x86 index 74900a2..38ae086 100644 --- a/patches.xen/xen3-fixup-arch-x86 +++ b/patches.xen/xen3-fixup-arch-x86 @@ -2,32 +2,9 @@ Subject: xen3 x86 build fixes. From: jbeulich@novell.com Patch-mainline: n/a ---- head-2010-05-25.orig/arch/x86/include/asm/topology.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/topology.h 2010-03-24 15:06:06.000000000 +0100 -@@ -30,7 +30,7 @@ - # define ENABLE_TOPO_DEFINES - # endif - #else --# ifdef CONFIG_SMP -+# if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - # define ENABLE_TOPO_DEFINES - # endif - #endif ---- head-2010-05-25.orig/arch/x86/kdb/kdba_bt.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kdb/kdba_bt.c 2010-04-15 09:51:55.000000000 +0200 -@@ -3244,6 +3244,9 @@ bb_usage_mov(const struct bb_operand *sr - bb_is_int_reg(dst->base_rc) && - full_register_dst) { - #ifdef CONFIG_X86_32 -+#ifndef TSS_sysenter_sp0 -+#define TSS_sysenter_sp0 SYSENTER_stack_sp0 -+#endif - /* mov from TSS_sysenter_sp0+offset to esp to fix up the - * sysenter stack, it leaves esp well defined. 
mov - * TSS_ysenter_sp0+offset(%esp),%esp is followed by up to 5 ---- head-2010-05-25.orig/arch/x86/kernel/cpu/intel_cacheinfo.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/intel_cacheinfo.c 2010-05-25 09:20:14.000000000 +0200 -@@ -632,7 +632,7 @@ unsigned int __cpuinit init_intel_cachei +--- head-2011-02-08.orig/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 09:51:53.000000000 +0100 ++++ head-2011-02-08/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 10:02:51.000000000 +0100 +@@ -723,7 +723,7 @@ unsigned int __cpuinit init_intel_cachei static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) @@ -36,17 +13,17 @@ Patch-mainline: n/a static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; ---- head-2010-05-25.orig/arch/x86/power/Makefile 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/power/Makefile 2010-03-24 15:06:06.000000000 +0100 +--- head-2011-02-08.orig/arch/x86/power/Makefile 2009-09-10 00:13:59.000000000 +0200 ++++ head-2011-02-08/arch/x86/power/Makefile 2011-01-31 17:01:57.000000000 +0100 @@ -5,3 +5,5 @@ CFLAGS_cpu.o := $(nostackp) obj-$(CONFIG_PM_SLEEP) += cpu.o obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o + +disabled-obj-$(CONFIG_XEN) := cpu.o ---- head-2010-05-25.orig/arch/x86/power/cpu.c 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/power/cpu.c 2010-03-24 15:06:06.000000000 +0100 -@@ -126,7 +126,6 @@ static void do_fpu_end(void) +--- head-2011-02-08.orig/arch/x86/power/cpu.c 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-02-08/arch/x86/power/cpu.c 2011-01-31 17:01:57.000000000 +0100 +@@ -129,7 +129,6 @@ static void do_fpu_end(void) static void fix_processor_context(void) { @@ -54,7 +31,7 @@ Patch-mainline: n/a int cpu = smp_processor_id(); struct tss_struct *t = &per_cpu(init_tss, cpu); -@@ -139,10 +138,7 @@ static void fix_processor_context(void) +@@ -142,10 +141,7 @@ static void fix_processor_context(void) #ifdef CONFIG_X86_64 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; diff --git a/patches.xen/xen3-fixup-blktap2-pvops b/patches.xen/xen3-fixup-blktap2-pvops new file mode 100644 index 0000000..e0f8803 --- /dev/null +++ b/patches.xen/xen3-fixup-blktap2-pvops @@ -0,0 +1,150 @@ +Subject: adjust xen build after addition of pv-ops' blktap2 +From: jbeulich@novell.com +Patch-mainline: n/a + +--- head-2011-02-17.orig/drivers/xen/Makefile 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-02-17/drivers/xen/Makefile 2011-02-24 13:56:24.000000000 +0100 +@@ -8,7 +8,7 @@ obj-y += util.o + obj-$(CONFIG_XEN_BALLOON) += balloon/ + obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ + obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ +-obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ ++obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ blktap2-new/ + obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/ + obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/ + obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/ +--- head-2011-02-17.orig/drivers/xen/blktap2-new/Makefile 2011-02-24 13:49:49.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap2-new/Makefile 2011-02-24 13:59:48.000000000 +0100 +@@ -1,3 +1,4 @@ +-obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o ++obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap2-new.o + +-blktap-objs := control.o ring.o device.o request.o sysfs.o ++blktap2-new-y := control.o ring.o device.o request.o ++blktap2-new-$(CONFIG_SYSFS) += sysfs.o +--- 
head-2011-02-17.orig/drivers/xen/blktap2-new/blktap.h 2011-02-24 13:49:49.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap2-new/blktap.h 2011-02-24 14:08:40.000000000 +0100 +@@ -24,6 +24,8 @@ extern int blktap_device_major; + #define BTWARN(_f, _a...) BTPRINTK(0, KERN_WARNING, 0, _f, ##_a) + #define BTERR(_f, _a...) BTPRINTK(0, KERN_ERR, 0, _f, ##_a) + ++#define BLKTAP2_DEV_DIR "xen/blktap-2/" ++ + #define MAX_BLKTAP_DEVICE 1024 + + #define BLKTAP_DEVICE 4 +@@ -181,10 +183,17 @@ void blktap_ring_unmap_request(struct bl + void blktap_ring_set_message(struct blktap *, int); + void blktap_ring_kick_user(struct blktap *); + ++#ifdef CONFIG_SYSFS + int blktap_sysfs_init(void); + void blktap_sysfs_exit(void); + int blktap_sysfs_create(struct blktap *); + void blktap_sysfs_destroy(struct blktap *); ++#else ++static inline int blktap_sysfs_init(void) { return 0; } ++static inline void blktap_sysfs_exit(void) {} ++static inline int blktap_sysfs_create(struct blktap *tapdev) { return 0; } ++static inline void blktap_sysfs_destroy(struct blktap *tapdev) {} ++#endif + + int blktap_device_init(void); + void blktap_device_exit(void); +--- head-2011-02-17.orig/drivers/xen/blktap2-new/control.c 2011-02-24 13:49:49.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap2-new/control.c 2011-02-24 15:14:41.000000000 +0100 +@@ -164,7 +164,7 @@ blktap_control_ioctl(struct inode *inode + return -ENOIOCTLCMD; + } + +-static struct file_operations blktap_control_file_operations = { ++static const struct file_operations blktap_control_file_operations = { + .owner = THIS_MODULE, + .ioctl = blktap_control_ioctl, + }; +@@ -172,6 +172,7 @@ static struct file_operations blktap_con + static struct miscdevice blktap_control = { + .minor = MISC_DYNAMIC_MINOR, + .name = "blktap-control", ++ .nodename = BLKTAP2_DEV_DIR "control", + .fops = &blktap_control_file_operations, + }; + +--- head-2011-02-17.orig/drivers/xen/blktap2-new/device.c 2011-02-24 13:49:49.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap2-new/device.c 2011-02-24 14:51:43.000000000 +0100 +@@ -95,7 +95,7 @@ blktap_device_ioctl(struct block_device + return 0; + } + +-static struct block_device_operations blktap_device_file_operations = { ++static const struct block_device_operations blktap_device_file_operations = { + .owner = THIS_MODULE, + .open = blktap_device_open, + .release = blktap_device_release, +@@ -424,6 +424,12 @@ blktap_device_destroy_sync(struct blktap + !blktap_device_try_destroy(tap)); + } + ++static char *blktap_devnode(struct gendisk *gd, mode_t *mode) ++{ ++ return kasprintf(GFP_KERNEL, BLKTAP2_DEV_DIR "tapdev%u", ++ gd->first_minor); ++} ++ + int + blktap_device_create(struct blktap *tap, struct blktap_params *params) + { +@@ -464,6 +470,7 @@ blktap_device_create(struct blktap *tap, + + gd->major = blktap_device_major; + gd->first_minor = minor; ++ gd->devnode = blktap_devnode; + gd->fops = &blktap_device_file_operations; + gd->private_data = tapdev; + +--- head-2011-02-17.orig/drivers/xen/blktap2-new/ring.c 2011-02-24 13:49:49.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap2-new/ring.c 2011-02-24 15:14:47.000000000 +0100 +@@ -435,7 +435,7 @@ static unsigned int blktap_ring_poll(str + return 0; + } + +-static struct file_operations blktap_ring_file_operations = { ++static const struct file_operations blktap_ring_file_operations = { + .owner = THIS_MODULE, + .open = blktap_ring_open, + .release = blktap_ring_release, +--- head-2011-02-17.orig/drivers/xen/blktap2-new/sysfs.c 2011-02-24 13:49:49.000000000 +0100 ++++ 
head-2011-02-17/drivers/xen/blktap2-new/sysfs.c 2011-02-24 14:40:36.000000000 +0100 +@@ -160,8 +160,8 @@ blktap_sysfs_store_pool(struct device *d + + return size; + } +-DEVICE_ATTR(pool, S_IRUSR|S_IWUSR, +- blktap_sysfs_show_pool, blktap_sysfs_store_pool); ++static DEVICE_ATTR(pool, S_IRUSR|S_IWUSR, ++ blktap_sysfs_show_pool, blktap_sysfs_store_pool); + + int + blktap_sysfs_create(struct blktap *tap) +@@ -259,6 +259,12 @@ blktap_sysfs_show_devices(struct class * + } + static CLASS_ATTR(devices, S_IRUGO, blktap_sysfs_show_devices, NULL); + ++static char *blktap_devnode(struct device *dev, mode_t *mode) ++{ ++ return kasprintf(GFP_KERNEL, BLKTAP2_DEV_DIR "blktap%u", ++ MINOR(dev->devt)); ++} ++ + void + blktap_sysfs_exit(void) + { +@@ -275,6 +281,8 @@ blktap_sysfs_init(void) + cls = class_create(THIS_MODULE, "blktap2"); + if (IS_ERR(cls)) + err = PTR_ERR(cls); ++ else ++ cls->devnode = blktap_devnode; + if (!err) + err = class_create_file(cls, &class_attr_verbosity); + if (!err) diff --git a/patches.xen/xen3-fixup-common b/patches.xen/xen3-fixup-common index a0abaf4..588fae4 100644 --- a/patches.xen/xen3-fixup-common +++ b/patches.xen/xen3-fixup-common @@ -2,8 +2,8 @@ Subject: Fix xen build. From: jbeulich@novell.com Patch-mainline: n/a ---- head-2010-05-25.orig/drivers/acpi/acpica/hwsleep.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/acpi/acpica/hwsleep.c 2010-03-24 15:02:17.000000000 +0100 +--- head-2011-02-17.orig/drivers/acpi/acpica/hwsleep.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/drivers/acpi/acpica/hwsleep.c 2011-01-31 17:01:49.000000000 +0100 @@ -419,6 +419,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_stat * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * @@ -20,8 +20,8 @@ Patch-mainline: n/a /******************************************************************************* * ---- head-2010-05-25.orig/drivers/base/cpu.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/base/cpu.c 2010-04-15 09:51:36.000000000 +0200 +--- head-2011-02-17.orig/drivers/base/cpu.c 2011-02-17 09:59:45.000000000 +0100 ++++ head-2011-02-17/drivers/base/cpu.c 2011-01-31 17:01:49.000000000 +0100 @@ -106,7 +106,7 @@ static inline void register_cpu_control( } #endif /* CONFIG_HOTPLUG_CPU */ @@ -40,8 +40,8 @@ Patch-mainline: n/a if (!error) error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes); #endif ---- head-2010-05-25.orig/drivers/ide/ide-lib.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/ide/ide-lib.c 2010-03-24 15:02:17.000000000 +0100 +--- head-2011-02-17.orig/drivers/ide/ide-lib.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/drivers/ide/ide-lib.c 2011-01-31 17:01:49.000000000 +0100 @@ -18,6 +18,16 @@ void ide_toggle_bounce(ide_drive_t *driv { u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ @@ -67,8 +67,8 @@ Patch-mainline: n/a if (drive->queue) blk_queue_bounce_limit(drive->queue, addr); ---- head-2010-05-25.orig/drivers/oprofile/buffer_sync.c 2010-04-15 09:43:44.000000000 +0200 -+++ head-2010-05-25/drivers/oprofile/buffer_sync.c 2010-04-15 09:51:29.000000000 +0200 +--- head-2011-02-17.orig/drivers/oprofile/buffer_sync.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/drivers/oprofile/buffer_sync.c 2011-01-31 17:01:49.000000000 +0100 @@ -47,7 +47,9 @@ static cpumask_var_t marked_cpus; static DEFINE_SPINLOCK(task_mortuary); static void process_task_mortuary(void); @@ -79,7 +79,7 @@ Patch-mainline: n/a /* Take ownership of the task struct and place it on the * list for processing. 
Only after two full buffer syncs -@@ -159,11 +161,13 @@ static void end_sync(void) +@@ -149,11 +151,13 @@ static struct notifier_block module_load int sync_start(void) { int err; @@ -93,7 +93,7 @@ Patch-mainline: n/a if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) return -ENOMEM; -@@ -313,12 +317,14 @@ static void add_cpu_mode_switch(unsigned +@@ -314,12 +318,14 @@ static void add_cpu_mode_switch(unsigned } } @@ -108,7 +108,7 @@ Patch-mainline: n/a static void add_user_ctx_switch(struct task_struct const *task, unsigned long cookie) -@@ -541,10 +547,12 @@ void sync_buffer(int cpu) +@@ -542,10 +548,12 @@ void sync_buffer(int cpu) add_cpu_switch(cpu); @@ -121,7 +121,7 @@ Patch-mainline: n/a op_cpu_buffer_reset(cpu); available = op_cpu_buffer_entries(cpu); -@@ -554,12 +562,14 @@ void sync_buffer(int cpu) +@@ -555,12 +563,14 @@ void sync_buffer(int cpu) if (!sample) break; @@ -136,7 +136,7 @@ Patch-mainline: n/a if (is_code(sample->eip)) { flags = sample->event; -@@ -585,17 +595,21 @@ void sync_buffer(int cpu) +@@ -586,17 +596,21 @@ void sync_buffer(int cpu) cookie = get_exec_dcookie(mm); add_user_ctx_switch(new, cookie); } @@ -158,7 +158,7 @@ Patch-mainline: n/a if (state < sb_bt_start) /* ignore sample */ -@@ -612,9 +626,11 @@ void sync_buffer(int cpu) +@@ -613,9 +627,11 @@ void sync_buffer(int cpu) } release_mm(mm); @@ -170,9 +170,9 @@ Patch-mainline: n/a mark_done(cpu); ---- head-2010-05-25.orig/drivers/oprofile/cpu_buffer.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/oprofile/cpu_buffer.c 2010-03-24 15:02:17.000000000 +0100 -@@ -58,7 +58,11 @@ static void wq_sync_buffer(struct work_s +--- head-2011-02-17.orig/drivers/oprofile/cpu_buffer.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/drivers/oprofile/cpu_buffer.c 2011-01-31 17:01:49.000000000 +0100 +@@ -42,7 +42,11 @@ static void wq_sync_buffer(struct work_s #define DEFAULT_TIMER_EXPIRE (HZ / 10) static int work_enabled; @@ -184,7 +184,7 @@ Patch-mainline: n/a unsigned long oprofile_get_cpu_buffer_size(void) { -@@ -462,6 +466,7 @@ fail: +@@ -435,6 +439,7 @@ fail: return; } @@ -192,7 +192,7 @@ Patch-mainline: n/a int oprofile_add_domain_switch(int32_t domain_id) { struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; -@@ -480,6 +485,7 @@ int oprofile_add_domain_switch(int32_t d +@@ -453,6 +458,7 @@ int oprofile_add_domain_switch(int32_t d return 1; } @@ -200,8 +200,8 @@ Patch-mainline: n/a /* * This serves to avoid cpu buffer overflow, and makes sure ---- head-2010-05-25.orig/drivers/oprofile/oprof.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/oprofile/oprof.c 2010-03-24 15:02:17.000000000 +0100 +--- head-2011-02-17.orig/drivers/oprofile/oprof.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/drivers/oprofile/oprof.c 2011-01-31 17:01:49.000000000 +0100 @@ -39,6 +39,7 @@ static DEFINE_MUTEX(start_mutex); */ static int timer = 0; @@ -218,10 +218,10 @@ Patch-mainline: n/a int oprofile_setup(void) { ---- head-2010-05-25.orig/drivers/oprofile/oprofile_files.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/oprofile/oprofile_files.c 2010-03-24 15:02:17.000000000 +0100 -@@ -171,6 +171,8 @@ static const struct file_operations dump - .write = dump_write, +--- head-2011-02-17.orig/drivers/oprofile/oprofile_files.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/drivers/oprofile/oprofile_files.c 2011-01-31 17:01:49.000000000 +0100 +@@ -180,6 +180,8 @@ static const struct file_operations dump + .llseek = noop_llseek, }; +#ifdef CONFIG_XEN @@ 
-229,7 +229,7 @@ Patch-mainline: n/a #define TMPBUFSIZE 512 static unsigned int adomains = 0; -@@ -360,6 +362,8 @@ static const struct file_operations pass +@@ -369,6 +371,8 @@ static const struct file_operations pass .write = pdomain_write, }; @@ -238,7 +238,7 @@ Patch-mainline: n/a void oprofile_create_files(struct super_block *sb, struct dentry *root) { /* reinitialize default values */ -@@ -370,8 +374,10 @@ void oprofile_create_files(struct super_ +@@ -379,8 +383,10 @@ void oprofile_create_files(struct super_ oprofilefs_create_file(sb, root, "enable", &enable_fops); oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); @@ -249,9 +249,9 @@ Patch-mainline: n/a oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size); oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed); ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2009-05-19 09:16:41.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:02:17.000000000 +0100 -@@ -57,7 +57,6 @@ u8 cpu_2_logical_apicid[NR_CPUS] = { [0 +--- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2010-11-08 17:27:03.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-01-31 17:01:49.000000000 +0100 +@@ -55,7 +55,6 @@ static char callfunc_name[NR_CPUS][15]; cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; @@ -259,9 +259,9 @@ Patch-mainline: n/a #if defined(__i386__) u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff }; ---- head-2010-05-25.orig/include/linux/mm.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/include/linux/mm.h 2010-03-24 15:02:17.000000000 +0100 -@@ -211,6 +211,7 @@ struct vm_operations_struct { +--- head-2011-02-17.orig/include/linux/mm.h 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/include/linux/mm.h 2011-01-31 17:01:49.000000000 +0100 +@@ -222,6 +222,7 @@ struct vm_operations_struct { int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); @@ -269,7 +269,7 @@ Patch-mainline: n/a /* Area-specific function for clearing the PTE at @ptep. Returns the * original value of @ptep. */ pte_t (*zap_pte)(struct vm_area_struct *vma, -@@ -218,6 +219,7 @@ struct vm_operations_struct { +@@ -229,6 +230,7 @@ struct vm_operations_struct { /* called before close() to indicate no more pages should be mapped */ void (*unmap)(struct vm_area_struct *area); @@ -277,11 +277,11 @@ Patch-mainline: n/a #ifdef CONFIG_NUMA /* ---- head-2010-05-25.orig/include/linux/oprofile.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/include/linux/oprofile.h 2010-03-24 15:02:17.000000000 +0100 -@@ -16,8 +16,9 @@ - #include - #include +--- head-2011-02-17.orig/include/linux/oprofile.h 2011-02-17 10:05:41.000000000 +0100 ++++ head-2011-02-17/include/linux/oprofile.h 2011-02-17 10:06:04.000000000 +0100 +@@ -19,8 +19,9 @@ + #include + #include #include - +#ifdef CONFIG_XEN @@ -290,7 +290,7 @@ Patch-mainline: n/a /* Each escaped entry is prefixed by ESCAPE_CODE * then one of the following codes, then the -@@ -55,11 +56,12 @@ struct oprofile_operations { +@@ -58,11 +59,12 @@ struct oprofile_operations { /* create any necessary configuration files in the oprofile fs. * Optional. */ int (*create_files)(struct super_block * sb, struct dentry * root); @@ -304,9 +304,9 @@ Patch-mainline: n/a /* Do any necessary interrupt setup. Optional. */ int (*setup)(void); /* Do any necessary interrupt shutdown. 
Optional. */ ---- head-2010-05-25.orig/include/linux/page-flags.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/include/linux/page-flags.h 2010-03-24 15:02:17.000000000 +0100 -@@ -111,7 +111,7 @@ enum pageflags { +--- head-2011-02-17.orig/include/linux/page-flags.h 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/include/linux/page-flags.h 2011-01-31 17:01:49.000000000 +0100 +@@ -110,7 +110,7 @@ enum pageflags { #endif #ifdef CONFIG_XEN PG_foreign, /* Page is owned by foreign allocator. */ @@ -315,7 +315,7 @@ Patch-mainline: n/a PG_blkback, /* Page is owned by blkback */ #endif __NR_PAGEFLAGS, -@@ -355,9 +355,11 @@ CLEARPAGEFLAG(Uptodate, uptodate) +@@ -351,9 +351,11 @@ CLEARPAGEFLAG(Uptodate, uptodate) #define PageForeignDestructor(_page, order) \ ((void (*)(struct page *, unsigned int))(_page)->index)(_page, order) @@ -327,11 +327,11 @@ Patch-mainline: n/a #define PageBlkback(page) test_bit(PG_blkback, &(page)->flags) #define SetPageBlkback(page) set_bit(PG_blkback, &(page)->flags) ---- head-2010-05-25.orig/kernel/kexec.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/kernel/kexec.c 2010-05-25 09:20:04.000000000 +0200 -@@ -46,8 +46,10 @@ - #include - #endif +--- head-2011-02-17.orig/kernel/kexec.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/kernel/kexec.c 2011-01-31 17:01:49.000000000 +0100 +@@ -40,8 +40,10 @@ + #include + #include +#ifndef CONFIG_XEN /* Per cpu memory for storing cpu states in case of system crash. */ @@ -340,7 +340,7 @@ Patch-mainline: n/a /* vmcoreinfo stuff */ static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; -@@ -1226,6 +1228,7 @@ static void final_note(u32 *buf) +@@ -1214,6 +1216,7 @@ static void final_note(u32 *buf) memcpy(buf, ¬e, sizeof(note)); } @@ -348,7 +348,7 @@ Patch-mainline: n/a void crash_save_cpu(struct pt_regs *regs, int cpu) { struct elf_prstatus prstatus; -@@ -1251,9 +1254,11 @@ void crash_save_cpu(struct pt_regs *regs +@@ -1239,9 +1242,11 @@ void crash_save_cpu(struct pt_regs *regs &prstatus, sizeof(prstatus)); final_note(buf); } @@ -360,7 +360,7 @@ Patch-mainline: n/a /* Allocate memory for saving cpu registers. 
*/ crash_notes = alloc_percpu(note_buf_t); if (!crash_notes) { -@@ -1261,6 +1266,7 @@ static int __init crash_notes_memory_ini +@@ -1249,6 +1254,7 @@ static int __init crash_notes_memory_ini " states failed\n"); return -ENOMEM; } @@ -368,9 +368,9 @@ Patch-mainline: n/a return 0; } module_init(crash_notes_memory_init) ---- head-2010-05-25.orig/mm/memory.c 2010-04-15 09:44:04.000000000 +0200 -+++ head-2010-05-25/mm/memory.c 2010-04-15 09:51:22.000000000 +0200 -@@ -944,10 +944,12 @@ static unsigned long zap_pte_range(struc +--- head-2011-02-17.orig/mm/memory.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/mm/memory.c 2011-01-31 17:01:49.000000000 +0100 +@@ -960,10 +960,12 @@ static unsigned long zap_pte_range(struc page->index > details->last_index)) continue; } @@ -383,9 +383,9 @@ Patch-mainline: n/a ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); ---- head-2010-05-25.orig/mm/mmap.c 2010-04-29 09:42:36.000000000 +0200 -+++ head-2010-05-25/mm/mmap.c 2010-04-29 09:43:01.000000000 +0200 -@@ -1946,8 +1946,10 @@ static void unmap_region(struct mm_struc +--- head-2011-02-17.orig/mm/mmap.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/mm/mmap.c 2011-01-31 17:01:49.000000000 +0100 +@@ -1928,8 +1928,10 @@ static void unmap_region(struct mm_struc static inline void unmap_vma(struct vm_area_struct *vma) { @@ -396,7 +396,7 @@ Patch-mainline: n/a } /* -@@ -2304,8 +2306,10 @@ void exit_mmap(struct mm_struct *mm) +@@ -2291,8 +2293,10 @@ void exit_mmap(struct mm_struct *mm) arch_exit_mmap(mm); diff --git a/patches.xen/xen3-fixup-kconfig b/patches.xen/xen3-fixup-kconfig index 420b53d..e81a191 100644 --- a/patches.xen/xen3-fixup-kconfig +++ b/patches.xen/xen3-fixup-kconfig @@ -2,9 +2,9 @@ Subject: Fix xen configuration. From: jbeulich@novell.com Patch-mainline: n/a ---- head-2010-03-24.orig/arch/x86/Kconfig 2010-03-24 14:37:43.000000000 +0100 -+++ head-2010-03-24/arch/x86/Kconfig 2010-03-24 15:02:14.000000000 +0100 -@@ -168,6 +168,7 @@ config HAVE_CPUMASK_OF_CPU_MAP +--- head-2011-02-17.orig/arch/x86/Kconfig 2011-01-31 14:42:03.000000000 +0100 ++++ head-2011-02-17/arch/x86/Kconfig 2011-01-31 17:01:38.000000000 +0100 +@@ -181,6 +181,7 @@ config HAVE_CPUMASK_OF_CPU_MAP config ARCH_HIBERNATION_POSSIBLE def_bool y @@ -12,19 +12,63 @@ Patch-mainline: n/a config ARCH_SUSPEND_POSSIBLE def_bool y ---- head-2010-03-24.orig/arch/x86/Kconfig.debug 2010-03-24 14:36:44.000000000 +0100 -+++ head-2010-03-24/arch/x86/Kconfig.debug 2010-03-24 15:02:14.000000000 +0100 -@@ -312,7 +312,7 @@ config DEBUG_STRICT_USER_COPY_CHECKS - - config KDB - bool "Built-in Kernel Debugger support" -- depends on DEBUG_KERNEL -+ depends on DEBUG_KERNEL && !XEN - select KALLSYMS - select KALLSYMS_ALL - help ---- head-2010-03-24.orig/drivers/xen/Kconfig 2010-03-24 14:42:42.000000000 +0100 -+++ head-2010-03-24/drivers/xen/Kconfig 2010-03-24 15:02:14.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/Kconfig.cpu 2011-01-31 14:42:03.000000000 +0100 ++++ head-2011-02-17/arch/x86/Kconfig.cpu 2011-03-03 17:48:58.000000000 +0100 +@@ -8,7 +8,7 @@ choice + + config M386 + bool "386" +- depends on X86_32 && !UML ++ depends on X86_32 && !UML && !XEN + ---help--- + This is the processor type of your CPU. This information is used for + optimizing purposes. 
In order to compile a kernel that can run on +@@ -49,7 +49,7 @@ config M386 + + config M486 + bool "486" +- depends on X86_32 ++ depends on X86_32 && !XEN + ---help--- + Select this for a 486 series processor, either Intel or one of the + compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX, +@@ -58,7 +58,7 @@ config M486 + + config M586 + bool "586/K5/5x86/6x86/6x86MX" +- depends on X86_32 ++ depends on X86_32 && !XEN + ---help--- + Select this for an 586 or 686 series processor such as the AMD K5, + the Cyrix 5x86, 6x86 and 6x86MX. This choice does not +@@ -66,14 +66,14 @@ config M586 + + config M586TSC + bool "Pentium-Classic" +- depends on X86_32 ++ depends on X86_32 && !XEN + ---help--- + Select this for a Pentium Classic processor with the RDTSC (Read + Time Stamp Counter) instruction for benchmarking. + + config M586MMX + bool "Pentium-MMX" +- depends on X86_32 ++ depends on X86_32 && !XEN + ---help--- + Select this for a Pentium with the MMX graphics/multimedia + extended instructions. +@@ -339,7 +339,7 @@ config X86_PPRO_FENCE + + config X86_F00F_BUG + def_bool y +- depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT ++ depends on M586MMX || M586TSC || M586 || M486 || M386 + + config X86_INVD_BUG + def_bool y +--- head-2011-02-17.orig/drivers/xen/Kconfig 2011-01-31 14:42:03.000000000 +0100 ++++ head-2011-02-17/drivers/xen/Kconfig 2011-02-24 14:05:09.000000000 +0100 @@ -22,6 +22,7 @@ config XEN_PRIVILEGED_GUEST config XEN_UNPRIVILEGED_GUEST @@ -33,6 +77,33 @@ Patch-mainline: n/a config XEN_PRIVCMD def_bool y +@@ -44,7 +45,7 @@ config XEN_BACKEND + + config XEN_BLKDEV_BACKEND + tristate "Block-device backend driver" +- depends on XEN_BACKEND ++ depends on BLOCK && XEN_BACKEND + default XEN_BACKEND + help + The block-device backend driver allows the kernel to export its +@@ -53,7 +54,7 @@ config XEN_BLKDEV_BACKEND + + config XEN_BLKDEV_TAP + tristate "Block-device tap backend driver" +- depends on XEN_BACKEND ++ depends on BLOCK && XEN_BACKEND + default XEN_BACKEND + help + The block tap driver is an alternative to the block back driver +@@ -65,7 +66,7 @@ config XEN_BLKDEV_TAP + + config XEN_BLKDEV_TAP2 + tristate "Block-device tap backend driver 2" +- depends on XEN_BACKEND ++ depends on BLOCK && XEN_BACKEND + default XEN_BACKEND + help + The block tap driver is an alternative to the block back driver @@ -116,7 +117,7 @@ config XEN_NETDEV_LOOPBACK config XEN_PCIDEV_BACKEND diff --git a/patches.xen/xen3-fixup-xen b/patches.xen/xen3-fixup-xen index 7ef8f87..fd05605 100644 --- a/patches.xen/xen3-fixup-xen +++ b/patches.xen/xen3-fixup-xen @@ -1,12 +1,12 @@ Subject: Fix Xen build wrt. Xen files coming from mainline. 
-From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1017:948c933f8839) +From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) Patch-mainline: n/a Acked-by: jbeulich@novell.com ---- head-2010-05-12.orig/drivers/xen/Makefile 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/Makefile 2010-01-19 16:01:03.000000000 +0100 -@@ -1,12 +1,28 @@ +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-01-31 15:14:12.000000000 +0100 +@@ -1,22 +1,28 @@ -obj-y += grant-table.o features.o events.o manage.o +obj-y += core/ +obj-y += console/ @@ -17,18 +17,27 @@ Acked-by: jbeulich@novell.com -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_features.o := $(nostackp) - +-obj-$(CONFIG_BLOCK) += biomerge.o -obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o -obj-$(CONFIG_XEN_XENCOMM) += xencomm.o -obj-$(CONFIG_XEN_BALLOON) += balloon.o --obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o +-obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o +-obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o -obj-$(CONFIG_XENFS) += xenfs/ -obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o -\ No newline at end of file +-obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o +-obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o +-obj-$(CONFIG_XEN_DOM0) += pci.o +- +-xen-evtchn-y := evtchn.o +-xen-gntdev-y := gntdev.o +- +-xen-platform-pci-y := platform-pci.o +obj-y += util.o +obj-$(CONFIG_XEN_BALLOON) += balloon/ +obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ +obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ -+obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ ++obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ +obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/ +obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/ +obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/ @@ -46,9 +55,9 @@ Acked-by: jbeulich@novell.com +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) += sfc_netutil/ +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) += sfc_netfront/ +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) += sfc_netback/ ---- head-2010-05-12.orig/drivers/xen/xenbus/Makefile 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/xenbus/Makefile 2010-01-19 16:01:03.000000000 +0100 -@@ -1,7 +1,9 @@ +--- head-2011-03-17.orig/drivers/xen/xenbus/Makefile 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/Makefile 2011-02-02 17:06:11.000000000 +0100 +@@ -1,12 +1,9 @@ -obj-y += xenbus.o +obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o +obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o @@ -60,12 +69,16 @@ Acked-by: jbeulich@novell.com -xenbus-objs += xenbus_probe.o +xenbus_be-objs = +xenbus_be-objs += xenbus_backend_client.o -+ + +-xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o +-xenbus-objs += $(xenbus-be-objs-y) +- +-obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o +xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o +obj-y += $(xenbus-y) $(xenbus-m) +obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_client.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_client.c 2010-01-19 16:01:03.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_client.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_client.c 2011-01-31 15:14:12.000000000 +0100 @@ -31,14 +31,17 @@ */ @@ -91,10 +104,14 @@ Acked-by: jbeulich@novell.com const char *xenbus_strstate(enum xenbus_state state) { -@@ -50,25 +53,13 @@ const char 
*xenbus_strstate(enum xenbus_ +@@ -49,28 +52,14 @@ const char *xenbus_strstate(enum xenbus_ + [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", - [ XenbusStateClosed ] = "Closed", +- [ XenbusStateClosed ] = "Closed", +- [XenbusStateReconfiguring] = "Reconfiguring", +- [XenbusStateReconfigured] = "Reconfigured", ++ [ XenbusStateClosed ] = "Closed", + [ XenbusStateReconfiguring ] = "Reconfiguring", + [ XenbusStateReconfigured ] = "Reconfigured", }; @@ -119,7 +136,7 @@ Acked-by: jbeulich@novell.com int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, -@@ -92,57 +83,26 @@ int xenbus_watch_path(struct xenbus_devi +@@ -94,53 +83,27 @@ int xenbus_watch_path(struct xenbus_devi EXPORT_SYMBOL_GPL(xenbus_watch_path); @@ -173,6 +190,67 @@ Acked-by: jbeulich@novell.com -EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); +EXPORT_SYMBOL_GPL(xenbus_watch_path2); +-static void xenbus_switch_fatal(struct xenbus_device *, int, int, +- const char *, ...); + +-static int +-__xenbus_switch_state(struct xenbus_device *dev, +- enum xenbus_state state, int depth) ++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) + { + /* We check whether the state is currently set to the given value, and + if not, then the state is set. We don't want to unconditionally +@@ -155,59 +118,29 @@ __xenbus_switch_state(struct xenbus_devi + would not get reset if the transaction was aborted. + */ + +- struct xenbus_transaction xbt; + int current_state; +- int err, abort; ++ int err; + + if (state == dev->state) + return 0; + +-again: +- abort = 1; +- +- err = xenbus_transaction_start(&xbt); +- if (err) { +- xenbus_switch_fatal(dev, depth, err, "starting transaction"); +- return 0; +- } +- +- err = xenbus_scanf(xbt, dev->nodename, "state", "%d", ¤t_state); ++ err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ++ ¤t_state); + if (err != 1) +- goto abort; ++ return 0; + +- err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); ++ err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); + if (err) { +- xenbus_switch_fatal(dev, depth, err, "writing new state"); +- goto abort; ++ if (state != XenbusStateClosing) /* Avoid looping */ ++ xenbus_dev_fatal(dev, err, "writing new state"); ++ return err; + } + +- abort = 0; +-abort: +- err = xenbus_transaction_end(xbt, abort); +- if (err) { +- if (err == -EAGAIN && !abort) +- goto again; +- xenbus_switch_fatal(dev, depth, err, "ending transaction"); +- } else +- dev->state = state; ++ dev->state = state; + + return 0; + } -/** - * xenbus_switch_state @@ -183,10 +261,15 @@ Acked-by: jbeulich@novell.com - * Return 0 on success, or -errno on error. On error, the device will switch - * to XenbusStateClosing, and the error will be saved in the store. 
- */ - int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) - { - /* We check whether the state is currently set to the given value, and -@@ -201,13 +161,12 @@ static char *error_path(struct xenbus_de +-int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) +-{ +- return __xenbus_switch_state(dev, state, 0); +-} +- + EXPORT_SYMBOL_GPL(xenbus_switch_state); + + int xenbus_frontend_closed(struct xenbus_device *dev) +@@ -228,13 +161,12 @@ static char *error_path(struct xenbus_de } @@ -203,7 +286,7 @@ Acked-by: jbeulich@novell.com #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); -@@ -224,13 +183,13 @@ static void xenbus_va_dev_error(struct x +@@ -251,13 +183,13 @@ static void xenbus_va_dev_error(struct x path_buffer = error_path(dev); if (path_buffer == NULL) { @@ -219,7 +302,7 @@ Acked-by: jbeulich@novell.com dev->nodename, printf_buffer); goto fail; } -@@ -241,57 +200,32 @@ fail: +@@ -268,74 +200,30 @@ fail: } @@ -232,9 +315,7 @@ Acked-by: jbeulich@novell.com - * Report the given negative errno into the store, along with the given - * formatted message. - */ --void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) -+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, -+ ...) + void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; @@ -256,9 +337,7 @@ Acked-by: jbeulich@novell.com - * closedown of this driver and its peer. - */ --void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) -+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, -+ ...) + void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; @@ -271,6 +350,23 @@ Acked-by: jbeulich@novell.com } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); +-/** +- * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps +- * avoiding recursion within xenbus_switch_state. +- */ +-static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err, +- const char *fmt, ...) +-{ +- va_list ap; +- +- va_start(ap, fmt); +- xenbus_va_dev_error(dev, err, fmt, ap); +- va_end(ap); + +- if (!depth) +- __xenbus_switch_state(dev, XenbusStateClosing, 1); +-} +- -/** - * xenbus_grant_ring - * @dev: xenbus device @@ -280,11 +376,10 @@ Acked-by: jbeulich@novell.com - * 0 on success, or -errno on error. On error, the device will switch to - * XenbusStateClosing, and the error will be saved in the store. 
- */ -+ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); -@@ -302,18 +236,12 @@ int xenbus_grant_ring(struct xenbus_devi +@@ -346,18 +234,12 @@ int xenbus_grant_ring(struct xenbus_devi EXPORT_SYMBOL_GPL(xenbus_grant_ring); @@ -304,7 +399,7 @@ Acked-by: jbeulich@novell.com alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, -@@ -328,36 +256,6 @@ int xenbus_alloc_evtchn(struct xenbus_de +@@ -372,36 +254,6 @@ int xenbus_alloc_evtchn(struct xenbus_de EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); @@ -341,7 +436,7 @@ Acked-by: jbeulich@novell.com int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; -@@ -374,189 +272,6 @@ int xenbus_free_evtchn(struct xenbus_dev +@@ -418,189 +270,6 @@ int xenbus_free_evtchn(struct xenbus_dev EXPORT_SYMBOL_GPL(xenbus_free_evtchn); @@ -531,8 +626,8 @@ Acked-by: jbeulich@novell.com enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_comms.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_comms.c 2010-01-19 16:01:03.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_comms.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 15:14:12.000000000 +0100 @@ -34,25 +34,55 @@ #include #include @@ -657,8 +752,8 @@ Acked-by: jbeulich@novell.com + return 0; } ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_comms.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_comms.h 2010-01-19 16:01:03.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_comms.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_comms.h 2011-01-31 15:14:12.000000000 +0100 @@ -43,4 +43,20 @@ int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; @@ -680,8 +775,8 @@ Acked-by: jbeulich@novell.com +} + #endif /* _XENBUS_COMMS_H */ ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_probe.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_probe.c 2010-01-26 09:08:16.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:51:15.000000000 +0100 @@ -4,6 +4,7 @@ * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard @@ -699,7 +794,7 @@ Acked-by: jbeulich@novell.com #include #include -@@ -40,32 +41,37 @@ +@@ -40,38 +41,46 @@ #include #include #include @@ -723,11 +818,12 @@ Acked-by: jbeulich@novell.com #include -#include -#include +- +#include +#include +#include +#ifdef MODULE -+#include + #include +#endif #include "xenbus_comms.h" @@ -738,50 +834,99 @@ Acked-by: jbeulich@novell.com +#endif int xen_store_evtchn; --EXPORT_SYMBOL(xen_store_evtchn); +-EXPORT_SYMBOL_GPL(xen_store_evtchn); - struct xenstore_domain_interface *xen_store_interface; +-EXPORT_SYMBOL_GPL(xen_store_interface); + static unsigned long xen_store_mfn; +extern struct mutex xenwatch_mutex; + static BLOCKING_NOTIFIER_HEAD(xenstore_chain); - static void wait_for_devices(struct xenbus_driver *xendrv); -@@ -74,9 +80,6 @@ static int xenbus_probe_frontend(const c - - static void xenbus_dev_shutdown(struct device *_dev); - --static int 
xenbus_dev_suspend(struct device *dev, pm_message_t state); --static int xenbus_dev_resume(struct device *dev); -- ++static void wait_for_devices(struct xenbus_driver *xendrv); ++ ++static int xenbus_probe_frontend(const char *type, const char *name); ++ ++static void xenbus_dev_shutdown(struct device *_dev); ++ /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) -@@ -98,16 +101,6 @@ int xenbus_match(struct device *_dev, st +@@ -92,7 +101,24 @@ int xenbus_match(struct device *_dev, st + return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } +-EXPORT_SYMBOL_GPL(xenbus_match); ++ ++/* device// => - */ ++static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) ++{ ++ nodename = strchr(nodename, '/'); ++ if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { ++ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); ++ return -EINVAL; ++ } ++ ++ strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); ++ if (!strchr(bus_id, '/')) { ++ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); ++ return -EINVAL; ++ } ++ *strchr(bus_id, '/') = '-'; ++ return 0; ++} + + + static void free_otherend_details(struct xenbus_device *dev) +@@ -112,30 +138,7 @@ static void free_otherend_watch(struct x + } + --static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env) +-static int talk_to_otherend(struct xenbus_device *dev) -{ -- struct xenbus_device *dev = to_xenbus_device(_dev); +- struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); - -- if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) -- return -ENOMEM; +- free_otherend_watch(dev); +- free_otherend_details(dev); - -- return 0; +- return drv->read_otherend_details(dev); +-} +- +- +- +-static int watch_otherend(struct xenbus_device *dev) +-{ +- struct xen_bus_type *bus = +- container_of(dev->dev.bus, struct xen_bus_type, bus); +- +- return xenbus_watch_pathfmt(dev, &dev->otherend_watch, +- bus->otherend_changed, +- "%s/%s", dev->otherend, "state"); -} - - /* device// => - */ - static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) +- +-int xenbus_read_otherend_details(struct xenbus_device *xendev, ++int read_otherend_details(struct xenbus_device *xendev, + char *id_node, char *path_node) { -@@ -176,9 +169,30 @@ static int read_backend_details(struct x - return read_otherend_details(xendev, "backend-id", "backend"); + int err = xenbus_gather(XBT_NIL, xendev->nodename, +@@ -160,11 +163,62 @@ int xenbus_read_otherend_details(struct + + return 0; } +-EXPORT_SYMBOL_GPL(xenbus_read_otherend_details); --static struct device_attribute xenbus_dev_attrs[] = { -- __ATTR_NULL --}; +-void xenbus_otherend_changed(struct xenbus_watch *watch, +- const char **vec, unsigned int len, +- int ignore_on_shutdown) ++ ++static int read_backend_details(struct xenbus_device *xendev) ++{ ++ return read_otherend_details(xendev, "backend-id", "backend"); ++} ++ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) +static int xenbus_uevent_frontend(struct device *dev, char **envp, + int num_envp, char *buffer, int buffer_size) @@ -806,25 +951,15 @@ Acked-by: jbeulich@novell.com + return 0; +} +#endif - - /* Bus type for frontend drivers. 
*/ - static struct xen_bus_type xenbus_frontend = { -@@ -186,17 +200,19 @@ static struct xen_bus_type xenbus_fronte - .levels = 2, /* device/type/ */ - .get_bus_id = frontend_bus_id, - .probe = xenbus_probe_frontend, ++ ++/* Bus type for frontend drivers. */ ++static struct xen_bus_type xenbus_frontend = { ++ .root = "device", ++ .levels = 2, /* device/type/ */ ++ .get_bus_id = frontend_bus_id, ++ .probe = xenbus_probe_frontend, + .error = -ENODEV, - .bus = { -- .name = "xen", -- .match = xenbus_match, -- .uevent = xenbus_uevent, -- .probe = xenbus_dev_probe, -- .remove = xenbus_dev_remove, -- .shutdown = xenbus_dev_shutdown, -- .dev_attrs = xenbus_dev_attrs, -- -- .suspend = xenbus_dev_suspend, -- .resume = xenbus_dev_resume, ++ .bus = { + .name = "xen", + .match = xenbus_match, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) @@ -836,10 +971,15 @@ Acked-by: jbeulich@novell.com + }, + .dev = { + .bus_id = "xen", - }, - }; - -@@ -213,17 +229,16 @@ static void otherend_changed(struct xenb ++ }, ++}; ++ ++static void otherend_changed(struct xenbus_watch *watch, ++ const char **vec, unsigned int len) + { + struct xenbus_device *dev = + container_of(watch, struct xenbus_device, otherend_watch); +@@ -176,31 +230,54 @@ void xenbus_otherend_changed(struct xenb if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { @@ -861,7 +1001,14 @@ Acked-by: jbeulich@novell.com /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. -@@ -237,6 +252,7 @@ static void otherend_changed(struct xenb + */ + if (system_state > SYSTEM_RUNNING) { +- if (ignore_on_shutdown && (state == XenbusStateClosing)) ++ struct xen_bus_type *bus = bus; ++ bus = container_of(dev->dev.bus, struct xen_bus_type, bus); ++ /* If we're frontend, drive the state machine to Closed. */ ++ /* This should cause the backend to release our resources. 
*/ ++ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } @@ -869,18 +1016,32 @@ Acked-by: jbeulich@novell.com if (drv->otherend_changed) drv->otherend_changed(dev, state); -@@ -256,8 +272,8 @@ static int talk_to_otherend(struct xenbu - - static int watch_otherend(struct xenbus_device *dev) - { -- return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed, -- "%s/%s", dev->otherend, "state"); + } +-EXPORT_SYMBOL_GPL(xenbus_otherend_changed); ++ ++ ++static int talk_to_otherend(struct xenbus_device *dev) ++{ ++ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); ++ ++ free_otherend_watch(dev); ++ free_otherend_details(dev); ++ ++ return drv->read_otherend_details(dev); ++} ++ ++ ++ ++static int watch_otherend(struct xenbus_device *dev) ++{ + return xenbus_watch_path2(dev, dev->otherend, "state", + &dev->otherend_watch, otherend_changed); - } - ++} ++ -@@ -283,8 +299,9 @@ int xenbus_dev_probe(struct device *_dev + int xenbus_dev_probe(struct device *_dev) + { +@@ -224,8 +301,9 @@ int xenbus_dev_probe(struct device *_dev err = talk_to_otherend(dev); if (err) { @@ -892,7 +1053,7 @@ Acked-by: jbeulich@novell.com return err; } -@@ -294,7 +311,8 @@ int xenbus_dev_probe(struct device *_dev +@@ -235,7 +313,8 @@ int xenbus_dev_probe(struct device *_dev err = watch_otherend(dev); if (err) { @@ -902,7 +1063,28 @@ Acked-by: jbeulich@novell.com dev->nodename); return err; } -@@ -330,43 +348,64 @@ static void xenbus_dev_shutdown(struct d +@@ -244,9 +323,8 @@ int xenbus_dev_probe(struct device *_dev + fail: + xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); + xenbus_switch_state(dev, XenbusStateClosed); +- return err; ++ return -ENODEV; + } +-EXPORT_SYMBOL_GPL(xenbus_dev_probe); + + int xenbus_dev_remove(struct device *_dev) + { +@@ -264,44 +342,81 @@ int xenbus_dev_remove(struct device *_de + xenbus_switch_state(dev, XenbusStateClosed); + return 0; + } +-EXPORT_SYMBOL_GPL(xenbus_dev_remove); + +-void xenbus_dev_shutdown(struct device *_dev) ++static void xenbus_dev_shutdown(struct device *_dev) + { + struct xenbus_device *dev = to_xenbus_device(_dev); + unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); @@ -934,6 +1116,7 @@ Acked-by: jbeulich@novell.com out: put_device(&dev->dev); } +-EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); int xenbus_register_driver_common(struct xenbus_driver *drv, - struct xen_bus_type *bus, @@ -958,38 +1141,35 @@ Acked-by: jbeulich@novell.com + drv->driver.remove = xenbus_dev_remove; + drv->driver.shutdown = xenbus_dev_shutdown; +#endif - -- return driver_register(&drv->driver); ++ + mutex_lock(&xenwatch_mutex); + ret = driver_register(&drv->driver); + mutex_unlock(&xenwatch_mutex); + return ret; - } - --int __xenbus_register_frontend(struct xenbus_driver *drv, -- struct module *owner, const char *mod_name) ++} ++ +int xenbus_register_frontend(struct xenbus_driver *drv) - { - int ret; - - drv->read_otherend_details = read_backend_details; - -- ret = xenbus_register_driver_common(drv, &xenbus_frontend, -- owner, mod_name); ++{ ++ int ret; ++ ++ drv->read_otherend_details = read_backend_details; ++ + ret = xenbus_register_driver_common(drv, &xenbus_frontend); - if (ret) - return ret; ++ if (ret) ++ return ret; -@@ -375,7 +414,7 @@ int __xenbus_register_frontend(struct xe - - return 0; +- return driver_register(&drv->driver); ++ /* If this driver is loaded as a module wait for devices to attach. 
*/ ++ wait_for_devices(drv); ++ ++ return 0; } --EXPORT_SYMBOL_GPL(__xenbus_register_frontend); +-EXPORT_SYMBOL_GPL(xenbus_register_driver_common); +EXPORT_SYMBOL_GPL(xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { -@@ -453,31 +492,30 @@ static void xenbus_dev_release(struct de +@@ -379,31 +494,30 @@ static void xenbus_dev_release(struct de } static ssize_t xendev_show_nodename(struct device *dev, @@ -1029,7 +1209,7 @@ Acked-by: jbeulich@novell.com int err; struct xenbus_device *xendev; size_t stringlen; -@@ -485,6 +523,9 @@ int xenbus_probe_node(struct xen_bus_typ +@@ -411,6 +525,9 @@ int xenbus_probe_node(struct xen_bus_typ enum xenbus_state state = xenbus_read_driver_state(nodename); @@ -1039,7 +1219,7 @@ Acked-by: jbeulich@novell.com if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ -@@ -509,15 +550,14 @@ int xenbus_probe_node(struct xen_bus_typ +@@ -435,15 +552,14 @@ int xenbus_probe_node(struct xen_bus_typ xendev->devicetype = tmpstring; init_completion(&xendev->down); @@ -1057,7 +1237,7 @@ Acked-by: jbeulich@novell.com /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) -@@ -525,22 +565,15 @@ int xenbus_probe_node(struct xen_bus_typ +@@ -451,28 +567,40 @@ int xenbus_probe_node(struct xen_bus_typ err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) @@ -1084,20 +1264,42 @@ Acked-by: jbeulich@novell.com device_unregister(&xendev->dev); fail: kfree(xendev); -@@ -553,8 +586,10 @@ static int xenbus_probe_frontend(const c - char *nodename; - int err; - -- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", -- xenbus_frontend.root, type, name); + return err; + } +-EXPORT_SYMBOL_GPL(xenbus_probe_node); ++ ++/* device// */ ++static int xenbus_probe_frontend(const char *type, const char *name) ++{ ++ char *nodename; ++ int err; ++ + if (!strcmp(type, "console")) + return 0; + + nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); - if (!nodename) - return -ENOMEM; ++ if (!nodename) ++ return -ENOMEM; ++ ++ DPRINTK("%s", nodename); ++ ++ err = xenbus_probe_node(&xenbus_frontend, type, nodename); ++ kfree(nodename); ++ return err; ++} -@@ -591,6 +626,9 @@ int xenbus_probe_devices(struct xen_bus_ + static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) + { +@@ -486,7 +614,7 @@ static int xenbus_probe_device_type(stru + return PTR_ERR(dir); + + for (i = 0; i < dir_n; i++) { +- err = bus->probe(bus, type, dir[i]); ++ err = bus->probe(type, dir[i]); + if (err) + break; + } +@@ -501,6 +629,9 @@ int xenbus_probe_devices(struct xen_bus_ char **dir; unsigned int i, dir_n; @@ -1107,7 +1309,15 @@ Acked-by: jbeulich@novell.com dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); -@@ -627,15 +665,15 @@ static int strsep_len(const char *str, c +@@ -514,7 +645,6 @@ int xenbus_probe_devices(struct xen_bus_ + kfree(dir); + return err; + } +-EXPORT_SYMBOL_GPL(xenbus_probe_devices); + + static unsigned int char_count(const char *str, char c) + { +@@ -539,15 +669,15 @@ static int strsep_len(const char *str, c return (len == 0) ? 
i : -ERANGE; } @@ -1126,43 +1336,54 @@ Acked-by: jbeulich@novell.com exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { -@@ -663,14 +701,13 @@ void xenbus_dev_changed(const char *node +@@ -575,46 +705,81 @@ void xenbus_dev_changed(const char *node kfree(root); } -EXPORT_SYMBOL_GPL(xenbus_dev_changed); - static void frontend_changed(struct xenbus_watch *watch, - const char **vec, unsigned int len) - { - DPRINTK(""); - -- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); +-int xenbus_dev_suspend(struct device *dev, pm_message_t state) ++static void frontend_changed(struct xenbus_watch *watch, ++ const char **vec, unsigned int len) ++{ ++ DPRINTK(""); ++ + dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); - } - - /* We watch for devices appearing and vanishing. */ -@@ -679,7 +716,7 @@ static struct xenbus_watch fe_watch = { - .callback = frontend_changed, - }; - --static int xenbus_dev_suspend(struct device *dev, pm_message_t state) ++} ++ ++/* We watch for devices appearing and vanishing. */ ++static struct xenbus_watch fe_watch = { ++ .node = "device", ++ .callback = frontend_changed, ++}; ++ +static int suspend_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; -@@ -692,14 +729,35 @@ static int xenbus_dev_suspend(struct dev +- struct xenbus_device *xdev +- = container_of(dev, struct xenbus_device, dev); ++ struct xenbus_device *xdev; + +- DPRINTK("%s", xdev->nodename); ++ DPRINTK(""); + + if (dev->driver == NULL) + return 0; drv = to_xenbus_driver(dev->driver); - xdev = container_of(dev, struct xenbus_device, dev); ++ xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) - err = drv->suspend(xdev, state); + err = drv->suspend(xdev); -+ if (err) -+ printk(KERN_WARNING + if (err) + printk(KERN_WARNING +- "xenbus: suspend %s failed: %i\n", dev_name(dev), err); + "xenbus: suspend %s failed: %i\n", dev->bus_id, err); -+ return 0; -+} -+ + return 0; + } +-EXPORT_SYMBOL_GPL(xenbus_dev_suspend); + +-int xenbus_dev_resume(struct device *dev) +static int suspend_cancel_dev(struct device *dev, void *data) +{ + int err = 0; @@ -1177,20 +1398,31 @@ Acked-by: jbeulich@novell.com + xdev = container_of(dev, struct xenbus_device, dev); + if (drv->suspend_cancel) + err = drv->suspend_cancel(xdev); - if (err) - printk(KERN_WARNING -- "xenbus: suspend %s failed: %i\n", dev_name(dev), err); ++ if (err) ++ printk(KERN_WARNING + "xenbus: suspend_cancel %s failed: %i\n", + dev->bus_id, err); - return 0; - } - --static int xenbus_dev_resume(struct device *dev) ++ return 0; ++} ++ +static int resume_dev(struct device *dev, void *data) { int err; struct xenbus_driver *drv; -@@ -717,7 +775,7 @@ static int xenbus_dev_resume(struct devi +- struct xenbus_device *xdev +- = container_of(dev, struct xenbus_device, dev); ++ struct xenbus_device *xdev; + +- DPRINTK("%s", xdev->nodename); ++ DPRINTK(""); + + if (dev->driver == NULL) + return 0; ++ + drv = to_xenbus_driver(dev->driver); ++ xdev = container_of(dev, struct xenbus_device, dev); ++ + err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", @@ -1199,7 +1431,7 @@ Acked-by: jbeulich@novell.com return err; } -@@ -728,7 +786,7 @@ static int xenbus_dev_resume(struct devi +@@ -625,7 +790,7 @@ int xenbus_dev_resume(struct device *dev if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", @@ -1208,7 +1440,7 @@ Acked-by: jbeulich@novell.com return err; } } -@@ -737,22 +795,52 @@ static int xenbus_dev_resume(struct devi +@@ -634,23 +799,52 @@ int 
xenbus_dev_resume(struct device *dev if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " @@ -1219,7 +1451,8 @@ Acked-by: jbeulich@novell.com return 0; } - +-EXPORT_SYMBOL_GPL(xenbus_dev_resume); ++ +void xenbus_suspend(void) +{ + DPRINTK(""); @@ -1249,7 +1482,7 @@ Acked-by: jbeulich@novell.com + xenbus_backend_resume(suspend_cancel_dev); +} +EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); -+ + /* A flag to determine if xenstored is 'ready' (i.e. has started) */ -int xenstored_ready = 0; +atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); @@ -1264,7 +1497,7 @@ Acked-by: jbeulich@novell.com ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); -@@ -767,9 +855,10 @@ void unregister_xenstore_notifier(struct +@@ -665,50 +859,167 @@ void unregister_xenstore_notifier(struct } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); @@ -1272,17 +1505,26 @@ Acked-by: jbeulich@novell.com + +void xenbus_probe(void *unused) { -- BUG_ON((xenstored_ready <= 0)); +- xenstored_ready = 1; + BUG_ON(!is_xenstored_ready()); ++ ++ /* Enumerate devices in xenstore and watch for changes. */ ++ xenbus_probe_devices(&xenbus_frontend); ++ register_xenbus_watch(&fe_watch); ++ xenbus_backend_probe_and_watch(); - /* Enumerate devices in xenstore and watch for changes. */ - xenbus_probe_devices(&xenbus_frontend); -@@ -780,71 +869,252 @@ void xenbus_probe(struct work_struct *un + /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } +-EXPORT_SYMBOL_GPL(xenbus_probe); --static int __init xenbus_probe_init(void) -+ +-static int __init xenbus_probe_initcall(void) +-{ +- if (!xen_domain()) +- return -ENODEV; + +- if (xen_initial_domain() || xen_hvm_domain()) +- return 0; +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) +static struct file_operations xsd_kva_fops; +static struct proc_dir_entry *xsd_kva_intf; @@ -1320,10 +1562,12 @@ Acked-by: jbeulich@novell.com + if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), + size, vma->vm_page_prot)) + return -EAGAIN; -+ -+ return 0; -+} -+ + +- xenbus_probe(NULL); + return 0; + } + +-device_initcall(xenbus_probe_initcall); +static int xsd_kva_read(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ @@ -1364,7 +1608,8 @@ Acked-by: jbeulich@novell.com + remove_xen_proc_entry("xsd_kva"); + remove_xen_proc_entry("xsd_port"); +#endif -+ + +-static int __init xenbus_init(void) + rc = xb_free_port(xen_store_evtchn); + if (rc != 0) + goto fail0; @@ -1404,24 +1649,17 @@ Acked-by: jbeulich@novell.com +static int xenbus_probe_init(void) { int err = 0; -+ unsigned long page = 0; + unsigned long page = 0; DPRINTK(""); - err = -ENODEV; - if (!xen_domain()) -- goto out_error; +- return err; + if (!is_running_on_xen()) + return -ENODEV; - - /* Register ourselves with the kernel bus subsystem */ -- err = bus_register(&xenbus_frontend.bus); -- if (err) -- goto out_error; -- -- err = xenbus_backend_bus_register(); -- if (err) -- goto out_unreg_front; ++ ++ /* Register ourselves with the kernel bus subsystem */ + xenbus_frontend.error = bus_register(&xenbus_frontend.bus); + if (xenbus_frontend.error) + printk(KERN_WARNING @@ -1433,31 +1671,34 @@ Acked-by: jbeulich@novell.com * Domain0 doesn't have a store_evtchn or store_mfn yet. */ - if (xen_initial_domain()) { -- /* dom0 not yet supported */ + if (is_initial_xendomain()) { -+ struct evtchn_alloc_unbound alloc_unbound; -+ -+ /* Allocate page. 
*/ -+ page = get_zeroed_page(GFP_KERNEL); -+ if (!page) + struct evtchn_alloc_unbound alloc_unbound; + + /* Allocate Xenstore page */ + page = get_zeroed_page(GFP_KERNEL); + if (!page) +- goto out_error; + return -ENOMEM; -+ -+ xen_store_mfn = xen_start_info->store_mfn = -+ pfn_to_mfn(virt_to_phys((void *)page) >> -+ PAGE_SHIFT); -+ -+ /* Next allocate a local port which xenstored can bind to */ -+ alloc_unbound.dom = DOMID_SELF; + + xen_store_mfn = xen_start_info->store_mfn = + pfn_to_mfn(virt_to_phys((void *)page) >> +@@ -716,63 +1027,226 @@ static int __init xenbus_init(void) + + /* Next allocate a local port which xenstored can bind to */ + alloc_unbound.dom = DOMID_SELF; +- alloc_unbound.remote_dom = 0; + alloc_unbound.remote_dom = DOMID_SELF; -+ -+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, -+ &alloc_unbound); -+ if (err == -ENOSYS) + + err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, + &alloc_unbound); + if (err == -ENOSYS) +- goto out_error; + goto err; -+ BUG_ON(err); -+ xen_store_evtchn = xen_start_info->store_evtchn = -+ alloc_unbound.port; -+ + + BUG_ON(err); + xen_store_evtchn = xen_start_info->store_evtchn = + alloc_unbound.port; + +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) + /* And finally publish the above info in /proc/xen */ + xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); @@ -1472,13 +1713,29 @@ Acked-by: jbeulich@novell.com + if (xsd_port_intf) + xsd_port_intf->read_proc = xsd_port_read; +#endif -+ xen_store_interface = mfn_to_virt(xen_store_mfn); + xen_store_interface = mfn_to_virt(xen_store_mfn); } else { -- xenstored_ready = 1; +- if (xen_hvm_domain()) { +- uint64_t v = 0; +- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); +- if (err) +- goto out_error; +- xen_store_evtchn = (int)v; +- err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); +- if (err) +- goto out_error; +- xen_store_mfn = (unsigned long)v; +- xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); +- } else { +- xen_store_evtchn = xen_start_info->store_evtchn; +- xen_store_mfn = xen_start_info->store_mfn; +- xen_store_interface = mfn_to_virt(xen_store_mfn); +- xenstored_ready = 1; +- } + atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); +#ifdef CONFIG_XEN - xen_store_evtchn = xen_start_info->store_evtchn; - xen_store_mfn = xen_start_info->store_mfn; ++ xen_store_evtchn = xen_start_info->store_evtchn; ++ xen_store_mfn = xen_start_info->store_mfn; + xen_store_interface = mfn_to_virt(xen_store_mfn); +#else + xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); @@ -1491,21 +1748,18 @@ Acked-by: jbeulich@novell.com + if (err) + goto err; } -- xen_store_interface = mfn_to_virt(xen_store_mfn); -+ -+ xenbus_dev_init(); ++ xenbus_dev_init(); ++ /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); -- goto out_unreg_back; +- goto out_error; + goto err; - } - -- if (!xen_initial_domain()) -- xenbus_probe(NULL); ++ } ++ + /* Register ourselves with the kernel device subsystem */ + if (!xenbus_frontend.error) { + xenbus_frontend.error = device_register(&xenbus_frontend.dev); @@ -1515,185 +1769,560 @@ Acked-by: jbeulich@novell.com + "XENBUS: Error registering frontend device: %i\n", + xenbus_frontend.error); + } -+ } + } + xenbus_backend_device_register(); -#ifdef CONFIG_XEN_COMPAT_XENFS -- /* -- * Create xenfs mountpoint in /proc for compatibility with -- * utilities that expect to find "xenbus" under "/proc/xen". 
-- */ -- proc_mkdir("xen", NULL); --#endif + if (!is_initial_xendomain()) + xenbus_probe(NULL); - - return 0; - -- out_unreg_back: -- xenbus_backend_bus_unregister(); ++ ++ return 0; ++ + err: -+ if (page) -+ free_page(page); - -- out_unreg_front: -- bus_unregister(&xenbus_frontend.bus); -+ /* + /* +- * Create xenfs mountpoint in /proc for compatibility with +- * utilities that expect to find "xenbus" under "/proc/xen". + * Do not unregister the xenbus front/backend buses here. The buses + * must exist because front/backend drivers will use them when they are + * registered. + */ +- proc_mkdir("xen", NULL); ++ ++ if (page != 0) ++ free_page(page); ++ return err; ++} ++ ++#ifdef CONFIG_XEN ++postcore_initcall(xenbus_probe_init); ++MODULE_LICENSE("Dual BSD/GPL"); ++#else ++int xenbus_init(void) ++{ ++ return xenbus_probe_init(); ++} + #endif + ++static int is_device_connecting(struct device *dev, void *data) ++{ ++ struct xenbus_device *xendev = to_xenbus_device(dev); ++ struct device_driver *drv = data; ++ struct xenbus_driver *xendrv; ++ ++ /* ++ * A device with no driver will never connect. We care only about ++ * devices which should currently be in the process of connecting. + */ ++ if (!dev->driver) ++ return 0; ++ ++ /* Is this search limited to a particular driver? */ ++ if (drv && (dev->driver != drv)) ++ return 0; ++ ++ xendrv = to_xenbus_driver(dev->driver); ++ return (xendev->state < XenbusStateConnected || ++ (xendev->state == XenbusStateConnected && ++ xendrv->is_ready && !xendrv->is_ready(xendev))); ++} ++ ++static int exists_connecting_device(struct device_driver *drv) ++{ ++ if (xenbus_frontend.error) ++ return xenbus_frontend.error; ++ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, ++ is_device_connecting); ++} ++ ++static int print_device_status(struct device *dev, void *data) ++{ ++ struct xenbus_device *xendev = to_xenbus_device(dev); ++ struct device_driver *drv = data; ++ struct xenbus_driver *xendrv; ++ ++ /* Is this operation limited to a particular driver? */ ++ if (drv && (dev->driver != drv)) ++ return 0; ++ ++ if (!dev->driver) { ++ /* Information only: is this too noisy? */ ++ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", ++ xendev->nodename); ++ return 0; ++ } ++ ++ if (xendev->state < XenbusStateConnected) { ++ enum xenbus_state rstate = XenbusStateUnknown; ++ if (xendev->otherend) ++ rstate = xenbus_read_driver_state(xendev->otherend); ++ printk(KERN_WARNING "XENBUS: Timeout connecting " ++ "to device: %s (local state %d, remote state %d)\n", ++ xendev->nodename, xendev->state, rstate); ++ } ++ ++ xendrv = to_xenbus_driver(dev->driver); ++ if (xendrv->is_ready && !xendrv->is_ready(xendev)) ++ printk(KERN_WARNING "XENBUS: Device not ready: %s\n", ++ xendev->nodename); ++ + return 0; ++} + +- out_error: +- if (page != 0) +- free_page(page); ++/* We only wait for device setup after most initcalls have run. */ ++static int ready_to_wait_for_devices; + +- return err; ++/* ++ * On a 5-minute timeout, wait for all devices currently configured. We need ++ * to do this to guarantee that the filesystems and / or network devices ++ * needed for boot are available, before we can allow the boot to proceed. ++ * ++ * This needs to be on a late_initcall, to happen after the frontend device ++ * drivers have been initialised, but before the root fs is mounted. ++ * ++ * A possible improvement here would be to have the tools add a per-device ++ * flag to the store entry, indicating whether it is needed at boot time. 
++ * This would allow people who knew what they were doing to accelerate their ++ * boot slightly, but of course needs tools or manual intervention to set up ++ * those flags correctly. ++ */ ++static void wait_for_devices(struct xenbus_driver *xendrv) ++{ ++ unsigned long start = jiffies; ++ struct device_driver *drv = xendrv ? &xendrv->driver : NULL; ++ unsigned int seconds_waited = 0; ++ ++ if (!ready_to_wait_for_devices || !is_running_on_xen()) ++ return; ++ ++ while (exists_connecting_device(drv)) { ++ if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { ++ if (!seconds_waited) ++ printk(KERN_WARNING "XENBUS: Waiting for " ++ "devices to initialise: "); ++ seconds_waited += 5; ++ printk("%us...", 300 - seconds_waited); ++ if (seconds_waited == 300) ++ break; ++ } ++ ++ schedule_timeout_interruptible(HZ/10); ++ } ++ ++ if (seconds_waited) ++ printk("\n"); ++ ++ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, ++ print_device_status); + } + +-postcore_initcall(xenbus_init); ++#ifndef MODULE ++static int __init boot_wait_for_devices(void) ++{ ++ if (!xenbus_frontend.error) { ++ ready_to_wait_for_devices = 1; ++ wait_for_devices(NULL); ++ } ++ return 0; ++} + +-MODULE_LICENSE("GPL"); ++late_initcall(boot_wait_for_devices); ++#endif ++ ++int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) ++{ ++ return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); ++} ++EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 16:11:24.000000000 +0100 +@@ -34,44 +34,47 @@ + #ifndef _XENBUS_PROBE_H + #define _XENBUS_PROBE_H + ++#ifndef BUS_ID_SIZE + #define XEN_BUS_ID_SIZE 20 ++#else ++#define XEN_BUS_ID_SIZE BUS_ID_SIZE ++#endif ++ ++#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) ++extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); ++extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); ++extern void xenbus_backend_probe_and_watch(void); ++extern void xenbus_backend_bus_register(void); ++extern void xenbus_backend_device_register(void); ++#else ++static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} ++static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} ++static inline void xenbus_backend_probe_and_watch(void) {} ++static inline void xenbus_backend_bus_register(void) {} ++static inline void xenbus_backend_device_register(void) {} ++#endif + + struct xen_bus_type + { + char *root; ++ int error; + unsigned int levels; + int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); +- int (*probe)(struct xen_bus_type *bus, const char *type, +- const char *dir); +- void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, +- unsigned int len); ++ int (*probe)(const char *type, const char *dir); + struct bus_type bus; ++ struct device dev; + }; + + extern int xenbus_match(struct device *_dev, struct device_driver *_drv); + extern int xenbus_dev_probe(struct device *_dev); + extern int xenbus_dev_remove(struct device *_dev); + extern int xenbus_register_driver_common(struct xenbus_driver *drv, +- struct xen_bus_type *bus, +- struct module *owner, +- const char *mod_name); ++ struct xen_bus_type *bus); + extern int xenbus_probe_node(struct xen_bus_type *bus, + const char *type, + const char *nodename); + extern int xenbus_probe_devices(struct xen_bus_type *bus); + +-extern void 
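A small sketch of the is_ready() hook consulted by the connect-wait logic in this file: a frontend can export it so the boot-time wait keeps polling until the device is genuinely usable, not merely XenbusStateConnected. The demofront_* names and the ring_ok field are invented.

#include <linux/device.h>
#include <xen/xenbus.h>

struct demofront_info {
        int ring_ok;            /* set once the shared ring is mapped */
};

static int demofront_is_ready(struct xenbus_device *dev)
{
        struct demofront_info *info = dev_get_drvdata(&dev->dev);

        return info && info->ring_ok;
}

/* wired up with  .is_ready = demofront_is_ready  in the xenbus_driver */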
xenbus_dev_changed(const char *node, struct xen_bus_type *bus); +- +-extern void xenbus_dev_shutdown(struct device *_dev); +- +-extern int xenbus_dev_suspend(struct device *dev, pm_message_t state); +-extern int xenbus_dev_resume(struct device *dev); +- +-extern void xenbus_otherend_changed(struct xenbus_watch *watch, +- const char **vec, unsigned int len, +- int ignore_on_shutdown); +- +-extern int xenbus_read_otherend_details(struct xenbus_device *xendev, +- char *id_node, char *path_node); ++extern void dev_changed(const char *node, struct xen_bus_type *bus); + + #endif +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 14:49:58.000000000 +0100 +@@ -33,7 +33,7 @@ + + #define DPRINTK(fmt, args...) \ + pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ +- __func__, __LINE__, ##args) ++ __FUNCTION__, __LINE__, ##args) + + #include + #include +@@ -43,16 +43,35 @@ + #include + #include + ++#include + #include ++#include + #include +-#include + #include + #include ++#include ++#include + #include + + #include "xenbus_comms.h" + #include "xenbus_probe.h" + ++#ifdef HAVE_XEN_PLATFORM_COMPAT_H ++#include ++#endif ++ ++static int xenbus_uevent_backend(struct device *dev, char **envp, ++ int num_envp, char *buffer, int buffer_size); ++static int xenbus_probe_backend(const char *type, const char *domid); ++ ++extern int read_otherend_details(struct xenbus_device *xendev, ++ char *id_node, char *path_node); ++ ++static int read_frontend_details(struct xenbus_device *xendev) ++{ ++ return read_otherend_details(xendev, "frontend-id", "frontend"); ++} ++ + /* backend/// => -- */ + static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) + { +@@ -90,12 +109,32 @@ static int backend_bus_id(char bus_id[XE + return 0; + } + +-static int xenbus_uevent_backend(struct device *dev, +- struct kobj_uevent_env *env) ++static struct xen_bus_type xenbus_backend = { ++ .root = "backend", ++ .levels = 3, /* backend/type// */ ++ .get_bus_id = backend_bus_id, ++ .probe = xenbus_probe_backend, ++ .error = -ENODEV, ++ .bus = { ++ .name = "xen-backend", ++ .match = xenbus_match, ++ .probe = xenbus_dev_probe, ++ .remove = xenbus_dev_remove, ++// .shutdown = xenbus_dev_shutdown, ++ .uevent = xenbus_uevent_backend, ++ }, ++ .dev = { ++ .bus_id = "xen-backend", ++ }, ++}; ++ ++static int xenbus_uevent_backend(struct device *dev, char **envp, ++ int num_envp, char *buffer, int buffer_size) + { + struct xenbus_device *xdev; + struct xenbus_driver *drv; +- struct xen_bus_type *bus; ++ int i = 0; ++ int length = 0; + + DPRINTK(""); + +@@ -103,32 +142,46 @@ static int xenbus_uevent_backend(struct + return -ENODEV; + + xdev = to_xenbus_device(dev); +- bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); + if (xdev == NULL) + return -ENODEV; + + /* stuff we want to pass to /sbin/hotplug */ +- if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) +- return -ENOMEM; ++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, ++ "XENBUS_TYPE=%s", xdev->devicetype); + +- if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) +- return -ENOMEM; ++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, ++ "XENBUS_PATH=%s", xdev->nodename); + +- if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) +- return -ENOMEM; ++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, ++ "XENBUS_BASE_PATH=%s", xenbus_backend.root); ++ ++ /* terminate, set to 
next free slot, shrink available space */ ++ envp[i] = NULL; ++ envp = &envp[i]; ++ num_envp -= i; ++ buffer = &buffer[length]; ++ buffer_size -= length; + + if (dev->driver) { + drv = to_xenbus_driver(dev->driver); + if (drv && drv->uevent) +- return drv->uevent(xdev, env); ++ return drv->uevent(xdev, envp, num_envp, buffer, ++ buffer_size); + } -- out_error: - return err; + return 0; } -+#ifdef CONFIG_XEN - postcore_initcall(xenbus_probe_init); -- --MODULE_LICENSE("GPL"); -+MODULE_LICENSE("Dual BSD/GPL"); -+#else -+int xenbus_init(void) ++int xenbus_register_backend(struct xenbus_driver *drv) +{ -+ return xenbus_probe_init(); ++ drv->read_otherend_details = read_frontend_details; ++ ++ return xenbus_register_driver_common(drv, &xenbus_backend); +} -+#endif - - static int is_device_connecting(struct device *dev, void *data) ++EXPORT_SYMBOL_GPL(xenbus_register_backend); ++ + /* backend/// */ +-static int xenbus_probe_backend_unit(struct xen_bus_type *bus, +- const char *dir, ++static int xenbus_probe_backend_unit(const char *dir, + const char *type, + const char *name) { -@@ -871,6 +1141,8 @@ static int is_device_connecting(struct d +@@ -141,14 +194,13 @@ static int xenbus_probe_backend_unit(str - static int exists_connecting_device(struct device_driver *drv) - { -+ if (xenbus_frontend.error) -+ return xenbus_frontend.error; - return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, - is_device_connecting); + DPRINTK("%s\n", nodename); + +- err = xenbus_probe_node(bus, type, nodename); ++ err = xenbus_probe_node(&xenbus_backend, type, nodename); + kfree(nodename); + return err; } -@@ -879,6 +1151,7 @@ static int print_device_status(struct de + + /* backend// */ +-static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, +- const char *domid) ++static int xenbus_probe_backend(const char *type, const char *domid) { - struct xenbus_device *xendev = to_xenbus_device(dev); - struct device_driver *drv = data; -+ struct xenbus_driver *xendrv; + char *nodename; + int err = 0; +@@ -157,7 +209,7 @@ static int xenbus_probe_backend(struct x - /* Is this operation limited to a particular driver? */ - if (drv && (dev->driver != drv)) -@@ -888,7 +1161,10 @@ static int print_device_status(struct de - /* Information only: is this too noisy? */ - printk(KERN_INFO "XENBUS: Device with no driver: %s\n", - xendev->nodename); -- } else if (xendev->state < XenbusStateConnected) { -+ return 0; -+ } -+ -+ if (xendev->state < XenbusStateConnected) { - enum xenbus_state rstate = XenbusStateUnknown; - if (xendev->otherend) - rstate = xenbus_read_driver_state(xendev->otherend); -@@ -897,6 +1173,11 @@ static int print_device_status(struct de - xendev->nodename, xendev->state, rstate); - } + DPRINTK(""); -+ xendrv = to_xenbus_driver(dev->driver); -+ if (xendrv->is_ready && !xendrv->is_ready(xendev)) -+ printk(KERN_WARNING "XENBUS: Device not ready: %s\n", -+ xendev->nodename); -+ - return 0; - } +- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); ++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); + if (!nodename) + return -ENOMEM; -@@ -923,7 +1204,7 @@ static void wait_for_devices(struct xenb - struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; - unsigned int seconds_waited = 0; +@@ -168,7 +220,7 @@ static int xenbus_probe_backend(struct x + } -- if (!ready_to_wait_for_devices || !xen_domain()) -+ if (!ready_to_wait_for_devices || !is_running_on_xen()) - return; + for (i = 0; i < dir_n; i++) { +- err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); ++ err = xenbus_probe_backend_unit(nodename, type, dir[i]); + if (err) + break; + } +@@ -177,39 +229,12 @@ static int xenbus_probe_backend(struct x + return err; + } - while (exists_connecting_device(drv)) { -@@ -950,10 +1231,18 @@ static void wait_for_devices(struct xenb - #ifndef MODULE - static int __init boot_wait_for_devices(void) +-static void frontend_changed(struct xenbus_watch *watch, +- const char **vec, unsigned int len) +-{ +- xenbus_otherend_changed(watch, vec, len, 0); +-} +- +-static struct device_attribute xenbus_backend_dev_attrs[] = { +- __ATTR_NULL +-}; +- +-static struct xen_bus_type xenbus_backend = { +- .root = "backend", +- .levels = 3, /* backend/type// */ +- .get_bus_id = backend_bus_id, +- .probe = xenbus_probe_backend, +- .otherend_changed = frontend_changed, +- .bus = { +- .name = "xen-backend", +- .match = xenbus_match, +- .uevent = xenbus_uevent_backend, +- .probe = xenbus_dev_probe, +- .remove = xenbus_dev_remove, +- .shutdown = xenbus_dev_shutdown, +- .dev_attrs = xenbus_backend_dev_attrs, +- }, +-}; +- + static void backend_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) { -- ready_to_wait_for_devices = 1; -- wait_for_devices(NULL); -+ if (!xenbus_frontend.error) { -+ ready_to_wait_for_devices = 1; -+ wait_for_devices(NULL); -+ } - return 0; - } + DPRINTK(""); - late_initcall(boot_wait_for_devices); - #endif -+ -+int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) -+{ -+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); -+} -+EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_probe.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_probe.h 2010-01-19 16:01:04.000000000 +0100 -@@ -34,43 +34,47 @@ - #ifndef _XENBUS_PROBE_H - #define _XENBUS_PROBE_H +- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); ++ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); + } -+#ifndef BUS_ID_SIZE - #define XEN_BUS_ID_SIZE 20 -+#else -+#define XEN_BUS_ID_SIZE BUS_ID_SIZE -+#endif + static struct xenbus_watch be_watch = { +@@ -217,60 +242,51 @@ static struct xenbus_watch be_watch = { + .callback = backend_changed, + }; --#ifdef CONFIG_XEN_BACKEND -+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) - extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); - extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); - extern void xenbus_backend_probe_and_watch(void); --extern int xenbus_backend_bus_register(void); --extern void xenbus_backend_bus_unregister(void); -+extern void xenbus_backend_bus_register(void); -+extern void xenbus_backend_device_register(void); - #else - static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} - static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} - static inline void xenbus_backend_probe_and_watch(void) {} --static inline int xenbus_backend_bus_register(void) { return 0; } --static inline void xenbus_backend_bus_unregister(void) {} -+static inline void xenbus_backend_bus_register(void) {} -+static inline void xenbus_backend_device_register(void) {} - #endif +-static 
int read_frontend_details(struct xenbus_device *xendev) +-{ +- return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); +-} +- +-int xenbus_dev_is_online(struct xenbus_device *dev) ++void xenbus_backend_suspend(int (*fn)(struct device *, void *)) + { +- int rc, val; +- +- rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); +- if (rc != 1) +- val = 0; /* no online node present */ +- +- return val; ++ DPRINTK(""); ++ if (!xenbus_backend.error) ++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); + } +-EXPORT_SYMBOL_GPL(xenbus_dev_is_online); - struct xen_bus_type +-int __xenbus_register_backend(struct xenbus_driver *drv, +- struct module *owner, const char *mod_name) ++void xenbus_backend_resume(int (*fn)(struct device *, void *)) { - char *root; -+ int error; - unsigned int levels; - int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); - int (*probe)(const char *type, const char *dir); - struct bus_type bus; -+ struct device dev; - }; +- drv->read_otherend_details = read_frontend_details; +- +- return xenbus_register_driver_common(drv, &xenbus_backend, +- owner, mod_name); ++ DPRINTK(""); ++ if (!xenbus_backend.error) ++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); + } +-EXPORT_SYMBOL_GPL(__xenbus_register_backend); - extern int xenbus_match(struct device *_dev, struct device_driver *_drv); - extern int xenbus_dev_probe(struct device *_dev); - extern int xenbus_dev_remove(struct device *_dev); - extern int xenbus_register_driver_common(struct xenbus_driver *drv, -- struct xen_bus_type *bus, -- struct module *owner, -- const char *mod_name); -+ struct xen_bus_type *bus); - extern int xenbus_probe_node(struct xen_bus_type *bus, - const char *type, - const char *nodename); - extern int xenbus_probe_devices(struct xen_bus_type *bus); +-static int backend_probe_and_watch(struct notifier_block *notifier, +- unsigned long event, +- void *data) ++void xenbus_backend_probe_and_watch(void) + { +- /* Enumerate devices in xenstore and watch for changes. 
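For symmetry with the frontend side, a bare-bones backend driver of the sort xenbus_register_backend() in this hunk accepts; everything prefixed demoback_ is invented and only sketches the usual state handshake.

#include <xen/xenbus.h>

static int demoback_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        return xenbus_switch_state(dev, XenbusStateInitWait);
}

static void demoback_frontend_changed(struct xenbus_device *dev,
                                      enum xenbus_state frontend_state)
{
        switch (frontend_state) {
        case XenbusStateInitialised:
        case XenbusStateConnected:
                /* Map the ring the frontend granted, then advertise it. */
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
        case XenbusStateClosing:
                xenbus_switch_state(dev, XenbusStateClosing);
                break;
        default:
                break;
        }
}

static const struct xenbus_device_id demoback_ids[] = {
        { "demo" },
        { "" }
};

static struct xenbus_driver demoback_driver = {
        .name             = "demoback",
        .ids              = demoback_ids,
        .probe            = demoback_probe,
        .otherend_changed = demoback_frontend_changed,
};

/* registered with xenbus_register_backend(&demoback_driver), which fills in
 * read_otherend_details as shown in this hunk */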
*/ + xenbus_probe_devices(&xenbus_backend); + register_xenbus_watch(&be_watch); +- +- return NOTIFY_DONE; + } --extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); -+extern void dev_changed(const char *node, struct xen_bus_type *bus); +-static int __init xenbus_probe_backend_init(void) ++void xenbus_backend_bus_register(void) + { +- static struct notifier_block xenstore_notifier = { +- .notifier_call = backend_probe_and_watch +- }; +- int err; +- +- DPRINTK(""); +- +- /* Register ourselves with the kernel bus subsystem */ +- err = bus_register(&xenbus_backend.bus); +- if (err) +- return err; +- +- register_xenstore_notifier(&xenstore_notifier); ++ xenbus_backend.error = bus_register(&xenbus_backend.bus); ++ if (xenbus_backend.error) ++ printk(KERN_WARNING ++ "XENBUS: Error registering backend bus: %i\n", ++ xenbus_backend.error); ++} ++ ++void xenbus_backend_device_register(void) ++{ ++ if (xenbus_backend.error) ++ return; ++ ++ xenbus_backend.error = device_register(&xenbus_backend.dev); ++ if (xenbus_backend.error) { ++ bus_unregister(&xenbus_backend.bus); ++ printk(KERN_WARNING ++ "XENBUS: Error registering backend device: %i\n", ++ xenbus_backend.error); ++ } ++} - #endif ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_xs.c 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_xs.c 2010-01-19 16:01:04.000000000 +0100 +- return 0; ++int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) ++{ ++ return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); + } +-subsys_initcall(xenbus_probe_backend_init); ++EXPORT_SYMBOL_GPL(xenbus_for_each_backend); +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_xs.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 15:14:12.000000000 +0100 @@ -47,6 +47,14 @@ #include #include "xenbus_comms.h" @@ -1709,33 +2338,7 @@ Acked-by: jbeulich@novell.com struct xs_stored_msg { struct list_head list; -@@ -76,6 +84,14 @@ struct xs_handle { - /* - * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. - * response_mutex is never taken simultaneously with the other three. -+ * -+ * transaction_mutex must be held before incrementing -+ * transaction_count. The mutex is held when a suspend is in -+ * progress to prevent new transactions starting. -+ * -+ * When decrementing transaction_count to zero the wait queue -+ * should be woken up, the suspend code waits for count to -+ * reach zero. - */ - - /* One request at a time. */ -@@ -85,7 +101,9 @@ struct xs_handle { - struct mutex response_mutex; - - /* Protect transactions against save/restore. */ -- struct rw_semaphore transaction_mutex; -+ struct mutex transaction_mutex; -+ atomic_t transaction_count; -+ wait_queue_head_t transaction_wq; - - /* Protect watch (de)register against save/restore. */ - struct rw_semaphore watch_mutex; -@@ -108,7 +126,7 @@ static DEFINE_SPINLOCK(watch_events_lock +@@ -118,7 +126,7 @@ static DEFINE_SPINLOCK(watch_events_lock * carrying out work. 
*/ static pid_t xenwatch_pid; @@ -1744,48 +2347,7 @@ Acked-by: jbeulich@novell.com static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) -@@ -157,6 +175,31 @@ static void *read_reply(enum xsd_sockmsg - return body; - } - -+static void transaction_start(void) -+{ -+ mutex_lock(&xs_state.transaction_mutex); -+ atomic_inc(&xs_state.transaction_count); -+ mutex_unlock(&xs_state.transaction_mutex); -+} -+ -+static void transaction_end(void) -+{ -+ if (atomic_dec_and_test(&xs_state.transaction_count)) -+ wake_up(&xs_state.transaction_wq); -+} -+ -+static void transaction_suspend(void) -+{ -+ mutex_lock(&xs_state.transaction_mutex); -+ wait_event(xs_state.transaction_wq, -+ atomic_read(&xs_state.transaction_count) == 0); -+} -+ -+static void transaction_resume(void) -+{ -+ mutex_unlock(&xs_state.transaction_mutex); -+} -+ - void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) - { - void *ret; -@@ -164,7 +207,7 @@ void *xenbus_dev_request_and_reply(struc - int err; - - if (req_msg.type == XS_TRANSACTION_START) -- down_read(&xs_state.transaction_mutex); -+ transaction_start(); - - mutex_lock(&xs_state.request_mutex); - -@@ -177,14 +220,13 @@ void *xenbus_dev_request_and_reply(struc +@@ -212,14 +220,13 @@ void *xenbus_dev_request_and_reply(struc mutex_unlock(&xs_state.request_mutex); @@ -1793,8 +2355,7 @@ Acked-by: jbeulich@novell.com + if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) -- up_read(&xs_state.transaction_mutex); -+ transaction_end(); + transaction_end(); return ret; } @@ -1802,16 +2363,7 @@ Acked-by: jbeulich@novell.com /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, -@@ -214,7 +256,7 @@ static void *xs_talkv(struct xenbus_tran - } - - for (i = 0; i < num_vecs; i++) { -- err = xb_write(iovec[i].iov_base, iovec[i].iov_len); -+ err = xb_write(iovec[i].iov_base, iovec[i].iov_len);; - if (err) { - mutex_unlock(&xs_state.request_mutex); - return ERR_PTR(err); -@@ -295,7 +337,7 @@ static char **split(char *strings, unsig +@@ -330,7 +337,7 @@ static char **split(char *strings, unsig char *p, **ret; /* Count the strings. */ @@ -1820,7 +2372,7 @@ Acked-by: jbeulich@novell.com /* Transfer to one big alloc for easy freeing. 
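The transaction_count/transaction_wq pair used by xs_state in this file's hunks implements a simple drain-before-suspend scheme; stripped of the xs_state specifics it is roughly the following pattern (demo_* names are placeholders, not the real symbols).

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <asm/atomic.h>

static DEFINE_MUTEX(demo_transaction_mutex);
static atomic_t demo_transaction_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_transaction_wq);

static void demo_transaction_start(void)
{
        mutex_lock(&demo_transaction_mutex);    /* blocks while suspending */
        atomic_inc(&demo_transaction_count);
        mutex_unlock(&demo_transaction_mutex);
}

static void demo_transaction_end(void)
{
        if (atomic_dec_and_test(&demo_transaction_count))
                wake_up(&demo_transaction_wq);
}

static void demo_transaction_suspend(void)
{
        mutex_lock(&demo_transaction_mutex);    /* hold off new starters */
        wait_event(demo_transaction_wq,
                   atomic_read(&demo_transaction_count) == 0);
}

static void demo_transaction_resume(void)
{
        mutex_unlock(&demo_transaction_mutex);
}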
*/ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); -@@ -309,6 +351,7 @@ static char **split(char *strings, unsig +@@ -344,6 +351,7 @@ static char **split(char *strings, unsig strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; @@ -1828,30 +2380,7 @@ Acked-by: jbeulich@novell.com return ret; } -@@ -432,11 +475,11 @@ int xenbus_transaction_start(struct xenb - { - char *id_str; - -- down_read(&xs_state.transaction_mutex); -+ transaction_start(); - - id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); - if (IS_ERR(id_str)) { -- up_read(&xs_state.transaction_mutex); -+ transaction_end(); - return PTR_ERR(id_str); - } - -@@ -461,7 +504,7 @@ int xenbus_transaction_end(struct xenbus - - err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); - -- up_read(&xs_state.transaction_mutex); -+ transaction_end(); - - return err; - } -@@ -622,6 +665,8 @@ void unregister_xenbus_watch(struct xenb +@@ -657,6 +665,8 @@ void unregister_xenbus_watch(struct xenb char token[sizeof(watch) * 2 + 1]; int err; @@ -1860,7 +2389,7 @@ Acked-by: jbeulich@novell.com sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); -@@ -639,11 +684,6 @@ void unregister_xenbus_watch(struct xenb +@@ -674,11 +684,6 @@ void unregister_xenbus_watch(struct xenb up_read(&xs_state.watch_mutex); @@ -1872,7 +2401,7 @@ Acked-by: jbeulich@novell.com /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { -@@ -655,14 +695,17 @@ void unregister_xenbus_watch(struct xenb +@@ -690,8 +695,11 @@ void unregister_xenbus_watch(struct xenb } spin_unlock(&watch_events_lock); @@ -1885,14 +2414,7 @@ Acked-by: jbeulich@novell.com } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); - void xs_suspend(void) - { -- down_write(&xs_state.transaction_mutex); -+ transaction_suspend(); - down_write(&xs_state.watch_mutex); - mutex_lock(&xs_state.request_mutex); - mutex_lock(&xs_state.response_mutex); -@@ -673,11 +716,9 @@ void xs_resume(void) +@@ -708,8 +716,6 @@ void xs_resume(void) struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; @@ -1900,19 +2422,11 @@ Acked-by: jbeulich@novell.com - mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); -- up_write(&xs_state.transaction_mutex); -+ transaction_resume(); + transaction_resume(); +@@ -731,11 +737,30 @@ void xs_suspend_cancel(void) + mutex_unlock(&xs_state.transaction_mutex); + } - /* No need for watches_lock: the watch_mutex is sufficient. 
*/ - list_for_each_entry(watch, &watches, list) { -@@ -693,7 +734,25 @@ void xs_suspend_cancel(void) - mutex_unlock(&xs_state.response_mutex); - mutex_unlock(&xs_state.request_mutex); - up_write(&xs_state.watch_mutex); -- up_write(&xs_state.transaction_mutex); -+ mutex_unlock(&xs_state.transaction_mutex); -+} -+ +static int xenwatch_handle_callback(void *data) +{ + struct xs_stored_msg *msg = data; @@ -1929,10 +2443,10 @@ Acked-by: jbeulich@novell.com + do_exit(0); + + return 0; - } - ++} ++ static int xenwatch_thread(void *unused) -@@ -701,6 +760,7 @@ static int xenwatch_thread(void *unused) + { struct list_head *ent; struct xs_stored_msg *msg; @@ -1940,7 +2454,7 @@ Acked-by: jbeulich@novell.com for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); -@@ -716,17 +776,29 @@ static int xenwatch_thread(void *unused) +@@ -751,17 +776,29 @@ static int xenwatch_thread(void *unused) list_del(ent); spin_unlock(&watch_events_lock); @@ -1979,7 +2493,7 @@ Acked-by: jbeulich@novell.com } return 0; -@@ -820,6 +892,7 @@ static int xenbus_thread(void *unused) +@@ -855,6 +892,7 @@ static int xenbus_thread(void *unused) { int err; @@ -1987,7 +2501,7 @@ Acked-by: jbeulich@novell.com for (;;) { err = process_msg(); if (err) -@@ -834,7 +907,6 @@ static int xenbus_thread(void *unused) +@@ -869,7 +907,6 @@ static int xenbus_thread(void *unused) int xs_init(void) { @@ -1995,25 +2509,20 @@ Acked-by: jbeulich@novell.com struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); -@@ -843,13 +915,10 @@ int xs_init(void) +@@ -883,11 +920,6 @@ int xs_init(void) + atomic_set(&xs_state.transaction_count, 0); + init_waitqueue_head(&xs_state.transaction_wq); - mutex_init(&xs_state.request_mutex); - mutex_init(&xs_state.response_mutex); -- init_rwsem(&xs_state.transaction_mutex); -+ mutex_init(&xs_state.transaction_mutex); - init_rwsem(&xs_state.watch_mutex); -- - /* Initialize the shared memory rings to talk to xenstored */ - err = xb_init_comms(); - if (err) - return err; -+ atomic_set(&xs_state.transaction_count, 0); -+ init_waitqueue_head(&xs_state.transaction_wq); - +- task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) ---- head-2010-05-12.orig/include/xen/evtchn.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/evtchn.h 2010-01-19 16:01:04.000000000 +0100 + return PTR_ERR(task); +--- head-2011-03-17.orig/include/xen/evtchn.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/evtchn.h 2011-01-31 15:14:12.000000000 +0100 @@ -1,7 +1,11 @@ +#if defined(CONFIG_PARAVIRT_XEN) || !defined(__KERNEL__) +#include "public/evtchn.h" @@ -2189,8 +2698,39 @@ Acked-by: jbeulich@novell.com -#endif /* __LINUX_PUBLIC_EVTCHN_H__ */ +#endif /* __ASM_EVTCHN_H__ */ +#endif /* CONFIG_PARAVIRT_XEN */ ---- head-2010-05-12.orig/include/xen/interface/callback.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/callback.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/hvm.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/hvm.h 2011-01-31 15:14:12.000000000 +0100 +@@ -3,9 +3,8 @@ + #define XEN_HVM_H__ + + #include +-#include + +-static inline int hvm_get_parameter(int idx, uint64_t *value) ++static inline unsigned long hvm_get_parameter(int idx) + { + struct xen_hvm_param xhv; + int r; +@@ -16,15 +15,9 @@ static inline int hvm_get_parameter(int + if (r < 0) { + printk(KERN_ERR "Cannot get hvm parameter %d: %d!\n", + idx, r); +- return r; ++ return 0; + } +- *value = xhv.value; 
+- return r; ++ return xhv.value; + } + +-#define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2 +-#define HVM_CALLBACK_VIA_TYPE_SHIFT 56 +-#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\ +- HVM_CALLBACK_VIA_TYPE_SHIFT | (x)) +- + #endif /* XEN_HVM_H__ */ +--- head-2011-03-17.orig/include/xen/interface/callback.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/callback.h 2011-01-31 15:14:12.000000000 +0100 @@ -86,6 +86,8 @@ struct callback_register { uint16_t flags; xen_callback_t address; @@ -2213,8 +2753,8 @@ Acked-by: jbeulich@novell.com +#endif #endif /* __XEN_PUBLIC_CALLBACK_H__ */ ---- head-2010-05-12.orig/include/xen/interface/elfnote.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/elfnote.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/elfnote.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/elfnote.h 2011-03-17 13:50:24.000000000 +0100 @@ -3,6 +3,24 @@ * * Definitions used for the Xen ELF notes. @@ -2272,7 +2812,7 @@ Acked-by: jbeulich@novell.com * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be -@@ -140,6 +161,76 @@ +@@ -140,6 +161,82 @@ */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 @@ -2288,9 +2828,15 @@ Acked-by: jbeulich@novell.com +#define XEN_ELFNOTE_INIT_P2M 15 + +/* ++ * Whether or not the guest can deal with being passed an initrd not ++ * mapped through its initial page tables. ++ */ ++#define XEN_ELFNOTE_MOD_START_PFN 16 ++ ++/* + * The number of the highest elfnote defined. + */ -+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_INIT_P2M ++#define XEN_ELFNOTE_MAX XEN_ELFNOTE_MOD_START_PFN + +/* + * System information exported through crash notes. @@ -2349,8 +2895,8 @@ Acked-by: jbeulich@novell.com #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* ---- head-2010-05-12.orig/include/xen/interface/event_channel.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/event_channel.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/event_channel.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/event_channel.h 2011-01-31 15:14:12.000000000 +0100 @@ -3,6 +3,24 @@ * * Event channels between domains. @@ -2680,8 +3226,8 @@ Acked-by: jbeulich@novell.com +DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ ---- head-2010-05-12.orig/include/xen/interface/features.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/features.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/features.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/features.h 2011-01-31 15:14:12.000000000 +0100 @@ -3,6 +3,24 @@ * * Feature flags, reported by XENVER_get_features. @@ -2720,20 +3266,12 @@ Acked-by: jbeulich@novell.com + */ +#define XENFEAT_gnttab_map_avail_bits 7 + - #define XENFEAT_NR_SUBMAPS 1 - - #endif /* __XEN_PUBLIC_FEATURES_H__ */ ---- head-2010-05-12.orig/include/xen/interface/grant_table.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/grant_table.h 2010-01-19 16:01:04.000000000 +0100 -@@ -28,6 +28,7 @@ - #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ - #define __XEN_PUBLIC_GRANT_TABLE_H__ - -+#include "xen.h" + /* x86: Does this Xen host support the HVM callback vector type? 
*/ + #define XENFEAT_hvm_callback_vector 8 - /*********************************** - * GRANT TABLE REPRESENTATION -@@ -84,12 +85,26 @@ +--- head-2011-03-17.orig/include/xen/interface/grant_table.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/grant_table.h 2011-01-31 15:14:12.000000000 +0100 +@@ -85,12 +85,26 @@ */ /* @@ -2761,7 +3299,7 @@ Acked-by: jbeulich@novell.com /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ -@@ -100,6 +115,7 @@ struct grant_entry { +@@ -101,6 +115,7 @@ struct grant_entry { */ uint32_t frame; }; @@ -2769,7 +3307,7 @@ Acked-by: jbeulich@novell.com /* * Type of grant entry. -@@ -107,10 +123,13 @@ struct grant_entry { +@@ -108,10 +123,13 @@ struct grant_entry { * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. @@ -2783,7 +3321,7 @@ Acked-by: jbeulich@novell.com #define GTF_type_mask (3U<<0) /* -@@ -118,6 +137,10 @@ struct grant_entry { +@@ -119,6 +137,10 @@ struct grant_entry { * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] @@ -2794,7 +3332,7 @@ Acked-by: jbeulich@novell.com */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) -@@ -125,6 +148,14 @@ struct grant_entry { +@@ -126,6 +148,14 @@ struct grant_entry { #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) @@ -2809,7 +3347,7 @@ Acked-by: jbeulich@novell.com /* * Subflags for GTF_accept_transfer: -@@ -141,15 +172,87 @@ struct grant_entry { +@@ -142,15 +172,87 @@ struct grant_entry { #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) @@ -2902,7 +3440,7 @@ Acked-by: jbeulich@novell.com /* * Handle to track a mapping created via a grant reference. -@@ -185,7 +288,8 @@ struct gnttab_map_grant_ref { +@@ -186,7 +288,8 @@ struct gnttab_map_grant_ref { grant_handle_t handle; uint64_t dev_bus_addr; }; @@ -2912,7 +3450,7 @@ Acked-by: jbeulich@novell.com /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings -@@ -207,7 +311,8 @@ struct gnttab_unmap_grant_ref { +@@ -208,7 +311,8 @@ struct gnttab_unmap_grant_ref { /* OUT parameters. */ int16_t status; /* GNTST_* */ }; @@ -2922,7 +3460,7 @@ Acked-by: jbeulich@novell.com /* * GNTTABOP_setup_table: Set up a grant table for comprising at least -@@ -225,9 +330,10 @@ struct gnttab_setup_table { +@@ -226,9 +330,10 @@ struct gnttab_setup_table { uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ @@ -2935,7 +3473,7 @@ Acked-by: jbeulich@novell.com /* * GNTTABOP_dump_table: Dump the contents of the grant table to the -@@ -240,7 +346,8 @@ struct gnttab_dump_table { +@@ -241,7 +346,8 @@ struct gnttab_dump_table { /* OUT parameters. */ int16_t status; /* GNTST_* */ }; @@ -2945,7 +3483,7 @@ Acked-by: jbeulich@novell.com /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The -@@ -253,13 +360,15 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_t +@@ -254,13 +360,15 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_t #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. 
*/ @@ -2963,7 +3501,7 @@ Acked-by: jbeulich@novell.com /* * GNTTABOP_copy: Hypervisor based copy -@@ -283,24 +392,26 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_transf +@@ -284,24 +392,26 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_transf #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) @@ -3006,7 +3544,7 @@ Acked-by: jbeulich@novell.com /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared -@@ -318,10 +429,92 @@ struct gnttab_query_size { +@@ -319,10 +429,92 @@ struct gnttab_query_size { uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; @@ -3101,7 +3639,7 @@ Acked-by: jbeulich@novell.com */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) -@@ -348,6 +541,16 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_ +@@ -349,6 +541,16 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) @@ -3118,7 +3656,7 @@ Acked-by: jbeulich@novell.com /* * Values for error status returns. All errors are -ve. */ -@@ -361,7 +564,9 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_ +@@ -362,7 +564,9 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ @@ -3127,21 +3665,312 @@ Acked-by: jbeulich@novell.com +#define GNTST_address_too_big (-11) /* transfer page address too large. */ +#define GNTST_eagain (-12) /* Could not map at the moment. Retry. */ - #define GNTTABOP_error_msgs { \ - "okay", \ -@@ -374,7 +579,9 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_ - "no spare translation slot in the I/O MMU", \ - "permission denied", \ - "bad page", \ -- "copy arguments cross page boundary" \ -+ "copy arguments cross page boundary", \ -+ "page address size too large", \ -+ "could not map at the moment, retry" \ - } + #define GNTTABOP_error_msgs { \ + "okay", \ +@@ -375,7 +579,9 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_ + "no spare translation slot in the I/O MMU", \ + "permission denied", \ + "bad page", \ +- "copy arguments cross page boundary" \ ++ "copy arguments cross page boundary", \ ++ "page address size too large", \ ++ "could not map at the moment, retry" \ + } + + #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ +--- head-2011-03-17.orig/include/xen/interface/hvm/hvm_op.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/hvm/hvm_op.h 2011-03-17 13:50:24.000000000 +0100 +@@ -21,6 +21,9 @@ + #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ + #define __XEN_PUBLIC_HVM_HVM_OP_H__ + ++#include "../xen.h" ++#include "../trace.h" ++ + /* Get/set subcommands: the second argument of the hypercall is a + * pointer to a xen_hvm_param struct. */ + #define HVMOP_set_param 0 +@@ -30,17 +33,197 @@ struct xen_hvm_param { + uint32_t index; /* IN */ + uint64_t value; /* IN/OUT */ + }; +-DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_param); ++typedef struct xen_hvm_param xen_hvm_param_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); ++ ++/* Set the logical level of one of a domain's PCI INTx wires. */ ++#define HVMOP_set_pci_intx_level 2 ++struct xen_hvm_set_pci_intx_level { ++ /* Domain to be updated. */ ++ domid_t domid; ++ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ ++ uint8_t domain, bus, device, intx; ++ /* Assertion level (0 = unasserted, 1 = asserted). 
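A compact illustration of the ordering the GTF_* descriptions here require when a guest activates a v1 grant entry: domid and frame must be visible before the flags word. demo_grant_access() is not a real helper, just the publish sequence in isolation.

#include <xen/interface/grant_table.h>
#include <asm/system.h>         /* wmb() on kernels of this vintage */

static void demo_grant_access(struct grant_entry *ent, domid_t domid,
                              unsigned long frame, int readonly)
{
        ent->domid = domid;
        ent->frame = frame;
        wmb();                  /* publish frame/domid before flags */
        ent->flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
}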
*/ ++ uint8_t level; ++}; ++typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); ++ ++/* Set the logical level of one of a domain's ISA IRQ wires. */ ++#define HVMOP_set_isa_irq_level 3 ++struct xen_hvm_set_isa_irq_level { ++ /* Domain to be updated. */ ++ domid_t domid; ++ /* ISA device identification, by ISA IRQ (0-15). */ ++ uint8_t isa_irq; ++ /* Assertion level (0 = unasserted, 1 = asserted). */ ++ uint8_t level; ++}; ++typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); ++ ++#define HVMOP_set_pci_link_route 4 ++struct xen_hvm_set_pci_link_route { ++ /* Domain to be updated. */ ++ domid_t domid; ++ /* PCI link identifier (0-3). */ ++ uint8_t link; ++ /* ISA IRQ (1-15), or 0 (disable link). */ ++ uint8_t isa_irq; ++}; ++typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); ++ ++/* Flushes all VCPU TLBs: @arg must be NULL. */ ++#define HVMOP_flush_tlbs 5 ++ ++/* Following tools-only interfaces may change in future. */ ++#if defined(__XEN__) || defined(__XEN_TOOLS__) ++ ++/* Track dirty VRAM. */ ++#define HVMOP_track_dirty_vram 6 ++struct xen_hvm_track_dirty_vram { ++ /* Domain to be tracked. */ ++ domid_t domid; ++ /* First pfn to track. */ ++ uint64_aligned_t first_pfn; ++ /* Number of pages to track. */ ++ uint64_aligned_t nr; ++ /* OUT variable. */ ++ /* Dirty bitmap buffer. */ ++ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; ++}; ++typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); ++ ++/* Notify that some pages got modified by the Device Model. */ ++#define HVMOP_modified_memory 7 ++struct xen_hvm_modified_memory { ++ /* Domain to be updated. */ ++ domid_t domid; ++ /* First pfn. */ ++ uint64_aligned_t first_pfn; ++ /* Number of pages. */ ++ uint64_aligned_t nr; ++}; ++typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); ++ ++#define HVMOP_set_mem_type 8 ++typedef enum { ++ HVMMEM_ram_rw, /* Normal read/write guest RAM */ ++ HVMMEM_ram_ro, /* Read-only; writes are discarded */ ++ HVMMEM_mmio_dm, /* Reads and write go to the device model */ ++} hvmmem_type_t; ++/* Notify that a region of memory is to be treated in a specific way. */ ++struct xen_hvm_set_mem_type { ++ /* Domain to be updated. */ ++ domid_t domid; ++ /* Memory type */ ++ uint16_t hvmmem_type; ++ /* Number of pages. */ ++ uint32_t nr; ++ /* First pfn. */ ++ uint64_aligned_t first_pfn; ++}; ++typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); ++ ++#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + + /* Hint from PV drivers for pagetable destruction. */ + #define HVMOP_pagetable_dying 9 + struct xen_hvm_pagetable_dying { + /* Domain with a pagetable about to be destroyed. */ + domid_t domid; ++ uint16_t pad[3]; /* align next field on 8-byte boundary */ + /* guest physical address of the toplevel pagetable dying */ +- aligned_u64 gpa; ++ uint64_t gpa; + }; + typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t; +-DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_pagetable_dying_t); +- ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t); ++ ++/* Get the current Xen time, in nanoseconds since system boot. 
*/ ++#define HVMOP_get_time 10 ++struct xen_hvm_get_time { ++ uint64_t now; /* OUT */ ++}; ++typedef struct xen_hvm_get_time xen_hvm_get_time_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t); ++ ++#define HVMOP_xentrace 11 ++struct xen_hvm_xentrace { ++ uint16_t event, extra_bytes; ++ uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)]; ++}; ++typedef struct xen_hvm_xentrace xen_hvm_xentrace_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t); ++ ++/* Following tools-only interfaces may change in future. */ ++#if defined(__XEN__) || defined(__XEN_TOOLS__) ++ ++#define HVMOP_set_mem_access 12 ++typedef enum { ++ HVMMEM_access_n, ++ HVMMEM_access_r, ++ HVMMEM_access_w, ++ HVMMEM_access_rw, ++ HVMMEM_access_x, ++ HVMMEM_access_rx, ++ HVMMEM_access_wx, ++ HVMMEM_access_rwx, ++ HVMMEM_access_rx2rw, /* Page starts off as r-x, but automatically ++ * change to r-w on a write */ ++ HVMMEM_access_default /* Take the domain default */ ++} hvmmem_access_t; ++/* Notify that a region of memory is to have specific access types */ ++struct xen_hvm_set_mem_access { ++ /* Domain to be updated. */ ++ domid_t domid; ++ /* Memory type */ ++ uint16_t hvmmem_access; /* hvm_access_t */ ++ /* Number of pages, ignored on setting default access */ ++ uint32_t nr; ++ /* First pfn, or ~0ull to set the default access for new pages */ ++ uint64_aligned_t first_pfn; ++}; ++typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t); ++ ++#define HVMOP_get_mem_access 13 ++/* Get the specific access type for that region of memory */ ++struct xen_hvm_get_mem_access { ++ /* Domain to be queried. */ ++ domid_t domid; ++ /* Memory type: OUT */ ++ uint16_t hvmmem_access; /* hvm_access_t */ ++ /* pfn, or ~0ull for default access for new pages. IN */ ++ uint64_aligned_t pfn; ++}; ++typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t); ++ ++#define HVMOP_inject_trap 14 ++/* Inject a trap into a VCPU, which will get taken up on the next ++ * scheduling of it. Note that the caller should know enough of the ++ * state of the CPU before injecting, to know what the effect of ++ * injecting the trap will be. ++ */ ++struct xen_hvm_inject_trap { ++ /* Domain to be queried. */ ++ domid_t domid; ++ /* VCPU */ ++ uint32_t vcpuid; ++ /* Trap number */ ++ uint32_t trap; ++ /* Error code, or -1 to skip */ ++ uint32_t error_code; ++ /* CR2 for page faults */ ++ uint64_aligned_t cr2; ++}; ++typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t; ++DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t); ++ ++#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ ++ + #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ +--- head-2011-03-17.orig/include/xen/interface/hvm/params.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/hvm/params.h 2011-03-17 13:50:24.000000000 +0100 +@@ -33,11 +33,17 @@ + * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: + * Domain = val[47:32], Bus = val[31:16], + * DevFn = val[15: 8], IntX = val[ 1: 0] +- * val[63:56] == 2: val[7:0] is a vector number. ++ * val[63:56] == 2: val[7:0] is a vector number, check for ++ * XENFEAT_hvm_callback_vector to know if this delivery ++ * method is available. + * If val == 0 then CPU0 event-channel notifications are not delivered. + */ + #define HVM_PARAM_CALLBACK_IRQ 0 + ++/* ++ * These are not used by Xen. They are here for convenience of HVM-guest ++ * xenbus implementations. 
++ */ + #define HVM_PARAM_STORE_PFN 1 + #define HVM_PARAM_STORE_EVTCHN 2 + +@@ -47,6 +53,19 @@ - #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ ---- head-2010-05-12.orig/include/xen/interface/io/blkif.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/blkif.h 2010-01-19 16:01:04.000000000 +0100 + #define HVM_PARAM_BUFIOREQ_PFN 6 + ++#ifdef __ia64__ ++ ++#define HVM_PARAM_NVRAM_FD 7 ++#define HVM_PARAM_VHPT_SIZE 8 ++#define HVM_PARAM_BUFPIOREQ_PFN 9 ++ ++#elif defined(__i386__) || defined(__x86_64__) ++ ++/* Expose Viridian interfaces to this HVM guest? */ ++#define HVM_PARAM_VIRIDIAN 9 ++ ++#endif ++ + /* + * Set mode for virtual timers (currently x86 only): + * delay_for_missed_ticks (default): +@@ -90,6 +109,34 @@ + /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ + #define HVM_PARAM_VPT_ALIGN 16 + +-#define HVM_NR_PARAMS 17 ++/* Console debug shared memory ring and event channel */ ++#define HVM_PARAM_CONSOLE_PFN 17 ++#define HVM_PARAM_CONSOLE_EVTCHN 18 ++ ++/* ++ * Select location of ACPI PM1a and TMR control blocks. Currently two locations ++ * are supported, specified by version 0 or 1 in this parameter: ++ * - 0: default, use the old addresses ++ * PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48 ++ * - 1: use the new default qemu addresses ++ * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008 ++ * You can find these address definitions in ++ */ ++#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19 ++ ++/* Enable blocking memory events, async or sync (pause vcpu until response) ++ * onchangeonly indicates messages only on a change of value */ ++#define HVM_PARAM_MEMORY_EVENT_CR0 20 ++#define HVM_PARAM_MEMORY_EVENT_CR3 21 ++#define HVM_PARAM_MEMORY_EVENT_CR4 22 ++#define HVM_PARAM_MEMORY_EVENT_INT3 23 ++ ++#define HVMPME_MODE_MASK (3 << 0) ++#define HVMPME_mode_disabled 0 ++#define HVMPME_mode_async 1 ++#define HVMPME_mode_sync 2 ++#define HVMPME_onchangeonly (1 << 2) ++ ++#define HVM_NR_PARAMS 24 + + #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ +--- head-2011-03-17.orig/include/xen/interface/io/blkif.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/blkif.h 2011-03-17 13:50:24.000000000 +0100 @@ -3,6 +3,24 @@ * * Unified block-device I/O interface for Xen guest OSes. @@ -3189,7 +4018,7 @@ Acked-by: jbeulich@novell.com * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether -@@ -43,33 +63,57 @@ typedef uint64_t blkif_sector_t; +@@ -43,33 +63,96 @@ typedef uint64_t blkif_sector_t; * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 @@ -3206,6 +4035,31 @@ Acked-by: jbeulich@novell.com + * "feature-flush-cache" node! + */ +#define BLKIF_OP_FLUSH_DISKCACHE 3 ++/* ++ * Used in SLES sources for device specific command packet ++ * contained within the request. Reserved for that purpose. ++ */ ++#define BLKIF_OP_RESERVED_1 4 ++/* ++ * Recognised only if "feature-trim" is present in backend xenbus info. ++ * The "feature-trim" node contains a boolean indicating whether trim ++ * requests are likely to succeed or fail. Either way, a trim request ++ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by ++ * the underlying block-device hardware. The boolean simply indicates whether ++ * or not it is worthwhile for the frontend to attempt trim requests. 
++ * If a backend does not recognise BLKIF_OP_TRIM, it should *not* ++ * create the "feature-trim" node! ++ * ++ * Trim operation is a request for the underlying block device to mark ++ * extents to be erased. Trim operations are passed with sector_number as the ++ * sector index to begin trim operations at and nr_sectors as the number of ++ * sectors to be trimmed. The specified sectors should be trimmed if the ++ * underlying block device supports trim operations, or a BLKIF_RSP_EOPNOTSUPP ++ * should be returned. More information about trim operations at: ++ * http://t13.org/Documents/UploadedDocuments/docs2008/ ++ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc ++ */ ++#define BLKIF_OP_TRIM 5 /* * Maximum scatter/gather segments per request. @@ -3247,8 +4101,22 @@ Acked-by: jbeulich@novell.com + uint64_t id; /* private guest value, echoed in resp */ + blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - }; ++}; +typedef struct blkif_request blkif_request_t; ++ ++/* ++ * Cast to this structure when blkif_request.operation == BLKIF_OP_TRIM ++ * sizeof(struct blkif_request_trim) <= sizeof(struct blkif_request) ++ */ ++struct blkif_request_trim { ++ uint8_t operation; /* BLKIF_OP_TRIM */ ++ uint8_t reserved; /* */ ++ blkif_vdev_t handle; /* same as for read/write requests */ ++ uint64_t id; /* private guest value, echoed in resp */ ++ blkif_sector_t sector_number;/* start sector idx on disk */ ++ uint64_t nr_sectors; /* number of contiguous sectors to trim */ + }; ++typedef struct blkif_request_trim blkif_request_trim_t; struct blkif_response { - uint64_t id; /* copied from request */ @@ -3262,8 +4130,8 @@ Acked-by: jbeulich@novell.com /* * STATUS RETURN CODES. ---- head-2010-05-12.orig/include/xen/interface/io/console.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/console.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/io/console.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/console.h 2011-01-31 15:14:12.000000000 +0100 @@ -3,6 +3,24 @@ * * Console I/O interface for Xen guest OSes. 
@@ -3289,8 +4157,8 @@ Acked-by: jbeulich@novell.com * Copyright (c) 2005, Keir Fraser */ ---- head-2010-05-12.orig/include/xen/interface/io/fbif.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/fbif.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/io/fbif.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/fbif.h 2011-01-31 15:14:12.000000000 +0100 @@ -41,12 +41,13 @@ */ #define XENFB_TYPE_UPDATE 2 @@ -3455,8 +4323,8 @@ Acked-by: jbeulich@novell.com }; /* ---- head-2010-05-12.orig/include/xen/interface/io/kbdif.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/kbdif.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/io/kbdif.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/kbdif.h 2011-01-31 15:14:12.000000000 +0100 @@ -45,34 +45,38 @@ */ #define XENKBD_TYPE_POS 4 @@ -3564,8 +4432,8 @@ Acked-by: jbeulich@novell.com }; #endif ---- head-2010-05-12.orig/include/xen/interface/io/netif.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/netif.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/io/netif.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/netif.h 2011-01-31 15:14:12.000000000 +0100 @@ -3,6 +3,24 @@ * * Unified network-device I/O interface for Xen guest OSes. @@ -3751,8 +4619,8 @@ Acked-by: jbeulich@novell.com #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 ---- head-2010-05-12.orig/include/xen/interface/io/protocols.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/protocols.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/io/protocols.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/protocols.h 2011-01-31 15:14:12.000000000 +0100 @@ -1,10 +1,31 @@ +/****************************************************************************** + * protocols.h @@ -3795,8 +4663,8 @@ Acked-by: jbeulich@novell.com #else # error arch fixup needed here #endif ---- head-2010-05-12.orig/include/xen/interface/io/ring.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/ring.h 2010-02-24 13:13:46.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/io/ring.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/ring.h 2011-01-31 15:14:12.000000000 +0100 @@ -3,16 +3,42 @@ * * Shared producer-consumer ring macros. @@ -3841,18 +4709,13 @@ Acked-by: jbeulich@novell.com #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) -@@ -24,74 +50,84 @@ typedef unsigned int RING_IDX; - * A ring contains as many entries as will fit, rounded down to the nearest - * power of two (so we can mask with (size-1) to loop around). +@@ -31,74 +57,86 @@ typedef unsigned int RING_IDX; + /* + * The same for passing in an actual pointer instead of a name tag. */ -+#define __CONST_RING_SIZE(_s, _sz) \ -+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ -+ sizeof(((struct _s##_sring *)0)->ring[0]))) -+/* -+ * The same for passing in an actual pointer instead of a name tag. 
-+ */ - #define __RING_SIZE(_s, _sz) \ -- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) +-#define __RING_SIZE(_s, _sz) \ +- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) ++#define __RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* @@ -3942,8 +4805,16 @@ Acked-by: jbeulich@novell.com +struct __name##_sring { \ + RING_IDX req_prod, req_event; \ + RING_IDX rsp_prod, rsp_event; \ -+ uint8_t netfront_smartpoll_active; \ -+ uint8_t pad[47]; \ ++ union { \ ++ struct { \ ++ uint8_t smartpoll_active; \ ++ } netif; \ ++ struct { \ ++ uint8_t msg; \ ++ } tapif_user; \ ++ uint8_t pvt_pad[4]; \ ++ } private; \ ++ uint8_t __pad[44]; \ + union __name##_sring_entry ring[1]; /* variable-length */ \ +}; \ + \ @@ -3970,7 +4841,7 @@ Acked-by: jbeulich@novell.com /* * Macros for manipulating rings. -@@ -109,86 +145,94 @@ struct __name##_back_ring { \ +@@ -116,86 +154,95 @@ struct __name##_back_ring { \ */ /* Initialising empty rings */ @@ -3981,7 +4852,8 @@ Acked-by: jbeulich@novell.com +#define SHARED_RING_INIT(_s) do { \ + (_s)->req_prod = (_s)->rsp_prod = 0; \ + (_s)->req_event = (_s)->rsp_event = 1; \ -+ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ ++ (void)memset((_s)->private.pvt_pad, 0, sizeof((_s)->private.pvt_pad)); \ ++ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ } while(0) -#define FRONT_RING_INIT(_r, _s, __size) do { \ @@ -4109,7 +4981,7 @@ Acked-by: jbeulich@novell.com } while (0) /* -@@ -221,40 +265,40 @@ struct __name##_back_ring { \ +@@ -228,40 +275,40 @@ struct __name##_back_ring { \ * field appropriately. */ @@ -4178,108 +5050,18 @@ Acked-by: jbeulich@novell.com } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ ---- head-2010-05-12.orig/include/xen/interface/io/xenbus.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/xenbus.h 2010-01-19 16:01:04.000000000 +0100 -@@ -3,42 +3,68 @@ - * - * Xenbus protocol details. - * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * - * Copyright (C) 2005 XenSource Ltd. - */ - - #ifndef _XEN_PUBLIC_IO_XENBUS_H - #define _XEN_PUBLIC_IO_XENBUS_H - --/* The state of either end of the Xenbus, i.e. the current communication -- status of initialisation across the bus. States here imply nothing about -- the state of the connection between the driver and the kernel's device -- layers. 
*/ --enum xenbus_state --{ -- XenbusStateUnknown = 0, -- XenbusStateInitialising = 1, -- XenbusStateInitWait = 2, /* Finished early -- initialisation, but waiting -- for information from the peer -- or hotplug scripts. */ -- XenbusStateInitialised = 3, /* Initialised and waiting for a -- connection from the peer. */ -- XenbusStateConnected = 4, -- XenbusStateClosing = 5, /* The device is being closed -- due to an error or an unplug -- event. */ -- XenbusStateClosed = 6 -+/* -+ * The state of either end of the Xenbus, i.e. the current communication -+ * status of initialisation across the bus. States here imply nothing about -+ * the state of the connection between the driver and the kernel's device -+ * layers. -+ */ -+enum xenbus_state { -+ XenbusStateUnknown = 0, -+ -+ XenbusStateInitialising = 1, -+ -+ /* -+ * InitWait: Finished early initialisation but waiting for information -+ * from the peer or hotplug scripts. -+ */ -+ XenbusStateInitWait = 2, -+ -+ /* -+ * Initialised: Waiting for a connection from the peer. -+ */ -+ XenbusStateInitialised = 3, +--- head-2011-03-17.orig/include/xen/interface/io/xenbus.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/xenbus.h 2011-01-31 15:14:12.000000000 +0100 +@@ -36,6 +36,7 @@ enum xenbus_state -+ XenbusStateConnected = 4, -+ -+ /* -+ * Closing: The device is being closed due to an error or an unplug event. -+ */ -+ XenbusStateClosing = 5, -+ -+ XenbusStateClosed = 6, -+ -+ /* -+ * Reconfiguring: The device is being reconfigured. -+ */ -+ XenbusStateReconfiguring = 7, -+ -+ XenbusStateReconfigured = 8 + XenbusStateReconfigured = 8 }; +typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ -- --/* -- * Local variables: -- * c-file-style: "linux" -- * indent-tabs-mode: t -- * c-indent-level: 8 -- * c-basic-offset: 8 -- * tab-width: 8 -- * End: -- */ ---- head-2010-05-12.orig/include/xen/interface/io/xs_wire.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/io/xs_wire.h 2010-01-19 16:01:04.000000000 +0100 + +--- head-2011-03-17.orig/include/xen/interface/io/xs_wire.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/xs_wire.h 2011-01-31 15:14:12.000000000 +0100 @@ -1,6 +1,25 @@ /* * Details of the "wire" protocol between Xen Store Daemon and client @@ -4306,18 +5088,19 @@ Acked-by: jbeulich@novell.com * Copyright (C) 2005 Rusty Russell IBM Corporation */ -@@ -26,7 +45,9 @@ enum xsd_sockmsg_type +@@ -26,7 +45,10 @@ enum xsd_sockmsg_type XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, - XS_IS_DOMAIN_INTRODUCED + XS_IS_DOMAIN_INTRODUCED, + XS_RESUME, -+ XS_SET_TARGET ++ XS_SET_TARGET, ++ XS_RESTRICT }; #define XS_WRITE_NONE "NONE" -@@ -39,8 +60,14 @@ struct xsd_errors +@@ -39,8 +61,14 @@ struct xsd_errors int errnum; const char *errstring; }; @@ -4333,7 +5116,7 @@ Acked-by: jbeulich@novell.com XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), -@@ -56,6 +83,7 @@ static struct xsd_errors xsd_errors[] __ +@@ -56,6 +84,7 @@ static struct xsd_errors xsd_errors[] __ XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; @@ -4341,7 +5124,7 @@ Acked-by: jbeulich@novell.com struct xsd_sockmsg { -@@ -84,4 +112,11 @@ struct xenstore_domain_interface { +@@ -84,4 +113,11 @@ struct xenstore_domain_interface { XENSTORE_RING_IDX rsp_cons, rsp_prod; }; @@ -4353,9 +5136,9 @@ Acked-by: jbeulich@novell.com +#define XENSTORE_REL_PATH_MAX 2048 + #endif /* _XS_WIRE_H */ ---- head-2010-05-12.orig/include/xen/interface/memory.h 2010-05-12 08:55:24.000000000 +0200 -+++ 
head-2010-05-12/include/xen/interface/memory.h 2010-01-19 16:01:04.000000000 +0100 -@@ -3,20 +3,57 @@ +--- head-2011-03-17.orig/include/xen/interface/memory.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/memory.h 2011-01-31 15:14:12.000000000 +0100 +@@ -3,13 +3,31 @@ * * Memory reservation and information. * @@ -4383,15 +5166,12 @@ Acked-by: jbeulich@novell.com #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ +-#include +#include "xen.h" -+ + /* -- * Increase or decrease the specified domain's memory reservation. Returns a -- * -ve errcode on failure, or the # extents successfully allocated or freed. -+ * Increase or decrease the specified domain's memory reservation. Returns the -+ * number of extents successfully allocated or freed. - * arg == addr of struct xen_memory_reservation. - */ + * Increase or decrease the specified domain's memory reservation. Returns a +@@ -19,6 +37,26 @@ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 @@ -4410,12 +5190,15 @@ Acked-by: jbeulich@novell.com +#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) +/* Flag to populate physmap with populate-on-demand entries */ +#define XENMEMF_populate_on_demand (1<<16) ++/* Flag to request allocation only from the node specified */ ++#define XENMEMF_exact_node_request (1<<17) ++#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request) +#endif + struct xen_memory_reservation { /* -@@ -29,28 +66,70 @@ struct xen_memory_reservation { +@@ -31,28 +69,27 @@ struct xen_memory_reservation { * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ @@ -4445,57 +5228,29 @@ Acked-by: jbeulich@novell.com * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; -+}; +- + }; +-DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); +typedef struct xen_memory_reservation xen_memory_reservation_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); -+/* -+ * An atomic exchange of memory pages. If return code is zero then -+ * @out.extent_list provides GMFNs of the newly-allocated memory. -+ * Returns zero on complete success, otherwise a negative error code. -+ * On complete success then always @nr_exchanged == @in.nr_extents. -+ * On partial success @nr_exchanged indicates how much work was done. -+ */ -+#define XENMEM_exchange 11 -+struct xen_memory_exchange { -+ /* -+ * [IN] Details of memory extents to be exchanged (GMFN bases). -+ * Note that @in.address_bits is ignored and unused. -+ */ -+ struct xen_memory_reservation in; -+ -+ /* -+ * [IN/OUT] Details of new memory extents. -+ * We require that: -+ * 1. @in.domid == @out.domid -+ * 2. @in.nr_extents << @in.extent_order == -+ * @out.nr_extents << @out.extent_order -+ * 3. @in.extent_start and @out.extent_start lists must not overlap -+ * 4. @out.extent_start lists GPFN bases to be populated -+ * 5. @out.extent_start is overwritten with allocated GMFN bases -+ */ -+ struct xen_memory_reservation out; -+ -+ /* -+ * [OUT] Number of input extents that were successfully exchanged: -+ * 1. The first @nr_exchanged input extents were successfully -+ * deallocated. -+ * 2. The corresponding first entries in the output extent list correctly -+ * indicate the GMFNs that were successfully exchanged. -+ * 3. All other input and output extents are untouched. -+ * 4. If not all input exents are exchanged then the return code of this -+ * command will be non-zero. -+ * 5. 
THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! -+ */ + /* + * An atomic exchange of memory pages. If return code is zero then +@@ -92,10 +129,11 @@ struct xen_memory_exchange { + * command will be non-zero. + * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! + */ +- unsigned long nr_exchanged; + xen_ulong_t nr_exchanged; }; --DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); +typedef struct xen_memory_exchange xen_memory_exchange_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); +-DEFINE_GUEST_HANDLE_STRUCT(xen_memory_exchange); /* * Returns the maximum machine frame number of mapped RAM in this system. -@@ -68,6 +147,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_re + * This command always succeeds (it never returns an error code). +@@ -112,6 +150,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_ex #define XENMEM_maximum_reservation 4 /* @@ -4507,7 +5262,7 @@ Acked-by: jbeulich@novell.com * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. -@@ -86,7 +170,7 @@ struct xen_machphys_mfn_list { +@@ -130,7 +173,7 @@ struct xen_machphys_mfn_list { * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ @@ -4516,31 +5271,32 @@ Acked-by: jbeulich@novell.com /* * Number of extents written to the above array. This will be smaller -@@ -94,7 +178,22 @@ struct xen_machphys_mfn_list { +@@ -138,7 +181,8 @@ struct xen_machphys_mfn_list { */ unsigned int nr_extents; }; -DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); +typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); -+ -+/* -+ * Returns the location in virtual address space of the machine_to_phys -+ * mapping table. Architectures which do not have a m2p table, or which do not -+ * map it by default into guest address space, do not implement this command. -+ * arg == addr of xen_machphys_mapping_t. -+ */ -+#define XENMEM_machphys_mapping 12 -+struct xen_machphys_mapping { + + /* + * Returns the location in virtual address space of the machine_to_phys +@@ -148,10 +192,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_ + */ + #define XENMEM_machphys_mapping 12 + struct xen_machphys_mapping { +- unsigned long v_start, v_end; /* Start and end virtual addresses. */ +- unsigned long max_mfn; /* Maximum MFN that can be looked up. */ + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ -+}; + }; +-DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t); +typedef struct xen_machphys_mapping xen_machphys_mapping_t; +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's -@@ -109,37 +208,84 @@ struct xen_add_to_physmap { +@@ -166,38 +211,22 @@ struct xen_add_to_physmap { /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ @@ -4560,17 +5316,11 @@ Acked-by: jbeulich@novell.com -DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); +typedef struct xen_add_to_physmap xen_add_to_physmap_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); -+ -+/*** REMOVED ***/ -+/*#define XENMEM_translate_gpfn_list 8*/ - /* +-/* - * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error - * code on failure. This call only works for auto-translated guests. 
-+ * Returns the pseudo-physical memory map as it was when the domain -+ * was started (specified by XENMEM_set_memory_map). -+ * arg == addr of xen_memory_map_t. - */ +- */ -#define XENMEM_translate_gpfn_list 8 -struct xen_translate_gpfn_list { - /* Which domain to translate for? */ @@ -4581,34 +5331,36 @@ Acked-by: jbeulich@novell.com - - /* List of GPFNs to translate. */ - GUEST_HANDLE(ulong) gpfn_list; -+#define XENMEM_memory_map 9 -+struct xen_memory_map { -+ /* -+ * On call the number of entries which can be stored in buffer. On -+ * return the number of entries which have been stored in -+ * buffer. -+ */ -+ unsigned int nr_entries; - - /* +- +- /* - * Output list to contain MFN translations. May be the same as the input - * list (in which case each input GPFN is overwritten with the output MFN). -+ * Entries in the buffer are in the same format as returned by the -+ * BIOS INT 0x15 EAX=0xE820 call. - */ +- */ - GUEST_HANDLE(ulong) mfn_list; +-}; +-DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list); ++/*** REMOVED ***/ ++/*#define XENMEM_translate_gpfn_list 8*/ + + /* + * Returns the pseudo-physical memory map as it was when the domain +@@ -217,9 +246,10 @@ struct xen_memory_map { + * Entries in the buffer are in the same format as returned by the + * BIOS INT 0x15 EAX=0xE820 call. + */ +- GUEST_HANDLE(void) buffer; + XEN_GUEST_HANDLE(void) buffer; -+}; + }; +-DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map); +typedef struct xen_memory_map xen_memory_map_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); -+ -+/* -+ * Returns the real physical memory map. Passes the same structure as -+ * XENMEM_memory_map. -+ * arg == addr of xen_memory_map_t. -+ */ -+#define XENMEM_machine_memory_map 10 -+ + + /* + * Returns the real physical memory map. Passes the same structure as +@@ -228,10 +258,37 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_ma + */ + #define XENMEM_machine_memory_map 10 + +/* + * Set the pseudo-physical memory map of a domain, as returned by + * XENMEM_memory_map. @@ -4618,8 +5370,7 @@ Acked-by: jbeulich@novell.com +struct xen_foreign_memory_map { + domid_t domid; + struct xen_memory_map map; - }; --DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list); ++}; +typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; +DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); + @@ -4636,17 +5387,20 @@ Acked-by: jbeulich@novell.com + domid_t domid; +}; +typedef struct xen_pod_target xen_pod_target_t; -+ -+/* + + /* +- * Prevent the balloon driver from changing the memory reservation +- * during a driver critical region. + * Get the number of MFNs saved through memory sharing. + * The call never fails. -+ */ + */ +-extern spinlock_t xen_reservation_lock; +#define XENMEM_get_sharing_freed_pages 18 - ++ #endif /* __XEN_PUBLIC_MEMORY_H__ */ ---- head-2010-05-12.orig/include/xen/interface/physdev.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/physdev.h 2010-01-19 16:01:04.000000000 +0100 -@@ -21,10 +21,12 @@ +--- head-2011-03-17.orig/include/xen/interface/physdev.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/physdev.h 2011-03-17 13:50:24.000000000 +0100 +@@ -21,6 +21,8 @@ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ @@ -4655,23 +5409,10 @@ Acked-by: jbeulich@novell.com /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) -- * @cmd == PHYSDEVOP_??? (physdev operation). -+ * @cmd == PHYSDEVOP_??? (physdev operation). - * @args == Operation-specific extra arguments (NULL if none). 
- */ - -@@ -32,114 +34,231 @@ - * Notify end-of-interrupt (EOI) for the specified IRQ. - * @arg == pointer to physdev_eoi structure. - */ --#define PHYSDEVOP_eoi 12 -+#define PHYSDEVOP_eoi 12 - struct physdev_eoi { -- /* IN */ -- uint32_t irq; -+ /* IN */ -+ uint32_t irq; -+}; +@@ -37,6 +39,23 @@ struct physdev_eoi { + /* IN */ + uint32_t irq; + }; +typedef struct physdev_eoi physdev_eoi_t; +DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); + @@ -4686,164 +5427,86 @@ Acked-by: jbeulich@novell.com +struct physdev_pirq_eoi_gmfn { + /* IN */ + xen_pfn_t gmfn; - }; ++}; +typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; +DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. - * @arg == pointer to physdev_irq_status_query structure. - */ --#define PHYSDEVOP_irq_status_query 5 -+#define PHYSDEVOP_irq_status_query 5 - struct physdev_irq_status_query { -- /* IN */ -- uint32_t irq; -- /* OUT */ -- uint32_t flags; /* XENIRQSTAT_* */ -+ /* IN */ -+ uint32_t irq; -+ /* OUT */ -+ uint32_t flags; /* XENIRQSTAT_* */ +@@ -49,6 +68,8 @@ struct physdev_irq_status_query { + /* OUT */ + uint32_t flags; /* XENIRQSTAT_* */ }; +typedef struct physdev_irq_status_query physdev_irq_status_query_t; +DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ --#define _XENIRQSTAT_needs_eoi (0) --#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) -+#define _XENIRQSTAT_needs_eoi (0) -+#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) - - /* IRQ shared by multiple guests? */ --#define _XENIRQSTAT_shared (1) --#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) -+#define _XENIRQSTAT_shared (1) -+#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) - - /* - * Set the current VCPU's I/O privilege level. - * @arg == pointer to physdev_set_iopl structure. - */ --#define PHYSDEVOP_set_iopl 6 -+#define PHYSDEVOP_set_iopl 6 - struct physdev_set_iopl { -- /* IN */ -- uint32_t iopl; -+ /* IN */ -+ uint32_t iopl; + #define _XENIRQSTAT_needs_eoi (0) +@@ -67,6 +88,8 @@ struct physdev_set_iopl { + /* IN */ + uint32_t iopl; }; +typedef struct physdev_set_iopl physdev_set_iopl_t; +DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. - * @arg == pointer to physdev_set_iobitmap structure. - */ --#define PHYSDEVOP_set_iobitmap 7 -+#define PHYSDEVOP_set_iobitmap 7 +@@ -75,9 +98,15 @@ struct physdev_set_iopl { + #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { -- /* IN */ -- uint8_t * bitmap; -- uint32_t nr_ports; -+ /* IN */ + /* IN */ +#if __XEN_INTERFACE_VERSION__ >= 0x00030205 -+ XEN_GUEST_HANDLE(uint8) bitmap; ++ XEN_GUEST_HANDLE(uint8) bitmap; +#else -+ uint8_t *bitmap; + uint8_t * bitmap; +#endif -+ uint32_t nr_ports; + uint32_t nr_ports; }; +typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; +DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. - * @arg == pointer to physdev_apic structure. 
- */ --#define PHYSDEVOP_apic_read 8 --#define PHYSDEVOP_apic_write 9 -+#define PHYSDEVOP_apic_read 8 -+#define PHYSDEVOP_apic_write 9 - struct physdev_apic { -- /* IN */ -- unsigned long apic_physbase; -- uint32_t reg; -- /* IN or OUT */ -- uint32_t value; -+ /* IN */ -+ unsigned long apic_physbase; -+ uint32_t reg; -+ /* IN or OUT */ -+ uint32_t value; +@@ -92,6 +121,8 @@ struct physdev_apic { + /* IN or OUT */ + uint32_t value; }; +typedef struct physdev_apic physdev_apic_t; +DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. - * @arg == pointer to physdev_irq structure. - */ --#define PHYSDEVOP_alloc_irq_vector 10 --#define PHYSDEVOP_free_irq_vector 11 -+#define PHYSDEVOP_alloc_irq_vector 10 -+#define PHYSDEVOP_free_irq_vector 11 - struct physdev_irq { -- /* IN */ -- uint32_t irq; -- /* IN or OUT */ -- uint32_t vector; -+ /* IN */ -+ uint32_t irq; -+ /* IN or OUT */ -+ uint32_t vector; -+}; +@@ -105,6 +136,8 @@ struct physdev_irq { + /* IN or OUT */ + uint32_t vector; + }; +typedef struct physdev_irq physdev_irq_t; +DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); -+ -+#define MAP_PIRQ_TYPE_MSI 0x0 -+#define MAP_PIRQ_TYPE_GSI 0x1 -+#define MAP_PIRQ_TYPE_UNKNOWN 0x2 -+ -+#define PHYSDEVOP_map_pirq 13 -+struct physdev_map_pirq { -+ domid_t domid; -+ /* IN */ -+ int type; -+ /* IN */ -+ int index; -+ /* IN or OUT */ -+ int pirq; -+ /* IN */ -+ int bus; -+ /* IN */ -+ int devfn; -+ /* IN */ -+ int entry_nr; -+ /* IN */ -+ uint64_t table_base; -+}; + + #define MAP_PIRQ_TYPE_MSI 0x0 + #define MAP_PIRQ_TYPE_GSI 0x1 +@@ -128,6 +161,8 @@ struct physdev_map_pirq { + /* IN */ + uint64_t table_base; + }; +typedef struct physdev_map_pirq physdev_map_pirq_t; +DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); -+ -+#define PHYSDEVOP_unmap_pirq 14 -+struct physdev_unmap_pirq { -+ domid_t domid; -+ /* IN */ -+ int pirq; -+}; -+ + + #define PHYSDEVOP_unmap_pirq 14 + struct physdev_unmap_pirq { +@@ -135,6 +170,8 @@ struct physdev_unmap_pirq { + /* IN */ + int pirq; + }; +typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; +DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); -+ -+#define PHYSDEVOP_manage_pci_add 15 -+#define PHYSDEVOP_manage_pci_remove 16 -+struct physdev_manage_pci { -+ /* IN */ -+ uint8_t bus; -+ uint8_t devfn; -+}; -+ + + #define PHYSDEVOP_manage_pci_add 15 + #define PHYSDEVOP_manage_pci_remove 16 +@@ -143,6 +180,17 @@ struct physdev_manage_pci { + uint8_t bus; + uint8_t devfn; + }; +typedef struct physdev_manage_pci physdev_manage_pci_t; +DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); + @@ -4855,95 +5518,55 @@ Acked-by: jbeulich@novell.com +}; +typedef struct physdev_restore_msi physdev_restore_msi_t; +DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); -+ -+#define PHYSDEVOP_manage_pci_add_ext 20 -+struct physdev_manage_pci_ext { -+ /* IN */ -+ uint8_t bus; -+ uint8_t devfn; -+ unsigned is_extfn; -+ unsigned is_virtfn; -+ struct { -+ uint8_t bus; -+ uint8_t devfn; -+ } physfn; - }; + #define PHYSDEVOP_manage_pci_add_ext 20 + struct physdev_manage_pci_ext { +@@ -156,6 +204,8 @@ struct physdev_manage_pci_ext { + uint8_t devfn; + } physfn; + }; +typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; +DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); -+ + /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() - * hypercall since 0x00030202. 
- */ - struct physdev_op { -- uint32_t cmd; -- union { -- struct physdev_irq_status_query irq_status_query; -- struct physdev_set_iopl set_iopl; -- struct physdev_set_iobitmap set_iobitmap; -- struct physdev_apic apic_op; -- struct physdev_irq irq_op; -- } u; -+ uint32_t cmd; -+ union { -+ struct physdev_irq_status_query irq_status_query; -+ struct physdev_set_iopl set_iopl; -+ struct physdev_set_iobitmap set_iobitmap; -+ struct physdev_apic apic_op; -+ struct physdev_irq irq_op; -+ } u; -+}; +@@ -171,6 +221,8 @@ struct physdev_op { + struct physdev_irq irq_op; + } u; + }; +typedef struct physdev_op physdev_op_t; +DEFINE_XEN_GUEST_HANDLE(physdev_op_t); -+ -+#define PHYSDEVOP_setup_gsi 21 -+struct physdev_setup_gsi { -+ int gsi; -+ /* IN */ -+ uint8_t triggering; -+ /* IN */ -+ uint8_t polarity; -+ /* IN */ - }; + #define PHYSDEVOP_setup_gsi 21 + struct physdev_setup_gsi { +@@ -181,12 +233,10 @@ struct physdev_setup_gsi { + uint8_t polarity; + /* IN */ + }; +typedef struct physdev_setup_gsi physdev_setup_gsi_t; +DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t); + +-#define PHYSDEVOP_get_nr_pirqs 22 +-struct physdev_nr_pirqs { +- /* OUT */ +- uint32_t nr_pirqs; +-}; ++/* leave PHYSDEVOP 22 free */ + + /* type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI + * the hypercall returns a free pirq */ +@@ -198,6 +248,9 @@ struct physdev_get_free_pirq { + uint32_t pirq; + }; + ++typedef struct physdev_get_free_pirq physdev_get_free_pirq_t; ++DEFINE_XEN_GUEST_HANDLE(physdev_get_free_pirq_t); + /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** -- * ** unsupported by newer versions of Xen. ** -+ * ** unsupported by newer versions of Xen. ** - */ --#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 -+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 - - /* - * These all-capitals physdev operation names are superceded by the new names - * (defined above) since interface version 0x00030202. - */ --#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query --#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl --#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap --#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read --#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write --#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector --#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector -+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query -+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl -+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap -+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read -+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write -+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector -+#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector - #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi --#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared -+#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared - - #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ ---- head-2010-05-12.orig/include/xen/interface/sched.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/sched.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/sched.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/sched.h 2011-01-31 15:14:12.000000000 +0100 @@ -3,6 +3,24 @@ * * Scheduler state interactions @@ -5000,7 +5623,7 @@ Acked-by: jbeulich@novell.com /* * Poll a set of event-channel ports. Return when one or more are pending. 
An -@@ -58,11 +77,26 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdow +@@ -58,11 +77,49 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdow */ #define SCHEDOP_poll 3 struct sched_poll { @@ -5026,11 +5649,41 @@ Acked-by: jbeulich@novell.com +}; +typedef struct sched_remote_shutdown sched_remote_shutdown_t; +DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); ++ ++/* ++ * Latch a shutdown code, so that when the domain later shuts down it ++ * reports this code to the control tools. ++ * @arg == as for SCHEDOP_shutdown. ++ */ ++#define SCHEDOP_shutdown_code 5 ++ ++/* ++ * Setup, poke and destroy a domain watchdog timer. ++ * @arg == pointer to sched_watchdog structure. ++ * With id == 0, setup a domain watchdog timer to cause domain shutdown ++ * after timeout, returns watchdog id. ++ * With id != 0 and timeout == 0, destroy domain watchdog timer. ++ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout. ++ */ ++#define SCHEDOP_watchdog 6 ++struct sched_watchdog { ++ uint32_t id; /* watchdog ID */ ++ uint32_t timeout; /* timeout */ ++}; ++typedef struct sched_watchdog sched_watchdog_t; ++DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control ---- head-2010-05-12.orig/include/xen/interface/vcpu.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/vcpu.h 2010-01-19 16:01:04.000000000 +0100 +@@ -73,5 +130,6 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll); + #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ + #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ + #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ ++#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ + + #endif /* __XEN_PUBLIC_SCHED_H__ */ +--- head-2011-03-17.orig/include/xen/interface/vcpu.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/vcpu.h 2011-03-17 13:50:24.000000000 +0100 @@ -27,11 +27,13 @@ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ @@ -5201,7 +5854,7 @@ Acked-by: jbeulich@novell.com /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ -@@ -161,13 +169,65 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_sing +@@ -161,13 +169,62 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_sing * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. @@ -5225,8 +5878,7 @@ Acked-by: jbeulich@novell.com +/* + * Get the physical ID information for a pinned vcpu's underlying physical + * processor. The physical ID informmation is architecture-specific. -+ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and -+ * greater are reserved. ++ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id. + * This command returns -EINVAL if it is not a valid operation for this VCPU. + */ +#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ @@ -5235,10 +5887,8 @@ Acked-by: jbeulich@novell.com +}; +typedef struct vcpu_get_physid vcpu_get_physid_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t); -+#define xen_vcpu_physid_to_x86_apicid(physid) \ -+ ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid))) -+#define xen_vcpu_physid_to_x86_acpiid(physid) \ -+ ((((uint32_t)((physid)>>32)) >= 0xff) ? 
0xff : ((uint8_t)((physid)>>32))) ++#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid)) ++#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32)) + +/* + * Register a memory location to get a secondary copy of the vcpu time @@ -5269,8 +5919,8 @@ Acked-by: jbeulich@novell.com +DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t); #endif /* __XEN_PUBLIC_VCPU_H__ */ ---- head-2010-05-12.orig/include/xen/interface/version.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/version.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/version.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/version.h 2011-01-31 15:14:12.000000000 +0100 @@ -3,6 +3,24 @@ * * Xen version, type, and compile information. @@ -5365,8 +6015,8 @@ Acked-by: jbeulich@novell.com +typedef char xen_commandline_t[1024]; + #endif /* __XEN_PUBLIC_VERSION_H__ */ ---- head-2010-05-12.orig/include/xen/interface/xen.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/interface/xen.h 2010-05-07 11:10:48.000000000 +0200 +--- head-2011-03-17.orig/include/xen/interface/xen.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/xen.h 2011-03-17 13:50:24.000000000 +0100 @@ -3,35 +3,69 @@ * * Guest OS interface to Xen. @@ -5588,7 +6238,7 @@ Acked-by: jbeulich@novell.com #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* -@@ -163,9 +245,20 @@ +@@ -163,9 +245,23 @@ * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * @@ -5606,16 +6256,21 @@ Acked-by: jbeulich@novell.com + * cmd: MMUEXT_COPY_PAGE + * mfn: Machine frame number of the destination page. + * src_mfn: Machine frame number of the source page. ++ * ++ * cmd: MMUEXT_[UN]MARK_SUPER ++ * mfn: Machine frame number of head of superpage to be [un]marked. */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 -@@ -182,24 +275,35 @@ +@@ -182,24 +278,37 @@ #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 +#define MMUEXT_CLEAR_PAGE 16 +#define MMUEXT_COPY_PAGE 17 +#define MMUEXT_FLUSH_CACHE_GLOBAL 18 ++#define MMUEXT_MARK_SUPER 19 ++#define MMUEXT_UNMARK_SUPER 20 #ifndef __ASSEMBLY__ struct mmuext_op { @@ -5635,7 +6290,7 @@ Acked-by: jbeulich@novell.com + unsigned int cmd; + union { + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR -+ * CLEAR_PAGE, COPY_PAGE */ ++ * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */ + xen_pfn_t mfn; + /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ + unsigned long linear_addr; @@ -5659,7 +6314,7 @@ Acked-by: jbeulich@novell.com #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ -@@ -224,11 +328,24 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); +@@ -224,11 +333,24 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 @@ -5685,21 +6340,24 @@ Acked-by: jbeulich@novell.com #ifndef __ASSEMBLY__ -@@ -260,6 +377,13 @@ typedef uint16_t domid_t; +@@ -260,6 +382,16 @@ typedef uint16_t domid_t; #define DOMID_XEN (0x7FF2U) /* + * DOMID_COW is used as the owner of sharable pages */ +#define DOMID_COW (0x7FF3U) + -+/* DOMID_INVALID is used to identity invalid domid */ -+#define DOMID_INVALID (0x7FFFU) ++/* DOMID_INVALID is used to identify pages with unknown owner. */ ++#define DOMID_INVALID (0x7FF4U) ++ ++/* Idle domain. */ ++#define DOMID_IDLE (0x7FFFU) + +/* * Send an array of these to HYPERVISOR_mmu_update(). * NB. 
The fields are natural pointer/address size for this architecture. */ -@@ -267,18 +391,19 @@ struct mmu_update { +@@ -267,18 +399,19 @@ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; @@ -5723,7 +6381,7 @@ Acked-by: jbeulich@novell.com /* * Event channel endpoints per domain: -@@ -287,173 +412,271 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_ent +@@ -287,173 +420,274 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_ent #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { @@ -6033,7 +6691,9 @@ Acked-by: jbeulich@novell.com + unsigned long pt_base; /* VIRTUAL address of page directory. */ + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ + unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ -+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ ++ unsigned long mod_start; /* VIRTUAL address of pre-loaded module */ ++ /* (PFN of pre-loaded module if */ ++ /* SIF_MOD_START_PFN set in flags). */ + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ + int8_t cmd_line[MAX_GUEST_CMDLINE]; + /* The pfn range here covers both page table and p->m table frames. */ @@ -6052,9 +6712,9 @@ Acked-by: jbeulich@novell.com #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ +#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ ++#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */ +#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ - --typedef uint64_t cpumap_t; ++ +/* + * A multiboot module is a package containing modules very similar to a + * multiboot module array. The only differences are: @@ -6080,7 +6740,8 @@ Acked-by: jbeulich@novell.com + /* Unused, must be zero */ + uint32_t pad; +}; -+ + +-typedef uint64_t cpumap_t; +typedef struct dom0_vga_console_info { + uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ +#define XEN_VGATYPE_TEXT_MODE_3 0x03 @@ -6125,7 +6786,7 @@ Acked-by: jbeulich@novell.com typedef uint8_t xen_domain_handle_t[16]; -@@ -461,6 +684,11 @@ typedef uint8_t xen_domain_handle_t[16]; +@@ -461,6 +695,11 @@ typedef uint8_t xen_domain_handle_t[16]; #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) @@ -6137,23 +6798,32 @@ Acked-by: jbeulich@novell.com #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ -@@ -468,4 +696,14 @@ typedef uint8_t xen_domain_handle_t[16]; +@@ -468,4 +707,23 @@ typedef uint8_t xen_domain_handle_t[16]; #endif /* !__ASSEMBLY__ */ +/* Default definitions for macros used by domctl/sysctl. 
*/ +#if defined(__XEN__) || defined(__XEN_TOOLS__) ++ +#ifndef uint64_aligned_t +#define uint64_aligned_t uint64_t +#endif +#ifndef XEN_GUEST_HANDLE_64 +#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) +#endif ++ ++#ifndef __ASSEMBLY__ ++struct xenctl_cpumap { ++ XEN_GUEST_HANDLE_64(uint8) bitmap; ++ uint32_t nr_cpus; ++}; +#endif ++ ++#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + #endif /* __XEN_PUBLIC_XEN_H__ */ ---- head-2010-05-12.orig/include/xen/xenbus.h 2010-05-12 08:55:24.000000000 +0200 -+++ head-2010-05-12/include/xen/xenbus.h 2010-01-19 16:01:04.000000000 +0100 +--- head-2011-03-17.orig/include/xen/xenbus.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/xenbus.h 2011-01-31 17:53:45.000000000 +0100 @@ -39,7 +39,7 @@ #include #include @@ -6181,7 +6851,7 @@ Acked-by: jbeulich@novell.com /* A xenbus device. */ struct xenbus_device { -@@ -92,7 +101,8 @@ struct xenbus_driver { +@@ -92,9 +101,10 @@ struct xenbus_driver { void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); @@ -6189,8 +6859,11 @@ Acked-by: jbeulich@novell.com + int (*suspend)(struct xenbus_device *dev); + int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); - int (*uevent)(struct xenbus_device *, char **, int, char *, int); +- int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); ++ int (*uevent)(struct xenbus_device *, char **, int, char *, int); struct device_driver driver; + int (*read_otherend_details)(struct xenbus_device *dev); + int (*is_ready)(struct xenbus_device *dev); @@ -105,27 +115,8 @@ static inline struct xenbus_driver *to_x return container_of(drv, struct xenbus_driver, driver); } diff --git a/patches.xen/xen3-kdb-x86 b/patches.xen/xen3-kdb-x86 deleted file mode 100644 index 0b2f8f6..0000000 --- a/patches.xen/xen3-kdb-x86 +++ /dev/null @@ -1,43 +0,0 @@ -From: SGI KDB Development -Subject: kdb-v4.4-2.6.27-rc8-x86-1 -References: FATE#303971 -X-URL: ftp://oss.sgi.com/www/projects/kdb/download/v4.4/ -Patch-mainline: not yet - -The KDB x86 code. 
- -Acked-by: Bernhard Walle - -Automatically created from "patches.suse/kdb-x86" by xen-port-patches.py - ---- head-2010-04-15.orig/arch/x86/kdb/kdba_bt.c 2010-04-15 09:51:55.000000000 +0200 -+++ head-2010-04-15/arch/x86/kdb/kdba_bt.c 2010-04-15 11:41:39.000000000 +0200 -@@ -722,7 +722,7 @@ static const char *bb_spurious[] = { - /* relocate_kernel */ - "relocate_new_kernel", - #endif /* CONFIG_KEXEC */ --#ifdef CONFIG_XEN -+#ifdef CONFIG_PARAVIRT_XEN - /* arch/i386/xen/xen-asm.S */ - "xen_irq_enable_direct_end", - "xen_irq_disable_direct_end", -@@ -4874,7 +4874,7 @@ kdb_bb_all(int argc, const char **argv) - #ifdef CONFIG_MATH_EMULATION - " CONFIG_MATH_EMULATION" - #endif --#ifdef CONFIG_XEN -+#ifdef CONFIG_PARAVIRT_XEN - " CONFIG_XEN" - #endif - #ifdef CONFIG_DEBUG_INFO ---- head-2010-04-15.orig/arch/x86/kdb/kdba_support.c 2010-04-15 09:37:45.000000000 +0200 -+++ head-2010-04-15/arch/x86/kdb/kdba_support.c 2009-10-13 17:01:44.000000000 +0200 -@@ -1528,7 +1528,7 @@ extern void halt_current_cpu(struct pt_r - - void kdba_kdump_shutdown_slave(struct pt_regs *regs) - { --#ifndef CONFIG_XEN -+#ifndef CONFIG_PARAVIRT_XEN - halt_current_cpu(regs); - #endif /* CONFIG_XEN */ - } diff --git a/patches.xen/xen3-patch-2.6.18 b/patches.xen/xen3-patch-2.6.18 index 61bc8f0..1c1c9ca 100644 --- a/patches.xen/xen3-patch-2.6.18 +++ b/patches.xen/xen3-patch-2.6.18 @@ -6,36 +6,46 @@ Automatically created from "patches.kernel.org/patch-2.6.18" by xen-port-patches Acked-by: jbeulich@novell.com ---- head-2010-04-29.orig/arch/x86/Kconfig 2010-03-24 15:02:14.000000000 +0100 -+++ head-2010-04-29/arch/x86/Kconfig 2010-03-24 15:06:08.000000000 +0100 -@@ -70,7 +70,6 @@ config ARCH_DEFCONFIG +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-01-31 17:01:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-01-31 17:02:29.000000000 +0100 +@@ -83,7 +83,6 @@ config ARCH_DEFCONFIG - config GENERIC_TIME + config GENERIC_CMOS_UPDATE def_bool y - depends on !X86_XEN - config GENERIC_CMOS_UPDATE + config CLOCKSOURCE_WATCHDOG def_bool y -@@ -1665,7 +1664,7 @@ config KEXEC_JUMP +@@ -1609,7 +1608,7 @@ config KEXEC_JUMP code in physical address mode via KEXEC config PHYSICAL_START -- hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) -+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP || XEN) +- hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP) ++ hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP || XEN) default "0x1000000" ---help--- This gives the physical address where the kernel is loaded. 
---- head-2010-04-29.orig/arch/x86/kernel/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/Makefile 2010-03-24 15:06:08.000000000 +0100 -@@ -138,5 +138,5 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-03-17.orig/arch/x86/kernel/Makefile 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/Makefile 2011-01-31 17:02:29.000000000 +0100 +@@ -129,5 +129,5 @@ ifeq ($(CONFIG_X86_64),y) pci-dma_64-$(CONFIG_XEN) += pci-dma_32.o endif -disabled-obj-$(CONFIG_XEN) := i8259_$(BITS).o reboot.o smpboot_$(BITS).o +disabled-obj-$(CONFIG_XEN) := i8253.o i8259_$(BITS).o reboot.o smpboot_$(BITS).o tsc_$(BITS).o %/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := ---- head-2010-04-29.orig/arch/x86/kernel/setup64-xen.c 2008-01-28 12:24:19.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup64-xen.c 2010-03-24 15:06:08.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2008-08-07 12:44:36.000000000 +0200 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:28:34.000000000 +0100 +@@ -50,7 +50,6 @@ + #include + #include + #include +-#include + #include + #include + #include +--- head-2011-03-17.orig/arch/x86/kernel/setup64-xen.c 2008-01-28 12:24:19.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup64-xen.c 2011-01-31 17:02:29.000000000 +0100 @@ -363,5 +363,7 @@ void __cpuinit cpu_init (void) fpu_init(); @@ -45,8 +55,8 @@ Acked-by: jbeulich@novell.com + if (raw_irqs_disabled()) + kernel_eflags &= ~X86_EFLAGS_IF; } ---- head-2010-04-29.orig/arch/x86/kernel/time-xen.c 2010-02-24 11:50:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/time-xen.c 2010-03-24 15:06:08.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2010-08-31 09:24:21.000000000 +0200 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-01-31 17:02:29.000000000 +0100 @@ -45,7 +45,6 @@ #include #include @@ -80,7 +90,7 @@ Acked-by: jbeulich@novell.com /* These are peridically updated in shared_info, and then copied here. */ struct shadow_time_info { u64 tsc_timestamp; /* TSC at last update of time vals. */ -@@ -175,24 +175,6 @@ static int __init __permitted_clock_jitt +@@ -172,24 +172,6 @@ static int __init __permitted_clock_jitt } __setup("permitted_clock_jitter=", __permitted_clock_jitter); @@ -105,7 +115,7 @@ Acked-by: jbeulich@novell.com /* * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, * yielding a 64-bit result. 
-@@ -229,14 +211,6 @@ static inline u64 scale_delta(u64 delta, +@@ -226,14 +208,6 @@ static inline u64 scale_delta(u64 delta, return product; } @@ -120,7 +130,7 @@ Acked-by: jbeulich@novell.com void init_cpu_khz(void) { u64 __cpu_khz = 1000000ULL << 32; -@@ -256,6 +230,7 @@ static u64 get_nsec_offset(struct shadow +@@ -253,6 +227,7 @@ static u64 get_nsec_offset(struct shadow return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); } @@ -128,7 +138,7 @@ Acked-by: jbeulich@novell.com static unsigned long get_usec_offset(struct shadow_time_info *shadow) { u64 now, delta; -@@ -263,6 +238,7 @@ static unsigned long get_usec_offset(str +@@ -260,6 +235,7 @@ static unsigned long get_usec_offset(str delta = now - shadow->tsc_timestamp; return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift); } @@ -136,16 +146,16 @@ Acked-by: jbeulich@novell.com static void __update_wallclock(time_t sec, long nsec) { -@@ -377,6 +353,8 @@ void rtc_cmos_write(unsigned char val, u +@@ -374,6 +350,8 @@ void rtc_cmos_write(unsigned char val, u } EXPORT_SYMBOL(rtc_cmos_write); +#ifdef CONFIG_X86_64 + - /* - * This version of gettimeofday has microsecond resolution - * and better than microsecond precision on fast x86 machines with TSC. -@@ -515,6 +493,8 @@ int do_settimeofday(struct timespec *tv) + static struct { + spinlock_t lock; + struct timeval tv; +@@ -530,6 +508,8 @@ int do_settimeofday(struct timespec *tv) EXPORT_SYMBOL(do_settimeofday); @@ -154,7 +164,7 @@ Acked-by: jbeulich@novell.com static void sync_xen_wallclock(unsigned long dummy); static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0); static void sync_xen_wallclock(unsigned long dummy) -@@ -566,11 +546,15 @@ static int set_rtc_mmss(unsigned long no +@@ -581,11 +561,15 @@ static int set_rtc_mmss(unsigned long no return retval; } @@ -170,7 +180,7 @@ Acked-by: jbeulich@novell.com { unsigned int cpu = get_cpu(); struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); -@@ -590,9 +574,9 @@ unsigned long long monotonic_clock(void) +@@ -605,9 +589,9 @@ unsigned long long monotonic_clock(void) return time; } @@ -181,7 +191,7 @@ Acked-by: jbeulich@novell.com unsigned long long sched_clock(void) { return monotonic_clock(); -@@ -762,6 +746,89 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -780,6 +764,89 @@ irqreturn_t timer_interrupt(int irq, voi return IRQ_HANDLED; } @@ -271,7 +281,7 @@ Acked-by: jbeulich@novell.com static void init_missing_ticks_accounting(unsigned int cpu) { struct vcpu_register_runstate_memory_area area; -@@ -908,7 +975,7 @@ static void setup_cpu0_timer_irq(void) +@@ -926,7 +993,7 @@ static void setup_cpu0_timer_irq(void) VIRQ_TIMER, 0, timer_interrupt, @@ -280,7 +290,7 @@ Acked-by: jbeulich@novell.com "timer0", NULL); BUG_ON(per_cpu(timer_irq, 0) < 0); -@@ -950,11 +1017,11 @@ void __init time_init(void) +@@ -968,11 +1035,11 @@ void __init time_init(void) update_wallclock(); @@ -293,7 +303,7 @@ Acked-by: jbeulich@novell.com vxtime.mode = VXTIME_TSC; vxtime.quot = (1000000L << 32) / vxtime_hz; vxtime.tsc_quot = (1000L << 32) / cpu_khz; -@@ -1129,7 +1196,7 @@ int __cpuinit local_setup_timer(unsigned +@@ -1147,7 +1214,7 @@ int __cpuinit local_setup_timer(unsigned irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, timer_interrupt, @@ -302,8 +312,32 @@ Acked-by: jbeulich@novell.com timer_name[cpu], NULL); if (irq < 0) ---- head-2010-04-29.orig/drivers/acpi/processor_perflib.c 2010-04-15 09:43:05.000000000 +0200 -+++ head-2010-04-29/drivers/acpi/processor_perflib.c 2010-05-06 14:22:32.000000000 +0200 +--- 
head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2010-09-16 13:31:46.000000000 +0200 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:31:26.000000000 +0100 +@@ -154,21 +154,6 @@ int create_lookup_pte_addr(struct mm_str + + EXPORT_SYMBOL(create_lookup_pte_addr); + +-static int noop_fn( +- pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) +-{ +- return 0; +-} +- +-int touch_pte_range(struct mm_struct *mm, +- unsigned long address, +- unsigned long size) +-{ +- return apply_to_page_range(mm, address, size, noop_fn, NULL); +-} +- +-EXPORT_SYMBOL(touch_pte_range); +- + /* + * Does @address reside within a non-highmem page that is local to this virtual + * machine (i.e., not an I/O page, nor a memory page belonging to another VM). +--- head-2011-03-17.orig/drivers/acpi/processor_perflib.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_perflib.c 2011-01-31 17:02:29.000000000 +0100 @@ -578,6 +578,8 @@ end: return result; } @@ -319,9 +353,9 @@ Acked-by: jbeulich@novell.com EXPORT_SYMBOL(acpi_processor_unregister_performance); + +#endif /* !CONFIG_PROCESSOR_EXTERNAL_CONTROL */ ---- head-2010-04-29.orig/drivers/char/agp/intel-agp.c 2010-04-15 09:43:13.000000000 +0200 -+++ head-2010-04-29/drivers/char/agp/intel-agp.c 2010-04-15 09:52:07.000000000 +0200 -@@ -452,6 +452,10 @@ static struct page *i8xx_alloc_pages(voi +--- head-2011-03-17.orig/drivers/char/agp/intel-gtt.c 2011-03-11 10:51:50.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/intel-gtt.c 2011-03-11 10:53:08.000000000 +0100 +@@ -156,6 +156,10 @@ static struct page *i8xx_alloc_pages(voi if (set_pages_uc(page, 4) < 0) { set_pages_wb(page, 4); @@ -332,8 +366,60 @@ Acked-by: jbeulich@novell.com __free_pages(page, 2); return NULL; } ---- head-2010-04-29.orig/drivers/xen/console/console.c 2009-03-18 10:39:31.000000000 +0100 -+++ head-2010-04-29/drivers/xen/console/console.c 2010-03-24 15:06:08.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkback/interface.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/drivers/xen/blkback/interface.c 2011-01-31 17:02:29.000000000 +0100 +@@ -41,11 +41,10 @@ blkif_t *blkif_alloc(domid_t domid) + { + blkif_t *blkif; + +- blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL); ++ blkif = kmem_cache_zalloc(blkif_cachep, GFP_KERNEL); + if (!blkif) + return ERR_PTR(-ENOMEM); + +- memset(blkif, 0, sizeof(*blkif)); + blkif->domid = domid; + spin_lock_init(&blkif->blk_ring_lock); + atomic_set(&blkif->refcnt, 1); +--- head-2011-03-17.orig/drivers/xen/blktap/interface.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/drivers/xen/blktap/interface.c 2011-01-31 17:02:29.000000000 +0100 +@@ -41,11 +41,10 @@ blkif_t *tap_alloc_blkif(domid_t domid) + { + blkif_t *blkif; + +- blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL); ++ blkif = kmem_cache_zalloc(blkif_cachep, GFP_KERNEL); + if (!blkif) + return ERR_PTR(-ENOMEM); + +- memset(blkif, 0, sizeof(*blkif)); + blkif->domid = domid; + spin_lock_init(&blkif->blk_ring_lock); + atomic_set(&blkif->refcnt, 1); +--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2010-04-29 09:34:47.000000000 +0200 ++++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-02-24 15:15:38.000000000 +0100 +@@ -145,7 +145,7 @@ blktap_control_ioctl(struct inode *inode + return -ENOIOCTLCMD; + } + +-static struct file_operations blktap_control_file_operations = { ++static const struct file_operations blktap_control_file_operations = { + .owner = THIS_MODULE, + .ioctl = blktap_control_ioctl, + }; +--- 
head-2011-03-17.orig/drivers/xen/blktap2/ring.c 2010-08-31 09:24:21.000000000 +0200 ++++ head-2011-03-17/drivers/xen/blktap2/ring.c 2011-02-24 15:15:44.000000000 +0100 +@@ -479,7 +479,7 @@ static unsigned int blktap_ring_poll(str + return 0; + } + +-static struct file_operations blktap_ring_file_operations = { ++static const struct file_operations blktap_ring_file_operations = { + .owner = THIS_MODULE, + .open = blktap_ring_open, + .release = blktap_ring_release, +--- head-2011-03-17.orig/drivers/xen/console/console.c 2009-03-18 10:39:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/console/console.c 2011-01-31 17:02:29.000000000 +0100 @@ -94,7 +94,6 @@ static int __init xencons_setup(char *st { char *q; @@ -342,8 +428,86 @@ Acked-by: jbeulich@novell.com console_use_vt = 1; if (!strncmp(str, "ttyS", 4)) { ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2009-06-23 09:28:21.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:06:08.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/netback/common.h 2011-02-17 09:58:10.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/common.h 2011-03-01 11:33:08.000000000 +0100 +@@ -100,7 +100,7 @@ typedef struct netif_st { + struct timer_list tx_queue_timeout; + + /* Statistics */ +- int nr_copied_skbs; ++ unsigned long nr_copied_skbs; + + /* Miscellaneous private stuff. */ + struct list_head list; /* scheduling list */ +--- head-2011-03-17.orig/drivers/xen/netback/interface.c 2011-02-17 09:58:10.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/interface.c 2011-03-17 14:10:30.000000000 +0100 +@@ -169,7 +169,7 @@ static const struct netif_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; + } netbk_stats[] = { +- { "copied_skbs", offsetof(netif_t, nr_copied_skbs) }, ++ { "copied_skbs", offsetof(netif_t, nr_copied_skbs) / sizeof(long) }, + }; + + static int netbk_get_stats_count(struct net_device *dev) +@@ -180,11 +180,11 @@ static int netbk_get_stats_count(struct + static void netbk_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) + { +- void *netif = netdev_priv(dev); ++ unsigned long *np = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(netbk_stats); i++) +- data[i] = *(int *)(netif + netbk_stats[i].offset); ++ data[i] = np[netbk_stats[i].offset]; + } + + static void netbk_get_strings(struct net_device *dev, u32 stringset, u8 * data) +--- head-2011-03-17.orig/drivers/xen/scsiback/interface.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/drivers/xen/scsiback/interface.c 2011-01-31 17:02:29.000000000 +0100 +@@ -46,11 +46,10 @@ struct vscsibk_info *vscsibk_info_alloc( + { + struct vscsibk_info *info; + +- info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL); ++ info = kmem_cache_zalloc(scsiback_cachep, GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + +- memset(info, 0, sizeof(*info)); + info->domid = domid; + spin_lock_init(&info->ring_lock); + atomic_set(&info->nr_unreplied_reqs, 0); +--- head-2011-03-17.orig/drivers/xen/tpmback/interface.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/drivers/xen/tpmback/interface.c 2011-01-31 17:02:29.000000000 +0100 +@@ -25,11 +25,10 @@ static tpmif_t *alloc_tpmif(domid_t domi + { + tpmif_t *tpmif; + +- tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL); ++ tpmif = kmem_cache_zalloc(tpmif_cachep, GFP_KERNEL); + if (tpmif == NULL) + goto out_of_memory; + +- memset(tpmif, 0, sizeof (*tpmif)); + tpmif->domid = domid; + tpmif->status = DISCONNECTED; + tpmif->bi = bi; +--- 
head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2009-03-18 10:39:32.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:33:33.000000000 +0100 +@@ -521,9 +521,6 @@ int direct_kernel_remap_pfn_range(unsign + int create_lookup_pte_addr(struct mm_struct *mm, + unsigned long address, + uint64_t *ptep); +-int touch_pte_range(struct mm_struct *mm, +- unsigned long address, +- unsigned long size); + + int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, unsigned long end, pgprot_t newprot); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2009-06-23 09:28:21.000000000 +0200 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:33:42.000000000 +0100 @@ -394,7 +394,6 @@ static inline int pmd_large(pmd_t pte) { /* @@ -352,8 +516,19 @@ Acked-by: jbeulich@novell.com */ #define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK)) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor_32.h 2008-01-28 12:24:19.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:06:08.000000000 +0100 +@@ -543,10 +542,6 @@ int create_lookup_pte_addr(struct mm_str + unsigned long address, + uint64_t *ptep); + +-int touch_pte_range(struct mm_struct *mm, +- unsigned long address, +- unsigned long size); +- + int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, unsigned long end, pgprot_t newprot); + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor_32.h 2008-01-28 12:24:19.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:02:29.000000000 +0100 @@ -23,7 +23,7 @@ #include @@ -363,15 +538,15 @@ Acked-by: jbeulich@novell.com struct desc_struct { unsigned long a,b; ---- head-2010-04-29.orig/arch/x86/include/asm/thread_info.h 2010-05-06 14:21:27.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/thread_info.h 2010-03-24 15:06:08.000000000 +0100 -@@ -146,11 +146,15 @@ struct thread_info { +--- head-2011-03-17.orig/arch/x86/include/asm/thread_info.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/thread_info.h 2011-01-31 17:02:29.000000000 +0100 +@@ -144,11 +144,15 @@ struct thread_info { _TIF_USER_RETURN_NOTIFY) /* flags to check in __switch_to() */ +#ifndef CONFIG_XEN #define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) + (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) @@ -381,3 +556,36 @@ Acked-by: jbeulich@novell.com #define PREEMPT_ACTIVE 0x10000000 +--- head-2011-03-17.orig/arch/x86/kernel/quirks.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/quirks.c 2011-02-28 15:04:15.000000000 +0100 +@@ -6,7 +6,7 @@ + + #include + +-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) ++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI) + + static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) + { +@@ -35,10 +35,21 @@ static void __devinit quirk_intel_irqbal + if (!(word & (1 << 13))) { + dev_info(&dev->dev, "Intel E7520/7320/7525 detected; " + "disabling irq balancing and affinity\n"); ++#ifndef CONFIG_XEN + noirqdebug_setup(""); + #ifdef CONFIG_PROC_FS + no_irq_affinity = 1; + 
#endif ++#else ++ { ++ struct xen_platform_op op = { ++ .cmd = XENPF_platform_quirk, ++ .u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING ++ }; ++ ++ WARN_ON(HYPERVISOR_platform_op(&op)); ++ } ++#endif + } + + /* put back the original value for config space*/ diff --git a/patches.xen/xen3-patch-2.6.19 b/patches.xen/xen3-patch-2.6.19 index ea365ec..acdffa1 100644 --- a/patches.xen/xen3-patch-2.6.19 +++ b/patches.xen/xen3-patch-2.6.19 @@ -6,9 +6,9 @@ Automatically created from "patches.kernel.org/patch-2.6.19" by xen-port-patches Acked-by: jbeulich@novell.com ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-24 15:06:12.000000000 +0100 -@@ -528,6 +528,7 @@ config SCHED_OMIT_FRAME_POINTER +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-01-31 17:29:16.000000000 +0100 +@@ -537,6 +537,7 @@ config SCHED_OMIT_FRAME_POINTER menuconfig PARAVIRT_GUEST bool "Paravirtualized guest support" @@ -16,9 +16,9 @@ Acked-by: jbeulich@novell.com ---help--- Say Y here to get to see options related to running Linux under various hypervisors. This option alone does not add any kernel code. ---- head-2010-05-25.orig/arch/x86/kernel/acpi/boot.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/acpi/boot.c 2010-04-15 09:52:23.000000000 +0200 -@@ -71,8 +71,12 @@ int acpi_strict; +--- head-2011-03-11.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/boot.c 2011-03-11 10:54:41.000000000 +0100 +@@ -70,8 +70,12 @@ int acpi_strict; u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; @@ -28,10 +28,10 @@ Acked-by: jbeulich@novell.com +#else +#define acpi_skip_timer_override 0 +#endif + int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC - static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; -@@ -493,6 +497,7 @@ int acpi_register_gsi(struct device *dev +@@ -587,6 +591,7 @@ void __init acpi_set_irq_model_ioapic(vo #ifdef CONFIG_ACPI_HOTPLUG_CPU #include @@ -39,7 +39,7 @@ Acked-by: jbeulich@novell.com static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA -@@ -584,6 +589,9 @@ free_tmp_map: +@@ -678,6 +683,9 @@ free_tmp_map: out: return retval; } @@ -49,7 +49,7 @@ Acked-by: jbeulich@novell.com /* wrapper to silence section mismatch warning */ int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu) -@@ -594,9 +602,11 @@ EXPORT_SYMBOL(acpi_map_lsapic); +@@ -688,9 +696,11 @@ EXPORT_SYMBOL(acpi_map_lsapic); int acpi_unmap_lsapic(int cpu) { @@ -61,7 +61,7 @@ Acked-by: jbeulich@novell.com return (0); } -@@ -1660,7 +1670,7 @@ int __init acpi_mps_check(void) +@@ -1688,7 +1698,7 @@ int __init acpi_mps_check(void) return 0; } @@ -70,8 +70,8 @@ Acked-by: jbeulich@novell.com static int __init parse_acpi_skip_timer_override(char *arg) { acpi_skip_timer_override = 1; ---- head-2010-05-25.orig/arch/x86/kernel/apic/apic-xen.c 2007-06-12 13:12:48.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/apic/apic-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/apic/apic-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -54,7 +54,6 @@ static cpumask_t timer_bcast_ipi; /* * Knob to control our willingness to enable the local APIC. 
@@ -112,8 +112,8 @@ Acked-by: jbeulich@novell.com #endif } } ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2009-05-19 09:16:41.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/common-xen.c 2009-05-19 09:16:41.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -43,7 +43,7 @@ struct cpu_dev * cpu_devs[X86_VENDOR_NUM extern int disable_pse; @@ -177,16 +177,16 @@ Acked-by: jbeulich@novell.com /* Clear all 6 debug registers: */ set_debugreg(0, 0); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mcheck/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mcheck/Makefile 2010-03-24 15:06:12.000000000 +0100 -@@ -9,3 +9,5 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += thres - obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o - +--- head-2011-03-11.orig/arch/x86/kernel/cpu/mcheck/Makefile 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/mcheck/Makefile 2011-01-31 17:29:16.000000000 +0100 +@@ -11,3 +11,5 @@ obj-$(CONFIG_X86_MCE_INJECT) += mce-inje obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o + + obj-$(CONFIG_ACPI_APEI) += mce-apei.o + +disabled-obj-$(CONFIG_XEN) := therm_throt.o ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2009-10-01 11:00:47.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2009-10-01 11:00:47.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2011-01-31 17:29:16.000000000 +0100 @@ -53,8 +53,7 @@ static struct mc_info *g_mi; /*dom0 mce virq handler, logging physical mce error info*/ @@ -205,8 +205,8 @@ Acked-by: jbeulich@novell.com + mce_dom0_interrupt(VIRQ_MCA, NULL); } ---- head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2009-05-19 09:16:41.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_32-xen.S 2009-05-19 09:16:41.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:29:16.000000000 +0100 @@ -80,8 +80,12 @@ VM_MASK = 0x00020000 NMI_MASK = 0x80000000 @@ -593,8 +593,8 @@ Acked-by: jbeulich@novell.com .section .rodata,"a" #include "syscall_table.S" ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2007-06-12 13:12:48.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head_32-xen.S 2007-06-12 13:12:48.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/head_32-xen.S 2011-01-31 17:29:16.000000000 +0100 @@ -62,7 +62,7 @@ ENTRY(startup_32) movl %eax,%gs cld # gcc2 wants the direction flag cleared at all times @@ -604,8 +604,8 @@ Acked-by: jbeulich@novell.com jmp start_kernel #define HYPERCALL_PAGE_OFFSET 0x1000 ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_32-xen.c 2009-03-18 10:39:31.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_32-xen.c 2009-03-18 10:39:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -31,6 +31,9 @@ #include #include @@ -1685,8 +1685,8 @@ Acked-by: jbeulich@novell.com + return 0; +} +early_param("noapic", parse_noapic); ---- head-2010-05-25.orig/arch/x86/kernel/ldt_32-xen.c 
2007-06-12 13:12:48.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ldt_32-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -1,5 +1,5 @@ /* - * linux/kernel/ldt.c @@ -1694,8 +1694,8 @@ Acked-by: jbeulich@novell.com * * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds * Copyright (C) 1999 Ingo Molnar ---- head-2010-05-25.orig/arch/x86/kernel/microcode-xen.c 2007-06-12 13:12:48.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/microcode-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/microcode-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/microcode-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -2,6 +2,7 @@ * Intel CPU Microcode Update Driver for Linux * @@ -1831,8 +1831,8 @@ Acked-by: jbeulich@novell.com module_init(microcode_init) module_exit(microcode_exit) -MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_32-xen.c 2007-06-12 13:12:48.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/mpparse_32-xen.c 2007-06-12 13:12:48.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -30,6 +30,7 @@ #include @@ -1999,8 +1999,8 @@ Acked-by: jbeulich@novell.com /* * Mapping between Global System Interrups, which * represent all possible interrupts, and IRQs ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2009-11-06 10:23:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/pci-dma-xen.c 2009-11-06 10:23:23.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -110,8 +110,7 @@ dma_map_sg(struct device *hwdev, struct { int i, rc; @@ -2059,8 +2059,8 @@ Acked-by: jbeulich@novell.com if (swiotlb) swiotlb_unmap_single(dev, dma_addr, size, direction); else ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2008-07-21 11:00:32.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_32-xen.c 2008-07-21 11:00:32.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/process_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -37,6 +37,7 @@ #include #include @@ -2150,8 +2150,8 @@ Acked-by: jbeulich@novell.com sp -= get_random_int() % 8192; return sp & ~0xf; } ---- head-2010-05-25.orig/arch/x86/kernel/setup_32-xen.c 2008-04-22 15:41:51.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_32-xen.c 2008-04-22 15:41:51.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -56,6 +56,7 @@ #include #include @@ -2726,8 +2726,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_ACPI acpi_boot_init(); ---- head-2010-05-25.orig/arch/x86/kernel/smp_32-xen.c 2007-12-10 08:47:31.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_32-xen.c 2007-12-10 08:47:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -279,8 +279,7 @@ static 
inline void leave_mm (unsigned lo * 2) Leave the mm if we are in the lazy tlb mode. */ @@ -2827,8 +2827,8 @@ Acked-by: jbeulich@novell.com + return 0; +} +EXPORT_SYMBOL(smp_call_function_single); ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -88,7 +88,6 @@ int pit_latch_buggy; /* ext unsigned long vxtime_hz = PIT_TICK_RATE; struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */ @@ -2846,7 +2846,7 @@ Acked-by: jbeulich@novell.com DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); -@@ -246,11 +243,10 @@ static void __update_wallclock(time_t se +@@ -243,11 +240,10 @@ static void __update_wallclock(time_t se time_t wtm_sec, xtime_sec; u64 tmp, wc_nsec; @@ -2859,7 +2859,7 @@ Acked-by: jbeulich@novell.com /* Split wallclock base into seconds and nanoseconds. */ tmp = wc_nsec; -@@ -373,16 +369,10 @@ void do_gettimeofday(struct timeval *tv) +@@ -376,16 +372,10 @@ void do_gettimeofday(struct timeval *tv) shadow = &per_cpu(shadow_time, cpu); do { @@ -2876,7 +2876,7 @@ Acked-by: jbeulich@novell.com sec = xtime.tv_sec; usec += (xtime.tv_nsec / NSEC_PER_USEC); -@@ -509,7 +499,7 @@ static void sync_xen_wallclock(unsigned +@@ -524,7 +514,7 @@ static void sync_xen_wallclock(unsigned write_seqlock_irq(&xtime_lock); sec = xtime.tv_sec; @@ -2885,7 +2885,7 @@ Acked-by: jbeulich@novell.com __normalize_time(&sec, &nsec); op.cmd = XENPF_settime; -@@ -583,42 +573,49 @@ unsigned long long sched_clock(void) +@@ -598,42 +588,49 @@ unsigned long long sched_clock(void) } #endif @@ -2954,7 +2954,7 @@ Acked-by: jbeulich@novell.com { s64 delta, delta_cpu, stolen, blocked; u64 sched_time; -@@ -676,10 +673,15 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -692,10 +689,15 @@ irqreturn_t timer_interrupt(int irq, voi } /* System-wide jiffy work. */ @@ -2974,7 +2974,7 @@ Acked-by: jbeulich@novell.com } if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) { -@@ -724,7 +726,7 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -742,7 +744,7 @@ irqreturn_t timer_interrupt(int irq, voi if (delta_cpu > 0) { do_div(delta_cpu, NS_PER_TICK); per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK; @@ -2983,7 +2983,7 @@ Acked-by: jbeulich@novell.com account_user_time(current, (cputime_t)delta_cpu); else account_system_time(current, HARDIRQ_OFFSET, -@@ -738,10 +740,10 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -756,10 +758,10 @@ irqreturn_t timer_interrupt(int irq, voi /* Local timer processing (see update_process_times()). 
*/ run_local_timers(); if (rcu_pending(cpu)) @@ -2996,7 +2996,7 @@ Acked-by: jbeulich@novell.com return IRQ_HANDLED; } -@@ -951,10 +953,11 @@ extern void (*late_time_init)(void); +@@ -969,10 +971,11 @@ extern void (*late_time_init)(void); /* Duplicate of time_init() below, with hpet_enable part added */ static void __init hpet_time_init(void) { @@ -3012,8 +3012,8 @@ Acked-by: jbeulich@novell.com if ((hpet_enable() >= 0) && hpet_use_timer) { printk("Using HPET for base-timer\n"); ---- head-2010-05-25.orig/arch/x86/kernel/traps_32-xen.c 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_32-xen.c 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -28,6 +28,7 @@ #include #include @@ -3460,8 +3460,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_KPROBES fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code) { ---- head-2010-05-25.orig/arch/x86/mach-xen/setup.c 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/mach-xen/setup.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mach-xen/setup.c 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/mach-xen/setup.c 2011-01-31 17:29:16.000000000 +0100 @@ -103,8 +103,10 @@ void __init pre_setup_arch_hook(void) setup_xen_features(); @@ -3475,8 +3475,8 @@ Acked-by: jbeulich@novell.com if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { machine_to_phys_mapping = (unsigned long *)mapping.v_start; ---- head-2010-05-25.orig/arch/x86/mm/fault_32-xen.c 2007-12-10 08:47:31.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/fault_32-xen.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/arch/x86/mm/fault_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -27,21 +27,24 @@ #include #include @@ -3563,8 +3563,8 @@ Acked-by: jbeulich@novell.com yield(); down_read(&mm->mmap_sem); goto survive; ---- head-2010-05-25.orig/arch/x86/mm/highmem_32-xen.c 2008-10-29 09:55:56.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/highmem_32-xen.c 2008-10-29 09:55:56.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -38,11 +38,9 @@ static void *__kmap_atomic(struct page * idx = type + KM_TYPE_NR*smp_processor_id(); @@ -3631,8 +3631,8 @@ Acked-by: jbeulich@novell.com return (void*) vaddr; } ---- head-2010-05-25.orig/arch/x86/mm/hypervisor.c 2009-06-09 15:01:37.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/hypervisor.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/hypervisor.c 2009-06-09 15:01:37.000000000 +0200 ++++ head-2011-03-11/arch/x86/mm/hypervisor.c 2011-01-31 17:29:16.000000000 +0100 @@ -31,6 +31,7 @@ */ @@ -3641,7 +3641,7 @@ Acked-by: jbeulich@novell.com #include #include #include -@@ -44,6 +45,302 @@ +@@ -44,6 +45,300 @@ #include #include @@ -3652,7 +3652,6 @@ Acked-by: jbeulich@novell.com +#define NR_MMUEXT (BITS_PER_LONG / 4) + +DEFINE_PER_CPU(bool, xen_lazy_mmu); -+EXPORT_PER_CPU_SYMBOL(xen_lazy_mmu); +struct lazy_mmu { + unsigned int nr_mc, nr_mmu, nr_mmuext; + multicall_entry_t mc[NR_MC]; @@ -3712,7 +3711,6 @@ Acked-by: jbeulich@novell.com + + return 0; +} -+EXPORT_SYMBOL(xen_multicall_flush); + +int 
xen_multi_update_va_mapping(unsigned long va, pte_t pte, + unsigned long uvmf) @@ -3944,7 +3942,7 @@ Acked-by: jbeulich@novell.com void xen_l1_entry_update(pte_t *ptr, pte_t val) { mmu_update_t u; -@@ -546,7 +843,8 @@ int write_ldt_entry(void *ldt, int entry +@@ -546,7 +841,8 @@ int write_ldt_entry(void *ldt, int entry #define MAX_BATCHED_FULL_PTES 32 int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, @@ -3954,7 +3952,7 @@ Acked-by: jbeulich@novell.com { int rc = 0, i = 0; mmu_update_t u[MAX_BATCHED_FULL_PTES]; -@@ -559,10 +857,14 @@ int xen_change_pte_range(struct mm_struc +@@ -559,10 +855,14 @@ int xen_change_pte_range(struct mm_struc pte = pte_offset_map_lock(mm, pmd, addr, &ptl); do { if (pte_present(*pte)) { @@ -3970,8 +3968,8 @@ Acked-by: jbeulich@novell.com if (++i == MAX_BATCHED_FULL_PTES) { if ((rc = HYPERVISOR_mmu_update( &u[0], i, NULL, DOMID_SELF)) != 0) ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2008-10-29 09:55:56.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/init_32-xen.c 2008-10-29 09:55:56.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -313,8 +313,7 @@ static void __init permanent_kmaps_init( static void __meminit free_new_highpage(struct page *page, int pfn) { @@ -4150,8 +4148,8 @@ Acked-by: jbeulich@novell.com unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; ---- head-2010-05-25.orig/arch/x86/mm/ioremap_32-xen.c 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/ioremap_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:31:26.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/ioremap-xen.c 2011-02-07 15:37:37.000000000 +0100 @@ -12,7 +12,7 @@ #include #include @@ -4161,7 +4159,7 @@ Acked-by: jbeulich@novell.com #include #include #include -@@ -118,7 +118,7 @@ int direct_remap_pfn_range(struct vm_are +@@ -114,7 +114,7 @@ int direct_remap_pfn_range(struct vm_are if (domid == DOMID_SELF) return -EINVAL; @@ -4170,7 +4168,7 @@ Acked-by: jbeulich@novell.com vma->vm_mm->context.has_foreign_mappings = 1; -@@ -203,6 +203,7 @@ void __iomem * __ioremap(unsigned long p +@@ -184,6 +184,7 @@ void __iomem * __ioremap(unsigned long p void __iomem * addr; struct vm_struct * area; unsigned long offset, last_addr; @@ -4178,7 +4176,7 @@ Acked-by: jbeulich@novell.com domid_t domid = DOMID_IO; /* Don't allow wraparound or zero size */ -@@ -234,6 +235,8 @@ void __iomem * __ioremap(unsigned long p +@@ -215,6 +216,8 @@ void __iomem * __ioremap(unsigned long p domid = DOMID_SELF; } @@ -4187,7 +4185,7 @@ Acked-by: jbeulich@novell.com /* * Mappings have to be page-aligned */ -@@ -249,10 +252,9 @@ void __iomem * __ioremap(unsigned long p +@@ -230,10 +233,9 @@ void __iomem * __ioremap(unsigned long p return NULL; area->phys_addr = phys_addr; addr = (void __iomem *) area->addr; @@ -4199,8 +4197,8 @@ Acked-by: jbeulich@novell.com vunmap((void __force *) addr); return NULL; } ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2009-03-18 10:39:31.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pgtable_32-xen.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -68,7 +68,9 @@ void show_mem(void) printk(KERN_INFO "%lu pages writeback\n", 
global_page_state(NR_WRITEBACK)); @@ -4256,8 +4254,8 @@ Acked-by: jbeulich@novell.com } pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) ---- head-2010-05-25.orig/arch/x86/pci/irq-xen.c 2008-03-06 08:54:32.000000000 +0100 -+++ head-2010-05-25/arch/x86/pci/irq-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/pci/irq-xen.c 2008-03-06 08:54:32.000000000 +0100 ++++ head-2011-03-11/arch/x86/pci/irq-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -991,10 +991,6 @@ static void __init pcibios_fixup_irqs(vo pci_name(bridge), 'A' + pin, irq); } @@ -4314,8 +4312,8 @@ Acked-by: jbeulich@novell.com - - return count; -} ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/ia32/ia32entry-xen.S 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:29:16.000000000 +0100 @@ -83,6 +83,7 @@ */ ENTRY(ia32_sysenter_target) @@ -4365,18 +4363,18 @@ Acked-by: jbeulich@novell.com .quad compat_sys_move_pages + .quad sys_getcpu ia32_syscall_end: ---- head-2010-05-25.orig/arch/x86/kernel/Makefile 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/Makefile 2010-03-24 15:06:12.000000000 +0100 -@@ -122,7 +122,7 @@ obj-$(CONFIG_X86_XEN) += fixup.o +--- head-2011-03-11.orig/arch/x86/kernel/Makefile 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/Makefile 2011-01-31 17:29:16.000000000 +0100 +@@ -115,7 +115,7 @@ obj-$(CONFIG_X86_XEN) += fixup.o ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) - obj-$(CONFIG_X86_XEN_GENAPIC) += genapic_xen_64.o + obj-$(CONFIG_X86_XEN_GENAPIC) += genapic_64.o genapic_xen_64.o - obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o - obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o obj-$(CONFIG_AUDIT) += audit_64.o -@@ -138,5 +138,7 @@ ifeq ($(CONFIG_X86_64),y) + + obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o +@@ -129,5 +129,7 @@ ifeq ($(CONFIG_X86_64),y) pci-dma_64-$(CONFIG_XEN) += pci-dma_32.o endif @@ -4385,8 +4383,8 @@ Acked-by: jbeulich@novell.com + smpboot_$(BITS).o tsc_$(BITS).o +disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += mpparse_64.o %/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := ---- head-2010-05-25.orig/arch/x86/kernel/e820_64-xen.c 2009-12-04 08:45:56.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_64-xen.c 2009-12-04 08:45:56.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -16,6 +16,7 @@ #include #include @@ -4878,8 +4876,8 @@ Acked-by: jbeulich@novell.com } unsigned long pci_mem_start = 0xaeedbabe; ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2007-06-12 13:13:01.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/early_printk-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -244,20 +244,16 @@ void early_printk(const char *fmt, ...) 
static int __initdata keep_early; @@ -4926,8 +4924,8 @@ Acked-by: jbeulich@novell.com } -__setup("earlyprintk=", setup_early_printk); ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2009-06-23 09:28:21.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_64-xen.S 2009-06-23 09:28:21.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:29:16.000000000 +0100 @@ -4,9 +4,6 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs @@ -5170,8 +5168,8 @@ Acked-by: jbeulich@novell.com ENTRY(alignment_check) errorentry do_alignment_check ---- head-2010-05-25.orig/arch/x86/kernel/head_64-xen.S 2009-06-23 09:28:21.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/head_64-xen.S 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head_64-xen.S 2010-11-08 17:27:03.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_64-xen.S 2011-01-31 17:29:16.000000000 +0100 @@ -5,9 +5,6 @@ * Copyright (C) 2000 Pavel Machek * Copyright (C) 2000 Karsten Keil @@ -5191,8 +5189,8 @@ Acked-by: jbeulich@novell.com gdt_end: /* asm/segment.h:GDT_ENTRIES must match this */ /* This should be a multiple of the cache line size */ ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2007-06-12 13:13:01.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head64-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/head64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -7,6 +7,9 @@ * Modified for Xen. */ @@ -5284,8 +5282,8 @@ Acked-by: jbeulich@novell.com - setup_boot_cpu_data(); start_kernel(); } ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_64-xen.c 2009-03-18 10:39:31.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_64-xen.c 2009-03-18 10:39:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -26,9 +26,12 @@ #include #include @@ -6985,8 +6983,8 @@ Acked-by: jbeulich@novell.com spin_unlock_irqrestore(&ioapic_lock, flags); return 0; ---- head-2010-05-25.orig/arch/x86/kernel/ioport_64-xen.c 2008-01-28 12:24:19.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ioport_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ioport_64-xen.c 2008-01-28 12:24:19.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ioport_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -58,6 +58,7 @@ asmlinkage long sys_ioperm(unsigned long memset(bitmap, 0xff, IO_BITMAP_BYTES); @@ -6995,8 +6993,8 @@ Acked-by: jbeulich@novell.com set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap); set_iobitmap.nr_ports = IO_BITMAP_BITS; ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_64-xen.c 2007-06-12 13:13:01.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/mpparse_64-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -41,8 +41,7 @@ int acpi_found_madt; * Various Linux-internal data structures created from the * MP-table. 
@@ -7610,9 +7608,9 @@ Acked-by: jbeulich@novell.com - -#endif /*CONFIG_X86_IO_APIC*/ #endif /*CONFIG_ACPI*/ ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2008-08-07 12:44:36.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -@@ -89,25 +89,24 @@ void idle_notifier_unregister(struct not +--- head-2011-03-11.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:28:34.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_64-xen.c 2011-02-02 08:30:50.000000000 +0100 +@@ -88,25 +88,24 @@ void idle_notifier_unregister(struct not } EXPORT_SYMBOL(idle_notifier_unregister); @@ -7643,7 +7641,7 @@ Acked-by: jbeulich@novell.com return; __exit_idle(); } -@@ -184,6 +183,9 @@ void cpu_idle (void) +@@ -183,6 +182,9 @@ void cpu_idle (void) play_dead(); enter_idle(); idle(); @@ -7653,7 +7651,7 @@ Acked-by: jbeulich@novell.com __exit_idle(); } -@@ -196,7 +198,7 @@ void cpu_idle (void) +@@ -195,7 +197,7 @@ void cpu_idle (void) void cpu_idle_wait(void) { unsigned int cpu, this_cpu = get_cpu(); @@ -7662,7 +7660,7 @@ Acked-by: jbeulich@novell.com set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); put_cpu(); -@@ -219,6 +221,8 @@ void cpu_idle_wait(void) +@@ -218,6 +220,8 @@ void cpu_idle_wait(void) } cpus_and(map, map, cpu_online_map); } while (!cpus_empty(map)); @@ -7671,7 +7669,7 @@ Acked-by: jbeulich@novell.com } EXPORT_SYMBOL_GPL(cpu_idle_wait); -@@ -250,9 +254,9 @@ void __show_regs(struct pt_regs * regs) +@@ -249,9 +253,9 @@ void __show_regs(struct pt_regs * regs) print_modules(); printk("Pid: %d, comm: %.20s %s %s %.*s\n", current->pid, current->comm, print_tainted(), @@ -7684,7 +7682,7 @@ Acked-by: jbeulich@novell.com printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip); printk_address(regs->rip); printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, -@@ -310,6 +314,7 @@ void exit_thread(void) +@@ -309,6 +313,7 @@ void exit_thread(void) kfree(t->io_bitmap_ptr); t->io_bitmap_ptr = NULL; @@ -7692,7 +7690,7 @@ Acked-by: jbeulich@novell.com /* * Careful, clear this in the TSS too: */ -@@ -340,6 +345,7 @@ void flush_thread(void) +@@ -339,6 +344,7 @@ void flush_thread(void) if (t->flags & _TIF_IA32) current_thread_info()->status |= TS_COMPAT; } @@ -7700,7 +7698,7 @@ Acked-by: jbeulich@novell.com tsk->thread.debugreg0 = 0; tsk->thread.debugreg1 = 0; -@@ -432,7 +438,7 @@ int copy_thread(int nr, unsigned long cl +@@ -431,7 +437,7 @@ int copy_thread(int nr, unsigned long cl asm("mov %%es,%0" : "=m" (p->thread.es)); asm("mov %%ds,%0" : "=m" (p->thread.ds)); @@ -7709,7 +7707,7 @@ Acked-by: jbeulich@novell.com p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; -@@ -440,6 +446,7 @@ int copy_thread(int nr, unsigned long cl +@@ -439,6 +445,7 @@ int copy_thread(int nr, unsigned long cl } memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES); @@ -7717,7 +7715,7 @@ Acked-by: jbeulich@novell.com } /* -@@ -474,6 +481,30 @@ static inline void __save_init_fpu( stru +@@ -473,6 +480,30 @@ static inline void __save_init_fpu( stru } /* @@ -7748,7 +7746,7 @@ Acked-by: jbeulich@novell.com * switch_to(x,y) should switch tasks from x to y. 
* * This could still be optimized: -@@ -501,6 +532,10 @@ __switch_to(struct task_struct *prev_p, +@@ -500,6 +531,10 @@ __switch_to(struct task_struct *prev_p, #endif multicall_entry_t _mcl[8], *mcl = _mcl; @@ -7759,7 +7757,7 @@ Acked-by: jbeulich@novell.com /* * This is basically '__unlazy_fpu', except that we queue a * multicall to indicate FPU task switch, rather than -@@ -513,7 +548,8 @@ __switch_to(struct task_struct *prev_p, +@@ -512,7 +547,8 @@ __switch_to(struct task_struct *prev_p, mcl->op = __HYPERVISOR_fpu_taskswitch; mcl->args[0] = 1; mcl++; @@ -7769,7 +7767,7 @@ Acked-by: jbeulich@novell.com /* * Reload esp0, LDT and the page table pointer: -@@ -608,21 +644,29 @@ __switch_to(struct task_struct *prev_p, +@@ -607,21 +643,29 @@ __switch_to(struct task_struct *prev_p, write_pda(oldrsp, next->userrsp); write_pda(pcurrent, next_p); write_pda(kernelstack, @@ -7809,7 +7807,7 @@ Acked-by: jbeulich@novell.com return prev_p; } -@@ -842,7 +886,7 @@ int dump_task_regs(struct task_struct *t +@@ -841,7 +885,7 @@ int dump_task_regs(struct task_struct *t unsigned long arch_align_stack(unsigned long sp) { @@ -7818,8 +7816,8 @@ Acked-by: jbeulich@novell.com sp -= get_random_int() % 8192; return sp & ~0xf; } ---- head-2010-05-25.orig/arch/x86/kernel/setup_64-xen.c 2009-06-23 09:28:21.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_64-xen.c 2010-10-05 09:58:12.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -88,9 +88,6 @@ extern struct edid_info edid_info; shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; EXPORT_SYMBOL(HYPERVISOR_shared_info); @@ -8256,8 +8254,8 @@ Acked-by: jbeulich@novell.com NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* VIA/Cyrix/Centaur-defined */ ---- head-2010-05-25.orig/arch/x86/kernel/setup64-xen.c 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -31,7 +31,7 @@ #include #endif @@ -8351,8 +8349,8 @@ Acked-by: jbeulich@novell.com orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks; } ---- head-2010-05-25.orig/arch/x86/kernel/smp_64-xen.c 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_64-xen.c 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -381,9 +381,8 @@ int smp_call_function_single (int cpu, v /* prevent preemption and reschedule on another processor */ int me = get_cpu(); @@ -8414,8 +8412,8 @@ Acked-by: jbeulich@novell.com - return 0; /* Should not happen */ -#endif -} ---- head-2010-05-25.orig/arch/x86/kernel/traps_64-xen.c 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_64-xen.c 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -23,6 +23,7 @@ #include #include @@ -8871,8 +8869,8 @@ Acked-by: jbeulich@novell.com -__setup("call_trace=", call_trace_setup); +early_param("call_trace", call_trace_setup); #endif ---- 
head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2007-06-18 08:38:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/vsyscall_64-xen.c 2007-06-18 08:38:13.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -26,6 +26,10 @@ #include #include @@ -9066,8 +9064,8 @@ Acked-by: jbeulich@novell.com return 0; } ---- head-2010-05-25.orig/arch/x86/mm/fault_64-xen.c 2007-11-02 17:34:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/fault_64-xen.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/arch/x86/mm/fault_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -40,8 +40,7 @@ #define PF_RSVD (1<<3) #define PF_INSTR (1<<4) @@ -9196,17 +9194,17 @@ Acked-by: jbeulich@novell.com yield(); goto again; } -@@ -702,7 +696,7 @@ void vmalloc_sync_all(void) +@@ -707,7 +701,7 @@ void vmalloc_sync_all(void) if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); else - BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref)); + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + pgd_page_table(unlock, page); } spin_unlock(&pgd_lock); - set_bit(pgd_index(address), insync); ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:34:47.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:34:47.000000000 +0200 ++++ head-2011-03-11/arch/x86/mm/init_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -61,8 +61,6 @@ EXPORT_SYMBOL(__kernel_page_user); int after_bootmem; @@ -9519,8 +9517,8 @@ Acked-by: jbeulich@novell.com } int kern_addr_valid(unsigned long addr) ---- head-2010-05-25.orig/arch/x86/mm/pageattr_64-xen.c 2009-03-18 10:39:31.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pageattr_64-xen.c 2009-03-18 10:39:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -377,8 +377,8 @@ static void revert_page(unsigned long ad BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); @@ -9585,8 +9583,8 @@ Acked-by: jbeulich@novell.com } } up_write(&init_mm.mmap_sem); ---- head-2010-05-25.orig/drivers/char/tpm/tpm_xen.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/char/tpm/tpm_xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/char/tpm/tpm_xen.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -85,8 +85,7 @@ static struct tpm_private *my_priv; /* local function prototypes */ @@ -9606,9 +9604,9 @@ Acked-by: jbeulich@novell.com { struct tpm_private *tp = tpm_priv; unsigned long flags; ---- head-2010-05-25.orig/drivers/pci/Kconfig 2010-03-24 14:00:05.000000000 +0100 -+++ head-2010-05-25/drivers/pci/Kconfig 2010-03-24 15:06:12.000000000 +0100 -@@ -64,7 +64,7 @@ config PCI_STUB +--- head-2011-03-11.orig/drivers/pci/Kconfig 2011-01-31 14:32:40.000000000 +0100 ++++ head-2011-03-11/drivers/pci/Kconfig 2011-01-31 17:29:16.000000000 +0100 +@@ -86,7 +86,7 @@ config XEN_PCIDEV_FE_DEBUG config HT_IRQ bool "Interrupts on hypertransport devices" default y @@ -9617,8 +9615,8 @@ Acked-by: jbeulich@novell.com help This allows native hypertransport devices to use interrupts. 
---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2009-12-04 08:45:56.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/pci/msi-xen.c 2009-12-04 08:45:56.000000000 +0100 ++++ head-2011-03-11/drivers/pci/msi-xen.c 2011-01-31 17:29:16.000000000 +0100 @@ -6,6 +6,7 @@ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) */ @@ -9850,8 +9848,8 @@ Acked-by: jbeulich@novell.com * allocated for this device function, are reclaimed to unused state, * which may be used later on. **/ ---- head-2010-05-25.orig/drivers/xen/Kconfig 2010-03-24 15:02:14.000000000 +0100 -+++ head-2010-05-25/drivers/xen/Kconfig 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/Kconfig 2011-02-24 14:05:09.000000000 +0100 ++++ head-2011-03-11/drivers/xen/Kconfig 2011-01-31 17:29:16.000000000 +0100 @@ -332,6 +332,10 @@ endmenu config HAVE_IRQ_IGNORE_UNHANDLED def_bool y @@ -9863,8 +9861,8 @@ Acked-by: jbeulich@novell.com config NO_IDLE_HZ def_bool y ---- head-2010-05-25.orig/drivers/xen/balloon/balloon.c 2010-03-31 09:56:02.000000000 +0200 -+++ head-2010-05-25/drivers/xen/balloon/balloon.c 2010-04-15 09:52:32.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/balloon/balloon.c 2010-03-31 09:56:02.000000000 +0200 ++++ head-2011-03-11/drivers/xen/balloon/balloon.c 2011-01-31 17:29:16.000000000 +0100 @@ -37,6 +37,7 @@ #include #include @@ -10062,8 +10060,8 @@ Acked-by: jbeulich@novell.com bs.driver_pages--; balloon_unlock(flags); ---- head-2010-05-25.orig/drivers/xen/blkback/blkback.c 2010-03-22 12:00:53.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/blkback.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blkback/blkback.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/drivers/xen/blkback/blkback.c 2011-01-31 17:29:16.000000000 +0100 @@ -294,7 +294,7 @@ static void blkif_notify_work(blkif_t *b wake_up(&blkif->wq); } @@ -10073,9 +10071,9 @@ Acked-by: jbeulich@novell.com { blkif_notify_work(dev_id); return IRQ_HANDLED; ---- head-2010-05-25.orig/drivers/xen/blkback/common.h 2010-03-22 12:00:53.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/common.h 2010-03-24 15:06:12.000000000 +0100 -@@ -146,7 +146,7 @@ void blkif_interface_init(void); +--- head-2011-03-11.orig/drivers/xen/blkback/common.h 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/drivers/xen/blkback/common.h 2011-01-31 17:29:16.000000000 +0100 +@@ -144,7 +144,7 @@ void blkif_interface_init(void); void blkif_xenbus_init(void); @@ -10084,18 +10082,30 @@ Acked-by: jbeulich@novell.com int blkif_schedule(void *arg); int blkback_barrier(struct xenbus_transaction xbt, ---- head-2010-05-25.orig/drivers/xen/blkfront/blkfront.c 2010-03-22 12:00:53.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/blkfront.c 2010-03-24 15:06:12.000000000 +0100 -@@ -70,7 +70,7 @@ static int setup_blkring(struct xenbus_d +--- head-2011-03-11.orig/drivers/xen/blkfront/blkfront.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/drivers/xen/blkfront/blkfront.c 2011-01-31 17:29:16.000000000 +0100 +@@ -70,9 +70,9 @@ static int setup_blkring(struct xenbus_d static void kick_pending_request_queues(struct blkfront_info *); -static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); +static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(void *arg); - static void blkif_recover(struct blkfront_info *); +-static void blkif_recover(struct blkfront_info *); ++static int 
blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); -@@ -733,7 +733,7 @@ void do_blkif_request(request_queue_t *r + static void blkif_free(struct blkfront_info *, int); + +@@ -149,7 +149,7 @@ static int blkfront_resume(struct xenbus + + err = talk_to_backend(dev, info); + if (info->connected == BLKIF_STATE_SUSPENDED && !err) +- blkif_recover(info); ++ err = blkif_recover(info); + + return err; + } +@@ -743,7 +743,7 @@ void do_blkif_request(request_queue_t *r } @@ -10104,9 +10114,40 @@ Acked-by: jbeulich@novell.com { struct request *req; blkif_response_t *bret; ---- head-2010-05-25.orig/drivers/xen/blktap/blktap.c 2010-04-29 09:34:47.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap/blktap.c 2010-04-29 09:43:21.000000000 +0200 -@@ -1288,7 +1288,7 @@ static void blkif_notify_work(blkif_t *b +@@ -854,7 +854,7 @@ static void blkif_completion(struct blk_ + gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); + } + +-static void blkif_recover(struct blkfront_info *info) ++static int blkif_recover(struct blkfront_info *info) + { + int i; + blkif_request_t *req; +@@ -862,8 +862,10 @@ static void blkif_recover(struct blkfron + int j; + + /* Stage 1: Make a safe copy of the shadow state. */ +- copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); +- memcpy(copy, info->shadow, sizeof(info->shadow)); ++ copy = kmemdup(info->shadow, sizeof(info->shadow), ++ GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); ++ if (!copy) ++ return -ENOMEM; + + /* Stage 2: Set up free list. */ + memset(&info->shadow, 0, sizeof(info->shadow)); +@@ -917,6 +919,8 @@ static void blkif_recover(struct blkfron + kick_pending_request_queues(info); + + spin_unlock_irq(&blkif_io_lock); ++ ++ return 0; + } + + int blkfront_is_ready(struct xenbus_device *dev) +--- head-2011-03-11.orig/drivers/xen/blktap/blktap.c 2011-02-17 09:58:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap/blktap.c 2011-02-17 10:07:17.000000000 +0100 +@@ -1272,7 +1272,7 @@ static void blkif_notify_work(blkif_t *b wake_up(&blkif->wq); } @@ -10115,8 +10156,8 @@ Acked-by: jbeulich@novell.com { blkif_notify_work(dev_id); return IRQ_HANDLED; ---- head-2010-05-25.orig/drivers/xen/blktap/common.h 2008-09-15 13:40:15.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap/common.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blktap/common.h 2008-09-15 13:40:15.000000000 +0200 ++++ head-2011-03-11/drivers/xen/blktap/common.h 2011-01-31 17:29:16.000000000 +0100 @@ -113,7 +113,7 @@ void tap_blkif_interface_init(void); void tap_blkif_xenbus_init(void); @@ -10126,8 +10167,8 @@ Acked-by: jbeulich@novell.com int tap_blkif_schedule(void *arg); int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif); ---- head-2010-05-25.orig/drivers/xen/blktap2/sysfs.c 2009-12-16 11:43:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/sysfs.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blktap2/sysfs.c 2011-03-02 12:00:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/sysfs.c 2011-01-31 17:29:16.000000000 +0100 @@ -150,7 +150,7 @@ blktap_sysfs_pause_device(struct class_d err = blktap_device_pause(tap); if (!err) { @@ -10216,8 +10257,8 @@ Acked-by: jbeulich@novell.com - return 0; + return err; } ---- head-2010-05-25.orig/drivers/xen/console/console.c 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/console/console.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/console/console.c 2011-01-31 
17:02:29.000000000 +0100 ++++ head-2011-03-11/drivers/xen/console/console.c 2011-01-31 17:29:16.000000000 +0100 @@ -360,7 +360,7 @@ static struct tty_struct *xencons_tty; static int xencons_priv_irq; static char x_char; @@ -10263,8 +10304,8 @@ Acked-by: jbeulich@novell.com .open = xencons_open, .close = xencons_close, .write = xencons_write, ---- head-2010-05-25.orig/drivers/xen/console/xencons_ring.c 2007-06-12 13:13:44.000000000 +0200 -+++ head-2010-05-25/drivers/xen/console/xencons_ring.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/console/xencons_ring.c 2007-06-12 13:13:44.000000000 +0200 ++++ head-2011-03-11/drivers/xen/console/xencons_ring.c 2011-01-31 17:29:16.000000000 +0100 @@ -83,7 +83,7 @@ int xencons_ring_send(const char *data, return sent; } @@ -10283,9 +10324,9 @@ Acked-by: jbeulich@novell.com cons++; } ---- head-2010-05-25.orig/drivers/xen/core/evtchn.c 2010-02-24 11:50:47.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/evtchn.c 2010-04-23 14:11:32.000000000 +0200 -@@ -522,7 +522,7 @@ static void unbind_from_irq(unsigned int +--- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2010-11-25 09:36:37.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-01-31 17:29:16.000000000 +0100 +@@ -539,7 +539,7 @@ static void unbind_from_irq(unsigned int int bind_caller_port_to_irqhandler( unsigned int caller_port, @@ -10294,7 +10335,7 @@ Acked-by: jbeulich@novell.com unsigned long irqflags, const char *devname, void *dev_id) -@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(bind_caller_port_to_ir +@@ -562,7 +562,7 @@ EXPORT_SYMBOL_GPL(bind_caller_port_to_ir int bind_listening_port_to_irqhandler( unsigned int remote_domain, @@ -10303,7 +10344,7 @@ Acked-by: jbeulich@novell.com unsigned long irqflags, const char *devname, void *dev_id) -@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(bind_listening_port_to +@@ -586,7 +586,7 @@ EXPORT_SYMBOL_GPL(bind_listening_port_to int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, @@ -10312,7 +10353,7 @@ Acked-by: jbeulich@novell.com unsigned long irqflags, const char *devname, void *dev_id) -@@ -593,7 +593,7 @@ EXPORT_SYMBOL_GPL(bind_interdomain_evtch +@@ -610,7 +610,7 @@ EXPORT_SYMBOL_GPL(bind_interdomain_evtch int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, @@ -10321,7 +10362,7 @@ Acked-by: jbeulich@novell.com unsigned long irqflags, const char *devname, void *dev_id) -@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(bind_virq_to_irqhandle +@@ -634,7 +634,7 @@ EXPORT_SYMBOL_GPL(bind_virq_to_irqhandle int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, @@ -10330,7 +10371,7 @@ Acked-by: jbeulich@novell.com unsigned long irqflags, const char *devname, void *dev_id) -@@ -693,16 +693,15 @@ int resend_irq_on_evtchn(unsigned int ir +@@ -710,16 +710,15 @@ int resend_irq_on_evtchn(unsigned int ir * Interface to generic handling in irq.c */ @@ -10349,7 +10390,7 @@ Acked-by: jbeulich@novell.com { int evtchn = evtchn_from_irq(irq); -@@ -710,21 +709,13 @@ static void shutdown_dynirq(unsigned int +@@ -727,28 +726,18 @@ static void shutdown_dynirq(unsigned int mask_evtchn(evtchn); } @@ -10375,12 +10416,20 @@ Acked-by: jbeulich@novell.com static void ack_dynirq(unsigned int irq) { -@@ -740,20 +731,22 @@ static void ack_dynirq(unsigned int irq) + int evtchn = evtchn_from_irq(irq); + +- move_native_irq(irq); +- + if (VALID_EVTCHN(evtchn)) { + mask_evtchn(evtchn); + clear_evtchn(evtchn); +@@ -757,20 +746,23 @@ static void ack_dynirq(unsigned int irq) static void 
end_dynirq(unsigned int irq) { - int evtchn = evtchn_from_irq(irq); -- ++ move_masked_irq(irq); + - if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) - unmask_evtchn(evtchn); + if (!(irq_desc[irq].status & IRQ_DISABLED)) @@ -10399,14 +10448,13 @@ Acked-by: jbeulich@novell.com + .disable = mask_dynirq, + .mask = mask_dynirq, + .unmask = unmask_dynirq, -+ .mask_ack = ack_dynirq, .ack = ack_dynirq, .end = end_dynirq, + .eoi = end_dynirq, #ifdef CONFIG_SMP .set_affinity = set_affinity_irq, #endif -@@ -815,7 +808,7 @@ static inline void pirq_query_unmask(int +@@ -832,7 +824,7 @@ static inline void pirq_query_unmask(int */ #define probing_irq(_irq) (irq_desc[(_irq)].action == NULL) @@ -10415,7 +10463,7 @@ Acked-by: jbeulich@novell.com { struct evtchn_bind_pirq bind_pirq; int evtchn = evtchn_from_irq(irq); -@@ -830,7 +823,7 @@ static unsigned int startup_pirq(unsigne +@@ -847,7 +839,7 @@ static unsigned int startup_pirq(unsigne if (!probing_irq(irq)) printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq); @@ -10424,36 +10472,33 @@ Acked-by: jbeulich@novell.com } evtchn = bind_pirq.port; -@@ -842,11 +835,9 @@ static unsigned int startup_pirq(unsigne +@@ -859,7 +851,13 @@ static unsigned int startup_pirq(unsigne out: pirq_unmask_and_notify(evtchn, irq); -- -- return 0; ++} ++ ++#define disable_pirq mask_pirq + ++static unsigned int startup_pirq(unsigned int irq) ++{ ++ enable_pirq(irq); + return 0; } --static void shutdown_pirq(unsigned int irq) -+static void disable_pirq(unsigned int irq) - { - struct evtchn_close close; - int evtchn = evtchn_from_irq(irq); -@@ -865,46 +856,46 @@ static void shutdown_pirq(unsigned int i +@@ -882,46 +880,39 @@ static void shutdown_pirq(unsigned int i irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0); } -static void enable_pirq(unsigned int irq) -+static unsigned int startup_pirq(unsigned int irq) - { +-{ - startup_pirq(irq); -+ enable_pirq(irq); -+ return 0; - } - +-} +- -static void disable_pirq(unsigned int irq) -{ -} -+#define shutdown_pirq disable_pirq - +- -static void ack_pirq(unsigned int irq) +static void unmask_pirq(unsigned int irq) { @@ -10475,7 +10520,8 @@ Acked-by: jbeulich@novell.com static void end_pirq(unsigned int irq) { - int evtchn = evtchn_from_irq(irq); -- ++ move_masked_irq(irq); + if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) == - (IRQ_DISABLED|IRQ_PENDING)) { + (IRQ_DISABLED|IRQ_PENDING)) @@ -10496,30 +10542,29 @@ Acked-by: jbeulich@novell.com .disable = disable_pirq, + .mask = mask_pirq, + .unmask = unmask_pirq, -+ .mask_ack = ack_pirq, .ack = ack_pirq, .end = end_pirq, + .eoi = end_pirq, #ifdef CONFIG_SMP .set_affinity = set_affinity_irq, #endif -@@ -1087,7 +1078,8 @@ void evtchn_register_pirq(int irq) +@@ -1104,7 +1095,8 @@ void evtchn_register_pirq(int irq) if (identity_mapped_irq(irq) || type_from_irq(irq) != IRQT_UNBOUND) return; irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0); - irq_desc[irq].chip = &pirq_type; -+ set_irq_chip_and_handler_name(irq, &pirq_chip, handle_level_irq, -+ "level"); ++ set_irq_chip_and_handler_name(irq, &pirq_chip, handle_fasteoi_irq, ++ "fasteoi"); } int evtchn_map_pirq(int irq, int xen_pirq) -@@ -1110,11 +1102,18 @@ int evtchn_map_pirq(int irq, int xen_pir +@@ -1127,11 +1119,18 @@ int evtchn_map_pirq(int irq, int xen_pir spin_unlock(&irq_alloc_lock); if (irq < PIRQ_BASE) return -ENOSPC; - irq_desc[irq].chip = &pirq_type; + set_irq_chip_and_handler_name(irq, &pirq_chip, -+ handle_level_irq, "level"); ++ handle_fasteoi_irq, "fasteoi"); } else if (!xen_pirq) { if 
(unlikely(type_from_irq(irq) != IRQT_PIRQ)) return -EINVAL; @@ -10534,7 +10579,7 @@ Acked-by: jbeulich@novell.com irq_info[irq] = IRQ_UNBOUND; return 0; } else if (type_from_irq(irq) != IRQT_PIRQ -@@ -1160,10 +1159,9 @@ void __init xen_init_IRQ(void) +@@ -1177,10 +1176,9 @@ void __init xen_init_IRQ(void) for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) { irq_bindcount[i] = 0; @@ -10544,11 +10589,11 @@ Acked-by: jbeulich@novell.com - irq_desc[i].chip = &dynirq_type; + irq_desc[i].status |= IRQ_NOPROBE; + set_irq_chip_and_handler_name(i, &dynirq_chip, -+ handle_level_irq, "level"); ++ handle_fasteoi_irq, "fasteoi"); } /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */ -@@ -1179,9 +1177,7 @@ void __init xen_init_IRQ(void) +@@ -1196,9 +1194,7 @@ void __init xen_init_IRQ(void) continue; #endif @@ -10557,11 +10602,11 @@ Acked-by: jbeulich@novell.com - irq_desc[i].depth = 1; - irq_desc[i].chip = &pirq_type; + set_irq_chip_and_handler_name(i, &pirq_chip, -+ handle_level_irq, "level"); ++ handle_fasteoi_irq, "fasteoi"); } } ---- head-2010-05-25.orig/drivers/xen/core/gnttab.c 2009-03-18 10:39:31.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/gnttab.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/gnttab.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/drivers/xen/core/gnttab.c 2011-01-31 17:29:16.000000000 +0100 @@ -510,6 +510,7 @@ static void gnttab_page_free(struct page BUG_ON(order); ClearPageForeign(page); @@ -10579,9 +10624,14 @@ Acked-by: jbeulich@novell.com *pagep = new_page; SetPageForeign(page, gnttab_page_free); ---- head-2010-05-25.orig/drivers/xen/core/reboot.c 2008-08-07 12:44:36.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/reboot.c 2010-03-24 15:06:12.000000000 +0100 -@@ -14,6 +14,7 @@ +--- head-2011-03-11.orig/drivers/xen/core/reboot.c 2010-11-25 09:36:37.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/reboot.c 2011-01-31 17:29:16.000000000 +0100 +@@ -1,4 +1,3 @@ +-#define __KERNEL_SYSCALLS__ + #include + #include + #include +@@ -14,6 +13,7 @@ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include @@ -10589,7 +10639,7 @@ Acked-by: jbeulich@novell.com #endif MODULE_LICENSE("Dual BSD/GPL"); -@@ -231,7 +232,7 @@ static void sysrq_handler(struct xenbus_ +@@ -231,7 +231,7 @@ static void sysrq_handler(struct xenbus_ #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') @@ -10598,7 +10648,7 @@ Acked-by: jbeulich@novell.com #endif } -@@ -245,7 +246,7 @@ static struct xenbus_watch sysrq_watch = +@@ -245,7 +245,7 @@ static struct xenbus_watch sysrq_watch = .callback = sysrq_handler }; @@ -10607,8 +10657,8 @@ Acked-by: jbeulich@novell.com { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2010-03-24 15:02:17.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-01-31 17:29:16.000000000 +0100 @@ -25,8 +25,8 @@ #include #include @@ -10620,7 +10670,7 @@ Acked-by: jbeulich@novell.com extern int local_setup_timer(unsigned int cpu); extern void local_teardown_timer(unsigned int cpu); -@@ -61,8 +61,6 @@ cpumask_t cpu_core_map[NR_CPUS] __cachel +@@ -59,8 +59,6 @@ cpumask_t cpu_core_map[NR_CPUS] __cachel #if defined(__i386__) u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... 
NR_CPUS-1] = 0xff }; EXPORT_SYMBOL(x86_cpu_to_apicid); @@ -10629,8 +10679,8 @@ Acked-by: jbeulich@novell.com #endif void __init prefill_possible_map(void) ---- head-2010-05-25.orig/drivers/xen/fbfront/xenfb.c 2009-12-04 08:45:56.000000000 +0100 -+++ head-2010-05-25/drivers/xen/fbfront/xenfb.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/fbfront/xenfb.c 2011-03-02 12:00:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/fbfront/xenfb.c 2011-01-31 17:29:16.000000000 +0100 @@ -524,8 +524,7 @@ static struct fb_ops xenfb_fb_ops = { .fb_set_par = xenfb_set_par, }; @@ -10641,8 +10691,8 @@ Acked-by: jbeulich@novell.com { /* * No in events recognized, simply ignore them all. ---- head-2010-05-25.orig/drivers/xen/fbfront/xenkbd.c 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/fbfront/xenkbd.c 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/drivers/xen/fbfront/xenkbd.c 2011-01-31 17:29:16.000000000 +0100 @@ -46,7 +46,7 @@ static void xenkbd_disconnect_backend(st * to do that. */ @@ -10652,9 +10702,9 @@ Acked-by: jbeulich@novell.com { struct xenkbd_info *info = dev_id; struct xenkbd_page *page = info->page; ---- head-2010-05-25.orig/drivers/xen/gntdev/gntdev.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/gntdev/gntdev.c 2010-03-24 15:06:12.000000000 +0100 -@@ -761,9 +761,6 @@ static pte_t gntdev_clear_pte(struct vm_ +--- head-2011-03-11.orig/drivers/xen/gntdev/gntdev.c 2011-01-03 12:43:21.000000000 +0100 ++++ head-2011-03-11/drivers/xen/gntdev/gntdev.c 2011-01-31 17:29:16.000000000 +0100 +@@ -744,9 +744,6 @@ static pte_t gntdev_clear_pte(struct vm_ BUG(); } @@ -10664,7 +10714,7 @@ Acked-by: jbeulich@novell.com /* Calculate the grant relating to this PTE. */ slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); -@@ -778,6 +775,10 @@ static pte_t gntdev_clear_pte(struct vm_ +@@ -761,6 +758,10 @@ static pte_t gntdev_clear_pte(struct vm_ GNTDEV_INVALID_HANDLE && !xen_feature(XENFEAT_auto_translated_physmap)) { /* NOT USING SHADOW PAGE TABLES. */ @@ -10675,7 +10725,7 @@ Acked-by: jbeulich@novell.com gnttab_set_unmap_op(&op, ptep_to_machine(ptep), GNTMAP_contains_pte, private_data->grants[slot_index] -@@ -790,7 +791,7 @@ static pte_t gntdev_clear_pte(struct vm_ +@@ -773,7 +774,7 @@ static pte_t gntdev_clear_pte(struct vm_ op.status); } else { /* USING SHADOW PAGE TABLES. */ @@ -10684,7 +10734,7 @@ Acked-by: jbeulich@novell.com } /* Finally, we unmap the grant from kernel space. 
*/ -@@ -818,7 +819,7 @@ static pte_t gntdev_clear_pte(struct vm_ +@@ -801,7 +802,7 @@ static pte_t gntdev_clear_pte(struct vm_ INVALID_P2M_ENTRY); } else { @@ -10693,8 +10743,8 @@ Acked-by: jbeulich@novell.com } return copy; ---- head-2010-05-25.orig/drivers/xen/netback/accel.c 2008-01-07 13:19:18.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/accel.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/netback/accel.c 2008-01-07 13:19:18.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/accel.c 2011-01-31 17:29:16.000000000 +0100 @@ -65,7 +65,7 @@ static int match_accelerator(struct xenb if (IS_ERR(eth_name)) { @@ -10704,9 +10754,17 @@ Acked-by: jbeulich@novell.com __FUNCTION__, PTR_ERR(eth_name)); return 0; } else { ---- head-2010-05-25.orig/drivers/xen/netback/common.h 2010-02-24 13:13:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/common.h 2010-03-24 15:06:12.000000000 +0100 -@@ -203,7 +203,7 @@ void netif_deschedule_work(netif_t *neti +--- head-2011-03-11.orig/drivers/xen/netback/common.h 2011-03-01 11:33:08.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/common.h 2011-02-17 10:07:22.000000000 +0100 +@@ -101,6 +101,7 @@ typedef struct netif_st { + + /* Statistics */ + unsigned long nr_copied_skbs; ++ unsigned long rx_gso_csum_fixups; + + /* Miscellaneous private stuff. */ + struct list_head list; /* scheduling list */ +@@ -209,7 +210,7 @@ void netif_deschedule_work(netif_t *neti int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev); struct net_device_stats *netif_be_get_stats(struct net_device *dev); @@ -10715,29 +10773,107 @@ Acked-by: jbeulich@novell.com static inline int netbk_can_queue(struct net_device *dev) { ---- head-2010-05-25.orig/drivers/xen/netback/loopback.c 2007-08-06 15:10:49.000000000 +0200 -+++ head-2010-05-25/drivers/xen/netback/loopback.c 2010-03-24 15:06:12.000000000 +0100 -@@ -151,7 +151,7 @@ static int loopback_start_xmit(struct sk +--- head-2011-03-11.orig/drivers/xen/netback/interface.c 2011-02-17 09:58:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/interface.c 2011-02-17 10:07:27.000000000 +0100 +@@ -170,6 +170,7 @@ static const struct netif_stat { + u16 offset; + } netbk_stats[] = { + { "copied_skbs", offsetof(netif_t, nr_copied_skbs) / sizeof(long) }, ++ { "rx_gso_csum_fixups", offsetof(netif_t, rx_gso_csum_fixups) / sizeof(long) }, + }; + + static int netbk_get_stats_count(struct net_device *dev) +--- head-2011-03-11.orig/drivers/xen/netback/loopback.c 2011-01-03 12:43:21.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/loopback.c 2011-01-31 17:29:16.000000000 +0100 +@@ -152,16 +152,6 @@ static int loopback_start_xmit(struct sk np->stats.rx_bytes += skb->len; np->stats.rx_packets++; - if (skb->ip_summed == CHECKSUM_HW) { -+ if (skb->ip_summed == CHECKSUM_PARTIAL) { - /* Defer checksum calculation. */ - skb->proto_csum_blank = 1; - /* Must be a local packet: assert its integrity. */ ---- head-2010-05-25.orig/drivers/xen/netback/netback.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/netback.c 2010-03-24 15:06:12.000000000 +0100 -@@ -711,7 +711,7 @@ static void net_rx_action(unsigned long +- /* Defer checksum calculation. */ +- skb->proto_csum_blank = 1; +- /* Must be a local packet: assert its integrity. */ +- skb->proto_data_valid = 1; +- } +- +- skb->ip_summed = skb->proto_data_valid ? 
+- CHECKSUM_UNNECESSARY : CHECKSUM_NONE; +- + skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */ + skb->protocol = eth_type_trans(skb, dev); + skb->dev = dev; +--- head-2011-03-11.orig/drivers/xen/netback/netback.c 2011-02-17 09:58:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/netback.c 2011-02-09 15:35:10.000000000 +0100 +@@ -39,6 +39,7 @@ + #include + #include + #include ++#include + + /*define NETBE_DEBUG_INTERRUPT*/ + +@@ -314,7 +315,6 @@ int netif_be_start_xmit(struct sk_buff * + /* Copy only the header fields we use in this driver. */ + nskb->dev = skb->dev; + nskb->ip_summed = skb->ip_summed; +- nskb->proto_data_valid = skb->proto_data_valid; + dev_kfree_skb(skb); + skb = nskb; + } +@@ -706,10 +706,14 @@ static void net_rx_action(unsigned long id = meta[npo.meta_cons].id; flags = nr_frags ? NETRXF_more_data : 0; - if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ -+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ ++ switch (skb->ip_summed) { ++ case CHECKSUM_PARTIAL: /* local packet? */ flags |= NETRXF_csum_blank | NETRXF_data_validated; - else if (skb->proto_data_valid) /* remote but checksummed? */ +- else if (skb->proto_data_valid) /* remote but checksummed? */ ++ break; ++ case CHECKSUM_UNNECESSARY: /* remote but checksummed? */ flags |= NETRXF_data_validated; -@@ -1518,7 +1518,7 @@ static void netif_page_release(struct pa ++ break; ++ } + + if (meta[npo.meta_cons].copy) + offset = 0; +@@ -1451,18 +1455,12 @@ static void net_tx_action(unsigned long + netif_idx_release(pending_idx); + } + +- /* +- * Old frontends do not assert data_validated but we +- * can infer it from csum_blank so test both flags. +- */ +- if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) { ++ if (txp->flags & NETTXF_csum_blank) ++ skb->ip_summed = CHECKSUM_PARTIAL; ++ else if (txp->flags & NETTXF_data_validated) + skb->ip_summed = CHECKSUM_UNNECESSARY; +- skb->proto_data_valid = 1; +- } else { ++ else + skb->ip_summed = CHECKSUM_NONE; +- skb->proto_data_valid = 0; +- } +- skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank); + + netbk_fill_frags(skb); + +@@ -1479,6 +1477,12 @@ static void net_tx_action(unsigned long + skb->dev = netif->dev; + skb->protocol = eth_type_trans(skb, skb->dev); + ++ if (skb_checksum_setup(skb, &netif->rx_gso_csum_fixups)) { ++ DPRINTK("Can't setup checksum in net_tx_action\n"); ++ kfree_skb(skb); ++ continue; ++ } ++ + netif->stats.rx_bytes += skb->len; + netif->stats.rx_packets++; + +@@ -1527,7 +1531,7 @@ static void netif_page_release(struct pa netif_idx_release(idx); } @@ -10746,7 +10882,7 @@ Acked-by: jbeulich@novell.com { netif_t *netif = dev_id; -@@ -1585,7 +1585,7 @@ static netif_rx_response_t *make_rx_resp +@@ -1594,7 +1598,7 @@ static netif_rx_response_t *make_rx_resp } #ifdef NETBE_DEBUG_INTERRUPT @@ -10755,9 +10891,17 @@ Acked-by: jbeulich@novell.com { struct list_head *ent; netif_t *netif; ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.c 2009-04-07 13:58:48.000000000 +0200 -+++ head-2010-05-25/drivers/xen/netfront/netfront.c 2010-03-24 15:06:12.000000000 +0100 -@@ -136,7 +136,7 @@ static inline int netif_needs_gso(struct +--- head-2011-03-11.orig/drivers/xen/netfront/netfront.c 2010-11-25 09:36:37.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/netfront.c 2011-02-09 15:35:31.000000000 +0100 +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include + + struct netfront_cb { + struct page *page; +@@ -136,7 +137,7 @@ static inline int netif_needs_gso(struct { return skb_is_gso(skb) && 
(!skb_gso_ok(skb, dev->features) || @@ -10766,7 +10910,7 @@ Acked-by: jbeulich@novell.com } #else #define HAVE_GSO 0 -@@ -222,7 +222,7 @@ static void network_tx_buf_gc(struct net +@@ -222,7 +223,7 @@ static void network_tx_buf_gc(struct net static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *); @@ -10775,16 +10919,22 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); -@@ -992,7 +992,7 @@ static int network_start_xmit(struct sk_ +@@ -992,12 +993,10 @@ static int network_start_xmit(struct sk_ tx->flags = 0; extra = NULL; - if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ + if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; - #ifdef CONFIG_XEN - if (skb->proto_data_valid) /* remote but checksummed? */ -@@ -1049,7 +1049,7 @@ static int network_start_xmit(struct sk_ +-#ifdef CONFIG_XEN +- if (skb->proto_data_valid) /* remote but checksummed? */ ++ else if (skb->ip_summed == CHECKSUM_UNNECESSARY) + tx->flags |= NETTXF_data_validated; +-#endif + + #if HAVE_TSO + if (skb_shinfo(skb)->gso_size) { +@@ -1049,7 +1048,7 @@ static int network_start_xmit(struct sk_ return 0; } @@ -10793,8 +10943,109 @@ Acked-by: jbeulich@novell.com { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); ---- head-2010-05-25.orig/drivers/xen/pciback/pciback.h 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/pciback.h 2010-03-24 15:06:12.000000000 +0100 +@@ -1424,18 +1423,13 @@ err: + skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); + skb->len += skb->data_len; + +- /* +- * Old backends do not assert data_validated but we +- * can infer it from csum_blank so test both flags. +- */ +- if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) ++ if (rx->flags & NETRXF_csum_blank) ++ skb->ip_summed = CHECKSUM_PARTIAL; ++ else if (rx->flags & NETRXF_data_validated) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; +-#ifdef CONFIG_XEN +- skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); +- skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); +-#endif ++ + np->stats.rx_packets++; + np->stats.rx_bytes += skb->len; + +@@ -1480,6 +1474,11 @@ err: + /* Ethernet work: Delayed to here as it peeks the header. */ + skb->protocol = eth_type_trans(skb, dev); + ++ if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) { ++ kfree_skb(skb); ++ continue; ++ } ++ + /* Pass it up. 
*/ + netif_receive_skb(skb); + dev->last_rx = jiffies; +@@ -1772,6 +1771,44 @@ static void xennet_set_features(struct n + xennet_set_tso(dev, 1); + } + ++static const struct xennet_stat { ++ char name[ETH_GSTRING_LEN]; ++ u16 offset; ++} xennet_stats[] = { ++ { ++ "rx_gso_csum_fixups", ++ offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long) ++ }, ++}; ++ ++static int xennet_get_stats_count(struct net_device *dev) ++{ ++ return ARRAY_SIZE(xennet_stats); ++} ++ ++static void xennet_get_ethtool_stats(struct net_device *dev, ++ struct ethtool_stats *stats, u64 *data) ++{ ++ unsigned long *np = netdev_priv(dev); ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) ++ data[i] = np[xennet_stats[i].offset]; ++} ++ ++static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data) ++{ ++ unsigned int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) ++ memcpy(data + i * ETH_GSTRING_LEN, ++ xennet_stats[i].name, ETH_GSTRING_LEN); ++ break; ++ } ++} ++ + static void netfront_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) + { +@@ -1897,6 +1934,10 @@ static struct ethtool_ops network_ethtoo + .set_tso = xennet_set_tso, + #endif + .get_link = ethtool_op_get_link, ++ ++ .get_stats_count = xennet_get_stats_count, ++ .get_ethtool_stats = xennet_get_ethtool_stats, ++ .get_strings = xennet_get_strings, + }; + + #ifdef CONFIG_SYSFS +--- head-2011-03-11.orig/drivers/xen/netfront/netfront.h 2010-02-24 13:13:46.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/netfront.h 2011-02-09 15:35:17.000000000 +0100 +@@ -150,6 +150,7 @@ struct netfront_info { + struct net_device *netdev; + + struct net_device_stats stats; ++ unsigned long rx_gso_csum_fixups; + + struct netif_tx_front_ring tx; + struct netif_rx_front_ring rx; +--- head-2011-03-11.orig/drivers/xen/pciback/pciback.h 2009-03-18 10:39:32.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/pciback.h 2011-01-31 17:29:16.000000000 +0100 @@ -99,7 +99,7 @@ int pciback_publish_pci_roots(struct pci void pciback_release_devices(struct pciback_device *pdev); @@ -10804,9 +11055,9 @@ Acked-by: jbeulich@novell.com void pciback_do_op(void *data); int pciback_xenbus_register(void); ---- head-2010-05-25.orig/drivers/xen/pciback/pciback_ops.c 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/pciback_ops.c 2010-03-24 15:06:12.000000000 +0100 -@@ -124,7 +124,7 @@ void pciback_do_op(void *data) +--- head-2011-03-11.orig/drivers/xen/pciback/pciback_ops.c 2011-02-17 09:58:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/pciback_ops.c 2011-02-17 10:07:33.000000000 +0100 +@@ -132,7 +132,7 @@ void pciback_do_op(void *data) test_and_schedule_op(pdev); } @@ -10815,9 +11066,9 @@ Acked-by: jbeulich@novell.com { struct pciback_device *pdev = dev_id; ---- head-2010-05-25.orig/drivers/xen/pcifront/pcifront.h 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pcifront/pcifront.h 2010-03-24 15:06:12.000000000 +0100 -@@ -50,6 +50,6 @@ void pcifront_free_roots(struct pcifront +--- head-2011-03-11.orig/drivers/xen/pcifront/pcifront.h 2010-10-05 09:58:12.000000000 +0200 ++++ head-2011-03-11/drivers/xen/pcifront/pcifront.h 2011-01-31 17:29:16.000000000 +0100 +@@ -51,6 +51,6 @@ void pcifront_free_roots(struct pcifront void pcifront_do_aer( void *data); @@ -10825,27 +11076,9 @@ Acked-by: jbeulich@novell.com +irqreturn_t pcifront_handler_aer(int irq, void *dev); #endif /* __XEN_PCIFRONT_H__ */ ---- 
head-2010-05-25.orig/drivers/xen/pcifront/pci_op.c 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pcifront/pci_op.c 2010-03-24 15:06:12.000000000 +0100 -@@ -530,10 +530,16 @@ int __devinit pcifront_rescan_root(struc - - d = pci_scan_single_device(b, devfn); - if (d) { -+ int err; -+ - dev_info(&pdev->xdev->dev, "New device on " - "%04x:%02x:%02x.%02x found.\n", domain, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); -- pci_bus_add_device(d); -+ err = pci_bus_add_device(d); -+ if (err) -+ dev_err(&pdev->xdev->dev, -+ "error %d adding device, continuing.\n", -+ err); - } - } - -@@ -658,7 +664,7 @@ void pcifront_do_aer(void *data) +--- head-2011-03-11.orig/drivers/xen/pcifront/pci_op.c 2010-11-25 09:36:37.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pcifront/pci_op.c 2011-01-31 17:29:16.000000000 +0100 +@@ -662,7 +662,7 @@ void pcifront_do_aer(void *data) } @@ -10854,8 +11087,8 @@ Acked-by: jbeulich@novell.com { struct pcifront_device *pdev = dev; schedule_pcifront_aer_op(pdev); ---- head-2010-05-25.orig/drivers/xen/privcmd/compat_privcmd.c 2010-01-27 14:01:48.000000000 +0100 -+++ head-2010-05-25/drivers/xen/privcmd/compat_privcmd.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/privcmd/compat_privcmd.c 2010-01-27 14:01:48.000000000 +0100 ++++ head-2011-03-11/drivers/xen/privcmd/compat_privcmd.c 2011-01-31 17:29:16.000000000 +0100 @@ -18,7 +18,6 @@ * Authors: Jimi Xenidis */ @@ -10864,8 +11097,8 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/drivers/xen/privcmd/privcmd.c 2010-01-27 14:01:48.000000000 +0100 -+++ head-2010-05-25/drivers/xen/privcmd/privcmd.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/privcmd/privcmd.c 2010-01-27 14:01:48.000000000 +0100 ++++ head-2011-03-11/drivers/xen/privcmd/privcmd.c 2011-01-31 17:29:16.000000000 +0100 @@ -71,43 +71,16 @@ static long privcmd_ioctl(struct file *f if (copy_from_user(&hypercall, udata, sizeof(hypercall))) return -EFAULT; @@ -10926,8 +11159,8 @@ Acked-by: jbeulich@novell.com vma->vm_ops = &privcmd_vm_ops; vma->vm_private_data = NULL; ---- head-2010-05-25.orig/drivers/xen/scsiback/common.h 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsiback/common.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/scsiback/common.h 2009-03-18 10:39:32.000000000 +0100 ++++ head-2011-03-11/drivers/xen/scsiback/common.h 2011-01-31 17:29:16.000000000 +0100 @@ -147,7 +147,7 @@ typedef struct { #define VSCSI_TYPE_HOST 1 @@ -10937,9 +11170,9 @@ Acked-by: jbeulich@novell.com int scsiback_init_sring(struct vscsibk_info *info, unsigned long ring_ref, unsigned int evtchn); int scsiback_schedule(void *data); ---- head-2010-05-25.orig/drivers/xen/scsiback/scsiback.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsiback/scsiback.c 2010-03-24 15:06:12.000000000 +0100 -@@ -467,7 +467,7 @@ void scsiback_cmd_exec(pending_req_t *pe +--- head-2011-03-11.orig/drivers/xen/scsiback/scsiback.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/drivers/xen/scsiback/scsiback.c 2011-01-31 17:29:16.000000000 +0100 +@@ -459,7 +459,7 @@ void scsiback_cmd_exec(pending_req_t *pe write = (data_dir == DMA_TO_DEVICE); rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL); @@ -10948,7 +11181,7 @@ Acked-by: jbeulich@novell.com rq->cmd_len = cmd_len; memcpy(rq->cmd, pending_req->cmnd, cmd_len); -@@ -511,7 +511,7 @@ static void scsiback_device_reset_exec(p +@@ -503,7 +503,7 @@ static 
void scsiback_device_reset_exec(p } @@ -10957,8 +11190,8 @@ Acked-by: jbeulich@novell.com { scsiback_notify_work((struct vscsibk_info *)dev_id); return IRQ_HANDLED; ---- head-2010-05-25.orig/drivers/xen/scsifront/common.h 2010-02-24 13:13:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsifront/common.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/scsifront/common.h 2010-02-24 13:13:46.000000000 +0100 ++++ head-2011-03-11/drivers/xen/scsifront/common.h 2011-01-31 17:29:16.000000000 +0100 @@ -128,7 +128,7 @@ struct vscsifrnt_info { int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); @@ -10968,8 +11201,8 @@ Acked-by: jbeulich@novell.com int scsifront_cmd_done(struct vscsifrnt_info *info); ---- head-2010-05-25.orig/drivers/xen/scsifront/scsifront.c 2008-07-21 11:00:33.000000000 +0200 -+++ head-2010-05-25/drivers/xen/scsifront/scsifront.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/scsifront/scsifront.c 2011-02-02 12:19:11.000000000 +0100 ++++ head-2011-03-11/drivers/xen/scsifront/scsifront.c 2011-01-31 17:29:16.000000000 +0100 @@ -100,7 +100,7 @@ static void scsifront_do_request(struct notify_remote_via_irq(irq); } @@ -10979,8 +11212,8 @@ Acked-by: jbeulich@novell.com { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel_xenbus.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel_xenbus.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netback/accel_xenbus.c 2010-01-04 11:56:34.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netback/accel_xenbus.c 2011-01-31 17:29:16.000000000 +0100 @@ -69,8 +69,7 @@ static void unlink_bend(struct netback_a @@ -11001,8 +11234,8 @@ Acked-by: jbeulich@novell.com { VPRINTK("netirq %d from device %s\n", irq, ((struct xenbus_device *)context)->nodename); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel.h 2009-04-07 13:58:48.000000000 +0200 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel.h 2009-04-07 13:58:48.000000000 +0200 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel.h 2011-01-31 17:29:16.000000000 +0100 @@ -467,10 +467,8 @@ void netfront_accel_msg_tx_fastpath(netf u32 ip, u16 port, u8 protocol); @@ -11016,8 +11249,8 @@ Acked-by: jbeulich@novell.com #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) extern void netfront_accel_msg_from_bend(struct work_struct *context); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_msg.c 2009-04-07 13:58:48.000000000 +0200 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel_msg.c 2009-04-07 13:58:48.000000000 +0200 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel_msg.c 2011-01-31 17:29:16.000000000 +0100 @@ -488,8 +488,7 @@ void netfront_accel_msg_from_bend(void * } @@ -11038,8 +11271,8 @@ Acked-by: jbeulich@novell.com { netfront_accel_vnic *vnic = (netfront_accel_vnic *)context; struct net_device *net_dev = vnic->net_dev; ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_tso.c 2008-02-26 10:54:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_tso.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel_tso.c 2008-02-26 10:54:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel_tso.c 2011-01-31 
17:29:16.000000000 +0100 @@ -363,7 +363,7 @@ int netfront_accel_enqueue_skb_tso(netfr tso_check_safe(skb); @@ -11049,8 +11282,8 @@ Acked-by: jbeulich@novell.com EPRINTK("Trying to TSO send a packet without HW checksum\n"); tso_start(&state, skb); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_vi.c 2010-01-18 15:23:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel_vi.c 2010-01-18 15:23:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:29:16.000000000 +0100 @@ -463,7 +463,7 @@ netfront_accel_enqueue_skb_multi(netfron frag_i = -1; @@ -11069,8 +11302,8 @@ Acked-by: jbeulich@novell.com /* Set to zero to encourage falcon to work it out for us */ *(u16*)(skb->h.raw + skb->csum) = 0; } ---- head-2010-05-25.orig/drivers/xen/tpmback/common.h 2007-06-12 13:13:45.000000000 +0200 -+++ head-2010-05-25/drivers/xen/tpmback/common.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/tpmback/common.h 2007-06-12 13:13:45.000000000 +0200 ++++ head-2011-03-11/drivers/xen/tpmback/common.h 2011-01-31 17:29:16.000000000 +0100 @@ -61,7 +61,7 @@ void tpmif_deschedule_work(tpmif_t * tpm void tpmif_xenbus_init(void); void tpmif_xenbus_exit(void); @@ -11080,9 +11313,9 @@ Acked-by: jbeulich@novell.com long int tpmback_get_instance(struct backend_info *bi); ---- head-2010-05-25.orig/drivers/xen/tpmback/tpmback.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/tpmback/tpmback.c 2010-03-24 15:06:12.000000000 +0100 -@@ -507,7 +507,7 @@ static ssize_t vtpm_op_read(struct file +--- head-2011-03-11.orig/drivers/xen/tpmback/tpmback.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/drivers/xen/tpmback/tpmback.c 2011-01-31 17:29:16.000000000 +0100 +@@ -497,7 +497,7 @@ static ssize_t vtpm_op_read(struct file list_del(&pak->next); write_unlock_irqrestore(&dataex.pak_lock, flags); @@ -11091,7 +11324,7 @@ Acked-by: jbeulich@novell.com ret_size = min_t(size_t, size, left); -@@ -904,7 +904,7 @@ static void tpm_tx_action(unsigned long +@@ -894,7 +894,7 @@ static void tpm_tx_action(unsigned long } } @@ -11100,8 +11333,8 @@ Acked-by: jbeulich@novell.com { tpmif_t *tpmif = (tpmif_t *) dev_id; ---- head-2010-05-25.orig/drivers/xen/usbback/usbback.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbback/usbback.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/usbback/usbback.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/drivers/xen/usbback/usbback.c 2011-01-31 17:29:16.000000000 +0100 @@ -288,7 +288,7 @@ static void usbbk_notify_work(usbif_t *u wake_up(&usbif->wq); } @@ -11120,8 +11353,8 @@ Acked-by: jbeulich@novell.com { pending_req_t *pending_req = (pending_req_t *)urb->context; ---- head-2010-05-25.orig/drivers/xen/usbback/usbback.h 2009-11-06 10:23:23.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbback/usbback.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/usbback/usbback.h 2009-11-06 10:23:23.000000000 +0100 ++++ head-2011-03-11/drivers/xen/usbback/usbback.h 2011-01-31 17:29:16.000000000 +0100 @@ -158,7 +158,7 @@ int portid_add(const char *busid, int portid_remove(const domid_t domid, const unsigned int handle, @@ -11131,10 +11364,10 @@ Acked-by: jbeulich@novell.com int usbbk_schedule(void *arg); struct usbstub *find_attached_device(usbif_t *usbif, int port); void usbbk_attach_device(usbif_t *usbif, struct usbstub *stub); 
---- head-2010-05-25.orig/drivers/xen/usbback/usbstub.c 2009-11-06 10:23:23.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbback/usbstub.c 2010-03-24 15:06:12.000000000 +0100 -@@ -284,7 +284,7 @@ static ssize_t usbstub_show_portids(stru - DRIVER_ATTR(port_ids, S_IRUSR, usbstub_show_portids, NULL); +--- head-2011-03-11.orig/drivers/xen/usbback/usbstub.c 2011-03-02 12:00:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/usbback/usbstub.c 2011-03-11 10:54:35.000000000 +0100 +@@ -283,7 +283,7 @@ static ssize_t usbstub_show_portids(stru + static DRIVER_ATTR(port_ids, S_IRUSR, usbstub_show_portids, NULL); /* table of devices that matches any usbdevice */ -static struct usb_device_id usbstub_table[] = { @@ -11142,7 +11375,7 @@ Acked-by: jbeulich@novell.com { .driver_info = 1 }, /* wildcard, see usb_match_id() */ { } /* Terminating entry */ }; -@@ -308,7 +308,7 @@ int __init usbstub_init(void) +@@ -307,7 +307,7 @@ int __init usbstub_init(void) goto out; } @@ -11151,7 +11384,7 @@ Acked-by: jbeulich@novell.com &driver_attr_port_ids); if (err) usb_deregister(&usbback_usb_driver); -@@ -319,7 +319,7 @@ out: +@@ -318,7 +318,7 @@ out: void usbstub_exit(void) { @@ -11160,8 +11393,8 @@ Acked-by: jbeulich@novell.com &driver_attr_port_ids); usb_deregister(&usbback_usb_driver); } ---- head-2010-05-25.orig/drivers/xen/usbfront/usbfront.h 2009-10-15 11:45:41.000000000 +0200 -+++ head-2010-05-25/drivers/xen/usbfront/usbfront.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/usbfront/usbfront.h 2009-10-15 11:45:41.000000000 +0200 ++++ head-2011-03-11/drivers/xen/usbfront/usbfront.h 2011-01-31 17:29:16.000000000 +0100 @@ -195,7 +195,7 @@ timer_action(struct usbfront_info *info, extern struct kmem_cache *xenhcd_urbp_cachep; extern struct hc_driver xen_usb20_hc_driver; @@ -11171,8 +11404,8 @@ Acked-by: jbeulich@novell.com void xenhcd_rhport_state_change(struct usbfront_info *info, int port, enum usb_device_speed speed); int xenhcd_schedule(void *arg); ---- head-2010-05-25.orig/drivers/xen/usbfront/usbfront-dbg.c 2009-10-15 11:45:41.000000000 +0200 -+++ head-2010-05-25/drivers/xen/usbfront/usbfront-dbg.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/usbfront/usbfront-dbg.c 2009-10-15 11:45:41.000000000 +0200 ++++ head-2011-03-11/drivers/xen/usbfront/usbfront-dbg.c 2011-01-31 17:29:16.000000000 +0100 @@ -90,7 +90,9 @@ static CLASS_DEVICE_ATTR(statistics, S_I static inline void create_debug_file(struct usbfront_info *info) { @@ -11184,8 +11417,8 @@ Acked-by: jbeulich@novell.com } static inline void remove_debug_file(struct usbfront_info *info) ---- head-2010-05-25.orig/drivers/xen/usbfront/usbfront-q.c 2009-10-15 11:45:41.000000000 +0200 -+++ head-2010-05-25/drivers/xen/usbfront/usbfront-q.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/usbfront/usbfront-q.c 2009-10-15 11:45:41.000000000 +0200 ++++ head-2011-03-11/drivers/xen/usbfront/usbfront-q.c 2011-01-31 17:29:16.000000000 +0100 @@ -236,7 +236,7 @@ __acquires(info->lock) COUNT(info->stats.complete); } @@ -11204,8 +11437,8 @@ Acked-by: jbeulich@novell.com { xenhcd_notify_work((struct usbfront_info *) dev_id); return IRQ_HANDLED; ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_comms.c 2010-01-19 16:01:03.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_comms.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 
17:29:16.000000000 +0100 @@ -54,7 +54,7 @@ static DECLARE_WORK(probe_work, xenbus_p static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); @@ -11215,8 +11448,8 @@ Acked-by: jbeulich@novell.com { int old, new; ---- head-2010-05-25.orig/drivers/xen/xenoprof/xenoprofile.c 2010-01-07 09:38:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/xenoprof/xenoprofile.c 2010-01-07 09:38:29.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:29:16.000000000 +0100 @@ -194,8 +194,7 @@ done: oprofile_add_domain_switch(COORDINATOR_DOMAIN); } @@ -11227,9 +11460,9 @@ Acked-by: jbeulich@novell.com { struct xenoprof_buf * buf; static unsigned long flag; ---- head-2010-05-25.orig/include/asm-generic/pgtable.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/include/asm-generic/pgtable.h 2010-03-24 15:06:12.000000000 +0100 -@@ -100,7 +100,7 @@ static inline void ptep_set_wrprotect(st +--- head-2011-03-11.orig/include/asm-generic/pgtable.h 2011-03-11 10:52:21.000000000 +0100 ++++ head-2011-03-11/include/asm-generic/pgtable.h 2011-03-11 10:54:24.000000000 +0100 +@@ -157,7 +157,7 @@ static inline void pmdp_set_wrprotect(st #endif #ifndef arch_change_pte_range @@ -11237,9 +11470,9 @@ Acked-by: jbeulich@novell.com +#define arch_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable) 0 #endif - #ifndef __HAVE_ARCH_PTE_SAME ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc_32.h 2008-01-28 12:24:19.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:06:12.000000000 +0100 + #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc_32.h 2008-01-28 12:24:19.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:29:16.000000000 +0100 @@ -32,52 +32,110 @@ static inline struct desc_struct *get_cp return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; } @@ -11400,8 +11633,8 @@ Acked-by: jbeulich@novell.com static inline void clear_LDT(void) { int cpu = get_cpu(); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2007-06-12 13:14:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:29:16.000000000 +0100 @@ -55,7 +55,7 @@ enum fixed_addresses { #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ @@ -11423,8 +11656,8 @@ Acked-by: jbeulich@novell.com #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypercall_32.h 2009-06-23 09:28:21.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypercall_32.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypercall_32.h 2009-06-23 09:28:21.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypercall_32.h 2011-01-31 17:29:16.000000000 +0100 @@ -128,6 +128,23 @@ __res; \ }) @@ -11495,8 +11728,8 @@ Acked-by: jbeulich@novell.com return _hypercall3(int, grant_table_op, cmd, uop, count); } ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypercall_64.h 2009-06-23 09:28:21.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypercall_64.h 2010-03-24 
15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypercall_64.h 2009-06-23 09:28:21.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypercall_64.h 2011-01-31 17:29:16.000000000 +0100 @@ -135,6 +135,23 @@ __res; \ }) @@ -11566,8 +11799,8 @@ Acked-by: jbeulich@novell.com return _hypercall3(int, grant_table_op, cmd, uop, count); } ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2009-07-13 14:25:35.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2009-07-13 14:25:35.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:29:16.000000000 +0100 @@ -44,6 +44,7 @@ #include #include @@ -11638,8 +11871,8 @@ Acked-by: jbeulich@novell.com static inline int HYPERVISOR_yield( void) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:29:16.000000000 +0100 @@ -53,7 +53,6 @@ static inline int pte_exec_kernel(pte_t * not possible, use pte_get_and_clear to obtain the old pte * value and then use set_pte to update it. -ben @@ -11696,8 +11929,8 @@ Acked-by: jbeulich@novell.com static inline int pte_same(pte_t a, pte_t b) { return a.pte_low == b.pte_low && a.pte_high == b.pte_high; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:33:33.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:37:16.000000000 +0100 @@ -260,31 +260,89 @@ static inline pte_t pte_mkhuge(pte_t pte # include #endif @@ -11879,8 +12112,8 @@ Acked-by: jbeulich@novell.com #include void make_lowmem_page_readonly(void *va, unsigned int feature); -@@ -526,10 +563,11 @@ int touch_pte_range(struct mm_struct *mm - unsigned long size); +@@ -523,10 +560,11 @@ int create_lookup_pte_addr(struct mm_str + uint64_t *ptep); int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, unsigned long end, pgprot_t newprot); @@ -11894,7 +12127,7 @@ Acked-by: jbeulich@novell.com #define io_remap_pfn_range(vma,from,pfn,size,prot) \ direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO) -@@ -538,13 +576,6 @@ direct_remap_pfn_range(vma,from,pfn,size +@@ -535,13 +573,6 @@ direct_remap_pfn_range(vma,from,pfn,size #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) @@ -11908,8 +12141,8 @@ Acked-by: jbeulich@novell.com #include #endif /* _I386_PGTABLE_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:29:16.000000000 +0100 @@ -146,6 +146,18 @@ static inline void detect_ht(struct cpui #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt 
Pending */ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ @@ -12021,8 +12254,8 @@ Acked-by: jbeulich@novell.com /* from system description table in BIOS. Mostly for MCA use, but others may find it useful. */ extern unsigned int machine_id; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_32.h 2007-06-12 13:14:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:29:16.000000000 +0100 @@ -79,25 +79,36 @@ static inline int hard_smp_processor_id( return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); } @@ -12067,8 +12300,8 @@ Acked-by: jbeulich@novell.com +#endif + #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_32.h 2007-06-12 13:14:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:29:16.000000000 +0100 @@ -267,6 +267,9 @@ static inline unsigned long __xchg(unsig #define cmpxchg(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ @@ -12119,8 +12352,8 @@ Acked-by: jbeulich@novell.com #ifndef CONFIG_X86_CMPXCHG /* * Building a kernel capable running on 80386. It may be necessary to ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2007-11-26 16:59:25.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2007-11-26 16:59:25.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:29:16.000000000 +0100 @@ -8,8 +8,6 @@ #define __flush_tlb_global() xen_tlb_flush() #define __flush_tlb_all() xen_tlb_flush() @@ -12130,8 +12363,8 @@ Acked-by: jbeulich@novell.com #define cpu_has_invlpg (boot_cpu_data.x86 > 3) #define __flush_tlb_single(addr) xen_invlpg(addr) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2007-06-12 13:14:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:29:16.000000000 +0100 @@ -41,7 +41,7 @@ enum fixed_addresses { #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ @@ -12141,8 +12374,8 @@ Acked-by: jbeulich@novell.com FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:33:42.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:37:14.000000000 +0100 @@ -44,12 +44,9 @@ extern unsigned long __supported_pte_mas #define swapper_pg_dir init_level4_pgt @@ -12297,8 +12530,8 @@ Acked-by: jbeulich@novell.com /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x3f) #define 
__swp_offset(x) ((x).val >> 8) -@@ -547,10 +562,11 @@ int touch_pte_range(struct mm_struct *mm - unsigned long size); +@@ -543,10 +558,11 @@ int create_lookup_pte_addr(struct mm_str + uint64_t *ptep); int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, unsigned long end, pgprot_t newprot); @@ -12312,7 +12545,7 @@ Acked-by: jbeulich@novell.com #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO) -@@ -572,8 +588,6 @@ int xen_change_pte_range(struct mm_struc +@@ -568,8 +584,6 @@ int xen_change_pte_range(struct mm_struc #define kc_offset_to_vaddr(o) \ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) @@ -12321,8 +12554,8 @@ Acked-by: jbeulich@novell.com #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_CLEAR_FLUSH ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_64.h 2008-03-06 08:54:32.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor_64.h 2008-03-06 08:54:32.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:29:16.000000000 +0100 @@ -484,6 +484,8 @@ static inline void __mwait(unsigned long : :"a" (eax), "c" (ecx)); } @@ -12332,8 +12565,8 @@ Acked-by: jbeulich@novell.com #define stack_current() \ ({ \ struct thread_info *ti; \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_64.h 2007-06-12 13:14:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:29:16.000000000 +0100 @@ -4,15 +4,12 @@ /* * We need the APIC definitions automatically as part of 'smp.h' @@ -12439,8 +12672,8 @@ Acked-by: jbeulich@novell.com +#endif /* !CONFIG_SMP */ #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_64.h 2007-11-26 16:59:25.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system_64.h 2007-11-26 16:59:25.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:29:16.000000000 +0100 @@ -24,6 +24,7 @@ #define __EXTRA_CLOBBER \ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" @@ -12449,8 +12682,8 @@ Acked-by: jbeulich@novell.com #define switch_to(prev,next,last) \ asm volatile(SAVE_CONTEXT \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2007-11-26 16:59:25.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2007-11-26 16:59:25.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:29:16.000000000 +0100 @@ -12,9 +12,6 @@ */ #define __flush_tlb_global() xen_tlb_flush() @@ -12461,23 +12694,8 @@ Acked-by: jbeulich@novell.com #define __flush_tlb_all() __flush_tlb_global() #define __flush_tlb_one(addr) xen_invlpg((unsigned long)addr) ---- head-2010-05-25.orig/include/linux/skbuff.h 2010-04-15 09:43:55.000000000 +0200 -+++ head-2010-05-25/include/linux/skbuff.h 2010-04-15 
09:52:44.000000000 +0200 -@@ -2109,5 +2109,12 @@ static inline void skb_forward_csum(stru - } - - bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); -+ -+#ifdef CONFIG_XEN -+int skb_checksum_setup(struct sk_buff *skb); -+#else -+static inline int skb_checksum_setup(struct sk_buff *skb) { return 0; } -+#endif -+ - #endif /* __KERNEL__ */ - #endif /* _LINUX_SKBUFF_H */ ---- head-2010-05-25.orig/include/xen/evtchn.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/evtchn.h 2010-03-24 15:06:12.000000000 +0100 +--- head-2011-03-11.orig/include/xen/evtchn.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-11/include/xen/evtchn.h 2011-01-31 17:29:16.000000000 +0100 @@ -57,34 +57,34 @@ */ int bind_caller_port_to_irqhandler( @@ -12518,8 +12736,85 @@ Acked-by: jbeulich@novell.com unsigned long irqflags, const char *devname, void *dev_id); ---- head-2010-05-25.orig/include/xen/xencons.h 2007-10-15 09:39:38.000000000 +0200 -+++ head-2010-05-25/include/xen/xencons.h 2010-03-24 15:06:12.000000000 +0100 +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/include/xen/net-util.h 2011-02-09 15:49:42.000000000 +0100 +@@ -0,0 +1,74 @@ ++#ifndef __XEN_NETUTIL_H__ ++#define __XEN_NETUTIL_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++static inline int skb_checksum_setup(struct sk_buff *skb, ++ unsigned long *fixup_counter) ++{ ++ struct iphdr *iph = (void *)skb->data; ++ __be16 *csum = NULL; ++ int err = -EPROTO; ++ ++ if (skb->ip_summed != CHECKSUM_PARTIAL) { ++ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ ++ if (!skb_is_gso(skb)) ++ return 0; ++ ++ /* ++ * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy ++ * peers can fail to set NETRXF_csum_blank when sending a GSO ++ * frame. In this case force the SKB to CHECKSUM_PARTIAL and ++ * recalculate the partial checksum. 
++ */ ++ ++*fixup_counter; ++ --csum; ++ } ++ ++ if (skb->protocol != htons(ETH_P_IP)) ++ goto out; ++ ++ skb->nh.iph = iph; ++ skb->h.raw = skb->nh.raw + 4 * iph->ihl; ++ if (skb->h.raw >= skb->tail) ++ goto out; ++ ++ switch (iph->protocol) { ++ case IPPROTO_TCP: ++ skb->csum = offsetof(struct tcphdr, check); ++ if (csum) ++ csum = &skb->h.th->check; ++ break; ++ case IPPROTO_UDP: ++ skb->csum = offsetof(struct udphdr, check); ++ if (csum) ++ csum = &skb->h.uh->check; ++ break; ++ default: ++ if (net_ratelimit()) ++ printk(KERN_ERR "Attempting to checksum a non-" ++ "TCP/UDP packet, dropping a protocol" ++ " %d packet\n", skb->nh.iph->protocol); ++ goto out; ++ } ++ ++ if ((skb->h.raw + skb->csum + sizeof(*csum)) > skb->tail) ++ goto out; ++ ++ if (csum) { ++ *csum = ~csum_tcpudp_magic(iph->saddr, iph->daddr, ++ skb->len - iph->ihl*4, ++ IPPROTO_TCP, 0); ++ skb->ip_summed = CHECKSUM_PARTIAL; ++ } ++ ++ err = 0; ++out: ++ return err; ++} ++ ++#endif /* __XEN_NETUTIL_H__ */ +--- head-2011-03-11.orig/include/xen/xencons.h 2007-10-15 09:39:38.000000000 +0200 ++++ head-2011-03-11/include/xen/xencons.h 2011-01-31 17:29:16.000000000 +0100 @@ -8,7 +8,7 @@ void xencons_force_flush(void); void xencons_resume(void); @@ -12529,20 +12824,20 @@ Acked-by: jbeulich@novell.com void xencons_tx(void); int xencons_ring_init(void); ---- head-2010-05-25.orig/mm/mprotect.c 2010-04-15 09:44:14.000000000 +0200 -+++ head-2010-05-25/mm/mprotect.c 2010-04-15 09:52:51.000000000 +0200 -@@ -90,7 +90,7 @@ static inline void change_pmd_range(stru - next = pmd_addr_end(addr, end); +--- head-2011-03-11.orig/mm/mprotect.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-11/mm/mprotect.c 2011-01-31 17:29:16.000000000 +0100 +@@ -97,7 +97,7 @@ static inline void change_pmd_range(stru + } if (pmd_none_or_clear_bad(pmd)) continue; - if (arch_change_pte_range(mm, pmd, addr, next, newprot)) + if (arch_change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable)) continue; - change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable); - } while (pmd++, addr = next, addr != end); ---- head-2010-05-25.orig/mm/page_alloc.c 2010-03-24 14:59:37.000000000 +0100 -+++ head-2010-05-25/mm/page_alloc.c 2010-03-24 15:06:12.000000000 +0100 -@@ -4684,6 +4684,23 @@ static void __setup_per_zone_wmarks(void + change_pte_range(vma->vm_mm, pmd, addr, next, newprot, + dirty_accountable); +--- head-2011-03-11.orig/mm/page_alloc.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-11/mm/page_alloc.c 2011-02-08 10:03:14.000000000 +0100 +@@ -5004,6 +5004,23 @@ static void __setup_per_zone_wmarks(void spin_unlock_irqrestore(&zone->lock, flags); } @@ -12566,34 +12861,3 @@ Acked-by: jbeulich@novell.com /* update totalreserve_pages */ calculate_totalreserve_pages(); } ---- head-2010-05-25.orig/net/core/dev.c 2010-05-25 09:19:25.000000000 +0200 -+++ head-2010-05-25/net/core/dev.c 2010-05-25 09:21:41.000000000 +0200 -@@ -2036,17 +2036,15 @@ inline int skb_checksum_setup(struct sk_ - } - if ((skb->h.raw + skb->csum + 2) > skb->tail) - goto out; -- skb->ip_summed = CHECKSUM_HW; -+ skb->ip_summed = CHECKSUM_PARTIAL; - skb->proto_csum_blank = 0; - } - return 0; - out: - return -EPROTO; - } --#else --inline int skb_checksum_setup(struct sk_buff *skb) { return 0; } --#endif - EXPORT_SYMBOL(skb_checksum_setup); -+#endif - - static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, - struct net_device *dev, -@@ -2628,7 +2626,7 @@ int netif_receive_skb(struct sk_buff *sk - case CHECKSUM_UNNECESSARY: - skb->proto_data_valid = 1; - break; 
-- case CHECKSUM_HW: -+ case CHECKSUM_PARTIAL: - /* XXX Implement me. */ - default: - skb->proto_data_valid = 0; diff --git a/patches.xen/xen3-patch-2.6.20 b/patches.xen/xen3-patch-2.6.20 index f0919c3..dc5a2df 100644 --- a/patches.xen/xen3-patch-2.6.20 +++ b/patches.xen/xen3-patch-2.6.20 @@ -6,9 +6,9 @@ Automatically created from "patches.kernel.org/patch-2.6.20" by xen-port-patches Acked-by: jbeulich@novell.com ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-24 15:08:58.000000000 +0100 -@@ -1707,6 +1707,7 @@ config PHYSICAL_START +--- head-2011-02-17.orig/arch/x86/Kconfig 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/Kconfig 2011-01-31 17:32:16.000000000 +0100 +@@ -1651,6 +1651,7 @@ config PHYSICAL_START config RELOCATABLE bool "Build a relocatable kernel" @@ -16,18 +16,18 @@ Acked-by: jbeulich@novell.com default y ---help--- This builds a kernel image that retains relocation information -@@ -1729,7 +1730,8 @@ config X86_NEED_RELOCS +@@ -1672,7 +1673,8 @@ config X86_NEED_RELOCS + depends on X86_32 && RELOCATABLE config PHYSICAL_ALIGN - hex -- prompt "Alignment value to which kernel should be aligned" if X86_32 -+ prompt "Alignment value to which kernel should be aligned" if X86_32 && !XEN +- hex "Alignment value to which kernel should be aligned" if X86_32 ++ hex "Alignment value to which kernel should be aligned" if X86_32 && !XEN + default 0x2000 if XEN default "0x1000000" range 0x2000 0x1000000 ---help--- ---- head-2010-05-25.orig/arch/x86/kernel/asm-offsets_32.c 2010-01-19 16:00:16.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/asm-offsets_32.c 2011-01-31 14:54:00.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:32:16.000000000 +0100 @@ -55,6 +55,7 @@ void foo(void) OFFSET(TI_exec_domain, thread_info, exec_domain); OFFSET(TI_flags, thread_info, flags); @@ -36,7 +36,7 @@ Acked-by: jbeulich@novell.com OFFSET(TI_preempt_count, thread_info, preempt_count); OFFSET(TI_addr_limit, thread_info, addr_limit); OFFSET(TI_restart_block, thread_info, restart_block); -@@ -110,6 +111,11 @@ void foo(void) +@@ -108,6 +109,11 @@ void foo(void) OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); @@ -48,8 +48,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_PARAVIRT BLANK(); OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -22,6 +22,7 @@ #define phys_pkg_id(a,b) a #endif @@ -447,8 +447,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_HOTPLUG_CPU void __cpuinit cpu_uninit(void) { ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2008-01-28 12:24:18.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2008-01-28 12:24:18.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -12,7 +12,7 @@ static DEFINE_MUTEX(mtrr_mutex); @@ -469,7 +469,7 @@ Acked-by: jbeulich@novell.com struct xen_platform_op op; --- /dev/null 1970-01-01 
00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:08:58.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -0,0 +1,1002 @@ +#include +#include @@ -1473,8 +1473,8 @@ Acked-by: jbeulich@novell.com + return 0; +} +early_param("memmap", parse_memmap); ---- head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:32:16.000000000 +0100 @@ -30,12 +30,13 @@ * 18(%esp) - %eax * 1C(%esp) - %ds @@ -2245,8 +2245,8 @@ Acked-by: jbeulich@novell.com ENTRY(fixup_4gb_segment) RING0_EC_FRAME pushl $do_fixup_4gb_segment ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/head_32-xen.S 2011-01-31 17:32:16.000000000 +0100 @@ -9,6 +9,7 @@ #include #include @@ -2363,8 +2363,8 @@ Acked-by: jbeulich@novell.com .ascii ",FEATURES=writable_page_tables" .ascii "|writable_descriptor_tables" .ascii "|auto_translated_physmap" ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -34,6 +34,7 @@ #include #include @@ -2557,8 +2557,8 @@ Acked-by: jbeulich@novell.com set_native_irq_info(irq, TARGET_CPUS); spin_unlock_irqrestore(&ioapic_lock, flags); ---- head-2010-05-25.orig/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -177,16 +177,14 @@ static int read_default_ldt(void __user { int err; @@ -2577,8 +2577,8 @@ Acked-by: jbeulich@novell.com err = -EFAULT; return err; ---- head-2010-05-25.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/microcode-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/microcode-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -1,7 +1,7 @@ /* * Intel CPU Microcode Update Driver for Linux @@ -2606,8 +2606,8 @@ Acked-by: jbeulich@novell.com return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -36,7 +36,7 @@ /* Have we found an MP table */ @@ -2657,8 +2657,8 @@ Acked-by: jbeulich@novell.com { struct mpc_config_processor processor; int boot_cpu = 0; ---- 
head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -273,7 +273,7 @@ EXPORT_SYMBOL(dma_free_coherent); int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags) @@ -2694,8 +2694,8 @@ Acked-by: jbeulich@novell.com return 0; } EXPORT_SYMBOL(dma_declare_coherent_memory); ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/process_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -60,6 +60,7 @@ #include @@ -2820,106 +2820,8 @@ Acked-by: jbeulich@novell.com return prev_p; } ---- head-2010-05-25.orig/arch/x86/kernel/quirks-xen.c 2008-01-28 12:24:19.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/quirks-xen.c 2010-03-24 15:08:58.000000000 +0100 -@@ -3,10 +3,12 @@ - */ - #include - #include -+#include -+#include -+#include - - #if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI) -- --static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) -+static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev) - { - u8 config, rev; - u32 word; -@@ -14,14 +16,12 @@ static void __devinit quirk_intel_irqbal - /* BIOS may enable hardware IRQ balancing for - * E7520/E7320/E7525(revision ID 0x9 and below) - * based platforms. -- * Disable SW irqbalance/affinity on those platforms. -+ * For those platforms, make sure that the genapic is set to 'flat' - */ - pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); - if (rev > 0x9) - return; - -- printk(KERN_INFO "Intel E7520/7320/7525 detected."); -- - /* enable access to config space*/ - pci_read_config_byte(dev, 0xf4, &config); - pci_write_config_byte(dev, 0xf4, config|0x2); -@@ -30,6 +30,46 @@ static void __devinit quirk_intel_irqbal - raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word); - - if (!(word & (1 << 13))) { -+#ifndef CONFIG_XEN -+#ifdef CONFIG_X86_64 -+ if (genapic != &apic_flat) -+ panic("APIC mode must be flat on this system\n"); -+#elif defined(CONFIG_X86_GENERICARCH) -+ if (genapic != &apic_default) -+ panic("APIC mode must be default(flat) on this system. Use apic=default\n"); -+#endif -+#endif -+ } -+ -+ /* put back the original value for config space*/ -+ if (!(config & 0x2)) -+ pci_write_config_byte(dev, 0xf4, config); -+} -+ -+void __init quirk_intel_irqbalance(void) -+{ -+ u8 config, rev; -+ u32 word; -+ -+ /* BIOS may enable hardware IRQ balancing for -+ * E7520/E7320/E7525(revision ID 0x9 and below) -+ * based platforms. -+ * Disable SW irqbalance/affinity on those platforms. 
-+ */ -+ rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION); -+ if (rev > 0x9) -+ return; -+ -+ printk(KERN_INFO "Intel E7520/7320/7525 detected."); -+ -+ /* enable access to config space */ -+ config = read_pci_config_byte(0, 0, 0, 0xf4); -+ write_pci_config_byte(0, 0, 0, 0xf4, config|0x2); -+ -+ /* read xTPR register */ -+ word = read_pci_config_16(0, 0, 0x40, 0x4c); -+ -+ if (!(word & (1 << 13))) { - struct xen_platform_op op; - printk(KERN_INFO "Disabling irq balancing and affinity\n"); - op.cmd = XENPF_platform_quirk; -@@ -37,11 +77,12 @@ static void __devinit quirk_intel_irqbal - WARN_ON(HYPERVISOR_platform_op(&op)); - } - -- /* put back the original value for config space*/ -+ /* put back the original value for config space */ - if (!(config & 0x2)) -- pci_write_config_byte(dev, 0xf4, config); -+ write_pci_config_byte(0, 0, 0, 0xf4, config); - } --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, verify_quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, verify_quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, verify_quirk_intel_irqbalance); -+ - #endif ---- head-2010-05-25.orig/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -76,9 +76,6 @@ #include #endif @@ -4006,8 +3908,8 @@ Acked-by: jbeulich@novell.com if (is_initial_xendomain()) { #ifdef CONFIG_VT ---- head-2010-05-25.orig/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -659,6 +659,10 @@ int smp_call_function_single(int cpu, vo put_cpu(); return -EBUSY; @@ -4019,8 +3921,8 @@ Acked-by: jbeulich@novell.com spin_lock_bh(&call_lock); __smp_call_function_single(cpu, func, info, nonatomic, wait); spin_unlock_bh(&call_lock); ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/time-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -60,6 +60,7 @@ #include #include @@ -4029,7 +3931,7 @@ Acked-by: jbeulich@novell.com #include #include "mach_time.h" -@@ -128,11 +129,11 @@ static DEFINE_PER_CPU(struct vcpu_runsta +@@ -125,11 +126,11 @@ static DEFINE_PER_CPU(struct vcpu_runsta /* Must be signed, as it's compared with s64 quantities which can be -ve. */ #define NS_PER_TICK (1000000000LL/HZ) @@ -4043,7 +3945,7 @@ Acked-by: jbeulich@novell.com /* * GCC 4.3 can turn loops over an induction variable into division. 
We do -@@ -527,10 +528,7 @@ static int set_rtc_mmss(unsigned long no +@@ -542,10 +543,7 @@ static int set_rtc_mmss(unsigned long no /* gets recalled with irq locally disabled */ /* XXX - does irqsave resolve this? -johnstul */ spin_lock_irqsave(&rtc_lock, flags); @@ -4055,7 +3957,7 @@ Acked-by: jbeulich@novell.com spin_unlock_irqrestore(&rtc_lock, flags); return retval; -@@ -858,10 +856,7 @@ unsigned long get_cmos_time(void) +@@ -876,10 +874,7 @@ unsigned long get_cmos_time(void) spin_lock_irqsave(&rtc_lock, flags); @@ -4067,7 +3969,7 @@ Acked-by: jbeulich@novell.com spin_unlock_irqrestore(&rtc_lock, flags); -@@ -963,7 +958,7 @@ static void __init hpet_time_init(void) +@@ -981,7 +976,7 @@ static void __init hpet_time_init(void) printk("Using HPET for base-timer\n"); } @@ -4076,8 +3978,8 @@ Acked-by: jbeulich@novell.com } #endif ---- head-2010-05-25.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -29,6 +29,8 @@ #include #include @@ -4414,8 +4316,8 @@ Acked-by: jbeulich@novell.com -} -__setup("call_trace=", call_trace_setup); -#endif ---- head-2010-05-25.orig/arch/x86/kernel/vmlinux.lds.S 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/vmlinux.lds.S 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/vmlinux.lds.S 2011-02-17 09:59:45.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/vmlinux.lds.S 2011-01-31 17:32:16.000000000 +0100 @@ -84,6 +84,10 @@ SECTIONS { #ifdef CONFIG_X86_32 @@ -4427,8 +4329,8 @@ Acked-by: jbeulich@novell.com phys_startup_32 = startup_32 - LOAD_OFFSET; #else . 
= __START_KERNEL; ---- head-2010-05-25.orig/arch/x86/kvm/Kconfig 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kvm/Kconfig 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kvm/Kconfig 2011-02-17 09:59:45.000000000 +0100 ++++ head-2011-02-17/arch/x86/kvm/Kconfig 2011-01-31 17:32:16.000000000 +0100 @@ -7,6 +7,7 @@ source "virt/kvm/Kconfig" menuconfig VIRTUALIZATION bool "Virtualization" @@ -4437,8 +4339,8 @@ Acked-by: jbeulich@novell.com default y ---help--- Say Y here to get to see options for using your Linux host to run other ---- head-2010-05-25.orig/arch/x86/mm/fault_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/fault_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/fault_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -22,9 +22,9 @@ #include #include @@ -4483,8 +4385,8 @@ Acked-by: jbeulich@novell.com break; prefetch = (instr_lo == 0xF) && (opcode == 0x0D || opcode == 0x18); ---- head-2010-05-25.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -32,7 +32,7 @@ static void *__kmap_atomic(struct page * unsigned long vaddr; @@ -4539,8 +4441,8 @@ Acked-by: jbeulich@novell.com idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/init_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/init_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -233,8 +233,6 @@ static inline int page_kills_ppro(unsign #endif @@ -4601,8 +4503,8 @@ Acked-by: jbeulich@novell.com void __init pgtable_cache_init(void) { ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -203,7 +203,7 @@ void pte_free(struct page *pte) __free_page(pte); } @@ -4612,8 +4514,8 @@ Acked-by: jbeulich@novell.com { memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); } -@@ -243,7 +243,7 @@ static inline void pgd_list_del(pgd_t *p - set_page_private(next, (unsigned long)pprev); +@@ -244,7 +244,7 @@ static inline void pgd_list_del(pgd_t *p + page->mapping = NULL; } -void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) @@ -4621,7 +4523,7 @@ Acked-by: jbeulich@novell.com { unsigned long flags; -@@ -264,7 +264,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *c +@@ -265,7 +265,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *c } /* never called when PTRS_PER_PMD > 1 */ @@ -4630,8 +4532,8 @@ Acked-by: jbeulich@novell.com { unsigned long flags; /* can be called from interrupt context */ ---- head-2010-05-25.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/pci/irq-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/pci/irq-xen.c 2011-01-31 
17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/pci/irq-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -768,7 +768,7 @@ static void __init pirq_find_router(stru DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n", rt->rtr_vendor, rt->rtr_device); @@ -4650,8 +4552,8 @@ Acked-by: jbeulich@novell.com } static struct irq_info *pirq_get_info(struct pci_dev *dev) ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:32:16.000000000 +0100 @@ -261,7 +261,6 @@ ENTRY(system_call) movq %rax,ORIG_RAX-ARGOFFSET(%rsp) GET_THREAD_INFO(%rcx) @@ -4789,8 +4691,8 @@ Acked-by: jbeulich@novell.com - CFI_ENDPROC -ENDPROC(arch_unwind_init_running) -#endif ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/head64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/head64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -104,7 +104,10 @@ void __init x86_64_start_kernel(char * r machine_to_phys_order++; @@ -4803,8 +4705,8 @@ Acked-by: jbeulich@novell.com set_intr_gate(i, early_idt_handler); asm volatile("lidt %0" :: "m" (idt_descr)); #endif ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -202,14 +202,20 @@ static struct IO_APIC_route_entry ioapic * the interrupt, and we need to make sure the entry is fully populated * before that happens. @@ -4997,8 +4899,8 @@ Acked-by: jbeulich@novell.com } } ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -35,8 +35,6 @@ int smp_found_config; unsigned int __initdata maxcpus = NR_CPUS; @@ -5008,9 +4910,9 @@ Acked-by: jbeulich@novell.com /* * Various Linux-internal data structures created from the * MP-table. 
---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -@@ -119,29 +119,23 @@ void exit_idle(void) +--- head-2011-02-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:30:50.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:30:59.000000000 +0100 +@@ -118,29 +118,23 @@ void exit_idle(void) static void poll_idle (void) { local_irq_enable(); @@ -5051,7 +4953,7 @@ Acked-by: jbeulich@novell.com } #ifdef CONFIG_HOTPLUG_CPU -@@ -181,6 +175,12 @@ void cpu_idle (void) +@@ -180,6 +174,12 @@ void cpu_idle (void) idle = xen_idle; /* no alternatives */ if (cpu_is_offline(smp_processor_id())) play_dead(); @@ -5064,8 +4966,8 @@ Acked-by: jbeulich@novell.com enter_idle(); idle(); /* In many cases the interrupt that ended idle ---- head-2010-05-25.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -574,8 +574,7 @@ void __init setup_arch(char **cmdline_p) if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) { @@ -5118,8 +5020,8 @@ Acked-by: jbeulich@novell.com c->x86_max_cores = intel_num_cpu_cores(c); srat_detect_node(); ---- head-2010-05-25.orig/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -384,12 +384,17 @@ int smp_call_function_single (int cpu, v put_cpu(); return 0; @@ -5138,8 +5040,8 @@ Acked-by: jbeulich@novell.com /* * this function sends a 'generic call function' IPI to all other CPUs ---- head-2010-05-25.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -30,9 +30,10 @@ #include #include @@ -5365,8 +5267,8 @@ Acked-by: jbeulich@novell.com -} -early_param("call_trace", call_trace_setup); -#endif ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -42,6 +42,7 @@ #include @@ -5408,8 +5310,8 @@ Acked-by: jbeulich@novell.com __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL); } ---- head-2010-05-25.orig/arch/x86/mm/fault_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/fault_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/fault_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -23,9 +23,9 @@ #include #include @@ -5457,8 +5359,8 @@ Acked-by: jbeulich@novell.com 
} void dump_pagetable(unsigned long address) ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-04-29 09:46:52.000000000 +0200 +--- head-2011-02-17.orig/arch/x86/mm/init_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/init_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -1164,14 +1164,15 @@ static __init int x8664_sysctl_init(void __initcall(x8664_sysctl_init); #endif @@ -5478,8 +5380,8 @@ Acked-by: jbeulich@novell.com }; struct vm_area_struct *get_gate_vma(struct task_struct *tsk) ---- head-2010-05-25.orig/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -330,34 +330,40 @@ static struct page *split_large_page(uns return base; } @@ -5566,8 +5468,8 @@ Acked-by: jbeulich@novell.com } } ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/pci/msi-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/pci/msi-xen.c 2011-01-31 17:32:16.000000000 +0100 @@ -273,10 +273,8 @@ void disable_msi_mode(struct pci_dev *de pci_write_config_word(dev, msi_control_reg(pos), control); dev->msix_enabled = 0; @@ -5594,8 +5496,8 @@ Acked-by: jbeulich@novell.com } #ifdef CONFIG_PM ---- head-2010-05-25.orig/drivers/xen/balloon/balloon.c 2010-04-15 09:52:32.000000000 +0200 -+++ head-2010-05-25/drivers/xen/balloon/balloon.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/balloon/balloon.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/balloon/balloon.c 2011-01-31 17:32:16.000000000 +0100 @@ -106,8 +106,8 @@ static unsigned long __read_mostly total static LIST_HEAD(ballooned_pages); @@ -5616,8 +5518,8 @@ Acked-by: jbeulich@novell.com { int need_sleep = 0; long credit; ---- head-2010-05-25.orig/drivers/xen/blkback/blkback.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/blkback.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/blkback/blkback.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkback/blkback.c 2011-01-31 17:32:16.000000000 +0100 @@ -37,6 +37,7 @@ #include @@ -5626,8 +5528,8 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/drivers/xen/blkback/interface.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/interface.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/blkback/interface.c 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkback/interface.c 2011-01-31 17:32:16.000000000 +0100 @@ -35,7 +35,7 @@ #include #include @@ -5637,15 +5539,15 @@ Acked-by: jbeulich@novell.com blkif_t *blkif_alloc(domid_t domid) { ---- head-2010-05-25.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/blkfront.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkfront/blkfront.c 2011-01-31 17:32:16.000000000 +0100 @@ -71,7 +71,7 @@ static int setup_blkring(struct 
xenbus_d static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); -static void blkif_restart_queue(void *arg); +static void blkif_restart_queue(struct work_struct *arg); - static void blkif_recover(struct blkfront_info *); + static int blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); @@ -111,7 +111,7 @@ static int blkfront_probe(struct xenbus_ @@ -5669,8 +5571,8 @@ Acked-by: jbeulich@novell.com spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); ---- head-2010-05-25.orig/drivers/xen/blktap/blktap.c 2010-04-29 09:43:21.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap/blktap.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:07:17.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap/blktap.c 2011-01-31 17:32:16.000000000 +0100 @@ -40,6 +40,7 @@ #include @@ -5679,8 +5581,8 @@ Acked-by: jbeulich@novell.com #include #include #include "common.h" ---- head-2010-05-25.orig/drivers/xen/blktap/interface.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap/interface.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/blktap/interface.c 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap/interface.c 2011-01-31 17:32:16.000000000 +0100 @@ -35,7 +35,7 @@ #include #include @@ -5690,8 +5592,8 @@ Acked-by: jbeulich@novell.com blkif_t *tap_alloc_blkif(domid_t domid) { ---- head-2010-05-25.orig/drivers/xen/char/mem.c 2007-08-06 15:10:49.000000000 +0200 -+++ head-2010-05-25/drivers/xen/char/mem.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/char/mem.c 2007-08-06 15:10:49.000000000 +0200 ++++ head-2011-02-17/drivers/xen/char/mem.c 2011-01-31 17:32:16.000000000 +0100 @@ -157,7 +157,7 @@ static loff_t memory_lseek(struct file * { loff_t ret; @@ -5710,8 +5612,8 @@ Acked-by: jbeulich@novell.com return ret; } ---- head-2010-05-25.orig/drivers/xen/console/console.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/console/console.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/console/console.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/console/console.c 2011-01-31 17:32:16.000000000 +0100 @@ -85,11 +85,6 @@ static int xc_num = -1; #define XEN_HVC_MAJOR 229 #define XEN_HVC_MINOR 0 @@ -5746,9 +5648,9 @@ Acked-by: jbeulich@novell.com if (buf[i] == '\x0f') { /* ^O */ if (!sysrq_requested) { sysrq_requested = jiffies; ---- head-2010-05-25.orig/drivers/xen/core/reboot.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/reboot.c 2010-03-24 15:08:58.000000000 +0100 -@@ -34,8 +34,8 @@ static int suspend_cancelled; +--- head-2011-02-17.orig/drivers/xen/core/reboot.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/reboot.c 2011-01-31 17:32:16.000000000 +0100 +@@ -33,8 +33,8 @@ static int suspend_cancelled; /* Can we leave APs online when we suspend? 
*/ static int fast_suspend; @@ -5759,7 +5661,7 @@ Acked-by: jbeulich@novell.com static int setup_suspend_evtchn(void); -@@ -105,7 +105,7 @@ static int xen_suspend(void *__unused) +@@ -104,7 +104,7 @@ static int xen_suspend(void *__unused) case SHUTDOWN_RESUMING: break; default: @@ -5768,7 +5670,7 @@ Acked-by: jbeulich@novell.com break; } -@@ -137,12 +137,12 @@ static void switch_shutdown_state(int ne +@@ -136,12 +136,12 @@ static void switch_shutdown_state(int ne /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) @@ -5783,9 +5685,9 @@ Acked-by: jbeulich@novell.com { int err; ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:08:58.000000000 +0100 -@@ -160,7 +160,12 @@ static void xen_smp_intr_exit(unsigned i +--- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-01-31 17:32:16.000000000 +0100 +@@ -158,7 +158,12 @@ static void xen_smp_intr_exit(unsigned i void __cpuinit cpu_bringup(void) { @@ -5798,7 +5700,7 @@ Acked-by: jbeulich@novell.com identify_cpu(cpu_data + smp_processor_id()); touch_softlockup_watchdog(); preempt_disable(); -@@ -299,11 +304,12 @@ void __init smp_prepare_cpus(unsigned in +@@ -296,11 +301,12 @@ void __init smp_prepare_cpus(unsigned in if (cpu == 0) continue; @@ -5814,7 +5716,7 @@ Acked-by: jbeulich@novell.com gdt_descr->address = get_zeroed_page(GFP_KERNEL); if (unlikely(!gdt_descr->address)) { printk(KERN_CRIT "CPU%d failed to allocate GDT\n", -@@ -312,6 +318,11 @@ void __init smp_prepare_cpus(unsigned in +@@ -309,6 +315,11 @@ void __init smp_prepare_cpus(unsigned in } gdt_descr->size = GDT_SIZE; memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE); @@ -5826,8 +5728,8 @@ Acked-by: jbeulich@novell.com make_page_readonly( (void *)gdt_descr->address, XENFEAT_writable_descriptor_tables); -@@ -331,10 +342,6 @@ void __init smp_prepare_cpus(unsigned in - cpu_2_logical_apicid[cpu] = apicid; +@@ -327,10 +338,6 @@ void __init smp_prepare_cpus(unsigned in + x86_cpu_to_apicid[cpu] = apicid; - idle = fork_idle(cpu); @@ -5837,8 +5739,8 @@ Acked-by: jbeulich@novell.com #ifdef __x86_64__ cpu_pda(cpu)->pcurrent = idle; cpu_pda(cpu)->cpunumber = cpu; ---- head-2010-05-25.orig/drivers/xen/fbfront/xenfb.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/fbfront/xenfb.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/fbfront/xenfb.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/fbfront/xenfb.c 2011-01-31 17:32:16.000000000 +0100 @@ -25,6 +25,7 @@ #include #include @@ -5847,8 +5749,8 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/drivers/xen/netback/loopback.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/loopback.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/netback/loopback.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/loopback.c 2011-01-31 17:32:16.000000000 +0100 @@ -54,6 +54,7 @@ #include #include /* secpath_reset() */ @@ -5857,8 +5759,8 @@ Acked-by: jbeulich@novell.com static int nloopbacks = -1; module_param(nloopbacks, int, 0); ---- head-2010-05-25.orig/drivers/xen/pciback/conf_space_header.c 2010-03-02 09:56:10.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/conf_space_header.c 2010-03-24 15:08:58.000000000 +0100 +--- 
head-2011-02-17.orig/drivers/xen/pciback/conf_space_header.c 2010-03-02 09:56:10.000000000 +0100 ++++ head-2011-02-17/drivers/xen/pciback/conf_space_header.c 2011-01-31 17:32:16.000000000 +0100 @@ -24,7 +24,7 @@ static int command_read(struct pci_dev * int ret; @@ -5885,8 +5787,8 @@ Acked-by: jbeulich@novell.com if (unlikely(verbose_request)) printk(KERN_DEBUG "pciback: %s: disable\n", pci_name(dev)); ---- head-2010-05-25.orig/drivers/xen/pciback/pciback.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/pciback.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/pciback/pciback.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/pciback/pciback.h 2011-01-31 17:32:16.000000000 +0100 @@ -100,7 +100,7 @@ void pciback_release_devices(struct pcib /* Handles events from front-end */ @@ -5896,9 +5798,9 @@ Acked-by: jbeulich@novell.com int pciback_xenbus_register(void); void pciback_xenbus_unregister(void); ---- head-2010-05-25.orig/drivers/xen/pciback/pciback_ops.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/pciback_ops.c 2010-03-24 15:08:58.000000000 +0100 -@@ -26,7 +26,7 @@ void pciback_reset_device(struct pci_dev +--- head-2011-02-17.orig/drivers/xen/pciback/pciback_ops.c 2011-02-17 10:07:33.000000000 +0100 ++++ head-2011-02-17/drivers/xen/pciback/pciback_ops.c 2011-02-17 10:07:46.000000000 +0100 +@@ -34,7 +34,7 @@ void pciback_reset_device(struct pci_dev pci_write_config_word(dev, PCI_COMMAND, 0); @@ -5907,7 +5809,7 @@ Acked-by: jbeulich@novell.com dev->is_busmaster = 0; } else { pci_read_config_word(dev, PCI_COMMAND, &cmd); -@@ -67,9 +67,9 @@ void test_and_schedule_op(struct pciback +@@ -75,9 +75,9 @@ void test_and_schedule_op(struct pciback * context because some of the pci_* functions can sleep (mostly due to ACPI * use of semaphores). 
This function is intended to be called from a work * queue in process context taking a struct pciback_device as a parameter */ @@ -5919,8 +5821,8 @@ Acked-by: jbeulich@novell.com struct pci_dev *dev; struct xen_pci_op *op = &pdev->sh_info->op; ---- head-2010-05-25.orig/drivers/xen/pciback/xenbus.c 2009-04-07 13:58:48.000000000 +0200 -+++ head-2010-05-25/drivers/xen/pciback/xenbus.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/pciback/xenbus.c 2009-04-07 13:58:48.000000000 +0200 ++++ head-2011-02-17/drivers/xen/pciback/xenbus.c 2011-01-31 17:32:16.000000000 +0100 @@ -33,7 +33,7 @@ static struct pciback_device *alloc_pdev pdev->evtchn_irq = INVALID_EVTCHN_IRQ; pdev->be_watching = 0; @@ -5930,9 +5832,9 @@ Acked-by: jbeulich@novell.com if (pciback_init_devices(pdev)) { kfree(pdev); ---- head-2010-05-25.orig/drivers/xen/pcifront/pci_op.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pcifront/pci_op.c 2010-03-24 15:08:58.000000000 +0100 -@@ -636,9 +636,9 @@ static pci_ers_result_t pcifront_common_ +--- head-2011-02-17.orig/drivers/xen/pcifront/pci_op.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/pcifront/pci_op.c 2011-01-31 17:32:16.000000000 +0100 +@@ -634,9 +634,9 @@ static pci_ers_result_t pcifront_common_ } @@ -5944,9 +5846,9 @@ Acked-by: jbeulich@novell.com int cmd = pdev->sh_info->aer_op.cmd; pci_channel_state_t state = (pci_channel_state_t)pdev->sh_info->aer_op.err; ---- head-2010-05-25.orig/drivers/xen/pcifront/pcifront.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pcifront/pcifront.h 2010-03-24 15:08:58.000000000 +0100 -@@ -48,7 +48,7 @@ int pcifront_rescan_root(struct pcifront +--- head-2011-02-17.orig/drivers/xen/pcifront/pcifront.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/pcifront/pcifront.h 2011-01-31 17:32:16.000000000 +0100 +@@ -49,7 +49,7 @@ int pcifront_rescan_root(struct pcifront unsigned int domain, unsigned int bus); void pcifront_free_roots(struct pcifront_device *pdev); @@ -5955,19 +5857,19 @@ Acked-by: jbeulich@novell.com irqreturn_t pcifront_handler_aer(int irq, void *dev); ---- head-2010-05-25.orig/drivers/xen/pcifront/xenbus.c 2009-04-07 13:58:48.000000000 +0200 -+++ head-2010-05-25/drivers/xen/pcifront/xenbus.c 2010-03-24 15:08:58.000000000 +0100 -@@ -49,7 +49,7 @@ static struct pcifront_device *alloc_pde - pdev->evtchn = INVALID_EVTCHN; +--- head-2011-02-17.orig/drivers/xen/pcifront/xenbus.c 2010-10-05 09:58:12.000000000 +0200 ++++ head-2011-02-17/drivers/xen/pcifront/xenbus.c 2011-01-31 17:32:16.000000000 +0100 +@@ -50,7 +50,7 @@ static struct pcifront_device *alloc_pde pdev->gnt_ref = INVALID_GRANT_REF; + pdev->irq = -1; - INIT_WORK(&pdev->op_work, pcifront_do_aer, pdev); + INIT_WORK(&pdev->op_work, pcifront_do_aer); dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n", pdev, pdev->sh_info); ---- head-2010-05-25.orig/drivers/xen/scsiback/interface.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsiback/interface.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/scsiback/interface.c 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-02-17/drivers/xen/scsiback/interface.c 2011-01-31 17:32:16.000000000 +0100 @@ -40,7 +40,7 @@ #include @@ -5977,9 +5879,9 @@ Acked-by: jbeulich@novell.com struct vscsibk_info *vscsibk_info_alloc(domid_t domid) { ---- head-2010-05-25.orig/drivers/xen/scsiback/scsiback.c 2010-03-24 15:06:12.000000000 +0100 -+++ 
head-2010-05-25/drivers/xen/scsiback/scsiback.c 2010-03-24 15:08:58.000000000 +0100 -@@ -349,13 +349,11 @@ static int scsiback_merge_bio(struct req +--- head-2011-02-17.orig/drivers/xen/scsiback/scsiback.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/scsiback/scsiback.c 2011-01-31 17:32:16.000000000 +0100 +@@ -341,13 +341,11 @@ static int scsiback_merge_bio(struct req if (!rq->bio) blk_rq_bio_prep(q, rq, bio); @@ -5994,8 +5896,8 @@ Acked-by: jbeulich@novell.com } return 0; ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:32:16.000000000 +0100 @@ -465,7 +465,7 @@ netfront_accel_enqueue_skb_multi(netfron if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -6014,8 +5916,8 @@ Acked-by: jbeulich@novell.com } NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT (skb, idx, frag_data, frag_len, { ---- head-2010-05-25.orig/drivers/xen/tpmback/interface.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/tpmback/interface.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/tpmback/interface.c 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-02-17/drivers/xen/tpmback/interface.c 2011-01-31 17:32:16.000000000 +0100 @@ -16,7 +16,7 @@ #include #include @@ -6025,9 +5927,9 @@ Acked-by: jbeulich@novell.com int num_frontends = 0; LIST_HEAD(tpmif_list); ---- head-2010-05-25.orig/drivers/xen/usbback/usbback.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbback/usbback.c 2010-03-24 15:08:58.000000000 +0100 -@@ -540,9 +540,10 @@ struct set_interface_request { +--- head-2011-02-17.orig/drivers/xen/usbback/usbback.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/usbback/usbback.c 2011-01-31 17:32:16.000000000 +0100 +@@ -534,9 +534,10 @@ struct set_interface_request { struct work_struct work; }; @@ -6040,7 +5942,7 @@ Acked-by: jbeulich@novell.com pending_req_t *pending_req = req->pending_req; struct usb_device *udev = req->pending_req->stub->udev; -@@ -570,7 +571,7 @@ static int usbbk_set_interface(pending_r +@@ -564,7 +565,7 @@ static int usbbk_set_interface(pending_r req->pending_req = pending_req; req->interface = interface; req->alternate = alternate; @@ -6049,7 +5951,7 @@ Acked-by: jbeulich@novell.com usb_get_dev(udev); schedule_work(&req->work); return 0; -@@ -582,9 +583,10 @@ struct clear_halt_request { +@@ -576,9 +577,10 @@ struct clear_halt_request { struct work_struct work; }; @@ -6062,7 +5964,7 @@ Acked-by: jbeulich@novell.com pending_req_t *pending_req = req->pending_req; struct usb_device *udev = req->pending_req->stub->udev; int ret; -@@ -610,7 +612,7 @@ static int usbbk_clear_halt(pending_req_ +@@ -604,7 +606,7 @@ static int usbbk_clear_halt(pending_req_ return -ENOMEM; req->pending_req = pending_req; req->pipe = pipe; @@ -6071,7 +5973,7 @@ Acked-by: jbeulich@novell.com usb_get_dev(udev); schedule_work(&req->work); -@@ -623,9 +625,10 @@ struct port_reset_request { +@@ -617,9 +619,10 @@ struct port_reset_request { struct work_struct work; }; @@ -6084,7 +5986,7 @@ Acked-by: jbeulich@novell.com pending_req_t *pending_req = req->pending_req; struct usb_device *udev = pending_req->stub->udev; int ret, ret_lock; -@@ -654,7 +657,7 @@ static int usbbk_port_reset(pending_req_ +@@ -648,7 +651,7 @@ static int 
usbbk_port_reset(pending_req_ return -ENOMEM; req->pending_req = pending_req; @@ -6093,8 +5995,8 @@ Acked-by: jbeulich@novell.com usb_get_dev(udev); schedule_work(&req->work); ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_comms.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_comms.c 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:32:16.000000000 +0100 @@ -49,8 +49,8 @@ static int xenbus_irq; @@ -6106,9 +6008,9 @@ Acked-by: jbeulich@novell.com static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-01-26 09:08:16.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:08:58.000000000 +0100 -@@ -856,7 +856,7 @@ void unregister_xenstore_notifier(struct +--- head-2011-02-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:51:15.000000000 +0100 ++++ head-2011-02-17/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:32:16.000000000 +0100 +@@ -860,7 +860,7 @@ void unregister_xenstore_notifier(struct EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); @@ -6117,8 +6019,8 @@ Acked-by: jbeulich@novell.com { BUG_ON(!is_xenstored_ready()); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -4,8 +4,6 @@ #include #include @@ -6304,8 +6206,8 @@ Acked-by: jbeulich@novell.com #endif /* !__ASSEMBLY__ */ #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -13,13 +13,16 @@ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H @@ -6324,8 +6226,8 @@ Acked-by: jbeulich@novell.com #ifndef __ASSEMBLY__ #include ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/highmem.h 2008-10-29 09:55:56.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2008-10-29 09:55:56.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:32:16.000000000 +0100 @@ -85,7 +85,7 @@ static inline void clear_user_highpage(s void copy_highpage(struct page *to, struct page *from); @@ -6335,8 +6237,8 @@ Acked-by: jbeulich@novell.com { copy_highpage(to, from); } ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:32:16.000000000 +0100 @@ -47,15 +47,6 @@ #include #include @@ -6353,8 +6255,8 @@ Acked-by: jbeulich@novell.com extern shared_info_t *HYPERVISOR_shared_info; ---- 
head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2007-06-12 13:14:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -22,9 +22,6 @@ #define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask) @@ -6449,8 +6351,8 @@ Acked-by: jbeulich@novell.com #endif /* __ASSEMBLY__ */ /* ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2007-06-12 13:14:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -27,14 +27,13 @@ static inline void enter_lazy_tlb(struct static inline void __prepare_arch_switch(void) { @@ -6490,8 +6392,8 @@ Acked-by: jbeulich@novell.com static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:32:16.000000000 +0100 @@ -1,8 +1,6 @@ #ifndef _I386_PGTABLE_3LEVEL_H #define _I386_PGTABLE_3LEVEL_H @@ -6590,8 +6492,8 @@ Acked-by: jbeulich@novell.com #define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \ ((_pte).pte_high << (32-PAGE_SHIFT))) #define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:37:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -38,14 +38,14 @@ struct vm_area_struct; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; @@ -6640,8 +6542,8 @@ Acked-by: jbeulich@novell.com #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define ptep_get_and_clear_full(mm, addr, ptep, full) \ ((full) ? 
({ \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -20,6 +20,7 @@ #include #include @@ -6809,7 +6711,7 @@ Acked-by: jbeulich@novell.com #define cpu_relax() rep_nop() -+#define paravirt_enabled() 0 ++#define paravirt_enabled() 1 +#define __cpuid xen_cpuid + +#ifndef CONFIG_X86_NO_TSS @@ -6920,8 +6822,8 @@ Acked-by: jbeulich@novell.com +extern void secondary_cpu_init(void); + #endif /* __ASM_I386_PROCESSOR_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -8,6 +8,7 @@ #include #include @@ -6939,8 +6841,8 @@ Acked-by: jbeulich@novell.com extern cpumask_t cpu_possible_map; #define cpu_callin_map cpu_possible_map ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:32:16.000000000 +0100 @@ -139,17 +139,17 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" #define write_cr4(x) \ __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) @@ -6965,8 +6867,8 @@ Acked-by: jbeulich@novell.com static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc_64.h 2008-01-28 12:24:19.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc_64.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/desc_64.h 2008-01-28 12:24:19.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/desc_64.h 2011-01-31 17:32:16.000000000 +0100 @@ -9,62 +9,11 @@ #include @@ -7031,8 +6933,8 @@ Acked-by: jbeulich@novell.com extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS]; extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:37:14.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 17:32:16.000000000 +0100 @@ -237,19 +237,18 @@ extern unsigned int __kernel_page_user; static inline unsigned long pgd_bad(pgd_t pgd) @@ -7072,8 +6974,8 @@ Acked-by: jbeulich@novell.com #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:08:58.000000000 +0100 +--- 
head-2011-02-17.orig/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:32:16.000000000 +0100 @@ -484,6 +484,14 @@ static inline void __mwait(unsigned long : :"a" (eax), "c" (ecx)); } @@ -7089,8 +6991,8 @@ Acked-by: jbeulich@novell.com extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); #define stack_current() \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:08:58.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:32:16.000000000 +0100 @@ -88,11 +88,6 @@ extern u8 x86_cpu_to_log_apicid[NR_CPUS] extern u8 bios_cpu_apicid[]; @@ -7117,9 +7019,35 @@ Acked-by: jbeulich@novell.com #endif /* !CONFIG_SMP */ #endif ---- head-2010-05-25.orig/kernel/kexec.c 2010-05-25 09:20:04.000000000 +0200 -+++ head-2010-05-25/kernel/kexec.c 2010-03-24 15:08:58.000000000 +0100 -@@ -375,7 +375,7 @@ static struct page *kimage_alloc_pages(g +--- head-2011-02-17.orig/include/xen/net-util.h 2011-02-09 15:49:42.000000000 +0100 ++++ head-2011-02-17/include/xen/net-util.h 2011-02-09 15:50:19.000000000 +0100 +@@ -39,12 +39,12 @@ static inline int skb_checksum_setup(str + + switch (iph->protocol) { + case IPPROTO_TCP: +- skb->csum = offsetof(struct tcphdr, check); ++ skb->csum_offset = offsetof(struct tcphdr, check); + if (csum) + csum = &skb->h.th->check; + break; + case IPPROTO_UDP: +- skb->csum = offsetof(struct udphdr, check); ++ skb->csum_offset = offsetof(struct udphdr, check); + if (csum) + csum = &skb->h.uh->check; + break; +@@ -56,7 +56,7 @@ static inline int skb_checksum_setup(str + goto out; + } + +- if ((skb->h.raw + skb->csum + sizeof(*csum)) > skb->tail) ++ if ((skb->h.raw + skb->csum_offset + sizeof(*csum)) > skb->tail) + goto out; + + if (csum) { +--- head-2011-02-17.orig/kernel/kexec.c 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-02-17/kernel/kexec.c 2011-01-31 17:32:16.000000000 +0100 +@@ -371,7 +371,7 @@ static struct page *kimage_alloc_pages(g if (limit == ~0UL) address_bits = BITS_PER_LONG; else @@ -7128,27 +7056,3 @@ Acked-by: jbeulich@novell.com if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) { __free_pages(pages, order); ---- head-2010-05-25.orig/net/core/dev.c 2010-05-25 09:21:41.000000000 +0200 -+++ head-2010-05-25/net/core/dev.c 2010-05-25 09:21:50.000000000 +0200 -@@ -2022,10 +2022,10 @@ inline int skb_checksum_setup(struct sk_ - goto out; - switch (skb->nh.iph->protocol) { - case IPPROTO_TCP: -- skb->csum = offsetof(struct tcphdr, check); -+ skb->csum_offset = offsetof(struct tcphdr, check); - break; - case IPPROTO_UDP: -- skb->csum = offsetof(struct udphdr, check); -+ skb->csum_offset = offsetof(struct udphdr, check); - break; - default: - if (net_ratelimit()) -@@ -2034,7 +2034,7 @@ inline int skb_checksum_setup(struct sk_ - " %d packet", skb->nh.iph->protocol); - goto out; - } -- if ((skb->h.raw + skb->csum + 2) > skb->tail) -+ if ((skb->h.raw + skb->csum_offset + 2) > skb->tail) - goto out; - skb->ip_summed = CHECKSUM_PARTIAL; - skb->proto_csum_blank = 0; diff --git a/patches.xen/xen3-patch-2.6.21 b/patches.xen/xen3-patch-2.6.21 index 0a4cf7b..9a24099 100644 --- a/patches.xen/xen3-patch-2.6.21 +++ b/patches.xen/xen3-patch-2.6.21 @@ -6,9 +6,9 @@ Automatically created from 
"patches.kernel.org/patch-2.6.21" by xen-port-patches Acked-by: jbeulich@novell.com ---- head-2010-04-29.orig/arch/x86/Kconfig 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/Kconfig 2010-03-24 15:09:08.000000000 +0100 -@@ -76,13 +76,15 @@ config GENERIC_CMOS_UPDATE +--- head-2011-02-17.orig/arch/x86/Kconfig 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/Kconfig 2011-01-31 17:32:22.000000000 +0100 +@@ -86,13 +86,15 @@ config GENERIC_CMOS_UPDATE config CLOCKSOURCE_WATCHDOG def_bool y @@ -25,9 +25,9 @@ Acked-by: jbeulich@novell.com config LOCKDEP_SUPPORT def_bool y ---- head-2010-04-29.orig/arch/x86/kernel/Makefile 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/Makefile 2010-03-24 15:09:08.000000000 +0100 -@@ -138,7 +138,7 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-02-17.orig/arch/x86/kernel/Makefile 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/Makefile 2011-01-31 17:32:22.000000000 +0100 +@@ -129,7 +129,7 @@ ifeq ($(CONFIG_X86_64),y) pci-dma_64-$(CONFIG_XEN) += pci-dma_32.o endif @@ -37,8 +37,8 @@ Acked-by: jbeulich@novell.com + smpboot_$(BITS).o tsc_$(BITS).o tsc_sync.o disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += mpparse_64.o %/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := ---- head-2010-04-29.orig/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/apic/apic-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/apic/apic-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -25,6 +25,8 @@ #include #include @@ -135,8 +135,8 @@ Acked-by: jbeulich@novell.com int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; ---- head-2010-04-29.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -612,7 +612,7 @@ void __init early_cpu_init(void) struct pt_regs * __devinit idle_regs(struct pt_regs *regs) { @@ -182,8 +182,8 @@ Acked-by: jbeulich@novell.com /* Clear all 6 debug registers: */ set_debugreg(0, 0); ---- head-2010-04-29.orig/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -14,6 +14,7 @@ #include #include @@ -223,8 +223,8 @@ Acked-by: jbeulich@novell.com return sum == 0; } ---- head-2010-04-29.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:32:22.000000000 +0100 @@ -30,7 +30,7 @@ * 18(%esp) - %eax * 1C(%esp) - %ds @@ -589,8 +589,8 @@ Acked-by: jbeulich@novell.com ENTRY(kernel_thread_helper) pushl $0 # fake return address for unwinder ---- head-2010-04-29.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:08:58.000000000 +0100 -+++ 
head-2010-04-29/arch/x86/kernel/head_32-xen.S 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/head_32-xen.S 2011-01-31 17:32:22.000000000 +0100 @@ -27,6 +27,7 @@ #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id @@ -623,8 +623,8 @@ Acked-by: jbeulich@novell.com /* get the PDA pointer */ movl $boot_pda, %eax ---- head-2010-04-29.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -167,7 +167,7 @@ static inline void io_apic_write(unsigne */ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) @@ -775,8 +775,8 @@ Acked-by: jbeulich@novell.com spin_unlock_irqrestore(&ioapic_lock, flags); return 0; ---- head-2010-04-29.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/microcode-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/microcode-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -108,7 +108,7 @@ static ssize_t microcode_write (struct f return ret; } @@ -786,8 +786,8 @@ Acked-by: jbeulich@novell.com .owner = THIS_MODULE, .write = microcode_write, .open = microcode_open, ---- head-2010-04-29.orig/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -1079,7 +1079,7 @@ int mp_register_gsi(u32 gsi, int trigger static int gsi_to_irq[MAX_GSI_NUM]; @@ -806,8 +806,8 @@ Acked-by: jbeulich@novell.com gsi = pci_irq++; gsi_to_irq[irq] = gsi; } else { ---- head-2010-04-29.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -308,7 +308,7 @@ int dma_declare_coherent_memory(struct d return DMA_MEMORY_IO; @@ -817,8 +817,8 @@ Acked-by: jbeulich@novell.com out: if (mem_base) iounmap(mem_base); ---- head-2010-04-29.orig/arch/x86/kernel/pcspeaker.c 2010-04-29 09:29:50.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/pcspeaker.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/pcspeaker.c 2011-02-17 09:59:44.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/pcspeaker.c 2011-01-31 17:32:22.000000000 +0100 @@ -6,6 +6,11 @@ static __init int add_pcspkr(void) { struct platform_device *pd; @@ -831,8 +831,8 @@ Acked-by: jbeulich@novell.com pd = platform_device_register_simple("pcspkr", -1, NULL, 0); return IS_ERR(pd) ? 
PTR_ERR(pd) : 0; ---- head-2010-04-29.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/process_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -38,6 +38,7 @@ #include #include @@ -945,8 +945,8 @@ Acked-by: jbeulich@novell.com return prev_p; } ---- head-2010-04-29.orig/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -33,7 +33,6 @@ #include #include @@ -1036,8 +1036,8 @@ Acked-by: jbeulich@novell.com - * c-basic-offset:8 - * End: - */ ---- head-2010-04-29.orig/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -335,8 +335,7 @@ static void flush_tlb_others(cpumask_t c /* * i'm not happy about this global shared spinlock in the @@ -1057,8 +1057,8 @@ Acked-by: jbeulich@novell.com flush_mm = NULL; flush_va = 0; ---- head-2010-04-29.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/time-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/time-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -50,6 +50,7 @@ #include #include @@ -1097,17 +1097,7 @@ Acked-by: jbeulich@novell.com unsigned int cpu_khz; /* Detected as we calibrate the TSC */ EXPORT_SYMBOL(cpu_khz); -@@ -112,9 +105,6 @@ static DEFINE_PER_CPU(struct shadow_time - static struct timespec shadow_tv; - static u32 shadow_tv_version; - --static struct timeval monotonic_tv; --static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED; -- - /* Keep track of last time we did processing/updating of jiffies and xtime. */ - static u64 processed_system_time; /* System time (ns) at last processing. 
*/ - static DEFINE_PER_CPU(u64, processed_system_time); -@@ -209,7 +199,7 @@ static inline u64 scale_delta(u64 delta, +@@ -206,7 +199,7 @@ static inline u64 scale_delta(u64 delta, return product; } @@ -1116,7 +1106,7 @@ Acked-by: jbeulich@novell.com { u64 __cpu_khz = 1000000ULL << 32; struct vcpu_time_info *info = &vcpu_info(0)->time; -@@ -228,16 +218,6 @@ static u64 get_nsec_offset(struct shadow +@@ -225,16 +218,6 @@ static u64 get_nsec_offset(struct shadow return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); } @@ -1133,12 +1123,18 @@ Acked-by: jbeulich@novell.com static void __update_wallclock(time_t sec, long nsec) { long wtm_nsec, xtime_nsec; -@@ -350,142 +330,6 @@ void rtc_cmos_write(unsigned char val, u +@@ -347,160 +330,6 @@ void rtc_cmos_write(unsigned char val, u } EXPORT_SYMBOL(rtc_cmos_write); -#ifdef CONFIG_X86_64 - +-static struct { +- spinlock_t lock; +- struct timeval tv; +- u32 version; +-} monotonic = { .lock = SPIN_LOCK_UNLOCKED }; +- -/* - * This version of gettimeofday has microsecond resolution - * and better than microsecond precision on fast x86 machines with TSC. @@ -1151,7 +1147,7 @@ Acked-by: jbeulich@novell.com - s64 nsec; - unsigned int cpu; - struct shadow_time_info *shadow; -- u32 local_time_version; +- u32 local_time_version, monotonic_version; - - cpu = get_cpu(); - shadow = &per_cpu(shadow_time, cpu); @@ -1169,6 +1165,8 @@ Acked-by: jbeulich@novell.com - __normalize_time(&sec, &nsec); - usec += (long)nsec / NSEC_PER_USEC; - +- monotonic_version = monotonic.version; +- - if (unlikely(!time_values_up_to_date(cpu))) { - /* - * We may have blocked for a long time, @@ -1190,17 +1188,16 @@ Acked-by: jbeulich@novell.com - sec++; - } - -- spin_lock_irqsave(&monotonic_lock, flags); -- if ((sec > monotonic_tv.tv_sec) || -- ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec))) -- { -- monotonic_tv.tv_sec = sec; -- monotonic_tv.tv_usec = usec; -- } else { -- sec = monotonic_tv.tv_sec; -- usec = monotonic_tv.tv_usec; +- spin_lock_irqsave(&monotonic.lock, flags); +- if (unlikely(sec < monotonic.tv.tv_sec) || +- (sec == monotonic.tv.tv_sec && usec <= monotonic.tv.tv_usec)) { +- sec = monotonic.tv.tv_sec; +- usec = monotonic.tv.tv_usec; +- } else if (likely(monotonic_version == monotonic.version)) { +- monotonic.tv.tv_sec = sec; +- monotonic.tv.tv_usec = usec; - } -- spin_unlock_irqrestore(&monotonic_lock, flags); +- spin_unlock_irqrestore(&monotonic.lock, flags); - - tv->tv_sec = sec; - tv->tv_usec = usec; @@ -1208,6 +1205,16 @@ Acked-by: jbeulich@novell.com - -EXPORT_SYMBOL(do_gettimeofday); - +-/* Reset monotonic gettimeofday() timeval. */ +-static inline void monotonic_reset(void) +-{ +- spin_lock(&monotonic.lock); +- monotonic.tv.tv_sec = 0; +- monotonic.tv.tv_usec = 0; +- ++monotonic.version; +- spin_unlock(&monotonic.lock); +-} +- -int do_settimeofday(struct timespec *tv) -{ - time_t sec; @@ -1216,6 +1223,11 @@ Acked-by: jbeulich@novell.com - struct shadow_time_info *shadow; - struct xen_platform_op op; - +- if (unlikely(!tv)) { +- monotonic_reset(); +- return 0; +- } +- - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - @@ -1255,11 +1267,7 @@ Acked-by: jbeulich@novell.com - } - ntp_clear(); - -- /* Reset monotonic gettimeofday() timeval. 
*/ -- spin_lock(&monotonic_lock); -- monotonic_tv.tv_sec = 0; -- monotonic_tv.tv_usec = 0; -- spin_unlock(&monotonic_lock); +- monotonic_reset(); - - write_sequnlock_irq(&xtime_lock); - @@ -1276,7 +1284,7 @@ Acked-by: jbeulich@novell.com static void sync_xen_wallclock(unsigned long dummy); static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0); static void sync_xen_wallclock(unsigned long dummy) -@@ -534,15 +378,7 @@ static int set_rtc_mmss(unsigned long no +@@ -549,15 +378,7 @@ static int set_rtc_mmss(unsigned long no return retval; } @@ -1292,7 +1300,7 @@ Acked-by: jbeulich@novell.com { unsigned int cpu = get_cpu(); struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); -@@ -562,21 +398,18 @@ unsigned long long sched_clock(void) +@@ -577,21 +398,18 @@ unsigned long long sched_clock(void) return time; } @@ -1320,7 +1328,7 @@ Acked-by: jbeulich@novell.com # ifdef CONFIG_FRAME_POINTER # ifdef __i386__ return ((unsigned long *)regs->ebp)[1]; -@@ -585,14 +418,11 @@ unsigned long profile_pc(struct pt_regs +@@ -600,14 +418,11 @@ unsigned long profile_pc(struct pt_regs # endif # else # ifdef __i386__ @@ -1337,7 +1345,7 @@ Acked-by: jbeulich@novell.com /* Return address is either directly at stack pointer or above a saved eflags. Eflags has bits 22-31 zero, kernel addresses don't. */ -@@ -746,19 +576,6 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -764,19 +579,6 @@ irqreturn_t timer_interrupt(int irq, voi return IRQ_HANDLED; } @@ -1357,7 +1365,7 @@ Acked-by: jbeulich@novell.com void mark_tsc_unstable(void) { #ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */ -@@ -814,21 +631,9 @@ static struct clocksource clocksource_xe +@@ -832,21 +634,9 @@ static struct clocksource clocksource_xe .mask = CLOCKSOURCE_MASK(64), .mult = 1 << XEN_SHIFT, /* time directly in nanoseconds */ .shift = XEN_SHIFT, @@ -1380,7 +1388,7 @@ Acked-by: jbeulich@novell.com static void init_missing_ticks_accounting(unsigned int cpu) { struct vcpu_register_runstate_memory_area area; -@@ -849,7 +654,7 @@ static void init_missing_ticks_accountin +@@ -867,7 +657,7 @@ static void init_missing_ticks_accountin } /* not static: needed by APM */ @@ -1389,7 +1397,7 @@ Acked-by: jbeulich@novell.com { unsigned long retval; unsigned long flags; -@@ -862,11 +667,11 @@ unsigned long get_cmos_time(void) +@@ -880,11 +670,11 @@ unsigned long get_cmos_time(void) return retval; } @@ -1402,7 +1410,7 @@ Acked-by: jbeulich@novell.com static void sync_cmos_clock(unsigned long dummy) { -@@ -910,7 +715,8 @@ static void sync_cmos_clock(unsigned lon +@@ -928,7 +718,8 @@ static void sync_cmos_clock(unsigned lon void notify_arch_cmos_timer(void) { @@ -1412,7 +1420,7 @@ Acked-by: jbeulich@novell.com mod_timer(&sync_xen_wallclock_timer, jiffies + 1); } -@@ -943,29 +749,11 @@ static int time_init_device(void) +@@ -961,29 +752,11 @@ static int time_init_device(void) device_initcall(time_init_device); @@ -1442,7 +1450,7 @@ Acked-by: jbeulich@novell.com static void setup_cpu0_timer_irq(void) { per_cpu(timer_irq, 0) = -@@ -973,7 +761,7 @@ static void setup_cpu0_timer_irq(void) +@@ -991,7 +764,7 @@ static void setup_cpu0_timer_irq(void) VIRQ_TIMER, 0, timer_interrupt, @@ -1451,7 +1459,7 @@ Acked-by: jbeulich@novell.com "timer0", NULL); BUG_ON(per_cpu(timer_irq, 0) < 0); -@@ -985,16 +773,9 @@ static struct vcpu_set_periodic_timer xe +@@ -1003,16 +776,9 @@ static struct vcpu_set_periodic_timer xe void __init time_init(void) { @@ -1471,7 +1479,7 @@ Acked-by: jbeulich@novell.com switch 
(HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0, &xen_set_periodic_tick)) { -@@ -1013,18 +794,12 @@ void __init time_init(void) +@@ -1031,18 +797,12 @@ void __init time_init(void) per_cpu(processed_system_time, 0) = processed_system_time; init_missing_ticks_accounting(0); @@ -1494,7 +1502,7 @@ Acked-by: jbeulich@novell.com #endif /* Cannot request_irq() until kmem is initialised. */ -@@ -1194,7 +969,7 @@ int __cpuinit local_setup_timer(unsigned +@@ -1212,7 +972,7 @@ int __cpuinit local_setup_timer(unsigned irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, timer_interrupt, @@ -1503,7 +1511,7 @@ Acked-by: jbeulich@novell.com timer_name[cpu], NULL); if (irq < 0) -@@ -1283,7 +1058,7 @@ static ctl_table xen_table[] = { +@@ -1301,7 +1061,7 @@ static ctl_table xen_table[] = { }; static int __init xen_sysctl_init(void) { @@ -1512,8 +1520,8 @@ Acked-by: jbeulich@novell.com return 0; } __initcall(xen_sysctl_init); ---- head-2010-04-29.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -100,6 +100,7 @@ asmlinkage void fixup_4gb_segment(void); asmlinkage void machine_check(void); @@ -1588,8 +1596,8 @@ Acked-by: jbeulich@novell.com + return 1; +} +__setup("code_bytes=", code_bytes_setup); ---- head-2010-04-29.orig/arch/x86/mm/fault_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/fault_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/fault_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/fault_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -46,43 +46,17 @@ int unregister_page_fault_notifier(struc } EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); @@ -1660,8 +1668,8 @@ Acked-by: jbeulich@novell.com return; /* It's safe to allow irq's after cr2 has been saved and the vmalloc ---- head-2010-04-29.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -33,14 +33,16 @@ static void *__kmap_atomic(struct page * /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ @@ -1690,8 +1698,8 @@ Acked-by: jbeulich@novell.com return (void*) vaddr; } ---- head-2010-04-29.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/init_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/init_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/init_32-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -66,6 +66,7 @@ static pmd_t * __init one_md_table_init( #ifdef CONFIG_X86_PAE @@ -1708,8 +1716,8 @@ Acked-by: jbeulich@novell.com make_lowmem_page_readonly(page_table, XENFEAT_writable_page_tables); set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); ---- head-2010-04-29.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/pgtable_32-xen.c 
2011-01-31 17:32:22.000000000 +0100 @@ -149,6 +149,8 @@ void __set_fixmap (enum fixed_addresses void __init reserve_top_address(unsigned long reserve) { @@ -1719,7 +1727,7 @@ Acked-by: jbeulich@novell.com __FIXADDR_TOP = -reserve - PAGE_SIZE; __VMALLOC_RESERVE += reserve; } -@@ -258,6 +260,12 @@ void pgd_ctor(void *pgd, struct kmem_cac +@@ -259,6 +261,12 @@ void pgd_ctor(void *pgd, struct kmem_cac swapper_pg_dir + USER_PTRS_PER_PGD, KERNEL_PGD_PTRS); memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); @@ -1732,7 +1740,7 @@ Acked-by: jbeulich@novell.com pgd_list_add(pgd); spin_unlock_irqrestore(&pgd_lock, flags); } -@@ -268,6 +276,7 @@ void pgd_dtor(void *pgd, struct kmem_cac +@@ -269,6 +277,7 @@ void pgd_dtor(void *pgd, struct kmem_cac { unsigned long flags; /* can be called from interrupt context */ @@ -1740,7 +1748,7 @@ Acked-by: jbeulich@novell.com spin_lock_irqsave(&pgd_lock, flags); pgd_list_del(pgd); spin_unlock_irqrestore(&pgd_lock, flags); -@@ -292,6 +301,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) +@@ -299,6 +308,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); if (!pmd) goto out_oom; @@ -1748,7 +1756,7 @@ Acked-by: jbeulich@novell.com set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); } return pgd; -@@ -314,6 +324,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) +@@ -321,6 +331,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL); if (!pmd[i]) goto out_oom; @@ -1756,7 +1764,7 @@ Acked-by: jbeulich@novell.com } spin_lock_irqsave(&pgd_lock, flags); -@@ -354,12 +365,17 @@ pgd_t *pgd_alloc(struct mm_struct *mm) +@@ -361,12 +372,17 @@ pgd_t *pgd_alloc(struct mm_struct *mm) out_oom: if (HAVE_SHARED_KERNEL_PMD) { @@ -1778,7 +1786,7 @@ Acked-by: jbeulich@novell.com kfree(pmd); } kmem_cache_free(pgd_cache, pgd); -@@ -383,7 +399,9 @@ void pgd_free(pgd_t *pgd) +@@ -390,7 +406,9 @@ void pgd_free(pgd_t *pgd) /* in the PAE case user pgd entries are overwritten before usage */ if (PTRS_PER_PMD > 1) { for (i = 0; i < USER_PTRS_PER_PGD; ++i) { @@ -1789,8 +1797,8 @@ Acked-by: jbeulich@novell.com kmem_cache_free(pmd_cache, pmd); } ---- head-2010-04-29.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:32:22.000000000 +0100 @@ -465,7 +465,7 @@ ia32_sys_call_table: .quad sys32_vm86_warning /* vm86old */ .quad compat_sys_wait4 @@ -1815,8 +1823,8 @@ Acked-by: jbeulich@novell.com .quad sys_getcpu + .quad sys_epoll_pwait ia32_syscall_end: ---- head-2010-04-29.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2008-04-15 09:29:41.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2008-04-15 09:29:41.000000000 +0200 ++++ head-2011-02-17/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -59,7 +59,7 @@ unsigned long acpi_wakeup_address = 0; unsigned long acpi_video_flags; extern char wakeup_start, wakeup_end; @@ -1838,8 +1846,8 @@ Acked-by: jbeulich@novell.com local_flush_tlb(); } #endif ---- head-2010-04-29.orig/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:29:16.000000000 
+0100 ++++ head-2011-02-17/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -90,6 +90,13 @@ static inline int bad_addr(unsigned long return 1; } @@ -1901,8 +1909,8 @@ Acked-by: jbeulich@novell.com { if (userdef) { printk(KERN_INFO "user-defined physical RAM map:\n"); ---- head-2010-04-29.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:32:22.000000000 +0100 @@ -629,6 +629,9 @@ END(invalidate_interrupt\num) ENTRY(call_function_interrupt) apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt @@ -1913,8 +1921,8 @@ Acked-by: jbeulich@novell.com #endif ENTRY(apic_timer_interrupt) ---- head-2010-04-29.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/head64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/head64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -45,8 +45,6 @@ static void __init clear_bss(void) #define OLD_CL_BASE_ADDR 0x90000 #define OLD_CL_OFFSET 0x90022 @@ -1942,8 +1950,8 @@ Acked-by: jbeulich@novell.com #endif } ---- head-2010-04-29.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -36,6 +36,7 @@ #include #endif @@ -2758,8 +2766,8 @@ Acked-by: jbeulich@novell.com else set_ioapic_affinity_irq(irq, TARGET_CPUS); } ---- head-2010-04-29.orig/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -60,9 +60,9 @@ unsigned long mp_lapic_addr = 0; /* Processor that is doing the boot up */ unsigned int boot_cpu_id = -1U; @@ -2781,9 +2789,9 @@ Acked-by: jbeulich@novell.com return gsi; ioapic = mp_find_ioapic(gsi); ---- head-2010-04-29.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -@@ -338,14 +338,17 @@ void load_gs_index(unsigned gs) +--- head-2011-02-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:30:59.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:05.000000000 +0100 +@@ -337,14 +337,17 @@ void load_gs_index(unsigned gs) void flush_thread(void) { struct task_struct *tsk = current; @@ -2806,8 +2814,8 @@ Acked-by: jbeulich@novell.com tsk->thread.debugreg0 = 0; tsk->thread.debugreg1 = 0; ---- head-2010-04-29.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -141,7 +141,7 @@ 
EXPORT_SYMBOL_GPL(edid_info); extern int root_mountflags; @@ -3081,8 +3089,8 @@ Acked-by: jbeulich@novell.com -} -device_initcall(add_pcspkr); -#endif ---- head-2010-04-29.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -26,6 +26,7 @@ #include #include @@ -3300,8 +3308,8 @@ Acked-by: jbeulich@novell.com #endif on_each_cpu(cpu_vsyscall_init, NULL, 0, 1); hotcpu_notifier(cpu_vsyscall_notifier, 0); ---- head-2010-04-29.orig/arch/x86/mm/fault_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/fault_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/fault_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/fault_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -56,38 +56,17 @@ int unregister_page_fault_notifier(struc } EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); @@ -3367,8 +3375,8 @@ Acked-by: jbeulich@novell.com return; if (likely(regs->eflags & X86_EFLAGS_IF)) ---- head-2010-04-29.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:46:52.000000000 +0200 -+++ head-2010-04-29/arch/x86/mm/init_64-xen.c 2010-04-29 09:47:17.000000000 +0200 +--- head-2011-02-17.orig/arch/x86/mm/init_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/init_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -1145,20 +1145,30 @@ int kern_addr_valid(unsigned long addr) extern int exception_trace, page_fault_trace; @@ -3407,8 +3415,8 @@ Acked-by: jbeulich@novell.com return 0; } __initcall(x8664_sysctl_init); ---- head-2010-04-29.orig/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -350,8 +350,8 @@ static void flush_kernel_map(void *arg) void *adr = page_address(pg); if (cpu_has_clflush) @@ -3437,8 +3445,8 @@ Acked-by: jbeulich@novell.com large_pte = pte_mkhuge(large_pte); set_pte((pte_t *)pmd, large_pte); } ---- head-2010-04-29.orig/drivers/acpi/processor_extcntl.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-04-29/drivers/acpi/processor_extcntl.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/acpi/processor_extcntl.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-02-17/drivers/acpi/processor_extcntl.c 2011-01-31 17:32:22.000000000 +0100 @@ -32,9 +32,8 @@ #define ACPI_PROCESSOR_COMPONENT 0x01000000 @@ -3479,8 +3487,8 @@ Acked-by: jbeulich@novell.com if (ACPI_FAILURE(status)) return status; ---- head-2010-04-29.orig/drivers/char/tpm/tpm_xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/char/tpm/tpm_xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/char/tpm/tpm_xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/char/tpm/tpm_xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -481,7 +481,6 @@ static struct xenbus_device_id tpmfront_ static struct xenbus_driver tpmfront = { @@ -3501,8 +3509,8 @@ Acked-by: jbeulich@novell.com } static int tpmif_allocate_tx_buffers(struct tpm_private *tp) ---- head-2010-04-29.orig/drivers/pci/msi-xen.c 2010-03-24 
15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/pci/msi-xen.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/pci/msi-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/pci/msi-xen.c 2011-01-31 17:32:22.000000000 +0100 @@ -36,8 +36,6 @@ struct msi_dev_list { struct list_head list; spinlock_t pirq_list_lock; @@ -4047,8 +4055,8 @@ Acked-by: jbeulich@novell.com dev->irq = msi_dev_entry->default_irq; } ---- head-2010-04-29.orig/drivers/xen/balloon/sysfs.c 2009-06-09 15:01:37.000000000 +0200 -+++ head-2010-04-29/drivers/xen/balloon/sysfs.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/balloon/sysfs.c 2009-06-09 15:01:37.000000000 +0200 ++++ head-2011-02-17/drivers/xen/balloon/sysfs.c 2011-01-31 17:32:22.000000000 +0100 @@ -34,6 +34,7 @@ #include #include @@ -4057,9 +4065,9 @@ Acked-by: jbeulich@novell.com #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H ---- head-2010-04-29.orig/drivers/xen/blkback/xenbus.c 2010-03-22 12:00:53.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -@@ -537,7 +537,6 @@ static const struct xenbus_device_id blk +--- head-2011-02-17.orig/drivers/xen/blkback/xenbus.c 2010-11-25 09:36:37.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 +@@ -543,7 +543,6 @@ static const struct xenbus_device_id blk static struct xenbus_driver blkback = { .name = "vbd", @@ -4067,7 +4075,7 @@ Acked-by: jbeulich@novell.com .ids = blkback_ids, .probe = blkback_probe, .remove = blkback_remove, -@@ -547,5 +546,6 @@ static struct xenbus_driver blkback = { +@@ -553,5 +552,6 @@ static struct xenbus_driver blkback = { void blkif_xenbus_init(void) { @@ -4075,9 +4083,9 @@ Acked-by: jbeulich@novell.com + if (xenbus_register_backend(&blkback)) + BUG(); } ---- head-2010-04-29.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkfront/blkfront.c 2010-03-24 15:09:08.000000000 +0100 -@@ -928,7 +928,6 @@ MODULE_ALIAS("xen:vbd"); +--- head-2011-02-17.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blkfront/blkfront.c 2011-01-31 17:32:22.000000000 +0100 +@@ -942,7 +942,6 @@ MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", @@ -4085,9 +4093,9 @@ Acked-by: jbeulich@novell.com .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, ---- head-2010-04-29.orig/drivers/xen/blktap/xenbus.c 2010-04-29 09:34:47.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap/xenbus.c 2010-04-29 09:47:21.000000000 +0200 -@@ -478,7 +478,6 @@ static const struct xenbus_device_id blk +--- head-2011-02-17.orig/drivers/xen/blktap/xenbus.c 2010-11-25 09:36:37.000000000 +0100 ++++ head-2011-02-17/drivers/xen/blktap/xenbus.c 2011-01-31 17:32:22.000000000 +0100 +@@ -494,7 +494,6 @@ static const struct xenbus_device_id blk static struct xenbus_driver blktap = { .name = "tap", @@ -4095,7 +4103,7 @@ Acked-by: jbeulich@novell.com .ids = blktap_ids, .probe = blktap_probe, .remove = blktap_remove, -@@ -488,5 +487,6 @@ static struct xenbus_driver blktap = { +@@ -504,5 +503,6 @@ static struct xenbus_driver blktap = { void tap_blkif_xenbus_init(void) { @@ -4103,8 +4111,8 @@ Acked-by: jbeulich@novell.com + if (xenbus_register_backend(&blktap)) + BUG(); } ---- head-2010-04-29.orig/drivers/xen/core/evtchn.c 2010-04-23 14:11:32.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/evtchn.c 2010-03-24 
15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/core/evtchn.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/evtchn.c 2011-01-31 17:32:22.000000000 +0100 @@ -145,7 +145,7 @@ static void bind_evtchn_to_cpu(unsigned BUG_ON(!test_bit(chn, s->evtchn_mask)); @@ -4122,10 +4130,42 @@ Acked-by: jbeulich@novell.com + irq_desc[i].affinity = cpumask_of_cpu(0); memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); - memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); ---- head-2010-04-29.orig/drivers/xen/core/smpboot.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/smpboot.c 2010-03-24 15:09:08.000000000 +0100 -@@ -116,7 +116,7 @@ static int __cpuinit xen_smp_intr_init(u + for_each_possible_cpu(i) +@@ -746,10 +746,10 @@ static void ack_dynirq(unsigned int irq) + + static void end_dynirq(unsigned int irq) + { +- move_masked_irq(irq); +- +- if (!(irq_desc[irq].status & IRQ_DISABLED)) ++ if (!(irq_desc[irq].status & IRQ_DISABLED)) { ++ move_masked_irq(irq); + unmask_dynirq(irq); ++ } + } + + static struct irq_chip dynirq_chip = { +@@ -893,13 +893,14 @@ static void unmask_pirq(unsigned int irq + + static void end_pirq(unsigned int irq) + { +- move_masked_irq(irq); +- + if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) == + (IRQ_DISABLED|IRQ_PENDING)) + shutdown_pirq(irq); +- else ++ else { ++ if (!(irq_desc[irq].status & IRQ_DISABLED)) ++ move_masked_irq(irq); + unmask_pirq(irq); ++ } + } + + static struct irq_chip pirq_chip = { +--- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-01-31 17:32:22.000000000 +0100 +@@ -114,7 +114,7 @@ static int __cpuinit xen_smp_intr_init(u rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu, smp_reschedule_interrupt, @@ -4134,7 +4174,7 @@ Acked-by: jbeulich@novell.com resched_name[cpu], NULL); if (rc < 0) -@@ -127,7 +127,7 @@ static int __cpuinit xen_smp_intr_init(u +@@ -125,7 +125,7 @@ static int __cpuinit xen_smp_intr_init(u rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR, cpu, smp_call_function_interrupt, @@ -4143,7 +4183,7 @@ Acked-by: jbeulich@novell.com callfunc_name[cpu], NULL); if (rc < 0) -@@ -256,7 +256,7 @@ void __init smp_prepare_cpus(unsigned in +@@ -254,7 +254,7 @@ void __init smp_prepare_cpus(unsigned in { unsigned int cpu; struct task_struct *idle; @@ -4152,7 +4192,7 @@ Acked-by: jbeulich@novell.com struct vcpu_get_physid cpu_id; #ifdef __x86_64__ struct desc_ptr *gdt_descr; -@@ -265,14 +265,8 @@ void __init smp_prepare_cpus(unsigned in +@@ -263,14 +263,8 @@ void __init smp_prepare_cpus(unsigned in #endif apicid = 0; @@ -4168,7 +4208,7 @@ Acked-by: jbeulich@novell.com boot_cpu_data.apicid = apicid; cpu_data[0] = boot_cpu_data; -@@ -328,14 +322,8 @@ void __init smp_prepare_cpus(unsigned in +@@ -325,14 +319,8 @@ void __init smp_prepare_cpus(unsigned in XENFEAT_writable_descriptor_tables); apicid = cpu; @@ -4184,9 +4224,9 @@ Acked-by: jbeulich@novell.com cpu_data[cpu] = boot_cpu_data; cpu_data[cpu].apicid = apicid; ---- head-2010-04-29.orig/drivers/xen/fbfront/xenfb.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/fbfront/xenfb.c 2010-03-24 15:09:08.000000000 +0100 -@@ -857,7 +857,6 @@ MODULE_ALIAS("xen:vfb"); +--- head-2011-02-17.orig/drivers/xen/fbfront/xenfb.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/fbfront/xenfb.c 2011-02-17 10:08:20.000000000 +0100 +@@ -859,7 +859,6 @@ MODULE_ALIAS("xen:vfb"); static struct xenbus_driver xenfb_driver = { .name = 
"vfb", @@ -4194,8 +4234,8 @@ Acked-by: jbeulich@novell.com .ids = xenfb_ids, .probe = xenfb_probe, .remove = xenfb_remove, ---- head-2010-04-29.orig/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/fbfront/xenkbd.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/fbfront/xenkbd.c 2011-01-31 17:32:22.000000000 +0100 @@ -323,7 +323,6 @@ MODULE_ALIAS("xen:vkbd"); static struct xenbus_driver xenkbd_driver = { @@ -4204,9 +4244,9 @@ Acked-by: jbeulich@novell.com .ids = xenkbd_ids, .probe = xenkbd_probe, .remove = xenkbd_remove, ---- head-2010-04-29.orig/drivers/xen/netback/interface.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/interface.c 2010-03-24 15:09:08.000000000 +0100 -@@ -121,7 +121,7 @@ static void netbk_get_drvinfo(struct net +--- head-2011-02-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:07:27.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/interface.c 2011-02-17 10:08:03.000000000 +0100 +@@ -162,7 +162,7 @@ static void netbk_get_drvinfo(struct net struct ethtool_drvinfo *info) { strcpy(info->driver, "netbk"); @@ -4215,9 +4255,9 @@ Acked-by: jbeulich@novell.com } static const struct netif_stat { ---- head-2010-04-29.orig/drivers/xen/netback/xenbus.c 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -@@ -439,7 +439,6 @@ static const struct xenbus_device_id net +--- head-2011-02-17.orig/drivers/xen/netback/xenbus.c 2011-02-17 09:58:10.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netback/xenbus.c 2011-02-17 10:08:09.000000000 +0100 +@@ -440,7 +440,6 @@ static const struct xenbus_device_id net static struct xenbus_driver netback = { .name = "vif", @@ -4225,7 +4265,7 @@ Acked-by: jbeulich@novell.com .ids = netback_ids, .probe = netback_probe, .remove = netback_remove, -@@ -450,5 +449,6 @@ static struct xenbus_driver netback = { +@@ -451,5 +450,6 @@ static struct xenbus_driver netback = { void netif_xenbus_init(void) { @@ -4233,9 +4273,18 @@ Acked-by: jbeulich@novell.com + if (xenbus_register_backend(&netback)) + BUG(); } ---- head-2010-04-29.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netfront/netfront.c 2010-03-24 15:09:08.000000000 +0100 -@@ -1892,20 +1892,19 @@ static struct ethtool_ops network_ethtoo +--- head-2011-02-17.orig/drivers/xen/netfront/netfront.c 2011-02-09 15:35:31.000000000 +0100 ++++ head-2011-02-17/drivers/xen/netfront/netfront.c 2011-02-09 15:38:24.000000000 +0100 +@@ -1813,7 +1813,7 @@ static void netfront_get_drvinfo(struct + struct ethtool_drvinfo *info) + { + strcpy(info->driver, "netfront"); +- strcpy(info->bus_info, dev->class_dev.dev->bus_id); ++ strcpy(info->bus_info, dev->dev.parent->bus_id); + } + + static int network_connect(struct net_device *dev) +@@ -1941,20 +1941,19 @@ static struct ethtool_ops network_ethtoo }; #ifdef CONFIG_SYSFS @@ -4262,7 +4311,7 @@ Acked-by: jbeulich@novell.com struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; -@@ -1935,20 +1934,19 @@ static ssize_t store_rxbuf_min(struct cl +@@ -1984,20 +1983,19 @@ static ssize_t store_rxbuf_min(struct cl return len; } @@ -4289,7 +4338,7 @@ Acked-by: jbeulich@novell.com struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; -@@ -1978,16 +1976,15 @@ static ssize_t store_rxbuf_max(struct cl +@@ 
-2027,16 +2025,15 @@ static ssize_t store_rxbuf_max(struct cl return len; } @@ -4310,7 +4359,7 @@ Acked-by: jbeulich@novell.com __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), -@@ -1999,8 +1996,8 @@ static int xennet_sysfs_addif(struct net +@@ -2048,8 +2045,8 @@ static int xennet_sysfs_addif(struct net int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { @@ -4321,7 +4370,7 @@ Acked-by: jbeulich@novell.com if (error) goto fail; } -@@ -2008,8 +2005,7 @@ static int xennet_sysfs_addif(struct net +@@ -2057,8 +2054,7 @@ static int xennet_sysfs_addif(struct net fail: while (--i >= 0) @@ -4331,7 +4380,7 @@ Acked-by: jbeulich@novell.com return error; } -@@ -2017,10 +2013,8 @@ static void xennet_sysfs_delif(struct ne +@@ -2066,10 +2062,8 @@ static void xennet_sysfs_delif(struct ne { int i; @@ -4344,7 +4393,7 @@ Acked-by: jbeulich@novell.com } #endif /* CONFIG_SYSFS */ -@@ -2186,7 +2180,6 @@ MODULE_ALIAS("xen:vif"); +@@ -2235,7 +2229,6 @@ MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", @@ -4352,8 +4401,8 @@ Acked-by: jbeulich@novell.com .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), ---- head-2010-04-29.orig/drivers/xen/pciback/xenbus.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/pciback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/pciback/xenbus.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/pciback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 @@ -683,7 +683,6 @@ static const struct xenbus_device_id xen static struct xenbus_driver xenbus_pciback_driver = { @@ -4362,9 +4411,9 @@ Acked-by: jbeulich@novell.com .ids = xenpci_ids, .probe = pciback_xenbus_probe, .remove = pciback_xenbus_remove, ---- head-2010-04-29.orig/drivers/xen/pcifront/xenbus.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/pcifront/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -@@ -449,7 +449,6 @@ MODULE_ALIAS("xen:pci"); +--- head-2011-02-17.orig/drivers/xen/pcifront/xenbus.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/pcifront/xenbus.c 2011-01-31 17:32:22.000000000 +0100 +@@ -464,7 +464,6 @@ MODULE_ALIAS("xen:pci"); static struct xenbus_driver xenbus_pcifront_driver = { .name = "pcifront", @@ -4372,8 +4421,8 @@ Acked-by: jbeulich@novell.com .ids = xenpci_ids, .probe = pcifront_xenbus_probe, .remove = pcifront_xenbus_remove, ---- head-2010-04-29.orig/drivers/xen/scsiback/xenbus.c 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-04-29/drivers/xen/scsiback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/scsiback/xenbus.c 2009-03-18 10:39:32.000000000 +0100 ++++ head-2011-02-17/drivers/xen/scsiback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 @@ -360,7 +360,6 @@ static struct xenbus_device_id scsiback_ static struct xenbus_driver scsiback = { @@ -4382,9 +4431,9 @@ Acked-by: jbeulich@novell.com .ids = scsiback_ids, .probe = scsiback_probe, .remove = scsiback_remove, ---- head-2010-04-29.orig/drivers/xen/scsifront/xenbus.c 2010-03-31 09:56:02.000000000 +0200 -+++ head-2010-04-29/drivers/xen/scsifront/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -@@ -401,7 +401,6 @@ MODULE_ALIAS("xen:vscsi"); +--- head-2011-02-17.orig/drivers/xen/scsifront/xenbus.c 2011-02-02 12:19:11.000000000 +0100 ++++ head-2011-02-17/drivers/xen/scsifront/xenbus.c 2011-02-08 
10:03:34.000000000 +0100 +@@ -406,7 +406,6 @@ MODULE_ALIAS("xen:vscsi"); static struct xenbus_driver scsifront_driver = { .name = "vscsi", @@ -4392,8 +4441,8 @@ Acked-by: jbeulich@novell.com .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, ---- head-2010-04-29.orig/drivers/xen/tpmback/common.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/tpmback/common.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/tpmback/common.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/tpmback/common.h 2011-01-31 17:32:22.000000000 +0100 @@ -54,11 +54,11 @@ typedef struct tpmif_st { void tpmif_disconnect_complete(tpmif_t * tpmif); @@ -4408,9 +4457,9 @@ Acked-by: jbeulich@novell.com void tpmif_xenbus_exit(void); int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn); irqreturn_t tpmif_be_int(int irq, void *dev_id); ---- head-2010-04-29.orig/drivers/xen/tpmback/interface.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/tpmback/interface.c 2010-03-24 15:09:08.000000000 +0100 -@@ -160,13 +160,14 @@ void tpmif_disconnect_complete(tpmif_t * +--- head-2011-02-17.orig/drivers/xen/tpmback/interface.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/tpmback/interface.c 2011-01-31 17:32:22.000000000 +0100 +@@ -157,13 +157,14 @@ void tpmif_disconnect_complete(tpmif_t * free_tpmif(tpmif); } @@ -4427,9 +4476,9 @@ Acked-by: jbeulich@novell.com { kmem_cache_destroy(tpmif_cachep); } ---- head-2010-04-29.orig/drivers/xen/tpmback/tpmback.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/tpmback/tpmback.c 2010-03-24 15:09:08.000000000 +0100 -@@ -928,22 +928,30 @@ static int __init tpmback_init(void) +--- head-2011-02-17.orig/drivers/xen/tpmback/tpmback.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/tpmback/tpmback.c 2011-01-31 17:32:22.000000000 +0100 +@@ -918,22 +918,30 @@ static int __init tpmback_init(void) spin_lock_init(&tpm_schedule_list_lock); INIT_LIST_HEAD(&tpm_schedule_list); @@ -4464,8 +4513,8 @@ Acked-by: jbeulich@novell.com +module_exit(tpmback_exit) MODULE_LICENSE("Dual BSD/GPL"); ---- head-2010-04-29.orig/drivers/xen/tpmback/xenbus.c 2008-03-06 08:54:32.000000000 +0100 -+++ head-2010-04-29/drivers/xen/tpmback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/tpmback/xenbus.c 2008-03-06 08:54:32.000000000 +0100 ++++ head-2011-02-17/drivers/xen/tpmback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 @@ -270,7 +270,6 @@ static const struct xenbus_device_id tpm static struct xenbus_driver tpmback = { @@ -4486,8 +4535,8 @@ Acked-by: jbeulich@novell.com } void tpmif_xenbus_exit(void) ---- head-2010-04-29.orig/drivers/xen/usbback/xenbus.c 2009-11-06 10:23:23.000000000 +0100 -+++ head-2010-04-29/drivers/xen/usbback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/usbback/xenbus.c 2009-11-06 10:23:23.000000000 +0100 ++++ head-2011-02-17/drivers/xen/usbback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 @@ -320,7 +320,6 @@ static const struct xenbus_device_id usb static struct xenbus_driver usbback_driver = { @@ -4496,8 +4545,8 @@ Acked-by: jbeulich@novell.com .ids = usbback_ids, .probe = usbback_probe, .otherend_changed = frontend_changed, ---- head-2010-04-29.orig/drivers/xen/usbfront/xenbus.c 2010-03-31 09:56:02.000000000 +0200 -+++ head-2010-04-29/drivers/xen/usbfront/xenbus.c 2010-04-15 09:53:24.000000000 +0200 +--- 
head-2011-02-17.orig/drivers/xen/usbfront/xenbus.c 2010-03-31 09:56:02.000000000 +0200 ++++ head-2011-02-17/drivers/xen/usbfront/xenbus.c 2011-01-31 17:32:22.000000000 +0100 @@ -381,7 +381,6 @@ MODULE_ALIAS("xen:vusb"); static struct xenbus_driver usbfront_driver = { @@ -4506,9 +4555,9 @@ Acked-by: jbeulich@novell.com .ids = usbfront_ids, .probe = usbfront_probe, .otherend_changed = backend_changed, ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:09:08.000000000 +0100 -@@ -375,7 +375,9 @@ static void xenbus_dev_shutdown(struct d +--- head-2011-02-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:32:22.000000000 +0100 +@@ -377,7 +377,9 @@ static void xenbus_dev_shutdown(struct d } int xenbus_register_driver_common(struct xenbus_driver *drv, @@ -4519,7 +4568,7 @@ Acked-by: jbeulich@novell.com { int ret; -@@ -385,7 +387,10 @@ int xenbus_register_driver_common(struct +@@ -387,7 +389,10 @@ int xenbus_register_driver_common(struct drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) @@ -4531,7 +4580,7 @@ Acked-by: jbeulich@novell.com #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; -@@ -399,13 +404,15 @@ int xenbus_register_driver_common(struct +@@ -401,13 +406,15 @@ int xenbus_register_driver_common(struct return ret; } @@ -4549,7 +4598,7 @@ Acked-by: jbeulich@novell.com if (ret) return ret; -@@ -414,7 +421,7 @@ int xenbus_register_frontend(struct xenb +@@ -416,7 +423,7 @@ int xenbus_register_frontend(struct xenb return 0; } @@ -4558,8 +4607,8 @@ Acked-by: jbeulich@novell.com void xenbus_unregister_driver(struct xenbus_driver *drv) { ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_probe.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 16:11:24.000000000 +0100 ++++ head-2011-02-17/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 17:32:22.000000000 +0100 @@ -69,7 +69,9 @@ extern int xenbus_match(struct device *_ extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); @@ -4571,8 +4620,8 @@ Acked-by: jbeulich@novell.com extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2008-01-21 11:15:26.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 14:49:58.000000000 +0100 ++++ head-2011-02-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:32:22.000000000 +0100 @@ -172,13 +172,15 @@ static int xenbus_uevent_backend(struct return 0; } @@ -4592,8 +4641,8 @@ Acked-by: jbeulich@novell.com /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, ---- head-2010-04-29.orig/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-02-17/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:32:22.000000000 +0100 @@ -235,7 +235,7 @@ 
static int bind_virq(void) result = bind_virq_to_irqhandler(VIRQ_XENOPROF, i, @@ -4603,11 +4652,11 @@ Acked-by: jbeulich@novell.com "xenoprof", NULL); ---- head-2010-04-29.orig/arch/x86/include/asm/i8253.h 2010-04-29 09:29:50.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/i8253.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/asm/i8253.h 2011-02-17 09:59:44.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/asm/i8253.h 2011-01-31 17:32:22.000000000 +0100 @@ -8,10 +8,14 @@ - extern spinlock_t i8253_lock; + extern raw_spinlock_t i8253_lock; +#ifdef CONFIG_GENERIC_CLOCKEVENTS + @@ -4620,8 +4669,8 @@ Acked-by: jbeulich@novell.com #define inb_pit inb_p #define outb_pit outb_p ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:32:22.000000000 +0100 @@ -21,7 +21,7 @@ struct Xgt_desc_struct { extern struct Xgt_desc_struct idt_descr; @@ -4631,8 +4680,8 @@ Acked-by: jbeulich@novell.com static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:32:22.000000000 +0100 @@ -160,6 +160,19 @@ static inline void arch_leave_lazy_mmu_m #define arch_use_lazy_mmu_mode() unlikely(__get_cpu_var(xen_lazy_mmu)) #endif @@ -4662,8 +4711,8 @@ Acked-by: jbeulich@novell.com HYPERVISOR_shutdown( unsigned int reason) { ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:32:22.000000000 +0100 @@ -108,7 +108,7 @@ sysexit_scrit: /**** START OF SYSEXIT CR sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \ mov $__KERNEL_PDA, %ecx ; \ @@ -4673,8 +4722,8 @@ Acked-by: jbeulich@novell.com call evtchn_do_upcall ; \ add $4,%esp ; \ jmp ret_from_intr ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:32:22.000000000 +0100 @@ -27,13 +27,13 @@ static inline void enter_lazy_tlb(struct static inline void __prepare_arch_switch(void) { @@ -4702,8 +4751,8 @@ Acked-by: jbeulich@novell.com static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2008-07-21 11:00:33.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:09:08.000000000 +0100 +--- 
head-2011-02-17.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2008-07-21 11:00:33.000000000 +0200 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:32:22.000000000 +0100 @@ -6,12 +6,23 @@ #include /* for struct page */ #include /* for phys_to_virt and page_to_pseudophys */ @@ -4743,8 +4792,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_X86_PAE /* ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 17:32:22.000000000 +0100 @@ -275,6 +275,7 @@ static inline pte_t pte_mkhuge(pte_t pte */ #define pte_update(mm, addr, ptep) do { } while (0) @@ -4784,8 +4833,8 @@ Acked-by: jbeulich@novell.com #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:32:22.000000000 +0100 @@ -431,7 +431,7 @@ struct thread_struct { .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ @@ -4806,8 +4855,8 @@ Acked-by: jbeulich@novell.com set_fs(USER_DS); \ regs->xds = __USER_DS; \ regs->xes = __USER_DS; \ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:32:22.000000000 +0100 @@ -52,6 +52,11 @@ extern void cpu_exit_clear(void); extern void cpu_uninit(void); #endif @@ -4820,8 +4869,8 @@ Acked-by: jbeulich@novell.com /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. 
We map APIC_BASE very early in page_setup(), ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 17:32:22.000000000 +0100 @@ -416,15 +416,6 @@ static inline int pmd_large(pmd_t pte) { #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) #define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE) @@ -4838,8 +4887,8 @@ Acked-by: jbeulich@novell.com /* Change flags of a PTE */ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:32:22.000000000 +0100 @@ -7,6 +7,7 @@ #include #include @@ -4857,8 +4906,8 @@ Acked-by: jbeulich@novell.com #define NO_PROC_ID 0xFF /* No processor magic marker */ ---- head-2010-04-29.orig/include/xen/xenbus.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-04-29/include/xen/xenbus.h 2010-03-24 15:09:08.000000000 +0100 +--- head-2011-02-17.orig/include/xen/xenbus.h 2011-01-31 17:53:45.000000000 +0100 ++++ head-2011-02-17/include/xen/xenbus.h 2011-01-31 17:32:22.000000000 +0100 @@ -93,8 +93,7 @@ struct xenbus_device_id /* A xenbus driver. */ @@ -4897,9 +4946,9 @@ Acked-by: jbeulich@novell.com void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction ---- head-2010-04-29.orig/lib/swiotlb-xen.c 2009-04-07 13:58:49.000000000 +0200 -+++ head-2010-04-29/lib/swiotlb-xen.c 2010-03-24 15:09:08.000000000 +0100 -@@ -135,8 +135,8 @@ __setup("swiotlb=", setup_io_tlb_npages) +--- head-2011-02-17.orig/lib/swiotlb-xen.c 2010-09-16 13:31:46.000000000 +0200 ++++ head-2011-02-17/lib/swiotlb-xen.c 2011-01-31 17:32:22.000000000 +0100 +@@ -143,8 +143,8 @@ __setup("swiotlb=", setup_io_tlb_npages) * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the PCI DMA API. */ @@ -4910,7 +4959,7 @@ Acked-by: jbeulich@novell.com { unsigned long i, bytes; int rc; -@@ -221,7 +221,7 @@ swiotlb_init_with_default_size (size_t d +@@ -229,7 +229,7 @@ swiotlb_init_with_default_size (size_t d dma_bits); } @@ -4919,7 +4968,7 @@ Acked-by: jbeulich@novell.com swiotlb_init(void) { long ram_end; -@@ -457,8 +457,8 @@ swiotlb_full(struct device *dev, size_t +@@ -480,8 +480,8 @@ swiotlb_full(struct device *dev, size_t * When the mapping is small enough return a static buffer to limit * the damage, or panic when the transfer is too big. 
*/ @@ -4930,7 +4979,16 @@ Acked-by: jbeulich@novell.com if (size > io_tlb_overflow && do_panic) { if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) -@@ -602,7 +602,7 @@ swiotlb_map_sg(struct device *hwdev, str +@@ -562,7 +562,7 @@ swiotlb_unmap_single(struct device *hwde + * address back to the card, you must first perform a + * swiotlb_dma_sync_for_device, and then the device again owns the buffer + */ +-static inline void ++static void + swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, int target) + { +@@ -630,7 +630,7 @@ swiotlb_map_sg(struct device *hwdev, str sg[0].dma_length = 0; return 0; } @@ -4939,7 +4997,7 @@ Acked-by: jbeulich@novell.com } else sg->dma_address = dev_addr; sg->dma_length = sg->length; -@@ -624,8 +624,7 @@ swiotlb_unmap_sg(struct device *hwdev, s +@@ -652,8 +652,7 @@ swiotlb_unmap_sg(struct device *hwdev, s for (i = 0; i < nelems; i++, sg++) if (in_swiotlb_aperture(sg->dma_address)) @@ -4949,23 +5007,12 @@ Acked-by: jbeulich@novell.com sg->dma_length, dir); else gnttab_dma_unmap_page(sg->dma_address); -@@ -648,8 +647,7 @@ swiotlb_sync_sg_for_cpu(struct device *h - - for (i = 0; i < nelems; i++, sg++) - if (in_swiotlb_aperture(sg->dma_address)) -- sync_single(hwdev, -- (void *)bus_to_virt(sg->dma_address), -+ sync_single(hwdev, bus_to_virt(sg->dma_address), - sg->dma_length, dir); - } - -@@ -663,8 +661,7 @@ swiotlb_sync_sg_for_device(struct device - - for (i = 0; i < nelems; i++, sg++) - if (in_swiotlb_aperture(sg->dma_address)) -- sync_single(hwdev, -- (void *)bus_to_virt(sg->dma_address), -+ sync_single(hwdev, bus_to_virt(sg->dma_address), - sg->dma_length, dir); - } - +@@ -666,7 +665,7 @@ swiotlb_unmap_sg(struct device *hwdev, s + * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules + * and usage. + */ +-static inline void ++static void + swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, + int nelems, int dir, int target) + { diff --git a/patches.xen/xen3-patch-2.6.22 b/patches.xen/xen3-patch-2.6.22 index 1bfd042..0602b1c 100644 --- a/patches.xen/xen3-patch-2.6.22 +++ b/patches.xen/xen3-patch-2.6.22 @@ -6,9 +6,9 @@ Automatically created from "patches.kernel.org/patch-2.6.22" by xen-port-patches Acked-by: jbeulich@novell.com ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-24 15:09:15.000000000 +0100 -@@ -1709,7 +1709,7 @@ config PHYSICAL_START +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-01-31 17:32:29.000000000 +0100 +@@ -1653,7 +1653,7 @@ config PHYSICAL_START config RELOCATABLE bool "Build a relocatable kernel" @@ -17,7 +17,7 @@ Acked-by: jbeulich@novell.com default y ---help--- This builds a kernel image that retains relocation information -@@ -1769,7 +1769,6 @@ config COMPAT_VDSO +@@ -1712,7 +1712,6 @@ config COMPAT_VDSO def_bool y prompt "Compat VDSO support" depends on X86_32 || IA32_EMULATION @@ -25,7 +25,7 @@ Acked-by: jbeulich@novell.com ---help--- Map the 32-bit VDSO to the predictable old-style address too. -@@ -1992,6 +1991,7 @@ config PCI +@@ -1936,6 +1935,7 @@ config PCI bool "PCI support" default y select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC) @@ -33,16 +33,16 @@ Acked-by: jbeulich@novell.com ---help--- Find out whether you have a PCI motherboard. PCI is the name of a bus system, i.e. 
the way the CPU talks to the other stuff inside ---- head-2010-05-25.orig/arch/x86/kernel/Makefile 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/Makefile 2010-03-24 15:09:15.000000000 +0100 -@@ -141,4 +141,4 @@ endif +--- head-2011-03-11.orig/arch/x86/kernel/Makefile 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/Makefile 2011-01-31 17:32:29.000000000 +0100 +@@ -132,4 +132,4 @@ endif disabled-obj-$(CONFIG_XEN) := early-quirks.o hpet.o i8253.o i8259_$(BITS).o reboot.o \ smpboot_$(BITS).o tsc_$(BITS).o tsc_sync.o disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += mpparse_64.o -%/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := +%/head_64.o %/head_64.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := ---- head-2010-05-25.orig/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/apic/apic-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/apic/apic-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -19,7 +19,6 @@ #include #include @@ -51,9 +51,9 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:09:15.000000000 +0100 -@@ -111,11 +111,6 @@ void foo(void) +--- head-2011-03-11.orig/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:32:29.000000000 +0100 +@@ -109,11 +109,6 @@ void foo(void) OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); @@ -65,8 +65,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_PARAVIRT BLANK(); OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -22,16 +22,40 @@ #define phys_pkg_id(a,b) a #endif @@ -359,8 +359,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_HOTPLUG_CPU void __cpuinit cpu_uninit(void) { ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -167,7 +167,7 @@ mtrr_del(int reg, unsigned long base, un EXPORT_SYMBOL(mtrr_add); EXPORT_SYMBOL(mtrr_del); @@ -370,8 +370,8 @@ Acked-by: jbeulich@novell.com { } ---- head-2010-05-25.orig/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -162,26 +162,27 @@ static struct resource standard_io_resou static int __init romsignature(const unsigned char *rom) @@ -499,8 +499,8 @@ Acked-by: jbeulich@novell.com start = 0x100000ULL; size = end - start; } ---- 
head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:32:29.000000000 +0100 @@ -15,7 +15,7 @@ * I changed all the .align's to 4 (16 byte alignment), as that's faster * on a 486. @@ -607,8 +607,8 @@ Acked-by: jbeulich@novell.com movl %ecx, %fs UNWIND_ESPFIX_STACK popl %ecx ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_32-xen.S 2011-01-31 17:32:29.000000000 +0100 @@ -37,7 +37,8 @@ ENTRY(startup_32) /* Set up the stack pointer */ movl $(init_thread_union+THREAD_SIZE),%esp @@ -736,8 +736,8 @@ Acked-by: jbeulich@novell.com #if CONFIG_XEN_COMPAT <= 0x030002 /* * __xen_guest information ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -25,7 +25,6 @@ #include #include @@ -862,8 +862,8 @@ Acked-by: jbeulich@novell.com } void arch_teardown_msi_irq(unsigned int irq) ---- head-2010-05-25.orig/arch/x86/kernel/ioport_32-xen.c 2008-01-28 12:24:19.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ioport_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ioport_32-xen.c 2008-01-28 12:24:19.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ioport_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -12,10 +12,10 @@ #include #include @@ -876,8 +876,8 @@ Acked-by: jbeulich@novell.com #include /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. 
*/ ---- head-2010-05-25.orig/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -10,7 +10,6 @@ #include #include @@ -886,8 +886,8 @@ Acked-by: jbeulich@novell.com #include #include ---- head-2010-05-25.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/microcode-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/microcode-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -135,7 +135,7 @@ static int __init microcode_dev_init (vo return 0; } @@ -897,8 +897,8 @@ Acked-by: jbeulich@novell.com { misc_deregister(µcode_dev); } ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -18,7 +18,6 @@ #include #include @@ -916,8 +916,8 @@ Acked-by: jbeulich@novell.com if (!num_processors) printk(KERN_ERR "SMP mptable: no processors registered!\n"); return num_processors; ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -13,6 +13,7 @@ #include #include @@ -968,8 +968,8 @@ Acked-by: jbeulich@novell.com dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction direction) ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -21,7 +21,6 @@ #include #include @@ -1067,102 +1067,8 @@ Acked-by: jbeulich@novell.com return prev_p; } ---- head-2010-05-25.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/quirks-xen.c 2010-03-24 15:09:15.000000000 +0100 -@@ -3,12 +3,10 @@ - */ - #include - #include --#include --#include --#include - - #if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI) --static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev) -+ -+static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) - { - u8 config, rev; - u32 word; -@@ -16,7 +14,7 @@ static void __devinit verify_quirk_intel - /* BIOS may enable hardware IRQ balancing for - * E7520/E7320/E7525(revision ID 0x9 and below) - * based platforms. -- * For those platforms, make sure that the genapic is set to 'flat' -+ * Disable SW irqbalance/affinity on those platforms. 
- */ - pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); - if (rev > 0x9) -@@ -30,59 +28,20 @@ static void __devinit verify_quirk_intel - raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word); - - if (!(word & (1 << 13))) { --#ifndef CONFIG_XEN --#ifdef CONFIG_X86_64 -- if (genapic != &apic_flat) -- panic("APIC mode must be flat on this system\n"); --#elif defined(CONFIG_X86_GENERICARCH) -- if (genapic != &apic_default) -- panic("APIC mode must be default(flat) on this system. Use apic=default\n"); --#endif --#endif -- } -- -- /* put back the original value for config space*/ -- if (!(config & 0x2)) -- pci_write_config_byte(dev, 0xf4, config); --} -- --void __init quirk_intel_irqbalance(void) --{ -- u8 config, rev; -- u32 word; -- -- /* BIOS may enable hardware IRQ balancing for -- * E7520/E7320/E7525(revision ID 0x9 and below) -- * based platforms. -- * Disable SW irqbalance/affinity on those platforms. -- */ -- rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION); -- if (rev > 0x9) -- return; -- -- printk(KERN_INFO "Intel E7520/7320/7525 detected."); -- -- /* enable access to config space */ -- config = read_pci_config_byte(0, 0, 0, 0xf4); -- write_pci_config_byte(0, 0, 0, 0xf4, config|0x2); -- -- /* read xTPR register */ -- word = read_pci_config_16(0, 0, 0x40, 0x4c); -- -- if (!(word & (1 << 13))) { - struct xen_platform_op op; -- printk(KERN_INFO "Disabling irq balancing and affinity\n"); -+ -+ printk(KERN_INFO "Intel E7520/7320/7525 detected. " -+ "Disabling irq balancing and affinity\n"); - op.cmd = XENPF_platform_quirk; - op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING; - WARN_ON(HYPERVISOR_platform_op(&op)); - } - -- /* put back the original value for config space */ -+ /* put back the original value for config space*/ - if (!(config & 0x2)) -- write_pci_config_byte(0, 0, 0, 0xf4, config); -+ pci_write_config_byte(dev, 0xf4, config); - } --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, verify_quirk_intel_irqbalance); --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, verify_quirk_intel_irqbalance); --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, verify_quirk_intel_irqbalance); -- -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); - #endif ---- head-2010-05-25.orig/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -13,7 +13,6 @@ #include #include @@ -1483,8 +1389,8 @@ Acked-by: jbeulich@novell.com - return 0; -} -EXPORT_SYMBOL(smp_call_function_single); ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -79,7 +79,6 @@ #include DEFINE_SPINLOCK(i8253_lock); @@ -1601,19 +1507,20 @@ Acked-by: jbeulich@novell.com unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = 
instruction_pointer(regs); -@@ -446,10 +530,9 @@ EXPORT_SYMBOL(profile_pc); +@@ -446,11 +530,10 @@ EXPORT_SYMBOL(profile_pc); irqreturn_t timer_interrupt(int irq, void *dev_id) { s64 delta, delta_cpu, stolen, blocked; - u64 sched_time; unsigned int i, cpu = smp_processor_id(); + int schedule_clock_was_set_work = 0; struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); - struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu); + struct vcpu_runstate_info runstate; /* * Here we are in the timer irq handler. We just have irqs locally -@@ -469,20 +552,7 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -470,20 +553,7 @@ irqreturn_t timer_interrupt(int irq, voi delta -= processed_system_time; delta_cpu -= per_cpu(processed_system_time, cpu); @@ -1635,7 +1542,7 @@ Acked-by: jbeulich@novell.com } while (!time_values_up_to_date(cpu)); if ((unlikely(delta < -(s64)permitted_clock_jitter) || -@@ -525,6 +595,9 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -528,6 +598,9 @@ irqreturn_t timer_interrupt(int irq, voi * HACK: Passing NULL to account_steal_time() * ensures that the ticks are accounted as stolen. */ @@ -1645,7 +1552,7 @@ Acked-by: jbeulich@novell.com if ((stolen > 0) && (delta_cpu > 0)) { delta_cpu -= stolen; if (unlikely(delta_cpu < 0)) -@@ -540,6 +613,8 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -543,6 +616,8 @@ irqreturn_t timer_interrupt(int irq, voi * HACK: Passing idle_task to account_steal_time() * ensures that the ticks are accounted as idle/wait. */ @@ -1654,7 +1561,7 @@ Acked-by: jbeulich@novell.com if ((blocked > 0) && (delta_cpu > 0)) { delta_cpu -= blocked; if (unlikely(delta_cpu < 0)) -@@ -576,7 +651,7 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -579,7 +654,7 @@ irqreturn_t timer_interrupt(int irq, voi return IRQ_HANDLED; } @@ -1663,7 +1570,7 @@ Acked-by: jbeulich@novell.com { #ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */ tsc_unstable = 1; -@@ -584,17 +659,13 @@ void mark_tsc_unstable(void) +@@ -587,17 +662,13 @@ void mark_tsc_unstable(void) } EXPORT_SYMBOL_GPL(mark_tsc_unstable); @@ -1685,7 +1592,7 @@ Acked-by: jbeulich@novell.com if (unlikely((s64)(ret - last) < 0)) { if (last - ret > permitted_clock_jitter -@@ -613,17 +684,25 @@ static cycle_t xen_clocksource_read(void +@@ -616,17 +687,25 @@ static cycle_t xen_clocksource_read(void } for (;;) { @@ -1713,7 +1620,7 @@ Acked-by: jbeulich@novell.com static struct clocksource clocksource_xen = { .name = "xen", .rating = 400, -@@ -632,19 +711,29 @@ static struct clocksource clocksource_xe +@@ -635,19 +714,29 @@ static struct clocksource clocksource_xe .mult = 1 << XEN_SHIFT, /* time directly in nanoseconds */ .shift = XEN_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, @@ -1748,7 +1655,7 @@ Acked-by: jbeulich@novell.com per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; -@@ -720,35 +809,6 @@ void notify_arch_cmos_timer(void) +@@ -723,35 +812,6 @@ void notify_arch_cmos_timer(void) mod_timer(&sync_xen_wallclock_timer, jiffies + 1); } @@ -1784,7 +1691,7 @@ Acked-by: jbeulich@novell.com extern void (*late_time_init)(void); /* Dynamically-mapped IRQ. */ -@@ -892,21 +952,21 @@ static void start_hz_timer(void) +@@ -895,21 +955,21 @@ static void start_hz_timer(void) cpu_clear(cpu, nohz_cpu_mask); } @@ -1810,8 +1717,8 @@ Acked-by: jbeulich@novell.com /* No locking required. Interrupts are disabled on all CPUs. 
*/ void time_resume(void) ---- head-2010-05-25.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -52,7 +52,7 @@ #include #include @@ -1922,8 +1829,8 @@ Acked-by: jbeulich@novell.com unsigned long base = (kesp - uesp) & -THREAD_SIZE; unsigned long new_kesp = kesp - base; unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; ---- head-2010-05-25.orig/arch/x86/mm/fault_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/fault_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/fault_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -14,19 +14,20 @@ #include #include @@ -2039,13 +1946,13 @@ Acked-by: jbeulich@novell.com BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK); for (address = start; address >= TASK_SIZE && address < hypervisor_virt_start; -@@ -739,4 +742,3 @@ void vmalloc_sync_all(void) +@@ -752,4 +755,3 @@ void vmalloc_sync_all(void) start = address + (1UL << PMD_SHIFT); } } -#endif ---- head-2010-05-25.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -26,7 +26,7 @@ void kunmap(struct page *page) * However when holding an atomic kmap is is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. 
@@ -2088,9 +1995,9 @@ Acked-by: jbeulich@novell.com EXPORT_SYMBOL(kunmap_atomic); EXPORT_SYMBOL(kmap_atomic_to_page); EXPORT_SYMBOL(clear_highpage); ---- head-2010-05-25.orig/arch/x86/mm/hypervisor.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/hypervisor.c 2010-03-24 15:09:15.000000000 +0100 -@@ -421,13 +421,13 @@ void xen_tlb_flush_all(void) +--- head-2011-03-11.orig/arch/x86/mm/hypervisor.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/hypervisor.c 2011-01-31 17:32:29.000000000 +0100 +@@ -419,13 +419,13 @@ void xen_tlb_flush_all(void) } EXPORT_SYMBOL_GPL(xen_tlb_flush_all); @@ -2106,7 +2013,7 @@ Acked-by: jbeulich@novell.com BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0); } EXPORT_SYMBOL_GPL(xen_tlb_flush_mask); -@@ -441,14 +441,14 @@ void xen_invlpg_all(unsigned long ptr) +@@ -439,14 +439,14 @@ void xen_invlpg_all(unsigned long ptr) } EXPORT_SYMBOL_GPL(xen_invlpg_all); @@ -2123,8 +2030,8 @@ Acked-by: jbeulich@novell.com BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0); } EXPORT_SYMBOL_GPL(xen_invlpg_mask); ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/init_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -22,6 +22,7 @@ #include #include @@ -2396,8 +2303,8 @@ Acked-by: jbeulich@novell.com } void free_initmem(void) ---- head-2010-05-25.orig/arch/x86/mm/ioremap_32-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/ioremap_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:37:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/ioremap-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -13,6 +13,7 @@ #include #include @@ -2406,8 +2313,8 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -13,6 +13,7 @@ #include #include @@ -2425,8 +2332,8 @@ Acked-by: jbeulich@novell.com * -- wli */ DEFINE_SPINLOCK(pgd_lock); -@@ -245,37 +244,54 @@ static inline void pgd_list_del(pgd_t *p - set_page_private(next, (unsigned long)pprev); +@@ -246,37 +245,54 @@ static inline void pgd_list_del(pgd_t *p + page->mapping = NULL; } -void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) @@ -2498,7 +2405,7 @@ Acked-by: jbeulich@novell.com paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT); spin_lock_irqsave(&pgd_lock, flags); pgd_list_del(pgd); -@@ -284,11 +300,46 @@ void pgd_dtor(void *pgd, struct kmem_cac +@@ -285,11 +301,46 @@ void pgd_dtor(void *pgd, struct kmem_cac pgd_test_and_unpin(pgd); } @@ -2546,9 +2453,9 @@ Acked-by: jbeulich@novell.com + pmd_t **pmds = NULL; unsigned long flags; - pgd_test_and_unpin(pgd); -@@ -296,37 +347,40 @@ pgd_t *pgd_alloc(struct mm_struct *mm) - if (PTRS_PER_PMD == 1 || !pgd) + if (!pgd) +@@ -303,37 +354,40 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + if (PTRS_PER_PMD == 1) return pgd; - if (HAVE_SHARED_KERNEL_PMD) { @@ -2611,7 +2518,7 @@ Acked-by: jbeulich@novell.com spin_lock_irqsave(&pgd_lock, flags); /* Protect against 
save/restore: move below 4GB under pgd_lock. */ -@@ -341,44 +395,43 @@ pgd_t *pgd_alloc(struct mm_struct *mm) +@@ -348,44 +402,43 @@ pgd_t *pgd_alloc(struct mm_struct *mm) /* Copy kernel pmd contents and write-protect the new pmds. */ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) { @@ -2669,7 +2576,7 @@ Acked-by: jbeulich@novell.com return NULL; } -@@ -398,35 +451,24 @@ void pgd_free(pgd_t *pgd) +@@ -405,35 +458,24 @@ void pgd_free(pgd_t *pgd) /* in the PAE case user pgd entries are overwritten before usage */ if (PTRS_PER_PMD > 1) { @@ -2715,7 +2622,7 @@ Acked-by: jbeulich@novell.com } void make_lowmem_page_readonly(void *va, unsigned int feature) -@@ -723,13 +765,13 @@ void mm_pin_all(void) +@@ -730,13 +772,13 @@ void mm_pin_all(void) spin_unlock_irqrestore(&pgd_lock, flags); } @@ -2731,8 +2638,8 @@ Acked-by: jbeulich@novell.com { struct task_struct *tsk = current; ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:32:29.000000000 +0100 @@ -431,11 +431,7 @@ ia32_sys_call_table: .quad sys_symlink .quad sys_lstat @@ -2781,8 +2688,8 @@ Acked-by: jbeulich@novell.com + .quad compat_sys_timerfd + .quad sys_eventfd +ia32_syscall_end: ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -60,19 +60,6 @@ unsigned long acpi_video_flags; extern char wakeup_start, wakeup_end; @@ -2838,8 +2745,8 @@ Acked-by: jbeulich@novell.com #endif } ---- head-2010-05-25.orig/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -17,6 +17,8 @@ #include #include @@ -2899,8 +2806,8 @@ Acked-by: jbeulich@novell.com if (paddr >= (end_pfn << PAGE_SHIFT)) break; ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -11,13 +11,12 @@ #ifdef __i386__ @@ -2965,8 +2872,8 @@ Acked-by: jbeulich@novell.com - } -} - ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:32:29.000000000 +0100 @@ -1249,3 +1249,23 @@ ENTRY(call_softirq) ret CFI_ENDPROC @@ -2991,8 +2898,8 @@ Acked-by: jbeulich@novell.com + CFI_ENDPROC +ENDPROC(ignore_sysret) +#endif ---- head-2010-05-25.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 15:06:12.000000000 +0100 -+++ 
head-2010-05-25/arch/x86/kernel/head_64-xen.S 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head_64-xen.S 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_64-xen.S 2011-01-31 17:32:29.000000000 +0100 @@ -5,6 +5,7 @@ * Copyright (C) 2000 Pavel Machek * Copyright (C) 2000 Karsten Keil @@ -3087,8 +2994,8 @@ Acked-by: jbeulich@novell.com .endm .section __xen_guest ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -25,13 +25,21 @@ #include #include @@ -3170,8 +3077,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_SMP cpu_set(0, cpu_online_map); #endif ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -25,7 +25,6 @@ #include #include @@ -3280,8 +3187,8 @@ Acked-by: jbeulich@novell.com } void arch_teardown_msi_irq(unsigned int irq) ---- head-2010-05-25.orig/arch/x86/kernel/ioport_64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ioport_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ioport_64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ioport_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -13,10 +13,10 @@ #include #include @@ -3294,8 +3201,8 @@ Acked-by: jbeulich@novell.com #include /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. 
*/ ---- head-2010-05-25.orig/arch/x86/kernel/ldt_64-xen.c 2007-06-12 13:13:01.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/ldt_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ldt_64-xen.c 2007-06-12 13:13:01.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -13,7 +13,6 @@ #include #include @@ -3304,8 +3211,8 @@ Acked-by: jbeulich@novell.com #include #include ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -17,7 +17,6 @@ #include #include @@ -3323,8 +3230,8 @@ Acked-by: jbeulich@novell.com if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:05.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:39.000000000 +0100 @@ -39,6 +39,7 @@ #include #include @@ -3338,10 +3245,10 @@ Acked-by: jbeulich@novell.com #include #include -#include - #include #include #include -@@ -232,16 +232,18 @@ void __cpuinit select_idle_routine(const + #include +@@ -231,16 +231,18 @@ void __cpuinit select_idle_routine(const static int __init idle_setup (char *str) { @@ -3365,7 +3272,7 @@ Acked-by: jbeulich@novell.com /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs * regs) -@@ -546,7 +548,7 @@ __switch_to(struct task_struct *prev_p, +@@ -545,7 +547,7 @@ __switch_to(struct task_struct *prev_p, * The AMD workaround requires it to be after DS reload, or * after DS has been cleared, which we do in __prepare_arch_switch. 
*/ @@ -3374,8 +3281,8 @@ Acked-by: jbeulich@novell.com __save_init_fpu(prev_p); /* _not_ save_init_fpu() */ mcl->op = __HYPERVISOR_fpu_taskswitch; mcl->args[0] = 1; ---- head-2010-05-25.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -120,6 +120,8 @@ int bootloader_type; unsigned long saved_video_mode; @@ -3458,8 +3365,8 @@ Acked-by: jbeulich@novell.com }; ---- head-2010-05-25.orig/arch/x86/kernel/setup64-xen.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -113,9 +113,9 @@ void __init setup_per_cpu_areas(void) if (!NODE_DATA(cpu_to_node(i))) { printk("cpu with no node %d, num_online_nodes %d\n", @@ -3512,8 +3419,8 @@ Acked-by: jbeulich@novell.com } #ifndef CONFIG_X86_NO_TSS else ---- head-2010-05-25.orig/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -14,7 +14,6 @@ #include #include @@ -3577,8 +3484,8 @@ Acked-by: jbeulich@novell.com } /* ---- head-2010-05-25.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -32,6 +32,7 @@ #include #include @@ -3713,8 +3620,8 @@ Acked-by: jbeulich@novell.com if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 13, SIGSEGV) == NOTIFY_STOP) return; ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -45,14 +45,34 @@ #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) @@ -3852,8 +3759,8 @@ Acked-by: jbeulich@novell.com smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); return NOTIFY_DONE; } ---- head-2010-05-25.orig/arch/x86/mm/fault_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/fault_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/fault_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -15,22 +15,22 @@ #include #include @@ -3892,16 +3799,20 @@ Acked-by: jbeulich@novell.com if (is_prefetch(regs, address, error_code)) return; -@@ -646,7 +652,7 @@ do_sigbus: +@@ -646,10 +652,10 @@ do_sigbus: } DEFINE_SPINLOCK(pgd_lock); -struct page *pgd_list; +LIST_HEAD(pgd_list); + #define pgd_page_table(what, pg) \ +- spin_##what(&((struct 
mm_struct *)(pg)->mapping)->page_table_lock) ++ spin_##what(&((struct mm_struct *)(pg)->private)->page_table_lock) + void vmalloc_sync_all(void) { -@@ -666,8 +672,7 @@ void vmalloc_sync_all(void) +@@ -669,8 +675,7 @@ void vmalloc_sync_all(void) if (pgd_none(*pgd_ref)) continue; spin_lock(&pgd_lock); @@ -3910,9 +3821,9 @@ Acked-by: jbeulich@novell.com + list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; pgd = (pgd_t *)page_address(page) + pgd_index(address); - if (pgd_none(*pgd)) ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:47:17.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-04-29 09:47:36.000000000 +0200 + +--- head-2011-03-11.orig/arch/x86/mm/init_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -25,10 +25,12 @@ #include #include @@ -4146,8 +4057,8 @@ Acked-by: jbeulich@novell.com + SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0); +} +#endif ---- head-2010-05-25.orig/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -215,13 +215,13 @@ void mm_pin_all(void) preempt_enable(); } @@ -4208,8 +4119,8 @@ Acked-by: jbeulich@novell.com /* Handle kernel mapping too which aliases part of the * lowmem */ if (__pa(address) < KERNEL_TEXT_SIZE) { ---- head-2010-05-25.orig/drivers/char/tpm/tpm_xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/char/tpm/tpm_xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/char/tpm/tpm_xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -463,7 +463,7 @@ static int tpmif_connect(struct xenbus_d tp->backend_id = domid; @@ -4219,8 +4130,471 @@ Acked-by: jbeulich@novell.com if (err <= 0) { WPRINTK("bind_listening_port_to_irqhandler failed " "(err=%d)\n", err); ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/hwmon/Kconfig 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/hwmon/Kconfig 2011-01-31 17:32:29.000000000 +0100 +@@ -392,7 +392,7 @@ config SENSORS_GPIO_FAN + + config SENSORS_CORETEMP + tristate "Intel Core/Core2/Atom temperature sensor" +- depends on X86 && PCI && EXPERIMENTAL ++ depends on X86 && PCI && !XEN && EXPERIMENTAL + help + If you say yes here you get support for the temperature + sensor inside your CPU. Most of the family 6 CPUs +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/drivers/hwmon/coretemp-xen.c 2011-01-31 17:32:29.000000000 +0100 +@@ -0,0 +1,449 @@ ++/* ++ * coretemp.c - Linux kernel module for hardware monitoring ++ * ++ * Copyright (C) 2007 Rudolf Marek ++ * ++ * Inspired from many hwmon drivers ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA ++ * 02110-1301 USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../xen/core/domctl.h" ++ ++#define DRVNAME "coretemp" ++#define coretemp_data pdev_entry ++ ++typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_LABEL, SHOW_NAME } SHOW; ++ ++/* ++ * Functions declaration ++ */ ++ ++static struct coretemp_data *coretemp_update_device(struct device *dev); ++ ++struct pdev_entry { ++ struct list_head list; ++ struct platform_device *pdev; ++ struct class_device *class_dev; ++ struct mutex update_lock; ++ const char *name; ++ u8 x86_model, x86_mask; ++ u32 ucode_rev; ++ char valid; /* zero until following fields are valid */ ++ unsigned long last_updated; /* in jiffies */ ++ int temp; ++ int tjmax; ++ u8 alarm; ++}; ++ ++static struct coretemp_data *coretemp_update_device(struct device *dev); ++ ++/* ++ * Sysfs stuff ++ */ ++ ++static ssize_t show_name(struct device *dev, struct device_attribute ++ *devattr, char *buf) ++{ ++ int ret; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct coretemp_data *data = dev_get_drvdata(dev); ++ ++ if (attr->index == SHOW_NAME) ++ ret = sprintf(buf, "%s\n", data->name); ++ else /* show label */ ++ ret = sprintf(buf, "Core %d\n", data->pdev->id); ++ return ret; ++} ++ ++static ssize_t show_alarm(struct device *dev, struct device_attribute ++ *devattr, char *buf) ++{ ++ struct coretemp_data *data = coretemp_update_device(dev); ++ /* read the Out-of-spec log, never clear */ ++ return sprintf(buf, "%d\n", data->alarm); ++} ++ ++static ssize_t show_temp(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct coretemp_data *data = coretemp_update_device(dev); ++ int err; ++ ++ if (attr->index == SHOW_TEMP) ++ err = data->valid ? 
sprintf(buf, "%d\n", data->temp) : -EAGAIN; ++ else ++ err = sprintf(buf, "%d\n", data->tjmax); ++ return err; ++} ++ ++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, ++ SHOW_TEMP); ++static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, ++ SHOW_TJMAX); ++static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL); ++static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); ++static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); ++ ++static struct attribute *coretemp_attributes[] = { ++ &sensor_dev_attr_name.dev_attr.attr, ++ &sensor_dev_attr_temp1_label.dev_attr.attr, ++ &dev_attr_temp1_crit_alarm.attr, ++ &sensor_dev_attr_temp1_input.dev_attr.attr, ++ &sensor_dev_attr_temp1_crit.dev_attr.attr, ++ NULL ++}; ++ ++static const struct attribute_group coretemp_group = { ++ .attrs = coretemp_attributes, ++}; ++ ++static struct coretemp_data *coretemp_update_device(struct device *dev) ++{ ++ struct coretemp_data *data = dev_get_drvdata(dev); ++ ++ mutex_lock(&data->update_lock); ++ ++ if (!data->valid || time_after(jiffies, data->last_updated + HZ)) { ++ u32 eax, edx; ++ ++ data->valid = 0; ++ if (rdmsr_safe_on_pcpu(data->pdev->id, MSR_IA32_THERM_STATUS, ++ &eax, &edx) < 0) ++ eax = ~0; ++ data->alarm = (eax >> 5) & 1; ++ /* update only if data has been valid */ ++ if (eax & 0x80000000) { ++ data->temp = data->tjmax - (((eax >> 16) ++ & 0x7f) * 1000); ++ data->valid = 1; ++ } else { ++ dev_dbg(dev, "Temperature data invalid (0x%x)\n", eax); ++ } ++ data->last_updated = jiffies; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ return data; ++} ++ ++static int coretemp_probe(struct platform_device *pdev) ++{ ++ struct coretemp_data *data = platform_get_drvdata(pdev); ++ int err; ++ u32 eax, edx; ++ ++ data->name = "coretemp"; ++ mutex_init(&data->update_lock); ++ /* Tjmax default is 100 degrees C */ ++ data->tjmax = 100000; ++ ++ /* test if we can access the THERM_STATUS MSR */ ++ err = rdmsr_safe_on_pcpu(pdev->id, MSR_IA32_THERM_STATUS, &eax, &edx); ++ if (err < 0) { ++ dev_err(&pdev->dev, ++ "Unable to access THERM_STATUS MSR, giving up\n"); ++ return err; ++ } ++ ++ /* Check if we have problem with errata AE18 of Core processors: ++ Readings might stop update when processor visited too deep sleep, ++ fixed for stepping D0 (6EC). ++ */ ++ ++ if ((data->x86_model == 0xe) && (data->x86_mask < 0xc)) { ++ /* check for microcode update */ ++ if (!(data->ucode_rev + 1)) ++ dev_warn(&pdev->dev, ++ "Cannot read microcode revision of CPU\n"); ++ else if (data->ucode_rev < 0x39) { ++ err = -ENODEV; ++ dev_err(&pdev->dev, ++ "Errata AE18 not fixed, update BIOS or " ++ "microcode of the CPU!\n"); ++ return err; ++ } ++ } ++ ++ /* Some processors have Tjmax 85 following magic should detect it ++ Intel won't disclose the information without signed NDA, but ++ individuals cannot sign it. Catch(ed) 22. ++ */ ++ ++ if (((data->x86_model == 0xf) && (data->x86_mask > 3)) || ++ (data->x86_model == 0xe)) { ++ err = rdmsr_safe_on_pcpu(data->pdev->id, 0xee, &eax, &edx); ++ if (err < 0) { ++ dev_warn(&pdev->dev, ++ "Unable to access MSR 0xEE, Tjmax left at %d " ++ "degrees C\n", data->tjmax/1000); ++ } else if (eax & 0x40000000) { ++ data->tjmax = 85000; ++ } ++ } ++ ++ /* Intel says that above should not work for desktop Core2 processors, ++ but it seems to work. There is no other way how get the absolute ++ readings. Warn the user about this. First check if are desktop, ++ bit 50 of MSR_IA32_PLATFORM_ID should be 0. 
++ */ ++ ++ rdmsr_safe_on_pcpu(data->pdev->id, MSR_IA32_PLATFORM_ID, &eax, &edx); ++ ++ if ((data->x86_model == 0xf) && (!(edx & 0x00040000))) { ++ dev_warn(&pdev->dev, "Using undocumented features, absolute " ++ "temperature might be wrong!\n"); ++ } ++ ++ if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group))) ++ return err; ++ ++ data->class_dev = hwmon_device_register(&pdev->dev); ++ if (IS_ERR(data->class_dev)) { ++ err = PTR_ERR(data->class_dev); ++ dev_err(&pdev->dev, "Class registration failed (%d)\n", ++ err); ++ goto exit_class; ++ } ++ ++ return 0; ++ ++exit_class: ++ sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); ++ return err; ++} ++ ++static int coretemp_remove(struct platform_device *pdev) ++{ ++ struct coretemp_data *data = platform_get_drvdata(pdev); ++ ++ hwmon_device_unregister(data->class_dev); ++ sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); ++ return 0; ++} ++ ++static struct platform_driver coretemp_driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRVNAME, ++ }, ++ .probe = coretemp_probe, ++ .remove = coretemp_remove, ++}; ++ ++static LIST_HEAD(pdev_list); ++static DEFINE_MUTEX(pdev_list_mutex); ++ ++struct cpu_info { ++ struct pdev_entry *pdev_entry; ++ u8 x86; ++ u32 cpuid_6_eax; ++}; ++ ++static void get_cpuid_info(void *arg) ++{ ++ struct cpu_info *info = arg; ++ struct pdev_entry *pdev_entry = info->pdev_entry; ++ u32 val = cpuid_eax(1); ++ ++ info->x86 = ((val >> 8) & 0xf) + ((val >> 20) & 0xff); ++ pdev_entry->x86_model = ((val >> 4) & 0xf) | ((val >> 12) & 0xf0); ++ pdev_entry->x86_mask = val & 0xf; ++ ++ if (info->x86 != 6 || !pdev_entry->x86_model ++ || wrmsr_safe(MSR_IA32_UCODE_REV, 0, 0) < 0 ++ || (sync_core(), rdmsr_safe(MSR_IA32_UCODE_REV, ++ &val, &pdev_entry->ucode_rev)) < 0) ++ pdev_entry->ucode_rev = ~0; ++ ++ info->cpuid_6_eax = cpuid_eax(0) >= 6 ? 
cpuid_eax(6) : 0; ++} ++ ++static int coretemp_device_add(unsigned int cpu) ++{ ++ int err; ++ struct cpu_info info; ++ struct platform_device *pdev; ++ struct pdev_entry *pdev_entry; ++ ++ pdev_entry = kzalloc(sizeof(*pdev_entry), GFP_KERNEL); ++ if (!info.pdev_entry) ++ return -ENOMEM; ++ ++ info.pdev_entry = pdev_entry; ++ err = xen_set_physical_cpu_affinity(cpu); ++ if (!err) { ++ get_cpuid_info(&info); ++ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); ++ } else if (err > 0) { ++ static bool warned; ++ ++ if (!warned) { ++ warned = true; ++ printk(KERN_WARNING DRVNAME ++ "Cannot set physical CPU affinity" ++ " (assuming use of dom0_vcpus_pin)\n"); ++ } ++ err = smp_call_function_single(cpu, get_cpuid_info, &info, 1); ++ } ++ if (err) ++ goto exit_entry_free; ++ ++ /* check if family 6, models e, f */ ++ if (info.x86 != 0x6 || ++ !((pdev_entry->x86_model == 0xe) || (pdev_entry->x86_model == 0xf))) { ++ ++ /* supported CPU not found, but report the unknown ++ family 6 CPU */ ++ if ((info.x86 == 0x6) && (pdev_entry->x86_model > 0xf)) ++ printk(KERN_WARNING DRVNAME ": Unknown CPU " ++ "model 0x%x", pdev_entry->x86_model); ++ goto exit_entry_free; ++ } ++ ++ pdev = platform_device_alloc(DRVNAME, cpu); ++ if (!pdev) { ++ err = -ENOMEM; ++ printk(KERN_ERR DRVNAME ": Device allocation failed\n"); ++ goto exit_entry_free; ++ } ++ ++ platform_set_drvdata(pdev, pdev_entry); ++ pdev_entry->pdev = pdev; ++ ++ err = platform_device_add(pdev); ++ if (err) { ++ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", ++ err); ++ goto exit_device_put; ++ } ++ ++ mutex_lock(&pdev_list_mutex); ++ list_add_tail(&pdev_entry->list, &pdev_list); ++ mutex_unlock(&pdev_list_mutex); ++ ++ return 0; ++ ++exit_device_put: ++ platform_device_put(pdev); ++exit_entry_free: ++ kfree(info.pdev_entry); ++ return err; ++} ++ ++static void coretemp_device_remove(unsigned int cpu) ++{ ++ struct pdev_entry *p; ++ ++ mutex_lock(&pdev_list_mutex); ++ list_for_each_entry(p, &pdev_list, list) { ++ if (p->pdev->id == cpu) { ++ platform_device_unregister(p->pdev); ++ list_del(&p->list); ++ kfree(p); ++ } ++ } ++ mutex_unlock(&pdev_list_mutex); ++} ++ ++static int coretemp_cpu_callback(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ unsigned int cpu = (unsigned long) hcpu; ++ ++ switch (action) { ++ case CPU_ONLINE: ++ coretemp_device_add(cpu); ++ break; ++ case CPU_DEAD: ++ coretemp_device_remove(cpu); ++ break; ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block coretemp_cpu_notifier = { ++ .notifier_call = coretemp_cpu_callback, ++}; ++ ++static int __init coretemp_init(void) ++{ ++ int err = -ENODEV; ++ ++ if (!is_initial_xendomain()) ++ goto exit; ++ ++ /* quick check if we run Intel */ ++ if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL) ++ goto exit; ++ ++ err = platform_driver_register(&coretemp_driver); ++ if (err) ++ goto exit; ++ ++ err = register_pcpu_notifier(&coretemp_cpu_notifier); ++ if (err) ++ goto exit_driver_unreg; ++ ++ if (list_empty(&pdev_list)) { ++ err = -ENODEV; ++ goto exit_notifier_unreg; ++ } ++ ++ return 0; ++ ++exit_notifier_unreg: ++ unregister_pcpu_notifier(&coretemp_cpu_notifier); ++exit_driver_unreg: ++ platform_driver_unregister(&coretemp_driver); ++exit: ++ return err; ++} ++ ++static void __exit coretemp_exit(void) ++{ ++ struct pdev_entry *p, *n; ++ ++ unregister_pcpu_notifier(&coretemp_cpu_notifier); ++ mutex_lock(&pdev_list_mutex); ++ list_for_each_entry_safe(p, n, &pdev_list, list) { ++ platform_device_unregister(p->pdev); ++ list_del(&p->list); ++ 
kfree(p); ++ } ++ mutex_unlock(&pdev_list_mutex); ++ platform_driver_unregister(&coretemp_driver); ++} ++ ++MODULE_AUTHOR("Rudolf Marek "); ++MODULE_DESCRIPTION("Intel Core temperature monitor"); ++MODULE_LICENSE("GPL"); ++ ++module_init(coretemp_init) ++module_exit(coretemp_exit) +--- head-2011-03-11.orig/drivers/pci/msi-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/pci/msi-xen.c 2011-01-31 17:32:29.000000000 +0100 @@ -12,16 +12,15 @@ #include #include @@ -4432,7 +4806,7 @@ Acked-by: jbeulich@novell.com /** * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state -@@ -697,12 +695,57 @@ void pci_no_msi(void) +@@ -697,12 +695,18 @@ void pci_no_msi(void) pci_msi_enable = 0; } @@ -4449,7 +4823,7 @@ Acked-by: jbeulich@novell.com + INIT_LIST_HEAD(&dev->msi_list); #endif +} -+ + + +/* Arch hooks */ + @@ -4458,47 +4832,8 @@ Acked-by: jbeulich@novell.com +{ + return 0; +} -+ -+#ifndef CONFIG_XEN -+int __attribute__ ((weak)) -+arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) -+{ -+ return 0; -+} -+ -+int __attribute__ ((weak)) -+arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -+{ -+ struct msi_desc *entry; -+ int ret; -+ -+ list_for_each_entry(entry, &dev->msi_list, list) { -+ ret = arch_setup_msi_irq(dev, entry); -+ if (ret) -+ return ret; -+ } -+ -+ return 0; -+} - -+void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) -+{ -+ return; -+} -+ -+void __attribute__ ((weak)) -+arch_teardown_msi_irqs(struct pci_dev *dev) -+{ -+ struct msi_desc *entry; -+ -+ list_for_each_entry(entry, &dev->msi_list, list) { -+ if (entry->irq != 0) -+ arch_teardown_msi_irq(entry->irq); -+ } -+} -+#endif ---- head-2010-05-25.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/blkfront.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkfront/blkfront.c 2011-01-31 17:32:29.000000000 +0100 @@ -244,7 +244,7 @@ static int setup_blkring(struct xenbus_d info->ring_ref = err; @@ -4508,8 +4843,8 @@ Acked-by: jbeulich@novell.com if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); ---- head-2010-05-25.orig/drivers/xen/char/mem.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/char/mem.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/char/mem.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/char/mem.c 2011-01-31 17:32:29.000000000 +0100 @@ -18,7 +18,6 @@ #include #include @@ -4518,8 +4853,8 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/drivers/xen/core/hypervisor_sysfs.c 2007-07-10 09:42:30.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/hypervisor_sysfs.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/hypervisor_sysfs.c 2007-07-10 09:42:30.000000000 +0200 ++++ head-2011-03-11/drivers/xen/core/hypervisor_sysfs.c 2011-01-31 17:32:29.000000000 +0100 @@ -50,7 +50,7 @@ static int __init hypervisor_subsys_init if (!is_running_on_xen()) return -ENODEV; @@ -4529,9 +4864,9 @@ Acked-by: jbeulich@novell.com return 0; } ---- head-2010-05-25.orig/drivers/xen/core/machine_reboot.c 2008-09-01 12:07:31.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/machine_reboot.c 2010-03-24 15:09:15.000000000 +0100 -@@ -85,6 +85,8 @@ static void post_suspend(int suspend_can +--- 
head-2011-03-11.orig/drivers/xen/core/machine_reboot.c 2011-01-24 12:06:05.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/machine_reboot.c 2011-01-31 17:32:29.000000000 +0100 +@@ -80,6 +80,8 @@ static void post_suspend(int suspend_can #ifdef CONFIG_SMP cpu_initialized_map = cpu_online_map; #endif @@ -4540,9 +4875,9 @@ Acked-by: jbeulich@novell.com } shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT; ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:09:15.000000000 +0100 -@@ -160,13 +160,12 @@ static void xen_smp_intr_exit(unsigned i +--- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-01-31 17:32:29.000000000 +0100 +@@ -158,13 +158,12 @@ static void xen_smp_intr_exit(unsigned i void __cpuinit cpu_bringup(void) { @@ -4559,7 +4894,7 @@ Acked-by: jbeulich@novell.com touch_softlockup_watchdog(); preempt_disable(); local_irq_enable(); -@@ -186,11 +185,6 @@ static void __cpuinit cpu_initialize_con +@@ -184,11 +183,6 @@ static void __cpuinit cpu_initialize_con static DEFINE_SPINLOCK(ctxt_lock); struct task_struct *idle = idle_task(cpu); @@ -4571,7 +4906,7 @@ Acked-by: jbeulich@novell.com if (cpu_test_and_set(cpu, cpu_initialized_map)) return; -@@ -213,11 +207,11 @@ static void __cpuinit cpu_initialize_con +@@ -211,11 +205,11 @@ static void __cpuinit cpu_initialize_con smp_trap_init(ctxt.trap_ctxt); ctxt.ldt_ents = 0; @@ -4586,7 +4921,7 @@ Acked-by: jbeulich@novell.com ctxt.user_regs.cs = __KERNEL_CS; ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs); -@@ -230,7 +224,11 @@ static void __cpuinit cpu_initialize_con +@@ -228,7 +222,11 @@ static void __cpuinit cpu_initialize_con ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback; ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); @@ -4598,7 +4933,7 @@ Acked-by: jbeulich@novell.com ctxt.user_regs.cs = __KERNEL_CS; ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs); -@@ -260,9 +258,8 @@ void __init smp_prepare_cpus(unsigned in +@@ -258,9 +256,8 @@ void __init smp_prepare_cpus(unsigned in struct vcpu_get_physid cpu_id; #ifdef __x86_64__ struct desc_ptr *gdt_descr; @@ -4609,7 +4944,7 @@ Acked-by: jbeulich@novell.com apicid = 0; if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0) -@@ -312,14 +309,12 @@ void __init smp_prepare_cpus(unsigned in +@@ -309,14 +306,12 @@ void __init smp_prepare_cpus(unsigned in } gdt_descr->size = GDT_SIZE; memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE); @@ -4628,7 +4963,7 @@ Acked-by: jbeulich@novell.com apicid = cpu; if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) -@@ -334,6 +329,8 @@ void __init smp_prepare_cpus(unsigned in +@@ -330,6 +325,8 @@ void __init smp_prepare_cpus(unsigned in cpu_pda(cpu)->pcurrent = idle; cpu_pda(cpu)->cpunumber = cpu; clear_tsk_thread_flag(idle, TIF_FORK); @@ -4637,7 +4972,7 @@ Acked-by: jbeulich@novell.com #endif irq_ctx_init(cpu); -@@ -358,8 +355,12 @@ void __init smp_prepare_cpus(unsigned in +@@ -354,8 +351,12 @@ void __init smp_prepare_cpus(unsigned in #endif } @@ -4651,8 +4986,8 @@ Acked-by: jbeulich@novell.com prefill_possible_map(); } ---- head-2010-05-25.orig/drivers/xen/core/xen_sysfs.c 2009-05-29 10:25:53.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/xen_sysfs.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/xen_sysfs.c 2009-05-29 10:25:53.000000000 +0200 ++++ 
head-2011-03-11/drivers/xen/core/xen_sysfs.c 2011-01-31 17:32:29.000000000 +0100 @@ -30,12 +30,12 @@ HYPERVISOR_ATTR_RO(type); static int __init xen_sysfs_type_init(void) @@ -4747,9 +5082,133 @@ Acked-by: jbeulich@novell.com } #endif ---- head-2010-05-25.orig/drivers/xen/netback/netback.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/netback.c 2010-03-24 15:09:15.000000000 +0100 -@@ -198,7 +198,7 @@ static struct sk_buff *netbk_copy_skb(st +--- head-2011-03-11.orig/drivers/xen/netback/common.h 2011-02-17 10:07:22.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/common.h 2011-02-17 10:09:57.000000000 +0100 +@@ -107,7 +107,6 @@ typedef struct netif_st { + struct list_head list; /* scheduling list */ + atomic_t refcnt; + struct net_device *dev; +- struct net_device_stats stats; + + unsigned int carrier; + +@@ -209,7 +208,6 @@ void netif_schedule_work(netif_t *netif) + void netif_deschedule_work(netif_t *netif); + + int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev); +-struct net_device_stats *netif_be_get_stats(struct net_device *dev); + irqreturn_t netif_be_int(int irq, void *dev_id); + + static inline int netbk_can_queue(struct net_device *dev) +--- head-2011-03-11.orig/drivers/xen/netback/interface.c 2011-02-17 10:08:03.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/interface.c 2011-02-17 10:10:00.000000000 +0100 +@@ -255,7 +255,6 @@ netif_t *netif_alloc(struct device *pare + init_timer(&netif->tx_queue_timeout); + + dev->hard_start_xmit = netif_be_start_xmit; +- dev->get_stats = netif_be_get_stats; + dev->open = net_open; + dev->stop = net_close; + dev->change_mtu = netbk_change_mtu; +--- head-2011-03-11.orig/drivers/xen/netback/loopback.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/loopback.c 2011-01-31 17:32:29.000000000 +0100 +@@ -62,14 +62,17 @@ MODULE_PARM_DESC(nloopbacks, "Number of + + struct net_private { + struct net_device *loopback_dev; +- struct net_device_stats stats; + int loop_idx; + }; + ++static inline struct net_private *loopback_priv(struct net_device *dev) ++{ ++ return netdev_priv(dev); ++} ++ + static int loopback_open(struct net_device *dev) + { +- struct net_private *np = netdev_priv(dev); +- memset(&np->stats, 0, sizeof(np->stats)); ++ memset(&dev->stats, 0, sizeof(dev->stats)); + netif_start_queue(dev); + return 0; + } +@@ -130,10 +133,8 @@ static int skb_remove_foreign_references + + static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev) + { +- struct net_private *np = netdev_priv(dev); +- + if (!skb_remove_foreign_references(skb)) { +- np->stats.tx_dropped++; ++ dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } +@@ -143,19 +144,17 @@ static int loopback_start_xmit(struct sk + + skb_orphan(skb); + +- np->stats.tx_bytes += skb->len; +- np->stats.tx_packets++; ++ dev->stats.tx_bytes += skb->len; ++ dev->stats.tx_packets++; + + /* Switch to loopback context. */ +- dev = np->loopback_dev; +- np = netdev_priv(dev); ++ dev = loopback_priv(dev)->loopback_dev; + +- np->stats.rx_bytes += skb->len; +- np->stats.rx_packets++; ++ dev->stats.rx_bytes += skb->len; ++ dev->stats.rx_packets++; + + skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */ + skb->protocol = eth_type_trans(skb, dev); +- skb->dev = dev; + dev->last_rx = jiffies; + + /* Flush netfilter context: rx'ed skbuffs not expected to have any. 
*/ +@@ -167,17 +166,11 @@ static int loopback_start_xmit(struct sk + return 0; + } + +-static struct net_device_stats *loopback_get_stats(struct net_device *dev) +-{ +- struct net_private *np = netdev_priv(dev); +- return &np->stats; +-} +- + static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + strcpy(info->driver, "netloop"); + snprintf(info->bus_info, ETHTOOL_BUSINFO_LEN, "vif-0-%d", +- ((struct net_private *)netdev_priv(dev))->loop_idx); ++ loopback_priv(dev)->loop_idx); + } + + static struct ethtool_ops network_ethtool_ops = +@@ -204,7 +197,7 @@ static void loopback_set_multicast_list( + static void loopback_construct(struct net_device *dev, struct net_device *lo, + int loop_idx) + { +- struct net_private *np = netdev_priv(dev); ++ struct net_private *np = loopback_priv(dev); + + np->loopback_dev = lo; + np->loop_idx = loop_idx; +@@ -212,7 +205,6 @@ static void loopback_construct(struct ne + dev->open = loopback_open; + dev->stop = loopback_close; + dev->hard_start_xmit = loopback_start_xmit; +- dev->get_stats = loopback_get_stats; + dev->set_multicast_list = loopback_set_multicast_list; + dev->change_mtu = NULL; /* allow arbitrary mtu */ + +--- head-2011-03-11.orig/drivers/xen/netback/netback.c 2011-02-09 15:35:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/netback.c 2011-02-09 15:54:33.000000000 +0100 +@@ -208,7 +208,7 @@ static struct sk_buff *netbk_copy_skb(st goto err; skb_reserve(nskb, 16 + NET_IP_ALIGN); @@ -4758,7 +5217,7 @@ Acked-by: jbeulich@novell.com if (headlen > skb_headlen(skb)) headlen = skb_headlen(skb); ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen); -@@ -244,11 +244,15 @@ static struct sk_buff *netbk_copy_skb(st +@@ -254,11 +254,15 @@ static struct sk_buff *netbk_copy_skb(st len -= copy; } @@ -4777,7 +5236,95 @@ Acked-by: jbeulich@novell.com return nskb; -@@ -1678,7 +1682,7 @@ static int __init netback_init(void) +@@ -348,7 +352,7 @@ int netif_be_start_xmit(struct sk_buff * + return 0; + + drop: +- netif->stats.tx_dropped++; ++ dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } +@@ -700,8 +704,8 @@ static void net_rx_action(unsigned long + netbk_free_pages(nr_frags, meta + npo.meta_cons + 1); + } + +- netif->stats.tx_bytes += skb->len; +- netif->stats.tx_packets++; ++ skb->dev->stats.tx_bytes += skb->len; ++ skb->dev->stats.tx_packets++; + + id = meta[npo.meta_cons].id; + flags = nr_frags ? NETRXF_more_data : 0; +@@ -786,12 +790,6 @@ static void netbk_tx_pending_timeout(uns + tasklet_schedule(&net_tx_tasklet); + } + +-struct net_device_stats *netif_be_get_stats(struct net_device *dev) +-{ +- netif_t *netif = netdev_priv(dev); +- return &netif->stats; +-} +- + static int __on_net_schedule_list(netif_t *netif) + { + return netif->list.next != NULL; +@@ -1428,10 +1426,12 @@ static void net_tx_action(unsigned long + + mop = tx_map_ops; + while ((skb = __skb_dequeue(&tx_queue)) != NULL) { ++ struct net_device *dev; + netif_tx_request_t *txp; + + pending_idx = *((u16 *)skb->data); + netif = pending_tx_info[pending_idx].netif; ++ dev = netif->dev; + txp = &pending_tx_info[pending_idx].req; + + /* Check the remap error code. 
*/ +@@ -1439,6 +1439,7 @@ static void net_tx_action(unsigned long + DPRINTK("netback grant failed.\n"); + skb_shinfo(skb)->nr_frags = 0; + kfree_skb(skb); ++ dev->stats.rx_dropped++; + continue; + } + +@@ -1474,8 +1475,7 @@ static void net_tx_action(unsigned long + __pskb_pull_tail(skb, target - skb_headlen(skb)); + } + +- skb->dev = netif->dev; +- skb->protocol = eth_type_trans(skb, skb->dev); ++ skb->protocol = eth_type_trans(skb, dev); + + if (skb_checksum_setup(skb, &netif->rx_gso_csum_fixups)) { + DPRINTK("Can't setup checksum in net_tx_action\n"); +@@ -1483,18 +1483,19 @@ static void net_tx_action(unsigned long + continue; + } + +- netif->stats.rx_bytes += skb->len; +- netif->stats.rx_packets++; +- + if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) && + unlikely(skb_linearize(skb))) { + DPRINTK("Can't linearize skb in net_tx_action.\n"); + kfree_skb(skb); ++ dev->stats.rx_errors++; + continue; + } + ++ dev->stats.rx_bytes += skb->len; ++ dev->stats.rx_packets++; ++ + netif_rx(skb); +- netif->dev->last_rx = jiffies; ++ dev->last_rx = jiffies; + } + + out: +@@ -1691,7 +1692,7 @@ static int __init netback_init(void) (void)bind_virq_to_irqhandler(VIRQ_DEBUG, 0, netif_be_dbg, @@ -4786,9 +5333,166 @@ Acked-by: jbeulich@novell.com "net-be-dbg", &netif_be_dbg); #endif ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.c 2010-03-24 15:09:15.000000000 +0100 -@@ -513,7 +513,7 @@ static int setup_device(struct xenbus_de +--- head-2011-03-11.orig/drivers/xen/netback/xenbus.c 2011-02-17 10:08:09.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/xenbus.c 2011-01-31 17:32:29.000000000 +0100 +@@ -19,6 +19,7 @@ + + #include + #include ++#include + #include + #include "common.h" + +@@ -28,11 +29,12 @@ + printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) + #endif + ++static DECLARE_RWSEM(teardown_sem); + + static int connect_rings(struct backend_info *); + static void connect(struct backend_info *); + static void backend_create_netif(struct backend_info *be); +-static void netback_disconnect(struct device *); ++static void netback_disconnect(struct device *, int); + + static int netback_remove(struct xenbus_device *dev) + { +@@ -40,21 +42,26 @@ static int netback_remove(struct xenbus_ + + netback_remove_accelerators(be, dev); + +- netback_disconnect(&dev->dev); ++ netback_disconnect(&dev->dev, 1); + kfree(be); +- dev->dev.driver_data = NULL; + return 0; + } + +-static void netback_disconnect(struct device *xbdev_dev) ++static void netback_disconnect(struct device *xbdev_dev, int clear) + { + struct backend_info *be = xbdev_dev->driver_data; + +- if (be->netif) { ++ if (be->netif) + kobject_uevent(&xbdev_dev->kobj, KOBJ_OFFLINE); ++ ++ down_write(&teardown_sem); ++ if (be->netif) { + netif_disconnect(be->netif); + be->netif = NULL; + } ++ if (clear) ++ xbdev_dev->driver_data = NULL; ++ up_write(&teardown_sem); + } + + /** +@@ -159,8 +166,7 @@ fail: + static int netback_uevent(struct xenbus_device *xdev, char **envp, + int num_envp, char *buffer, int buffer_size) + { +- struct backend_info *be = xdev->dev.driver_data; +- netif_t *netif = be->netif; ++ struct backend_info *be; + int i = 0, length = 0; + char *val; + +@@ -178,8 +184,12 @@ static int netback_uevent(struct xenbus_ + kfree(val); + } + +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, +- "vif=%s", netif->dev->name); ++ down_read(&teardown_sem); ++ be = xdev->dev.driver_data; ++ if (be && 
be->netif) ++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, ++ &length, "vif=%s", be->netif->dev->name); ++ up_read(&teardown_sem); + + envp[i] = NULL; + +@@ -192,6 +202,7 @@ static void backend_create_netif(struct + int err; + long handle; + struct xenbus_device *dev = be->dev; ++ netif_t *netif; + + if (be->netif != NULL) + return; +@@ -202,13 +213,13 @@ static void backend_create_netif(struct + return; + } + +- be->netif = netif_alloc(&dev->dev, dev->otherend_id, handle); +- if (IS_ERR(be->netif)) { +- err = PTR_ERR(be->netif); +- be->netif = NULL; ++ netif = netif_alloc(&dev->dev, dev->otherend_id, handle); ++ if (IS_ERR(netif)) { ++ err = PTR_ERR(netif); + xenbus_dev_fatal(dev, err, "creating interface"); + return; + } ++ be->netif = netif; + + kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); + } +@@ -249,7 +260,7 @@ static void frontend_changed(struct xenb + break; + + case XenbusStateClosing: +- netback_disconnect(&dev->dev); ++ netback_disconnect(&dev->dev, 0); + xenbus_switch_state(dev, XenbusStateClosing); + break; + +--- head-2011-03-11.orig/drivers/xen/netfront/accel.c 2009-05-04 10:01:03.000000000 +0200 ++++ head-2011-03-11/drivers/xen/netfront/accel.c 2011-01-31 17:32:29.000000000 +0100 +@@ -548,7 +548,7 @@ static void accelerator_remove_hooks(str + + /* Last chance to get statistics from the accelerator */ + vif_state->hooks->get_stats(vif_state->np->netdev, +- &vif_state->np->stats); ++ &vif_state->np->netdev->stats); + + spin_unlock_irqrestore(&accelerator->vif_states_lock, + flags); +@@ -604,7 +604,8 @@ static int do_remove(struct netfront_inf + spin_lock_irqsave(&accelerator->vif_states_lock, flags); + + /* Last chance to get statistics from the accelerator */ +- np->accel_vif_state.hooks->get_stats(np->netdev, &np->stats); ++ np->accel_vif_state.hooks->get_stats(np->netdev, ++ &np->netdev->stats); + + spin_unlock_irqrestore(&accelerator->vif_states_lock, + flags); +@@ -804,9 +805,9 @@ void netfront_accelerator_call_stop_napi + /* + * No lock pre-requisites. 
Takes the vif_states_lock spinlock + */ +-int netfront_accelerator_call_get_stats(struct netfront_info *np, +- struct net_device *dev) ++int netfront_accelerator_call_get_stats(struct net_device *dev) + { ++ struct netfront_info *np = netdev_priv(dev); + struct netfront_accelerator *accelerator; + unsigned long flags; + int rc = 0; +@@ -819,7 +820,7 @@ int netfront_accelerator_call_get_stats( + if (np->accel_vif_state.hooks && + np->accelerator == accelerator) + rc = np->accel_vif_state.hooks->get_stats(dev, +- &np->stats); ++ &dev->stats); + spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); + } + return rc; +--- head-2011-03-11.orig/drivers/xen/netfront/netfront.c 2011-02-09 15:38:24.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/netfront.c 2011-02-09 15:54:17.000000000 +0100 +@@ -514,7 +514,7 @@ static int setup_device(struct xenbus_de memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( @@ -4797,8 +5501,102 @@ Acked-by: jbeulich@novell.com netdev); if (err < 0) goto fail; ---- head-2010-05-25.orig/drivers/xen/pciback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/xenbus.c 2010-03-24 15:09:15.000000000 +0100 +@@ -626,8 +626,6 @@ static int network_open(struct net_devic + { + struct netfront_info *np = netdev_priv(dev); + +- memset(&np->stats, 0, sizeof(np->stats)); +- + spin_lock_bh(&np->rx_lock); + if (netfront_carrier_ok(np)) { + network_alloc_rx_buffers(dev); +@@ -1028,8 +1026,8 @@ static int network_start_xmit(struct sk_ + if (notify) + notify_remote_via_irq(np->irq); + +- np->stats.tx_bytes += skb->len; +- np->stats.tx_packets++; ++ dev->stats.tx_bytes += skb->len; ++ dev->stats.tx_packets++; + dev->trans_start = jiffies; + + /* Note: It is not safe to access skb after network_tx_buf_gc()! 
*/ +@@ -1043,7 +1041,7 @@ static int network_start_xmit(struct sk_ + return 0; + + drop: +- np->stats.tx_dropped++; ++ dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } +@@ -1362,7 +1360,7 @@ static int netif_poll(struct net_device + err: + while ((skb = __skb_dequeue(&tmpq))) + __skb_queue_tail(&errq, skb); +- np->stats.rx_errors++; ++ dev->stats.rx_errors++; + i = np->rx.rsp_cons; + continue; + } +@@ -1430,8 +1428,8 @@ err: + else + skb->ip_summed = CHECKSUM_NONE; + +- np->stats.rx_packets++; +- np->stats.rx_bytes += skb->len; ++ dev->stats.rx_packets++; ++ dev->stats.rx_bytes += skb->len; + + __skb_queue_tail(&rxq, skb); + +@@ -1686,10 +1684,8 @@ static int network_close(struct net_devi + + static struct net_device_stats *network_get_stats(struct net_device *dev) + { +- struct netfront_info *np = netdev_priv(dev); +- +- netfront_accelerator_call_get_stats(np, dev); +- return &np->stats; ++ netfront_accelerator_call_get_stats(dev); ++ return &dev->stats; + } + + static int xennet_set_mac_address(struct net_device *dev, void *p) +--- head-2011-03-11.orig/drivers/xen/netfront/netfront.h 2011-02-09 15:35:17.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/netfront.h 2011-02-09 15:54:19.000000000 +0100 +@@ -149,9 +149,6 @@ struct netfront_info { + struct list_head list; + struct net_device *netdev; + +- struct net_device_stats stats; +- unsigned long rx_gso_csum_fixups; +- + struct netif_tx_front_ring tx; + struct netif_rx_front_ring rx; + +@@ -193,6 +190,9 @@ struct netfront_info { + struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; + struct mmu_update rx_mmu[NET_RX_RING_SIZE]; + ++ /* Statistics */ ++ unsigned long rx_gso_csum_fixups; ++ + /* Private pointer to state internal to accelerator module */ + void *accel_priv; + /* The accelerator used by this netfront device */ +@@ -259,8 +259,7 @@ extern + void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, + struct net_device *dev); + extern +-int netfront_accelerator_call_get_stats(struct netfront_info *np, +- struct net_device *dev); ++int netfront_accelerator_call_get_stats(struct net_device *dev); + extern + void netfront_accelerator_add_watch(struct netfront_info *np); + +--- head-2011-03-11.orig/drivers/xen/pciback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/xenbus.c 2011-01-31 17:32:29.000000000 +0100 @@ -100,7 +100,7 @@ static int pciback_do_attach(struct pcib err = bind_interdomain_evtchn_to_irqhandler( @@ -4808,8 +5606,8 @@ Acked-by: jbeulich@novell.com if (err < 0) { xenbus_dev_fatal(pdev->xdev, err, "Error binding event channel to IRQ"); ---- head-2010-05-25.orig/drivers/xen/pcifront/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pcifront/xenbus.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/pcifront/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pcifront/xenbus.c 2011-01-31 17:32:29.000000000 +0100 @@ -11,10 +11,6 @@ #include #include "pcifront.h" @@ -4821,18 +5619,18 @@ Acked-by: jbeulich@novell.com #define INVALID_GRANT_REF (0) #define INVALID_EVTCHN (-1) -@@ -95,7 +91,7 @@ static int pcifront_publish_info(struct - goto out; +@@ -101,7 +97,7 @@ static int pcifront_publish_info(struct - bind_caller_port_to_irqhandler(pdev->evtchn, pcifront_handler_aer, -- SA_SAMPLE_RANDOM, "pcifront", pdev); -+ IRQF_SAMPLE_RANDOM, "pcifront", pdev); - - do_publish: - err = xenbus_transaction_start(&trans); ---- head-2010-05-25.orig/drivers/xen/scsifront/xenbus.c 2010-03-24 
15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsifront/xenbus.c 2010-03-24 15:09:15.000000000 +0100 -@@ -96,7 +96,7 @@ static int scsifront_alloc_ring(struct v + err = bind_caller_port_to_irqhandler(pdev->evtchn, + pcifront_handler_aer, +- SA_SAMPLE_RANDOM, ++ IRQF_SAMPLE_RANDOM, + "pcifront", pdev); + if (err < 0) { + xenbus_dev_fatal(pdev->xdev, err, +--- head-2011-03-11.orig/drivers/xen/scsifront/xenbus.c 2011-02-08 10:03:34.000000000 +0100 ++++ head-2011-03-11/drivers/xen/scsifront/xenbus.c 2011-02-08 10:03:46.000000000 +0100 +@@ -100,7 +100,7 @@ static int scsifront_alloc_ring(struct v err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, @@ -4841,8 +5639,8 @@ Acked-by: jbeulich@novell.com if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel_fwd.c 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel_fwd.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netback/accel_fwd.c 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-11/drivers/xen/sfc_netback/accel_fwd.c 2011-01-31 17:32:29.000000000 +0100 @@ -308,7 +308,7 @@ static struct netback_accel *for_a_vnic( static inline int packet_is_arp_reply(struct sk_buff *skb) { @@ -4868,8 +5666,8 @@ Acked-by: jbeulich@novell.com DPRINTK("%s: found gratuitous ARP for " MAC_FMT "\n", __FUNCTION__, MAC_ARG(mac)); ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel_solarflare.c 2010-01-18 15:23:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel_solarflare.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netback/accel_solarflare.c 2010-01-18 15:23:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netback/accel_solarflare.c 2011-01-31 17:32:29.000000000 +0100 @@ -113,7 +113,7 @@ bend_dl_tx_packet(struct efx_dl_device * BUG_ON(port == NULL); @@ -4879,8 +5677,8 @@ Acked-by: jbeulich@novell.com netback_accel_tx_packet(skb, port->fwd_priv); else { DPRINTK("Ignoring packet with missing mac address\n"); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_tso.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_tso.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel_tso.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel_tso.c 2011-01-31 17:32:29.000000000 +0100 @@ -33,10 +33,9 @@ #include "accel_tso.h" @@ -4942,8 +5740,8 @@ Acked-by: jbeulich@novell.com } tsoh_iph->tot_len = htons(ip_length); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:32:29.000000000 +0100 @@ -465,7 +465,7 @@ netfront_accel_enqueue_skb_multi(netfron if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -4962,8 +5760,16 @@ Acked-by: jbeulich@novell.com } NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT (skb, idx, frag_data, frag_len, { ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_xenbus.c 2008-02-20 09:32:49.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_xenbus.c 2010-03-24 15:09:15.000000000 +0100 +@@ -791,7 +791,6 @@ static void netfront_accel_vi_rx_comple + } + + net_dev = vnic->net_dev; +- skb->dev = 
net_dev; + skb->protocol = eth_type_trans(skb, net_dev); + /* CHECKSUM_UNNECESSARY as hardware has done it already */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel_xenbus.c 2008-02-20 09:32:49.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel_xenbus.c 2011-01-31 17:32:29.000000000 +0100 @@ -356,7 +356,7 @@ static int vnic_setup_domU_shared_state( /* Create xenbus msg event channel */ err = bind_listening_port_to_irqhandler @@ -4982,8 +5788,8 @@ Acked-by: jbeulich@novell.com if (err < 0) { EPRINTK("Couldn't bind net event channel\n"); goto fail_net_irq; ---- head-2010-05-25.orig/drivers/xen/usbfront/xenbus.c 2010-04-15 09:53:24.000000000 +0200 -+++ head-2010-05-25/drivers/xen/usbfront/xenbus.c 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/usbfront/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/xen/usbfront/xenbus.c 2011-01-31 17:32:29.000000000 +0100 @@ -111,7 +111,7 @@ static int setup_rings(struct xenbus_dev info->conn_ring_ref = err; @@ -4993,9 +5799,34 @@ Acked-by: jbeulich@novell.com if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); ---- head-2010-05-25.orig/fs/aio.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/fs/aio.c 2010-03-24 15:09:15.000000000 +0100 -@@ -42,7 +42,7 @@ +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:32:29.000000000 +0100 +@@ -539,18 +539,15 @@ int xenbus_printf(struct xenbus_transact + { + va_list ap; + int ret; +-#define PRINTF_BUFFER_SIZE 4096 + char *printf_buffer; + +- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); +- if (printf_buffer == NULL) +- return -ENOMEM; +- + va_start(ap, fmt); +- ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); ++ printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); + va_end(ap); + +- BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ++ if (!printf_buffer) ++ return -ENOMEM; ++ + ret = xenbus_write(t, dir, node, printf_buffer); + + kfree(printf_buffer); +--- head-2011-03-11.orig/fs/aio.c 2011-03-11 10:52:11.000000000 +0100 ++++ head-2011-03-11/fs/aio.c 2011-03-11 10:55:30.000000000 +0100 +@@ -43,7 +43,7 @@ #ifdef CONFIG_EPOLL #include @@ -5004,7 +5835,7 @@ Acked-by: jbeulich@novell.com #endif #if DEBUG > 1 -@@ -1292,7 +1292,7 @@ static const struct file_operations aioq +@@ -1297,7 +1297,7 @@ static const struct file_operations aioq /* make_aio_fd: * Create a file descriptor that can be used to poll the event queue. 
@@ -5013,7 +5844,7 @@ Acked-by: jbeulich@novell.com */ static int make_aio_fd(struct kioctx *ioctx) -@@ -1301,7 +1301,8 @@ static int make_aio_fd(struct kioctx *io +@@ -1306,7 +1306,8 @@ static int make_aio_fd(struct kioctx *io struct inode *inode; struct file *file; @@ -5023,8 +5854,8 @@ Acked-by: jbeulich@novell.com if (error) return error; ---- head-2010-05-25.orig/arch/x86/include/asm/boot.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/boot.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/asm/boot.h 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/boot.h 2011-01-31 17:32:29.000000000 +0100 @@ -16,7 +16,7 @@ & ~(CONFIG_PHYSICAL_ALIGN - 1)) @@ -5034,8 +5865,8 @@ Acked-by: jbeulich@novell.com #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT #else #define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -11,23 +11,24 @@ #include @@ -5200,8 +6031,8 @@ Acked-by: jbeulich@novell.com #endif #ifndef CONFIG_X86_NO_IDT ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -19,10 +19,8 @@ * the start of the fixmap. 
*/ @@ -5225,8 +6056,8 @@ Acked-by: jbeulich@novell.com FIX_SHARED_INFO, #define NR_FIX_ISAMAPS 256 FIX_ISAMAP_END, ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:03:18.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:32:29.000000000 +0100 @@ -67,12 +67,17 @@ extern void FASTCALL(kunmap_high(struct void *kmap(struct page *page); @@ -5246,8 +6077,8 @@ Acked-by: jbeulich@novell.com #define flush_cache_kmaps() do { } while (0) void clear_highpage(struct page *); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:32:29.000000000 +0100 @@ -69,6 +69,8 @@ extern start_info_t *xen_start_info; #define is_initial_xendomain() 0 #endif @@ -5279,8 +6110,8 @@ Acked-by: jbeulich@novell.com #define arch_use_lazy_mmu_mode() unlikely(__get_cpu_var(xen_lazy_mmu)) #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -11,6 +11,40 @@ #define _ASM_IRQFLAGS_H @@ -5392,8 +6223,8 @@ Acked-by: jbeulich@novell.com push %esp ; \ mov %ecx, %fs ; \ call evtchn_do_upcall ; \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -6,6 +6,20 @@ #include #include @@ -5443,8 +6274,8 @@ Acked-by: jbeulich@novell.com + } while(0) #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -1,7 +1,6 @@ #ifndef _I386_PGALLOC_H #define _I386_PGALLOC_H @@ -5460,8 +6291,8 @@ Acked-by: jbeulich@novell.com -#define check_pgt_cache() do { } while (0) - #endif /* _I386_PGALLOC_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:32:29.000000000 +0100 @@ -52,32 +52,40 @@ static inline int 
pte_exec_kernel(pte_t * value and then use set_pte to update it. -ben */ @@ -5580,8 +6411,8 @@ Acked-by: jbeulich@novell.com -void vmalloc_sync_all(void); - #endif /* _I386_PGTABLE_3LEVEL_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2007-06-12 13:14:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2011-01-31 17:32:29.000000000 +0100 @@ -1,7 +1,7 @@ #ifndef _I386_PGTABLE_3LEVEL_DEFS_H #define _I386_PGTABLE_3LEVEL_DEFS_H @@ -5591,8 +6422,8 @@ Acked-by: jbeulich@novell.com /* * PGDIR_SHIFT determines what a top-level page table entry can map ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:07.000000000 +0100 @@ -24,11 +24,11 @@ #include #include @@ -5755,7 +6586,7 @@ Acked-by: jbeulich@novell.com #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else -@@ -597,10 +607,6 @@ int xen_change_pte_range(struct mm_struc +@@ -594,10 +604,6 @@ int xen_change_pte_range(struct mm_struc #define io_remap_pfn_range(vma,from,pfn,size,prot) \ direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO) @@ -5766,8 +6597,8 @@ Acked-by: jbeulich@novell.com #include #endif /* _I386_PGTABLE_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -21,6 +21,7 @@ #include #include @@ -5923,7 +6754,7 @@ Acked-by: jbeulich@novell.com #define cpu_relax() rep_nop() --#define paravirt_enabled() 0 +-#define paravirt_enabled() 1 -#define __cpuid xen_cpuid - #ifndef CONFIG_X86_NO_TSS @@ -5975,7 +6806,7 @@ Acked-by: jbeulich@novell.com } -+#define paravirt_enabled() 0 ++#define paravirt_enabled() 1 +#define __cpuid xen_cpuid + +#define load_esp0 xen_load_esp0 @@ -6010,8 +6841,8 @@ Acked-by: jbeulich@novell.com +extern int force_mwait; #endif /* __ASM_I386_PROCESSOR_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -8,19 +8,15 @@ #include #include @@ -6176,8 +7007,8 @@ Acked-by: jbeulich@novell.com extern u8 apicid_2_node[]; #ifdef CONFIG_X86_LOCAL_APIC ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 
17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -4,7 +4,7 @@ #include #include @@ -6563,8 +7394,8 @@ Acked-by: jbeulich@novell.com /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:32:29.000000000 +0100 @@ -29,8 +29,13 @@ * and page-granular flushes are available only on i486 and up. */ @@ -6599,8 +7430,8 @@ Acked-by: jbeulich@novell.com #define flush_tlb_kernel_range(start, end) flush_tlb_all() ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc_64.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc_64.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/desc_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -127,16 +127,6 @@ static inline void set_ldt_desc(unsigned DESC_LDT, size * 8 - 1); } @@ -6649,8 +7480,8 @@ Acked-by: jbeulich@novell.com } /* ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -15,7 +15,6 @@ #include #include @@ -6659,8 +7490,8 @@ Acked-by: jbeulich@novell.com #include /* ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags_64.h 2007-06-12 13:14:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irqflags_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -9,6 +9,7 @@ */ #ifndef _ASM_IRQFLAGS_H @@ -6716,8 +7547,8 @@ Acked-by: jbeulich@novell.com #else /* __ASSEMBLY__: */ # ifdef CONFIG_TRACE_IRQFLAGS ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2007-06-12 13:14:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -9,6 +9,9 @@ #include #include @@ -6728,8 +7559,8 @@ Acked-by: jbeulich@novell.com /* * possibly do the LDT unload here? 
*/ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2007-06-18 08:38:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -1,7 +1,6 @@ #ifndef _X86_64_PGALLOC_H #define _X86_64_PGALLOC_H @@ -6738,9 +7569,19 @@ Acked-by: jbeulich@novell.com #include #include #include -@@ -100,24 +99,16 @@ static inline void pgd_list_add(pgd_t *p +@@ -95,35 +94,25 @@ static inline void pud_free(pud_t *pud) + pte_free(virt_to_page(pud)); + } + +-static inline void pgd_list_add(pgd_t *pgd, void *mm) ++static inline void pgd_list_add(pgd_t *pgd, struct mm_struct *mm) + { struct page *page = virt_to_page(pgd); + /* Store a back link for vmalloc_sync_all(). */ +- page->mapping = mm; ++ set_page_private(page, (unsigned long)mm); + spin_lock(&pgd_lock); - page->index = (pgoff_t)pgd_list; - if (pgd_list) @@ -6764,10 +7605,13 @@ Acked-by: jbeulich@novell.com - next->private = (unsigned long)pprev; + list_del(&page->lru); spin_unlock(&pgd_lock); +- +- page->mapping = NULL; } ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:09:15.000000000 +0100 + static inline pgd_t *pgd_alloc(struct mm_struct *mm) +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:08.000000000 +0100 @@ -1,12 +1,14 @@ #ifndef _X86_64_PGTABLE_H #define _X86_64_PGTABLE_H @@ -6952,7 +7796,7 @@ Acked-by: jbeulich@novell.com extern int kern_addr_valid(unsigned long addr); -@@ -559,10 +573,6 @@ int xen_change_pte_range(struct mm_struc +@@ -555,10 +569,6 @@ int xen_change_pte_range(struct mm_struc #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO) @@ -6963,7 +7807,7 @@ Acked-by: jbeulich@novell.com #define HAVE_ARCH_UNMAPPED_AREA #define pgtable_cache_init() do { } while (0) -@@ -576,11 +586,14 @@ int xen_change_pte_range(struct mm_struc +@@ -572,11 +582,14 @@ int xen_change_pte_range(struct mm_struc #define kc_offset_to_vaddr(o) \ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? 
((o) | (~__VIRTUAL_MASK)) : (o)) @@ -6978,8 +7822,8 @@ Acked-by: jbeulich@novell.com +#endif /* !__ASSEMBLY__ */ #endif /* _X86_64_PGTABLE_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -20,6 +20,7 @@ #include #include @@ -7063,8 +7907,8 @@ Acked-by: jbeulich@novell.com * NSC/Cyrix CPU indexed register access macros */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -11,12 +11,11 @@ extern int disable_apic; @@ -7117,8 +7961,8 @@ Acked-by: jbeulich@novell.com #endif /* ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -3,7 +3,7 @@ #include @@ -7260,8 +8104,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:32:29.000000000 +0100 @@ -2,7 +2,9 @@ #define _X8664_TLBFLUSH_H @@ -7272,9 +8116,9 @@ Acked-by: jbeulich@novell.com #define __flush_tlb() xen_tlb_flush() ---- head-2010-05-25.orig/include/linux/pci.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/include/linux/pci.h 2010-03-24 15:09:15.000000000 +0100 -@@ -321,7 +321,7 @@ struct pci_dev { +--- head-2011-03-11.orig/include/linux/pci.h 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-11/include/linux/pci.h 2011-01-31 17:32:29.000000000 +0100 +@@ -325,7 +325,7 @@ struct pci_dev { int rom_attr_enabled; /* has display of the rom attribute been enabled? 
*/ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ @@ -7283,9 +8127,58 @@ Acked-by: jbeulich@novell.com struct list_head msi_list; #endif struct pci_vpd *vpd; ---- head-2010-05-25.orig/lib/swiotlb-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/lib/swiotlb-xen.c 2010-03-24 15:09:15.000000000 +0100 -@@ -723,7 +723,6 @@ swiotlb_dma_supported (struct device *hw +--- head-2011-03-11.orig/include/xen/net-util.h 2011-02-09 15:50:19.000000000 +0100 ++++ head-2011-03-11/include/xen/net-util.h 2011-02-09 15:53:07.000000000 +0100 +@@ -11,6 +11,7 @@ static inline int skb_checksum_setup(str + unsigned long *fixup_counter) + { + struct iphdr *iph = (void *)skb->data; ++ unsigned char *th; + __be16 *csum = NULL; + int err = -EPROTO; + +@@ -32,31 +33,31 @@ static inline int skb_checksum_setup(str + if (skb->protocol != htons(ETH_P_IP)) + goto out; + +- skb->nh.iph = iph; +- skb->h.raw = skb->nh.raw + 4 * iph->ihl; +- if (skb->h.raw >= skb->tail) ++ th = skb->data + 4 * iph->ihl; ++ if (th >= skb_tail_pointer(skb)) + goto out; + ++ skb->csum_start = th - skb->head; + switch (iph->protocol) { + case IPPROTO_TCP: + skb->csum_offset = offsetof(struct tcphdr, check); + if (csum) +- csum = &skb->h.th->check; ++ csum = &((struct tcphdr *)th)->check; + break; + case IPPROTO_UDP: + skb->csum_offset = offsetof(struct udphdr, check); + if (csum) +- csum = &skb->h.uh->check; ++ csum = &((struct udphdr *)th)->check; + break; + default: + if (net_ratelimit()) + printk(KERN_ERR "Attempting to checksum a non-" + "TCP/UDP packet, dropping a protocol" +- " %d packet\n", skb->nh.iph->protocol); ++ " %d packet\n", iph->protocol); + goto out; + } + +- if ((skb->h.raw + skb->csum_offset + sizeof(*csum)) > skb->tail) ++ if ((th + skb->csum_offset + sizeof(*csum)) > skb_tail_pointer(skb)) + goto out; + + if (csum) { +--- head-2011-03-11.orig/lib/swiotlb-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/lib/swiotlb-xen.c 2011-01-31 17:32:29.000000000 +0100 +@@ -751,7 +751,6 @@ swiotlb_dma_supported (struct device *hw return (mask >= ((1UL << dma_bits) - 1)); } @@ -7293,9 +8186,9 @@ Acked-by: jbeulich@novell.com EXPORT_SYMBOL(swiotlb_map_single); EXPORT_SYMBOL(swiotlb_unmap_single); EXPORT_SYMBOL(swiotlb_map_sg); ---- head-2010-05-25.orig/mm/vmalloc.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/mm/vmalloc.c 2010-03-24 15:09:15.000000000 +0100 -@@ -1472,6 +1472,13 @@ static void *__vmalloc_area_node(struct +--- head-2011-03-11.orig/mm/vmalloc.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/mm/vmalloc.c 2011-01-31 17:32:29.000000000 +0100 +@@ -1485,6 +1485,13 @@ static void *__vmalloc_area_node(struct struct page **pages; unsigned int nr_pages, array_size, i; gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; @@ -7309,7 +8202,7 @@ Acked-by: jbeulich@novell.com nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; array_size = (nr_pages * sizeof(struct page *)); -@@ -1507,6 +1514,16 @@ static void *__vmalloc_area_node(struct +@@ -1520,6 +1527,16 @@ static void *__vmalloc_area_node(struct goto fail; } area->pages[i] = page; @@ -7326,7 +8219,7 @@ Acked-by: jbeulich@novell.com } if (map_vm_area(area, prot, &pages)) -@@ -1666,6 +1683,8 @@ void *vmalloc_exec(unsigned long size) +@@ -1729,6 +1746,8 @@ void *vmalloc_exec(unsigned long size) #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL @@ -7335,44 +8228,8 @@ Acked-by: jbeulich@novell.com #else #define GFP_VMALLOC32 GFP_KERNEL #endif ---- head-2010-05-25.orig/net/core/dev.c 2010-05-25 09:21:50.000000000 +0200 -+++ head-2010-05-25/net/core/dev.c 2010-05-25 09:22:00.000000000 +0200 -@@ -2015,12 +2015,17 @@ static struct netdev_queue *dev_pick_tx( - inline int skb_checksum_setup(struct sk_buff *skb) - { - if (skb->proto_csum_blank) { -+ struct iphdr *iph; -+ unsigned char *th; -+ - if (skb->protocol != htons(ETH_P_IP)) - goto out; -- skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl; -- if (skb->h.raw >= skb->tail) -+ iph = ip_hdr(skb); -+ th = skb_network_header(skb) + 4 * iph->ihl; -+ if (th >= skb_tail_pointer(skb)) - goto out; -- switch (skb->nh.iph->protocol) { -+ skb->csum_start = th - skb->head; -+ switch (iph->protocol) { - case IPPROTO_TCP: - skb->csum_offset = offsetof(struct tcphdr, check); - break; -@@ -2031,10 +2036,10 @@ inline int skb_checksum_setup(struct sk_ - if (net_ratelimit()) - printk(KERN_ERR "Attempting to checksum a non-" - "TCP/UDP packet, dropping a protocol" -- " %d packet", skb->nh.iph->protocol); -+ " %d packet", iph->protocol); - goto out; - } -- if ((skb->h.raw + skb->csum_offset + 2) > skb->tail) -+ if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) - goto out; - skb->ip_summed = CHECKSUM_PARTIAL; - skb->proto_csum_blank = 0; ---- head-2010-05-25.orig/scripts/Makefile.xen.awk 2007-08-06 15:10:49.000000000 +0200 -+++ head-2010-05-25/scripts/Makefile.xen.awk 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/scripts/Makefile.xen.awk 2007-08-06 15:10:49.000000000 +0200 ++++ head-2011-03-11/scripts/Makefile.xen.awk 2011-01-31 17:32:29.000000000 +0100 @@ -13,7 +13,7 @@ BEGIN { next } diff --git a/patches.xen/xen3-patch-2.6.23 b/patches.xen/xen3-patch-2.6.23 index a483f41..b210662 100644 --- a/patches.xen/xen3-patch-2.6.23 +++ b/patches.xen/xen3-patch-2.6.23 @@ -6,8 +6,8 @@ Automatically created from "patches.kernel.org/patch-2.6.23" by xen-port-patches Acked-by: jbeulich@novell.com ---- head-2010-05-25.orig/arch/x86/Kbuild 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/Kbuild 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/Kbuild 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kbuild 2011-01-31 17:49:31.000000000 +0100 @@ -2,7 +2,7 @@ obj-$(CONFIG_KVM) += kvm/ @@ -17,8 +17,8 @@ Acked-by: jbeulich@novell.com # lguest paravirtualization support obj-$(CONFIG_LGUEST_GUEST) += lguest/ ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -15,7 +15,7 @@ #ifndef CONFIG_ACPI_PV_SLEEP /* address in low memory of the wakeup routine. 
*/ @@ -55,8 +55,8 @@ Acked-by: jbeulich@novell.com return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:49:31.000000000 +0100 @@ -20,7 +20,9 @@ #include #include @@ -67,7 +67,15 @@ Acked-by: jbeulich@novell.com #include #include "../../../drivers/lguest/lg.h" -@@ -123,7 +125,7 @@ void foo(void) +@@ -55,7 +57,6 @@ void foo(void) + OFFSET(TI_exec_domain, thread_info, exec_domain); + OFFSET(TI_flags, thread_info, flags); + OFFSET(TI_status, thread_info, status); +- OFFSET(TI_cpu, thread_info, cpu); + OFFSET(TI_preempt_count, thread_info, preempt_count); + OFFSET(TI_addr_limit, thread_info, addr_limit); + OFFSET(TI_restart_block, thread_info, restart_block); +@@ -121,7 +122,7 @@ void foo(void) OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); #endif @@ -76,8 +84,8 @@ Acked-by: jbeulich@novell.com BLANK(); OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -362,6 +362,8 @@ static void __cpuinit generic_identify(s if ( xlvl >= 0x80000004 ) get_model_name(c); /* Default name */ @@ -103,8 +111,8 @@ Acked-by: jbeulich@novell.com nexgen_init_cpu(); umc_init_cpu(); early_cpu_detect(); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -167,7 +167,7 @@ mtrr_del(int reg, unsigned long base, un EXPORT_SYMBOL(mtrr_add); EXPORT_SYMBOL(mtrr_del); @@ -114,8 +122,8 @@ Acked-by: jbeulich@novell.com { } ---- head-2010-05-25.orig/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -10,6 +10,7 @@ #include #include @@ -171,9 +179,9 @@ Acked-by: jbeulich@novell.com break; } } ---- head-2010-05-25.orig/arch/x86/kernel/entry_32.S 2010-01-19 16:00:16.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32.S 2010-03-24 15:09:22.000000000 +0100 -@@ -1109,7 +1109,7 @@ ENTRY(kernel_thread_helper) +--- head-2011-03-17.orig/arch/x86/kernel/entry_32.S 2011-02-01 14:10:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32.S 2011-02-01 14:10:55.000000000 +0100 +@@ -1047,7 +1047,7 @@ ENTRY(kernel_thread_helper) CFI_ENDPROC ENDPROC(kernel_thread_helper) @@ -182,17 +190,17 @@ Acked-by: jbeulich@novell.com /* Xen doesn't set %esp to be precisely what the normal sysenter entrypoint expects, so fix it up before using the normal path. 
*/ ENTRY(xen_sysenter_target) -@@ -1202,7 +1202,7 @@ ENTRY(xen_failsafe_callback) - .previous - ENDPROC(xen_failsafe_callback) +@@ -1139,7 +1139,7 @@ ENDPROC(xen_failsafe_callback) + BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, + xen_evtchn_do_upcall) -#endif /* CONFIG_XEN */ +#endif /* CONFIG_PARAVIRT_XEN */ #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE ---- head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:49:31.000000000 +0100 @@ -452,9 +452,6 @@ restore_nocheck_notrace: 1: INTERRUPT_RETURN .section .fixup,"ax" @@ -203,8 +211,8 @@ Acked-by: jbeulich@novell.com pushl $0 # no error code pushl $do_iret_error jmp error_code ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_32-xen.S 2011-01-31 17:49:31.000000000 +0100 @@ -86,7 +86,10 @@ ENTRY(_stext) /* * BSS section @@ -259,8 +267,8 @@ Acked-by: jbeulich@novell.com - ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1) + ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") + ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1) ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -402,14 +402,6 @@ static void set_ioapic_affinity_irq(unsi # include /* kmalloc() */ # include /* time_after() */ @@ -374,8 +382,8 @@ Acked-by: jbeulich@novell.com { unsigned long t1 = jiffies; ---- head-2010-05-25.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/microcode-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -33,6 +33,7 @@ #include #include @@ -384,8 +392,8 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -24,7 +24,7 @@ #include @@ -395,8 +403,8 @@ Acked-by: jbeulich@novell.com int iommu_merge __read_mostly = 0; EXPORT_SYMBOL(iommu_merge); ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -241,6 +241,7 @@ early_param("idle", idle_setup); void 
show_regs(struct pt_regs * regs) { @@ -512,8 +520,8 @@ Acked-by: jbeulich@novell.com /* * Leave lazy mode, flushing any hypercalls made here. ---- head-2010-05-25.orig/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -114,19 +114,10 @@ static unsigned int highmem_pages = -1; /* * Setup options @@ -600,8 +608,8 @@ Acked-by: jbeulich@novell.com if (is_initial_xendomain()) { #ifdef CONFIG_VT ---- head-2010-05-25.orig/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -22,6 +22,7 @@ #include @@ -626,8 +634,8 @@ Acked-by: jbeulich@novell.com { if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) BUG(); ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -75,11 +75,12 @@ #include #include @@ -643,7 +651,7 @@ Acked-by: jbeulich@novell.com volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; #endif -@@ -757,56 +758,10 @@ unsigned long read_persistent_clock(void +@@ -760,56 +761,10 @@ unsigned long read_persistent_clock(void return retval; } @@ -702,8 +710,8 @@ Acked-by: jbeulich@novell.com } extern void (*late_time_init)(void); ---- head-2010-05-25.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -41,6 +41,10 @@ #include #endif @@ -910,8 +918,8 @@ Acked-by: jbeulich@novell.com #ifndef CONFIG_MATH_EMULATION ---- head-2010-05-25.orig/arch/x86/mach-xen/setup.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/mach-xen/setup.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mach-xen/setup.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/mach-xen/setup.c 2011-01-31 17:49:31.000000000 +0100 @@ -12,6 +12,7 @@ #include #include @@ -948,8 +956,8 @@ Acked-by: jbeulich@novell.com + set_pmd(pmd, __pmd(__pa_symbol(swapper_pg_pmd) | _PAGE_TABLE)); + } } ---- head-2010-05-25.orig/arch/x86/mm/fault_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/fault_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -346,7 +346,10 @@ static inline pmd_t *vmalloc_sync_one(pg pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) @@ -1035,8 +1043,8 @@ Acked-by: jbeulich@novell.com tsk->thread.cr2 = address; /* Kernel addresses are always protection faults */ tsk->thread.error_code = error_code | (address >= 
TASK_SIZE); ---- head-2010-05-25.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -34,17 +34,16 @@ void *kmap_atomic_prot(struct page *page /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ pagefault_disable(); @@ -1058,8 +1066,8 @@ Acked-by: jbeulich@novell.com } void *kmap_atomic(struct page *page, enum km_type type) ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -96,7 +96,7 @@ static pte_t * __init one_page_table_ini #endif pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); @@ -1122,9 +1130,9 @@ Acked-by: jbeulich@novell.com if (!SHARED_KERNEL_PMD) { /* If we're in PAE mode and have a non-shared kernel pmd, then the pgd size must be a ---- head-2010-05-25.orig/arch/x86/mm/ioremap_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/ioremap_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -@@ -363,9 +363,8 @@ void iounmap(volatile void __iomem *addr +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:38:30.000000000 +0100 +@@ -344,9 +344,8 @@ void iounmap(volatile void __iomem *addr /* Reset the direct mapping. Can block */ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) { @@ -1135,8 +1143,8 @@ Acked-by: jbeulich@novell.com PAGE_KERNEL); global_flush_tlb(); } ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -198,7 +198,7 @@ void pte_free(struct page *pte) va, pfn_pte(pfn, PAGE_KERNEL), 0)) BUG(); @@ -1146,7 +1154,7 @@ Acked-by: jbeulich@novell.com ClearPageForeign(pte); init_page_count(pte); -@@ -248,7 +248,7 @@ static inline void pgd_list_del(pgd_t *p +@@ -249,7 +249,7 @@ static inline void pgd_list_del(pgd_t *p #if (PTRS_PER_PMD == 1) /* Non-PAE pgd constructor */ @@ -1155,7 +1163,7 @@ Acked-by: jbeulich@novell.com { unsigned long flags; -@@ -271,7 +271,7 @@ void pgd_ctor(void *pgd) +@@ -272,7 +272,7 @@ void pgd_ctor(void *pgd) } #else /* PTRS_PER_PMD > 1 */ /* PAE pgd constructor */ @@ -1164,7 +1172,7 @@ Acked-by: jbeulich@novell.com { /* PAE, kernel PMD may be shared */ -@@ -285,7 +285,7 @@ void pgd_ctor(void *pgd) +@@ -286,7 +286,7 @@ void pgd_ctor(void *pgd) } #endif /* PTRS_PER_PMD */ @@ -1173,7 +1181,7 @@ Acked-by: jbeulich@novell.com { unsigned long flags; /* can be called from interrupt context */ -@@ -637,9 +637,9 @@ static inline unsigned int pgd_walk_set_ +@@ -644,9 +644,9 @@ static inline unsigned int pgd_walk_set_ if (PageHighMem(page)) { if (pgprot_val(flags) & _PAGE_RW) @@ -1185,7 +1193,7 @@ Acked-by: jbeulich@novell.com } else { MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq, (unsigned long)__va(pfn << PAGE_SHIFT), -@@ -709,19 
+709,19 @@ static void __pgd_pin(pgd_t *pgd) +@@ -716,19 +716,19 @@ static void __pgd_pin(pgd_t *pgd) pgd_walk(pgd, PAGE_KERNEL_RO); kmap_flush_unused(); xen_pgd_pin(__pa(pgd)); @@ -1208,7 +1216,7 @@ Acked-by: jbeulich@novell.com __pgd_unpin(pgd); } -@@ -759,7 +759,7 @@ void mm_pin_all(void) +@@ -766,7 +766,7 @@ void mm_pin_all(void) */ spin_lock_irqsave(&pgd_lock, flags); for (page = pgd_list; page; page = (struct page *)page->index) { @@ -1217,7 +1225,7 @@ Acked-by: jbeulich@novell.com __pgd_pin((pgd_t *)page_address(page)); } spin_unlock_irqrestore(&pgd_lock, flags); -@@ -767,7 +767,7 @@ void mm_pin_all(void) +@@ -774,7 +774,7 @@ void mm_pin_all(void) void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { @@ -1226,7 +1234,7 @@ Acked-by: jbeulich@novell.com mm_pin(mm); } -@@ -793,7 +793,7 @@ void arch_exit_mmap(struct mm_struct *mm +@@ -800,7 +800,7 @@ void arch_exit_mmap(struct mm_struct *mm task_unlock(tsk); @@ -1235,8 +1243,8 @@ Acked-by: jbeulich@novell.com (atomic_read(&mm->mm_count) == 1) && !mm->context.has_foreign_mappings) mm_unpin(mm); ---- head-2010-05-25.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/pci/irq-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -142,8 +142,9 @@ static void __init pirq_peer_trick(void) for(i = 1; i < 256; i++) { if (!busmap[i] || pci_find_bus(0, i)) @@ -1257,8 +1265,8 @@ Acked-by: jbeulich@novell.com r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; ---- head-2010-05-25.orig/arch/x86/xen/Kconfig 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/xen/Kconfig 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/xen/Kconfig 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/xen/Kconfig 2011-01-31 17:49:31.000000000 +0100 @@ -2,7 +2,7 @@ # This Kconfig describes xen options # @@ -1266,10 +1274,10 @@ Acked-by: jbeulich@novell.com -config XEN +config PARAVIRT_XEN bool "Xen guest support" - select PARAVIRT_ALL + select PARAVIRT select PARAVIRT_CLOCK ---- head-2010-05-25.orig/arch/x86/xen/xen-head.S 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/xen/xen-head.S 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/xen/xen-head.S 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/xen/xen-head.S 2011-01-31 17:49:31.000000000 +0100 @@ -1,7 +1,7 @@ /* Xen-specific pieces of head.S, intended to be included in the right place in head.S */ @@ -1285,8 +1293,8 @@ Acked-by: jbeulich@novell.com -#endif /*CONFIG_XEN */ +#endif /* CONFIG_PARAVIRT_XEN */ ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:49:31.000000000 +0100 @@ -105,7 +105,7 @@ ENTRY(ia32_sysenter_target) movl $VSYSCALL32_SYSEXIT,8(%rsp) movq %rax,(%rsp) @@ -1328,8 +1336,8 @@ Acked-by: jbeulich@novell.com .quad sys_eventfd + .quad sys32_fallocate ia32_syscall_end: ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- 
head-2011-03-17.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -51,12 +51,10 @@ Low-Level Sleep Support -------------------------------------------------------------------------- */ @@ -1367,8 +1375,8 @@ Acked-by: jbeulich@novell.com void acpi_pci_link_exit(void) { } ---- head-2010-05-25.orig/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -224,37 +224,6 @@ unsigned long __init e820_end_of_ram(voi } @@ -1552,8 +1560,8 @@ Acked-by: jbeulich@novell.com (unsigned long long) e820.map[i].addr, (unsigned long long) (e820.map[i].addr + e820.map[i].size)); switch (e820.map[i].type) { ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -174,6 +174,7 @@ static __init void early_serial_init(cha * mappings. Someone should fix this for domain 0. For now, use fake serial. */ @@ -1588,8 +1596,8 @@ Acked-by: jbeulich@novell.com } if (keep_early) ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:49:31.000000000 +0100 @@ -310,7 +310,7 @@ sysret_signal: TRACE_IRQS_ON /* sti */ @@ -1617,8 +1625,8 @@ Acked-by: jbeulich@novell.com jz retint_restore_args TRACE_IRQS_ON XEN_UNBLOCK_EVENTS(%rsi) ---- head-2010-05-25.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_64-xen.S 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-01-31 17:49:31.000000000 +0100 @@ -23,7 +23,7 @@ #include #include @@ -1672,17 +1680,17 @@ Acked-by: jbeulich@novell.com - ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64) - ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page) - ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT) -- ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel") +- ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel") - ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic") - ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1) + ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad startup_64) + ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad hypercall_page) + ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT, _PAGE_PRESENT) -+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel") ++ ELFNOTE(Xen, 
XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel") + ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") + ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1) ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -90,7 +90,7 @@ void __init x86_64_start_kernel(char * r unsigned long machine_to_phys_nr_ents; int i; @@ -1692,8 +1700,8 @@ Acked-by: jbeulich@novell.com xen_start_info = (struct start_info *)real_mode_data; if (!xen_feature(XENFEAT_auto_translated_physmap)) ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -165,7 +165,9 @@ static inline void io_apic_write(unsigne #endif } @@ -1816,8 +1824,8 @@ Acked-by: jbeulich@novell.com } static struct irq_chip ioapic_chip __read_mostly = { ---- head-2010-05-25.orig/arch/x86/kernel/ldt_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ldt_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -114,6 +114,8 @@ int init_new_context(struct task_struct memset(&mm->context, 0, sizeof(mm->context)); init_MUTEX(&mm->context.sem); @@ -1836,8 +1844,8 @@ Acked-by: jbeulich@novell.com spin_lock(&mm_unpinned_lock); list_del(&mm->context.unpinned); spin_unlock(&mm_unpinned_lock); ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -32,7 +32,6 @@ /* Have we found an MP table */ @@ -1894,8 +1902,8 @@ Acked-by: jbeulich@novell.com } void __init ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:39.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:50.000000000 +0100 @@ -26,6 +26,7 @@ #include #include @@ -1904,7 +1912,7 @@ Acked-by: jbeulich@novell.com #include #include #include -@@ -249,6 +250,7 @@ early_param("idle", idle_setup); +@@ -248,6 +249,7 @@ early_param("idle", idle_setup); void __show_regs(struct pt_regs * regs) { unsigned long fs, gs, shadowgs; @@ -1912,7 +1920,7 @@ Acked-by: jbeulich@novell.com unsigned int fsindex,gsindex; unsigned int ds,cs,es; -@@ -288,6 +290,14 @@ void __show_regs(struct pt_regs * regs) +@@ -287,6 +289,14 @@ void __show_regs(struct pt_regs * regs) fs,fsindex,gs,gsindex,shadowgs); printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es); @@ -1927,8 +1935,8 @@ Acked-by: jbeulich@novell.com } 
void show_regs(struct pt_regs *regs) ---- head-2010-05-25.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -828,6 +828,8 @@ static void __cpuinit init_amd(struct cp level = cpuid_eax(1); if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) @@ -2011,8 +2019,8 @@ Acked-by: jbeulich@novell.com }; static char *x86_power_flags[] = { "ts", /* temperature sensor */ ---- head-2010-05-25.orig/arch/x86/kernel/setup64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -125,11 +125,14 @@ void __init setup_per_cpu_areas(void) } @@ -2039,8 +2047,8 @@ Acked-by: jbeulich@novell.com /* others are initialized in smpboot.c */ pda->pcurrent = &init_task; pda->irqstackptr = boot_cpu_stack; ---- head-2010-05-25.orig/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -362,7 +362,7 @@ __smp_call_function_single(int cpu, void } @@ -2077,8 +2085,8 @@ Acked-by: jbeulich@novell.com put_cpu(); return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -34,6 +34,10 @@ #include #include @@ -2149,8 +2157,8 @@ Acked-by: jbeulich@novell.com if (panic_on_unrecovered_nmi) panic("NMI: Not continuing"); ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -42,6 +42,7 @@ #include #include @@ -2196,8 +2204,8 @@ Acked-by: jbeulich@novell.com write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } ---- head-2010-05-25.orig/arch/x86/mm/fault_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/fault_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -159,7 +159,9 @@ void dump_pagetable(unsigned long addres pmd_t *pmd; pte_t *pte; @@ -2319,8 +2327,8 @@ Acked-by: jbeulich@novell.com goto no_context; do_sigbus: ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:47:36.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-04-29 09:47:49.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-01-31 17:32:29.000000000 
+0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -66,6 +66,9 @@ int after_bootmem; DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); extern unsigned long start_pfn; @@ -2352,7 +2360,7 @@ Acked-by: jbeulich@novell.com + unsigned long adr = page[pud_index(addr)]; + + addr_to_page(adr, page); -+ memcpy(level2_fixmap_pgt, page, PAGE_SIZE); ++ copy_page(level2_fixmap_pgt, page); + } + level3_kernel_pgt[pud_index(addr)] = + __pud(__pa_symbol(level2_fixmap_pgt) | _PAGE_TABLE); @@ -2444,8 +2452,8 @@ Acked-by: jbeulich@novell.com + return "[vsyscall]"; + return NULL; +} ---- head-2010-05-25.orig/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:49:31.000000000 +0100 @@ -171,7 +171,7 @@ void mm_pin(struct mm_struct *mm) mm_walk(mm, PAGE_KERNEL_RO); xen_pgd_pin(__pa(mm->pgd)); /* kernel */ @@ -2550,8 +2558,21 @@ Acked-by: jbeulich@novell.com ClearPagePrivate(pg); __free_page(pg); } ---- head-2010-05-25.orig/arch/x86/vdso/vdso32/note.S 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/vdso/vdso32/note.S 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/oprofile/xenoprof.c 2008-01-28 12:24:19.000000000 +0100 ++++ head-2011-03-17/arch/x86/oprofile/xenoprof.c 2011-01-31 17:49:31.000000000 +0100 +@@ -18,9 +18,9 @@ + #include + #include + #include ++#include + #include + +-#include + #include + #include + #include +--- head-2011-03-17.orig/arch/x86/vdso/vdso32/note.S 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/vdso/vdso32/note.S 2011-01-31 17:49:31.000000000 +0100 @@ -13,7 +13,7 @@ ELFNOTE_START(Linux, 0, "a") .long LINUX_VERSION_CODE ELFNOTE_END @@ -2574,9 +2595,9 @@ Acked-by: jbeulich@novell.com .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */ ELFNOTE_END #endif ---- head-2010-05-25.orig/drivers/Makefile 2010-05-12 08:58:07.000000000 +0200 -+++ head-2010-05-25/drivers/Makefile 2010-05-12 09:00:57.000000000 +0200 -@@ -18,7 +18,7 @@ obj-$(CONFIG_PNP) += pnp/ +--- head-2011-03-17.orig/drivers/Makefile 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/drivers/Makefile 2011-01-31 17:49:31.000000000 +0100 +@@ -19,7 +19,7 @@ obj-$(CONFIG_PNP) += pnp/ obj-$(CONFIG_ARM_AMBA) += amba/ obj-$(CONFIG_VIRTIO) += virtio/ @@ -2585,34 +2606,34 @@ Acked-by: jbeulich@novell.com # regulators early, since some subsystems rely on them to initialize obj-$(CONFIG_REGULATOR) += regulator/ ---- head-2010-05-25.orig/drivers/block/Kconfig 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/block/Kconfig 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/block/Kconfig 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/block/Kconfig 2011-01-31 17:49:31.000000000 +0100 @@ -466,9 +466,9 @@ config XILINX_SYSACE help Include support for the Xilinx SystemACE CompactFlash interface -config XEN_BLKDEV_FRONTEND -+config XEN_BLKFRONT ++config PARAVIRT_XEN_BLKDEV_FRONTEND tristate "Xen virtual block device support" - depends on XEN + depends on PARAVIRT_XEN default y + select XEN_XENBUS_FRONTEND help - This driver implements the front-end of the Xen virtual ---- head-2010-05-25.orig/drivers/block/Makefile 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/block/Makefile 2010-03-24 15:09:22.000000000 +0100 +--- 
head-2011-03-17.orig/drivers/block/Makefile 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/block/Makefile 2011-01-31 17:49:31.000000000 +0100 @@ -35,7 +35,7 @@ obj-$(CONFIG_BLK_DEV_SX8) += sx8.o obj-$(CONFIG_BLK_DEV_UB) += ub.o obj-$(CONFIG_BLK_DEV_HD) += hd.o -obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o -+obj-$(CONFIG_XEN_BLKFRONT) += xen-blkfront.o ++obj-$(CONFIG_PARAVIRT_XEN_BLKDEV_FRONTEND) += xen-blkfront.o obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ + obj-$(CONFIG_BLK_DEV_RBD) += rbd.o - obj-$(CONFIG_CIPHER_TWOFISH) += loop_fish2.o ---- head-2010-05-25.orig/drivers/block/xen-blkfront.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/block/xen-blkfront.c 2010-04-15 09:54:11.000000000 +0200 -@@ -1057,7 +1057,6 @@ static const struct xenbus_device_id blk +--- head-2011-03-17.orig/drivers/block/xen-blkfront.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/block/xen-blkfront.c 2011-01-31 17:49:31.000000000 +0100 +@@ -1282,7 +1282,6 @@ static const struct xenbus_device_id blk static struct xenbus_driver blkfront = { .name = "vbd", @@ -2620,9 +2641,9 @@ Acked-by: jbeulich@novell.com .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, ---- head-2010-05-25.orig/drivers/char/Kconfig 2010-03-24 14:36:44.000000000 +0100 -+++ head-2010-05-25/drivers/char/Kconfig 2010-03-24 15:09:22.000000000 +0100 -@@ -646,7 +646,7 @@ config HVC_IUCV +--- head-2011-03-17.orig/drivers/char/Kconfig 2011-01-31 14:42:03.000000000 +0100 ++++ head-2011-03-17/drivers/char/Kconfig 2011-01-31 17:49:31.000000000 +0100 +@@ -669,7 +669,7 @@ config HVC_IUCV config HVC_XEN bool "Xen Hypervisor Console support" @@ -2631,89 +2652,34 @@ Acked-by: jbeulich@novell.com select HVC_DRIVER select HVC_IRQ default y ---- head-2010-05-25.orig/drivers/net/Kconfig 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/net/Kconfig 2010-04-15 09:54:18.000000000 +0200 -@@ -2862,9 +2862,9 @@ source "drivers/ieee802154/Kconfig" - - source "drivers/s390/net/Kconfig" +--- head-2011-03-17.orig/drivers/net/Kconfig 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/net/Kconfig 2011-01-31 17:49:31.000000000 +0100 +@@ -2960,9 +2960,9 @@ config TILE_NET + To compile this driver as a module, choose M here: the module + will be called tile_net. 
-config XEN_NETDEV_FRONTEND -+config XEN_NETFRONT ++config PARAVIRT_XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" - depends on XEN + depends on PARAVIRT_XEN + select XEN_XENBUS_FRONTEND default y help - The network device frontend driver allows the kernel to ---- head-2010-05-25.orig/drivers/net/Makefile 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/net/Makefile 2010-04-15 09:54:24.000000000 +0200 -@@ -166,7 +166,7 @@ obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2 +--- head-2011-03-17.orig/drivers/net/Makefile 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/net/Makefile 2011-01-31 17:49:31.000000000 +0100 +@@ -170,7 +170,7 @@ obj-$(CONFIG_PPTP) += pppox.o pptp.o obj-$(CONFIG_SLIP) += slip.o obj-$(CONFIG_SLHC) += slhc.o -obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o -+obj-$(CONFIG_XEN_NETFRONT) += xen-netfront.o ++obj-$(CONFIG_PARAVIRT_XEN_NETDEV_FRONTEND) += xen-netfront.o obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_IFB) += ifb.o ---- head-2010-05-25.orig/drivers/net/xen-netfront.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/net/xen-netfront.c 2010-04-15 09:55:17.000000000 +0200 -@@ -36,8 +36,6 @@ - #include - #include - #include --#include --#include - #include - #include - #include -@@ -770,45 +768,6 @@ static RING_IDX xennet_fill_frags(struct - return cons; - } - --static int skb_checksum_setup(struct sk_buff *skb) --{ -- struct iphdr *iph; -- unsigned char *th; -- int err = -EPROTO; -- -- if (skb->protocol != htons(ETH_P_IP)) -- goto out; -- -- iph = (void *)skb->data; -- th = skb->data + 4 * iph->ihl; -- if (th >= skb_tail_pointer(skb)) -- goto out; -- -- skb->csum_start = th - skb->head; -- switch (iph->protocol) { -- case IPPROTO_TCP: -- skb->csum_offset = offsetof(struct tcphdr, check); -- break; -- case IPPROTO_UDP: -- skb->csum_offset = offsetof(struct udphdr, check); -- break; -- default: -- if (net_ratelimit()) -- printk(KERN_ERR "Attempting to checksum a non-" -- "TCP/UDP packet, dropping a protocol" -- " %d packet", iph->protocol); -- goto out; -- } -- -- if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) -- goto out; -- -- err = 0; -- --out: -- return err; --} -- - static int handle_incoming_queue(struct net_device *dev, - struct sk_buff_head *rxq) - { -@@ -1795,7 +1754,6 @@ static int __devexit xennet_remove(struc +--- head-2011-03-17.orig/drivers/net/xen-netfront.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/net/xen-netfront.c 2011-02-17 10:10:21.000000000 +0100 +@@ -1878,7 +1878,6 @@ static int __devexit xennet_remove(struc static struct xenbus_driver netfront_driver = { .name = "vif", @@ -2721,8 +2687,8 @@ Acked-by: jbeulich@novell.com .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(xennet_remove), ---- head-2010-05-25.orig/drivers/xen/Kconfig 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/Kconfig 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-01-31 17:49:31.000000000 +0100 @@ -23,6 +23,9 @@ config XEN_PRIVILEGED_GUEST config XEN_UNPRIVILEGED_GUEST def_bool !XEN_PRIVILEGED_GUEST @@ -2733,9 +2699,9 @@ Acked-by: jbeulich@novell.com config XEN_PRIVCMD def_bool y ---- head-2010-05-25.orig/drivers/xen/Makefile 2010-01-19 16:01:03.000000000 +0100 -+++ head-2010-05-25/drivers/xen/Makefile 2010-03-24 15:09:22.000000000 +0100 -@@ -1,10 +1,12 @@ +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-24 13:56:24.000000000 
+0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-01-31 17:49:31.000000000 +0100 +@@ -1,10 +1,14 @@ -obj-y += core/ -obj-y += console/ -obj-y += evtchn/ @@ -2750,12 +2716,14 @@ Acked-by: jbeulich@novell.com +obj-y += xenbus/ +obj-$(CONFIG_XEN) += char/ + -+obj-$(CONFIG_XEN) += util.o ++xen-backend-$(CONFIG_XEN_BACKEND) := util.o ++ ++obj-$(CONFIG_XEN) += $(xen-backend-y) $(xen-backend-m) obj-$(CONFIG_XEN_BALLOON) += balloon/ obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ ---- head-2010-05-25.orig/drivers/xen/balloon/balloon.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/balloon/balloon.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/balloon/balloon.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/balloon/balloon.c 2011-01-31 17:49:31.000000000 +0100 @@ -324,13 +324,9 @@ static int increase_reservation(unsigned #ifndef MODULE @@ -2770,8 +2738,8 @@ Acked-by: jbeulich@novell.com vm_total_pages = nr_free_pagecache_pages(); #endif ---- head-2010-05-25.orig/drivers/xen/blkback/blkback.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/blkback.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkback/blkback.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/blkback.c 2011-01-31 17:49:31.000000000 +0100 @@ -156,7 +156,7 @@ static void unplug_queue(blkif_t *blkif) static void plug_queue(blkif_t *blkif, struct block_device *bdev) @@ -2781,28 +2749,36 @@ Acked-by: jbeulich@novell.com if (q == blkif->plug) return; ---- head-2010-05-25.orig/drivers/xen/blkback/common.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/common.h 2010-03-24 15:09:22.000000000 +0100 -@@ -82,7 +82,7 @@ typedef struct blkif_st { +--- head-2011-03-17.orig/drivers/xen/blkback/common.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/common.h 2011-01-31 17:49:31.000000000 +0100 +@@ -41,7 +41,6 @@ + #include + #include + #include +-#include + #include + #include "blkback-pagemap.h" + +@@ -82,7 +81,7 @@ typedef struct blkif_st { wait_queue_head_t wq; struct task_struct *xenblkd; unsigned int waiting_reqs; - request_queue_t *plug; + struct request_queue *plug; - int is_suspended_req; - blkif_request_t suspended_req; ---- head-2010-05-25.orig/drivers/xen/blkback/interface.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/interface.c 2010-03-24 15:09:22.000000000 +0100 -@@ -181,5 +181,5 @@ void blkif_free(blkif_t *blkif) + /* statistics */ + unsigned long st_print; +--- head-2011-03-17.orig/drivers/xen/blkback/interface.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/interface.c 2011-01-31 17:49:31.000000000 +0100 +@@ -178,5 +178,5 @@ void blkif_free(blkif_t *blkif) void __init blkif_interface_init(void) { blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), - 0, 0, NULL, NULL); + 0, 0, NULL); } ---- head-2010-05-25.orig/drivers/xen/blkback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/xenbus.c 2010-03-24 15:10:17.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/xenbus.c 2011-01-31 17:49:31.000000000 +0100 @@ -27,8 +27,6 @@ pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) @@ -2839,9 +2815,31 @@ Acked-by: jbeulich@novell.com return 0; } ---- 
head-2010-05-25.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/blkfront.c 2010-03-24 15:09:22.000000000 +0100 -@@ -688,7 +688,7 @@ static int blkif_queue_request(struct re +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-01-31 17:49:31.000000000 +0100 +@@ -574,14 +574,20 @@ int blkif_ioctl(struct inode *inode, str + return -EINVAL; + } + default: +- if (info->mi && info->gd) { ++ if (info->mi && info->gd && info->rq) { + switch (info->mi->major) { + case SCSI_DISK0_MAJOR: + case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: + case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: + case SCSI_CDROM_MAJOR: ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) + return scsi_cmd_ioctl(filep, info->gd, command, + (void __user *)argument); ++#else ++ return scsi_cmd_ioctl(filep, info->rq, ++ info->gd, command, ++ (void __user *)argument); ++#endif + } + } + +@@ -698,7 +704,7 @@ static int blkif_queue_request(struct re * do_blkif_request * read a block; request is in a request queue */ @@ -2850,8 +2848,8 @@ Acked-by: jbeulich@novell.com { struct blkfront_info *info = NULL; struct request *req; ---- head-2010-05-25.orig/drivers/xen/blkfront/block.h 2010-02-24 13:13:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/block.h 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkfront/block.h 2010-02-24 13:13:46.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/block.h 2011-01-31 17:49:31.000000000 +0100 @@ -107,7 +107,7 @@ struct blkfront_info struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; @@ -2870,8 +2868,8 @@ Acked-by: jbeulich@novell.com /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected ---- head-2010-05-25.orig/drivers/xen/blkfront/vbd.c 2010-01-18 15:23:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/vbd.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2010-01-18 15:23:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-01-31 17:49:31.000000000 +0100 @@ -298,7 +298,7 @@ xlbd_release_minors(struct xlbd_major_in static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) @@ -2881,9 +2879,17 @@ Acked-by: jbeulich@novell.com rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) ---- head-2010-05-25.orig/drivers/xen/blktap/common.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap/common.h 2010-03-24 15:09:22.000000000 +0100 -@@ -68,7 +68,7 @@ typedef struct blkif_st { +--- head-2011-03-17.orig/drivers/xen/blktap/common.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/common.h 2011-01-31 17:49:31.000000000 +0100 +@@ -40,7 +40,6 @@ + #include + #include + #include +-#include + + #define DPRINTK(_f, _a...) 
pr_debug("(file=%s, line=%d) " _f, \ + __FILE__ , __LINE__ , ## _a ) +@@ -68,7 +67,7 @@ typedef struct blkif_st { wait_queue_head_t wq; struct task_struct *xenblkd; unsigned int waiting_reqs; @@ -2892,17 +2898,17 @@ Acked-by: jbeulich@novell.com /* statistics */ unsigned long st_print; ---- head-2010-05-25.orig/drivers/xen/blktap/interface.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap/interface.c 2010-03-24 15:09:22.000000000 +0100 -@@ -181,5 +181,5 @@ void tap_blkif_kmem_cache_free(blkif_t * +--- head-2011-03-17.orig/drivers/xen/blktap/interface.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/interface.c 2011-01-31 17:49:31.000000000 +0100 +@@ -178,5 +178,5 @@ void tap_blkif_kmem_cache_free(blkif_t * void __init tap_blkif_interface_init(void) { blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t), - 0, 0, NULL, NULL); + 0, 0, NULL); } ---- head-2010-05-25.orig/drivers/xen/blktap/xenbus.c 2010-04-29 09:47:21.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap/xenbus.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blktap/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/xenbus.c 2011-01-31 17:49:31.000000000 +0100 @@ -50,8 +50,6 @@ struct backend_info int group_added; }; @@ -2939,8 +2945,8 @@ Acked-by: jbeulich@novell.com return 0; } ---- head-2010-05-25.orig/drivers/xen/blktap2/blktap.h 2010-02-24 13:13:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/blktap.h 2010-05-19 17:47:46.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/blktap2/blktap.h 2010-02-24 13:13:46.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/blktap.h 2011-01-31 17:49:31.000000000 +0100 @@ -203,6 +203,7 @@ blktap_validate_params(struct blktap *ta } @@ -2949,8 +2955,8 @@ Acked-by: jbeulich@novell.com int blktap_ring_init(int *); int blktap_ring_free(void); ---- head-2010-05-25.orig/drivers/xen/blktap2/control.c 2010-04-29 09:34:47.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap2/control.c 2010-05-19 17:51:54.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2011-02-24 15:15:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-01-31 17:49:31.000000000 +0100 @@ -194,14 +194,20 @@ blktap_control_destroy_device(struct blk clear_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse); @@ -2975,9 +2981,9 @@ Acked-by: jbeulich@novell.com static int __init blktap_control_init(void) { ---- head-2010-05-25.orig/drivers/xen/blktap2/device.c 2010-03-02 09:56:10.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/device.c 2010-03-24 15:09:22.000000000 +0100 -@@ -836,7 +836,7 @@ static void +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2010-11-25 09:36:37.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-01-31 17:49:31.000000000 +0100 +@@ -833,7 +833,7 @@ static void blktap_device_run_queue(struct blktap *tap) { int queued, err; @@ -2986,7 +2992,7 @@ Acked-by: jbeulich@novell.com struct request *req; struct blktap_ring *ring; struct blktap_device *dev; -@@ -913,7 +913,7 @@ blktap_device_run_queue(struct blktap *t +@@ -910,7 +910,7 @@ blktap_device_run_queue(struct blktap *t * dev->lock held on entry */ static void @@ -2995,7 +3001,7 @@ Acked-by: jbeulich@novell.com { struct request *req; struct blktap *tap; -@@ -1189,6 +1189,5 @@ void +@@ -1186,6 +1186,5 @@ void blktap_device_free(void) { if (blktap_device_major) @@ -3003,8 +3009,8 @@ Acked-by: jbeulich@novell.com - BTERR("blktap device 
unregister failed\n"); + unregister_blkdev(blktap_device_major, "tapdev"); } ---- head-2010-05-25.orig/drivers/xen/blktap2/sysfs.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/sysfs.c 2010-05-19 17:49:22.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/blktap2/sysfs.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/sysfs.c 2011-01-31 17:49:31.000000000 +0100 @@ -334,6 +334,24 @@ blktap_sysfs_create(struct blktap *tap) return err; } @@ -3049,9 +3055,19 @@ Acked-by: jbeulich@novell.com } static ssize_t ---- head-2010-05-25.orig/drivers/xen/core/reboot.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/reboot.c 2010-03-24 15:09:22.000000000 +0100 -@@ -4,6 +4,7 @@ +--- head-2011-03-17.orig/drivers/xen/core/gnttab.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/gnttab.c 2011-01-31 17:49:31.000000000 +0100 +@@ -42,7 +42,6 @@ + #include + #include + #include +-#include + #include + + #ifdef HAVE_XEN_PLATFORM_COMPAT_H +--- head-2011-03-17.orig/drivers/xen/core/reboot.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/reboot.c 2011-01-31 17:49:31.000000000 +0100 +@@ -3,6 +3,7 @@ #include #include #include @@ -3059,9 +3075,29 @@ Acked-by: jbeulich@novell.com #include #include #include ---- head-2010-05-25.orig/drivers/xen/scsiback/interface.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsiback/interface.c 2010-03-24 15:09:22.000000000 +0100 -@@ -171,7 +171,7 @@ void scsiback_free(struct vscsibk_info * +--- head-2011-03-17.orig/drivers/xen/netback/common.h 2011-02-17 10:09:57.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/common.h 2011-01-31 17:49:31.000000000 +0100 +@@ -44,7 +44,6 @@ + #include + #include + #include +-#include + #include + + #define DPRINTK(_f, _a...) 
\ +--- head-2011-03-17.orig/drivers/xen/scsiback/common.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/common.h 2011-01-31 17:49:31.000000000 +0100 +@@ -55,7 +55,6 @@ + #include + #include + #include +-#include + #include + #include + #include +--- head-2011-03-17.orig/drivers/xen/scsiback/interface.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/interface.c 2011-01-31 17:49:31.000000000 +0100 +@@ -166,7 +166,7 @@ void scsiback_free(struct vscsibk_info * int __init scsiback_interface_init(void) { scsiback_cachep = kmem_cache_create("vscsiif_cache", @@ -3070,8 +3106,8 @@ Acked-by: jbeulich@novell.com if (!scsiback_cachep) { printk(KERN_ERR "scsiback: can't init scsi cache\n"); return -ENOMEM; ---- head-2010-05-25.orig/drivers/xen/scsifront/scsifront.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsifront/scsifront.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/scsifront/scsifront.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsifront/scsifront.c 2011-02-08 10:03:55.000000000 +0100 @@ -147,7 +147,7 @@ static void scsifront_cdb_cmd_done(struc add_id_to_freelist(info, id); @@ -3081,8 +3117,58 @@ Acked-by: jbeulich@novell.com if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; ---- head-2010-05-25.orig/drivers/xen/tpmback/interface.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/tpmback/interface.c 2010-03-24 15:09:22.000000000 +0100 +@@ -351,8 +351,7 @@ big_to_sg: + static int scsifront_queuecommand(struct scsi_cmnd *sc, + void (*done)(struct scsi_cmnd *)) + { +- struct vscsifrnt_info *info = +- (struct vscsifrnt_info *) sc->device->host->hostdata; ++ struct vscsifrnt_info *info = shost_priv(sc->device->host); + vscsiif_request_t *ring_req; + int ref_cnt; + uint16_t rqid; +@@ -428,8 +427,7 @@ static int scsifront_eh_abort_handler(st + static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) + { + struct Scsi_Host *host = sc->device->host; +- struct vscsifrnt_info *info = +- (struct vscsifrnt_info *) sc->device->host->hostdata; ++ struct vscsifrnt_info *info = shost_priv(host); + + vscsiif_request_t *ring_req; + uint16_t rqid; +--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_solarflare.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/accel_solarflare.c 2011-01-31 17:49:31.000000000 +0100 +@@ -37,7 +37,6 @@ + #include "ci/efhw/public.h" + + #include +-#include + #include + #include + +--- head-2011-03-17.orig/drivers/xen/sfc_netutil/accel_util.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/drivers/xen/sfc_netutil/accel_util.c 2011-01-31 17:49:31.000000000 +0100 +@@ -28,7 +28,6 @@ + #include + #include + #include +-#include + #include + + #include "accel_util.h" +--- head-2011-03-17.orig/drivers/xen/tpmback/common.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/tpmback/common.h 2011-01-31 17:49:31.000000000 +0100 +@@ -10,7 +10,6 @@ + #include + #include + #include +-#include + #include + #include + #include +--- head-2011-03-17.orig/drivers/xen/tpmback/interface.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/tpmback/interface.c 2011-01-31 17:49:31.000000000 +0100 @@ -13,6 +13,7 @@ #include "common.h" @@ -3091,7 +3177,7 @@ Acked-by: jbeulich@novell.com #include #include -@@ -163,7 +164,7 @@ void tpmif_disconnect_complete(tpmif_t * +@@ -160,7 +161,7 @@ void tpmif_disconnect_complete(tpmif_t * 
int __init tpmif_interface_init(void) { tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t), @@ -3100,8 +3186,18 @@ Acked-by: jbeulich@novell.com return tpmif_cachep ? 0 : -ENOMEM; } ---- head-2010-05-25.orig/drivers/xen/usbfront/xenbus.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbfront/xenbus.c 2010-04-15 09:53:49.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/usbback/usbback.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/usbback.h 2011-01-31 17:49:31.000000000 +0100 +@@ -57,7 +57,6 @@ + #include + #include + #include +-#include + #include + #include + #include +--- head-2011-03-17.orig/drivers/xen/usbfront/xenbus.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/xenbus.c 2011-01-31 17:49:31.000000000 +0100 @@ -393,7 +393,7 @@ static int __init usbfront_init(void) return -ENODEV; @@ -3111,8 +3207,8 @@ Acked-by: jbeulich@novell.com if (!xenhcd_urbp_cachep) { printk(KERN_ERR "usbfront failed to create kmem cache\n"); return -ENOMEM; ---- head-2010-05-25.orig/drivers/xen/util.c 2007-07-10 09:42:30.000000000 +0200 -+++ head-2010-05-25/drivers/xen/util.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/util.c 2007-07-10 09:42:30.000000000 +0200 ++++ head-2011-03-17/drivers/xen/util.c 2011-01-31 17:49:31.000000000 +0100 @@ -1,8 +1,5 @@ -#include +#include @@ -3169,9 +3265,22 @@ Acked-by: jbeulich@novell.com -} -EXPORT_SYMBOL_GPL(free_vm_area); -#endif /* CONFIG_X86 */ ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_client.c 2010-01-19 16:01:03.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:09:22.000000000 +0100 -@@ -30,19 +30,26 @@ +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_backend_client.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_backend_client.c 2011-01-31 17:49:31.000000000 +0100 +@@ -32,9 +32,9 @@ + + #include + #include ++#include + #include + #include +-#include + + /* Based on Rusty Russell's skeleton driver's map_page */ + struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref) +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_client.c 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_client.c 2011-01-31 17:49:31.000000000 +0100 +@@ -30,19 +30,25 @@ * IN THE SOFTWARE. 
*/ @@ -3179,8 +3288,6 @@ Acked-by: jbeulich@novell.com #include #include #include --#include - #include +#else +#include +#include @@ -3190,7 +3297,8 @@ Acked-by: jbeulich@novell.com +#include +#include +#endif -+#include + #include +-#include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include @@ -3202,7 +3310,7 @@ Acked-by: jbeulich@novell.com const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { -@@ -60,6 +67,20 @@ const char *xenbus_strstate(enum xenbus_ +@@ -60,6 +66,20 @@ const char *xenbus_strstate(enum xenbus_ } EXPORT_SYMBOL_GPL(xenbus_strstate); @@ -3223,7 +3331,7 @@ Acked-by: jbeulich@novell.com int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, -@@ -83,6 +104,7 @@ int xenbus_watch_path(struct xenbus_devi +@@ -83,6 +103,7 @@ int xenbus_watch_path(struct xenbus_devi EXPORT_SYMBOL_GPL(xenbus_watch_path); @@ -3231,7 +3339,7 @@ Acked-by: jbeulich@novell.com int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, -@@ -101,8 +123,60 @@ int xenbus_watch_path2(struct xenbus_dev +@@ -101,8 +122,60 @@ int xenbus_watch_path2(struct xenbus_dev return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); @@ -3260,11 +3368,11 @@ Acked-by: jbeulich@novell.com + int err; + va_list ap; + char *path; - ++ + va_start(ap, pathfmt); + path = kvasprintf(GFP_KERNEL, pathfmt, ap); + va_end(ap); - ++ + if (!path) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); + return -ENOMEM; @@ -3277,8 +3385,8 @@ Acked-by: jbeulich@novell.com +} +EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); +#endif -+ -+ + + +/** + * xenbus_switch_state + * @dev: xenbus device @@ -3292,7 +3400,7 @@ Acked-by: jbeulich@novell.com int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and -@@ -161,8 +235,8 @@ static char *error_path(struct xenbus_de +@@ -161,8 +234,8 @@ static char *error_path(struct xenbus_de } @@ -3303,7 +3411,7 @@ Acked-by: jbeulich@novell.com { int ret; unsigned int len; -@@ -183,14 +257,16 @@ void _dev_error(struct xenbus_device *de +@@ -183,14 +256,16 @@ void _dev_error(struct xenbus_device *de path_buffer = error_path(dev); if (path_buffer == NULL) { @@ -3324,7 +3432,7 @@ Acked-by: jbeulich@novell.com goto fail; } -@@ -200,6 +276,15 @@ fail: +@@ -200,6 +275,15 @@ fail: } @@ -3337,10 +3445,10 @@ Acked-by: jbeulich@novell.com + * Report the given negative errno into the store, along with the given + * formatted message. + */ - void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, - ...) + void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { -@@ -212,6 +297,16 @@ void xenbus_dev_error(struct xenbus_devi + va_list ap; +@@ -211,6 +295,16 @@ void xenbus_dev_error(struct xenbus_devi EXPORT_SYMBOL_GPL(xenbus_dev_error); @@ -3354,10 +3462,10 @@ Acked-by: jbeulich@novell.com + * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly + * closedown of this driver and its peer. + */ - void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, - ...) + void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) 
{ -@@ -226,6 +321,15 @@ void xenbus_dev_fatal(struct xenbus_devi + va_list ap; +@@ -224,6 +318,15 @@ void xenbus_dev_fatal(struct xenbus_devi EXPORT_SYMBOL_GPL(xenbus_dev_fatal); @@ -3373,7 +3481,7 @@ Acked-by: jbeulich@novell.com int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); -@@ -236,6 +340,12 @@ int xenbus_grant_ring(struct xenbus_devi +@@ -234,6 +337,12 @@ int xenbus_grant_ring(struct xenbus_devi EXPORT_SYMBOL_GPL(xenbus_grant_ring); @@ -3386,7 +3494,7 @@ Acked-by: jbeulich@novell.com int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; -@@ -256,6 +366,38 @@ int xenbus_alloc_evtchn(struct xenbus_de +@@ -254,6 +363,38 @@ int xenbus_alloc_evtchn(struct xenbus_de EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); @@ -3425,7 +3533,7 @@ Acked-by: jbeulich@novell.com int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; -@@ -272,6 +414,191 @@ int xenbus_free_evtchn(struct xenbus_dev +@@ -270,6 +411,191 @@ int xenbus_free_evtchn(struct xenbus_dev EXPORT_SYMBOL_GPL(xenbus_free_evtchn); @@ -3617,8 +3725,8 @@ Acked-by: jbeulich@novell.com enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_comms.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_comms.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:49:31.000000000 +0100 @@ -34,12 +34,15 @@ #include #include @@ -3639,7 +3747,15 @@ Acked-by: jbeulich@novell.com #include "xenbus_comms.h" -@@ -112,6 +115,13 @@ static const void *get_input_chunk(XENST +@@ -49,7 +52,6 @@ + + static int xenbus_irq; + +-extern void xenbus_probe(struct work_struct *); + static DECLARE_WORK(probe_work, xenbus_probe); + + static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); +@@ -112,6 +114,13 @@ static const void *get_input_chunk(XENST return buf + MASK_XENSTORE_IDX(cons); } @@ -3653,7 +3769,7 @@ Acked-by: jbeulich@novell.com int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; -@@ -220,7 +230,9 @@ int xb_read(void *data, unsigned len) +@@ -220,7 +229,9 @@ int xb_read(void *data, unsigned len) return 0; } @@ -3664,7 +3780,7 @@ Acked-by: jbeulich@novell.com int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; -@@ -240,7 +252,11 @@ int xb_init_comms(void) +@@ -240,7 +251,11 @@ int xb_init_comms(void) if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); @@ -3676,8 +3792,8 @@ Acked-by: jbeulich@novell.com xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:49:31.000000000 +0100 @@ -43,21 +43,26 @@ #include #include @@ -3709,7 +3825,7 @@ Acked-by: jbeulich@novell.com #include "xenbus_comms.h" #include "xenbus_probe.h" -@@ -169,7 +174,7 @@ static int read_backend_details(struct x +@@ -170,7 +175,7 @@ static int read_backend_details(struct x return read_otherend_details(xendev, 
"backend-id", "backend"); } @@ -3718,7 +3834,7 @@ Acked-by: jbeulich@novell.com static int xenbus_uevent_frontend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { -@@ -208,12 +213,16 @@ static struct xen_bus_type xenbus_fronte +@@ -209,12 +214,16 @@ static struct xen_bus_type xenbus_fronte .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, @@ -3735,7 +3851,7 @@ Acked-by: jbeulich@novell.com }; static void otherend_changed(struct xenbus_watch *watch, -@@ -229,14 +238,15 @@ static void otherend_changed(struct xenb +@@ -230,14 +239,15 @@ static void otherend_changed(struct xenb if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { @@ -3754,7 +3870,7 @@ Acked-by: jbeulich@novell.com #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* -@@ -272,8 +282,13 @@ static int talk_to_otherend(struct xenbu +@@ -274,8 +284,13 @@ static int talk_to_otherend(struct xenbu static int watch_otherend(struct xenbus_device *dev) { @@ -3768,7 +3884,7 @@ Acked-by: jbeulich@novell.com } -@@ -299,9 +314,9 @@ int xenbus_dev_probe(struct device *_dev +@@ -301,9 +316,9 @@ int xenbus_dev_probe(struct device *_dev err = talk_to_otherend(dev); if (err) { @@ -3781,7 +3897,7 @@ Acked-by: jbeulich@novell.com return err; } -@@ -311,9 +326,9 @@ int xenbus_dev_probe(struct device *_dev +@@ -313,9 +328,9 @@ int xenbus_dev_probe(struct device *_dev err = watch_otherend(dev); if (err) { @@ -3794,7 +3910,7 @@ Acked-by: jbeulich@novell.com return err; } -@@ -358,8 +373,8 @@ static void xenbus_dev_shutdown(struct d +@@ -360,8 +375,8 @@ static void xenbus_dev_shutdown(struct d get_device(&dev->dev); if (dev->state != XenbusStateConnected) { @@ -3805,7 +3921,7 @@ Acked-by: jbeulich@novell.com goto out; } xenbus_switch_state(dev, XenbusStateClosing); -@@ -369,7 +384,8 @@ static void xenbus_dev_shutdown(struct d +@@ -371,7 +386,8 @@ static void xenbus_dev_shutdown(struct d timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) @@ -3815,7 +3931,7 @@ Acked-by: jbeulich@novell.com out: put_device(&dev->dev); } -@@ -557,7 +573,9 @@ int xenbus_probe_node(struct xen_bus_typ +@@ -559,7 +575,9 @@ int xenbus_probe_node(struct xen_bus_typ xendev->devicetype = tmpstring; init_completion(&xendev->down); @@ -3825,7 +3941,7 @@ Acked-by: jbeulich@novell.com xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; -@@ -572,15 +590,16 @@ int xenbus_probe_node(struct xen_bus_typ +@@ -574,15 +592,16 @@ int xenbus_probe_node(struct xen_bus_typ err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) @@ -3846,7 +3962,7 @@ Acked-by: jbeulich@novell.com device_unregister(&xendev->dev); fail: kfree(xendev); -@@ -596,7 +615,8 @@ static int xenbus_probe_frontend(const c +@@ -598,7 +617,8 @@ static int xenbus_probe_frontend(const c if (!strcmp(type, "console")) return 0; @@ -3856,7 +3972,7 @@ Acked-by: jbeulich@novell.com if (!nodename) return -ENOMEM; -@@ -672,7 +692,7 @@ static int strsep_len(const char *str, c +@@ -676,7 +696,7 @@ static int strsep_len(const char *str, c return (len == 0) ? 
i : -ERANGE; } @@ -3865,7 +3981,7 @@ Acked-by: jbeulich@novell.com { int exists, rootlen; struct xenbus_device *dev; -@@ -680,7 +700,7 @@ void dev_changed(const char *node, struc +@@ -684,7 +704,7 @@ void dev_changed(const char *node, struc const char *p, *root; if (bus->error || char_count(node, '/') < 2) @@ -3874,7 +3990,7 @@ Acked-by: jbeulich@novell.com exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { -@@ -714,7 +734,7 @@ static void frontend_changed(struct xenb +@@ -718,7 +738,7 @@ static void frontend_changed(struct xenb { DPRINTK(""); @@ -3883,15 +3999,24 @@ Acked-by: jbeulich@novell.com } /* We watch for devices appearing and vanishing. */ -@@ -939,6 +959,7 @@ static int xsd_port_read(char *page, cha +@@ -943,6 +963,7 @@ static int xsd_port_read(char *page, cha } #endif -+#if defined(CONFIG_XEN) || defined(MODULE) ++#if defined(CONFIG_XEN_XENBUS_DEV) || defined(MODULE) static int xb_free_port(evtchn_port_t port) { struct evtchn_close close; -@@ -994,11 +1015,18 @@ fail0: +@@ -958,7 +979,7 @@ int xenbus_conn(domid_t remote_dom, unsi + BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); + BUG_ON(!is_initial_xendomain()); + +-#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) ++#ifdef CONFIG_XEN_PRIVILEGED_GUEST + remove_xen_proc_entry("xsd_kva"); + remove_xen_proc_entry("xsd_port"); + #endif +@@ -998,11 +1019,18 @@ fail0: xen_store_evtchn = -1; return rc; } @@ -3911,15 +4036,15 @@ Acked-by: jbeulich@novell.com DPRINTK(""); -@@ -1017,6 +1045,7 @@ static int xenbus_probe_init(void) +@@ -1021,6 +1049,7 @@ static int xenbus_probe_init(void) * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { +#if defined(CONFIG_XEN) || defined(MODULE) struct evtchn_alloc_unbound alloc_unbound; - /* Allocate page. */ -@@ -1054,10 +1083,13 @@ static int xenbus_probe_init(void) + /* Allocate Xenstore page */ +@@ -1059,10 +1088,13 @@ static int xenbus_probe_init(void) if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif @@ -3934,7 +4059,7 @@ Acked-by: jbeulich@novell.com xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); -@@ -1073,7 +1105,9 @@ static int xenbus_probe_init(void) +@@ -1078,7 +1110,9 @@ static int xenbus_probe_init(void) goto err; } @@ -3944,7 +4069,7 @@ Acked-by: jbeulich@novell.com /* Initialize the interface to xenstore. */ err = xs_init(); -@@ -1083,6 +1117,7 @@ static int xenbus_probe_init(void) +@@ -1088,6 +1122,7 @@ static int xenbus_probe_init(void) goto err; } @@ -3952,7 +4077,7 @@ Acked-by: jbeulich@novell.com /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); -@@ -1093,6 +1128,7 @@ static int xenbus_probe_init(void) +@@ -1098,6 +1133,7 @@ static int xenbus_probe_init(void) xenbus_frontend.error); } } @@ -3960,18 +4085,14 @@ Acked-by: jbeulich@novell.com xenbus_backend_device_register(); if (!is_initial_xendomain()) -@@ -1101,8 +1137,10 @@ static int xenbus_probe_init(void) - return 0; +@@ -1112,16 +1148,22 @@ static int xenbus_probe_init(void) + * registered. + */ - err: +#if defined(CONFIG_XEN) || defined(MODULE) - if (page) + if (page != 0) free_page(page); +#endif - - /* - * Do not unregister the xenbus front/backend buses here. 
The buses -@@ -1113,11 +1151,15 @@ static int xenbus_probe_init(void) return err; } @@ -3989,8 +4110,8 @@ Acked-by: jbeulich@novell.com { return xenbus_probe_init(); } ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.h 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 17:49:31.000000000 +0100 @@ -62,7 +62,9 @@ struct xen_bus_type int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); @@ -4009,8 +4130,8 @@ Acked-by: jbeulich@novell.com +extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); #endif ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:49:31.000000000 +0100 @@ -236,7 +236,7 @@ static void backend_changed(struct xenbu { DPRINTK(""); @@ -4020,18 +4141,9 @@ Acked-by: jbeulich@novell.com } static struct xenbus_watch be_watch = { ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_xs.c 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_xs.c 2010-03-24 15:09:22.000000000 +0100 -@@ -256,7 +256,7 @@ static void *xs_talkv(struct xenbus_tran - } - - for (i = 0; i < num_vecs; i++) { -- err = xb_write(iovec[i].iov_base, iovec[i].iov_len);; -+ err = xb_write(iovec[i].iov_base, iovec[i].iov_len); - if (err) { - mutex_unlock(&xs_state.request_mutex); - return ERR_PTR(err); -@@ -665,7 +665,9 @@ void unregister_xenbus_watch(struct xenb +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:49:31.000000000 +0100 +@@ -662,7 +662,9 @@ void unregister_xenbus_watch(struct xenb char token[sizeof(watch) * 2 + 1]; int err; @@ -4041,7 +4153,7 @@ Acked-by: jbeulich@novell.com sprintf(token, "%lX", (long)watch); -@@ -684,6 +686,11 @@ void unregister_xenbus_watch(struct xenb +@@ -681,6 +683,11 @@ void unregister_xenbus_watch(struct xenb up_read(&xs_state.watch_mutex); @@ -4053,7 +4165,7 @@ Acked-by: jbeulich@novell.com /* Cancel pending watch events. 
*/ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { -@@ -695,11 +702,8 @@ void unregister_xenbus_watch(struct xenb +@@ -692,11 +699,8 @@ void unregister_xenbus_watch(struct xenb } spin_unlock(&watch_events_lock); @@ -4066,7 +4178,7 @@ Acked-by: jbeulich@novell.com } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); -@@ -737,6 +741,7 @@ void xs_suspend_cancel(void) +@@ -734,6 +738,7 @@ void xs_suspend_cancel(void) mutex_unlock(&xs_state.transaction_mutex); } @@ -4074,7 +4186,7 @@ Acked-by: jbeulich@novell.com static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; -@@ -754,6 +759,7 @@ static int xenwatch_handle_callback(void +@@ -751,6 +756,7 @@ static int xenwatch_handle_callback(void return 0; } @@ -4082,7 +4194,7 @@ Acked-by: jbeulich@novell.com static int xenwatch_thread(void *unused) { -@@ -783,6 +789,7 @@ static int xenwatch_thread(void *unused) +@@ -780,6 +786,7 @@ static int xenwatch_thread(void *unused) msg = list_entry(ent, struct xs_stored_msg, list); @@ -4090,7 +4202,7 @@ Acked-by: jbeulich@novell.com /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock -@@ -799,6 +806,15 @@ static int xenwatch_thread(void *unused) +@@ -796,6 +803,15 @@ static int xenwatch_thread(void *unused) xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } @@ -4106,8 +4218,18 @@ Acked-by: jbeulich@novell.com } return 0; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:09:22.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:49:31.000000000 +0100 +@@ -26,7 +26,6 @@ + #include + #include + #include +-#include + #include + #include + #include "../../../drivers/oprofile/cpu_buffer.h" +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:49:31.000000000 +0100 @@ -53,6 +53,8 @@ extern unsigned long __FIXADDR_TOP; enum fixed_addresses { FIX_HOLE, @@ -4117,8 +4239,8 @@ Acked-by: jbeulich@novell.com #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:03:18.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:04:33.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:49:31.000000000 +0100 @@ -75,8 +75,7 @@ struct page *kmap_atomic_to_page(void *p #define kmap_atomic_pte(page, type) \ @@ -4129,8 +4251,8 @@ Acked-by: jbeulich@novell.com #define flush_cache_kmaps() do { } while (0) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/maddr_32.h 2008-04-02 12:34:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/maddr_32.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/maddr_32.h 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/maddr_32.h 2011-01-31 17:49:31.000000000 +0100 @@ -155,6 +155,7 @@ static inline paddr_t pte_machine_to_phy #ifdef CONFIG_X86_PAE @@ -4139,8 +4261,8 @@ Acked-by: jbeulich@novell.com 
static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:49:31.000000000 +0100 @@ -16,7 +16,7 @@ void mm_pin_all(void); static inline void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) @@ -4168,8 +4290,8 @@ Acked-by: jbeulich@novell.com /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:49:31.000000000 +0100 @@ -5,7 +5,7 @@ #include /* for struct page */ #include /* for phys_to_virt and page_to_pseudophys */ @@ -4198,8 +4320,8 @@ Acked-by: jbeulich@novell.com if (!PageHighMem(pte)) \ BUG_ON(HYPERVISOR_update_va_mapping( \ (unsigned long)__va(pfn << PAGE_SHIFT), \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:07.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:23.000000000 +0100 @@ -26,9 +26,6 @@ #include #include @@ -4331,8 +4453,8 @@ Acked-by: jbeulich@novell.com xen_l1_entry_update(ptep, __pte(0)); \ else \ *(ptep) = __pte(0); \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:49:31.000000000 +0100 @@ -23,26 +23,11 @@ #define pud_present(pud) 1 @@ -4361,8 +4483,8 @@ Acked-by: jbeulich@novell.com } /* Rules for using set_pte: the pte being assigned *must* be ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:49:31.000000000 +0100 @@ -89,7 +89,6 @@ struct cpuinfo_x86 { #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 @@ -4408,8 +4530,8 @@ Acked-by: jbeulich@novell.com /* * Size of io_bitmap. 
*/ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:49:31.000000000 +0100 @@ -205,11 +205,6 @@ static inline unsigned long get_limit(un */ @@ -4438,8 +4560,8 @@ Acked-by: jbeulich@novell.com extern unsigned long arch_align_stack(unsigned long sp); extern void free_init_pages(char *what, unsigned long begin, unsigned long end); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:49:31.000000000 +0100 @@ -91,7 +91,11 @@ struct tlb_state DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); #endif /* SMP */ @@ -4453,8 +4575,8 @@ Acked-by: jbeulich@novell.com static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:49:31.000000000 +0100 @@ -23,9 +23,9 @@ * compile time, but to set the physical address only * in the boot process. 
@@ -4486,8 +4608,8 @@ Acked-by: jbeulich@novell.com } #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 17:49:31.000000000 +0100 @@ -76,7 +76,7 @@ static inline void switch_mm(struct mm_s if (likely(prev != next)) { @@ -4506,8 +4628,8 @@ Acked-by: jbeulich@novell.com mm_pin(next); switch_mm(prev, next, NULL); } ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:49:31.000000000 +0100 @@ -21,7 +21,7 @@ static inline void pmd_populate_kernel(s static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) @@ -4535,8 +4657,8 @@ Acked-by: jbeulich@novell.com BUG_ON(HYPERVISOR_update_va_mapping( (unsigned long)pud, pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:08.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:18.000000000 +0100 @@ -304,7 +304,7 @@ static inline pte_t ptep_get_and_clear_f { if (full) { @@ -4610,7 +4732,7 @@ Acked-by: jbeulich@novell.com (void)ptep_set_access_flags(vma, address, ptep, __pte, __young); \ else if (__young) \ set_pte(ptep, __pte); \ -@@ -570,6 +544,8 @@ int xen_change_pte_range(struct mm_struc +@@ -566,6 +540,8 @@ int xen_change_pte_range(struct mm_struc #define arch_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable) \ xen_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable) @@ -4619,7 +4741,7 @@ Acked-by: jbeulich@novell.com #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO) -@@ -587,7 +563,6 @@ int xen_change_pte_range(struct mm_struc +@@ -583,7 +559,6 @@ int xen_change_pte_range(struct mm_struc (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? 
((o) | (~__VIRTUAL_MASK)) : (o)) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -4627,8 +4749,8 @@ Acked-by: jbeulich@novell.com #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_CLEAR_FLUSH ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:49:31.000000000 +0100 @@ -83,7 +83,6 @@ struct cpuinfo_x86 { #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 @@ -4677,8 +4799,8 @@ Acked-by: jbeulich@novell.com static inline void serialize_cpu(void) { __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:49:31.000000000 +0100 @@ -79,12 +79,16 @@ static inline unsigned long read_cr0(voi unsigned long cr0; asm volatile("movq %%cr0,%0" : "=r" (cr0)); @@ -4739,8 +4861,8 @@ Acked-by: jbeulich@novell.com #endif /* __KERNEL__ */ #define nop() __asm__ __volatile__ ("nop") ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:49:31.000000000 +0100 @@ -89,7 +89,11 @@ static inline void flush_tlb_range(struc #endif @@ -4754,9 +4876,9 @@ Acked-by: jbeulich@novell.com static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) ---- head-2010-05-25.orig/arch/x86/include/asm/thread_info.h 2010-03-24 15:06:08.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/asm/thread_info.h 2010-03-24 15:09:23.000000000 +0100 -@@ -153,7 +153,8 @@ struct thread_info { +--- head-2011-03-17.orig/arch/x86/include/asm/thread_info.h 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/thread_info.h 2011-01-31 17:49:31.000000000 +0100 +@@ -151,7 +151,8 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) #else @@ -4766,8 +4888,8 @@ Acked-by: jbeulich@novell.com #endif #define PREEMPT_ACTIVE 0x10000000 ---- head-2010-05-25.orig/arch/x86/include/asm/xen/interface.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/xen/interface.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/xen/interface.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/xen/interface.h 2011-01-31 17:49:31.000000000 +0100 @@ -10,17 +10,20 @@ #define _ASM_X86_XEN_INTERFACE_H @@ -4812,7 +4934,7 @@ Acked-by: jbeulich@novell.com #ifndef HYPERVISOR_VIRT_START @@ -66,7 +63,7 @@ DEFINE_GUEST_HANDLE(void); - #endif + #define MACH2PHYS_NR_ENTRIES 
((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT) /* Maximum number of virtual CPUs in multi-processor guests. */ -#define MAX_VIRT_CPUS 32 @@ -4820,8 +4942,8 @@ Acked-by: jbeulich@novell.com /* * SEGMENT DESCRIPTOR TABLES ---- head-2010-05-25.orig/include/linux/elfnote.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/elfnote.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/include/linux/elfnote.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/linux/elfnote.h 2011-01-31 17:49:31.000000000 +0100 @@ -52,7 +52,7 @@ 4484:.balign 4 ; \ .popsection ; @@ -4831,9 +4953,9 @@ Acked-by: jbeulich@novell.com ELFNOTE_START(name, type, "") \ desc ; \ ELFNOTE_END ---- head-2010-05-25.orig/include/linux/page-flags.h 2010-03-24 15:02:17.000000000 +0100 -+++ head-2010-05-25/include/linux/page-flags.h 2010-03-24 15:09:23.000000000 +0100 -@@ -126,8 +126,15 @@ enum pageflags { +--- head-2011-03-17.orig/include/linux/page-flags.h 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-03-17/include/linux/page-flags.h 2011-01-31 17:49:31.000000000 +0100 +@@ -125,8 +125,15 @@ enum pageflags { PG_fscache = PG_private_2, /* page backed by cache */ /* XEN */ @@ -4849,8 +4971,8 @@ Acked-by: jbeulich@novell.com /* SLOB */ PG_slob_free = PG_private, -@@ -441,10 +448,8 @@ PAGEFLAG_FALSE(MemError) - #define __PG_MLOCKED 0 +@@ -491,10 +498,8 @@ static inline int PageTransCompound(stru + #define __PG_COMPOUND_LOCK 0 #endif -#if !defined(CONFIG_XEN) @@ -4861,21 +4983,16 @@ Acked-by: jbeulich@novell.com #else # define __PG_XEN (1 << PG_foreign) #endif ---- head-2010-05-25.orig/include/linux/skbuff.h 2010-04-15 09:52:44.000000000 +0200 -+++ head-2010-05-25/include/linux/skbuff.h 2010-04-15 09:53:55.000000000 +0200 -@@ -2110,7 +2110,7 @@ static inline void skb_forward_csum(stru - - bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); - --#ifdef CONFIG_XEN -+#if defined(CONFIG_XEN) || defined(CONFIG_PARAVIRT_XEN) - int skb_checksum_setup(struct sk_buff *skb); - #else - static inline int skb_checksum_setup(struct sk_buff *skb) { return 0; } ---- head-2010-05-25.orig/include/xen/driver_util.h 2007-06-12 13:14:19.000000000 +0200 -+++ head-2010-05-25/include/xen/driver_util.h 2010-03-24 15:09:23.000000000 +0100 -@@ -5,10 +5,6 @@ - #include +--- head-2011-03-17.orig/include/xen/driver_util.h 2007-06-12 13:14:19.000000000 +0200 ++++ head-2011-03-17/include/xen/driver_util.h 2011-01-31 17:49:31.000000000 +0100 +@@ -1,14 +1,8 @@ ++#ifndef __XEN_DRIVER_UTIL_H__ ++#define __XEN_DRIVER_UTIL_H__ + +-#ifndef __ASM_XEN_DRIVER_UTIL_H__ +-#define __ASM_XEN_DRIVER_UTIL_H__ +- +-#include #include -/* Allocate/destroy a 'vmalloc' VM area. 
*/ @@ -4884,9 +5001,10 @@ Acked-by: jbeulich@novell.com - extern struct class *get_xen_class(void); - #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ ---- head-2010-05-25.orig/include/xen/features.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/xen/features.h 2010-03-24 15:09:23.000000000 +0100 +-#endif /* __ASM_XEN_DRIVER_UTIL_H__ */ ++#endif /* __XEN_DRIVER_UTIL_H__ */ +--- head-2011-03-17.orig/include/xen/features.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/xen/features.h 2011-01-31 17:49:31.000000000 +0100 @@ -10,6 +10,7 @@ #define __XEN_FEATURES_H__ @@ -4901,11 +5019,11 @@ Acked-by: jbeulich@novell.com -#endif /* __ASM_XEN_FEATURES_H__ */ +#endif /* __XEN_FEATURES_H__ */ ---- head-2010-05-25.orig/include/xen/interface/arch-x86/xen.h 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/arch-x86/xen.h 2010-03-24 15:09:23.000000000 +0100 -@@ -49,6 +49,9 @@ - #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) +--- head-2011-03-17.orig/include/xen/interface/arch-x86/xen.h 2011-03-17 13:50:24.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/arch-x86/xen.h 2011-03-17 14:11:48.000000000 +0100 +@@ -50,6 +50,9 @@ #endif + #define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) +/* Allow co-existing Linux 2.6.23+ Xen interface definitions. */ +#define DEFINE_GUEST_HANDLE_STRUCT(name) struct name @@ -4913,8 +5031,8 @@ Acked-by: jbeulich@novell.com #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) ---- head-2010-05-25.orig/include/xen/interface/event_channel.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/event_channel.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/event_channel.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/event_channel.h 2011-01-31 17:49:31.000000000 +0100 @@ -250,6 +250,7 @@ struct evtchn_op { struct evtchn_unmask unmask; } u; @@ -4923,8 +5041,8 @@ Acked-by: jbeulich@novell.com typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); ---- head-2010-05-25.orig/include/xen/interface/io/netif.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/io/netif.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/io/netif.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/io/netif.h 2011-01-31 17:49:31.000000000 +0100 @@ -183,8 +183,22 @@ typedef struct netif_rx_response netif_r * Generate netif ring structures and types. 
*/ @@ -4948,9 +5066,9 @@ Acked-by: jbeulich@novell.com #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 ---- head-2010-05-25.orig/include/xen/interface/memory.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/memory.h 2010-03-24 15:09:23.000000000 +0100 -@@ -85,6 +85,7 @@ struct xen_memory_reservation { +--- head-2011-03-17.orig/include/xen/interface/memory.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/memory.h 2011-01-31 17:49:31.000000000 +0100 +@@ -88,6 +88,7 @@ struct xen_memory_reservation { */ domid_t domid; }; @@ -4958,7 +5076,7 @@ Acked-by: jbeulich@novell.com typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); -@@ -178,6 +179,7 @@ struct xen_machphys_mfn_list { +@@ -181,6 +182,7 @@ struct xen_machphys_mfn_list { */ unsigned int nr_extents; }; @@ -4966,7 +5084,7 @@ Acked-by: jbeulich@novell.com typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); -@@ -219,6 +221,7 @@ struct xen_add_to_physmap { +@@ -222,6 +224,7 @@ struct xen_add_to_physmap { /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; @@ -4974,8 +5092,8 @@ Acked-by: jbeulich@novell.com typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); ---- head-2010-05-25.orig/include/xen/interface/sched.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/sched.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/sched.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/sched.h 2011-01-31 17:49:31.000000000 +0100 @@ -67,6 +67,7 @@ struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ @@ -4992,8 +5110,8 @@ Acked-by: jbeulich@novell.com typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); ---- head-2010-05-25.orig/include/xen/interface/version.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/version.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/include/xen/interface/version.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/version.h 2011-01-31 17:49:31.000000000 +0100 @@ -36,6 +36,9 @@ /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 @@ -5021,8 +5139,8 @@ Acked-by: jbeulich@novell.com #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 ---- head-2010-05-25.orig/include/xen/interface/xen.h 2010-05-07 11:10:48.000000000 +0200 -+++ head-2010-05-25/include/xen/interface/xen.h 2010-05-12 09:01:12.000000000 +0200 +--- head-2011-03-17.orig/include/xen/interface/xen.h 2011-03-17 13:50:24.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/xen.h 2011-03-17 14:12:10.000000000 +0100 @@ -32,7 +32,9 @@ #include #endif @@ -5052,7 +5170,7 @@ Acked-by: jbeulich@novell.com #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif -@@ -302,6 +304,7 @@ struct mmuext_op { +@@ -307,6 +309,7 @@ struct mmuext_op { xen_pfn_t src_mfn; } arg2; }; @@ -5060,7 +5178,7 @@ Acked-by: jbeulich@novell.com typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif -@@ -391,6 +394,7 @@ struct mmu_update { +@@ -399,6 +402,7 @@ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. 
*/ }; @@ -5068,7 +5186,7 @@ Acked-by: jbeulich@novell.com typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); -@@ -399,9 +403,15 @@ DEFINE_XEN_GUEST_HANDLE(mmu_update_t); +@@ -407,9 +411,15 @@ DEFINE_XEN_GUEST_HANDLE(mmu_update_t); * NB. The fields are natural register size for this architecture. */ struct multicall_entry { @@ -5085,8 +5203,8 @@ Acked-by: jbeulich@novell.com typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); ---- head-2010-05-25.orig/include/xen/xenbus.h 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/include/xen/xenbus.h 2010-03-24 15:09:23.000000000 +0100 +--- head-2011-03-17.orig/include/xen/xenbus.h 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/include/xen/xenbus.h 2011-01-31 17:49:31.000000000 +0100 @@ -57,16 +57,20 @@ struct xenbus_watch void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); @@ -5108,7 +5226,17 @@ Acked-by: jbeulich@novell.com /* A xenbus device. */ struct xenbus_device { -@@ -214,6 +218,7 @@ int xenbus_watch_path(struct xenbus_devi +@@ -184,6 +188,9 @@ void xs_suspend_cancel(void); + /* Used by xenbus_dev to borrow kernel's store connection. */ + void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); + ++struct work_struct; ++void xenbus_probe(struct work_struct *); ++ + /* Prepare for domain suspend: then resume or cancel the suspend. */ + void xenbus_suspend(void); + void xenbus_resume(void); +@@ -214,6 +221,7 @@ int xenbus_watch_path(struct xenbus_devi const char **, unsigned int)); @@ -5116,7 +5244,7 @@ Acked-by: jbeulich@novell.com /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. -@@ -227,7 +232,13 @@ int xenbus_watch_path2(struct xenbus_dev +@@ -227,7 +235,13 @@ int xenbus_watch_path2(struct xenbus_dev const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); @@ -5131,100 +5259,3 @@ Acked-by: jbeulich@novell.com /** * Advertise in the store a change of the given driver to the given new_state. ---- head-2010-05-25.orig/net/core/dev.c 2010-05-25 09:22:00.000000000 +0200 -+++ head-2010-05-25/net/core/dev.c 2010-05-25 09:22:06.000000000 +0200 -@@ -139,7 +139,7 @@ - /* This should be increased if a protocol with a bigger head is added. 
*/ - #define GRO_MAX_HEAD (MAX_HEADER + 128) - --#ifdef CONFIG_XEN -+#if defined(CONFIG_XEN) || defined(CONFIG_PARAVIRT_XEN) - #include - #include - #include -@@ -2011,42 +2011,54 @@ static struct netdev_queue *dev_pick_tx( - return netdev_get_tx_queue(dev, queue_index); - } - --#ifdef CONFIG_XEN -+#if defined(CONFIG_XEN) || defined(CONFIG_PARAVIRT_XEN) - inline int skb_checksum_setup(struct sk_buff *skb) - { -- if (skb->proto_csum_blank) { -- struct iphdr *iph; -- unsigned char *th; -+ struct iphdr *iph; -+ unsigned char *th; -+ int err = -EPROTO; - -- if (skb->protocol != htons(ETH_P_IP)) -- goto out; -- iph = ip_hdr(skb); -- th = skb_network_header(skb) + 4 * iph->ihl; -- if (th >= skb_tail_pointer(skb)) -- goto out; -- skb->csum_start = th - skb->head; -- switch (iph->protocol) { -- case IPPROTO_TCP: -- skb->csum_offset = offsetof(struct tcphdr, check); -- break; -- case IPPROTO_UDP: -- skb->csum_offset = offsetof(struct udphdr, check); -- break; -- default: -- if (net_ratelimit()) -- printk(KERN_ERR "Attempting to checksum a non-" -- "TCP/UDP packet, dropping a protocol" -- " %d packet", iph->protocol); -- goto out; -- } -- if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) -- goto out; -- skb->ip_summed = CHECKSUM_PARTIAL; -- skb->proto_csum_blank = 0; -+#ifdef CONFIG_XEN -+ if (!skb->proto_csum_blank) -+ return 0; -+#endif -+ -+ if (skb->protocol != htons(ETH_P_IP)) -+ goto out; -+ -+ iph = ip_hdr(skb); -+ th = skb_network_header(skb) + 4 * iph->ihl; -+ if (th >= skb_tail_pointer(skb)) -+ goto out; -+ -+ skb->csum_start = th - skb->head; -+ switch (iph->protocol) { -+ case IPPROTO_TCP: -+ skb->csum_offset = offsetof(struct tcphdr, check); -+ break; -+ case IPPROTO_UDP: -+ skb->csum_offset = offsetof(struct udphdr, check); -+ break; -+ default: -+ if (net_ratelimit()) -+ printk(KERN_ERR "Attempting to checksum a non-" -+ "TCP/UDP packet, dropping a protocol" -+ " %d packet", iph->protocol); -+ goto out; - } -- return 0; -+ -+ if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) -+ goto out; -+ -+#ifdef CONFIG_XEN -+ skb->ip_summed = CHECKSUM_PARTIAL; -+ skb->proto_csum_blank = 0; -+#endif -+ -+ err = 0; -+ - out: -- return -EPROTO; -+ return err; - } - EXPORT_SYMBOL(skb_checksum_setup); - #endif diff --git a/patches.xen/xen3-patch-2.6.24 b/patches.xen/xen3-patch-2.6.24 index 98c6d0c..69845ff 100644 --- a/patches.xen/xen3-patch-2.6.24 +++ b/patches.xen/xen3-patch-2.6.24 @@ -6,9 +6,9 @@ Automatically created from "patches.kernel.org/patch-2.6.24" by xen-port-patches Acked-by: jbeulich@novell.com ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-24 15:10:29.000000000 +0100 -@@ -76,15 +76,16 @@ config GENERIC_CMOS_UPDATE +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-01-31 17:56:27.000000000 +0100 +@@ -86,15 +86,16 @@ config GENERIC_CMOS_UPDATE config CLOCKSOURCE_WATCHDOG def_bool y @@ -28,7 +28,7 @@ Acked-by: jbeulich@novell.com config LOCKDEP_SUPPORT def_bool y -@@ -240,12 +241,12 @@ config X86_TRAMPOLINE +@@ -227,12 +228,12 @@ config X86_TRAMPOLINE config X86_NO_TSS bool @@ -43,7 +43,7 @@ Acked-by: jbeulich@novell.com default y config X86_32_LAZY_GS -@@ -327,6 +328,7 @@ config X86_MPPARSE +@@ -307,6 +308,7 @@ config X86_MPPARSE config X86_XEN bool "Xen-compatible" @@ -51,7 +51,7 @@ Acked-by: jbeulich@novell.com select XEN select X86_PAE select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST -@@ -367,6 +369,7 @@ endif +@@ -347,6 
+349,7 @@ endif config X86_64_XEN bool "Enable Xen compatible kernel" @@ -59,7 +59,7 @@ Acked-by: jbeulich@novell.com select XEN select SWIOTLB help -@@ -702,7 +705,7 @@ source "arch/x86/Kconfig.cpu" +@@ -630,7 +633,7 @@ source "arch/x86/Kconfig.cpu" config HPET_TIMER def_bool X86_64 prompt "HPET Timer Support" if X86_32 @@ -68,7 +68,7 @@ Acked-by: jbeulich@novell.com ---help--- Use the IA-PC HPET (High Precision Event Timer) to manage time in preference to the PIT and RTC, if a HPET is -@@ -1056,7 +1059,7 @@ config I8K +@@ -991,7 +994,7 @@ config I8K config X86_REBOOTFIXUPS bool "Enable X86 board specific fixups for reboot" @@ -77,7 +77,7 @@ Acked-by: jbeulich@novell.com ---help--- This enables chipset and/or board specific fixups to be done in order to get reboot to work correctly. This is only needed on -@@ -1454,7 +1457,7 @@ config X86_RESERVE_LOW_64K +@@ -1401,7 +1404,7 @@ config X86_RESERVE_LOW config MATH_EMULATION bool prompt "Math emulation" if X86_32 @@ -86,7 +86,7 @@ Acked-by: jbeulich@novell.com ---help--- Linux can emulate a math coprocessor (used for floating point operations) if you don't have one. 486DX and Pentium processors have -@@ -1828,6 +1831,7 @@ endmenu +@@ -1769,6 +1772,7 @@ endmenu config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y depends on X86_64 || (X86_32 && HIGHMEM) @@ -94,7 +94,7 @@ Acked-by: jbeulich@novell.com config ARCH_ENABLE_MEMORY_HOTREMOVE def_bool y -@@ -2019,7 +2023,7 @@ choice +@@ -1963,7 +1967,7 @@ choice config PCI_GOBIOS bool "BIOS" @@ -103,7 +103,7 @@ Acked-by: jbeulich@novell.com config PCI_GOMMCONFIG bool "MMConfig" -@@ -2070,7 +2074,7 @@ config PCI_MMCONFIG +@@ -2033,7 +2037,7 @@ config PCI_CNB20LE_QUIRK config XEN_PCIDEV_FRONTEND bool "Xen PCI Frontend" if X86_64 @@ -112,7 +112,7 @@ Acked-by: jbeulich@novell.com select HOTPLUG default y help -@@ -2086,7 +2090,7 @@ config XEN_PCIDEV_FE_DEBUG +@@ -2049,7 +2053,7 @@ config XEN_PCIDEV_FE_DEBUG config DMAR bool "Support for DMA Remapping Devices (EXPERIMENTAL)" @@ -121,9 +121,9 @@ Acked-by: jbeulich@novell.com help DMA remapping (DMAR) devices support enables independent address translations for Direct Memory Access (DMA) from devices. 
---- head-2010-05-25.orig/arch/x86/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/Makefile 2010-03-24 15:10:29.000000000 +0100 -@@ -156,8 +156,8 @@ BOOT_TARGETS = bzlilo bzdisk fdimage fdi +--- head-2011-03-17.orig/arch/x86/Makefile 2011-02-01 14:10:15.000000000 +0100 ++++ head-2011-03-17/arch/x86/Makefile 2011-02-01 14:11:04.000000000 +0100 +@@ -158,8 +158,8 @@ BOOT_TARGETS = bzlilo bzdisk fdimage fdi PHONY += bzImage vmlinuz $(BOOT_TARGETS) ifdef CONFIG_XEN @@ -134,7 +134,7 @@ Acked-by: jbeulich@novell.com ifdef CONFIG_X86_64 LDFLAGS_vmlinux := -e startup_64 -@@ -171,6 +171,8 @@ KBUILD_IMAGE := $(boot)/vmlinuz +@@ -173,6 +173,8 @@ KBUILD_IMAGE := $(boot)/vmlinuz vmlinuz: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) @@ -143,8 +143,8 @@ Acked-by: jbeulich@novell.com else # Default kernel to build all: bzImage ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:56:27.000000000 +0100 @@ -125,20 +125,16 @@ sysenter_do_call: jmp int_ret_from_sys_call @@ -193,7858 +193,10063 @@ Acked-by: jbeulich@novell.com jmp cstar_do_call END(ia32_cstar_target) ---- head-2010-05-25.orig/arch/x86/kernel/Makefile 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/Makefile 2010-03-24 15:10:29.000000000 +0100 -@@ -141,4 +141,4 @@ endif - disabled-obj-$(CONFIG_XEN) := early-quirks.o hpet.o i8253.o i8259_$(BITS).o reboot.o \ - smpboot_$(BITS).o tsc_$(BITS).o tsc_sync.o - disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += mpparse_64.o --%/head_64.o %/head_64.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := -+%/head_64.o %/head_64.s: asflags-$(CONFIG_XEN) := ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -90,7 +90,7 @@ __setup("acpi_sleep=", acpi_sleep_setup) - - /* Ouch, we want to delete this. We already have better version in userspace, in - s2ram from suspend.sf.net project */ --static __init int reset_videomode_after_s3(struct dmi_system_id *d) -+static __init int reset_videomode_after_s3(const struct dmi_system_id *d) - { - acpi_realmode_flags |= 2; - return 0; ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -123,6 +123,3 @@ static int __init acpi_sleep_setup(char - __setup("acpi_sleep=", acpi_sleep_setup); - #endif /* CONFIG_ACPI_PV_SLEEP */ - --void acpi_pci_link_exit(void) --{ --} ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -214,7 +214,7 @@ static void __cpuinit get_cpu_vendor(str - - static int __init x86_fxsr_setup(char * s) - { -- /* Tell all the other CPU's to not use it... */ -+ /* Tell all the other CPUs to not use it... 
*/ - disable_x86_fxsr = 1; - - /* ---- head-2010-05-25.orig/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -52,6 +52,13 @@ struct resource code_resource = { - .flags = IORESOURCE_BUSY | IORESOURCE_MEM - }; - -+struct resource bss_resource = { -+ .name = "Kernel bss", -+ .start = 0, -+ .end = 0, -+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM -+}; -+ - static struct resource system_rom_resource = { - .name = "System ROM", - .start = 0xf0000, -@@ -266,7 +273,9 @@ static struct e820map machine_e820; - * and also for regions reported as reserved by the e820. +--- head-2011-03-17.orig/arch/x86/include/asm/acpi.h 2011-03-11 10:52:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/acpi.h 2011-03-11 10:56:05.000000000 +0100 +@@ -152,6 +152,7 @@ static inline int acpi_notify_hypervisor */ - static void __init --legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource) -+legacy_init_iomem_resources(struct resource *code_resource, -+ struct resource *data_resource, -+ struct resource *bss_resource) + static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) { - int i; - -@@ -300,9 +309,11 @@ legacy_init_iomem_resources(struct resou - #ifndef CONFIG_XEN - request_resource(res, code_resource); - request_resource(res, data_resource); -+ request_resource(res, bss_resource); - #endif - #ifdef CONFIG_KEXEC -- request_resource(res, &crashk_res); -+ if (crashk_res.start != crashk_res.end) -+ request_resource(res, &crashk_res); - #ifdef CONFIG_XEN - xen_machine_kexec_register_resources(res); - #endif -@@ -329,9 +340,11 @@ static int __init request_standard_resou - - printk("Setting up standard PCI resources\n"); - if (efi_enabled) -- efi_initialize_iomem_resources(&code_resource, &data_resource); -+ efi_initialize_iomem_resources(&code_resource, -+ &data_resource, &bss_resource); ++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL + /* + * Early models (<=5) of AMD Opterons are not supposed to go into + * C2 state. +@@ -166,6 +167,7 @@ static inline unsigned int acpi_processo + else if (c1e_detected) + return 1; else -- legacy_init_iomem_resources(&code_resource, &data_resource); -+ legacy_init_iomem_resources(&code_resource, -+ &data_resource, &bss_resource); ++#endif + return max_cstate; + } - /* EFI systems may still have VGA */ - request_resource(&iomem_resource, &video_ram_resource); -@@ -761,7 +774,7 @@ void __init e820_register_memory(void) - #define e820 machine_e820 - #endif - /* -- * Search for the bigest gap in the low 32 bits of the e820 -+ * Search for the biggest gap in the low 32 bits of the e820 - * memory space. 
- */ - last = 0x100000000ull; ---- head-2010-05-25.orig/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -24,7 +24,7 @@ - #include - #include - #include --#include -+#include - #include - #include +--- head-2011-03-17.orig/arch/x86/include/asm/mmu.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/mmu.h 2011-01-31 17:56:27.000000000 +0100 +@@ -11,6 +11,9 @@ + typedef struct { + void *ldt; + int size; ++#ifdef CONFIG_XEN ++ unsigned has_foreign_mappings:1; ++#endif + struct mutex lock; + void *vdso; + } mm_context_t; +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/agp.h 2007-06-22 09:08:06.000000000 +0200 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/agp.h 2011-01-31 17:56:27.000000000 +0100 +@@ -1,20 +1,22 @@ +-#ifndef AGP_H +-#define AGP_H 1 ++#ifndef _ASM_X86_AGP_H ++#define _ASM_X86_AGP_H -@@ -53,7 +53,7 @@ unsigned long end_pfn_map; + #include + #include + #include + +-/* +- * Functions to keep the agpgart mappings coherent with the MMU. +- * The GART gives the CPU a physical alias of pages in memory. The alias region is +- * mapped uncacheable. Make sure there are no conflicting mappings +- * with different cachability attributes for the same page. This avoids +- * data corruption on some CPUs. ++/* ++ * Functions to keep the agpgart mappings coherent with the MMU. The ++ * GART gives the CPU a physical alias of pages in memory. The alias ++ * region is mapped uncacheable. Make sure there are no conflicting ++ * mappings with different cachability attributes for the same ++ * page. This avoids data corruption on some CPUs. */ - static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT; --extern struct resource code_resource, data_resource; -+extern struct resource code_resource, data_resource, bss_resource; +-/* Caller's responsibility to call global_flush_tlb() for +- * performance reasons */ ++/* ++ * Caller's responsibility to call global_flush_tlb() for performance ++ * reasons ++ */ + #define map_page_into_agp(page) ( \ + xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \ + ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)) +@@ -24,9 +26,11 @@ + change_page_attr(page, 1, PAGE_KERNEL)) + #define flush_agp_mappings() global_flush_tlb() - /* Check for some hardcoded bad areas that early boot is not allowed to touch */ - static inline int bad_addr(unsigned long *addrp, unsigned long size) -@@ -75,10 +75,15 @@ static inline int bad_addr(unsigned long +-/* Could use CLFLUSH here if the cpu supports it. But then it would +- need to be called for each cacheline of the whole page so it may not be +- worth it. Would need a page for it. */ ++/* ++ * Could use CLFLUSH here if the cpu supports it. But then it would ++ * need to be called for each cacheline of the whole page so it may ++ * not be worth it. Would need a page for it. 
++ */ + #define flush_agp_cache() wbinvd() - /* initrd */ - #ifdef CONFIG_BLK_DEV_INITRD -- if (LOADER_TYPE && INITRD_START && last >= INITRD_START && -- addr < INITRD_START+INITRD_SIZE) { -- *addrp = PAGE_ALIGN(INITRD_START + INITRD_SIZE); -- return 1; -+ if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { -+ unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; -+ unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; -+ unsigned long ramdisk_end = ramdisk_image+ramdisk_size; -+ -+ if (last >= ramdisk_image && addr < ramdisk_end) { -+ *addrp = PAGE_ALIGN(ramdisk_end); -+ return 1; -+ } - } - #endif - /* kernel code */ -@@ -251,6 +256,7 @@ void __init e820_reserve_resources(struc - #ifndef CONFIG_XEN - request_resource(res, &code_resource); - request_resource(res, &data_resource); -+ request_resource(res, &bss_resource); - #endif - #ifdef CONFIG_KEXEC - if (crashk_res.start != crashk_res.end) -@@ -661,8 +667,8 @@ void __init setup_memory_region(void) - * Otherwise fake a memory map; one section from 0k->640k, - * the next section from 1mb->appropriate_mem_k - */ -- sanitize_e820_map(E820_MAP, &E820_MAP_NR); -- if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) -+ sanitize_e820_map(boot_params.e820_map, &boot_params.e820_entries); -+ if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0) - early_panic("Cannot find a valid memory map"); - printk(KERN_INFO "BIOS-provided physical RAM map:\n"); - e820_print_map("BIOS-e820"); -@@ -847,3 +853,22 @@ __init void e820_setup_gap(struct e820en - printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", - pci_mem_start, gapstart, gapsize); + /* Convert a physical address to an address suitable for the GART. */ +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/desc.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "desc_32.h" ++#else ++# include "desc_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/desc_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/desc_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -34,6 +34,18 @@ static inline void clear_LDT(void) + put_cpu(); } -+ -+int __init arch_get_ram_range(int slot, u64 *addr, u64 *size) + ++#ifndef CONFIG_X86_NO_TSS ++static inline unsigned long __store_tr(void) +{ -+ int i; ++ unsigned long tr; + -+ if (slot < 0 || slot >= e820.nr_map) -+ return -1; -+ for (i = slot; i < e820.nr_map; i++) { -+ if (e820.map[i].type != E820_RAM) -+ continue; -+ break; -+ } -+ if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT)) -+ return -1; -+ *addr = e820.map[i].addr; -+ *size = min_t(u64, e820.map[i].size + e820.map[i].addr, -+ max_pfn << PAGE_SHIFT) - *addr; -+ return i + 1; ++ asm volatile ("str %w0":"=r" (tr)); ++ return tr; +} ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -6,15 +6,10 @@ - #include - #include - #include -- --/* Simple VGA output */ -- --#ifdef __i386__ - #include --#else --#include --#endif + - #ifndef CONFIG_XEN -+/* Simple VGA output */ - #define VGABASE (__ISA_IO_base + 0xb8000) ++#define store_tr(tr) (tr) = __store_tr() ++#endif ++ + /* + * This is the ldt that every process will get unless we need + * something other than this. 
+@@ -47,6 +59,18 @@ extern struct desc_ptr cpu_gdt_descr[]; + /* the cpu gdt accessor */ + #define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address) - static int max_ypos = 25, max_xpos = 80; -@@ -264,10 +259,10 @@ static int __init setup_early_printk(cha - early_console = &early_serial_console; - } else if (!strncmp(buf, "vga", 3)) { - #ifndef CONFIG_XEN -- && SCREEN_INFO.orig_video_isVGA == 1) { -- max_xpos = SCREEN_INFO.orig_video_cols; -- max_ypos = SCREEN_INFO.orig_video_lines; -- current_ypos = SCREEN_INFO.orig_y; -+ && boot_params.screen_info.orig_video_isVGA == 1) { -+ max_xpos = boot_params.screen_info.orig_video_cols; -+ max_ypos = boot_params.screen_info.orig_video_lines; -+ current_ypos = boot_params.screen_info.orig_y; - #endif - early_console = &early_vga_console; - } else if (!strncmp(buf, "simnow", 6)) { ---- head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:10:29.000000000 +0100 -@@ -254,6 +254,7 @@ check_userspace: - jb resume_kernel # not returning to v8086 or userspace - - ENTRY(resume_userspace) -+ LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret -@@ -341,6 +342,7 @@ sysenter_past_esp: - jae syscall_badsys - call *sys_call_table(,%eax,4) - movl %eax,PT_EAX(%esp) -+ LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx -@@ -406,6 +408,7 @@ syscall_call: - call *sys_call_table(,%eax,4) - movl %eax,PT_EAX(%esp) # store the return value - syscall_exit: -+ LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret -@@ -478,7 +481,7 @@ ldt_ss: - * is still available to implement the setting of the high - * 16-bits in the INTERRUPT_RETURN paravirt-op. 
- */ -- cmpl $0, paravirt_ops+PARAVIRT_enabled -+ cmpl $0, pv_info+PARAVIRT_enabled - jne restore_nocheck - #endif - -@@ -540,6 +543,7 @@ work_pending: - jz work_notifysig - work_resched: - call schedule -+ LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret -@@ -1268,6 +1272,6 @@ ENTRY(kernel_thread_helper) - ENDPROC(kernel_thread_helper) - - .section .rodata,"a" --#include "syscall_table.S" -+#include "syscall_table_32.S" - - syscall_table_size=(.-sys_call_table) ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:10:29.000000000 +0100 -@@ -57,7 +57,7 @@ - #include - #include - --#include "xen_entry.S" -+#include "xen_entry_64.S" - - .code64 - -@@ -275,6 +275,7 @@ ret_from_sys_call: - movl $_TIF_ALLWORK_MASK,%edi - /* edi: flagmask */ - sysret_check: -+ LOCKDEP_SYS_EXIT - GET_THREAD_INFO(%rcx) - XEN_BLOCK_EVENTS(%rsi) - TRACE_IRQS_OFF -@@ -365,6 +366,7 @@ int_ret_from_sys_call: - movl $_TIF_ALLWORK_MASK,%edi - /* edi: mask to check */ - int_with_check: -+ LOCKDEP_SYS_EXIT_IRQ - GET_THREAD_INFO(%rcx) - movl threadinfo_flags(%rcx),%edx - andl %edi,%edx -@@ -516,11 +518,12 @@ END(stub_rt_sigreturn) - - retint_check: - CFI_DEFAULT_STACK adj=1 -+ LOCKDEP_SYS_EXIT_IRQ - movl threadinfo_flags(%rcx),%edx - andl %edi,%edx - CFI_REMEMBER_STATE - jnz retint_careful --retint_restore_args: -+retint_restore_args: /* return to kernel space */ - movl EFLAGS-REST_SKIP(%rsp), %eax - shr $9, %eax # EAX[0] == IRET_EFLAGS.IF - XEN_GET_VCPU_INFO(%rsi) -@@ -841,7 +844,7 @@ error_call_handler: - movq ORIG_RAX(%rsp),%rsi # get error code - movq $-1,ORIG_RAX(%rsp) - call *%rax --error_exit: -+error_exit: - RESTORE_REST - /* cli */ - XEN_BLOCK_EVENTS(%rsi) -@@ -849,14 +852,11 @@ error_exit: - GET_THREAD_INFO(%rcx) - testb $3,CS-ARGOFFSET(%rsp) - jz retint_kernel -+ LOCKDEP_SYS_EXIT_IRQ - movl threadinfo_flags(%rcx),%edx - movl $_TIF_WORK_MASK,%edi - andl %edi,%edx - jnz retint_careful -- /* -- * The iret might restore flags: -- */ -- TRACE_IRQS_IRETQ - jmp retint_restore_args - - #if 0 -@@ -1071,7 +1071,7 @@ child_rip: - movq %rsi, %rdi - call *%rax - # exit -- xorl %edi, %edi -+ mov %eax, %edi - call do_exit - CFI_ENDPROC - ENDPROC(child_rip) ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,5 +1,5 @@ - /* -- * linux/arch/x86_64/kernel/head64.c -- prepare to run common code -+ * prepare to run common code - * - * Copyright (C) 2000 Andrea Arcangeli SuSE - * -@@ -21,7 +21,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -47,27 +46,16 @@ static void __init clear_bss(void) - } - #endif - --#define NEW_CL_POINTER 0x228 /* Relative to real mode data */ --#define OLD_CL_MAGIC_ADDR 0x20 --#define OLD_CL_MAGIC 0xA33F --#define OLD_CL_OFFSET 0x22 -- - static void __init copy_bootdata(char *real_mode_data) ++#ifndef CONFIG_XEN ++static inline void load_gdt(const struct desc_ptr *ptr) ++{ ++ asm volatile("lgdt %w0"::"m" (*ptr)); ++} ++ ++static inline void store_gdt(struct desc_ptr *ptr) ++{ ++ asm("sgdt %w0":"=m" (*ptr)); ++} ++#endif ++ + static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist) { - #ifndef CONFIG_XEN -- unsigned long new_data; - char * command_line; - -- memcpy(x86_boot_params, 
real_mode_data, BOOT_PARAM_SIZE); -- new_data = *(u32 *) (x86_boot_params + NEW_CL_POINTER); -- if (!new_data) { -- if (OLD_CL_MAGIC != *(u16 *)(real_mode_data + OLD_CL_MAGIC_ADDR)) { -- return; -- } -- new_data = __pa(real_mode_data) + *(u16 *)(real_mode_data + OLD_CL_OFFSET); -+ memcpy(&boot_params, real_mode_data, sizeof boot_params); -+ if (boot_params.hdr.cmd_line_ptr) { -+ command_line = __va(boot_params.hdr.cmd_line_ptr); -+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); - } -- command_line = __va(new_data); -- memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); - #else - int max_cmdline; - -@@ -117,7 +105,7 @@ void __init x86_64_start_kernel(char * r - - for (i = 0; i < IDT_ENTRIES; i++) - set_intr_gate(i, early_idt_handler); -- asm volatile("lidt %0" :: "m" (idt_descr)); -+ load_idt((const struct desc_ptr *)&idt_descr); - #endif - - early_printk("Kernel alive\n"); ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -427,7 +427,7 @@ static struct irq_cpu_info { - - #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask) - --#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i])) -+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i))) - - static cpumask_t balance_irq_affinity[NR_IRQS] = { - [0 ... NR_IRQS-1] = CPU_MASK_ALL -@@ -633,7 +633,7 @@ tryanotherirq: - - imbalance = move_this_load; - -- /* For physical_balance case, we accumlated both load -+ /* For physical_balance case, we accumulated both load - * values in the one of the siblings cpu_irq[], - * to use the same code for physical and logical processors - * as much as possible. -@@ -647,7 +647,7 @@ tryanotherirq: - * (A+B)/2 vs B - */ - load = CPU_IRQ(min_loaded) >> 1; -- for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) { -+ for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) { - if (load > CPU_IRQ(j)) { - /* This won't change cpu_sibling_map[min_loaded] */ - load = CPU_IRQ(j); -@@ -1018,7 +1018,7 @@ static int EISA_ELCR(unsigned int irq) - #define default_MCA_trigger(idx) (1) - #define default_MCA_polarity(idx) (0) - --static int __init MPBIOS_polarity(int idx) -+static int MPBIOS_polarity(int idx) + struct gate_struct s; +@@ -87,6 +111,16 @@ static inline void set_system_gate_ist(i { - int bus = mp_irqs[idx].mpc_srcbus; - int polarity; -@@ -1347,6 +1347,11 @@ static void __init setup_IO_APIC_irqs(vo - continue; - } - -+ if (!first_notcon) { -+ apic_printk(APIC_VERBOSE, " not connected.\n"); -+ first_notcon = 1; -+ } + _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist); + } + - entry.trigger = irq_trigger(idx); - entry.polarity = irq_polarity(idx); - -@@ -1936,13 +1941,16 @@ __setup("no_timer_check", notimercheck); - static int __init timer_irq_works(void) - { - unsigned long t1 = jiffies; -+ unsigned long flags; - - if (no_timer_check) - return 1; - -+ local_save_flags(flags); - local_irq_enable(); - /* Let ten ticks pass... 
*/ - mdelay((10 * 1000) / HZ); -+ local_irq_restore(flags); - - /* - * Expect a few ticks at least, to be sure some possible -@@ -2223,6 +2231,9 @@ static inline void __init check_timer(vo - { - int apic1, pin1, apic2, pin2; - int vector; -+ unsigned long flags; ++static inline void load_idt(const struct desc_ptr *ptr) ++{ ++ asm volatile("lidt %w0"::"m" (*ptr)); ++} + -+ local_irq_save(flags); - - /* - * get/set the timer IRQ vector: -@@ -2268,7 +2279,7 @@ static inline void __init check_timer(vo - } - if (disable_timer_pin_1 > 0) - clear_IO_APIC_pin(0, pin1); -- return; -+ goto out; - } - clear_IO_APIC_pin(apic1, pin1); - printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " -@@ -2291,7 +2302,7 @@ static inline void __init check_timer(vo - if (nmi_watchdog == NMI_IO_APIC) { - setup_nmi(); - } -- return; -+ goto out; - } - /* - * Cleanup, just in case ... -@@ -2315,7 +2326,7 @@ static inline void __init check_timer(vo - - if (timer_irq_works()) { - printk(" works.\n"); -- return; -+ goto out; - } - apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); - printk(" failed.\n"); -@@ -2331,11 +2342,13 @@ static inline void __init check_timer(vo - - if (timer_irq_works()) { - printk(" works.\n"); -- return; -+ goto out; - } - printk(" failed :(.\n"); - panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " - "report. Then try booting with the 'noapic' option"); -+out: -+ local_irq_restore(flags); - } - #else - int timer_uses_ioapic_pin_0 = 0; -@@ -2353,6 +2366,14 @@ int timer_uses_ioapic_pin_0 = 0; ++static inline void store_idt(struct desc_ptr *dtr) ++{ ++ asm("sidt %w0":"=m" (*dtr)); ++} + #endif - void __init setup_IO_APIC(void) - { -+#ifndef CONFIG_XEN -+ int i; -+ -+ /* Reserve all the system vectors. */ -+ for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++) -+ set_bit(i, used_vectors); + static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/dma-mapping.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "dma-mapping_32.h" ++#else ++# include "dma-mapping_64.h" ++#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "fixmap_32.h" ++#else ++# include "fixmap_64.h" +#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypercall.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,420 @@ ++/****************************************************************************** ++ * hypercall.h ++ * ++ * Linux-specific hypervisor handling. 
++ * ++ * Copyright (c) 2002-2004, K A Fraser ++ * ++ * 64-bit updates: ++ * Benjamin Liu ++ * Jun Nakajima ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License version 2 ++ * as published by the Free Software Foundation; or, when distributed ++ * separately from the Linux kernel or incorporated into other ++ * software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this source file (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, copy, modify, ++ * merge, publish, distribute, sublicense, and/or sell copies of the Software, ++ * and to permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ + - enable_IO_APIC(); - - if (acpi_ioapic) -@@ -2542,7 +2563,7 @@ void destroy_irq(unsigned int irq) - #endif /* CONFIG_XEN */ - - /* -- * MSI mesage composition -+ * MSI message composition - */ - #if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN) - static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) -@@ -2899,6 +2920,25 @@ int io_apic_set_pci_routing (int ioapic, - return 0; - } - -+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) -+{ -+ int i; ++#ifndef __HYPERCALL_H__ ++#define __HYPERCALL_H__ + -+ if (skip_ioapic_setup) -+ return -1; ++#ifndef __HYPERVISOR_H__ ++# error "please don't include this file directly" ++#endif + -+ for (i = 0; i < mp_irq_entries; i++) -+ if (mp_irqs[i].mpc_irqtype == mp_INT && -+ mp_irqs[i].mpc_srcbusirq == bus_irq) -+ break; -+ if (i >= mp_irq_entries) -+ return -1; ++#if CONFIG_XEN_COMPAT <= 0x030002 ++# include /* memcpy() */ ++#endif + -+ *trigger = irq_trigger(i); -+ *polarity = irq_polarity(i); -+ return 0; -+} ++#ifdef CONFIG_XEN ++#define HYPERCALL_ASM_OPERAND "%c" ++#define HYPERCALL_LOCATION(op) (hypercall_page + (op) * 32) ++#define HYPERCALL_C_OPERAND(name) "i" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) ++#else ++#define HYPERCALL_ASM_OPERAND "*%" ++#define HYPERCALL_LOCATION(op) (hypercall_stubs + (op) * 32) ++#define HYPERCALL_C_OPERAND(name) "g" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) ++#endif + - #endif /* CONFIG_ACPI */ - - static int __init parse_disable_timer_pin_1(char *arg) ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #ifdef CONFIG_ACPI - #include - #endif -@@ -584,7 +585,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, - #define default_PCI_trigger(idx) (1) - #define default_PCI_polarity(idx) (1) - --static int __init MPBIOS_polarity(int idx) -+static int MPBIOS_polarity(int 
idx) - { - int bus = mp_irqs[idx].mpc_srcbus; - int polarity; -@@ -871,6 +872,10 @@ static void __init setup_IO_APIC_irqs(vo - apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); - continue; - } -+ if (!first_notcon) { -+ apic_printk(APIC_VERBOSE, " not connected.\n"); -+ first_notcon = 1; -+ } - - irq = pin_2_irq(idx, apic, pin); - add_pin_to_irq(irq, apic, pin); -@@ -881,7 +886,7 @@ static void __init setup_IO_APIC_irqs(vo - } - - if (!first_notcon) -- apic_printk(APIC_VERBOSE," not connected.\n"); -+ apic_printk(APIC_VERBOSE, " not connected.\n"); - } - - #ifndef CONFIG_XEN -@@ -1277,10 +1282,13 @@ void disable_IO_APIC(void) - static int __init timer_irq_works(void) - { - unsigned long t1 = jiffies; -+ unsigned long flags; - -+ local_save_flags(flags); - local_irq_enable(); - /* Let ten ticks pass... */ - mdelay((10 * 1000) / HZ); -+ local_irq_restore(flags); - - /* - * Expect a few ticks at least, to be sure some possible -@@ -1655,6 +1663,9 @@ static inline void check_timer(void) - { - struct irq_cfg *cfg = irq_cfg + 0; - int apic1, pin1, apic2, pin2; -+ unsigned long flags; ++#define HYPERCALL_ARG(arg, n) \ ++ register typeof((arg)+0) __arg##n asm(HYPERCALL_arg##n) = (arg) + -+ local_irq_save(flags); - - /* - * get/set the timer IRQ vector: -@@ -1696,7 +1707,7 @@ static inline void check_timer(void) - } - if (disable_timer_pin_1 > 0) - clear_IO_APIC_pin(0, pin1); -- return; -+ goto out; - } - clear_IO_APIC_pin(apic1, pin1); - apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not " -@@ -1718,7 +1729,7 @@ static inline void check_timer(void) - if (nmi_watchdog == NMI_IO_APIC) { - setup_nmi(); - } -- return; -+ goto out; - } - /* - * Cleanup, just in case ... -@@ -1741,7 +1752,7 @@ static inline void check_timer(void) - - if (timer_irq_works()) { - apic_printk(APIC_VERBOSE," works.\n"); -- return; -+ goto out; - } - apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); - apic_printk(APIC_VERBOSE," failed.\n"); -@@ -1756,10 +1767,12 @@ static inline void check_timer(void) - - if (timer_irq_works()) { - apic_printk(APIC_VERBOSE," works.\n"); -- return; -+ goto out; - } - apic_printk(APIC_VERBOSE," failed :(.\n"); - panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n"); -+out: -+ local_irq_restore(flags); - } - #else - #define check_timer() ((void)0) -@@ -1775,7 +1788,7 @@ __setup("no_timer_check", notimercheck); - - /* - * -- * IRQ's that are handled by the PIC in the MPS IOAPIC case. -+ * IRQs that are handled by the PIC in the MPS IOAPIC case. - * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. - * Linux doesn't really care, as it's not actually used - * for any interrupt handling anyway. 
-@@ -1858,7 +1871,7 @@ static struct sysdev_class ioapic_sysdev - static int __init ioapic_init_sysfs(void) - { - struct sys_device * dev; -- int i, size, error = 0; -+ int i, size, error; - - error = sysdev_class_register(&ioapic_sysdev_class); - if (error) -@@ -1867,12 +1880,11 @@ static int __init ioapic_init_sysfs(void - for (i = 0; i < nr_ioapics; i++ ) { - size = sizeof(struct sys_device) + nr_ioapic_registers[i] - * sizeof(struct IO_APIC_route_entry); -- mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); -+ mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); - if (!mp_ioapic_data[i]) { - printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); - continue; - } -- memset(mp_ioapic_data[i], 0, size); - dev = &mp_ioapic_data[i]->dev; - dev->id = i; - dev->cls = &ioapic_sysdev_class; -@@ -1933,7 +1945,7 @@ void destroy_irq(unsigned int irq) - #endif /* CONFIG_XEN */ - - /* -- * MSI mesage composition -+ * MSI message composition - */ - #if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN) - static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) -@@ -2043,8 +2055,64 @@ void arch_teardown_msi_irq(unsigned int - destroy_irq(irq); - } - --#endif /* CONFIG_PCI_MSI */ -+#ifdef CONFIG_DMAR -+#ifdef CONFIG_SMP -+static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) -+{ -+ struct irq_cfg *cfg = irq_cfg + irq; -+ struct msi_msg msg; -+ unsigned int dest; -+ cpumask_t tmp; ++#define _hypercall0(type, name) \ ++({ \ ++ type __res; \ ++ asm volatile ( \ ++ "call " HYPERCALL_ASM_OPERAND "1" \ ++ : "=a" (__res) \ ++ : HYPERCALL_C_OPERAND(name) \ ++ : "memory" ); \ ++ __res; \ ++}) + -+ cpus_and(tmp, mask, cpu_online_map); -+ if (cpus_empty(tmp)) -+ return; ++#define _hypercall1(type, name, arg) \ ++({ \ ++ type __res; \ ++ HYPERCALL_ARG(arg, 1); \ ++ asm volatile ( \ ++ "call " HYPERCALL_ASM_OPERAND "2" \ ++ : "=a" (__res), "+r" (__arg1) \ ++ : HYPERCALL_C_OPERAND(name) \ ++ : "memory" ); \ ++ __res; \ ++}) + -+ if (assign_irq_vector(irq, mask)) -+ return; ++#define _hypercall2(type, name, a1, a2) \ ++({ \ ++ type __res; \ ++ HYPERCALL_ARG(a1, 1); \ ++ HYPERCALL_ARG(a2, 2); \ ++ asm volatile ( \ ++ "call " HYPERCALL_ASM_OPERAND "3" \ ++ : "=a" (__res), "+r" (__arg1), "+r" (__arg2) \ ++ : HYPERCALL_C_OPERAND(name) \ ++ : "memory" ); \ ++ __res; \ ++}) + -+ cpus_and(tmp, cfg->domain, mask); -+ dest = cpu_mask_to_apicid(tmp); ++#define _hypercall3(type, name, a1, a2, a3) \ ++({ \ ++ type __res; \ ++ HYPERCALL_ARG(a1, 1); \ ++ HYPERCALL_ARG(a2, 2); \ ++ HYPERCALL_ARG(a3, 3); \ ++ asm volatile ( \ ++ "call " HYPERCALL_ASM_OPERAND "4" \ ++ : "=a" (__res), "+r" (__arg1), \ ++ "+r" (__arg2), "+r" (__arg3) \ ++ : HYPERCALL_C_OPERAND(name) \ ++ : "memory" ); \ ++ __res; \ ++}) + -+ dmar_msi_read(irq, &msg); ++#define _hypercall4(type, name, a1, a2, a3, a4) \ ++({ \ ++ type __res; \ ++ HYPERCALL_ARG(a1, 1); \ ++ HYPERCALL_ARG(a2, 2); \ ++ HYPERCALL_ARG(a3, 3); \ ++ HYPERCALL_ARG(a4, 4); \ ++ asm volatile ( \ ++ "call " HYPERCALL_ASM_OPERAND "5" \ ++ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ ++ "+r" (__arg3), "+r" (__arg4) \ ++ : HYPERCALL_C_OPERAND(name) \ ++ : "memory" ); \ ++ __res; \ ++}) + -+ msg.data &= ~MSI_DATA_VECTOR_MASK; -+ msg.data |= MSI_DATA_VECTOR(cfg->vector); -+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; -+ msg.address_lo |= MSI_ADDR_DEST_ID(dest); ++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ++({ \ ++ type __res; \ ++ HYPERCALL_ARG(a1, 1); \ ++ HYPERCALL_ARG(a2, 2); \ ++ HYPERCALL_ARG(a3, 3); \ ++ HYPERCALL_ARG(a4, 4); \ ++ HYPERCALL_ARG(a5, 5); 
\ ++ asm volatile ( \ ++ "call " HYPERCALL_ASM_OPERAND "6" \ ++ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ ++ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ ++ : HYPERCALL_C_OPERAND(name) \ ++ : "memory" ); \ ++ __res; \ ++}) + -+ dmar_msi_write(irq, &msg); -+ irq_desc[irq].affinity = mask; -+} -+#endif /* CONFIG_SMP */ ++#define _hypercall(type, op, a1, a2, a3, a4, a5) \ ++({ \ ++ type __res; \ ++ HYPERCALL_ARG(a1, 1); \ ++ HYPERCALL_ARG(a2, 2); \ ++ HYPERCALL_ARG(a3, 3); \ ++ HYPERCALL_ARG(a4, 4); \ ++ HYPERCALL_ARG(a5, 5); \ ++ asm volatile ( \ ++ "call *%6" \ ++ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ ++ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ ++ : "g" (HYPERCALL_LOCATION(op)) \ ++ : "memory" ); \ ++ __res; \ ++}) + -+struct irq_chip dmar_msi_type = { -+ .name = "DMAR_MSI", -+ .unmask = dmar_msi_unmask, -+ .mask = dmar_msi_mask, -+ .ack = ack_apic_edge, -+#ifdef CONFIG_SMP -+ .set_affinity = dmar_msi_set_affinity, ++#ifdef CONFIG_X86_32 ++# include "hypercall_32.h" ++#else ++# include "hypercall_64.h" +#endif -+ .retrigger = ioapic_retrigger_irq, -+}; + -+int arch_setup_dmar_msi(unsigned int irq) ++static inline int __must_check ++HYPERVISOR_set_trap_table( ++ const trap_info_t *table) +{ -+ int ret; -+ struct msi_msg msg; ++ return _hypercall1(int, set_trap_table, table); ++} + -+ ret = msi_compose_msg(NULL, irq, &msg); -+ if (ret < 0) -+ return ret; -+ dmar_msi_write(irq, &msg); -+ set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, -+ "edge"); -+ return 0; ++static inline int __must_check ++HYPERVISOR_mmu_update( ++ mmu_update_t *req, unsigned int count, unsigned int *success_count, ++ domid_t domid) ++{ ++ if (arch_use_lazy_mmu_mode()) ++ return xen_multi_mmu_update(req, count, success_count, domid); ++ return _hypercall4(int, mmu_update, req, count, success_count, domid); +} -+#endif - -+#endif /* CONFIG_PCI_MSI */ - /* - * Hypertransport interrupt support - */ -@@ -2177,8 +2245,27 @@ int io_apic_set_pci_routing (int ioapic, - return 0; - } - --#endif /* CONFIG_ACPI */ - -+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) ++ ++static inline int __must_check ++HYPERVISOR_mmuext_op( ++ struct mmuext_op *op, unsigned int count, unsigned int *success_count, ++ domid_t domid) +{ -+ int i; ++ if (arch_use_lazy_mmu_mode()) ++ return xen_multi_mmuext_op(op, count, success_count, domid); ++ return _hypercall4(int, mmuext_op, op, count, success_count, domid); ++} + -+ if (skip_ioapic_setup) -+ return -1; ++static inline int __must_check ++HYPERVISOR_set_gdt( ++ unsigned long *frame_list, unsigned int entries) ++{ ++ return _hypercall2(int, set_gdt, frame_list, entries); ++} + -+ for (i = 0; i < mp_irq_entries; i++) -+ if (mp_irqs[i].mpc_irqtype == mp_INT && -+ mp_irqs[i].mpc_srcbusirq == bus_irq) -+ break; -+ if (i >= mp_irq_entries) -+ return -1; ++static inline int __must_check ++HYPERVISOR_stack_switch( ++ unsigned long ss, unsigned long esp) ++{ ++ return _hypercall2(int, stack_switch, ss, esp); ++} + -+ *trigger = irq_trigger(i); -+ *polarity = irq_polarity(i); -+ return 0; ++static inline int ++HYPERVISOR_fpu_taskswitch( ++ int set) ++{ ++ return _hypercall1(int, fpu_taskswitch, set); +} + -+#endif /* CONFIG_ACPI */ - - #ifndef CONFIG_XEN - /* -@@ -2217,3 +2304,4 @@ void __init setup_ioapic_dest(void) - } - #endif - #endif /* !CONFIG_XEN */ ++#if CONFIG_XEN_COMPAT <= 0x030002 ++static inline int __must_check ++HYPERVISOR_sched_op_compat( ++ int cmd, unsigned long arg) ++{ ++ return _hypercall2(int, sched_op_compat, cmd, arg); ++} 
++#endif
+
---- head-2010-05-25.orig/arch/x86/kernel/ioport_32-xen.c 2010-03-24 15:09:15.000000000 +0100
-+++ head-2010-05-25/arch/x86/kernel/ioport_32-xen.c 2010-03-24 15:10:29.000000000 +0100
-@@ -1,6 +1,4 @@
- /*
-- * linux/arch/i386/kernel/ioport.c
-- *
- * This contains the io-permission bitmap code - written by obz, with changes
- * by Linus.
- */
---- head-2010-05-25.orig/arch/x86/kernel/ioport_64-xen.c 2010-03-24 15:09:15.000000000 +0100
-+++ head-2010-05-25/arch/x86/kernel/ioport_64-xen.c 2010-03-24 15:10:29.000000000 +0100
-@@ -1,6 +1,4 @@
- /*
-- * linux/arch/x86_64/kernel/ioport.c
-- *
- * This contains the io-permission bitmap code - written by obz, with changes
- * by Linus.
- */
---- head-2010-05-25.orig/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:09:15.000000000 +0100
-+++ head-2010-05-25/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:10:29.000000000 +0100
-@@ -1,6 +1,4 @@
- /*
-- * linux/arch/i386/kernel/ldt.c
-- *
- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1999 Ingo Molnar
- */
-@@ -106,14 +104,14 @@ int init_new_context(struct task_struct
- struct mm_struct * old_mm;
- int retval = 0;
-
-- init_MUTEX(&mm->context.sem);
-+ mutex_init(&mm->context.lock);
- mm->context.size = 0;
- mm->context.has_foreign_mappings = 0;
- old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
-- down(&old_mm->context.sem);
-+ mutex_lock(&old_mm->context.lock);
- retval = copy_ldt(&mm->context, &old_mm->context);
-- up(&old_mm->context.sem);
-+ mutex_unlock(&old_mm->context.lock);
- }
- return retval;
- }
-@@ -149,7 +147,7 @@ static int read_ldt(void __user * ptr, u
- if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
- bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-
-- down(&mm->context.sem);
-+ mutex_lock(&mm->context.lock);
- size = mm->context.size*LDT_ENTRY_SIZE;
- if (size > bytecount)
- size = bytecount;
-@@ -157,7 +155,7 @@ static int read_ldt(void __user * ptr, u
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
-- up(&mm->context.sem);
-+ mutex_unlock(&mm->context.lock);
- if (err < 0)
- goto error_return;
- if (size != bytecount) {
-@@ -213,7 +211,7 @@ static int write_ldt(void __user * ptr,
- goto out;
- }
-
-- down(&mm->context.sem);
-+ mutex_lock(&mm->context.lock);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
- if (error < 0)
-@@ -240,7 +238,7 @@ install:
- entry_1, entry_2);
-
- out_unlock:
-- up(&mm->context.sem);
-+ mutex_unlock(&mm->context.lock);
- out:
- return error;
- }
---- head-2010-05-25.orig/arch/x86/kernel/ldt_64-xen.c 2010-03-24 15:09:22.000000000 +0100
-+++ head-2010-05-25/arch/x86/kernel/ldt_64-xen.c 2010-03-24 15:10:29.000000000 +0100
-@@ -1,6 +1,4 @@
- /*
-- * linux/arch/x86_64/kernel/ldt.c
-- *
- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1999 Ingo Molnar
- * Copyright (C) 2002 Andi Kleen
-@@ -112,19 +110,14 @@ int init_new_context(struct task_struct
- int retval = 0;
-
- memset(&mm->context, 0, sizeof(mm->context));
-- init_MUTEX(&mm->context.sem);
-+ mutex_init(&mm->context.lock);
- old_mm = current->mm;
- if (old_mm)
- mm->context.vdso = old_mm->context.vdso;
- if (old_mm && old_mm->context.size > 0) {
-- down(&old_mm->context.sem);
-+ mutex_lock(&old_mm->context.lock);
- retval = copy_ldt(&mm->context, &old_mm->context);
-- up(&old_mm->context.sem);
-- }
-- if (retval == 0) {
-- spin_lock(&mm_unpinned_lock);
- list_add(&mm->context.unpinned, &mm_unpinned);
-- spin_unlock(&mm_unpinned_lock);
-+ mutex_unlock(&old_mm->context.lock);
- }
- return retval;
- }
-@@ -148,11 +141,6 @@ void destroy_context(struct mm_struct *m
- kfree(mm->context.ldt);
- mm->context.size = 0;
- }
-- if (!PagePinned(virt_to_page(mm->pgd))) {
-- spin_lock(&mm_unpinned_lock);
-- list_del(&mm->context.unpinned);
-- spin_unlock(&mm_unpinned_lock);
-- }
- }
-
- static int read_ldt(void __user * ptr, unsigned long bytecount)
-@@ -166,7 +154,7 @@ static int read_ldt(void __user * ptr, u
- if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
- bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-
-- down(&mm->context.sem);
-+ mutex_lock(&mm->context.lock);
- size = mm->context.size*LDT_ENTRY_SIZE;
- if (size > bytecount)
- size = bytecount;
-@@ -174,7 +162,7 @@ static int read_ldt(void __user * ptr, u
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
-- up(&mm->context.sem);
-+ mutex_unlock(&mm->context.lock);
- if (err < 0)
- goto error_return;
- if (size != bytecount) {
-@@ -227,7 +215,7 @@ static int write_ldt(void __user * ptr,
- goto out;
- }
-
-- down(&mm->context.sem);
-+ mutex_lock(&mm->context.lock);
- if (ldt_info.entry_number >= (unsigned)mm->context.size) {
- error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
- if (error < 0)
-@@ -256,7 +244,7 @@ install:
- error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
-
- out_unlock:
-- up(&mm->context.sem);
-+ mutex_unlock(&mm->context.lock);
- out:
- return error;
- }
---- head-2010-05-25.orig/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:09:15.000000000 +0100
-+++ head-2010-05-25/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:10:29.000000000 +0100
-@@ -1023,7 +1023,7 @@ void __init mp_config_acpi_legacy_irqs (
-
- /*
- * Use the default configuration for the IRQs 0-15. Unless
-- * overriden by (MADT) interrupt source override entries.
-+ * overridden by (MADT) interrupt source override entries.
- */
- for (i = 0; i < 16; i++) {
- int idx;
---- head-2010-05-25.orig/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:09:22.000000000 +0100
-+++ head-2010-05-25/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:10:29.000000000 +0100
-@@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0;
-
- /* Processor that is doing the boot up */
- unsigned int boot_cpu_id = -1U;
-+EXPORT_SYMBOL(boot_cpu_id);
+
- /* Internal processor count */
- unsigned int num_processors __cpuinitdata = 0;
-
-@@ -87,7 +89,7 @@ static int __init mpf_checksum(unsigned
- }
-
- #ifndef CONFIG_XEN
--static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-+static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
- {
- int cpu;
- cpumask_t tmp_map;
-@@ -124,13 +126,24 @@ static void __cpuinit MP_processor_info
- cpu = 0;
- }
- bios_cpu_apicid[cpu] = m->mpc_apicid;
-- x86_cpu_to_apicid[cpu] = m->mpc_apicid;
-+ /*
-+ * We get called early in the the start_kernel initialization
-+ * process when the per_cpu data area is not yet setup, so we
-+ * use a static array that is removed after the per_cpu data
-+ */ -+ if (x86_cpu_to_apicid_ptr) { -+ u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr; -+ x86_cpu_to_apicid[cpu] = m->mpc_apicid; -+ } else { -+ per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; -+ } - - cpu_set(cpu, cpu_possible_map); - cpu_set(cpu, cpu_present_map); - } - #else --static void __cpuinit MP_processor_info (struct mpc_config_processor *m) -+static void __cpuinit MP_processor_info(struct mpc_config_processor *m) - { - num_processors++; - } ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -13,14 +13,13 @@ - #include - #include - #include --#include - #include - #include - #include - #include - #include --#include --#include -+#include -+#include - #include - - #ifdef __x86_64__ -@@ -106,27 +105,29 @@ int range_straddles_page_boundary(paddr_ - } - - int --dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, -+dma_map_sg(struct device *hwdev, struct scatterlist *sgl, int nents, - enum dma_data_direction direction) - { - int i, rc; - - BUG_ON(!valid_dma_direction(direction)); -- WARN_ON(nents == 0 || sg[0].length == 0); -+ WARN_ON(nents == 0 || sgl->length == 0); - - if (swiotlb) { -- rc = swiotlb_map_sg(hwdev, sg, nents, direction); -+ rc = swiotlb_map_sg(hwdev, sgl, nents, direction); - } else { -- for (i = 0; i < nents; i++ ) { -- BUG_ON(!sg[i].page); -- sg[i].dma_address = -- gnttab_dma_map_page(sg[i].page) + sg[i].offset; -- sg[i].dma_length = sg[i].length; -+ struct scatterlist *sg; ++static inline int __must_check ++HYPERVISOR_platform_op( ++ struct xen_platform_op *platform_op) ++{ ++ platform_op->interface_version = XENPF_INTERFACE_VERSION; ++ return _hypercall1(int, platform_op, platform_op); ++} + -+ for_each_sg(sgl, sg, nents, i) { -+ BUG_ON(!sg_page(sg)); -+ sg->dma_address = -+ gnttab_dma_map_page(sg_page(sg)) + sg->offset; -+ sg->dma_length = sg->length; - IOMMU_BUG_ON(address_needs_mapping( -- hwdev, sg[i].dma_address)); -+ hwdev, sg->dma_address)); - IOMMU_BUG_ON(range_straddles_page_boundary( -- page_to_pseudophys(sg[i].page) + sg[i].offset, -- sg[i].length)); -+ page_to_pseudophys(sg_page(sg)) + sg->offset, -+ sg->length)); - } - rc = nents; - } -@@ -137,17 +138,19 @@ dma_map_sg(struct device *hwdev, struct - EXPORT_SYMBOL(dma_map_sg); - - void --dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, -+dma_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nents, - enum dma_data_direction direction) - { - int i; - - BUG_ON(!valid_dma_direction(direction)); - if (swiotlb) -- swiotlb_unmap_sg(hwdev, sg, nents, direction); -+ swiotlb_unmap_sg(hwdev, sgl, nents, direction); - else { -- for (i = 0; i < nents; i++ ) -- gnttab_dma_unmap_page(sg[i].dma_address); -+ struct scatterlist *sg; ++struct xen_mc; ++static inline int __must_check ++HYPERVISOR_mca( ++ struct xen_mc *mc_op) ++{ ++ mc_op->interface_version = XEN_MCA_INTERFACE_VERSION; ++ return _hypercall1(int, mca, mc_op); ++} + -+ for_each_sg(sgl, sg, nents, i) -+ gnttab_dma_unmap_page(sg->dma_address); - } - } - EXPORT_SYMBOL(dma_unmap_sg); -@@ -258,7 +261,8 @@ void dma_free_coherent(struct device *de - { - struct dma_coherent_mem *mem = dev ? 
dev->dma_mem : NULL; - int order = get_order(size); -- ++static inline int __must_check ++HYPERVISOR_set_debugreg( ++ unsigned int reg, unsigned long value) ++{ ++ return _hypercall2(int, set_debugreg, reg, value); ++} + -+ WARN_ON(irqs_disabled()); /* for portability */ - if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; - ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,4 @@ - /* -- * linux/arch/i386/kernel/process.c -- * - * Copyright (C) 1995 Linus Torvalds - * - * Pentium III FXSR, SSE support -@@ -190,6 +188,10 @@ void cpu_idle(void) - } - } - -+static void do_nothing(void *unused) ++static inline unsigned long __must_check ++HYPERVISOR_get_debugreg( ++ unsigned int reg) +{ ++ return _hypercall1(unsigned long, get_debugreg, reg); +} + - void cpu_idle_wait(void) - { - unsigned int cpu, this_cpu = get_cpu(); -@@ -214,13 +216,20 @@ void cpu_idle_wait(void) - cpu_clear(cpu, map); - } - cpus_and(map, map, cpu_online_map); -+ /* -+ * We waited 1 sec, if a CPU still did not call idle -+ * it may be because it is in idle and not waking up -+ * because it has nothing to do. -+ * Give all the remaining CPUS a kick. -+ */ -+ smp_call_function_mask(map, do_nothing, 0, 0); - } while (!cpus_empty(map)); - - set_cpus_allowed(current, tmp); - } - EXPORT_SYMBOL_GPL(cpu_idle_wait); - --void __devinit select_idle_routine(const struct cpuinfo_x86 *c) -+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) - { - } - -@@ -238,34 +247,52 @@ static int __init idle_setup(char *str) - } - early_param("idle", idle_setup); - --void show_regs(struct pt_regs * regs) -+void __show_registers(struct pt_regs *regs, int all) - { - unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; - unsigned long d0, d1, d2, d3, d6, d7; -+ unsigned long esp; -+ unsigned short ss, gs; ++static inline int __must_check ++HYPERVISOR_memory_op( ++ unsigned int cmd, void *arg) ++{ ++ if (arch_use_lazy_mmu_mode()) ++ xen_multicall_flush(false); ++ return _hypercall2(int, memory_op, cmd, arg); ++} + -+ if (user_mode_vm(regs)) { -+ esp = regs->esp; -+ ss = regs->xss & 0xffff; -+ savesegment(gs, gs); -+ } else { -+ esp = (unsigned long) (®s->esp); -+ savesegment(ss, ss); -+ savesegment(gs, gs); -+ } - - printk("\n"); -- printk("Pid: %d, comm: %20s\n", current->pid, current->comm); -- printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); -+ printk("Pid: %d, comm: %s %s (%s %.*s)\n", -+ task_pid_nr(current), current->comm, -+ print_tainted(), init_utsname()->release, -+ (int)strcspn(init_utsname()->version, " "), -+ init_utsname()->version); ++static inline int __must_check ++HYPERVISOR_multicall( ++ multicall_entry_t *call_list, unsigned int nr_calls) ++{ ++ return _hypercall2(int, multicall, call_list, nr_calls); ++} + -+ printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", -+ 0xffff & regs->xcs, regs->eip, regs->eflags, -+ smp_processor_id()); - print_symbol("EIP is at %s\n", regs->eip); - -- if (user_mode_vm(regs)) -- printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); -- printk(" EFLAGS: %08lx %s (%s %.*s)\n", -- regs->eflags, print_tainted(), init_utsname()->release, -- (int)strcspn(init_utsname()->version, " "), -- init_utsname()->version); - printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", -- regs->eax,regs->ebx,regs->ecx,regs->edx); -- 
printk("ESI: %08lx EDI: %08lx EBP: %08lx", -- regs->esi, regs->edi, regs->ebp); -- printk(" DS: %04x ES: %04x FS: %04x\n", -- 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs); -+ regs->eax, regs->ebx, regs->ecx, regs->edx); -+ printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", -+ regs->esi, regs->edi, regs->ebp, esp); -+ printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", -+ regs->xds & 0xffff, regs->xes & 0xffff, -+ regs->xfs & 0xffff, gs, ss); ++static inline int __must_check ++HYPERVISOR_event_channel_op( ++ int cmd, void *arg) ++{ ++ int rc = _hypercall2(int, event_channel_op, cmd, arg); + -+ if (!all) -+ return; - - cr0 = read_cr0(); - cr2 = read_cr2(); - cr3 = read_cr3(); - cr4 = read_cr4_safe(); -- printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); -+ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", -+ cr0, cr2, cr3, cr4); - - get_debugreg(d0, 0); - get_debugreg(d1, 1); -@@ -273,10 +300,16 @@ void show_regs(struct pt_regs * regs) - get_debugreg(d3, 3); - printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", - d0, d1, d2, d3); ++#if CONFIG_XEN_COMPAT <= 0x030002 ++ if (unlikely(rc == -ENOSYS)) { ++ struct evtchn_op op; ++ op.cmd = cmd; ++ memcpy(&op.u, arg, sizeof(op.u)); ++ rc = _hypercall1(int, event_channel_op_compat, &op); ++ memcpy(arg, &op.u, sizeof(op.u)); ++ } ++#endif + - get_debugreg(d6, 6); - get_debugreg(d7, 7); -- printk("DR6: %08lx DR7: %08lx\n", d6, d7); -+ printk("DR6: %08lx DR7: %08lx\n", -+ d6, d7); ++ return rc; +} - -+void show_regs(struct pt_regs *regs) ++ ++static inline int __must_check ++HYPERVISOR_xen_version( ++ int cmd, void *arg) +{ -+ __show_registers(regs, 1); - show_trace(NULL, regs, ®s->esp); - } - ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,4 @@ - /* -- * linux/arch/x86-64/kernel/process.c -- * - * Copyright (C) 1995 Linus Torvalds - * - * Pentium III FXSR, SSE support -@@ -41,6 +39,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -172,6 +171,9 @@ void cpu_idle (void) - - if (__get_cpu_var(cpu_idle_state)) - __get_cpu_var(cpu_idle_state) = 0; -+ -+ tick_nohz_stop_sched_tick(); ++ return _hypercall2(int, xen_version, cmd, arg); ++} + - rmb(); - idle = xen_idle; /* no alternatives */ - if (cpu_is_offline(smp_processor_id())) -@@ -190,12 +192,17 @@ void cpu_idle (void) - __exit_idle(); - } - -+ tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } - } - -+static void do_nothing(void *unused) ++static inline int __must_check ++HYPERVISOR_console_io( ++ int cmd, unsigned int count, char *str) +{ ++ return _hypercall3(int, console_io, cmd, count, str); +} + - void cpu_idle_wait(void) - { - unsigned int cpu, this_cpu = get_cpu(); -@@ -221,6 +228,13 @@ void cpu_idle_wait(void) - cpu_clear(cpu, map); - } - cpus_and(map, map, cpu_online_map); -+ /* -+ * We waited 1 sec, if a CPU still did not call idle -+ * it may be because it is in idle and not waking up -+ * because it has nothing to do. -+ * Give all the remaining CPUS a kick. -+ */ -+ smp_call_function_mask(map, do_nothing, 0, 0); - } while (!cpus_empty(map)); - - set_cpus_allowed(current, tmp); -@@ -528,7 +542,7 @@ static inline void __switch_to_xtra(stru - * - * Kprobes not supported here. Set the probe on schedule instead. 
- */ --__kprobes struct task_struct * -+struct task_struct * - __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - { - struct thread_struct *prev = &prev_p->thread, ---- head-2010-05-25.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/quirks-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -41,7 +41,353 @@ static void __devinit quirk_intel_irqbal - if (!(config & 0x2)) - pci_write_config_byte(dev, 0xf4, config); - } --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); --DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, -+ quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, -+ quirk_intel_irqbalance); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, -+ quirk_intel_irqbalance); -+#endif -+ -+#if defined(CONFIG_HPET_TIMER) -+#include -+ -+unsigned long force_hpet_address; -+ -+static enum { -+ NONE_FORCE_HPET_RESUME, -+ OLD_ICH_FORCE_HPET_RESUME, -+ ICH_FORCE_HPET_RESUME, -+ VT8237_FORCE_HPET_RESUME, -+ NVIDIA_FORCE_HPET_RESUME, -+} force_hpet_resume_type; -+ -+static void __iomem *rcba_base; -+ -+static void ich_force_hpet_resume(void) ++static inline int __must_check ++HYPERVISOR_physdev_op( ++ int cmd, void *arg) +{ -+ u32 val; -+ -+ if (!force_hpet_address) -+ return; -+ -+ if (rcba_base == NULL) -+ BUG(); ++ int rc = _hypercall2(int, physdev_op, cmd, arg); + -+ /* read the Function Disable register, dword mode only */ -+ val = readl(rcba_base + 0x3404); -+ if (!(val & 0x80)) { -+ /* HPET disabled in HPTC. Trying to enable */ -+ writel(val | 0x80, rcba_base + 0x3404); ++#if CONFIG_XEN_COMPAT <= 0x030002 ++ if (unlikely(rc == -ENOSYS)) { ++ struct physdev_op op; ++ op.cmd = cmd; ++ memcpy(&op.u, arg, sizeof(op.u)); ++ rc = _hypercall1(int, physdev_op_compat, &op); ++ memcpy(arg, &op.u, sizeof(op.u)); + } ++#endif + -+ val = readl(rcba_base + 0x3404); -+ if (!(val & 0x80)) -+ BUG(); -+ else -+ printk(KERN_DEBUG "Force enabled HPET at resume\n"); -+ -+ return; ++ return rc; +} + -+static void ich_force_enable_hpet(struct pci_dev *dev) ++static inline int __must_check ++HYPERVISOR_grant_table_op( ++ unsigned int cmd, void *uop, unsigned int count) +{ -+ u32 val; -+ u32 uninitialized_var(rcba); -+ int err = 0; -+ -+ if (hpet_address || force_hpet_address) -+ return; -+ -+ pci_read_config_dword(dev, 0xF0, &rcba); -+ rcba &= 0xFFFFC000; -+ if (rcba == 0) { -+ printk(KERN_DEBUG "RCBA disabled. Cannot force enable HPET\n"); -+ return; -+ } -+ -+ /* use bits 31:14, 16 kB aligned */ -+ rcba_base = ioremap_nocache(rcba, 0x4000); -+ if (rcba_base == NULL) { -+ printk(KERN_DEBUG "ioremap failed. Cannot force enable HPET\n"); -+ return; -+ } -+ -+ /* read the Function Disable register, dword mode only */ -+ val = readl(rcba_base + 0x3404); -+ -+ if (val & 0x80) { -+ /* HPET is enabled in HPTC. Just not reported by BIOS */ -+ val = val & 0x3; -+ force_hpet_address = 0xFED00000 | (val << 12); -+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -+ force_hpet_address); -+ iounmap(rcba_base); -+ return; -+ } -+ -+ /* HPET disabled in HPTC. 
Trying to enable */ -+ writel(val | 0x80, rcba_base + 0x3404); -+ -+ val = readl(rcba_base + 0x3404); -+ if (!(val & 0x80)) { -+ err = 1; -+ } else { -+ val = val & 0x3; -+ force_hpet_address = 0xFED00000 | (val << 12); -+ } -+ -+ if (err) { -+ force_hpet_address = 0; -+ iounmap(rcba_base); -+ printk(KERN_DEBUG "Failed to force enable HPET\n"); -+ } else { -+ force_hpet_resume_type = ICH_FORCE_HPET_RESUME; -+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -+ force_hpet_address); -+ } ++ if (arch_use_lazy_mmu_mode()) ++ xen_multicall_flush(false); ++ return _hypercall3(int, grant_table_op, cmd, uop, count); +} + -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, -+ ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, -+ ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, -+ ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, -+ ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, -+ ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, -+ ich_force_enable_hpet); -+ -+ -+static struct pci_dev *cached_dev; -+ -+static void old_ich_force_hpet_resume(void) ++static inline int __must_check ++HYPERVISOR_vm_assist( ++ unsigned int cmd, unsigned int type) +{ -+ u32 val; -+ u32 uninitialized_var(gen_cntl); -+ -+ if (!force_hpet_address || !cached_dev) -+ return; -+ -+ pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); -+ gen_cntl &= (~(0x7 << 15)); -+ gen_cntl |= (0x4 << 15); -+ -+ pci_write_config_dword(cached_dev, 0xD0, gen_cntl); -+ pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); -+ val = gen_cntl >> 15; -+ val &= 0x7; -+ if (val == 0x4) -+ printk(KERN_DEBUG "Force enabled HPET at resume\n"); -+ else -+ BUG(); ++ return _hypercall2(int, vm_assist, cmd, type); +} + -+static void old_ich_force_enable_hpet(struct pci_dev *dev) ++static inline int __must_check ++HYPERVISOR_vcpu_op( ++ int cmd, unsigned int vcpuid, void *extra_args) +{ -+ u32 val; -+ u32 uninitialized_var(gen_cntl); ++ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); ++} + -+ if (hpet_address || force_hpet_address) -+ return; ++static inline int __must_check ++HYPERVISOR_suspend( ++ unsigned long srec) ++{ ++ struct sched_shutdown sched_shutdown = { ++ .reason = SHUTDOWN_suspend ++ }; + -+ pci_read_config_dword(dev, 0xD0, &gen_cntl); -+ /* -+ * Bit 17 is HPET enable bit. -+ * Bit 16:15 control the HPET base address. -+ */ -+ val = gen_cntl >> 15; -+ val &= 0x7; -+ if (val & 0x4) { -+ val &= 0x3; -+ force_hpet_address = 0xFED00000 | (val << 12); -+ printk(KERN_DEBUG "HPET at base address 0x%lx\n", -+ force_hpet_address); -+ return; -+ } ++ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, ++ &sched_shutdown, srec); + -+ /* -+ * HPET is disabled. Trying enabling at FED00000 and check -+ * whether it sticks -+ */ -+ gen_cntl &= (~(0x7 << 15)); -+ gen_cntl |= (0x4 << 15); -+ pci_write_config_dword(dev, 0xD0, gen_cntl); -+ -+ pci_read_config_dword(dev, 0xD0, &gen_cntl); -+ -+ val = gen_cntl >> 15; -+ val &= 0x7; -+ if (val & 0x4) { -+ /* HPET is enabled in HPTC. 
Just not reported by BIOS */ -+ val &= 0x3; -+ force_hpet_address = 0xFED00000 | (val << 12); -+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -+ force_hpet_address); -+ cached_dev = dev; -+ force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME; -+ return; -+ } ++#if CONFIG_XEN_COMPAT <= 0x030002 ++ if (rc == -ENOSYS) ++ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, ++ SHUTDOWN_suspend, srec); ++#endif + -+ printk(KERN_DEBUG "Failed to force enable HPET\n"); ++ return rc; +} + -+/* -+ * Undocumented chipset features. Make sure that the user enforced -+ * this. -+ */ -+static void old_ich_force_enable_hpet_user(struct pci_dev *dev) ++#if CONFIG_XEN_COMPAT <= 0x030002 ++static inline int ++HYPERVISOR_nmi_op( ++ unsigned long op, void *arg) +{ -+ if (hpet_force_user) -+ old_ich_force_enable_hpet(dev); ++ return _hypercall2(int, nmi_op, op, arg); +} ++#endif + -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, -+ old_ich_force_enable_hpet_user); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, -+ old_ich_force_enable_hpet_user); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, -+ old_ich_force_enable_hpet_user); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, -+ old_ich_force_enable_hpet_user); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, -+ old_ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, -+ old_ich_force_enable_hpet); -+ -+ -+static void vt8237_force_hpet_resume(void) ++#ifndef CONFIG_XEN ++static inline unsigned long __must_check ++HYPERVISOR_hvm_op( ++ int op, void *arg) +{ -+ u32 val; -+ -+ if (!force_hpet_address || !cached_dev) -+ return; -+ -+ val = 0xfed00000 | 0x80; -+ pci_write_config_dword(cached_dev, 0x68, val); -+ -+ pci_read_config_dword(cached_dev, 0x68, &val); -+ if (val & 0x80) -+ printk(KERN_DEBUG "Force enabled HPET at resume\n"); -+ else -+ BUG(); ++ return _hypercall2(unsigned long, hvm_op, op, arg); +} ++#endif + -+static void vt8237_force_enable_hpet(struct pci_dev *dev) ++static inline int __must_check ++HYPERVISOR_callback_op( ++ int cmd, const void *arg) +{ -+ u32 uninitialized_var(val); -+ -+ if (!hpet_force_user || hpet_address || force_hpet_address) -+ return; -+ -+ pci_read_config_dword(dev, 0x68, &val); -+ /* -+ * Bit 7 is HPET enable bit. -+ * Bit 31:10 is HPET base address (contrary to what datasheet claims) -+ */ -+ if (val & 0x80) { -+ force_hpet_address = (val & ~0x3ff); -+ printk(KERN_DEBUG "HPET at base address 0x%lx\n", -+ force_hpet_address); -+ return; -+ } -+ -+ /* -+ * HPET is disabled. Trying enabling at FED00000 and check -+ * whether it sticks -+ */ -+ val = 0xfed00000 | 0x80; -+ pci_write_config_dword(dev, 0x68, val); -+ -+ pci_read_config_dword(dev, 0x68, &val); -+ if (val & 0x80) { -+ force_hpet_address = (val & ~0x3ff); -+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -+ force_hpet_address); -+ cached_dev = dev; -+ force_hpet_resume_type = VT8237_FORCE_HPET_RESUME; -+ return; -+ } -+ -+ printk(KERN_DEBUG "Failed to force enable HPET\n"); ++ return _hypercall2(int, callback_op, cmd, arg); +} + -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, -+ vt8237_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, -+ vt8237_force_enable_hpet); -+ -+/* -+ * Undocumented chipset feature taken from LinuxBIOS. 
-+ */ -+static void nvidia_force_hpet_resume(void) ++static inline int __must_check ++HYPERVISOR_xenoprof_op( ++ int op, void *arg) +{ -+ pci_write_config_dword(cached_dev, 0x44, 0xfed00001); -+ printk(KERN_DEBUG "Force enabled HPET at resume\n"); ++ return _hypercall2(int, xenoprof_op, op, arg); +} + -+static void nvidia_force_enable_hpet(struct pci_dev *dev) ++static inline int __must_check ++HYPERVISOR_kexec_op( ++ unsigned long op, void *args) +{ -+ u32 uninitialized_var(val); -+ -+ if (!hpet_force_user || hpet_address || force_hpet_address) -+ return; -+ -+ pci_write_config_dword(dev, 0x44, 0xfed00001); -+ pci_read_config_dword(dev, 0x44, &val); -+ force_hpet_address = val & 0xfffffffe; -+ force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME; -+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -+ force_hpet_address); -+ cached_dev = dev; -+ return; ++ return _hypercall2(int, kexec_op, op, args); +} + -+/* ISA Bridges */ -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051, -+ nvidia_force_enable_hpet); -+ -+/* LPC bridges */ -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366, -+ nvidia_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367, -+ nvidia_force_enable_hpet); -+ -+void force_hpet_resume(void) ++static inline int __must_check ++HYPERVISOR_tmem_op( ++ struct tmem_op *op) +{ -+ switch (force_hpet_resume_type) { -+ case ICH_FORCE_HPET_RESUME: -+ return ich_force_hpet_resume(); -+ -+ case OLD_ICH_FORCE_HPET_RESUME: -+ return old_ich_force_hpet_resume(); -+ -+ case VT8237_FORCE_HPET_RESUME: -+ return vt8237_force_hpet_resume(); -+ -+ case NVIDIA_FORCE_HPET_RESUME: -+ return nvidia_force_hpet_resume(); -+ -+ default: -+ break; -+ } ++ return _hypercall1(int, tmem_op, op); +} + - #endif ---- head-2010-05-25.orig/arch/x86/kernel/setup64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -15,7 +15,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -27,11 +26,12 @@ - #include - #include - #include -+#include - #ifdef CONFIG_XEN - #include - #endif - --char x86_boot_params[BOOT_PARAM_SIZE] __initdata; -+struct boot_params __initdata boot_params; - - cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; - -@@ -159,8 +159,8 @@ static void switch_pt(void) - - static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr) - { -- asm volatile("lgdt %0" :: "m" (*gdt_descr)); -- asm volatile("lidt %0" :: "m" (idt_descr)); -+ load_gdt(gdt_descr); -+ load_idt(idt_descr); - } - #endif - -@@ -252,6 +252,14 @@ void __cpuinit check_efer(void) - - unsigned long kernel_eflags; - -+#ifndef CONFIG_X86_NO_TSS -+/* -+ * Copies of the original ist values from the tss are only accessed during -+ * debugging, no special alignment required. 
-+ */ -+DEFINE_PER_CPU(struct orig_ist, orig_ist); -+#endif -+ - /* - * cpu_init() initializes state that is per-CPU. Some data is already - * initialized (naturally) in the bootstrap process, such as the GDT ---- head-2010-05-25.orig/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,4 @@ - /* -- * linux/arch/i386/kernel/setup.c -- * - * Copyright (C) 1995 Linus Torvalds - * - * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 -@@ -70,6 +68,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_XEN - #include -@@ -80,13 +79,14 @@ static struct notifier_block xen_panic_b - xen_panic_event, NULL, 0 /* try to go last */ - }; - --int disable_pse __devinitdata = 0; -+int disable_pse __cpuinitdata = 0; - - /* - * Machine setup.. - */ - extern struct resource code_resource; - extern struct resource data_resource; -+extern struct resource bss_resource; - - /* cpu data as detected by the assembly code in head.S */ - struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; -@@ -98,9 +98,6 @@ unsigned long mmu_cr4_features; - - /* for MCA, but anyone else can use it if they want */ - unsigned int machine_id; --#ifdef CONFIG_MCA --EXPORT_SYMBOL(machine_id); ++#endif /* __HYPERCALL_H__ */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypercall_32.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypercall_32.h 2011-01-31 17:56:27.000000000 +0100 +@@ -1,191 +1,10 @@ +-/****************************************************************************** +- * hypercall.h +- * +- * Linux-specific hypervisor handling. +- * +- * Copyright (c) 2002-2004, K A Fraser +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation; or, when distributed +- * separately from the Linux kernel or incorporated into other +- * software packages, subject to the following license: +- * +- * Permission is hereby granted, free of charge, to any person obtaining a copy +- * of this source file (the "Software"), to deal in the Software without +- * restriction, including without limitation the rights to use, copy, modify, +- * merge, publish, distribute, sublicense, and/or sell copies of the Software, +- * and to permit persons to whom the Software is furnished to do so, subject to +- * the following conditions: +- * +- * The above copyright notice and this permission notice shall be included in +- * all copies or substantial portions of the Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +- * IN THE SOFTWARE. 
+- */ +- +-#ifndef __HYPERCALL_H__ +-#define __HYPERCALL_H__ +- +-#include /* memcpy() */ +-#include +- +-#ifndef __HYPERVISOR_H__ +-# error "please don't include this file directly" -#endif - unsigned int machine_submodel_id; - unsigned int BIOS_revision; - unsigned int mca_pentium_flag; -@@ -121,7 +118,7 @@ EXPORT_SYMBOL(apm_info); - struct edid_info edid_info; - EXPORT_SYMBOL_GPL(edid_info); - #ifndef CONFIG_XEN --#define copy_edid() (edid_info = EDID_INFO) -+#define copy_edid() (edid_info = boot_params.edid_info) - #endif - struct ist_info ist_info; - #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \ -@@ -170,10 +167,11 @@ EXPORT_SYMBOL(edd); - */ - static inline void copy_edd(void) - { -- memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature)); -- memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info)); -- edd.mbr_signature_nr = EDD_MBR_SIG_NR; -- edd.edd_info_nr = EDD_NR; -+ memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, -+ sizeof(edd.mbr_signature)); -+ memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); -+ edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; -+ edd.edd_info_nr = boot_params.eddbuf_entries; - } - #endif - #else -@@ -418,6 +416,53 @@ extern unsigned long __init setup_memory - extern void zone_sizes_init(void); - #endif /* !CONFIG_NEED_MULTIPLE_NODES */ - -+static inline unsigned long long get_total_mem(void) -+{ -+ unsigned long long total; -+ -+ total = max_low_pfn - min_low_pfn; -+#ifdef CONFIG_HIGHMEM -+ total += highend_pfn - highstart_pfn; -+#endif -+ -+ return total << PAGE_SHIFT; -+} -+ -+#ifdef CONFIG_KEXEC -+#ifndef CONFIG_XEN -+static void __init reserve_crashkernel(void) -+{ -+ unsigned long long total_mem; -+ unsigned long long crash_size, crash_base; -+ int ret; -+ -+ total_mem = get_total_mem(); -+ -+ ret = parse_crashkernel(boot_command_line, total_mem, -+ &crash_size, &crash_base); -+ if (ret == 0 && crash_size > 0) { -+ if (crash_base > 0) { -+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " -+ "for crashkernel (System RAM: %ldMB)\n", -+ (unsigned long)(crash_size >> 20), -+ (unsigned long)(crash_base >> 20), -+ (unsigned long)(total_mem >> 20)); -+ crashk_res.start = crash_base; -+ crashk_res.end = crash_base + crash_size - 1; -+ reserve_bootmem(crash_base, crash_size); -+ } else -+ printk(KERN_INFO "crashkernel reservation failed - " -+ "you have to specify a base address\n"); -+ } -+} -+#else -+#define reserve_crashkernel xen_machine_kexec_setup_resources -+#endif -+#else -+static inline void __init reserve_crashkernel(void) -+{} -+#endif -+ - void __init setup_bootmem_allocator(void) - { - unsigned long bootmap_size; -@@ -473,30 +518,25 @@ void __init setup_bootmem_allocator(void - - #ifdef CONFIG_BLK_DEV_INITRD - if (xen_start_info->mod_start) { -- if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { -- /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/ -- initrd_start = INITRD_START + PAGE_OFFSET; -- initrd_end = initrd_start+INITRD_SIZE; -+ unsigned long ramdisk_image = __pa(xen_start_info->mod_start); -+ unsigned long ramdisk_size = xen_start_info->mod_len; -+ unsigned long ramdisk_end = ramdisk_image + ramdisk_size; -+ unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT; -+ -+ if (ramdisk_end <= end_of_lowmem) { -+ /*reserve_bootmem(ramdisk_image, ramdisk_size);*/ -+ initrd_start = ramdisk_image + PAGE_OFFSET; -+ initrd_end = initrd_start+ramdisk_size; - initrd_below_start_ok = 1; -- } -- else { -+ } else { - printk(KERN_ERR "initrd extends beyond end of memory " -- "(0x%08lx > 
0x%08lx)\ndisabling initrd\n", -- INITRD_START + INITRD_SIZE, -- max_low_pfn << PAGE_SHIFT); -+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n", -+ ramdisk_end, end_of_lowmem); - initrd_start = 0; - } - } - #endif --#ifdef CONFIG_KEXEC +- -#ifdef CONFIG_XEN -- xen_machine_kexec_setup_resources(); +-#define HYPERCALL_STR(name) \ +- "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" -#else -- if (crashk_res.start != crashk_res.end) -- reserve_bootmem(crashk_res.start, -- crashk_res.end - crashk_res.start + 1); --#endif +-#define HYPERCALL_STR(name) \ +- "mov hypercall_stubs,%%eax; " \ +- "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ +- "call *%%eax" -#endif -+ reserve_crashkernel(); - } - - /* -@@ -574,7 +614,8 @@ void __init setup_arch(char **cmdline_p) - * the system table is valid. If not, then initialize normally. - */ - #ifdef CONFIG_EFI -- if ((LOADER_TYPE == 0x50) && EFI_SYSTAB) -+ if ((boot_params.hdr.type_of_loader == 0x50) && -+ boot_params.efi_info.efi_systab) - efi_enabled = 1; - #endif - -@@ -582,18 +623,18 @@ void __init setup_arch(char **cmdline_p) - properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd. - */ - ROOT_DEV = MKDEV(UNNAMED_MAJOR,0); -- screen_info = SCREEN_INFO; -+ screen_info = boot_params.screen_info; - copy_edid(); -- apm_info.bios = APM_BIOS_INFO; -- ist_info = IST_INFO; -- saved_videomode = VIDEO_MODE; -- if( SYS_DESC_TABLE.length != 0 ) { -- set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2); -- machine_id = SYS_DESC_TABLE.table[0]; -- machine_submodel_id = SYS_DESC_TABLE.table[1]; -- BIOS_revision = SYS_DESC_TABLE.table[2]; -+ apm_info.bios = boot_params.apm_bios_info; -+ ist_info = boot_params.ist_info; -+ saved_videomode = boot_params.hdr.vid_mode; -+ if( boot_params.sys_desc_table.length != 0 ) { -+ set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2); -+ machine_id = boot_params.sys_desc_table.table[0]; -+ machine_submodel_id = boot_params.sys_desc_table.table[1]; -+ BIOS_revision = boot_params.sys_desc_table.table[2]; - } -- bootloader_type = LOADER_TYPE; -+ bootloader_type = boot_params.hdr.type_of_loader; +- +-#define _hypercall0(type, name) \ +-({ \ +- type __res; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res) \ +- : \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall1(type, name, a1) \ +-({ \ +- type __res; \ +- long __ign1; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=b" (__ign1) \ +- : "1" ((long)(a1)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall2(type, name, a1, a2) \ +-({ \ +- type __res; \ +- long __ign1, __ign2; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ +- : "1" ((long)(a1)), "2" ((long)(a2)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall3(type, name, a1, a2, a3) \ +-({ \ +- type __res; \ +- long __ign1, __ign2, __ign3; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ +- "=d" (__ign3) \ +- : "1" ((long)(a1)), "2" ((long)(a2)), \ +- "3" ((long)(a3)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall4(type, name, a1, a2, a3, a4) \ +-({ \ +- type __res; \ +- long __ign1, __ign2, __ign3, __ign4; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ +- "=d" (__ign3), "=S" (__ign4) \ +- : "1" ((long)(a1)), "2" ((long)(a2)), \ +- "3" ((long)(a3)), "4" ((long)(a4)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ +-({ \ +- type __res; \ +- 
long __ign1, __ign2, __ign3, __ign4, __ign5; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ +- "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ +- : "1" ((long)(a1)), "2" ((long)(a2)), \ +- "3" ((long)(a3)), "4" ((long)(a4)), \ +- "5" ((long)(a5)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall(type, op, a1, a2, a3, a4, a5) \ +-({ \ +- type __res; \ +- register typeof((a1)+0) __arg1 asm("ebx") = (a1); \ +- register typeof((a2)+0) __arg2 asm("ecx") = (a2); \ +- register typeof((a3)+0) __arg3 asm("edx") = (a3); \ +- register typeof((a4)+0) __arg4 asm("esi") = (a4); \ +- register typeof((a5)+0) __arg5 asm("edi") = (a5); \ +- asm volatile ( \ +- "call *%6" \ +- : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ +- "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ +- : "0" (hypercall_page + (op) * 32) \ +- : "memory" ); \ +- __res; \ +-}) +- +-static inline int __must_check +-HYPERVISOR_set_trap_table( +- const trap_info_t *table) +-{ +- return _hypercall1(int, set_trap_table, table); +-} +- +-static inline int __must_check +-HYPERVISOR_mmu_update( +- mmu_update_t *req, unsigned int count, unsigned int *success_count, +- domid_t domid) +-{ +- if (arch_use_lazy_mmu_mode()) +- return xen_multi_mmu_update(req, count, success_count, domid); +- return _hypercall4(int, mmu_update, req, count, success_count, domid); +-} +- +-static inline int __must_check +-HYPERVISOR_mmuext_op( +- struct mmuext_op *op, unsigned int count, unsigned int *success_count, +- domid_t domid) +-{ +- if (arch_use_lazy_mmu_mode()) +- return xen_multi_mmuext_op(op, count, success_count, domid); +- return _hypercall4(int, mmuext_op, op, count, success_count, domid); +-} +- +-static inline int __must_check +-HYPERVISOR_set_gdt( +- unsigned long *frame_list, unsigned int entries) +-{ +- return _hypercall2(int, set_gdt, frame_list, entries); +-} +- +-static inline int __must_check +-HYPERVISOR_stack_switch( +- unsigned long ss, unsigned long esp) +-{ +- return _hypercall2(int, stack_switch, ss, esp); +-} ++#define HYPERCALL_arg1 "ebx" ++#define HYPERCALL_arg2 "ecx" ++#define HYPERCALL_arg3 "edx" ++#define HYPERCALL_arg4 "esi" ++#define HYPERCALL_arg5 "edi" - if (is_initial_xendomain()) { - const struct dom0_vga_console_info *info = -@@ -608,9 +649,9 @@ void __init setup_arch(char **cmdline_p) - screen_info.orig_video_isVGA = 0; ++#if CONFIG_XEN_COMPAT <= 0x030002 + static inline int __must_check + HYPERVISOR_set_callbacks( + unsigned long event_selector, unsigned long event_address, +@@ -195,80 +14,24 @@ HYPERVISOR_set_callbacks( + event_selector, event_address, + failsafe_selector, failsafe_address); + } +- +-static inline int +-HYPERVISOR_fpu_taskswitch( +- int set) +-{ +- return _hypercall1(int, fpu_taskswitch, set); +-} +- +-static inline int __must_check +-HYPERVISOR_sched_op_compat( +- int cmd, unsigned long arg) +-{ +- return _hypercall2(int, sched_op_compat, cmd, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_sched_op( +- int cmd, void *arg) +-{ +- return _hypercall2(int, sched_op, cmd, arg); +-} ++#endif - #ifdef CONFIG_BLK_DEV_RAM -- rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; -- rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); -- rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); -+ rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; -+ rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); -+ rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); - #endif + static inline long 
__must_check + HYPERVISOR_set_timer_op( + u64 timeout) + { +- unsigned long timeout_hi = (unsigned long)(timeout>>32); +- unsigned long timeout_lo = (unsigned long)timeout; +- return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); +-} +- +-static inline int __must_check +-HYPERVISOR_platform_op( +- struct xen_platform_op *platform_op) +-{ +- platform_op->interface_version = XENPF_INTERFACE_VERSION; +- return _hypercall1(int, platform_op, platform_op); +-} +- +-static inline int __must_check +-HYPERVISOR_set_debugreg( +- unsigned int reg, unsigned long value) +-{ +- return _hypercall2(int, set_debugreg, reg, value); +-} +- +-static inline unsigned long __must_check +-HYPERVISOR_get_debugreg( +- unsigned int reg) +-{ +- return _hypercall1(unsigned long, get_debugreg, reg); ++ return _hypercall2(long, set_timer_op, ++ (unsigned long)timeout, ++ (unsigned long)(timeout>>32)); + } - ARCH_SETUP -@@ -623,7 +664,7 @@ void __init setup_arch(char **cmdline_p) + static inline int __must_check + HYPERVISOR_update_descriptor( + u64 ma, u64 desc) + { +- return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); +-} +- +-static inline int __must_check +-HYPERVISOR_memory_op( +- unsigned int cmd, void *arg) +-{ +- if (arch_use_lazy_mmu_mode()) +- xen_multicall_flush(false); +- return _hypercall2(int, memory_op, cmd, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_multicall( +- multicall_entry_t *call_list, unsigned int nr_calls) +-{ +- return _hypercall2(int, multicall, call_list, nr_calls); ++ return _hypercall4(int, update_descriptor, ++ (unsigned long)ma, (unsigned long)(ma>>32), ++ (unsigned long)desc, (unsigned long)(desc>>32)); + } - copy_edd(); + static inline int __must_check +@@ -287,67 +50,6 @@ HYPERVISOR_update_va_mapping( + } -- if (!MOUNT_ROOT_RDONLY) -+ if (!boot_params.hdr.root_flags) - root_mountflags &= ~MS_RDONLY; - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; -@@ -635,6 +676,8 @@ void __init setup_arch(char **cmdline_p) - code_resource.end = virt_to_phys(_etext)-1; - data_resource.start = virt_to_phys(_etext); - data_resource.end = virt_to_phys(_edata)-1; -+ bss_resource.start = virt_to_phys(&__bss_start); -+ bss_resource.end = virt_to_phys(&__bss_stop)-1; - - if ((i = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE) - i = COMMAND_LINE_SIZE; -@@ -663,7 +706,7 @@ void __init setup_arch(char **cmdline_p) - /* - * NOTE: before this point _nobody_ is allowed to allocate - * any memory using the bootmem allocator. Although the -- * alloctor is now initialised only the first 8Mb of the kernel -+ * allocator is now initialised only the first 8Mb of the kernel - * virtual address space has been mapped. 
All allocations before - * paging_init() has completed must use the alloc_bootmem_low_pages() - * variant (which allocates DMA'able memory) and care must be taken -@@ -786,10 +829,8 @@ void __init setup_arch(char **cmdline_p) - acpi_boot_table_init(); - #endif - --#ifdef CONFIG_PCI --#ifdef CONFIG_X86_IO_APIC -- check_acpi_pci(); /* Checks more than just ACPI actually */ + static inline int __must_check +-HYPERVISOR_event_channel_op( +- int cmd, void *arg) +-{ +- int rc = _hypercall2(int, event_channel_op, cmd, arg); +- +-#if CONFIG_XEN_COMPAT <= 0x030002 +- if (unlikely(rc == -ENOSYS)) { +- struct evtchn_op op; +- op.cmd = cmd; +- memcpy(&op.u, arg, sizeof(op.u)); +- rc = _hypercall1(int, event_channel_op_compat, &op); +- memcpy(arg, &op.u, sizeof(op.u)); +- } -#endif -+#if defined(CONFIG_PCI) && !defined(CONFIG_XEN) -+ early_quirks(); - #endif - - #ifdef CONFIG_ACPI ---- head-2010-05-25.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,10 +1,5 @@ - /* -- * linux/arch/x86-64/kernel/setup.c -- * - * Copyright (C) 1995 Linus Torvalds -- * -- * Nov 2001 Dave Jones -- * Forked from i386 setup code. - */ - - /* -@@ -57,13 +52,13 @@ - #include - #include - #include --#include - #include - #include - #include - #include - #include - #include -+#include - #ifdef CONFIG_XEN - #include - #include -@@ -180,6 +175,12 @@ struct resource code_resource = { - .end = 0, - .flags = IORESOURCE_RAM, - }; -+struct resource bss_resource = { -+ .name = "Kernel bss", -+ .start = 0, -+ .end = 0, -+ .flags = IORESOURCE_RAM, -+}; - - #ifdef CONFIG_PROC_VMCORE - /* elfcorehdr= specifies the location of elf core header -@@ -232,10 +233,11 @@ EXPORT_SYMBOL(edd); - */ - static inline void copy_edd(void) +- +- return rc; +-} +- +-static inline int __must_check +-HYPERVISOR_xen_version( +- int cmd, void *arg) +-{ +- return _hypercall2(int, xen_version, cmd, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_console_io( +- int cmd, unsigned int count, char *str) +-{ +- return _hypercall3(int, console_io, cmd, count, str); +-} +- +-static inline int __must_check +-HYPERVISOR_physdev_op( +- int cmd, void *arg) +-{ +- int rc = _hypercall2(int, physdev_op, cmd, arg); +- +-#if CONFIG_XEN_COMPAT <= 0x030002 +- if (unlikely(rc == -ENOSYS)) { +- struct physdev_op op; +- op.cmd = cmd; +- memcpy(&op.u, arg, sizeof(op.u)); +- rc = _hypercall1(int, physdev_op_compat, &op); +- memcpy(arg, &op.u, sizeof(op.u)); +- } +-#endif +- +- return rc; +-} +- +-static inline int __must_check +-HYPERVISOR_grant_table_op( +- unsigned int cmd, void *uop, unsigned int count) +-{ +- if (arch_use_lazy_mmu_mode()) +- xen_multicall_flush(false); +- return _hypercall3(int, grant_table_op, cmd, uop, count); +-} +- +-static inline int __must_check + HYPERVISOR_update_va_mapping_otherdomain( + unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { -- memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature)); -- memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info)); -- edd.mbr_signature_nr = EDD_MBR_SIG_NR; -- edd.edd_info_nr = EDD_NR; -+ memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, -+ sizeof(edd.mbr_signature)); -+ memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); -+ edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; -+ edd.edd_info_nr = boot_params.eddbuf_entries; - } - #endif - #else -@@ -244,6 +246,41 @@ static inline void copy_edd(void) +@@ -358,86 +60,3 @@ 
HYPERVISOR_update_va_mapping_otherdomain + return _hypercall5(int, update_va_mapping_otherdomain, va, + new_val.pte_low, pte_hi, flags, domid); } - #endif - -+#ifdef CONFIG_KEXEC -+#ifndef CONFIG_XEN -+static void __init reserve_crashkernel(void) -+{ -+ unsigned long long free_mem; -+ unsigned long long crash_size, crash_base; -+ int ret; -+ -+ free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT; -+ -+ ret = parse_crashkernel(boot_command_line, free_mem, -+ &crash_size, &crash_base); -+ if (ret == 0 && crash_size) { -+ if (crash_base > 0) { -+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " -+ "for crashkernel (System RAM: %ldMB)\n", -+ (unsigned long)(crash_size >> 20), -+ (unsigned long)(crash_base >> 20), -+ (unsigned long)(free_mem >> 20)); -+ crashk_res.start = crash_base; -+ crashk_res.end = crash_base + crash_size - 1; -+ reserve_bootmem(crash_base, crash_size); -+ } else -+ printk(KERN_INFO "crashkernel reservation failed - " -+ "you have to specify a base address\n"); -+ } -+} -+#else -+#define reserve_crashkernel xen_machine_kexec_setup_resources -+#endif -+#else -+static inline void __init reserve_crashkernel(void) -+{} -+#endif -+ - #ifndef CONFIG_XEN - #define EBDA_ADDR_POINTER 0x40E - -@@ -284,7 +321,7 @@ void __init setup_arch(char **cmdline_p) - atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); - - ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); -- screen_info = SCREEN_INFO; -+ screen_info = boot_params.screen_info; - - if (is_initial_xendomain()) { - const struct dom0_vga_console_info *info = -@@ -307,22 +344,22 @@ void __init setup_arch(char **cmdline_p) - #else - printk(KERN_INFO "Command line: %s\n", boot_command_line); - -- ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); -- screen_info = SCREEN_INFO; -- edid_info = EDID_INFO; -+ ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); -+ screen_info = boot_params.screen_info; -+ edid_info = boot_params.edid_info; - #endif /* !CONFIG_XEN */ -- saved_video_mode = SAVED_VIDEO_MODE; -- bootloader_type = LOADER_TYPE; -+ saved_video_mode = boot_params.hdr.vid_mode; -+ bootloader_type = boot_params.hdr.type_of_loader; - - #ifdef CONFIG_BLK_DEV_RAM -- rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; -- rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); -- rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); -+ rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; -+ rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); -+ rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); - #endif - setup_memory_region(); - copy_edd(); - -- if (!MOUNT_ROOT_RDONLY) -+ if (!boot_params.hdr.root_flags) - root_mountflags &= ~MS_RDONLY; - init_mm.start_code = (unsigned long) &_text; - init_mm.end_code = (unsigned long) &_etext; -@@ -333,6 +370,8 @@ void __init setup_arch(char **cmdline_p) - code_resource.end = virt_to_phys(&_etext)-1; - data_resource.start = virt_to_phys(&_etext); - data_resource.end = virt_to_phys(&_edata)-1; -+ bss_resource.start = virt_to_phys(&__bss_start); -+ bss_resource.end = virt_to_phys(&__bss_stop)-1; - - early_identify_cpu(&boot_cpu_data); - -@@ -360,6 +399,11 @@ void __init setup_arch(char **cmdline_p) - if (is_initial_xendomain()) - dmi_scan_machine(); - -+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) -+ /* setup to use the static apicid table during kernel startup */ -+ x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init; -+#endif -+ - /* How many end-of-memory variables you have, grandma! 
*/ - max_low_pfn = end_pfn; - max_pfn = end_pfn; -@@ -424,52 +468,37 @@ void __init setup_arch(char **cmdline_p) - */ - acpi_reserve_bootmem(); - #endif --#ifdef CONFIG_XEN - #ifdef CONFIG_BLK_DEV_INITRD -+#ifdef CONFIG_XEN - if (xen_start_info->mod_start) { -- if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) { -- /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/ -- initrd_start = INITRD_START + PAGE_OFFSET; -- initrd_end = initrd_start+INITRD_SIZE; -+ unsigned long ramdisk_image = __pa(xen_start_info->mod_start); -+ unsigned long ramdisk_size = xen_start_info->mod_len; -+#else -+ if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { -+ unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; -+ unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; -+#endif -+ unsigned long ramdisk_end = ramdisk_image + ramdisk_size; -+ unsigned long end_of_mem = end_pfn << PAGE_SHIFT; -+ -+ if (ramdisk_end <= end_of_mem) { -+#ifndef CONFIG_XEN -+ reserve_bootmem_generic(ramdisk_image, ramdisk_size); -+#endif -+ initrd_start = ramdisk_image + PAGE_OFFSET; -+ initrd_end = initrd_start+ramdisk_size; -+#ifdef CONFIG_XEN - initrd_below_start_ok = 1; -- } else { -- printk(KERN_ERR "initrd extends beyond end of memory " -- "(0x%08lx > 0x%08lx)\ndisabling initrd\n", -- (unsigned long)(INITRD_START + INITRD_SIZE), -- (unsigned long)(end_pfn << PAGE_SHIFT)); -- initrd_start = 0; -- } -- } - #endif --#else /* CONFIG_XEN */ --#ifdef CONFIG_BLK_DEV_INITRD -- if (LOADER_TYPE && INITRD_START) { -- if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) { -- reserve_bootmem_generic(INITRD_START, INITRD_SIZE); -- initrd_start = INITRD_START + PAGE_OFFSET; -- initrd_end = initrd_start+INITRD_SIZE; -- } -- else { -+ } else { - printk(KERN_ERR "initrd extends beyond end of memory " -- "(0x%08lx > 0x%08lx)\ndisabling initrd\n", -- (unsigned long)(INITRD_START + INITRD_SIZE), -- (unsigned long)(end_pfn << PAGE_SHIFT)); -+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n", -+ ramdisk_end, end_of_mem); - initrd_start = 0; - } - } - #endif --#endif /* !CONFIG_XEN */ --#ifdef CONFIG_KEXEC +- +-static inline int __must_check +-HYPERVISOR_vm_assist( +- unsigned int cmd, unsigned int type) +-{ +- return _hypercall2(int, vm_assist, cmd, type); +-} +- +-static inline int __must_check +-HYPERVISOR_vcpu_op( +- int cmd, unsigned int vcpuid, void *extra_args) +-{ +- return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); +-} +- +-static inline int __must_check +-HYPERVISOR_suspend( +- unsigned long srec) +-{ +- struct sched_shutdown sched_shutdown = { +- .reason = SHUTDOWN_suspend +- }; +- +- int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, +- &sched_shutdown, srec); +- +-#if CONFIG_XEN_COMPAT <= 0x030002 +- if (rc == -ENOSYS) +- rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, +- SHUTDOWN_suspend, srec); +-#endif +- +- return rc; +-} +- +-#if CONFIG_XEN_COMPAT <= 0x030002 +-static inline int +-HYPERVISOR_nmi_op( +- unsigned long op, void *arg) +-{ +- return _hypercall2(int, nmi_op, op, arg); +-} +-#endif +- +-#ifndef CONFIG_XEN +-static inline unsigned long __must_check +-HYPERVISOR_hvm_op( +- int op, void *arg) +-{ +- return _hypercall2(unsigned long, hvm_op, op, arg); +-} +-#endif +- +-static inline int __must_check +-HYPERVISOR_callback_op( +- int cmd, const void *arg) +-{ +- return _hypercall2(int, callback_op, cmd, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_xenoprof_op( +- int op, void *arg) +-{ +- return _hypercall2(int, xenoprof_op, op, arg); +-} +- +-static 
inline int __must_check +-HYPERVISOR_kexec_op( +- unsigned long op, void *args) +-{ +- return _hypercall2(int, kexec_op, op, args); +-} +- +-static inline int __must_check +-HYPERVISOR_tmem_op( +- struct tmem_op *op) +-{ +- return _hypercall1(int, tmem_op, op); +-} +- +- +-#endif /* __HYPERCALL_H__ */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypercall_64.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypercall_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -1,198 +1,10 @@ +-/****************************************************************************** +- * hypercall.h +- * +- * Linux-specific hypervisor handling. +- * +- * Copyright (c) 2002-2004, K A Fraser +- * +- * 64-bit updates: +- * Benjamin Liu +- * Jun Nakajima +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation; or, when distributed +- * separately from the Linux kernel or incorporated into other +- * software packages, subject to the following license: +- * +- * Permission is hereby granted, free of charge, to any person obtaining a copy +- * of this source file (the "Software"), to deal in the Software without +- * restriction, including without limitation the rights to use, copy, modify, +- * merge, publish, distribute, sublicense, and/or sell copies of the Software, +- * and to permit persons to whom the Software is furnished to do so, subject to +- * the following conditions: +- * +- * The above copyright notice and this permission notice shall be included in +- * all copies or substantial portions of the Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +- * IN THE SOFTWARE. +- */ +- +-#ifndef __HYPERCALL_H__ +-#define __HYPERCALL_H__ +- +-#include /* memcpy() */ +-#include +-#include +- +-#ifndef __HYPERVISOR_H__ +-# error "please don't include this file directly" +-#endif +- -#ifdef CONFIG_XEN -- xen_machine_kexec_setup_resources(); +-#define HYPERCALL_STR(name) \ +- "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" -#else -- if (crashk_res.start != crashk_res.end) { -- reserve_bootmem_generic(crashk_res.start, -- crashk_res.end - crashk_res.start + 1); -- } --#endif +-#define HYPERCALL_STR(name) \ +- "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ +- "add hypercall_stubs(%%rip),%%rax; " \ +- "call *%%rax" -#endif - -+ reserve_crashkernel(); - paging_init(); - #ifdef CONFIG_X86_LOCAL_APIC - /* -@@ -784,7 +813,7 @@ static void __init amd_detect_cmp(struct - but in the same order as the HT nodeids. - If that doesn't result in a usable node fall back to the - path for the previous case. 
*/ -- int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits); -+ int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits); - if (ht_nodeid >= 0 && - apicid_to_node[ht_nodeid] != NUMA_NO_NODE) - node = apicid_to_node[ht_nodeid]; -@@ -799,6 +828,39 @@ static void __init amd_detect_cmp(struct - #endif - } - -+#define ENABLE_C1E_MASK 0x18000000 -+#define CPUID_PROCESSOR_SIGNATURE 1 -+#define CPUID_XFAM 0x0ff00000 -+#define CPUID_XFAM_K8 0x00000000 -+#define CPUID_XFAM_10H 0x00100000 -+#define CPUID_XFAM_11H 0x00200000 -+#define CPUID_XMOD 0x000f0000 -+#define CPUID_XMOD_REV_F 0x00040000 -+ -+#ifndef CONFIG_XEN -+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */ -+static __cpuinit int amd_apic_timer_broken(void) -+{ -+ u32 lo, hi; -+ u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); -+ switch (eax & CPUID_XFAM) { -+ case CPUID_XFAM_K8: -+ if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F) -+ break; -+ case CPUID_XFAM_10H: -+ case CPUID_XFAM_11H: -+ rdmsr(MSR_K8_ENABLE_C1E, lo, hi); -+ if (lo & ENABLE_C1E_MASK) -+ return 1; -+ break; -+ default: -+ /* err on the side of caution */ -+ return 1; -+ } -+ return 0; -+} -+#endif -+ - static void __cpuinit init_amd(struct cpuinfo_x86 *c) - { - unsigned level; -@@ -828,7 +890,7 @@ static void __cpuinit init_amd(struct cp - level = cpuid_eax(1); - if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) - set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); -- if (c->x86 == 0x10) -+ if (c->x86 == 0x10 || c->x86 == 0x11) - set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); +-#define _hypercall0(type, name) \ +-({ \ +- type __res; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res) \ +- : \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall1(type, name, a1) \ +-({ \ +- type __res; \ +- long __ign1; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=D" (__ign1) \ +- : "1" ((long)(a1)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall2(type, name, a1, a2) \ +-({ \ +- type __res; \ +- long __ign1, __ign2; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ +- : "1" ((long)(a1)), "2" ((long)(a2)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall3(type, name, a1, a2, a3) \ +-({ \ +- type __res; \ +- long __ign1, __ign2, __ign3; \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ +- "=d" (__ign3) \ +- : "1" ((long)(a1)), "2" ((long)(a2)), \ +- "3" ((long)(a3)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall4(type, name, a1, a2, a3, a4) \ +-({ \ +- type __res; \ +- long __ign1, __ign2, __ign3; \ +- register long __arg4 asm("r10") = (long)(a4); \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ +- "=d" (__ign3), "+r" (__arg4) \ +- : "1" ((long)(a1)), "2" ((long)(a2)), \ +- "3" ((long)(a3)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ +-({ \ +- type __res; \ +- long __ign1, __ign2, __ign3; \ +- register long __arg4 asm("r10") = (long)(a4); \ +- register long __arg5 asm("r8") = (long)(a5); \ +- asm volatile ( \ +- HYPERCALL_STR(name) \ +- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ +- "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \ +- : "1" ((long)(a1)), "2" ((long)(a2)), \ +- "3" ((long)(a3)) \ +- : "memory" ); \ +- __res; \ +-}) +- +-#define _hypercall(type, op, a1, a2, a3, a4, a5) \ +-({ \ +- type __res; \ +- register typeof((a1)+0) __arg1 asm("rdi") = 
(a1); \ +- register typeof((a2)+0) __arg2 asm("rsi") = (a2); \ +- register typeof((a3)+0) __arg3 asm("rdx") = (a3); \ +- register typeof((a4)+0) __arg4 asm("r10") = (a4); \ +- register typeof((a5)+0) __arg5 asm("r8") = (a5); \ +- asm volatile ( \ +- "call *%6" \ +- : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ +- "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ +- : "0" (hypercall_page + (op) * 32) \ +- : "memory" ); \ +- __res; \ +-}) +- +-static inline int __must_check +-HYPERVISOR_set_trap_table( +- const trap_info_t *table) +-{ +- return _hypercall1(int, set_trap_table, table); +-} +- +-static inline int __must_check +-HYPERVISOR_mmu_update( +- mmu_update_t *req, unsigned int count, unsigned int *success_count, +- domid_t domid) +-{ +- if (arch_use_lazy_mmu_mode()) +- return xen_multi_mmu_update(req, count, success_count, domid); +- return _hypercall4(int, mmu_update, req, count, success_count, domid); +-} +- +-static inline int __must_check +-HYPERVISOR_mmuext_op( +- struct mmuext_op *op, unsigned int count, unsigned int *success_count, +- domid_t domid) +-{ +- if (arch_use_lazy_mmu_mode()) +- return xen_multi_mmuext_op(op, count, success_count, domid); +- return _hypercall4(int, mmuext_op, op, count, success_count, domid); +-} +- +-static inline int __must_check +-HYPERVISOR_set_gdt( +- unsigned long *frame_list, unsigned int entries) +-{ +- return _hypercall2(int, set_gdt, frame_list, entries); +-} +- +-static inline int __must_check +-HYPERVISOR_stack_switch( +- unsigned long ss, unsigned long esp) +-{ +- return _hypercall2(int, stack_switch, ss, esp); +-} ++#define HYPERCALL_arg1 "rdi" ++#define HYPERCALL_arg2 "rsi" ++#define HYPERCALL_arg3 "rdx" ++#define HYPERCALL_arg4 "r10" ++#define HYPERCALL_arg5 "r8" - /* Enable workaround for FXSAVE leak */ -@@ -870,6 +932,11 @@ static void __cpuinit init_amd(struct cp - /* Family 10 doesn't support C states in MWAIT so don't use it */ - if (c->x86 == 0x10 && !force_mwait) - clear_bit(X86_FEATURE_MWAIT, &c->x86_capability); -+ -+#ifndef CONFIG_XEN -+ if (amd_apic_timer_broken()) -+ disable_apic_timer = 1; ++#if CONFIG_XEN_COMPAT <= 0x030002 + static inline int __must_check + HYPERVISOR_set_callbacks( + unsigned long event_address, unsigned long failsafe_address, +@@ -201,27 +13,7 @@ HYPERVISOR_set_callbacks( + return _hypercall3(int, set_callbacks, + event_address, failsafe_address, syscall_address); + } +- +-static inline int +-HYPERVISOR_fpu_taskswitch( +- int set) +-{ +- return _hypercall1(int, fpu_taskswitch, set); +-} +- +-static inline int __must_check +-HYPERVISOR_sched_op_compat( +- int cmd, unsigned long arg) +-{ +- return _hypercall2(int, sched_op_compat, cmd, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_sched_op( +- int cmd, void *arg) +-{ +- return _hypercall2(int, sched_op, cmd, arg); +-} +#endif + + static inline long __must_check + HYPERVISOR_set_timer_op( +@@ -231,34 +23,6 @@ HYPERVISOR_set_timer_op( } - static void __cpuinit detect_ht(struct cpuinfo_x86 *c) -@@ -1182,6 +1249,7 @@ void __cpuinit print_cpu_info(struct cpu - static int show_cpuinfo(struct seq_file *m, void *v) + static inline int __must_check +-HYPERVISOR_platform_op( +- struct xen_platform_op *platform_op) +-{ +- platform_op->interface_version = XENPF_INTERFACE_VERSION; +- return _hypercall1(int, platform_op, platform_op); +-} +-static inline int __must_check +-HYPERVISOR_mca( +- struct xen_mc *mc_op) +-{ +- mc_op->interface_version = XEN_MCA_INTERFACE_VERSION; +- return _hypercall1(int, mca, mc_op); +-} +-static inline int __must_check 
+-HYPERVISOR_set_debugreg( +- unsigned int reg, unsigned long value) +-{ +- return _hypercall2(int, set_debugreg, reg, value); +-} +- +-static inline unsigned long __must_check +-HYPERVISOR_get_debugreg( +- unsigned int reg) +-{ +- return _hypercall1(unsigned long, get_debugreg, reg); +-} +- +-static inline int __must_check + HYPERVISOR_update_descriptor( + unsigned long ma, unsigned long word) { - struct cpuinfo_x86 *c = v; -+ int cpu = 0; - - /* - * These flag bits must match the definitions in . -@@ -1191,7 +1259,7 @@ static int show_cpuinfo(struct seq_file - * applications want to get the raw CPUID data, they should access - * /dev/cpu//cpuid instead. - */ -- static char *x86_cap_flags[] = { -+ static const char *const x86_cap_flags[] = { - /* Intel-defined */ - "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", - "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", -@@ -1222,7 +1290,7 @@ static int show_cpuinfo(struct seq_file - /* Intel-defined (#2) */ - "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", - "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, -- NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt", -+ NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - - /* VIA/Cyrix/Centaur-defined */ -@@ -1232,10 +1300,10 @@ static int show_cpuinfo(struct seq_file - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - - /* AMD-defined (#2) */ -- "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy", -- "altmovcr8", "abm", "sse4a", -- "misalignsse", "3dnowprefetch", -- "osvw", "ibs", NULL, NULL, NULL, NULL, -+ "lahf_lm", "cmp_legacy", "svm", "extapic", -+ "cr8_legacy", "abm", "sse4a", "misalignsse", -+ "3dnowprefetch", "osvw", "ibs", "sse5", -+ "skinit", "wdt", NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, +@@ -266,22 +30,6 @@ HYPERVISOR_update_descriptor( + } -@@ -1245,7 +1313,7 @@ static int show_cpuinfo(struct seq_file - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - }; -- static char *x86_power_flags[] = { -+ static const char *const x86_power_flags[] = { - "ts", /* temperature sensor */ - "fid", /* frequency id control */ - "vid", /* voltage id control */ -@@ -1260,8 +1328,7 @@ static int show_cpuinfo(struct seq_file - - - #ifdef CONFIG_SMP -- if (!cpu_online(c-cpu_data)) -- return 0; -+ cpu = c->cpu_index; - #endif - - seq_printf(m,"processor\t: %u\n" -@@ -1269,7 +1336,7 @@ static int show_cpuinfo(struct seq_file - "cpu family\t: %d\n" - "model\t\t: %d\n" - "model name\t: %s\n", -- (unsigned)(c-cpu_data), -+ (unsigned)cpu, - c->x86_vendor_id[0] ? 
c->x86_vendor_id : "unknown", - c->x86, - (int)c->x86_model, -@@ -1281,7 +1348,7 @@ static int show_cpuinfo(struct seq_file - seq_printf(m, "stepping\t: unknown\n"); - - if (cpu_has(c,X86_FEATURE_TSC)) { -- unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data)); -+ unsigned int freq = cpufreq_quick_get((unsigned)cpu); - if (!freq) - freq = cpu_khz; - seq_printf(m, "cpu MHz\t\t: %u.%03u\n", -@@ -1294,9 +1361,9 @@ static int show_cpuinfo(struct seq_file - - #ifdef CONFIG_SMP - if (smp_num_siblings * c->x86_max_cores > 1) { -- int cpu = c - cpu_data; - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); -- seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); -+ seq_printf(m, "siblings\t: %d\n", -+ cpus_weight(per_cpu(cpu_core_map, cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); - } -@@ -1351,12 +1418,16 @@ static int show_cpuinfo(struct seq_file - - static void *c_start(struct seq_file *m, loff_t *pos) + static inline int __must_check +-HYPERVISOR_memory_op( +- unsigned int cmd, void *arg) +-{ +- if (arch_use_lazy_mmu_mode()) +- xen_multicall_flush(false); +- return _hypercall2(int, memory_op, cmd, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_multicall( +- multicall_entry_t *call_list, unsigned int nr_calls) +-{ +- return _hypercall2(int, multicall, call_list, nr_calls); +-} +- +-static inline int __must_check + HYPERVISOR_update_va_mapping( + unsigned long va, pte_t new_val, unsigned long flags) { -- return *pos < NR_CPUS ? cpu_data + *pos : NULL; -+ if (*pos == 0) /* just in case, cpu 0 is not the first */ -+ *pos = first_cpu(cpu_online_map); -+ if ((*pos) < NR_CPUS && cpu_online(*pos)) -+ return &cpu_data(*pos); -+ return NULL; +@@ -291,67 +39,6 @@ HYPERVISOR_update_va_mapping( } - static void *c_next(struct seq_file *m, void *v, loff_t *pos) + static inline int __must_check +-HYPERVISOR_event_channel_op( +- int cmd, void *arg) +-{ +- int rc = _hypercall2(int, event_channel_op, cmd, arg); +- +-#if CONFIG_XEN_COMPAT <= 0x030002 +- if (unlikely(rc == -ENOSYS)) { +- struct evtchn_op op; +- op.cmd = cmd; +- memcpy(&op.u, arg, sizeof(op.u)); +- rc = _hypercall1(int, event_channel_op_compat, &op); +- memcpy(arg, &op.u, sizeof(op.u)); +- } +-#endif +- +- return rc; +-} +- +-static inline int __must_check +-HYPERVISOR_xen_version( +- int cmd, void *arg) +-{ +- return _hypercall2(int, xen_version, cmd, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_console_io( +- int cmd, unsigned int count, char *str) +-{ +- return _hypercall3(int, console_io, cmd, count, str); +-} +- +-static inline int __must_check +-HYPERVISOR_physdev_op( +- int cmd, void *arg) +-{ +- int rc = _hypercall2(int, physdev_op, cmd, arg); +- +-#if CONFIG_XEN_COMPAT <= 0x030002 +- if (unlikely(rc == -ENOSYS)) { +- struct physdev_op op; +- op.cmd = cmd; +- memcpy(&op.u, arg, sizeof(op.u)); +- rc = _hypercall1(int, physdev_op_compat, &op); +- memcpy(arg, &op.u, sizeof(op.u)); +- } +-#endif +- +- return rc; +-} +- +-static inline int __must_check +-HYPERVISOR_grant_table_op( +- unsigned int cmd, void *uop, unsigned int count) +-{ +- if (arch_use_lazy_mmu_mode()) +- xen_multicall_flush(false); +- return _hypercall3(int, grant_table_op, cmd, uop, count); +-} +- +-static inline int __must_check + HYPERVISOR_update_va_mapping_otherdomain( + unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { -- ++*pos; -+ *pos = next_cpu(*pos, cpu_online_map); - return c_start(m, pos); +@@ -360,90 +47,8 @@ 
HYPERVISOR_update_va_mapping_otherdomain } ---- head-2010-05-25.orig/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -72,7 +72,7 @@ - * - * B stepping CPUs may hang. There are hardware work arounds - * for this. We warn about it in case your board doesn't have the work -- * arounds. Basically thats so I can tell anyone with a B stepping -+ * arounds. Basically that's so I can tell anyone with a B stepping - * CPU and SMP problems "tough". - * - * Specific items [From Pentium Processor Specification Update] -@@ -241,7 +241,7 @@ void leave_mm(unsigned long cpu) - * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); - * Stop ipi delivery for the old mm. This is not synchronized with - * the other cpus, but smp_invalidate_interrupt ignore flush ipis -- * for the wrong mm, and in the worst case we perform a superflous -+ * for the wrong mm, and in the worst case we perform a superfluous - * tlb flush. - * 1a2) set cpu_tlbstate to TLBSTATE_OK - * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 -@@ -309,6 +309,7 @@ irqreturn_t smp_invalidate_interrupt(int - smp_mb__after_clear_bit(); - out: - put_cpu_no_resched(); -+ __get_cpu_var(irq_stat).irq_tlb_count++; - - return IRQ_HANDLED; - } -@@ -580,7 +581,7 @@ static void stop_this_cpu (void * dummy) - */ - cpu_clear(smp_processor_id(), cpu_online_map); - disable_all_local_evtchn(); -- if (cpu_data[smp_processor_id()].hlt_works_ok) -+ if (cpu_data(smp_processor_id()).hlt_works_ok) - for(;;) halt(); - for (;;); - } -@@ -610,6 +611,7 @@ void xen_smp_send_stop(void) - */ - irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) + static inline int __must_check +-HYPERVISOR_vm_assist( +- unsigned int cmd, unsigned int type) +-{ +- return _hypercall2(int, vm_assist, cmd, type); +-} +- +-static inline int __must_check +-HYPERVISOR_vcpu_op( +- int cmd, unsigned int vcpuid, void *extra_args) +-{ +- return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); +-} +- +-static inline int __must_check + HYPERVISOR_set_segment_base( + int reg, unsigned long value) { -+ __get_cpu_var(irq_stat).irq_resched_count++; - - return IRQ_HANDLED; - } -@@ -632,6 +634,7 @@ irqreturn_t smp_call_function_interrupt( - */ - irq_enter(); - (*func)(info); -+ __get_cpu_var(irq_stat).irq_call_count++; - irq_exit(); - - if (wait) { ---- head-2010-05-25.orig/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -167,6 +167,7 @@ asmlinkage void smp_invalidate_interrupt - out: - ack_APIC_irq(); - cpu_clear(cpu, f->flush_cpumask); -+ add_pda(irq_tlb_count, 1); - } - - static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, -@@ -326,17 +327,27 @@ void unlock_ipi_call_lock(void) + return _hypercall2(int, set_segment_base, reg, value); } - - /* -- * this function sends a 'generic call function' IPI to one other CPU -- * in the system. -- * -- * cpu is a standard Linux logical CPU number. -+ * this function sends a 'generic call function' IPI to all other CPU -+ * of the system defined in the mask. 
- */ --static void --__smp_call_function_single(int cpu, void (*func) (void *info), void *info, -- int nonatomic, int wait) -+ -+static int -+__smp_call_function_mask(cpumask_t mask, -+ void (*func)(void *), void *info, -+ int wait) - { - struct call_data_struct data; -- int cpus = 1; -+ cpumask_t allbutself; -+ int cpus; -+ -+ allbutself = cpu_online_map; -+ cpu_clear(smp_processor_id(), allbutself); -+ -+ cpus_and(mask, mask, allbutself); -+ cpus = cpus_weight(mask); -+ -+ if (!cpus) -+ return 0; - - data.func = func; - data.info = info; -@@ -347,19 +358,55 @@ __smp_call_function_single(int cpu, void - - call_data = &data; - wmb(); -- /* Send a message to all other CPUs and wait for them to respond */ -- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); -+ -+ /* Send a message to other CPUs */ -+ if (cpus_equal(mask, allbutself)) -+ send_IPI_allbutself(CALL_FUNCTION_VECTOR); -+ else -+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (!wait) -- return; -+ return 0; - - while (atomic_read(&data.finished) != cpus) - cpu_relax(); -+ -+ return 0; -+} -+/** -+ * smp_call_function_mask(): Run a function on a set of other CPUs. -+ * @mask: The set of cpus to run on. Must not include the current cpu. -+ * @func: The function to run. This must be fast and non-blocking. -+ * @info: An arbitrary pointer to pass to the function. -+ * @wait: If true, wait (atomically) until function has completed on other CPUs. -+ * -+ * Returns 0 on success, else a negative status code. -+ * -+ * If @wait is true, then returns once @func has returned; otherwise -+ * it returns just before the target cpu calls @func. -+ * -+ * You must not call this function with disabled interrupts or from a -+ * hardware interrupt handler or from a bottom half handler. -+ */ -+int smp_call_function_mask(cpumask_t mask, -+ void (*func)(void *), void *info, -+ int wait) -+{ -+ int ret; -+ -+ /* Can deadlock when called with interrupts disabled */ -+ WARN_ON(irqs_disabled()); -+ -+ spin_lock(&call_lock); -+ ret = __smp_call_function_mask(mask, func, info, wait); -+ spin_unlock(&call_lock); -+ return ret; - } -+EXPORT_SYMBOL(smp_call_function_mask); - - /* - * smp_call_function_single - Run a function on a specific CPU -@@ -378,6 +425,7 @@ int smp_call_function_single (int cpu, v - int nonatomic, int wait) - { - /* prevent preemption and reschedule on another processor */ -+ int ret; - int me = get_cpu(); - - /* Can deadlock when called with interrupts disabled */ -@@ -391,51 +439,14 @@ int smp_call_function_single (int cpu, v - return 0; - } - -- spin_lock(&call_lock); -- __smp_call_function_single(cpu, func, info, nonatomic, wait); -- spin_unlock(&call_lock); -+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); -+ - put_cpu(); -- return 0; -+ return ret; - } - EXPORT_SYMBOL(smp_call_function_single); - - /* -- * this function sends a 'generic call function' IPI to all other CPUs -- * in the system. 
-- */ --static void __smp_call_function (void (*func) (void *info), void *info, -- int nonatomic, int wait) +- +-static inline int __must_check +-HYPERVISOR_suspend( +- unsigned long srec) -{ -- struct call_data_struct data; -- int cpus = num_online_cpus()-1; +- struct sched_shutdown sched_shutdown = { +- .reason = SHUTDOWN_suspend +- }; - -- if (!cpus) -- return; +- int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, +- &sched_shutdown, srec); - -- data.func = func; -- data.info = info; -- atomic_set(&data.started, 0); -- data.wait = wait; -- if (wait) -- atomic_set(&data.finished, 0); +-#if CONFIG_XEN_COMPAT <= 0x030002 +- if (rc == -ENOSYS) +- rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, +- SHUTDOWN_suspend, srec); +-#endif - -- call_data = &data; -- wmb(); -- /* Send a message to all other CPUs and wait for them to respond */ -- send_IPI_allbutself(CALL_FUNCTION_VECTOR); +- return rc; +-} - -- /* Wait for response */ -- while (atomic_read(&data.started) != cpus) -- cpu_relax(); +-#if CONFIG_XEN_COMPAT <= 0x030002 +-static inline int +-HYPERVISOR_nmi_op( +- unsigned long op, void *arg) +-{ +- return _hypercall2(int, nmi_op, op, arg); +-} +-#endif - -- if (!wait) -- return; +-#ifndef CONFIG_XEN +-static inline unsigned long __must_check +-HYPERVISOR_hvm_op( +- int op, void *arg) +-{ +- return _hypercall2(unsigned long, hvm_op, op, arg); +-} +-#endif - -- while (atomic_read(&data.finished) != cpus) -- cpu_relax(); +-static inline int __must_check +-HYPERVISOR_callback_op( +- int cmd, const void *arg) +-{ +- return _hypercall2(int, callback_op, cmd, arg); -} - --/* - * smp_call_function - run a function on all other CPUs. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. 
-@@ -453,10 +464,7 @@ static void __smp_call_function (void (* - int smp_call_function (void (*func) (void *info), void *info, int nonatomic, - int wait) - { -- spin_lock(&call_lock); -- __smp_call_function(func,info,nonatomic,wait); -- spin_unlock(&call_lock); -- return 0; -+ return smp_call_function_mask(cpu_online_map, func, info, wait); - } - EXPORT_SYMBOL(smp_call_function); - -@@ -485,7 +493,7 @@ void smp_send_stop(void) - /* Don't deadlock on the call lock in panic */ - nolock = !spin_trylock(&call_lock); - local_irq_save(flags); -- __smp_call_function(stop_this_cpu, NULL, 0, 0); -+ __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0); - if (!nolock) - spin_unlock(&call_lock); - disable_all_local_evtchn(); -@@ -505,7 +513,9 @@ asmlinkage irqreturn_t smp_reschedule_in - { - #ifndef CONFIG_XEN - ack_APIC_irq(); --#else -+#endif -+ add_pda(irq_resched_count, 1); -+#ifdef CONFIG_XEN - return IRQ_HANDLED; +-static inline int __must_check +-HYPERVISOR_xenoprof_op( +- int op, void *arg) +-{ +- return _hypercall2(int, xenoprof_op, op, arg); +-} +- +-static inline int __must_check +-HYPERVISOR_kexec_op( +- unsigned long op, void *args) +-{ +- return _hypercall2(int, kexec_op, op, args); +-} +- +-static inline int __must_check +-HYPERVISOR_tmem_op( +- struct tmem_op *op) +-{ +- return _hypercall1(int, tmem_op, op); +-} +- +-#endif /* __HYPERCALL_H__ */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:56:27.000000000 +0100 +@@ -44,6 +44,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -200,7 +201,6 @@ static inline void xen_multicall_flush(b + extern char hypercall_page[PAGE_SIZE]; + #else + extern char *hypercall_stubs; +-#define hypercall_page hypercall_stubs + #define is_running_on_xen() (!!hypercall_stubs) #endif - } -@@ -535,6 +545,7 @@ asmlinkage irqreturn_t smp_call_function - exit_idle(); - irq_enter(); - (*func)(info); -+ add_pda(irq_call_count, 1); - irq_exit(); - if (wait) { - mb(); ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,4 @@ - /* -- * linux/arch/i386/kernel/time.c -- * - * Copyright (C) 1991, 1992, 1995 Linus Torvalds - * - * This file contains the PC-specific time handling details: -@@ -73,6 +71,7 @@ - #include - - #include -+#include - #include - - #include -@@ -535,6 +534,13 @@ irqreturn_t timer_interrupt(int irq, voi - struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); - struct vcpu_runstate_info runstate; -+ /* Keep nmi watchdog up to date */ -+#ifdef __i386__ -+ per_cpu(irq_stat, smp_processor_id()).irq0_irqs++; +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/io.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "io_32.h" +#else -+ add_pda(irq0_irqs, 1); ++# include "io_64.h" +#endif -+ - /* - * Here we are in the timer irq handler. 
We just have irqs locally - * disabled but we don't know if the timer_bh is running on the other -@@ -1008,7 +1014,7 @@ static int time_cpufreq_notifier(struct - struct cpufreq_freqs *freq = data; - struct xen_platform_op op; - -- if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC)) -+ if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC)) - return 0; +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "irqflags_32.h" ++#else ++# include "irqflags_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:56:27.000000000 +0100 +@@ -150,6 +150,23 @@ static inline int raw_irqs_disabled_flag + \ + raw_irqs_disabled_flags(flags); \ + }) ++ ++/* ++ * makes the traced hardirq state match with the machine state ++ * ++ * should be a rarely used function, only in places where its ++ * otherwise impossible to know the irq state, like in traps. ++ */ ++static inline void trace_hardirqs_fixup_flags(unsigned long flags) ++{ ++ if (raw_irqs_disabled_flags(flags)) ++ trace_hardirqs_off(); ++ else ++ trace_hardirqs_on(); ++} ++ ++#define trace_hardirqs_fixup() \ ++ trace_hardirqs_fixup_flags(__raw_local_save_flags()) + #endif /* __ASSEMBLY__ */ - if (val == CPUFREQ_PRECHANGE) -@@ -1046,30 +1052,33 @@ core_initcall(cpufreq_time_setup); - */ - static ctl_table xen_subtable[] = { - { -- .ctl_name = 1, -+ .ctl_name = CTL_XEN_INDEPENDENT_WALLCLOCK, - .procname = "independent_wallclock", - .data = &independent_wallclock, - .maxlen = sizeof(independent_wallclock), - .mode = 0644, -+ .strategy = sysctl_data, - .proc_handler = proc_dointvec - }, - { -- .ctl_name = 2, -+ .ctl_name = CTL_XEN_PERMITTED_CLOCK_JITTER, - .procname = "permitted_clock_jitter", - .data = &permitted_clock_jitter, - .maxlen = sizeof(permitted_clock_jitter), - .mode = 0644, -+ .strategy = sysctl_data, - .proc_handler = proc_doulongvec_minmax - }, -- { 0 } -+ { } - }; - static ctl_table xen_table[] = { - { -- .ctl_name = 123, -+ .ctl_name = CTL_XEN, - .procname = "xen", - .mode = 0555, -- .child = xen_subtable}, -- { 0 } -+ .child = xen_subtable -+ }, -+ { } - }; - static int __init xen_sysctl_init(void) - { ---- head-2010-05-25.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,4 @@ /* -- * linux/arch/i386/traps.c -- * - * Copyright (C) 1991, 1992 Linus Torvalds - * - * Pentium III FXSR, SSE support -@@ -65,6 +63,11 @@ - - int panic_on_unrecovered_nmi; +@@ -181,4 +198,17 @@ static inline int raw_irqs_disabled_flag + # define TRACE_IRQS_OFF + #endif -+#ifndef CONFIG_XEN -+DECLARE_BITMAP(used_vectors, NR_VECTORS); -+EXPORT_SYMBOL_GPL(used_vectors); ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCKDEP_SYS_EXIT \ ++ pushl %eax; \ ++ pushl %ecx; \ ++ pushl %edx; \ ++ call lockdep_sys_exit; \ ++ popl %edx; \ ++ popl %ecx; \ ++ popl %eax; ++#else ++# define LOCKDEP_SYS_EXIT +#endif + - asmlinkage int system_call(void); - - /* Do we ignore FPU interrupts ? 
*/ -@@ -120,7 +123,7 @@ struct stack_frame { - - static inline unsigned long print_context_stack(struct thread_info *tinfo, - unsigned long *stack, unsigned long ebp, -- struct stacktrace_ops *ops, void *data) -+ const struct stacktrace_ops *ops, void *data) - { - #ifdef CONFIG_FRAME_POINTER - struct stack_frame *frame = (struct stack_frame *)ebp; -@@ -157,7 +160,7 @@ static inline unsigned long print_contex - - void dump_trace(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, -- struct stacktrace_ops *ops, void *data) -+ const struct stacktrace_ops *ops, void *data) - { - unsigned long ebp = 0; - -@@ -229,7 +232,7 @@ static void print_trace_address(void *da - touch_nmi_watchdog(); - } - --static struct stacktrace_ops print_trace_ops = { -+static const struct stacktrace_ops print_trace_ops = { - .warning = print_trace_warning, - .warning_symbol = print_trace_warning_symbol, - .stack = print_trace_stack, -@@ -288,6 +291,11 @@ void dump_stack(void) - { - unsigned long stack; - -+ printk("Pid: %d, comm: %.20s %s %s %.*s\n", -+ current->pid, current->comm, print_tainted(), -+ init_utsname()->release, -+ (int)strcspn(init_utsname()->version, " "), -+ init_utsname()->version); - show_trace(current, NULL, &stack); - } + #endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -116,6 +116,22 @@ static inline int raw_irqs_disabled_flag + }) -@@ -296,48 +304,24 @@ EXPORT_SYMBOL(dump_stack); - void show_registers(struct pt_regs *regs) - { - int i; -- int in_kernel = 1; -- unsigned long esp; -- unsigned short ss, gs; -- -- esp = (unsigned long) (®s->esp); -- savesegment(ss, ss); -- savesegment(gs, gs); -- if (user_mode_vm(regs)) { -- in_kernel = 0; -- esp = regs->esp; -- ss = regs->xss & 0xffff; -- } + /* ++ * makes the traced hardirq state match with the machine state ++ * ++ * should be a rarely used function, only in places where its ++ * otherwise impossible to know the irq state, like in traps. ++ */ ++static inline void trace_hardirqs_fixup_flags(unsigned long flags) ++{ ++ if (raw_irqs_disabled_flags(flags)) ++ trace_hardirqs_off(); ++ else ++ trace_hardirqs_on(); ++} + - print_modules(); -- printk(KERN_EMERG "CPU: %d\n" -- KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n" -- KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n", -- smp_processor_id(), 0xffff & regs->xcs, regs->eip, -- print_tainted(), regs->eflags, init_utsname()->release, -- (int)strcspn(init_utsname()->version, " "), -- init_utsname()->version); -- print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip); -- printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", -- regs->eax, regs->ebx, regs->ecx, regs->edx); -- printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", -- regs->esi, regs->edi, regs->ebp, esp); -- printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", -- regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss); -+ __show_registers(regs, 0); - printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", -- TASK_COMM_LEN, current->comm, current->pid, -+ TASK_COMM_LEN, current->comm, task_pid_nr(current), - current_thread_info(), current, task_thread_info(current)); - /* - * When in-kernel, we also print out the stack and code at the - * time of the fault.. 
- */ -- if (in_kernel) { -+ if (!user_mode_vm(regs)) { - u8 *eip; - unsigned int code_prologue = code_bytes * 43 / 64; - unsigned int code_len = code_bytes; - unsigned char c; - - printk("\n" KERN_EMERG "Stack: "); -- show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); -+ show_stack_log_lvl(NULL, regs, ®s->esp, KERN_EMERG); - - printk(KERN_EMERG "Code: "); - -@@ -382,11 +366,11 @@ int is_valid_bugaddr(unsigned long eip) - void die(const char * str, struct pt_regs * regs, long err) - { - static struct { -- spinlock_t lock; -+ raw_spinlock_t lock; - u32 lock_owner; - int lock_owner_depth; - } die = { -- .lock = __SPIN_LOCK_UNLOCKED(die.lock), -+ .lock = __RAW_SPIN_LOCK_UNLOCKED, - .lock_owner = -1, - .lock_owner_depth = 0 - }; -@@ -397,40 +381,33 @@ void die(const char * str, struct pt_reg ++#define trace_hardirqs_fixup() \ ++ trace_hardirqs_fixup_flags(__raw_local_save_flags()) ++/* + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +@@ -143,6 +159,20 @@ static inline void halt(void) + # define TRACE_IRQS_ON + # define TRACE_IRQS_OFF + # endif ++# ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk ++# define LOCKDEP_SYS_EXIT_IRQ \ ++ TRACE_IRQS_ON; \ ++ sti; \ ++ SAVE_REST; \ ++ LOCKDEP_SYS_EXIT; \ ++ RESTORE_REST; \ ++ cli; \ ++ TRACE_IRQS_OFF; ++# else ++# define LOCKDEP_SYS_EXIT ++# define LOCKDEP_SYS_EXIT_IRQ ++# endif + #endif - if (die.lock_owner != raw_smp_processor_id()) { - console_verbose(); -- spin_lock_irqsave(&die.lock, flags); -+ raw_local_irq_save(flags); -+ __raw_spin_lock(&die.lock); - die.lock_owner = smp_processor_id(); - die.lock_owner_depth = 0; - bust_spinlocks(1); -- } -- else -- local_save_flags(flags); -+ } else -+ raw_local_irq_save(flags); - - if (++die.lock_owner_depth < 3) { -- int nl = 0; - unsigned long esp; - unsigned short ss; - - report_bug(regs->eip, regs); - -- printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); -+ printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, -+ ++die_counter); - #ifdef CONFIG_PREEMPT -- printk(KERN_EMERG "PREEMPT "); -- nl = 1; -+ printk("PREEMPT "); - #endif - #ifdef CONFIG_SMP -- if (!nl) -- printk(KERN_EMERG); - printk("SMP "); -- nl = 1; - #endif - #ifdef CONFIG_DEBUG_PAGEALLOC -- if (!nl) -- printk(KERN_EMERG); - printk("DEBUG_PAGEALLOC"); -- nl = 1; #endif -- if (nl) -- printk("\n"); -+ printk("\n"); +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/maddr.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "maddr_32.h" ++#else ++# include "maddr_64.h" ++#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "mmu_context_32.h" ++#else ++# include "mmu_context_64.h" ++#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,100 @@ ++#ifndef __x86_PCI_H ++#define __x86_PCI_H + - if (notify_die(DIE_OOPS, str, regs, err, - current->thread.trap_no, SIGSEGV) != - NOTIFY_STOP) { -@@ -454,7 +431,8 @@ void die(const char * str, struct pt_reg - bust_spinlocks(0); - die.lock_owner = -1; - add_taint(TAINT_DIE); -- spin_unlock_irqrestore(&die.lock, flags); -+ __raw_spin_unlock(&die.lock); -+ raw_local_irq_restore(flags); - - if (!regs) - return; -@@ -571,6 +549,7 @@ fastcall void 
do_##name(struct pt_regs * - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = (void __user *)siaddr; \ -+ trace_hardirqs_fixup(); \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ - return; \ -@@ -606,7 +585,7 @@ fastcall void __kprobes do_general_prote - printk_ratelimit()) - printk(KERN_INFO - "%s[%d] general protection eip:%lx esp:%lx error:%lx\n", -- current->comm, current->pid, -+ current->comm, task_pid_nr(current), - regs->eip, regs->esp, error_code); - - force_sig(SIGSEGV, current); -@@ -785,6 +764,8 @@ void restart_nmi(void) - #ifdef CONFIG_KPROBES - fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code) - { -+ trace_hardirqs_fixup(); ++#include /* for struct page */ ++#include ++#include ++#include ++#include ++#include + - if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) - == NOTIFY_STOP) - return; -@@ -822,6 +803,8 @@ fastcall void __kprobes do_debug(struct - unsigned int condition; - struct task_struct *tsk = current; - -+ trace_hardirqs_fixup(); + - get_debugreg(condition, 6); - - if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, -@@ -1084,20 +1067,6 @@ asmlinkage void math_emulate(long arg) ++#ifdef __KERNEL__ ++ ++struct pci_sysdata { ++ int domain; /* PCI domain */ ++ int node; /* NUMA node */ ++#ifdef CONFIG_X86_64 ++ void* iommu; /* IOMMU private data */ ++#endif ++#ifdef CONFIG_XEN_PCIDEV_FRONTEND ++ struct pcifront_device *pdev; ++#endif ++}; ++ ++/* scan a bus after allocating a pci_sysdata for it */ ++extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); ++ ++static inline int pci_domain_nr(struct pci_bus *bus) ++{ ++ struct pci_sysdata *sd = bus->sysdata; ++ return sd->domain; ++} ++ ++static inline int pci_proc_domain(struct pci_bus *bus) ++{ ++ return pci_domain_nr(bus); ++} ++ ++ ++/* Can be used to override the logic in pci_scan_bus for skipping ++ already-configured bus numbers - to be used for buggy BIOSes ++ or architectures with incomplete PCI setup by the loader */ ++ ++#ifdef CONFIG_PCI ++extern unsigned int pcibios_assign_all_busses(void); ++#else ++#define pcibios_assign_all_busses() 0 ++#endif ++ ++#include ++#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain()) ++ ++extern unsigned long pci_mem_start; ++#define PCIBIOS_MIN_IO 0x1000 ++#define PCIBIOS_MIN_MEM (pci_mem_start) ++ ++#define PCIBIOS_MIN_CARDBUS_IO 0x4000 ++ ++void pcibios_config_init(void); ++struct pci_bus * pcibios_scan_root(int bus); ++ ++void pcibios_set_master(struct pci_dev *dev); ++void pcibios_penalize_isa_irq(int irq, int active); ++struct irq_routing_table *pcibios_get_irq_routing_table(void); ++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); ++ ++ ++#define HAVE_PCI_MMAP ++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ++ enum pci_mmap_state mmap_state, int write_combine); ++ ++ ++#ifdef CONFIG_PCI ++static inline void pci_dma_burst_advice(struct pci_dev *pdev, ++ enum pci_dma_burst_strategy *strat, ++ unsigned long *strategy_parameter) ++{ ++ *strat = PCI_DMA_BURST_INFINITY; ++ *strategy_parameter = ~0UL; ++} ++#endif ++ ++ ++#endif /* __KERNEL__ */ ++ ++#ifdef CONFIG_X86_32 ++# include "pci_32.h" ++#else ++# include "pci_64.h" ++#endif ++ ++/* implement the pci_ DMA API in terms of the generic device dma_ one */ ++#include ++ ++/* generic pci stuff */ ++#include ++ ++ ++ ++#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-01-31 
17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "pgalloc_32.h" ++#else ++# include "pgalloc_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *p + spin_unlock(&pgd_lock); + } - #endif /* CONFIG_MATH_EMULATION */ ++extern void pgd_test_and_unpin(pgd_t *); ++ + static inline pgd_t *pgd_alloc(struct mm_struct *mm) + { + /* +@@ -126,6 +128,7 @@ static inline pgd_t *pgd_alloc(struct mm + pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 1); + if (!pgd) + return NULL; ++ pgd_test_and_unpin(pgd); + pgd_list_add(pgd, mm); + /* + * Copy kernel pointers in from init. +@@ -147,29 +150,8 @@ static inline pgd_t *pgd_alloc(struct mm --#ifdef CONFIG_X86_F00F_BUG --void __init trap_init_f00f_bug(void) --{ -- __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); + static inline void pgd_free(pgd_t *pgd) + { +- pte_t *ptep = virt_to_ptep(pgd); - -- /* -- * Update the IDT descriptor and reload the IDT so that -- * it uses the read-only mapped virtual address. -- */ -- idt_descr.address = fix_to_virt(FIX_F00F_IDT); -- load_idt(&idt_descr); --} --#endif + pgd_list_del(pgd); +- +- if (!pte_write(*ptep)) { +- xen_pgd_unpin(__pa(pgd)); +- BUG_ON(HYPERVISOR_update_va_mapping( +- (unsigned long)pgd, +- pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL), +- 0)); +- } +- +- ptep = virt_to_ptep(__user_pgd(pgd)); +- +- if (!pte_write(*ptep)) { +- xen_pgd_unpin(__pa(__user_pgd(pgd))); +- BUG_ON(HYPERVISOR_update_va_mapping( +- (unsigned long)__user_pgd(pgd), +- pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, +- PAGE_KERNEL), +- 0)); +- } - ++ pgd_test_and_unpin(pgd); + free_pages((unsigned long)pgd, 1); + } - /* - * NB. All these are "trap gates" (i.e. 
events_mask isn't set) except ---- head-2010-05-25.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,4 @@ - /* -- * linux/arch/x86-64/traps.c -- * - * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs - * -@@ -33,6 +31,7 @@ - #include - #include - #include -+#include - - #if defined(CONFIG_EDAC) - #include -@@ -205,7 +204,7 @@ static unsigned long *in_exception_stack - #define MSG(txt) ops->warning(data, txt) - - /* -- * x86-64 can have upto three kernel stacks: -+ * x86-64 can have up to three kernel stacks: - * process stack - * interrupt stack - * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack -@@ -219,7 +218,7 @@ static inline int valid_stack_ptr(struct +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "pgtable_32.h" ++#else ++# include "pgtable_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:23.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 17:56:27.000000000 +0100 +@@ -17,10 +17,7 @@ + #include + #include - void dump_trace(struct task_struct *tsk, struct pt_regs *regs, - unsigned long *stack, -- struct stacktrace_ops *ops, void *data) -+ const struct stacktrace_ops *ops, void *data) - { - const unsigned cpu = get_cpu(); - unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; -@@ -340,7 +339,7 @@ static void print_trace_address(void *da - printk_address(addr); - } +-#ifndef _I386_BITOPS_H +-#include +-#endif +- ++#include + #include + #include + #include +@@ -40,7 +37,7 @@ extern spinlock_t pgd_lock; + extern struct page *pgd_list; + void check_pgt_cache(void); --static struct stacktrace_ops print_trace_ops = { -+static const struct stacktrace_ops print_trace_ops = { - .warning = print_trace_warning, - .warning_symbol = print_trace_warning_symbol, - .stack = print_trace_stack, -@@ -404,6 +403,12 @@ void show_stack(struct task_struct *tsk, - void dump_stack(void) - { - unsigned long dummy; -+ -+ printk("Pid: %d, comm: %.20s %s %s %.*s\n", -+ current->pid, current->comm, print_tainted(), -+ init_utsname()->release, -+ (int)strcspn(init_utsname()->version, " "), -+ init_utsname()->version); - show_trace(NULL, NULL, &dummy); - } +-void pmd_ctor(void *, struct kmem_cache *, unsigned long); ++void pmd_ctor(struct kmem_cache *, void *); + void pgtable_cache_init(void); + void paging_init(void); -@@ -466,7 +471,7 @@ void out_of_line_bug(void) - EXPORT_SYMBOL(out_of_line_bug); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:18.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -9,7 +9,7 @@ + * the x86-64 page table tree. 
+ */ + #include +-#include ++#include + #include + #include + #include +@@ -139,6 +139,7 @@ static inline void pgd_clear (pgd_t * pg + #define MAXMEM _AC(0x6fffffffff, UL) + #define VMALLOC_START _AC(0xffffc20000000000, UL) + #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) ++#define VMEMMAP_START _AC(0xffffe20000000000, UL) + #define MODULES_VADDR _AC(0xffffffff88000000, UL) + #define MODULES_END _AC(0xffffffffff000000, UL) + #define MODULES_LEN (MODULES_END - MODULES_VADDR) +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "processor_32.h" ++#else ++# include "processor_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:56:27.000000000 +0100 +@@ -70,17 +70,22 @@ struct cpuinfo_x86 { + char pad0; + int x86_power; + unsigned long loops_per_jiffy; ++#ifndef CONFIG_XEN + #ifdef CONFIG_SMP + cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif + unsigned char x86_max_cores; /* cpuid returned max cores value */ + unsigned char apicid; ++#endif + unsigned short x86_clflush_size; +-#ifdef CONFIG_SMP ++#ifdef CONFIG_X86_HT + unsigned char booted_cores; /* number of cores as seen by OS */ + __u8 phys_proc_id; /* Physical processor id. */ + __u8 cpu_core_id; /* Core id */ + #endif ++#ifdef CONFIG_SMP ++ __u8 cpu_index; /* index into per_cpu list */ ++#endif + } __attribute__((__aligned__(SMP_CACHE_BYTES))); --static DEFINE_SPINLOCK(die_lock); -+static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; - static int die_owner = -1; - static unsigned int die_nest_count; - -@@ -478,13 +483,13 @@ unsigned __kprobes long oops_begin(void) - oops_enter(); - - /* racy, but better than risking deadlock. */ -- local_irq_save(flags); -+ raw_local_irq_save(flags); - cpu = smp_processor_id(); -- if (!spin_trylock(&die_lock)) { -+ if (!__raw_spin_trylock(&die_lock)) { - if (cpu == die_owner) - /* nested oops. should stop eventually */; - else -- spin_lock(&die_lock); -+ __raw_spin_lock(&die_lock); - } - die_nest_count++; - die_owner = cpu; -@@ -498,12 +503,10 @@ void __kprobes oops_end(unsigned long fl - die_owner = -1; - bust_spinlocks(0); - die_nest_count--; -- if (die_nest_count) -- /* We still own the lock */ -- local_irq_restore(flags); -- else -+ if (!die_nest_count) - /* Nest count reaches zero, release the lock. */ -- spin_unlock_irqrestore(&die_lock, flags); -+ __raw_spin_unlock(&die_lock); -+ raw_local_irq_restore(flags); - if (panic_on_oops) - panic("Fatal exception"); - oops_exit(); -@@ -636,6 +639,7 @@ asmlinkage void do_##name(struct pt_regs - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = (void __user *)siaddr; \ -+ trace_hardirqs_fixup(); \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ - return; \ -@@ -741,11 +745,8 @@ mem_parity_error(unsigned char reason, s - - printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); + #define X86_VENDOR_INTEL 0 +@@ -106,14 +111,19 @@ DECLARE_PER_CPU(struct tss_struct, init_ + #endif --#if 0 /* XEN */ - /* Clear and disable the memory parity error line. 
*/ -- reason = (reason & 0xf) | 4; -- outb(reason, 0x61); --#endif /* XEN */ -+ clear_mem_error(reason); - } + #ifdef CONFIG_SMP +-extern struct cpuinfo_x86 cpu_data[]; +-#define current_cpu_data cpu_data[smp_processor_id()] ++DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); ++#define cpu_data(cpu) per_cpu(cpu_info, cpu) ++#define current_cpu_data cpu_data(smp_processor_id()) + #else +-#define cpu_data (&boot_cpu_data) +-#define current_cpu_data boot_cpu_data ++#define cpu_data(cpu) boot_cpu_data ++#define current_cpu_data boot_cpu_data + #endif - static __kprobes void -@@ -754,14 +755,8 @@ io_check_error(unsigned char reason, str - printk("NMI: IOCK error (debug interrupt?)\n"); - show_registers(regs); +-extern int cpu_llc_id[NR_CPUS]; ++/* ++ * the following now lives in the per cpu area: ++ * extern int cpu_llc_id[NR_CPUS]; ++ */ ++DECLARE_PER_CPU(u8, cpu_llc_id); + extern char ignore_fpu_irq; --#if 0 /* XEN */ - /* Re-enable the IOCK line, wait for a few seconds */ -- reason = (reason & 0xf) | 8; -- outb(reason, 0x61); -- mdelay(2000); -- reason &= ~8; -- outb(reason, 0x61); --#endif /* XEN */ -+ clear_io_check_error(reason); + void __init cpu_detect(struct cpuinfo_x86 *c); +@@ -560,7 +570,9 @@ static inline void xen_set_iopl_mask(uns + * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx + * resulting in stale register contents being returned. + */ +-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) ++static inline void cpuid(unsigned int op, ++ unsigned int *eax, unsigned int *ebx, ++ unsigned int *ecx, unsigned int *edx) + { + *eax = op; + *ecx = 0; +@@ -568,8 +580,9 @@ static inline void cpuid(unsigned int op } - static __kprobes void -@@ -821,6 +816,8 @@ asmlinkage __kprobes void default_do_nmi - /* runs on IST stack. */ - asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code) + /* Some CPUID calls want 'count' to be placed in ecx */ +-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, +- int *edx) ++static inline void cpuid_count(unsigned int op, int count, ++ unsigned int *eax, unsigned int *ebx, ++ unsigned int *ecx, unsigned int *edx) { -+ trace_hardirqs_fixup(); -+ - if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { - return; - } -@@ -858,6 +855,8 @@ asmlinkage void __kprobes do_debug(struc - struct task_struct *tsk = current; - siginfo_t info; + *eax = op; + *ecx = count; +@@ -639,6 +652,17 @@ static inline unsigned int cpuid_edx(uns + #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" + #define K7_NOP8 K7_NOP7 ASM_NOP1 -+ trace_hardirqs_fixup(); ++/* P6 nops */ ++/* uses eax dependencies (Intel-recommended choice) */ ++#define P6_NOP1 GENERIC_NOP1 ++#define P6_NOP2 ".byte 0x66,0x90\n" ++#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" ++#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" ++#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" ++#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" ++#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" ++#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" + - get_debugreg(condition, 6); - - if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,4 @@ - /* -- * linux/arch/x86_64/kernel/vsyscall.c -- * - * Copyright (C) 2001 Andrea Arcangeli SuSE - * Copyright 2003 Andi Kleen, SuSE Labs. 
- * -@@ -50,12 +48,12 @@ - ({unsigned long v; \ - extern char __vsyscall_0; \ - asm("" : "=r" (v) : "0" (x)); \ -- ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); }) -+ ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); }) - - /* - * vsyscall_gtod_data contains data that is : - * - readonly from vsyscalls -- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64) -+ * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64) - * Try to keep this structure as small as possible to avoid cache line ping pongs - */ - int __vgetcpu_mode __section_vgetcpu_mode; -@@ -66,6 +64,16 @@ struct vsyscall_gtod_data __vsyscall_gto - .sysctl_enabled = 1, - }; + #ifdef CONFIG_MK8 + #define ASM_NOP1 K8_NOP1 + #define ASM_NOP2 K8_NOP2 +@@ -657,6 +681,17 @@ static inline unsigned int cpuid_edx(uns + #define ASM_NOP6 K7_NOP6 + #define ASM_NOP7 K7_NOP7 + #define ASM_NOP8 K7_NOP8 ++#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \ ++ defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \ ++ defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4) ++#define ASM_NOP1 P6_NOP1 ++#define ASM_NOP2 P6_NOP2 ++#define ASM_NOP3 P6_NOP3 ++#define ASM_NOP4 P6_NOP4 ++#define ASM_NOP5 P6_NOP5 ++#define ASM_NOP6 P6_NOP6 ++#define ASM_NOP7 P6_NOP7 ++#define ASM_NOP8 P6_NOP8 + #else + #define ASM_NOP1 GENERIC_NOP1 + #define ASM_NOP2 GENERIC_NOP2 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -62,19 +62,26 @@ struct cpuinfo_x86 { + int x86_cache_alignment; + int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ + __u8 x86_virt_bits, x86_phys_bits; ++#ifndef CONFIG_XEN + __u8 x86_max_cores; /* cpuid returned max cores value */ ++#endif + __u32 x86_power; + __u32 extended_cpuid_level; /* Max extended CPUID function supported */ + unsigned long loops_per_jiffy; +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + cpumask_t llc_shared_map; /* cpus sharing the last level cache */ + #endif ++#ifndef CONFIG_XEN + __u8 apicid; +-#ifdef CONFIG_SMP ++#endif ++#ifdef CONFIG_X86_HT + __u8 booted_cores; /* number of cores as seen by OS */ + __u8 phys_proc_id; /* Physical Processor id. */ + __u8 cpu_core_id; /* Core id. 
*/ + #endif ++#ifdef CONFIG_SMP ++ __u8 cpu_index; /* index into per_cpu list */ ++#endif + } ____cacheline_aligned; -+void update_vsyscall_tz(void) -+{ -+ unsigned long flags; -+ -+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); -+ /* sys_tz has changed */ -+ vsyscall_gtod_data.sys_tz = sys_tz; -+ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); -+} -+ - void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) - { - unsigned long flags; -@@ -79,8 +87,6 @@ void update_vsyscall(struct timespec *wa - vsyscall_gtod_data.clock.shift = clock->shift; - vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; - vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; -- vsyscall_gtod_data.sys_tz = sys_tz; -- vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; - vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; - write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); - } -@@ -166,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t) - if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) - return time_syscall(t); + #define X86_VENDOR_INTEL 0 +@@ -88,11 +95,12 @@ struct cpuinfo_x86 { + #define X86_VENDOR_UNKNOWN 0xff -- vgettimeofday(&tv, 0); -+ vgettimeofday(&tv, NULL); - result = tv.tv_sec; - if (t) - *t = result; -@@ -260,18 +266,10 @@ out: - return ret; - } + #ifdef CONFIG_SMP +-extern struct cpuinfo_x86 cpu_data[]; +-#define current_cpu_data cpu_data[smp_processor_id()] ++DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); ++#define cpu_data(cpu) per_cpu(cpu_info, cpu) ++#define current_cpu_data cpu_data(smp_processor_id()) + #else +-#define cpu_data (&boot_cpu_data) +-#define current_cpu_data boot_cpu_data ++#define cpu_data(cpu) boot_cpu_data ++#define current_cpu_data boot_cpu_data + #endif --static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen, -- void __user *oldval, size_t __user *oldlenp, -- void __user *newval, size_t newlen) --{ -- return -ENOSYS; --} -- - static ctl_table kernel_table2[] = { -- { .ctl_name = 99, .procname = "vsyscall64", -+ { .procname = "vsyscall64", - .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int), - .mode = 0644, -- .strategy = vsyscall_sysctl_nostrat, - .proc_handler = vsyscall_sysctl_change }, - {} + extern char ignore_irq13; +@@ -343,6 +351,16 @@ struct extended_sigtable { }; -@@ -291,9 +289,9 @@ static void __cpuinit vsyscall_set_cpu(i - unsigned long d; - unsigned long node = 0; - #ifdef CONFIG_NUMA -- node = cpu_to_node[cpu]; -+ node = cpu_to_node(cpu); - #endif -- if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) -+ if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) - write_rdtscp_aux((node << 12) | cpu); - /* Store cpu number in limit so that it can be loaded quickly ---- head-2010-05-25.orig/arch/x86/mm/fault_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -25,6 +25,7 @@ - #include - #include - #include -+#include - #include - #include -@@ -32,33 +33,27 @@ ++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) ++#define ASM_NOP1 P6_NOP1 ++#define ASM_NOP2 P6_NOP2 ++#define ASM_NOP3 P6_NOP3 ++#define ASM_NOP4 P6_NOP4 ++#define ASM_NOP5 P6_NOP5 ++#define ASM_NOP6 P6_NOP6 ++#define ASM_NOP7 P6_NOP7 ++#define ASM_NOP8 P6_NOP8 ++#else + #define ASM_NOP1 K8_NOP1 + #define ASM_NOP2 K8_NOP2 + #define ASM_NOP3 K8_NOP3 +@@ -351,6 +369,7 @@ struct extended_sigtable { + #define ASM_NOP6 K8_NOP6 + #define ASM_NOP7 K8_NOP7 + #define ASM_NOP8 K8_NOP8 ++#endif - extern void die(const char *,struct pt_regs 
*,long); + /* Opteron nops */ + #define K8_NOP1 ".byte 0x90\n" +@@ -362,6 +381,17 @@ struct extended_sigtable { + #define K8_NOP7 K8_NOP4 K8_NOP3 + #define K8_NOP8 K8_NOP4 K8_NOP4 --static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); ++/* P6 nops */ ++/* uses eax dependencies (Intel-recommended choice) */ ++#define P6_NOP1 ".byte 0x90\n" ++#define P6_NOP2 ".byte 0x66,0x90\n" ++#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" ++#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" ++#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" ++#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" ++#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" ++#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" ++ + #define ASM_NOP_MAX 8 + + /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ +@@ -377,12 +407,6 @@ static inline void sync_core(void) + asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); + } + +-#define ARCH_HAS_PREFETCH +-static inline void prefetch(void *x) +-{ +- asm volatile("prefetcht0 (%0)" :: "r" (x)); +-} - --int register_page_fault_notifier(struct notifier_block *nb) -+#ifdef CONFIG_KPROBES -+static inline int notify_page_fault(struct pt_regs *regs) - { -- vmalloc_sync_all(); -- return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); --} --EXPORT_SYMBOL_GPL(register_page_fault_notifier); -+ int ret = 0; + #define ARCH_HAS_PREFETCHW 1 + static inline void prefetchw(void *x) + { +@@ -398,11 +422,6 @@ static inline void prefetchw(void *x) --int unregister_page_fault_notifier(struct notifier_block *nb) + #define cpu_relax() rep_nop() + +-static inline void serialize_cpu(void) -{ -- return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); +- __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); -} --EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); -+ /* kprobe_running() needs smp_processor_id() */ -+ if (!user_mode_vm(regs)) { -+ preempt_disable(); -+ if (kprobe_running() && kprobe_fault_handler(regs, 14)) -+ ret = 1; -+ preempt_enable(); -+ } - --static inline int notify_page_fault(struct pt_regs *regs, long err) -+ return ret; -+} -+#else -+static inline int notify_page_fault(struct pt_regs *regs) +- + static inline void __monitor(const void *eax, unsigned long ecx, + unsigned long edx) { -- struct die_args args = { -- .regs = regs, -- .str = "page fault", -- .err = err, -- .trapnr = 14, -- .signr = SIGSEGV -- }; -- return atomic_notifier_call_chain(¬ify_page_fault_chain, -- DIE_PAGE_FAULT, &args); -+ return 0; - } +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "smp_32.h" ++#else ++# include "smp_64.h" +#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:56:27.000000000 +0100 +@@ -11,7 +11,7 @@ + #endif - /* - * Return EIP plus the CS segment base. The segment limit is also -@@ -110,7 +105,7 @@ static inline unsigned long get_segment_ - LDT and other horrors are only used in user space. */ - if (seg & (1<<2)) { - /* Must lock the LDT while reading it. 
*/ -- down(¤t->mm->context.sem); -+ mutex_lock(¤t->mm->context.lock); - desc = current->mm->context.ldt; - desc = (void *)desc + (seg & ~7); - } else { -@@ -123,7 +118,7 @@ static inline unsigned long get_segment_ - base = get_desc_base((unsigned long *)desc); + #if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__) +-#include ++#include + #include + #include + #ifdef CONFIG_X86_IO_APIC +@@ -29,19 +29,20 @@ + + extern void smp_alloc_memory(void); + extern int pic_mode; +-extern int smp_num_siblings; +-extern cpumask_t cpu_sibling_map[]; +-extern cpumask_t cpu_core_map[]; - if (seg & (1<<2)) { -- up(¤t->mm->context.sem); -+ mutex_unlock(¤t->mm->context.lock); - } else - put_cpu(); + extern void (*mtrr_hook) (void); + extern void zap_low_mappings (void); + extern void lock_ipi_call_lock(void); + extern void unlock_ipi_call_lock(void); -@@ -244,7 +239,7 @@ static void dump_fault_path(unsigned lon - if (mfn_to_pfn(mfn) >= highstart_pfn) - return; - #endif -- if (p[0] & _PAGE_PRESENT) { -+ if ((p[0] & _PAGE_PRESENT) && !(p[0] & _PAGE_PSE)) { - page = mfn_to_pfn(mfn) << PAGE_SHIFT; - p = (unsigned long *) __va(page); - address &= 0x001fffff; -@@ -270,7 +265,8 @@ static void dump_fault_path(unsigned lon - * it's allocated already. - */ - if ((machine_to_phys(page) >> PAGE_SHIFT) < max_low_pfn -- && (page & _PAGE_PRESENT)) { -+ && (page & _PAGE_PRESENT) -+ && !(page & _PAGE_PSE)) { - page = machine_to_phys(page & PAGE_MASK); - page = ((unsigned long *) __va(page))[(address >> PAGE_SHIFT) - & (PTRS_PER_PTE - 1)]; -@@ -416,6 +412,11 @@ fastcall void __kprobes do_page_fault(st - int write, si_code; - int fault; ++#ifndef CONFIG_XEN + #define MAX_APICID 256 +-extern u8 x86_cpu_to_apicid[]; ++extern u8 __initdata x86_cpu_to_apicid_init[]; ++extern void *x86_cpu_to_apicid_ptr; ++DECLARE_PER_CPU(u8, x86_cpu_to_apicid); ++#endif -+ /* -+ * We can fault from pretty much anywhere, with unknown IRQ state. -+ */ -+ trace_hardirqs_fixup(); -+ - /* get the address */ - address = read_cr2(); +-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] ++#define cpu_physical_id(cpu) (cpu) -@@ -453,7 +454,7 @@ fastcall void __kprobes do_page_fault(st - /* Can take a spurious fault if mapping changes R/O -> R/W. */ - if (spurious_fault(regs, address, error_code)) - return; -- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) -+ if (notify_page_fault(regs)) - return; - /* - * Don't take the mm semaphore here. If we fixup a prefetch -@@ -462,7 +463,7 @@ fastcall void __kprobes do_page_fault(st - goto bad_area_nosemaphore; - } + #ifdef CONFIG_HOTPLUG_CPU + extern void cpu_exit_clear(void); +@@ -148,7 +149,7 @@ extern unsigned int num_processors; + #else /* CONFIG_SMP */ -- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) -+ if (notify_page_fault(regs)) - return; + #define safe_smp_processor_id() 0 +-#define cpu_physical_id(cpu) boot_cpu_physical_apicid ++#define cpu_physical_id(cpu) 0 - /* It's safe to allow irq's after cr2 has been saved and the vmalloc -@@ -481,7 +482,7 @@ fastcall void __kprobes do_page_fault(st + #define NO_PROC_ID 0xFF /* No processor magic marker */ - /* When running in the kernel we expect faults to occur only to - * addresses in user space. All other faults represent errors in the -- * kernel and should generate an OOPS. Unfortunatly, in the case of an -+ * kernel and should generate an OOPS. Unfortunately, in the case of an - * erroneous fault occurring in a code path which already holds mmap_sem - * we will deadlock attempting to validate the fault against the - * address space. 
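Editorial sketch, not part of the patch: vsyscall_set_cpu() in the hunks above programs the RDTSCP auxiliary register with (node << 12) | cpu so that vgetcpu() can recover both numbers without entering the kernel. A user-space illustration of the same decode, assuming a CPU that implements RDTSCP:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t lo, hi, aux;

    /* RDTSCP returns the TSC in EDX:EAX and IA32_TSC_AUX in ECX; the
     * kernel code above stored (node << 12) | cpu in that MSR per CPU. */
    asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux));

    printf("tsc=%llu cpu=%u node=%u\n",
           ((unsigned long long)hi << 32) | lo,
           aux & 0xfff, aux >> 12);
    return 0;
}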
Luckily the kernel only validly references user -@@ -489,7 +490,7 @@ fastcall void __kprobes do_page_fault(st - * exceptions table. - * - * As the vast majority of faults will be valid we will only perform -- * the source reference check when there is a possibilty of a deadlock. -+ * the source reference check when there is a possibility of a deadlock. - * Attempt to lock the address space, if we cannot we then validate the - * source. If this is invalid we can skip the address space check, - * thus avoiding the deadlock. -@@ -598,8 +599,8 @@ bad_area_nosemaphore: - printk_ratelimit()) { - printk("%s%s[%d]: segfault at %08lx eip %08lx " - "esp %08lx error %lx\n", -- tsk->pid > 1 ? KERN_INFO : KERN_EMERG, -- tsk->comm, tsk->pid, address, regs->eip, -+ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, -+ tsk->comm, task_pid_nr(tsk), address, regs->eip, - regs->esp, error_code); - } - tsk->thread.cr2 = address; -@@ -664,8 +665,7 @@ no_context: - printk(KERN_ALERT "BUG: unable to handle kernel paging" - " request"); - printk(" at virtual address %08lx\n",address); -- printk(KERN_ALERT " printing eip:\n"); -- printk("%08lx\n", regs->eip); -+ printk(KERN_ALERT "printing eip: %08lx\n", regs->eip); - dump_fault_path(address); - } - tsk->thread.cr2 = address; -@@ -681,14 +681,14 @@ no_context: - */ - out_of_memory: - up_read(&mm->mmap_sem); -- if (is_init(tsk)) { -+ if (is_global_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk("VM: killing process %s\n", tsk->comm); - if (error_code & 4) -- do_exit(SIGKILL); -+ do_group_exit(SIGKILL); - goto no_context; +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -38,12 +38,11 @@ extern void smp_alloc_memory(void); + extern volatile unsigned long smp_invalidate_needed; + extern void lock_ipi_call_lock(void); + extern void unlock_ipi_call_lock(void); +-extern int smp_num_siblings; + extern void smp_send_reschedule(int cpu); ++extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), ++ void *info, int wait); - do_sigbus: ---- head-2010-05-25.orig/arch/x86/mm/fault_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -25,6 +25,7 @@ - #include - #include - #include -+#include +-extern cpumask_t cpu_sibling_map[NR_CPUS]; +-extern cpumask_t cpu_core_map[NR_CPUS]; +-extern u8 cpu_llc_id[NR_CPUS]; ++DECLARE_PER_CPU(u8, cpu_llc_id); - #include - #include -@@ -40,34 +41,27 @@ - #define PF_RSVD (1<<3) - #define PF_INSTR (1<<4) + #define SMP_TRAMPOLINE_BASE 0x6000 --static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); -- --/* Hook to register for page fault notifications */ --int register_page_fault_notifier(struct notifier_block *nb) -+#ifdef CONFIG_KPROBES -+static inline int notify_page_fault(struct pt_regs *regs) - { -- vmalloc_sync_all(); -- return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); --} --EXPORT_SYMBOL_GPL(register_page_fault_notifier); -+ int ret = 0; +@@ -70,6 +69,9 @@ extern unsigned __cpuinitdata disabled_c --int unregister_page_fault_notifier(struct notifier_block *nb) --{ -- return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); --} --EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); -+ /* kprobe_running() needs smp_processor_id() */ -+ if (!user_mode(regs)) { -+ preempt_disable(); -+ if (kprobe_running() && kprobe_fault_handler(regs, 14)) -+ ret = 1; 
-+ preempt_enable(); -+ } + #endif /* CONFIG_SMP */ --static inline int notify_page_fault(struct pt_regs *regs, long err) -+ return ret; -+} -+#else -+static inline int notify_page_fault(struct pt_regs *regs) ++#define safe_smp_processor_id() smp_processor_id() ++ ++#ifndef CONFIG_XEN + #ifdef CONFIG_X86_LOCAL_APIC + static inline int hard_smp_processor_id(void) { -- struct die_args args = { -- .regs = regs, -- .str = "page fault", -- .err = err, -- .trapnr = 14, -- .signr = SIGSEGV -- }; -- return atomic_notifier_call_chain(¬ify_page_fault_chain, -- DIE_PAGE_FAULT, &args); -+ return 0; - } -+#endif - - /* Sometimes the CPU reports invalid exceptions on prefetch. - Check that here and ignore. -@@ -175,7 +169,7 @@ void dump_pagetable(unsigned long addres - pmd = pmd_offset(pud, address); - if (bad_address(pmd)) goto bad; - printk("PMD %lx ", pmd_val(*pmd)); -- if (!pmd_present(*pmd)) goto ret; -+ if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; +@@ -82,8 +84,9 @@ static inline int hard_smp_processor_id( + * Some lowlevel functions might want to know about + * the real APIC ID <-> CPU # mapping. + */ +-extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ +-extern u8 x86_cpu_to_log_apicid[NR_CPUS]; ++extern u8 __initdata x86_cpu_to_apicid_init[]; ++extern void *x86_cpu_to_apicid_ptr; ++DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */ + extern u8 bios_cpu_apicid[]; - pte = pte_offset_kernel(pmd, address); - if (bad_address(pte)) goto bad; -@@ -294,7 +288,6 @@ static int vmalloc_fault(unsigned long a - return 0; + #ifdef CONFIG_X86_LOCAL_APIC +@@ -95,6 +98,7 @@ static inline int cpu_present_to_apicid( + return BAD_APICID; } + #endif ++#endif /* CONFIG_XEN */ --static int page_fault_trace; - int show_unhandled_signals = 1; + #ifndef CONFIG_SMP + #define stack_smp_processor_id() 0 +@@ -118,9 +122,9 @@ static __inline int logical_smp_processo + #endif + #ifdef CONFIG_SMP +-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] ++#define cpu_physical_id(cpu) (cpu) + #else +-#define cpu_physical_id(cpu) boot_cpu_id ++#define cpu_physical_id(cpu) 0 + #endif /* !CONFIG_SMP */ + #endif -@@ -371,6 +364,11 @@ asmlinkage void __kprobes do_page_fault( - if (!user_mode(regs)) - error_code &= ~PF_USER; /* means kernel */ +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/swiotlb.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "swiotlb_32.h" ++#else ++# include_next ++#endif +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "system_32.h" ++#else ++# include "system_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:56:27.000000000 +0100 +@@ -9,6 +9,7 @@ + #include -+ /* -+ * We can fault from pretty much anywhere, with unknown IRQ state. -+ */ -+ trace_hardirqs_fixup(); -+ - tsk = current; - mm = tsk->mm; - prefetchw(&mm->mmap_sem); -@@ -408,7 +406,7 @@ asmlinkage void __kprobes do_page_fault( - /* Can take a spurious fault if mapping changes R/O -> R/W. */ - if (spurious_fault(regs, address, error_code)) - return; -- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) -+ if (notify_page_fault(regs)) - return; - /* - * Don't take the mm semaphore here. 
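Editorial sketch, not part of the patch: the SIGSEGV these fault handlers raise carries the faulting address in siginfo (si_addr), the same address the "segfault at ..." message above logs. A stand-alone user-space illustration of catching it (the address used is arbitrary):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;
static void * volatile fault_addr;

static void on_segv(int sig, siginfo_t *si, void *ctx)
{
    (void)sig; (void)ctx;
    fault_addr = si->si_addr;        /* same address do_page_fault() reports */
    siglongjmp(env, 1);              /* unwind out of the faulting access */
}

int main(void)
{
    struct sigaction sa = { .sa_sigaction = on_segv, .sa_flags = SA_SIGINFO };

    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, NULL);

    if (sigsetjmp(env, 1) == 0) {
        volatile char *bad = (char *)0x10;   /* arbitrary unmapped address */
        (void)*bad;                          /* faults; handler jumps back */
    } else {
        printf("caught SIGSEGV at %p\n", fault_addr);
    }
    return 0;
}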
If we fixup a prefetch -@@ -417,16 +415,12 @@ asmlinkage void __kprobes do_page_fault( - goto bad_area_nosemaphore; - } + #ifdef __KERNEL__ ++#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */ -- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) -+ if (notify_page_fault(regs)) - return; - - if (likely(regs->eflags & X86_EFLAGS_IF)) - local_irq_enable(); - -- if (unlikely(page_fault_trace)) -- printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n", -- regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code); -- - if (unlikely(error_code & PF_RSVD)) - pgtable_bad(address, regs, error_code); - -@@ -447,7 +441,7 @@ asmlinkage void __kprobes do_page_fault( - again: - /* When running in the kernel we expect faults to occur only to - * addresses in user space. All other faults represent errors in the -- * kernel and should generate an OOPS. Unfortunatly, in the case of an -+ * kernel and should generate an OOPS. Unfortunately, in the case of an - * erroneous fault occurring in a code path which already holds mmap_sem - * we will deadlock attempting to validate the fault against the - * address space. Luckily the kernel only validly references user -@@ -455,7 +449,7 @@ asmlinkage void __kprobes do_page_fault( - * exceptions table. - * - * As the vast majority of faults will be valid we will only perform -- * the source reference check when there is a possibilty of a deadlock. -+ * the source reference check when there is a possibility of a deadlock. - * Attempt to lock the address space, if we cannot we then validate the - * source. If this is invalid we can skip the address space check, - * thus avoiding the deadlock. -@@ -557,7 +551,7 @@ bad_area_nosemaphore: - if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && - printk_ratelimit()) { - printk( -- "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n", -+ "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n", - tsk->pid > 1 ? KERN_INFO : KERN_EMERG, - tsk->comm, tsk->pid, address, regs->rip, - regs->rsp, error_code); -@@ -623,7 +617,7 @@ no_context: - */ - out_of_memory: - up_read(&mm->mmap_sem); -- if (is_init(current)) { -+ if (is_global_init(current)) { - yield(); - goto again; - } -@@ -690,10 +684,3 @@ void vmalloc_sync_all(void) - BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == - (__START_KERNEL & PGDIR_MASK))); + struct task_struct; /* one of the stranger aspects of C forward declarations.. 
*/ + extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); +@@ -138,7 +139,7 @@ static inline unsigned long xen_read_cr4 + { + unsigned long val; + /* This could fault if %cr4 does not exist */ +- asm("1: movl %%cr4, %0 \n" ++ asm volatile("1: movl %%cr4, %0 \n" + "2: \n" + ".section __ex_table,\"a\" \n" + ".long 1b,2b \n" +@@ -157,6 +158,11 @@ static inline void xen_wbinvd(void) + asm volatile("wbinvd": : :"memory"); } -- --static int __init enable_pagefaulttrace(char *str) --{ -- page_fault_trace = 1; -- return 1; --} --__setup("pagefaulttrace", enable_pagefaulttrace); ---- head-2010-05-25.orig/arch/x86/mm/hypervisor.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/hypervisor.c 2010-03-24 15:10:29.000000000 +0100 -@@ -500,6 +500,9 @@ int xen_create_contiguous_region( - unsigned long frame, flags; - unsigned int i; - int rc, success; -+#ifdef CONFIG_64BIT -+ pte_t *ptep = NULL; -+#endif - struct xen_memory_exchange exchange = { - .in = { - .nr_extents = 1UL << order, -@@ -525,6 +528,27 @@ int xen_create_contiguous_region( - if (unlikely(order > MAX_CONTIG_ORDER)) - return -ENOMEM; -+#ifdef CONFIG_64BIT -+ if (unlikely(vstart > PAGE_OFFSET + MAXMEM)) { -+ unsigned int level; -+ -+ if (vstart < __START_KERNEL_map -+ || vstart + (PAGE_SIZE << order) > (unsigned long)_end) -+ return -EINVAL; -+ ptep = lookup_address((unsigned long)__va(__pa(vstart)), -+ &level); -+ if (ptep && pte_none(*ptep)) -+ ptep = NULL; -+ if (vstart < __START_KERNEL && ptep) -+ return -EINVAL; -+ if (order > MAX_CONTIG_ORDER - 1) -+ return -ENOMEM; -+ } -+#else -+ if (unlikely(vstart + (PAGE_SIZE << order) > (unsigned long)high_memory)) -+ return -EINVAL; -+#endif ++static inline void clflush(volatile void *__p) ++{ ++ asm volatile("clflush %0" : "+m" (*(char __force *)__p)); ++} + - set_xen_guest_handle(exchange.in.extent_start, in_frames); - set_xen_guest_handle(exchange.out.extent_start, &out_frame); + #define read_cr0() (xen_read_cr0()) + #define write_cr0(x) (xen_write_cr0(x)) + #define read_cr2() (xen_read_cr2()) +@@ -207,6 +213,7 @@ static inline unsigned long get_limit(un -@@ -537,9 +561,19 @@ int xen_create_contiguous_region( - in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i); - MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE), - __pte_ma(0), 0); -+#ifdef CONFIG_64BIT -+ if (ptep) -+ MULTI_update_va_mapping(cr_mcl + i + (1U << order), -+ (unsigned long)__va(__pa(vstart)) + (i*PAGE_SIZE), -+ __pte_ma(0), 0); -+#endif - set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, - INVALID_P2M_ENTRY); - } -+#ifdef CONFIG_64BIT -+ if (ptep) -+ i += i; -+#endif - if (HYPERVISOR_multicall_check(cr_mcl, i, NULL)) - BUG(); + #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) + #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) ++#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) -@@ -573,9 +607,18 @@ int xen_create_contiguous_region( - frame = success ? 
(out_frame + i) : in_frames[i]; - MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE), - pfn_pte_ma(frame, PAGE_KERNEL), 0); -+#ifdef CONFIG_64BIT -+ if (ptep) -+ MULTI_update_va_mapping(cr_mcl + i + (1U << order), -+ (unsigned long)__va(__pa(vstart)) + (i*PAGE_SIZE), -+ pfn_pte_ma(frame, PAGE_KERNEL_RO), 0); + /** + * read_barrier_depends - Flush all pending reads that subsequents reads +@@ -262,18 +269,18 @@ static inline unsigned long get_limit(un + + #define read_barrier_depends() do { } while(0) + ++#ifdef CONFIG_SMP ++#define smp_mb() mb() ++#ifdef CONFIG_X86_PPRO_FENCE ++# define smp_rmb() rmb() ++#else ++# define smp_rmb() barrier() +#endif - set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame); - } + #ifdef CONFIG_X86_OOSTORE +-/* Actually there are no OOO store capable CPUs for now that do SSE, +- but make it already an possibility. */ +-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) ++# define smp_wmb() wmb() + #else +-#define wmb() __asm__ __volatile__ ("": : :"memory") ++# define smp_wmb() barrier() + #endif - -+#ifdef CONFIG_64BIT -+ if (ptep) -+ i += i; -+#endif - cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order - ? UVMF_TLB_FLUSH|UVMF_ALL - : UVMF_INVLPG|UVMF_ALL; ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -94,7 +94,14 @@ static pte_t * __init one_page_table_ini +-#ifdef CONFIG_SMP +-#define smp_mb() mb() +-#define smp_rmb() rmb() +-#define smp_wmb() wmb() + #define smp_read_barrier_depends() read_barrier_depends() + #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #else - if (!(__pmd_val(*pmd) & _PAGE_PRESENT)) { +@@ -300,5 +307,6 @@ extern unsigned long arch_align_stack(un + extern void free_init_pages(char *what, unsigned long begin, unsigned long end); + + void default_idle(void); ++void __show_registers(struct pt_regs *, int all); + #endif -- pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); -+ pte_t *page_table = NULL; -+ -+#ifdef CONFIG_DEBUG_PAGEALLOC -+ page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -11,8 +11,12 @@ + + #ifdef __KERNEL__ + +-#define __STR(x) #x +-#define STR(x) __STR(x) ++/* entries in ARCH_DLINFO: */ ++#ifdef CONFIG_IA32_EMULATION ++# define AT_VECTOR_SIZE_ARCH 2 ++#else ++# define AT_VECTOR_SIZE_ARCH 1 +#endif -+ if (!page_table) -+ page_table = -+ (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); - paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT); - make_lowmem_page_readonly(page_table, -@@ -102,7 +109,7 @@ static pte_t * __init one_page_table_ini - set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); - BUG_ON(page_table != pte_offset_kernel(pmd, 0)); - } -- -+ - return pte_offset_kernel(pmd, 0); - } + #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" + #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" +@@ -92,7 +96,7 @@ static inline void write_cr0(unsigned lo -@@ -360,8 +367,13 @@ static void __init set_highmem_pages_ini - { - int pfn; - for (pfn = highstart_pfn; pfn < highend_pfn -- && pfn < xen_start_info->nr_pages; pfn++) -- add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro); -+ && pfn < xen_start_info->nr_pages; pfn++) { -+ /* -+ * Holes under sparsemem might 
not have no mem_map[]: -+ */ -+ if (pfn_valid(pfn)) -+ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro); -+ } + #define read_cr3() ({ \ + unsigned long __dummy; \ +- asm("movq %%cr3,%0" : "=r" (__dummy)); \ ++ asm volatile("movq %%cr3,%0" : "=r" (__dummy)); \ + machine_to_phys(__dummy); \ + }) - /* XEN: init high-mem pages outside initial allocation. */ - for (; pfn < highend_pfn; pfn++) { -@@ -785,35 +797,18 @@ int arch_add_memory(int nid, u64 start, - return __add_pages(zone, start_pfn, nr_pages); +@@ -105,7 +109,7 @@ static inline void write_cr3(unsigned lo + static inline unsigned long read_cr4(void) + { + unsigned long cr4; +- asm("movq %%cr4,%0" : "=r" (cr4)); ++ asm volatile("movq %%cr4,%0" : "=r" (cr4)); + return cr4; } --int remove_memory(u64 start, u64 size) --{ -- return -EINVAL; --} --EXPORT_SYMBOL_GPL(remove_memory); - #endif +@@ -131,12 +135,17 @@ static inline void write_cr8(unsigned lo - struct kmem_cache *pmd_cache; + #endif /* __KERNEL__ */ - void __init pgtable_cache_init(void) - { -- size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t); -- -- if (PTRS_PER_PMD > 1) { -+ if (PTRS_PER_PMD > 1) - pmd_cache = kmem_cache_create("pmd", -- PTRS_PER_PMD*sizeof(pmd_t), -- PTRS_PER_PMD*sizeof(pmd_t), -- SLAB_PANIC, -- pmd_ctor); -- if (!SHARED_KERNEL_PMD) { -- /* If we're in PAE mode and have a non-shared -- kernel pmd, then the pgd size must be a -- page size. This is because the pgd_list -- links through the page structure, so there -- can only be one pgd per page for this to -- work. */ -- pgd_size = PAGE_SIZE; -- } -- } -+ PTRS_PER_PMD*sizeof(pmd_t), -+ PTRS_PER_PMD*sizeof(pmd_t), -+ SLAB_PANIC, -+ pmd_ctor); - } ++static inline void clflush(volatile void *__p) ++{ ++ asm volatile("clflush %0" : "+m" (*(char __force *)__p)); ++} ++ + #define nop() __asm__ __volatile__ ("nop") - /* ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:47:49.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-04-29 09:48:00.000000000 +0200 -@@ -798,7 +798,7 @@ static void xen_finish_init_mapping(void - /* Setup the direct mapping of the physical memory at PAGE_OFFSET. - This runs before bootmem is initialized and gets pages directly from the - physical memory. To access them they are temporarily mapped. 
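Editorial sketch, not part of the patch: the clflush() inline added in the system_32.h/system_64.h hunks above is a thin wrapper around the CLFLUSH instruction, used when page attributes change so stale cache lines do not linger. A user-space illustration of flushing a buffer line by line, assuming a 64-byte line size (the real size is reported via CPUID):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE 64   /* assumed; CPUID leaf 1, EBX[15:8] * 8 gives the real size */

static inline void clflush(volatile void *p)
{
    asm volatile("clflush (%0)" :: "r" (p) : "memory");
}

static void clflush_range(void *addr, size_t size)
{
    char *p = addr;
    size_t i;

    for (i = 0; i < size; i += CACHE_LINE)
        clflush(p + i);
    asm volatile("mfence" ::: "memory");    /* CLFLUSH is only ordered by MFENCE */
}

int main(void)
{
    enum { LEN = 4096 };
    char *buf = malloc(LEN);

    if (!buf)
        return 1;
    for (int i = 0; i < LEN; i++)
        buf[i] = (char)i;
    clflush_range(buf, LEN);                /* next reads come from memory again */
    printf("flushed %u bytes\n", (unsigned)LEN);
    free(buf);
    return 0;
}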
*/ --void __meminit init_memory_mapping(unsigned long start, unsigned long end) -+void __init_refok init_memory_mapping(unsigned long start, unsigned long end) - { - unsigned long next; + #ifdef CONFIG_SMP + #define smp_mb() mb() +-#define smp_rmb() rmb() +-#define smp_wmb() wmb() ++#define smp_rmb() barrier() ++#define smp_wmb() barrier() + #define smp_read_barrier_depends() do {} while(0) + #else + #define smp_mb() barrier() +@@ -153,12 +162,8 @@ static inline void write_cr8(unsigned lo + */ + #define mb() asm volatile("mfence":::"memory") + #define rmb() asm volatile("lfence":::"memory") +- +-#ifdef CONFIG_UNORDERED_IO + #define wmb() asm volatile("sfence" ::: "memory") +-#else +-#define wmb() asm volatile("" ::: "memory") +-#endif ++ + #define read_barrier_depends() do {} while(0) + #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -@@ -932,12 +932,6 @@ error: +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "tlbflush_32.h" ++#else ++# include "tlbflush_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:56:27.000000000 +0100 +@@ -23,7 +23,6 @@ + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages +- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables + * + * ..but the i386 has somewhat limited tlb flushing capabilities, + * and page-granular flushes are available only on i486 and up. +@@ -97,10 +96,4 @@ static inline void flush_tlb_kernel_rang + flush_tlb_all(); } - EXPORT_SYMBOL_GPL(arch_add_memory); --int remove_memory(u64 start, u64 size) +-static inline void flush_tlb_pgtables(struct mm_struct *mm, +- unsigned long start, unsigned long end) -{ -- return -EINVAL; +- /* i386 does not keep any page table caches in TLB */ -} --EXPORT_SYMBOL_GPL(remove_memory); - - #if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA) - int memory_add_physaddr_to_nid(u64 start) - { -@@ -1216,14 +1210,6 @@ int in_gate_area_no_task(unsigned long a - return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); + #endif /* _I386_TLBFLUSH_H */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:56:27.000000000 +0100 +@@ -28,7 +28,6 @@ + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages +- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables + * + * x86-64 can only flush individual pages or full VMs. For a range flush + * we always do the full VM. Might be worth trying if for a small +@@ -95,12 +94,4 @@ static inline void flush_tlb_kernel_rang + flush_tlb_all(); } --#ifndef CONFIG_XEN --void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) +-static inline void flush_tlb_pgtables(struct mm_struct *mm, +- unsigned long start, unsigned long end) -{ -- return __alloc_bootmem_core(pgdat->bdata, size, -- SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0); +- /* x86_64 does not keep any page table caches in a software TLB. 
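Editorial sketch, not part of the patch: the barrier changes in the system_32.h/system_64.h hunks above reflect the x86 rule that loads are not reordered with other loads and stores are not reordered with other stores, so smp_rmb()/smp_wmb() only need a compiler barrier, while smp_mb() and the mandatory mb()/rmb()/wmb() keep real fence instructions. A small single-threaded illustration of that mapping and of the PAUSE busy-wait hint mentioned earlier:

#include <stdio.h>

/* Same mapping the hunks above settle on for x86-64. */
#define barrier()  asm volatile("" ::: "memory")
#define mb()       asm volatile("mfence" ::: "memory")
#define rmb()      asm volatile("lfence" ::: "memory")
#define wmb()      asm volatile("sfence" ::: "memory")
#define smp_rmb()  barrier()
#define smp_wmb()  barrier()

static int payload;
static volatile int ready;

static void publish(int value)
{
    payload = value;
    smp_wmb();                   /* keep the data store before the flag store */
    ready = 1;
}

static int consume(void)
{
    while (!ready)
        asm volatile("pause");   /* REP NOP: be polite in busy-wait loops */
    smp_rmb();                   /* keep the flag load before the data load */
    return payload;
}

int main(void)
{
    publish(42);
    printf("%d\n", consume());
    return 0;
}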
+- The CPUs do in their hardware TLBs, but they are handled +- by the normal TLB flushing algorithms. */ -} --#endif - - const char *arch_vma_name(struct vm_area_struct *vma) + #endif /* _X8664_TLBFLUSH_H */ +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/xor.h 2011-01-31 17:56:27.000000000 +0100 +@@ -0,0 +1,5 @@ ++#ifdef CONFIG_X86_32 ++# include "../../xor_32.h" ++#else ++# include "xor_64.h" ++#endif +--- head-2011-03-17.orig/arch/x86/kernel/Makefile 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/Makefile 2011-01-31 17:56:27.000000000 +0100 +@@ -132,4 +132,4 @@ endif + disabled-obj-$(CONFIG_XEN) := early-quirks.o hpet.o i8253.o i8259_$(BITS).o reboot.o \ + smpboot_$(BITS).o tsc_$(BITS).o tsc_sync.o + disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += mpparse_64.o +-%/head_64.o %/head_64.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := ++%/head_64.o %/head_64.s: asflags-$(CONFIG_XEN) := +--- head-2011-03-17.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-03-22 12:00:53.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -68,7 +68,7 @@ static int xen_cx_notifier(struct acpi_p + + /* Get dependency relationships */ + if (cx->csd_count) { +- printk("Wow! _CSD is found. Not support for now!\n"); ++ pr_warning("_CSD found: Not supported for now!\n"); + kfree(buf); + return -EINVAL; + } else { +@@ -81,7 +81,7 @@ static int xen_cx_notifier(struct acpi_p + } + + if (!count) { +- printk("No available Cx info for cpu %d\n", pr->acpi_id); ++ pr_info("No available Cx info for cpu %d\n", pr->acpi_id); + kfree(buf); + return -EINVAL; + } +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -90,7 +90,7 @@ __setup("acpi_sleep=", acpi_sleep_setup) + + /* Ouch, we want to delete this. We already have better version in userspace, in + s2ram from suspend.sf.net project */ +-static __init int reset_videomode_after_s3(struct dmi_system_id *d) ++static __init int reset_videomode_after_s3(const struct dmi_system_id *d) { - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) -@@ -1232,3 +1218,48 @@ const char *arch_vma_name(struct vm_area - return "[vsyscall]"; - return NULL; - } -+ -+#ifdef CONFIG_SPARSEMEM_VMEMMAP -+/* -+ * Initialise the sparsemem vmemmap using huge-pages at the PMD level. 
-+ */ -+int __meminit vmemmap_populate(struct page *start_page, -+ unsigned long size, int node) -+{ -+ unsigned long addr = (unsigned long)start_page; -+ unsigned long end = (unsigned long)(start_page + size); -+ unsigned long next; -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ -+ for (; addr < end; addr = next) { -+ next = pmd_addr_end(addr, end); -+ -+ pgd = vmemmap_pgd_populate(addr, node); -+ if (!pgd) -+ return -ENOMEM; -+ pud = vmemmap_pud_populate(pgd, addr, node); -+ if (!pud) -+ return -ENOMEM; -+ -+ pmd = pmd_offset(pud, addr); -+ if (pmd_none(*pmd)) { -+ pte_t entry; -+ void *p = vmemmap_alloc_block(PMD_SIZE, node); -+ if (!p) -+ return -ENOMEM; -+ -+ entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); -+ mk_pte_huge(entry); -+ set_pmd(pmd, __pmd(pte_val(entry))); -+ -+ printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n", -+ addr, addr + PMD_SIZE - 1, p, node); -+ } else -+ vmemmap_verify((pte_t *)pmd, node, addr, next); -+ } + acpi_realmode_flags |= 2; + return 0; +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -123,6 +123,3 @@ static int __init acpi_sleep_setup(char + __setup("acpi_sleep=", acpi_sleep_setup); + #endif /* CONFIG_ACPI_PV_SLEEP */ + +-void acpi_pci_link_exit(void) +-{ +-} +--- head-2011-03-17.orig/arch/x86/kernel/cpu/Makefile 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/Makefile 2011-02-03 14:29:46.000000000 +0100 +@@ -34,6 +34,8 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ + + obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o + ++disabled-obj-$(CONFIG_XEN) := perfctr-watchdog.o + -+ return 0; -+} -+#endif ---- head-2010-05-25.orig/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -17,9 +17,6 @@ - #include - #include + quiet_cmd_mkcapflags = MKCAP $@ + cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ --LIST_HEAD(mm_unpinned); --DEFINE_SPINLOCK(mm_unpinned_lock); -- - static void _pin_lock(struct mm_struct *mm, int lock) { - if (lock) - spin_lock(&mm->page_table_lock); -@@ -81,8 +78,8 @@ static void _pin_lock(struct mm_struct * - #define PIN_BATCH 8 - static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl); +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -214,7 +214,7 @@ static void __cpuinit get_cpu_vendor(str --static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags, -- unsigned int cpu, unsigned int seq) -+static inline unsigned int pgd_walk_set_prot(void *pt, pgprot_t flags, -+ unsigned int cpu, unsigned int seq) + static int __init x86_fxsr_setup(char * s) { - struct page *page = virt_to_page(pt); - unsigned long pfn = page_to_pfn(page); -@@ -100,9 +97,9 @@ static inline unsigned int mm_walk_set_p - return seq; +- /* Tell all the other CPU's to not use it... */ ++ /* Tell all the other CPUs to not use it... */ + disable_x86_fxsr = 1; + + /* +@@ -412,7 +412,9 @@ static void __cpuinit identify_cpu(struc + c->x86_model = c->x86_mask = 0; /* So far unknown... 
*/ + c->x86_vendor_id[0] = '\0'; /* Unset */ + c->x86_model_id[0] = '\0'; /* Unset */ ++#ifndef CONFIG_XEN + c->x86_max_cores = 1; ++#endif + c->x86_clflush_size = 32; + memset(&c->x86_capability, 0, sizeof c->x86_capability); + +--- head-2011-03-17.orig/arch/x86/kernel/cpu/intel.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/intel.c 2011-01-31 17:56:27.000000000 +0100 +@@ -292,6 +292,7 @@ static void __cpuinit srat_detect_node(s + #endif } --static void mm_walk(struct mm_struct *mm, pgprot_t flags) -+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags) ++#ifndef CONFIG_XEN + /* + * find out the number of processor cores on the die + */ +@@ -309,6 +310,7 @@ static int __cpuinit intel_num_cpu_cores + else + return 1; + } ++#endif + + static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) { -- pgd_t *pgd; -+ pgd_t *pgd = pgd_base; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; -@@ -110,7 +107,6 @@ static void mm_walk(struct mm_struct *mm - unsigned int cpu, seq; - multicall_entry_t *mcl; +@@ -432,6 +434,7 @@ static void __cpuinit init_intel(struct + set_cpu_cap(c, X86_FEATURE_P3); + #endif -- pgd = mm->pgd; - cpu = get_cpu(); ++#ifndef CONFIG_XEN + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + /* + * let's use the legacy cpuid vector 0x1 and 0x4 for topology +@@ -442,6 +445,7 @@ static void __cpuinit init_intel(struct + detect_ht(c); + #endif + } ++#endif - /* -@@ -125,18 +121,18 @@ static void mm_walk(struct mm_struct *mm - continue; - pud = pud_offset(pgd, 0); - if (PTRS_PER_PUD > 1) /* not folded */ -- seq = mm_walk_set_prot(pud,flags,cpu,seq); -+ seq = pgd_walk_set_prot(pud,flags,cpu,seq); - for (u = 0; u < PTRS_PER_PUD; u++, pud++) { - if (pud_none(*pud)) - continue; - pmd = pmd_offset(pud, 0); - if (PTRS_PER_PMD > 1) /* not folded */ -- seq = mm_walk_set_prot(pmd,flags,cpu,seq); -+ seq = pgd_walk_set_prot(pmd,flags,cpu,seq); - for (m = 0; m < PTRS_PER_PMD; m++, pmd++) { - if (pmd_none(*pmd)) - continue; - pte = pte_offset_kernel(pmd,0); -- seq = mm_walk_set_prot(pte,flags,cpu,seq); -+ seq = pgd_walk_set_prot(pte,flags,cpu,seq); - } - } - } -@@ -148,12 +144,12 @@ static void mm_walk(struct mm_struct *mm - seq = 0; + /* Work around errata */ + srat_detect_node(c); +--- head-2011-03-17.orig/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 10:02:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 10:04:17.000000000 +0100 +@@ -290,8 +290,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_ + eax->split.type = types[leaf]; + eax->split.level = levels[leaf]; + eax->split.num_threads_sharing = 0; ++#ifndef CONFIG_XEN + eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; +- ++#endif + + if (assoc == 0xffff) + eax->split.is_fully_associative = 1; +@@ -578,8 +579,8 @@ unsigned int __cpuinit init_intel_cachei + unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; + unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ + unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ +- unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; + #ifdef CONFIG_X86_HT ++ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; + unsigned int cpu = c->cpu_index; + #endif + +@@ -613,16 +614,20 @@ unsigned int __cpuinit init_intel_cachei + break; + case 2: + new_l2 = this_leaf.size/1024; ++#ifdef CONFIG_X86_HT + num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; + index_msb = get_count_order(num_threads_sharing); + l2_id = c->apicid >> index_msb; 
++#endif + break; + case 3: + new_l3 = this_leaf.size/1024; ++#ifdef CONFIG_X86_HT + num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; + index_msb = get_count_order( + num_threads_sharing); + l3_id = c->apicid >> index_msb; ++#endif + break; + default: + break; +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2011-01-31 17:56:27.000000000 +0100 +@@ -17,7 +17,7 @@ static int convert_log(struct mc_info *m + x86_mcinfo_lookup(mic, mi, MC_TYPE_GLOBAL); + if (mic == NULL) + { +- printk(KERN_ERR "DOM0_MCE_LOG: global data is NULL\n"); ++ pr_err("DOM0_MCE_LOG: global data is NULL\n"); + return -1; } - MULTI_update_va_mapping(mcl + seq, -- (unsigned long)__user_pgd(mm->pgd), -- pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags), -+ (unsigned long)__user_pgd(pgd_base), -+ pfn_pte(virt_to_phys(__user_pgd(pgd_base))>>PAGE_SHIFT, flags), - 0); - MULTI_update_va_mapping(mcl + seq + 1, -- (unsigned long)mm->pgd, -- pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags), -+ (unsigned long)pgd_base, -+ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags), - UVMF_TLB_FLUSH); - if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL))) - BUG(); -@@ -161,21 +157,35 @@ static void mm_walk(struct mm_struct *mm - put_cpu(); - } -+static void __pgd_pin(pgd_t *pgd) -+{ -+ pgd_walk(pgd, PAGE_KERNEL_RO); -+ xen_pgd_pin(__pa(pgd)); /* kernel */ -+ xen_pgd_pin(__pa(__user_pgd(pgd))); /* user */ -+ SetPagePinned(virt_to_page(pgd)); -+} -+ -+static void __pgd_unpin(pgd_t *pgd) -+{ -+ xen_pgd_unpin(__pa(pgd)); -+ xen_pgd_unpin(__pa(__user_pgd(pgd))); -+ pgd_walk(pgd, PAGE_KERNEL); -+ ClearPagePinned(virt_to_page(pgd)); -+} -+ -+void pgd_test_and_unpin(pgd_t *pgd) -+{ -+ if (PagePinned(virt_to_page(pgd))) -+ __pgd_unpin(pgd); -+} +@@ -75,7 +75,7 @@ urgent: + { + result = convert_log(g_mi); + if (result) { +- printk(KERN_ERR "MCE_DOM0_LOG: Log conversion failed\n"); ++ pr_err("MCE_DOM0_LOG: Log conversion failed\n"); + goto end; + } + /* After fetching the telem from DOM0, we need to dec the telem's +@@ -100,7 +100,7 @@ nonurgent: + { + result = convert_log(g_mi); + if (result) { +- printk(KERN_ERR "MCE_DOM0_LOG: Log conversion failed\n"); ++ pr_err("MCE_DOM0_LOG: Log conversion failed\n"); + goto end; + } + /* After fetching the telem from DOM0, we need to dec the telem's +@@ -125,7 +125,7 @@ void bind_virq_for_mce(void) + + g_mi = kmalloc(sizeof(struct mc_info), GFP_KERNEL); + if (ret < 0) +- printk(KERN_ERR "MCE_DOM0_LOG: bind_virq for DOM0 failed\n"); ++ pr_err("MCE_DOM0_LOG: bind_virq for DOM0 failed\n"); + + /* Log the machine checks left over from the previous reset. */ + mce_dom0_interrupt(VIRQ_MCA, NULL); +--- head-2011-03-17.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -52,6 +52,13 @@ struct resource code_resource = { + .flags = IORESOURCE_BUSY | IORESOURCE_MEM + }; + ++struct resource bss_resource = { ++ .name = "Kernel bss", ++ .start = 0, ++ .end = 0, ++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM ++}; + - void mm_pin(struct mm_struct *mm) + static struct resource system_rom_resource = { + .name = "System ROM", + .start = 0xf0000, +@@ -266,7 +273,9 @@ static struct e820map machine_e820; + * and also for regions reported as reserved by the e820. 
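Editorial sketch, not part of the patch: the init_intel_cacheinfo() hunk above sizes each cache and derives l2_id/l3_id from the APIC id and the "threads sharing" field of CPUID leaf 4 (the amd_cpuid4() helper visible above synthesizes the same layout for AMD parts). A user-space illustration of walking that leaf on an Intel CPU:

#include <stdio.h>

static void cpuid_count(unsigned int leaf, unsigned int subleaf,
                        unsigned int *a, unsigned int *b,
                        unsigned int *c, unsigned int *d)
{
    asm volatile("cpuid"
                 : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                 : "a" (leaf), "c" (subleaf));
}

int main(void)
{
    for (unsigned int i = 0; i < 16; i++) {
        unsigned int eax, ebx, ecx, edx;

        cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
        unsigned int type = eax & 0x1f;                 /* 0 = no more caches */
        if (type == 0)
            break;

        unsigned int level   = (eax >> 5) & 0x7;
        unsigned int sharing = ((eax >> 14) & 0xfff) + 1;  /* threads sharing */
        unsigned int line    = (ebx & 0xfff) + 1;
        unsigned int parts   = ((ebx >> 12) & 0x3ff) + 1;
        unsigned int ways    = ((ebx >> 22) & 0x3ff) + 1;
        unsigned int sets    = ecx + 1;
        (void)edx;

        printf("L%u %s: %u KB, shared by %u thread(s)\n",
               level, type == 1 ? "data" : type == 2 ? "inst" : "unified",
               ways * parts * line * sets / 1024, sharing);
    }
    return 0;
}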
+ */ + static void __init +-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource) ++legacy_init_iomem_resources(struct resource *code_resource, ++ struct resource *data_resource, ++ struct resource *bss_resource) { - if (xen_feature(XENFEAT_writable_page_tables)) - return; - - pin_lock(mm); -- -- mm_walk(mm, PAGE_KERNEL_RO); -- xen_pgd_pin(__pa(mm->pgd)); /* kernel */ -- xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */ -- SetPagePinned(virt_to_page(mm->pgd)); -- spin_lock(&mm_unpinned_lock); -- list_del(&mm->context.unpinned); -- spin_unlock(&mm_unpinned_lock); -- -+ __pgd_pin(mm->pgd); - pin_unlock(mm); - } - -@@ -185,34 +195,30 @@ void mm_unpin(struct mm_struct *mm) - return; + int i; - pin_lock(mm); -- -- xen_pgd_unpin(__pa(mm->pgd)); -- xen_pgd_unpin(__pa(__user_pgd(mm->pgd))); -- mm_walk(mm, PAGE_KERNEL); -- ClearPagePinned(virt_to_page(mm->pgd)); -- spin_lock(&mm_unpinned_lock); -- list_add(&mm->context.unpinned, &mm_unpinned); -- spin_unlock(&mm_unpinned_lock); -- -+ __pgd_unpin(mm->pgd); - pin_unlock(mm); - } +@@ -300,9 +309,11 @@ legacy_init_iomem_resources(struct resou + #ifndef CONFIG_XEN + request_resource(res, code_resource); + request_resource(res, data_resource); ++ request_resource(res, bss_resource); + #endif + #ifdef CONFIG_KEXEC +- request_resource(res, &crashk_res); ++ if (crashk_res.start != crashk_res.end) ++ request_resource(res, &crashk_res); + #ifdef CONFIG_XEN + xen_machine_kexec_register_resources(res); + #endif +@@ -329,9 +340,11 @@ static int __init request_standard_resou - void mm_pin_all(void) - { -+ struct page *page; -+ unsigned long flags; -+ - if (xen_feature(XENFEAT_writable_page_tables)) - return; + printk("Setting up standard PCI resources\n"); + if (efi_enabled) +- efi_initialize_iomem_resources(&code_resource, &data_resource); ++ efi_initialize_iomem_resources(&code_resource, ++ &data_resource, &bss_resource); + else +- legacy_init_iomem_resources(&code_resource, &data_resource); ++ legacy_init_iomem_resources(&code_resource, ++ &data_resource, &bss_resource); + /* EFI systems may still have VGA */ + request_resource(&iomem_resource, &video_ram_resource); +@@ -761,7 +774,7 @@ void __init e820_register_memory(void) + #define e820 machine_e820 + #endif /* -- * Allow uninterrupted access to the mm_unpinned list. We don't -- * actually take the mm_unpinned_lock as it is taken inside mm_pin(). -+ * Allow uninterrupted access to the pgd_list. Also protects -+ * __pgd_pin() by disabling preemption. - * All other CPUs must be at a safe point (e.g., in stop_machine - * or offlined entirely). +- * Search for the bigest gap in the low 32 bits of the e820 ++ * Search for the biggest gap in the low 32 bits of the e820 + * memory space. 
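Editorial sketch, not part of the patch: the comment corrected above ("Search for the biggest gap in the low 32 bits of the e820 memory space") belongs to the scan that decides where PCI resources start. A stand-alone illustration of the same scan over a made-up table (values and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define E820_RAM 1

struct e820entry { uint64_t addr, size; uint32_t type; };

static const struct e820entry map[] = {
    { 0x0000000000000000ULL, 0x000000000009fc00ULL, E820_RAM },
    { 0x0000000000100000ULL, 0x00000000bfee0000ULL, E820_RAM },
    { 0x0000000100000000ULL, 0x0000000040000000ULL, E820_RAM },
};

int main(void)
{
    const uint64_t limit = 1ULL << 32;      /* only look below 4 GiB */
    uint64_t last = limit, gap_start = 0, gap_size = 0;
    int i;

    /* Walk the map backwards and remember the biggest hole below 4 GiB. */
    for (i = (int)(sizeof(map) / sizeof(map[0])) - 1; i >= 0; i--) {
        uint64_t start = map[i].addr;
        uint64_t end = start + map[i].size;

        if (start >= limit)
            continue;
        if (end < last && last - end > gap_size) {
            gap_start = end;
            gap_size = last - end;
        }
        last = start;
    }
    printf("biggest gap: %llu MB at 0x%llx\n",
           (unsigned long long)(gap_size >> 20),
           (unsigned long long)gap_start);
    return 0;
}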
*/ -- preempt_disable(); -- while (!list_empty(&mm_unpinned)) -- mm_pin(list_entry(mm_unpinned.next, struct mm_struct, -- context.unpinned)); -- preempt_enable(); -+ spin_lock_irqsave(&pgd_lock, flags); -+ list_for_each_entry(page, &pgd_list, lru) { -+ if (!PagePinned(page)) -+ __pgd_pin((pgd_t *)page_address(page)); -+ } -+ spin_unlock_irqrestore(&pgd_lock, flags); - } - - void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) -@@ -331,11 +337,11 @@ static struct page *split_large_page(uns - return base; - } - --static void cache_flush_page(void *adr) -+void clflush_cache_range(void *adr, int size) - { - int i; -- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) -- asm volatile("clflush (%0)" :: "r" (adr + i)); -+ for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size) -+ clflush(adr+i); - } + last = 0x100000000ull; +--- head-2011-03-17.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -24,7 +24,7 @@ + #include + #include + #include +-#include ++#include + #include + #include - static void flush_kernel_map(void *arg) -@@ -350,7 +356,7 @@ static void flush_kernel_map(void *arg) - asm volatile("wbinvd" ::: "memory"); - else list_for_each_entry(pg, l, lru) { - void *adr = page_address(pg); -- cache_flush_page(adr); -+ clflush_cache_range(adr, PAGE_SIZE); - } - __flush_tlb_all(); - } -@@ -418,6 +424,7 @@ __change_page_attr(unsigned long address - split = split_large_page(address, prot, ref_prot2); - if (!split) - return -ENOMEM; -+ pgprot_val(ref_prot2) &= ~_PAGE_NX; - set_pte(kpte, mk_pte(split, ref_prot2)); - kpte_page = split; - } -@@ -510,9 +517,14 @@ void global_flush_tlb(void) - struct page *pg, *next; - struct list_head l; +@@ -53,7 +53,7 @@ unsigned long end_pfn_map; + */ + static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT; -- down_read(&init_mm.mmap_sem); -+ /* -+ * Write-protect the semaphore, to exclude two contexts -+ * doing a list_replace_init() call in parallel and to -+ * exclude new additions to the deferred_pages list: -+ */ -+ down_write(&init_mm.mmap_sem); - list_replace_init(&deferred_pages, &l); -- up_read(&init_mm.mmap_sem); -+ up_write(&init_mm.mmap_sem); +-extern struct resource code_resource, data_resource; ++extern struct resource code_resource, data_resource, bss_resource; - flush_map(&l); + /* Check for some hardcoded bad areas that early boot is not allowed to touch */ + static inline int bad_addr(unsigned long *addrp, unsigned long size) +@@ -75,10 +75,15 @@ static inline int bad_addr(unsigned long ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -6,6 +6,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -46,6 +47,8 @@ void show_mem(void) - for_each_online_pgdat(pgdat) { - pgdat_resize_lock(pgdat, &flags); - for (i = 0; i < pgdat->node_spanned_pages; ++i) { -+ if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) -+ touch_nmi_watchdog(); - page = pgdat_page_nr(pgdat, i); - total++; - if (PageHighMem(page)) -@@ -206,7 +209,7 @@ void pte_free(struct page *pte) - __free_page(pte); - } - --void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags) -+void pmd_ctor(struct kmem_cache *cache, void *pmd) - { - memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); - } ---- head-2010-05-25.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ 
head-2010-05-25/arch/x86/pci/irq-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -173,7 +173,7 @@ void eisa_set_level_irq(unsigned int irq - } - - /* -- * Common IRQ routing practice: nybbles in config space, -+ * Common IRQ routing practice: nibbles in config space, - * offset by some magic constant. - */ - static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) -@@ -496,6 +496,26 @@ static int pirq_amd756_set(struct pci_de - return 1; - } - -+/* -+ * PicoPower PT86C523 -+ */ -+static int pirq_pico_get(struct pci_dev *router, struct pci_dev *dev, int pirq) -+{ -+ outb(0x10 + ((pirq - 1) >> 1), 0x24); -+ return ((pirq - 1) & 1) ? (inb(0x26) >> 4) : (inb(0x26) & 0xf); -+} -+ -+static int pirq_pico_set(struct pci_dev *router, struct pci_dev *dev, int pirq, -+ int irq) -+{ -+ unsigned int x; -+ outb(0x10 + ((pirq - 1) >> 1), 0x24); -+ x = inb(0x26); -+ x = ((pirq - 1) & 1) ? ((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | (irq)); -+ outb(x, 0x26); -+ return 1; -+} + /* initrd */ + #ifdef CONFIG_BLK_DEV_INITRD +- if (LOADER_TYPE && INITRD_START && last >= INITRD_START && +- addr < INITRD_START+INITRD_SIZE) { +- *addrp = PAGE_ALIGN(INITRD_START + INITRD_SIZE); +- return 1; ++ if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { ++ unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; ++ unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; ++ unsigned long ramdisk_end = ramdisk_image+ramdisk_size; + - #ifdef CONFIG_PCI_BIOS - - static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) -@@ -569,7 +589,7 @@ static __init int via_router_probe(struc - /* FIXME: We should move some of the quirk fixup stuff here */ - - /* -- * work arounds for some buggy BIOSes -+ * workarounds for some buggy BIOSes ++ if (last >= ramdisk_image && addr < ramdisk_end) { ++ *addrp = PAGE_ALIGN(ramdisk_end); ++ return 1; ++ } + } + #endif + /* kernel code */ +@@ -251,6 +256,7 @@ void __init e820_reserve_resources(struc + #ifndef CONFIG_XEN + request_resource(res, &code_resource); + request_resource(res, &data_resource); ++ request_resource(res, &bss_resource); + #endif + #ifdef CONFIG_KEXEC + if (crashk_res.start != crashk_res.end) +@@ -661,8 +667,8 @@ void __init setup_memory_region(void) + * Otherwise fake a memory map; one section from 0k->640k, + * the next section from 1mb->appropriate_mem_k */ - if (device == PCI_DEVICE_ID_VIA_82C586_0) { - switch(router->device) { -@@ -725,6 +745,24 @@ static __init int amd_router_probe(struc - return 1; +- sanitize_e820_map(E820_MAP, &E820_MAP_NR); +- if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) ++ sanitize_e820_map(boot_params.e820_map, &boot_params.e820_entries); ++ if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0) + early_panic("Cannot find a valid memory map"); + printk(KERN_INFO "BIOS-provided physical RAM map:\n"); + e820_print_map("BIOS-e820"); +@@ -847,3 +853,22 @@ __init void e820_setup_gap(struct e820en + printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", + pci_mem_start, gapstart, gapsize); } - -+static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) ++ ++int __init arch_get_ram_range(int slot, u64 *addr, u64 *size) +{ -+ switch (device) { -+ case PCI_DEVICE_ID_PICOPOWER_PT86C523: -+ r->name = "PicoPower PT86C523"; -+ r->get = pirq_pico_get; -+ r->set = pirq_pico_set; -+ return 1; ++ int i; + -+ case PCI_DEVICE_ID_PICOPOWER_PT86C523BBP: -+ r->name = "PicoPower PT86C523 rev. 
BB+"; -+ r->get = pirq_pico_get; -+ r->set = pirq_pico_set; -+ return 1; ++ if (slot < 0 || slot >= e820.nr_map) ++ return -1; ++ for (i = slot; i < e820.nr_map; i++) { ++ if (e820.map[i].type != E820_RAM) ++ continue; ++ break; + } -+ return 0; ++ if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT)) ++ return -1; ++ *addr = e820.map[i].addr; ++ *size = min_t(u64, e820.map[i].size + e820.map[i].addr, ++ max_pfn << PAGE_SHIFT) - *addr; ++ return i + 1; +} +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -6,15 +6,10 @@ + #include + #include + #include +- +-/* Simple VGA output */ +- +-#ifdef __i386__ + #include +-#else +-#include +-#endif + - static __initdata struct irq_router_handler pirq_routers[] = { - { PCI_VENDOR_ID_INTEL, intel_router_probe }, - { PCI_VENDOR_ID_AL, ali_router_probe }, -@@ -736,6 +774,7 @@ static __initdata struct irq_router_hand - { PCI_VENDOR_ID_VLSI, vlsi_router_probe }, - { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe }, - { PCI_VENDOR_ID_AMD, amd_router_probe }, -+ { PCI_VENDOR_ID_PICOPOWER, pico_router_probe }, - /* Someone with docs needs to add the ATI Radeon IGP */ - { 0, NULL } - }; -@@ -1014,7 +1053,7 @@ static void __init pcibios_fixup_irqs(vo - * Work around broken HP Pavilion Notebooks which assign USB to - * IRQ 9 even though it is actually wired to IRQ 11 - */ --static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d) -+static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d) - { - if (!broken_hp_bios_irq9) { - broken_hp_bios_irq9 = 1; -@@ -1027,7 +1066,7 @@ static int __init fix_broken_hp_bios_irq - * Work around broken Acer TravelMate 360 Notebooks which assign - * Cardbus to IRQ 11 even though it is actually wired to IRQ 10 - */ --static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d) -+static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) - { - if (!acer_tm360_irqrouting) { - acer_tm360_irqrouting = 1; ---- head-2010-05-25.orig/drivers/acpi/processor_idle.c 2010-04-15 09:43:01.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_idle.c 2010-04-15 09:55:39.000000000 +0200 -@@ -1159,6 +1159,14 @@ int acpi_processor_cst_has_changed(struc - if (!pr->flags.power_setup_done) - return -ENODEV; + #ifndef CONFIG_XEN ++/* Simple VGA output */ + #define VGABASE (__ISA_IO_base + 0xb8000) -+ if (processor_pm_external()) { -+ pr->flags.power = 0; -+ ret = acpi_processor_get_power_info(pr); -+ processor_notify_external(pr, -+ PROCESSOR_PM_CHANGE, PM_TYPE_IDLE); -+ return ret; -+ } -+ - cpuidle_pause_and_lock(); - cpuidle_disable_device(&pr->power.dev); - acpi_processor_get_power_info(pr); ---- head-2010-05-25.orig/drivers/cpuidle/Kconfig 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/cpuidle/Kconfig 2010-03-24 15:10:29.000000000 +0100 -@@ -1,6 +1,7 @@ + static int max_ypos = 25, max_xpos = 80; +@@ -264,10 +259,10 @@ static int __init setup_early_printk(cha + early_console = &early_serial_console; + } else if (!strncmp(buf, "vga", 3)) { + #ifndef CONFIG_XEN +- && SCREEN_INFO.orig_video_isVGA == 1) { +- max_xpos = SCREEN_INFO.orig_video_cols; +- max_ypos = SCREEN_INFO.orig_video_lines; +- current_ypos = SCREEN_INFO.orig_y; ++ && boot_params.screen_info.orig_video_isVGA == 1) { ++ max_xpos = boot_params.screen_info.orig_video_cols; ++ max_ypos = boot_params.screen_info.orig_video_lines; ++ current_ypos = 
boot_params.screen_info.orig_y; + #endif + early_console = &early_vga_console; + } else if (!strncmp(buf, "simnow", 6)) { +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:56:27.000000000 +0100 +@@ -254,6 +254,7 @@ check_userspace: + jb resume_kernel # not returning to v8086 or userspace - config CPU_IDLE - bool "CPU idle PM support" -+ depends on !PROCESSOR_EXTERNAL_CONTROL - default ACPI - help - CPU idle is a generic framework for supporting software-controlled ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:10:29.000000000 +0100 -@@ -260,6 +260,12 @@ static int msi_map_vector(struct pci_dev - map_irq.pirq : evtchn_map_pirq(-1, map_irq.pirq)); - } + ENTRY(resume_userspace) ++ LOCKDEP_SYS_EXIT + DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt + # setting need_resched or sigpending + # between sampling and the iret +@@ -341,6 +342,7 @@ sysenter_past_esp: + jae syscall_badsys + call *sys_call_table(,%eax,4) + movl %eax,PT_EAX(%esp) ++ LOCKDEP_SYS_EXIT + DISABLE_INTERRUPTS(CLBR_ANY) + TRACE_IRQS_OFF + movl TI_flags(%ebp), %ecx +@@ -406,6 +408,7 @@ syscall_call: + call *sys_call_table(,%eax,4) + movl %eax,PT_EAX(%esp) # store the return value + syscall_exit: ++ LOCKDEP_SYS_EXIT + DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt + # setting need_resched or sigpending + # between sampling and the iret +@@ -478,7 +481,7 @@ ldt_ss: + * is still available to implement the setting of the high + * 16-bits in the INTERRUPT_RETURN paravirt-op. + */ +- cmpl $0, paravirt_ops+PARAVIRT_enabled ++ cmpl $0, pv_info+PARAVIRT_enabled + jne restore_nocheck + #endif -+static void pci_intx_for_msi(struct pci_dev *dev, int enable) -+{ -+ if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) -+ pci_intx(dev, enable); -+} -+ - #ifdef CONFIG_PM - void pci_restore_msi_state(struct pci_dev *dev) - { -@@ -269,7 +275,7 @@ void pci_restore_msi_state(struct pci_de - if (!dev->msi_enabled && !dev->msix_enabled) - return; +@@ -540,6 +543,7 @@ work_pending: + jz work_notifysig + work_resched: + call schedule ++ LOCKDEP_SYS_EXIT + DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt + # setting need_resched or sigpending + # between sampling and the iret +@@ -1268,6 +1272,6 @@ ENTRY(kernel_thread_helper) + ENDPROC(kernel_thread_helper) -- pci_intx(dev, 0); /* disable intx */ -+ pci_intx_for_msi(dev, 0); - if (dev->msi_enabled) - msi_set_enable(dev, 0); - if (dev->msix_enabled) -@@ -306,7 +312,7 @@ static int msi_capability_init(struct pc - return -EBUSY; + .section .rodata,"a" +-#include "syscall_table.S" ++#include "syscall_table_32.S" - /* Set MSI enabled bits */ -- pci_intx(dev, 0); /* disable intx */ -+ pci_intx_for_msi(dev, 0); - msi_set_enable(dev, 1); - dev->msi_enabled = 1; + syscall_table_size=(.-sys_call_table) +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:56:27.000000000 +0100 +@@ -57,7 +57,7 @@ + #include + #include -@@ -380,7 +386,7 @@ static int msix_capability_init(struct p - return avail; - } +-#include "xen_entry.S" ++#include "xen_entry_64.S" + + .code64 -- pci_intx(dev, 0); /* disable intx */ -+ pci_intx_for_msi(dev, 0); - msix_set_enable(dev, 1); - dev->msix_enabled = 1; +@@ -275,6 +275,7 @@ ret_from_sys_call: + movl 
$_TIF_ALLWORK_MASK,%edi + /* edi: flagmask */ + sysret_check: ++ LOCKDEP_SYS_EXIT + GET_THREAD_INFO(%rcx) + XEN_BLOCK_EVENTS(%rsi) + TRACE_IRQS_OFF +@@ -365,6 +366,7 @@ int_ret_from_sys_call: + movl $_TIF_ALLWORK_MASK,%edi + /* edi: mask to check */ + int_with_check: ++ LOCKDEP_SYS_EXIT_IRQ + GET_THREAD_INFO(%rcx) + movl threadinfo_flags(%rcx),%edx + andl %edi,%edx +@@ -516,11 +518,12 @@ END(stub_rt_sigreturn) -@@ -516,7 +522,7 @@ void pci_disable_msi(struct pci_dev* dev + retint_check: + CFI_DEFAULT_STACK adj=1 ++ LOCKDEP_SYS_EXIT_IRQ + movl threadinfo_flags(%rcx),%edx + andl %edi,%edx + CFI_REMEMBER_STATE + jnz retint_careful +-retint_restore_args: ++retint_restore_args: /* return to kernel space */ + movl EFLAGS-REST_SKIP(%rsp), %eax + shr $9, %eax # EAX[0] == IRET_EFLAGS.IF + XEN_GET_VCPU_INFO(%rsi) +@@ -841,7 +844,7 @@ error_call_handler: + movq ORIG_RAX(%rsp),%rsi # get error code + movq $-1,ORIG_RAX(%rsp) + call *%rax +-error_exit: ++error_exit: + RESTORE_REST + /* cli */ + XEN_BLOCK_EVENTS(%rsi) +@@ -849,14 +852,11 @@ error_exit: + GET_THREAD_INFO(%rcx) + testb $3,CS-ARGOFFSET(%rsp) + jz retint_kernel ++ LOCKDEP_SYS_EXIT_IRQ + movl threadinfo_flags(%rcx),%edx + movl $_TIF_WORK_MASK,%edi + andl %edi,%edx + jnz retint_careful +- /* +- * The iret might restore flags: +- */ +- TRACE_IRQS_IRETQ + jmp retint_restore_args - /* Disable MSI mode */ - msi_set_enable(dev, 0); -- pci_intx(dev, 1); /* enable intx */ -+ pci_intx_for_msi(dev, 1); - dev->msi_enabled = 0; - } - EXPORT_SYMBOL(pci_disable_msi); -@@ -653,7 +659,7 @@ void pci_disable_msix(struct pci_dev* de + #if 0 +@@ -1071,7 +1071,7 @@ child_rip: + movq %rsi, %rdi + call *%rax + # exit +- xorl %edi, %edi ++ mov %eax, %edi + call do_exit + CFI_ENDPROC + ENDPROC(child_rip) +--- head-2011-03-17.orig/arch/x86/kernel/fixup.c 2008-01-28 12:24:18.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/fixup.c 2011-01-31 17:56:27.000000000 +0100 +@@ -34,7 +34,7 @@ + #include + #include - /* Disable MSI mode */ - msix_set_enable(dev, 0); -- pci_intx(dev, 1); /* enable intx */ -+ pci_intx_for_msi(dev, 1); - dev->msix_enabled = 0; - } - EXPORT_SYMBOL(pci_disable_msix); ---- head-2010-05-25.orig/drivers/oprofile/cpu_buffer.c 2010-03-24 15:02:17.000000000 +0100 -+++ head-2010-05-25/drivers/oprofile/cpu_buffer.c 2010-03-24 15:10:29.000000000 +0100 -@@ -442,6 +442,39 @@ void oprofile_add_pc(unsigned long pc, i - log_sample(cpu_buf, pc, 0, is_kernel, event); - } +-#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args ) ++#define DP(_f, _args...) pr_alert(" " _f "\n" , ## _args ) -+#ifdef CONFIG_XEN -+/* -+ * This is basically log_sample(b, ESCAPE_CODE, cpu_mode, CPU_TRACE_BEGIN), -+ * as was previously accessible through oprofile_add_pc(). 
-+ */ -+void oprofile_add_mode(int cpu_mode) -+{ -+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); -+ struct task_struct *task; -+ -+ if (nr_available_slots(cpu_buf) < 3) { -+ cpu_buf->sample_lost_overflow++; -+ return; -+ } -+ -+ task = current; -+ -+ /* notice a switch from user->kernel or vice versa */ -+ if (cpu_buf->last_cpu_mode != cpu_mode) { -+ cpu_buf->last_cpu_mode = cpu_mode; -+ add_code(cpu_buf, cpu_mode); -+ } -+ -+ /* notice a task switch */ -+ if (cpu_buf->last_task != task) { -+ cpu_buf->last_task = task; -+ add_code(cpu_buf, (unsigned long)task); -+ } -+ -+ add_code(cpu_buf, CPU_TRACE_BEGIN); -+} -+#endif -+ - void oprofile_add_trace(unsigned long pc) + fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code) { - struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); ---- head-2010-05-25.orig/drivers/xen/balloon/balloon.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/balloon/balloon.c 2010-03-24 15:10:29.000000000 +0100 -@@ -324,6 +324,8 @@ static int increase_reservation(unsigned - - #ifndef MODULE - setup_per_zone_pages_min(); -+ if (rc > 0) -+ kswapd_run(0); - if (need_zonelists_rebuild) - build_all_zonelists(); - else ---- head-2010-05-25.orig/drivers/xen/blkback/blkback.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkback/blkback.c 2010-03-24 15:10:29.000000000 +0100 -@@ -275,13 +275,10 @@ static void __end_block_io_op(pending_re - } +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,5 +1,5 @@ + /* +- * linux/arch/x86_64/kernel/head64.c -- prepare to run common code ++ * prepare to run common code + * + * Copyright (C) 2000 Andrea Arcangeli SuSE + * +@@ -21,7 +21,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -47,27 +46,16 @@ static void __init clear_bss(void) } + #endif --static int end_block_io_op(struct bio *bio, unsigned int done, int error) -+static void end_block_io_op(struct bio *bio, int error) +-#define NEW_CL_POINTER 0x228 /* Relative to real mode data */ +-#define OLD_CL_MAGIC_ADDR 0x20 +-#define OLD_CL_MAGIC 0xA33F +-#define OLD_CL_OFFSET 0x22 +- + static void __init copy_bootdata(char *real_mode_data) { -- if (bio->bi_size != 0) -- return 1; - __end_block_io_op(bio->bi_private, error); - bio_put(bio); -- return error; - } + #ifndef CONFIG_XEN +- unsigned long new_data; + char * command_line; +- memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE); +- new_data = *(u32 *) (x86_boot_params + NEW_CL_POINTER); +- if (!new_data) { +- if (OLD_CL_MAGIC != *(u16 *)(real_mode_data + OLD_CL_MAGIC_ADDR)) { +- return; +- } +- new_data = __pa(real_mode_data) + *(u16 *)(real_mode_data + OLD_CL_OFFSET); ++ memcpy(&boot_params, real_mode_data, sizeof boot_params); ++ if (boot_params.hdr.cmd_line_ptr) { ++ command_line = __va(boot_params.hdr.cmd_line_ptr); ++ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); + } +- command_line = __va(new_data); +- memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); + #else + int max_cmdline; + +@@ -117,7 +105,7 @@ void __init x86_64_start_kernel(char * r ---- head-2010-05-25.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/blkfront.c 2010-03-24 15:10:29.000000000 +0100 -@@ -233,7 +233,7 @@ static int setup_blkring(struct xenbus_d - SHARED_RING_INIT(sring); - FRONT_RING_INIT(&info->ring, 
sring, PAGE_SIZE); + for (i = 0; i < IDT_ENTRIES; i++) + set_intr_gate(i, early_idt_handler); +- asm volatile("lidt %0" :: "m" (idt_descr)); ++ load_idt((const struct desc_ptr *)&idt_descr); + #endif -- memset(info->sg, 0, sizeof(info->sg)); -+ sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); + early_printk("Kernel alive\n"); +--- head-2011-03-17.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -427,7 +427,7 @@ static struct irq_cpu_info { - err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); - if (err < 0) { -@@ -651,9 +651,8 @@ static int blkif_queue_request(struct re + #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask) - ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); - BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); -- for (i = 0; i < ring_req->nr_segments; ++i) { -- sg = info->sg + i; -- buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT; -+ for_each_sg(info->sg, sg, ring_req->nr_segments, i) { -+ buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; - fsect = sg->offset >> 9; - lsect = fsect + (sg->length >> 9) - 1; - /* install a grant reference. */ ---- head-2010-05-25.orig/drivers/xen/blktap2/control.c 2010-05-19 17:51:54.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap2/control.c 2010-03-24 15:10:29.000000000 +0100 -@@ -18,6 +18,7 @@ blktap_control_initialize_tap(struct blk - memset(tap, 0, sizeof(*tap)); - set_bit(BLKTAP_CONTROL, &tap->dev_inuse); - init_rwsem(&tap->tap_sem); -+ sg_init_table(tap->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); - init_waitqueue_head(&tap->wq); - atomic_set(&tap->refcnt, 0); +-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i])) ++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i))) ---- head-2010-05-25.orig/drivers/xen/blktap2/device.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/device.c 2010-03-24 15:10:29.000000000 +0100 -@@ -665,8 +665,7 @@ blktap_device_process_request(struct blk - request->nr_pages = 0; - blkif_req.nr_segments = blk_rq_map_sg(req->q, req, tap->sg); - BUG_ON(blkif_req.nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); -- for (i = 0; i < blkif_req.nr_segments; ++i) { -- sg = tap->sg + i; -+ for_each_sg(tap->sg, sg, blkif_req.nr_segments, i) { - fsect = sg->offset >> 9; - lsect = fsect + (sg->length >> 9) - 1; - nr_sects += sg->length >> 9; -@@ -677,13 +676,13 @@ blktap_device_process_request(struct blk - .first_sect = fsect, - .last_sect = lsect }; + static cpumask_t balance_irq_affinity[NR_IRQS] = { + [0 ... NR_IRQS-1] = CPU_MASK_ALL +@@ -633,7 +633,7 @@ tryanotherirq: -- if (PageBlkback(sg->page)) { -+ if (PageBlkback(sg_page(sg))) { - /* foreign page -- use xen */ - if (blktap_prep_foreign(tap, - request, - &blkif_req, - i, -- sg->page, -+ sg_page(sg), - &table)) - goto out; - } else { -@@ -691,7 +690,7 @@ blktap_device_process_request(struct blk - if (blktap_map(tap, - request, - i, -- sg->page)) -+ sg_page(sg))) - goto out; - } + imbalance = move_this_load; + +- /* For physical_balance case, we accumlated both load ++ /* For physical_balance case, we accumulated both load + * values in the one of the siblings cpu_irq[], + * to use the same code for physical and logical processors + * as much as possible. 
+@@ -647,7 +647,7 @@ tryanotherirq: + * (A+B)/2 vs B + */ + load = CPU_IRQ(min_loaded) >> 1; +- for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) { ++ for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) { + if (load > CPU_IRQ(j)) { + /* This won't change cpu_sibling_map[min_loaded] */ + load = CPU_IRQ(j); +@@ -1018,7 +1018,7 @@ static int EISA_ELCR(unsigned int irq) + #define default_MCA_trigger(idx) (1) + #define default_MCA_polarity(idx) (0) ---- head-2010-05-25.orig/drivers/xen/core/firmware.c 2007-06-22 09:08:06.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/firmware.c 2010-03-24 15:10:29.000000000 +0100 -@@ -1,4 +1,5 @@ - #include -+#include - #include - #include - #include ---- head-2010-05-25.orig/drivers/xen/core/machine_kexec.c 2009-07-13 14:25:35.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/machine_kexec.c 2010-03-24 15:10:29.000000000 +0100 -@@ -29,6 +29,10 @@ void __init xen_machine_kexec_setup_reso - int k = 0; - int rc; +-static int __init MPBIOS_polarity(int idx) ++static int MPBIOS_polarity(int idx) + { + int bus = mp_irqs[idx].mpc_srcbus; + int polarity; +@@ -1347,6 +1347,11 @@ static void __init setup_IO_APIC_irqs(vo + continue; + } -+ if (strstr(boot_command_line, "crashkernel=")) -+ printk(KERN_WARNING "Ignoring crashkernel command line, " -+ "parameter will be supplied by xen\n"); ++ if (!first_notcon) { ++ apic_printk(APIC_VERBOSE, " not connected.\n"); ++ first_notcon = 1; ++ } + - if (!is_initial_xendomain()) - return; + entry.trigger = irq_trigger(idx); + entry.polarity = irq_polarity(idx); -@@ -130,6 +134,13 @@ void __init xen_machine_kexec_setup_reso - xen_max_nr_phys_cpus)) - goto err; +@@ -1936,13 +1941,16 @@ __setup("no_timer_check", notimercheck); + static int __init timer_irq_works(void) + { + unsigned long t1 = jiffies; ++ unsigned long flags; -+#ifdef CONFIG_X86 -+ if (xen_create_contiguous_region((unsigned long)&vmcoreinfo_note, -+ get_order(sizeof(vmcoreinfo_note)), -+ BITS_PER_LONG)) -+ goto err; -+#endif -+ - return; + if (no_timer_check) + return 1; - err: -@@ -213,6 +224,13 @@ NORET_TYPE void machine_kexec(struct kim - panic("KEXEC_CMD_kexec hypercall should not return\n"); - } ++ local_save_flags(flags); + local_irq_enable(); + /* Let ten ticks pass... */ + mdelay((10 * 1000) / HZ); ++ local_irq_restore(flags); -+#ifdef CONFIG_X86 -+unsigned long paddr_vmcoreinfo_note(void) -+{ -+ return virt_to_machine(&vmcoreinfo_note); -+} -+#endif -+ - void machine_shutdown(void) + /* + * Expect a few ticks at least, to be sure some possible +@@ -2223,6 +2231,9 @@ static inline void __init check_timer(vo { - /* do nothing */ ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:10:29.000000000 +0100 -@@ -45,8 +45,8 @@ cpumask_t cpu_possible_map; - EXPORT_SYMBOL(cpu_possible_map); - cpumask_t cpu_initialized_map; - --struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; --EXPORT_SYMBOL(cpu_data); -+DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info); -+EXPORT_PER_CPU_SYMBOL(cpu_info); - - static DEFINE_PER_CPU(int, resched_irq); - static DEFINE_PER_CPU(int, callfunc_irq); -@@ -55,12 +55,12 @@ static char callfunc_name[NR_CPUS][15]; + int apic1, pin1, apic2, pin2; + int vector; ++ unsigned long flags; ++ ++ local_irq_save(flags); - u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... 
NR_CPUS-1] = BAD_APICID }; + /* + * get/set the timer IRQ vector: +@@ -2268,7 +2279,7 @@ static inline void __init check_timer(vo + } + if (disable_timer_pin_1 > 0) + clear_IO_APIC_pin(0, pin1); +- return; ++ goto out; + } + clear_IO_APIC_pin(apic1, pin1); + printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " +@@ -2291,7 +2302,7 @@ static inline void __init check_timer(vo + if (nmi_watchdog == NMI_IO_APIC) { + setup_nmi(); + } +- return; ++ goto out; + } + /* + * Cleanup, just in case ... +@@ -2315,7 +2326,7 @@ static inline void __init check_timer(vo --cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; --cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; -+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); -+DEFINE_PER_CPU(cpumask_t, cpu_core_map); + if (timer_irq_works()) { + printk(" works.\n"); +- return; ++ goto out; + } + apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); + printk(" failed.\n"); +@@ -2331,11 +2342,13 @@ static inline void __init check_timer(vo - #if defined(__i386__) --u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff }; --EXPORT_SYMBOL(x86_cpu_to_apicid); -+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID; -+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); - #endif + if (timer_irq_works()) { + printk(" works.\n"); +- return; ++ goto out; + } + printk(" failed :(.\n"); + panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " + "report. Then try booting with the 'noapic' option"); ++out: ++ local_irq_restore(flags); + } + #else + int timer_uses_ioapic_pin_0 = 0; +@@ -2353,6 +2366,14 @@ int timer_uses_ioapic_pin_0 = 0; - void __init prefill_possible_map(void) -@@ -85,25 +85,25 @@ void __init smp_alloc_memory(void) - static inline void - set_cpu_sibling_map(unsigned int cpu) + void __init setup_IO_APIC(void) { -- cpu_data[cpu].phys_proc_id = cpu; -- cpu_data[cpu].cpu_core_id = 0; -+ cpu_data(cpu).phys_proc_id = cpu; -+ cpu_data(cpu).cpu_core_id = 0; ++#ifndef CONFIG_XEN ++ int i; ++ ++ /* Reserve all the system vectors. 
*/ ++ for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++) ++ set_bit(i, used_vectors); ++#endif ++ + enable_IO_APIC(); -- cpu_sibling_map[cpu] = cpumask_of_cpu(cpu); -- cpu_core_map[cpu] = cpumask_of_cpu(cpu); -+ per_cpu(cpu_sibling_map, cpu) = cpumask_of_cpu(cpu); -+ per_cpu(cpu_core_map, cpu) = cpumask_of_cpu(cpu); + if (acpi_ioapic) +@@ -2542,7 +2563,7 @@ void destroy_irq(unsigned int irq) + #endif /* CONFIG_XEN */ -- cpu_data[cpu].booted_cores = 1; -+ cpu_data(cpu).booted_cores = 1; - } + /* +- * MSI mesage composition ++ * MSI message composition + */ + #if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN) + static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) +@@ -2745,9 +2766,9 @@ int arch_setup_ht_irq(unsigned int irq, - static void - remove_siblinginfo(unsigned int cpu) + #ifdef CONFIG_ACPI + ++#ifndef CONFIG_XEN + int __init io_apic_get_unique_id (int ioapic, int apic_id) { -- cpu_data[cpu].phys_proc_id = BAD_APICID; -- cpu_data[cpu].cpu_core_id = BAD_APICID; -+ cpu_data(cpu).phys_proc_id = BAD_APICID; -+ cpu_data(cpu).cpu_core_id = BAD_APICID; +-#ifndef CONFIG_XEN + union IO_APIC_reg_00 reg_00; + static physid_mask_t apic_id_map = PHYSID_MASK_NONE; + physid_mask_t tmp; +@@ -2816,10 +2837,10 @@ int __init io_apic_get_unique_id (int io -- cpus_clear(cpu_sibling_map[cpu]); -- cpus_clear(cpu_core_map[cpu]); -+ cpus_clear(per_cpu(cpu_sibling_map, cpu)); -+ cpus_clear(per_cpu(cpu_core_map, cpu)); + apic_printk(APIC_VERBOSE, KERN_INFO + "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); +-#endif /* !CONFIG_XEN */ -- cpu_data[cpu].booted_cores = 0; -+ cpu_data(cpu).booted_cores = 0; + return apic_id; } ++#endif /* !CONFIG_XEN */ - static int __cpuinit xen_smp_intr_init(unsigned int cpu) -@@ -162,9 +162,9 @@ void __cpuinit cpu_bringup(void) - { - cpu_init(); - #ifdef __i386__ -- identify_secondary_cpu(cpu_data + smp_processor_id()); -+ identify_secondary_cpu(¤t_cpu_data); - #else -- identify_cpu(cpu_data + smp_processor_id()); -+ identify_cpu(¤t_cpu_data); - #endif - touch_softlockup_watchdog(); - preempt_disable(); -@@ -265,16 +265,16 @@ void __init smp_prepare_cpus(unsigned in - if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0) - apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id); - boot_cpu_data.apicid = apicid; -- cpu_data[0] = boot_cpu_data; -+ cpu_data(0) = boot_cpu_data; - cpu_2_logical_apicid[0] = apicid; -- x86_cpu_to_apicid[0] = apicid; -+ per_cpu(x86_cpu_to_apicid, 0) = apicid; + int __init io_apic_get_version (int ioapic) +@@ -2899,6 +2920,25 @@ int io_apic_set_pci_routing (int ioapic, + return 0; + } - current_thread_info()->cpu = 0; ++int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) ++{ ++ int i; ++ ++ if (skip_ioapic_setup) ++ return -1; ++ ++ for (i = 0; i < mp_irq_entries; i++) ++ if (mp_irqs[i].mpc_irqtype == mp_INT && ++ mp_irqs[i].mpc_srcbusirq == bus_irq) ++ break; ++ if (i >= mp_irq_entries) ++ return -1; ++ ++ *trigger = irq_trigger(i); ++ *polarity = irq_polarity(i); ++ return 0; ++} ++ + #endif /* CONFIG_ACPI */ - for (cpu = 0; cpu < NR_CPUS; cpu++) { -- cpus_clear(cpu_sibling_map[cpu]); -- cpus_clear(cpu_core_map[cpu]); -+ cpus_clear(per_cpu(cpu_sibling_map, cpu)); -+ cpus_clear(per_cpu(cpu_core_map, cpu)); - } - - set_cpu_sibling_map(0); -@@ -319,11 +319,12 @@ void __init smp_prepare_cpus(unsigned in - apicid = cpu; - if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) - apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id); -- cpu_data[cpu] = boot_cpu_data; -- cpu_data[cpu].apicid 
= apicid; -+ cpu_data(cpu) = boot_cpu_data; -+ cpu_data(cpu).cpu_index = cpu; -+ cpu_data(cpu).apicid = apicid; - - cpu_2_logical_apicid[cpu] = apicid; -- x86_cpu_to_apicid[cpu] = apicid; -+ per_cpu(x86_cpu_to_apicid, cpu) = apicid; + static int __init parse_disable_timer_pin_1(char *arg) +--- head-2011-03-17.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #ifdef CONFIG_ACPI + #include + #endif +@@ -584,7 +585,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, + #define default_PCI_trigger(idx) (1) + #define default_PCI_polarity(idx) (1) - #ifdef __x86_64__ - cpu_pda(cpu)->pcurrent = idle; ---- head-2010-05-25.orig/drivers/xen/netback/loopback.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/loopback.c 2010-03-24 15:10:29.000000000 +0100 -@@ -285,9 +285,9 @@ static void __exit clean_loopback(int i) - char dev_name[IFNAMSIZ]; - - sprintf(dev_name, "vif0.%d", i); -- dev1 = dev_get_by_name(dev_name); -+ dev1 = dev_get_by_name(&init_net, dev_name); - sprintf(dev_name, "veth%d", i); -- dev2 = dev_get_by_name(dev_name); -+ dev2 = dev_get_by_name(&init_net, dev_name); - if (dev1 && dev2) { - unregister_netdev(dev2); - unregister_netdev(dev1); ---- head-2010-05-25.orig/drivers/xen/netback/netback.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/netback.c 2010-03-24 15:10:29.000000000 +0100 -@@ -353,8 +353,8 @@ static void xen_network_done_notify(void - { - static struct net_device *eth0_dev = NULL; - if (unlikely(eth0_dev == NULL)) -- eth0_dev = __dev_get_by_name("eth0"); -- netif_rx_schedule(eth0_dev); -+ eth0_dev = __dev_get_by_name(&init_net, "eth0"); -+ netif_rx_schedule(eth0_dev, ???); - } - /* - * Add following to poll() function in NAPI driver (Tigon3 is example): ---- head-2010-05-25.orig/drivers/xen/netback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/xenbus.c 2010-03-24 15:10:29.000000000 +0100 -@@ -149,12 +149,10 @@ fail: - * and vif variables to the environment, for the benefit of the vif-* hotplug - * scripts. 
- */ --static int netback_uevent(struct xenbus_device *xdev, char **envp, -- int num_envp, char *buffer, int buffer_size) -+static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env) +-static int __init MPBIOS_polarity(int idx) ++static int MPBIOS_polarity(int idx) { - struct backend_info *be = xdev->dev.driver_data; - netif_t *netif = be->netif; -- int i = 0, length = 0; - char *val; + int bus = mp_irqs[idx].mpc_srcbus; + int polarity; +@@ -871,6 +872,10 @@ static void __init setup_IO_APIC_irqs(vo + apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); + continue; + } ++ if (!first_notcon) { ++ apic_printk(APIC_VERBOSE, " not connected.\n"); ++ first_notcon = 1; ++ } - DPRINTK("netback_uevent"); -@@ -166,15 +164,11 @@ static int netback_uevent(struct xenbus_ - return err; - } - else { -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, -- &length, "script=%s", val); -+ add_uevent_var(env, "script=%s", val); - kfree(val); + irq = pin_2_irq(idx, apic, pin); + add_pin_to_irq(irq, apic, pin); +@@ -881,7 +886,7 @@ static void __init setup_IO_APIC_irqs(vo } -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -- "vif=%s", netif->dev->name); -- -- envp[i] = NULL; -+ add_uevent_var(env, "vif=%s", netif->dev->name); - - return 0; - } ---- head-2010-05-25.orig/drivers/xen/netfront/accel.c 2009-05-04 10:01:03.000000000 +0200 -+++ head-2010-05-25/drivers/xen/netfront/accel.c 2010-03-24 15:10:29.000000000 +0100 -@@ -325,7 +325,7 @@ accelerator_set_vif_state_hooks(struct n - DPRINTK("%p\n",vif_state); - - /* Make sure there are no data path operations going on */ -- netif_poll_disable(vif_state->np->netdev); -+ napi_disable(&vif_state->np->napi); - netif_tx_lock_bh(vif_state->np->netdev); - - accelerator = vif_state->np->accelerator; -@@ -334,7 +334,7 @@ accelerator_set_vif_state_hooks(struct n - spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); - - netif_tx_unlock_bh(vif_state->np->netdev); -- netif_poll_enable(vif_state->np->netdev); -+ napi_enable(&vif_state->np->napi); - } - - -@@ -508,7 +508,7 @@ accelerator_remove_single_hook(struct ne - unsigned long flags; - - /* Make sure there are no data path operations going on */ -- netif_poll_disable(vif_state->np->netdev); -+ napi_disable(&vif_state->np->napi); - netif_tx_lock_bh(vif_state->np->netdev); - - spin_lock_irqsave(&accelerator->vif_states_lock, flags); -@@ -524,7 +524,7 @@ accelerator_remove_single_hook(struct ne - spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); - - netif_tx_unlock_bh(vif_state->np->netdev); -- netif_poll_enable(vif_state->np->netdev); -+ napi_enable(&vif_state->np->napi); + if (!first_notcon) +- apic_printk(APIC_VERBOSE," not connected.\n"); ++ apic_printk(APIC_VERBOSE, " not connected.\n"); } + #ifndef CONFIG_XEN +@@ -1277,10 +1282,13 @@ void disable_IO_APIC(void) + static int __init timer_irq_works(void) + { + unsigned long t1 = jiffies; ++ unsigned long flags; ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.c 2010-03-24 15:10:29.000000000 +0100 -@@ -626,6 +626,7 @@ static int network_open(struct net_devic - struct netfront_info *np = netdev_priv(dev); - - memset(&np->stats, 0, sizeof(np->stats)); -+ napi_enable(&np->napi); ++ local_save_flags(flags); + local_irq_enable(); + /* Let ten ticks pass... 
*/ + mdelay((10 * 1000) / HZ); ++ local_irq_restore(flags); - spin_lock_bh(&np->rx_lock); - if (netfront_carrier_ok(np)) { -@@ -634,7 +635,7 @@ static int network_open(struct net_devic - if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ - netfront_accelerator_call_stop_napi_irq(np, dev); + /* + * Expect a few ticks at least, to be sure some possible +@@ -1655,6 +1663,9 @@ static inline void check_timer(void) + { + struct irq_cfg *cfg = irq_cfg + 0; + int apic1, pin1, apic2, pin2; ++ unsigned long flags; ++ ++ local_irq_save(flags); -- netif_rx_schedule(dev); -+ netif_rx_schedule(dev, &np->napi); + /* + * get/set the timer IRQ vector: +@@ -1696,7 +1707,7 @@ static inline void check_timer(void) + } + if (disable_timer_pin_1 > 0) + clear_IO_APIC_pin(0, pin1); +- return; ++ goto out; } - } - spin_unlock_bh(&np->rx_lock); -@@ -706,7 +707,7 @@ static void rx_refill_timeout(unsigned l - - netfront_accelerator_call_stop_napi_irq(np, dev); - -- netif_rx_schedule(dev); -+ netif_rx_schedule(dev, &np->napi); - } - - static void network_alloc_rx_buffers(struct net_device *dev) -@@ -1063,7 +1064,7 @@ static irqreturn_t netif_int(int irq, vo - if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { - netfront_accelerator_call_stop_napi_irq(np, dev); - -- netif_rx_schedule(dev); -+ netif_rx_schedule(dev, &np->napi); - dev->last_rx = jiffies; + clear_IO_APIC_pin(apic1, pin1); + apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not " +@@ -1718,7 +1729,7 @@ static inline void check_timer(void) + if (nmi_watchdog == NMI_IO_APIC) { + setup_nmi(); + } +- return; ++ goto out; } - } -@@ -1316,16 +1317,17 @@ static int xennet_set_skb_gso(struct sk_ - #endif - } - --static int netif_poll(struct net_device *dev, int *pbudget) -+static int netif_poll(struct napi_struct *napi, int budget) - { -- struct netfront_info *np = netdev_priv(dev); -+ struct netfront_info *np = container_of(napi, struct netfront_info, napi); -+ struct net_device *dev = np->netdev; - struct sk_buff *skb; - struct netfront_rx_info rinfo; - struct netif_rx_response *rx = &rinfo.rx; - struct netif_extra_info *extras = rinfo.extras; - RING_IDX i, rp; - struct multicall_entry *mcl; -- int work_done, budget, more_to_do = 1, accel_more_to_do = 1; -+ int work_done, more_to_do = 1, accel_more_to_do = 1; - struct sk_buff_head rxq; - struct sk_buff_head errq; - struct sk_buff_head tmpq; -@@ -1345,8 +1347,6 @@ static int netif_poll(struct net_device - skb_queue_head_init(&errq); - skb_queue_head_init(&tmpq); - -- if ((budget = *pbudget) > dev->quota) -- budget = dev->quota; - rp = np->rx.sring->rsp_prod; - rmb(); /* Ensure we see queued responses up to 'rp'. */ + /* + * Cleanup, just in case ... 
+@@ -1741,7 +1752,7 @@ static inline void check_timer(void) -@@ -1508,9 +1508,6 @@ err: - accel_more_to_do = 0; + if (timer_irq_works()) { + apic_printk(APIC_VERBOSE," works.\n"); +- return; ++ goto out; } + apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); + apic_printk(APIC_VERBOSE," failed.\n"); +@@ -1756,10 +1767,12 @@ static inline void check_timer(void) -- *pbudget -= work_done; -- dev->quota -= work_done; -- - if (work_done < budget) { - local_irq_save(flags); - -@@ -1527,14 +1524,14 @@ err: - } - - if (!more_to_do && !accel_more_to_do) -- __netif_rx_complete(dev); -+ __netif_rx_complete(dev, napi); - - local_irq_restore(flags); + if (timer_irq_works()) { + apic_printk(APIC_VERBOSE," works.\n"); +- return; ++ goto out; } - - spin_unlock(&np->rx_lock); - -- return more_to_do | accel_more_to_do; -+ return work_done; + apic_printk(APIC_VERBOSE," failed :(.\n"); + panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n"); ++out: ++ local_irq_restore(flags); } + #else + #define check_timer() ((void)0) +@@ -1775,7 +1788,7 @@ __setup("no_timer_check", notimercheck); - static void netif_release_tx_bufs(struct netfront_info *np) -@@ -1681,6 +1678,7 @@ static int network_close(struct net_devi + /* + * +- * IRQ's that are handled by the PIC in the MPS IOAPIC case. ++ * IRQs that are handled by the PIC in the MPS IOAPIC case. + * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. + * Linux doesn't really care, as it's not actually used + * for any interrupt handling anyway. +@@ -1858,7 +1871,7 @@ static struct sysdev_class ioapic_sysdev + static int __init ioapic_init_sysfs(void) { - struct netfront_info *np = netdev_priv(dev); - netif_stop_queue(np->netdev); -+ napi_disable(&np->napi); - return 0; - } + struct sys_device * dev; +- int i, size, error = 0; ++ int i, size, error; -@@ -2088,16 +2086,14 @@ static struct net_device * __devinit cre - netdev->hard_start_xmit = network_start_xmit; - netdev->stop = network_close; - netdev->get_stats = network_get_stats; -- netdev->poll = netif_poll; -+ netif_napi_add(netdev, &np->napi, netif_poll, 64); - netdev->set_multicast_list = network_set_multicast_list; - netdev->uninit = netif_uninit; - netdev->set_mac_address = xennet_set_mac_address; - netdev->change_mtu = xennet_change_mtu; -- netdev->weight = 64; - netdev->features = NETIF_F_IP_CSUM; - - SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); -- SET_MODULE_OWNER(netdev); - SET_NETDEV_DEV(netdev, &dev->dev); + error = sysdev_class_register(&ioapic_sysdev_class); + if (error) +@@ -1867,12 +1880,11 @@ static int __init ioapic_init_sysfs(void + for (i = 0; i < nr_ioapics; i++ ) { + size = sizeof(struct sys_device) + nr_ioapic_registers[i] + * sizeof(struct IO_APIC_route_entry); +- mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); ++ mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); + if (!mp_ioapic_data[i]) { + printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); + continue; + } +- memset(mp_ioapic_data[i], 0, size); + dev = &mp_ioapic_data[i]->dev; + dev->id = i; + dev->cls = &ioapic_sysdev_class; +@@ -1933,7 +1945,7 @@ void destroy_irq(unsigned int irq) + #endif /* CONFIG_XEN */ - np->netdev = netdev; ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.h 2010-02-24 13:13:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.h 2010-03-24 15:10:29.000000000 +0100 -@@ -157,6 +157,8 @@ struct netfront_info { - spinlock_t tx_lock; - spinlock_t rx_lock; + /* +- * MSI mesage composition ++ * MSI message composition + */ + #if 
defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN) + static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) +@@ -2043,8 +2055,64 @@ void arch_teardown_msi_irq(unsigned int + destroy_irq(irq); + } -+ struct napi_struct napi; +-#endif /* CONFIG_PCI_MSI */ ++#ifdef CONFIG_DMAR ++#ifdef CONFIG_SMP ++static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) ++{ ++ struct irq_cfg *cfg = irq_cfg + irq; ++ struct msi_msg msg; ++ unsigned int dest; ++ cpumask_t tmp; + - unsigned int irq; - unsigned int copying_receiver; - unsigned int carrier; ---- head-2010-05-25.orig/drivers/xen/pciback/Makefile 2008-07-21 11:00:33.000000000 +0200 -+++ head-2010-05-25/drivers/xen/pciback/Makefile 2010-03-24 15:10:29.000000000 +0100 -@@ -12,6 +12,4 @@ pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT - pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o - pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o - --ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y) --EXTRA_CFLAGS += -DDEBUG --endif -+ccflags-$(CONFIG_XEN_PCIDEV_BE_DEBUG) += -DDEBUG ---- head-2010-05-25.orig/drivers/xen/pcifront/Makefile 2007-06-12 13:13:45.000000000 +0200 -+++ head-2010-05-25/drivers/xen/pcifront/Makefile 2010-03-24 15:10:29.000000000 +0100 -@@ -2,6 +2,4 @@ obj-y += pcifront.o - - pcifront-y := pci_op.o xenbus.o pci.o ++ cpus_and(tmp, mask, cpu_online_map); ++ if (cpus_empty(tmp)) ++ return; ++ ++ if (assign_irq_vector(irq, mask)) ++ return; ++ ++ cpus_and(tmp, cfg->domain, mask); ++ dest = cpu_mask_to_apicid(tmp); ++ ++ dmar_msi_read(irq, &msg); ++ ++ msg.data &= ~MSI_DATA_VECTOR_MASK; ++ msg.data |= MSI_DATA_VECTOR(cfg->vector); ++ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; ++ msg.address_lo |= MSI_ADDR_DEST_ID(dest); ++ ++ dmar_msi_write(irq, &msg); ++ irq_desc[irq].affinity = mask; ++} ++#endif /* CONFIG_SMP */ ++ ++struct irq_chip dmar_msi_type = { ++ .name = "DMAR_MSI", ++ .unmask = dmar_msi_unmask, ++ .mask = dmar_msi_mask, ++ .ack = ack_apic_edge, ++#ifdef CONFIG_SMP ++ .set_affinity = dmar_msi_set_affinity, ++#endif ++ .retrigger = ioapic_retrigger_irq, ++}; ++ ++int arch_setup_dmar_msi(unsigned int irq) ++{ ++ int ret; ++ struct msi_msg msg; ++ ++ ret = msi_compose_msg(NULL, irq, &msg); ++ if (ret < 0) ++ return ret; ++ dmar_msi_write(irq, &msg); ++ set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, ++ "edge"); ++ return 0; ++} ++#endif --ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y) --EXTRA_CFLAGS += -DDEBUG --endif -+ccflags-$(CONFIG_XEN_PCIDEV_FE_DEBUG) += -DDEBUG ---- head-2010-05-25.orig/drivers/xen/scsiback/emulate.c 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsiback/emulate.c 2010-03-24 15:10:29.000000000 +0100 -@@ -109,9 +109,10 @@ static void resp_not_supported_cmd(pendi ++#endif /* CONFIG_PCI_MSI */ + /* + * Hypertransport interrupt support + */ +@@ -2177,8 +2245,27 @@ int io_apic_set_pci_routing (int ioapic, + return 0; } +-#endif /* CONFIG_ACPI */ --static int __copy_to_sg(struct scatterlist *sg, unsigned int nr_sg, -+static int __copy_to_sg(struct scatterlist *sgl, unsigned int nr_sg, - void *buf, unsigned int buflen) - { -+ struct scatterlist *sg; - void *from = buf; - void *to; - unsigned int from_rest = buflen; -@@ -120,8 +121,8 @@ static int __copy_to_sg(struct scatterli - unsigned int i; - unsigned long pfn; - -- for (i = 0; i < nr_sg; i++) { -- if (sg->page == NULL) { -+ for_each_sg (sgl, sg, nr_sg, i) { -+ if (sg_page(sg) == NULL) { - printk(KERN_WARNING "%s: inconsistent length field in " - "scatterlist\n", __FUNCTION__); - return 
-ENOMEM; -@@ -130,7 +131,7 @@ static int __copy_to_sg(struct scatterli - to_capa = sg->length; - copy_size = min_t(unsigned int, to_capa, from_rest); ++int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) ++{ ++ int i; ++ ++ if (skip_ioapic_setup) ++ return -1; ++ ++ for (i = 0; i < mp_irq_entries; i++) ++ if (mp_irqs[i].mpc_irqtype == mp_INT && ++ mp_irqs[i].mpc_srcbusirq == bus_irq) ++ break; ++ if (i >= mp_irq_entries) ++ return -1; ++ ++ *trigger = irq_trigger(i); ++ *polarity = irq_polarity(i); ++ return 0; ++} ++ ++#endif /* CONFIG_ACPI */ -- pfn = page_to_pfn(sg->page); -+ pfn = page_to_pfn(sg_page(sg)); - to = pfn_to_kaddr(pfn) + (sg->offset); - memcpy(to, from, copy_size); + #ifndef CONFIG_XEN + /* +@@ -2217,3 +2304,4 @@ void __init setup_ioapic_dest(void) + } + #endif + #endif /* !CONFIG_XEN */ ++ +--- head-2011-03-17.orig/arch/x86/kernel/ioport_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ioport_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/i386/kernel/ioport.c +- * + * This contains the io-permission bitmap code - written by obz, with changes + * by Linus. + */ +--- head-2011-03-17.orig/arch/x86/kernel/ioport_64-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ioport_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/x86_64/kernel/ioport.c +- * + * This contains the io-permission bitmap code - written by obz, with changes + * by Linus. + */ +--- head-2011-03-17.orig/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/i386/kernel/ldt.c +- * + * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds + * Copyright (C) 1999 Ingo Molnar + */ +@@ -106,14 +104,14 @@ int init_new_context(struct task_struct + struct mm_struct * old_mm; + int retval = 0; -@@ -139,7 +140,6 @@ static int __copy_to_sg(struct scatterli - return 0; - } - -- sg++; - from += copy_size; +- init_MUTEX(&mm->context.sem); ++ mutex_init(&mm->context.lock); + mm->context.size = 0; + mm->context.has_foreign_mappings = 0; + old_mm = current->mm; + if (old_mm && old_mm->context.size > 0) { +- down(&old_mm->context.sem); ++ mutex_lock(&old_mm->context.lock); + retval = copy_ldt(&mm->context, &old_mm->context); +- up(&old_mm->context.sem); ++ mutex_unlock(&old_mm->context.lock); } - -@@ -148,9 +148,10 @@ static int __copy_to_sg(struct scatterli - return -ENOMEM; + return retval; } +@@ -149,7 +147,7 @@ static int read_ldt(void __user * ptr, u + if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) + bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; --static int __copy_from_sg(struct scatterlist *sg, unsigned int nr_sg, -+static int __copy_from_sg(struct scatterlist *sgl, unsigned int nr_sg, - void *buf, unsigned int buflen) - { -+ struct scatterlist *sg; - void *from; - void *to = buf; - unsigned int from_rest; -@@ -159,8 +160,8 @@ static int __copy_from_sg(struct scatter - unsigned int i; - unsigned long pfn; +- down(&mm->context.sem); ++ mutex_lock(&mm->context.lock); + size = mm->context.size*LDT_ENTRY_SIZE; + if (size > bytecount) + size = bytecount; +@@ -157,7 +155,7 @@ static int read_ldt(void __user * ptr, u + err = 0; + if (copy_to_user(ptr, mm->context.ldt, size)) + err = -EFAULT; +- up(&mm->context.sem); ++ mutex_unlock(&mm->context.lock); + if (err < 0) + goto error_return; + if (size != bytecount) { +@@ -213,7 +211,7 @@ static int 
write_ldt(void __user * ptr, + goto out; + } -- for (i = 0; i < nr_sg; i++) { -- if (sg->page == NULL) { -+ for_each_sg (sgl, sg, nr_sg, i) { -+ if (sg_page(sg) == NULL) { - printk(KERN_WARNING "%s: inconsistent length field in " - "scatterlist\n", __FUNCTION__); - return -ENOMEM; -@@ -175,13 +176,11 @@ static int __copy_from_sg(struct scatter - } - copy_size = from_rest; +- down(&mm->context.sem); ++ mutex_lock(&mm->context.lock); + if (ldt_info.entry_number >= mm->context.size) { + error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); + if (error < 0) +@@ -240,7 +238,7 @@ install: + entry_1, entry_2); -- pfn = page_to_pfn(sg->page); -+ pfn = page_to_pfn(sg_page(sg)); - from = pfn_to_kaddr(pfn) + (sg->offset); - memcpy(to, from, copy_size); + out_unlock: +- up(&mm->context.sem); ++ mutex_unlock(&mm->context.lock); + out: + return error; + } +--- head-2011-03-17.orig/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/x86_64/kernel/ldt.c +- * + * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds + * Copyright (C) 1999 Ingo Molnar + * Copyright (C) 2002 Andi Kleen +@@ -112,19 +110,14 @@ int init_new_context(struct task_struct + int retval = 0; - to_capa -= copy_size; -- -- sg++; - to += copy_size; + memset(&mm->context, 0, sizeof(mm->context)); +- init_MUTEX(&mm->context.sem); ++ mutex_init(&mm->context.lock); + old_mm = current->mm; + if (old_mm) + mm->context.vdso = old_mm->context.vdso; + if (old_mm && old_mm->context.size > 0) { +- down(&old_mm->context.sem); ++ mutex_lock(&old_mm->context.lock); + retval = copy_ldt(&mm->context, &old_mm->context); +- up(&old_mm->context.sem); +- } +- if (retval == 0) { +- spin_lock(&mm_unpinned_lock); +- list_add(&mm->context.unpinned, &mm_unpinned); +- spin_unlock(&mm_unpinned_lock); ++ mutex_unlock(&old_mm->context.lock); } + return retval; + } +@@ -148,11 +141,6 @@ void destroy_context(struct mm_struct *m + kfree(mm->context.ldt); + mm->context.size = 0; + } +- if (!PagePinned(virt_to_page(mm->pgd))) { +- spin_lock(&mm_unpinned_lock); +- list_del(&mm->context.unpinned); +- spin_unlock(&mm_unpinned_lock); +- } + } ---- head-2010-05-25.orig/drivers/xen/scsiback/scsiback.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsiback/scsiback.c 2010-03-24 15:10:29.000000000 +0100 -@@ -260,6 +260,8 @@ static int scsiback_gnttab_data_map(vscs - write = (data_dir == DMA_TO_DEVICE); + static int read_ldt(void __user * ptr, unsigned long bytecount) +@@ -166,7 +154,7 @@ static int read_ldt(void __user * ptr, u + if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) + bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; - if (nr_segments) { -+ struct scatterlist *sg; -+ - /* free of (sgl) in fast_flush_area()*/ - pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments, - GFP_KERNEL); -@@ -268,6 +270,8 @@ static int scsiback_gnttab_data_map(vscs - return -ENOMEM; - } +- down(&mm->context.sem); ++ mutex_lock(&mm->context.lock); + size = mm->context.size*LDT_ENTRY_SIZE; + if (size > bytecount) + size = bytecount; +@@ -174,7 +162,7 @@ static int read_ldt(void __user * ptr, u + err = 0; + if (copy_to_user(ptr, mm->context.ldt, size)) + err = -EFAULT; +- up(&mm->context.sem); ++ mutex_unlock(&mm->context.lock); + if (err < 0) + goto error_return; + if (size != bytecount) { +@@ -227,7 +215,7 @@ static int write_ldt(void __user * ptr, + goto out; + } -+ sg_init_table(pending_req->sgl, nr_segments); -+ - 
for (i = 0; i < nr_segments; i++) { - flags = GNTMAP_host_map; - if (write) -@@ -291,7 +295,7 @@ static int scsiback_gnttab_data_map(vscs - } - } +- down(&mm->context.sem); ++ mutex_lock(&mm->context.lock); + if (ldt_info.entry_number >= (unsigned)mm->context.size) { + error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); + if (error < 0) +@@ -256,7 +244,7 @@ install: + error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32))); -- for (i = 0; i < nr_segments; i++) { -+ for_each_sg (pending_req->sgl, sg, nr_segments, i) { - struct page *pg; + out_unlock: +- up(&mm->context.sem); ++ mutex_unlock(&mm->context.lock); + out: + return error; + } +--- head-2011-03-17.orig/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -65,8 +65,10 @@ unsigned long mp_lapic_addr; - if (unlikely(map[i].status != 0)) { -@@ -310,15 +314,14 @@ static int scsiback_gnttab_data_map(vscs - set_phys_to_machine(page_to_pfn(pg), - FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT)); + unsigned int def_to_bigsmp = 0; -- pending_req->sgl[i].page = pg; -- pending_req->sgl[i].offset = ring_req->seg[i].offset; -- pending_req->sgl[i].length = ring_req->seg[i].length; -- data_len += pending_req->sgl[i].length; -+ sg_set_page(sg, pg, ring_req->seg[i].length, -+ ring_req->seg[i].offset); -+ data_len += sg->length; ++#ifndef CONFIG_XEN + /* Processor that is doing the boot up */ + unsigned int boot_cpu_physical_apicid = -1U; ++#endif + /* Internal processor count */ + unsigned int __cpuinitdata num_processors; - barrier(); -- if (pending_req->sgl[i].offset >= PAGE_SIZE || -- pending_req->sgl[i].length > PAGE_SIZE || -- pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE) -+ if (sg->offset >= PAGE_SIZE || -+ sg->length > PAGE_SIZE || -+ sg->offset + sg->length > PAGE_SIZE) - err |= 1; +@@ -842,6 +844,7 @@ void __init mp_register_lapic_address(u6 + void __cpuinit mp_register_lapic (u8 id, u8 enabled) + { + struct mpc_config_processor processor; ++#ifndef CONFIG_XEN + int boot_cpu = 0; + + if (MAX_APICS - id <= 0) { +@@ -853,7 +856,6 @@ void __cpuinit mp_register_lapic (u8 id, + if (id == boot_cpu_physical_apicid) + boot_cpu = 1; - } -@@ -347,27 +350,14 @@ static int scsiback_merge_bio(struct req +-#ifndef CONFIG_XEN + processor.mpc_type = MP_PROCESSOR; + processor.mpc_apicid = id; + processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); +@@ -921,11 +923,11 @@ void __init mp_register_ioapic(u8 id, u3 + + #ifndef CONFIG_XEN + set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); +-#endif + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + tmpid = io_apic_get_unique_id(idx, id); + else ++#endif + tmpid = id; + if (tmpid == -1) { + nr_ioapics--; +@@ -1023,7 +1025,7 @@ void __init mp_config_acpi_legacy_irqs ( + + /* + * Use the default configuration for the IRQs 0-15. Unless +- * overriden by (MADT) interrupt source override entries. ++ * overridden by (MADT) interrupt source override entries. 
+ */ + for (i = 0; i < 16; i++) { + int idx; +--- head-2011-03-17.orig/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -54,9 +54,12 @@ int nr_ioapics; + unsigned long mp_lapic_addr = 0; - blk_queue_bounce(q, &bio); -- if (!rq->bio) -- blk_rq_bio_prep(q, rq, bio); -- else if (!ll_back_merge_fn(q, rq, bio)) -- return -EINVAL; -- else { -- rq->biotail->bi_next = bio; -- rq->biotail = bio; -- } - -- return 0; -+ return blk_rq_append_bio(q, rq, bio); ++#ifndef CONFIG_XEN + /* Processor that is doing the boot up */ + unsigned int boot_cpu_id = -1U; ++EXPORT_SYMBOL(boot_cpu_id); ++#endif ++ + /* Internal processor count */ + unsigned int num_processors __cpuinitdata = 0; + +@@ -87,7 +90,7 @@ static int __init mpf_checksum(unsigned } + #ifndef CONFIG_XEN +-static void __cpuinit MP_processor_info (struct mpc_config_processor *m) ++static void __cpuinit MP_processor_info(struct mpc_config_processor *m) + { + int cpu; + cpumask_t tmp_map; +@@ -124,13 +127,24 @@ static void __cpuinit MP_processor_info + cpu = 0; + } + bios_cpu_apicid[cpu] = m->mpc_apicid; +- x86_cpu_to_apicid[cpu] = m->mpc_apicid; ++ /* ++ * We get called early in the the start_kernel initialization ++ * process when the per_cpu data area is not yet setup, so we ++ * use a static array that is removed after the per_cpu data ++ * area is created. ++ */ ++ if (x86_cpu_to_apicid_ptr) { ++ u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr; ++ x86_cpu_to_apicid[cpu] = m->mpc_apicid; ++ } else { ++ per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; ++ } - /* quoted scsi_lib.c/scsi_bi_endio */ --static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error) -+static void scsiback_bi_endio(struct bio *bio, int error) + cpu_set(cpu, cpu_possible_map); + cpu_set(cpu, cpu_present_map); + } + #else +-static void __cpuinit MP_processor_info (struct mpc_config_processor *m) ++static void __cpuinit MP_processor_info(struct mpc_config_processor *m) { -- if (bio->bi_size) -- return 1; -- - bio_put(bio); -- return 0; + num_processors++; } +@@ -611,12 +625,12 @@ void __init mp_register_lapic_address(u6 + void __cpuinit mp_register_lapic (u8 id, u8 enabled) + { + struct mpc_config_processor processor; ++#ifndef CONFIG_XEN + int boot_cpu = 0; + + if (id == boot_cpu_id) + boot_cpu = 1; +-#ifndef CONFIG_XEN + processor.mpc_type = MP_PROCESSOR; + processor.mpc_apicid = id; + processor.mpc_apicver = 0; +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -13,14 +13,13 @@ + #include + #include + #include +-#include + #include + #include + #include + #include + #include +-#include +-#include ++#include ++#include + #include -@@ -378,16 +368,16 @@ static int request_map_sg(struct request - struct request_queue *q = rq->q; - int nr_pages; - unsigned int nsegs = count; -- - unsigned int data_len = 0, len, bytes, off; -+ struct scatterlist *sg; - struct page *page; - struct bio *bio = NULL; - int i, err, nr_vecs = 0; + #ifdef __x86_64__ +@@ -106,27 +105,29 @@ int range_straddles_page_boundary(paddr_ + } -- for (i = 0; i < nsegs; i++) { -- page = pending_req->sgl[i].page; -- off = (unsigned int)pending_req->sgl[i].offset; -- len = (unsigned int)pending_req->sgl[i].length; -+ for_each_sg (pending_req->sgl, sg, nsegs, i) { -+ page = sg_page(sg); -+ off = sg->offset; -+ len = sg->length; - data_len += len; + 
int +-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, ++dma_map_sg(struct device *hwdev, struct scatterlist *sgl, int nents, + enum dma_data_direction direction) + { + int i, rc; - nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT; -@@ -415,7 +405,7 @@ static int request_map_sg(struct request - if (bio->bi_vcnt >= nr_vecs) { - err = scsiback_merge_bio(rq, bio); - if (err) { -- bio_endio(bio, bio->bi_size, 0); -+ bio_endio(bio, 0); - goto free_bios; - } - bio = NULL; -@@ -438,7 +428,7 @@ free_bios: - /* - * call endio instead of bio_put incase it was bounced - */ -- bio_endio(bio, bio->bi_size, 0); -+ bio_endio(bio, 0); + BUG_ON(!valid_dma_direction(direction)); +- WARN_ON(nents == 0 || sg[0].length == 0); ++ WARN_ON(nents == 0 || sgl->length == 0); + + if (swiotlb) { +- rc = swiotlb_map_sg(hwdev, sg, nents, direction); ++ rc = swiotlb_map_sg(hwdev, sgl, nents, direction); + } else { +- for (i = 0; i < nents; i++ ) { +- BUG_ON(!sg[i].page); +- sg[i].dma_address = +- gnttab_dma_map_page(sg[i].page) + sg[i].offset; +- sg[i].dma_length = sg[i].length; ++ struct scatterlist *sg; ++ ++ for_each_sg(sgl, sg, nents, i) { ++ BUG_ON(!sg_page(sg)); ++ sg->dma_address = ++ gnttab_dma_map_page(sg_page(sg)) + sg->offset; ++ sg->dma_length = sg->length; + IOMMU_BUG_ON(address_needs_mapping( +- hwdev, sg[i].dma_address)); ++ hwdev, sg->dma_address)); + IOMMU_BUG_ON(range_straddles_page_boundary( +- page_to_pseudophys(sg[i].page) + sg[i].offset, +- sg[i].length)); ++ page_to_pseudophys(sg_page(sg)) + sg->offset, ++ sg->length)); + } + rc = nents; } +@@ -137,17 +138,19 @@ dma_map_sg(struct device *hwdev, struct + EXPORT_SYMBOL(dma_map_sg); - return err; ---- head-2010-05-25.orig/drivers/xen/scsifront/scsifront.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsifront/scsifront.c 2010-03-24 15:10:29.000000000 +0100 -@@ -246,11 +246,10 @@ static int map_data_for_request(struct v + void +-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, ++dma_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nents, + enum dma_data_direction direction) { - grant_ref_t gref_head; - struct page *page; -- int err, i, ref, ref_cnt = 0; -+ int err, ref, ref_cnt = 0; - int write = (sc->sc_data_direction == DMA_TO_DEVICE); -- int nr_pages, off, len, bytes; -+ unsigned int i, nr_pages, off, len, bytes; - unsigned long buffer_pfn; -- unsigned int data_len = 0; + int i; - if (sc->sc_data_direction == DMA_NONE) - return 0; -@@ -263,25 +262,31 @@ static int map_data_for_request(struct v + BUG_ON(!valid_dma_direction(direction)); + if (swiotlb) +- swiotlb_unmap_sg(hwdev, sg, nents, direction); ++ swiotlb_unmap_sg(hwdev, sgl, nents, direction); + else { +- for (i = 0; i < nents; i++ ) +- gnttab_dma_unmap_page(sg[i].dma_address); ++ struct scatterlist *sg; ++ ++ for_each_sg(sgl, sg, nents, i) ++ gnttab_dma_unmap_page(sg->dma_address); + } + } + EXPORT_SYMBOL(dma_unmap_sg); +@@ -258,7 +261,8 @@ void dma_free_coherent(struct device *de + { + struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; + int order = get_order(size); +- ++ ++ WARN_ON(irqs_disabled()); /* for portability */ + if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { + int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; - if (sc->use_sg) { - /* quoted scsi_lib.c/scsi_req_map_sg . 
*/ -- struct scatterlist *sg = (struct scatterlist *)sc->request_buffer; -- nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; -+ struct scatterlist *sg, *sgl = (struct scatterlist *)sc->request_buffer; -+ unsigned int data_len = sc->request_bufflen; +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/i386/kernel/process.c +- * + * Copyright (C) 1995 Linus Torvalds + * + * Pentium III FXSR, SSE support +@@ -190,6 +188,10 @@ void cpu_idle(void) + } + } -+ nr_pages = (sc->request_bufflen + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; - if (nr_pages > VSCSIIF_SG_TABLESIZE) { - printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); - ref_cnt = (-E2BIG); - goto big_to_sg; ++static void do_nothing(void *unused) ++{ ++} ++ + void cpu_idle_wait(void) + { + unsigned int cpu, this_cpu = get_cpu(); +@@ -214,13 +216,20 @@ void cpu_idle_wait(void) + cpu_clear(cpu, map); } + cpus_and(map, map, cpu_online_map); ++ /* ++ * We waited 1 sec, if a CPU still did not call idle ++ * it may be because it is in idle and not waking up ++ * because it has nothing to do. ++ * Give all the remaining CPUS a kick. ++ */ ++ smp_call_function_mask(map, do_nothing, 0, 0); + } while (!cpus_empty(map)); -- for (i = 0; i < sc->use_sg; i++) { -- page = sg[i].page; -- off = sg[i].offset; -- len = sg[i].length; -- data_len += len; -+ for_each_sg (sgl, sg, sc->use_sg, i) { -+ page = sg_page(sg); -+ off = sg->offset; -+ len = sg->length; - - buffer_pfn = page_to_phys(page) >> PAGE_SHIFT; - -- while (len > 0) { -+ while (len > 0 && data_len > 0) { -+ /* -+ * sg sends a scatterlist that is larger than -+ * the data_len it wants transferred for certain -+ * IO sizes -+ */ - bytes = min_t(unsigned int, len, PAGE_SIZE - off); -+ bytes = min(bytes, data_len); - - ref = gnttab_claim_grant_reference(&gref_head); - BUG_ON(ref == -ENOSPC); -@@ -296,6 +301,7 @@ static int map_data_for_request(struct v - - buffer_pfn++; - len -= bytes; -+ data_len -= bytes; - off = 0; - ref_cnt++; - } ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel_fwd.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel_fwd.c 2010-03-24 15:10:29.000000000 +0100 -@@ -181,10 +181,11 @@ int netback_accel_fwd_add(const __u8 *ma - unsigned long flags; - cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac); - struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv; -+ DECLARE_MAC_BUF(buf); - - BUG_ON(fwd_priv == NULL); - -- DPRINTK("Adding mac " MAC_FMT "\n", MAC_ARG(mac)); -+ DPRINTK("Adding mac %s\n", print_mac(buf, mac)); - - spin_lock_irqsave(&fwd_set->fwd_lock, flags); - -@@ -199,8 +200,8 @@ int netback_accel_fwd_add(const __u8 *ma - if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table, - (cuckoo_hash_key *)(&key), &rc) != 0) { - spin_unlock_irqrestore(&fwd_set->fwd_lock, flags); -- EPRINTK("MAC address " MAC_FMT " already accelerated.\n", -- MAC_ARG(mac)); -+ EPRINTK("MAC address %s already accelerated.\n", -+ print_mac(buf, mac)); - return -EEXIST; - } + set_cpus_allowed(current, tmp); + } + EXPORT_SYMBOL_GPL(cpu_idle_wait); -@@ -235,8 +236,9 @@ void netback_accel_fwd_remove(const __u8 - unsigned long flags; - cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac); - struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv; -+ DECLARE_MAC_BUF(buf); +-void __devinit select_idle_routine(const struct cpuinfo_x86 *c) ++void __cpuinit 
select_idle_routine(const struct cpuinfo_x86 *c) + { + } -- DPRINTK("Removing mac " MAC_FMT "\n", MAC_ARG(mac)); -+ DPRINTK("Removing mac %s\n", print_mac(buf, mac)); +@@ -238,34 +247,52 @@ static int __init idle_setup(char *str) + } + early_param("idle", idle_setup); - BUG_ON(fwd_priv == NULL); +-void show_regs(struct pt_regs * regs) ++void __show_registers(struct pt_regs *regs, int all) + { + unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; + unsigned long d0, d1, d2, d3, d6, d7; ++ unsigned long esp; ++ unsigned short ss, gs; ++ ++ if (user_mode_vm(regs)) { ++ esp = regs->esp; ++ ss = regs->xss & 0xffff; ++ savesegment(gs, gs); ++ } else { ++ esp = (unsigned long) (®s->esp); ++ savesegment(ss, ss); ++ savesegment(gs, gs); ++ } -@@ -394,14 +396,16 @@ void netback_accel_tx_packet(struct sk_b + printk("\n"); +- printk("Pid: %d, comm: %20s\n", current->pid, current->comm); +- printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); ++ printk("Pid: %d, comm: %s %s (%s %.*s)\n", ++ task_pid_nr(current), current->comm, ++ print_tainted(), init_utsname()->release, ++ (int)strcspn(init_utsname()->version, " "), ++ init_utsname()->version); ++ ++ printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", ++ 0xffff & regs->xcs, regs->eip, regs->eflags, ++ smp_processor_id()); + print_symbol("EIP is at %s\n", regs->eip); - if (is_broadcast_ether_addr(skb_mac_header(skb)) - && packet_is_arp_reply(skb)) { -+ DECLARE_MAC_BUF(buf); +- if (user_mode_vm(regs)) +- printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); +- printk(" EFLAGS: %08lx %s (%s %.*s)\n", +- regs->eflags, print_tainted(), init_utsname()->release, +- (int)strcspn(init_utsname()->version, " "), +- init_utsname()->version); + printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", +- regs->eax,regs->ebx,regs->ecx,regs->edx); +- printk("ESI: %08lx EDI: %08lx EBP: %08lx", +- regs->esi, regs->edi, regs->ebp); +- printk(" DS: %04x ES: %04x FS: %04x\n", +- 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs); ++ regs->eax, regs->ebx, regs->ecx, regs->edx); ++ printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", ++ regs->esi, regs->edi, regs->ebp, esp); ++ printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", ++ regs->xds & 0xffff, regs->xes & 0xffff, ++ regs->xfs & 0xffff, gs, ss); + - /* - * update our fast path forwarding to reflect this - * gratuitous ARP - */ - mac = skb_mac_header(skb)+ETH_ALEN; ++ if (!all) ++ return; -- DPRINTK("%s: found gratuitous ARP for " MAC_FMT "\n", -- __FUNCTION__, MAC_ARG(mac)); -+ DPRINTK("%s: found gratuitous ARP for %s\n", -+ __FUNCTION__, print_mac(buf, mac)); + cr0 = read_cr0(); + cr2 = read_cr2(); + cr3 = read_cr3(); + cr4 = read_cr4_safe(); +- printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); ++ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", ++ cr0, cr2, cr3, cr4); - spin_lock_irqsave(&fwd_set->fwd_lock, flags); - /* ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel_msg.c 2008-02-20 09:32:49.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel_msg.c 2010-03-24 15:10:29.000000000 +0100 -@@ -57,11 +57,11 @@ static void netback_accel_msg_tx_localma - { - unsigned long lock_state; - struct net_accel_msg *msg; -+ DECLARE_MAC_BUF(buf); - - BUG_ON(bend == NULL || mac == NULL); - -- VPRINTK("Sending local mac message: " MAC_FMT "\n", -- MAC_ARG((const char *)mac)); -+ VPRINTK("Sending local mac message: %s\n", print_mac(buf, mac)); - - msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU, - 
&lock_state); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:10:29.000000000 +0100 -@@ -41,11 +41,13 @@ static void vnic_start_interrupts(netfro - /* Prime our interrupt */ - spin_lock_irqsave(&vnic->irq_enabled_lock, flags); - if (!netfront_accel_vi_enable_interrupts(vnic)) { -+ struct netfront_info *np = netdev_priv(vnic->net_dev); + get_debugreg(d0, 0); + get_debugreg(d1, 1); +@@ -273,10 +300,16 @@ void show_regs(struct pt_regs * regs) + get_debugreg(d3, 3); + printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", + d0, d1, d2, d3); + - /* Cripes, that was quick, better pass it up */ - netfront_accel_disable_net_interrupts(vnic); - vnic->irq_enabled = 0; - NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++); -- netif_rx_schedule(vnic->net_dev); -+ netif_rx_schedule(vnic->net_dev, &np->napi); - } else { - /* - * Nothing yet, make sure we get interrupts through -@@ -72,6 +74,7 @@ static void vnic_stop_interrupts(netfron - static void vnic_start_fastpath(netfront_accel_vnic *vnic) - { - struct net_device *net_dev = vnic->net_dev; -+ struct netfront_info *np = netdev_priv(net_dev); - unsigned long flags; + get_debugreg(d6, 6); + get_debugreg(d7, 7); +- printk("DR6: %08lx DR7: %08lx\n", d6, d7); ++ printk("DR6: %08lx DR7: %08lx\n", ++ d6, d7); ++} - DPRINTK("%s\n", __FUNCTION__); -@@ -80,9 +83,9 @@ static void vnic_start_fastpath(netfront - vnic->tx_enabled = 1; - spin_unlock_irqrestore(&vnic->tx_lock, flags); - -- netif_poll_disable(net_dev); -+ napi_disable(&np->napi); - vnic->poll_enabled = 1; -- netif_poll_enable(net_dev); -+ napi_enable(&np->napi); - - vnic_start_interrupts(vnic); - } -@@ -114,11 +117,11 @@ void vnic_stop_fastpath(netfront_accel_v - spin_unlock_irqrestore(&vnic->tx_lock, flags1); - - /* Must prevent polls and hold lock to modify poll_enabled */ -- netif_poll_disable(net_dev); -+ napi_disable(&np->napi); - spin_lock_irqsave(&vnic->irq_enabled_lock, flags1); - vnic->poll_enabled = 0; - spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1); -- netif_poll_enable(net_dev); -+ napi_enable(&np->napi); ++void show_regs(struct pt_regs *regs) ++{ ++ __show_registers(regs, 1); + show_trace(NULL, regs, ®s->esp); } +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:50.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:32:00.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/x86-64/kernel/process.c +- * + * Copyright (C) 1995 Linus Torvalds + * + * Pentium III FXSR, SSE support +@@ -41,6 +39,7 @@ + #include + #include + #include ++#include -@@ -324,8 +327,10 @@ static int vnic_process_localmac_msg(net - cuckoo_hash_mac_key key; + #include + #include +@@ -171,6 +170,9 @@ void cpu_idle (void) - if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) { -- DPRINTK("MAC has moved, could be local: " MAC_FMT "\n", -- MAC_ARG(msg->u.localmac.mac)); -+ DECLARE_MAC_BUF(buf); + if (__get_cpu_var(cpu_idle_state)) + __get_cpu_var(cpu_idle_state) = 0; + -+ DPRINTK("MAC has moved, could be local: %s\n", -+ print_mac(buf, msg->u.localmac.mac)); - key = cuckoo_mac_to_key(msg->u.localmac.mac); - spin_lock_irqsave(&vnic->table_lock, flags); - /* Try to remove it, not a big deal if not there */ -@@ -513,6 +518,8 @@ irqreturn_t netfront_accel_net_channel_i - - spin_lock_irqsave(&vnic->irq_enabled_lock, flags); - if (vnic->irq_enabled) { -+ struct netfront_info *np = netdev_priv(net_dev); ++ tick_nohz_stop_sched_tick(); + 
- netfront_accel_disable_net_interrupts(vnic); - vnic->irq_enabled = 0; - spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags); -@@ -525,7 +532,7 @@ irqreturn_t netfront_accel_net_channel_i - vnic->stats.event_count_since_irq; - vnic->stats.event_count_since_irq = 0; - #endif -- netif_rx_schedule(net_dev); -+ netif_rx_schedule(net_dev, &np->napi); - } - else { - spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:10:29.000000000 +0100 -@@ -643,8 +643,10 @@ netfront_accel_vi_tx_post(netfront_accel - (cuckoo_hash_key *)(&key), &value); + rmb(); + idle = xen_idle; /* no alternatives */ + if (cpu_is_offline(smp_processor_id())) +@@ -189,12 +191,17 @@ void cpu_idle (void) + __exit_idle(); + } - if (!try_fastpath) { -- VPRINTK("try fast path false for mac: " MAC_FMT "\n", -- MAC_ARG(skb->data)); -+ DECLARE_MAC_BUF(buf); -+ -+ VPRINTK("try fast path false for mac: %s\n", -+ print_mac(buf, skb->data)); - - return NETFRONT_ACCEL_STATUS_CANT; ++ tick_nohz_restart_sched_tick(); + preempt_enable_no_resched(); + schedule(); + preempt_disable(); } -@@ -770,9 +772,10 @@ static void netfront_accel_vi_rx_comple - if (compare_ether_addr(skb->data, vnic->mac)) { - struct iphdr *ip = (struct iphdr *)(skb->data + ETH_HLEN); - u16 port; -+ DECLARE_MAC_BUF(buf); - -- DPRINTK("%s: saw wrong MAC address " MAC_FMT "\n", -- __FUNCTION__, MAC_ARG(skb->data)); -+ DPRINTK("%s: saw wrong MAC address %s\n", -+ __FUNCTION__, print_mac(buf, skb->data)); - - if (ip->protocol == IPPROTO_TCP) { - struct tcphdr *tcp = (struct tcphdr *) ---- head-2010-05-25.orig/drivers/xen/sfc_netutil/accel_util.h 2008-02-20 09:32:49.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netutil/accel_util.h 2010-03-24 15:10:29.000000000 +0100 -@@ -63,9 +63,6 @@ - DPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \ - } while(0) - --#define MAC_FMT "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" --#define MAC_ARG(_mac) (_mac)[0], (_mac)[1], (_mac)[2], (_mac)[3], (_mac)[4], (_mac)[5] -- - #include - - /*! 
Map a set of pages from another domain ---- head-2010-05-25.orig/drivers/xen/usbback/usbback.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbback/usbback.c 2010-04-15 17:36:18.000000000 +0200 -@@ -86,6 +86,8 @@ typedef struct { - static pending_req_t *pending_reqs; - static struct list_head pending_free; - static DEFINE_SPINLOCK(pending_free_lock); -+static LIST_HEAD(pending_urb_free); -+static DEFINE_SPINLOCK(urb_free_lock); - static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq); - - #define USBBACK_INVALID_HANDLE (~0) -@@ -272,6 +274,15 @@ fail: - - static void usbbk_free_urb(struct urb *urb) - { -+ unsigned long flags; -+ -+ spin_lock_irqsave(&urb_free_lock, flags); -+ list_add(&urb->urb_list, &pending_urb_free); -+ spin_unlock_irqrestore(&urb_free_lock, flags); -+} -+ -+static void _usbbk_free_urb(struct urb *urb) -+{ - if (usb_pipecontrol(urb->pipe)) - usb_buffer_free(urb->dev, sizeof(struct usb_ctrlrequest), - urb->setup_packet, urb->setup_dma); -@@ -282,6 +293,29 @@ static void usbbk_free_urb(struct urb *u - usb_free_urb(urb); } -+static void usbbk_free_urbs(void) ++static void do_nothing(void *unused) +{ -+ unsigned long flags; -+ struct list_head tmp_list; -+ -+ if (list_empty(&pending_urb_free)) -+ return; -+ -+ INIT_LIST_HEAD(&tmp_list); -+ -+ spin_lock_irqsave(&urb_free_lock, flags); -+ list_splice_init(&pending_urb_free, &tmp_list); -+ spin_unlock_irqrestore(&urb_free_lock, flags); -+ -+ while (!list_empty(&tmp_list)) { -+ struct urb *next_urb = list_first_entry(&tmp_list, struct urb, -+ urb_list); -+ -+ list_del(&next_urb->urb_list); -+ _usbbk_free_urb(next_urb); -+ } +} + - static void usbbk_notify_work(usbif_t *usbif) + void cpu_idle_wait(void) { - usbif->waiting_reqs = 1; -@@ -1059,8 +1093,11 @@ int usbbk_schedule(void *arg) + unsigned int cpu, this_cpu = get_cpu(); +@@ -220,6 +227,13 @@ void cpu_idle_wait(void) + cpu_clear(cpu, map); + } + cpus_and(map, map, cpu_online_map); ++ /* ++ * We waited 1 sec, if a CPU still did not call idle ++ * it may be because it is in idle and not waking up ++ * because it has nothing to do. ++ * Give all the remaining CPUS a kick. ++ */ ++ smp_call_function_mask(map, do_nothing, 0, 0); + } while (!cpus_empty(map)); - if (usbbk_start_submit_urb(usbif)) - usbif->waiting_reqs = 1; -+ -+ usbbk_free_urbs(); - } + set_cpus_allowed(current, tmp); +@@ -527,7 +541,7 @@ static inline void __switch_to_xtra(stru + * + * Kprobes not supported here. Set the probe on schedule instead. 
+ */ +-__kprobes struct task_struct * ++struct task_struct * + __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + { + struct thread_struct *prev = &prev_p->thread, +--- head-2011-03-17.orig/arch/x86/kernel/quirks.c 2011-02-28 15:04:15.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/quirks.c 2011-02-28 15:11:55.000000000 +0100 +@@ -4,8 +4,6 @@ + #include + #include -+ usbbk_free_urbs(); - usbif->xenusbd = NULL; - usbif_put(usbif); +-#include +- + #if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI) ---- head-2010-05-25.orig/drivers/xen/usbfront/usbfront.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbfront/usbfront.h 2010-03-24 15:10:29.000000000 +0100 -@@ -82,6 +82,7 @@ struct urb_priv { - struct urb *urb; - int req_id; /* RING_REQUEST id for submitting */ - int unlink_req_id; /* RING_REQUEST id for unlinking */ -+ int status; - unsigned unlinked:1; /* dequeued marker */ - }; + static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) +@@ -65,6 +63,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN + #endif ---- head-2010-05-25.orig/drivers/xen/usbfront/usbfront-hcd.c 2009-10-15 11:45:41.000000000 +0200 -+++ head-2010-05-25/drivers/xen/usbfront/usbfront-hcd.c 2010-03-24 15:10:29.000000000 +0100 -@@ -114,7 +114,6 @@ static void xenhcd_stop(struct usb_hcd * - * non-error returns are promise to giveback the urb later - */ - static int xenhcd_urb_enqueue(struct usb_hcd *hcd, -- struct usb_host_endpoint *ep, - struct urb *urb, - gfp_t mem_flags) - { -@@ -130,6 +129,7 @@ static int xenhcd_urb_enqueue(struct usb - ret = -ENOMEM; - goto done; - } -+ urbp->status = 1; + #if defined(CONFIG_HPET_TIMER) ++#include ++ + unsigned long force_hpet_address; - ret = xenhcd_submit_urb(info, urbp); - if (ret != 0) -@@ -144,7 +144,7 @@ done: - * called as .urb_dequeue() - */ - static int xenhcd_urb_dequeue(struct usb_hcd *hcd, -- struct urb *urb) -+ struct urb *urb, int status) - { - struct usbfront_info *info = hcd_to_info(hcd); - struct urb_priv *urbp; -@@ -157,6 +157,7 @@ static int xenhcd_urb_dequeue(struct usb - if (!urbp) - goto done; + static enum { +--- head-2011-03-17.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -15,7 +15,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -27,11 +26,12 @@ + #include + #include + #include ++#include + #ifdef CONFIG_XEN + #include + #endif -+ urbp->status = status; - ret = xenhcd_unlink_urb(info, urbp); +-char x86_boot_params[BOOT_PARAM_SIZE] __initdata; ++struct boot_params __initdata boot_params; - done: ---- head-2010-05-25.orig/drivers/xen/usbfront/usbfront-q.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbfront/usbfront-q.c 2010-03-24 15:10:29.000000000 +0100 -@@ -236,7 +236,8 @@ __acquires(info->lock) - COUNT(info->stats.complete); - } - spin_unlock(&info->lock); -- usb_hcd_giveback_urb(info_to_hcd(info), urb); -+ usb_hcd_giveback_urb(info_to_hcd(info), urb, -+ urbp->status <= 0 ? 
urbp->status : urb->status); - spin_lock(&info->lock); - } + cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:10:29.000000000 +0100 -@@ -175,11 +175,9 @@ static int read_backend_details(struct x - } +@@ -159,8 +159,8 @@ static void switch_pt(void) - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) && (defined(CONFIG_XEN) || defined(MODULE)) --static int xenbus_uevent_frontend(struct device *dev, char **envp, -- int num_envp, char *buffer, int buffer_size) -+static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) + static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr) { - struct xenbus_device *xdev; -- int length = 0, i = 0; - - if (dev == NULL) - return -ENODEV; -@@ -188,12 +186,9 @@ static int xenbus_uevent_frontend(struct - return -ENODEV; - - /* stuff we want to pass to /sbin/hotplug */ -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -- "XENBUS_TYPE=%s", xdev->devicetype); -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -- "XENBUS_PATH=%s", xdev->nodename); -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -- "MODALIAS=xen:%s", xdev->devicetype); -+ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); -+ add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); -+ add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); - - return 0; +- asm volatile("lgdt %0" :: "m" (*gdt_descr)); +- asm volatile("lidt %0" :: "m" (idt_descr)); ++ load_gdt(gdt_descr); ++ load_idt(idt_descr); } ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:10:29.000000000 +0100 -@@ -60,8 +60,7 @@ - #include #endif --static int xenbus_uevent_backend(struct device *dev, char **envp, -- int num_envp, char *buffer, int buffer_size); -+static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env); - static int xenbus_probe_backend(const char *type, const char *domid); - - extern int read_otherend_details(struct xenbus_device *xendev, -@@ -128,13 +127,10 @@ static struct xen_bus_type xenbus_backen - }, - }; - --static int xenbus_uevent_backend(struct device *dev, char **envp, -- int num_envp, char *buffer, int buffer_size) -+static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) - { - struct xenbus_device *xdev; - struct xenbus_driver *drv; -- int i = 0; -- int length = 0; - - DPRINTK(""); - -@@ -146,27 +142,16 @@ static int xenbus_uevent_backend(struct - return -ENODEV; - - /* stuff we want to pass to /sbin/hotplug */ -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -- "XENBUS_TYPE=%s", xdev->devicetype); -+ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); +@@ -252,6 +252,14 @@ void __cpuinit check_efer(void) -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -- "XENBUS_PATH=%s", xdev->nodename); -+ add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); + unsigned long kernel_eflags; -- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, -- "XENBUS_BASE_PATH=%s", xenbus_backend.root); -- -- /* terminate, set to next free slot, shrink available space */ -- envp[i] = NULL; -- envp = &envp[i]; -- num_envp -= i; -- buffer = &buffer[length]; -- buffer_size -= length; -+ add_uevent_var(env, "XENBUS_BASE_PATH=%s", 
xenbus_backend.root); ++#ifndef CONFIG_X86_NO_TSS ++/* ++ * Copies of the original ist values from the tss are only accessed during ++ * debugging, no special alignment required. ++ */ ++DEFINE_PER_CPU(struct orig_ist, orig_ist); ++#endif ++ + /* + * cpu_init() initializes state that is per-CPU. Some data is already + * initialized (naturally) in the bootstrap process, such as the GDT +--- head-2011-03-17.orig/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/i386/kernel/setup.c +- * + * Copyright (C) 1995 Linus Torvalds + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 +@@ -70,6 +68,7 @@ + #include + #include + #include ++#include - if (dev->driver) { - drv = to_xenbus_driver(dev->driver); - if (drv && drv->uevent) -- return drv->uevent(xdev, envp, num_envp, buffer, -- buffer_size); -+ return drv->uevent(xdev, env); - } + #ifdef CONFIG_XEN + #include +@@ -80,13 +79,14 @@ static struct notifier_block xen_panic_b + xen_panic_event, NULL, 0 /* try to go last */ + }; - return 0; ---- head-2010-05-25.orig/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:10:29.000000000 +0100 -@@ -29,7 +29,6 @@ - #include - #include - #include --#include "../../../drivers/oprofile/cpu_buffer.h" - #include "../../../drivers/oprofile/event_buffer.h" +-int disable_pse __devinitdata = 0; ++int disable_pse __cpuinitdata = 0; - #define MAX_XENOPROF_SAMPLES 16 -@@ -142,8 +141,7 @@ static void xenoprof_add_pc(xenoprof_buf - if (xenoprof_is_escape(buf, tail) && - xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) { - tracing=1; -- oprofile_add_pc(ESCAPE_CODE, buf->event_log[tail].mode, -- CPU_TRACE_BEGIN); -+ oprofile_add_mode(buf->event_log[tail].mode); - if (!is_passive) - oprofile_samples++; - else ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/agp.h 2007-06-22 09:08:06.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:10:29.000000000 +0100 -@@ -1,20 +1,22 @@ --#ifndef AGP_H --#define AGP_H 1 -+#ifndef _ASM_X86_AGP_H -+#define _ASM_X86_AGP_H + /* + * Machine setup.. + */ + extern struct resource code_resource; + extern struct resource data_resource; ++extern struct resource bss_resource; - #include - #include - #include + /* cpu data as detected by the assembly code in head.S */ + struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; +@@ -98,9 +98,6 @@ unsigned long mmu_cr4_features; --/* -- * Functions to keep the agpgart mappings coherent with the MMU. -- * The GART gives the CPU a physical alias of pages in memory. The alias region is -- * mapped uncacheable. Make sure there are no conflicting mappings -- * with different cachability attributes for the same page. This avoids -- * data corruption on some CPUs. -+/* -+ * Functions to keep the agpgart mappings coherent with the MMU. The -+ * GART gives the CPU a physical alias of pages in memory. The alias -+ * region is mapped uncacheable. Make sure there are no conflicting -+ * mappings with different cachability attributes for the same -+ * page. This avoids data corruption on some CPUs. 
+ /* for MCA, but anyone else can use it if they want */ + unsigned int machine_id; +-#ifdef CONFIG_MCA +-EXPORT_SYMBOL(machine_id); +-#endif + unsigned int machine_submodel_id; + unsigned int BIOS_revision; + unsigned int mca_pentium_flag; +@@ -121,7 +118,7 @@ EXPORT_SYMBOL(apm_info); + struct edid_info edid_info; + EXPORT_SYMBOL_GPL(edid_info); + #ifndef CONFIG_XEN +-#define copy_edid() (edid_info = EDID_INFO) ++#define copy_edid() (edid_info = boot_params.edid_info) + #endif + struct ist_info ist_info; + #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \ +@@ -170,10 +167,11 @@ EXPORT_SYMBOL(edd); */ + static inline void copy_edd(void) + { +- memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature)); +- memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info)); +- edd.mbr_signature_nr = EDD_MBR_SIG_NR; +- edd.edd_info_nr = EDD_NR; ++ memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, ++ sizeof(edd.mbr_signature)); ++ memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); ++ edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; ++ edd.edd_info_nr = boot_params.eddbuf_entries; + } + #endif + #else +@@ -418,6 +416,53 @@ extern unsigned long __init setup_memory + extern void zone_sizes_init(void); + #endif /* !CONFIG_NEED_MULTIPLE_NODES */ --/* Caller's responsibility to call global_flush_tlb() for -- * performance reasons */ -+/* -+ * Caller's responsibility to call global_flush_tlb() for performance -+ * reasons -+ */ - #define map_page_into_agp(page) ( \ - xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \ - ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)) -@@ -24,9 +26,11 @@ - change_page_attr(page, 1, PAGE_KERNEL)) - #define flush_agp_mappings() global_flush_tlb() - --/* Could use CLFLUSH here if the cpu supports it. But then it would -- need to be called for each cacheline of the whole page so it may not be -- worth it. Would need a page for it. */ -+/* -+ * Could use CLFLUSH here if the cpu supports it. But then it would -+ * need to be called for each cacheline of the whole page so it may -+ * not be worth it. Would need a page for it. -+ */ - #define flush_agp_cache() wbinvd() - - /* Convert a physical address to an address suitable for the GART. */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "desc_32.h" -+#else -+# include "desc_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc_64.h 2010-03-24 15:10:29.000000000 +0100 -@@ -34,6 +34,18 @@ static inline void clear_LDT(void) - put_cpu(); - } - -+#ifndef CONFIG_X86_NO_TSS -+static inline unsigned long __store_tr(void) ++static inline unsigned long long get_total_mem(void) +{ -+ unsigned long tr; -+ -+ asm volatile ("str %w0":"=r" (tr)); -+ return tr; -+} ++ unsigned long long total; + -+#define store_tr(tr) (tr) = __store_tr() ++ total = max_low_pfn - min_low_pfn; ++#ifdef CONFIG_HIGHMEM ++ total += highend_pfn - highstart_pfn; +#endif + - /* - * This is the ldt that every process will get unless we need - * something other than this. 
-@@ -47,6 +59,18 @@ extern struct desc_ptr cpu_gdt_descr[]; - /* the cpu gdt accessor */ - #define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address) - -+#ifndef CONFIG_XEN -+static inline void load_gdt(const struct desc_ptr *ptr) -+{ -+ asm volatile("lgdt %w0"::"m" (*ptr)); ++ return total << PAGE_SHIFT; +} + -+static inline void store_gdt(struct desc_ptr *ptr) ++#ifdef CONFIG_KEXEC ++#ifndef CONFIG_XEN ++static void __init reserve_crashkernel(void) +{ -+ asm("sgdt %w0":"=m" (*ptr)); -+} -+#endif -+ - static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist) - { - struct gate_struct s; -@@ -87,6 +111,16 @@ static inline void set_system_gate_ist(i - { - _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist); - } ++ unsigned long long total_mem; ++ unsigned long long crash_size, crash_base; ++ int ret; + -+static inline void load_idt(const struct desc_ptr *ptr) -+{ -+ asm volatile("lidt %w0"::"m" (*ptr)); -+} ++ total_mem = get_total_mem(); + -+static inline void store_idt(struct desc_ptr *dtr) -+{ -+ asm("sidt %w0":"=m" (*dtr)); ++ ret = parse_crashkernel(boot_command_line, total_mem, ++ &crash_size, &crash_base); ++ if (ret == 0 && crash_size > 0) { ++ if (crash_base > 0) { ++ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " ++ "for crashkernel (System RAM: %ldMB)\n", ++ (unsigned long)(crash_size >> 20), ++ (unsigned long)(crash_base >> 20), ++ (unsigned long)(total_mem >> 20)); ++ crashk_res.start = crash_base; ++ crashk_res.end = crash_base + crash_size - 1; ++ reserve_bootmem(crash_base, crash_size); ++ } else ++ printk(KERN_INFO "crashkernel reservation failed - " ++ "you have to specify a base address\n"); ++ } +} - #endif - - static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/dma-mapping.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "dma-mapping_32.h" -+#else -+# include "dma-mapping_64.h" -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "fixmap_32.h" +#else -+# include "fixmap_64.h" -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypercall.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,420 @@ -+/****************************************************************************** -+ * hypercall.h -+ * -+ * Linux-specific hypervisor handling. 
-+ * -+ * Copyright (c) 2002-2004, K A Fraser -+ * -+ * 64-bit updates: -+ * Benjamin Liu -+ * Jun Nakajima -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License version 2 -+ * as published by the Free Software Foundation; or, when distributed -+ * separately from the Linux kernel or incorporated into other -+ * software packages, subject to the following license: -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this source file (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, copy, modify, -+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, -+ * and to permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. -+ */ -+ -+#ifndef __HYPERCALL_H__ -+#define __HYPERCALL_H__ -+ -+#ifndef __HYPERVISOR_H__ -+# error "please don't include this file directly" -+#endif -+ -+#if CONFIG_XEN_COMPAT <= 0x030002 -+# include /* memcpy() */ ++#define reserve_crashkernel xen_machine_kexec_setup_resources +#endif -+ -+#ifdef CONFIG_XEN -+#define HYPERCALL_ASM_OPERAND "%c" -+#define HYPERCALL_LOCATION(op) (hypercall_page + (op) * 32) -+#define HYPERCALL_C_OPERAND(name) "i" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) +#else -+#define HYPERCALL_ASM_OPERAND "*%" -+#define HYPERCALL_LOCATION(op) (hypercall_stubs + (op) * 32) -+#define HYPERCALL_C_OPERAND(name) "g" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) ++static inline void __init reserve_crashkernel(void) ++{} +#endif + -+#define HYPERCALL_ARG(arg, n) \ -+ register typeof((arg)+0) __arg##n asm(HYPERCALL_arg##n) = (arg) -+ -+#define _hypercall0(type, name) \ -+({ \ -+ type __res; \ -+ asm volatile ( \ -+ "call " HYPERCALL_ASM_OPERAND "1" \ -+ : "=a" (__res) \ -+ : HYPERCALL_C_OPERAND(name) \ -+ : "memory" ); \ -+ __res; \ -+}) -+ -+#define _hypercall1(type, name, arg) \ -+({ \ -+ type __res; \ -+ HYPERCALL_ARG(arg, 1); \ -+ asm volatile ( \ -+ "call " HYPERCALL_ASM_OPERAND "2" \ -+ : "=a" (__res), "+r" (__arg1) \ -+ : HYPERCALL_C_OPERAND(name) \ -+ : "memory" ); \ -+ __res; \ -+}) -+ -+#define _hypercall2(type, name, a1, a2) \ -+({ \ -+ type __res; \ -+ HYPERCALL_ARG(a1, 1); \ -+ HYPERCALL_ARG(a2, 2); \ -+ asm volatile ( \ -+ "call " HYPERCALL_ASM_OPERAND "3" \ -+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2) \ -+ : HYPERCALL_C_OPERAND(name) \ -+ : "memory" ); \ -+ __res; \ -+}) -+ -+#define _hypercall3(type, name, a1, a2, a3) \ -+({ \ -+ type __res; \ -+ HYPERCALL_ARG(a1, 1); \ -+ HYPERCALL_ARG(a2, 2); \ -+ HYPERCALL_ARG(a3, 3); \ -+ asm volatile ( \ -+ "call " HYPERCALL_ASM_OPERAND "4" \ -+ : "=a" (__res), "+r" (__arg1), \ -+ "+r" (__arg2), "+r" (__arg3) \ -+ : HYPERCALL_C_OPERAND(name) \ -+ : "memory" ); \ -+ __res; \ -+}) -+ -+#define _hypercall4(type, name, a1, a2, 
a3, a4) \ -+({ \ -+ type __res; \ -+ HYPERCALL_ARG(a1, 1); \ -+ HYPERCALL_ARG(a2, 2); \ -+ HYPERCALL_ARG(a3, 3); \ -+ HYPERCALL_ARG(a4, 4); \ -+ asm volatile ( \ -+ "call " HYPERCALL_ASM_OPERAND "5" \ -+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ -+ "+r" (__arg3), "+r" (__arg4) \ -+ : HYPERCALL_C_OPERAND(name) \ -+ : "memory" ); \ -+ __res; \ -+}) + void __init setup_bootmem_allocator(void) + { + unsigned long bootmap_size; +@@ -473,30 +518,25 @@ void __init setup_bootmem_allocator(void + + #ifdef CONFIG_BLK_DEV_INITRD + if (xen_start_info->mod_start) { +- if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { +- /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/ +- initrd_start = INITRD_START + PAGE_OFFSET; +- initrd_end = initrd_start+INITRD_SIZE; ++ unsigned long ramdisk_image = __pa(xen_start_info->mod_start); ++ unsigned long ramdisk_size = xen_start_info->mod_len; ++ unsigned long ramdisk_end = ramdisk_image + ramdisk_size; ++ unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT; + -+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ -+({ \ -+ type __res; \ -+ HYPERCALL_ARG(a1, 1); \ -+ HYPERCALL_ARG(a2, 2); \ -+ HYPERCALL_ARG(a3, 3); \ -+ HYPERCALL_ARG(a4, 4); \ -+ HYPERCALL_ARG(a5, 5); \ -+ asm volatile ( \ -+ "call " HYPERCALL_ASM_OPERAND "6" \ -+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ -+ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ -+ : HYPERCALL_C_OPERAND(name) \ -+ : "memory" ); \ -+ __res; \ -+}) ++ if (ramdisk_end <= end_of_lowmem) { ++ /*reserve_bootmem(ramdisk_image, ramdisk_size);*/ ++ initrd_start = ramdisk_image + PAGE_OFFSET; ++ initrd_end = initrd_start+ramdisk_size; + initrd_below_start_ok = 1; +- } +- else { ++ } else { + printk(KERN_ERR "initrd extends beyond end of memory " +- "(0x%08lx > 0x%08lx)\ndisabling initrd\n", +- INITRD_START + INITRD_SIZE, +- max_low_pfn << PAGE_SHIFT); ++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n", ++ ramdisk_end, end_of_lowmem); + initrd_start = 0; + } + } + #endif +-#ifdef CONFIG_KEXEC +-#ifdef CONFIG_XEN +- xen_machine_kexec_setup_resources(); +-#else +- if (crashk_res.start != crashk_res.end) +- reserve_bootmem(crashk_res.start, +- crashk_res.end - crashk_res.start + 1); +-#endif +-#endif ++ reserve_crashkernel(); + } + + /* +@@ -574,7 +614,8 @@ void __init setup_arch(char **cmdline_p) + * the system table is valid. If not, then initialize normally. + */ + #ifdef CONFIG_EFI +- if ((LOADER_TYPE == 0x50) && EFI_SYSTAB) ++ if ((boot_params.hdr.type_of_loader == 0x50) && ++ boot_params.efi_info.efi_systab) + efi_enabled = 1; + #endif + +@@ -582,18 +623,18 @@ void __init setup_arch(char **cmdline_p) + properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd. 
+ */ + ROOT_DEV = MKDEV(UNNAMED_MAJOR,0); +- screen_info = SCREEN_INFO; ++ screen_info = boot_params.screen_info; + copy_edid(); +- apm_info.bios = APM_BIOS_INFO; +- ist_info = IST_INFO; +- saved_videomode = VIDEO_MODE; +- if( SYS_DESC_TABLE.length != 0 ) { +- set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2); +- machine_id = SYS_DESC_TABLE.table[0]; +- machine_submodel_id = SYS_DESC_TABLE.table[1]; +- BIOS_revision = SYS_DESC_TABLE.table[2]; ++ apm_info.bios = boot_params.apm_bios_info; ++ ist_info = boot_params.ist_info; ++ saved_videomode = boot_params.hdr.vid_mode; ++ if( boot_params.sys_desc_table.length != 0 ) { ++ set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2); ++ machine_id = boot_params.sys_desc_table.table[0]; ++ machine_submodel_id = boot_params.sys_desc_table.table[1]; ++ BIOS_revision = boot_params.sys_desc_table.table[2]; + } +- bootloader_type = LOADER_TYPE; ++ bootloader_type = boot_params.hdr.type_of_loader; + + if (is_initial_xendomain()) { + const struct dom0_vga_console_info *info = +@@ -608,9 +649,9 @@ void __init setup_arch(char **cmdline_p) + screen_info.orig_video_isVGA = 0; + + #ifdef CONFIG_BLK_DEV_RAM +- rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; +- rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); +- rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); ++ rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; ++ rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); ++ rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); + #endif + + ARCH_SETUP +@@ -623,7 +664,7 @@ void __init setup_arch(char **cmdline_p) + + copy_edd(); + +- if (!MOUNT_ROOT_RDONLY) ++ if (!boot_params.hdr.root_flags) + root_mountflags &= ~MS_RDONLY; + init_mm.start_code = (unsigned long) _text; + init_mm.end_code = (unsigned long) _etext; +@@ -635,6 +676,8 @@ void __init setup_arch(char **cmdline_p) + code_resource.end = virt_to_phys(_etext)-1; + data_resource.start = virt_to_phys(_etext); + data_resource.end = virt_to_phys(_edata)-1; ++ bss_resource.start = virt_to_phys(&__bss_start); ++ bss_resource.end = virt_to_phys(&__bss_stop)-1; + + if ((i = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE) + i = COMMAND_LINE_SIZE; +@@ -663,7 +706,7 @@ void __init setup_arch(char **cmdline_p) + /* + * NOTE: before this point _nobody_ is allowed to allocate + * any memory using the bootmem allocator. Although the +- * alloctor is now initialised only the first 8Mb of the kernel ++ * allocator is now initialised only the first 8Mb of the kernel + * virtual address space has been mapped. All allocations before + * paging_init() has completed must use the alloc_bootmem_low_pages() + * variant (which allocates DMA'able memory) and care must be taken +@@ -786,10 +829,8 @@ void __init setup_arch(char **cmdline_p) + acpi_boot_table_init(); + #endif + +-#ifdef CONFIG_PCI +-#ifdef CONFIG_X86_IO_APIC +- check_acpi_pci(); /* Checks more than just ACPI actually */ +-#endif ++#if defined(CONFIG_PCI) && !defined(CONFIG_XEN) ++ early_quirks(); + #endif + + #ifdef CONFIG_ACPI +--- head-2011-03-17.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,10 +1,5 @@ + /* +- * linux/arch/x86-64/kernel/setup.c +- * + * Copyright (C) 1995 Linus Torvalds +- * +- * Nov 2001 Dave Jones +- * Forked from i386 setup code. 
+ */ + + /* +@@ -57,13 +52,13 @@ + #include + #include + #include +-#include + #include + #include + #include + #include + #include + #include ++#include + #ifdef CONFIG_XEN + #include + #include +@@ -180,6 +175,12 @@ struct resource code_resource = { + .end = 0, + .flags = IORESOURCE_RAM, + }; ++struct resource bss_resource = { ++ .name = "Kernel bss", ++ .start = 0, ++ .end = 0, ++ .flags = IORESOURCE_RAM, ++}; + + #ifdef CONFIG_PROC_VMCORE + /* elfcorehdr= specifies the location of elf core header +@@ -232,10 +233,11 @@ EXPORT_SYMBOL(edd); + */ + static inline void copy_edd(void) + { +- memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature)); +- memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info)); +- edd.mbr_signature_nr = EDD_MBR_SIG_NR; +- edd.edd_info_nr = EDD_NR; ++ memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, ++ sizeof(edd.mbr_signature)); ++ memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); ++ edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; ++ edd.edd_info_nr = boot_params.eddbuf_entries; + } + #endif + #else +@@ -244,6 +246,41 @@ static inline void copy_edd(void) + } + #endif + ++#ifdef CONFIG_KEXEC ++#ifndef CONFIG_XEN ++static void __init reserve_crashkernel(void) ++{ ++ unsigned long long free_mem; ++ unsigned long long crash_size, crash_base; ++ int ret; + -+#define _hypercall(type, op, a1, a2, a3, a4, a5) \ -+({ \ -+ type __res; \ -+ HYPERCALL_ARG(a1, 1); \ -+ HYPERCALL_ARG(a2, 2); \ -+ HYPERCALL_ARG(a3, 3); \ -+ HYPERCALL_ARG(a4, 4); \ -+ HYPERCALL_ARG(a5, 5); \ -+ asm volatile ( \ -+ "call *%6" \ -+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ -+ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ -+ : "g" (HYPERCALL_LOCATION(op)) \ -+ : "memory" ); \ -+ __res; \ -+}) ++ free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT; + -+#ifdef CONFIG_X86_32 -+# include "hypercall_32.h" ++ ret = parse_crashkernel(boot_command_line, free_mem, ++ &crash_size, &crash_base); ++ if (ret == 0 && crash_size) { ++ if (crash_base > 0) { ++ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " ++ "for crashkernel (System RAM: %ldMB)\n", ++ (unsigned long)(crash_size >> 20), ++ (unsigned long)(crash_base >> 20), ++ (unsigned long)(free_mem >> 20)); ++ crashk_res.start = crash_base; ++ crashk_res.end = crash_base + crash_size - 1; ++ reserve_bootmem(crash_base, crash_size); ++ } else ++ printk(KERN_INFO "crashkernel reservation failed - " ++ "you have to specify a base address\n"); ++ } ++} +#else -+# include "hypercall_64.h" ++#define reserve_crashkernel xen_machine_kexec_setup_resources ++#endif ++#else ++static inline void __init reserve_crashkernel(void) ++{} +#endif + -+static inline int __must_check -+HYPERVISOR_set_trap_table( -+ const trap_info_t *table) -+{ -+ return _hypercall1(int, set_trap_table, table); -+} -+ -+static inline int __must_check -+HYPERVISOR_mmu_update( -+ mmu_update_t *req, unsigned int count, unsigned int *success_count, -+ domid_t domid) -+{ -+ if (arch_use_lazy_mmu_mode()) -+ return xen_multi_mmu_update(req, count, success_count, domid); -+ return _hypercall4(int, mmu_update, req, count, success_count, domid); -+} -+ -+static inline int __must_check -+HYPERVISOR_mmuext_op( -+ struct mmuext_op *op, unsigned int count, unsigned int *success_count, -+ domid_t domid) -+{ -+ if (arch_use_lazy_mmu_mode()) -+ return xen_multi_mmuext_op(op, count, success_count, domid); -+ return _hypercall4(int, mmuext_op, op, count, success_count, domid); -+} -+ -+static inline int __must_check 
-+HYPERVISOR_set_gdt( -+ unsigned long *frame_list, unsigned int entries) -+{ -+ return _hypercall2(int, set_gdt, frame_list, entries); -+} -+ -+static inline int __must_check -+HYPERVISOR_stack_switch( -+ unsigned long ss, unsigned long esp) -+{ -+ return _hypercall2(int, stack_switch, ss, esp); -+} + #ifndef CONFIG_XEN + #define EBDA_ADDR_POINTER 0x40E + +@@ -284,7 +321,7 @@ void __init setup_arch(char **cmdline_p) + atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); + + ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); +- screen_info = SCREEN_INFO; ++ screen_info = boot_params.screen_info; + + if (is_initial_xendomain()) { + const struct dom0_vga_console_info *info = +@@ -307,22 +344,22 @@ void __init setup_arch(char **cmdline_p) + #else + printk(KERN_INFO "Command line: %s\n", boot_command_line); + +- ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); +- screen_info = SCREEN_INFO; +- edid_info = EDID_INFO; ++ ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); ++ screen_info = boot_params.screen_info; ++ edid_info = boot_params.edid_info; + #endif /* !CONFIG_XEN */ +- saved_video_mode = SAVED_VIDEO_MODE; +- bootloader_type = LOADER_TYPE; ++ saved_video_mode = boot_params.hdr.vid_mode; ++ bootloader_type = boot_params.hdr.type_of_loader; + + #ifdef CONFIG_BLK_DEV_RAM +- rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; +- rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); +- rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); ++ rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; ++ rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); ++ rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); + #endif + setup_memory_region(); + copy_edd(); + +- if (!MOUNT_ROOT_RDONLY) ++ if (!boot_params.hdr.root_flags) + root_mountflags &= ~MS_RDONLY; + init_mm.start_code = (unsigned long) &_text; + init_mm.end_code = (unsigned long) &_etext; +@@ -333,6 +370,8 @@ void __init setup_arch(char **cmdline_p) + code_resource.end = virt_to_phys(&_etext)-1; + data_resource.start = virt_to_phys(&_etext); + data_resource.end = virt_to_phys(&_edata)-1; ++ bss_resource.start = virt_to_phys(&__bss_start); ++ bss_resource.end = virt_to_phys(&__bss_stop)-1; + + early_identify_cpu(&boot_cpu_data); + +@@ -360,6 +399,11 @@ void __init setup_arch(char **cmdline_p) + if (is_initial_xendomain()) + dmi_scan_machine(); + ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) ++ /* setup to use the static apicid table during kernel startup */ ++ x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init; ++#endif + -+static inline int -+HYPERVISOR_fpu_taskswitch( -+ int set) -+{ -+ return _hypercall1(int, fpu_taskswitch, set); -+} + /* How many end-of-memory variables you have, grandma! 
*/ + max_low_pfn = end_pfn; + max_pfn = end_pfn; +@@ -424,52 +468,37 @@ void __init setup_arch(char **cmdline_p) + */ + acpi_reserve_bootmem(); + #endif +-#ifdef CONFIG_XEN + #ifdef CONFIG_BLK_DEV_INITRD ++#ifdef CONFIG_XEN + if (xen_start_info->mod_start) { +- if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) { +- /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/ +- initrd_start = INITRD_START + PAGE_OFFSET; +- initrd_end = initrd_start+INITRD_SIZE; ++ unsigned long ramdisk_image = __pa(xen_start_info->mod_start); ++ unsigned long ramdisk_size = xen_start_info->mod_len; ++#else ++ if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { ++ unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; ++ unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; ++#endif ++ unsigned long ramdisk_end = ramdisk_image + ramdisk_size; ++ unsigned long end_of_mem = end_pfn << PAGE_SHIFT; + -+#if CONFIG_XEN_COMPAT <= 0x030002 -+static inline int __must_check -+HYPERVISOR_sched_op_compat( -+ int cmd, unsigned long arg) -+{ -+ return _hypercall2(int, sched_op_compat, cmd, arg); -+} ++ if (ramdisk_end <= end_of_mem) { ++#ifndef CONFIG_XEN ++ reserve_bootmem_generic(ramdisk_image, ramdisk_size); +#endif ++ initrd_start = ramdisk_image + PAGE_OFFSET; ++ initrd_end = initrd_start+ramdisk_size; ++#ifdef CONFIG_XEN + initrd_below_start_ok = 1; +- } else { +- printk(KERN_ERR "initrd extends beyond end of memory " +- "(0x%08lx > 0x%08lx)\ndisabling initrd\n", +- (unsigned long)(INITRD_START + INITRD_SIZE), +- (unsigned long)(end_pfn << PAGE_SHIFT)); +- initrd_start = 0; +- } +- } + #endif +-#else /* CONFIG_XEN */ +-#ifdef CONFIG_BLK_DEV_INITRD +- if (LOADER_TYPE && INITRD_START) { +- if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) { +- reserve_bootmem_generic(INITRD_START, INITRD_SIZE); +- initrd_start = INITRD_START + PAGE_OFFSET; +- initrd_end = initrd_start+INITRD_SIZE; +- } +- else { ++ } else { + printk(KERN_ERR "initrd extends beyond end of memory " +- "(0x%08lx > 0x%08lx)\ndisabling initrd\n", +- (unsigned long)(INITRD_START + INITRD_SIZE), +- (unsigned long)(end_pfn << PAGE_SHIFT)); ++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n", ++ ramdisk_end, end_of_mem); + initrd_start = 0; + } + } + #endif +-#endif /* !CONFIG_XEN */ +-#ifdef CONFIG_KEXEC +-#ifdef CONFIG_XEN +- xen_machine_kexec_setup_resources(); +-#else +- if (crashk_res.start != crashk_res.end) { +- reserve_bootmem_generic(crashk_res.start, +- crashk_res.end - crashk_res.start + 1); +- } +-#endif +-#endif +- ++ reserve_crashkernel(); + paging_init(); + #ifdef CONFIG_X86_LOCAL_APIC + /* +@@ -784,7 +813,7 @@ static void __init amd_detect_cmp(struct + but in the same order as the HT nodeids. + If that doesn't result in a usable node fall back to the + path for the previous case. 
*/ +- int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits); ++ int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits); + if (ht_nodeid >= 0 && + apicid_to_node[ht_nodeid] != NUMA_NO_NODE) + node = apicid_to_node[ht_nodeid]; +@@ -799,6 +828,39 @@ static void __init amd_detect_cmp(struct + #endif + } + ++#define ENABLE_C1E_MASK 0x18000000 ++#define CPUID_PROCESSOR_SIGNATURE 1 ++#define CPUID_XFAM 0x0ff00000 ++#define CPUID_XFAM_K8 0x00000000 ++#define CPUID_XFAM_10H 0x00100000 ++#define CPUID_XFAM_11H 0x00200000 ++#define CPUID_XMOD 0x000f0000 ++#define CPUID_XMOD_REV_F 0x00040000 + -+static inline int __must_check -+HYPERVISOR_sched_op( -+ int cmd, void *arg) ++#ifndef CONFIG_XEN ++/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */ ++static __cpuinit int amd_apic_timer_broken(void) +{ -+ return _hypercall2(int, sched_op, cmd, arg); ++ u32 lo, hi; ++ u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); ++ switch (eax & CPUID_XFAM) { ++ case CPUID_XFAM_K8: ++ if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F) ++ break; ++ case CPUID_XFAM_10H: ++ case CPUID_XFAM_11H: ++ rdmsr(MSR_K8_ENABLE_C1E, lo, hi); ++ if (lo & ENABLE_C1E_MASK) ++ return 1; ++ break; ++ default: ++ /* err on the side of caution */ ++ return 1; ++ } ++ return 0; +} ++#endif + -+static inline int __must_check -+HYPERVISOR_platform_op( -+ struct xen_platform_op *platform_op) -+{ -+ platform_op->interface_version = XENPF_INTERFACE_VERSION; -+ return _hypercall1(int, platform_op, platform_op); -+} + static void __cpuinit init_amd(struct cpuinfo_x86 *c) + { + unsigned level; +@@ -828,7 +890,7 @@ static void __cpuinit init_amd(struct cp + level = cpuid_eax(1); + if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) + set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); +- if (c->x86 == 0x10) ++ if (c->x86 == 0x10 || c->x86 == 0x11) + set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); + + /* Enable workaround for FXSAVE leak */ +@@ -870,6 +932,11 @@ static void __cpuinit init_amd(struct cp + /* Family 10 doesn't support C states in MWAIT so don't use it */ + if (c->x86 == 0x10 && !force_mwait) + clear_bit(X86_FEATURE_MWAIT, &c->x86_capability); + -+struct xen_mc; -+static inline int __must_check -+HYPERVISOR_mca( -+ struct xen_mc *mc_op) -+{ -+ mc_op->interface_version = XEN_MCA_INTERFACE_VERSION; -+ return _hypercall1(int, mca, mc_op); -+} -+ -+static inline int __must_check -+HYPERVISOR_set_debugreg( -+ unsigned int reg, unsigned long value) -+{ -+ return _hypercall2(int, set_debugreg, reg, value); -+} -+ -+static inline unsigned long __must_check -+HYPERVISOR_get_debugreg( -+ unsigned int reg) -+{ -+ return _hypercall1(unsigned long, get_debugreg, reg); -+} -+ -+static inline int __must_check -+HYPERVISOR_memory_op( -+ unsigned int cmd, void *arg) -+{ -+ if (arch_use_lazy_mmu_mode()) -+ xen_multicall_flush(false); -+ return _hypercall2(int, memory_op, cmd, arg); -+} -+ -+static inline int __must_check -+HYPERVISOR_multicall( -+ multicall_entry_t *call_list, unsigned int nr_calls) -+{ -+ return _hypercall2(int, multicall, call_list, nr_calls); -+} -+ -+static inline int __must_check -+HYPERVISOR_event_channel_op( -+ int cmd, void *arg) -+{ -+ int rc = _hypercall2(int, event_channel_op, cmd, arg); -+ -+#if CONFIG_XEN_COMPAT <= 0x030002 -+ if (unlikely(rc == -ENOSYS)) { -+ struct evtchn_op op; -+ op.cmd = cmd; -+ memcpy(&op.u, arg, sizeof(op.u)); -+ rc = _hypercall1(int, event_channel_op_compat, &op); -+ memcpy(arg, &op.u, sizeof(op.u)); -+ } -+#endif -+ -+ return rc; -+} -+ -+static 
inline int __must_check -+HYPERVISOR_xen_version( -+ int cmd, void *arg) -+{ -+ return _hypercall2(int, xen_version, cmd, arg); -+} -+ -+static inline int __must_check -+HYPERVISOR_console_io( -+ int cmd, unsigned int count, char *str) -+{ -+ return _hypercall3(int, console_io, cmd, count, str); -+} -+ -+static inline int __must_check -+HYPERVISOR_physdev_op( -+ int cmd, void *arg) -+{ -+ int rc = _hypercall2(int, physdev_op, cmd, arg); -+ -+#if CONFIG_XEN_COMPAT <= 0x030002 -+ if (unlikely(rc == -ENOSYS)) { -+ struct physdev_op op; -+ op.cmd = cmd; -+ memcpy(&op.u, arg, sizeof(op.u)); -+ rc = _hypercall1(int, physdev_op_compat, &op); -+ memcpy(arg, &op.u, sizeof(op.u)); -+ } ++#ifndef CONFIG_XEN ++ if (amd_apic_timer_broken()) ++ disable_apic_timer = 1; +#endif -+ -+ return rc; -+} -+ -+static inline int __must_check -+HYPERVISOR_grant_table_op( -+ unsigned int cmd, void *uop, unsigned int count) -+{ -+ if (arch_use_lazy_mmu_mode()) -+ xen_multicall_flush(false); -+ return _hypercall3(int, grant_table_op, cmd, uop, count); -+} -+ -+static inline int __must_check -+HYPERVISOR_vm_assist( -+ unsigned int cmd, unsigned int type) -+{ -+ return _hypercall2(int, vm_assist, cmd, type); -+} -+ -+static inline int __must_check -+HYPERVISOR_vcpu_op( -+ int cmd, unsigned int vcpuid, void *extra_args) -+{ -+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); -+} -+ -+static inline int __must_check -+HYPERVISOR_suspend( -+ unsigned long srec) -+{ -+ struct sched_shutdown sched_shutdown = { -+ .reason = SHUTDOWN_suspend -+ }; -+ -+ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, -+ &sched_shutdown, srec); -+ -+#if CONFIG_XEN_COMPAT <= 0x030002 -+ if (rc == -ENOSYS) -+ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, -+ SHUTDOWN_suspend, srec); + } + + static void __cpuinit detect_ht(struct cpuinfo_x86 *c) +@@ -919,6 +986,7 @@ out: + #endif + } + ++#ifndef CONFIG_XEN + /* + * find out the number of processor cores on the die + */ +@@ -936,6 +1004,7 @@ static int __cpuinit intel_num_cpu_cores + else + return 1; + } +#endif -+ -+ return rc; -+} -+ -+#if CONFIG_XEN_COMPAT <= 0x030002 -+static inline int -+HYPERVISOR_nmi_op( -+ unsigned long op, void *arg) -+{ -+ return _hypercall2(int, nmi_op, op, arg); -+} + + static void srat_detect_node(void) + { +@@ -1000,7 +1069,9 @@ static void __cpuinit init_intel(struct + set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability); + else + clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability); ++#ifndef CONFIG_XEN + c->x86_max_cores = intel_num_cpu_cores(c); +#endif -+ + + srat_detect_node(); + } +@@ -1038,7 +1109,9 @@ void __cpuinit early_identify_cpu(struct + c->x86_model_id[0] = '\0'; /* Unset */ + c->x86_clflush_size = 64; + c->x86_cache_alignment = c->x86_clflush_size; +#ifndef CONFIG_XEN -+static inline unsigned long __must_check -+HYPERVISOR_hvm_op( -+ int op, void *arg) -+{ -+ return _hypercall2(unsigned long, hvm_op, op, arg); -+} + c->x86_max_cores = 1; +#endif -+ -+static inline int __must_check -+HYPERVISOR_callback_op( -+ int cmd, const void *arg) -+{ -+ return _hypercall2(int, callback_op, cmd, arg); -+} -+ -+static inline int __must_check -+HYPERVISOR_xenoprof_op( -+ int op, void *arg) -+{ -+ return _hypercall2(int, xenoprof_op, op, arg); -+} -+ -+static inline int __must_check -+HYPERVISOR_kexec_op( -+ unsigned long op, void *args) -+{ -+ return _hypercall2(int, kexec_op, op, args); -+} -+ -+static inline int __must_check -+HYPERVISOR_tmem_op( -+ struct tmem_op *op) -+{ -+ return _hypercall1(int, tmem_op, op); -+} -+ -+#endif /* 
__HYPERCALL_H__ */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypercall_32.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypercall_32.h 2010-03-24 15:10:29.000000000 +0100 -@@ -1,191 +1,10 @@ --/****************************************************************************** -- * hypercall.h -- * -- * Linux-specific hypervisor handling. -- * -- * Copyright (c) 2002-2004, K A Fraser -- * -- * This program is free software; you can redistribute it and/or -- * modify it under the terms of the GNU General Public License version 2 -- * as published by the Free Software Foundation; or, when distributed -- * separately from the Linux kernel or incorporated into other -- * software packages, subject to the following license: -- * -- * Permission is hereby granted, free of charge, to any person obtaining a copy -- * of this source file (the "Software"), to deal in the Software without -- * restriction, including without limitation the rights to use, copy, modify, -- * merge, publish, distribute, sublicense, and/or sell copies of the Software, -- * and to permit persons to whom the Software is furnished to do so, subject to -- * the following conditions: -- * -- * The above copyright notice and this permission notice shall be included in -- * all copies or substantial portions of the Software. -- * -- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -- * IN THE SOFTWARE. -- */ -- --#ifndef __HYPERCALL_H__ --#define __HYPERCALL_H__ -- --#include /* memcpy() */ --#include -- --#ifndef __HYPERVISOR_H__ --# error "please don't include this file directly" --#endif -- --#ifdef CONFIG_XEN --#define HYPERCALL_STR(name) \ -- "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" --#else --#define HYPERCALL_STR(name) \ -- "mov hypercall_stubs,%%eax; " \ -- "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ -- "call *%%eax" --#endif -- --#define _hypercall0(type, name) \ --({ \ -- type __res; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res) \ -- : \ -- : "memory" ); \ -- __res; \ --}) -- --#define _hypercall1(type, name, a1) \ --({ \ -- type __res; \ -- long __ign1; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=b" (__ign1) \ -- : "1" ((long)(a1)) \ -- : "memory" ); \ -- __res; \ --}) + c->extended_cpuid_level = 0; + memset(&c->x86_capability, 0, sizeof c->x86_capability); + +@@ -1182,6 +1255,7 @@ void __cpuinit print_cpu_info(struct cpu + static int show_cpuinfo(struct seq_file *m, void *v) + { + struct cpuinfo_x86 *c = v; ++ int cpu = 0; + + /* + * These flag bits must match the definitions in . +@@ -1191,7 +1265,7 @@ static int show_cpuinfo(struct seq_file + * applications want to get the raw CPUID data, they should access + * /dev/cpu//cpuid instead. 
+ */ +- static char *x86_cap_flags[] = { ++ static const char *const x86_cap_flags[] = { + /* Intel-defined */ + "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", + "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", +@@ -1222,7 +1296,7 @@ static int show_cpuinfo(struct seq_file + /* Intel-defined (#2) */ + "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", + "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, +- NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt", ++ NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + + /* VIA/Cyrix/Centaur-defined */ +@@ -1232,10 +1306,10 @@ static int show_cpuinfo(struct seq_file + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + + /* AMD-defined (#2) */ +- "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy", +- "altmovcr8", "abm", "sse4a", +- "misalignsse", "3dnowprefetch", +- "osvw", "ibs", NULL, NULL, NULL, NULL, ++ "lahf_lm", "cmp_legacy", "svm", "extapic", ++ "cr8_legacy", "abm", "sse4a", "misalignsse", ++ "3dnowprefetch", "osvw", "ibs", "sse5", ++ "skinit", "wdt", NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + +@@ -1245,7 +1319,7 @@ static int show_cpuinfo(struct seq_file + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + }; +- static char *x86_power_flags[] = { ++ static const char *const x86_power_flags[] = { + "ts", /* temperature sensor */ + "fid", /* frequency id control */ + "vid", /* voltage id control */ +@@ -1260,8 +1334,7 @@ static int show_cpuinfo(struct seq_file + + + #ifdef CONFIG_SMP +- if (!cpu_online(c-cpu_data)) +- return 0; ++ cpu = c->cpu_index; + #endif + + seq_printf(m,"processor\t: %u\n" +@@ -1269,7 +1342,7 @@ static int show_cpuinfo(struct seq_file + "cpu family\t: %d\n" + "model\t\t: %d\n" + "model name\t: %s\n", +- (unsigned)(c-cpu_data), ++ (unsigned)cpu, + c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", + c->x86, + (int)c->x86_model, +@@ -1281,7 +1354,7 @@ static int show_cpuinfo(struct seq_file + seq_printf(m, "stepping\t: unknown\n"); + + if (cpu_has(c,X86_FEATURE_TSC)) { +- unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data)); ++ unsigned int freq = cpufreq_quick_get((unsigned)cpu); + if (!freq) + freq = cpu_khz; + seq_printf(m, "cpu MHz\t\t: %u.%03u\n", +@@ -1294,9 +1367,9 @@ static int show_cpuinfo(struct seq_file + + #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + if (smp_num_siblings * c->x86_max_cores > 1) { +- int cpu = c - cpu_data; + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); +- seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); ++ seq_printf(m, "siblings\t: %d\n", ++ cpus_weight(per_cpu(cpu_core_map, cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } +@@ -1351,12 +1424,16 @@ static int show_cpuinfo(struct seq_file + + static void *c_start(struct seq_file *m, loff_t *pos) + { +- return *pos < NR_CPUS ? 
cpu_data + *pos : NULL; ++ if (*pos == 0) /* just in case, cpu 0 is not the first */ ++ *pos = first_cpu(cpu_online_map); ++ if ((*pos) < NR_CPUS && cpu_online(*pos)) ++ return &cpu_data(*pos); ++ return NULL; + } + + static void *c_next(struct seq_file *m, void *v, loff_t *pos) + { +- ++*pos; ++ *pos = next_cpu(*pos, cpu_online_map); + return c_start(m, pos); + } + +--- head-2011-03-17.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -72,7 +72,7 @@ + * + * B stepping CPUs may hang. There are hardware work arounds + * for this. We warn about it in case your board doesn't have the work +- * arounds. Basically thats so I can tell anyone with a B stepping ++ * arounds. Basically that's so I can tell anyone with a B stepping + * CPU and SMP problems "tough". + * + * Specific items [From Pentium Processor Specification Update] +@@ -241,7 +241,7 @@ void leave_mm(unsigned long cpu) + * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); + * Stop ipi delivery for the old mm. This is not synchronized with + * the other cpus, but smp_invalidate_interrupt ignore flush ipis +- * for the wrong mm, and in the worst case we perform a superflous ++ * for the wrong mm, and in the worst case we perform a superfluous + * tlb flush. + * 1a2) set cpu_tlbstate to TLBSTATE_OK + * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 +@@ -309,6 +309,7 @@ irqreturn_t smp_invalidate_interrupt(int + smp_mb__after_clear_bit(); + out: + put_cpu_no_resched(); ++ __get_cpu_var(irq_stat).irq_tlb_count++; + + return IRQ_HANDLED; + } +@@ -580,7 +581,7 @@ static void stop_this_cpu (void * dummy) + */ + cpu_clear(smp_processor_id(), cpu_online_map); + disable_all_local_evtchn(); +- if (cpu_data[smp_processor_id()].hlt_works_ok) ++ if (cpu_data(smp_processor_id()).hlt_works_ok) + for(;;) halt(); + for (;;); + } +@@ -610,6 +611,7 @@ void xen_smp_send_stop(void) + */ + irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) + { ++ __get_cpu_var(irq_stat).irq_resched_count++; + + return IRQ_HANDLED; + } +@@ -632,6 +634,7 @@ irqreturn_t smp_call_function_interrupt( + */ + irq_enter(); + (*func)(info); ++ __get_cpu_var(irq_stat).irq_call_count++; + irq_exit(); + + if (wait) { +--- head-2011-03-17.orig/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -167,6 +167,7 @@ asmlinkage void smp_invalidate_interrupt + out: + ack_APIC_irq(); + cpu_clear(cpu, f->flush_cpumask); ++ add_pda(irq_tlb_count, 1); + } + + static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, +@@ -326,17 +327,27 @@ void unlock_ipi_call_lock(void) + } + + /* +- * this function sends a 'generic call function' IPI to one other CPU +- * in the system. +- * +- * cpu is a standard Linux logical CPU number. ++ * this function sends a 'generic call function' IPI to all other CPU ++ * of the system defined in the mask. 
+ */ +-static void +-__smp_call_function_single(int cpu, void (*func) (void *info), void *info, +- int nonatomic, int wait) ++ ++static int ++__smp_call_function_mask(cpumask_t mask, ++ void (*func)(void *), void *info, ++ int wait) + { + struct call_data_struct data; +- int cpus = 1; ++ cpumask_t allbutself; ++ int cpus; ++ ++ allbutself = cpu_online_map; ++ cpu_clear(smp_processor_id(), allbutself); ++ ++ cpus_and(mask, mask, allbutself); ++ cpus = cpus_weight(mask); ++ ++ if (!cpus) ++ return 0; + + data.func = func; + data.info = info; +@@ -347,19 +358,55 @@ __smp_call_function_single(int cpu, void + + call_data = &data; + wmb(); +- /* Send a message to all other CPUs and wait for them to respond */ +- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); ++ ++ /* Send a message to other CPUs */ ++ if (cpus_equal(mask, allbutself)) ++ send_IPI_allbutself(CALL_FUNCTION_VECTOR); ++ else ++ send_IPI_mask(mask, CALL_FUNCTION_VECTOR); + + /* Wait for response */ + while (atomic_read(&data.started) != cpus) + cpu_relax(); + + if (!wait) +- return; ++ return 0; + + while (atomic_read(&data.finished) != cpus) + cpu_relax(); ++ ++ return 0; ++} ++/** ++ * smp_call_function_mask(): Run a function on a set of other CPUs. ++ * @mask: The set of cpus to run on. Must not include the current cpu. ++ * @func: The function to run. This must be fast and non-blocking. ++ * @info: An arbitrary pointer to pass to the function. ++ * @wait: If true, wait (atomically) until function has completed on other CPUs. ++ * ++ * Returns 0 on success, else a negative status code. ++ * ++ * If @wait is true, then returns once @func has returned; otherwise ++ * it returns just before the target cpu calls @func. ++ * ++ * You must not call this function with disabled interrupts or from a ++ * hardware interrupt handler or from a bottom half handler. ++ */ ++int smp_call_function_mask(cpumask_t mask, ++ void (*func)(void *), void *info, ++ int wait) ++{ ++ int ret; ++ ++ /* Can deadlock when called with interrupts disabled */ ++ WARN_ON(irqs_disabled()); ++ ++ spin_lock(&call_lock); ++ ret = __smp_call_function_mask(mask, func, info, wait); ++ spin_unlock(&call_lock); ++ return ret; + } ++EXPORT_SYMBOL(smp_call_function_mask); + + /* + * smp_call_function_single - Run a function on a specific CPU +@@ -378,6 +425,7 @@ int smp_call_function_single (int cpu, v + int nonatomic, int wait) + { + /* prevent preemption and reschedule on another processor */ ++ int ret; + int me = get_cpu(); + + /* Can deadlock when called with interrupts disabled */ +@@ -391,51 +439,14 @@ int smp_call_function_single (int cpu, v + return 0; + } + +- spin_lock(&call_lock); +- __smp_call_function_single(cpu, func, info, nonatomic, wait); +- spin_unlock(&call_lock); ++ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); ++ + put_cpu(); +- return 0; ++ return ret; + } + EXPORT_SYMBOL(smp_call_function_single); + + /* +- * this function sends a 'generic call function' IPI to all other CPUs +- * in the system. 
+- */ +-static void __smp_call_function (void (*func) (void *info), void *info, +- int nonatomic, int wait) +-{ +- struct call_data_struct data; +- int cpus = num_online_cpus()-1; +- +- if (!cpus) +- return; +- +- data.func = func; +- data.info = info; +- atomic_set(&data.started, 0); +- data.wait = wait; +- if (wait) +- atomic_set(&data.finished, 0); +- +- call_data = &data; +- wmb(); +- /* Send a message to all other CPUs and wait for them to respond */ +- send_IPI_allbutself(CALL_FUNCTION_VECTOR); +- +- /* Wait for response */ +- while (atomic_read(&data.started) != cpus) +- cpu_relax(); +- +- if (!wait) +- return; +- +- while (atomic_read(&data.finished) != cpus) +- cpu_relax(); +-} +- +-/* + * smp_call_function - run a function on all other CPUs. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. +@@ -453,10 +464,7 @@ static void __smp_call_function (void (* + int smp_call_function (void (*func) (void *info), void *info, int nonatomic, + int wait) + { +- spin_lock(&call_lock); +- __smp_call_function(func,info,nonatomic,wait); +- spin_unlock(&call_lock); +- return 0; ++ return smp_call_function_mask(cpu_online_map, func, info, wait); + } + EXPORT_SYMBOL(smp_call_function); + +@@ -485,7 +493,7 @@ void smp_send_stop(void) + /* Don't deadlock on the call lock in panic */ + nolock = !spin_trylock(&call_lock); + local_irq_save(flags); +- __smp_call_function(stop_this_cpu, NULL, 0, 0); ++ __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0); + if (!nolock) + spin_unlock(&call_lock); + disable_all_local_evtchn(); +@@ -505,7 +513,9 @@ asmlinkage irqreturn_t smp_reschedule_in + { + #ifndef CONFIG_XEN + ack_APIC_irq(); +-#else ++#endif ++ add_pda(irq_resched_count, 1); ++#ifdef CONFIG_XEN + return IRQ_HANDLED; + #endif + } +@@ -535,6 +545,7 @@ asmlinkage irqreturn_t smp_call_function + exit_idle(); + irq_enter(); + (*func)(info); ++ add_pda(irq_call_count, 1); + irq_exit(); + if (wait) { + mb(); +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/i386/kernel/time.c +- * + * Copyright (C) 1991, 1992, 1995 Linus Torvalds + * + * This file contains the PC-specific time handling details: +@@ -73,6 +71,7 @@ + #include + + #include ++#include + #include + + #include +@@ -536,6 +535,13 @@ irqreturn_t timer_interrupt(int irq, voi + struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); + struct vcpu_runstate_info runstate; + ++ /* Keep nmi watchdog up to date */ ++#ifdef __i386__ ++ per_cpu(irq_stat, smp_processor_id()).irq0_irqs++; ++#else ++ add_pda(irq0_irqs, 1); ++#endif ++ + /* + * Here we are in the timer irq handler. 
We just have irqs locally + * disabled but we don't know if the timer_bh is running on the other +@@ -1011,7 +1017,7 @@ static int time_cpufreq_notifier(struct + struct cpufreq_freqs *freq = data; + struct xen_platform_op op; + +- if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC)) ++ if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC)) + return 0; + + if (val == CPUFREQ_PRECHANGE) +@@ -1049,30 +1055,33 @@ core_initcall(cpufreq_time_setup); + */ + static ctl_table xen_subtable[] = { + { +- .ctl_name = 1, ++ .ctl_name = CTL_XEN_INDEPENDENT_WALLCLOCK, + .procname = "independent_wallclock", + .data = &independent_wallclock, + .maxlen = sizeof(independent_wallclock), + .mode = 0644, ++ .strategy = sysctl_data, + .proc_handler = proc_dointvec + }, + { +- .ctl_name = 2, ++ .ctl_name = CTL_XEN_PERMITTED_CLOCK_JITTER, + .procname = "permitted_clock_jitter", + .data = &permitted_clock_jitter, + .maxlen = sizeof(permitted_clock_jitter), + .mode = 0644, ++ .strategy = sysctl_data, + .proc_handler = proc_doulongvec_minmax + }, +- { 0 } ++ { } + }; + static ctl_table xen_table[] = { + { +- .ctl_name = 123, ++ .ctl_name = CTL_XEN, + .procname = "xen", + .mode = 0555, +- .child = xen_subtable}, +- { 0 } ++ .child = xen_subtable ++ }, ++ { } + }; + static int __init xen_sysctl_init(void) + { +--- head-2011-03-17.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/i386/traps.c +- * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * Pentium III FXSR, SSE support +@@ -65,6 +63,11 @@ + + int panic_on_unrecovered_nmi; + ++#ifndef CONFIG_XEN ++DECLARE_BITMAP(used_vectors, NR_VECTORS); ++EXPORT_SYMBOL_GPL(used_vectors); ++#endif ++ + asmlinkage int system_call(void); + + /* Do we ignore FPU interrupts ? 
*/ +@@ -120,7 +123,7 @@ struct stack_frame { + + static inline unsigned long print_context_stack(struct thread_info *tinfo, + unsigned long *stack, unsigned long ebp, +- struct stacktrace_ops *ops, void *data) ++ const struct stacktrace_ops *ops, void *data) + { + #ifdef CONFIG_FRAME_POINTER + struct stack_frame *frame = (struct stack_frame *)ebp; +@@ -157,7 +160,7 @@ static inline unsigned long print_contex + + void dump_trace(struct task_struct *task, struct pt_regs *regs, + unsigned long *stack, +- struct stacktrace_ops *ops, void *data) ++ const struct stacktrace_ops *ops, void *data) + { + unsigned long ebp = 0; + +@@ -229,7 +232,7 @@ static void print_trace_address(void *da + touch_nmi_watchdog(); + } + +-static struct stacktrace_ops print_trace_ops = { ++static const struct stacktrace_ops print_trace_ops = { + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, + .stack = print_trace_stack, +@@ -288,6 +291,11 @@ void dump_stack(void) + { + unsigned long stack; + ++ printk("Pid: %d, comm: %.20s %s %s %.*s\n", ++ current->pid, current->comm, print_tainted(), ++ init_utsname()->release, ++ (int)strcspn(init_utsname()->version, " "), ++ init_utsname()->version); + show_trace(current, NULL, &stack); + } + +@@ -296,48 +304,24 @@ EXPORT_SYMBOL(dump_stack); + void show_registers(struct pt_regs *regs) + { + int i; +- int in_kernel = 1; +- unsigned long esp; +- unsigned short ss, gs; +- +- esp = (unsigned long) (®s->esp); +- savesegment(ss, ss); +- savesegment(gs, gs); +- if (user_mode_vm(regs)) { +- in_kernel = 0; +- esp = regs->esp; +- ss = regs->xss & 0xffff; +- } ++ + print_modules(); +- printk(KERN_EMERG "CPU: %d\n" +- KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n" +- KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n", +- smp_processor_id(), 0xffff & regs->xcs, regs->eip, +- print_tainted(), regs->eflags, init_utsname()->release, +- (int)strcspn(init_utsname()->version, " "), +- init_utsname()->version); +- print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip); +- printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", +- regs->eax, regs->ebx, regs->ecx, regs->edx); +- printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", +- regs->esi, regs->edi, regs->ebp, esp); +- printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", +- regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss); ++ __show_registers(regs, 0); + printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", +- TASK_COMM_LEN, current->comm, current->pid, ++ TASK_COMM_LEN, current->comm, task_pid_nr(current), + current_thread_info(), current, task_thread_info(current)); + /* + * When in-kernel, we also print out the stack and code at the + * time of the fault.. 
+ */ +- if (in_kernel) { ++ if (!user_mode_vm(regs)) { + u8 *eip; + unsigned int code_prologue = code_bytes * 43 / 64; + unsigned int code_len = code_bytes; + unsigned char c; + + printk("\n" KERN_EMERG "Stack: "); +- show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); ++ show_stack_log_lvl(NULL, regs, ®s->esp, KERN_EMERG); + + printk(KERN_EMERG "Code: "); + +@@ -382,11 +366,11 @@ int is_valid_bugaddr(unsigned long eip) + void die(const char * str, struct pt_regs * regs, long err) + { + static struct { +- spinlock_t lock; ++ raw_spinlock_t lock; + u32 lock_owner; + int lock_owner_depth; + } die = { +- .lock = __SPIN_LOCK_UNLOCKED(die.lock), ++ .lock = __RAW_SPIN_LOCK_UNLOCKED, + .lock_owner = -1, + .lock_owner_depth = 0 + }; +@@ -397,40 +381,33 @@ void die(const char * str, struct pt_reg + + if (die.lock_owner != raw_smp_processor_id()) { + console_verbose(); +- spin_lock_irqsave(&die.lock, flags); ++ raw_local_irq_save(flags); ++ __raw_spin_lock(&die.lock); + die.lock_owner = smp_processor_id(); + die.lock_owner_depth = 0; + bust_spinlocks(1); +- } +- else +- local_save_flags(flags); ++ } else ++ raw_local_irq_save(flags); + + if (++die.lock_owner_depth < 3) { +- int nl = 0; + unsigned long esp; + unsigned short ss; + + report_bug(regs->eip, regs); + +- printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); ++ printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++ ++die_counter); + #ifdef CONFIG_PREEMPT +- printk(KERN_EMERG "PREEMPT "); +- nl = 1; ++ printk("PREEMPT "); + #endif + #ifdef CONFIG_SMP +- if (!nl) +- printk(KERN_EMERG); + printk("SMP "); +- nl = 1; + #endif + #ifdef CONFIG_DEBUG_PAGEALLOC +- if (!nl) +- printk(KERN_EMERG); + printk("DEBUG_PAGEALLOC"); +- nl = 1; + #endif +- if (nl) +- printk("\n"); ++ printk("\n"); ++ + if (notify_die(DIE_OOPS, str, regs, err, + current->thread.trap_no, SIGSEGV) != + NOTIFY_STOP) { +@@ -454,7 +431,8 @@ void die(const char * str, struct pt_reg + bust_spinlocks(0); + die.lock_owner = -1; + add_taint(TAINT_DIE); +- spin_unlock_irqrestore(&die.lock, flags); ++ __raw_spin_unlock(&die.lock); ++ raw_local_irq_restore(flags); + + if (!regs) + return; +@@ -571,6 +549,7 @@ fastcall void do_##name(struct pt_regs * + info.si_errno = 0; \ + info.si_code = sicode; \ + info.si_addr = (void __user *)siaddr; \ ++ trace_hardirqs_fixup(); \ + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ + == NOTIFY_STOP) \ + return; \ +@@ -606,7 +585,7 @@ fastcall void __kprobes do_general_prote + printk_ratelimit()) + printk(KERN_INFO + "%s[%d] general protection eip:%lx esp:%lx error:%lx\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + regs->eip, regs->esp, error_code); + + force_sig(SIGSEGV, current); +@@ -785,6 +764,8 @@ void restart_nmi(void) + #ifdef CONFIG_KPROBES + fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code) + { ++ trace_hardirqs_fixup(); ++ + if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) + == NOTIFY_STOP) + return; +@@ -822,6 +803,8 @@ fastcall void __kprobes do_debug(struct + unsigned int condition; + struct task_struct *tsk = current; + ++ trace_hardirqs_fixup(); ++ + get_debugreg(condition, 6); + + if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, +@@ -1084,20 +1067,6 @@ asmlinkage void math_emulate(long arg) + + #endif /* CONFIG_MATH_EMULATION */ + +-#ifdef CONFIG_X86_F00F_BUG +-void __init trap_init_f00f_bug(void) +-{ +- __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); +- +- /* +- * Update the IDT descriptor 
and reload the IDT so that +- * it uses the read-only mapped virtual address. +- */ +- idt_descr.address = fix_to_virt(FIX_F00F_IDT); +- load_idt(&idt_descr); +-} +-#endif +- + + /* + * NB. All these are "trap gates" (i.e. events_mask isn't set) except +--- head-2011-03-17.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/x86-64/traps.c +- * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + * +@@ -33,6 +31,7 @@ + #include + #include + #include ++#include + + #if defined(CONFIG_EDAC) + #include +@@ -205,7 +204,7 @@ static unsigned long *in_exception_stack + #define MSG(txt) ops->warning(data, txt) + + /* +- * x86-64 can have upto three kernel stacks: ++ * x86-64 can have up to three kernel stacks: + * process stack + * interrupt stack + * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack +@@ -219,7 +218,7 @@ static inline int valid_stack_ptr(struct + + void dump_trace(struct task_struct *tsk, struct pt_regs *regs, + unsigned long *stack, +- struct stacktrace_ops *ops, void *data) ++ const struct stacktrace_ops *ops, void *data) + { + const unsigned cpu = get_cpu(); + unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; +@@ -340,7 +339,7 @@ static void print_trace_address(void *da + printk_address(addr); + } + +-static struct stacktrace_ops print_trace_ops = { ++static const struct stacktrace_ops print_trace_ops = { + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, + .stack = print_trace_stack, +@@ -404,6 +403,12 @@ void show_stack(struct task_struct *tsk, + void dump_stack(void) + { + unsigned long dummy; ++ ++ printk("Pid: %d, comm: %.20s %s %s %.*s\n", ++ current->pid, current->comm, print_tainted(), ++ init_utsname()->release, ++ (int)strcspn(init_utsname()->version, " "), ++ init_utsname()->version); + show_trace(NULL, NULL, &dummy); + } + +@@ -466,7 +471,7 @@ void out_of_line_bug(void) + EXPORT_SYMBOL(out_of_line_bug); + #endif + +-static DEFINE_SPINLOCK(die_lock); ++static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; + static int die_owner = -1; + static unsigned int die_nest_count; + +@@ -478,13 +483,13 @@ unsigned __kprobes long oops_begin(void) + oops_enter(); + + /* racy, but better than risking deadlock. */ +- local_irq_save(flags); ++ raw_local_irq_save(flags); + cpu = smp_processor_id(); +- if (!spin_trylock(&die_lock)) { ++ if (!__raw_spin_trylock(&die_lock)) { + if (cpu == die_owner) + /* nested oops. should stop eventually */; + else +- spin_lock(&die_lock); ++ __raw_spin_lock(&die_lock); + } + die_nest_count++; + die_owner = cpu; +@@ -498,12 +503,10 @@ void __kprobes oops_end(unsigned long fl + die_owner = -1; + bust_spinlocks(0); + die_nest_count--; +- if (die_nest_count) +- /* We still own the lock */ +- local_irq_restore(flags); +- else ++ if (!die_nest_count) + /* Nest count reaches zero, release the lock. 
*/ +- spin_unlock_irqrestore(&die_lock, flags); ++ __raw_spin_unlock(&die_lock); ++ raw_local_irq_restore(flags); + if (panic_on_oops) + panic("Fatal exception"); + oops_exit(); +@@ -636,6 +639,7 @@ asmlinkage void do_##name(struct pt_regs + info.si_errno = 0; \ + info.si_code = sicode; \ + info.si_addr = (void __user *)siaddr; \ ++ trace_hardirqs_fixup(); \ + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ + == NOTIFY_STOP) \ + return; \ +@@ -741,11 +745,8 @@ mem_parity_error(unsigned char reason, s + + printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); + +-#if 0 /* XEN */ + /* Clear and disable the memory parity error line. */ +- reason = (reason & 0xf) | 4; +- outb(reason, 0x61); +-#endif /* XEN */ ++ clear_mem_error(reason); + } + + static __kprobes void +@@ -754,14 +755,8 @@ io_check_error(unsigned char reason, str + printk("NMI: IOCK error (debug interrupt?)\n"); + show_registers(regs); + +-#if 0 /* XEN */ + /* Re-enable the IOCK line, wait for a few seconds */ +- reason = (reason & 0xf) | 8; +- outb(reason, 0x61); +- mdelay(2000); +- reason &= ~8; +- outb(reason, 0x61); +-#endif /* XEN */ ++ clear_io_check_error(reason); + } + + static __kprobes void +@@ -821,6 +816,8 @@ asmlinkage __kprobes void default_do_nmi + /* runs on IST stack. */ + asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code) + { ++ trace_hardirqs_fixup(); ++ + if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { + return; + } +@@ -858,6 +855,8 @@ asmlinkage void __kprobes do_debug(struc + struct task_struct *tsk = current; + siginfo_t info; + ++ trace_hardirqs_fixup(); ++ + get_debugreg(condition, 6); + + if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, +--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * linux/arch/x86_64/kernel/vsyscall.c +- * + * Copyright (C) 2001 Andrea Arcangeli SuSE + * Copyright 2003 Andi Kleen, SuSE Labs. 
+ * +@@ -50,12 +48,12 @@ + ({unsigned long v; \ + extern char __vsyscall_0; \ + asm("" : "=r" (v) : "0" (x)); \ +- ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); }) ++ ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); }) + + /* + * vsyscall_gtod_data contains data that is : + * - readonly from vsyscalls +- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64) ++ * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64) + * Try to keep this structure as small as possible to avoid cache line ping pongs + */ + int __vgetcpu_mode __section_vgetcpu_mode; +@@ -66,6 +64,16 @@ struct vsyscall_gtod_data __vsyscall_gto + .sysctl_enabled = 1, + }; + ++void update_vsyscall_tz(void) ++{ ++ unsigned long flags; ++ ++ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); ++ /* sys_tz has changed */ ++ vsyscall_gtod_data.sys_tz = sys_tz; ++ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); ++} ++ + void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) + { + unsigned long flags; +@@ -79,8 +87,6 @@ void update_vsyscall(struct timespec *wa + vsyscall_gtod_data.clock.shift = clock->shift; + vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; + vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; +- vsyscall_gtod_data.sys_tz = sys_tz; +- vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; + vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; + write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + } +@@ -166,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t) + if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) + return time_syscall(t); + +- vgettimeofday(&tv, 0); ++ vgettimeofday(&tv, NULL); + result = tv.tv_sec; + if (t) + *t = result; +@@ -260,18 +266,10 @@ out: + return ret; + } + +-static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen, +- void __user *oldval, size_t __user *oldlenp, +- void __user *newval, size_t newlen) +-{ +- return -ENOSYS; +-} - --#define _hypercall2(type, name, a1, a2) \ --({ \ -- type __res; \ -- long __ign1, __ign2; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ -- : "1" ((long)(a1)), "2" ((long)(a2)) \ -- : "memory" ); \ -- __res; \ --}) + static ctl_table kernel_table2[] = { +- { .ctl_name = 99, .procname = "vsyscall64", ++ { .procname = "vsyscall64", + .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int), + .mode = 0644, +- .strategy = vsyscall_sysctl_nostrat, + .proc_handler = vsyscall_sysctl_change }, + {} + }; +@@ -291,9 +289,9 @@ static void __cpuinit vsyscall_set_cpu(i + unsigned long d; + unsigned long node = 0; + #ifdef CONFIG_NUMA +- node = cpu_to_node[cpu]; ++ node = cpu_to_node(cpu); + #endif +- if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) ++ if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) + write_rdtscp_aux((node << 12) | cpu); + + /* Store cpu number in limit so that it can be loaded quickly +--- head-2011-03-17.orig/arch/x86/mm/fault_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -32,33 +33,27 @@ + + extern void die(const char *,struct pt_regs *,long); + +-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); - --#define _hypercall3(type, name, a1, a2, a3) \ --({ \ -- type __res; \ -- long __ign1, __ign2, __ign3; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ 
-- "=d" (__ign3) \ -- : "1" ((long)(a1)), "2" ((long)(a2)), \ -- "3" ((long)(a3)) \ -- : "memory" ); \ -- __res; \ --}) +-int register_page_fault_notifier(struct notifier_block *nb) ++#ifdef CONFIG_KPROBES ++static inline int notify_page_fault(struct pt_regs *regs) + { +- vmalloc_sync_all(); +- return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); +-} +-EXPORT_SYMBOL_GPL(register_page_fault_notifier); ++ int ret = 0; + +-int unregister_page_fault_notifier(struct notifier_block *nb) +-{ +- return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); +-} +-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); ++ /* kprobe_running() needs smp_processor_id() */ ++ if (!user_mode_vm(regs)) { ++ preempt_disable(); ++ if (kprobe_running() && kprobe_fault_handler(regs, 14)) ++ ret = 1; ++ preempt_enable(); ++ } + +-static inline int notify_page_fault(struct pt_regs *regs, long err) ++ return ret; ++} ++#else ++static inline int notify_page_fault(struct pt_regs *regs) + { +- struct die_args args = { +- .regs = regs, +- .str = "page fault", +- .err = err, +- .trapnr = 14, +- .signr = SIGSEGV +- }; +- return atomic_notifier_call_chain(¬ify_page_fault_chain, +- DIE_PAGE_FAULT, &args); ++ return 0; + } ++#endif + + /* + * Return EIP plus the CS segment base. The segment limit is also +@@ -110,7 +105,7 @@ static inline unsigned long get_segment_ + LDT and other horrors are only used in user space. */ + if (seg & (1<<2)) { + /* Must lock the LDT while reading it. */ +- down(¤t->mm->context.sem); ++ mutex_lock(¤t->mm->context.lock); + desc = current->mm->context.ldt; + desc = (void *)desc + (seg & ~7); + } else { +@@ -123,7 +118,7 @@ static inline unsigned long get_segment_ + base = get_desc_base((unsigned long *)desc); + + if (seg & (1<<2)) { +- up(¤t->mm->context.sem); ++ mutex_unlock(¤t->mm->context.lock); + } else + put_cpu(); + +@@ -244,7 +239,7 @@ static void dump_fault_path(unsigned lon + if (mfn_to_pfn(mfn) >= highstart_pfn) + return; + #endif +- if (p[0] & _PAGE_PRESENT) { ++ if ((p[0] & _PAGE_PRESENT) && !(p[0] & _PAGE_PSE)) { + page = mfn_to_pfn(mfn) << PAGE_SHIFT; + p = (unsigned long *) __va(page); + address &= 0x001fffff; +@@ -270,7 +265,8 @@ static void dump_fault_path(unsigned lon + * it's allocated already. + */ + if ((machine_to_phys(page) >> PAGE_SHIFT) < max_low_pfn +- && (page & _PAGE_PRESENT)) { ++ && (page & _PAGE_PRESENT) ++ && !(page & _PAGE_PSE)) { + page = machine_to_phys(page & PAGE_MASK); + page = ((unsigned long *) __va(page))[(address >> PAGE_SHIFT) + & (PTRS_PER_PTE - 1)]; +@@ -416,6 +412,11 @@ fastcall void __kprobes do_page_fault(st + int write, si_code; + int fault; + ++ /* ++ * We can fault from pretty much anywhere, with unknown IRQ state. ++ */ ++ trace_hardirqs_fixup(); ++ + /* get the address */ + address = read_cr2(); + +@@ -453,7 +454,7 @@ fastcall void __kprobes do_page_fault(st + /* Can take a spurious fault if mapping changes R/O -> R/W. */ + if (spurious_fault(regs, address, error_code)) + return; +- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) ++ if (notify_page_fault(regs)) + return; + /* + * Don't take the mm semaphore here. 
If we fixup a prefetch +@@ -462,7 +463,7 @@ fastcall void __kprobes do_page_fault(st + goto bad_area_nosemaphore; + } + +- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) ++ if (notify_page_fault(regs)) + return; + + /* It's safe to allow irq's after cr2 has been saved and the vmalloc +@@ -481,7 +482,7 @@ fastcall void __kprobes do_page_fault(st + + /* When running in the kernel we expect faults to occur only to + * addresses in user space. All other faults represent errors in the +- * kernel and should generate an OOPS. Unfortunatly, in the case of an ++ * kernel and should generate an OOPS. Unfortunately, in the case of an + * erroneous fault occurring in a code path which already holds mmap_sem + * we will deadlock attempting to validate the fault against the + * address space. Luckily the kernel only validly references user +@@ -489,7 +490,7 @@ fastcall void __kprobes do_page_fault(st + * exceptions table. + * + * As the vast majority of faults will be valid we will only perform +- * the source reference check when there is a possibilty of a deadlock. ++ * the source reference check when there is a possibility of a deadlock. + * Attempt to lock the address space, if we cannot we then validate the + * source. If this is invalid we can skip the address space check, + * thus avoiding the deadlock. +@@ -598,8 +599,8 @@ bad_area_nosemaphore: + printk_ratelimit()) { + printk("%s%s[%d]: segfault at %08lx eip %08lx " + "esp %08lx error %lx\n", +- tsk->pid > 1 ? KERN_INFO : KERN_EMERG, +- tsk->comm, tsk->pid, address, regs->eip, ++ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, ++ tsk->comm, task_pid_nr(tsk), address, regs->eip, + regs->esp, error_code); + } + tsk->thread.cr2 = address; +@@ -664,8 +665,7 @@ no_context: + printk(KERN_ALERT "BUG: unable to handle kernel paging" + " request"); + printk(" at virtual address %08lx\n",address); +- printk(KERN_ALERT " printing eip:\n"); +- printk("%08lx\n", regs->eip); ++ printk(KERN_ALERT "printing eip: %08lx\n", regs->eip); + dump_fault_path(address); + } + tsk->thread.cr2 = address; +@@ -681,14 +681,14 @@ no_context: + */ + out_of_memory: + up_read(&mm->mmap_sem); +- if (is_init(tsk)) { ++ if (is_global_init(tsk)) { + yield(); + down_read(&mm->mmap_sem); + goto survive; + } + printk("VM: killing process %s\n", tsk->comm); + if (error_code & 4) +- do_exit(SIGKILL); ++ do_group_exit(SIGKILL); + goto no_context; + + do_sigbus: +--- head-2011-03-17.orig/arch/x86/mm/fault_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -40,34 +41,27 @@ + #define PF_RSVD (1<<3) + #define PF_INSTR (1<<4) + +-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); - --#define _hypercall4(type, name, a1, a2, a3, a4) \ --({ \ -- type __res; \ -- long __ign1, __ign2, __ign3, __ign4; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ -- "=d" (__ign3), "=S" (__ign4) \ -- : "1" ((long)(a1)), "2" ((long)(a2)), \ -- "3" ((long)(a3)), "4" ((long)(a4)) \ -- : "memory" ); \ -- __res; \ --}) +-/* Hook to register for page fault notifications */ +-int register_page_fault_notifier(struct notifier_block *nb) ++#ifdef CONFIG_KPROBES ++static inline int notify_page_fault(struct pt_regs *regs) + { +- vmalloc_sync_all(); +- return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); +-} +-EXPORT_SYMBOL_GPL(register_page_fault_notifier); ++ int ret = 0; + +-int 
unregister_page_fault_notifier(struct notifier_block *nb) +-{ +- return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); +-} +-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); ++ /* kprobe_running() needs smp_processor_id() */ ++ if (!user_mode(regs)) { ++ preempt_disable(); ++ if (kprobe_running() && kprobe_fault_handler(regs, 14)) ++ ret = 1; ++ preempt_enable(); ++ } + +-static inline int notify_page_fault(struct pt_regs *regs, long err) ++ return ret; ++} ++#else ++static inline int notify_page_fault(struct pt_regs *regs) + { +- struct die_args args = { +- .regs = regs, +- .str = "page fault", +- .err = err, +- .trapnr = 14, +- .signr = SIGSEGV +- }; +- return atomic_notifier_call_chain(¬ify_page_fault_chain, +- DIE_PAGE_FAULT, &args); ++ return 0; + } ++#endif + + /* Sometimes the CPU reports invalid exceptions on prefetch. + Check that here and ignore. +@@ -175,7 +169,7 @@ void dump_pagetable(unsigned long addres + pmd = pmd_offset(pud, address); + if (bad_address(pmd)) goto bad; + printk("PMD %lx ", pmd_val(*pmd)); +- if (!pmd_present(*pmd)) goto ret; ++ if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; + + pte = pte_offset_kernel(pmd, address); + if (bad_address(pte)) goto bad; +@@ -294,7 +288,6 @@ static int vmalloc_fault(unsigned long a + return 0; + } + +-static int page_fault_trace; + int show_unhandled_signals = 1; + + +@@ -371,6 +364,11 @@ asmlinkage void __kprobes do_page_fault( + if (!user_mode(regs)) + error_code &= ~PF_USER; /* means kernel */ + ++ /* ++ * We can fault from pretty much anywhere, with unknown IRQ state. ++ */ ++ trace_hardirqs_fixup(); ++ + tsk = current; + mm = tsk->mm; + prefetchw(&mm->mmap_sem); +@@ -408,7 +406,7 @@ asmlinkage void __kprobes do_page_fault( + /* Can take a spurious fault if mapping changes R/O -> R/W. */ + if (spurious_fault(regs, address, error_code)) + return; +- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) ++ if (notify_page_fault(regs)) + return; + /* + * Don't take the mm semaphore here. If we fixup a prefetch +@@ -417,16 +415,12 @@ asmlinkage void __kprobes do_page_fault( + goto bad_area_nosemaphore; + } + +- if (notify_page_fault(regs, error_code) == NOTIFY_STOP) ++ if (notify_page_fault(regs)) + return; + + if (likely(regs->eflags & X86_EFLAGS_IF)) + local_irq_enable(); + +- if (unlikely(page_fault_trace)) +- printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n", +- regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code); - --#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ --({ \ -- type __res; \ -- long __ign1, __ign2, __ign3, __ign4, __ign5; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ -- "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ -- : "1" ((long)(a1)), "2" ((long)(a2)), \ -- "3" ((long)(a3)), "4" ((long)(a4)), \ -- "5" ((long)(a5)) \ -- : "memory" ); \ -- __res; \ --}) + if (unlikely(error_code & PF_RSVD)) + pgtable_bad(address, regs, error_code); + +@@ -447,7 +441,7 @@ asmlinkage void __kprobes do_page_fault( + again: + /* When running in the kernel we expect faults to occur only to + * addresses in user space. All other faults represent errors in the +- * kernel and should generate an OOPS. Unfortunatly, in the case of an ++ * kernel and should generate an OOPS. Unfortunately, in the case of an + * erroneous fault occurring in a code path which already holds mmap_sem + * we will deadlock attempting to validate the fault against the + * address space. 
Luckily the kernel only validly references user +@@ -455,7 +449,7 @@ asmlinkage void __kprobes do_page_fault( + * exceptions table. + * + * As the vast majority of faults will be valid we will only perform +- * the source reference check when there is a possibilty of a deadlock. ++ * the source reference check when there is a possibility of a deadlock. + * Attempt to lock the address space, if we cannot we then validate the + * source. If this is invalid we can skip the address space check, + * thus avoiding the deadlock. +@@ -557,7 +551,7 @@ bad_area_nosemaphore: + if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && + printk_ratelimit()) { + printk( +- "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n", ++ "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n", + tsk->pid > 1 ? KERN_INFO : KERN_EMERG, + tsk->comm, tsk->pid, address, regs->rip, + regs->rsp, error_code); +@@ -623,7 +617,7 @@ no_context: + */ + out_of_memory: + up_read(&mm->mmap_sem); +- if (is_init(current)) { ++ if (is_global_init(current)) { + yield(); + goto again; + } +@@ -696,10 +690,3 @@ void vmalloc_sync_all(void) + BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == + (__START_KERNEL & PGDIR_MASK))); + } - --#define _hypercall(type, op, a1, a2, a3, a4, a5) \ --({ \ -- type __res; \ -- register typeof((a1)+0) __arg1 asm("ebx") = (a1); \ -- register typeof((a2)+0) __arg2 asm("ecx") = (a2); \ -- register typeof((a3)+0) __arg3 asm("edx") = (a3); \ -- register typeof((a4)+0) __arg4 asm("esi") = (a4); \ -- register typeof((a5)+0) __arg5 asm("edi") = (a5); \ -- asm volatile ( \ -- "call *%6" \ -- : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ -- "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ -- : "0" (hypercall_page + (op) * 32) \ -- : "memory" ); \ -- __res; \ --}) +-static int __init enable_pagefaulttrace(char *str) +-{ +- page_fault_trace = 1; +- return 1; +-} +-__setup("pagefaulttrace", enable_pagefaulttrace); +--- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-01-31 17:56:27.000000000 +0100 +@@ -71,8 +71,8 @@ static inline bool use_lazy_mmu_mode(voi + + static void multicall_failed(const multicall_entry_t *mc, int rc) + { +- printk(KERN_EMERG "hypercall#%lu(%lx, %lx, %lx, %lx)" +- " failed: %d (caller %lx)\n", ++ pr_emerg("hypercall#%lu(%lx, %lx, %lx, %lx) failed: %d" ++ " (caller %lx)\n", + mc->op, mc->args[0], mc->args[1], mc->args[2], mc->args[3], + rc, mc->args[5]); + BUG(); +@@ -498,6 +498,9 @@ int xen_create_contiguous_region( + unsigned long frame, flags; + unsigned int i; + int rc, success; ++#ifdef CONFIG_64BIT ++ pte_t *ptep = NULL; ++#endif + struct xen_memory_exchange exchange = { + .in = { + .nr_extents = 1UL << order, +@@ -523,6 +526,27 @@ int xen_create_contiguous_region( + if (unlikely(order > MAX_CONTIG_ORDER)) + return -ENOMEM; + ++#ifdef CONFIG_64BIT ++ if (unlikely(vstart > PAGE_OFFSET + MAXMEM)) { ++ unsigned int level; ++ ++ if (vstart < __START_KERNEL_map ++ || vstart + (PAGE_SIZE << order) > (unsigned long)_end) ++ return -EINVAL; ++ ptep = lookup_address((unsigned long)__va(__pa(vstart)), ++ &level); ++ if (ptep && pte_none(*ptep)) ++ ptep = NULL; ++ if (vstart < __START_KERNEL && ptep) ++ return -EINVAL; ++ if (order > MAX_CONTIG_ORDER - 1) ++ return -ENOMEM; ++ } ++#else ++ if (unlikely(vstart + (PAGE_SIZE << order) > (unsigned long)high_memory)) ++ return -EINVAL; ++#endif ++ + set_xen_guest_handle(exchange.in.extent_start, in_frames); + 
set_xen_guest_handle(exchange.out.extent_start, &out_frame); + +@@ -535,9 +559,19 @@ int xen_create_contiguous_region( + in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i); + MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE), + __pte_ma(0), 0); ++#ifdef CONFIG_64BIT ++ if (ptep) ++ MULTI_update_va_mapping(cr_mcl + i + (1U << order), ++ (unsigned long)__va(__pa(vstart)) + (i*PAGE_SIZE), ++ __pte_ma(0), 0); ++#endif + set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, + INVALID_P2M_ENTRY); + } ++#ifdef CONFIG_64BIT ++ if (ptep) ++ i += i; ++#endif + if (HYPERVISOR_multicall_check(cr_mcl, i, NULL)) + BUG(); + +@@ -571,9 +605,18 @@ int xen_create_contiguous_region( + frame = success ? (out_frame + i) : in_frames[i]; + MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE), + pfn_pte_ma(frame, PAGE_KERNEL), 0); ++#ifdef CONFIG_64BIT ++ if (ptep) ++ MULTI_update_va_mapping(cr_mcl + i + (1U << order), ++ (unsigned long)__va(__pa(vstart)) + (i*PAGE_SIZE), ++ pfn_pte_ma(frame, PAGE_KERNEL_RO), 0); ++#endif + set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame); + } - --static inline int __must_check --HYPERVISOR_set_trap_table( -- const trap_info_t *table) ++#ifdef CONFIG_64BIT ++ if (ptep) ++ i += i; ++#endif + cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order + ? UVMF_TLB_FLUSH|UVMF_ALL + : UVMF_INVLPG|UVMF_ALL; +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -94,7 +94,14 @@ static pte_t * __init one_page_table_ini + #else + if (!(__pmd_val(*pmd) & _PAGE_PRESENT)) { + #endif +- pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); ++ pte_t *page_table = NULL; ++ ++#ifdef CONFIG_DEBUG_PAGEALLOC ++ page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); ++#endif ++ if (!page_table) ++ page_table = ++ (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); + + paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT); + make_lowmem_page_readonly(page_table, +@@ -102,7 +109,7 @@ static pte_t * __init one_page_table_ini + set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); + BUG_ON(page_table != pte_offset_kernel(pmd, 0)); + } +- ++ + return pte_offset_kernel(pmd, 0); + } + +@@ -360,8 +367,13 @@ static void __init set_highmem_pages_ini + { + int pfn; + for (pfn = highstart_pfn; pfn < highend_pfn +- && pfn < xen_start_info->nr_pages; pfn++) +- add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro); ++ && pfn < xen_start_info->nr_pages; pfn++) { ++ /* ++ * Holes under sparsemem might not have no mem_map[]: ++ */ ++ if (pfn_valid(pfn)) ++ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro); ++ } + + /* XEN: init high-mem pages outside initial allocation. 
*/ + for (; pfn < highend_pfn; pfn++) { +@@ -785,35 +797,18 @@ int arch_add_memory(int nid, u64 start, + return __add_pages(zone, start_pfn, nr_pages); + } + +-int remove_memory(u64 start, u64 size) -{ -- return _hypercall1(int, set_trap_table, table); +- return -EINVAL; -} +-EXPORT_SYMBOL_GPL(remove_memory); + #endif + + struct kmem_cache *pmd_cache; + + void __init pgtable_cache_init(void) + { +- size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t); - --static inline int __must_check --HYPERVISOR_mmu_update( -- mmu_update_t *req, unsigned int count, unsigned int *success_count, -- domid_t domid) +- if (PTRS_PER_PMD > 1) { ++ if (PTRS_PER_PMD > 1) + pmd_cache = kmem_cache_create("pmd", +- PTRS_PER_PMD*sizeof(pmd_t), +- PTRS_PER_PMD*sizeof(pmd_t), +- SLAB_PANIC, +- pmd_ctor); +- if (!SHARED_KERNEL_PMD) { +- /* If we're in PAE mode and have a non-shared +- kernel pmd, then the pgd size must be a +- page size. This is because the pgd_list +- links through the page structure, so there +- can only be one pgd per page for this to +- work. */ +- pgd_size = PAGE_SIZE; +- } +- } ++ PTRS_PER_PMD*sizeof(pmd_t), ++ PTRS_PER_PMD*sizeof(pmd_t), ++ SLAB_PANIC, ++ pmd_ctor); + } + + /* +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -798,7 +798,7 @@ static void xen_finish_init_mapping(void + /* Setup the direct mapping of the physical memory at PAGE_OFFSET. + This runs before bootmem is initialized and gets pages directly from the + physical memory. To access them they are temporarily mapped. */ +-void __meminit init_memory_mapping(unsigned long start, unsigned long end) ++void __init_refok init_memory_mapping(unsigned long start, unsigned long end) + { + unsigned long next; + +@@ -932,12 +932,6 @@ error: + } + EXPORT_SYMBOL_GPL(arch_add_memory); + +-int remove_memory(u64 start, u64 size) -{ -- if (arch_use_lazy_mmu_mode()) -- return xen_multi_mmu_update(req, count, success_count, domid); -- return _hypercall4(int, mmu_update, req, count, success_count, domid); +- return -EINVAL; -} +-EXPORT_SYMBOL_GPL(remove_memory); - --static inline int __must_check --HYPERVISOR_mmuext_op( -- struct mmuext_op *op, unsigned int count, unsigned int *success_count, -- domid_t domid) + #if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA) + int memory_add_physaddr_to_nid(u64 start) + { +@@ -1216,14 +1210,6 @@ int in_gate_area_no_task(unsigned long a + return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); + } + +-#ifndef CONFIG_XEN +-void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) -{ -- if (arch_use_lazy_mmu_mode()) -- return xen_multi_mmuext_op(op, count, success_count, domid); -- return _hypercall4(int, mmuext_op, op, count, success_count, domid); +- return __alloc_bootmem_core(pgdat->bdata, size, +- SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0); -} +-#endif - --static inline int __must_check --HYPERVISOR_set_gdt( -- unsigned long *frame_list, unsigned int entries) --{ -- return _hypercall2(int, set_gdt, frame_list, entries); --} + const char *arch_vma_name(struct vm_area_struct *vma) + { + if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) +@@ -1232,3 +1218,48 @@ const char *arch_vma_name(struct vm_area + return "[vsyscall]"; + return NULL; + } ++ ++#ifdef CONFIG_SPARSEMEM_VMEMMAP ++/* ++ * Initialise the sparsemem vmemmap using huge-pages at the PMD level. 
++ */ ++int __meminit vmemmap_populate(struct page *start_page, ++ unsigned long size, int node) ++{ ++ unsigned long addr = (unsigned long)start_page; ++ unsigned long end = (unsigned long)(start_page + size); ++ unsigned long next; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ for (; addr < end; addr = next) { ++ next = pmd_addr_end(addr, end); ++ ++ pgd = vmemmap_pgd_populate(addr, node); ++ if (!pgd) ++ return -ENOMEM; ++ pud = vmemmap_pud_populate(pgd, addr, node); ++ if (!pud) ++ return -ENOMEM; ++ ++ pmd = pmd_offset(pud, addr); ++ if (pmd_none(*pmd)) { ++ pte_t entry; ++ void *p = vmemmap_alloc_block(PMD_SIZE, node); ++ if (!p) ++ return -ENOMEM; ++ ++ entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); ++ mk_pte_huge(entry); ++ set_pmd(pmd, __pmd(pte_val(entry))); ++ ++ printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n", ++ addr, addr + PMD_SIZE - 1, p, node); ++ } else ++ vmemmap_verify((pte_t *)pmd, node, addr, next); ++ } ++ ++ return 0; ++} ++#endif +--- head-2011-03-17.orig/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -17,9 +17,6 @@ + #include + #include + +-LIST_HEAD(mm_unpinned); +-DEFINE_SPINLOCK(mm_unpinned_lock); - --static inline int __must_check --HYPERVISOR_stack_switch( -- unsigned long ss, unsigned long esp) --{ -- return _hypercall2(int, stack_switch, ss, esp); --} -+#define HYPERCALL_arg1 "ebx" -+#define HYPERCALL_arg2 "ecx" -+#define HYPERCALL_arg3 "edx" -+#define HYPERCALL_arg4 "esi" -+#define HYPERCALL_arg5 "edi" + static void _pin_lock(struct mm_struct *mm, int lock) { + if (lock) + spin_lock(&mm->page_table_lock); +@@ -81,8 +78,8 @@ static void _pin_lock(struct mm_struct * + #define PIN_BATCH 8 + static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl); + +-static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags, +- unsigned int cpu, unsigned int seq) ++static inline unsigned int pgd_walk_set_prot(void *pt, pgprot_t flags, ++ unsigned int cpu, unsigned int seq) + { + struct page *page = virt_to_page(pt); + unsigned long pfn = page_to_pfn(page); +@@ -100,9 +97,9 @@ static inline unsigned int mm_walk_set_p + return seq; + } + +-static void mm_walk(struct mm_struct *mm, pgprot_t flags) ++static void pgd_walk(pgd_t *pgd_base, pgprot_t flags) + { +- pgd_t *pgd; ++ pgd_t *pgd = pgd_base; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; +@@ -110,7 +107,6 @@ static void mm_walk(struct mm_struct *mm + unsigned int cpu, seq; + multicall_entry_t *mcl; + +- pgd = mm->pgd; + cpu = get_cpu(); + + /* +@@ -125,18 +121,18 @@ static void mm_walk(struct mm_struct *mm + continue; + pud = pud_offset(pgd, 0); + if (PTRS_PER_PUD > 1) /* not folded */ +- seq = mm_walk_set_prot(pud,flags,cpu,seq); ++ seq = pgd_walk_set_prot(pud,flags,cpu,seq); + for (u = 0; u < PTRS_PER_PUD; u++, pud++) { + if (pud_none(*pud)) + continue; + pmd = pmd_offset(pud, 0); + if (PTRS_PER_PMD > 1) /* not folded */ +- seq = mm_walk_set_prot(pmd,flags,cpu,seq); ++ seq = pgd_walk_set_prot(pmd,flags,cpu,seq); + for (m = 0; m < PTRS_PER_PMD; m++, pmd++) { + if (pmd_none(*pmd)) + continue; + pte = pte_offset_kernel(pmd,0); +- seq = mm_walk_set_prot(pte,flags,cpu,seq); ++ seq = pgd_walk_set_prot(pte,flags,cpu,seq); + } + } + } +@@ -148,12 +144,12 @@ static void mm_walk(struct mm_struct *mm + seq = 0; + } + MULTI_update_va_mapping(mcl + seq, +- (unsigned long)__user_pgd(mm->pgd), +- pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags), ++ (unsigned long)__user_pgd(pgd_base), ++ 
pfn_pte(virt_to_phys(__user_pgd(pgd_base))>>PAGE_SHIFT, flags), + 0); + MULTI_update_va_mapping(mcl + seq + 1, +- (unsigned long)mm->pgd, +- pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags), ++ (unsigned long)pgd_base, ++ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags), + UVMF_TLB_FLUSH); + if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL))) + BUG(); +@@ -161,21 +157,35 @@ static void mm_walk(struct mm_struct *mm + put_cpu(); + } + ++static void __pgd_pin(pgd_t *pgd) ++{ ++ pgd_walk(pgd, PAGE_KERNEL_RO); ++ xen_pgd_pin(__pa(pgd)); /* kernel */ ++ xen_pgd_pin(__pa(__user_pgd(pgd))); /* user */ ++ SetPagePinned(virt_to_page(pgd)); ++} ++ ++static void __pgd_unpin(pgd_t *pgd) ++{ ++ xen_pgd_unpin(__pa(pgd)); ++ xen_pgd_unpin(__pa(__user_pgd(pgd))); ++ pgd_walk(pgd, PAGE_KERNEL); ++ ClearPagePinned(virt_to_page(pgd)); ++} ++ ++void pgd_test_and_unpin(pgd_t *pgd) ++{ ++ if (PagePinned(virt_to_page(pgd))) ++ __pgd_unpin(pgd); ++} ++ + void mm_pin(struct mm_struct *mm) + { + if (xen_feature(XENFEAT_writable_page_tables)) + return; + + pin_lock(mm); +- +- mm_walk(mm, PAGE_KERNEL_RO); +- xen_pgd_pin(__pa(mm->pgd)); /* kernel */ +- xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */ +- SetPagePinned(virt_to_page(mm->pgd)); +- spin_lock(&mm_unpinned_lock); +- list_del(&mm->context.unpinned); +- spin_unlock(&mm_unpinned_lock); +- ++ __pgd_pin(mm->pgd); + pin_unlock(mm); + } + +@@ -185,34 +195,30 @@ void mm_unpin(struct mm_struct *mm) + return; + + pin_lock(mm); +- +- xen_pgd_unpin(__pa(mm->pgd)); +- xen_pgd_unpin(__pa(__user_pgd(mm->pgd))); +- mm_walk(mm, PAGE_KERNEL); +- ClearPagePinned(virt_to_page(mm->pgd)); +- spin_lock(&mm_unpinned_lock); +- list_add(&mm->context.unpinned, &mm_unpinned); +- spin_unlock(&mm_unpinned_lock); +- ++ __pgd_unpin(mm->pgd); + pin_unlock(mm); + } + + void mm_pin_all(void) + { ++ struct page *page; ++ unsigned long flags; ++ + if (xen_feature(XENFEAT_writable_page_tables)) + return; + + /* +- * Allow uninterrupted access to the mm_unpinned list. We don't +- * actually take the mm_unpinned_lock as it is taken inside mm_pin(). ++ * Allow uninterrupted access to the pgd_list. Also protects ++ * __pgd_pin() by disabling preemption. + * All other CPUs must be at a safe point (e.g., in stop_machine + * or offlined entirely). 
+ */ +- preempt_disable(); +- while (!list_empty(&mm_unpinned)) +- mm_pin(list_entry(mm_unpinned.next, struct mm_struct, +- context.unpinned)); +- preempt_enable(); ++ spin_lock_irqsave(&pgd_lock, flags); ++ list_for_each_entry(page, &pgd_list, lru) { ++ if (!PagePinned(page)) ++ __pgd_pin((pgd_t *)page_address(page)); ++ } ++ spin_unlock_irqrestore(&pgd_lock, flags); + } -+#if CONFIG_XEN_COMPAT <= 0x030002 - static inline int __must_check - HYPERVISOR_set_callbacks( - unsigned long event_selector, unsigned long event_address, -@@ -195,80 +14,24 @@ HYPERVISOR_set_callbacks( - event_selector, event_address, - failsafe_selector, failsafe_address); + void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +@@ -331,11 +337,11 @@ static struct page *split_large_page(uns + return base; + } + +-static void cache_flush_page(void *adr) ++void clflush_cache_range(void *adr, int size) + { + int i; +- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) +- asm volatile("clflush (%0)" :: "r" (adr + i)); ++ for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size) ++ clflush(adr+i); } + + static void flush_kernel_map(void *arg) +@@ -350,7 +356,7 @@ static void flush_kernel_map(void *arg) + asm volatile("wbinvd" ::: "memory"); + else list_for_each_entry(pg, l, lru) { + void *adr = page_address(pg); +- cache_flush_page(adr); ++ clflush_cache_range(adr, PAGE_SIZE); + } + __flush_tlb_all(); + } +@@ -418,6 +424,7 @@ __change_page_attr(unsigned long address + split = split_large_page(address, prot, ref_prot2); + if (!split) + return -ENOMEM; ++ pgprot_val(ref_prot2) &= ~_PAGE_NX; + set_pte(kpte, mk_pte(split, ref_prot2)); + kpte_page = split; + } +@@ -510,9 +517,14 @@ void global_flush_tlb(void) + struct page *pg, *next; + struct list_head l; + +- down_read(&init_mm.mmap_sem); ++ /* ++ * Write-protect the semaphore, to exclude two contexts ++ * doing a list_replace_init() call in parallel and to ++ * exclude new additions to the deferred_pages list: ++ */ ++ down_write(&init_mm.mmap_sem); + list_replace_init(&deferred_pages, &l); +- up_read(&init_mm.mmap_sem); ++ up_write(&init_mm.mmap_sem); + + flush_map(&l); + +--- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -46,6 +47,8 @@ void show_mem(void) + for_each_online_pgdat(pgdat) { + pgdat_resize_lock(pgdat, &flags); + for (i = 0; i < pgdat->node_spanned_pages; ++i) { ++ if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) ++ touch_nmi_watchdog(); + page = pgdat_page_nr(pgdat, i); + total++; + if (PageHighMem(page)) +@@ -206,7 +209,7 @@ void pte_free(struct page *pte) + __free_page(pte); + } + +-void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags) ++void pmd_ctor(struct kmem_cache *cache, void *pmd) + { + memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); + } +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -173,7 +173,7 @@ void eisa_set_level_irq(unsigned int irq + } + + /* +- * Common IRQ routing practice: nybbles in config space, ++ * Common IRQ routing practice: nibbles in config space, + * offset by some magic constant. 
+ */ + static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) +@@ -496,6 +496,26 @@ static int pirq_amd756_set(struct pci_de + return 1; + } + ++/* ++ * PicoPower PT86C523 ++ */ ++static int pirq_pico_get(struct pci_dev *router, struct pci_dev *dev, int pirq) ++{ ++ outb(0x10 + ((pirq - 1) >> 1), 0x24); ++ return ((pirq - 1) & 1) ? (inb(0x26) >> 4) : (inb(0x26) & 0xf); ++} ++ ++static int pirq_pico_set(struct pci_dev *router, struct pci_dev *dev, int pirq, ++ int irq) ++{ ++ unsigned int x; ++ outb(0x10 + ((pirq - 1) >> 1), 0x24); ++ x = inb(0x26); ++ x = ((pirq - 1) & 1) ? ((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | (irq)); ++ outb(x, 0x26); ++ return 1; ++} ++ + #ifdef CONFIG_PCI_BIOS + + static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +@@ -569,7 +589,7 @@ static __init int via_router_probe(struc + /* FIXME: We should move some of the quirk fixup stuff here */ + + /* +- * work arounds for some buggy BIOSes ++ * workarounds for some buggy BIOSes + */ + if (device == PCI_DEVICE_ID_VIA_82C586_0) { + switch(router->device) { +@@ -725,6 +745,24 @@ static __init int amd_router_probe(struc + return 1; + } + ++static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) ++{ ++ switch (device) { ++ case PCI_DEVICE_ID_PICOPOWER_PT86C523: ++ r->name = "PicoPower PT86C523"; ++ r->get = pirq_pico_get; ++ r->set = pirq_pico_set; ++ return 1; ++ ++ case PCI_DEVICE_ID_PICOPOWER_PT86C523BBP: ++ r->name = "PicoPower PT86C523 rev. BB+"; ++ r->get = pirq_pico_get; ++ r->set = pirq_pico_set; ++ return 1; ++ } ++ return 0; ++} ++ + static __initdata struct irq_router_handler pirq_routers[] = { + { PCI_VENDOR_ID_INTEL, intel_router_probe }, + { PCI_VENDOR_ID_AL, ali_router_probe }, +@@ -736,6 +774,7 @@ static __initdata struct irq_router_hand + { PCI_VENDOR_ID_VLSI, vlsi_router_probe }, + { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe }, + { PCI_VENDOR_ID_AMD, amd_router_probe }, ++ { PCI_VENDOR_ID_PICOPOWER, pico_router_probe }, + /* Someone with docs needs to add the ATI Radeon IGP */ + { 0, NULL } + }; +@@ -1014,7 +1053,7 @@ static void __init pcibios_fixup_irqs(vo + * Work around broken HP Pavilion Notebooks which assign USB to + * IRQ 9 even though it is actually wired to IRQ 11 + */ +-static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d) ++static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d) + { + if (!broken_hp_bios_irq9) { + broken_hp_bios_irq9 = 1; +@@ -1027,7 +1066,7 @@ static int __init fix_broken_hp_bios_irq + * Work around broken Acer TravelMate 360 Notebooks which assign + * Cardbus to IRQ 11 even though it is actually wired to IRQ 10 + */ +-static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d) ++static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) + { + if (!acer_tm360_irqrouting) { + acer_tm360_irqrouting = 1; +--- head-2011-03-17.orig/arch/x86/pci/pcifront.c 2009-03-18 10:39:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/pcifront.c 2011-01-31 17:56:27.000000000 +0100 +@@ -31,7 +31,7 @@ static int __init pcifront_x86_stub_init + if (raw_pci_ops) + return 0; + +- printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n"); ++ pr_info("PCI: setting up Xen PCI frontend stub\n"); + + /* Copied from arch/i386/pci/common.c */ + pci_cache_line_size = 32 >> 2; +--- head-2011-03-17.orig/drivers/acpi/processor_core.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_core.c 2011-01-31 
17:56:27.000000000 +0100 +@@ -174,10 +174,20 @@ int acpi_get_cpuid(acpi_handle handle, i + if (apic_id == -1) + return apic_id; + ++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL + for_each_possible_cpu(i) { + if (cpu_physical_id(i) == apic_id) + return i; + } ++#else ++ /* ++ * Use of cpu_physical_id() is bogus here. Rather than defining a ++ * stub enforcing a 1:1 mapping, we keep it undefined to catch bad ++ * uses. Return as if there was a 1:1 mapping. ++ */ ++ if (apic_id < NR_CPUS && cpu_possible(apic_id)) ++ return apic_id; ++#endif + return -1; + } + EXPORT_SYMBOL_GPL(acpi_get_cpuid); +--- head-2011-03-17.orig/drivers/acpi/processor_extcntl.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_extcntl.c 2011-01-31 17:56:27.000000000 +0100 +@@ -95,7 +95,7 @@ int processor_notify_external(struct acp + ret = processor_extcntl_ops->hotplug(pr, type); + break; + default: +- printk(KERN_ERR "Unsupport processor events %d.\n", event); ++ pr_err("Unsupported processor event %d.\n", event); + break; + } + +--- head-2011-03-17.orig/drivers/acpi/processor_idle.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_idle.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1081,6 +1081,14 @@ int acpi_processor_cst_has_changed(struc + if (!pr->flags.power_setup_done) + return -ENODEV; + ++ if (processor_pm_external()) { ++ pr->flags.power = 0; ++ ret = acpi_processor_get_power_info(pr); ++ processor_notify_external(pr, ++ PROCESSOR_PM_CHANGE, PM_TYPE_IDLE); ++ return ret; ++ } ++ + cpuidle_pause_and_lock(); + cpuidle_disable_device(&pr->power.dev); + acpi_processor_get_power_info(pr); +--- head-2011-03-17.orig/drivers/char/tpm/tpm_xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/char/tpm/tpm_xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -104,9 +104,9 @@ void __exit tpmif_exit(void); + #define DPRINTK(fmt, args...) \ + pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args) + #define IPRINTK(fmt, args...) \ +- printk(KERN_INFO "xen_tpm_fr: " fmt, ##args) ++ pr_info("xen_tpm_fr: " fmt, ##args) + #define WPRINTK(fmt, args...) 
\ +- printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args) ++ pr_warning("xen_tpm_fr: " fmt, ##args) + + #define GRANT_INVALID_REF 0 + +--- head-2011-03-17.orig/drivers/cpuidle/Kconfig 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/cpuidle/Kconfig 2011-01-31 17:56:27.000000000 +0100 +@@ -1,6 +1,7 @@ + + config CPU_IDLE + bool "CPU idle PM support" ++ depends on !PROCESSOR_EXTERNAL_CONTROL + default ACPI + help + CPU idle is a generic framework for supporting software-controlled +--- head-2011-03-17.orig/drivers/dma/Kconfig 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/dma/Kconfig 2011-01-31 17:56:27.000000000 +0100 +@@ -61,7 +61,7 @@ config INTEL_IOATDMA + tristate "Intel I/OAT DMA support" + depends on PCI && X86 + select DMA_ENGINE +- select DCA ++ select DCA if !XEN + select ASYNC_TX_DISABLE_PQ_VAL_DMA + select ASYNC_TX_DISABLE_XOR_VAL_DMA + help +--- head-2011-03-17.orig/drivers/dma/ioat/Makefile 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/dma/ioat/Makefile 2011-01-31 17:56:27.000000000 +0100 +@@ -1,2 +1,3 @@ + obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o +-ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o ++dca-$(CONFIG_DCA) := dca.o ++ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o $(dca-y) $(dca-m) +--- head-2011-03-17.orig/drivers/dma/ioat/dca.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/dma/ioat/dca.c 2011-01-31 17:56:27.000000000 +0100 +@@ -682,3 +682,15 @@ ioat3_dca_init(struct pci_dev *pdev, voi + + return dca; + } ++ ++void ioat_remove_dca_provider(struct pci_dev *pdev) ++{ ++ struct ioatdma_device *device = pci_get_drvdata(pdev); ++ ++ if (!device->dca) ++ return; ++ ++ unregister_dca_provider(device->dca, &pdev->dev); ++ free_dca_provider(device->dca); ++ device->dca = NULL; ++} +--- head-2011-03-17.orig/drivers/dma/ioat/dma.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/dma/ioat/dma.h 2011-01-31 17:56:27.000000000 +0100 +@@ -347,4 +347,22 @@ void ioat_kobject_del(struct ioatdma_dev + extern const struct sysfs_ops ioat_sysfs_ops; + extern struct ioat_sysfs_entry ioat_version_attr; + extern struct ioat_sysfs_entry ioat_cap_attr; ++ ++#ifndef CONFIG_XEN ++void ioat_remove_dca_provider(struct pci_dev *); ++#else ++static inline void ioat_remove_dca_provider(struct pci_dev *pdev) ++{ ++ struct ioatdma_device *device = pci_get_drvdata(pdev); ++ BUG_ON(device->dca); ++} ++static inline struct dca_provider *__devinit ++__ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) ++{ ++ return NULL; ++} ++#define ioat_dca_init __ioat_dca_init ++#define ioat2_dca_init __ioat_dca_init ++#endif ++ + #endif /* IOATDMA_H */ +--- head-2011-03-17.orig/drivers/dma/ioat/pci.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/drivers/dma/ioat/pci.c 2011-01-31 17:56:27.000000000 +0100 +@@ -29,7 +29,6 @@ + #include + #include + #include +-#include + #include + #include "dma.h" + #include "dma_v2.h" +@@ -170,11 +169,7 @@ static void __devexit ioat_remove(struct + return; + + dev_err(&pdev->dev, "Removing dma and dca services\n"); +- if (device->dca) { +- unregister_dca_provider(device->dca, &pdev->dev); +- free_dca_provider(device->dca); +- device->dca = NULL; +- } ++ ioat_remove_dca_provider(pdev); + ioat_dma_remove(device); + } + +--- head-2011-03-17.orig/drivers/firmware/dell_rbu.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/drivers/firmware/dell_rbu.c 2011-01-31 17:56:27.000000000 +0100 +@@ -175,9 +175,8 @@ static int create_packet(void *data, siz + (unsigned long)packet_data_temp_buf, 
ordernum, 0)) { + free_pages((unsigned long)packet_data_temp_buf, + ordernum); +- printk(KERN_WARNING +- "dell_rbu:%s: failed to adjust new " +- "packet\n", __func__); ++ pr_warning("dell_rbu:%s: failed to adjust new " ++ "packet\n", __func__); + retval = -ENOMEM; + spin_lock(&rbu_data.lock); + goto out_alloc_packet_array; +--- head-2011-03-17.orig/drivers/hwmon/coretemp-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/coretemp-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -49,7 +49,7 @@ static struct coretemp_data *coretemp_up + struct pdev_entry { + struct list_head list; + struct platform_device *pdev; +- struct class_device *class_dev; ++ struct device *hwmon_dev; + struct mutex update_lock; + const char *name; + u8 x86_model, x86_mask; +@@ -61,8 +61,6 @@ struct pdev_entry { + u8 alarm; + }; + +-static struct coretemp_data *coretemp_update_device(struct device *dev); - --static inline int --HYPERVISOR_fpu_taskswitch( -- int set) --{ -- return _hypercall1(int, fpu_taskswitch, set); --} -- --static inline int __must_check --HYPERVISOR_sched_op_compat( -- int cmd, unsigned long arg) --{ -- return _hypercall2(int, sched_op_compat, cmd, arg); --} -- --static inline int __must_check --HYPERVISOR_sched_op( -- int cmd, void *arg) --{ -- return _hypercall2(int, sched_op, cmd, arg); --} + /* + * Sysfs stuff + */ +@@ -224,9 +222,9 @@ static int coretemp_probe(struct platfor + if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group))) + return err; + +- data->class_dev = hwmon_device_register(&pdev->dev); +- if (IS_ERR(data->class_dev)) { +- err = PTR_ERR(data->class_dev); ++ data->hwmon_dev = hwmon_device_register(&pdev->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", + err); + goto exit_class; +@@ -243,7 +241,7 @@ static int coretemp_remove(struct platfo + { + struct coretemp_data *data = platform_get_drvdata(pdev); + +- hwmon_device_unregister(data->class_dev); ++ hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); + return 0; + } +@@ -315,9 +313,10 @@ static int coretemp_device_add(unsigned + if (err) + goto exit_entry_free; + +- /* check if family 6, models e, f */ ++ /* check if family 6, models e, f, 16 */ + if (info.x86 != 0x6 || +- !((pdev_entry->x86_model == 0xe) || (pdev_entry->x86_model == 0xf))) { ++ !((pdev_entry->x86_model == 0xe) || (pdev_entry->x86_model == 0xf) || ++ (pdev_entry->x86_model == 0x16))) { + + /* supported CPU not found, but report the unknown + family 6 CPU */ +--- head-2011-03-17.orig/drivers/oprofile/cpu_buffer.c 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-03-17/drivers/oprofile/cpu_buffer.c 2011-01-31 17:56:27.000000000 +0100 +@@ -415,6 +415,39 @@ void oprofile_add_pc(unsigned long pc, i + log_sample(cpu_buf, pc, 0, is_kernel, event); + } + ++#ifdef CONFIG_XEN ++/* ++ * This is basically log_sample(b, ESCAPE_CODE, cpu_mode, CPU_TRACE_BEGIN), ++ * as was previously accessible through oprofile_add_pc(). 
++ */ ++void oprofile_add_mode(int cpu_mode) ++{ ++ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); ++ struct task_struct *task; ++ ++ if (nr_available_slots(cpu_buf) < 3) { ++ cpu_buf->sample_lost_overflow++; ++ return; ++ } ++ ++ task = current; ++ ++ /* notice a switch from user->kernel or vice versa */ ++ if (cpu_buf->last_cpu_mode != cpu_mode) { ++ cpu_buf->last_cpu_mode = cpu_mode; ++ add_code(cpu_buf, cpu_mode); ++ } ++ ++ /* notice a task switch */ ++ if (cpu_buf->last_task != task) { ++ cpu_buf->last_task = task; ++ add_code(cpu_buf, (unsigned long)task); ++ } ++ ++ add_code(cpu_buf, CPU_TRACE_BEGIN); ++} +#endif ++ + void oprofile_add_trace(unsigned long pc) + { + struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); +--- head-2011-03-17.orig/drivers/pci/msi-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/pci/msi-xen.c 2011-01-31 17:56:27.000000000 +0100 +@@ -260,6 +260,12 @@ static int msi_map_vector(struct pci_dev + map_irq.pirq : evtchn_map_pirq(-1, map_irq.pirq)); + } + ++static void pci_intx_for_msi(struct pci_dev *dev, int enable) ++{ ++ if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) ++ pci_intx(dev, enable); ++} ++ + #ifdef CONFIG_PM + void pci_restore_msi_state(struct pci_dev *dev) + { +@@ -269,7 +275,7 @@ void pci_restore_msi_state(struct pci_de + if (!dev->msi_enabled && !dev->msix_enabled) + return; + +- pci_intx(dev, 0); /* disable intx */ ++ pci_intx_for_msi(dev, 0); + if (dev->msi_enabled) + msi_set_enable(dev, 0); + if (dev->msix_enabled) +@@ -306,7 +312,7 @@ static int msi_capability_init(struct pc + return -EBUSY; + + /* Set MSI enabled bits */ +- pci_intx(dev, 0); /* disable intx */ ++ pci_intx_for_msi(dev, 0); + msi_set_enable(dev, 1); + dev->msi_enabled = 1; + +@@ -380,7 +386,7 @@ static int msix_capability_init(struct p + return avail; + } + +- pci_intx(dev, 0); /* disable intx */ ++ pci_intx_for_msi(dev, 0); + msix_set_enable(dev, 1); + dev->msix_enabled = 1; + +@@ -516,7 +522,7 @@ void pci_disable_msi(struct pci_dev* dev + + /* Disable MSI mode */ + msi_set_enable(dev, 0); +- pci_intx(dev, 1); /* enable intx */ ++ pci_intx_for_msi(dev, 1); + dev->msi_enabled = 0; + } + EXPORT_SYMBOL(pci_disable_msi); +@@ -653,7 +659,7 @@ void pci_disable_msix(struct pci_dev* de + + /* Disable MSI mode */ + msix_set_enable(dev, 0); +- pci_intx(dev, 1); /* enable intx */ ++ pci_intx_for_msi(dev, 1); + dev->msix_enabled = 0; + } + EXPORT_SYMBOL(pci_disable_msix); +--- head-2011-03-17.orig/drivers/xen/balloon/balloon.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/balloon/balloon.c 2011-01-31 17:56:27.000000000 +0100 +@@ -124,10 +124,8 @@ static struct timer_list balloon_timer; + PAGE_TO_LIST(p)->prev = NULL; \ + } while(0) + +-#define IPRINTK(fmt, args...) \ +- printk(KERN_INFO "xen_mem: " fmt, ##args) +-#define WPRINTK(fmt, args...) \ +- printk(KERN_WARNING "xen_mem: " fmt, ##args) ++#define IPRINTK(fmt, args...) pr_info("xen_mem: " fmt, ##args) ++#define WPRINTK(fmt, args...) pr_warning("xen_mem: " fmt, ##args) + + /* balloon_append: add the given page to the balloon. 
*/ + static void balloon_append(struct page *page, int account) +@@ -324,6 +322,8 @@ static int increase_reservation(unsigned + + #ifndef MODULE + setup_per_zone_pages_min(); ++ if (rc > 0) ++ kswapd_run(0); + if (need_zonelists_rebuild) + build_all_zonelists(); + else +@@ -477,7 +477,7 @@ static int balloon_init_watcher(struct n + + err = register_xenbus_watch(&target_watch); + if (err) +- printk(KERN_ERR "Failed to set balloon watcher\n"); ++ pr_err("Failed to set balloon watcher\n"); + + return NOTIFY_DONE; + } +--- head-2011-03-17.orig/drivers/xen/blkback/blkback.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/blkback.c 2011-01-31 17:56:27.000000000 +0100 +@@ -275,13 +275,10 @@ static void __end_block_io_op(pending_re + } + } + +-static int end_block_io_op(struct bio *bio, unsigned int done, int error) ++static void end_block_io_op(struct bio *bio, int error) + { +- if (bio->bi_size != 0) +- return 1; + __end_block_io_op(bio->bi_private, error); + bio_put(bio); +- return error; + } + + +@@ -664,7 +661,7 @@ static int __init blkif_init(void) + kfree(pending_reqs); + kfree(pending_grant_handles); + free_empty_pages_and_pagevec(pending_pages, mmap_pages); +- printk("%s: out of memory\n", __FUNCTION__); ++ pr_warning("%s: out of memory\n", __FUNCTION__); + return -ENOMEM; + } + +--- head-2011-03-17.orig/drivers/xen/blkback/blkback-pagemap.c 2009-06-09 15:01:37.000000000 +0200 ++++ head-2011-03-17/drivers/xen/blkback/blkback-pagemap.c 2011-01-31 17:56:27.000000000 +0100 +@@ -38,8 +38,8 @@ blkback_pagemap_set(int idx, struct page + + entry = blkback_pagemap + idx; + if (!blkback_pagemap_entry_clear(entry)) { +- printk("overwriting pagemap %d: d %u b %u g %u\n", +- idx, entry->domid, entry->busid, entry->gref); ++ pr_emerg("overwriting pagemap %d: d %u b %u g %u\n", ++ idx, entry->domid, entry->busid, entry->gref); + BUG(); + } + +@@ -63,7 +63,7 @@ blkback_pagemap_clear(struct page *page) + + entry = blkback_pagemap + idx; + if (blkback_pagemap_entry_clear(entry)) { +- printk("clearing empty pagemap %d\n", idx); ++ pr_emerg("clearing empty pagemap %d\n", idx); + BUG(); + } + +@@ -85,7 +85,7 @@ blkback_pagemap_read(struct page *page) + + entry = blkback_pagemap + idx; + if (blkback_pagemap_entry_clear(entry)) { +- printk("reading empty pagemap %d\n", idx); ++ pr_emerg("reading empty pagemap %d\n", idx); + BUG(); + } + +--- head-2011-03-17.orig/drivers/xen/blkback/vbd.c 2010-03-22 12:00:53.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/vbd.c 2011-01-31 17:56:27.000000000 +0100 +@@ -126,18 +126,18 @@ void vbd_resize(blkif_t *blkif) + struct xenbus_device *dev = blkif->be->dev; + unsigned long long new_size = vbd_size(vbd); + +- printk(KERN_INFO "VBD Resize: new size %Lu\n", new_size); ++ pr_info("VBD Resize: new size %Lu\n", new_size); + vbd->size = new_size; + again: + err = xenbus_transaction_start(&xbt); + if (err) { +- printk(KERN_WARNING "Error starting transaction"); ++ pr_warning("Error %d starting transaction", err); + return; + } + err = xenbus_printf(xbt, dev->nodename, "sectors", "%Lu", + vbd_size(vbd)); + if (err) { +- printk(KERN_WARNING "Error writing new size"); ++ pr_warning("Error %d writing new size", err); + goto abort; + } + /* +@@ -147,7 +147,7 @@ again: + */ + err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state); + if (err) { +- printk(KERN_WARNING "Error writing the state"); ++ pr_warning("Error %d writing the state", err); + goto abort; + } + +@@ -155,7 +155,7 @@ again: + if (err == -EAGAIN) + goto again; + if (err) 
+- printk(KERN_WARNING "Error ending transaction"); ++ pr_warning("Error %d ending transaction", err); + abort: + xenbus_transaction_end(xbt, 1); + } +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-01-31 17:56:27.000000000 +0100 +@@ -233,7 +233,7 @@ static int setup_blkring(struct xenbus_d + SHARED_RING_INIT(sring); + FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); + +- memset(info->sg, 0, sizeof(info->sg)); ++ sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); + + err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); + if (err < 0) { +@@ -336,8 +336,7 @@ static void connect(struct blkfront_info + "sectors", "%Lu", §ors); + if (XENBUS_EXIST_ERR(err)) + return; +- printk(KERN_INFO "Setting capacity to %Lu\n", +- sectors); ++ pr_info("Setting capacity to %Lu\n", sectors); + set_capacity(info->gd, sectors); + + /* fall through */ +@@ -591,8 +590,6 @@ int blkif_ioctl(struct inode *inode, str + } + } + +- /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", +- command);*/ + return -EINVAL; /* same return as native Linux */ + } + +@@ -667,9 +664,8 @@ static int blkif_queue_request(struct re + + ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); + BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); +- for (i = 0; i < ring_req->nr_segments; ++i) { +- sg = info->sg + i; +- buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT; ++ for_each_sg(info->sg, sg, ring_req->nr_segments, i) { ++ buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; + fsect = sg->offset >> 9; + lsect = fsect + (sg->length >> 9) - 1; + /* install a grant reference. */ +@@ -785,8 +781,9 @@ static irqreturn_t blkif_int(int irq, vo + switch (bret->operation) { + case BLKIF_OP_WRITE_BARRIER: + if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { +- printk("blkfront: %s: write barrier op failed\n", +- info->gd->disk_name); ++ pr_warning("blkfront: %s:" ++ " write barrier op failed\n", ++ info->gd->disk_name); + uptodate = -EOPNOTSUPP; + info->feature_barrier = 0; + xlvbd_barrier(info); +--- head-2011-03-17.orig/drivers/xen/blkfront/block.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/block.h 2011-01-31 17:56:27.000000000 +0100 +@@ -59,7 +59,7 @@ + #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) + + #if 0 +-#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) ++#define DPRINTK_IOCTL(_f, _a...) pr_alert(_f, ## _a) + #else + #define DPRINTK_IOCTL(_f, _a...) ((void)0) + #endif +--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-01-31 17:56:27.000000000 +0100 +@@ -185,7 +185,8 @@ xlbd_alloc_major_info(int major, int min + return NULL; + } + +- printk("xen-vbd: registered block device major %i\n", ptr->major); ++ pr_info("xen-vbd: registered block device major %i\n", ++ ptr->major); + } + + ptr->minors = minors; +@@ -435,7 +436,8 @@ xlvbd_add(blkif_sector_t capacity, int v + + if ((vdevice>>EXT_SHIFT) > 1) { + /* this is above the extended range; something is wrong */ +- printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); ++ pr_warning("blkfront: vdevice %#x is above the extended" ++ " range; ignoring\n", vdevice); + return -ENODEV; + } + +@@ -494,15 +496,16 @@ xlvbd_barrier(struct blkfront_info *info + info->feature_barrier ? 
QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); + if (err) + return err; +- printk(KERN_INFO "blkfront: %s: barriers %s\n", +- info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled"); ++ pr_info("blkfront: %s: barriers %s\n", ++ info->gd->disk_name, ++ info->feature_barrier ? "enabled" : "disabled"); + return 0; + } + #else + int + xlvbd_barrier(struct blkfront_info *info) + { +- printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); ++ pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); + return -ENOSYS; + } + #endif +--- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-17 10:10:44.000000000 +0100 +@@ -776,8 +776,8 @@ static int blktap_ioctl(struct inode *in + case BLKTAP_IOCTL_PRINT_IDXS: + { + if (info) { +- printk("User Rings: \n-----------\n"); +- printk("UF: rsp_cons: %2d, req_prod_prv: %2d " ++ pr_info("User Rings: \n-----------\n"); ++ pr_info("UF: rsp_cons: %2d, req_prod_prv: %2d " + "| req_prod: %2d, rsp_prod: %2d\n", + info->ufe_ring.rsp_cons, + info->ufe_ring.req_prod_pvt, +--- head-2011-03-17.orig/drivers/xen/blktap/common.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/common.h 2011-01-31 17:56:27.000000000 +0100 +@@ -44,7 +44,7 @@ + #define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \ + __FILE__ , __LINE__ , ## _a ) + +-#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args) ++#define WPRINTK(fmt, args...) pr_warning("blktap: " fmt, ##args) + + struct backend_info; + +--- head-2011-03-17.orig/drivers/xen/blktap/xenbus.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/xenbus.c 2011-01-31 17:56:27.000000000 +0100 +@@ -359,8 +359,8 @@ static void tap_frontend_changed(struct + switch (frontend_state) { + case XenbusStateInitialising: + if (dev->state == XenbusStateClosed) { +- printk(KERN_INFO "%s: %s: prepare for reconnect\n", +- __FUNCTION__, dev->nodename); ++ pr_info("%s: %s: prepare for reconnect\n", ++ __FUNCTION__, dev->nodename); + xenbus_switch_state(dev, XenbusStateInitWait); + } + break; +@@ -461,9 +461,8 @@ static int connect_ring(struct backend_i + xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); + return -1; + } +- printk(KERN_INFO +- "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n", +- ring_ref, evtchn, be->blkif->blk_protocol, protocol); ++ pr_info("blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n", ++ ring_ref, evtchn, be->blkif->blk_protocol, protocol); + + /* Map the shared frame, irq etc. */ + err = tap_blkif_map(be->blkif, ring_ref, evtchn); +--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-01-31 17:56:27.000000000 +0100 +@@ -18,6 +18,7 @@ blktap_control_initialize_tap(struct blk + memset(tap, 0, sizeof(*tap)); + set_bit(BLKTAP_CONTROL, &tap->dev_inuse); + init_rwsem(&tap->tap_sem); ++ sg_init_table(tap->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); + init_waitqueue_head(&tap->wq); + atomic_set(&tap->refcnt, 0); + +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-01-31 17:56:27.000000000 +0100 +@@ -16,7 +16,7 @@ + #include "../blkback/blkback-pagemap.h" + + #if 0 +-#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) ++#define DPRINTK_IOCTL(_f, _a...) 
pr_alert(_f, ## _a) + #else + #define DPRINTK_IOCTL(_f, _a...) ((void)0) + #endif +@@ -133,8 +133,6 @@ blktap_device_ioctl(struct inode *inode, + return 0; + + default: +- /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", +- command);*/ + return -EINVAL; /* same return as native Linux */ + } + +@@ -662,8 +660,7 @@ blktap_device_process_request(struct blk + request->nr_pages = 0; + blkif_req.nr_segments = blk_rq_map_sg(req->q, req, tap->sg); + BUG_ON(blkif_req.nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); +- for (i = 0; i < blkif_req.nr_segments; ++i) { +- sg = tap->sg + i; ++ for_each_sg(tap->sg, sg, blkif_req.nr_segments, i) { + fsect = sg->offset >> 9; + lsect = fsect + (sg->length >> 9) - 1; + nr_sects += sg->length >> 9; +@@ -674,13 +671,13 @@ blktap_device_process_request(struct blk + .first_sect = fsect, + .last_sect = lsect }; + +- if (PageBlkback(sg->page)) { ++ if (PageBlkback(sg_page(sg))) { + /* foreign page -- use xen */ + if (blktap_prep_foreign(tap, + request, + &blkif_req, + i, +- sg->page, ++ sg_page(sg), + &table)) + goto out; + } else { +@@ -688,7 +685,7 @@ blktap_device_process_request(struct blk + if (blktap_map(tap, + request, + i, +- sg->page)) ++ sg_page(sg))) + goto out; + } - static inline long __must_check - HYPERVISOR_set_timer_op( - u64 timeout) - { -- unsigned long timeout_hi = (unsigned long)(timeout>>32); -- unsigned long timeout_lo = (unsigned long)timeout; -- return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); --} -- --static inline int __must_check --HYPERVISOR_platform_op( -- struct xen_platform_op *platform_op) --{ -- platform_op->interface_version = XENPF_INTERFACE_VERSION; -- return _hypercall1(int, platform_op, platform_op); --} -- --static inline int __must_check --HYPERVISOR_set_debugreg( -- unsigned int reg, unsigned long value) --{ -- return _hypercall2(int, set_debugreg, reg, value); --} -- --static inline unsigned long __must_check --HYPERVISOR_get_debugreg( -- unsigned int reg) --{ -- return _hypercall1(unsigned long, get_debugreg, reg); -+ return _hypercall2(long, set_timer_op, -+ (unsigned long)timeout, -+ (unsigned long)(timeout>>32)); +--- head-2011-03-17.orig/drivers/xen/console/console.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/console/console.c 2011-01-31 17:56:27.000000000 +0100 +@@ -716,10 +716,10 @@ static int __init xencons_init(void) + tty_set_operations(xencons_driver, &xencons_ops); + + if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) { +- printk("WARNING: Failed to register Xen virtual " +- "console driver as '%s%d'\n", +- DRV(xencons_driver)->name, +- DRV(xencons_driver)->name_base); ++ pr_warning("WARNING: Failed to register Xen virtual " ++ "console driver as '%s%d'\n", ++ DRV(xencons_driver)->name, ++ DRV(xencons_driver)->name_base); + put_tty_driver(xencons_driver); + xencons_driver = NULL; + return rc; +@@ -736,8 +736,8 @@ static int __init xencons_init(void) + BUG_ON(xencons_priv_irq < 0); + } + +- printk("Xen virtual console successfully installed as %s%d\n", +- DRV(xencons_driver)->name, xc_num); ++ pr_info("Xen virtual console successfully installed as %s%d\n", ++ DRV(xencons_driver)->name, xc_num); + + return 0; } +--- head-2011-03-17.orig/drivers/xen/console/xencons_ring.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/console/xencons_ring.c 2011-01-31 17:56:27.000000000 +0100 +@@ -125,7 +125,7 @@ int xencons_ring_init(void) + xen_start_info->console.domU.evtchn, + handle_input, 0, "xencons", NULL); + if (irq < 0) { +- 
printk(KERN_ERR "XEN console request irq failed %i\n", irq); ++ pr_err("XEN console request irq failed %i\n", irq); + return irq; + } - static inline int __must_check - HYPERVISOR_update_descriptor( - u64 ma, u64 desc) - { -- return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); --} -- --static inline int __must_check --HYPERVISOR_memory_op( -- unsigned int cmd, void *arg) --{ -- if (arch_use_lazy_mmu_mode()) -- xen_multicall_flush(false); -- return _hypercall2(int, memory_op, cmd, arg); --} -- --static inline int __must_check --HYPERVISOR_multicall( -- multicall_entry_t *call_list, unsigned int nr_calls) --{ -- return _hypercall2(int, multicall, call_list, nr_calls); -+ return _hypercall4(int, update_descriptor, -+ (unsigned long)ma, (unsigned long)(ma>>32), -+ (unsigned long)desc, (unsigned long)(desc>>32)); +--- head-2011-03-17.orig/drivers/xen/core/cpu_hotplug.c 2011-01-24 12:06:05.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/cpu_hotplug.c 2011-01-31 17:56:27.000000000 +0100 +@@ -36,7 +36,7 @@ static void vcpu_hotplug(unsigned int cp + sprintf(dir, "cpu/%u", cpu); + err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); + if (err != 1) { +- printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); ++ pr_err("XENBUS: Unable to read cpu state\n"); + return; + } + +@@ -49,7 +49,7 @@ static void vcpu_hotplug(unsigned int cp + if (!cpu_down(cpu) && dev) + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); + } else { +- printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", ++ pr_err("XENBUS: unknown state(%s) on CPU%d\n", + state, cpu); + } } +@@ -97,8 +97,7 @@ static int setup_cpu_watcher(struct noti + if (!is_initial_xendomain()) { + for_each_possible_cpu(i) + vcpu_hotplug(i, get_cpu_sysdev(i)); +- printk(KERN_INFO "Brought up %ld CPUs\n", +- (long)num_online_cpus()); ++ pr_info("Brought up %ld CPUs\n", (long)num_online_cpus()); + } - static inline int __must_check -@@ -287,67 +50,6 @@ HYPERVISOR_update_va_mapping( + return NOTIFY_DONE; +@@ -132,8 +131,7 @@ int smp_suspend(void) + continue; + err = cpu_down(cpu); + if (err) { +- printk(KERN_CRIT "Failed to take all CPUs " +- "down: %d.\n", err); ++ pr_crit("Failed to take all CPUs down: %d\n", err); + for_each_possible_cpu(cpu) + vcpu_hotplug(cpu, NULL); + return err; +@@ -161,8 +159,8 @@ int cpu_up_check(unsigned int cpu) + if (local_cpu_hotplug_request()) { + cpu_set(cpu, local_allowed_cpumask); + if (!cpu_isset(cpu, xenbus_allowed_cpumask)) { +- printk("%s: attempt to bring up CPU %u disallowed by " +- "remote admin.\n", __FUNCTION__, cpu); ++ pr_warning("%s: attempt to bring up CPU %u disallowed " ++ "by remote admin.\n", __FUNCTION__, cpu); + rc = -EBUSY; + } + } else if (!cpu_isset(cpu, local_allowed_cpumask) || +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-01-31 17:56:27.000000000 +0100 +@@ -350,8 +350,8 @@ static int find_unbound_irq(void) + + if (!warned) { + warned = 1; +- printk(KERN_WARNING "No available IRQ to bind to: " +- "increase NR_DYNIRQS.\n"); ++ pr_warning("No available IRQ to bind to: " ++ "increase NR_DYNIRQS.\n"); + } + + return -ENOSPC; +@@ -837,8 +837,7 @@ static void enable_pirq(unsigned int irq + bind_pirq.flags = probing_irq(irq) ? 
0 : BIND_PIRQ__WILL_SHARE; + if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) { + if (!probing_irq(irq)) +- printk(KERN_INFO "Failed to obtain physical IRQ %d\n", +- irq); ++ pr_info("Failed to obtain physical IRQ %d\n", irq); + return; + } + evtchn = bind_pirq.port; +@@ -1136,8 +1135,8 @@ int evtchn_map_pirq(int irq, int xen_pir + return 0; + } else if (type_from_irq(irq) != IRQT_PIRQ + || index_from_irq(irq) != xen_pirq) { +- printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - " +- "cannot map to PIRQ#%u\n", ++ pr_err("IRQ#%d is already mapped to %d:%u - " ++ "cannot map to PIRQ#%u\n", + irq, type_from_irq(irq), index_from_irq(irq), xen_pirq); + return -EINVAL; + } +--- head-2011-03-17.orig/drivers/xen/core/firmware.c 2007-06-22 09:08:06.000000000 +0200 ++++ head-2011-03-17/drivers/xen/core/firmware.c 2011-01-31 17:56:27.000000000 +0100 +@@ -1,4 +1,5 @@ + #include ++#include + #include + #include + #include +--- head-2011-03-17.orig/drivers/xen/core/gnttab.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/gnttab.c 2011-01-31 17:56:27.000000000 +0100 +@@ -691,7 +691,7 @@ int gnttab_resume(void) + resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); + shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); + if (shared == NULL) { +- printk("error to ioremap gnttab share frames\n"); ++ pr_warning("error to ioremap gnttab share frames\n"); + return -1; + } + } +--- head-2011-03-17.orig/drivers/xen/core/machine_kexec.c 2009-07-13 14:25:35.000000000 +0200 ++++ head-2011-03-17/drivers/xen/core/machine_kexec.c 2011-01-31 17:56:27.000000000 +0100 +@@ -29,6 +29,10 @@ void __init xen_machine_kexec_setup_reso + int k = 0; + int rc; + ++ if (strstr(boot_command_line, "crashkernel=")) ++ pr_warning("Ignoring crashkernel command line, " ++ "parameter will be supplied by xen\n"); ++ + if (!is_initial_xendomain()) + return; + +@@ -130,6 +134,13 @@ void __init xen_machine_kexec_setup_reso + xen_max_nr_phys_cpus)) + goto err; + ++#ifdef CONFIG_X86 ++ if (xen_create_contiguous_region((unsigned long)&vmcoreinfo_note, ++ get_order(sizeof(vmcoreinfo_note)), ++ BITS_PER_LONG)) ++ goto err; ++#endif ++ + return; + + err: +@@ -213,6 +224,13 @@ NORET_TYPE void machine_kexec(struct kim + panic("KEXEC_CMD_kexec hypercall should not return\n"); } - static inline int __must_check --HYPERVISOR_event_channel_op( -- int cmd, void *arg) --{ -- int rc = _hypercall2(int, event_channel_op, cmd, arg); -- --#if CONFIG_XEN_COMPAT <= 0x030002 -- if (unlikely(rc == -ENOSYS)) { -- struct evtchn_op op; -- op.cmd = cmd; -- memcpy(&op.u, arg, sizeof(op.u)); -- rc = _hypercall1(int, event_channel_op_compat, &op); -- memcpy(arg, &op.u, sizeof(op.u)); -- } --#endif -- -- return rc; --} -- --static inline int __must_check --HYPERVISOR_xen_version( -- int cmd, void *arg) --{ -- return _hypercall2(int, xen_version, cmd, arg); --} -- --static inline int __must_check --HYPERVISOR_console_io( -- int cmd, unsigned int count, char *str) --{ -- return _hypercall3(int, console_io, cmd, count, str); --} -- --static inline int __must_check --HYPERVISOR_physdev_op( -- int cmd, void *arg) --{ -- int rc = _hypercall2(int, physdev_op, cmd, arg); -- --#if CONFIG_XEN_COMPAT <= 0x030002 -- if (unlikely(rc == -ENOSYS)) { -- struct physdev_op op; -- op.cmd = cmd; -- memcpy(&op.u, arg, sizeof(op.u)); -- rc = _hypercall1(int, physdev_op_compat, &op); -- memcpy(arg, &op.u, sizeof(op.u)); -- } --#endif -- -- return rc; --} -- --static inline int __must_check --HYPERVISOR_grant_table_op( -- unsigned int 
cmd, void *uop, unsigned int count) --{ -- if (arch_use_lazy_mmu_mode()) -- xen_multicall_flush(false); -- return _hypercall3(int, grant_table_op, cmd, uop, count); --} -- --static inline int __must_check - HYPERVISOR_update_va_mapping_otherdomain( - unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) ++#ifdef CONFIG_X86 ++unsigned long paddr_vmcoreinfo_note(void) ++{ ++ return virt_to_machine(&vmcoreinfo_note); ++} ++#endif ++ + void machine_shutdown(void) { -@@ -358,86 +60,3 @@ HYPERVISOR_update_va_mapping_otherdomain - return _hypercall5(int, update_va_mapping_otherdomain, va, - new_val.pte_low, pte_hi, flags, domid); + /* do nothing */ +--- head-2011-03-17.orig/drivers/xen/core/machine_reboot.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/machine_reboot.c 2011-01-31 17:56:27.000000000 +0100 +@@ -204,8 +204,7 @@ int __xen_suspend(int fast_suspend, void + + #if defined(__i386__) || defined(__x86_64__) + if (xen_feature(XENFEAT_auto_translated_physmap)) { +- printk(KERN_WARNING "Cannot suspend in " +- "auto_translated_physmap mode.\n"); ++ pr_warning("Can't suspend in auto_translated_physmap mode\n"); + return -EOPNOTSUPP; + } + #endif +--- head-2011-03-17.orig/drivers/xen/core/reboot.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/reboot.c 2011-01-31 17:56:27.000000000 +0100 +@@ -82,14 +82,14 @@ static int xen_suspend(void *__unused) + daemonize("suspend"); + err = set_cpus_allowed(current, cpumask_of_cpu(0)); + if (err) { +- printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); ++ pr_err("Xen suspend can't run on CPU0 (%d)\n", err); + goto fail; + } + + do { + err = __xen_suspend(fast_suspend, xen_resume_notifier); + if (err) { +- printk(KERN_ERR "Xen suspend failed (%d)\n", err); ++ pr_err("Xen suspend failed (%d)\n", err); + goto fail; + } + if (!suspend_cancelled) +@@ -151,8 +151,8 @@ static void __shutdown_handler(struct wo + NULL, CLONE_FS | CLONE_FILES); + + if (err < 0) { +- printk(KERN_WARNING "Error creating shutdown process (%d): " +- "retrying...\n", -err); ++ pr_warning("Error creating shutdown process (%d): " ++ "retrying...\n", -err); + schedule_delayed_work(&shutdown_work, HZ/2); + } } +@@ -198,7 +198,7 @@ static void shutdown_handler(struct xenb + else if (strcmp(str, "halt") == 0) + new_state = SHUTDOWN_HALT; + else +- printk("Ignoring shutdown request: %s\n", str); ++ pr_warning("Ignoring shutdown request: %s\n", str); + + switch_shutdown_state(new_state); + +@@ -217,8 +217,7 @@ static void sysrq_handler(struct xenbus_ + if (err) + return; + if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { +- printk(KERN_ERR "Unable to read sysrq code in " +- "control/sysrq\n"); ++ pr_err("Unable to read sysrq code in control/sysrq\n"); + xenbus_transaction_end(xbt, 1); + return; + } +@@ -267,7 +266,7 @@ static int setup_suspend_evtchn(void) + return -1; + + port = irq_to_evtchn_port(irq); +- printk(KERN_INFO "suspend: event channel %d\n", port); ++ pr_info("suspend: event channel %d\n", port); + sprintf(portstr, "%d", port); + xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); + +@@ -280,7 +279,7 @@ static int setup_shutdown_watcher(void) + + err = register_xenbus_watch(&sysrq_watch); + if (err) { +- printk(KERN_ERR "Failed to set sysrq watcher\n"); ++ pr_err("Failed to set sysrq watcher\n"); + return err; + } + +@@ -293,14 +292,14 @@ static int setup_shutdown_watcher(void) + + err = register_xenbus_watch(&shutdown_watch); + if (err) { +- printk(KERN_ERR "Failed to set 
shutdown watcher\n"); ++ pr_err("Failed to set shutdown watcher\n"); + return err; + } + + /* suspend event channel */ + err = setup_suspend_evtchn(); + if (err) { +- printk(KERN_ERR "Failed to register suspend event channel\n"); ++ pr_err("Failed to register suspend event channel\n"); + return err; + } + +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-01-31 17:56:27.000000000 +0100 +@@ -36,31 +36,20 @@ extern void failsafe_callback(void); + extern void system_call(void); + extern void smp_trap_init(trap_info_t *); + +-/* Number of siblings per CPU package */ +-int smp_num_siblings = 1; - --static inline int __must_check --HYPERVISOR_vm_assist( -- unsigned int cmd, unsigned int type) --{ -- return _hypercall2(int, vm_assist, cmd, type); --} -- --static inline int __must_check --HYPERVISOR_vcpu_op( -- int cmd, unsigned int vcpuid, void *extra_args) --{ -- return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); --} -- --static inline int __must_check --HYPERVISOR_suspend( -- unsigned long srec) --{ -- struct sched_shutdown sched_shutdown = { -- .reason = SHUTDOWN_suspend -- }; -- -- int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, -- &sched_shutdown, srec); -- --#if CONFIG_XEN_COMPAT <= 0x030002 -- if (rc == -ENOSYS) -- rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, -- SHUTDOWN_suspend, srec); --#endif -- -- return rc; --} -- --#if CONFIG_XEN_COMPAT <= 0x030002 --static inline int --HYPERVISOR_nmi_op( -- unsigned long op, void *arg) --{ -- return _hypercall2(int, nmi_op, op, arg); --} --#endif + cpumask_t cpu_online_map; + EXPORT_SYMBOL(cpu_online_map); + cpumask_t cpu_possible_map; + EXPORT_SYMBOL(cpu_possible_map); + cpumask_t cpu_initialized_map; + +-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; +-EXPORT_SYMBOL(cpu_data); ++DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info); ++EXPORT_PER_CPU_SYMBOL(cpu_info); + + static DEFINE_PER_CPU(int, resched_irq); + static DEFINE_PER_CPU(int, callfunc_irq); + static char resched_name[NR_CPUS][15]; + static char callfunc_name[NR_CPUS][15]; + +-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; +-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; - --#ifndef CONFIG_XEN --static inline unsigned long __must_check --HYPERVISOR_hvm_op( -- int op, void *arg) --{ -- return _hypercall2(unsigned long, hvm_op, op, arg); --} +-#if defined(__i386__) +-u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... 
NR_CPUS-1] = 0xff }; +-EXPORT_SYMBOL(x86_cpu_to_apicid); -#endif - --static inline int __must_check --HYPERVISOR_callback_op( -- int cmd, const void *arg) + void __init prefill_possible_map(void) + { + int i, rc; +@@ -80,30 +69,6 @@ void __init smp_alloc_memory(void) + { + } + +-static inline void +-set_cpu_sibling_map(unsigned int cpu) -{ -- return _hypercall2(int, callback_op, cmd, arg); --} +- cpu_data[cpu].phys_proc_id = cpu; +- cpu_data[cpu].cpu_core_id = 0; - --static inline int __must_check --HYPERVISOR_xenoprof_op( -- int op, void *arg) --{ -- return _hypercall2(int, xenoprof_op, op, arg); --} +- cpu_sibling_map[cpu] = cpumask_of_cpu(cpu); +- cpu_core_map[cpu] = cpumask_of_cpu(cpu); - --static inline int __must_check --HYPERVISOR_kexec_op( -- unsigned long op, void *args) --{ -- return _hypercall2(int, kexec_op, op, args); +- cpu_data[cpu].booted_cores = 1; -} - --static inline int __must_check --HYPERVISOR_tmem_op( -- struct tmem_op *op) +-static void +-remove_siblinginfo(unsigned int cpu) -{ -- return _hypercall1(int, tmem_op, op); --} -- -- --#endif /* __HYPERCALL_H__ */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypercall_64.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypercall_64.h 2010-03-24 15:10:29.000000000 +0100 -@@ -1,198 +1,10 @@ --/****************************************************************************** -- * hypercall.h -- * -- * Linux-specific hypervisor handling. -- * -- * Copyright (c) 2002-2004, K A Fraser -- * -- * 64-bit updates: -- * Benjamin Liu -- * Jun Nakajima -- * -- * This program is free software; you can redistribute it and/or -- * modify it under the terms of the GNU General Public License version 2 -- * as published by the Free Software Foundation; or, when distributed -- * separately from the Linux kernel or incorporated into other -- * software packages, subject to the following license: -- * -- * Permission is hereby granted, free of charge, to any person obtaining a copy -- * of this source file (the "Software"), to deal in the Software without -- * restriction, including without limitation the rights to use, copy, modify, -- * merge, publish, distribute, sublicense, and/or sell copies of the Software, -- * and to permit persons to whom the Software is furnished to do so, subject to -- * the following conditions: -- * -- * The above copyright notice and this permission notice shall be included in -- * all copies or substantial portions of the Software. -- * -- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -- * IN THE SOFTWARE. 
-- */ -- --#ifndef __HYPERCALL_H__ --#define __HYPERCALL_H__ -- --#include /* memcpy() */ --#include --#include -- --#ifndef __HYPERVISOR_H__ --# error "please don't include this file directly" --#endif -- --#ifdef CONFIG_XEN --#define HYPERCALL_STR(name) \ -- "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" --#else --#define HYPERCALL_STR(name) \ -- "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ -- "add hypercall_stubs(%%rip),%%rax; " \ -- "call *%%rax" --#endif -- --#define _hypercall0(type, name) \ --({ \ -- type __res; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res) \ -- : \ -- : "memory" ); \ -- __res; \ --}) -- --#define _hypercall1(type, name, a1) \ --({ \ -- type __res; \ -- long __ign1; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=D" (__ign1) \ -- : "1" ((long)(a1)) \ -- : "memory" ); \ -- __res; \ --}) -- --#define _hypercall2(type, name, a1, a2) \ --({ \ -- type __res; \ -- long __ign1, __ign2; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ -- : "1" ((long)(a1)), "2" ((long)(a2)) \ -- : "memory" ); \ -- __res; \ --}) -- --#define _hypercall3(type, name, a1, a2, a3) \ --({ \ -- type __res; \ -- long __ign1, __ign2, __ign3; \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ -- "=d" (__ign3) \ -- : "1" ((long)(a1)), "2" ((long)(a2)), \ -- "3" ((long)(a3)) \ -- : "memory" ); \ -- __res; \ --}) +- cpu_data[cpu].phys_proc_id = BAD_APICID; +- cpu_data[cpu].cpu_core_id = BAD_APICID; - --#define _hypercall4(type, name, a1, a2, a3, a4) \ --({ \ -- type __res; \ -- long __ign1, __ign2, __ign3; \ -- register long __arg4 asm("r10") = (long)(a4); \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ -- "=d" (__ign3), "+r" (__arg4) \ -- : "1" ((long)(a1)), "2" ((long)(a2)), \ -- "3" ((long)(a3)) \ -- : "memory" ); \ -- __res; \ --}) +- cpus_clear(cpu_sibling_map[cpu]); +- cpus_clear(cpu_core_map[cpu]); - --#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ --({ \ -- type __res; \ -- long __ign1, __ign2, __ign3; \ -- register long __arg4 asm("r10") = (long)(a4); \ -- register long __arg5 asm("r8") = (long)(a5); \ -- asm volatile ( \ -- HYPERCALL_STR(name) \ -- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ -- "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \ -- : "1" ((long)(a1)), "2" ((long)(a2)), \ -- "3" ((long)(a3)) \ -- : "memory" ); \ -- __res; \ --}) +- cpu_data[cpu].booted_cores = 0; +-} - --#define _hypercall(type, op, a1, a2, a3, a4, a5) \ --({ \ -- type __res; \ -- register typeof((a1)+0) __arg1 asm("rdi") = (a1); \ -- register typeof((a2)+0) __arg2 asm("rsi") = (a2); \ -- register typeof((a3)+0) __arg3 asm("rdx") = (a3); \ -- register typeof((a4)+0) __arg4 asm("r10") = (a4); \ -- register typeof((a5)+0) __arg5 asm("r8") = (a5); \ -- asm volatile ( \ -- "call *%6" \ -- : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ -- "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ -- : "0" (hypercall_page + (op) * 32) \ -- : "memory" ); \ -- __res; \ --}) + static int __cpuinit xen_smp_intr_init(unsigned int cpu) + { + int rc; +@@ -160,9 +125,9 @@ void __cpuinit cpu_bringup(void) + { + cpu_init(); + #ifdef __i386__ +- identify_secondary_cpu(cpu_data + smp_processor_id()); ++ identify_secondary_cpu(¤t_cpu_data); + #else +- identify_cpu(cpu_data + smp_processor_id()); ++ identify_cpu(¤t_cpu_data); + #endif + touch_softlockup_watchdog(); + preempt_disable(); +@@ -262,20 +227,9 @@ void __init 
smp_prepare_cpus(unsigned in + apicid = 0; + if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0) + apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id); +- boot_cpu_data.apicid = apicid; +- cpu_data[0] = boot_cpu_data; - --static inline int __must_check --HYPERVISOR_set_trap_table( -- const trap_info_t *table) --{ -- return _hypercall1(int, set_trap_table, table); --} +- x86_cpu_to_apicid[0] = apicid; - --static inline int __must_check --HYPERVISOR_mmu_update( -- mmu_update_t *req, unsigned int count, unsigned int *success_count, -- domid_t domid) --{ -- if (arch_use_lazy_mmu_mode()) -- return xen_multi_mmu_update(req, count, success_count, domid); -- return _hypercall4(int, mmu_update, req, count, success_count, domid); --} ++ cpu_data(0) = boot_cpu_data; + current_thread_info()->cpu = 0; + +- for (cpu = 0; cpu < NR_CPUS; cpu++) { +- cpus_clear(cpu_sibling_map[cpu]); +- cpus_clear(cpu_core_map[cpu]); +- } - --static inline int __must_check --HYPERVISOR_mmuext_op( -- struct mmuext_op *op, unsigned int count, unsigned int *success_count, -- domid_t domid) --{ -- if (arch_use_lazy_mmu_mode()) -- return xen_multi_mmuext_op(op, count, success_count, domid); -- return _hypercall4(int, mmuext_op, op, count, success_count, domid); --} +- set_cpu_sibling_map(0); - --static inline int __must_check --HYPERVISOR_set_gdt( -- unsigned long *frame_list, unsigned int entries) --{ -- return _hypercall2(int, set_gdt, frame_list, entries); --} + if (xen_smp_intr_init(0)) + BUG(); + +@@ -300,8 +254,7 @@ void __init smp_prepare_cpus(unsigned in + gdt_descr = &cpu_gdt_descr[cpu]; + gdt_descr->address = get_zeroed_page(GFP_KERNEL); + if (unlikely(!gdt_descr->address)) { +- printk(KERN_CRIT "CPU%d failed to allocate GDT\n", +- cpu); ++ pr_crit("CPU%d failed to allocate GDT\n", cpu); + continue; + } + gdt_descr->size = GDT_SIZE; +@@ -316,10 +269,8 @@ void __init smp_prepare_cpus(unsigned in + apicid = cpu; + if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) + apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id); +- cpu_data[cpu] = boot_cpu_data; +- cpu_data[cpu].apicid = apicid; - --static inline int __must_check --HYPERVISOR_stack_switch( -- unsigned long ss, unsigned long esp) --{ -- return _hypercall2(int, stack_switch, ss, esp); --} -+#define HYPERCALL_arg1 "rdi" -+#define HYPERCALL_arg2 "rsi" -+#define HYPERCALL_arg3 "rdx" -+#define HYPERCALL_arg4 "r10" -+#define HYPERCALL_arg5 "r8" +- x86_cpu_to_apicid[cpu] = apicid; ++ cpu_data(cpu) = boot_cpu_data; ++ cpu_data(cpu).cpu_index = cpu; -+#if CONFIG_XEN_COMPAT <= 0x030002 - static inline int __must_check - HYPERVISOR_set_callbacks( - unsigned long event_address, unsigned long failsafe_address, -@@ -201,27 +13,7 @@ HYPERVISOR_set_callbacks( - return _hypercall3(int, set_callbacks, - event_address, failsafe_address, syscall_address); + #ifdef __x86_64__ + cpu_pda(cpu)->pcurrent = idle; +@@ -382,8 +333,6 @@ int __cpu_disable(void) + if (cpu == 0) + return -EBUSY; + +- remove_siblinginfo(cpu); +- + cpu_clear(cpu, map); + fixup_irqs(map); + cpu_clear(cpu, cpu_online_map); +@@ -420,14 +369,11 @@ int __cpuinit __cpu_up(unsigned int cpu) + alternatives_smp_switch(1); + + /* This must be done before setting cpu_online_map */ +- set_cpu_sibling_map(cpu); + wmb(); + + rc = xen_smp_intr_init(cpu); +- if (rc) { +- remove_siblinginfo(cpu); ++ if (rc) + return rc; +- } + + cpu_set(cpu, cpu_online_map); + +--- head-2011-03-17.orig/drivers/xen/fbfront/xenfb.c 2011-02-17 10:08:20.000000000 +0100 ++++ head-2011-03-17/drivers/xen/fbfront/xenfb.c 2011-01-31 
17:56:27.000000000 +0100 +@@ -240,8 +240,8 @@ static void xenfb_update_screen(struct x + mutex_unlock(&info->mm_lock); + + if (x2 < x1 || y2 < y1) { +- printk("xenfb_update_screen bogus rect %d %d %d %d\n", +- x1, x2, y1, y2); ++ pr_warning("xenfb_update_screen bogus rect %d %d %d %d\n", ++ x1, x2, y1, y2); + WARN_ON(1); + } + xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1); +--- head-2011-03-17.orig/drivers/xen/fbfront/xenkbd.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/fbfront/xenkbd.c 2011-01-31 17:56:27.000000000 +0100 +@@ -80,8 +80,8 @@ static irqreturn_t input_handler(int rq, + input_report_key(dev, event->key.keycode, + event->key.pressed); + else +- printk("xenkbd: unhandled keycode 0x%x\n", +- event->key.keycode); ++ pr_warning("xenkbd: unhandled keycode 0x%x\n", ++ event->key.keycode); + break; + case XENKBD_TYPE_POS: + if (event->pos.rel_z) +--- head-2011-03-17.orig/drivers/xen/gntdev/gntdev.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/gntdev/gntdev.c 2011-01-31 17:56:27.000000000 +0100 +@@ -378,14 +378,14 @@ static int __init gntdev_init(void) + struct class_device *device; + + if (!is_running_on_xen()) { +- printk(KERN_ERR "You must be running Xen to use gntdev\n"); ++ pr_err("You must be running Xen to use gntdev\n"); + return -ENODEV; + } + + gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops); + if (gntdev_major < 0) + { +- printk(KERN_ERR "Could not register gntdev device\n"); ++ pr_err("Could not register gntdev device\n"); + return -ENOMEM; + } + +@@ -394,18 +394,16 @@ static int __init gntdev_init(void) + * created manually using mknod. + */ + if ((class = get_xen_class()) == NULL) { +- printk(KERN_ERR "Error setting up xen_class\n"); +- printk(KERN_ERR "gntdev created with major number = %d\n", +- gntdev_major); ++ pr_err("Error setting up xen_class\n"); ++ pr_err("gntdev created, major number = %d\n", gntdev_major); + return 0; + } + + device = class_device_create(class, NULL, MKDEV(gntdev_major, 0), + NULL, GNTDEV_NAME); + if (IS_ERR(device)) { +- printk(KERN_ERR "Error creating gntdev device in xen_class\n"); +- printk(KERN_ERR "gntdev created with major number = %d\n", +- gntdev_major); ++ pr_err("Error creating gntdev device in xen_class\n"); ++ pr_err("gntdev created, major number = %d\n", gntdev_major); + return 0; + } + +@@ -491,7 +489,7 @@ static int gntdev_mmap (struct file *fli + gntdev_file_private_data_t *private_data = flip->private_data; + + if (unlikely(!private_data)) { +- printk(KERN_ERR "File's private data is NULL.\n"); ++ pr_err("file's private data is NULL\n"); + return -EINVAL; + } + +@@ -499,21 +497,21 @@ static int gntdev_mmap (struct file *fli + down_read(&private_data->grants_sem); + if (unlikely(!private_data->grants)) { + up_read(&private_data->grants_sem); +- printk(KERN_ERR "Attempted to mmap before ioctl.\n"); ++ pr_err("attempted to mmap before ioctl\n"); + return -EINVAL; + } + up_read(&private_data->grants_sem); + + if (unlikely((size <= 0) || + (size + slot_index) > private_data->grants_size)) { +- printk(KERN_ERR "Invalid number of pages or offset" +- "(num_pages = %d, first_slot = %ld).\n", ++ pr_err("Invalid number of pages or offset" ++ "(num_pages = %d, first_slot = %ld)\n", + size, slot_index); + return -ENXIO; + } + + if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) { +- printk(KERN_ERR "Writable mappings must be shared.\n"); ++ pr_err("writable mappings must be shared\n"); + return -EINVAL; + } + +@@ -522,8 +520,8 @@ static int gntdev_mmap (struct 
file *fli + for (i = 0; i < size; ++i) { + if (private_data->grants[slot_index + i].state != + GNTDEV_SLOT_NOT_YET_MAPPED) { +- printk(KERN_ERR "Slot (index = %ld) is in the wrong " +- "state (%d).\n", slot_index + i, ++ pr_err("Slot (index = %ld) is in the wrong " ++ "state (%d)\n", slot_index + i, + private_data->grants[slot_index + i].state); + up_write(&private_data->grants_sem); + return -EINVAL; +@@ -538,8 +536,7 @@ static int gntdev_mmap (struct file *fli + vma->vm_private_data = kzalloc(size * sizeof(struct page *), + GFP_KERNEL); + if (vma->vm_private_data == NULL) { +- printk(KERN_ERR "Couldn't allocate mapping structure for VM " +- "area.\n"); ++ pr_err("couldn't allocate mapping structure for VM area\n"); + return -ENOMEM; + } + +@@ -584,7 +581,7 @@ static int gntdev_mmap (struct file *fli + BUG_ON(ret); + if (op.status != GNTST_okay) { + if (op.status != GNTST_eagain) +- printk(KERN_ERR "Error mapping the grant reference " ++ pr_err("Error mapping the grant reference " + "into the kernel (%d). domid = %d; ref = %d\n", + op.status, + private_data->grants[slot_index+i] +@@ -631,8 +628,8 @@ static int gntdev_mmap (struct file *fli + + (i << PAGE_SHIFT), + &ptep))) + { +- printk(KERN_ERR "Error obtaining PTE pointer " +- "(%d).\n", ret); ++ pr_err("Error obtaining PTE pointer (%d)\n", ++ ret); + goto undo_map_out; + } + +@@ -663,7 +660,7 @@ static int gntdev_mmap (struct file *fli + &op, 1); + BUG_ON(ret); + if (op.status != GNTST_okay) { +- printk(KERN_ERR "Error mapping the grant " ++ pr_err("Error mapping the grant " + "reference into user space (%d). domid " + "= %d; ref = %d\n", op.status, + private_data->grants[slot_index+i].u +@@ -770,8 +767,8 @@ static pte_t gntdev_clear_pte(struct vm_ + GNTTABOP_unmap_grant_ref, &op, 1); + BUG_ON(ret); + if (op.status != GNTST_okay) +- printk("User unmap grant status = %d\n", +- op.status); ++ pr_warning("User unmap grant status = %d\n", ++ op.status); + } else { + /* USING SHADOW PAGE TABLES. 
*/ + copy = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm); +@@ -787,7 +784,8 @@ static pte_t gntdev_clear_pte(struct vm_ + &op, 1); + BUG_ON(ret); + if (op.status != GNTST_okay) +- printk("Kernel unmap grant status = %d\n", op.status); ++ pr_warning("Kernel unmap grant status = %d\n", ++ op.status); + + + /* Return slot to the not-yet-mapped state, so that it may be +@@ -845,8 +843,7 @@ static long gntdev_ioctl(struct file *fl + up_write(&private_data->grants_sem); + + if (rc) { +- printk (KERN_ERR "Initialising gntdev private data " +- "failed.\n"); ++ pr_err("Initialising gntdev private data failed\n"); + return rc; + } + } +@@ -888,22 +885,22 @@ private_data_initialised: + if (op.count == 1) { + if ((rc = add_grant_reference(private_data, op.refs, + &op.index)) < 0) { +- printk(KERN_ERR "Adding grant reference " +- "failed (%d).\n", rc); ++ pr_err("Adding grant reference failed (%d)\n", ++ rc); + goto map_out; + } + } else { + if ((rc = find_contiguous_free_range(private_data, + op.count)) < 0) { +- printk(KERN_ERR "Finding contiguous range " +- "failed (%d).\n", rc); ++ pr_err("Finding contiguous range failed" ++ " (%d)\n", rc); + goto map_out; + } + op.index = rc << PAGE_SHIFT; + if ((rc = add_grant_references(private_data, op.count, + refs, rc))) { +- printk(KERN_ERR "Adding grant references " +- "failed (%d).\n", rc); ++ pr_err("Adding grant references failed (%d)\n", ++ rc); + goto map_out; + } + compress_free_list(private_data); +@@ -942,15 +939,13 @@ private_data_initialised: + != GNTDEV_SLOT_NOT_YET_MAPPED)) { + if (private_data->grants[start_index + i].state + == GNTDEV_SLOT_INVALID) { +- printk(KERN_ERR +- "Tried to remove an invalid " ++ pr_err("Tried to remove an invalid " + "grant at offset 0x%x.", + (start_index + i) + << PAGE_SHIFT); + rc = -EINVAL; + } else { +- printk(KERN_ERR +- "Tried to remove a grant which " ++ pr_err("Tried to remove a grant which " + "is currently mmap()-ed at " + "offset 0x%x.", + (start_index + i) +@@ -998,7 +993,7 @@ private_data_initialised: + goto get_offset_out; + } + if (vma->vm_start != vaddr) { +- printk(KERN_ERR "The vaddr specified in an " ++ pr_err("The vaddr specified in an " + "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at " + "the start of the VM area. vma->vm_start = " + "%#lx; vaddr = %#lx\n", +--- head-2011-03-17.orig/drivers/xen/netback/common.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/common.h 2011-01-31 17:56:27.000000000 +0100 +@@ -49,10 +49,8 @@ + #define DPRINTK(_f, _a...) \ + pr_debug("(file=%s, line=%d) " _f, \ + __FILE__ , __LINE__ , ## _a ) +-#define IPRINTK(fmt, args...) \ +- printk(KERN_INFO "xen_net: " fmt, ##args) +-#define WPRINTK(fmt, args...) \ +- printk(KERN_WARNING "xen_net: " fmt, ##args) ++#define IPRINTK(fmt, args...) pr_info("xen_net: " fmt, ##args) ++#define WPRINTK(fmt, args...) pr_warning("xen_net: " fmt, ##args) + + typedef struct netif_st { + /* Unique identifier for this interface. 
*/ +--- head-2011-03-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:10:00.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/interface.c 2011-03-17 14:12:41.000000000 +0100 +@@ -173,9 +173,13 @@ static const struct netif_stat { + { "rx_gso_csum_fixups", offsetof(netif_t, rx_gso_csum_fixups) / sizeof(long) }, + }; + +-static int netbk_get_stats_count(struct net_device *dev) ++static int netbk_get_sset_count(struct net_device *dev, int sset) + { +- return ARRAY_SIZE(netbk_stats); ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(netbk_stats); ++ } ++ return -EOPNOTSUPP; + } + + static void netbk_get_ethtool_stats(struct net_device *dev, +@@ -213,7 +217,7 @@ static struct ethtool_ops network_ethtoo + .set_tso = netbk_set_tso, + .get_link = ethtool_op_get_link, + +- .get_stats_count = netbk_get_stats_count, ++ .get_sset_count = netbk_get_sset_count, + .get_ethtool_stats = netbk_get_ethtool_stats, + .get_strings = netbk_get_strings, + }; +--- head-2011-03-17.orig/drivers/xen/netback/netback.c 2011-02-09 15:54:33.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/netback.c 2011-02-09 15:55:20.000000000 +0100 +@@ -362,8 +362,8 @@ static void xen_network_done_notify(void + { + static struct net_device *eth0_dev = NULL; + if (unlikely(eth0_dev == NULL)) +- eth0_dev = __dev_get_by_name("eth0"); +- netif_rx_schedule(eth0_dev); ++ eth0_dev = __dev_get_by_name(&init_net, "eth0"); ++ netif_rx_schedule(eth0_dev, ???); } + /* + * Add following to poll() function in NAPI driver (Tigon3 is example): +@@ -1605,28 +1605,30 @@ static irqreturn_t netif_be_dbg(int irq, + netif_t *netif; + int i = 0; + +- printk(KERN_ALERT "netif_schedule_list:\n"); ++ pr_alert("netif_schedule_list:\n"); + spin_lock_irq(&net_schedule_list_lock); + + list_for_each (ent, &net_schedule_list) { + netif = list_entry(ent, netif_t, list); +- printk(KERN_ALERT " %d: private(rx_req_cons=%08x " +- "rx_resp_prod=%08x\n", +- i, netif->rx.req_cons, netif->rx.rsp_prod_pvt); +- printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n", +- netif->tx.req_cons, netif->tx.rsp_prod_pvt); +- printk(KERN_ALERT " shared(rx_req_prod=%08x " +- "rx_resp_prod=%08x\n", +- netif->rx.sring->req_prod, netif->rx.sring->rsp_prod); +- printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n", +- netif->rx.sring->rsp_event, netif->tx.sring->req_prod); +- printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n", +- netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event); ++ pr_alert(" %d: private(rx_req_cons=%08x " ++ "rx_resp_prod=%08x\n", ++ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt); ++ pr_alert(" tx_req_cons=%08x tx_resp_prod=%08x)\n", ++ netif->tx.req_cons, netif->tx.rsp_prod_pvt); ++ pr_alert(" shared(rx_req_prod=%08x " ++ "rx_resp_prod=%08x\n", ++ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod); ++ pr_alert(" rx_event=%08x tx_req_prod=%08x\n", ++ netif->rx.sring->rsp_event, ++ netif->tx.sring->req_prod); ++ pr_alert(" tx_resp_prod=%08x, tx_event=%08x)\n", ++ netif->tx.sring->rsp_prod, ++ netif->tx.sring->rsp_event); + i++; + } + + spin_unlock_irq(&net_schedule_list_lock); +- printk(KERN_ALERT " ** End of netif_schedule_list **\n"); ++ pr_alert(" ** End of netif_schedule_list **\n"); + + return IRQ_HANDLED; + } +@@ -1656,7 +1658,7 @@ static int __init netback_init(void) + + mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS); + if (mmap_pages == NULL) { +- printk("%s: out of memory\n", __FUNCTION__); ++ pr_err("%s: out of memory\n", __FUNCTION__); + return -ENOMEM; + } + +--- 
head-2011-03-17.orig/drivers/xen/netback/xenbus.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 +@@ -163,11 +163,9 @@ fail: + * and vif variables to the environment, for the benefit of the vif-* hotplug + * scripts. + */ +-static int netback_uevent(struct xenbus_device *xdev, char **envp, +- int num_envp, char *buffer, int buffer_size) ++static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env) + { + struct backend_info *be; +- int i = 0, length = 0; + char *val; + + DPRINTK("netback_uevent"); +@@ -178,21 +176,16 @@ static int netback_uevent(struct xenbus_ + xenbus_dev_fatal(xdev, err, "reading script"); + return err; + } +- else { +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, +- &length, "script=%s", val); +- kfree(val); +- } ++ ++ add_uevent_var(env, "script=%s", val); ++ kfree(val); + + down_read(&teardown_sem); + be = xdev->dev.driver_data; + if (be && be->netif) +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, +- &length, "vif=%s", be->netif->dev->name); ++ add_uevent_var(env, "vif=%s", be->netif->dev->name); + up_read(&teardown_sem); + +- envp[i] = NULL; - --static inline int --HYPERVISOR_fpu_taskswitch( -- int set) --{ -- return _hypercall1(int, fpu_taskswitch, set); --} + return 0; + } + +@@ -240,8 +233,8 @@ static void frontend_changed(struct xenb + switch (frontend_state) { + case XenbusStateInitialising: + if (dev->state == XenbusStateClosed) { +- printk(KERN_INFO "%s: %s: prepare for reconnect\n", +- __FUNCTION__, dev->nodename); ++ pr_info("%s: %s: prepare for reconnect\n", ++ __FUNCTION__, dev->nodename); + xenbus_switch_state(dev, XenbusStateInitWait); + } + break; +--- head-2011-03-17.orig/drivers/xen/netfront/accel.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/accel.c 2011-01-31 17:56:27.000000000 +0100 +@@ -40,10 +40,8 @@ + #define DPRINTK(fmt, args...) \ + pr_debug("netfront/accel (%s:%d) " fmt, \ + __FUNCTION__, __LINE__, ##args) +-#define IPRINTK(fmt, args...) \ +- printk(KERN_INFO "netfront/accel: " fmt, ##args) +-#define WPRINTK(fmt, args...) \ +- printk(KERN_WARNING "netfront/accel: " fmt, ##args) ++#define IPRINTK(fmt, args...) pr_info("netfront/accel: " fmt, ##args) ++#define WPRINTK(fmt, args...) 
pr_warning("netfront/accel: " fmt, ##args) + + static int netfront_remove_accelerator(struct netfront_info *np, + struct xenbus_device *dev); +@@ -325,7 +323,7 @@ accelerator_set_vif_state_hooks(struct n + DPRINTK("%p\n",vif_state); + + /* Make sure there are no data path operations going on */ +- netif_poll_disable(vif_state->np->netdev); ++ napi_disable(&vif_state->np->napi); + netif_tx_lock_bh(vif_state->np->netdev); + + accelerator = vif_state->np->accelerator; +@@ -334,7 +332,7 @@ accelerator_set_vif_state_hooks(struct n + spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); + + netif_tx_unlock_bh(vif_state->np->netdev); +- netif_poll_enable(vif_state->np->netdev); ++ napi_enable(&vif_state->np->napi); + } + + +@@ -508,7 +506,7 @@ accelerator_remove_single_hook(struct ne + unsigned long flags; + + /* Make sure there are no data path operations going on */ +- netif_poll_disable(vif_state->np->netdev); ++ napi_disable(&vif_state->np->napi); + netif_tx_lock_bh(vif_state->np->netdev); + + spin_lock_irqsave(&accelerator->vif_states_lock, flags); +@@ -524,7 +522,7 @@ accelerator_remove_single_hook(struct ne + spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); + + netif_tx_unlock_bh(vif_state->np->netdev); +- netif_poll_enable(vif_state->np->netdev); ++ napi_enable(&vif_state->np->napi); + } + + +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-09 15:54:17.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-09 16:00:35.000000000 +0100 +@@ -207,10 +207,8 @@ static inline grant_ref_t xennet_get_rx_ + #define DPRINTK(fmt, args...) \ + pr_debug("netfront (%s:%d) " fmt, \ + __FUNCTION__, __LINE__, ##args) +-#define IPRINTK(fmt, args...) \ +- printk(KERN_INFO "netfront: " fmt, ##args) +-#define WPRINTK(fmt, args...) \ +- printk(KERN_WARNING "netfront: " fmt, ##args) ++#define IPRINTK(fmt, args...) pr_info("netfront: " fmt, ##args) ++#define WPRINTK(fmt, args...) 
pr_warning("netfront: " fmt, ##args) + + static int setup_device(struct xenbus_device *, struct netfront_info *); + static struct net_device *create_netdev(struct xenbus_device *); +@@ -262,16 +260,16 @@ static int __devinit netfront_probe(stru + + err = register_netdev(info->netdev); + if (err) { +- printk(KERN_WARNING "%s: register_netdev err=%d\n", +- __FUNCTION__, err); ++ pr_warning("%s: register_netdev err=%d\n", ++ __FUNCTION__, err); + goto fail; + } + + err = xennet_sysfs_addif(info->netdev); + if (err) { + unregister_netdev(info->netdev); +- printk(KERN_WARNING "%s: add sysfs failed err=%d\n", +- __FUNCTION__, err); ++ pr_warning("%s: add sysfs failed err=%d\n", ++ __FUNCTION__, err); + goto fail; + } + +@@ -621,11 +619,12 @@ int netfront_check_queue_ready(struct ne + } + EXPORT_SYMBOL(netfront_check_queue_ready); + - --static inline int __must_check --HYPERVISOR_sched_op_compat( -- int cmd, unsigned long arg) --{ -- return _hypercall2(int, sched_op_compat, cmd, arg); --} + static int network_open(struct net_device *dev) + { + struct netfront_info *np = netdev_priv(dev); + ++ napi_enable(&np->napi); ++ + spin_lock_bh(&np->rx_lock); + if (netfront_carrier_ok(np)) { + network_alloc_rx_buffers(dev); +@@ -633,7 +632,7 @@ static int network_open(struct net_devic + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ + netfront_accelerator_call_stop_napi_irq(np, dev); + +- netif_rx_schedule(dev); ++ netif_rx_schedule(dev, &np->napi); + } + } + spin_unlock_bh(&np->rx_lock); +@@ -667,9 +666,8 @@ static void network_tx_buf_gc(struct net + skb = np->tx_skbs[id]; + if (unlikely(gnttab_query_foreign_access( + np->grant_tx_ref[id]) != 0)) { +- printk(KERN_ALERT "network_tx_buf_gc: warning " +- "-- grant still in use by backend " +- "domain.\n"); ++ pr_alert("network_tx_buf_gc: grant still" ++ " in use by backend domain\n"); + BUG(); + } + gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); +@@ -705,7 +703,7 @@ static void rx_refill_timeout(unsigned l + + netfront_accelerator_call_stop_napi_irq(np, dev); + +- netif_rx_schedule(dev); ++ netif_rx_schedule(dev, &np->napi); + } + + static void network_alloc_rx_buffers(struct net_device *dev) +@@ -956,8 +954,7 @@ static int network_start_xmit(struct sk_ + + frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; + if (unlikely(frags > MAX_SKB_FRAGS + 1)) { +- printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", +- frags); ++ pr_alert("xennet: skb rides the rocket: %d frags\n", frags); + dump_stack(); + goto drop; + } +@@ -1060,7 +1057,7 @@ static irqreturn_t netif_int(int irq, vo + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { + netfront_accelerator_call_stop_napi_irq(np, dev); + +- netif_rx_schedule(dev); ++ netif_rx_schedule(dev, &np->napi); + dev->last_rx = jiffies; + } + } +@@ -1313,16 +1310,17 @@ static int xennet_set_skb_gso(struct sk_ + #endif + } + +-static int netif_poll(struct net_device *dev, int *pbudget) ++static int netif_poll(struct napi_struct *napi, int budget) + { +- struct netfront_info *np = netdev_priv(dev); ++ struct netfront_info *np = container_of(napi, struct netfront_info, napi); ++ struct net_device *dev = np->netdev; + struct sk_buff *skb; + struct netfront_rx_info rinfo; + struct netif_rx_response *rx = &rinfo.rx; + struct netif_extra_info *extras = rinfo.extras; + RING_IDX i, rp; + struct multicall_entry *mcl; +- int work_done, budget, more_to_do = 1, accel_more_to_do = 1; ++ int work_done, more_to_do = 1, accel_more_to_do = 1; + struct sk_buff_head rxq; + struct sk_buff_head errq; + struct sk_buff_head tmpq; +@@ 
-1342,8 +1340,6 @@ static int netif_poll(struct net_device + skb_queue_head_init(&errq); + skb_queue_head_init(&tmpq); + +- if ((budget = *pbudget) > dev->quota) +- budget = dev->quota; + rp = np->rx.sring->rsp_prod; + rmb(); /* Ensure we see queued responses up to 'rp'. */ + +@@ -1505,9 +1501,6 @@ err: + accel_more_to_do = 0; + } + +- *pbudget -= work_done; +- dev->quota -= work_done; - --static inline int __must_check --HYPERVISOR_sched_op( -- int cmd, void *arg) --{ -- return _hypercall2(int, sched_op, cmd, arg); --} -+#endif + if (work_done < budget) { + local_irq_save(flags); - static inline long __must_check - HYPERVISOR_set_timer_op( -@@ -231,34 +23,6 @@ HYPERVISOR_set_timer_op( +@@ -1524,14 +1517,14 @@ err: + } + + if (!more_to_do && !accel_more_to_do) +- __netif_rx_complete(dev); ++ __netif_rx_complete(dev, napi); + + local_irq_restore(flags); + } + + spin_unlock(&np->rx_lock); + +- return more_to_do | accel_more_to_do; ++ return work_done; } - static inline int __must_check --HYPERVISOR_platform_op( -- struct xen_platform_op *platform_op) --{ -- platform_op->interface_version = XENPF_INTERFACE_VERSION; -- return _hypercall1(int, platform_op, platform_op); --} --static inline int __must_check --HYPERVISOR_mca( -- struct xen_mc *mc_op) --{ -- mc_op->interface_version = XEN_MCA_INTERFACE_VERSION; -- return _hypercall1(int, mca, mc_op); --} --static inline int __must_check --HYPERVISOR_set_debugreg( -- unsigned int reg, unsigned long value) --{ -- return _hypercall2(int, set_debugreg, reg, value); --} -- --static inline unsigned long __must_check --HYPERVISOR_get_debugreg( -- unsigned int reg) --{ -- return _hypercall1(unsigned long, get_debugreg, reg); --} -- --static inline int __must_check - HYPERVISOR_update_descriptor( - unsigned long ma, unsigned long word) + static void netif_release_tx_bufs(struct netfront_info *np) +@@ -1678,6 +1671,7 @@ static int network_close(struct net_devi { -@@ -266,22 +30,6 @@ HYPERVISOR_update_descriptor( + struct netfront_info *np = netdev_priv(dev); + netif_stop_queue(np->netdev); ++ napi_disable(&np->napi); + return 0; } - static inline int __must_check --HYPERVISOR_memory_op( -- unsigned int cmd, void *arg) --{ -- if (arch_use_lazy_mmu_mode()) -- xen_multicall_flush(false); -- return _hypercall2(int, memory_op, cmd, arg); --} -- --static inline int __must_check --HYPERVISOR_multicall( -- multicall_entry_t *call_list, unsigned int nr_calls) --{ -- return _hypercall2(int, multicall, call_list, nr_calls); --} -- --static inline int __must_check - HYPERVISOR_update_va_mapping( - unsigned long va, pte_t new_val, unsigned long flags) +@@ -1777,9 +1771,13 @@ static const struct xennet_stat { + }, + }; + +-static int xennet_get_stats_count(struct net_device *dev) ++static int xennet_get_sset_count(struct net_device *dev, int sset) { -@@ -291,67 +39,6 @@ HYPERVISOR_update_va_mapping( +- return ARRAY_SIZE(xennet_stats); ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(xennet_stats); ++ } ++ return -EOPNOTSUPP; } - static inline int __must_check --HYPERVISOR_event_channel_op( -- int cmd, void *arg) --{ -- int rc = _hypercall2(int, event_channel_op, cmd, arg); -- --#if CONFIG_XEN_COMPAT <= 0x030002 -- if (unlikely(rc == -ENOSYS)) { -- struct evtchn_op op; -- op.cmd = cmd; -- memcpy(&op.u, arg, sizeof(op.u)); -- rc = _hypercall1(int, event_channel_op_compat, &op); -- memcpy(arg, &op.u, sizeof(op.u)); -- } --#endif -- -- return rc; --} -- --static inline int __must_check --HYPERVISOR_xen_version( -- int cmd, void *arg) --{ -- return 
_hypercall2(int, xen_version, cmd, arg); --} -- --static inline int __must_check --HYPERVISOR_console_io( -- int cmd, unsigned int count, char *str) --{ -- return _hypercall3(int, console_io, cmd, count, str); --} -- --static inline int __must_check --HYPERVISOR_physdev_op( -- int cmd, void *arg) --{ -- int rc = _hypercall2(int, physdev_op, cmd, arg); -- --#if CONFIG_XEN_COMPAT <= 0x030002 -- if (unlikely(rc == -ENOSYS)) { -- struct physdev_op op; -- op.cmd = cmd; -- memcpy(&op.u, arg, sizeof(op.u)); -- rc = _hypercall1(int, physdev_op_compat, &op); -- memcpy(arg, &op.u, sizeof(op.u)); -- } --#endif -- -- return rc; --} -- --static inline int __must_check --HYPERVISOR_grant_table_op( -- unsigned int cmd, void *uop, unsigned int count) --{ -- if (arch_use_lazy_mmu_mode()) -- xen_multicall_flush(false); -- return _hypercall3(int, grant_table_op, cmd, uop, count); --} -- --static inline int __must_check - HYPERVISOR_update_va_mapping_otherdomain( - unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) - { -@@ -360,90 +47,8 @@ HYPERVISOR_update_va_mapping_otherdomain + static void xennet_get_ethtool_stats(struct net_device *dev, +@@ -1931,7 +1929,7 @@ static struct ethtool_ops network_ethtoo + #endif + .get_link = ethtool_op_get_link, + +- .get_stats_count = xennet_get_stats_count, ++ .get_sset_count = xennet_get_sset_count, + .get_ethtool_stats = xennet_get_ethtool_stats, + .get_strings = xennet_get_strings, + }; +@@ -2081,8 +2079,7 @@ static struct net_device * __devinit cre + + netdev = alloc_etherdev(sizeof(struct netfront_info)); + if (!netdev) { +- printk(KERN_WARNING "%s> alloc_etherdev failed.\n", +- __FUNCTION__); ++ pr_warning("%s: alloc_etherdev failed\n", __FUNCTION__); + return ERR_PTR(-ENOMEM); + } + +@@ -2117,14 +2114,14 @@ static struct net_device * __devinit cre + /* A grant for every tx ring slot */ + if (gnttab_alloc_grant_references(TX_MAX_TARGET, + &np->gref_tx_head) < 0) { +- printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); ++ pr_alert("#### netfront can't alloc tx grant refs\n"); + err = -ENOMEM; + goto exit; + } + /* A grant for every rx ring slot */ + if (gnttab_alloc_grant_references(RX_MAX_TARGET, + &np->gref_rx_head) < 0) { +- printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); ++ pr_alert("#### netfront can't alloc rx grant refs\n"); + err = -ENOMEM; + goto exit_free_tx; + } +@@ -2133,16 +2130,14 @@ static struct net_device * __devinit cre + netdev->hard_start_xmit = network_start_xmit; + netdev->stop = network_close; + netdev->get_stats = network_get_stats; +- netdev->poll = netif_poll; ++ netif_napi_add(netdev, &np->napi, netif_poll, 64); + netdev->set_multicast_list = network_set_multicast_list; + netdev->uninit = netif_uninit; + netdev->set_mac_address = xennet_set_mac_address; + netdev->change_mtu = xennet_change_mtu; +- netdev->weight = 64; + netdev->features = NETIF_F_IP_CSUM; + + SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); +- SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, &dev->dev); + + np->netdev = netdev; +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.h 2011-02-09 15:54:19.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.h 2011-01-31 17:56:27.000000000 +0100 +@@ -155,6 +155,8 @@ struct netfront_info { + spinlock_t tx_lock; + spinlock_t rx_lock; + ++ struct napi_struct napi; ++ + unsigned int irq; + unsigned int copying_receiver; + unsigned int carrier; +--- head-2011-03-17.orig/drivers/xen/pciback/Makefile 2008-07-21 11:00:33.000000000 +0200 ++++ 
head-2011-03-17/drivers/xen/pciback/Makefile 2011-01-31 17:56:27.000000000 +0100 +@@ -12,6 +12,4 @@ pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT + pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o + pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o + +-ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y) +-EXTRA_CFLAGS += -DDEBUG +-endif ++ccflags-$(CONFIG_XEN_PCIDEV_BE_DEBUG) += -DDEBUG +--- head-2011-03-17.orig/drivers/xen/pciback/conf_space_capability_msi.c 2008-09-15 13:40:15.000000000 +0200 ++++ head-2011-03-17/drivers/xen/pciback/conf_space_capability_msi.c 2011-01-31 17:56:27.000000000 +0100 +@@ -17,7 +17,8 @@ int pciback_enable_msi(struct pciback_de + status = pci_enable_msi(dev); + + if (status) { +- printk("error enable msi for guest %x status %x\n", otherend, status); ++ pr_err("error enable msi for guest %x status %x\n", ++ otherend, status); + op->value = 0; + return XEN_PCI_ERR_op_failed; + } +--- head-2011-03-17.orig/drivers/xen/pciback/conf_space_header.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pciback/conf_space_header.c 2011-01-31 17:56:27.000000000 +0100 +@@ -69,9 +69,9 @@ static int command_write(struct pci_dev + pci_name(dev)); + err = pci_set_mwi(dev); + if (err) { +- printk(KERN_WARNING +- "pciback: %s: cannot enable memory-write-invalidate (%d)\n", +- pci_name(dev), err); ++ pr_warning("pciback: %s: cannot enable" ++ " memory-write-invalidate (%d)\n", ++ pci_name(dev), err); + value &= ~PCI_COMMAND_INVALIDATE; + } + } +@@ -84,8 +84,8 @@ static int rom_write(struct pci_dev *dev + struct pci_bar_info *bar = data; + + if (unlikely(!bar)) { +- printk(KERN_WARNING "pciback: driver data not found for %s\n", +- pci_name(dev)); ++ pr_warning("pciback: driver data not found for %s\n", ++ pci_name(dev)); + return XEN_PCI_ERR_op_failed; + } + +@@ -118,8 +118,8 @@ static int bar_write(struct pci_dev *dev + struct pci_bar_info *bar = data; + + if (unlikely(!bar)) { +- printk(KERN_WARNING "pciback: driver data not found for %s\n", +- pci_name(dev)); ++ pr_warning("pciback: driver data not found for %s\n", ++ pci_name(dev)); + return XEN_PCI_ERR_op_failed; + } + +@@ -146,8 +146,8 @@ static int bar_read(struct pci_dev *dev, + struct pci_bar_info *bar = data; + + if (unlikely(!bar)) { +- printk(KERN_WARNING "pciback: driver data not found for %s\n", +- pci_name(dev)); ++ pr_warning("pciback: driver data not found for %s\n", ++ pci_name(dev)); + return XEN_PCI_ERR_op_failed; + } + +@@ -368,7 +368,7 @@ int pciback_config_header_add_fields(str + + default: + err = -EINVAL; +- printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n", ++ pr_err("pciback: %s: Unsupported header type %d!\n", + pci_name(dev), dev->hdr_type); + break; + } +--- head-2011-03-17.orig/drivers/xen/pciback/pci_stub.c 2011-03-02 12:00:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pciback/pci_stub.c 2011-03-11 10:55:55.000000000 +0100 +@@ -470,15 +470,15 @@ static void pcistub_remove(struct pci_de + found_psdev->pdev); + + if (found_psdev->pdev) { +- printk(KERN_WARNING "pciback: ****** removing device " +- "%s while still in-use! ******\n", +- pci_name(found_psdev->dev)); +- printk(KERN_WARNING "pciback: ****** driver domain may " +- "still access this device's i/o resources!\n"); +- printk(KERN_WARNING "pciback: ****** shutdown driver " +- "domain before binding device\n"); +- printk(KERN_WARNING "pciback: ****** to other drivers " +- "or domains\n"); ++ pr_warning("pciback: ****** removing device %s" ++ " while still in-use! 
******\n", ++ pci_name(found_psdev->dev)); ++ pr_warning("pciback: ****** driver domain may still" ++ " access this device's i/o resources!\n"); ++ pr_warning("pciback: ****** shutdown driver " ++ "domain before binding device\n"); ++ pr_warning("pciback: ****** to other drivers " ++ "or domains\n"); + + pciback_release_pci_dev(found_psdev->pdev, + found_psdev->dev); +@@ -1261,7 +1261,7 @@ static int __init pcistub_init(void) + return err; + + parse_error: +- printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n", ++ pr_err("pciback: Error parsing pci_devs_to_hide at \"%s\"\n", + pci_devs_to_hide + pos); + return -EINVAL; + } +--- head-2011-03-17.orig/drivers/xen/pciback/slot.c 2009-03-18 10:39:32.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pciback/slot.c 2011-01-31 17:56:27.000000000 +0100 +@@ -64,9 +64,9 @@ int pciback_add_pci_dev(struct pciback_d + for (bus = 0; bus < PCI_BUS_NBR; bus++) + for (slot = 0; slot < PCI_SLOT_MAX; slot++) { + if (slot_dev->slots[bus][slot] == NULL) { +- printk(KERN_INFO +- "pciback: slot: %s: assign to virtual slot %d, bus %d\n", +- pci_name(dev), slot, bus); ++ pr_info("pciback: slot: %s: assign to" ++ " virtual slot %d, bus %d\n", ++ pci_name(dev), slot, bus); + slot_dev->slots[bus][slot] = dev; + goto unlock; + } +--- head-2011-03-17.orig/drivers/xen/pciback/vpci.c 2009-03-18 10:39:32.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pciback/vpci.c 2011-01-31 17:56:27.000000000 +0100 +@@ -111,9 +111,9 @@ int pciback_add_pci_dev(struct pciback_d + /* Assign to a new slot on the virtual PCI bus */ + for (slot = 0; slot < PCI_SLOT_MAX; slot++) { + if (list_empty(&vpci_dev->dev_list[slot])) { +- printk(KERN_INFO +- "pciback: vpci: %s: assign to virtual slot %d\n", +- pci_name(dev), slot); ++ pr_info("pciback: vpci: %s:" ++ " assign to virtual slot %d\n", ++ pci_name(dev), slot); + list_add_tail(&dev_entry->list, + &vpci_dev->dev_list[slot]); + func = PCI_FUNC(dev->devfn); +--- head-2011-03-17.orig/drivers/xen/pciback/xenbus.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pciback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 +@@ -695,8 +695,7 @@ int __init pciback_xenbus_register(void) + return -ENODEV; + pciback_wq = create_workqueue("pciback_workqueue"); + if (!pciback_wq) { +- printk(KERN_ERR "pciback_xenbus_register: create" +- "pciback_workqueue failed\n"); ++ pr_err("pciback_xenbus_register: create workqueue failed\n"); + return -EFAULT; + } + return xenbus_register_backend(&xenbus_pciback_driver); +--- head-2011-03-17.orig/drivers/xen/pcifront/Makefile 2007-06-12 13:13:45.000000000 +0200 ++++ head-2011-03-17/drivers/xen/pcifront/Makefile 2011-01-31 17:56:27.000000000 +0100 +@@ -2,6 +2,4 @@ obj-y += pcifront.o + + pcifront-y := pci_op.o xenbus.o pci.o + +-ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y) +-EXTRA_CFLAGS += -DDEBUG +-endif ++ccflags-$(CONFIG_XEN_PCIDEV_FE_DEBUG) += -DDEBUG +--- head-2011-03-17.orig/drivers/xen/pcifront/pci_op.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pcifront/pci_op.c 2011-01-31 17:56:27.000000000 +0100 +@@ -67,7 +67,7 @@ static void pcifront_init_sd(struct pcif + return; /* No resources, nothing to do */ + + if (magic != (sizeof(res) * 2) + 1) { +- printk(KERN_WARNING "pcifront: resource magic mismatch\n"); ++ pr_warning("pcifront: resource magic mismatch\n"); + return; + } + +@@ -105,9 +105,9 @@ static void pcifront_init_sd(struct pcif + err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, + "%s", buf); + if (err != 1) { +- printk(KERN_WARNING "pcifront: error 
reading " +- "resource %d on bus %04x:%02x\n", +- j, domain, bus); ++ pr_warning("pcifront: error reading " ++ "resource %d on bus %04x:%02x\n", ++ j, domain, bus); + continue; + } + +@@ -317,7 +317,7 @@ int pci_frontend_enable_msix(struct pci_ + struct pcifront_device *pdev = pcifront_get_pdev(sd); + + if (nvec > SH_INFO_MAX_VEC) { +- printk("too much vector for pci frontend%x\n", nvec); ++ pr_warning("too many vectors (%#x) for pci frontend\n", nvec); + return -EINVAL; + } + +@@ -336,12 +336,12 @@ int pci_frontend_enable_msix(struct pci_ + return 0; + } + else { +- printk("enable msix get value %x\n", op.value); ++ pr_err("enable msix get value %#x\n", op.value); + return op.value; + } + } + else { +- printk("enable msix get err %x\n", err); ++ pr_err("enable msix err %#x\n", err); + return err; + } + } +@@ -362,7 +362,7 @@ void pci_frontend_disable_msix(struct pc + + /* What should do for error ? */ + if (err) +- printk("pci_disable_msix get err %x\n", err); ++ pr_err("disable msix err %#x\n", err); + } + + int pci_frontend_enable_msi(struct pci_dev *dev) +@@ -382,8 +382,8 @@ int pci_frontend_enable_msi(struct pci_d + dev->irq = op.value; + } + else { +- printk("pci frontend enable msi failed for dev %x:%x \n", +- op.bus, op.devfn); ++ pr_err("pci frontend enable msi failed for dev %x:%x\n", ++ op.bus, op.devfn); + err = -EINVAL; + } + return err; +@@ -404,14 +404,14 @@ void pci_frontend_disable_msi(struct pci + err = do_pci_op(pdev, &op); + if (err == XEN_PCI_ERR_dev_not_found) { + /* XXX No response from backend, what shall we do? */ +- printk("get no response from backend for disable MSI\n"); ++ pr_err("no response from backend for disable MSI\n"); + return; + } + if (likely(!err)) + dev->irq = op.value; + else + /* how can pciback notify us fail? 
*/ +- printk("get fake response frombackend \n"); ++ pr_err("got bogus response from backend\n"); + } + #endif /* CONFIG_PCI_MSI */ + +--- head-2011-03-17.orig/drivers/xen/scsiback/emulate.c 2011-02-02 12:19:11.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/emulate.c 2011-02-08 10:04:09.000000000 +0100 +@@ -114,9 +114,10 @@ static void resp_not_supported_cmd(pendi } - static inline int __must_check --HYPERVISOR_vm_assist( -- unsigned int cmd, unsigned int type) --{ -- return _hypercall2(int, vm_assist, cmd, type); --} -- --static inline int __must_check --HYPERVISOR_vcpu_op( -- int cmd, unsigned int vcpuid, void *extra_args) --{ -- return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); --} -- --static inline int __must_check - HYPERVISOR_set_segment_base( - int reg, unsigned long value) + +-static int __copy_to_sg(struct scatterlist *sg, unsigned int nr_sg, ++static int __copy_to_sg(struct scatterlist *sgl, unsigned int nr_sg, + void *buf, unsigned int buflen) { - return _hypercall2(int, set_segment_base, reg, value); ++ struct scatterlist *sg; + void *from = buf; + void *to; + unsigned int from_rest = buflen; +@@ -125,17 +126,17 @@ static int __copy_to_sg(struct scatterli + unsigned int i; + unsigned long pfn; + +- for (i = 0; i < nr_sg; i++) { +- if (sg->page == NULL) { +- printk(KERN_WARNING "%s: inconsistent length field in " +- "scatterlist\n", __FUNCTION__); ++ for_each_sg (sgl, sg, nr_sg, i) { ++ if (sg_page(sg) == NULL) { ++ pr_warning("%s: inconsistent length field in " ++ "scatterlist\n", __FUNCTION__); + return -ENOMEM; + } + + to_capa = sg->length; + copy_size = min_t(unsigned int, to_capa, from_rest); + +- pfn = page_to_pfn(sg->page); ++ pfn = page_to_pfn(sg_page(sg)); + to = pfn_to_kaddr(pfn) + (sg->offset); + memcpy(to, from, copy_size); + +@@ -144,18 +145,17 @@ static int __copy_to_sg(struct scatterli + return 0; + } + +- sg++; + from += copy_size; + } + +- printk(KERN_WARNING "%s: no space in scatterlist\n", +- __FUNCTION__); ++ pr_warning("%s: no space in scatterlist\n", __FUNCTION__); + return -ENOMEM; } -- --static inline int __must_check --HYPERVISOR_suspend( -- unsigned long srec) --{ -- struct sched_shutdown sched_shutdown = { -- .reason = SHUTDOWN_suspend -- }; -- -- int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, -- &sched_shutdown, srec); -- --#if CONFIG_XEN_COMPAT <= 0x030002 -- if (rc == -ENOSYS) -- rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, -- SHUTDOWN_suspend, srec); --#endif -- -- return rc; --} -- --#if CONFIG_XEN_COMPAT <= 0x030002 --static inline int --HYPERVISOR_nmi_op( -- unsigned long op, void *arg) --{ -- return _hypercall2(int, nmi_op, op, arg); --} --#endif -- --#ifndef CONFIG_XEN --static inline unsigned long __must_check --HYPERVISOR_hvm_op( -- int op, void *arg) --{ -- return _hypercall2(unsigned long, hvm_op, op, arg); --} --#endif -- --static inline int __must_check --HYPERVISOR_callback_op( -- int cmd, const void *arg) --{ -- return _hypercall2(int, callback_op, cmd, arg); --} -- --static inline int __must_check --HYPERVISOR_xenoprof_op( -- int op, void *arg) --{ -- return _hypercall2(int, xenoprof_op, op, arg); --} -- --static inline int __must_check --HYPERVISOR_kexec_op( -- unsigned long op, void *args) --{ -- return _hypercall2(int, kexec_op, op, args); --} -- --static inline int __must_check --HYPERVISOR_tmem_op( -- struct tmem_op *op) --{ -- return _hypercall1(int, tmem_op, op); --} -- --#endif /* __HYPERCALL_H__ */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 
15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:10:29.000000000 +0100 -@@ -44,6 +44,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -200,7 +201,6 @@ static inline void xen_multicall_flush(b - extern char hypercall_page[PAGE_SIZE]; - #else - extern char *hypercall_stubs; --#define hypercall_page hypercall_stubs - #define is_running_on_xen() (!!hypercall_stubs) - #endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "io_32.h" -+#else -+# include "io_64.h" -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "irqflags_32.h" -+#else -+# include "irqflags_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:10:29.000000000 +0100 -@@ -150,6 +150,23 @@ static inline int raw_irqs_disabled_flag - \ - raw_irqs_disabled_flags(flags); \ - }) +-static int __copy_from_sg(struct scatterlist *sg, unsigned int nr_sg, ++static int __copy_from_sg(struct scatterlist *sgl, unsigned int nr_sg, + void *buf, unsigned int buflen) + { ++ struct scatterlist *sg; + void *from; + void *to = buf; + unsigned int from_rest; +@@ -164,29 +164,26 @@ static int __copy_from_sg(struct scatter + unsigned int i; + unsigned long pfn; + +- for (i = 0; i < nr_sg; i++) { +- if (sg->page == NULL) { +- printk(KERN_WARNING "%s: inconsistent length field in " +- "scatterlist\n", __FUNCTION__); ++ for_each_sg (sgl, sg, nr_sg, i) { ++ if (sg_page(sg) == NULL) { ++ pr_warning("%s: inconsistent length field in " ++ "scatterlist\n", __FUNCTION__); + return -ENOMEM; + } + + from_rest = sg->length; + if ((from_rest > 0) && (to_capa < from_rest)) { +- printk(KERN_WARNING +- "%s: no space in destination buffer\n", +- __FUNCTION__); ++ pr_warning("%s: no space in destination buffer\n", ++ __FUNCTION__); + return -ENOMEM; + } + copy_size = from_rest; + +- pfn = page_to_pfn(sg->page); ++ pfn = page_to_pfn(sg_page(sg)); + from = pfn_to_kaddr(pfn) + (sg->offset); + memcpy(to, from, copy_size); + + to_capa -= copy_size; +- +- sg++; + to += copy_size; + } + +@@ -247,7 +244,7 @@ static void __report_luns(pending_req_t + + VSCSI_REPORT_LUNS_HEADER; + retry: + if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) { +- printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__); ++ pr_err("scsiback:%s kmalloc err\n", __FUNCTION__); + goto fail; + } + +--- head-2011-03-17.orig/drivers/xen/scsiback/interface.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/interface.c 2011-01-31 17:56:27.000000000 +0100 +@@ -71,7 +71,8 @@ static int map_frontend_page( struct vsc + gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + + if (op.status != GNTST_okay) { +- printk(KERN_ERR "scsiback: Grant table operation failure %d!\n", (int)op.status); ++ pr_err("scsiback: Grant table operation failure %d!\n", ++ (int)op.status); + ret = -EINVAL; + } else { + info->shmem_ref = ring_ref; +@@ -102,7 +103,7 @@ int scsiback_init_sring(struct vscsibk_i + int err; + + if (info->irq) { +- printk(KERN_ERR "scsiback: Already connected through?\n"); ++ pr_err("scsiback: Already connected through?\n"); + return 
-1; + } + +@@ -168,7 +169,7 @@ int __init scsiback_interface_init(void) + scsiback_cachep = kmem_cache_create("vscsiif_cache", + sizeof(struct vscsibk_info), 0, 0, NULL); + if (!scsiback_cachep) { +- printk(KERN_ERR "scsiback: can't init scsi cache\n"); ++ pr_err("scsiback: can't init scsi cache\n"); + return -ENOMEM; + } + +--- head-2011-03-17.orig/drivers/xen/scsiback/scsiback.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/scsiback.c 2011-01-31 17:56:27.000000000 +0100 +@@ -202,14 +202,14 @@ static void scsiback_print_status(char * + { + struct scsi_device *sdev = pending_req->sdev; + +- printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no, +- sdev->channel, sdev->id, sdev->lun); +- printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n", +- status_byte(errors), msg_byte(errors), +- host_byte(errors), driver_byte(errors)); ++ pr_err("scsiback: %d:%d:%d:%d ", ++ sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); ++ pr_err("status = 0x%02x, message = 0x%02x, host = 0x%02x," ++ " driver = 0x%02x\n", ++ status_byte(errors), msg_byte(errors), ++ host_byte(errors), driver_byte(errors)); + +- printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n", +- pending_req->cmnd[0]); ++ pr_err("scsiback: cmnd[0]=0x%02X\n", pending_req->cmnd[0]); + + if (CHECK_CONDITION & status_byte(errors)) + __scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE); +@@ -260,14 +260,18 @@ static int scsiback_gnttab_data_map(vscs + write = (data_dir == DMA_TO_DEVICE); + + if (nr_segments) { ++ struct scatterlist *sg; + -+/* -+ * makes the traced hardirq state match with the machine state -+ * -+ * should be a rarely used function, only in places where its -+ * otherwise impossible to know the irq state, like in traps. 
-+ */ -+static inline void trace_hardirqs_fixup_flags(unsigned long flags) -+{ -+ if (raw_irqs_disabled_flags(flags)) -+ trace_hardirqs_off(); -+ else -+ trace_hardirqs_on(); -+} + /* free of (sgl) in fast_flush_area()*/ + pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments, + GFP_KERNEL); + if (!pending_req->sgl) { +- printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__); ++ pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__); + return -ENOMEM; + } + ++ sg_init_table(pending_req->sgl, nr_segments); + -+#define trace_hardirqs_fixup() \ -+ trace_hardirqs_fixup_flags(__raw_local_save_flags()) - #endif /* __ASSEMBLY__ */ + for (i = 0; i < nr_segments; i++) { + flags = GNTMAP_host_map; + if (write) +@@ -280,14 +284,14 @@ static int scsiback_gnttab_data_map(vscs + err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments); + BUG_ON(err); + +- for (i = 0; i < nr_segments; i++) { ++ for_each_sg (pending_req->sgl, sg, nr_segments, i) { + struct page *pg; + + /* Retry maps with GNTST_eagain */ + if (unlikely(map[i].status == GNTST_eagain)) + gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]); + if (unlikely(map[i].status != GNTST_okay)) { +- printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n"); ++ pr_err("scsiback: invalid buffer -- could not remap it\n"); + map[i].handle = SCSIBACK_INVALID_HANDLE; + err |= 1; + } +@@ -302,15 +306,14 @@ static int scsiback_gnttab_data_map(vscs + set_phys_to_machine(page_to_pfn(pg), + FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT)); + +- pending_req->sgl[i].page = pg; +- pending_req->sgl[i].offset = ring_req->seg[i].offset; +- pending_req->sgl[i].length = ring_req->seg[i].length; +- data_len += pending_req->sgl[i].length; ++ sg_set_page(sg, pg, ring_req->seg[i].length, ++ ring_req->seg[i].offset); ++ data_len += sg->length; + + barrier(); +- if (pending_req->sgl[i].offset >= PAGE_SIZE || +- pending_req->sgl[i].length > PAGE_SIZE || +- pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE) ++ if (sg->offset >= PAGE_SIZE || ++ sg->length > PAGE_SIZE || ++ sg->offset + sg->length > PAGE_SIZE) + err |= 1; + + } +@@ -339,27 +342,14 @@ static int scsiback_merge_bio(struct req - /* -@@ -181,4 +198,17 @@ static inline int raw_irqs_disabled_flag - # define TRACE_IRQS_OFF - #endif + blk_queue_bounce(q, &bio); -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define LOCKDEP_SYS_EXIT \ -+ pushl %eax; \ -+ pushl %ecx; \ -+ pushl %edx; \ -+ call lockdep_sys_exit; \ -+ popl %edx; \ -+ popl %ecx; \ -+ popl %eax; -+#else -+# define LOCKDEP_SYS_EXIT -+#endif -+ - #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags_64.h 2010-03-24 15:10:29.000000000 +0100 -@@ -116,6 +116,22 @@ static inline int raw_irqs_disabled_flag - }) +- if (!rq->bio) +- blk_rq_bio_prep(q, rq, bio); +- else if (!ll_back_merge_fn(q, rq, bio)) +- return -EINVAL; +- else { +- rq->biotail->bi_next = bio; +- rq->biotail = bio; +- } +- +- return 0; ++ return blk_rq_append_bio(q, rq, bio); + } - /* -+ * makes the traced hardirq state match with the machine state -+ * -+ * should be a rarely used function, only in places where its -+ * otherwise impossible to know the irq state, like in traps. 
-+ */ -+static inline void trace_hardirqs_fixup_flags(unsigned long flags) -+{ -+ if (raw_irqs_disabled_flags(flags)) -+ trace_hardirqs_off(); -+ else -+ trace_hardirqs_on(); -+} -+ -+#define trace_hardirqs_fixup() \ -+ trace_hardirqs_fixup_flags(__raw_local_save_flags()) -+/* - * Used in the idle loop; sti takes one instruction cycle - * to complete: - */ -@@ -143,6 +159,20 @@ static inline void halt(void) - # define TRACE_IRQS_ON - # define TRACE_IRQS_OFF - # endif -+# ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk -+# define LOCKDEP_SYS_EXIT_IRQ \ -+ TRACE_IRQS_ON; \ -+ sti; \ -+ SAVE_REST; \ -+ LOCKDEP_SYS_EXIT; \ -+ RESTORE_REST; \ -+ cli; \ -+ TRACE_IRQS_OFF; -+# else -+# define LOCKDEP_SYS_EXIT -+# define LOCKDEP_SYS_EXIT_IRQ -+# endif - #endif - #endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/maddr.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "maddr_32.h" -+#else -+# include "maddr_64.h" -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "mmu_context_32.h" -+#else -+# include "mmu_context_64.h" -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,100 @@ -+#ifndef __x86_PCI_H -+#define __x86_PCI_H -+ -+#include /* for struct page */ -+#include -+#include -+#include -+#include -+#include -+ -+ -+#ifdef __KERNEL__ -+ -+struct pci_sysdata { -+ int domain; /* PCI domain */ -+ int node; /* NUMA node */ -+#ifdef CONFIG_X86_64 -+ void* iommu; /* IOMMU private data */ -+#endif -+#ifdef CONFIG_XEN_PCIDEV_FRONTEND -+ struct pcifront_device *pdev; -+#endif -+}; -+ -+/* scan a bus after allocating a pci_sysdata for it */ -+extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); -+ -+static inline int pci_domain_nr(struct pci_bus *bus) -+{ -+ struct pci_sysdata *sd = bus->sysdata; -+ return sd->domain; -+} -+ -+static inline int pci_proc_domain(struct pci_bus *bus) -+{ -+ return pci_domain_nr(bus); -+} -+ -+ -+/* Can be used to override the logic in pci_scan_bus for skipping -+ already-configured bus numbers - to be used for buggy BIOSes -+ or architectures with incomplete PCI setup by the loader */ -+ -+#ifdef CONFIG_PCI -+extern unsigned int pcibios_assign_all_busses(void); -+#else -+#define pcibios_assign_all_busses() 0 -+#endif -+ -+#include -+#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain()) -+ -+extern unsigned long pci_mem_start; -+#define PCIBIOS_MIN_IO 0x1000 -+#define PCIBIOS_MIN_MEM (pci_mem_start) -+ -+#define PCIBIOS_MIN_CARDBUS_IO 0x4000 -+ -+void pcibios_config_init(void); -+struct pci_bus * pcibios_scan_root(int bus); -+ -+void pcibios_set_master(struct pci_dev *dev); -+void pcibios_penalize_isa_irq(int irq, int active); -+struct irq_routing_table *pcibios_get_irq_routing_table(void); -+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); -+ -+ -+#define HAVE_PCI_MMAP -+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, -+ enum pci_mmap_state mmap_state, int write_combine); -+ -+ -+#ifdef CONFIG_PCI -+static inline void pci_dma_burst_advice(struct pci_dev *pdev, -+ enum pci_dma_burst_strategy *strat, -+ unsigned long *strategy_parameter) -+{ -+ *strat = PCI_DMA_BURST_INFINITY; -+ *strategy_parameter = ~0UL; 
-+} -+#endif -+ -+ -+#endif /* __KERNEL__ */ -+ -+#ifdef CONFIG_X86_32 -+# include "pci_32.h" -+#else -+# include "pci_64.h" -+#endif -+ -+/* implement the pci_ DMA API in terms of the generic device dma_ one */ -+#include -+ -+/* generic pci stuff */ -+#include -+ -+ -+ -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "pgalloc_32.h" -+#else -+# include "pgalloc_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:10:29.000000000 +0100 -@@ -112,6 +112,8 @@ static inline void pgd_list_del(pgd_t *p - spin_unlock(&pgd_lock); + /* quoted scsi_lib.c/scsi_bi_endio */ +-static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error) ++static void scsiback_bi_endio(struct bio *bio, int error) + { +- if (bio->bi_size) +- return 1; +- + bio_put(bio); +- return 0; + } + + +@@ -370,16 +360,16 @@ static int request_map_sg(struct request + struct request_queue *q = rq->q; + int nr_pages; + unsigned int nsegs = count; +- + unsigned int data_len = 0, len, bytes, off; ++ struct scatterlist *sg; + struct page *page; + struct bio *bio = NULL; + int i, err, nr_vecs = 0; + +- for (i = 0; i < nsegs; i++) { +- page = pending_req->sgl[i].page; +- off = (unsigned int)pending_req->sgl[i].offset; +- len = (unsigned int)pending_req->sgl[i].length; ++ for_each_sg (pending_req->sgl, sg, nsegs, i) { ++ page = sg_page(sg); ++ off = sg->offset; ++ len = sg->length; + data_len += len; + + nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT; +@@ -407,7 +397,7 @@ static int request_map_sg(struct request + if (bio->bi_vcnt >= nr_vecs) { + err = scsiback_merge_bio(rq, bio); + if (err) { +- bio_endio(bio, bio->bi_size, 0); ++ bio_endio(bio, 0); + goto free_bios; + } + bio = NULL; +@@ -430,7 +420,7 @@ free_bios: + /* + * call endio instead of bio_put incase it was bounced + */ +- bio_endio(bio, bio->bi_size, 0); ++ bio_endio(bio, 0); + } + + return err; +@@ -473,7 +463,7 @@ void scsiback_cmd_exec(pending_req_t *pe + if (nr_segments) { + + if (request_map_sg(rq, pending_req, nr_segments)) { +- printk(KERN_ERR "scsiback: SG Request Map Error\n"); ++ pr_err("scsiback: SG Request Map Error\n"); + return; + } + } +@@ -632,7 +622,7 @@ static int scsiback_do_cmd_fn(struct vsc + } else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) { + scsiback_device_reset_exec(pending_req); + } else { +- printk(KERN_ERR "scsiback: invalid parameter for request\n"); ++ pr_err("scsiback: invalid parameter for request\n"); + scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24), + 0, pending_req); + continue; +@@ -719,7 +709,7 @@ out_of_memory: + kfree(pending_reqs); + kfree(pending_grant_handles); + free_empty_pages_and_pagevec(pending_pages, mmap_pages); +- printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__); ++ pr_err("scsiback: %s: out of memory\n", __FUNCTION__); + return -ENOMEM; } -+extern void pgd_test_and_unpin(pgd_t *); +--- head-2011-03-17.orig/drivers/xen/scsiback/translate.c 2008-07-21 11:00:33.000000000 +0200 ++++ head-2011-03-17/drivers/xen/scsiback/translate.c 2011-01-31 17:56:27.000000000 +0100 +@@ -62,8 +62,8 @@ int scsiback_add_translation_entry(struc + if ((entry->v.chn == v->chn) && + (entry->v.tgt == v->tgt) && + (entry->v.lun == v->lun)) { +- printk(KERN_WARNING "scsiback: Virtual ID is already used. 
" +- "Assignment was not performed.\n"); ++ pr_warning("scsiback: Virtual ID is already used. " ++ "Assignment was not performed.\n"); + err = -EEXIST; + goto out; + } +@@ -72,7 +72,7 @@ int scsiback_add_translation_entry(struc + + /* Create a new translation entry and add to the list */ + if ((new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC)) == NULL) { +- printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__); ++ pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__); + err = -ENOMEM; + goto out; + } +--- head-2011-03-17.orig/drivers/xen/scsiback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 +@@ -102,14 +102,13 @@ struct scsi_device *scsiback_get_scsi_de + + shost = scsi_host_lookup(phy->hst); + if (IS_ERR(shost)) { +- printk(KERN_ERR "scsiback: host%d doesn't exist.\n", +- phy->hst); ++ pr_err("scsiback: host%d doesn't exist\n", phy->hst); + return NULL; + } + sdev = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun); + if (!sdev) { +- printk(KERN_ERR "scsiback: %d:%d:%d:%d doesn't exist.\n", +- phy->hst, phy->chn, phy->tgt, phy->lun); ++ pr_err("scsiback: %d:%d:%d:%d doesn't exist\n", ++ phy->hst, phy->chn, phy->tgt, phy->lun); + scsi_host_put(shost); + return NULL; + } +@@ -178,7 +177,8 @@ static void scsiback_do_lun_hotplug(stru + if (!err) { + if (xenbus_printf(XBT_NIL, dev->nodename, state_str, + "%d", XenbusStateInitialised)) { +- printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str); ++ pr_err("scsiback: xenbus_printf error %s\n", ++ state_str); + scsiback_del_translation_entry(be->info, &vir); + } + } else { +@@ -193,7 +193,8 @@ static void scsiback_do_lun_hotplug(stru + if (!scsiback_del_translation_entry(be->info, &vir)) { + if (xenbus_printf(XBT_NIL, dev->nodename, state_str, + "%d", XenbusStateClosed)) +- printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str); ++ pr_err("scsiback: xenbus_printf error %s\n", ++ state_str); + } + } + break; +@@ -203,7 +204,8 @@ static void scsiback_do_lun_hotplug(stru + /* modify vscsi-devs/dev-x/state */ + if (xenbus_printf(XBT_NIL, dev->nodename, state_str, + "%d", XenbusStateConnected)) { +- printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str); ++ pr_err("scsiback: xenbus_printf error %s\n", ++ state_str); + scsiback_del_translation_entry(be->info, &vir); + xenbus_printf(XBT_NIL, dev->nodename, state_str, + "%d", XenbusStateClosed); +@@ -346,7 +348,7 @@ static int scsiback_probe(struct xenbus_ + + + fail: +- printk(KERN_WARNING "scsiback: %s failed\n",__FUNCTION__); ++ pr_warning("scsiback: %s failed\n",__FUNCTION__); + scsiback_remove(dev); + + return err; +--- head-2011-03-17.orig/drivers/xen/scsifront/scsifront.c 2011-02-08 10:03:55.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsifront/scsifront.c 2011-01-31 17:56:27.000000000 +0100 +@@ -118,8 +118,8 @@ static void scsifront_gnttab_done(struct + for (i = 0; i < s->nr_segments; i++) { + if (unlikely(gnttab_query_foreign_access( + s->gref[i]) != 0)) { +- printk(KERN_ALERT "scsifront: " +- "grant still in use by backend.\n"); ++ pr_alert("scsifront: " ++ "grant still in use by backend\n"); + BUG(); + } + gnttab_end_foreign_access(s->gref[i], 0UL); +@@ -246,42 +246,47 @@ static int map_data_for_request(struct v + { + grant_ref_t gref_head; + struct page *page; +- int err, i, ref, ref_cnt = 0; ++ int err, ref, ref_cnt = 0; + int write = (sc->sc_data_direction == DMA_TO_DEVICE); +- int nr_pages, off, len, bytes; ++ unsigned int i, nr_pages, off, len, 
bytes; + unsigned long buffer_pfn; +- unsigned int data_len = 0; + + if (sc->sc_data_direction == DMA_NONE) + return 0; + + err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head); + if (err) { +- printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n"); ++ pr_err("scsifront: gnttab_alloc_grant_references() error\n"); + return -ENOMEM; + } + + if (sc->use_sg) { + /* quoted scsi_lib.c/scsi_req_map_sg . */ +- struct scatterlist *sg = (struct scatterlist *)sc->request_buffer; +- nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ struct scatterlist *sg, *sgl = (struct scatterlist *)sc->request_buffer; ++ unsigned int data_len = sc->request_bufflen; + ++ nr_pages = (sc->request_bufflen + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (nr_pages > VSCSIIF_SG_TABLESIZE) { +- printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); ++ pr_err("scsifront: Unable to map request_buffer for command!\n"); + ref_cnt = (-E2BIG); + goto big_to_sg; + } + +- for (i = 0; i < sc->use_sg; i++) { +- page = sg[i].page; +- off = sg[i].offset; +- len = sg[i].length; +- data_len += len; ++ for_each_sg (sgl, sg, sc->use_sg, i) { ++ page = sg_page(sg); ++ off = sg->offset; ++ len = sg->length; + + buffer_pfn = page_to_phys(page) >> PAGE_SHIFT; + +- while (len > 0) { ++ while (len > 0 && data_len > 0) { ++ /* ++ * sg sends a scatterlist that is larger than ++ * the data_len it wants transferred for certain ++ * IO sizes ++ */ + bytes = min_t(unsigned int, len, PAGE_SIZE - off); ++ bytes = min(bytes, data_len); + + ref = gnttab_claim_grant_reference(&gref_head); + BUG_ON(ref == -ENOSPC); +@@ -296,6 +301,7 @@ static int map_data_for_request(struct v + + buffer_pfn++; + len -= bytes; ++ data_len -= bytes; + off = 0; + ref_cnt++; + } +--- head-2011-03-17.orig/drivers/xen/scsifront/xenbus.c 2011-02-08 10:03:46.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsifront/xenbus.c 2011-02-08 10:04:06.000000000 +0100 +@@ -215,7 +215,7 @@ static int scsifront_probe(struct xenbus + if (IS_ERR(info->kthread)) { + err = PTR_ERR(info->kthread); + info->kthread = NULL; +- printk(KERN_ERR "scsifront: kthread start err %d\n", err); ++ pr_err("scsifront: kthread start err %d\n", err); + goto free_sring; + } + +@@ -227,7 +227,7 @@ static int scsifront_probe(struct xenbus + + err = scsi_add_host(host, &dev->dev); + if (err) { +- printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); ++ pr_err("scsifront: fail to add scsi host %d\n", err); + goto free_sring; + } + +@@ -318,7 +318,7 @@ static void scsifront_do_lun_hotplug(str + if (device_state == XenbusStateInitialised) { + sdev = scsi_device_lookup(info->host, chn, tgt, lun); + if (sdev) { +- printk(KERN_ERR "scsifront: Device already in use.\n"); ++ pr_err("scsifront: Device already in use.\n"); + scsi_device_put(sdev); + xenbus_printf(XBT_NIL, dev->nodename, + state_str, "%d", XenbusStateClosed); +--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_fwd.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/accel_fwd.c 2011-01-31 17:56:27.000000000 +0100 +@@ -181,10 +181,11 @@ int netback_accel_fwd_add(const __u8 *ma + unsigned long flags; + cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac); + struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv; ++ DECLARE_MAC_BUF(buf); + + BUG_ON(fwd_priv == NULL); + +- DPRINTK("Adding mac " MAC_FMT "\n", MAC_ARG(mac)); ++ DPRINTK("Adding mac %s\n", print_mac(buf, mac)); + + spin_lock_irqsave(&fwd_set->fwd_lock, flags); + +@@ -199,8 
+200,8 @@ int netback_accel_fwd_add(const __u8 *ma + if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table, + (cuckoo_hash_key *)(&key), &rc) != 0) { + spin_unlock_irqrestore(&fwd_set->fwd_lock, flags); +- EPRINTK("MAC address " MAC_FMT " already accelerated.\n", +- MAC_ARG(mac)); ++ EPRINTK("MAC address %s already accelerated.\n", ++ print_mac(buf, mac)); + return -EEXIST; + } + +@@ -235,8 +236,9 @@ void netback_accel_fwd_remove(const __u8 + unsigned long flags; + cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac); + struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv; ++ DECLARE_MAC_BUF(buf); + +- DPRINTK("Removing mac " MAC_FMT "\n", MAC_ARG(mac)); ++ DPRINTK("Removing mac %s\n", print_mac(buf, mac)); + + BUG_ON(fwd_priv == NULL); + +@@ -394,14 +396,16 @@ void netback_accel_tx_packet(struct sk_b + + if (is_broadcast_ether_addr(skb_mac_header(skb)) + && packet_is_arp_reply(skb)) { ++ DECLARE_MAC_BUF(buf); + - static inline pgd_t *pgd_alloc(struct mm_struct *mm) + /* + * update our fast path forwarding to reflect this + * gratuitous ARP + */ + mac = skb_mac_header(skb)+ETH_ALEN; + +- DPRINTK("%s: found gratuitous ARP for " MAC_FMT "\n", +- __FUNCTION__, MAC_ARG(mac)); ++ DPRINTK("%s: found gratuitous ARP for %s\n", ++ __FUNCTION__, print_mac(buf, mac)); + + spin_lock_irqsave(&fwd_set->fwd_lock, flags); + /* +--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_msg.c 2008-02-20 09:32:49.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/accel_msg.c 2011-01-31 17:56:27.000000000 +0100 +@@ -57,11 +57,11 @@ static void netback_accel_msg_tx_localma { - /* -@@ -122,6 +124,7 @@ static inline pgd_t *pgd_alloc(struct mm - if (!pgd) - return NULL; - pgd_list_add(pgd); -+ pgd_test_and_unpin(pgd); - /* - * Copy kernel pointers in from init. - * Could keep a freelist or slab cache of those because the kernel -@@ -144,27 +147,7 @@ static inline pgd_t *pgd_alloc(struct mm + unsigned long lock_state; + struct net_accel_msg *msg; ++ DECLARE_MAC_BUF(buf); - static inline void pgd_free(pgd_t *pgd) + BUG_ON(bend == NULL || mac == NULL); + +- VPRINTK("Sending local mac message: " MAC_FMT "\n", +- MAC_ARG((const char *)mac)); ++ VPRINTK("Sending local mac message: %s\n", print_mac(buf, mac)); + + msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU, + &lock_state); +--- head-2011-03-17.orig/drivers/xen/sfc_netfront/accel_msg.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netfront/accel_msg.c 2011-01-31 17:56:27.000000000 +0100 +@@ -41,11 +41,13 @@ static void vnic_start_interrupts(netfro + /* Prime our interrupt */ + spin_lock_irqsave(&vnic->irq_enabled_lock, flags); + if (!netfront_accel_vi_enable_interrupts(vnic)) { ++ struct netfront_info *np = netdev_priv(vnic->net_dev); ++ + /* Cripes, that was quick, better pass it up */ + netfront_accel_disable_net_interrupts(vnic); + vnic->irq_enabled = 0; + NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++); +- netif_rx_schedule(vnic->net_dev); ++ netif_rx_schedule(vnic->net_dev, &np->napi); + } else { + /* + * Nothing yet, make sure we get interrupts through +@@ -72,6 +74,7 @@ static void vnic_stop_interrupts(netfron + static void vnic_start_fastpath(netfront_accel_vnic *vnic) { -- pte_t *ptep = virt_to_ptep(pgd); -- -- if (!pte_write(*ptep)) { -- xen_pgd_unpin(__pa(pgd)); -- BUG_ON(HYPERVISOR_update_va_mapping( -- (unsigned long)pgd, -- pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL), -- 0)); -- } -- -- ptep = virt_to_ptep(__user_pgd(pgd)); -- -- if (!pte_write(*ptep)) { -- 
xen_pgd_unpin(__pa(__user_pgd(pgd))); -- BUG_ON(HYPERVISOR_update_va_mapping( -- (unsigned long)__user_pgd(pgd), -- pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, -- PAGE_KERNEL), -- 0)); -- } -- -+ pgd_test_and_unpin(pgd); - pgd_list_del(pgd); - free_pages((unsigned long)pgd, 1); + struct net_device *net_dev = vnic->net_dev; ++ struct netfront_info *np = netdev_priv(net_dev); + unsigned long flags; + + DPRINTK("%s\n", __FUNCTION__); +@@ -80,9 +83,9 @@ static void vnic_start_fastpath(netfront + vnic->tx_enabled = 1; + spin_unlock_irqrestore(&vnic->tx_lock, flags); + +- netif_poll_disable(net_dev); ++ napi_disable(&np->napi); + vnic->poll_enabled = 1; +- netif_poll_enable(net_dev); ++ napi_enable(&np->napi); + + vnic_start_interrupts(vnic); + } +@@ -114,11 +117,11 @@ void vnic_stop_fastpath(netfront_accel_v + spin_unlock_irqrestore(&vnic->tx_lock, flags1); + + /* Must prevent polls and hold lock to modify poll_enabled */ +- netif_poll_disable(net_dev); ++ napi_disable(&np->napi); + spin_lock_irqsave(&vnic->irq_enabled_lock, flags1); + vnic->poll_enabled = 0; + spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1); +- netif_poll_enable(net_dev); ++ napi_enable(&np->napi); } ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "pgtable_32.h" -+#else -+# include "pgtable_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:10:29.000000000 +0100 -@@ -17,10 +17,7 @@ - #include - #include --#ifndef _I386_BITOPS_H --#include --#endif -- -+#include - #include - #include - #include -@@ -40,7 +37,7 @@ extern spinlock_t pgd_lock; - extern struct page *pgd_list; - void check_pgt_cache(void); --void pmd_ctor(void *, struct kmem_cache *, unsigned long); -+void pmd_ctor(struct kmem_cache *, void *); - void pgtable_cache_init(void); - void paging_init(void); +@@ -324,8 +327,10 @@ static int vnic_process_localmac_msg(net + cuckoo_hash_mac_key key; + + if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) { +- DPRINTK("MAC has moved, could be local: " MAC_FMT "\n", +- MAC_ARG(msg->u.localmac.mac)); ++ DECLARE_MAC_BUF(buf); ++ ++ DPRINTK("MAC has moved, could be local: %s\n", ++ print_mac(buf, msg->u.localmac.mac)); + key = cuckoo_mac_to_key(msg->u.localmac.mac); + spin_lock_irqsave(&vnic->table_lock, flags); + /* Try to remove it, not a big deal if not there */ +@@ -513,6 +518,8 @@ irqreturn_t netfront_accel_net_channel_i ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:10:29.000000000 +0100 -@@ -9,7 +9,7 @@ - * the x86-64 page table tree. 
- */ - #include --#include -+#include - #include - #include - #include -@@ -139,6 +139,7 @@ static inline void pgd_clear (pgd_t * pg - #define MAXMEM _AC(0x6fffffffff, UL) - #define VMALLOC_START _AC(0xffffc20000000000, UL) - #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) -+#define VMEMMAP_START _AC(0xffffe20000000000, UL) - #define MODULES_VADDR _AC(0xffffffff88000000, UL) - #define MODULES_END _AC(0xffffffffff000000, UL) - #define MODULES_LEN (MODULES_END - MODULES_VADDR) ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "processor_32.h" -+#else -+# include "processor_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:10:29.000000000 +0100 -@@ -80,6 +80,7 @@ struct cpuinfo_x86 { - unsigned char booted_cores; /* number of cores as seen by OS */ - __u8 phys_proc_id; /* Physical processor id. */ - __u8 cpu_core_id; /* Core id */ -+ __u8 cpu_index; /* index into per_cpu list */ + spin_lock_irqsave(&vnic->irq_enabled_lock, flags); + if (vnic->irq_enabled) { ++ struct netfront_info *np = netdev_priv(net_dev); ++ + netfront_accel_disable_net_interrupts(vnic); + vnic->irq_enabled = 0; + spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags); +@@ -525,7 +532,7 @@ irqreturn_t netfront_accel_net_channel_i + vnic->stats.event_count_since_irq; + vnic->stats.event_count_since_irq = 0; #endif - } __attribute__((__aligned__(SMP_CACHE_BYTES))); +- netif_rx_schedule(net_dev); ++ netif_rx_schedule(net_dev, &np->napi); + } + else { + spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags); +--- head-2011-03-17.orig/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:56:27.000000000 +0100 +@@ -643,8 +643,10 @@ netfront_accel_vi_tx_post(netfront_accel + (cuckoo_hash_key *)(&key), &value); -@@ -106,14 +107,19 @@ DECLARE_PER_CPU(struct tss_struct, init_ - #endif + if (!try_fastpath) { +- VPRINTK("try fast path false for mac: " MAC_FMT "\n", +- MAC_ARG(skb->data)); ++ DECLARE_MAC_BUF(buf); ++ ++ VPRINTK("try fast path false for mac: %s\n", ++ print_mac(buf, skb->data)); + + return NETFRONT_ACCEL_STATUS_CANT; + } +@@ -770,9 +772,10 @@ static void netfront_accel_vi_rx_comple + if (compare_ether_addr(skb->data, vnic->mac)) { + struct iphdr *ip = (struct iphdr *)(skb->data + ETH_HLEN); + u16 port; ++ DECLARE_MAC_BUF(buf); - #ifdef CONFIG_SMP --extern struct cpuinfo_x86 cpu_data[]; --#define current_cpu_data cpu_data[smp_processor_id()] -+DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); -+#define cpu_data(cpu) per_cpu(cpu_info, cpu) -+#define current_cpu_data cpu_data(smp_processor_id()) +- DPRINTK("%s: saw wrong MAC address " MAC_FMT "\n", +- __FUNCTION__, MAC_ARG(skb->data)); ++ DPRINTK("%s: saw wrong MAC address %s\n", ++ __FUNCTION__, print_mac(buf, skb->data)); + + if (ip->protocol == IPPROTO_TCP) { + struct tcphdr *tcp = (struct tcphdr *) +--- head-2011-03-17.orig/drivers/xen/sfc_netutil/accel_msg_iface.c 2008-02-20 09:32:49.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netutil/accel_msg_iface.c 2011-01-31 17:56:27.000000000 +0100 +@@ -36,7 +36,7 @@ #else --#define cpu_data (&boot_cpu_data) --#define current_cpu_data boot_cpu_data -+#define cpu_data(cpu) boot_cpu_data -+#define current_cpu_data boot_cpu_data - #endif + 
#define NET_ACCEL_CHECK_MAGIC(_p, _errval) \ + if (_p->magic != NET_ACCEL_MSG_MAGIC) { \ +- printk(KERN_ERR "%s: passed invalid shared page %p!\n", \ ++ pr_err("%s: passed invalid shared page %p!\n", \ + __FUNCTION__, _p); \ + return _errval; \ + } +--- head-2011-03-17.orig/drivers/xen/sfc_netutil/accel_util.h 2008-02-20 09:32:49.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netutil/accel_util.h 2011-01-31 17:56:27.000000000 +0100 +@@ -63,9 +63,6 @@ + DPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \ + } while(0) --extern int cpu_llc_id[NR_CPUS]; -+/* -+ * the following now lives in the per cpu area: -+ * extern int cpu_llc_id[NR_CPUS]; -+ */ -+DECLARE_PER_CPU(u8, cpu_llc_id); - extern char ignore_fpu_irq; +-#define MAC_FMT "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" +-#define MAC_ARG(_mac) (_mac)[0], (_mac)[1], (_mac)[2], (_mac)[3], (_mac)[4], (_mac)[5] +- + #include - void __init cpu_detect(struct cpuinfo_x86 *c); -@@ -560,7 +566,9 @@ static inline void xen_set_iopl_mask(uns - * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx - * resulting in stale register contents being returned. - */ --static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) -+static inline void cpuid(unsigned int op, -+ unsigned int *eax, unsigned int *ebx, -+ unsigned int *ecx, unsigned int *edx) - { - *eax = op; - *ecx = 0; -@@ -568,8 +576,9 @@ static inline void cpuid(unsigned int op + /*! Map a set of pages from another domain +--- head-2011-03-17.orig/drivers/xen/tpmback/interface.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/tpmback/interface.c 2011-01-31 17:56:27.000000000 +0100 +@@ -48,7 +48,7 @@ static tpmif_t *alloc_tpmif(domid_t domi + out_of_memory: + if (tpmif != NULL) + kmem_cache_free(tpmif_cachep, tpmif); +- printk("%s: out of memory\n", __FUNCTION__); ++ pr_err("%s: out of memory\n", __FUNCTION__); + return ERR_PTR(-ENOMEM); } - /* Some CPUID calls want 'count' to be placed in ecx */ --static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, -- int *edx) -+static inline void cpuid_count(unsigned int op, int count, -+ unsigned int *eax, unsigned int *ebx, -+ unsigned int *ecx, unsigned int *edx) - { - *eax = op; - *ecx = count; -@@ -639,6 +648,17 @@ static inline unsigned int cpuid_edx(uns - #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" - #define K7_NOP8 K7_NOP7 ASM_NOP1 +--- head-2011-03-17.orig/drivers/xen/tpmback/tpmback.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/tpmback/tpmback.c 2011-01-31 17:56:27.000000000 +0100 +@@ -908,8 +908,7 @@ static int __init tpmback_init(void) + int rc; -+/* P6 nops */ -+/* uses eax dependencies (Intel-recommended choice) */ -+#define P6_NOP1 GENERIC_NOP1 -+#define P6_NOP2 ".byte 0x66,0x90\n" -+#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" -+#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" -+#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" -+#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" -+#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" -+#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" -+ - #ifdef CONFIG_MK8 - #define ASM_NOP1 K8_NOP1 - #define ASM_NOP2 K8_NOP2 -@@ -657,6 +677,17 @@ static inline unsigned int cpuid_edx(uns - #define ASM_NOP6 K7_NOP6 - #define ASM_NOP7 K7_NOP7 - #define ASM_NOP8 K7_NOP8 -+#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \ -+ defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \ -+ defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4) -+#define ASM_NOP1 P6_NOP1 
-+#define ASM_NOP2 P6_NOP2 -+#define ASM_NOP3 P6_NOP3 -+#define ASM_NOP4 P6_NOP4 -+#define ASM_NOP5 P6_NOP5 -+#define ASM_NOP6 P6_NOP6 -+#define ASM_NOP7 P6_NOP7 -+#define ASM_NOP8 P6_NOP8 - #else - #define ASM_NOP1 GENERIC_NOP1 - #define ASM_NOP2 GENERIC_NOP2 ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:10:29.000000000 +0100 -@@ -74,6 +74,7 @@ struct cpuinfo_x86 { - __u8 booted_cores; /* number of cores as seen by OS */ - __u8 phys_proc_id; /* Physical Processor id. */ - __u8 cpu_core_id; /* Core id. */ -+ __u8 cpu_index; /* index into per_cpu list */ - #endif - } ____cacheline_aligned; + if ((rc = misc_register(&vtpms_miscdevice)) != 0) { +- printk(KERN_ALERT +- "Could not register misc device for TPM BE.\n"); ++ pr_alert("Could not register misc device for TPM BE\n"); + return rc; + } -@@ -88,11 +89,12 @@ struct cpuinfo_x86 { - #define X86_VENDOR_UNKNOWN 0xff +@@ -929,7 +928,7 @@ static int __init tpmback_init(void) + return rc; + } - #ifdef CONFIG_SMP --extern struct cpuinfo_x86 cpu_data[]; --#define current_cpu_data cpu_data[smp_processor_id()] -+DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); -+#define cpu_data(cpu) per_cpu(cpu_info, cpu) -+#define current_cpu_data cpu_data(smp_processor_id()) - #else --#define cpu_data (&boot_cpu_data) --#define current_cpu_data boot_cpu_data -+#define cpu_data(cpu) boot_cpu_data -+#define current_cpu_data boot_cpu_data - #endif +- printk(KERN_ALERT "Successfully initialized TPM backend driver.\n"); ++ pr_alert("Successfully initialized TPM backend driver\n"); - extern char ignore_irq13; -@@ -343,6 +345,16 @@ struct extended_sigtable { - }; + return 0; + } +--- head-2011-03-17.orig/drivers/xen/usbback/interface.c 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/drivers/xen/usbback/interface.c 2011-01-31 17:56:27.000000000 +0100 +@@ -113,7 +113,8 @@ static int map_frontend_pages(usbif_t *u + gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); + + if (op.status != GNTST_okay) { +- printk(KERN_ERR "grant table failure mapping urb_ring_ref %d\n", (int)op.status); ++ pr_err("grant table failure mapping urb_ring_ref %d\n", ++ (int)op.status); + return -EINVAL; + } +@@ -132,7 +133,8 @@ static int map_frontend_pages(usbif_t *u + GNTMAP_host_map, usbif->urb_shmem_handle); + VOID(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, + 1)); +- printk(KERN_ERR "grant table failure mapping conn_ring_ref %d\n", (int)op.status); ++ pr_err("grant table failure mapping conn_ring_ref %d\n", ++ (int)op.status); + return -EINVAL; + } -+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) -+#define ASM_NOP1 P6_NOP1 -+#define ASM_NOP2 P6_NOP2 -+#define ASM_NOP3 P6_NOP3 -+#define ASM_NOP4 P6_NOP4 -+#define ASM_NOP5 P6_NOP5 -+#define ASM_NOP6 P6_NOP6 -+#define ASM_NOP7 P6_NOP7 -+#define ASM_NOP8 P6_NOP8 -+#else - #define ASM_NOP1 K8_NOP1 - #define ASM_NOP2 K8_NOP2 - #define ASM_NOP3 K8_NOP3 -@@ -351,6 +363,7 @@ struct extended_sigtable { - #define ASM_NOP6 K8_NOP6 - #define ASM_NOP7 K8_NOP7 - #define ASM_NOP8 K8_NOP8 -+#endif +--- head-2011-03-17.orig/drivers/xen/usbback/usbback.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/usbback.c 2011-01-31 17:56:27.000000000 +0100 +@@ -86,6 +86,8 @@ typedef struct { + static pending_req_t *pending_reqs; + static struct list_head pending_free; + static DEFINE_SPINLOCK(pending_free_lock); ++static LIST_HEAD(pending_urb_free); ++static 
DEFINE_SPINLOCK(urb_free_lock); + static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq); + + #define USBBACK_INVALID_HANDLE (~0) +@@ -231,7 +233,7 @@ static int usbbk_alloc_urb(usbif_urb_req + else + pending_req->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!pending_req->urb) { +- printk(KERN_ERR "usbback: can't alloc urb\n"); ++ pr_err("usbback: can't alloc urb\n"); + ret = -ENOMEM; + goto fail; + } +@@ -241,7 +243,7 @@ static int usbbk_alloc_urb(usbif_urb_req + req->buffer_length, GFP_KERNEL, + &pending_req->transfer_dma); + if (!pending_req->buffer) { +- printk(KERN_ERR "usbback: can't alloc urb buffer\n"); ++ pr_err("usbback: can't alloc urb buffer\n"); + ret = -ENOMEM; + goto fail_free_urb; + } +@@ -252,7 +254,7 @@ static int usbbk_alloc_urb(usbif_urb_req + sizeof(struct usb_ctrlrequest), GFP_KERNEL, + &pending_req->setup_dma); + if (!pending_req->setup) { +- printk(KERN_ERR "usbback: can't alloc usb_ctrlrequest\n"); ++ pr_err("usbback: can't alloc usb_ctrlrequest\n"); + ret = -ENOMEM; + goto fail_free_buffer; + } +@@ -272,6 +274,15 @@ fail: + + static void usbbk_free_urb(struct urb *urb) + { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&urb_free_lock, flags); ++ list_add(&urb->urb_list, &pending_urb_free); ++ spin_unlock_irqrestore(&urb_free_lock, flags); ++} ++ ++static void _usbbk_free_urb(struct urb *urb) ++{ + if (usb_pipecontrol(urb->pipe)) + usb_buffer_free(urb->dev, sizeof(struct usb_ctrlrequest), + urb->setup_packet, urb->setup_dma); +@@ -282,6 +293,29 @@ static void usbbk_free_urb(struct urb *u + usb_free_urb(urb); + } + ++static void usbbk_free_urbs(void) ++{ ++ unsigned long flags; ++ struct list_head tmp_list; ++ ++ if (list_empty(&pending_urb_free)) ++ return; ++ ++ INIT_LIST_HEAD(&tmp_list); ++ ++ spin_lock_irqsave(&urb_free_lock, flags); ++ list_splice_init(&pending_urb_free, &tmp_list); ++ spin_unlock_irqrestore(&urb_free_lock, flags); ++ ++ while (!list_empty(&tmp_list)) { ++ struct urb *next_urb = list_first_entry(&tmp_list, struct urb, ++ urb_list); ++ ++ list_del(&next_urb->urb_list); ++ _usbbk_free_urb(next_urb); ++ } ++} ++ + static void usbbk_notify_work(usbif_t *usbif) + { + usbif->waiting_reqs = 1; +@@ -356,7 +390,7 @@ static int usbbk_gnttab_map(usbif_t *usb + nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs; + + if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) { +- printk(KERN_ERR "Bad number of segments in request\n"); ++ pr_err("Bad number of segments in request\n"); + ret = -EINVAL; + goto fail; + } +@@ -399,7 +433,7 @@ static int usbbk_gnttab_map(usbif_t *usb + gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]); + + if (unlikely(map[i].status != GNTST_okay)) { +- printk(KERN_ERR "usbback: invalid buffer -- could not remap it\n"); ++ pr_err("usbback: invalid buffer -- could not remap it\n"); + map[i].handle = USBBACK_INVALID_HANDLE; + ret |= 1; + } +@@ -927,7 +961,7 @@ static void dispatch_request_to_pending_ + + ret = usbbk_gnttab_map(usbif, req, pending_req); + if (ret) { +- printk(KERN_ERR "usbback: invalid buffer\n"); ++ pr_err("usbback: invalid buffer\n"); + ret = -ESHUTDOWN; + goto fail_free_urb; + } +@@ -950,7 +984,7 @@ static void dispatch_request_to_pending_ + + ret = usb_submit_urb(pending_req->urb, GFP_KERNEL); + if (ret) { +- printk(KERN_ERR "usbback: failed submitting urb, error %d\n", ret); ++ pr_err("usbback: failed submitting urb, error %d\n", ret); + ret = -ESHUTDOWN; + goto fail_flush_area; + } +@@ -982,7 +1016,7 @@ static int usbbk_start_submit_urb(usbif_ - /* Opteron nops */ - #define K8_NOP1 ".byte 0x90\n" -@@ 
-362,6 +375,17 @@ struct extended_sigtable { - #define K8_NOP7 K8_NOP4 K8_NOP3 - #define K8_NOP8 K8_NOP4 K8_NOP4 + while (rc != rp) { + if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) { +- printk(KERN_WARNING "RING_REQUEST_CONS_OVERFLOW\n"); ++ pr_warning("RING_REQUEST_CONS_OVERFLOW\n"); + break; + } -+/* P6 nops */ -+/* uses eax dependencies (Intel-recommended choice) */ -+#define P6_NOP1 ".byte 0x90\n" -+#define P6_NOP2 ".byte 0x66,0x90\n" -+#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" -+#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" -+#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" -+#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" -+#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" -+#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" +@@ -1053,8 +1087,11 @@ int usbbk_schedule(void *arg) + + if (usbbk_start_submit_urb(usbif)) + usbif->waiting_reqs = 1; + - #define ASM_NOP_MAX 8 ++ usbbk_free_urbs(); + } - /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ -@@ -377,12 +401,6 @@ static inline void sync_core(void) - asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); - } ++ usbbk_free_urbs(); + usbif->xenusbd = NULL; + usbif_put(usbif); --#define ARCH_HAS_PREFETCH --static inline void prefetch(void *x) --{ -- asm volatile("prefetcht0 (%0)" :: "r" (x)); --} -- - #define ARCH_HAS_PREFETCHW 1 - static inline void prefetchw(void *x) - { -@@ -398,11 +416,6 @@ static inline void prefetchw(void *x) +--- head-2011-03-17.orig/drivers/xen/usbback/usbstub.c 2011-03-11 10:54:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/usbstub.c 2011-03-11 10:55:46.000000000 +0100 +@@ -150,7 +150,7 @@ static struct usbstub *usbstub_alloc(str - #define cpu_relax() rep_nop() + stub = kzalloc(sizeof(*stub), GFP_KERNEL); + if (!stub) { +- printk(KERN_ERR "no memory for alloc usbstub\n"); ++ pr_err("no memory for usbstub\n"); + return NULL; + } + kref_init(&stub->kref); +@@ -303,7 +303,7 @@ int __init usbstub_init(void) --static inline void serialize_cpu(void) --{ -- __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); --} -- - static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) + err = usb_register(&usbback_usb_driver); + if (err < 0) { +- printk(KERN_ERR "usbback: usb_register failed (error %d)\n", err); ++ pr_err("usbback: usb_register failed (%d)\n", err); + goto out; + } + +--- head-2011-03-17.orig/drivers/xen/usbback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 +@@ -242,8 +242,9 @@ static int connect_rings(usbif_t *usbif) + return err; + } + +- printk("usbback: urb-ring-ref %ld, conn-ring-ref %ld, event-channel %d\n", +- urb_ring_ref, conn_ring_ref, evtchn); ++ pr_info("usbback: urb-ring-ref %ld, conn-ring-ref %ld," ++ " event-channel %d\n", ++ urb_ring_ref, conn_ring_ref, evtchn); + + err = usbif_map(usbif, urb_ring_ref, conn_ring_ref, evtchn); + if (err) { +@@ -270,8 +271,8 @@ static void frontend_changed(struct xenb + + case XenbusStateInitialising: + if (dev->state == XenbusStateClosed) { +- printk("%s: %s: prepare for reconnect\n", +- __FUNCTION__, dev->nodename); ++ pr_info("%s: %s: prepare for reconnect\n", ++ __FUNCTION__, dev->nodename); + xenbus_switch_state(dev, XenbusStateInitWait); + } + break; +--- head-2011-03-17.orig/drivers/xen/usbfront/usbfront-dbg.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/usbfront-dbg.c 2011-01-31 17:56:27.000000000 +0100 +@@ -91,8 +91,8 @@ static inline void 
create_debug_file(str { ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "smp_32.h" -+#else -+# include "smp_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:10:29.000000000 +0100 -@@ -11,7 +11,7 @@ - #endif + struct class_device *cldev = info_to_hcd(info)->self.class_dev; + if (class_device_create_file(cldev, &class_device_attr_statistics)) +- printk(KERN_WARNING "statistics file not created for %s\n", +- info_to_hcd(info)->self.bus_name); ++ pr_warning("statistics file not created for %s\n", ++ info_to_hcd(info)->self.bus_name); + } - #if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__) --#include -+#include - #include - #include - #ifdef CONFIG_X86_IO_APIC -@@ -30,8 +30,8 @@ - extern void smp_alloc_memory(void); - extern int pic_mode; - extern int smp_num_siblings; --extern cpumask_t cpu_sibling_map[]; --extern cpumask_t cpu_core_map[]; -+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -+DECLARE_PER_CPU(cpumask_t, cpu_core_map); + static inline void remove_debug_file(struct usbfront_info *info) +--- head-2011-03-17.orig/drivers/xen/usbfront/usbfront-hcd.c 2009-10-15 11:45:41.000000000 +0200 ++++ head-2011-03-17/drivers/xen/usbfront/usbfront-hcd.c 2011-01-31 17:56:27.000000000 +0100 +@@ -114,7 +114,6 @@ static void xenhcd_stop(struct usb_hcd * + * non-error returns are promise to giveback the urb later + */ + static int xenhcd_urb_enqueue(struct usb_hcd *hcd, +- struct usb_host_endpoint *ep, + struct urb *urb, + gfp_t mem_flags) + { +@@ -130,6 +129,7 @@ static int xenhcd_urb_enqueue(struct usb + ret = -ENOMEM; + goto done; + } ++ urbp->status = 1; - extern void (*mtrr_hook) (void); - extern void zap_low_mappings (void); -@@ -39,9 +39,11 @@ extern void lock_ipi_call_lock(void); - extern void unlock_ipi_call_lock(void); + ret = xenhcd_submit_urb(info, urbp); + if (ret != 0) +@@ -144,7 +144,7 @@ done: + * called as .urb_dequeue() + */ + static int xenhcd_urb_dequeue(struct usb_hcd *hcd, +- struct urb *urb) ++ struct urb *urb, int status) + { + struct usbfront_info *info = hcd_to_info(hcd); + struct urb_priv *urbp; +@@ -157,6 +157,7 @@ static int xenhcd_urb_dequeue(struct usb + if (!urbp) + goto done; - #define MAX_APICID 256 --extern u8 x86_cpu_to_apicid[]; -+extern u8 __initdata x86_cpu_to_apicid_init[]; -+extern void *x86_cpu_to_apicid_ptr; -+DECLARE_PER_CPU(u8, x86_cpu_to_apicid); ++ urbp->status = status; + ret = xenhcd_unlink_urb(info, urbp); --#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] -+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) + done: +--- head-2011-03-17.orig/drivers/xen/usbfront/usbfront-q.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/usbfront-q.c 2011-01-31 17:56:27.000000000 +0100 +@@ -151,7 +151,7 @@ static int map_urb_for_request(struct us + + ret = gnttab_alloc_grant_references(USBIF_MAX_SEGMENTS_PER_REQUEST, &gref_head); + if (ret) { +- printk(KERN_ERR "usbfront: gnttab_alloc_grant_references() error\n"); ++ pr_err("usbfront: gnttab_alloc_grant_references() error\n"); + return -ENOMEM; + } - #ifdef CONFIG_HOTPLUG_CPU - extern void cpu_exit_clear(void); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 
15:10:29.000000000 +0100 -@@ -40,10 +40,19 @@ extern void lock_ipi_call_lock(void); - extern void unlock_ipi_call_lock(void); - extern int smp_num_siblings; - extern void smp_send_reschedule(int cpu); -+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), -+ void *info, int wait); +@@ -236,7 +236,8 @@ __acquires(info->lock) + COUNT(info->stats.complete); + } + spin_unlock(&info->lock); +- usb_hcd_giveback_urb(info_to_hcd(info), urb); ++ usb_hcd_giveback_urb(info_to_hcd(info), urb, ++ urbp->status <= 0 ? urbp->status : urb->status); + spin_lock(&info->lock); + } --extern cpumask_t cpu_sibling_map[NR_CPUS]; --extern cpumask_t cpu_core_map[NR_CPUS]; --extern u8 cpu_llc_id[NR_CPUS]; -+/* -+ * cpu_sibling_map and cpu_core_map now live -+ * in the per cpu area -+ * -+ * extern cpumask_t cpu_sibling_map[NR_CPUS]; -+ * extern cpumask_t cpu_core_map[NR_CPUS]; -+ */ -+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -+DECLARE_PER_CPU(cpumask_t, cpu_core_map); -+DECLARE_PER_CPU(u8, cpu_llc_id); +--- head-2011-03-17.orig/drivers/xen/usbfront/usbfront.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/usbfront.h 2011-01-31 17:56:27.000000000 +0100 +@@ -82,6 +82,7 @@ struct urb_priv { + struct urb *urb; + int req_id; /* RING_REQUEST id for submitting */ + int unlink_req_id; /* RING_REQUEST id for unlinking */ ++ int status; + unsigned unlinked:1; /* dequeued marker */ + }; - #define SMP_TRAMPOLINE_BASE 0x6000 +--- head-2011-03-17.orig/drivers/xen/usbfront/xenbus.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/xenbus.c 2011-01-31 17:56:27.000000000 +0100 +@@ -395,7 +395,7 @@ static int __init usbfront_init(void) + xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv", + sizeof(struct urb_priv), 0, 0, NULL); + if (!xenhcd_urbp_cachep) { +- printk(KERN_ERR "usbfront failed to create kmem cache\n"); ++ pr_err("usbfront failed to create kmem cache\n"); + return -ENOMEM; + } -@@ -70,6 +79,8 @@ extern unsigned __cpuinitdata disabled_c +--- head-2011-03-17.orig/drivers/xen/util.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/util.c 2011-01-31 17:56:27.000000000 +0100 +@@ -11,7 +11,7 @@ struct class *get_xen_class(void) - #endif /* CONFIG_SMP */ + xen_class = class_create(THIS_MODULE, "xen"); + if (IS_ERR(xen_class)) { +- printk("Failed to create xen sysfs class.\n"); ++ pr_err("failed to create xen sysfs class\n"); + xen_class = NULL; + } -+#define safe_smp_processor_id() smp_processor_id() -+ - #ifdef CONFIG_X86_LOCAL_APIC - static inline int hard_smp_processor_id(void) - { -@@ -82,8 +93,9 @@ static inline int hard_smp_processor_id( - * Some lowlevel functions might want to know about - * the real APIC ID <-> CPU # mapping. 
- */ --extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ --extern u8 x86_cpu_to_log_apicid[NR_CPUS]; -+extern u8 __initdata x86_cpu_to_apicid_init[]; -+extern void *x86_cpu_to_apicid_ptr; -+DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */ - extern u8 bios_cpu_apicid[]; +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:56:27.000000000 +0100 +@@ -238,13 +238,13 @@ int xb_init_comms(void) + int err; + + if (intf->req_prod != intf->req_cons) +- printk(KERN_ERR "XENBUS request ring is not quiescent " ++ pr_err("XENBUS request ring is not quiescent " + "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); + + if (intf->rsp_prod != intf->rsp_cons) { +- printk(KERN_WARNING "XENBUS response ring is not quiescent " +- "(%08x:%08x): fixing up\n", +- intf->rsp_cons, intf->rsp_prod); ++ pr_warning("XENBUS response ring is not quiescent" ++ " (%08x:%08x): fixing up\n", ++ intf->rsp_cons, intf->rsp_prod); + intf->rsp_cons = intf->rsp_prod; + } - #ifdef CONFIG_X86_LOCAL_APIC -@@ -118,8 +130,9 @@ static __inline int logical_smp_processo - #endif +@@ -259,7 +259,7 @@ int xb_init_comms(void) + xen_store_evtchn, wake_waiting, + 0, "xenbus", &xb_waitq); + if (err <= 0) { +- printk(KERN_ERR "XENBUS request irq failed %i\n", err); ++ pr_err("XENBUS request irq failed %i\n", err); + return err; + } - #ifdef CONFIG_SMP --#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] -+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) - #else -+extern unsigned int boot_cpu_id; - #define cpu_physical_id(cpu) boot_cpu_id - #endif /* !CONFIG_SMP */ - #endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/swiotlb.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "swiotlb_32.h" -+#else -+# include_next -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "system_32.h" -+#else -+# include "system_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:10:29.000000000 +0100 -@@ -9,6 +9,7 @@ - #include +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:56:27.000000000 +0100 +@@ -112,13 +112,13 @@ static int frontend_bus_id(char bus_id[X + { + nodename = strchr(nodename, '/'); + if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { +- printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); ++ pr_warning("XENBUS: bad frontend %s\n", nodename); + return -EINVAL; + } - #ifdef __KERNEL__ -+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */ + strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); + if (!strchr(bus_id, '/')) { +- printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); ++ pr_warning("XENBUS: bus_id %s no slash\n", bus_id); + return -EINVAL; + } + *strchr(bus_id, '/') = '-'; +@@ -176,11 +176,9 @@ static int read_backend_details(struct x + } - struct task_struct; /* one of the stranger aspects of C forward declarations.. 
*/ - extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); -@@ -138,7 +139,7 @@ static inline unsigned long xen_read_cr4 + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) && (defined(CONFIG_XEN) || defined(MODULE)) +-static int xenbus_uevent_frontend(struct device *dev, char **envp, +- int num_envp, char *buffer, int buffer_size) ++static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { - unsigned long val; - /* This could fault if %cr4 does not exist */ -- asm("1: movl %%cr4, %0 \n" -+ asm volatile("1: movl %%cr4, %0 \n" - "2: \n" - ".section __ex_table,\"a\" \n" - ".long 1b,2b \n" -@@ -157,6 +158,11 @@ static inline void xen_wbinvd(void) - asm volatile("wbinvd": : :"memory"); + struct xenbus_device *xdev; +- int length = 0, i = 0; + + if (dev == NULL) + return -ENODEV; +@@ -189,12 +187,9 @@ static int xenbus_uevent_frontend(struct + return -ENODEV; + + /* stuff we want to pass to /sbin/hotplug */ +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, +- "XENBUS_TYPE=%s", xdev->devicetype); +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, +- "XENBUS_PATH=%s", xdev->nodename); +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, +- "MODALIAS=xen:%s", xdev->devicetype); ++ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); ++ add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); ++ add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); + + return 0; + } +@@ -762,8 +757,8 @@ static int suspend_dev(struct device *de + if (drv->suspend) + err = drv->suspend(xdev); + if (err) +- printk(KERN_WARNING +- "xenbus: suspend %s failed: %i\n", dev->bus_id, err); ++ pr_warning("xenbus: suspend %s failed: %i\n", ++ dev->bus_id, err); + return 0; } -+static inline void clflush(volatile void *__p) -+{ -+ asm volatile("clflush %0" : "+m" (*(char __force *)__p)); -+} -+ - #define read_cr0() (xen_read_cr0()) - #define write_cr0(x) (xen_write_cr0(x)) - #define read_cr2() (xen_read_cr2()) -@@ -207,6 +213,7 @@ static inline unsigned long get_limit(un +@@ -782,9 +777,8 @@ static int suspend_cancel_dev(struct dev + if (drv->suspend_cancel) + err = drv->suspend_cancel(xdev); + if (err) +- printk(KERN_WARNING +- "xenbus: suspend_cancel %s failed: %i\n", +- dev->bus_id, err); ++ pr_warning("xenbus: suspend_cancel %s failed: %i\n", ++ dev->bus_id, err); + return 0; + } - #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) - #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) -+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) +@@ -804,9 +798,8 @@ static int resume_dev(struct device *dev + + err = talk_to_otherend(xdev); + if (err) { +- printk(KERN_WARNING +- "xenbus: resume (talk_to_otherend) %s failed: %i\n", +- dev->bus_id, err); ++ pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n", ++ dev->bus_id, err); + return err; + } + +@@ -815,18 +808,16 @@ static int resume_dev(struct device *dev + if (drv->resume) { + err = drv->resume(xdev); + if (err) { +- printk(KERN_WARNING +- "xenbus: resume %s failed: %i\n", +- dev->bus_id, err); ++ pr_warning("xenbus: resume %s failed: %i\n", ++ dev->bus_id, err); + return err; + } + } + + err = watch_otherend(xdev); + if (err) { +- printk(KERN_WARNING +- "xenbus_probe: resume (watch_otherend) %s failed: " +- "%d.\n", dev->bus_id, err); ++ pr_warning("xenbus_probe: resume (watch_otherend) %s failed:" ++ " %d\n", dev->bus_id, err); + return err; + } - /** - 
* read_barrier_depends - Flush all pending reads that subsequents reads -@@ -262,18 +269,18 @@ static inline unsigned long get_limit(un +@@ -1012,9 +1003,8 @@ int xenbus_conn(domid_t remote_dom, unsi + fail1: + rc2 = xb_free_port(xen_store_evtchn); + if (rc2 != 0) +- printk(KERN_WARNING +- "XENBUS: Error freeing xenstore event channel: %d\n", +- rc2); ++ pr_warning("XENBUS: Error freeing xenstore event channel:" ++ " %d\n", rc2); + fail0: + xen_store_evtchn = -1; + return rc; +@@ -1040,9 +1030,8 @@ static int __devinit xenbus_probe_init(v + /* Register ourselves with the kernel bus subsystem */ + xenbus_frontend.error = bus_register(&xenbus_frontend.bus); + if (xenbus_frontend.error) +- printk(KERN_WARNING +- "XENBUS: Error registering frontend bus: %i\n", +- xenbus_frontend.error); ++ pr_warning("XENBUS: Error registering frontend bus: %i\n", ++ xenbus_frontend.error); + xenbus_backend_bus_register(); - #define read_barrier_depends() do { } while(0) + /* +@@ -1117,8 +1106,8 @@ static int __devinit xenbus_probe_init(v + /* Initialize the interface to xenstore. */ + err = xs_init(); + if (err) { +- printk(KERN_WARNING +- "XENBUS: Error initializing xenstore comms: %i\n", err); ++ pr_warning("XENBUS: Error initializing xenstore comms: %i\n", ++ err); + goto err; + } -+#ifdef CONFIG_SMP -+#define smp_mb() mb() -+#ifdef CONFIG_X86_PPRO_FENCE -+# define smp_rmb() rmb() -+#else -+# define smp_rmb() barrier() -+#endif - #ifdef CONFIG_X86_OOSTORE --/* Actually there are no OOO store capable CPUs for now that do SSE, -- but make it already an possibility. */ --#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) -+# define smp_wmb() wmb() - #else --#define wmb() __asm__ __volatile__ ("": : :"memory") -+# define smp_wmb() barrier() +@@ -1128,9 +1117,8 @@ static int __devinit xenbus_probe_init(v + xenbus_frontend.error = device_register(&xenbus_frontend.dev); + if (xenbus_frontend.error) { + bus_unregister(&xenbus_frontend.bus); +- printk(KERN_WARNING +- "XENBUS: Error registering frontend device: %i\n", +- xenbus_frontend.error); ++ pr_warning("XENBUS: Error registering frontend device:" ++ " %d\n", xenbus_frontend.error); + } + } #endif -- --#ifdef CONFIG_SMP --#define smp_mb() mb() --#define smp_rmb() rmb() --#define smp_wmb() wmb() - #define smp_read_barrier_depends() read_barrier_depends() - #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) - #else -@@ -300,5 +307,6 @@ extern unsigned long arch_align_stack(un - extern void free_init_pages(char *what, unsigned long begin, unsigned long end); +@@ -1212,8 +1200,8 @@ static int print_device_status(struct de + + if (!dev->driver) { + /* Information only: is this too noisy? 
*/ +- printk(KERN_INFO "XENBUS: Device with no driver: %s\n", +- xendev->nodename); ++ pr_info("XENBUS: Device with no driver: %s\n", ++ xendev->nodename); + return 0; + } - void default_idle(void); -+void __show_registers(struct pt_regs *, int all); +@@ -1221,15 +1209,15 @@ static int print_device_status(struct de + enum xenbus_state rstate = XenbusStateUnknown; + if (xendev->otherend) + rstate = xenbus_read_driver_state(xendev->otherend); +- printk(KERN_WARNING "XENBUS: Timeout connecting " +- "to device: %s (local state %d, remote state %d)\n", +- xendev->nodename, xendev->state, rstate); ++ pr_warning("XENBUS: Timeout connecting to device: %s" ++ " (local state %d, remote state %d)\n", ++ xendev->nodename, xendev->state, rstate); + } - #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:10:29.000000000 +0100 -@@ -11,8 +11,12 @@ + xendrv = to_xenbus_driver(dev->driver); + if (xendrv->is_ready && !xendrv->is_ready(xendev)) +- printk(KERN_WARNING "XENBUS: Device not ready: %s\n", +- xendev->nodename); ++ pr_warning("XENBUS: Device not ready: %s\n", ++ xendev->nodename); - #ifdef __KERNEL__ + return 0; + } +@@ -1263,8 +1251,8 @@ static void wait_for_devices(struct xenb + while (exists_connecting_device(drv)) { + if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { + if (!seconds_waited) +- printk(KERN_WARNING "XENBUS: Waiting for " +- "devices to initialise: "); ++ pr_warning("XENBUS: Waiting for " ++ "devices to initialise: "); + seconds_waited += 5; + printk("%us...", 300 - seconds_waited); + if (seconds_waited == 300) +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:56:27.000000000 +0100 +@@ -60,8 +60,7 @@ + #include + #endif --#define __STR(x) #x --#define STR(x) __STR(x) -+/* entries in ARCH_DLINFO: */ -+#ifdef CONFIG_IA32_EMULATION -+# define AT_VECTOR_SIZE_ARCH 2 -+#else -+# define AT_VECTOR_SIZE_ARCH 1 -+#endif +-static int xenbus_uevent_backend(struct device *dev, char **envp, +- int num_envp, char *buffer, int buffer_size); ++static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env); + static int xenbus_probe_backend(const char *type, const char *domid); - #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" - #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" -@@ -92,7 +96,7 @@ static inline void write_cr0(unsigned lo + extern int read_otherend_details(struct xenbus_device *xendev, +@@ -128,13 +127,10 @@ static struct xen_bus_type xenbus_backen + }, + }; - #define read_cr3() ({ \ - unsigned long __dummy; \ -- asm("movq %%cr3,%0" : "=r" (__dummy)); \ -+ asm volatile("movq %%cr3,%0" : "=r" (__dummy)); \ - machine_to_phys(__dummy); \ - }) +-static int xenbus_uevent_backend(struct device *dev, char **envp, +- int num_envp, char *buffer, int buffer_size) ++static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) + { + struct xenbus_device *xdev; + struct xenbus_driver *drv; +- int i = 0; +- int length = 0; -@@ -105,7 +109,7 @@ static inline void write_cr3(unsigned lo - static inline unsigned long read_cr4(void) - { - unsigned long cr4; -- asm("movq %%cr4,%0" : "=r" (cr4)); -+ asm volatile("movq %%cr4,%0" : "=r" (cr4)); - return cr4; - } + DPRINTK(""); -@@ -131,12 +135,17 @@ static inline void write_cr8(unsigned lo +@@ 
-146,27 +142,16 @@ static int xenbus_uevent_backend(struct + return -ENODEV; - #endif /* __KERNEL__ */ + /* stuff we want to pass to /sbin/hotplug */ +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, +- "XENBUS_TYPE=%s", xdev->devicetype); ++ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); -+static inline void clflush(volatile void *__p) -+{ -+ asm volatile("clflush %0" : "+m" (*(char __force *)__p)); -+} -+ - #define nop() __asm__ __volatile__ ("nop") +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, +- "XENBUS_PATH=%s", xdev->nodename); ++ add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); - #ifdef CONFIG_SMP - #define smp_mb() mb() --#define smp_rmb() rmb() --#define smp_wmb() wmb() -+#define smp_rmb() barrier() -+#define smp_wmb() barrier() - #define smp_read_barrier_depends() do {} while(0) - #else - #define smp_mb() barrier() -@@ -153,12 +162,8 @@ static inline void write_cr8(unsigned lo - */ - #define mb() asm volatile("mfence":::"memory") - #define rmb() asm volatile("lfence":::"memory") +- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, +- "XENBUS_BASE_PATH=%s", xenbus_backend.root); - --#ifdef CONFIG_UNORDERED_IO - #define wmb() asm volatile("sfence" ::: "memory") --#else --#define wmb() asm volatile("" ::: "memory") --#endif -+ - #define read_barrier_depends() do {} while(0) - #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) +- /* terminate, set to next free slot, shrink available space */ +- envp[i] = NULL; +- envp = &envp[i]; +- num_envp -= i; +- buffer = &buffer[length]; +- buffer_size -= length; ++ add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "tlbflush_32.h" -+#else -+# include "tlbflush_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:10:29.000000000 +0100 -@@ -23,7 +23,6 @@ - * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(vma, start, end) flushes a range of pages - * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages -- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables - * - * ..but the i386 has somewhat limited tlb flushing capabilities, - * and page-granular flushes are available only on i486 and up. 
-@@ -97,10 +96,4 @@ static inline void flush_tlb_kernel_rang - flush_tlb_all(); + if (dev->driver) { + drv = to_xenbus_driver(dev->driver); + if (drv && drv->uevent) +- return drv->uevent(xdev, envp, num_envp, buffer, +- buffer_size); ++ return drv->uevent(xdev, env); + } + + return 0; +@@ -268,9 +253,8 @@ void xenbus_backend_bus_register(void) + { + xenbus_backend.error = bus_register(&xenbus_backend.bus); + if (xenbus_backend.error) +- printk(KERN_WARNING +- "XENBUS: Error registering backend bus: %i\n", +- xenbus_backend.error); ++ pr_warning("XENBUS: Error registering backend bus: %i\n", ++ xenbus_backend.error); } --static inline void flush_tlb_pgtables(struct mm_struct *mm, -- unsigned long start, unsigned long end) --{ -- /* i386 does not keep any page table caches in TLB */ --} -- - #endif /* _I386_TLBFLUSH_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:10:29.000000000 +0100 + void xenbus_backend_device_register(void) +@@ -281,9 +265,8 @@ void xenbus_backend_device_register(void + xenbus_backend.error = device_register(&xenbus_backend.dev); + if (xenbus_backend.error) { + bus_unregister(&xenbus_backend.bus); +- printk(KERN_WARNING +- "XENBUS: Error registering backend device: %i\n", +- xenbus_backend.error); ++ pr_warning("XENBUS: Error registering backend device: %i\n", ++ xenbus_backend.error); + } + } + +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:56:27.000000000 +0100 +@@ -135,9 +135,8 @@ static int get_error(const char *errorst + + for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { + if (i == ARRAY_SIZE(xsd_errors) - 1) { +- printk(KERN_WARNING +- "XENBUS xen store gave: unknown error %s", +- errorstring); ++ pr_warning("XENBUS xen store gave: unknown error %s", ++ errorstring); + return EINVAL; + } + } +@@ -278,9 +277,9 @@ static void *xs_talkv(struct xenbus_tran + + if (msg.type != type) { + if (printk_ratelimit()) +- printk(KERN_WARNING +- "XENBUS unexpected type [%d], expected [%d]\n", +- msg.type, type); ++ pr_warning("XENBUS unexpected type [%d]," ++ " expected [%d]\n", ++ msg.type, type); + kfree(ret); + return ERR_PTR(-EINVAL); + } +@@ -677,9 +676,8 @@ void unregister_xenbus_watch(struct xenb + + err = xs_unwatch(watch->node, token); + if (err) +- printk(KERN_WARNING +- "XENBUS Failed to release watch %s: %i\n", +- watch->node, err); ++ pr_warning("XENBUS Failed to release watch %s: %i\n", ++ watch->node, err); + + up_read(&xs_state.watch_mutex); + +@@ -909,8 +907,8 @@ static int xenbus_thread(void *unused) + for (;;) { + err = process_msg(); + if (err) +- printk(KERN_WARNING "XENBUS error %d while reading " +- "message\n", err); ++ pr_warning("XENBUS error %d while reading " ++ "message\n", err); + if (kthread_should_stop()) + break; + } +--- head-2011-03-17.orig/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:56:27.000000000 +0100 @@ -28,7 +28,6 @@ - * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(vma, start, end) flushes a range of pages - * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages -- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables - * - * x86-64 can only flush individual pages or full VMs. 
For a range flush - * we always do the full VM. Might be worth trying if for a small -@@ -95,12 +94,4 @@ static inline void flush_tlb_kernel_rang - flush_tlb_all(); + #include + #include + #include +-#include "../../../drivers/oprofile/cpu_buffer.h" + #include "../../../drivers/oprofile/event_buffer.h" + + #define MAX_XENOPROF_SAMPLES 16 +@@ -141,8 +140,7 @@ static void xenoprof_add_pc(xenoprof_buf + if (xenoprof_is_escape(buf, tail) && + xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) { + tracing=1; +- oprofile_add_pc(ESCAPE_CODE, buf->event_log[tail].mode, +- CPU_TRACE_BEGIN); ++ oprofile_add_mode(buf->event_log[tail].mode); + if (!is_passive) + oprofile_samples++; + else +@@ -566,8 +564,8 @@ int __init xenoprofile_init(struct oprof + active_defined = 0; + } + +- printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n", +- __func__, ret, init.num_events, xenoprof_is_primary); ++ pr_info("%s: ret %d, events %d, xenoprof_is_primary %d\n", ++ __func__, ret, init.num_events, xenoprof_is_primary); + return ret; } --static inline void flush_tlb_pgtables(struct mm_struct *mm, -- unsigned long start, unsigned long end) --{ -- /* x86_64 does not keep any page table caches in a software TLB. -- The CPUs do in their hardware TLBs, but they are handled -- by the normal TLB flushing algorithms. */ --} -- - #endif /* _X8664_TLBFLUSH_H */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/xor.h 2010-03-24 15:10:29.000000000 +0100 -@@ -0,0 +1,5 @@ -+#ifdef CONFIG_X86_32 -+# include "../../xor_32.h" -+#else -+# include "xor_64.h" -+#endif ---- head-2010-05-25.orig/arch/x86/include/asm/mmu.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/mmu.h 2010-03-24 15:10:29.000000000 +0100 -@@ -11,6 +11,9 @@ - typedef struct { - void *ldt; - int size; -+#ifdef CONFIG_XEN -+ unsigned has_foreign_mappings:1; -+#endif - struct mutex lock; - void *vdso; - } mm_context_t; ---- head-2010-05-25.orig/include/linux/kexec.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/include/linux/kexec.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-17.orig/include/linux/kexec.h 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/include/linux/kexec.h 2011-01-31 17:56:27.000000000 +0100 @@ -205,8 +205,15 @@ extern struct kimage *kexec_crash_image; #define VMCOREINFO_BYTES (4096) #define VMCOREINFO_NOTE_NAME "VMCOREINFO" @@ -8061,9 +10266,9 @@ Acked-by: jbeulich@novell.com /* Location of a reserved region to hold the crash kernel. */ ---- head-2010-05-25.orig/include/linux/oprofile.h 2010-03-24 15:02:17.000000000 +0100 -+++ head-2010-05-25/include/linux/oprofile.h 2010-03-24 15:10:29.000000000 +0100 -@@ -120,6 +120,8 @@ void oprofile_add_ext_sample(unsigned lo +--- head-2011-03-17.orig/include/linux/oprofile.h 2011-02-17 10:06:04.000000000 +0100 ++++ head-2011-03-17/include/linux/oprofile.h 2011-02-17 10:10:35.000000000 +0100 +@@ -123,6 +123,8 @@ void oprofile_add_ext_sample(unsigned lo * backtrace. 
*/ void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); @@ -8072,8 +10277,8 @@ Acked-by: jbeulich@novell.com /* add a backtrace entry, to be called from the ->backtrace callback */ void oprofile_add_trace(unsigned long eip); ---- head-2010-05-25.orig/include/linux/sysctl.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/linux/sysctl.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-17.orig/include/linux/sysctl.h 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/include/linux/sysctl.h 2011-01-31 17:56:27.000000000 +0100 @@ -59,6 +59,7 @@ enum CTL_BUS=8, /* Busses */ CTL_ABI=9, /* Binary emulation */ @@ -8082,8 +10287,74 @@ Acked-by: jbeulich@novell.com CTL_ARLAN=254, /* arlan wireless driver */ CTL_S390DBF=5677, /* s390 debug */ CTL_SUNRPC=7249, /* sunrpc debug */ ---- head-2010-05-25.orig/include/xen/pcifront.h 2007-06-18 08:38:13.000000000 +0200 -+++ head-2010-05-25/include/xen/pcifront.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-17.orig/include/xen/cpu_hotplug.h 2007-08-16 18:07:01.000000000 +0200 ++++ head-2011-03-17/include/xen/cpu_hotplug.h 2011-01-31 17:56:27.000000000 +0100 +@@ -25,8 +25,8 @@ void cpu_bringup(void); + static inline int smp_suspend(void) + { + if (num_online_cpus() > 1) { +- printk(KERN_WARNING "Can't suspend SMP guests " +- "without CONFIG_HOTPLUG_CPU\n"); ++ pr_warning("Can't suspend SMP guests without" ++ " CONFIG_HOTPLUG_CPU\n"); + return -EOPNOTSUPP; + } + return 0; +--- head-2011-03-17.orig/include/xen/gnttab.h 2010-09-23 15:39:04.000000000 +0200 ++++ head-2011-03-17/include/xen/gnttab.h 2011-01-31 17:56:27.000000000 +0100 +@@ -172,11 +172,11 @@ gnttab_set_replace_op(struct gnttab_unma + BUG_ON(__ret); \ + } \ + if (__hc_delay == 0) { \ +- printk(KERN_ERR "%s: %s gnt busy\n", __func__, current->comm); \ ++ pr_err("%s: %s gnt busy\n", __func__, current->comm); \ + (__HCarg_p)->status = GNTST_bad_page; \ + } \ + if ((__HCarg_p)->status != GNTST_okay) \ +- printk(KERN_ERR "%s: %s gnt status %x\n", \ ++ pr_err("%s: %s gnt status %x\n", \ + __func__, current->comm, (__HCarg_p)->status); \ + } + +@@ -191,11 +191,11 @@ gnttab_set_replace_op(struct gnttab_unma + msleep(__hc_delay++); \ + } while ((__HCarg_p)->status == GNTST_eagain && __hc_delay); \ + if (__hc_delay == 0) { \ +- printk(KERN_ERR "%s: %s gnt busy\n", __func__, current->comm); \ ++ pr_err("%s: %s gnt busy\n", __func__, current->comm); \ + (__HCarg_p)->status = GNTST_bad_page; \ + } \ + if ((__HCarg_p)->status != GNTST_okay) \ +- printk(KERN_ERR "%s: %s gnt status %x\n", \ ++ pr_err("%s: %s gnt status %x\n", \ + __func__, current->comm, (__HCarg_p)->status); \ + } + +--- head-2011-03-17.orig/include/xen/hvm.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-17/include/xen/hvm.h 2011-01-31 17:56:27.000000000 +0100 +@@ -13,8 +13,7 @@ static inline unsigned long hvm_get_para + xhv.index = idx; + r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); + if (r < 0) { +- printk(KERN_ERR "Cannot get hvm parameter %d: %d!\n", +- idx, r); ++ pr_err("Cannot get hvm parameter %d: %d!\n", idx, r); + return 0; + } + return xhv.value; +--- head-2011-03-17.orig/include/xen/net-util.h 2011-02-09 15:53:07.000000000 +0100 ++++ head-2011-03-17/include/xen/net-util.h 2011-02-09 15:55:10.000000000 +0100 +@@ -51,7 +51,7 @@ static inline int skb_checksum_setup(str + break; + default: + if (net_ratelimit()) +- printk(KERN_ERR "Attempting to checksum a non-" ++ pr_err("Attempting to checksum a non-" + "TCP/UDP packet, dropping a protocol" + " %d packet\n", iph->protocol); 
+ goto out; +--- head-2011-03-17.orig/include/xen/pcifront.h 2007-06-18 08:38:13.000000000 +0200 ++++ head-2011-03-17/include/xen/pcifront.h 2011-01-31 17:56:27.000000000 +0100 @@ -12,13 +12,11 @@ #ifndef __ia64__ @@ -8121,7 +10392,7 @@ Acked-by: jbeulich@novell.com struct pcifront_sd *sd) { --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/include/xen/sysctl.h 2010-03-24 15:10:29.000000000 +0100 ++++ head-2011-03-17/include/xen/sysctl.h 2011-01-31 17:56:27.000000000 +0100 @@ -0,0 +1,11 @@ +#ifndef _XEN_SYSCTL_H +#define _XEN_SYSCTL_H @@ -8134,8 +10405,8 @@ Acked-by: jbeulich@novell.com +}; + +#endif /* _XEN_SYSCTL_H */ ---- head-2010-05-25.orig/include/xen/xenbus.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/include/xen/xenbus.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-17.orig/include/xen/xenbus.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/include/xen/xenbus.h 2011-01-31 17:56:27.000000000 +0100 @@ -107,7 +107,7 @@ struct xenbus_driver { int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); @@ -8145,9 +10416,9 @@ Acked-by: jbeulich@novell.com struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); ---- head-2010-05-25.orig/kernel/kexec.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/kernel/kexec.c 2010-05-25 09:22:21.000000000 +0200 -@@ -53,7 +53,11 @@ note_buf_t __percpu *crash_notes; +--- head-2011-03-17.orig/kernel/kexec.c 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/kernel/kexec.c 2011-01-31 17:56:27.000000000 +0100 +@@ -47,7 +47,11 @@ note_buf_t __percpu *crash_notes; /* vmcoreinfo stuff */ static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; @@ -8160,7 +10431,7 @@ Acked-by: jbeulich@novell.com size_t vmcoreinfo_size; size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); -@@ -1272,6 +1276,7 @@ static int __init crash_notes_memory_ini +@@ -1260,6 +1264,7 @@ static int __init crash_notes_memory_ini module_init(crash_notes_memory_init) @@ -8168,7 +10439,7 @@ Acked-by: jbeulich@novell.com /* * parsing the "crashkernel" commandline * -@@ -1434,7 +1439,7 @@ int __init parse_crashkernel(char *cm +@@ -1422,7 +1427,7 @@ int __init parse_crashkernel(char *cm return 0; } @@ -8177,7 +10448,7 @@ Acked-by: jbeulich@novell.com void crash_save_vmcoreinfo(void) -@@ -1491,7 +1496,18 @@ static int __init crash_save_vmcoreinfo_ +@@ -1479,7 +1484,18 @@ static int __init crash_save_vmcoreinfo_ VMCOREINFO_SYMBOL(init_uts_ns); VMCOREINFO_SYMBOL(node_online_map); @@ -8196,9 +10467,9 @@ Acked-by: jbeulich@novell.com VMCOREINFO_SYMBOL(_stext); VMCOREINFO_SYMBOL(vmlist); ---- head-2010-05-25.orig/kernel/sysctl_binary.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/kernel/sysctl_binary.c 2010-04-15 09:55:30.000000000 +0200 -@@ -874,6 +874,14 @@ static const struct bin_table bin_bus_ta +--- head-2011-03-17.orig/kernel/sysctl_binary.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/kernel/sysctl_binary.c 2011-01-31 17:56:27.000000000 +0100 +@@ -873,6 +873,14 @@ static const struct bin_table bin_bus_ta }; @@ -8213,7 +10484,7 @@ Acked-by: jbeulich@novell.com static const struct bin_table bin_s390dbf_table[] = { { CTL_INT, 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" }, { CTL_INT, 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" }, -@@ -913,6 +921,9 @@ static const struct bin_table bin_root_t +@@ -912,6 +920,9 @@ static const struct bin_table bin_root_t { CTL_DIR, CTL_BUS, "bus", bin_bus_table }, 
{ CTL_DIR, CTL_ABI, "abi" }, /* CTL_CPU not used */ @@ -8223,8 +10494,8 @@ Acked-by: jbeulich@novell.com /* CTL_ARLAN "arlan" no longer used */ { CTL_DIR, CTL_S390DBF, "s390dbf", bin_s390dbf_table }, { CTL_DIR, CTL_SUNRPC, "sunrpc", bin_sunrpc_table }, ---- head-2010-05-25.orig/kernel/sysctl_check.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/kernel/sysctl_check.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-17.orig/kernel/sysctl_check.c 2011-03-17 13:45:28.000000000 +0100 ++++ head-2011-03-17/kernel/sysctl_check.c 2011-01-31 17:56:27.000000000 +0100 @@ -4,6 +4,7 @@ #include #include @@ -8233,8 +10504,8 @@ Acked-by: jbeulich@novell.com static int sysctl_depth(struct ctl_table *table) ---- head-2010-05-25.orig/lib/swiotlb-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/lib/swiotlb-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-01-31 17:56:27.000000000 +0100 @@ -27,7 +27,7 @@ #include #include @@ -8244,7 +10515,7 @@ Acked-by: jbeulich@novell.com int swiotlb; EXPORT_SYMBOL(swiotlb); -@@ -574,9 +574,10 @@ swiotlb_sync_single_for_device(struct de +@@ -602,9 +602,10 @@ swiotlb_sync_single_for_device(struct de * same here. */ int @@ -8256,7 +10527,7 @@ Acked-by: jbeulich@novell.com struct phys_addr buffer; dma_addr_t dev_addr; char *map; -@@ -584,22 +585,22 @@ swiotlb_map_sg(struct device *hwdev, str +@@ -612,22 +613,22 @@ swiotlb_map_sg(struct device *hwdev, str BUG_ON(dir == DMA_NONE); @@ -8285,7 +10556,7 @@ Acked-by: jbeulich@novell.com return 0; } sg->dma_address = virt_to_bus(map); -@@ -615,19 +616,21 @@ swiotlb_map_sg(struct device *hwdev, str +@@ -643,19 +644,21 @@ swiotlb_map_sg(struct device *hwdev, str * concerning calls here are the same as for swiotlb_unmap_single() above. */ void @@ -8309,13 +10580,13 @@ Acked-by: jbeulich@novell.com } /* -@@ -638,31 +641,35 @@ swiotlb_unmap_sg(struct device *hwdev, s +@@ -666,17 +669,19 @@ swiotlb_unmap_sg(struct device *hwdev, s * and usage. 
*/ - void --swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, -+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sgl, - int nelems, int dir) + static void +-swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, ++swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, + int nelems, int dir, int target) { + struct scatterlist *sg; int i; @@ -8326,26 +10597,8 @@ Acked-by: jbeulich@novell.com + for_each_sg(sgl, sg, nelems, i) { if (in_swiotlb_aperture(sg->dma_address)) sync_single(hwdev, bus_to_virt(sg->dma_address), - sg->dma_length, dir); + sg->dma_length, dir, target); + } } void --swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, -+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sgl, - int nelems, int dir) - { -+ struct scatterlist *sg; - int i; - - BUG_ON(dir == DMA_NONE); - -- for (i = 0; i < nelems; i++, sg++) -+ for_each_sg(sgl, sg, nelems, i) { - if (in_swiotlb_aperture(sg->dma_address)) - sync_single(hwdev, bus_to_virt(sg->dma_address), - sg->dma_length, dir); -+ } - } - - #ifdef CONFIG_HIGHMEM diff --git a/patches.xen/xen3-patch-2.6.25 b/patches.xen/xen3-patch-2.6.25 index 85cd9a8..20289b1 100644 --- a/patches.xen/xen3-patch-2.6.25 +++ b/patches.xen/xen3-patch-2.6.25 @@ -6,8 +6,8 @@ Signed-off-by: Greg Kroah-Hartman Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches.py ---- head-2010-04-29.orig/arch/x86/Kconfig 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/Kconfig 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-01-31 18:01:51.000000000 +0100 @@ -40,7 +40,7 @@ config X86 select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE @@ -17,8 +17,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK select HAVE_GENERIC_DMA_COHERENT if X86_32 -@@ -240,14 +240,12 @@ config X86_TRAMPOLINE - default y +@@ -227,14 +227,12 @@ config X86_TRAMPOLINE + depends on !XEN config X86_NO_TSS - bool @@ -34,7 +34,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches config X86_32_LAZY_GS def_bool y -@@ -920,9 +918,8 @@ config X86_VISWS_APIC +@@ -858,9 +856,8 @@ config X86_VISWS_APIC depends on X86_32 && X86_VISWS config X86_XEN_GENAPIC @@ -45,7 +45,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches config X86_REROUTE_FOR_BROKEN_BOOT_IRQS bool "Reroute for broken boot IRQs" -@@ -1371,7 +1368,7 @@ config ARCH_PROC_KCORE_TEXT +@@ -1309,7 +1306,7 @@ config ARCH_PROC_KCORE_TEXT config ARCH_SPARSEMEM_DEFAULT def_bool y @@ -54,8 +54,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches config ARCH_SPARSEMEM_ENABLE def_bool y -@@ -2073,10 +2070,10 @@ config PCI_MMCONFIG - depends on X86_64 && PCI && ACPI +@@ -2036,10 +2033,10 @@ config PCI_CNB20LE_QUIRK + You should say N unless you know you need this. config XEN_PCIDEV_FRONTEND - bool "Xen PCI Frontend" if X86_64 @@ -67,7 +67,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches help The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. 
-@@ -2084,7 +2081,6 @@ config XEN_PCIDEV_FRONTEND +@@ -2047,7 +2044,6 @@ config XEN_PCIDEV_FRONTEND config XEN_PCIDEV_FE_DEBUG bool "Xen PCI Frontend Debugging" depends on XEN_PCIDEV_FRONTEND @@ -75,9 +75,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches help Enables some debug statements within the PCI Frontend. ---- head-2010-04-29.orig/arch/x86/Kconfig.debug 2010-03-24 15:02:14.000000000 +0100 -+++ head-2010-04-29/arch/x86/Kconfig.debug 2010-03-24 15:10:37.000000000 +0100 -@@ -273,6 +273,7 @@ config DEBUG_BOOT_PARAMS +--- head-2011-03-11.orig/arch/x86/Kconfig.debug 2011-01-31 14:42:03.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig.debug 2011-01-31 18:01:51.000000000 +0100 +@@ -267,6 +267,7 @@ config DEBUG_BOOT_PARAMS bool "Debug boot parameters" depends on DEBUG_KERNEL depends on DEBUG_FS @@ -85,8 +85,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches ---help--- This option will cause struct boot_params to be exported via debugfs. ---- head-2010-04-29.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/ia32/ia32entry-xen.S 2011-01-31 18:01:51.000000000 +0100 @@ -12,7 +12,6 @@ #include #include @@ -130,9 +130,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + .quad compat_sys_timerfd_settime /* 325 */ + .quad compat_sys_timerfd_gettime ia32_syscall_end: ---- head-2010-04-29.orig/arch/x86/kernel/Makefile 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/Makefile 2010-03-24 15:10:37.000000000 +0100 -@@ -134,11 +134,10 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-03-11.orig/arch/x86/kernel/Makefile 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/Makefile 2011-01-31 18:01:51.000000000 +0100 +@@ -125,11 +125,10 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o @@ -145,9 +145,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches smpboot_$(BITS).o tsc_$(BITS).o tsc_sync.o -disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += mpparse_64.o -%/head_64.o %/head_64.s: asflags-$(CONFIG_XEN) := ---- head-2010-04-29.orig/arch/x86/kernel/acpi/boot.c 2010-04-15 09:52:23.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/acpi/boot.c 2010-04-15 09:56:18.000000000 +0200 -@@ -115,6 +115,11 @@ char *__init __acpi_map_table(unsigned l +--- head-2011-03-11.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 10:54:41.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/boot.c 2011-03-11 10:56:21.000000000 +0100 +@@ -162,6 +162,11 @@ char *__init __acpi_map_table(unsigned l if (!phys || !size) return NULL; @@ -160,7 +160,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } void __init __acpi_unmap_table(char *map, unsigned long size) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:10:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/sleep-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -0,0 +1,95 @@ +/* + * sleep.c - x86-specific ACPI sleep support. 
@@ -257,7 +257,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + +__setup("acpi_sleep=", acpi_sleep_setup); +#endif /* CONFIG_ACPI_PV_SLEEP */ ---- head-2010-04-29.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,117 +0,0 @@ -/* @@ -377,7 +377,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - -core_initcall(acpisleep_dmi_init); -#endif /* CONFIG_ACPI_PV_SLEEP */ ---- head-2010-04-29.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -/* @@ -505,9 +505,14 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -__setup("acpi_sleep=", acpi_sleep_setup); -#endif /* CONFIG_ACPI_PV_SLEEP */ - ---- head-2010-04-29.orig/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:10:37.000000000 +0100 -@@ -86,7 +86,7 @@ int setup_profiling_timer(unsigned int m +--- head-2011-03-11.orig/arch/x86/kernel/apic/apic-xen.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/apic/apic-xen.c 2011-02-24 15:45:13.000000000 +0100 +@@ -82,11 +82,12 @@ int setup_profiling_timer(unsigned int m + return -EINVAL; + } + ++#ifndef CONFIG_SMP + /* * This initializes the IO-APIC and APIC hardware if this is * a UP kernel. */ @@ -516,8 +521,13 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches { #ifdef CONFIG_X86_IO_APIC if (smp_found_config) ---- head-2010-04-29.orig/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:10:37.000000000 +0100 +@@ -96,3 +97,4 @@ int __init APIC_init_uniprocessor (void) + + return 0; + } ++#endif +--- head-2011-03-11.orig/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/asm-offsets_32.c 2011-01-31 18:01:51.000000000 +0100 @@ -24,8 +24,10 @@ #include #endif @@ -529,8 +539,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* workaround for a warning with -Wmissing-prototypes */ void foo(void); ---- head-2010-04-29.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/common-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -27,45 +27,50 @@ #include "cpu.h" @@ -745,7 +755,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches { int i; -@@ -427,20 +463,9 @@ static void __cpuinit identify_cpu(struc +@@ -429,20 +465,9 @@ static void __cpuinit identify_cpu(struc generic_identify(c); @@ -767,7 +777,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * Vendor-specific initialization. In this section we * canonicalize the feature flags, meaning if there are -@@ -462,23 +487,6 @@ static void __cpuinit identify_cpu(struc +@@ -464,23 +489,6 @@ static void __cpuinit identify_cpu(struc * we do "generic changes." 
*/ @@ -791,7 +801,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* If the model name is still unset, do table lookup. */ if ( !c->x86_model_id[0] ) { char *p; -@@ -491,13 +499,6 @@ static void __cpuinit identify_cpu(struc +@@ -493,13 +501,6 @@ static void __cpuinit identify_cpu(struc c->x86, c->x86_model); } @@ -805,7 +815,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are -@@ -510,8 +511,14 @@ static void __cpuinit identify_cpu(struc +@@ -512,8 +513,14 @@ static void __cpuinit identify_cpu(struc boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } @@ -820,7 +830,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } void __init identify_boot_cpu(void) -@@ -519,7 +526,6 @@ void __init identify_boot_cpu(void) +@@ -521,7 +528,6 @@ void __init identify_boot_cpu(void) identify_cpu(&boot_cpu_data); sysenter_setup(); enable_sep_cpu(); @@ -828,7 +838,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) -@@ -576,6 +582,13 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -578,6 +584,13 @@ void __cpuinit detect_ht(struct cpuinfo_ } #endif @@ -842,7 +852,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) { char *vendor = NULL; -@@ -599,6 +612,17 @@ void __cpuinit print_cpu_info(struct cpu +@@ -601,6 +614,17 @@ void __cpuinit print_cpu_info(struct cpu printk("\n"); } @@ -860,7 +870,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; /* This is hacky. :) -@@ -608,16 +632,6 @@ cpumask_t cpu_initialized __cpuinitdata +@@ -610,16 +634,6 @@ cpumask_t cpu_initialized __cpuinitdata * They will insert themselves into the cpu_devs structure. * Then, when cpu_init() is called, we can just iterate over that array. */ @@ -877,7 +887,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void __init early_cpu_init(void) { intel_cpu_init(); -@@ -629,21 +643,13 @@ void __init early_cpu_init(void) +@@ -631,21 +645,13 @@ void __init early_cpu_init(void) nexgen_init_cpu(); umc_init_cpu(); early_cpu_detect(); @@ -901,7 +911,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches return regs; } -@@ -651,7 +657,7 @@ struct pt_regs * __devinit idle_regs(str +@@ -653,7 +659,7 @@ struct pt_regs * __devinit idle_regs(str * it's on the real one. 
*/ void switch_to_new_gdt(void) { @@ -910,7 +920,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches unsigned long va, frames[16]; int f; -@@ -694,12 +700,6 @@ void __cpuinit cpu_init(void) +@@ -696,12 +702,6 @@ void __cpuinit cpu_init(void) if (cpu_has_vme || cpu_has_de) clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); @@ -923,7 +933,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches switch_to_new_gdt(); -@@ -712,7 +712,7 @@ void __cpuinit cpu_init(void) +@@ -714,7 +714,7 @@ void __cpuinit cpu_init(void) BUG(); enter_lazy_tlb(&init_mm, curr); @@ -932,8 +942,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches load_LDT(&init_mm.context); ---- head-2010-04-29.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -33,7 +33,7 @@ struct mtrr_ops generic_mtrr_ops = { struct mtrr_ops *mtrr_if = &generic_mtrr_ops; @@ -995,8 +1005,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches op.cmd = XENPF_del_memtype; op.u.del_memtype.handle = 0; op.u.del_memtype.reg = reg; ---- head-2010-04-29.orig/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -7,7 +7,6 @@ #include #include @@ -1367,8 +1377,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + print_memory_map("modified"); +} +#endif ---- head-2010-04-29.orig/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820_64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -1,4 +1,4 @@ -/* +/* @@ -2167,8 +2177,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } int __init arch_get_ram_range(int slot, u64 *addr, u64 *size) ---- head-2010-04-29.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/early_printk-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -222,7 +222,7 @@ static struct console simnow_console = { }; @@ -2178,8 +2188,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches static int early_console_initialized = 0; void early_printk(const char *fmt, ...) ---- head-2010-04-29.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_32-xen.S 2011-01-31 18:01:51.000000000 +0100 @@ -59,7 +59,7 @@ * for paravirtualization. 
The following will never clobber any registers: * INTERRUPT_RETURN (aka. "iret") @@ -2520,8 +2530,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#undef sys_fork +#undef sys_clone +#undef sys_vfork ---- head-2010-04-29.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_64-xen.S 2011-01-31 18:01:51.000000000 +0100 @@ -54,11 +54,9 @@ #include #include @@ -2822,19 +2832,19 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches TRACE_IRQS_OFF GET_THREAD_INFO(%rcx) jmp retint_restore_args ---- head-2010-04-29.orig/arch/x86/kernel/fixup.c 2008-01-28 12:24:18.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/fixup.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/fixup.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/fixup.c 2011-01-31 18:01:51.000000000 +0100 @@ -36,7 +36,7 @@ - #define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args ) + #define DP(_f, _args...) pr_alert(" " _f "\n" , ## _args ) -fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code) +void do_fixup_4gb_segment(struct pt_regs *regs, long error_code) { static unsigned long printed = 0; char info[100]; ---- head-2010-04-29.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -16,6 +16,7 @@ #include #include @@ -2940,8 +2950,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + start_kernel(); } ---- head-2010-04-29.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head_32-xen.S 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_32-xen.S 2011-01-31 18:01:51.000000000 +0100 @@ -3,6 +3,7 @@ .text #include @@ -2959,8 +2969,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches .fill 1024,4,0 ENTRY(empty_zero_page) .fill 4096,1,0 ---- head-2010-04-29.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -35,6 +35,7 @@ #include #include @@ -3032,8 +3042,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches .suspend = ioapic_suspend, .resume = ioapic_resume, }; ---- head-2010-04-29.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -32,9 +32,11 @@ #include #include @@ -3223,7 +3233,7 @@ Automatically created 
from "patches.kernel.org/patch-2.6.25" by xen-port-patches +late_initcall(ioapic_insert_resources); +#endif /* !CONFIG_XEN */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/ioport-xen.c 2010-03-24 15:10:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ioport-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -0,0 +1,112 @@ +/* + * This contains the io-permission bitmap code - written by obz, with changes @@ -3337,7 +3347,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +out: + return rc; +} ---- head-2010-04-29.orig/arch/x86/kernel/ioport_32-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ioport_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -/* @@ -3461,7 +3471,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - set_iopl_mask(t->iopl); - return 0; -} ---- head-2010-04-29.orig/arch/x86/kernel/ioport_64-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ioport_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -/* @@ -3564,7 +3574,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - return 0; -} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/ldt-xen.c 2010-03-24 15:10:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ldt-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -0,0 +1,272 @@ +/* + * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds @@ -3838,7 +3848,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + } + return ret; +} ---- head-2010-04-29.orig/arch/x86/kernel/ldt_32-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ldt_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,265 +0,0 @@ -/* @@ -4106,7 +4116,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - } - return ret; -} ---- head-2010-04-29.orig/arch/x86/kernel/ldt_64-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,271 +0,0 @@ -/* @@ -4380,8 +4390,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - } - return ret; -} ---- head-2010-04-29.orig/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:44:51.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:56:14.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:44:51.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/machine_kexec_64.c 2011-01-31 18:01:51.000000000 +0100 @@ -407,7 +407,9 @@ void machine_kexec(struct kimage *image) void arch_crash_save_vmcoreinfo(void) @@ -4392,8 +4402,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches VMCOREINFO_SYMBOL(init_level4_pgt); #ifdef CONFIG_NUMA ---- head-2010-04-29.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/microcode-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/microcode-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -167,7 
+167,7 @@ static int request_microcode(void) } @@ -4403,18 +4413,18 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches op.u.microcode.length = firmware->size; error = HYPERVISOR_platform_op(&op); ---- head-2010-04-29.orig/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -@@ -68,7 +68,7 @@ unsigned int def_to_bigsmp = 0; - /* Processor that is doing the boot up */ +--- head-2011-03-11.orig/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 18:01:51.000000000 +0100 +@@ -70,7 +70,7 @@ unsigned int def_to_bigsmp = 0; unsigned int boot_cpu_physical_apicid = -1U; + #endif /* Internal processor count */ -unsigned int __cpuinitdata num_processors; +unsigned int num_processors; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; -@@ -265,7 +265,7 @@ static void __init MP_ioapic_info (struc +@@ -267,7 +267,7 @@ static void __init MP_ioapic_info (struc if (!(m->mpc_flags & MPC_APIC_USABLE)) return; @@ -4423,7 +4433,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); if (nr_ioapics >= MAX_IO_APICS) { printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", -@@ -412,9 +412,9 @@ static int __init smp_read_mpc(struct mp +@@ -414,9 +414,9 @@ static int __init smp_read_mpc(struct mp mps_oem_check(mpc, oem, str); @@ -4435,7 +4445,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches * Save the local APIC address (it might be non-default) -- but only * if we're not using ACPI. */ -@@ -728,7 +728,7 @@ static int __init smp_scan_config (unsig +@@ -730,7 +730,7 @@ static int __init smp_scan_config (unsig unsigned long *bp = isa_bus_to_virt(base); struct intel_mp_floating *mpf; @@ -4444,7 +4454,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches if (sizeof(*mpf) != 16) printk("Error: MPF size\n"); -@@ -742,9 +742,10 @@ static int __init smp_scan_config (unsig +@@ -744,9 +744,10 @@ static int __init smp_scan_config (unsig smp_found_config = 1; #ifndef CONFIG_XEN @@ -4458,7 +4468,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches if (mpf->mpf_physptr) { /* * We cannot access to MPC table to compute -@@ -759,11 +760,12 @@ static int __init smp_scan_config (unsig +@@ -761,11 +762,12 @@ static int __init smp_scan_config (unsig unsigned long end = max_low_pfn * PAGE_SIZE; if (mpf->mpf_physptr + size > end) size = end - mpf->mpf_physptr; @@ -4474,7 +4484,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #endif mpf_found = mpf; -@@ -940,14 +942,14 @@ void __init mp_register_ioapic(u8 id, u3 +@@ -942,14 +944,14 @@ void __init mp_register_ioapic(u8 id, u3 */ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; mp_ioapic_routing[idx].gsi_base = gsi_base; @@ -4495,7 +4505,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } void __init -@@ -1063,15 +1065,16 @@ void __init mp_config_acpi_legacy_irqs ( +@@ -1065,15 +1067,16 @@ void __init mp_config_acpi_legacy_irqs ( } #define MAX_GSI_NUM 4096 @@ -4514,7 +4524,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches * represent all possible interrupts, and IRQs * assigned to actual devices. 
*/ -@@ -1108,12 +1111,16 @@ int mp_register_gsi(u32 gsi, int trigger +@@ -1110,12 +1113,16 @@ int mp_register_gsi(u32 gsi, int trigger if ((1<mpc_apicid, phys_cpu_present_map); if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { /* @@ -4592,8 +4602,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } cpu_set(cpu, cpu_possible_map); ---- head-2010-04-29.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-dma-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -431,3 +431,23 @@ dma_sync_single_for_device(struct device swiotlb_sync_single_for_device(dev, dma_handle, size, direction); } @@ -4618,8 +4628,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + flush_write_buffers(); +} +EXPORT_SYMBOL(dma_sync_sg_for_device); ---- head-2010-04-29.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -23,7 +23,6 @@ #include #include @@ -5315,8 +5325,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + unsigned long range_end = mm->brk + 0x02000000; + return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +} ---- head-2010-04-29.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:32:00.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_64-xen.c 2011-02-02 08:32:11.000000000 +0100 @@ -3,7 +3,7 @@ * * Pentium III FXSR, SSE support @@ -5349,7 +5359,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #include #include #include -@@ -73,7 +72,6 @@ EXPORT_SYMBOL(boot_option_idle_override) +@@ -72,7 +71,6 @@ EXPORT_SYMBOL(boot_option_idle_override) */ void (*pm_idle)(void); EXPORT_SYMBOL(pm_idle); @@ -5357,7 +5367,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches static ATOMIC_NOTIFIER_HEAD(idle_notifier); -@@ -81,13 +79,6 @@ void idle_notifier_register(struct notif +@@ -80,13 +78,6 @@ void idle_notifier_register(struct notif { atomic_notifier_chain_register(&idle_notifier, n); } @@ -5371,7 +5381,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void enter_idle(void) { -@@ -116,7 +107,7 @@ void exit_idle(void) +@@ -115,7 +106,7 @@ void exit_idle(void) * to poll the ->need_resched flag instead of waiting for the * cross-CPU IPI to arrive. Use this option with caution. 
*/ @@ -5380,7 +5390,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches { local_irq_enable(); cpu_relax(); -@@ -131,10 +122,19 @@ static void xen_idle(void) +@@ -130,10 +121,19 @@ static void xen_idle(void) */ smp_mb(); local_irq_disable(); @@ -5404,7 +5414,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches current_thread_info()->status |= TS_POLLING; } -@@ -161,19 +161,15 @@ static inline void play_dead(void) +@@ -160,19 +160,15 @@ static inline void play_dead(void) * low exit latency (ie sit in a loop waiting for * somebody to say that they'd like to reschedule) */ @@ -5426,7 +5436,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches rmb(); idle = xen_idle; /* no alternatives */ if (cpu_is_offline(smp_processor_id())) -@@ -203,49 +199,27 @@ static void do_nothing(void *unused) +@@ -202,49 +198,27 @@ static void do_nothing(void *unused) { } @@ -5489,7 +5499,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches { if (!strcmp(str, "poll")) { printk("using polling idle threads.\n"); -@@ -260,13 +234,13 @@ static int __init idle_setup (char *str) +@@ -259,13 +233,13 @@ static int __init idle_setup (char *str) } early_param("idle", idle_setup); @@ -5506,7 +5516,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches printk("\n"); print_modules(); -@@ -275,16 +249,16 @@ void __show_regs(struct pt_regs * regs) +@@ -274,16 +248,16 @@ void __show_regs(struct pt_regs * regs) init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); @@ -5530,7 +5540,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches printk("R10: %016lx R11: %016lx R12: %016lx\n", regs->r10, regs->r11, regs->r12); printk("R13: %016lx R14: %016lx R15: %016lx\n", -@@ -318,7 +292,7 @@ void show_regs(struct pt_regs *regs) +@@ -317,7 +291,7 @@ void show_regs(struct pt_regs *regs) { printk("CPU %d:", smp_processor_id()); __show_regs(regs); @@ -5539,7 +5549,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } /* -@@ -329,7 +303,7 @@ void exit_thread(void) +@@ -328,7 +302,7 @@ void exit_thread(void) struct task_struct *me = current; struct thread_struct *t = &me->thread; @@ -5548,7 +5558,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #ifndef CONFIG_X86_NO_TSS struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); #endif -@@ -382,7 +356,7 @@ void flush_thread(void) +@@ -381,7 +355,7 @@ void flush_thread(void) tsk->thread.debugreg3 = 0; tsk->thread.debugreg6 = 0; tsk->thread.debugreg7 = 0; @@ -5557,7 +5567,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * Forget coprocessor state.. 
*/ -@@ -405,26 +379,21 @@ void release_thread(struct task_struct * +@@ -404,26 +378,21 @@ void release_thread(struct task_struct * static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr) { @@ -5588,7 +5598,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } /* -@@ -436,7 +405,7 @@ void prepare_to_copy(struct task_struct +@@ -435,7 +404,7 @@ void prepare_to_copy(struct task_struct unlazy_fpu(tsk); } @@ -5597,7 +5607,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches unsigned long unused, struct task_struct * p, struct pt_regs * regs) { -@@ -448,14 +417,13 @@ int copy_thread(int nr, unsigned long cl +@@ -447,14 +416,13 @@ int copy_thread(int nr, unsigned long cl (THREAD_SIZE + task_stack_page(p))) - 1; *childregs = *regs; @@ -5619,7 +5629,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches set_tsk_thread_flag(p, TIF_FORK); -@@ -476,7 +444,7 @@ int copy_thread(int nr, unsigned long cl +@@ -475,7 +443,7 @@ int copy_thread(int nr, unsigned long cl memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES); set_tsk_thread_flag(p, TIF_IO_BITMAP); @@ -5628,7 +5638,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * Set a new TLS for the child thread? -@@ -484,7 +452,8 @@ int copy_thread(int nr, unsigned long cl +@@ -483,7 +451,8 @@ int copy_thread(int nr, unsigned long cl if (clone_flags & CLONE_SETTLS) { #ifdef CONFIG_IA32_EMULATION if (test_thread_flag(TIF_IA32)) @@ -5638,7 +5648,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches else #endif err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); -@@ -502,26 +471,32 @@ out: +@@ -501,26 +470,32 @@ out: return err; } @@ -5680,7 +5690,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { loaddebug(next, 0); loaddebug(next, 1); -@@ -531,12 +506,20 @@ static inline void __switch_to_xtra(stru +@@ -530,12 +505,20 @@ static inline void __switch_to_xtra(stru loaddebug(next, 6); loaddebug(next, 7); } @@ -5702,7 +5712,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches * - fold all the options into a flag word and test it with a single test. * - could test fs/gs bitsliced * -@@ -547,7 +530,7 @@ __switch_to(struct task_struct *prev_p, +@@ -546,7 +529,7 @@ __switch_to(struct task_struct *prev_p, { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; @@ -5711,7 +5721,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #ifndef CONFIG_X86_NO_TSS struct tss_struct *tss = &per_cpu(init_tss, cpu); #endif -@@ -581,11 +564,12 @@ __switch_to(struct task_struct *prev_p, +@@ -580,11 +563,12 @@ __switch_to(struct task_struct *prev_p, prev_p->fpu_counter = 0; /* @@ -5726,7 +5736,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches mcl++; /* -@@ -593,11 +577,12 @@ __switch_to(struct task_struct *prev_p, +@@ -592,11 +576,12 @@ __switch_to(struct task_struct *prev_p, * This is load_TLS(next, cpu) with multicalls. 
*/ #define C(i) do { \ @@ -5742,7 +5752,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches mcl++; \ } \ } while (0) -@@ -605,7 +590,7 @@ __switch_to(struct task_struct *prev_p, +@@ -604,7 +589,7 @@ __switch_to(struct task_struct *prev_p, #undef C if (unlikely(prev->iopl != next->iopl)) { @@ -5751,7 +5761,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #if CONFIG_XEN_COMPAT > 0x030002 mcl->op = __HYPERVISOR_physdev_op; mcl->args[0] = PHYSDEVOP_set_iopl; -@@ -669,8 +654,6 @@ __switch_to(struct task_struct *prev_p, +@@ -668,8 +653,6 @@ __switch_to(struct task_struct *prev_p, /* * Switch the PDA context. */ @@ -5760,7 +5770,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches write_pda(pcurrent, next_p); write_pda(kernelstack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET); -@@ -687,7 +670,8 @@ __switch_to(struct task_struct *prev_p, +@@ -686,7 +669,8 @@ __switch_to(struct task_struct *prev_p, /* * Now maybe reload the debug registers */ @@ -5770,7 +5780,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches __switch_to_xtra(prev_p, next_p); /* If the task has used fpu the last 5 timeslices, just do a full -@@ -702,23 +686,18 @@ __switch_to(struct task_struct *prev_p, +@@ -701,23 +685,18 @@ __switch_to(struct task_struct *prev_p, /* * sys_execve() executes a new program. */ @@ -5798,7 +5808,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches putname(filename); return error; } -@@ -728,18 +707,18 @@ void set_personality_64bit(void) +@@ -727,18 +706,18 @@ void set_personality_64bit(void) /* inherit personality from parent */ /* Make sure to be in 64bit mode */ @@ -5820,7 +5830,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } asmlinkage long -@@ -747,7 +726,7 @@ sys_clone(unsigned long clone_flags, uns +@@ -746,7 +725,7 @@ sys_clone(unsigned long clone_flags, uns void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) { if (!newsp) @@ -5829,7 +5839,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); } -@@ -763,29 +742,29 @@ sys_clone(unsigned long clone_flags, uns +@@ -762,29 +741,29 @@ sys_clone(unsigned long clone_flags, uns */ asmlinkage long sys_vfork(struct pt_regs *regs) { @@ -5866,7 +5876,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches fp = *(u64 *)fp; } while (count++ < 16); return 0; -@@ -827,19 +806,19 @@ long do_arch_prctl(struct task_struct *t +@@ -826,19 +805,19 @@ long do_arch_prctl(struct task_struct *t /* Not strictly needed for fs, but do it for symmetry with gs */ if (addr >= TASK_SIZE_OF(task)) @@ -5892,7 +5902,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches task->thread.fsindex = 0; task->thread.fs = addr; if (doit) { -@@ -852,24 +831,24 @@ long do_arch_prctl(struct task_struct *t +@@ -851,24 +830,24 @@ long do_arch_prctl(struct task_struct *t } put_cpu(); break; @@ -5923,7 +5933,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches if (gsindex) rdmsrl(MSR_KERNEL_GS_BASE, base); else -@@ -877,40 +856,21 @@ long do_arch_prctl(struct task_struct *t +@@ -876,40 +855,21 @@ long do_arch_prctl(struct task_struct *t } else base = task->thread.gs; @@ -5968,7 +5978,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by 
xen-port-patches } unsigned long arch_align_stack(unsigned long sp) -@@ -919,3 +879,9 @@ unsigned long arch_align_stack(unsigned +@@ -918,3 +878,9 @@ unsigned long arch_align_stack(unsigned sp -= get_random_int() % 8192; return sp & ~0xf; } @@ -5978,205 +5988,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + unsigned long range_end = mm->brk + 0x02000000; + return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +} ---- head-2010-04-29.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/quirks-xen.c 2010-03-24 15:10:37.000000000 +0100 -@@ -9,7 +9,7 @@ - static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) - { - u8 config, rev; -- u32 word; -+ u16 word; - - /* BIOS may enable hardware IRQ balancing for - * E7520/E7320/E7525(revision ID 0x9 and below) -@@ -24,14 +24,17 @@ static void __devinit quirk_intel_irqbal - pci_read_config_byte(dev, 0xf4, &config); - pci_write_config_byte(dev, 0xf4, config|0x2); - -- /* read xTPR register */ -- raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word); -+ /* -+ * read xTPR register. We may not have a pci_dev for device 8 -+ * because it might be hidden until the above write. -+ */ -+ pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word); - - if (!(word & (1 << 13))) { - struct xen_platform_op op; - -- printk(KERN_INFO "Intel E7520/7320/7525 detected. " -- "Disabling irq balancing and affinity\n"); -+ dev_info(&dev->dev, "Intel E7520/7320/7525 detected; " -+ "disabling irq balancing and affinity\n"); - op.cmd = XENPF_platform_quirk; - op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING; - WARN_ON(HYPERVISOR_platform_op(&op)); -@@ -102,14 +105,16 @@ static void ich_force_enable_hpet(struct - pci_read_config_dword(dev, 0xF0, &rcba); - rcba &= 0xFFFFC000; - if (rcba == 0) { -- printk(KERN_DEBUG "RCBA disabled. Cannot force enable HPET\n"); -+ dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; " -+ "cannot force enable HPET\n"); - return; - } - - /* use bits 31:14, 16 kB aligned */ - rcba_base = ioremap_nocache(rcba, 0x4000); - if (rcba_base == NULL) { -- printk(KERN_DEBUG "ioremap failed. Cannot force enable HPET\n"); -+ dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; " -+ "cannot force enable HPET\n"); - return; - } - -@@ -120,8 +125,8 @@ static void ich_force_enable_hpet(struct - /* HPET is enabled in HPTC. 
Just not reported by BIOS */ - val = val & 0x3; - force_hpet_address = 0xFED00000 | (val << 12); -- printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -- force_hpet_address); -+ dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " -+ "0x%lx\n", force_hpet_address); - iounmap(rcba_base); - return; - } -@@ -140,11 +145,12 @@ static void ich_force_enable_hpet(struct - if (err) { - force_hpet_address = 0; - iounmap(rcba_base); -- printk(KERN_DEBUG "Failed to force enable HPET\n"); -+ dev_printk(KERN_DEBUG, &dev->dev, -+ "Failed to force enable HPET\n"); - } else { - force_hpet_resume_type = ICH_FORCE_HPET_RESUME; -- printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -- force_hpet_address); -+ dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " -+ "0x%lx\n", force_hpet_address); - } - } - -@@ -160,6 +166,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I - ich_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, - ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, -+ ich_force_enable_hpet); - - - static struct pci_dev *cached_dev; -@@ -204,8 +212,8 @@ static void old_ich_force_enable_hpet(st - if (val & 0x4) { - val &= 0x3; - force_hpet_address = 0xFED00000 | (val << 12); -- printk(KERN_DEBUG "HPET at base address 0x%lx\n", -- force_hpet_address); -+ dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n", -+ force_hpet_address); - return; - } - -@@ -225,14 +233,14 @@ static void old_ich_force_enable_hpet(st - /* HPET is enabled in HPTC. Just not reported by BIOS */ - val &= 0x3; - force_hpet_address = 0xFED00000 | (val << 12); -- printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -- force_hpet_address); -+ dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " -+ "0x%lx\n", force_hpet_address); - cached_dev = dev; - force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME; - return; - } - -- printk(KERN_DEBUG "Failed to force enable HPET\n"); -+ dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); - } - - /* -@@ -290,8 +298,8 @@ static void vt8237_force_enable_hpet(str - */ - if (val & 0x80) { - force_hpet_address = (val & ~0x3ff); -- printk(KERN_DEBUG "HPET at base address 0x%lx\n", -- force_hpet_address); -+ dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n", -+ force_hpet_address); - return; - } - -@@ -305,14 +313,14 @@ static void vt8237_force_enable_hpet(str - pci_read_config_dword(dev, 0x68, &val); - if (val & 0x80) { - force_hpet_address = (val & ~0x3ff); -- printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -- force_hpet_address); -+ dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " -+ "0x%lx\n", force_hpet_address); - cached_dev = dev; - force_hpet_resume_type = VT8237_FORCE_HPET_RESUME; - return; - } - -- printk(KERN_DEBUG "Failed to force enable HPET\n"); -+ dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); - } - - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, -@@ -340,7 +348,7 @@ static void nvidia_force_enable_hpet(str - pci_read_config_dword(dev, 0x44, &val); - force_hpet_address = val & 0xfffffffe; - force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME; -- printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", -+ dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", - force_hpet_address); - cached_dev = dev; - return; -@@ -353,6 +361,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_N - nvidia_force_enable_hpet); - - /* LPC bridges */ 
-+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260, -+ nvidia_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360, - nvidia_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361, -@@ -373,19 +383,19 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_N - void force_hpet_resume(void) - { - switch (force_hpet_resume_type) { -- case ICH_FORCE_HPET_RESUME: -- return ich_force_hpet_resume(); -- -- case OLD_ICH_FORCE_HPET_RESUME: -- return old_ich_force_hpet_resume(); -- -- case VT8237_FORCE_HPET_RESUME: -- return vt8237_force_hpet_resume(); -- -- case NVIDIA_FORCE_HPET_RESUME: -- return nvidia_force_hpet_resume(); -- -- default: -+ case ICH_FORCE_HPET_RESUME: -+ ich_force_hpet_resume(); -+ return; -+ case OLD_ICH_FORCE_HPET_RESUME: -+ old_ich_force_hpet_resume(); -+ return; -+ case VT8237_FORCE_HPET_RESUME: -+ vt8237_force_hpet_resume(); -+ return; -+ case NVIDIA_FORCE_HPET_RESUME: -+ nvidia_force_hpet_resume(); -+ return; -+ default: - break; - } - } ---- head-2010-04-29.orig/arch/x86/kernel/rtc.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/rtc.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/rtc.c 2011-03-15 16:52:08.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/rtc.c 2011-01-31 18:01:51.000000000 +0100 @@ -171,6 +171,11 @@ int update_persistent_clock(struct times unsigned long flags; int retval; @@ -6200,8 +6013,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches spin_lock_irqsave(&rtc_lock, flags); retval = x86_platform.get_wallclock(); spin_unlock_irqrestore(&rtc_lock, flags); ---- head-2010-04-29.orig/arch/x86/kernel/setup64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -31,7 +31,11 @@ #include #endif @@ -6331,8 +6144,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * <= is required because the CPU will access up to * 8 bits beyond the end of the IO permission bitmap. 
---- head-2010-04-29.orig/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -47,9 +47,12 @@ #include #include @@ -6827,8 +6640,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +} + +subsys_initcall(request_standard_resources); ---- head-2010-04-29.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup_64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -15,7 +15,6 @@ #include #include @@ -7344,7 +7157,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -static void __init amd_detect_cmp(struct cpuinfo_x86 *c) +static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) { - #ifdef CONFIG_SMP + #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) unsigned bits; @@ -781,7 +784,54 @@ static void __init amd_detect_cmp(struct int node = 0; @@ -7391,7 +7204,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + +static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) +{ -+#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + unsigned bits, ecx; + + /* Multi core CPU? */ @@ -7549,7 +7362,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -static void __cpuinit detect_ht(struct cpuinfo_x86 *c) +void __cpuinit detect_ht(struct cpuinfo_x86 *c) { - #ifdef CONFIG_SMP + #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - u32 eax, ebx, ecx, edx; - int index_msb, core_bits; + u32 eax, ebx, ecx, edx; @@ -7600,16 +7413,16 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } #endif -@@ -1004,7 +1030,7 @@ static int __cpuinit intel_num_cpu_cores - return 1; +@@ -1006,7 +1032,7 @@ static int __cpuinit intel_num_cpu_cores } + #endif -static void srat_detect_node(void) +static void __cpuinit srat_detect_node(void) { #ifdef CONFIG_NUMA unsigned node; -@@ -1014,7 +1040,7 @@ static void srat_detect_node(void) +@@ -1016,7 +1042,7 @@ static void srat_detect_node(void) /* Don't do the funky fallback heuristics the AMD version employs for now. 
*/ node = apicid_to_node[apicid]; @@ -7618,7 +7431,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches node = first_node(node_online_map); numa_set_node(cpu, node); -@@ -1022,28 +1048,39 @@ static void srat_detect_node(void) +@@ -1024,28 +1050,39 @@ static void srat_detect_node(void) #endif } @@ -7662,7 +7475,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches n = c->extended_cpuid_level; if (n >= 0x80000008) { unsigned eax = cpuid_eax(0x80000008); -@@ -1060,14 +1097,11 @@ static void __cpuinit init_intel(struct +@@ -1062,15 +1099,12 @@ static void __cpuinit init_intel(struct c->x86_cache_alignment = c->x86_clflush_size * 2; if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) @@ -7674,14 +7487,15 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability); - else - clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability); -- c->x86_max_cores = intel_num_cpu_cores(c); + set_cpu_cap(c, X86_FEATURE_REP_GOOD); + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); + #ifndef CONFIG_XEN +- c->x86_max_cores = intel_num_cpu_cores(c); + c->x86_max_cores = intel_num_cpu_cores(c); + #endif srat_detect_node(); - } -@@ -1084,18 +1118,12 @@ static void __cpuinit get_cpu_vendor(str +@@ -1088,18 +1122,12 @@ static void __cpuinit get_cpu_vendor(str c->x86_vendor = X86_VENDOR_UNKNOWN; } @@ -7702,15 +7516,15 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches c->loops_per_jiffy = loops_per_jiffy; c->x86_cache_size = -1; -@@ -1106,6 +1134,7 @@ void __cpuinit early_identify_cpu(struct - c->x86_clflush_size = 64; +@@ -1111,6 +1139,7 @@ void __cpuinit early_identify_cpu(struct c->x86_cache_alignment = c->x86_clflush_size; + #ifndef CONFIG_XEN c->x86_max_cores = 1; + c->x86_coreid_bits = 0; + #endif c->extended_cpuid_level = 0; memset(&c->x86_capability, 0, sizeof c->x86_capability); - -@@ -1114,7 +1143,7 @@ void __cpuinit early_identify_cpu(struct +@@ -1120,7 +1149,7 @@ void __cpuinit early_identify_cpu(struct (unsigned int *)&c->x86_vendor_id[0], (unsigned int *)&c->x86_vendor_id[8], (unsigned int *)&c->x86_vendor_id[4]); @@ -7719,7 +7533,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches get_cpu_vendor(c); /* Initialize the standard set of capabilities */ -@@ -1132,7 +1161,7 @@ void __cpuinit early_identify_cpu(struct +@@ -1138,7 +1167,7 @@ void __cpuinit early_identify_cpu(struct c->x86 += (tfms >> 20) & 0xff; if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; @@ -7728,8 +7542,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; } else { /* Have CPUID level 0 only - unheard of */ -@@ -1142,18 +1171,6 @@ void __cpuinit early_identify_cpu(struct - #ifdef CONFIG_SMP +@@ -1148,18 +1177,6 @@ void __cpuinit early_identify_cpu(struct + #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; #endif -} @@ -7747,7 +7561,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* AMD-defined flags: level 0x80000001 */ xlvl = cpuid_eax(0x80000000); c->extended_cpuid_level = xlvl; -@@ -1174,6 +1191,30 @@ void __cpuinit identify_cpu(struct cpuin +@@ -1180,6 +1197,30 @@ void __cpuinit identify_cpu(struct cpuin c->x86_capability[2] = cpuid_edx(0x80860001); } @@ -7778,7 +7592,7 @@ Automatically created from 
"patches.kernel.org/patch-2.6.25" by xen-port-patches init_scattered_cpuid_features(c); #ifndef CONFIG_XEN -@@ -1205,8 +1246,7 @@ void __cpuinit identify_cpu(struct cpuin +@@ -1211,8 +1252,7 @@ void __cpuinit identify_cpu(struct cpuin break; } @@ -7788,7 +7602,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * On SMP, boot_cpu_data holds the common feature set between -@@ -1216,31 +1256,55 @@ void __cpuinit identify_cpu(struct cpuin +@@ -1222,31 +1262,55 @@ void __cpuinit identify_cpu(struct cpuin */ if (c != &boot_cpu_data) { /* AND the already accumulated flags with these */ @@ -7850,7 +7664,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * Get CPU information for use by the procfs. -@@ -1249,116 +1313,41 @@ void __cpuinit print_cpu_info(struct cpu +@@ -1255,116 +1319,41 @@ void __cpuinit print_cpu_info(struct cpu static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; @@ -7982,10 +7796,10 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); - + - #ifdef CONFIG_SMP + #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) if (smp_num_siblings * c->x86_max_cores > 1) { seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); -@@ -1367,48 +1356,43 @@ static int show_cpuinfo(struct seq_file +@@ -1373,48 +1362,43 @@ static int show_cpuinfo(struct seq_file seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); } @@ -8056,7 +7870,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } seq_printf(m, "\n\n"); -@@ -1435,8 +1419,8 @@ static void c_stop(struct seq_file *m, v +@@ -1441,8 +1425,8 @@ static void c_stop(struct seq_file *m, v { } @@ -8067,8 +7881,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches .next = c_next, .stop = c_stop, .show = show_cpuinfo, ---- head-2010-04-29.orig/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/smp_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -168,7 +168,7 @@ void __send_IPI_shortcut(unsigned int sh } } @@ -8094,8 +7908,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * ---- head-2010-04-29.orig/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/smp_64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -33,7 +33,7 @@ #ifndef CONFIG_XEN @@ -8348,8 +8162,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void smp_send_stop(void) { ---- head-2010-04-29.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/time-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -28,47 +28,19 @@ * serialize accesses to xtime/lost_ticks). 
*/ @@ -8552,7 +8366,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches return sp[0]; if (sp[1] >> 22) return sp[1]; -@@ -749,25 +672,32 @@ static void init_missing_ticks_accountin +@@ -752,25 +675,32 @@ static void init_missing_ticks_accountin runstate->time[RUNSTATE_offline]; } @@ -8596,8 +8410,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } extern void (*late_time_init)(void); ---- head-2010-04-29.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -79,7 +79,8 @@ char ignore_fpu_irq = 0; * F0 0F bug workaround.. We have a special link segment * for this. @@ -9282,8 +9096,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches printk(KERN_INFO "Enabling fast FPU save and restore... "); set_in_cr4(X86_CR4_OSFXSR); printk("done.\n"); ---- head-2010-04-29.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -74,38 +74,41 @@ asmlinkage void alignment_check(void); asmlinkage void machine_check(void); asmlinkage void spurious_interrupt_bug(void); @@ -10019,8 +9833,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + return 1; +} +__setup("code_bytes=", code_bytes_setup); ---- head-2010-04-29.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -43,12 +43,7 @@ #include @@ -10152,7 +9966,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #ifdef CONFIG_XEN vsyscall_gtod_data.sysctl_enabled = 0; /* disable vgettimeofay() */ if (boot_cpu_has(X86_FEATURE_RDTSCP)) ---- head-2010-04-29.orig/arch/x86/kernel/xen_entry_64.S 2008-04-02 12:34:02.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/xen_entry_64.S 2008-04-02 12:34:02.000000000 +0200 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -/* @@ -10191,8 +10005,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \ - XEN_PUT_VCPU_INFO(reg) -#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg) ---- head-2010-04-29.orig/arch/x86/mach-xen/setup.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/arch/x86/mach-xen/setup.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mach-xen/setup.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/mach-xen/setup.c 2011-01-31 18:01:51.000000000 +0100 @@ -161,15 +161,12 @@ void __init machine_specific_arch_setup( /* Do an early initialization of the fixmap area */ @@ -10214,8 +10028,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } } --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2010-04-29/arch/x86/mm/fault-xen.c 2010-03-24 15:10:37.000000000 +0100 -@@ -0,0 +1,1025 @@ ++++ head-2011-03-11/arch/x86/mm/fault-xen.c 2011-01-31 18:01:51.000000000 +0100 +@@ -0,0 +1,1036 @@ +/* + * Copyright (C) 1995 Linus Torvalds + * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs. @@ -11156,6 +10970,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +DEFINE_SPINLOCK(pgd_lock); +LIST_HEAD(pgd_list); + ++#define pgd_page_table(what, pg) \ ++ spin_##what(&((struct mm_struct *)(pg)->private)->page_table_lock) ++ +void vmalloc_sync_all(void) +{ +#ifdef CONFIG_X86_32 @@ -11190,8 +11007,14 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + return; + } + list_for_each_entry(page, &pgd_list, lru) { -+ if (!vmalloc_sync_one(page_address(page), -+ address)) ++ pmd_t *pmd; ++ ++ pgd_page_table(lock, page); ++ pmd = vmalloc_sync_one(page_address(page), ++ address); ++ pgd_page_table(unlock, page); ++ ++ if (!pmd) + break; + } + spin_unlock_irqrestore(&pgd_lock, flags); @@ -11224,10 +11047,12 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ pgd_page_table(lock, page); + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); ++ pgd_page_table(unlock, page); + } + spin_unlock_irqrestore(&pgd_lock, flags); + set_bit(pgd_index(address), insync); @@ -11241,9 +11066,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + (__START_KERNEL & PGDIR_MASK))); +#endif +} ---- head-2010-04-29.orig/arch/x86/mm/fault_32-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/fault_32-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,757 +0,0 @@ +@@ -1,770 +0,0 @@ -/* - * linux/arch/i386/mm/fault.c - * @@ -11987,12 +11812,25 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - return; - } - for (page = pgd_list; page; page = -- (struct page *)page->index) -- if (!vmalloc_sync_one(page_address(page), -- address)) { +- (struct page *)page->index) { +- spinlock_t *lock = page->mapping +- ? 
&((struct mm_struct *)page->mapping) +- ->page_table_lock +- : NULL; +- pmd_t *pmd; +- +- if (lock) +- spin_lock(lock); +- pmd = vmalloc_sync_one(page_address(page), +- address); +- if (lock) +- spin_unlock(lock); +- +- if (!pmd) { - BUG_ON(page != pgd_list); - break; - } +- } - spin_unlock_irqrestore(&pgd_lock, flags); - if (!page) - set_bit(sync_index(address), insync); @@ -12001,9 +11839,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - start = address + (1UL << PMD_SHIFT); - } -} ---- head-2010-04-29.orig/arch/x86/mm/fault_64-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/fault_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,686 +0,0 @@ +@@ -1,692 +0,0 @@ -/* - * linux/arch/x86-64/mm/fault.c - * @@ -12653,6 +12491,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -DEFINE_SPINLOCK(pgd_lock); -LIST_HEAD(pgd_list); - +-#define pgd_page_table(what, pg) \ +- spin_##what(&((struct mm_struct *)(pg)->private)->page_table_lock) +- -void vmalloc_sync_all(void) -{ - /* Note that races in the updates of insync and start aren't @@ -12674,10 +12515,13 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); +- +- pgd_page_table(lock, page); - if (pgd_none(*pgd)) - set_pgd(pgd, *pgd_ref); - else - BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); +- pgd_page_table(unlock, page); - } - spin_unlock(&pgd_lock); - set_bit(pgd_index(address), insync); @@ -12690,8 +12534,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == - (__START_KERNEL & PGDIR_MASK))); -} ---- head-2010-04-29.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/highmem_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -18,6 +18,49 @@ void kunmap(struct page *page) kunmap_high(page); } @@ -12751,9 +12595,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); ---- head-2010-04-29.orig/arch/x86/mm/hypervisor.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/hypervisor.c 2010-03-24 15:10:37.000000000 +0100 -@@ -873,15 +873,11 @@ int xen_limit_pages_to_max_mfn( +--- head-2011-03-11.orig/arch/x86/mm/hypervisor.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/hypervisor.c 2011-01-31 18:01:51.000000000 +0100 +@@ -871,15 +871,11 @@ int xen_limit_pages_to_max_mfn( } EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn); @@ -12772,8 +12616,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define MAX_BATCHED_FULL_PTES 32 ---- head-2010-04-29.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/init_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/init_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -27,13 +27,13 @@ #include #include @@ -13659,8 
+13503,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } #endif - ---- head-2010-04-29.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:48:00.000000000 +0200 -+++ head-2010-04-29/arch/x86/mm/init_64-xen.c 2010-04-29 09:50:58.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/init_64-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_64-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -46,14 +46,13 @@ #include #include @@ -14650,37 +14494,48 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches return 0; } #endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/ioremap-xen.c 2010-03-24 15:10:37.000000000 +0100 -@@ -0,0 +1,687 @@ -+/* -+ * Re-map IO memory to kernel address space so that we can access it. -+ * This is needed for high PCI addresses that aren't mapped in the -+ * 640k-1MB IO memory area on PC's -+ * -+ * (C) Copyright 1995 1996 Linus Torvalds -+ */ -+ +--- head-2011-03-11.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:38:30.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/ioremap-xen.c 2011-02-07 15:38:58.000000000 +0100 +@@ -1,6 +1,4 @@ + /* +- * arch/i386/mm/ioremap.c +- * + * Re-map IO memory to kernel address space so that we can access it. + * This is needed for high PCI addresses that aren't mapped in the + * 640k-1MB IO memory area on PC's +@@ -8,24 +6,41 @@ + * (C) Copyright 1995 1996 Linus Torvalds + */ + +-#include +#include -+#include -+#include + #include +-#include +-#include + #include +-#include +-#include +#include +#include +#include +#include + -+#include + #include +-#include +#include +#include -+#include + #include +#include -+#include -+ + #include + +-#define ISA_START_ADDRESS 0x0 +-#define ISA_END_ADDRESS 0x100000 +enum ioremap_mode { + IOR_MODE_UNCACHED, + IOR_MODE_CACHED, +}; -+ + +-static int direct_remap_area_pte_fn(pte_t *pte, +#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN) + +unsigned long __phys_addr(unsigned long x) @@ -14694,159 +14549,99 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif + +static int direct_remap_area_pte_fn(pte_t *pte, -+ struct page *pmd_page, + struct page *pmd_page, +- unsigned long address, + unsigned long address, -+ void *data) -+{ -+ mmu_update_t **v = (mmu_update_t **)data; -+ -+ BUG_ON(!pte_none(*pte)); -+ -+ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) << -+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK); -+ (*v)++; -+ -+ return 0; -+} -+ -+static int __direct_remap_pfn_range(struct mm_struct *mm, + void *data) + { + mmu_update_t **v = (mmu_update_t **)data; +@@ -40,9 +55,9 @@ static int direct_remap_area_pte_fn(pte_ + } + + static int __direct_remap_pfn_range(struct mm_struct *mm, +- unsigned long address, +- unsigned long mfn, +- unsigned long size, + unsigned long address, -+ unsigned long mfn, ++ phys_addr_t mfn, + unsigned long size, -+ pgprot_t prot, -+ domid_t domid) -+{ -+ int rc; -+ unsigned long i, start_address; -+ mmu_update_t *u, *v, *w; -+ -+ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); -+ if (u == NULL) -+ return -ENOMEM; -+ -+ start_address = address; -+ -+ flush_cache_all(); -+ -+ for (i = 0; i < size; i += PAGE_SIZE) { -+ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) { -+ /* Flush a full batch after filling in the PTE ptrs. 
*/ + pgprot_t prot, + domid_t domid) + { +@@ -61,7 +76,7 @@ static int __direct_remap_pfn_range(stru + for (i = 0; i < size; i += PAGE_SIZE) { + if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) { + /* Flush a full batch after filling in the PTE ptrs. */ +- rc = apply_to_page_range(mm, start_address, + rc = apply_to_page_range(mm, start_address, -+ address - start_address, -+ direct_remap_area_pte_fn, &w); -+ if (rc) -+ goto out; -+ rc = -EFAULT; -+ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0) -+ goto out; -+ v = w = u; -+ start_address = address; -+ } -+ -+ /* -+ * Fill in the machine address: PTE ptr is done later by + address - start_address, + direct_remap_area_pte_fn, &w); + if (rc) +@@ -75,12 +90,12 @@ static int __direct_remap_pfn_range(stru + + /* + * Fill in the machine address: PTE ptr is done later by +- * apply_to_page_range(). + * apply_to_page_range(). -+ */ -+ v->val = __pte_val(pfn_pte_ma(mfn, prot)) | _PAGE_IO; -+ -+ mfn++; + */ + v->val = __pte_val(pfn_pte_ma(mfn, prot)) | _PAGE_IO; + + mfn++; +- address += PAGE_SIZE; + address += PAGE_SIZE; -+ v++; -+ } -+ -+ if (v != u) { -+ /* Final batch. */ -+ rc = apply_to_page_range(mm, start_address, -+ address - start_address, -+ direct_remap_area_pte_fn, &w); -+ if (rc) -+ goto out; -+ rc = -EFAULT; -+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)) -+ goto out; -+ } -+ -+ rc = 0; -+ -+ out: -+ flush_tlb_all(); -+ -+ free_page((unsigned long)u); -+ -+ return rc; -+} -+ -+int direct_remap_pfn_range(struct vm_area_struct *vma, + v++; + } + +@@ -103,9 +118,9 @@ static int __direct_remap_pfn_range(stru + } + + int direct_remap_pfn_range(struct vm_area_struct *vma, +- unsigned long address, +- unsigned long mfn, +- unsigned long size, + unsigned long address, -+ unsigned long mfn, ++ phys_addr_t mfn, + unsigned long size, -+ pgprot_t prot, -+ domid_t domid) -+{ -+ if (xen_feature(XENFEAT_auto_translated_physmap)) -+ return remap_pfn_range(vma, address, mfn, size, prot); -+ -+ if (domid == DOMID_SELF) -+ return -EINVAL; -+ -+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; -+ -+ vma->vm_mm->context.has_foreign_mappings = 1; -+ -+ return __direct_remap_pfn_range( -+ vma->vm_mm, address, mfn, size, prot, domid); -+} -+EXPORT_SYMBOL(direct_remap_pfn_range); -+ + pgprot_t prot, + domid_t domid) + { +@@ -124,9 +139,9 @@ int direct_remap_pfn_range(struct vm_are + } + EXPORT_SYMBOL(direct_remap_pfn_range); + +-int direct_kernel_remap_pfn_range(unsigned long address, +int direct_kernel_remap_pfn_range(unsigned long address, -+ unsigned long mfn, + unsigned long mfn, +- unsigned long size, + unsigned long size, -+ pgprot_t prot, -+ domid_t domid) -+{ -+ return __direct_remap_pfn_range( -+ &init_mm, address, mfn, size, prot, domid); -+} -+EXPORT_SYMBOL(direct_kernel_remap_pfn_range); -+ -+static int lookup_pte_fn( -+ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) -+{ -+ uint64_t *ptep = (uint64_t *)data; -+ if (ptep) -+ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) << -+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK); -+ return 0; -+} -+ + pgprot_t prot, + domid_t domid) + { +@@ -145,7 +160,7 @@ static int lookup_pte_fn( + return 0; + } + +-int create_lookup_pte_addr(struct mm_struct *mm, +int create_lookup_pte_addr(struct mm_struct *mm, -+ unsigned long address, -+ uint64_t *ptep) -+{ -+ return apply_to_page_range(mm, address, PAGE_SIZE, -+ lookup_pte_fn, ptep); -+} -+ -+EXPORT_SYMBOL(create_lookup_pte_addr); -+ -+static int noop_fn( -+ pte_t *pte, struct page *pmd_page, unsigned long 
addr, void *data) -+{ -+ return 0; -+} -+ -+int touch_pte_range(struct mm_struct *mm, -+ unsigned long address, -+ unsigned long size) -+{ -+ return apply_to_page_range(mm, address, size, noop_fn, NULL); -+} -+ -+EXPORT_SYMBOL(touch_pte_range); -+ + unsigned long address, + uint64_t *ptep) + { +@@ -155,21 +170,69 @@ int create_lookup_pte_addr(struct mm_str + + EXPORT_SYMBOL(create_lookup_pte_addr); + +-/* +- * Does @address reside within a non-highmem page that is local to this virtual +- * machine (i.e., not an I/O page, nor a memory page belonging to another VM). +- * See the comment that accompanies mfn_to_local_pfn() in page.h to understand +- * why this works. +- */ +-static inline int is_local_lowmem(unsigned long address) +#ifdef CONFIG_X86_32 +int page_is_ram(unsigned long pagenr) -+{ + { +- extern unsigned long max_low_pfn; +- return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn); + unsigned long addr, end; + int i; + @@ -14882,13 +14677,14 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + return 1; + } + return 0; -+} + } +#endif -+ -+/* + + /* +- * Generic mapping function (not visible outside): + * Fix up the linear direct mapping of the kernel to avoid cache attribute + * conflicts. -+ */ + */ +static int ioremap_change_attr(unsigned long vaddr, unsigned long size, + enum ioremap_mode mode) +{ @@ -14907,53 +14703,65 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + + return err; +} -+ -+/* -+ * Remap an arbitrary physical address space into the kernel virtual -+ * address space. Needed when the kernel wants to access high addresses -+ * directly. -+ * -+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously -+ * have to convert them into an offset in a page-aligned mapping, but the -+ * caller shouldn't need to know that small detail. -+ */ + + /* + * Remap an arbitrary physical address space into the kernel virtual +@@ -180,11 +243,12 @@ static inline int is_local_lowmem(unsign + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ +-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) +static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, + enum ioremap_mode mode) -+{ -+ unsigned long mfn, offset, last_addr, vaddr; + { +- void __iomem * addr; +- struct vm_struct * area; +- unsigned long offset, last_addr; ++ unsigned long offset, vaddr; ++ phys_addr_t mfn, last_addr; + struct vm_struct *area; -+ pgprot_t prot; -+ domid_t domid = DOMID_IO; -+ -+ /* Don't allow wraparound or zero size */ -+ last_addr = phys_addr + size - 1; -+ if (!size || last_addr < phys_addr) -+ return NULL; -+ -+ /* -+ * Don't remap the low PCI/ISA area, it's always mapped.. -+ */ -+ if (is_initial_xendomain() && last_addr < ISA_END_ADDRESS) -+ return (__force void __iomem *)isa_bus_to_virt((unsigned long)phys_addr); -+ -+ /* -+ * Don't allow anybody to remap normal RAM that we're using.. -+ */ + pgprot_t prot; + domid_t domid = DOMID_IO; + +@@ -196,28 +260,40 @@ void __iomem * __ioremap(unsigned long p + /* + * Don't remap the low PCI/ISA area, it's always mapped.. 
+ */ +- if (is_initial_xendomain() && +- phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS) +- return (void __iomem *) isa_bus_to_virt(phys_addr); ++ if (is_initial_xendomain() && last_addr < ISA_END_ADDRESS) ++ return (__force void __iomem *)isa_bus_to_virt((unsigned long)phys_addr); + + /* + * Don't allow anybody to remap normal RAM that we're using.. + */ +- if (is_local_lowmem(phys_addr)) { +- char *t_addr, *t_end; +- struct page *page; +- +- t_addr = bus_to_virt(phys_addr); +- t_end = t_addr + (size - 1); +- +- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) +- if(!PageReserved(page)) +- return NULL; + for (mfn = PFN_DOWN(phys_addr); mfn < PFN_UP(last_addr); mfn++) { + unsigned long pfn = mfn_to_local_pfn(mfn); + + if (pfn >= max_pfn) + continue; -+ -+ domid = DOMID_SELF; + + domid = DOMID_SELF; + + if (pfn >= max_pfn_mapped) /* bogus */ + continue; + + if (pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) + return NULL; -+ } -+ + } + +- prot = __pgprot(_KERNPG_TABLE | flags); + switch (mode) { + case IOR_MODE_UNCACHED: + default: @@ -14967,27 +14775,29 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + prot = PAGE_KERNEL; + break; + } -+ -+ /* -+ * Mappings have to be page-aligned -+ */ -+ offset = phys_addr & ~PAGE_MASK; -+ phys_addr &= PAGE_MASK; -+ size = PAGE_ALIGN(last_addr+1) - phys_addr; -+ -+ /* -+ * Ok, go for it.. -+ */ + + /* + * Mappings have to be page-aligned +@@ -229,20 +305,24 @@ void __iomem * __ioremap(unsigned long p + /* + * Ok, go for it.. + */ +- area = get_vm_area(size, VM_IOREMAP | (flags << 20)); + area = get_vm_area(size, VM_IOREMAP | (mode << 20)); -+ if (!area) -+ return NULL; -+ area->phys_addr = phys_addr; + if (!area) + return NULL; + area->phys_addr = phys_addr; +- addr = (void __iomem *) area->addr; +- if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr, +- phys_addr>>PAGE_SHIFT, + vaddr = (unsigned long) area->addr; + if (__direct_remap_pfn_range(&init_mm, vaddr, PFN_DOWN(phys_addr), -+ size, prot, domid)) { + size, prot, domid)) { +- vunmap((void __force *) addr); + free_vm_area(area); -+ return NULL; -+ } + return NULL; + } +- return (void __iomem *) (offset + (char __iomem *)addr); + + if (ioremap_change_attr(vaddr, size, mode) < 0) { + iounmap((void __iomem *) vaddr); @@ -14995,83 +14805,108 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + } + + return (void __iomem *) (vaddr + offset); -+} -+ -+/** -+ * ioremap_nocache - map bus memory into CPU space -+ * @offset: bus address of the memory -+ * @size: size of the resource to map -+ * -+ * ioremap_nocache performs a platform specific sequence of operations to -+ * make bus memory CPU accessible via the readb/readw/readl/writeb/ -+ * writew/writel functions and the other mmio helpers. The returned -+ * address is not guaranteed to be usable directly as a virtual + } +-EXPORT_SYMBOL(__ioremap); + + /** + * ioremap_nocache - map bus memory into CPU space +@@ -253,54 +333,30 @@ EXPORT_SYMBOL(__ioremap); + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual +- * address. + * address. 
-+ * -+ * This version of ioremap ensures that the memory is marked uncachable -+ * on the CPU as well as honouring existing caching rules from things like + * + * This version of ioremap ensures that the memory is marked uncachable + * on the CPU as well as honouring existing caching rules from things like +- * the PCI bus. Note that there are other caches and buffers on many + * the PCI bus. Note that there are other caches and buffers on many -+ * busses. In particular driver authors should read up on PCI writes -+ * -+ * It's useful if some control registers are in such an area and -+ * write combining or read caching is not desirable: + * busses. In particular driver authors should read up on PCI writes + * + * It's useful if some control registers are in such an area and + * write combining or read caching is not desirable: +- * + * -+ * Must be freed with iounmap. -+ */ + * Must be freed with iounmap. + */ +- +-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) +void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) -+{ + { +- unsigned long last_addr; +- void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD); +- if (!p) +- return p; +- +- /* Guaranteed to be > phys_addr, as per __ioremap() */ +- last_addr = phys_addr + size - 1; +- +- if (is_local_lowmem(last_addr)) { +- struct page *ppage = virt_to_page(bus_to_virt(phys_addr)); +- unsigned long npages; +- +- phys_addr &= PAGE_MASK; +- +- /* This might overflow and become zero.. */ +- last_addr = PAGE_ALIGN(last_addr); +- +- /* .. but that's ok, because modulo-2**n arithmetic will make +- * the page-aligned "last - first" come out right. +- */ +- npages = (last_addr - phys_addr) >> PAGE_SHIFT; +- +- if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { +- iounmap(p); +- p = NULL; +- } +- global_flush_tlb(); +- } +- +- return p; + return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); -+} -+EXPORT_SYMBOL(ioremap_nocache); -+ + } + EXPORT_SYMBOL(ioremap_nocache); + +void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) +{ + return __ioremap(phys_addr, size, IOR_MODE_CACHED); +} +EXPORT_SYMBOL(ioremap_cache); + -+/** -+ * iounmap - Free a IO remapping -+ * @addr: virtual address from ioremap_* -+ * -+ * Caller must ensure there is only one unmapping for the same pointer. -+ */ -+void iounmap(volatile void __iomem *addr) -+{ -+ struct vm_struct *p, *o; -+ -+ if ((void __force *)addr <= high_memory) -+ return; -+ -+ /* -+ * __ioremap special-cases the PCI/ISA range by not instantiating a -+ * vm_area and by simply returning an address into the kernel mapping -+ * of ISA space. So handle that here. -+ */ + /** + * iounmap - Free a IO remapping + * @addr: virtual address from ioremap_* +@@ -319,10 +375,11 @@ void iounmap(volatile void __iomem *addr + * vm_area and by simply returning an address into the kernel mapping + * of ISA space. So handle that here. + */ +- if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) + if ((unsigned long)addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) -+ return; -+ + return; + +- addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr); + addr = (volatile void __iomem *) + (PAGE_MASK & (unsigned long __force)addr); -+ -+ /* Use the vm area unlocked, assuming the caller -+ ensures there isn't another iounmap for the same address -+ in parallel. Reuse of the virtual address is prevented by -+ leaving it in the global lists until we're done with it. -+ cpa takes care of the direct mappings. 
*/ -+ read_lock(&vmlist_lock); -+ for (p = vmlist; p; p = p->next) { -+ if (p->addr == addr) -+ break; -+ } -+ read_unlock(&vmlist_lock); -+ -+ if (!p) { + + /* Use the vm area unlocked, assuming the caller + ensures there isn't another iounmap for the same address +@@ -337,44 +394,209 @@ void iounmap(volatile void __iomem *addr + read_unlock(&vmlist_lock); + + if (!p) { +- printk("iounmap: bad address %p\n", addr); + printk(KERN_ERR "iounmap: bad address %p\n", addr); -+ dump_stack(); -+ return; -+ } -+ + dump_stack(); + return; + } + +- /* Reset the direct mapping. Can block */ +- if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) { +- change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)), +- get_vm_area_size(p) >> PAGE_SHIFT, +- PAGE_KERNEL); +- global_flush_tlb(); +- } + if ((p->flags >> 20) != IOR_MODE_CACHED) { + unsigned long n = get_vm_area_size(p) >> PAGE_SHIFT; + unsigned long mfn = p->phys_addr; @@ -15081,14 +14916,16 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + if (mfn_to_local_pfn(mfn) < max_pfn) + set_memory_wb(va, 1); + } -+ -+ /* Finally remove it */ -+ o = remove_vm_area((void *)addr); -+ BUG_ON(p != o || o == NULL); + + /* Finally remove it */ + o = remove_vm_area((void *)addr); + BUG_ON(p != o || o == NULL); +- kfree(p); + kfree(p); -+} -+EXPORT_SYMBOL(iounmap); -+ + } + EXPORT_SYMBOL(iounmap); + +-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) +int __initdata early_ioremap_debug; + +static int __init early_ioremap_debug_setup(char *str) @@ -15103,635 +14940,171 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] + __attribute__((aligned(PAGE_SIZE))); + -+#ifdef CONFIG_X86_32 -+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) -+{ -+ /* Don't assume we're using swapper_pg_dir at this point */ -+ pgd_t *base = __va(read_cr3()); -+ pgd_t *pgd = &base[pgd_index(addr)]; -+ pud_t *pud = pud_offset(pgd, addr); -+ pmd_t *pmd = pmd_offset(pud, addr); -+ -+ return pmd; -+} -+#else -+#define early_ioremap_pmd early_get_pmd -+#define make_lowmem_page_readonly early_make_page_readonly -+#define make_lowmem_page_writable make_page_writable -+#endif -+ -+static inline pte_t * __init early_ioremap_pte(unsigned long addr) -+{ -+ return &bm_pte[pte_index(addr)]; -+} -+ -+void __init early_ioremap_init(void) -+{ -+ pmd_t *pmd; -+ -+ if (early_ioremap_debug) -+ printk(KERN_INFO "early_ioremap_init()\n"); -+ -+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); -+ memset(bm_pte, 0, sizeof(bm_pte)); -+ make_lowmem_page_readonly(bm_pte, XENFEAT_writable_page_tables); -+ pmd_populate_kernel(&init_mm, pmd, bm_pte); -+ -+ /* -+ * The boot-ioremap range spans multiple pmds, for which -+ * we are not prepared: -+ */ -+ if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { -+ WARN_ON(1); -+ printk(KERN_WARNING "pmd %p != %p\n", -+ pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); -+ printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", -+ fix_to_virt(FIX_BTMAP_BEGIN)); -+ printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n", -+ fix_to_virt(FIX_BTMAP_END)); -+ -+ printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END); -+ printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", -+ FIX_BTMAP_BEGIN); -+ } -+} -+ -+#ifdef CONFIG_X86_32 -+void __init early_ioremap_clear(void) -+{ -+ pmd_t *pmd; -+ -+ if (early_ioremap_debug) -+ printk(KERN_INFO "early_ioremap_clear()\n"); -+ -+ pmd = 
early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); -+ pmd_clear(pmd); -+ make_lowmem_page_writable(bm_pte, XENFEAT_writable_page_tables); -+ /* paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT); */ -+ __flush_tlb_all(); -+} -+ -+void __init early_ioremap_reset(void) -+{ -+ enum fixed_addresses idx; -+ unsigned long addr, phys; -+ pte_t *pte; -+ -+ after_paging_init = 1; -+ for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { -+ addr = fix_to_virt(idx); -+ pte = early_ioremap_pte(addr); -+ if (pte_present(*pte)) { -+ phys = __pte_val(*pte) & PAGE_MASK; -+ set_fixmap(idx, phys); -+ } -+ } -+} -+#endif /* CONFIG_X86_32 */ -+ -+static void __init __early_set_fixmap(enum fixed_addresses idx, -+ unsigned long phys, pgprot_t flags) -+{ -+ unsigned long addr = __fix_to_virt(idx); -+ pte_t *pte; -+ -+ if (idx >= __end_of_fixed_addresses) { -+ BUG(); -+ return; -+ } -+ pte = early_ioremap_pte(addr); -+ if (pgprot_val(flags)) -+ set_pte(pte, pfn_pte_ma(phys >> PAGE_SHIFT, flags)); -+ else -+ pte_clear(NULL, addr, pte); -+ __flush_tlb_one(addr); -+} -+ -+static inline void __init early_set_fixmap(enum fixed_addresses idx, -+ unsigned long phys) -+{ -+ if (after_paging_init) -+ set_fixmap(idx, phys); -+ else -+ __early_set_fixmap(idx, phys, PAGE_KERNEL); -+} -+ -+static inline void __init early_clear_fixmap(enum fixed_addresses idx) -+{ -+ if (after_paging_init) -+ clear_fixmap(idx); -+ else -+ __early_set_fixmap(idx, 0, __pgprot(0)); -+} -+ -+ -+int __initdata early_ioremap_nested; -+ -+static int __init check_early_ioremap_leak(void) -+{ -+ if (!early_ioremap_nested) -+ return 0; -+ -+ printk(KERN_WARNING -+ "Debug warning: early ioremap leak of %d areas detected.\n", -+ early_ioremap_nested); -+ printk(KERN_WARNING -+ "please boot with early_ioremap_debug and report the dmesg.\n"); -+ WARN_ON(1); -+ -+ return 1; -+} -+late_initcall(check_early_ioremap_leak); -+ -+void __init *early_ioremap(unsigned long phys_addr, unsigned long size) -+{ -+ unsigned long offset, last_addr; -+ unsigned int nrpages, nesting; -+ enum fixed_addresses idx0, idx; -+ -+ WARN_ON(system_state != SYSTEM_BOOTING); -+ -+ nesting = early_ioremap_nested; -+ if (early_ioremap_debug) { -+ printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ", -+ phys_addr, size, nesting); -+ dump_stack(); -+ } -+ -+ /* Don't allow wraparound or zero size */ -+ last_addr = phys_addr + size - 1; -+ if (!size || last_addr < phys_addr) { -+ WARN_ON(1); -+ return NULL; -+ } -+ -+ if (nesting >= FIX_BTMAPS_NESTING) { -+ WARN_ON(1); -+ return NULL; -+ } -+ early_ioremap_nested++; -+ /* -+ * Mappings have to be page-aligned -+ */ -+ offset = phys_addr & ~PAGE_MASK; -+ phys_addr &= PAGE_MASK; -+ size = PAGE_ALIGN(last_addr) - phys_addr; -+ -+ /* -+ * Mappings have to fit in the FIX_BTMAP area. -+ */ -+ nrpages = size >> PAGE_SHIFT; -+ if (nrpages > NR_FIX_BTMAPS) { -+ WARN_ON(1); -+ return NULL; -+ } -+ -+ /* -+ * Ok, go for it.. 
-+ */ -+ idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting; -+ idx = idx0; -+ while (nrpages > 0) { -+ early_set_fixmap(idx, phys_addr); -+ phys_addr += PAGE_SIZE; -+ --idx; -+ --nrpages; -+ } -+ if (early_ioremap_debug) -+ printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0)); -+ -+ return (void *) (offset + fix_to_virt(idx0)); -+} -+ -+void __init early_iounmap(void *addr, unsigned long size) -+{ -+ unsigned long virt_addr; -+ unsigned long offset; -+ unsigned int nrpages; -+ enum fixed_addresses idx; -+ unsigned int nesting; -+ -+ nesting = --early_ioremap_nested; -+ WARN_ON(nesting < 0); -+ -+ if (early_ioremap_debug) { -+ printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr, -+ size, nesting); -+ dump_stack(); -+ } -+ -+ virt_addr = (unsigned long)addr; -+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) { -+ WARN_ON(1); -+ return; -+ } -+ offset = virt_addr & ~PAGE_MASK; -+ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT; -+ -+ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting; -+ while (nrpages > 0) { -+ early_clear_fixmap(idx); -+ --idx; -+ --nrpages; -+ } -+} -+ -+void __this_fixmap_does_not_exist(void) -+{ -+ WARN_ON(1); -+} ---- head-2010-04-29.orig/arch/x86/mm/ioremap_32-xen.c 2010-03-24 15:09:22.000000000 +0100 -+++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,445 +0,0 @@ --/* -- * arch/i386/mm/ioremap.c -- * -- * Re-map IO memory to kernel address space so that we can access it. -- * This is needed for high PCI addresses that aren't mapped in the -- * 640k-1MB IO memory area on PC's -- * -- * (C) Copyright 1995 1996 Linus Torvalds -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#define ISA_START_ADDRESS 0x0 --#define ISA_END_ADDRESS 0x100000 -- --static int direct_remap_area_pte_fn(pte_t *pte, -- struct page *pmd_page, -- unsigned long address, -- void *data) --{ -- mmu_update_t **v = (mmu_update_t **)data; -- -- BUG_ON(!pte_none(*pte)); -- -- (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) << -- PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK); -- (*v)++; -- -- return 0; --} -- --static int __direct_remap_pfn_range(struct mm_struct *mm, -- unsigned long address, -- unsigned long mfn, -- unsigned long size, -- pgprot_t prot, -- domid_t domid) --{ -- int rc; -- unsigned long i, start_address; -- mmu_update_t *u, *v, *w; -- -- u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); -- if (u == NULL) -- return -ENOMEM; -- -- start_address = address; -- -- flush_cache_all(); -- -- for (i = 0; i < size; i += PAGE_SIZE) { -- if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) { -- /* Flush a full batch after filling in the PTE ptrs. */ -- rc = apply_to_page_range(mm, start_address, -- address - start_address, -- direct_remap_area_pte_fn, &w); -- if (rc) -- goto out; -- rc = -EFAULT; -- if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0) -- goto out; -- v = w = u; -- start_address = address; -- } -- -- /* -- * Fill in the machine address: PTE ptr is done later by -- * apply_to_page_range(). -- */ -- v->val = __pte_val(pfn_pte_ma(mfn, prot)) | _PAGE_IO; -- -- mfn++; -- address += PAGE_SIZE; -- v++; -- } -- -- if (v != u) { -- /* Final batch. 
*/ -- rc = apply_to_page_range(mm, start_address, -- address - start_address, -- direct_remap_area_pte_fn, &w); -- if (rc) -- goto out; -- rc = -EFAULT; -- if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)) -- goto out; -- } -- -- rc = 0; -- -- out: -- flush_tlb_all(); -- -- free_page((unsigned long)u); -- -- return rc; --} -- --int direct_remap_pfn_range(struct vm_area_struct *vma, -- unsigned long address, -- unsigned long mfn, -- unsigned long size, -- pgprot_t prot, -- domid_t domid) --{ -- if (xen_feature(XENFEAT_auto_translated_physmap)) -- return remap_pfn_range(vma, address, mfn, size, prot); -- -- if (domid == DOMID_SELF) -- return -EINVAL; -- -- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; -- -- vma->vm_mm->context.has_foreign_mappings = 1; -- -- return __direct_remap_pfn_range( -- vma->vm_mm, address, mfn, size, prot, domid); --} --EXPORT_SYMBOL(direct_remap_pfn_range); -- --int direct_kernel_remap_pfn_range(unsigned long address, -- unsigned long mfn, -- unsigned long size, -- pgprot_t prot, -- domid_t domid) --{ -- return __direct_remap_pfn_range( -- &init_mm, address, mfn, size, prot, domid); --} --EXPORT_SYMBOL(direct_kernel_remap_pfn_range); -- --static int lookup_pte_fn( -- pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) --{ -- uint64_t *ptep = (uint64_t *)data; -- if (ptep) -- *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) << -- PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK); -- return 0; --} -- --int create_lookup_pte_addr(struct mm_struct *mm, -- unsigned long address, -- uint64_t *ptep) --{ -- return apply_to_page_range(mm, address, PAGE_SIZE, -- lookup_pte_fn, ptep); --} -- --EXPORT_SYMBOL(create_lookup_pte_addr); -- --static int noop_fn( -- pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) --{ -- return 0; --} -- --int touch_pte_range(struct mm_struct *mm, -- unsigned long address, -- unsigned long size) --{ -- return apply_to_page_range(mm, address, size, noop_fn, NULL); --} -- --EXPORT_SYMBOL(touch_pte_range); -- --/* -- * Does @address reside within a non-highmem page that is local to this virtual -- * machine (i.e., not an I/O page, nor a memory page belonging to another VM). -- * See the comment that accompanies mfn_to_local_pfn() in page.h to understand -- * why this works. -- */ --static inline int is_local_lowmem(unsigned long address) --{ -- extern unsigned long max_low_pfn; -- return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn); --} -- --/* -- * Generic mapping function (not visible outside): -- */ -- --/* -- * Remap an arbitrary physical address space into the kernel virtual -- * address space. Needed when the kernel wants to access high addresses -- * directly. -- * -- * NOTE! We need to allow non-page-aligned mappings too: we will obviously -- * have to convert them into an offset in a page-aligned mapping, but the -- * caller shouldn't need to know that small detail. -- */ --void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) --{ -- void __iomem * addr; -- struct vm_struct * area; -- unsigned long offset, last_addr; -- pgprot_t prot; -- domid_t domid = DOMID_IO; -- -- /* Don't allow wraparound or zero size */ -- last_addr = phys_addr + size - 1; -- if (!size || last_addr < phys_addr) -- return NULL; -- -- /* -- * Don't remap the low PCI/ISA area, it's always mapped.. 
-- */ -- if (is_initial_xendomain() && -- phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS) -- return (void __iomem *) isa_bus_to_virt(phys_addr); -- -- /* -- * Don't allow anybody to remap normal RAM that we're using.. -- */ -- if (is_local_lowmem(phys_addr)) { -- char *t_addr, *t_end; -- struct page *page; -- -- t_addr = bus_to_virt(phys_addr); -- t_end = t_addr + (size - 1); -- -- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) -- if(!PageReserved(page)) -- return NULL; -- -- domid = DOMID_SELF; -- } -- -- prot = __pgprot(_KERNPG_TABLE | flags); -- -- /* -- * Mappings have to be page-aligned -- */ -- offset = phys_addr & ~PAGE_MASK; -- phys_addr &= PAGE_MASK; -- size = PAGE_ALIGN(last_addr+1) - phys_addr; -- -- /* -- * Ok, go for it.. -- */ -- area = get_vm_area(size, VM_IOREMAP | (flags << 20)); -- if (!area) -- return NULL; -- area->phys_addr = phys_addr; -- addr = (void __iomem *) area->addr; -- if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr, -- phys_addr>>PAGE_SHIFT, -- size, prot, domid)) { -- vunmap((void __force *) addr); -- return NULL; -- } -- return (void __iomem *) (offset + (char __iomem *)addr); --} --EXPORT_SYMBOL(__ioremap); -- --/** -- * ioremap_nocache - map bus memory into CPU space -- * @offset: bus address of the memory -- * @size: size of the resource to map -- * -- * ioremap_nocache performs a platform specific sequence of operations to -- * make bus memory CPU accessible via the readb/readw/readl/writeb/ -- * writew/writel functions and the other mmio helpers. The returned -- * address is not guaranteed to be usable directly as a virtual -- * address. -- * -- * This version of ioremap ensures that the memory is marked uncachable -- * on the CPU as well as honouring existing caching rules from things like -- * the PCI bus. Note that there are other caches and buffers on many -- * busses. In particular driver authors should read up on PCI writes -- * -- * It's useful if some control registers are in such an area and -- * write combining or read caching is not desirable: -- * -- * Must be freed with iounmap. -- */ -- --void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) --{ -- unsigned long last_addr; -- void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD); -- if (!p) -- return p; -- -- /* Guaranteed to be > phys_addr, as per __ioremap() */ -- last_addr = phys_addr + size - 1; -- -- if (is_local_lowmem(last_addr)) { -- struct page *ppage = virt_to_page(bus_to_virt(phys_addr)); -- unsigned long npages; -- -- phys_addr &= PAGE_MASK; -- -- /* This might overflow and become zero.. */ -- last_addr = PAGE_ALIGN(last_addr); -- -- /* .. but that's ok, because modulo-2**n arithmetic will make -- * the page-aligned "last - first" come out right. -- */ -- npages = (last_addr - phys_addr) >> PAGE_SHIFT; -- -- if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { -- iounmap(p); -- p = NULL; -- } -- global_flush_tlb(); -- } -- -- return p; --} --EXPORT_SYMBOL(ioremap_nocache); -- --/** -- * iounmap - Free a IO remapping -- * @addr: virtual address from ioremap_* -- * -- * Caller must ensure there is only one unmapping for the same pointer. -- */ --void iounmap(volatile void __iomem *addr) --{ -- struct vm_struct *p, *o; -- -- if ((void __force *)addr <= high_memory) -- return; -- -- /* -- * __ioremap special-cases the PCI/ISA range by not instantiating a -- * vm_area and by simply returning an address into the kernel mapping -- * of ISA space. So handle that here. 
-- */ -- if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) -- return; -- -- addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr); -- -- /* Use the vm area unlocked, assuming the caller -- ensures there isn't another iounmap for the same address -- in parallel. Reuse of the virtual address is prevented by -- leaving it in the global lists until we're done with it. -- cpa takes care of the direct mappings. */ -- read_lock(&vmlist_lock); -- for (p = vmlist; p; p = p->next) { -- if (p->addr == addr) -- break; -- } -- read_unlock(&vmlist_lock); -- -- if (!p) { -- printk("iounmap: bad address %p\n", addr); -- dump_stack(); -- return; -- } -- -- /* Reset the direct mapping. Can block */ -- if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) { -- change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)), -- get_vm_area_size(p) >> PAGE_SHIFT, -- PAGE_KERNEL); -- global_flush_tlb(); -- } -- -- /* Finally remove it */ -- o = remove_vm_area((void *)addr); -- BUG_ON(p != o || o == NULL); -- kfree(p); --} --EXPORT_SYMBOL(iounmap); -- --void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) --{ ++#ifdef CONFIG_X86_32 ++static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) ++{ ++ /* Don't assume we're using swapper_pg_dir at this point */ ++ pgd_t *base = __va(read_cr3()); ++ pgd_t *pgd = &base[pgd_index(addr)]; ++ pud_t *pud = pud_offset(pgd, addr); ++ pmd_t *pmd = pmd_offset(pud, addr); ++ ++ return pmd; ++} ++#else ++#define early_ioremap_pmd early_get_pmd ++#define make_lowmem_page_readonly early_make_page_readonly ++#define make_lowmem_page_writable make_page_writable ++#endif ++ ++static inline pte_t * __init early_ioremap_pte(unsigned long addr) ++{ ++ return &bm_pte[pte_index(addr)]; ++} ++ ++void __init early_ioremap_init(void) ++{ ++ pmd_t *pmd; ++ ++ if (early_ioremap_debug) ++ printk(KERN_INFO "early_ioremap_init()\n"); ++ ++ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); ++ memset(bm_pte, 0, sizeof(bm_pte)); ++ make_lowmem_page_readonly(bm_pte, XENFEAT_writable_page_tables); ++ pmd_populate_kernel(&init_mm, pmd, bm_pte); ++ ++ /* ++ * The boot-ioremap range spans multiple pmds, for which ++ * we are not prepared: ++ */ ++ if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { ++ WARN_ON(1); ++ printk(KERN_WARNING "pmd %p != %p\n", ++ pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); ++ printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", ++ fix_to_virt(FIX_BTMAP_BEGIN)); ++ printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n", ++ fix_to_virt(FIX_BTMAP_END)); ++ ++ printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END); ++ printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", ++ FIX_BTMAP_BEGIN); ++ } ++} ++ ++#ifdef CONFIG_X86_32 ++void __init early_ioremap_clear(void) ++{ ++ pmd_t *pmd; ++ ++ if (early_ioremap_debug) ++ printk(KERN_INFO "early_ioremap_clear()\n"); ++ ++ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); ++ pmd_clear(pmd); ++ make_lowmem_page_writable(bm_pte, XENFEAT_writable_page_tables); ++ /* paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT); */ ++ __flush_tlb_all(); ++} ++ ++void __init early_ioremap_reset(void) + { - unsigned long offset, last_addr; - unsigned int nrpages; -- enum fixed_addresses idx; -- -- /* Don't allow wraparound or zero size */ -- last_addr = phys_addr + size - 1; + enum fixed_addresses idx; ++ unsigned long addr, phys; ++ pte_t *pte; ++ ++ after_paging_init = 1; ++ for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { ++ addr = fix_to_virt(idx); ++ pte = 
early_ioremap_pte(addr); ++ if (pte_present(*pte)) { ++ phys = __pte_val(*pte) & PAGE_MASK; ++ set_fixmap(idx, phys); ++ } ++ } ++} ++#endif /* CONFIG_X86_32 */ ++ ++static void __init __early_set_fixmap(enum fixed_addresses idx, ++ unsigned long phys, pgprot_t flags) ++{ ++ unsigned long addr = __fix_to_virt(idx); ++ pte_t *pte; ++ ++ if (idx >= __end_of_fixed_addresses) { ++ BUG(); ++ return; ++ } ++ pte = early_ioremap_pte(addr); ++ if (pgprot_val(flags)) ++ set_pte(pte, pfn_pte_ma(phys >> PAGE_SHIFT, flags)); ++ else ++ pte_clear(NULL, addr, pte); ++ __flush_tlb_one(addr); ++} ++ ++static inline void __init early_set_fixmap(enum fixed_addresses idx, ++ unsigned long phys) ++{ ++ if (after_paging_init) ++ set_fixmap(idx, phys); ++ else ++ __early_set_fixmap(idx, phys, PAGE_KERNEL); ++} ++ ++static inline void __init early_clear_fixmap(enum fixed_addresses idx) ++{ ++ if (after_paging_init) ++ clear_fixmap(idx); ++ else ++ __early_set_fixmap(idx, 0, __pgprot(0)); ++} ++ ++ ++int __initdata early_ioremap_nested; ++ ++static int __init check_early_ioremap_leak(void) ++{ ++ if (!early_ioremap_nested) ++ return 0; ++ ++ printk(KERN_WARNING ++ "Debug warning: early ioremap leak of %d areas detected.\n", ++ early_ioremap_nested); ++ printk(KERN_WARNING ++ "please boot with early_ioremap_debug and report the dmesg.\n"); ++ WARN_ON(1); ++ ++ return 1; ++} ++late_initcall(check_early_ioremap_leak); ++ ++void __init *early_ioremap(unsigned long phys_addr, unsigned long size) ++{ ++ unsigned long offset, last_addr; ++ unsigned int nrpages, nesting; ++ enum fixed_addresses idx0, idx; ++ ++ WARN_ON(system_state != SYSTEM_BOOTING); ++ ++ nesting = early_ioremap_nested; ++ if (early_ioremap_debug) { ++ printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ", ++ phys_addr, size, nesting); ++ dump_stack(); ++ } + + /* Don't allow wraparound or zero size */ + last_addr = phys_addr + size - 1; - if (!size || last_addr < phys_addr) -- return NULL; -- ++ if (!size || last_addr < phys_addr) { ++ WARN_ON(1); + return NULL; ++ } + - /* - * Don't remap the low PCI/ISA area, it's always mapped.. - */ @@ -15739,57 +15112,89 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS) - return isa_bus_to_virt(phys_addr); - -- /* -- * Mappings have to be page-aligned -- */ -- offset = phys_addr & ~PAGE_MASK; -- phys_addr &= PAGE_MASK; -- size = PAGE_ALIGN(last_addr) - phys_addr; -- -- /* -- * Mappings have to fit in the FIX_BTMAP area. -- */ -- nrpages = size >> PAGE_SHIFT; ++ if (nesting >= FIX_BTMAPS_NESTING) { ++ WARN_ON(1); ++ return NULL; ++ } ++ early_ioremap_nested++; + /* + * Mappings have to be page-aligned + */ +@@ -386,41 +608,62 @@ void __init *bt_ioremap(unsigned long ph + * Mappings have to fit in the FIX_BTMAP area. + */ + nrpages = size >> PAGE_SHIFT; - if (nrpages > NR_FIX_BTMAPS) -- return NULL; -- -- /* -- * Ok, go for it.. -- */ ++ if (nrpages > NR_FIX_BTMAPS) { ++ WARN_ON(1); + return NULL; ++ } + + /* + * Ok, go for it.. 
+ */ - idx = FIX_BTMAP_BEGIN; -- while (nrpages > 0) { ++ idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting; ++ idx = idx0; + while (nrpages > 0) { - set_fixmap(idx, phys_addr); -- phys_addr += PAGE_SIZE; -- --idx; -- --nrpages; -- } ++ early_set_fixmap(idx, phys_addr); + phys_addr += PAGE_SIZE; + --idx; + --nrpages; + } - return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN)); --} -- ++ if (early_ioremap_debug) ++ printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0)); ++ ++ return (void *) (offset + fix_to_virt(idx0)); + } + -void __init bt_iounmap(void *addr, unsigned long size) --{ -- unsigned long virt_addr; -- unsigned long offset; -- unsigned int nrpages; -- enum fixed_addresses idx; -- -- virt_addr = (unsigned long)addr; ++void __init early_iounmap(void *addr, unsigned long size) + { + unsigned long virt_addr; + unsigned long offset; + unsigned int nrpages; + enum fixed_addresses idx; ++ unsigned int nesting; ++ ++ nesting = --early_ioremap_nested; ++ WARN_ON(nesting < 0); ++ ++ if (early_ioremap_debug) { ++ printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr, ++ size, nesting); ++ dump_stack(); ++ } + + virt_addr = (unsigned long)addr; - if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) - return; - if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) -- return; -- offset = virt_addr & ~PAGE_MASK; -- nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT; -- ++ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) { ++ WARN_ON(1); + return; ++ } + offset = virt_addr & ~PAGE_MASK; + nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT; + - idx = FIX_BTMAP_BEGIN; -- while (nrpages > 0) { ++ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting; + while (nrpages > 0) { - clear_fixmap(idx); -- --idx; -- --nrpages; -- } --} ++ early_clear_fixmap(idx); + --idx; + --nrpages; + } + } ++ ++void __this_fixmap_does_not_exist(void) ++{ ++ WARN_ON(1); ++} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/pageattr-xen.c 2010-03-24 15:10:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pageattr-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -0,0 +1,1414 @@ +/* + * Copyright 2002 Andi Kleen, SuSE Labs. 
@@ -17205,7 +16610,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#ifdef CONFIG_CPA_DEBUG +#include "pageattr-test.c" +#endif ---- head-2010-04-29.orig/arch/x86/mm/pageattr_64-xen.c 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,542 +0,0 @@ -/* @@ -17750,8 +17155,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - -EXPORT_SYMBOL(change_page_attr); -EXPORT_SYMBOL(global_flush_tlb); ---- head-2010-04-29.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pgtable_32-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -29,8 +29,6 @@ #include #include @@ -17815,7 +17220,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * List of all pgd's needed for non-PAE so it can invalidate entries * in both cached and uncached pgd's; not needed for PAE since the -@@ -224,224 +175,191 @@ void pmd_ctor(struct kmem_cache *cache, +@@ -224,231 +175,194 @@ void pmd_ctor(struct kmem_cache *cache, * vmalloc faults work because attached pagetables are never freed. * -- wli */ @@ -17842,6 +17247,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - *pprev = next; - if (next) - set_page_private(next, (unsigned long)pprev); +- page->mapping = NULL; -} + struct page *page = virt_to_page(pgd); @@ -17902,33 +17308,25 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + USER_PTRS_PER_PGD, + KERNEL_PGD_PTRS); } -+ +-} +-#endif /* PTRS_PER_PMD */ + +-static void pgd_dtor(void *pgd) +-{ +- unsigned long flags; /* can be called from interrupt context */ + /* list required to sync kernel mapping updates */ + if (PAGETABLE_LEVELS == 2) + pgd_list_add(pgd); -+ -+ spin_unlock_irqrestore(&pgd_lock, flags); - } --#endif /* PTRS_PER_PMD */ - - static void pgd_dtor(void *pgd) - { - unsigned long flags; /* can be called from interrupt context */ - if (SHARED_KERNEL_PMD) - return; - -- paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT); -- spin_lock_irqsave(&pgd_lock, flags); -- pgd_list_del(pgd); -- spin_unlock_irqrestore(&pgd_lock, flags); -+ if (!SHARED_KERNEL_PMD) { -+ spin_lock_irqsave(&pgd_lock, flags); -+ pgd_list_del(pgd); -+ spin_unlock_irqrestore(&pgd_lock, flags); -+ } - - pgd_test_and_unpin(pgd); +- paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT); +- spin_lock_irqsave(&pgd_lock, flags); +- pgd_list_del(pgd); + spin_unlock_irqrestore(&pgd_lock, flags); +- +- pgd_test_and_unpin(pgd); } -#define UNSHARED_PTRS_PER_PGD \ @@ -17938,23 +17336,13 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - make sure its initialized with the appropriate kernel mappings. - Otherwise use a cached zeroed pmd. */ -static pmd_t *pmd_cache_alloc(int idx) -+#ifdef CONFIG_X86_PAE -+/* -+ * Mop up any pmd pages which may still be attached to the pgd. -+ * Normally they will be freed by munmap/exit_mmap, but any pmd we -+ * preallocate which never got a corresponding vma will need to be -+ * freed manually. 
-+ */ -+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) ++static void pgd_dtor(void *pgd) { - pmd_t *pmd; -+ int i; - +- - if (idx >= USER_PTRS_PER_PGD) { - pmd = (pmd_t *)__get_free_page(GFP_KERNEL); -+ for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) { -+ pgd_t pgd = pgdp[i]; - +- -#ifndef CONFIG_XEN - if (pmd) - memcpy(pmd, @@ -17963,12 +17351,15 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#endif - } else - pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); -+ if (__pgd_val(pgd) != 0) { -+ pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); ++ unsigned long flags; /* can be called from interrupt context */ - return pmd; -} -+ pgdp[i] = xen_make_pgd(0); ++ if (!SHARED_KERNEL_PMD) { ++ spin_lock_irqsave(&pgd_lock, flags); ++ pgd_list_del(pgd); ++ spin_unlock_irqrestore(&pgd_lock, flags); ++ } -static void pmd_cache_free(pmd_t *pmd, int idx) -{ @@ -17978,39 +17369,40 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - free_page((unsigned long)pmd); - } else - kmem_cache_free(pmd_cache, pmd); -+ paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT); -+ pmd_free(mm, pmd); -+ } -+ } ++ pgd_test_and_unpin(pgd); } -pgd_t *pgd_alloc(struct mm_struct *mm) ++#ifdef CONFIG_X86_PAE +/* -+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when -+ * updating the top-level pagetable entries to guarantee the -+ * processor notices the update. Since this is expensive, and -+ * all 4 top-level entries are used almost immediately in a -+ * new process's life, we just pre-populate them here. -+ * -+ * Also, if we're in a paravirt environment where the kernel pmd is -+ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate -+ * and initialize the kernel pmds here. ++ * Mop up any pmd pages which may still be attached to the pgd. ++ * Normally they will be freed by munmap/exit_mmap, but any pmd we ++ * preallocate which never got a corresponding vma will need to be ++ * freed manually. + */ -+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd) ++static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) { -+ pud_t *pud; -+ pmd_t *pmds[UNSHARED_PTRS_PER_PGD]; -+ unsigned long addr, flags; int i; - pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor); - pmd_t **pmds = NULL; - unsigned long flags; -- + +- if (!pgd) +- return NULL; ++ for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) { ++ pgd_t pgd = pgdp[i]; + - pgd_test_and_unpin(pgd); - -- if (PTRS_PER_PMD == 1 || !pgd) +- /* Store a back link for vmalloc_sync_all(). */ +- virt_to_page(pgd)->mapping = (void *)mm; ++ if (__pgd_val(pgd) != 0) { ++ pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); + +- if (PTRS_PER_PMD == 1) - return pgd; -- ++ pgdp[i] = xen_make_pgd(0); + -#ifdef CONFIG_XEN - if (!SHARED_KERNEL_PMD) { - /* @@ -18023,14 +17415,34 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - if (!pmds) { - quicklist_free(0, pgd_dtor, pgd); - return NULL; -- } -- } ++ paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT); ++ pmd_free(mm, pmd); + } + } -#endif ++} - /* Allocate pmds, remember virtual addresses. */ - for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) { - pmd_t *pmd = pmd_cache_alloc(i); -- ++/* ++ * In PAE mode, we need to do a cr3 reload (=tlb flush) when ++ * updating the top-level pagetable entries to guarantee the ++ * processor notices the update. Since this is expensive, and ++ * all 4 top-level entries are used almost immediately in a ++ * new process's life, we just pre-populate them here. 
++ * ++ * Also, if we're in a paravirt environment where the kernel pmd is ++ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate ++ * and initialize the kernel pmds here. ++ */ ++static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd) ++{ ++ pud_t *pud; ++ pmd_t *pmds[UNSHARED_PTRS_PER_PGD]; ++ unsigned long addr, flags; ++ int i; + - if (!pmd) + /* + * We can race save/restore (if we sleep during a GFP_KERNEL memory @@ -18143,8 +17555,11 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + + /* so that alloc_pd can use it */ + mm->pgd = pgd; -+ if (pgd) ++ if (pgd) { ++ /* Store a back link for vmalloc_sync_all(). */ ++ set_page_private(virt_to_page(pgd), (unsigned long)mm); + pgd_ctor(pgd); ++ } + + if (pgd && !pgd_prepopulate_pmd(mm, pgd)) { + free_page((unsigned long)pgd); @@ -18164,7 +17579,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * After this the pgd should not be pinned for the duration of this * function's execution. We should never sleep and thus never race: -@@ -450,39 +368,43 @@ void pgd_free(pgd_t *pgd) +@@ -457,39 +371,43 @@ void pgd_free(pgd_t *pgd) * 2. The machine addresses in PGD entries will not become invalid * due to a concurrent save/restore. */ @@ -18225,7 +17640,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches rc = HYPERVISOR_update_va_mapping( (unsigned long)va, pte_wrprotect(*pte), 0); BUG_ON(rc); -@@ -491,313 +413,15 @@ void make_lowmem_page_readonly(void *va, +@@ -498,313 +416,15 @@ void make_lowmem_page_readonly(void *va, void make_lowmem_page_writable(void *va, unsigned int feature) { pte_t *pte; @@ -18542,8 +17957,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - !mm->context.has_foreign_mappings) - mm_unpin(mm); -} ---- head-2010-04-29.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/pci/irq-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/pci/irq-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/pci/irq-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -204,6 +204,7 @@ static int pirq_ali_get(struct pci_dev * { static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; @@ -18641,8 +18056,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; ---- head-2010-04-29.orig/arch/x86/vdso/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-04-29/arch/x86/vdso/Makefile 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/Makefile 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/Makefile 2011-01-31 18:01:51.000000000 +0100 @@ -66,6 +66,7 @@ vdso32.so-$(VDSO32-y) += int80 vdso32.so-$(CONFIG_COMPAT) += syscall vdso32.so-$(VDSO32-y) += sysenter @@ -18651,8 +18066,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches vdso32.so-$(CONFIG_XEN) += $(xen-vdso32-y) vdso32-images = $(vdso32.so-y:%=vdso32-%.so) ---- head-2010-04-29.orig/arch/x86/vdso/vdso32/syscall.S 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/vdso/vdso32/syscall.S 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/vdso32/syscall.S 2011-03-15 16:52:08.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/vdso32/syscall.S 2011-01-31 18:01:51.000000000 +0100 @@ -19,8 +19,10 @@ 
__kernel_vsyscall: .Lpush_ebp: movl %ecx, %ebp @@ -18664,8 +18079,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches movl %ebp, %ecx popl %ebp .Lpop_ebp: ---- head-2010-04-29.orig/arch/x86/vdso/vdso32.S 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/vdso/vdso32.S 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/vdso32.S 2011-03-15 16:52:08.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/vdso32.S 2011-01-31 18:01:51.000000000 +0100 @@ -19,4 +19,16 @@ vdso32_sysenter_start: .incbin "arch/x86/vdso/vdso32-sysenter.so" vdso32_sysenter_end: @@ -18683,8 +18098,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif + __FINIT ---- head-2010-04-29.orig/arch/x86/vdso/vdso32-setup.c 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-04-29/arch/x86/vdso/vdso32-setup.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/vdso32-setup.c 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/vdso32-setup.c 2011-01-31 18:01:51.000000000 +0100 @@ -26,10 +26,6 @@ #include #include @@ -18741,7 +18156,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches static struct vm_area_struct gate_vma; --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:10:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/vdso32-setup-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -0,0 +1,506 @@ +/* + * (C) Copyright 2002 Linus Torvalds @@ -19249,63 +18664,216 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +} + +#endif /* CONFIG_X86_64 */ ---- head-2010-04-29.orig/drivers/pci/msi-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/pci/msi-xen.c 2010-03-24 15:10:37.000000000 +0100 -@@ -45,6 +45,53 @@ struct msi_pirq_entry { - int entry_nr; +--- head-2011-03-11.orig/drivers/hwmon/coretemp-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/hwmon/coretemp-xen.c 2011-01-31 18:01:51.000000000 +0100 +@@ -38,7 +38,8 @@ + #define DRVNAME "coretemp" + #define coretemp_data pdev_entry + +-typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_LABEL, SHOW_NAME } SHOW; ++typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_TTARGET, SHOW_LABEL, ++ SHOW_NAME } SHOW; + + /* + * Functions declaration +@@ -58,6 +59,7 @@ struct pdev_entry { + unsigned long last_updated; /* in jiffies */ + int temp; + int tjmax; ++ int ttarget; + u8 alarm; }; -+/* Arch hooks */ -+ -+int __attribute__ ((weak)) -+arch_msi_check_device(struct pci_dev *dev, int nvec, int type) +@@ -96,8 +98,10 @@ static ssize_t show_temp(struct device * + + if (attr->index == SHOW_TEMP) + err = data->valid ? 
sprintf(buf, "%d\n", data->temp) : -EAGAIN; +- else ++ else if (attr->index == SHOW_TJMAX) + err = sprintf(buf, "%d\n", data->tjmax); ++ else ++ err = sprintf(buf, "%d\n", data->ttarget); + return err; + } + +@@ -105,6 +109,8 @@ static SENSOR_DEVICE_ATTR(temp1_input, S + SHOW_TEMP); + static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, + SHOW_TJMAX); ++static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, ++ SHOW_TTARGET); + static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL); + static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); + static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); +@@ -151,6 +157,56 @@ static struct coretemp_data *coretemp_up + return data; + } + ++static int adjust_tjmax(struct coretemp_data *c, u32 id, struct device *dev) +{ -+ return 0; -+} ++ /* The 100C is default for both mobile and non mobile CPUs */ + -+#ifndef CONFIG_XEN -+int __attribute__ ((weak)) -+arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) -+{ -+ return 0; -+} ++ int tjmax = 100000; ++ int ismobile = 1; ++ int err; ++ u32 eax, edx; + -+int __attribute__ ((weak)) -+arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -+{ -+ struct msi_desc *entry; -+ int ret; ++ /* Early chips have no MSR for TjMax */ + -+ list_for_each_entry(entry, &dev->msi_list, list) { -+ ret = arch_setup_msi_irq(dev, entry); -+ if (ret) -+ return ret; ++ if ((c->x86_model == 0xf) && (c->x86_mask < 4)) { ++ ismobile = 0; + } + -+ return 0; -+} ++ if ((c->x86_model > 0xe) && (ismobile)) { ++ ++ /* Now we can detect the mobile CPU using Intel provided table ++ http://softwarecommunity.intel.com/Wiki/Mobility/720.htm ++ For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU ++ */ ++ ++ err = rdmsr_safe_on_pcpu(id, 0x17, &eax, &edx); ++ if (err < 0) { ++ dev_warn(dev, ++ "Unable to access MSR 0x17, assuming desktop" ++ " CPU\n"); ++ ismobile = 0; ++ } else if (!(eax & 0x10000000)) { ++ ismobile = 0; ++ } ++ } + -+void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) -+{ -+ return; ++ if (ismobile) { ++ ++ err = rdmsr_safe_on_pcpu(id, 0xee, &eax, &edx); ++ if (err < 0) { ++ dev_warn(dev, ++ "Unable to access MSR 0xEE, for Tjmax, left" ++ " at default"); ++ } else if (eax & 0x40000000) { ++ tjmax = 85000; ++ } ++ } else { ++ dev_warn(dev, "Using relative temperature scale!\n"); ++ } ++ ++ return tjmax; +} + -+void __attribute__ ((weak)) -+arch_teardown_msi_irqs(struct pci_dev *dev) -+{ -+ struct msi_desc *entry; + static int coretemp_probe(struct platform_device *pdev) + { + struct coretemp_data *data = platform_get_drvdata(pdev); +@@ -159,8 +215,6 @@ static int coretemp_probe(struct platfor + + data->name = "coretemp"; + mutex_init(&data->update_lock); +- /* Tjmax default is 100 degrees C */ +- data->tjmax = 100000; + + /* test if we can access the THERM_STATUS MSR */ + err = rdmsr_safe_on_pcpu(pdev->id, MSR_IA32_THERM_STATUS, &eax, &edx); +@@ -189,38 +243,28 @@ static int coretemp_probe(struct platfor + } + } + +- /* Some processors have Tjmax 85 following magic should detect it +- Intel won't disclose the information without signed NDA, but +- individuals cannot sign it. Catch(ed) 22. 
+- */ ++ data->tjmax = adjust_tjmax(data, pdev->id, &pdev->dev); + +- if (((data->x86_model == 0xf) && (data->x86_mask > 3)) || +- (data->x86_model == 0xe)) { +- err = rdmsr_safe_on_pcpu(data->pdev->id, 0xee, &eax, &edx); ++ /* read the still undocumented IA32_TEMPERATURE_TARGET it exists ++ on older CPUs but not in this register */ ++ ++ if (data->x86_model > 0xe) { ++ err = rdmsr_safe_on_pcpu(pdev->id, 0x1a2, &eax, &edx); + if (err < 0) { +- dev_warn(&pdev->dev, +- "Unable to access MSR 0xEE, Tjmax left at %d " +- "degrees C\n", data->tjmax/1000); +- } else if (eax & 0x40000000) { +- data->tjmax = 85000; ++ dev_warn(&pdev->dev, "Unable to read" ++ " IA32_TEMPERATURE_TARGET MSR\n"); ++ } else { ++ data->ttarget = data->tjmax - ++ (((eax >> 8) & 0xff) * 1000); ++ err = device_create_file(&pdev->dev, ++ &sensor_dev_attr_temp1_max.dev_attr); ++ if (err) ++ return err; + } + } + +- /* Intel says that above should not work for desktop Core2 processors, +- but it seems to work. There is no other way how get the absolute +- readings. Warn the user about this. First check if are desktop, +- bit 50 of MSR_IA32_PLATFORM_ID should be 0. +- */ +- +- rdmsr_safe_on_pcpu(data->pdev->id, MSR_IA32_PLATFORM_ID, &eax, &edx); +- +- if ((data->x86_model == 0xf) && (!(edx & 0x00040000))) { +- dev_warn(&pdev->dev, "Using undocumented features, absolute " +- "temperature might be wrong!\n"); +- } +- + if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group))) +- return err; ++ goto exit_dev; + + data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(data->hwmon_dev)) { +@@ -234,6 +278,8 @@ static int coretemp_probe(struct platfor + + exit_class: + sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); ++exit_dev: ++ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); + return err; + } + +@@ -243,6 +289,7 @@ static int coretemp_remove(struct platfo + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); ++ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); + return 0; + } + +@@ -313,10 +360,10 @@ static int coretemp_device_add(unsigned + if (err) + goto exit_entry_free; + +- /* check if family 6, models e, f, 16 */ ++ /* check if family 6, models 0xe, 0xf, 0x16, 0x17 */ + if (info.x86 != 0x6 || + !((pdev_entry->x86_model == 0xe) || (pdev_entry->x86_model == 0xf) || +- (pdev_entry->x86_model == 0x16))) { ++ (pdev_entry->x86_model == 0x16) || (pdev_entry->x86_model == 0x17))) { + + /* supported CPU not found, but report the unknown + family 6 CPU */ +--- head-2011-03-11.orig/drivers/pci/msi-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/pci/msi-xen.c 2011-01-31 18:01:51.000000000 +0100 +@@ -45,6 +45,14 @@ struct msi_pirq_entry { + int entry_nr; + }; + ++/* Arch hooks */ + -+ list_for_each_entry(entry, &dev->msi_list, list) { -+ if (entry->irq != 0) -+ arch_teardown_msi_irq(entry->irq); -+ } ++int __attribute__ ((weak)) ++arch_msi_check_device(struct pci_dev *dev, int nvec, int type) ++{ ++ return 0; +} -+#endif + static void msi_set_enable(struct pci_dev *dev, int enable) { int pos; -@@ -266,7 +313,6 @@ static void pci_intx_for_msi(struct pci_ +@@ -266,7 +274,6 @@ static void pci_intx_for_msi(struct pci_ pci_intx(dev, enable); } @@ -19313,7 +18881,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void pci_restore_msi_state(struct pci_dev *dev) { int rc; -@@ -286,7 +332,7 @@ void pci_restore_msi_state(struct pci_de +@@ -286,7 +293,7 @@ void 
pci_restore_msi_state(struct pci_de rc = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore); WARN(rc && rc != -ENOSYS, "restore_msi -> %d\n", rc); } @@ -19322,7 +18890,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /** * msi_capability_init - configure device's MSI capability structure -@@ -707,51 +753,3 @@ void pci_msi_init_pci_dev(struct pci_dev +@@ -707,12 +714,3 @@ void pci_msi_init_pci_dev(struct pci_dev INIT_LIST_HEAD(&dev->msi_list); #endif } @@ -19335,48 +18903,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -{ - return 0; -} -- --#ifndef CONFIG_XEN --int __attribute__ ((weak)) --arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) --{ -- return 0; --} -- --int __attribute__ ((weak)) --arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) --{ -- struct msi_desc *entry; -- int ret; -- -- list_for_each_entry(entry, &dev->msi_list, list) { -- ret = arch_setup_msi_irq(dev, entry); -- if (ret) -- return ret; -- } -- -- return 0; --} -- --void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) --{ -- return; --} -- --void __attribute__ ((weak)) --arch_teardown_msi_irqs(struct pci_dev *dev) --{ -- struct msi_desc *entry; -- -- list_for_each_entry(entry, &dev->msi_list, list) { -- if (entry->irq != 0) -- arch_teardown_msi_irq(entry->irq); -- } --} --#endif ---- head-2010-04-29.orig/drivers/pci/pci.c 2010-04-29 09:30:41.000000000 +0200 -+++ head-2010-04-29/drivers/pci/pci.c 2010-04-15 09:56:06.000000000 +0200 -@@ -458,7 +458,12 @@ pci_find_parent_resource(const struct pc +--- head-2011-03-11.orig/drivers/pci/pci.c 2011-01-31 14:31:28.000000000 +0100 ++++ head-2011-03-11/drivers/pci/pci.c 2011-01-31 18:01:51.000000000 +0100 +@@ -471,7 +471,12 @@ pci_find_parent_resource(const struct pc * Restore the BAR values for a given device, so as to make it * accessible by its driver. 
*/ @@ -19389,8 +18918,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches pci_restore_bars(struct pci_dev *dev) { int i; ---- head-2010-04-29.orig/drivers/xen/balloon/sysfs.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-04-29/drivers/xen/balloon/sysfs.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/balloon/sysfs.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-11/drivers/xen/balloon/sysfs.c 2011-01-31 18:01:51.000000000 +0100 @@ -104,7 +104,7 @@ static struct attribute_group balloon_in }; @@ -19400,8 +18929,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches }; static struct sys_device balloon_sysdev; ---- head-2010-04-29.orig/drivers/xen/blkback/blkback.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/blkback.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blkback/blkback.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkback/blkback.c 2011-01-31 18:01:51.000000000 +0100 @@ -150,7 +150,7 @@ static void unplug_queue(blkif_t *blkif) return; if (blkif->plug->unplug_fn) @@ -19421,9 +18950,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches blkif->plug = q; } ---- head-2010-04-29.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkfront/blkfront.c 2010-03-24 15:10:37.000000000 +0100 -@@ -739,7 +739,6 @@ static irqreturn_t blkif_int(int irq, vo +--- head-2011-03-11.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkfront/blkfront.c 2011-01-31 18:01:51.000000000 +0100 +@@ -752,7 +752,6 @@ static irqreturn_t blkif_int(int irq, vo RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; @@ -19431,7 +18960,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches spin_lock_irqsave(&blkif_io_lock, flags); -@@ -764,13 +763,13 @@ static irqreturn_t blkif_int(int irq, vo +@@ -777,14 +776,14 @@ static irqreturn_t blkif_int(int irq, vo ADD_ID_TO_FREELIST(info, id); @@ -19440,14 +18969,15 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { - printk("blkfront: %s: write barrier op failed\n", - info->gd->disk_name); + pr_warning("blkfront: %s:" + " write barrier op failed\n", + info->gd->disk_name); - uptodate = -EOPNOTSUPP; + ret = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } -@@ -781,10 +780,8 @@ static irqreturn_t blkif_int(int irq, vo +@@ -795,10 +794,8 @@ static irqreturn_t blkif_int(int irq, vo DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); @@ -19459,9 +18989,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches break; default: BUG(); ---- head-2010-04-29.orig/drivers/xen/blktap/blktap.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blktap/blktap.c 2010-03-24 15:10:37.000000000 +0100 -@@ -336,8 +336,8 @@ static pte_t blktap_clear_pte(struct vm_ +--- head-2011-03-11.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:10:44.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap/blktap.c 2011-02-17 10:10:56.000000000 +0100 +@@ -314,8 +314,8 @@ static pte_t blktap_clear_pte(struct vm_ uvstart = info->rings_vstart + (RING_PAGES << PAGE_SHIFT); } if (vma->vm_file == NULL || 
uvaddr < uvstart) @@ -19472,7 +19002,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* TODO Should these be changed to if statements? */ BUG_ON(!info); -@@ -380,8 +380,8 @@ static pte_t blktap_clear_pte(struct vm_ +@@ -359,8 +359,8 @@ static pte_t blktap_clear_pte(struct vm_ BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap)); /* USING SHADOW PAGE TABLES. */ @@ -19483,9 +19013,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } if (count) { ---- head-2010-04-29.orig/drivers/xen/blktap2/device.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blktap2/device.c 2010-03-24 15:10:37.000000000 +0100 -@@ -163,9 +163,9 @@ blktap_map_uaddr_fn(pte_t *ptep, struct +--- head-2011-03-11.orig/drivers/xen/blktap2/device.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/device.c 2011-01-31 18:01:51.000000000 +0100 +@@ -161,9 +161,9 @@ blktap_map_uaddr_fn(pte_t *ptep, struct } static int @@ -19497,7 +19027,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches PAGE_SIZE, blktap_map_uaddr_fn, &pte); } -@@ -173,18 +173,29 @@ static int +@@ -171,18 +171,29 @@ static int blktap_umap_uaddr_fn(pte_t *ptep, struct page *pmd_page, unsigned long addr, void *data) { @@ -19531,7 +19061,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } static inline void -@@ -198,17 +209,10 @@ flush_tlb_kernel_page(unsigned long kvad +@@ -196,17 +207,10 @@ flush_tlb_kernel_page(unsigned long kvad } static void @@ -19552,7 +19082,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } /* -@@ -336,8 +340,8 @@ blktap_unmap(struct blktap *tap, struct +@@ -333,8 +337,8 @@ blktap_unmap(struct blktap *tap, struct if (!xen_feature(XENFEAT_auto_translated_physmap) && request->handles[i].kernel == INVALID_GRANT_HANDLE) { @@ -19563,7 +19093,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); } -@@ -377,7 +381,7 @@ blktap_device_fail_pending_requests(stru +@@ -374,7 +378,7 @@ blktap_device_fail_pending_requests(stru blktap_unmap(tap, request); req = (struct request *)(unsigned long)request->id; @@ -19572,7 +19102,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches blktap_request_free(tap, request); } -@@ -400,16 +404,11 @@ blktap_device_finish_request(struct blkt +@@ -397,16 +401,11 @@ blktap_device_finish_request(struct blkt blkif_response_t *res, struct blktap_request *request) { @@ -19589,7 +19119,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches BTDBG("req %p res status %d operation %d/%d id %lld\n", req, res->status, res->operation, request->operation, -@@ -421,7 +420,8 @@ blktap_device_finish_request(struct blkt +@@ -418,7 +417,8 @@ blktap_device_finish_request(struct blkt if (unlikely(res->status != BLKIF_RSP_OKAY)) BTERR("Bad return from device data " "request: %x\n", res->status); @@ -19599,7 +19129,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches break; default: BUG(); -@@ -571,9 +571,9 @@ blktap_map(struct blktap *tap, +@@ -567,9 +567,9 @@ blktap_map(struct blktap *tap, if (!xen_feature(XENFEAT_auto_translated_physmap)) { pte = mk_pte(page, ring->vma->vm_page_prot); @@ -19611,7 +19141,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches flush_tlb_kernel_page(kvaddr); 
set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, pte_mfn(pte)); -@@ -896,7 +896,7 @@ blktap_device_run_queue(struct blktap *t +@@ -891,7 +891,7 @@ blktap_device_run_queue(struct blktap *t if (!err) queued++; else { @@ -19620,8 +19150,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches blktap_request_free(tap, request); } ---- head-2010-04-29.orig/drivers/xen/blktap2/ring.c 2010-04-29 09:34:47.000000000 +0200 -+++ head-2010-04-29/drivers/xen/blktap2/ring.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blktap2/ring.c 2011-02-24 15:15:44.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/ring.c 2011-01-31 18:01:51.000000000 +0100 @@ -103,8 +103,8 @@ blktap_ring_clear_pte(struct vm_area_str * mapped region. */ @@ -19633,7 +19163,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches offset = (int)((uvaddr - ring->user_vstart) >> PAGE_SHIFT); usr_idx = offset / BLKIF_MAX_SEGMENTS_PER_REQUEST; -@@ -146,8 +146,8 @@ blktap_ring_clear_pte(struct vm_area_str +@@ -143,8 +143,8 @@ blktap_ring_clear_pte(struct vm_area_str khandle->user); count++; } else @@ -19644,8 +19174,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches if (count) if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ---- head-2010-04-29.orig/drivers/xen/core/Makefile 2008-07-21 11:00:33.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/Makefile 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/Makefile 2008-07-21 11:00:33.000000000 +0200 ++++ head-2011-03-11/drivers/xen/core/Makefile 2011-01-31 18:01:51.000000000 +0100 @@ -10,5 +10,6 @@ obj-$(CONFIG_SYS_HYPERVISOR) += hypervis obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o @@ -19653,9 +19183,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +obj-$(CONFIG_X86_SMP) += spinlock.o obj-$(CONFIG_KEXEC) += machine_kexec.o obj-$(CONFIG_XEN_XENCOMM) += xencomm.o ---- head-2010-04-29.orig/drivers/xen/core/evtchn.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/evtchn.c 2010-04-23 15:15:37.000000000 +0200 -@@ -194,7 +194,7 @@ static inline unsigned int cpu_from_evtc +--- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-01-31 18:01:51.000000000 +0100 +@@ -197,7 +197,7 @@ static inline unsigned int cpu_from_evtc /* Upcall to generic IRQ layer. */ #ifdef CONFIG_X86 @@ -19664,7 +19194,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void __init xen_init_IRQ(void); void __init init_IRQ(void) { -@@ -203,13 +203,11 @@ void __init init_IRQ(void) +@@ -206,13 +206,11 @@ void __init init_IRQ(void) } #if defined (__i386__) static inline void exit_idle(void) {} @@ -19679,7 +19209,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches do_IRQ((regs)); \ } while (0) #endif -@@ -676,13 +674,12 @@ static void set_affinity_irq(unsigned in +@@ -693,13 +691,12 @@ static void set_affinity_irq(unsigned in int resend_irq_on_evtchn(unsigned int irq) { int masked, evtchn = evtchn_from_irq(irq); @@ -19694,51 +19224,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches if (!masked) unmask_evtchn(evtchn); -@@ -971,6 +968,43 @@ void disable_all_local_evtchn(void) - synch_set_bit(i, &s->evtchn_mask[0]); - } - -+/* Clear an irq's pending state, in preparation for polling on it. 
*/ -+void xen_clear_irq_pending(int irq) -+{ -+ int evtchn = evtchn_from_irq(irq); -+ -+ if (VALID_EVTCHN(evtchn)) -+ clear_evtchn(evtchn); -+} -+ -+/* Set an irq's pending state, to avoid blocking on it. */ -+void xen_set_irq_pending(int irq) -+{ -+ int evtchn = evtchn_from_irq(irq); -+ -+ if (VALID_EVTCHN(evtchn)) -+ set_evtchn(evtchn); -+} -+ -+/* Test an irq's pending state. */ -+int xen_test_irq_pending(int irq) -+{ -+ int evtchn = evtchn_from_irq(irq); -+ -+ return VALID_EVTCHN(evtchn) && test_evtchn(evtchn); -+} -+ -+/* Poll waiting for an irq to become pending. In the usual case, the -+ irq will be disabled so it won't deliver an interrupt. */ -+void xen_poll_irq(int irq) -+{ -+ evtchn_port_t evtchn = evtchn_from_irq(irq); -+ -+ if (VALID_EVTCHN(evtchn) -+ && HYPERVISOR_poll_no_timeout(&evtchn, 1)) -+ BUG(); -+} -+ - static void restore_cpu_virqs(unsigned int cpu) - { - struct evtchn_bind_virq bind_virq; -@@ -1024,8 +1058,8 @@ static void restore_cpu_ipis(unsigned in +@@ -1041,8 +1038,8 @@ static void restore_cpu_ipis(unsigned in bind_evtchn_to_cpu(evtchn, cpu); /* Ready for use. */ @@ -19749,8 +19235,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } } ---- head-2010-04-29.orig/drivers/xen/core/hypervisor_sysfs.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/hypervisor_sysfs.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/hypervisor_sysfs.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/hypervisor_sysfs.c 2011-01-31 18:01:51.000000000 +0100 @@ -50,7 +50,7 @@ static int __init hypervisor_subsys_init if (!is_running_on_xen()) return -ENODEV; @@ -19760,9 +19246,21 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches return 0; } ---- head-2010-04-29.orig/drivers/xen/core/smpboot.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/smpboot.c 2010-03-24 15:10:37.000000000 +0100 -@@ -72,6 +72,10 @@ void __init prefill_possible_map(void) +--- head-2011-03-11.orig/drivers/xen/core/machine_reboot.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/machine_reboot.c 2011-01-31 18:01:51.000000000 +0100 +@@ -174,6 +174,9 @@ static int take_machine_down(void *_susp + post_suspend(suspend_cancelled); + gnttab_resume(); + if (!suspend_cancelled) { ++ extern void spinlock_resume(void); ++ ++ spinlock_resume(); + irq_resume(); + #ifdef __x86_64__ + /* +--- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-01-31 18:01:51.000000000 +0100 +@@ -59,6 +59,10 @@ void __init prefill_possible_map(void) return; for (i = 0; i < NR_CPUS; i++) { @@ -19773,7 +19271,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); if (rc >= 0) cpu_set(i, cpu_possible_map); -@@ -134,6 +138,10 @@ static int __cpuinit xen_smp_intr_init(u +@@ -97,6 +101,10 @@ static int __cpuinit xen_smp_intr_init(u goto fail; per_cpu(callfunc_irq, cpu) = rc; @@ -19784,7 +19282,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0)) goto fail; -@@ -144,6 +152,7 @@ static int __cpuinit xen_smp_intr_init(u +@@ -107,6 +115,7 @@ static int __cpuinit xen_smp_intr_init(u unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); if (per_cpu(callfunc_irq, cpu) >= 0) 
unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); @@ -19792,7 +19290,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches return rc; } -@@ -155,6 +164,7 @@ static void xen_smp_intr_exit(unsigned i +@@ -118,6 +127,7 @@ static void xen_smp_intr_exit(unsigned i unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); @@ -19800,7 +19298,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } #endif -@@ -207,36 +217,25 @@ static void __cpuinit cpu_initialize_con +@@ -170,36 +180,25 @@ static void __cpuinit cpu_initialize_con smp_trap_init(ctxt.trap_ctxt); ctxt.ldt_ents = 0; @@ -19844,8 +19342,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt)); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/drivers/xen/core/spinlock.c 2010-03-24 15:10:37.000000000 +0100 -@@ -0,0 +1,246 @@ ++++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:52:48.000000000 +0100 +@@ -0,0 +1,285 @@ +/* + * Xen spinlock functions + * @@ -19854,55 +19352,63 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + */ +#define XEN_SPINLOCK_SOURCE +#include -+#include +#include -+#include +#include ++#include +#include + -+extern irqreturn_t smp_reschedule_interrupt(int, void *); -+ -+static DEFINE_PER_CPU(int, spinlock_irq) = -1; -+static char spinlock_name[NR_CPUS][15]; -+ +struct spinning { + raw_spinlock_t *lock; + unsigned int ticket; + struct spinning *prev; +}; +static DEFINE_PER_CPU(struct spinning *, spinning); ++static DEFINE_PER_CPU(evtchn_port_t, poll_evtchn); +/* + * Protect removal of objects: Addition can be done lockless, and even + * removal itself doesn't need protection - what needs to be prevented is -+ * removed objects going out of scope (as they're allocated on the stack. ++ * removed objects going out of scope (as they're allocated on the stack). 
+ */ -+static DEFINE_PER_CPU(raw_rwlock_t, spinning_rm_lock) = __RAW_RW_LOCK_UNLOCKED; ++struct rm_seq { ++ unsigned int idx; ++ atomic_t ctr[2]; ++}; ++static DEFINE_PER_CPU(struct rm_seq, rm_seq); + +int __cpuinit xen_spinlock_init(unsigned int cpu) +{ ++ struct evtchn_bind_ipi bind_ipi; + int rc; + -+ sprintf(spinlock_name[cpu], "spinlock%u", cpu); -+ rc = bind_ipi_to_irqhandler(SPIN_UNLOCK_VECTOR, -+ cpu, -+ smp_reschedule_interrupt, -+ IRQF_DISABLED|IRQF_NOBALANCING, -+ spinlock_name[cpu], -+ NULL); -+ if (rc < 0) -+ return rc; -+ -+ disable_irq(rc); /* make sure it's never delivered */ -+ per_cpu(spinlock_irq, cpu) = rc; ++ WARN_ON(per_cpu(poll_evtchn, cpu)); ++ bind_ipi.vcpu = cpu; ++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi); ++ if (!rc) ++ per_cpu(poll_evtchn, cpu) = bind_ipi.port; ++ else ++ pr_warning("No spinlock poll event channel for CPU#%u (%d)\n", ++ cpu, rc); + -+ return 0; ++ return rc; +} + +void __cpuinit xen_spinlock_cleanup(unsigned int cpu) +{ -+ if (per_cpu(spinlock_irq, cpu) >= 0) -+ unbind_from_irqhandler(per_cpu(spinlock_irq, cpu), NULL); -+ per_cpu(spinlock_irq, cpu) = -1; ++ struct evtchn_close close; ++ ++ close.port = per_cpu(poll_evtchn, cpu); ++ per_cpu(poll_evtchn, cpu) = 0; ++ WARN_ON(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)); ++} ++ ++void __cpuinit spinlock_resume(void) ++{ ++ unsigned int cpu; ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(poll_evtchn, cpu) = 0; ++ xen_spinlock_init(cpu); ++ } +} + +static unsigned int spin_adjust(struct spinning *spinning, @@ -19930,16 +19436,16 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +} + +bool xen_spin_wait(raw_spinlock_t *lock, unsigned int *ptok, -+ unsigned int flags) ++ unsigned int flags) +{ -+ int irq = __get_cpu_var(spinlock_irq); + bool rc; + typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask; -+ raw_rwlock_t *rm_lock; ++ unsigned int rm_idx; + struct spinning spinning, *other; + + /* If kicker interrupt not initialized yet, just spin. */ -+ if (unlikely(irq < 0) || unlikely(!cpu_online(raw_smp_processor_id()))) ++ if (unlikely(!cpu_online(raw_smp_processor_id())) ++ || unlikely(!__get_cpu_var(poll_evtchn))) + return false; + + /* announce we're spinning */ @@ -19953,7 +19459,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + do { + bool nested = false; + -+ xen_clear_irq_pending(irq); ++ clear_evtchn(__get_cpu_var(poll_evtchn)); + + /* + * Check again to make sure it didn't become free while @@ -19966,7 +19472,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + * without rechecking the lock. + */ + if (spinning.prev) -+ xen_set_irq_pending(irq); ++ set_evtchn(__get_cpu_var(poll_evtchn)); + rc = true; + break; + } @@ -20016,12 +19522,14 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + current_vcpu_info()->evtchn_upcall_mask = + nested ? 
upcall_mask : flags; + -+ xen_poll_irq(irq); ++ if (HYPERVISOR_poll_no_timeout(&__get_cpu_var(poll_evtchn), 1)) ++ BUG(); + + current_vcpu_info()->evtchn_upcall_mask = upcall_mask; + -+ rc = !xen_test_irq_pending(irq); -+ kstat_this_cpu.irqs[irq] += !rc; ++ rc = !test_evtchn(__get_cpu_var(poll_evtchn)); ++ if (!rc) ++ inc_irq_stat(irq_lock_count); + } while (spinning.prev || rc); + + /* @@ -20031,69 +19539,98 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + + /* announce we're done */ + __get_cpu_var(spinning) = other = spinning.prev; -+ rm_lock = &__get_cpu_var(spinning_rm_lock); + raw_local_irq_disable(); -+ __raw_write_lock(rm_lock); -+ __raw_write_unlock(rm_lock); -+ *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); ++ rm_idx = __get_cpu_var(rm_seq.idx); ++ smp_wmb(); ++ __get_cpu_var(rm_seq.idx) = rm_idx + 1; ++ mb(); + + /* + * Obtain new tickets for (or acquire) all those locks where + * above we avoided acquiring them. + */ -+ for (; other; other = other->prev) -+ if (!(other->ticket + 1)) { ++ if (other) { ++ do { + unsigned int token; + bool free; + ++ if (other->ticket + 1) ++ continue; + lock = other->lock; + __raw_spin_lock_preamble; + if (!free) + token = spin_adjust(other->prev, lock, token); + other->ticket = token >> TICKET_SHIFT; -+ } ++ } while ((other = other->prev) != NULL); ++ lock = spinning.lock; ++ } ++ ++ rm_idx &= 1; ++ while (__get_cpu_var(rm_seq.ctr[rm_idx].counter)) ++ cpu_relax(); + raw_local_irq_restore(upcall_mask); ++ *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); + + return rc; +} + +void xen_spin_kick(raw_spinlock_t *lock, unsigned int token) +{ -+ unsigned int cpu; ++ unsigned int cpu = raw_smp_processor_id(), ancor = cpu; ++ ++ if (unlikely(!cpu_online(cpu))) ++ cpu = -1, ancor = nr_cpu_ids; + + token &= (1U << TICKET_SHIFT) - 1; -+ for_each_online_cpu(cpu) { -+ raw_rwlock_t *rm_lock; -+ unsigned long flags; ++ while ((cpu = cpumask_next(cpu, cpu_online_mask)) != ancor) { ++ unsigned int flags; ++ atomic_t *rm_ctr; + struct spinning *spinning; + -+ if (cpu == raw_smp_processor_id()) -+ continue; ++ if (cpu >= nr_cpu_ids) { ++ if (ancor == nr_cpu_ids) ++ return; ++ cpu = cpumask_first(cpu_online_mask); ++ if (cpu == ancor) ++ return; ++ } ++ ++ flags = __raw_local_irq_save(); ++ for (;;) { ++ unsigned int rm_idx = per_cpu(rm_seq.idx, cpu); + -+ rm_lock = &per_cpu(spinning_rm_lock, cpu); -+ raw_local_irq_save(flags); -+ __raw_read_lock(rm_lock); ++ rm_ctr = per_cpu(rm_seq.ctr, cpu) + (rm_idx & 1); ++ atomic_inc(rm_ctr); ++#ifdef CONFIG_X86 /* atomic ops are full barriers */ ++ barrier(); ++#else ++ smp_mb(); ++#endif ++ spinning = per_cpu(spinning, cpu); ++ smp_rmb(); ++ if (rm_idx == per_cpu(rm_seq.idx, cpu)) ++ break; ++ atomic_dec(rm_ctr); ++ } + -+ spinning = per_cpu(spinning, cpu); -+ smp_rmb(); + while (spinning) { + if (spinning->lock == lock && spinning->ticket == token) + break; + spinning = spinning->prev; + } + -+ __raw_read_unlock(rm_lock); ++ atomic_dec(rm_ctr); + raw_local_irq_restore(flags); + + if (unlikely(spinning)) { -+ notify_remote_via_irq(per_cpu(spinlock_irq, cpu)); ++ notify_remote_via_evtchn(per_cpu(poll_evtchn, cpu)); + return; + } + } +} +EXPORT_SYMBOL(xen_spin_kick); ---- head-2010-04-29.orig/drivers/xen/core/xen_sysfs.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/xen_sysfs.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/xen_sysfs.c 2011-01-31 17:32:29.000000000 +0100 ++++ 
head-2011-03-11/drivers/xen/core/xen_sysfs.c 2011-01-31 18:01:51.000000000 +0100 @@ -30,12 +30,12 @@ HYPERVISOR_ATTR_RO(type); static int __init xen_sysfs_type_init(void) @@ -20190,10 +19727,10 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } #endif ---- head-2010-04-29.orig/drivers/xen/gntdev/gntdev.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/gntdev/gntdev.c 2010-03-24 15:10:37.000000000 +0100 -@@ -791,7 +791,7 @@ static pte_t gntdev_clear_pte(struct vm_ - op.status); +--- head-2011-03-11.orig/drivers/xen/gntdev/gntdev.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/gntdev/gntdev.c 2011-01-31 18:01:51.000000000 +0100 +@@ -771,7 +771,7 @@ static pte_t gntdev_clear_pte(struct vm_ + op.status); } else { /* USING SHADOW PAGE TABLES. */ - copy = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm); @@ -20201,7 +19738,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } /* Finally, we unmap the grant from kernel space. */ -@@ -819,7 +819,7 @@ static pte_t gntdev_clear_pte(struct vm_ +@@ -800,7 +800,7 @@ static pte_t gntdev_clear_pte(struct vm_ INVALID_P2M_ENTRY); } else { @@ -20210,8 +19747,19 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } return copy; ---- head-2010-04-29.orig/drivers/xen/scsifront/scsifront.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/scsifront/scsifront.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/pciback/pci_stub.c 2011-03-11 10:55:55.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/pci_stub.c 2011-01-31 18:01:51.000000000 +0100 +@@ -493,7 +493,7 @@ static void pcistub_remove(struct pci_de + } + } + +-static const struct pci_device_id pcistub_ids[] = { ++static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = { + { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, +--- head-2011-03-11.orig/drivers/xen/scsifront/scsifront.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/scsifront/scsifront.c 2011-01-31 18:01:51.000000000 +0100 @@ -260,19 +260,19 @@ static int map_data_for_request(struct v return -ENOMEM; } @@ -20227,7 +19775,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - nr_pages = (sc->request_bufflen + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; + nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; if (nr_pages > VSCSIIF_SG_TABLESIZE) { - printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); + pr_err("scsifront: Unable to map request_buffer for command!\n"); ref_cnt = (-E2BIG); goto big_to_sg; } @@ -20283,8 +19831,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } big_to_sg: ---- head-2010-04-29.orig/drivers/xen/usbfront/usbfront-dbg.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/drivers/xen/usbfront/usbfront-dbg.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/usbfront/usbfront-dbg.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/usbfront/usbfront-dbg.c 2011-01-31 18:01:51.000000000 +0100 @@ -43,17 +43,16 @@ * DEALINGS IN THE SOFTWARE. 
*/ @@ -20319,8 +19867,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - if (class_device_create_file(cldev, &class_device_attr_statistics)) + struct device *dev = info_to_hcd(info)->self.controller; + if (device_create_file(dev, &dev_attr_statistics)) - printk(KERN_WARNING "statistics file not created for %s\n", - info_to_hcd(info)->self.bus_name); + pr_warning("statistics file not created for %s\n", + info_to_hcd(info)->self.bus_name); } static inline void remove_debug_file(struct usbfront_info *info) @@ -20330,9 +19878,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + struct device *dev = info_to_hcd(info)->self.controller; + device_remove_file(dev, &dev_attr_statistics); } ---- head-2010-04-29.orig/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:10:37.000000000 +0100 -@@ -78,7 +78,7 @@ static int xenoprof_resume(struct sys_de +--- head-2011-03-11.orig/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 18:01:51.000000000 +0100 +@@ -77,7 +77,7 @@ static int xenoprof_resume(struct sys_de static struct sysdev_class oprofile_sysclass = { @@ -20341,9 +19889,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches .resume = xenoprof_resume, .suspend = xenoprof_suspend }; ---- head-2010-04-29.orig/arch/x86/include/asm/e820.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/e820.h 2010-03-24 15:10:37.000000000 +0100 -@@ -61,7 +61,11 @@ struct e820map { +--- head-2011-03-11.orig/arch/x86/include/asm/e820.h 2011-03-15 16:52:08.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/e820.h 2011-01-31 18:01:51.000000000 +0100 +@@ -66,7 +66,11 @@ struct e820map { struct e820entry map[E820_X_MAX]; }; @@ -20355,8 +19903,22 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define ISA_END_ADDRESS 0x100000 #define BIOS_BEGIN 0x000a0000 ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/asm/hardirq.h 2011-03-15 16:52:08.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/hardirq.h 2011-01-31 18:01:51.000000000 +0100 +@@ -18,7 +18,11 @@ typedef struct { + #ifdef CONFIG_SMP + unsigned int irq_resched_count; + unsigned int irq_call_count; ++#ifndef CONFIG_XEN + unsigned int irq_tlb_count; ++#else ++ unsigned int irq_lock_count; ++#endif + #endif + #ifdef CONFIG_X86_THERMAL_VECTOR + unsigned int irq_thermal_count; +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/agp.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/agp.h 2011-01-31 18:01:51.000000000 +0100 @@ -13,18 +13,13 @@ * page. This avoids data corruption on some CPUs. */ @@ -20378,8 +19940,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * Could use CLFLUSH here if the cpu supports it. 
But then it would ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/desc.h 2011-01-31 18:01:51.000000000 +0100 @@ -1,5 +1,404 @@ +#ifndef _ASM_DESC_H_ +#define _ASM_DESC_H_ @@ -20787,7 +20349,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif /* __ASSEMBLY__ */ + #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/desc_32.h 2010-03-24 15:09:15.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc_32.h 2011-01-31 17:32:29.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,262 +0,0 @@ -#ifndef __ARCH_DESC_H @@ -21052,7 +20614,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#endif /* !__ASSEMBLY__ */ - -#endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/desc_64.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc_64.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -/* Written 2000 by Andi Kleen */ @@ -21283,8 +20845,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#endif /* !__ASSEMBLY__ */ - -#endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 18:01:51.000000000 +0100 @@ -64,7 +64,7 @@ enum fixed_addresses { #endif #ifdef CONFIG_X86_VISWS_APIC @@ -21331,8 +20893,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches __end_of_fixed_addresses }; ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 18:01:51.000000000 +0100 @@ -15,6 +15,7 @@ #include #include @@ -21379,8 +20941,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches __end_of_fixed_addresses }; ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:04:33.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 18:01:51.000000000 +0100 @@ -37,11 +37,6 @@ extern pte_t *pkmap_page_table; * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. 
@@ -21409,8 +20971,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void *kmap(struct page *page); void kunmap(struct page *page); ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 18:01:51.000000000 +0100 @@ -271,6 +271,25 @@ HYPERVISOR_poll( return rc; } @@ -21444,8 +21006,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#define uvm_multi(cpumask) ((unsigned long)cpus_addr(cpumask) | UVMF_MULTI) + #endif /* __HYPERVISOR_H__ */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irqflags.h 2011-01-31 18:01:51.000000000 +0100 @@ -1,5 +1,249 @@ -#ifdef CONFIG_X86_32 -# include "irqflags_32.h" @@ -21699,7 +21261,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + +#endif /* __ASSEMBLY__ */ #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags_32.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,214 +0,0 @@ -/* @@ -21916,7 +21478,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#endif - -#endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/irqflags_64.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags_64.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,178 +0,0 @@ -/* @@ -22097,17 +21659,298 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#endif - -#endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/maddr_32.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/maddr_32.h 2010-03-24 15:10:37.000000000 +0100 -@@ -1,6 +1,7 @@ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/maddr.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/maddr.h 2011-01-31 18:01:51.000000000 +0100 +@@ -1,5 +1,155 @@ ++#ifndef _X86_MADDR_H ++#define _X86_MADDR_H ++ ++#include ++#include ++#include ++#include ++ ++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ ++#define INVALID_P2M_ENTRY (~0UL) ++#define FOREIGN_FRAME_BIT (1UL << (BITS_PER_LONG - 1)) ++#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) ++ ++/* Definitions for machine and pseudophysical addresses. 
*/ ++#ifdef CONFIG_X86_PAE ++typedef unsigned long long paddr_t; ++typedef unsigned long long maddr_t; ++#else ++typedef unsigned long paddr_t; ++typedef unsigned long maddr_t; ++#endif ++ ++#ifdef CONFIG_XEN ++ ++extern unsigned long *phys_to_machine_mapping; ++extern unsigned long max_mapnr; ++ ++#undef machine_to_phys_mapping ++extern unsigned long *machine_to_phys_mapping; ++extern unsigned int machine_to_phys_order; ++ ++static inline unsigned long pfn_to_mfn(unsigned long pfn) ++{ ++ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) ++ return pfn; ++ if (likely(max_mapnr)) ++ BUG_ON(pfn >= max_mapnr); ++ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; ++} ++ ++static inline int phys_to_machine_mapping_valid(unsigned long pfn) ++{ ++ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) ++ return 1; ++ if (likely(max_mapnr)) ++ BUG_ON(pfn >= max_mapnr); ++ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); ++} ++ ++static inline unsigned long mfn_to_pfn(unsigned long mfn) ++{ ++ unsigned long pfn; ++ ++ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) ++ return mfn; ++ ++ if (unlikely((mfn >> machine_to_phys_order) != 0)) ++ return max_mapnr; ++ ++ /* The array access can fail (e.g., device space beyond end of RAM). */ ++ asm ( ++ "1: "_ASM_MOV_UL" %1,%0\n" ++ "2:\n" ++ ".section .fixup,\"ax\"\n" ++ "3: "_ASM_MOV_UL" %2,%0\n" ++ " jmp 2b\n" ++ ".previous\n" ++ _ASM_EXTABLE(1b,3b) ++ : "=r" (pfn) ++ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); ++ ++ return pfn; ++} ++ ++/* ++ * We detect special mappings in one of two ways: ++ * 1. If the MFN is an I/O page then Xen will set the m2p entry ++ * to be outside our maximum possible pseudophys range. ++ * 2. If the MFN belongs to a different domain then we will certainly ++ * not have MFN in our p2m table. Conversely, if the page is ours, ++ * then we'll have p2m(m2p(MFN))==MFN. ++ * If we detect a special mapping then it doesn't have a 'struct page'. ++ * We force !pfn_valid() by returning an out-of-range pointer. ++ * ++ * NB. These checks require that, for any MFN that is not in our reservation, ++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if ++ * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. ++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. ++ * ++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must* ++ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we ++ * require. In all the cases we care about, the FOREIGN_FRAME bit is ++ * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
++ */ ++static inline unsigned long mfn_to_local_pfn(phys_addr_t mfn) ++{ ++ unsigned long pfn = mfn_to_pfn(mfn); ++ if (likely(pfn < max_mapnr) ++ && likely(!xen_feature(XENFEAT_auto_translated_physmap)) ++ && unlikely(phys_to_machine_mapping[pfn] != mfn)) ++ return max_mapnr; /* force !pfn_valid() */ ++ return pfn; ++} ++ ++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) ++{ ++ if (likely(max_mapnr)) ++ BUG_ON(pfn >= max_mapnr); ++ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { ++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); ++ return; ++ } ++ phys_to_machine_mapping[pfn] = mfn; ++} ++ ++static inline maddr_t phys_to_machine(paddr_t phys) ++{ ++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); ++ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); ++ return machine; ++} ++ ++static inline paddr_t machine_to_phys(maddr_t machine) ++{ ++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); ++ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); ++ return phys; ++} ++ + #ifdef CONFIG_X86_32 + # include "maddr_32.h" + #else + # include "maddr_64.h" + #endif ++ ++#else /* !CONFIG_XEN */ ++ ++#define pfn_to_mfn(pfn) (pfn) ++#define mfn_to_pfn(mfn) (mfn) ++#define mfn_to_local_pfn(mfn) (mfn) ++#define set_phys_to_machine(pfn, mfn) ((void)0) ++#define phys_to_machine_mapping_valid(pfn) 1 ++#define phys_to_machine(phys) ((maddr_t)(phys)) ++#define machine_to_phys(mach) ((paddr_t)(mach)) ++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) ++#define __pte_ma(x) __pte(x) ++ ++#endif /* !CONFIG_XEN */ ++ ++/* VIRT <-> MACHINE conversion */ ++#define virt_to_machine(v) phys_to_machine(__pa(v)) ++#define virt_to_mfn(v) pfn_to_mfn(__pa(v) >> PAGE_SHIFT) ++#define mfn_to_virt(m) __va(mfn_to_pfn(m) << PAGE_SHIFT) ++ ++#endif /* _X86_MADDR_H */ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/maddr_32.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/maddr_32.h 2011-01-31 18:01:51.000000000 +0100 +@@ -1,130 +1,6 @@ #ifndef _I386_MADDR_H #define _I386_MADDR_H -+#include - #include - #include - -@@ -151,25 +152,9 @@ static inline paddr_t pte_machine_to_phy +-#include +-#include +- +-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ +-#define INVALID_P2M_ENTRY (~0UL) +-#define FOREIGN_FRAME_BIT (1UL<<31) +-#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) +- +-/* Definitions for machine and pseudophysical addresses. 
*/ +-#ifdef CONFIG_X86_PAE +-typedef unsigned long long paddr_t; +-typedef unsigned long long maddr_t; +-#else +-typedef unsigned long paddr_t; +-typedef unsigned long maddr_t; +-#endif +- +-#ifdef CONFIG_XEN +- +-extern unsigned long *phys_to_machine_mapping; +-extern unsigned long max_mapnr; +- +-#undef machine_to_phys_mapping +-extern unsigned long *machine_to_phys_mapping; +-extern unsigned int machine_to_phys_order; +- +-static inline unsigned long pfn_to_mfn(unsigned long pfn) +-{ +- if (xen_feature(XENFEAT_auto_translated_physmap)) +- return pfn; +- BUG_ON(max_mapnr && pfn >= max_mapnr); +- return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; +-} +- +-static inline int phys_to_machine_mapping_valid(unsigned long pfn) +-{ +- if (xen_feature(XENFEAT_auto_translated_physmap)) +- return 1; +- BUG_ON(max_mapnr && pfn >= max_mapnr); +- return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); +-} +- +-static inline unsigned long mfn_to_pfn(unsigned long mfn) +-{ +- unsigned long pfn; +- +- if (xen_feature(XENFEAT_auto_translated_physmap)) +- return mfn; +- +- if (unlikely((mfn >> machine_to_phys_order) != 0)) +- return max_mapnr; +- +- /* The array access can fail (e.g., device space beyond end of RAM). */ +- asm ( +- "1: movl %1,%0\n" +- "2:\n" +- ".section .fixup,\"ax\"\n" +- "3: movl %2,%0\n" +- " jmp 2b\n" +- ".previous\n" +- ".section __ex_table,\"a\"\n" +- " .align 4\n" +- " .long 1b,3b\n" +- ".previous" +- : "=r" (pfn) +- : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); +- +- return pfn; +-} +- +-/* +- * We detect special mappings in one of two ways: +- * 1. If the MFN is an I/O page then Xen will set the m2p entry +- * to be outside our maximum possible pseudophys range. +- * 2. If the MFN belongs to a different domain then we will certainly +- * not have MFN in our p2m table. Conversely, if the page is ours, +- * then we'll have p2m(m2p(MFN))==MFN. +- * If we detect a special mapping then it doesn't have a 'struct page'. +- * We force !pfn_valid() by returning an out-of-range pointer. +- * +- * NB. These checks require that, for any MFN that is not in our reservation, +- * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if +- * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. +- * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. +- * +- * NB2. When deliberately mapping foreign pages into the p2m table, you *must* +- * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we +- * require. In all the cases we care about, the FOREIGN_FRAME bit is +- * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
+- */ +-static inline unsigned long mfn_to_local_pfn(unsigned long mfn) +-{ +- unsigned long pfn = mfn_to_pfn(mfn); +- if ((pfn < max_mapnr) +- && !xen_feature(XENFEAT_auto_translated_physmap) +- && (phys_to_machine_mapping[pfn] != mfn)) +- return max_mapnr; /* force !pfn_valid() */ +- return pfn; +-} +- +-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) +-{ +- BUG_ON(max_mapnr && pfn >= max_mapnr); +- if (xen_feature(XENFEAT_auto_translated_physmap)) { +- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); +- return; +- } +- phys_to_machine_mapping[pfn] = mfn; +-} +- +-static inline maddr_t phys_to_machine(paddr_t phys) +-{ +- maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); +- machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); +- return machine; +-} +- +-static inline paddr_t machine_to_phys(maddr_t machine) +-{ +- paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); +- phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); +- return phys; +-} +- + #ifdef CONFIG_X86_PAE + static inline paddr_t pte_phys_to_machine(paddr_t phys) + { +@@ -151,44 +27,9 @@ static inline paddr_t pte_machine_to_phy phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } @@ -22134,93 +21977,183 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#define pte_machine_to_phys machine_to_phys #endif - #else /* !CONFIG_XEN */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/maddr_64.h 2007-06-12 13:14:13.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/maddr_64.h 2010-03-24 15:10:37.000000000 +0100 -@@ -1,6 +1,7 @@ +-#else /* !CONFIG_XEN */ +- +-#define pfn_to_mfn(pfn) (pfn) +-#define mfn_to_pfn(mfn) (mfn) +-#define mfn_to_local_pfn(mfn) (mfn) +-#define set_phys_to_machine(pfn, mfn) ((void)0) +-#define phys_to_machine_mapping_valid(pfn) (1) +-#define phys_to_machine(phys) ((maddr_t)(phys)) +-#define machine_to_phys(mach) ((paddr_t)(mach)) +-#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) +-#define __pte_ma(x) __pte(x) +- +-#endif /* !CONFIG_XEN */ +- +-/* VIRT <-> MACHINE conversion */ +-#define virt_to_machine(v) (phys_to_machine(__pa(v))) +-#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) +-#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) +- + #endif /* _I386_MADDR_H */ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/maddr_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/maddr_64.h 2011-01-31 18:01:51.000000000 +0100 +@@ -1,124 +1,6 @@ #ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H -+#include - #include - #include - -@@ -16,6 +17,7 @@ typedef unsigned long maddr_t; - #ifdef CONFIG_XEN - - extern unsigned long *phys_to_machine_mapping; -+extern unsigned long max_mapnr; - - #undef machine_to_phys_mapping - extern unsigned long *machine_to_phys_mapping; -@@ -25,7 +27,7 @@ static inline unsigned long pfn_to_mfn(u - { - if (xen_feature(XENFEAT_auto_translated_physmap)) - return pfn; +-#include +-#include +- +-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ +-#define INVALID_P2M_ENTRY (~0UL) +-#define FOREIGN_FRAME_BIT (1UL<<63) +-#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) +- +-/* Definitions for machine and pseudophysical addresses. 
*/ +-typedef unsigned long paddr_t; +-typedef unsigned long maddr_t; +- +-#ifdef CONFIG_XEN +- +-extern unsigned long *phys_to_machine_mapping; +- +-#undef machine_to_phys_mapping +-extern unsigned long *machine_to_phys_mapping; +-extern unsigned int machine_to_phys_order; +- +-static inline unsigned long pfn_to_mfn(unsigned long pfn) +-{ +- if (xen_feature(XENFEAT_auto_translated_physmap)) +- return pfn; - BUG_ON(end_pfn && pfn >= end_pfn); -+ BUG_ON(max_mapnr && pfn >= max_mapnr); - return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; - } - -@@ -33,7 +35,7 @@ static inline int phys_to_machine_mappin - { - if (xen_feature(XENFEAT_auto_translated_physmap)) - return 1; +- return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; +-} +- +-static inline int phys_to_machine_mapping_valid(unsigned long pfn) +-{ +- if (xen_feature(XENFEAT_auto_translated_physmap)) +- return 1; - BUG_ON(end_pfn && pfn >= end_pfn); -+ BUG_ON(max_mapnr && pfn >= max_mapnr); - return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); - } - -@@ -45,7 +47,7 @@ static inline unsigned long mfn_to_pfn(u - return mfn; - - if (unlikely((mfn >> machine_to_phys_order) != 0)) +- return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); +-} +- +-static inline unsigned long mfn_to_pfn(unsigned long mfn) +-{ +- unsigned long pfn; +- +- if (xen_feature(XENFEAT_auto_translated_physmap)) +- return mfn; +- +- if (unlikely((mfn >> machine_to_phys_order) != 0)) - return end_pfn; -+ return max_mapnr; - - /* The array access can fail (e.g., device space beyond end of RAM). */ - asm ( -@@ -60,7 +62,7 @@ static inline unsigned long mfn_to_pfn(u - " .quad 1b,3b\n" - ".previous" - : "=r" (pfn) +- +- /* The array access can fail (e.g., device space beyond end of RAM). */ +- asm ( +- "1: movq %1,%0\n" +- "2:\n" +- ".section .fixup,\"ax\"\n" +- "3: movq %2,%0\n" +- " jmp 2b\n" +- ".previous\n" +- ".section __ex_table,\"a\"\n" +- " .align 8\n" +- " .quad 1b,3b\n" +- ".previous" +- : "=r" (pfn) - : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) ); -+ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); - - return pfn; - } -@@ -88,16 +90,16 @@ static inline unsigned long mfn_to_pfn(u - static inline unsigned long mfn_to_local_pfn(unsigned long mfn) - { - unsigned long pfn = mfn_to_pfn(mfn); +- +- return pfn; +-} +- +-/* +- * We detect special mappings in one of two ways: +- * 1. If the MFN is an I/O page then Xen will set the m2p entry +- * to be outside our maximum possible pseudophys range. +- * 2. If the MFN belongs to a different domain then we will certainly +- * not have MFN in our p2m table. Conversely, if the page is ours, +- * then we'll have p2m(m2p(MFN))==MFN. +- * If we detect a special mapping then it doesn't have a 'struct page'. +- * We force !pfn_valid() by returning an out-of-range pointer. +- * +- * NB. These checks require that, for any MFN that is not in our reservation, +- * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if +- * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. +- * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. +- * +- * NB2. When deliberately mapping foreign pages into the p2m table, you *must* +- * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we +- * require. In all the cases we care about, the FOREIGN_FRAME bit is +- * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
+- */ +-static inline unsigned long mfn_to_local_pfn(unsigned long mfn) +-{ +- unsigned long pfn = mfn_to_pfn(mfn); - if ((pfn < end_pfn) -+ if ((pfn < max_mapnr) - && !xen_feature(XENFEAT_auto_translated_physmap) - && (phys_to_machine_mapping[pfn] != mfn)) +- && !xen_feature(XENFEAT_auto_translated_physmap) +- && (phys_to_machine_mapping[pfn] != mfn)) - return end_pfn; /* force !pfn_valid() */ -+ return max_mapnr; /* force !pfn_valid() */ - return pfn; - } - - static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) - { +- return pfn; +-} +- +-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) +-{ - BUG_ON(end_pfn && pfn >= end_pfn); -+ BUG_ON(max_mapnr && pfn >= max_mapnr); - if (xen_feature(XENFEAT_auto_translated_physmap)) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return; -@@ -135,9 +137,6 @@ static inline paddr_t pte_machine_to_phy +- if (xen_feature(XENFEAT_auto_translated_physmap)) { +- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); +- return; +- } +- phys_to_machine_mapping[pfn] = mfn; +-} +- +-static inline maddr_t phys_to_machine(paddr_t phys) +-{ +- maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); +- machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); +- return machine; +-} +- +-static inline paddr_t machine_to_phys(maddr_t machine) +-{ +- paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); +- phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); +- return phys; +-} +- + static inline paddr_t pte_phys_to_machine(paddr_t phys) + { + maddr_t machine; +@@ -135,27 +17,5 @@ static inline paddr_t pte_machine_to_phy return phys; } -#define __pte_ma(x) ((pte_t) { (x) } ) -#define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask) - - #else /* !CONFIG_XEN */ +-#else /* !CONFIG_XEN */ +- +-#define pfn_to_mfn(pfn) (pfn) +-#define mfn_to_pfn(mfn) (mfn) +-#define mfn_to_local_pfn(mfn) (mfn) +-#define set_phys_to_machine(pfn, mfn) ((void)0) +-#define phys_to_machine_mapping_valid(pfn) (1) +-#define phys_to_machine(phys) ((maddr_t)(phys)) +-#define machine_to_phys(mach) ((paddr_t)(mach)) +-#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) +-#define __pte_ma(x) __pte(x) +- +-#endif /* !CONFIG_XEN */ +- +-/* VIRT <-> MACHINE conversion */ +-#define virt_to_machine(v) (phys_to_machine(__pa(v))) +-#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) +-#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) +- + #endif /* _X86_64_MADDR_H */ - #define pfn_to_mfn(pfn) (pfn) ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 18:01:51.000000000 +0100 @@ -51,8 +51,6 @@ static inline void __prepare_arch_switch : : "r" (0) ); } @@ -22230,8 +22163,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 17:49:31.000000000 +0100 
++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 18:01:51.000000000 +0100 @@ -62,12 +62,6 @@ extern void mm_pin(struct mm_struct *mm) extern void mm_unpin(struct mm_struct *mm); void mm_pin_all(void); @@ -22272,8 +22205,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } } #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pci.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pci.h 2011-01-31 18:01:51.000000000 +0100 @@ -71,6 +71,7 @@ extern int pci_mmap_page_range(struct pc @@ -22314,8 +22247,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 18:01:51.000000000 +0100 @@ -3,69 +3,109 @@ #include @@ -22464,8 +22397,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif /* CONFIG_X86_PAE */ #endif /* _I386_PGALLOC_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 18:01:51.000000000 +0100 @@ -6,30 +6,13 @@ #include #include /* for phys_to_virt and page_to_pseudophys */ @@ -22501,7 +22434,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { -@@ -63,53 +46,58 @@ static inline void pgd_populate(struct m +@@ -63,56 +46,61 @@ static inline void pgd_populate(struct m } } @@ -22554,11 +22487,14 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + __pmd_free(virt_to_page(pud)); } - static inline void pgd_list_add(pgd_t *pgd) + static inline void pgd_list_add(pgd_t *pgd, struct mm_struct *mm) { struct page *page = virt_to_page(pgd); + unsigned long flags; + /* Store a back link for vmalloc_sync_all(). 
*/ + set_page_private(page, (unsigned long)mm); + - spin_lock(&pgd_lock); + spin_lock_irqsave(&pgd_lock, flags); list_add(&page->lru, &pgd_list); @@ -22579,16 +22515,16 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches } extern void pgd_test_and_unpin(pgd_t *); -@@ -145,7 +133,7 @@ static inline pgd_t *pgd_alloc(struct mm +@@ -148,7 +136,7 @@ static inline pgd_t *pgd_alloc(struct mm return pgd; } -static inline void pgd_free(pgd_t *pgd) +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - pgd_test_and_unpin(pgd); pgd_list_del(pgd); -@@ -161,17 +149,30 @@ static inline pte_t *pte_alloc_one_kerne + pgd_test_and_unpin(pgd); +@@ -164,17 +152,30 @@ static inline pte_t *pte_alloc_one_kerne return pte; } @@ -22621,9 +22557,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) #define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:10:37.000000000 +0100 -@@ -1,5 +1,467 @@ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable.h 2011-01-31 18:01:51.000000000 +0100 +@@ -1,5 +1,464 @@ +#ifndef _ASM_X86_PGTABLE_H +#define _ASM_X86_PGTABLE_H + @@ -22811,9 +22747,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + pgprot_val(pgprot)) & __supported_pte_mask); +} + -+static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) ++static inline pte_t pfn_pte_ma(phys_addr_t page_nr, pgprot_t pgprot) +{ -+ return __pte_ma((((phys_addr_t)page_nr << PAGE_SHIFT) | ++ return __pte_ma(((page_nr << PAGE_SHIFT) | + pgprot_val(pgprot)) & __supported_pte_mask); +} + @@ -23068,7 +23004,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + +int direct_remap_pfn_range(struct vm_area_struct *vma, + unsigned long address, -+ unsigned long mfn, ++ phys_addr_t mfn, + unsigned long size, + pgprot_t prot, + domid_t domid); @@ -23080,9 +23016,6 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +int create_lookup_pte_addr(struct mm_struct *mm, + unsigned long address, + uint64_t *ptep); -+int touch_pte_range(struct mm_struct *mm, -+ unsigned long address, -+ unsigned long size); + +int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, unsigned long end, pgprot_t newprot, @@ -23091,8 +23024,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_X86_PGTABLE_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 18:01:51.000000000 +0100 @@ -18,16 +18,18 @@ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT) @@ -23257,8 +23190,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high 
= (x).val } }) #endif /* _I386_PGTABLE_3LEVEL_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:52.000000000 +0100 @@ -1,8 +1,6 @@ #ifndef _I386_PGTABLE_H #define _I386_PGTABLE_H @@ -23642,7 +23575,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) -@@ -496,72 +210,22 @@ extern pte_t *lookup_address(unsigned lo +@@ -496,69 +210,22 @@ extern pte_t *lookup_address(unsigned lo */ #define update_mmu_cache(vma,address,pte) do { } while (0) @@ -23702,9 +23635,6 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -int create_lookup_pte_addr(struct mm_struct *mm, - unsigned long address, - uint64_t *ptep); --int touch_pte_range(struct mm_struct *mm, -- unsigned long address, -- unsigned long size); - -int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, unsigned long end, pgprot_t newprot, @@ -23722,8 +23652,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#include - #endif /* _I386_PGTABLE_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:47.000000000 +0100 @@ -13,49 +13,26 @@ #include #include @@ -24149,7 +24079,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ -@@ -456,101 +260,21 @@ static inline pte_t pte_modify(pte_t pte +@@ -456,97 +260,21 @@ static inline pte_t pte_modify(pte_t pte #define update_mmu_cache(vma,address,pte) do { } while (0) @@ -24232,10 +24162,6 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - unsigned long address, - uint64_t *ptep); - --int touch_pte_range(struct mm_struct *mm, -- unsigned long address, -- unsigned long size); -- -int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, unsigned long end, pgprot_t newprot, - int dirty_accountable); @@ -24254,7 +24180,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define pgtable_cache_init() do { } while (0) #define check_pgt_cache() do { } while (0) -@@ -563,13 +287,7 @@ pte_t *lookup_address(unsigned long addr +@@ -559,13 +287,7 @@ pte_t *lookup_address(unsigned long addr #define kc_offset_to_vaddr(o) \ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? 
((o) | (~__VIRTUAL_MASK)) : (o)) @@ -24268,9 +24194,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #endif /* !__ASSEMBLY__ */ #endif /* _X86_64_PGTABLE_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:10:37.000000000 +0100 -@@ -1,5 +1,789 @@ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/processor.h 2011-01-31 18:01:51.000000000 +0100 +@@ -1,5 +1,795 @@ +#ifndef __ASM_X86_PROCESSOR_H +#define __ASM_X86_PROCESSOR_H + @@ -24347,8 +24273,10 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + /* number of 4K pages in DTLB/ITLB combined(in pages)*/ + int x86_tlbsize; + __u8 x86_virt_bits, x86_phys_bits; ++#ifndef CONFIG_XEN + /* cpuid returned core id bits */ + __u8 x86_coreid_bits; ++#endif + /* Max extended CPUID function supported */ + __u32 extended_cpuid_level; +#endif @@ -24361,16 +24289,20 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + int x86_cache_alignment; /* In bytes */ + int x86_power; + unsigned long loops_per_jiffy; ++#ifndef CONFIG_XEN +#ifdef CONFIG_SMP + cpumask_t llc_shared_map; /* cpus sharing the last level cache */ +#endif + u16 x86_max_cores; /* cpuid returned max cores value */ + u16 apicid; ++#endif + u16 x86_clflush_size; -+#ifdef CONFIG_SMP ++#ifdef CONFIG_X86_HT + u16 booted_cores; /* number of cores as seen by OS */ + u16 phys_proc_id; /* Physical processor id. */ + u16 cpu_core_id; /* Core id */ ++#endif ++#ifdef CONFIG_SMP + u16 cpu_index; /* index into per_cpu list */ +#endif +} __attribute__((__aligned__(SMP_CACHE_BYTES))); @@ -24676,7 +24608,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif + +#define __cpuid xen_cpuid -+#define paravirt_enabled() 0 ++#define paravirt_enabled() 1 + +/* + * These special macros can be used to get or set a debugging register @@ -25062,9 +24994,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#define KSTK_ESP(task) (task_pt_regs(task)->sp) + #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor_32.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,751 +0,0 @@ +@@ -1,755 +0,0 @@ -/* - * include/asm-i386/processor.h - * @@ -25137,16 +25069,20 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - char pad0; - int x86_power; - unsigned long loops_per_jiffy; +-#ifndef CONFIG_XEN -#ifdef CONFIG_SMP - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ -#endif - unsigned char x86_max_cores; /* cpuid returned max cores value */ - unsigned char apicid; +-#endif - unsigned short x86_clflush_size; --#ifdef CONFIG_SMP +-#ifdef CONFIG_X86_HT - unsigned char booted_cores; /* number of cores as seen by OS */ - __u8 phys_proc_id; /* Physical processor id. 
*/ - __u8 cpu_core_id; /* Core id */ +-#endif +-#ifdef CONFIG_SMP - __u8 cpu_index; /* index into per_cpu list */ -#endif -} __attribute__((__aligned__(SMP_CACHE_BYTES))); @@ -25613,7 +25549,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -} - - --#define paravirt_enabled() 0 +-#define paravirt_enabled() 1 -#define __cpuid xen_cpuid - -#define load_esp0 xen_load_esp0 @@ -25816,9 +25752,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -extern int force_mwait; - -#endif /* __ASM_I386_PROCESSOR_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor_64.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,461 +0,0 @@ +@@ -1,467 +0,0 @@ -/* - * include/asm-x86_64/processor.h - * @@ -25883,18 +25819,24 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - int x86_cache_alignment; - int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ - __u8 x86_virt_bits, x86_phys_bits; +-#ifndef CONFIG_XEN - __u8 x86_max_cores; /* cpuid returned max cores value */ +-#endif - __u32 x86_power; - __u32 extended_cpuid_level; /* Max extended CPUID function supported */ - unsigned long loops_per_jiffy; --#ifdef CONFIG_SMP +-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ -#endif +-#ifndef CONFIG_XEN - __u8 apicid; --#ifdef CONFIG_SMP +-#endif +-#ifdef CONFIG_X86_HT - __u8 booted_cores; /* number of cores as seen by OS */ - __u8 phys_proc_id; /* Physical Processor id. */ - __u8 cpu_core_id; /* Core id. */ +-#endif +-#ifdef CONFIG_SMP - __u8 cpu_index; /* index into per_cpu list */ -#endif -} ____cacheline_aligned; @@ -26280,9 +26222,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 - -#endif /* __ASM_X86_64_PROCESSOR_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:10:37.000000000 +0100 -@@ -1,56 +1,51 @@ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 18:01:51.000000000 +0100 +@@ -1,55 +1,50 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H @@ -26319,7 +26261,6 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -#define BAD_APICID 0xFFu -#ifdef CONFIG_SMP -#ifndef __ASSEMBLY__ -+extern int smp_num_siblings; +extern unsigned int num_processors; -/* @@ -26328,9 +26269,6 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - extern void smp_alloc_memory(void); -extern int pic_mode; --extern int smp_num_siblings; --DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); --DECLARE_PER_CPU(cpumask_t, cpu_core_map); +extern void lock_ipi_call_lock(void); +extern void unlock_ipi_call_lock(void); @@ -26339,15 +26277,15 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); + #ifndef CONFIG_XEN -#define MAX_APICID 256 -extern u8 __initdata x86_cpu_to_apicid_init[]; -extern void *x86_cpu_to_apicid_ptr; -+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -+DECLARE_PER_CPU(cpumask_t, cpu_core_map); 
+DECLARE_PER_CPU(u8, cpu_llc_id); DECLARE_PER_CPU(u8, x86_cpu_to_apicid); + #endif --#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +-#define cpu_physical_id(cpu) (cpu) - #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); @@ -26364,7 +26302,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches struct smp_ops { void (*smp_prepare_boot_cpu)(void); -@@ -104,11 +99,7 @@ void native_smp_prepare_cpus(unsigned in +@@ -103,11 +98,7 @@ void native_smp_prepare_cpus(unsigned in int native_cpu_up(unsigned int cpunum); void native_smp_cpus_done(unsigned int max_cpus); @@ -26377,7 +26315,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void xen_smp_send_stop(void); void xen_smp_send_reschedule(int cpu); -@@ -120,7 +111,12 @@ int xen_smp_call_function_mask(cpumask_t +@@ -119,7 +110,12 @@ int xen_smp_call_function_mask(cpumask_t #define smp_send_reschedule xen_smp_send_reschedule #define smp_call_function_mask xen_smp_call_function_mask @@ -26391,13 +26329,13 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * This function is needed by all SMP systems. It must _always_ be valid -@@ -130,64 +126,49 @@ int xen_smp_call_function_mask(cpumask_t +@@ -129,64 +125,49 @@ int xen_smp_call_function_mask(cpumask_t DECLARE_PER_CPU(int, cpu_number); #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) -extern cpumask_t cpu_possible_map; -#define cpu_callin_map cpu_possible_map -+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) ++#define cpu_physical_id(cpu) (cpu) + +#define safe_smp_processor_id() smp_processor_id() @@ -26419,7 +26357,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #else /* CONFIG_SMP */ #define safe_smp_processor_id() 0 - #define cpu_physical_id(cpu) boot_cpu_physical_apicid + #define cpu_physical_id(cpu) 0 -#define NO_PROC_ID 0xFF /* No processor magic marker */ - @@ -26476,9 +26414,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - +#endif /* !ASSEMBLY */ #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:10:37.000000000 +0100 -@@ -1,139 +1,103 @@ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 18:01:51.000000000 +0100 +@@ -1,130 +1,101 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H @@ -26520,41 +26458,32 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - * Private routines/data - */ - -+extern int smp_num_siblings; +extern unsigned int num_processors; + extern void smp_alloc_memory(void); -extern volatile unsigned long smp_invalidate_needed; extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); --extern int smp_num_siblings; -extern void smp_send_reschedule(int cpu); + extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); --/* -- * cpu_sibling_map and cpu_core_map now live -- * in the per cpu area -- * -- * extern cpumask_t cpu_sibling_map[NR_CPUS]; -- * extern cpumask_t cpu_core_map[NR_CPUS]; -- */ - DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); - DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u8, cpu_llc_id); - -#define SMP_TRAMPOLINE_BASE 0x6000 -+DECLARE_PER_CPU(u16, cpu_llc_id); -+DECLARE_PER_CPU(u16, 
x86_cpu_to_apicid); -+DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); - +- -/* - * On x86 all CPUs are mapped 1:1 to the APIC space. - * This simplifies scheduling and IPI sending and - * compresses data structures. - */ -- ++#ifndef CONFIG_XEN ++DECLARE_PER_CPU(u16, cpu_llc_id); ++DECLARE_PER_CPU(u16, x86_cpu_to_apicid); ++DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); ++#endif + -static inline int num_booting_cpus(void) +#ifdef CONFIG_X86_LOCAL_APIC +static inline int cpu_present_to_apicid(int mps_cpu) @@ -26582,10 +26511,11 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches - -#endif /* CONFIG_SMP */ +#define raw_smp_processor_id() read_pda(cpunumber) -+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) ++#define cpu_physical_id(cpu) (cpu) -#define safe_smp_processor_id() smp_processor_id() - +-#ifndef CONFIG_XEN -#ifdef CONFIG_X86_LOCAL_APIC -static inline int hard_smp_processor_id(void) -{ @@ -26622,14 +26552,14 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + return cpus_weight(cpu_possible_map); } -#endif +-#endif /* CONFIG_XEN */ -#ifndef CONFIG_SMP +extern void smp_send_reschedule(int cpu); + +#else /* CONFIG_SMP */ + -+extern unsigned int boot_cpu_id; -+#define cpu_physical_id(cpu) boot_cpu_id ++#define cpu_physical_id(cpu) 0 #define stack_smp_processor_id() 0 -#define cpu_logical_map(x) (x) -#else @@ -26662,15 +26592,14 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #endif -#ifdef CONFIG_SMP --#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +-#define cpu_physical_id(cpu) (cpu) -#else --extern unsigned int boot_cpu_id; --#define cpu_physical_id(cpu) boot_cpu_id +-#define cpu_physical_id(cpu) 0 -#endif /* !CONFIG_SMP */ #endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:10:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock.h 2011-01-31 18:01:51.000000000 +0100 @@ -0,0 +1,341 @@ +#ifndef _X86_SPINLOCK_H_ +#define _X86_SPINLOCK_H_ @@ -27014,7 +26943,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:10:37.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-01-31 18:01:51.000000000 +0100 @@ -0,0 +1,36 @@ +#ifndef __ASM_SPINLOCK_TYPES_H +#define __ASM_SPINLOCK_TYPES_H @@ -27052,8 +26981,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } + +#endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system.h 2011-01-31 18:01:51.000000000 +0100 @@ -1,5 +1,393 @@ +#ifndef _ASM_X86_SYSTEM_H_ +#define _ASM_X86_SYSTEM_H_ @@ -27450,7 +27379,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +} + #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/system_32.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 
00:00:00.000000000 +0000 @@ -1,312 +0,0 @@ -#ifndef __ASM_SYSTEM_H @@ -27765,8 +27694,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -void __show_registers(struct pt_regs *, int all); - -#endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 18:01:51.000000000 +0100 @@ -1,122 +1,9 @@ #ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H @@ -27943,8 +27872,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -extern void free_init_pages(char *what, unsigned long begin, unsigned long end); - #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/tlbflush.h 2011-01-31 18:01:51.000000000 +0100 @@ -1,5 +1,106 @@ +#ifndef _ASM_X86_TLBFLUSH_H +#define _ASM_X86_TLBFLUSH_H @@ -28054,7 +27983,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +} + +#endif /* _ASM_X86_TLBFLUSH_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -#ifndef _I386_TLBFLUSH_H @@ -28156,7 +28085,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -} - -#endif /* _I386_TLBFLUSH_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2010-03-24 15:10:29.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:56:27.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -#ifndef _X8664_TLBFLUSH_H @@ -28256,20 +28185,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches -} - -#endif /* _X8664_TLBFLUSH_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/irq_vectors.h 2008-09-25 13:55:32.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/mach-xen/irq_vectors.h 2010-03-24 15:10:37.000000000 +0100 -@@ -82,7 +82,8 @@ - - #define RESCHEDULE_VECTOR 0 - #define CALL_FUNCTION_VECTOR 1 --#define NR_IPIS 2 -+#define SPIN_UNLOCK_VECTOR 2 -+#define NR_IPIS 3 - - /* - * The maximum number of vectors supported by i386 processors ---- head-2010-04-29.orig/arch/x86/include/asm/mmu.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/asm/mmu.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/asm/mmu.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/mmu.h 2011-01-31 18:01:51.000000000 +0100 @@ -18,7 +18,7 @@ typedef struct { void *vdso; } mm_context_t; @@ -28279,9 +28196,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches void leave_mm(int cpu); #else static inline void leave_mm(int cpu) ---- head-2010-04-29.orig/arch/x86/include/asm/ptrace.h 2010-04-29 09:29:49.000000000 +0200 -+++ 
head-2010-04-29/arch/x86/include/asm/ptrace.h 2010-03-24 15:12:22.000000000 +0100 -@@ -298,7 +298,9 @@ static inline unsigned long regs_get_ker +--- head-2011-03-11.orig/arch/x86/include/asm/ptrace.h 2011-03-15 16:52:08.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/ptrace.h 2011-01-31 18:01:51.000000000 +0100 +@@ -275,7 +275,9 @@ static inline unsigned long regs_get_ker } #define arch_has_single_step() (1) @@ -28292,10 +28209,10 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define arch_has_block_step() (1) #else #define arch_has_block_step() (boot_cpu_data.x86 >= 6) ---- head-2010-04-29.orig/arch/x86/include/asm/thread_info.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/asm/thread_info.h 2010-03-24 15:10:37.000000000 +0100 -@@ -96,6 +96,9 @@ struct thread_info { - #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ +--- head-2011-03-11.orig/arch/x86/include/asm/thread_info.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/thread_info.h 2011-01-31 18:01:51.000000000 +0100 +@@ -95,6 +95,9 @@ struct thread_info { + #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ +#ifdef CONFIG_X86_XEN @@ -28304,17 +28221,17 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) -@@ -119,6 +122,7 @@ struct thread_info { - #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) +@@ -117,6 +120,7 @@ struct thread_info { + #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_CSTAR (1 << TIF_CSTAR) /* work to do in syscall_trace_enter() */ #define _TIF_WORK_SYSCALL_ENTRY \ -@@ -150,12 +154,12 @@ struct thread_info { +@@ -148,12 +152,12 @@ struct thread_info { #define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) + (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) -#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) -#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) @@ -28329,8 +28246,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define PREEMPT_ACTIVE 0x10000000 ---- head-2010-04-29.orig/arch/x86/include/asm/time.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/time.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/asm/time.h 2011-03-15 16:52:08.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/time.h 2011-01-31 18:01:51.000000000 +0100 @@ -7,4 +7,10 @@ extern void hpet_time_init(void); extern void time_init(void); @@ -28342,9 +28259,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches +#endif + #endif /* _ASM_X86_TIME_H */ ---- head-2010-04-29.orig/include/linux/page-flags.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-04-29/include/linux/page-flags.h 2010-03-24 15:10:37.000000000 +0100 -@@ -129,8 +129,8 @@ enum pageflags { +--- head-2011-03-11.orig/include/linux/page-flags.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/include/linux/page-flags.h 2011-01-31 18:01:51.000000000 +0100 +@@ -128,8 +128,8 @@ enum pageflags { #ifdef CONFIG_XEN PG_pinned = PG_locked, /* Cannot alias 
with PG_owner_priv_1 since * bad_page() checks should include this bit. @@ -28355,9 +28272,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #else PG_pinned = PG_owner_priv_1, PG_savepinned = PG_dirty, ---- head-2010-04-29.orig/include/linux/pci.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-04-29/include/linux/pci.h 2010-03-24 15:10:37.000000000 +0100 -@@ -781,6 +781,9 @@ int pci_reset_function(struct pci_dev *d +--- head-2011-03-11.orig/include/linux/pci.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-11/include/linux/pci.h 2011-01-31 18:01:51.000000000 +0100 +@@ -796,6 +796,9 @@ int pci_reset_function(struct pci_dev *d void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int pci_select_bars(struct pci_dev *dev, unsigned long flags); @@ -28367,9 +28284,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* ROM control related routines */ int pci_enable_rom(struct pci_dev *pdev); ---- head-2010-04-29.orig/include/xen/evtchn.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-04-29/include/xen/evtchn.h 2010-03-24 15:10:37.000000000 +0100 -@@ -133,12 +133,37 @@ static inline void clear_evtchn(int port +--- head-2011-03-11.orig/include/xen/evtchn.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/include/xen/evtchn.h 2011-01-31 18:01:51.000000000 +0100 +@@ -133,6 +133,18 @@ static inline void clear_evtchn(int port synch_clear_bit(port, s->evtchn_pending); } @@ -28388,28 +28305,9 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; - VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); - } - -+/* Clear an irq's pending state, in preparation for polling on it. */ -+void xen_clear_irq_pending(int irq); -+ -+/* Set an irq's pending state, to avoid blocking on it. */ -+void xen_set_irq_pending(int irq); -+ -+/* Test an irq's pending state. */ -+int xen_test_irq_pending(int irq); -+ -+/* Poll waiting for an irq to become pending. In the usual case, the -+ irq will be disabled so it won't deliver an interrupt. */ -+void xen_poll_irq(int irq); -+ - /* - * Use these to access the event channel underlying the IRQ handle returned - * by bind_*_to_irqhandler(). 
---- head-2010-04-29.orig/kernel/sysctl_binary.c 2010-04-15 09:55:30.000000000 +0200 -+++ head-2010-04-29/kernel/sysctl_binary.c 2010-04-15 09:55:52.000000000 +0200 -@@ -875,7 +875,7 @@ static const struct bin_table bin_bus_ta +--- head-2011-03-11.orig/kernel/sysctl_binary.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/kernel/sysctl_binary.c 2011-01-31 18:01:51.000000000 +0100 +@@ -874,7 +874,7 @@ static const struct bin_table bin_bus_ta #ifdef CONFIG_XEN @@ -28418,8 +28316,8 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches { CTL_XEN_INDEPENDENT_WALLCLOCK, "independent_wallclock" }, { CTL_XEN_PERMITTED_CLOCK_JITTER, "permitted_clock_jitter" }, {} ---- head-2010-04-29.orig/lib/swiotlb-xen.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/lib/swiotlb-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/lib/swiotlb-xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/lib/swiotlb-xen.c 2011-01-31 18:01:51.000000000 +0100 @@ -30,7 +30,6 @@ #include @@ -28428,7 +28326,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches #define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1))) -@@ -289,6 +288,15 @@ __sync_single(struct phys_addr buffer, c +@@ -297,6 +296,15 @@ __sync_single(struct phys_addr buffer, c } } @@ -28444,7 +28342,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * Allocates bounce buffer and returns its kernel virtual address. */ -@@ -300,6 +308,15 @@ map_single(struct device *hwdev, struct +@@ -308,6 +316,15 @@ map_single(struct device *hwdev, struct unsigned int nslots, stride, index, wrap; struct phys_addr slot_buf; int i; @@ -28460,7 +28358,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * For mappings greater than a page, we limit the stride (and -@@ -319,12 +336,21 @@ map_single(struct device *hwdev, struct +@@ -327,12 +344,21 @@ map_single(struct device *hwdev, struct */ spin_lock_irqsave(&io_tlb_lock, flags); { @@ -28485,7 +28383,7 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches /* * If we find a slot that indicates we have 'nslots' * number of contiguous buffers, we allocate the -@@ -359,6 +385,7 @@ map_single(struct device *hwdev, struct +@@ -367,6 +393,7 @@ map_single(struct device *hwdev, struct index = 0; } while (index != wrap); @@ -28493,10 +28391,10 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches spin_unlock_irqrestore(&io_tlb_lock, flags); return NULL; } ---- head-2010-04-29.orig/mm/memory.c 2010-04-15 09:51:22.000000000 +0200 -+++ head-2010-04-29/mm/memory.c 2010-04-15 09:55:57.000000000 +0200 -@@ -2117,6 +2117,10 @@ int apply_to_page_range(struct mm_struct - unsigned long start = addr, end = addr + size; +--- head-2011-03-11.orig/mm/memory.c 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-03-11/mm/memory.c 2011-01-31 18:01:51.000000000 +0100 +@@ -2223,6 +2223,10 @@ int apply_to_page_range(struct mm_struct + unsigned long end = addr + size; int err; +#ifdef CONFIG_XEN @@ -28504,5 +28402,5 @@ Automatically created from "patches.kernel.org/patch-2.6.25" by xen-port-patches + mm = &init_mm; +#endif BUG_ON(addr >= end); - mmu_notifier_invalidate_range_start(mm, start, end); pgd = pgd_offset(mm, addr); + do { diff --git a/patches.xen/xen3-patch-2.6.26 b/patches.xen/xen3-patch-2.6.26 index 94c980c..d72349d 100644 --- a/patches.xen/xen3-patch-2.6.26 +++ b/patches.xen/xen3-patch-2.6.26 @@ -5,8 +5,10 @@ 
Patch-mainline: 2.6.26 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches.py ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-24 15:12:36.000000000 +0100 +2.6.35: arch/x86/include/asm/scatterlist.h change removed (would need to be reverted there) + +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-01-31 18:07:35.000000000 +0100 @@ -41,7 +41,7 @@ config X86 select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE select HAVE_SYSCALL_TRACEPOINTS @@ -16,7 +18,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches select HAVE_ARCH_TRACEHOOK select HAVE_GENERIC_DMA_COHERENT if X86_32 select HAVE_EFFICIENT_UNALIGNED_ACCESS -@@ -681,6 +681,7 @@ config NO_BOOTMEM +@@ -609,6 +609,7 @@ config NO_BOOTMEM config MEMTEST bool "Memtest" @@ -24,24 +26,24 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches ---help--- This option adds a kernel parameter 'memtest', which allows memtest to be set. -@@ -1255,7 +1256,7 @@ config ARCH_PHYS_ADDR_T_64BIT +@@ -1193,7 +1194,7 @@ config ARCH_DMA_ADDR_T_64BIT config DIRECT_GBPAGES - bool "Enable 1GB pages for kernel pagetables" if EMBEDDED + bool "Enable 1GB pages for kernel pagetables" if EXPERT default y - depends on X86_64 + depends on X86_64 && !XEN ---help--- Allow the kernel linear mapping to use 1GB pages on CPUs that support it. This can improve the kernel's performance a tiny bit by -@@ -2279,6 +2280,4 @@ source "crypto/Kconfig" +@@ -2267,6 +2268,4 @@ source "crypto/Kconfig" source "arch/x86/kvm/Kconfig" -source "drivers/xen/Kconfig" - source "lib/Kconfig" ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/ia32/ia32entry-xen.S 2011-01-31 18:07:35.000000000 +0100 @@ -129,12 +129,14 @@ sysenter_tracesys: SAVE_REST CLEAR_RREGS @@ -102,9 +104,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches .quad sys_alarm .quad sys_fstat /* (old)fstat */ .quad sys_pause ---- head-2010-05-25.orig/arch/x86/kernel/Makefile 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/Makefile 2010-03-24 15:12:36.000000000 +0100 -@@ -136,8 +136,7 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-03-11.orig/arch/x86/kernel/Makefile 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/Makefile 2011-01-31 18:07:35.000000000 +0100 +@@ -127,8 +127,7 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_XEN) += nmi_64.o time_64-$(CONFIG_XEN) += time_32.o @@ -115,25 +117,25 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - smpboot_$(BITS).o tsc_$(BITS).o tsc_sync.o +disabled-obj-$(CONFIG_XEN) := crash.o early-quirks.o hpet.o i8253.o i8259_$(BITS).o \ + pci-swiotlb_64.o reboot.o smpboot.o tlb_$(BITS).o tsc_$(BITS).o tsc_sync.o vsmp_64.o ---- head-2010-05-25.orig/arch/x86/kernel/acpi/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/Makefile 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/acpi/Makefile 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/Makefile 2011-01-31 18:07:35.000000000 +0100 @@ -15,4 +15,4 @@ 
$(obj)/wakeup_rm.o: $(obj)/realmode/w $(obj)/realmode/wakeup.bin: FORCE $(Q)$(MAKE) $(build)=$(obj)/realmode -disabled-obj-$(CONFIG_XEN) := cstate.o wakeup_$(BITS).o +disabled-obj-$(CONFIG_XEN) := cstate.o wakeup_%.o ---- head-2010-05-25.orig/arch/x86/kernel/acpi/boot.c 2010-04-15 09:56:18.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/acpi/boot.c 2010-04-15 10:03:01.000000000 +0200 -@@ -159,6 +159,7 @@ static int __init acpi_parse_madt(struct +--- head-2011-03-11.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 10:56:21.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/boot.c 2011-03-11 10:59:02.000000000 +0100 +@@ -206,6 +206,7 @@ static int __init acpi_parse_madt(struct static void __cpuinit acpi_register_lapic(int id, u8 enabled) { +#ifndef CONFIG_XEN unsigned int ver = 0; - if (!enabled) { -@@ -170,6 +171,7 @@ static void __cpuinit acpi_register_lapi + if (id >= (MAX_LOCAL_APIC-1)) { +@@ -222,6 +223,7 @@ static void __cpuinit acpi_register_lapi ver = apic_version[boot_cpu_physical_apicid]; generic_processor_info(id, ver); @@ -141,26 +143,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } static int __init -@@ -780,6 +782,7 @@ static int __init acpi_parse_fadt(struct - * returns 0 on success, < 0 on error - */ - -+#ifndef CONFIG_XEN - static void __init acpi_register_lapic_address(unsigned long address) - { - mp_lapic_addr = address; -@@ -791,6 +794,9 @@ static void __init acpi_register_lapic_a - GET_APIC_VERSION(apic_read(APIC_LVR)); - } - } -+#else -+#define acpi_register_lapic_address(address) -+#endif - - static int __init early_acpi_parse_madt_lapic_addr_ovr(void) - { ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/sleep-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -10,15 +10,19 @@ #include #include @@ -285,8 +269,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif } ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/common-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -5,7 +5,6 @@ #include #include @@ -492,12 +476,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches u32 capability, excap; cpuid(0x00000001, &tfms, &ebx, &excap, &capability); c->x86_capability[0] = capability; -@@ -376,14 +376,16 @@ static void __cpuinit generic_identify(s - if (c->x86 >= 0x6) +@@ -377,13 +377,15 @@ static void __cpuinit generic_identify(s c->x86_model += ((tfms >> 16) & 0xF) << 4; c->x86_mask = tfms & 15; -+ c->initial_apicid = (ebx >> 24) & 0xFF; #ifndef CONFIG_XEN ++ c->initial_apicid = (ebx >> 24) & 0xFF; #ifdef CONFIG_X86_HT - c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); + c->apicid = phys_pkg_id(c->initial_apicid, 0); @@ -554,7 +537,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* Disabling the serial number may affect the cpuid level */ c->cpuid_level = cpuid_eax(0); -@@ -453,9 +452,11 @@ void __cpuinit identify_cpu(struct cpuin +@@ -455,9 +454,11 @@ void __cpuinit identify_cpu(struct cpuin 
memset(&c->x86_capability, 0, sizeof c->x86_capability); if (!have_cpuid_p()) { @@ -569,7 +552,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches c->x86 = 4; else c->x86 = 3; -@@ -488,10 +489,10 @@ void __cpuinit identify_cpu(struct cpuin +@@ -490,10 +491,10 @@ void __cpuinit identify_cpu(struct cpuin */ /* If the model name is still unset, do table lookup. */ @@ -582,7 +565,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches strcpy(c->x86_model_id, p); else /* Last resort... */ -@@ -505,9 +506,9 @@ void __cpuinit identify_cpu(struct cpuin +@@ -507,9 +508,9 @@ void __cpuinit identify_cpu(struct cpuin * common between the CPUs. The first time this routine gets * executed, c == &boot_cpu_data. */ @@ -594,7 +577,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } -@@ -551,7 +552,7 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -553,7 +554,7 @@ void __cpuinit detect_ht(struct cpuinfo_ if (smp_num_siblings == 1) { printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); @@ -603,7 +586,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (smp_num_siblings > NR_CPUS) { printk(KERN_WARNING "CPU: Unsupported number of the " -@@ -561,7 +562,7 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -563,7 +564,7 @@ void __cpuinit detect_ht(struct cpuinfo_ } index_msb = get_count_order(smp_num_siblings); @@ -612,7 +595,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id); -@@ -572,7 +573,7 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -574,7 +575,7 @@ void __cpuinit detect_ht(struct cpuinfo_ core_bits = get_count_order(c->x86_max_cores); @@ -621,7 +604,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches ((1 << core_bits) - 1); if (c->x86_max_cores > 1) -@@ -606,7 +607,7 @@ void __cpuinit print_cpu_info(struct cpu +@@ -608,7 +609,7 @@ void __cpuinit print_cpu_info(struct cpu else printk("%s", c->x86_model_id); @@ -630,7 +613,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches printk(" stepping %02x\n", c->x86_mask); else printk("\n"); -@@ -625,24 +626,17 @@ __setup("clearcpuid=", setup_disablecpui +@@ -627,24 +628,17 @@ __setup("clearcpuid=", setup_disablecpui cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; @@ -663,7 +646,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } /* Make sure %fs is initialized properly in idle threads */ -@@ -687,7 +681,7 @@ void __cpuinit cpu_init(void) +@@ -689,7 +683,7 @@ void __cpuinit cpu_init(void) int cpu = smp_processor_id(); struct task_struct *curr = current; #ifndef CONFIG_X86_NO_TSS @@ -672,7 +655,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif struct thread_struct *thread = &curr->thread; -@@ -740,7 +734,7 @@ void __cpuinit cpu_init(void) +@@ -742,7 +736,7 @@ void __cpuinit cpu_init(void) mxcsr_feature_mask_init(); } @@ -681,8 +664,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches void __cpuinit cpu_uninit(void) { int cpu = raw_smp_processor_id(); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- 
head-2011-03-11.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -35,6 +35,8 @@ struct mtrr_ops *mtrr_if = &generic_mtrr unsigned int num_var_ranges; unsigned int mtrr_usage_table[MAX_VAR_RANGES]; @@ -837,8 +820,85 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } void mtrr_ap_init(void) ---- head-2010-05-25.orig/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/bugs.c 2010-08-02 00:11:14.000000000 +0200 ++++ head-2011-03-11/arch/x86/kernel/cpu/bugs.c 2011-03-03 16:41:48.000000000 +0100 +@@ -17,6 +17,7 @@ + #include + #include + ++#ifndef CONFIG_XEN + static int __init no_halt(char *s) + { + boot_cpu_data.hlt_works_ok = 0; +@@ -24,6 +25,7 @@ static int __init no_halt(char *s) + } + + __setup("no-hlt", no_halt); ++#endif + + static int __init no_387(char *s) + { +@@ -79,13 +81,16 @@ static void __init check_fpu(void) + : "=m" (*&fdiv_bug) + : "m" (*&x), "m" (*&y)); + ++#ifndef CONFIG_XEN + boot_cpu_data.fdiv_bug = fdiv_bug; + if (boot_cpu_data.fdiv_bug) + printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n"); ++#endif + } + + static void __init check_hlt(void) + { ++#ifndef CONFIG_XEN + if (boot_cpu_data.x86 >= 5 || paravirt_enabled()) + return; + +@@ -99,6 +104,7 @@ static void __init check_hlt(void) + halt(); + halt(); + printk(KERN_CONT "OK.\n"); ++#endif + } + + /* +--- head-2011-03-11.orig/arch/x86/kernel/cpu/proc.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/proc.c 2011-03-03 16:38:42.000000000 +0100 +@@ -10,7 +10,7 @@ + static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, + unsigned int cpu) + { +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + if (c->x86_max_cores * smp_num_siblings > 1) { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", +@@ -32,18 +32,22 @@ static void show_cpuinfo_misc(struct seq + */ + int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); + seq_printf(m, ++#ifndef CONFIG_XEN + "fdiv_bug\t: %s\n" + "hlt_bug\t\t: %s\n" + "f00f_bug\t: %s\n" + "coma_bug\t: %s\n" ++#endif + "fpu\t\t: %s\n" + "fpu_exception\t: %s\n" + "cpuid level\t: %d\n" + "wp\t\t: %s\n", ++#ifndef CONFIG_XEN + c->fdiv_bug ? "yes" : "no", + c->hlt_works_ok ? "no" : "yes", + c->f00f_bug ? "yes" : "no", + c->coma_bug ? "yes" : "no", ++#endif + c->hard_math ? "yes" : "no", + fpu_exception ? "yes" : "no", + c->cpuid_level, +--- head-2011-03-11.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820_32-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -469,7 +469,7 @@ int __init sanitize_e820_map(struct e820 * thinkpad 560x, for example, does not cooperate with the memory * detection code.) 
@@ -905,8 +965,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches saved_max_pfn = max_pfn; #endif e820.nr_map = 0; ---- head-2010-05-25.orig/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820_64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -41,11 +41,11 @@ unsigned long end_pfn; #ifndef CONFIG_XEN @@ -1235,8 +1295,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches e820.nr_map = 0; userdef = 1; return 0; ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/early_printk-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/early_printk-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -13,7 +13,7 @@ #define VGABASE (__ISA_IO_base + 0xb8000) @@ -1305,8 +1365,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #ifdef CONFIG_XEN } else if (!strncmp(buf, "xen", 3)) { early_console = &xenboot_console; ---- head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_32-xen.S 2011-01-31 18:07:35.000000000 +0100 @@ -1,5 +1,4 @@ /* - * linux/arch/i386/entry.S @@ -1474,8 +1534,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches GET_THREAD_INFO(%ebp) movl $-EFAULT,PT_EAX(%esp) jmp resume_userspace ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_64-xen.S 2011-01-31 18:07:35.000000000 +0100 @@ -331,19 +331,17 @@ badsys: /* Do syscall tracing */ tracesys: @@ -1499,8 +1559,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* Use IRET because user could have changed frame */ /* ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -17,6 +17,7 @@ #include #include @@ -1652,8 +1712,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * At this point everything still needed from the boot loader ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_32-xen.S 2011-01-31 18:07:35.000000000 +0100 @@ -69,7 +69,7 @@ ENTRY(startup_32) cld # gcc2 wants the direction flag cleared at all times @@ -1663,8 +1723,8 @@ 
Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #define HYPERCALL_PAGE_OFFSET 0x1000 .org HYPERCALL_PAGE_OFFSET ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -88,6 +88,16 @@ int sis_apic_bug = -1; */ int nr_ioapic_registers[MAX_IO_APICS]; @@ -1993,8 +2053,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -43,13 +43,15 @@ #include #include @@ -2193,58 +2253,14 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches for (i = 0; i < nr_ioapics; i++) { --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:12:36.000000000 +0100 -@@ -0,0 +1,232 @@ ++++ head-2011-03-11/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:56:33.000000000 +0100 +@@ -0,0 +1,63 @@ +#include +#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include + +#include -+#include -+#include -+#include -+#include -+#include + +#ifdef CONFIG_X86_32 -+#ifndef CONFIG_XEN -+#include -+/* -+ * the following functions deal with sending IPIs between CPUs. -+ * -+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. -+ */ -+ -+static inline int __prepare_ICR(unsigned int shortcut, int vector) -+{ -+ unsigned int icr = shortcut | APIC_DEST_LOGICAL; -+ -+ switch (vector) { -+ default: -+ icr |= APIC_DM_FIXED | vector; -+ break; -+ case NMI_VECTOR: -+ icr |= APIC_DM_NMI; -+ break; -+ } -+ return icr; -+} -+ -+static inline int __prepare_ICR2(unsigned int mask) -+{ -+ return SET_APIC_DEST_FIELD(mask); -+} -+#else +#include + +DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]); @@ -2255,35 +2271,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + BUG_ON(irq < 0); + notify_remote_via_irq(irq); +} -+#endif + +void __send_IPI_shortcut(unsigned int shortcut, int vector) +{ -+#ifndef CONFIG_XEN -+ /* -+ * Subtle. In the case of the 'never do double writes' workaround -+ * we have to lock out interrupts to be safe. As we don't care -+ * of the value read we use an atomic rmw access to avoid costly -+ * cli/sti. Otherwise we use an even cheaper single atomic write -+ * to the APIC. -+ */ -+ unsigned int cfg; -+ -+ /* -+ * Wait for idle. -+ */ -+ apic_wait_icr_idle(); -+ -+ /* -+ * No need to touch the target chip field -+ */ -+ cfg = __prepare_ICR(shortcut, vector); -+ -+ /* -+ * Send the IPI. The write to APIC_ICR fires this off. 
-+ */ -+ apic_write_around(APIC_ICR, cfg); -+#else + int cpu; + + switch (shortcut) { @@ -2300,7 +2290,6 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + vector); + break; + } -+#endif +} + +void send_IPI_self(int vector) @@ -2308,127 +2297,29 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + __send_IPI_shortcut(APIC_DEST_SELF, vector); +} + -+#ifndef CONFIG_XEN -+/* -+ * This is used to send an IPI with no shorthand notation (the destination is -+ * specified in bits 56 to 63 of the ICR). -+ */ -+static inline void __send_IPI_dest_field(unsigned long mask, int vector) -+{ -+ unsigned long cfg; -+ -+ /* -+ * Wait for idle. -+ */ -+ if (unlikely(vector == NMI_VECTOR)) -+ safe_apic_wait_icr_idle(); -+ else -+ apic_wait_icr_idle(); -+ -+ /* -+ * prepare target chip field -+ */ -+ cfg = __prepare_ICR2(mask); -+ apic_write_around(APIC_ICR2, cfg); -+ -+ /* -+ * program the ICR -+ */ -+ cfg = __prepare_ICR(0, vector); -+ -+ /* -+ * Send the IPI. The write to APIC_ICR fires this off. -+ */ -+ apic_write_around(APIC_ICR, cfg); -+} -+#endif -+ -+/* -+ * This is only used on smaller machines. -+ */ +void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) +{ -+#ifndef CONFIG_XEN -+ unsigned long mask = cpus_addr(cpumask)[0]; -+#else + cpumask_t mask; + unsigned int cpu; -+#endif -+ unsigned long flags; + -+ local_irq_save(flags); -+#ifndef CONFIG_XEN -+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); -+ __send_IPI_dest_field(mask, vector); -+#else + cpus_andnot(mask, cpumask, cpu_online_map); + WARN_ON(!cpus_empty(mask)); + for_each_online_cpu(cpu) + if (cpu_isset(cpu, cpumask)) + __send_IPI_one(cpu, vector); -+#endif -+ local_irq_restore(flags); +} + +void send_IPI_mask_sequence(cpumask_t mask, int vector) +{ -+#ifndef CONFIG_XEN -+ unsigned long flags; -+ unsigned int query_cpu; -+ -+ /* -+ * Hack. The clustered APIC addressing mode doesn't allow us to send -+ * to an arbitrary mask, so I do a unicasts to each CPU instead. This -+ * should be modified to do 1 message per cluster ID - mbligh -+ */ -+ -+ local_irq_save(flags); -+ for_each_possible_cpu(query_cpu) { -+ if (cpu_isset(query_cpu, mask)) { -+ __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), -+ vector); -+ } -+ } -+ local_irq_restore(flags); -+#else + send_IPI_mask_bitmask(mask, vector); -+#endif +} + +/* must come after the send_IPI functions above for inlining */ +#include + -+#ifndef CONFIG_XEN -+static int convert_apicid_to_cpu(int apic_id) -+{ -+ int i; -+ -+ for_each_possible_cpu(i) { -+ if (per_cpu(x86_cpu_to_apicid, i) == apic_id) -+ return i; -+ } -+ return -1; -+} -+ -+int safe_smp_processor_id(void) -+{ -+ int apicid, cpuid; -+ -+ if (!boot_cpu_has(X86_FEATURE_APIC)) -+ return 0; -+ -+ apicid = hard_smp_processor_id(); -+ if (apicid == BAD_APICID) -+ return 0; -+ -+ cpuid = convert_apicid_to_cpu(apicid); -+ -+ return cpuid >= 0 ? 
cpuid : 0; -+} +#endif -+#endif ---- head-2010-05-25.orig/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:56:14.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/machine_kexec_64.c 2010-04-15 10:03:05.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/machine_kexec_64.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/machine_kexec_64.c 2011-01-31 18:07:35.000000000 +0100 @@ -114,8 +114,6 @@ int __init machine_kexec_setup_resources return 0; } @@ -2438,8 +2329,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #else /* CONFIG_XEN */ #define x__pmd(x) __pmd(x) ---- head-2010-05-25.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/microcode-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/microcode-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -162,7 +162,7 @@ static int request_microcode(void) c->x86, c->x86_model, c->x86_mask); error = request_firmware(&firmware, name, µcode_pdev->dev); @@ -2449,28 +2340,32 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return error; } ---- head-2010-05-25.orig/arch/x86/kernel/mmconf-fam10h_64.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/mmconf-fam10h_64.c 2010-03-24 15:12:36.000000000 +0100 -@@ -215,6 +215,16 @@ void __cpuinit fam10h_check_enable_mmcfg +--- head-2011-03-11.orig/arch/x86/kernel/mmconf-fam10h_64.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/mmconf-fam10h_64.c 2011-01-31 18:07:35.000000000 +0100 +@@ -205,12 +205,20 @@ void __cpuinit fam10h_check_enable_mmcfg + return; + } + ++#ifndef CONFIG_XEN + printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n"); + val &= ~((FAM10H_MMIO_CONF_BASE_MASK<mpc_lapic); + + /* save the local APIC address, it might be non-default */ + if (!acpi_lapic) + mp_lapic_addr = mpc->mpc_lapic; ++#endif + + if (early) + return 1; @@ -2978,10 +2875,12 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + int linttypes[2] = { mp_ExtINT, mp_NMI }; + int i; + ++#ifndef CONFIG_XEN + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; ++#endif + + /* + * 2 CPUs, numbered 0 & 1. @@ -3057,7 +2956,12 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +/* + * Scan the memory blocks for an SMP configuration block. + */ ++#ifndef CONFIG_XEN +static void __init __get_smp_config(unsigned early) ++#else ++void __init get_smp_config(void) ++#define early 0 ++#endif +{ + struct intel_mp_floating *mpf = mpf_found; + @@ -3090,6 +2994,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + * Now see if we need to read further. + */ + if (mpf->mpf_feature1 != 0) { ++#ifndef CONFIG_XEN + if (early) { + /* + * local APIC has default address @@ -3097,6 +3002,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + return; + } ++#endif + + printk(KERN_INFO "Default MP configuration #%d\n", + mpf->mpf_feature1); @@ -3148,8 +3054,10 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + /* + * Only use the first configuration found. 
+ */ ++#undef early +} + ++#ifndef CONFIG_XEN +void __init early_get_smp_config(void) +{ + __get_smp_config(1); @@ -3159,6 +3067,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +{ + __get_smp_config(0); +} ++#endif + +static int __init smp_scan_config(unsigned long base, unsigned long length, + unsigned reserve) @@ -3310,10 +3219,12 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +static u8 __init uniq_ioapic_id(u8 id) +{ +#ifdef CONFIG_X86_32 ++#ifndef CONFIG_XEN + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return io_apic_get_unique_id(nr_ioapics, id); + else ++#endif + return id; +#else + int i; @@ -3572,9 +3483,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + +#endif /* CONFIG_X86_IO_APIC */ +#endif /* CONFIG_ACPI */ ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/mpparse_32-xen.c 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,1161 +0,0 @@ +@@ -1,1163 +0,0 @@ -/* - * Intel Multiprocessor Specification 1.1 and 1.4 - * compliant MP-table parsing routines. @@ -3642,8 +3553,10 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -unsigned int def_to_bigsmp = 0; - +-#ifndef CONFIG_XEN -/* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; +-#endif -/* Internal processor count */ -unsigned int num_processors; - @@ -4421,6 +4334,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -void __cpuinit mp_register_lapic (u8 id, u8 enabled) -{ - struct mpc_config_processor processor; +-#ifndef CONFIG_XEN - int boot_cpu = 0; - - if (MAX_APICS - id <= 0) { @@ -4432,7 +4346,6 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - if (id == boot_cpu_physical_apicid) - boot_cpu = 1; - --#ifndef CONFIG_XEN - processor.mpc_type = MP_PROCESSOR; - processor.mpc_apicid = id; - processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); @@ -4500,11 +4413,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -#ifndef CONFIG_XEN - set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); --#endif - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - tmpid = io_apic_get_unique_id(idx, id); - else +-#endif - tmpid = id; - if (tmpid == -1) { - nr_ioapics--; @@ -4736,9 +4649,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -#endif /* CONFIG_X86_IO_APIC */ -#endif /* CONFIG_ACPI */ ---- head-2010-05-25.orig/arch/x86/kernel/mpparse_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,879 +0,0 @@ +@@ -1,880 +0,0 @@ -/* - * Intel Multiprocessor Specification 1.1 and 1.4 - * compliant MP-table parsing routines. 
@@ -4795,10 +4708,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -unsigned long mp_lapic_addr = 0; - - -- +-#ifndef CONFIG_XEN -/* Processor that is doing the boot up */ -unsigned int boot_cpu_id = -1U; -EXPORT_SYMBOL(boot_cpu_id); +-#endif - -/* Internal processor count */ -unsigned int num_processors; @@ -5369,12 +5283,12 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -void __cpuinit mp_register_lapic (u8 id, u8 enabled) -{ - struct mpc_config_processor processor; +-#ifndef CONFIG_XEN - int boot_cpu = 0; - - if (id == boot_cpu_id) - boot_cpu = 1; - --#ifndef CONFIG_XEN - processor.mpc_type = MP_PROCESSOR; - processor.mpc_apicid = id; - processor.mpc_apicver = 0; @@ -5618,8 +5532,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - return gsi; -} -#endif /*CONFIG_ACPI*/ ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-dma-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -1,280 +1,251 @@ -/* - * Dynamic DMA mapping support. @@ -6460,7 +6374,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); +#endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:12:36.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-nommu-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -0,0 +1,108 @@ +#include +#include @@ -6571,7 +6485,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + dma_ops = &nommu_dma_ops; +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/process-xen.c 2010-03-24 15:12:36.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -0,0 +1,188 @@ +#include +#include @@ -6761,8 +6675,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +} +early_param("idle", idle_setup); + ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_32-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -36,6 +36,7 @@ #include #include @@ -7042,8 +6956,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches math_state_restore(); /* ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:32:11.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_64-xen.c 2011-02-02 08:32:46.000000000 +0100 @@ -39,6 +39,7 @@ #include #include @@ -7052,7 +6966,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #include #include -@@ -102,17 +103,6 @@ void exit_idle(void) +@@ -101,17 +102,6 @@ void exit_idle(void) __exit_idle(); } @@ -7070,7 +6984,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by 
xen-port-patches static void xen_idle(void) { current_thread_info()->status &= ~TS_POLLING; -@@ -121,20 +111,10 @@ static void xen_idle(void) +@@ -120,20 +110,10 @@ static void xen_idle(void) * test NEED_RESCHED: */ smp_mb(); @@ -7094,7 +7008,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches current_thread_info()->status |= TS_POLLING; } -@@ -195,45 +175,6 @@ void cpu_idle(void) +@@ -194,45 +174,6 @@ void cpu_idle(void) } } @@ -7140,7 +7054,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs * regs) { -@@ -360,6 +301,7 @@ void flush_thread(void) +@@ -359,6 +300,7 @@ void flush_thread(void) /* * Forget coprocessor state.. */ @@ -7148,7 +7062,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches clear_fpu(tsk); clear_used_math(); } -@@ -471,6 +413,82 @@ out: +@@ -470,6 +412,82 @@ out: return err; } @@ -7231,7 +7145,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * This special macro can be used to load a debugging register */ -@@ -490,12 +508,12 @@ static inline void __switch_to_xtra(stru +@@ -489,12 +507,12 @@ static inline void __switch_to_xtra(stru /* we clear debugctl to make sure DS * is not in use when we change it */ debugctl = 0; @@ -7246,7 +7160,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { loaddebug(next, 0); -@@ -507,6 +525,15 @@ static inline void __switch_to_xtra(stru +@@ -506,6 +524,15 @@ static inline void __switch_to_xtra(stru loaddebug(next, 7); } @@ -7262,7 +7176,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #ifdef X86_BTS if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); -@@ -546,7 +573,7 @@ __switch_to(struct task_struct *prev_p, +@@ -545,7 +572,7 @@ __switch_to(struct task_struct *prev_p, /* we're going to use this soon, after a few expensive things */ if (next_p->fpu_counter>5) @@ -7271,7 +7185,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * This is basically '__unlazy_fpu', except that we queue a -@@ -677,8 +704,11 @@ __switch_to(struct task_struct *prev_p, +@@ -676,8 +703,11 @@ __switch_to(struct task_struct *prev_p, /* If the task has used fpu the last 5 timeslices, just do a full * restore of the math state immediately to avoid the trap; the * chances of needing FPU soon are obviously high now @@ -7285,8 +7199,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return prev_p; } --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/setup-xen.c 2010-03-24 15:12:36.000000000 +0100 -@@ -0,0 +1,141 @@ ++++ head-2011-03-11/arch/x86/kernel/setup-xen.c 2011-01-31 18:07:35.000000000 +0100 +@@ -0,0 +1,143 @@ +#include +#include +#include @@ -7304,6 +7218,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +#ifdef CONFIG_X86_LOCAL_APIC +unsigned int num_processors; +unsigned disabled_cpus __cpuinitdata; ++#ifndef CONFIG_XEN +/* Processor that is doing the boot up */ +unsigned int boot_cpu_physical_apicid = -1U; +EXPORT_SYMBOL(boot_cpu_physical_apicid); @@ -7314,6 +7229,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +/* Bitmask of physically existing CPUs */ +physid_mask_t 
phys_cpu_present_map; +#endif ++#endif + +#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP) +/* @@ -7428,8 +7344,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +} + +#endif ---- head-2010-05-25.orig/arch/x86/kernel/setup64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -15,6 +15,7 @@ #include #include @@ -7579,8 +7495,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + if (is_uv_system()) + uv_cpu_init(); } ---- head-2010-05-25.orig/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup_32-xen.c 2011-03-04 15:07:31.000000000 +0100 @@ -39,6 +39,7 @@ #include #include @@ -7622,8 +7538,15 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "dma page reg", -@@ -166,6 +174,8 @@ struct cpuinfo_x86 new_cpu_data __cpuini - struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; +@@ -161,11 +169,13 @@ static struct resource standard_io_resou + } }; + + /* cpu data as detected by the assembly code in head.S */ +-struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; ++struct cpuinfo_x86 new_cpu_data __cpuinitdata = { .wp_works_ok = 1 }; + /* common cpu data for all cpus */ +-struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; ++struct cpuinfo_x86 boot_cpu_data __read_mostly = { .wp_works_ok = 1 }; EXPORT_SYMBOL(boot_cpu_data); +unsigned int def_to_bigsmp; @@ -7847,8 +7770,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #ifdef CONFIG_X86_GENERICARCH generic_apic_probe(); #endif ---- head-2010-05-25.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup_64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -29,18 +29,22 @@ #include #include @@ -8152,7 +8075,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif } -@@ -1052,7 +1135,7 @@ static void __cpuinit early_init_intel(s +@@ -1054,7 +1137,7 @@ static void __cpuinit early_init_intel(s { if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) @@ -8161,7 +8084,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } static void __cpuinit init_intel(struct cpuinfo_x86 *c) -@@ -1095,9 +1178,6 @@ static void __cpuinit init_intel(struct +@@ -1097,9 +1180,6 @@ static void __cpuinit init_intel(struct if (c->x86 == 15) c->x86_cache_alignment = c->x86_clflush_size * 2; @@ -8171,7 +8094,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_REP_GOOD); set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); -@@ -1106,6 +1186,32 @@ static void __cpuinit init_intel(struct +@@ -1110,6 
+1190,32 @@ static void __cpuinit init_intel(struct srat_detect_node(); } @@ -8204,7 +8127,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; -@@ -1114,6 +1220,8 @@ static void __cpuinit get_cpu_vendor(str +@@ -1118,6 +1224,8 @@ static void __cpuinit get_cpu_vendor(str c->x86_vendor = X86_VENDOR_AMD; else if (!strcmp(v, "GenuineIntel")) c->x86_vendor = X86_VENDOR_INTEL; @@ -8213,7 +8136,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches else c->x86_vendor = X86_VENDOR_UNKNOWN; } -@@ -1161,15 +1269,16 @@ static void __cpuinit early_identify_cpu +@@ -1167,15 +1275,18 @@ static void __cpuinit early_identify_cpu c->x86 += (tfms >> 20) & 0xff; if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; @@ -8225,14 +8148,17 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches c->x86 = 4; } -+ c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; - #ifdef CONFIG_SMP +-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; ++#ifndef CONFIG_XEN ++ c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; ++#ifdef CONFIG_SMP + c->phys_proc_id = c->initial_apicid; ++#endif #endif /* AMD-defined flags: level 0x80000001 */ xlvl = cpuid_eax(0x80000000); -@@ -1202,8 +1311,12 @@ static void __cpuinit early_identify_cpu +@@ -1208,8 +1319,12 @@ static void __cpuinit early_identify_cpu case X86_VENDOR_INTEL: early_init_intel(c); break; @@ -8245,7 +8171,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } /* -@@ -1240,6 +1353,10 @@ void __cpuinit identify_cpu(struct cpuin +@@ -1246,6 +1361,10 @@ void __cpuinit identify_cpu(struct cpuin init_intel(c); break; @@ -8256,7 +8182,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches case X86_VENDOR_UNKNOWN: default: display_cacheinfo(c); -@@ -1269,14 +1386,24 @@ void __cpuinit identify_cpu(struct cpuin +@@ -1275,14 +1394,24 @@ void __cpuinit identify_cpu(struct cpuin #endif select_idle_routine(c); @@ -8283,7 +8209,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches static __init int setup_noclflush(char *arg) { setup_clear_cpu_cap(X86_FEATURE_CLFLSH); -@@ -1305,123 +1432,3 @@ static __init int setup_disablecpuid(cha +@@ -1311,123 +1440,3 @@ static __init int setup_disablecpuid(cha return 1; } __setup("clearcpuid=", setup_disablecpuid); @@ -8330,7 +8256,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - if (c->x86_cache_size >= 0) - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); - --#ifdef CONFIG_SMP +-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - if (smp_num_siblings * c->x86_max_cores > 1) { - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", @@ -8408,8 +8334,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - .show = show_cpuinfo, -}; --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/smp-xen.c 2010-03-24 15:12:36.000000000 +0100 -@@ -0,0 +1,329 @@ ++++ head-2011-03-11/arch/x86/kernel/smp-xen.c 2011-01-31 18:07:35.000000000 +0100 +@@ -0,0 +1,327 @@ +/* + * Intel SMP support routines. 
+ * @@ -8723,14 +8649,12 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + /* + * At this point the info structure may be out of scope unless wait==1 + */ -+ irq_enter(); + (*func)(info); +#ifdef CONFIG_X86_32 + __get_cpu_var(irq_stat).irq_call_count++; +#else + add_pda(irq_call_count, 1); +#endif -+ irq_exit(); + + if (wait) { + mb(); @@ -8739,7 +8663,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + + return IRQ_HANDLED; +} ---- head-2010-05-25.orig/arch/x86/kernel/smp_32-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,647 +0,0 @@ -/* @@ -9389,7 +9313,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - - return IRQ_HANDLED; -} ---- head-2010-05-25.orig/arch/x86/kernel/smp_64-xen.c 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp_64-xen.c 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,554 +0,0 @@ -/* @@ -9946,9 +9870,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - return IRQ_HANDLED; -#endif -} ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-03-24 15:12:36.000000000 +0100 -@@ -700,8 +700,6 @@ int xen_update_persistent_clock(void) +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-01-31 18:07:35.000000000 +0100 +@@ -703,8 +703,6 @@ int xen_update_persistent_clock(void) return 0; } @@ -9957,8 +9881,17 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* Dynamically-mapped IRQ. */ DEFINE_PER_CPU(int, timer_irq); ---- head-2010-05-25.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +@@ -933,7 +931,7 @@ int __cpuinit local_setup_timer(unsigned + return 0; + } + +-void __cpuexit local_teardown_timer(unsigned int cpu) ++void __cpuinit local_teardown_timer(unsigned int cpu) + { + BUG_ON(cpu == 0); + unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL); +--- head-2011-03-11.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_32-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -9,26 +9,28 @@ * 'Traps.c' handles hardware traps and faults after we have saved some * state in 'asm.s'. @@ -11043,8 +10976,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return 1; } __setup("kstack=", kstack_setup); ---- head-2010-05-25.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -33,6 +33,8 @@ #include #include @@ -11153,8 +11086,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches * Should be a barrier for any external CPU state. 
*/ cpu_init(); ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -216,7 +216,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s return 0; } @@ -11164,8 +11097,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { return -ENOSYS; } ---- head-2010-05-25.orig/arch/x86/mach-xen/setup.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mach-xen/setup.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mach-xen/setup.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mach-xen/setup.c 2011-01-31 18:07:35.000000000 +0100 @@ -56,11 +56,7 @@ char * __init machine_specific_memory_se { int rc; @@ -11179,8 +11112,382 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches memmap.nr_entries = E820MAX; set_xen_guest_handle(memmap.buffer, map); ---- head-2010-05-25.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-11/arch/x86/mm/dump_pagetables-xen.c 2011-01-31 18:07:35.000000000 +0100 +@@ -0,0 +1,371 @@ ++/* ++ * Debug helper to dump the current kernel pagetables of the system ++ * so that we can see what the various memory ranges are set to. ++ * ++ * (C) Copyright 2008 Intel Corporation ++ * ++ * Author: Arjan van de Ven ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; version 2 ++ * of the License. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++ ++/* ++ * The dumper groups pagetable entries of the same type into one, and for ++ * that it needs to keep some state when walking, and flush this state ++ * when a "break" in the continuity is found. 
++ */ ++struct pg_state { ++ int level; ++ pgprot_t current_prot; ++ unsigned long start_address; ++ unsigned long current_address; ++ const struct addr_marker *marker; ++}; ++ ++struct addr_marker { ++ unsigned long start_address; ++ const char *name; ++}; ++ ++/* Address space markers hints */ ++static struct addr_marker address_markers[] = { ++ { 0, "User Space" }, ++#ifdef CONFIG_X86_64 ++ { HYPERVISOR_VIRT_START, "Hypervisor Space" }, ++ { HYPERVISOR_VIRT_END, "Low Kernel Mapping" }, ++ { VMALLOC_START, "vmalloc() Area" }, ++ { VMEMMAP_START, "Vmemmap" }, ++ { __START_KERNEL_map, "High Kernel Mapping" }, ++ { MODULES_VADDR, "Modules" }, ++ { MODULES_END, "End Modules" }, ++#else ++ { PAGE_OFFSET, "Kernel Mapping" }, ++ { 0/* VMALLOC_START */, "vmalloc() Area" }, ++ { 0/*VMALLOC_END*/, "vmalloc() End" }, ++# ifdef CONFIG_HIGHMEM ++ { 0/*PKMAP_BASE*/, "Persisent kmap() Area" }, ++# endif ++ { 0/*FIXADDR_START*/, "Fixmap Area" }, ++ { 0/*HYPERVISOR_VIRT_START*/, "Hypervisor Space" }, ++#endif ++ { -1, NULL } /* End of list */ ++}; ++ ++static inline bool hypervisor_space(unsigned long addr) { ++#ifdef CONFIG_X86_64 ++ return addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END; ++#else ++ return addr >= hypervisor_virt_start; ++#endif ++} ++ ++/* Multipliers for offsets within the PTEs */ ++#define PTE_LEVEL_MULT (PAGE_SIZE) ++#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) ++#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT) ++#define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT) ++ ++/* ++ * Print a readable form of a pgprot_t to the seq_file ++ */ ++static void printk_prot(struct seq_file *m, pgprot_t prot, int level) ++{ ++ pgprotval_t pr = pgprot_val(prot); ++ static const char * const level_name[] = ++ { "cr3", "pgd", "pud", "pmd", "pte" }; ++ ++ if (!pgprot_val(prot)) { ++ /* Not present */ ++ seq_printf(m, " "); ++ } else { ++ if (pr & _PAGE_USER) ++ seq_printf(m, "USR "); ++ else ++ seq_printf(m, " "); ++ if (pr & _PAGE_RW) ++ seq_printf(m, "RW "); ++ else ++ seq_printf(m, "ro "); ++ if (pr & _PAGE_PWT) ++ seq_printf(m, "PWT "); ++ else ++ seq_printf(m, " "); ++ if (pr & _PAGE_PCD) ++ seq_printf(m, "PCD "); ++ else ++ seq_printf(m, " "); ++ ++ /* Bit 9 has a different meaning on level 3 vs 4 */ ++ if (level <= 3) { ++ if (pr & _PAGE_PSE) ++ seq_printf(m, "PSE "); ++ else ++ seq_printf(m, " "); ++ } else { ++ if (pr & _PAGE_PAT) ++ seq_printf(m, "pat "); ++ else ++ seq_printf(m, " "); ++ } ++ if (pr & _PAGE_GLOBAL) ++ seq_printf(m, "GLB "); ++ else ++ seq_printf(m, " "); ++ if (pr & _PAGE_NX) ++ seq_printf(m, "NX "); ++ else ++ seq_printf(m, "x "); ++ } ++ seq_printf(m, "%s\n", level_name[level]); ++} ++ ++/* ++ * On 64 bits, sign-extend the 48 bit address to 64 bit ++ */ ++static unsigned long normalize_addr(unsigned long u) ++{ ++#ifdef CONFIG_X86_64 ++ return (signed long)(u << 16) >> 16; ++#else ++ return u; ++#endif ++} ++ ++/* ++ * This function gets called on a break in a continuous series ++ * of PTE entries; the next one is different so we need to ++ * print what we collected so far. ++ */ ++static void note_page(struct seq_file *m, struct pg_state *st, ++ pgprot_t new_prot, int level) ++{ ++ pgprotval_t prot, cur; ++ static const char units[] = "KMGTPE"; ++ ++ /* ++ * If we have a "break" in the series, we need to flush the state that ++ * we have now. "break" is either changing perms, levels or ++ * address space marker. 
++ */ ++ prot = pgprot_val(new_prot) & ~(PTE_MASK); ++ cur = pgprot_val(st->current_prot) & ~(PTE_MASK); ++ ++ if (!st->level) { ++ /* First entry */ ++ st->current_prot = new_prot; ++ st->level = level; ++ st->marker = address_markers; ++ seq_printf(m, "---[ %s ]---\n", st->marker->name); ++ } else if (prot != cur || level != st->level || ++ st->current_address >= st->marker[1].start_address) { ++ const char *unit = units; ++ unsigned long delta; ++ ++ /* ++ * Now print the actual finished series ++ */ ++ seq_printf(m, "0x%p-0x%p ", ++ (void *)st->start_address, ++ (void *)st->current_address); ++ ++ delta = (st->current_address - st->start_address) >> 10; ++ while (!(delta & 1023) && unit[1]) { ++ delta >>= 10; ++ unit++; ++ } ++ seq_printf(m, "%9lu%c ", delta, *unit); ++ printk_prot(m, st->current_prot, st->level); ++ ++ /* ++ * We print markers for special areas of address space, ++ * such as the start of vmalloc space etc. ++ * This helps in the interpretation. ++ */ ++ if (st->current_address >= st->marker[1].start_address) { ++ st->marker++; ++ seq_printf(m, "---[ %s ]---\n", st->marker->name); ++ } ++ ++ st->start_address = st->current_address; ++ st->current_prot = new_prot; ++ st->level = level; ++ } ++} ++ ++static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, ++ unsigned long P) ++{ ++ int i; ++ pte_t *start; ++ ++ start = (pte_t *) pmd_page_vaddr(addr); ++ for (i = 0; i < PTRS_PER_PTE; i++) { ++ pgprot_t prot = pte_pgprot(*start); ++ ++ st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); ++ note_page(m, st, prot, 4); ++ start++; ++ } ++} ++ ++#if PTRS_PER_PMD > 1 ++ ++static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, ++ unsigned long P) ++{ ++ int i; ++ pmd_t *start; ++ ++ start = (pmd_t *) pud_page_vaddr(addr); ++ for (i = 0; i < PTRS_PER_PMD; i++) { ++ st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); ++ if (!hypervisor_space(st->current_address) ++ && !pmd_none(*start)) { ++ pgprotval_t prot = __pmd_val(*start) & ~PTE_MASK; ++ ++ if (pmd_large(*start) || !pmd_present(*start)) ++ note_page(m, st, __pgprot(prot), 3); ++ else ++ walk_pte_level(m, st, *start, ++ P + i * PMD_LEVEL_MULT); ++ } else ++ note_page(m, st, __pgprot(0), 3); ++ start++; ++ } ++} ++ ++#else ++#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p) ++#define pud_large(a) pmd_large(__pmd(pud_val(a))) ++#define pud_none(a) pmd_none(__pmd(pud_val(a))) ++#endif ++ ++#if PTRS_PER_PUD > 1 ++ ++static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr, ++ unsigned long P) ++{ ++ int i; ++ pud_t *start; ++ ++ start = (pud_t *) pgd_page_vaddr(addr); ++ ++ for (i = 0; i < PTRS_PER_PUD; i++) { ++ st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); ++ if (!hypervisor_space(st->current_address) ++ && !pud_none(*start)) { ++ pgprotval_t prot = __pud_val(*start) & ~PTE_MASK; ++ ++ if (pud_large(*start) || !pud_present(*start)) ++ note_page(m, st, __pgprot(prot), 2); ++ else ++ walk_pmd_level(m, st, *start, ++ P + i * PUD_LEVEL_MULT); ++ } else ++ note_page(m, st, __pgprot(0), 2); ++ ++ start++; ++ } ++} ++ ++#else ++#define __pud_ma(x) ((pud_t){ __pgd_ma(x) }) ++#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud_ma(__pgd_val(a)),p) ++#define pgd_large(a) pud_large(__pud_ma(__pgd_val(a))) ++#define pgd_none(a) pud_none(__pud_ma(__pgd_val(a))) ++#endif ++ ++static void walk_pgd_level(struct seq_file *m) ++{ ++#ifdef CONFIG_X86_64 ++ pgd_t *start = (pgd_t *) &init_level4_pgt; ++#else ++ pgd_t 
*start = swapper_pg_dir; ++#endif ++ int i; ++ struct pg_state st; ++ ++ memset(&st, 0, sizeof(st)); ++ ++ for (i = 0; i < PTRS_PER_PGD; i++) { ++ st.current_address = normalize_addr(i * PGD_LEVEL_MULT); ++ if (!pgd_none(*start)) { ++ pgprotval_t prot = __pgd_val(*start) & ~PTE_MASK; ++ ++ if (pgd_large(*start) || !pgd_present(*start)) ++ note_page(m, &st, __pgprot(prot), 1); ++ else ++ walk_pud_level(m, &st, *start, ++ i * PGD_LEVEL_MULT); ++ } else ++ note_page(m, &st, __pgprot(0), 1); ++ ++ start++; ++ } ++ ++ /* Flush out the last page */ ++ st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT); ++ note_page(m, &st, __pgprot(0), 0); ++} ++ ++static int ptdump_show(struct seq_file *m, void *v) ++{ ++ walk_pgd_level(m); ++ return 0; ++} ++ ++static int ptdump_open(struct inode *inode, struct file *filp) ++{ ++ return single_open(filp, ptdump_show, NULL); ++} ++ ++static const struct file_operations ptdump_fops = { ++ .open = ptdump_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static int __init pt_dump_init(void) ++{ ++ struct dentry *pe; ++ ++#ifdef CONFIG_X86_32 ++ /* Not a compile-time constant on x86-32 */ ++ address_markers[2].start_address = VMALLOC_START; ++ address_markers[3].start_address = VMALLOC_END; ++# ifdef CONFIG_HIGHMEM ++ address_markers[4].start_address = PKMAP_BASE; ++ address_markers[5].start_address = FIXADDR_START; ++ address_markers[6].start_address = hypervisor_virt_start; ++# else ++ address_markers[4].start_address = FIXADDR_START; ++ address_markers[5].start_address = hypervisor_virt_start; ++# endif ++#endif ++ ++ pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL, ++ &ptdump_fops); ++ if (!pe) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++__initcall(pt_dump_init); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Arjan van de Ven "); ++MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables"); +--- head-2011-03-11.orig/arch/x86/mm/fault-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/fault-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -510,6 +510,11 @@ static int vmalloc_fault(unsigned long a unsigned long pgd_paddr; pmd_t *pmd_k; @@ -11202,7 +11509,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches local_irq_enable(); /* -@@ -1017,9 +1022,5 @@ void vmalloc_sync_all(void) +@@ -1028,9 +1033,5 @@ void vmalloc_sync_all(void) if (address == start) start = address + PGDIR_SIZE; } @@ -11212,8 +11519,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - (__START_KERNEL & PGDIR_MASK))); #endif } ---- head-2010-05-25.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/highmem_32-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -200,6 +200,8 @@ EXPORT_SYMBOL(kmap); EXPORT_SYMBOL(kunmap); EXPORT_SYMBOL(kmap_atomic); @@ -11223,8 +11530,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +#endif EXPORT_SYMBOL(clear_highpage); EXPORT_SYMBOL(copy_highpage); ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/init_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_32-xen.c 
2011-01-31 18:07:35.000000000 +0100 @@ -1,5 +1,4 @@ /* - * linux/arch/i386/mm/init.c @@ -11467,8 +11774,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif start += size; size = (unsigned long)__end_rodata - start; ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:50:58.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-04-29 09:51:25.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/init_64-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_64-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -52,9 +52,6 @@ #include @@ -11959,8 +12266,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + } +} #endif ---- head-2010-05-25.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/ioremap-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:38:58.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/ioremap-xen.c 2011-02-07 15:39:13.000000000 +0100 @@ -20,14 +20,11 @@ #include #include @@ -12009,9 +12316,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches mfn++; address += PAGE_SIZE; -@@ -189,10 +200,9 @@ int touch_pte_range(struct mm_struct *mm +@@ -170,10 +181,9 @@ int create_lookup_pte_addr(struct mm_str - EXPORT_SYMBOL(touch_pte_range); + EXPORT_SYMBOL(create_lookup_pte_addr); -#ifdef CONFIG_X86_32 int page_is_ram(unsigned long pagenr) @@ -12021,7 +12328,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches int i; #ifndef CONFIG_XEN -@@ -228,31 +238,51 @@ int page_is_ram(unsigned long pagenr) +@@ -209,31 +219,51 @@ int page_is_ram(unsigned long pagenr) } return 0; } @@ -12080,7 +12387,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses -@@ -262,12 +292,15 @@ static int ioremap_change_attr(unsigned +@@ -243,13 +273,15 @@ static int ioremap_change_attr(unsigned * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ @@ -12089,9 +12396,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +static void __iomem *__ioremap_caller(resource_size_t phys_addr, + unsigned long size, unsigned long prot_val, void *caller) { -- unsigned long mfn, offset, last_addr, vaddr; -+ unsigned long mfn, offset, vaddr; -+ resource_size_t last_addr; + unsigned long offset, vaddr; + phys_addr_t mfn, last_addr; struct vm_struct *area; + unsigned long new_prot_val; pgprot_t prot; @@ -12099,7 +12405,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches domid_t domid = DOMID_IO; /* Don't allow wraparound or zero size */ -@@ -275,6 +308,13 @@ static void __iomem *__ioremap(resource_ +@@ -257,6 +289,13 @@ static void __iomem *__ioremap(resource_ if (!size || last_addr < phys_addr) return NULL; @@ -12113,7 +12419,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Don't remap the low PCI/ISA area, it's always mapped.. 
*/ -@@ -287,55 +327,86 @@ static void __iomem *__ioremap(resource_ +@@ -269,55 +308,86 @@ static void __iomem *__ioremap(resource_ for (mfn = PFN_DOWN(phys_addr); mfn < PFN_UP(last_addr); mfn++) { unsigned long pfn = mfn_to_local_pfn(mfn); @@ -12223,7 +12529,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return NULL; } -@@ -365,16 +436,72 @@ static void __iomem *__ioremap(resource_ +@@ -347,16 +417,72 @@ static void __iomem *__ioremap(resource_ */ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) { @@ -12298,7 +12604,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /** * iounmap - Free a IO remapping * @addr: virtual address from ioremap_* -@@ -417,15 +544,7 @@ void iounmap(volatile void __iomem *addr +@@ -399,15 +525,7 @@ void iounmap(volatile void __iomem *addr return; } @@ -12315,7 +12621,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* Finally remove it */ o = remove_vm_area((void *)addr); -@@ -434,6 +553,37 @@ void iounmap(volatile void __iomem *addr +@@ -416,6 +534,37 @@ void iounmap(volatile void __iomem *addr } EXPORT_SYMBOL(iounmap); @@ -12353,7 +12659,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches int __initdata early_ioremap_debug; static int __init early_ioremap_debug_setup(char *str) -@@ -445,8 +595,8 @@ static int __init early_ioremap_debug_se +@@ -427,8 +576,8 @@ static int __init early_ioremap_debug_se early_param("early_ioremap_debug", early_ioremap_debug_setup); static __initdata int after_paging_init; @@ -12364,7 +12670,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #ifdef CONFIG_X86_32 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) -@@ -461,8 +611,8 @@ static inline pmd_t * __init early_iorem +@@ -443,8 +592,8 @@ static inline pmd_t * __init early_iorem } #else #define early_ioremap_pmd early_get_pmd @@ -12374,7 +12680,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif static inline pte_t * __init early_ioremap_pte(unsigned long addr) -@@ -512,7 +662,7 @@ void __init early_ioremap_clear(void) +@@ -494,7 +643,7 @@ void __init early_ioremap_clear(void) pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); pmd_clear(pmd); make_lowmem_page_writable(bm_pte, XENFEAT_writable_page_tables); @@ -12383,7 +12689,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches __flush_tlb_all(); } -@@ -654,10 +804,11 @@ void __init early_iounmap(void *addr, un +@@ -636,10 +785,11 @@ void __init early_iounmap(void *addr, un unsigned long offset; unsigned int nrpages; enum fixed_addresses idx; @@ -12397,8 +12703,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (early_ioremap_debug) { printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr, ---- head-2010-05-25.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pageattr-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pageattr-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -9,6 +9,8 @@ #include #include @@ -12980,7 +13286,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches bool kernel_page_present(struct page *page) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2010-05-25/arch/x86/mm/pat-xen.c 2010-03-24 15:12:36.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pat-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -0,0 +1,602 @@ +/* + * Handle caching attributes in page tables (PAT) @@ -13585,8 +13891,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +} + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/mm/pgtable-xen.c 2010-03-24 15:12:36.000000000 +0100 -@@ -0,0 +1,710 @@ ++++ head-2011-03-11/arch/x86/mm/pgtable-xen.c 2011-01-31 18:07:35.000000000 +0100 +@@ -0,0 +1,713 @@ +#include +#include +#include @@ -14209,8 +14515,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + + /* so that alloc_pd can use it */ + mm->pgd = pgd; -+ if (pgd) ++ if (pgd) { ++ /* Store a back link for vmalloc_sync_all(). */ ++ set_page_private(virt_to_page(pgd), (unsigned long)mm); + pgd_ctor(pgd); ++ } + + if (pgd && !pgd_prepopulate_pmd(mm, pgd)) { + free_pages((unsigned long)pgd, PGD_ORDER); @@ -14297,8 +14606,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + + return young; +} ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pgtable_32-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -1,7 +1,3 @@ -/* - * linux/arch/i386/mm/pgtable.c @@ -14315,7 +14624,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches for_each_online_pgdat(pgdat) { pgdat_resize_lock(pgdat, &flags); for (i = 0; i < pgdat->node_spanned_pages; ++i) { -@@ -157,243 +152,6 @@ void __init reserve_top_address(unsigned +@@ -157,246 +152,6 @@ void __init reserve_top_address(unsigned __VMALLOC_RESERVE += reserve; } @@ -14509,8 +14818,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - - /* so that alloc_pd can use it */ - mm->pgd = pgd; -- if (pgd) +- if (pgd) { +- /* Store a back link for vmalloc_sync_all(). 
*/ +- set_page_private(virt_to_page(pgd), (unsigned long)mm); - pgd_ctor(pgd); +- } - - if (pgd && !pgd_prepopulate_pmd(mm, pgd)) { - free_page((unsigned long)pgd); @@ -14559,8 +14871,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches void make_lowmem_page_readonly(void *va, unsigned int feature) { pte_t *pte; ---- head-2010-05-25.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/pci/irq-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/pci/irq-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/pci/irq-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -140,9 +140,11 @@ static void __init pirq_peer_trick(void) busmap[e->bus] = 1; } @@ -14642,8 +14954,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } } ---- head-2010-05-25.orig/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/vdso32-setup-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/vdso32-setup-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -164,7 +164,7 @@ static __init void relocate_vdso(Elf32_E Elf32_Shdr *shdr; int i; @@ -14687,9 +14999,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches down_write(&mm->mmap_sem); /* Test compat mode once here, in case someone ---- head-2010-05-25.orig/drivers/acpi/processor_driver.c 2010-05-25 09:19:10.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_driver.c 2010-04-15 10:04:18.000000000 +0200 -@@ -489,7 +489,7 @@ static int acpi_processor_get_info(struc +--- head-2011-03-11.orig/drivers/acpi/processor_driver.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_driver.c 2011-01-31 18:07:35.000000000 +0100 +@@ -371,7 +371,7 @@ static int acpi_processor_get_info(struc * of /proc/cpuinfo */ status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer); @@ -14698,9 +15010,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches arch_fix_phys_package_id(pr->id, object.integer.value); return 0; ---- head-2010-05-25.orig/drivers/firmware/Kconfig 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/firmware/Kconfig 2010-03-24 15:12:36.000000000 +0100 -@@ -114,7 +114,7 @@ config DMIID +--- head-2011-03-11.orig/drivers/firmware/Kconfig 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/firmware/Kconfig 2011-01-31 18:07:35.000000000 +0100 +@@ -115,7 +115,7 @@ config DMIID config ISCSI_IBFT_FIND bool "iSCSI Boot Firmware Table Attributes" @@ -14709,9 +15021,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches default n help This option enables the kernel to find the region of memory ---- head-2010-05-25.orig/drivers/input/xen-kbdfront.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/input/xen-kbdfront.c 2010-04-15 10:04:29.000000000 +0200 -@@ -329,7 +329,6 @@ static const struct xenbus_device_id xen +--- head-2011-03-11.orig/drivers/input/xen-kbdfront.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/input/xen-kbdfront.c 2011-01-31 18:07:35.000000000 +0100 +@@ -331,7 +331,6 @@ static const struct xenbus_device_id xen static struct xenbus_driver xenkbd_driver = { .name = "vkbd", @@ -14719,9 +15031,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches 
.ids = xenkbd_ids, .probe = xenkbd_probe, .remove = xenkbd_remove, ---- head-2010-05-25.orig/drivers/oprofile/cpu_buffer.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/oprofile/cpu_buffer.c 2010-03-24 15:12:36.000000000 +0100 -@@ -502,7 +502,7 @@ fail: +--- head-2011-03-11.orig/drivers/oprofile/cpu_buffer.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/oprofile/cpu_buffer.c 2011-01-31 18:07:35.000000000 +0100 +@@ -475,7 +475,7 @@ fail: #ifdef CONFIG_XEN int oprofile_add_domain_switch(int32_t domain_id) { @@ -14730,9 +15042,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* should have space for switching into and out of domain (2 slots each) plus one sample and one cpu mode switch */ ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:12:36.000000000 +0100 -@@ -543,7 +543,7 @@ int pci_enable_msi(struct pci_dev* dev) +--- head-2011-03-11.orig/drivers/pci/msi-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/pci/msi-xen.c 2011-01-31 18:07:35.000000000 +0100 +@@ -504,7 +504,7 @@ int pci_enable_msi(struct pci_dev* dev) EXPORT_SYMBOL(pci_enable_msi); extern void pci_frontend_disable_msi(struct pci_dev* dev); @@ -14741,7 +15053,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { int pirq; struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev); -@@ -571,6 +571,10 @@ void pci_disable_msi(struct pci_dev* dev +@@ -532,6 +532,10 @@ void pci_disable_msi(struct pci_dev* dev pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; } @@ -14752,7 +15064,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches EXPORT_SYMBOL(pci_disable_msi); /** -@@ -675,7 +679,7 @@ int pci_enable_msix(struct pci_dev* dev, +@@ -636,7 +640,7 @@ int pci_enable_msix(struct pci_dev* dev, EXPORT_SYMBOL(pci_enable_msix); extern void pci_frontend_disable_msix(struct pci_dev* dev); @@ -14761,7 +15073,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { if (!pci_msi_enable || !dev || !dev->msix_enabled) return; -@@ -708,6 +712,10 @@ void pci_disable_msix(struct pci_dev* de +@@ -669,6 +673,10 @@ void pci_disable_msix(struct pci_dev* de pci_intx_for_msi(dev, 1); dev->msix_enabled = 0; } @@ -14772,9 +15084,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches EXPORT_SYMBOL(pci_disable_msix); /** ---- head-2010-05-25.orig/drivers/video/Kconfig 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/video/Kconfig 2010-03-24 15:12:36.000000000 +0100 -@@ -2110,7 +2110,7 @@ config FB_VIRTUAL +--- head-2011-03-11.orig/drivers/video/Kconfig 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/video/Kconfig 2011-01-31 18:07:35.000000000 +0100 +@@ -2253,7 +2253,7 @@ config FB_VIRTUAL config XEN_FBDEV_FRONTEND tristate "Xen virtual frame buffer support" @@ -14783,9 +15095,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT ---- head-2010-05-25.orig/drivers/video/xen-fbfront.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/video/xen-fbfront.c 2010-04-15 10:04:38.000000000 +0200 -@@ -674,7 +674,6 @@ static struct xenbus_device_id xenfb_ids +--- head-2011-03-11.orig/drivers/video/xen-fbfront.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/video/xen-fbfront.c 2011-01-31 
18:07:35.000000000 +0100 +@@ -679,7 +679,6 @@ static struct xenbus_device_id xenfb_ids static struct xenbus_driver xenfb_driver = { .name = "vfb", @@ -14793,8 +15105,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches .ids = xenfb_ids, .probe = xenfb_probe, .remove = xenfb_remove, ---- head-2010-05-25.orig/drivers/xen/Kconfig 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/Kconfig 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/Kconfig 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/drivers/xen/Kconfig 2011-01-31 18:07:35.000000000 +0100 @@ -2,8 +2,6 @@ # This Kconfig describe xen options # @@ -14804,9 +15116,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches config XEN bool ---- head-2010-05-25.orig/drivers/xen/Makefile 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/Makefile 2010-04-19 14:50:44.000000000 +0200 -@@ -1,13 +1,16 @@ +--- head-2011-03-11.orig/drivers/xen/Makefile 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/drivers/xen/Makefile 2011-02-28 15:13:33.000000000 +0100 +@@ -1,5 +1,7 @@ -obj-$(CONFIG_PARAVIRT_XEN) += grant-table.o +obj-$(CONFIG_PARAVIRT_XEN) += grant-table.o features.o events.o +xen-balloon-$(CONFIG_PARAVIRT_XEN) := balloon.o @@ -14815,19 +15127,20 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches obj-$(CONFIG_XEN) += core/ obj-$(CONFIG_XEN) += console/ obj-$(CONFIG_XEN) += evtchn/ - obj-y += xenbus/ - obj-$(CONFIG_XEN) += char/ +@@ -8,8 +10,9 @@ obj-$(CONFIG_XEN) += char/ + + xen-backend-$(CONFIG_XEN_BACKEND) := util.o --obj-$(CONFIG_XEN) += util.o +-obj-$(CONFIG_XEN) += $(xen-backend-y) $(xen-backend-m) -obj-$(CONFIG_XEN_BALLOON) += balloon/ -+obj-$(CONFIG_XEN) += features.o util.o -+obj-$(CONFIG_XEN_XENCOMM) += xencomm.o -+obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) ++obj-$(CONFIG_XEN) += features.o $(xen-backend-y) $(xen-backend-m) ++obj-$(CONFIG_XEN_XENCOMM) += xencomm.o ++obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ - obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ ---- head-2010-05-25.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/blkfront.c 2010-03-24 15:12:36.000000000 +0100 + obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ blktap2-new/ +--- head-2011-03-11.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkfront/blkfront.c 2011-01-31 18:07:35.000000000 +0100 @@ -285,7 +285,11 @@ static void backend_changed(struct xenbu break; @@ -14841,8 +15154,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (bd == NULL) { xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); break; ---- head-2010-05-25.orig/drivers/xen/blkfront/block.h 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/block.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blkfront/block.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkfront/block.h 2011-01-31 18:07:35.000000000 +0100 @@ -97,7 +97,6 @@ struct blk_shadow { struct blkfront_info { @@ -14851,9 +15164,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches struct gendisk *gd; int vdevice; blkif_vdev_t handle; ---- head-2010-05-25.orig/drivers/xen/blkfront/vbd.c 2010-03-24 15:09:22.000000000 +0100 -+++ 
head-2010-05-25/drivers/xen/blkfront/vbd.c 2010-03-24 15:12:36.000000000 +0100 -@@ -327,17 +327,32 @@ xlvbd_init_blk_queue(struct gendisk *gd, +--- head-2011-03-11.orig/drivers/xen/blkfront/vbd.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkfront/vbd.c 2011-01-31 18:07:35.000000000 +0100 +@@ -328,17 +328,33 @@ xlvbd_init_blk_queue(struct gendisk *gd, return 0; } @@ -14874,7 +15187,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + if ((vdevice>>EXT_SHIFT) > 1) { + /* this is above the extended range; something is wrong */ -+ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); ++ pr_warning("blkfront: vdevice %#x is above the extended range;" ++ " ignoring\n", vdevice); + return -ENODEV; + } + @@ -14890,7 +15204,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); -@@ -425,41 +440,6 @@ xlvbd_alloc_gendisk(int major, int minor +@@ -426,42 +442,6 @@ xlvbd_alloc_gendisk(int major, int minor return err; } @@ -14904,7 +15218,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - - if ((vdevice>>EXT_SHIFT) > 1) { - /* this is above the extended range; something is wrong */ -- printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); +- pr_warning("blkfront: vdevice %#x is above the extended" +- " range; ignoring\n", vdevice); - return -ENODEV; - } - @@ -14932,17 +15247,17 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches void xlvbd_del(struct blkfront_info *info) { ---- head-2010-05-25.orig/drivers/xen/blktap/blktap.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap/blktap.c 2010-04-29 09:51:23.000000000 +0200 -@@ -111,6 +111,7 @@ typedef struct tap_blkif { +--- head-2011-03-11.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:10:56.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap/blktap.c 2011-02-17 10:11:08.000000000 +0100 +@@ -110,6 +110,7 @@ typedef struct tap_blkif { unsigned long mode; /*current switching mode */ int minor; /*Minor number for tapdisk device */ pid_t pid; /*tapdisk process id */ + struct pid_namespace *pid_ns; /*... 
and its corresponding namespace */ enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace shutdown */ - unsigned long *idx_map; /*Record the user ring id to kern -@@ -301,16 +302,14 @@ static inline int OFFSET_TO_SEG(int offs + struct idx_map { +@@ -281,16 +282,14 @@ static inline unsigned int OFFSET_TO_SEG * BLKTAP VM OPS */ @@ -14962,7 +15277,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } static pte_t blktap_clear_pte(struct vm_area_struct *vma, -@@ -428,7 +427,7 @@ static void blktap_vma_close(struct vm_a +@@ -407,7 +406,7 @@ static void blktap_vma_close(struct vm_a } static struct vm_operations_struct blktap_vm_ops = { @@ -14971,7 +15286,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches zap_pte: blktap_clear_pte, open: blktap_vma_open, close: blktap_vma_close, -@@ -523,9 +522,8 @@ found: +@@ -502,9 +501,8 @@ found: tapfds[minor] = info; if ((class = get_xen_class()) != NULL) @@ -14983,7 +15298,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } out: -@@ -568,7 +566,7 @@ void signal_tapdisk(int idx) +@@ -547,7 +545,7 @@ void signal_tapdisk(int idx) return; if (info->pid > 0) { @@ -14992,7 +15307,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (ptask) info->status = CLEANSHUTDOWN; } -@@ -809,8 +807,9 @@ static int blktap_ioctl(struct inode *in +@@ -790,8 +788,9 @@ static int blktap_ioctl(struct inode *in { if (info) { info->pid = (pid_t)arg; @@ -15004,7 +15319,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } return 0; } -@@ -1762,9 +1761,7 @@ static int __init blkif_init(void) +@@ -1744,9 +1743,7 @@ static int __init blkif_init(void) * We only create the device when a request of a new device is * made. 
*/ @@ -15015,8 +15330,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } else { /* this is bad, but not fatal */ WPRINTK("blktap: sysfs xen_class not created\n"); ---- head-2010-05-25.orig/drivers/xen/blktap2/blktap.h 2010-05-19 17:47:46.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap2/blktap.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blktap2/blktap.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/blktap.h 2011-01-31 18:07:35.000000000 +0100 @@ -127,7 +127,7 @@ struct blktap_ring { wait_queue_head_t poll_wait; @@ -15026,9 +15341,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches atomic_t sysfs_refcnt; struct mutex sysfs_mutex; }; ---- head-2010-05-25.orig/drivers/xen/blktap2/device.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/device.c 2010-04-19 11:30:22.000000000 +0200 -@@ -571,7 +571,8 @@ blktap_map(struct blktap *tap, +--- head-2011-03-11.orig/drivers/xen/blktap2/device.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/device.c 2011-01-31 18:07:35.000000000 +0100 +@@ -567,7 +567,8 @@ blktap_map(struct blktap *tap, if (!xen_feature(XENFEAT_auto_translated_physmap)) { pte = mk_pte(page, ring->vma->vm_page_prot); @@ -15038,8 +15353,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches flush_tlb_page(ring->vma, uvaddr); blktap_map_uaddr(NULL, kvaddr, mk_pte(page, PAGE_KERNEL)); flush_tlb_kernel_page(kvaddr); ---- head-2010-05-25.orig/drivers/xen/blktap2/ring.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/ring.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blktap2/ring.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/ring.c 2011-01-31 18:07:35.000000000 +0100 @@ -66,16 +66,15 @@ blktap_read_ring(struct blktap *tap) return 0; } @@ -15061,7 +15376,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } static pte_t -@@ -205,7 +204,7 @@ blktap_ring_vm_close(struct vm_area_stru +@@ -202,7 +201,7 @@ blktap_ring_vm_close(struct vm_area_stru static struct vm_operations_struct blktap_ring_vm_operations = { .close = blktap_ring_vm_close, .unmap = blktap_ring_vm_unmap, @@ -15070,24 +15385,24 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches .zap_pte = blktap_ring_clear_pte, }; ---- head-2010-05-25.orig/drivers/xen/blktap2/sysfs.c 2010-05-19 17:49:22.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap2/sysfs.c 2010-05-25 09:24:03.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/blktap2/sysfs.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/sysfs.c 2011-03-11 10:58:58.000000000 +0100 @@ -36,16 +36,21 @@ blktap_sysfs_exit(struct blktap *tap) blktap_sysfs_put(tap); } -static ssize_t blktap_sysfs_pause_device(struct class_device *, const char *, size_t); --CLASS_DEVICE_ATTR(pause, S_IWUSR, NULL, blktap_sysfs_pause_device); +-static CLASS_DEVICE_ATTR(pause, S_IWUSR, NULL, blktap_sysfs_pause_device); -static ssize_t blktap_sysfs_resume_device(struct class_device *, const char *, size_t); --CLASS_DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); +-static CLASS_DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); +static ssize_t blktap_sysfs_pause_device(struct device *, + struct device_attribute *, + const char *, size_t); -+DEVICE_ATTR(pause, S_IWUSR, NULL, 
blktap_sysfs_pause_device); ++static DEVICE_ATTR(pause, S_IWUSR, NULL, blktap_sysfs_pause_device); +static ssize_t blktap_sysfs_resume_device(struct device *, + struct device_attribute *, + const char *, size_t); -+DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); ++static DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); static ssize_t -blktap_sysfs_set_name(struct class_device *dev, const char *buf, size_t size) @@ -15118,9 +15433,10 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return size; } --CLASS_DEVICE_ATTR(name, S_IRUSR | S_IWUSR, -+DEVICE_ATTR(name, S_IRUSR | S_IWUSR, - blktap_sysfs_get_name, blktap_sysfs_set_name); +-static CLASS_DEVICE_ATTR(name, S_IRUSR | S_IWUSR, +- blktap_sysfs_get_name, blktap_sysfs_set_name); ++static DEVICE_ATTR(name, S_IRUSR | S_IWUSR, ++ blktap_sysfs_get_name, blktap_sysfs_set_name); static ssize_t -blktap_sysfs_remove_device(struct class_device *dev, @@ -15137,8 +15453,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return (err ? : size); } --CLASS_DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); -+DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); +-static CLASS_DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); ++static DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); static ssize_t -blktap_sysfs_pause_device(struct class_device *dev, @@ -15222,8 +15538,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches return ret; } --CLASS_DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); -+DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); +-static CLASS_DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); ++static DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); int blktap_sysfs_create(struct blktap *tap) @@ -15325,8 +15641,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } static ssize_t ---- head-2010-05-25.orig/drivers/xen/char/mem.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/char/mem.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/char/mem.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-11/drivers/xen/char/mem.c 2011-01-31 18:07:35.000000000 +0100 @@ -33,6 +33,27 @@ static inline int uncached_access(struct return 0; } @@ -15415,8 +15731,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* We want to return the real error code, not EAGAIN. 
*/ return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot, DOMID_IO); ---- head-2010-05-25.orig/drivers/xen/console/console.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/console/console.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/console/console.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/console/console.c 2011-01-31 18:07:35.000000000 +0100 @@ -551,16 +551,18 @@ static int xencons_write( return i; } @@ -15458,8 +15774,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (tty->ldisc.flush_buffer != NULL) tty->ldisc.flush_buffer(tty); tty->closing = 0; ---- head-2010-05-25.orig/drivers/xen/core/Makefile 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/Makefile 2010-04-19 14:50:32.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/core/Makefile 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/Makefile 2011-01-31 18:07:35.000000000 +0100 @@ -2,7 +2,7 @@ # Makefile for the linux kernel. # @@ -15474,8 +15790,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches obj-$(CONFIG_X86_SMP) += spinlock.o obj-$(CONFIG_KEXEC) += machine_kexec.o -obj-$(CONFIG_XEN_XENCOMM) += xencomm.o ---- head-2010-05-25.orig/drivers/xen/core/machine_kexec.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/machine_kexec.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/machine_kexec.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/machine_kexec.c 2011-01-31 18:07:35.000000000 +0100 @@ -5,6 +5,7 @@ #include @@ -15547,79 +15863,37 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Local variables: ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:12:36.000000000 +0100 -@@ -53,16 +53,15 @@ static DEFINE_PER_CPU(int, callfunc_irq) - static char resched_name[NR_CPUS][15]; - static char callfunc_name[NR_CPUS][15]; - --u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... 
NR_CPUS-1] = BAD_APICID }; -+#ifdef CONFIG_X86_LOCAL_APIC -+#define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid)) -+#else -+#define set_cpu_to_apicid(cpu, apicid) -+#endif - - DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); - DEFINE_PER_CPU(cpumask_t, cpu_core_map); - --#if defined(__i386__) --DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID; --EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); --#endif -- - void __init prefill_possible_map(void) - { - int i, rc; -@@ -157,7 +156,7 @@ static int __cpuinit xen_smp_intr_init(u +--- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-01-31 18:07:35.000000000 +0100 +@@ -120,7 +120,7 @@ static int __cpuinit xen_smp_intr_init(u } #ifdef CONFIG_HOTPLUG_CPU -static void xen_smp_intr_exit(unsigned int cpu) -+static void __cpuexit xen_smp_intr_exit(unsigned int cpu) ++static void __cpuinit xen_smp_intr_exit(unsigned int cpu) { if (cpu != 0) local_teardown_timer(cpu); -@@ -266,8 +265,7 @@ void __init smp_prepare_cpus(unsigned in - boot_cpu_data.apicid = apicid; - cpu_data(0) = boot_cpu_data; - -- cpu_2_logical_apicid[0] = apicid; -- per_cpu(x86_cpu_to_apicid, 0) = apicid; -+ set_cpu_to_apicid(0, apicid); - - current_thread_info()->cpu = 0; - -@@ -322,8 +320,7 @@ void __init smp_prepare_cpus(unsigned in - cpu_data(cpu).cpu_index = cpu; - cpu_data(cpu).apicid = apicid; - -- cpu_2_logical_apicid[cpu] = apicid; -- per_cpu(x86_cpu_to_apicid, cpu) = apicid; -+ set_cpu_to_apicid(cpu, apicid); - - #ifdef __x86_64__ - cpu_pda(cpu)->pcurrent = idle; -@@ -378,7 +375,7 @@ static int __init initialize_cpu_present +@@ -324,7 +324,7 @@ static int __init initialize_cpu_present } core_initcall(initialize_cpu_present_map); -int __cpu_disable(void) -+int __cpuexit __cpu_disable(void) ++int __cpuinit __cpu_disable(void) { cpumask_t map = cpu_online_map; unsigned int cpu = smp_processor_id(); -@@ -395,7 +392,7 @@ int __cpu_disable(void) +@@ -339,7 +339,7 @@ int __cpu_disable(void) return 0; } -void __cpu_die(unsigned int cpu) -+void __cpuexit __cpu_die(unsigned int cpu) ++void __cpuinit __cpu_die(unsigned int cpu) { while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { current->state = TASK_UNINTERRUPTIBLE; ---- head-2010-05-25.orig/drivers/xen/core/xen_proc.c 2007-06-12 13:13:44.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/xen_proc.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/xen_proc.c 2007-06-12 13:13:44.000000000 +0200 ++++ head-2011-03-11/drivers/xen/core/xen_proc.c 2011-01-31 18:07:35.000000000 +0100 @@ -8,7 +8,7 @@ static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode) { @@ -15629,8 +15903,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } ---- head-2010-05-25.orig/drivers/xen/fbfront/xenfb.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/fbfront/xenfb.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/fbfront/xenfb.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/fbfront/xenfb.c 2011-01-31 18:07:35.000000000 +0100 @@ -93,7 +93,7 @@ struct xenfb_info * only mappings. The former creates unfaulted pages. Preserves * invariant. The latter removes pages. Preserves invariant. 
@@ -15700,8 +15974,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches }; static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma) ---- head-2010-05-25.orig/drivers/xen/features.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/drivers/xen/features.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/features.c 2011-03-11 10:41:54.000000000 +0100 ++++ head-2011-03-11/drivers/xen/features.c 2011-01-31 18:07:35.000000000 +0100 @@ -9,14 +9,21 @@ #include #include @@ -15725,9 +15999,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches void xen_setup_features(void) { ---- head-2010-05-25.orig/drivers/xen/gntdev/gntdev.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/gntdev/gntdev.c 2010-03-24 15:12:36.000000000 +0100 -@@ -392,7 +392,7 @@ nomem_out: +--- head-2011-03-11.orig/drivers/xen/gntdev/gntdev.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/gntdev/gntdev.c 2011-01-31 18:07:35.000000000 +0100 +@@ -375,7 +375,7 @@ nomem_out: static int __init gntdev_init(void) { struct class *class; @@ -15735,8 +16009,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + struct device *device; if (!is_running_on_xen()) { - printk(KERN_ERR "You must be running Xen to use gntdev\n"); -@@ -417,8 +417,8 @@ static int __init gntdev_init(void) + pr_err("You must be running Xen to use gntdev\n"); +@@ -399,8 +399,8 @@ static int __init gntdev_init(void) return 0; } @@ -15745,9 +16019,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + device = device_create(class, NULL, MKDEV(gntdev_major, 0), + GNTDEV_NAME); if (IS_ERR(device)) { - printk(KERN_ERR "Error creating gntdev device in xen_class\n"); - printk(KERN_ERR "gntdev created with major number = %d\n", -@@ -435,7 +435,7 @@ static void __exit gntdev_exit(void) + pr_err("Error creating gntdev device in xen_class\n"); + pr_err("gntdev created, major number = %d\n", gntdev_major); +@@ -416,7 +416,7 @@ static void __exit gntdev_exit(void) { struct class *class; if ((class = get_xen_class()) != NULL) @@ -15756,9 +16030,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches unregister_chrdev(gntdev_major, GNTDEV_NAME); } ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.c 2010-03-24 15:12:36.000000000 +0100 -@@ -1464,8 +1464,7 @@ err: +--- head-2011-03-11.orig/drivers/xen/netfront/netfront.c 2011-02-09 16:00:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/netfront.c 2011-02-09 16:04:02.000000000 +0100 +@@ -1452,8 +1452,7 @@ err: } } @@ -15768,7 +16042,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; -@@ -1630,8 +1629,7 @@ static void netif_release_rx_bufs_flip(s +@@ -1623,8 +1622,7 @@ static void netif_release_rx_bufs_flip(s } } @@ -15778,8 +16052,87 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches spin_unlock_bh(&np->rx_lock); } ---- head-2010-05-25.orig/drivers/xen/privcmd/privcmd.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/privcmd/privcmd.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/pciback/pci_stub.c 2011-01-31 18:01:51.000000000 +0100 ++++ 
head-2011-03-11/drivers/xen/pciback/pci_stub.c 2011-01-31 18:07:35.000000000 +0100 +@@ -5,7 +5,9 @@ + * Chris Bookholt + */ + #include ++#include + #include ++#include + #include + #include + #include +@@ -507,34 +509,46 @@ static void kill_domain_by_device(struct + { + struct xenbus_transaction xbt; + int err; +- char nodename[1024]; ++ char *nodename; + +- if (!psdev) ++ if (!psdev) { + dev_err(&psdev->dev->dev, + "device is NULL when do AER recovery/kill_domain\n"); +- sprintf(nodename, "/local/domain/0/backend/pci/%d/0", +- psdev->pdev->xdev->otherend_id); +- nodename[strlen(nodename)] = '\0'; +- +-again: +- err = xenbus_transaction_start(&xbt); +- if (err) +- { +- dev_err(&psdev->dev->dev, +- "error %d when start xenbus transaction\n", err); + return; + } +- /*PV AER handlers will set this flag*/ +- xenbus_printf(xbt, nodename, "aerState" , "aerfail" ); +- err = xenbus_transaction_end(xbt, 0); +- if (err) +- { +- if (err == -EAGAIN) +- goto again; ++ ++ nodename = kasprintf(GFP_KERNEL, ++ "/local/domain/0/backend/pci/%d/0", ++ psdev->pdev->xdev->otherend_id); ++ if (!nodename) { + dev_err(&psdev->dev->dev, +- "error %d when end xenbus transaction\n", err); ++ "not enough memory\n"); + return; + } ++ ++ do { ++ err = xenbus_transaction_start(&xbt); ++ if (err) { ++ dev_err(&psdev->dev->dev, ++ "error %d starting xenbus transaction\n", err); ++ break; ++ } ++ ++ /* PV AER handlers will set this flag */ ++ xenbus_printf(xbt, nodename, "aerState" , "aerfail" ); ++ ++ err = xenbus_transaction_end(xbt, 0); ++ switch (err) { ++ default: ++ dev_err(&psdev->dev->dev, ++ "error %d ending xenbus transaction\n", err); ++ break; ++ case 0: ++ case -EAGAIN: ++ break; ++ } ++ } while (err == -EAGAIN); ++ kfree(nodename); + } + + /* For each aer recovery step error_detected, mmio_enabled, etc, front_end and +--- head-2011-03-11.orig/drivers/xen/privcmd/privcmd.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/privcmd/privcmd.c 2011-01-31 18:07:35.000000000 +0100 @@ -401,15 +401,13 @@ static long privcmd_ioctl(struct file *f } @@ -15799,9 +16152,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches }; static int privcmd_mmap(struct file * file, struct vm_area_struct * vma) ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:12:36.000000000 +0100 -@@ -440,7 +440,7 @@ int xenbus_map_ring_valloc(struct xenbus +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_client.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_client.c 2011-01-31 18:07:35.000000000 +0100 +@@ -437,7 +437,7 @@ int xenbus_map_ring_valloc(struct xenbus *vaddr = NULL; @@ -15810,7 +16163,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (!area) return -ENOMEM; -@@ -450,7 +450,7 @@ int xenbus_map_ring_valloc(struct xenbus +@@ -447,7 +447,7 @@ int xenbus_map_ring_valloc(struct xenbus BUG(); if (op.status != GNTST_okay) { @@ -15819,7 +16172,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); -@@ -549,7 +549,7 @@ int xenbus_unmap_ring_vfree(struct xenbu +@@ -546,7 +546,7 @@ int xenbus_unmap_ring_vfree(struct xenbu BUG(); if (op.status == GNTST_okay) @@ -15828,9 +16181,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches else 
xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:12:36.000000000 +0100 -@@ -174,7 +174,7 @@ static int read_backend_details(struct x +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 18:07:35.000000000 +0100 +@@ -175,7 +175,7 @@ static int read_backend_details(struct x return read_otherend_details(xendev, "backend-id", "backend"); } @@ -15839,7 +16192,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; -@@ -186,8 +186,10 @@ static int xenbus_uevent_frontend(struct +@@ -187,8 +187,10 @@ static int xenbus_uevent_frontend(struct return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ @@ -15850,7 +16203,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); return 0; -@@ -208,10 +210,8 @@ static struct xen_bus_type xenbus_fronte +@@ -209,10 +211,8 @@ static struct xen_bus_type xenbus_fronte .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, @@ -15861,7 +16214,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches }, #if defined(CONFIG_XEN) || defined(MODULE) .dev = { -@@ -529,6 +529,15 @@ static ssize_t xendev_show_devtype(struc +@@ -531,6 +531,15 @@ static ssize_t xendev_show_devtype(struc } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); @@ -15873,11 +16226,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +{ + return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); +} -+DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); ++static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, -@@ -589,10 +598,16 @@ int xenbus_probe_node(struct xen_bus_typ +@@ -591,10 +600,16 @@ int xenbus_probe_node(struct xen_bus_typ err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) @@ -15896,9 +16249,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); ---- head-2010-05-25.orig/fs/aio.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/fs/aio.c 2010-03-24 15:12:36.000000000 +0100 -@@ -1238,6 +1238,7 @@ static void io_destroy(struct kioctx *io +--- head-2011-03-11.orig/fs/aio.c 2011-03-11 10:55:30.000000000 +0100 ++++ head-2011-03-11/fs/aio.c 2011-03-11 10:58:46.000000000 +0100 +@@ -1243,6 +1243,7 @@ static void io_destroy(struct kioctx *io #ifdef CONFIG_EPOLL /* forget the poll file, but it's up to the user to close it */ if (ioctx->file) { @@ -15906,7 +16259,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches ioctx->file->private_data = 0; ioctx->file = 0; } -@@ -1262,6 +1263,7 @@ static int aio_queue_fd_close(struct ino +@@ -1267,6 +1268,7 @@ static int aio_queue_fd_close(struct ino spin_lock_irq(&ioctx->ctx_lock); ioctx->file = 0; spin_unlock_irq(&ioctx->ctx_lock); @@ -15914,7 +16267,7 @@ Automatically created from 
"patches.kernel.org/patch-2.6.26" by xen-port-patches } return 0; } -@@ -1297,16 +1299,17 @@ static const struct file_operations aioq +@@ -1302,16 +1304,17 @@ static const struct file_operations aioq static int make_aio_fd(struct kioctx *ioctx) { @@ -15938,8 +16291,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches file->private_data = ioctx; ioctx->file = file; init_waitqueue_head(&ioctx->poll_wait); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/desc.h 2011-01-31 18:07:35.000000000 +0100 @@ -64,8 +64,8 @@ static inline struct desc_struct *get_cp } @@ -16082,8 +16435,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif /* __ASSEMBLY__ */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/dma-mapping.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/dma-mapping.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/dma-mapping.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/dma-mapping.h 2011-01-31 18:07:35.000000000 +0100 @@ -1,5 +1,17 @@ -#ifdef CONFIG_X86_32 -# include "dma-mapping_32.h" @@ -16107,8 +16460,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +extern int range_straddles_page_boundary(paddr_t p, size_t size); + +#endif /* _ASM_DMA_MAPPING_H_ */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap.h 2011-01-31 18:07:35.000000000 +0100 @@ -1,5 +1,13 @@ +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H @@ -16123,8 +16476,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + __set_fixmap(idx, 0, __pgprot(0)) + +#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 18:07:35.000000000 +0100 @@ -10,8 +10,8 @@ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ @@ -16180,8 +16533,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } static inline unsigned long virt_to_fix(const unsigned long vaddr) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 18:07:35.000000000 +0100 @@ -8,8 +8,8 @@ * Copyright (C) 1998 Ingo Molnar */ @@ -16244,8 +16597,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches 
#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 18:07:35.000000000 +0100 @@ -8,7 +8,7 @@ * Gerhard.Wichert@pdb.siemens.de * @@ -16255,8 +16608,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches * up to 16 Terabyte physical memory. With current x86 CPUs * we now support up to 64 Gigabytes physical RAM. * ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/io.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/io.h 2011-01-31 18:07:35.000000000 +0100 @@ -1,5 +1,22 @@ +#ifndef _ASM_X86_IO_H +#define _ASM_X86_IO_H @@ -16280,8 +16633,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); + +#endif /* _ASM_X86_IO_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irqflags.h 2011-01-31 18:07:35.000000000 +0100 @@ -139,11 +139,11 @@ sysexit_ecrit: /**** END OF SYSEXIT CRIT #endif /* __ASSEMBLY__ */ @@ -16298,8 +16651,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches static inline int raw_irqs_disabled_flags(unsigned long flags) { ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 18:07:35.000000000 +0100 @@ -94,7 +94,7 @@ static inline void switch_mm(struct mm_s BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); @@ -16325,8 +16678,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +} while (0) #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 18:07:35.000000000 +0100 @@ -21,7 +21,7 @@ void destroy_context(struct mm_struct *m static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { @@ -16370,8 +16723,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { ---- 
head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pci.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pci.h 2011-01-31 18:07:35.000000000 +0100 @@ -8,14 +8,13 @@ #include #include @@ -16424,8 +16777,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #ifdef CONFIG_PCI ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgalloc.h 2011-01-31 18:07:35.000000000 +0100 @@ -1,5 +1,149 @@ -#ifdef CONFIG_X86_32 -# include "pgalloc_32.h" @@ -16580,7 +16933,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +#endif /* PAGETABLE_LEVELS > 2 */ + +#endif /* _ASM_X86_PGALLOC_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -#ifndef _I386_PGALLOC_H @@ -16694,9 +17047,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -#endif /* CONFIG_X86_PAE */ - -#endif /* _I386_PGALLOC_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,179 +0,0 @@ +@@ -1,182 +0,0 @@ -#ifndef _X86_64_PGALLOC_H -#define _X86_64_PGALLOC_H - @@ -16779,11 +17132,14 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - __pmd_free(virt_to_page(pud)); -} - --static inline void pgd_list_add(pgd_t *pgd) +-static inline void pgd_list_add(pgd_t *pgd, struct mm_struct *mm) -{ - struct page *page = virt_to_page(pgd); - unsigned long flags; - +- /* Store a back link for vmalloc_sync_all(). */ +- set_page_private(page, (unsigned long)mm); +- - spin_lock_irqsave(&pgd_lock, flags); - list_add(&page->lru, &pgd_list); - spin_unlock_irqrestore(&pgd_lock, flags); @@ -16807,23 +17163,23 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - * We allocate two contiguous pages for kernel and user. - */ - unsigned boundary; -- pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1); +- pgd_t *pgd; +- +- pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 1); - if (!pgd) - return NULL; -- pgd_list_add(pgd); - pgd_test_and_unpin(pgd); +- pgd_list_add(pgd, mm); - /* - * Copy kernel pointers in from init. - * Could keep a freelist or slab cache of those because the kernel - * part never changes. 
- */ - boundary = pgd_index(__PAGE_OFFSET); -- memset(pgd, 0, boundary * sizeof(pgd_t)); - memcpy(pgd + boundary, - init_level4_pgt + boundary, - (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); - -- memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */ - /* - * Set level3_user_pgt for vsyscall area - */ @@ -16834,8 +17190,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ -- pgd_test_and_unpin(pgd); - pgd_list_del(pgd); +- pgd_test_and_unpin(pgd); - free_pages((unsigned long)pgd, 1); -} - @@ -16876,8 +17232,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) - -#endif /* _X86_64_PGALLOC_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable.h 2011-01-31 18:07:35.000000000 +0100 @@ -1,17 +1,15 @@ #ifndef _ASM_X86_PGTABLE_H #define _ASM_X86_PGTABLE_H @@ -17285,8 +17641,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #define arch_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable) \ xen_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 18:07:35.000000000 +0100 @@ -8,25 +8,28 @@ * Copyright (C) 1999 Ingo Molnar */ @@ -17379,8 +17735,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:52.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 18:07:35.000000000 +0100 @@ -38,16 +38,13 @@ void paging_init(void); #ifdef CONFIG_X86_PAE # include @@ -17591,8 +17947,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + direct_remap_pfn_range(vma, from, pfn, size, prot, DOMID_IO) #endif /* _I386_PGTABLE_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:47.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 18:07:35.000000000 +0100 @@ -31,7 +31,7 @@ extern void paging_init(void); #endif /* !__ASSEMBLY__ */ @@ -17864,8 +18220,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #define __HAVE_ARCH_PTE_SAME #endif /* !__ASSEMBLY__ */ ---- 
head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:42:13.000000000 +0100 @@ -3,10 +3,6 @@ #include @@ -17909,7 +18265,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif /* -@@ -57,68 +56,80 @@ static inline void *current_text_addr(vo +@@ -57,74 +56,90 @@ static inline void *current_text_addr(vo */ struct cpuinfo_x86 { @@ -17933,22 +18289,36 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + char wp_works_ok; /* It doesn't on 386's */ + + /* Problems on some 486Dx4's and old 386's: */ ++#ifndef CONFIG_XEN + char hlt_works_ok; ++#endif + char hard_math; ++#ifndef CONFIG_XEN + char rfu; + char fdiv_bug; + char f00f_bug; ++#endif + char coma_bug; + char pad0; #else - /* number of 4K pages in DTLB/ITLB combined(in pages)*/ - int x86_tlbsize; - __u8 x86_virt_bits, x86_phys_bits; ++ /* Number of 4K pages in DTLB/ITLB combined(in pages): */ ++ int x86_tlbsize; ++ __u8 x86_virt_bits; ++ __u8 x86_phys_bits; + #ifndef CONFIG_XEN - /* cpuid returned core id bits */ - __u8 x86_coreid_bits; ++ /* CPUID returned core id bits: */ ++ __u8 x86_coreid_bits; + #endif - /* Max extended CPUID function supported */ - __u32 extended_cpuid_level; --#endif ++ /* Max extended CPUID function supported: */ ++ __u32 extended_cpuid_level; + #endif - int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ - __u32 x86_capability[NCAPINTS]; - char x86_vendor_id[16]; @@ -17958,15 +18328,6 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - int x86_cache_alignment; /* In bytes */ - int x86_power; - unsigned long loops_per_jiffy; -+ /* Number of 4K pages in DTLB/ITLB combined(in pages): */ -+ int x86_tlbsize; -+ __u8 x86_virt_bits; -+ __u8 x86_phys_bits; -+ /* CPUID returned core id bits: */ -+ __u8 x86_coreid_bits; -+ /* Max extended CPUID function supported: */ -+ __u32 extended_cpuid_level; -+#endif + /* Maximum supported CPUID level, -1=no CPUID: */ + int cpuid_level; + __u32 x86_capability[NCAPINTS]; @@ -17977,6 +18338,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + int x86_cache_alignment; /* In bytes */ + int x86_power; + unsigned long loops_per_jiffy; + #ifndef CONFIG_XEN #ifdef CONFIG_SMP - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ + /* cpus sharing the last level cache: */ @@ -17984,23 +18346,26 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #endif - u16 x86_max_cores; /* cpuid returned max cores value */ - u16 apicid; -- u16 x86_clflush_size; + /* cpuid returned max cores value: */ + u16 x86_max_cores; + u16 apicid; + u16 initial_apicid; + #endif +- u16 x86_clflush_size; + u16 x86_clflush_size; - #ifdef CONFIG_SMP + #ifdef CONFIG_X86_HT - u16 booted_cores; /* number of cores as seen by OS */ - u16 phys_proc_id; /* Physical processor id. 
*/ - u16 cpu_core_id; /* Core id */ -- u16 cpu_index; /* index into per_cpu list */ + /* number of cores as seen by the OS: */ + u16 booted_cores; + /* Physical processor id: */ + u16 phys_proc_id; + /* Core id: */ + u16 cpu_core_id; + #endif + #ifdef CONFIG_SMP +- u16 cpu_index; /* index into per_cpu list */ + /* Index into per_cpu list: */ + u16 cpu_index; #endif @@ -18040,14 +18405,14 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #ifdef CONFIG_SMP DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); -@@ -129,7 +140,18 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_ +@@ -135,7 +150,18 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_ #define current_cpu_data boot_cpu_data #endif -void cpu_detect(struct cpuinfo_x86 *c); +static inline int hlt_works(int cpu) +{ -+#ifdef CONFIG_X86_32 ++#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) + return cpu_data(cpu).hlt_works_ok; +#else + return 1; @@ -18060,7 +18425,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches extern void identify_cpu(struct cpuinfo_x86 *); extern void identify_boot_cpu(void); -@@ -149,12 +171,12 @@ static inline void xen_cpuid(unsigned in +@@ -155,12 +181,12 @@ static inline void xen_cpuid(unsigned in unsigned int *ecx, unsigned int *edx) { /* ecx is often an input as well as an output. */ @@ -18079,7 +18444,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } static inline void load_cr3(pgd_t *pgdir) -@@ -166,57 +188,70 @@ static inline void load_cr3(pgd_t *pgdir +@@ -172,57 +198,70 @@ static inline void load_cr3(pgd_t *pgdir #ifdef CONFIG_X86_32 /* This is the TSS defined by the hardware. */ struct x86_hw_tss { @@ -18188,7 +18553,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * The extra 1 is there because the CPU will access an -@@ -224,135 +259,161 @@ struct tss_struct { +@@ -230,135 +269,161 @@ struct tss_struct { * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ @@ -18426,7 +18791,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ unsigned long debugctlmsr; /* Debug Store - if not 0 points to a DS Save Area configuration; -@@ -383,12 +444,12 @@ static inline void xen_set_iopl_mask(uns +@@ -389,12 +454,12 @@ static inline void xen_set_iopl_mask(uns } #ifndef CONFIG_X86_NO_TSS @@ -18442,18 +18807,18 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { tss->x86_tss.ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); -@@ -402,8 +463,8 @@ static inline void native_load_sp0(struc +@@ -408,8 +473,8 @@ static inline void native_load_sp0(struc } while (0) #endif -#define __cpuid xen_cpuid --#define paravirt_enabled() 0 +-#define paravirt_enabled() 1 +#define __cpuid xen_cpuid -+#define paravirt_enabled() 0 ++#define paravirt_enabled() 1 /* * These special macros can be used to get or set a debugging register -@@ -423,11 +484,12 @@ static inline void native_load_sp0(struc +@@ -429,11 +494,12 @@ static inline void native_load_sp0(struc * enable), so that any CPU's that boot up * after us can get the correct flags. 
*/ @@ -18467,7 +18832,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; -@@ -437,6 +499,7 @@ static inline void set_in_cr4(unsigned l +@@ -443,6 +509,7 @@ static inline void set_in_cr4(unsigned l static inline void clear_in_cr4(unsigned long mask) { unsigned cr4; @@ -18475,7 +18840,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; -@@ -444,42 +507,42 @@ static inline void clear_in_cr4(unsigned +@@ -450,42 +517,42 @@ static inline void clear_in_cr4(unsigned } struct microcode_header { @@ -18539,7 +18904,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } mm_segment_t; -@@ -491,7 +554,7 @@ extern int kernel_thread(int (*fn)(void +@@ -497,7 +564,7 @@ extern int kernel_thread(int (*fn)(void /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); @@ -18548,7 +18913,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches extern void prepare_to_copy(struct task_struct *tsk); unsigned long get_wchan(struct task_struct *p); -@@ -528,118 +591,138 @@ static inline unsigned int cpuid_eax(uns +@@ -534,118 +601,138 @@ static inline unsigned int cpuid_eax(uns unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); @@ -18588,14 +18953,14 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { - __asm__ __volatile__("rep;nop": : :"memory"); + asm volatile("rep; nop" ::: "memory"); - } - --/* Stop speculative execution */ ++} ++ +static inline void cpu_relax(void) +{ + rep_nop(); -+} -+ + } + +-/* Stop speculative execution */ +/* Stop speculative execution: */ static inline void sync_core(void) { @@ -18689,12 +19054,12 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +extern unsigned int machine_id; +extern unsigned int machine_submodel_id; +extern unsigned int BIOS_revision; -+ -+/* Boot loader type from the setup header: */ -+extern int bootloader_type; -extern char ignore_fpu_irq; -#define cache_line_size() (boot_cpu_data.x86_cache_alignment) ++/* Boot loader type from the setup header: */ ++extern int bootloader_type; ++ +extern char ignore_fpu_irq; #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 @@ -18725,7 +19090,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches static inline void prefetch(const void *x) { alternative_input(BASE_PREFETCH, -@@ -648,8 +731,11 @@ static inline void prefetch(const void * +@@ -654,8 +741,11 @@ static inline void prefetch(const void * "r" (x)); } @@ -18739,7 +19104,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches static inline void prefetchw(const void *x) { alternative_input(BASE_PREFETCH, -@@ -658,21 +744,25 @@ static inline void prefetchw(const void +@@ -664,21 +754,25 @@ static inline void prefetchw(const void "r" (x)); } @@ -18776,7 +19141,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches } /* -@@ -681,28 +771,15 @@ static inline void prefetchw(const void +@@ -687,28 +781,15 @@ static inline void prefetchw(const void * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. 
*/ @@ -18813,7 +19178,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches extern unsigned long thread_saved_pc(struct task_struct *tsk); -@@ -734,18 +811,18 @@ extern unsigned long thread_saved_pc(str +@@ -740,18 +821,18 @@ extern unsigned long thread_saved_pc(str /* * User space process size. 47bits minus one guard page. */ @@ -18839,7 +19204,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX TASK_SIZE64 -@@ -758,32 +835,32 @@ extern unsigned long thread_saved_pc(str +@@ -764,32 +845,32 @@ extern unsigned long thread_saved_pc(str .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } @@ -18888,9 +19253,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +extern int set_tsc_mode(unsigned int val); #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:12:36.000000000 +0100 -@@ -1,5 +1,224 @@ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp.h 2011-01-31 18:07:35.000000000 +0100 +@@ -1,5 +1,225 @@ -#ifdef CONFIG_X86_32 -# include "smp_32.h" +#ifndef _ASM_X86_SMP_H_ @@ -18920,16 +19285,17 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +extern void (*mtrr_hook)(void); +extern void zap_low_mappings(void); + -+extern int smp_num_siblings; +extern unsigned int num_processors; +extern cpumask_t cpu_initialized; + -+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) ++#ifndef CONFIG_XEN ++#ifdef CONFIG_SMP +extern u16 x86_cpu_to_apicid_init[]; +extern u16 x86_bios_cpu_apicid_init[]; +extern void *x86_cpu_to_apicid_early_ptr; +extern void *x86_bios_cpu_apicid_early_ptr; -+#else + #else +-# include "smp_64.h" +#define x86_cpu_to_apicid_early_ptr NULL +#define x86_bios_cpu_apicid_early_ptr NULL +#endif @@ -18939,6 +19305,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +DECLARE_PER_CPU(u16, cpu_llc_id); +DECLARE_PER_CPU(u16, x86_cpu_to_apicid); +DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); ++#endif + +#ifdef CONFIG_SMP + @@ -19032,7 +19399,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +extern void prefill_possible_map(void); + +void smp_store_cpu_info(int id); -+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) ++#define cpu_physical_id(cpu) (cpu) + +/* We don't mark CPUs online until __cpu_up(), so we need another measure */ +static inline int num_booting_cpus(void) @@ -19065,7 +19432,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +#define safe_smp_processor_id() smp_processor_id() + +#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ -+#define cpu_physical_id(cpu) boot_cpu_physical_apicid ++#define cpu_physical_id(cpu) 0 +#define safe_smp_processor_id() 0 +#define stack_smp_processor_id() 0 +#endif @@ -19083,8 +19450,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +{ + return *(u32 *)(APIC_BASE + APIC_ID); +} - #else --# include "smp_64.h" ++#else +extern unsigned int read_apic_id(void); +#endif + @@ -19118,9 +19484,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +extern void unlock_ipi_call_lock(void); +#endif /* __ASSEMBLY__ */ #endif ---- 
head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_32.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_32.h 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,174 +0,0 @@ +@@ -1,173 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - @@ -19142,7 +19508,6 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -#define cpu_callout_map cpu_possible_map -#define cpu_callin_map cpu_possible_map - --extern int smp_num_siblings; -extern unsigned int num_processors; - -extern void smp_alloc_memory(void); @@ -19152,10 +19517,10 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -extern void (*mtrr_hook) (void); -extern void zap_low_mappings (void); - --DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); --DECLARE_PER_CPU(cpumask_t, cpu_core_map); +-#ifndef CONFIG_XEN -DECLARE_PER_CPU(u8, cpu_llc_id); -DECLARE_PER_CPU(u8, x86_cpu_to_apicid); +-#endif - -#ifdef CONFIG_HOTPLUG_CPU -extern void cpu_exit_clear(void); @@ -19249,7 +19614,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -DECLARE_PER_CPU(int, cpu_number); -#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) - --#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +-#define cpu_physical_id(cpu) (cpu) - -#define safe_smp_processor_id() smp_processor_id() - @@ -19262,7 +19627,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -#else /* CONFIG_SMP */ - -#define safe_smp_processor_id() 0 --#define cpu_physical_id(cpu) boot_cpu_physical_apicid +-#define cpu_physical_id(cpu) 0 - -#endif /* !CONFIG_SMP */ - @@ -19295,9 +19660,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -#endif /* !ASSEMBLY */ -#endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp_64.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp_64.h 2011-01-31 18:01:51.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,103 +0,0 @@ +@@ -1,101 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - @@ -19319,7 +19684,6 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -extern cpumask_t cpu_initialized; - --extern int smp_num_siblings; -extern unsigned int num_processors; - -extern void smp_alloc_memory(void); @@ -19329,11 +19693,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait); - --DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); --DECLARE_PER_CPU(cpumask_t, cpu_core_map); +-#ifndef CONFIG_XEN -DECLARE_PER_CPU(u16, cpu_llc_id); -DECLARE_PER_CPU(u16, x86_cpu_to_apicid); -DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); +-#endif - -#ifdef CONFIG_X86_LOCAL_APIC -static inline int cpu_present_to_apicid(int mps_cpu) @@ -19355,7 +19719,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -extern unsigned __cpuinitdata disabled_cpus; - -#define raw_smp_processor_id() read_pda(cpunumber) --#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +-#define cpu_physical_id(cpu) (cpu) - -#define stack_smp_processor_id() \ - ({ \ @@ -19377,8 +19741,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -#else /* CONFIG_SMP */ - --extern unsigned int boot_cpu_id; --#define cpu_physical_id(cpu) boot_cpu_id +-#define 
cpu_physical_id(cpu) 0 -#define stack_smp_processor_id() 0 - -#endif /* !CONFIG_SMP */ @@ -19401,8 +19764,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - -#endif - ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock.h 2011-01-31 18:07:35.000000000 +0100 @@ -95,7 +95,7 @@ void xen_spin_kick(raw_spinlock_t *, uns : \ : "memory", "cc") @@ -19464,8 +19827,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { unsigned int token; bool kick; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/swiotlb.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/swiotlb.h 2011-01-31 18:07:35.000000000 +0100 @@ -1,5 +1,4 @@ -#ifdef CONFIG_X86_32 -# include "swiotlb_32.h" @@ -19476,8 +19839,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches + +dma_addr_t swiotlb_map_single_phys(struct device *, phys_addr_t, size_t size, + int dir); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system.h 2011-01-31 18:07:35.000000000 +0100 @@ -28,22 +28,44 @@ struct task_struct *__switch_to(struct t * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. @@ -19676,8 +20039,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #else #define smp_mb() barrier() #define smp_rmb() barrier() ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/tlbflush.h 2011-01-31 18:07:35.000000000 +0100 @@ -86,8 +86,7 @@ static inline void flush_tlb_range(struc #define TLBSTATE_LAZY 2 @@ -19688,8 +20051,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches struct mm_struct *active_mm; int state; char __cacheline_padding[L1_CACHE_BYTES-8]; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/vga.h 2007-06-12 13:14:02.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/vga.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/vga.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/vga.h 2011-01-31 18:07:35.000000000 +0100 @@ -12,9 +12,9 @@ * access the videoram directly without any black magic. 
*/ @@ -19702,8 +20065,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +#define vga_writeb(x, y) (*(y) = (x)) #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/xor_64.h 2007-06-12 13:14:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/xor_64.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/xor_64.h 2007-06-12 13:14:13.000000000 +0200 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/xor_64.h 2011-01-31 18:07:35.000000000 +0100 @@ -1,20 +1,23 @@ /* - * x86-64 changes / gcc fixes from Andi Kleen. @@ -20166,22 +20529,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu ---- head-2010-05-25.orig/arch/x86/include/asm/scatterlist.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/scatterlist.h 2010-03-24 15:12:36.000000000 +0100 -@@ -3,6 +3,10 @@ - - #define ISA_DMA_THRESHOLD (0x00ffffff) - -+#ifdef CONFIG_X86_XEN -+# define sg_dma_len(sg) ((sg)->dma_length) -+#endif -+ - #include - - #endif /* _ASM_X86_SCATTERLIST_H */ ---- head-2010-05-25.orig/include/linux/page-flags.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/include/linux/page-flags.h 2010-03-24 15:12:36.000000000 +0100 -@@ -349,29 +349,28 @@ static inline void SetPageUptodate(struc +--- head-2011-03-11.orig/include/linux/page-flags.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/include/linux/page-flags.h 2011-01-31 18:07:35.000000000 +0100 +@@ -345,28 +345,27 @@ static inline void SetPageUptodate(struc CLEARPAGEFLAG(Uptodate, uptodate) @@ -20202,6 +20552,11 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -#define PageNetback(page) test_bit(PG_netback, &(page)->flags) -#define SetPageNetback(page) set_bit(PG_netback, &(page)->flags) -#define ClearPageNetback(page) clear_bit(PG_netback, &(page)->flags) +-#endif +- +-#define PageBlkback(page) test_bit(PG_blkback, &(page)->flags) +-#define SetPageBlkback(page) set_bit(PG_blkback, &(page)->flags) +-#define ClearPageBlkback(page) clear_bit(PG_blkback, &(page)->flags) +#ifdef CONFIG_XEN +TESTPAGEFLAG(Foreign, foreign) +static inline void SetPageForeign(struct page *page, @@ -20222,17 +20577,12 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches +} +/*PAGEFLAG(Netback, netback)*/ +PAGEFLAG(Blkback, blkback) - #endif ++#endif --#define PageBlkback(page) test_bit(PG_blkback, &(page)->flags) --#define SetPageBlkback(page) set_bit(PG_blkback, &(page)->flags) --#define ClearPageBlkback(page) clear_bit(PG_blkback, &(page)->flags) -- extern void cancel_dirty_page(struct page *page, unsigned int account_size); - int test_clear_page_writeback(struct page *page); ---- head-2010-05-25.orig/include/xen/balloon.h 2007-06-12 13:14:19.000000000 +0200 -+++ head-2010-05-25/include/xen/balloon.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/include/xen/balloon.h 2007-06-12 13:14:19.000000000 +0200 ++++ head-2011-03-11/include/xen/balloon.h 2011-01-31 18:07:35.000000000 +0100 @@ -31,9 +31,12 @@ * IN THE SOFTWARE. 
*/ @@ -20256,8 +20606,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches -#endif /* __ASM_BALLOON_H__ */ +#endif /* __XEN_BALLOON_H__ */ ---- head-2010-05-25.orig/include/xen/interface/grant_table.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/grant_table.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/include/xen/interface/grant_table.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-11/include/xen/interface/grant_table.h 2011-01-31 18:07:35.000000000 +0100 @@ -288,6 +288,7 @@ struct gnttab_map_grant_ref { grant_handle_t handle; uint64_t dev_bus_addr; @@ -20314,8 +20664,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); ---- head-2010-05-25.orig/include/xen/interface/io/fbif.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/io/fbif.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/include/xen/interface/io/fbif.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-11/include/xen/interface/io/fbif.h 2011-01-31 18:07:35.000000000 +0100 @@ -150,7 +150,12 @@ struct xenfb_page * framebuffer with a max resolution of 12,800x10,240. Should * be enough for a while with room leftover for expansion. @@ -20329,9 +20679,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches }; /* ---- head-2010-05-25.orig/include/xen/interface/memory.h 2010-03-24 15:09:23.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/memory.h 2010-03-24 15:12:36.000000000 +0100 -@@ -85,7 +85,6 @@ struct xen_memory_reservation { +--- head-2011-03-11.orig/include/xen/interface/memory.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/include/xen/interface/memory.h 2011-01-31 18:07:35.000000000 +0100 +@@ -88,7 +88,6 @@ struct xen_memory_reservation { */ domid_t domid; }; @@ -20339,7 +20689,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); -@@ -171,7 +170,11 @@ struct xen_machphys_mfn_list { +@@ -174,7 +173,11 @@ struct xen_machphys_mfn_list { * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ @@ -20351,7 +20701,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Number of extents written to the above array. This will be smaller -@@ -179,7 +182,6 @@ struct xen_machphys_mfn_list { +@@ -182,7 +185,6 @@ struct xen_machphys_mfn_list { */ unsigned int nr_extents; }; @@ -20359,7 +20709,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); -@@ -221,7 +223,6 @@ struct xen_add_to_physmap { +@@ -224,7 +226,6 @@ struct xen_add_to_physmap { /* GPFN where the source mapping page should appear. 
*/ xen_pfn_t gpfn; }; @@ -20367,8 +20717,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); ---- head-2010-05-25.orig/include/xen/interface/vcpu.h 2010-01-19 16:01:04.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/vcpu.h 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/include/xen/interface/vcpu.h 2011-01-31 15:14:12.000000000 +0100 ++++ head-2011-03-11/include/xen/interface/vcpu.h 2011-01-31 18:07:35.000000000 +0100 @@ -87,6 +87,7 @@ struct vcpu_runstate_info { */ uint64_t time[4]; @@ -20401,8 +20751,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); ---- head-2010-05-25.orig/lib/swiotlb-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/lib/swiotlb-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/lib/swiotlb-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/lib/swiotlb-xen.c 2011-01-31 18:07:35.000000000 +0100 @@ -20,6 +20,7 @@ #include #include @@ -20411,7 +20761,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches #include #include #include -@@ -288,15 +289,6 @@ __sync_single(struct phys_addr buffer, c +@@ -296,15 +297,6 @@ __sync_single(struct phys_addr buffer, c } } @@ -20427,7 +20777,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Allocates bounce buffer and returns its kernel virtual address. */ -@@ -335,61 +327,53 @@ map_single(struct device *hwdev, struct +@@ -343,61 +335,53 @@ map_single(struct device *hwdev, struct * request and allocate a buffer from that IO TLB pool. */ spin_lock_irqsave(&io_tlb_lock, flags); @@ -20484,7 +20834,9 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches */ - if (io_tlb_list[index] >= nslots) { - int count = 0; -- ++ io_tlb_index = ((index + nslots) < iotlb_nslabs ++ ? (index + nslots) : 0); + - for (i = index; i < (int)(index + nslots); i++) - io_tlb_list[i] = 0; - for (i = index - 1; @@ -20502,9 +20854,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches - io_tlb_index = - ((index + nslots) < iotlb_nslabs - ? (index + nslots) : 0); -+ io_tlb_index = ((index + nslots) < iotlb_nslabs -+ ? (index + nslots) : 0); - +- - goto found; - } - index += stride; @@ -20530,7 +20880,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches spin_unlock_irqrestore(&io_tlb_lock, flags); /* -@@ -502,11 +486,13 @@ swiotlb_full(struct device *dev, size_t +@@ -525,11 +509,13 @@ swiotlb_full(struct device *dev, size_t * Once the device is given the dma address, the device owns this memory until * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ @@ -20549,7 +20899,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches void *map; struct phys_addr buffer; -@@ -517,7 +503,7 @@ swiotlb_map_single(struct device *hwdev, +@@ -540,7 +526,7 @@ swiotlb_map_single(struct device *hwdev, * we can safely return the device addr and not worry about bounce * buffering it. 
*/ @@ -20558,7 +20908,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches !address_needs_mapping(hwdev, dev_addr)) return dev_addr; -@@ -525,8 +511,8 @@ swiotlb_map_single(struct device *hwdev, +@@ -548,8 +534,8 @@ swiotlb_map_single(struct device *hwdev, * Oh well, have to allocate and map a bounce buffer. */ gnttab_dma_unmap_page(dev_addr); @@ -20569,7 +20919,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches map = map_single(hwdev, buffer, size, dir); if (!map) { swiotlb_full(hwdev, size, dir, 1); -@@ -537,6 +523,26 @@ swiotlb_map_single(struct device *hwdev, +@@ -560,6 +546,26 @@ swiotlb_map_single(struct device *hwdev, return dev_addr; } @@ -20596,7 +20946,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Unmap a single streaming mode DMA translation. The dma_addr and size must * match what was provided for in a previous swiotlb_map_single call. All -@@ -546,8 +552,8 @@ swiotlb_map_single(struct device *hwdev, +@@ -569,8 +575,8 @@ swiotlb_map_single(struct device *hwdev, * whatever the device wrote there. */ void @@ -20607,7 +20957,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { BUG_ON(dir == DMA_NONE); if (in_swiotlb_aperture(dev_addr)) -@@ -555,7 +561,14 @@ swiotlb_unmap_single(struct device *hwde +@@ -578,7 +584,14 @@ swiotlb_unmap_single(struct device *hwde else gnttab_dma_unmap_page(dev_addr); } @@ -20622,34 +20972,46 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. -@@ -584,6 +597,26 @@ swiotlb_sync_single_for_device(struct de - sync_single(hwdev, bus_to_virt(dev_addr), size, dir); +@@ -613,6 +626,38 @@ swiotlb_sync_single_for_device(struct de } + /* ++ * Same as above, but for a sub-range of the mapping. ++ */ ++static void ++swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, ++ unsigned long offset, size_t size, ++ int dir, int target) ++{ ++ BUG_ON(dir == DMA_NONE); ++ if (in_swiotlb_aperture(dev_addr)) ++ sync_single(hwdev, bus_to_virt(dev_addr + offset), size, ++ dir, target); ++} ++ +void +swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, + unsigned long offset, size_t size, int dir) +{ -+ BUG_ON(dir == DMA_NONE); -+ if (in_swiotlb_aperture(dev_addr)) -+ sync_single(hwdev, bus_to_virt(dev_addr + offset), size, dir); ++ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, ++ SYNC_FOR_CPU); +} + +void +swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, + unsigned long offset, size_t size, int dir) +{ -+ BUG_ON(dir == DMA_NONE); -+ if (in_swiotlb_aperture(dev_addr)) -+ sync_single(hwdev, bus_to_virt(dev_addr + offset), size, dir); ++ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, ++ SYNC_FOR_DEVICE); +} + +void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, + struct dma_attrs *); - /* ++/* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above swiotlb_map_single -@@ -601,8 +634,8 @@ swiotlb_sync_single_for_device(struct de + * interface. Here the scatter gather list elements are each tagged with the +@@ -629,8 +674,8 @@ swiotlb_sync_single_for_device(struct de * same here. 
*/ int @@ -20660,7 +21022,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { struct scatterlist *sg; struct phys_addr buffer; -@@ -626,7 +659,8 @@ swiotlb_map_sg(struct device *hwdev, str +@@ -654,7 +699,8 @@ swiotlb_map_sg(struct device *hwdev, str /* Don't panic here, we expect map_sg users to do proper error handling. */ swiotlb_full(hwdev, sg->length, dir, 0); @@ -20670,7 +21032,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches sgl[0].dma_length = 0; return 0; } -@@ -637,14 +671,22 @@ swiotlb_map_sg(struct device *hwdev, str +@@ -665,14 +711,22 @@ swiotlb_map_sg(struct device *hwdev, str } return nelems; } @@ -20695,7 +21057,7 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches { struct scatterlist *sg; int i; -@@ -659,6 +701,14 @@ swiotlb_unmap_sg(struct device *hwdev, s +@@ -687,6 +741,14 @@ swiotlb_unmap_sg(struct device *hwdev, s gnttab_dma_unmap_page(sg->dma_address); } } @@ -20710,8 +21072,8 @@ Automatically created from "patches.kernel.org/patch-2.6.26" by xen-port-patches /* * Make physical memory consistent for a set of streaming mode DMA translations -@@ -699,46 +749,6 @@ swiotlb_sync_sg_for_device(struct device - } +@@ -725,46 +787,6 @@ swiotlb_sync_sg_for_device(struct device + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } -#ifdef CONFIG_HIGHMEM diff --git a/patches.xen/xen3-patch-2.6.27 b/patches.xen/xen3-patch-2.6.27 index 49b5239..c86df98 100644 --- a/patches.xen/xen3-patch-2.6.27 +++ b/patches.xen/xen3-patch-2.6.27 @@ -7,9 +7,13 @@ Patch-mainline: 2.6.27 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches.py ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-24 15:12:46.000000000 +0100 -@@ -795,7 +795,7 @@ config AMD_IOMMU +Removed adjustments NO_HZ -> NO_HZ || NO_IDLE_HZ from kernel/{hr,}timer.c, +as they would get removed again by xen-clockevents (and really shouldn't +have been needed - see SLE11 SPn). + +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-02-01 14:38:38.000000000 +0100 +@@ -723,7 +723,7 @@ config AMD_IOMMU bool "AMD IOMMU support" select SWIOTLB select PCI_MSI @@ -18,7 +22,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches ---help--- With this option you can enable support for AMD IOMMU hardware in your system. An IOMMU is a hardware component which provides -@@ -1519,7 +1519,7 @@ config MTRR +@@ -1465,7 +1465,7 @@ config MTRR config MTRR_SANITIZER def_bool y prompt "MTRR cleanup support" @@ -27,8 +31,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches ---help--- Convert MTRR layout from continuous to discrete, so X drivers can add writeback entries. ---- head-2010-05-25.orig/arch/x86/Kconfig.debug 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig.debug 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/Kconfig.debug 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig.debug 2011-02-01 14:38:38.000000000 +0100 @@ -25,6 +25,7 @@ config STRICT_DEVMEM config X86_VERBOSE_BOOTUP bool "Enable verbose x86 bootup info messages" @@ -37,7 +41,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches ---help--- Enables the informational output from the decompression stage (e.g. 
bzImage) of the boot. If you disable this you will still -@@ -185,6 +186,7 @@ config X86_DS_SELFTEST +@@ -179,6 +180,7 @@ config IOMMU_LEAK config HAVE_MMIOTRACE_SUPPORT def_bool y @@ -45,9 +49,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches config X86_DECODER_SELFTEST bool "x86 instruction decoder selftest" ---- head-2010-05-25.orig/arch/x86/Makefile 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/Makefile 2010-03-24 15:12:46.000000000 +0100 -@@ -112,8 +112,8 @@ endif +--- head-2011-03-11.orig/arch/x86/Makefile 2011-02-01 14:11:04.000000000 +0100 ++++ head-2011-03-11/arch/x86/Makefile 2011-02-01 14:38:38.000000000 +0100 +@@ -117,8 +117,8 @@ endif KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) # Xen subarch support @@ -58,8 +62,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:38:38.000000000 +0100 @@ -15,6 +15,16 @@ #include #include @@ -301,9 +305,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + .quad sys_pipe2 + .quad sys_inotify_init1 ia32_syscall_end: ---- head-2010-05-25.orig/arch/x86/kernel/Makefile 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/Makefile 2010-03-24 15:12:46.000000000 +0100 -@@ -134,9 +134,11 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-03-11.orig/arch/x86/kernel/Makefile 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/Makefile 2011-02-01 14:38:38.000000000 +0100 +@@ -125,9 +125,11 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o @@ -318,9 +322,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + i8259.o irqinit_$(BITS).o pci-swiotlb_64.o reboot.o smpboot.o \ + tlb_$(BITS).o tsc.o tsc_sync.o vsmp_64.o +disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += probe_roms_32.o ---- head-2010-05-25.orig/arch/x86/kernel/acpi/boot.c 2010-04-15 10:03:01.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/acpi/boot.c 2010-04-15 10:05:36.000000000 +0200 -@@ -1317,6 +1317,7 @@ static int __init dmi_disable_acpi(const +--- head-2011-03-11.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 10:59:02.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/boot.c 2011-03-11 10:59:30.000000000 +0100 +@@ -1349,6 +1349,7 @@ static int __init dmi_disable_acpi(const return 0; } @@ -328,7 +332,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * Force ignoring BIOS IRQ0 pin2 override */ -@@ -1334,6 +1335,7 @@ static int __init dmi_ignore_irq0_timer_ +@@ -1366,6 +1367,7 @@ static int __init dmi_ignore_irq0_timer_ } return 0; } @@ -336,7 +340,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static int __init force_acpi_rsdt(const struct dmi_system_id *d) { -@@ -1454,6 +1456,7 @@ static struct dmi_system_id __initdata a +@@ -1486,6 +1488,7 @@ static struct dmi_system_id __initdata a {} }; @@ -344,7 +348,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* second table for DMI checks that should run after early-quirks */ 
static struct dmi_system_id __initdata acpi_dmi_table_late[] = { /* -@@ -1500,6 +1503,7 @@ static struct dmi_system_id __initdata a +@@ -1532,6 +1535,7 @@ static struct dmi_system_id __initdata a }, {} }; @@ -352,7 +356,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * acpi_boot_table_init() and acpi_boot_init() -@@ -1574,8 +1578,10 @@ int __init early_acpi_boot_init(void) +@@ -1604,8 +1608,10 @@ int __init early_acpi_boot_init(void) int __init acpi_boot_init(void) { @@ -363,8 +367,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * If acpi_disabled, bail out ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -9,6 +9,7 @@ #include #include @@ -449,8 +453,21 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); ---- head-2010-05-25.orig/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/amd_nb.c 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/amd_nb.c 2011-02-01 14:38:38.000000000 +0100 +@@ -15,6 +15,10 @@ static u32 *flush_words; + struct pci_device_id amd_nb_misc_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, ++#ifdef CONFIG_XEN ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, /* Fam12, Fam14 */ ++#endif + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, + {} + }; +--- head-2011-03-11.orig/arch/x86/kernel/apic/apic-xen.c 2011-02-24 15:45:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/apic/apic-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -59,7 +59,10 @@ static cpumask_t timer_bcast_ipi; /* * Debug level, exported for io_apic.c @@ -463,8 +480,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #ifndef CONFIG_XEN static int modern_apic(void) ---- head-2010-05-25.orig/arch/x86/kernel/asm-offsets_64.c 2010-01-19 16:00:48.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/asm-offsets_64.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/asm-offsets_64.c 2010-01-19 16:00:48.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/asm-offsets_64.c 2011-02-01 14:38:38.000000000 +0100 @@ -132,7 +132,7 @@ int main(void) BLANK(); @@ -474,17 +491,17 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches BLANK(); OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/amd.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/amd.c 2010-03-24 15:12:46.000000000 +0100 -@@ -546,6 +546,7 @@ static void __cpuinit init_amd(struct cp +--- head-2011-03-11.orig/arch/x86/kernel/cpu/amd.c 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/amd.c 2011-02-01 14:38:38.000000000 +0100 +@@ -575,6 
+575,7 @@ static void __cpuinit init_amd(struct cp fam10h_check_enable_mmcfg(); } +#ifndef CONFIG_XEN - if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) { + if (c == &boot_cpu_data && c->x86 >= 0xf) { unsigned long long tseg; -@@ -565,6 +566,7 @@ static void __cpuinit init_amd(struct cp +@@ -594,6 +595,7 @@ static void __cpuinit init_amd(struct cp } } #endif @@ -492,8 +509,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } #ifdef CONFIG_X86_32 ---- head-2010-05-25.orig/arch/x86/kernel/cpu/bugs_64.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/bugs_64.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/bugs_64.c 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/bugs_64.c 2011-02-01 14:38:38.000000000 +0100 @@ -20,6 +20,7 @@ void __init check_bugs(void) #endif alternative_instructions(); @@ -508,8 +525,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches set_memory_4k((unsigned long)__va(0), 1); +#endif } ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/common-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -13,6 +13,7 @@ #include #include @@ -563,8 +580,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches { int i; -@@ -450,6 +464,8 @@ void __cpuinit identify_cpu(struct cpuin - c->x86_max_cores = 1; +@@ -452,6 +466,8 @@ void __cpuinit identify_cpu(struct cpuin + #endif c->x86_clflush_size = 32; memset(&c->x86_capability, 0, sizeof c->x86_capability); + if (boot_cpu_has(X86_FEATURE_SYSCALL32)) @@ -573,8 +590,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (!have_cpuid_p()) { /* --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/cpu/common_64-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -0,0 +1,773 @@ ++++ head-2011-03-11/arch/x86/kernel/cpu/common_64-xen.c 2011-02-01 14:38:38.000000000 +0100 +@@ -0,0 +1,777 @@ +#include +#include +#include @@ -716,7 +733,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + +void __cpuinit detect_ht(struct cpuinfo_x86 *c) +{ -+#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) + u32 eax, ebx, ecx, edx; + int index_msb, core_bits; + @@ -870,8 +887,10 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + c->x86_model_id[0] = '\0'; /* Unset */ + c->x86_clflush_size = 64; + c->x86_cache_alignment = c->x86_clflush_size; ++#ifndef CONFIG_XEN + c->x86_max_cores = 1; + c->x86_coreid_bits = 0; ++#endif + c->extended_cpuid_level = 0; + memset(&c->x86_capability, 0, sizeof c->x86_capability); + @@ -905,10 +924,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + c->x86 = 4; + } + ++#ifndef CONFIG_XEN + c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; +#ifdef CONFIG_SMP + c->phys_proc_id = c->initial_apicid; +#endif ++#endif + /* AMD-defined flags: level 0x80000001 */ + xlvl = cpuid_eax(0x80000000); + c->extended_cpuid_level = xlvl; @@ -1349,7 +1370,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + uv_cpu_init(); +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2010-05-25/arch/x86/kernel/e820-xen.c 2010-03-24 15:12:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -0,0 +1,1553 @@ +/* + * Handle the memory map. @@ -2904,7 +2925,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + printk(KERN_INFO "Xen-provided physical RAM map:\n"); + _e820_print_map(&e820, who); +} ---- head-2010-05-25.orig/arch/x86/kernel/e820_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 18:07:35.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,860 +0,0 @@ -#include @@ -3767,7 +3788,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - print_memory_map("modified"); -} -#endif ---- head-2010-05-25.orig/arch/x86/kernel/e820_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 18:07:35.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,1052 +0,0 @@ -/* @@ -4822,8 +4843,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - max_pfn << PAGE_SHIFT) - *addr; - return i + 1; -} ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/early_printk-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -225,7 +225,7 @@ static struct console simnow_console = { static struct console *early_console = &early_vga_console; static int early_console_initialized; @@ -4833,8 +4854,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches { char buf[512]; int n; ---- head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:38:38.000000000 +0100 @@ -51,15 +51,26 @@ #include #include @@ -5132,9 +5153,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches cmpl $nr_syscalls,%eax jb syscall_call jmp syscall_exit ---- head-2010-05-25.orig/arch/x86/kernel/entry_64.S 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/entry_64.S 2010-03-24 15:12:46.000000000 +0100 -@@ -1276,7 +1276,7 @@ ENTRY(arch_unwind_init_running) +--- head-2011-03-11.orig/arch/x86/kernel/entry_64.S 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_64.S 2011-02-16 16:02:30.000000000 +0100 +@@ -1253,7 +1253,7 @@ ENTRY(arch_unwind_init_running) END(arch_unwind_init_running) #endif @@ -5143,17 +5164,17 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches zeroentry xen_hypervisor_callback xen_do_hypervisor_callback /* -@@ -1373,7 +1373,7 @@ ENTRY(xen_failsafe_callback) - CFI_ENDPROC - END(xen_failsafe_callback) +@@ -1353,7 +1353,7 @@ END(xen_failsafe_callback) + apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ + xen_hvm_callback_vector xen_evtchn_do_upcall -#endif /* CONFIG_XEN */ +#endif /* CONFIG_PARAVIRT_XEN */ - #ifdef CONFIG_KDB - ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:12:36.000000000 +0100 -+++ 
head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:12:46.000000000 +0100 + /* + * Some functions should be protected against kprobes +--- head-2011-03-11.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:38:38.000000000 +0100 @@ -53,12 +53,124 @@ #include #include @@ -5591,18 +5612,18 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches paranoidentry do_stack_segment */ errorentry do_stack_segment /* jmp paranoid_exit1 ---- head-2010-05-25.orig/arch/x86/kernel/fixup.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/fixup.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/fixup.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/fixup.c 2011-02-01 14:38:38.000000000 +0100 @@ -33,6 +33,7 @@ #include #include #include +#include - #define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args ) + #define DP(_f, _args...) pr_alert(" " _f "\n" , ## _args ) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/head-xen.c 2010-03-24 15:12:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -0,0 +1,57 @@ +#include +#include @@ -5662,7 +5683,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +#endif +} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/head32-xen.c 2010-03-24 15:12:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head32-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -0,0 +1,57 @@ +/* + * linux/arch/i386/kernel/head32.c -- prepare to run common code @@ -5721,8 +5742,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + + start_kernel(); +} ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head64-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head64-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -32,7 +32,26 @@ #include #include @@ -5883,8 +5904,19 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * At this point everything still needed from the boot loader ---- head-2010-05-25.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_64-xen.S 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_32-xen.S 2011-03-03 16:19:21.000000000 +0100 +@@ -61,8 +61,6 @@ ENTRY(startup_32) + movb %cl,X86_MASK + movl %edx,X86_CAPABILITY + +- movb $1,X86_HARD_MATH +- + xorl %eax,%eax # Clear GS + movl %eax,%gs + +--- head-2011-03-11.orig/arch/x86/kernel/head_64-xen.S 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_64-xen.S 2011-02-01 14:38:38.000000000 +0100 @@ -92,53 +92,6 @@ NEXT_PAGE(hypercall_page) #undef NEXT_PAGE @@ -5939,8 +5971,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches .section .bss.page_aligned, "aw", @nobits .align PAGE_SIZE ENTRY(empty_zero_page) ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 
15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_32-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -25,6 +25,7 @@ #include #include @@ -7425,7 +7457,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches MSI_DATA_DELIVERY_LOWPRI) | MSI_DATA_VECTOR(vector); } -@@ -2720,12 +2753,12 @@ int arch_setup_ht_irq(unsigned int irq, +@@ -2720,13 +2753,13 @@ int arch_setup_ht_irq(unsigned int irq, #endif /* CONFIG_HT_IRQ */ /* -------------------------------------------------------------------------- @@ -7435,11 +7467,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #ifdef CONFIG_ACPI + #ifndef CONFIG_XEN -int __init io_apic_get_unique_id (int ioapic, int apic_id) +int __init io_apic_get_unique_id(int ioapic, int apic_id) { - #ifndef CONFIG_XEN union IO_APIC_reg_00 reg_00; + static physid_mask_t apic_id_map = PHYSID_MASK_NONE; @@ -2735,10 +2768,10 @@ int __init io_apic_get_unique_id (int io int i = 0; @@ -7473,7 +7506,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches tmp = apicid_to_cpu_present(apic_id); physids_or(apic_id_map, apic_id_map, tmp); @@ -2802,7 +2835,7 @@ int __init io_apic_get_unique_id (int io - } + #endif /* !CONFIG_XEN */ -int __init io_apic_get_version (int ioapic) @@ -7564,8 +7597,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + } +} +#endif ---- head-2010-05-25.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/io_apic_64-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -45,6 +45,7 @@ #include #include @@ -8329,45 +8362,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } else { ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); ---- head-2010-05-25.orig/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -8,7 +8,6 @@ - #include - #include - #include --#include - #include - #include - -@@ -85,7 +84,7 @@ void __send_IPI_shortcut(unsigned int sh - /* - * Send the IPI. The write to APIC_ICR fires this off. - */ -- apic_write_around(APIC_ICR, cfg); -+ apic_write(APIC_ICR, cfg); - #else - int cpu; - -@@ -132,7 +131,7 @@ static inline void __send_IPI_dest_field - * prepare target chip field - */ - cfg = __prepare_ICR2(mask); -- apic_write_around(APIC_ICR2, cfg); -+ apic_write(APIC_ICR2, cfg); - - /* - * program the ICR -@@ -142,7 +141,7 @@ static inline void __send_IPI_dest_field - /* - * Send the IPI. The write to APIC_ICR fires this off. 
- */ -- apic_write_around(APIC_ICR, cfg); -+ apic_write(APIC_ICR, cfg); - } - #endif - ---- head-2010-05-25.orig/arch/x86/kernel/ldt-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ldt-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/ldt-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ldt-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -20,9 +20,9 @@ #include @@ -8402,8 +8398,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches preempt_enable(); #endif } ---- head-2010-05-25.orig/arch/x86/kernel/machine_kexec_32.c 2010-01-19 16:00:16.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/machine_kexec_32.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/machine_kexec_32.c 2011-01-31 14:54:00.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/machine_kexec_32.c 2011-02-01 14:38:38.000000000 +0100 @@ -131,6 +131,8 @@ void machine_kexec_setup_load_arg(xen_ke xki->page_list[PA_PTE_0] = __ma(kexec_pte0); xki->page_list[PA_PTE_1] = __ma(kexec_pte1); @@ -8413,8 +8409,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } int __init machine_kexec_setup_resources(struct resource *hypervisor, ---- head-2010-05-25.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/microcode-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/microcode-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -5,13 +5,14 @@ * 2006 Shaohua Li * @@ -8479,8 +8475,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/mpparse-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -25,6 +25,9 @@ #include #include @@ -8710,12 +8706,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + return 6; + if (mp_irq->mp_dstirq != m->mpc_dstirq) + return 7; ++ ++ return 0; ++} -static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, - unsigned short oemsize) -+ return 0; -+} -+ +static void __init MP_intsrc_info(struct mpc_config_intsrc *m) { - int count = sizeof(*oemtable); /* the header size */ @@ -8802,7 +8798,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", -@@ -313,19 +280,41 @@ static int __init smp_read_mpc(struct mp +@@ -313,20 +280,44 @@ static int __init smp_read_mpc(struct mp } memcpy(oem, mpc->mpc_oem, 8); oem[8] = 0; @@ -8819,8 +8815,10 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - printk(KERN_INFO "MPTABLE: Product ID: %s ", str); + printk(KERN_INFO "MPTABLE: Product ID: %s\n", str); + #ifndef CONFIG_XEN printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); - ++#endif ++ + return 1; +} + @@ -8834,7 +8832,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + + if (!smp_check_mpc(mpc, oem, str)) + return 0; -+ + +#ifdef CONFIG_X86_32 + /* + * need to make sure 
summit and es7000's mps_oem_check is safe to be @@ -8847,10 +8845,11 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + } else + mps_oem_check(mpc, oem, str); +#endif ++#ifndef CONFIG_XEN /* save the local APIC address, it might be non-default */ if (!acpi_lapic) mp_lapic_addr = mpc->mpc_lapic; -@@ -333,12 +322,17 @@ static int __init smp_read_mpc(struct mp +@@ -335,12 +326,17 @@ static int __init smp_read_mpc(struct mp if (early) return 1; @@ -8871,7 +8870,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches while (count < mpc->mpc_length) { switch (*mpt) { case MP_PROCESSOR: -@@ -356,7 +350,9 @@ static int __init smp_read_mpc(struct mp +@@ -358,7 +354,9 @@ static int __init smp_read_mpc(struct mp { struct mpc_config_bus *m = (struct mpc_config_bus *)mpt; @@ -8881,7 +8880,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches mpt += sizeof(*m); count += sizeof(*m); break; -@@ -402,10 +398,14 @@ static int __init smp_read_mpc(struct mp +@@ -404,10 +402,14 @@ static int __init smp_read_mpc(struct mp count = mpc->mpc_length; break; } @@ -8899,7 +8898,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches setup_apic_routing(); if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); -@@ -431,7 +431,7 @@ static void __init construct_default_ioi +@@ -433,7 +435,7 @@ static void __init construct_default_ioi intsrc.mpc_type = MP_INTSRC; intsrc.mpc_irqflag = 0; /* conforming */ intsrc.mpc_srcbus = 0; @@ -8908,7 +8907,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches intsrc.mpc_irqtype = mp_INT; -@@ -492,40 +492,11 @@ static void __init construct_default_ioi +@@ -494,42 +496,11 @@ static void __init construct_default_ioi MP_intsrc_info(&intsrc); } @@ -8926,10 +8925,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - int linttypes[2] = { mp_ExtINT, mp_NMI }; - int i; - +-#ifndef CONFIG_XEN - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; +-#endif - - /* - * 2 CPUs, numbered 0 & 1. @@ -8951,7 +8952,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches bus.mpc_type = MP_BUS; bus.mpc_busid = 0; -@@ -554,7 +525,6 @@ static inline void __init construct_defa +@@ -558,7 +529,6 @@ static inline void __init construct_defa MP_bus_info(&bus); } @@ -8959,14 +8960,14 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches ioapic.mpc_type = MP_IOAPIC; ioapic.mpc_apicid = 2; ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; -@@ -566,7 +536,42 @@ static inline void __init construct_defa +@@ -570,7 +540,44 @@ static inline void __init construct_defa * We set up most of the low 16 IO-APIC pins according to MPS rules. */ construct_default_ioirq_mptable(mpc_default_type); +} +#else +static inline void __init construct_ioapic_table(int mpc_default_type) { } - #endif ++#endif + +static inline void __init construct_default_ISA_mptable(int mpc_default_type) +{ @@ -8975,10 +8976,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + int linttypes[2] = { mp_ExtINT, mp_NMI }; + int i; + ++#ifndef CONFIG_XEN + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + #endif + + /* + * 2 CPUs, numbered 0 & 1. 
@@ -9002,12 +9005,16 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches lintsrc.mpc_type = MP_LINTSRC; lintsrc.mpc_irqflag = 0; /* conforming */ lintsrc.mpc_srcbusid = 0; -@@ -584,10 +589,14 @@ static struct intel_mp_floating *mpf_fou - /* +@@ -589,7 +596,7 @@ static struct intel_mp_floating *mpf_fou * Scan the memory blocks for an SMP configuration block. */ + #ifndef CONFIG_XEN -static void __init __get_smp_config(unsigned early) +static void __init __get_smp_config(unsigned int early) + #else + void __init get_smp_config(void) + #define early 0 +@@ -597,6 +604,10 @@ void __init get_smp_config(void) { struct intel_mp_floating *mpf = mpf_found; @@ -9018,7 +9025,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (acpi_lapic && early) return; /* -@@ -604,7 +613,7 @@ static void __init __get_smp_config(unsi +@@ -613,7 +624,7 @@ void __init get_smp_config(void) printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); @@ -9027,7 +9034,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (mpf->mpf_feature2 & (1 << 7)) { printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); pic_mode = 1; -@@ -635,8 +644,10 @@ static void __init __get_smp_config(unsi +@@ -646,8 +657,10 @@ void __init get_smp_config(void) * Read the physical hardware table. Anything here will * override the defaults. */ @@ -9039,7 +9046,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); printk(KERN_ERR "... disabling SMP support. " -@@ -690,10 +701,11 @@ void __init get_smp_config(void) +@@ -704,10 +717,11 @@ void __init get_smp_config(void) static int __init smp_scan_config(unsigned long base, unsigned long length, unsigned reserve) { @@ -9053,7 +9060,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches BUILD_BUG_ON(sizeof(*mpf) != 16); while (length > 0) { -@@ -703,16 +715,22 @@ static int __init smp_scan_config(unsign +@@ -717,16 +731,22 @@ static int __init smp_scan_config(unsign !mpf_checksum((unsigned char *)bp, 16) && ((mpf->mpf_specification == 1) || (mpf->mpf_specification == 4))) { @@ -9079,7 +9086,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * We cannot access to MPC table to compute * table size yet, as only few megabytes from -@@ -722,27 +740,18 @@ static int __init smp_scan_config(unsign +@@ -736,27 +756,18 @@ static int __init smp_scan_config(unsign * PAGE_SIZE from mpg->mpf_physptr yields BUG() * in reserve_bootmem. */ @@ -9111,7 +9118,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } bp += 4; length -= 16; -@@ -750,12 +759,16 @@ static int __init smp_scan_config(unsign +@@ -764,12 +775,16 @@ static int __init smp_scan_config(unsign return 0; } @@ -9129,7 +9136,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * FIXME: Linux assumes you have 640K of base ram.. * this continues the error... 
-@@ -802,300 +815,297 @@ void __init find_smp_config(void) +@@ -816,302 +831,297 @@ void __init find_smp_config(void) __find_smp_config(1); } @@ -9196,10 +9203,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -static u8 __init uniq_ioapic_id(u8 id) -{ -#ifdef CONFIG_X86_32 +-#ifndef CONFIG_XEN - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && - !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - return io_apic_get_unique_id(nr_ioapics, id); - else +-#endif - return id; -#else - int i; @@ -9443,11 +9452,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - * Fabricate the legacy ISA bus (bus #31). - */ - mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; -+ if (enable_update_mptable && alloc_mptable) { -+ u64 startt = 0; -+#ifdef CONFIG_X86_TRAMPOLINE -+ startt = TRAMPOLINE_BASE; - #endif +-#endif - set_bit(MP_ISA_BUS, mp_bus_not_pci); - Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); - @@ -9469,7 +9474,11 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - intsrc.mpc_srcbus = MP_ISA_BUS; -#ifdef CONFIG_X86_IO_APIC - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; --#endif ++ if (enable_update_mptable && alloc_mptable) { ++ u64 startt = 0; ++#ifdef CONFIG_X86_TRAMPOLINE ++ startt = TRAMPOLINE_BASE; + #endif - /* - * Use the default configuration for the IRQs 0-15. Unless - * overridden by (MADT) interrupt source override entries. @@ -9677,34 +9686,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -#endif /* CONFIG_X86_IO_APIC */ -#endif /* CONFIG_ACPI */ +late_initcall(update_mp_table); ---- head-2010-05-25.orig/arch/x86/kernel/apic/nmi.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/apic/nmi.c 2010-04-15 10:05:32.000000000 +0200 -@@ -28,7 +28,9 @@ - #include - #include - -+#ifndef CONFIG_XEN - #include -+#endif - #include - #include - #include -@@ -176,11 +178,13 @@ int __init check_nmi_watchdog(void) - kfree(prev_nmi_count); - return 0; - error: -+#ifndef CONFIG_XEN - if (nmi_watchdog == NMI_IO_APIC) { - if (!timer_through_8259) - legacy_pic->chip->mask(0); - on_each_cpu(__acpi_nmi_disable, NULL, 1); - } -+#endif - - #ifdef CONFIG_X86_32 - timer_ack = 0; ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -5,13 +5,13 @@ #include @@ -10060,8 +10043,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches no_iommu_init(); return 0; ---- head-2010-05-25.orig/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/pci-nommu-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -84,18 +84,12 @@ static int nommu_dma_supported(struct de return 1; } @@ -10082,8 +10065,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches }; void __init no_iommu_init(void) ---- head-2010-05-25.orig/arch/x86/kernel/probe_roms_32.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/probe_roms_32.c 2010-03-24 15:12:46.000000000 +0100 +--- 
head-2011-03-11.orig/arch/x86/kernel/probe_roms_32.c 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/probe_roms_32.c 2011-02-01 14:38:38.000000000 +0100 @@ -131,7 +131,7 @@ void __init probe_roms(void) upper = system_rom_resource.start; @@ -10093,8 +10076,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (romsignature(rom)) { length = extension_rom_resource.end - extension_rom_resource.start + 1; if (romchecksum(rom, length)) { ---- head-2010-05-25.orig/arch/x86/kernel/process-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process-xen.c 2011-03-03 15:59:49.000000000 +0100 @@ -6,6 +6,13 @@ #include #include @@ -10109,7 +10092,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches struct kmem_cache *task_xstate_cachep; -@@ -45,6 +52,70 @@ void arch_task_cache_init(void) +@@ -45,6 +52,41 @@ void arch_task_cache_init(void) SLAB_PANIC, NULL); } @@ -10125,35 +10108,6 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +void (*pm_idle)(void); +EXPORT_SYMBOL(pm_idle); + -+#ifdef CONFIG_X86_32 -+/* -+ * This halt magic was a workaround for ancient floppy DMA -+ * wreckage. It should be safe to remove. -+ */ -+static int hlt_counter; -+void disable_hlt(void) -+{ -+ hlt_counter++; -+} -+EXPORT_SYMBOL(disable_hlt); -+ -+void enable_hlt(void) -+{ -+ hlt_counter--; -+} -+EXPORT_SYMBOL(enable_hlt); -+ -+static inline int hlt_use_halt(void) -+{ -+ return (!hlt_counter && boot_cpu_data.hlt_works_ok); -+} -+#else -+static inline int hlt_use_halt(void) -+{ -+ return 1; -+} -+#endif -+ +/* + * We use this if we don't have any better + * idle routine.. @@ -10180,7 +10134,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static void do_nothing(void *unused) { } -@@ -61,7 +132,7 @@ void cpu_idle_wait(void) +@@ -61,7 +103,7 @@ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ @@ -10189,7 +10143,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } EXPORT_SYMBOL_GPL(cpu_idle_wait); -@@ -125,60 +196,175 @@ static void poll_idle(void) +@@ -125,60 +167,175 @@ static void poll_idle(void) * * idle=mwait overrides this decision and forces the usage of mwait. 
*/ @@ -10385,8 +10339,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return -1; boot_option_idle_override = 1; ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_32-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -59,15 +59,11 @@ #include #include @@ -10490,9 +10444,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -65,15 +65,6 @@ asmlinkage extern void ret_from_fork(voi +--- head-2011-03-11.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:32:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_64-xen.c 2011-02-02 08:34:01.000000000 +0100 +@@ -64,15 +64,6 @@ asmlinkage extern void ret_from_fork(voi unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; @@ -10508,7 +10462,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static ATOMIC_NOTIFIER_HEAD(idle_notifier); void idle_notifier_register(struct notifier_block *n) -@@ -103,25 +94,13 @@ void exit_idle(void) +@@ -102,25 +93,13 @@ void exit_idle(void) __exit_idle(); } @@ -10537,7 +10491,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches local_irq_disable(); cpu_clear(smp_processor_id(), cpu_initialized); preempt_enable_no_resched(); -@@ -146,12 +125,11 @@ void cpu_idle(void) +@@ -145,12 +124,11 @@ void cpu_idle(void) current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ while (1) { @@ -10552,7 +10506,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (cpu_is_offline(smp_processor_id())) play_dead(); /* -@@ -161,7 +139,10 @@ void cpu_idle(void) +@@ -160,7 +138,10 @@ void cpu_idle(void) */ local_irq_disable(); enter_idle(); @@ -10564,7 +10518,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* In many cases the interrupt that ended idle has already called exit_idle. But some idle loops can be woken up without interrupt. 
*/ -@@ -271,7 +252,7 @@ void exit_thread(void) +@@ -270,7 +251,7 @@ void exit_thread(void) } } @@ -10573,7 +10527,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches { WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs)); } -@@ -372,10 +353,10 @@ int copy_thread(int nr, unsigned long cl +@@ -371,10 +352,10 @@ int copy_thread(int nr, unsigned long cl p->thread.fs = me->thread.fs; p->thread.gs = me->thread.gs; @@ -10588,7 +10542,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); -@@ -416,7 +397,9 @@ out: +@@ -415,7 +396,9 @@ out: void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { @@ -10599,7 +10553,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches load_gs_index(0); regs->ip = new_ip; regs->sp = new_sp; -@@ -555,8 +538,8 @@ static inline void __switch_to_xtra(stru +@@ -554,8 +537,8 @@ static inline void __switch_to_xtra(stru struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { @@ -10610,7 +10564,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches int cpu = smp_processor_id(); #ifndef CONFIG_X86_NO_TSS struct tss_struct *tss = &per_cpu(init_tss, cpu); -@@ -659,12 +642,25 @@ __switch_to(struct task_struct *prev_p, +@@ -658,12 +641,25 @@ __switch_to(struct task_struct *prev_p, */ if (unlikely(next->es)) loadsegment(es, next->es); @@ -10637,7 +10591,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches */ if (unlikely(next->fsindex)) loadsegment(fs, next->fsindex); -@@ -683,7 +679,8 @@ __switch_to(struct task_struct *prev_p, +@@ -682,7 +678,8 @@ __switch_to(struct task_struct *prev_p, */ write_pda(pcurrent, next_p); write_pda(kernelstack, @@ -10647,7 +10601,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #ifdef CONFIG_CC_STACKPROTECTOR write_pda(stack_canary, next_p->stack_canary); -@@ -844,7 +841,7 @@ long do_arch_prctl(struct task_struct *t +@@ -843,7 +840,7 @@ long do_arch_prctl(struct task_struct *t set_32bit_tls(task, FS_TLS, addr); if (doit) { load_TLS(&task->thread, cpu); @@ -10656,7 +10610,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } task->thread.fsindex = FS_TLS_SEL; task->thread.fs = 0; -@@ -854,7 +851,7 @@ long do_arch_prctl(struct task_struct *t +@@ -853,7 +850,7 @@ long do_arch_prctl(struct task_struct *t if (doit) { /* set the selector to 0 to not confuse __switch_to */ @@ -10665,7 +10619,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr); } -@@ -878,7 +875,7 @@ long do_arch_prctl(struct task_struct *t +@@ -877,7 +874,7 @@ long do_arch_prctl(struct task_struct *t if (task->thread.gsindex == GS_TLS_SEL) base = read_32bit_tls(task, GS_TLS); else if (doit) { @@ -10674,132 +10628,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (gsindex) rdmsrl(MSR_KERNEL_GS_BASE, base); else ---- head-2010-05-25.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/quirks-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -63,6 +63,7 @@ static enum { - ICH_FORCE_HPET_RESUME, - VT8237_FORCE_HPET_RESUME, - NVIDIA_FORCE_HPET_RESUME, -+ ATI_FORCE_HPET_RESUME, - } force_hpet_resume_type; - 
- static void __iomem *rcba_base; -@@ -156,6 +157,8 @@ static void ich_force_enable_hpet(struct - - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, - ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, -+ ich_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, - ich_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, -@@ -172,6 +175,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I - - static struct pci_dev *cached_dev; - -+static void hpet_print_force_info(void) -+{ -+ printk(KERN_INFO "HPET not enabled in BIOS. " -+ "You might try hpet=force boot option\n"); -+} -+ - static void old_ich_force_hpet_resume(void) - { - u32 val; -@@ -251,8 +260,12 @@ static void old_ich_force_enable_hpet_us - { - if (hpet_force_user) - old_ich_force_enable_hpet(dev); -+ else -+ hpet_print_force_info(); - } - -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, -+ old_ich_force_enable_hpet_user); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, - old_ich_force_enable_hpet_user); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, -@@ -288,9 +301,14 @@ static void vt8237_force_enable_hpet(str - { - u32 uninitialized_var(val); - -- if (!hpet_force_user || hpet_address || force_hpet_address) -+ if (hpet_address || force_hpet_address) - return; - -+ if (!hpet_force_user) { -+ hpet_print_force_info(); -+ return; -+ } -+ - pci_read_config_dword(dev, 0x68, &val); - /* - * Bit 7 is HPET enable bit. -@@ -328,6 +346,36 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_V - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, - vt8237_force_enable_hpet); - -+static void ati_force_hpet_resume(void) -+{ -+ pci_write_config_dword(cached_dev, 0x14, 0xfed00000); -+ printk(KERN_DEBUG "Force enabled HPET at resume\n"); -+} -+ -+static void ati_force_enable_hpet(struct pci_dev *dev) -+{ -+ u32 uninitialized_var(val); -+ -+ if (hpet_address || force_hpet_address) -+ return; -+ -+ if (!hpet_force_user) { -+ hpet_print_force_info(); -+ return; -+ } -+ -+ pci_write_config_dword(dev, 0x14, 0xfed00000); -+ pci_read_config_dword(dev, 0x14, &val); -+ force_hpet_address = val; -+ force_hpet_resume_type = ATI_FORCE_HPET_RESUME; -+ dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", -+ force_hpet_address); -+ cached_dev = dev; -+ return; -+} -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS, -+ ati_force_enable_hpet); -+ - /* - * Undocumented chipset feature taken from LinuxBIOS. 
- */ -@@ -341,8 +389,13 @@ static void nvidia_force_enable_hpet(str - { - u32 uninitialized_var(val); - -- if (!hpet_force_user || hpet_address || force_hpet_address) -+ if (hpet_address || force_hpet_address) -+ return; -+ -+ if (!hpet_force_user) { -+ hpet_print_force_info(); - return; -+ } - - pci_write_config_dword(dev, 0x44, 0xfed00001); - pci_read_config_dword(dev, 0x44, &val); -@@ -395,6 +448,9 @@ void force_hpet_resume(void) - case NVIDIA_FORCE_HPET_RESUME: - nvidia_force_hpet_resume(); - return; -+ case ATI_FORCE_HPET_RESUME: -+ ati_force_hpet_resume(); -+ return; - default: - break; - } ---- head-2010-05-25.orig/arch/x86/kernel/setup-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -1,141 +1,1133 @@ +--- head-2011-03-11.orig/arch/x86/kernel/setup-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup-xen.c 2011-03-04 15:09:03.000000000 +0100 +@@ -1,143 +1,1132 @@ -#include +/* + * Copyright (C) 1995 Linus Torvalds @@ -10926,25 +10757,15 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + +shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; +EXPORT_SYMBOL(HYPERVISOR_shared_info); - --#ifdef CONFIG_X86_LOCAL_APIC --unsigned int num_processors; --unsigned disabled_cpus __cpuinitdata; --/* Processor that is doing the boot up */ --unsigned int boot_cpu_physical_apicid = -1U; --EXPORT_SYMBOL(boot_cpu_physical_apicid); ++ +static int xen_panic_event(struct notifier_block *, unsigned long, void *); +static struct notifier_block xen_panic_block = { + xen_panic_event, NULL, 0 /* try to go last */ +}; - --DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; --EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); ++ +unsigned long *phys_to_machine_mapping; +EXPORT_SYMBOL(phys_to_machine_mapping); - --/* Bitmask of physically existing CPUs */ --physid_mask_t phys_cpu_present_map; ++ +unsigned long *pfn_to_mfn_frame_list_list, +#ifdef CONFIG_X86_64 + *pfn_to_mfn_frame_list[512]; @@ -10960,23 +10781,24 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +#ifndef ARCH_SETUP +#define ARCH_SETUP +#endif -+ -+#ifndef CONFIG_XEN + +-#ifdef CONFIG_X86_LOCAL_APIC +-unsigned int num_processors; +-unsigned disabled_cpus __cpuinitdata; + #ifndef CONFIG_XEN +-/* Processor that is doing the boot up */ +-unsigned int boot_cpu_physical_apicid = -1U; +-EXPORT_SYMBOL(boot_cpu_physical_apicid); +#ifndef CONFIG_DEBUG_BOOT_PARAMS +struct boot_params __initdata boot_params; +#else +struct boot_params boot_params; +#endif - #endif - --#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP) - /* -- * Copy data used in early init routines from the initial arrays to the -- * per cpu data areas. These arrays then become expendable and the -- * *_early_ptr's are zeroed indicating that the static arrays are gone. ++#endif ++ ++/* + * Machine setup.. 
- */ --static void __init setup_per_cpu_maps(void) ++ */ +static struct resource data_resource = { + .name = "Kernel data", + .start = 0, @@ -10997,8 +10819,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_MEM +}; -+ -+ + +-DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; +-EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); + +-/* Bitmask of physically existing CPUs */ +-physid_mask_t phys_cpu_present_map; +#ifdef CONFIG_X86_32 +#ifndef CONFIG_XEN +/* This value is set up by the early boot code to point to the value @@ -11006,7 +10832,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + address, and must not be in the .bss segment! */ +unsigned long init_pg_tables_start __initdata = ~0UL; +unsigned long init_pg_tables_end __initdata = ~0UL; -+#endif + #endif + +static struct resource video_ram_resource = { + .name = "Video RAM area", @@ -11016,9 +10842,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +}; + +/* cpu data as detected by the assembly code in head.S */ -+struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; ++struct cpuinfo_x86 new_cpu_data __cpuinitdata = { .wp_works_ok = 1, .hard_math = 1 }; +/* common cpu data for all cpus */ -+struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1}; ++struct cpuinfo_x86 boot_cpu_data __read_mostly = { .wp_works_ok = 1, .hard_math = 1 }; +EXPORT_SYMBOL(boot_cpu_data); +#ifndef CONFIG_XEN +static void set_mca_bus(int x) @@ -11044,8 +10870,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +EXPORT_SYMBOL(ist_info); +#elif defined(CONFIG_X86_SPEEDSTEP_SMI) +struct ist_info ist_info; -+#endif -+ + #endif + +-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP) +#else +struct cpuinfo_x86 boot_cpu_data __read_mostly; +EXPORT_SYMBOL(boot_cpu_data); @@ -11061,7 +10888,10 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +/* Boot loader ID as an integer, for the benefit of proc_dointvec */ +int bootloader_type; + -+/* + /* +- * Copy data used in early init routines from the initial arrays to the +- * per cpu data areas. These arrays then become expendable and the +- * *_early_ptr's are zeroed indicating that the static arrays are gone. + * Early DMI memory + */ +int dmi_alloc_index; @@ -11095,7 +10925,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + * copy_edd() - Copy the BIOS EDD information + * from boot_params into a safe place. 
+ * -+ */ + */ +-static void __init setup_per_cpu_maps(void) +static inline void copy_edd(void) +{ + memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, @@ -11187,15 +11018,6 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + u64 ramdisk_size = boot_params.hdr.ramdisk_size; + u64 ramdisk_end = ramdisk_image + ramdisk_size; + u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT; -+ -+ if (!boot_params.hdr.type_of_loader || -+ !ramdisk_image || !ramdisk_size) -+ return; /* No initrd provided by bootloader */ -+#else -+ unsigned long ramdisk_image = __pa(xen_start_info->mod_start); -+ unsigned long ramdisk_size = xen_start_info->mod_len; -+ unsigned long ramdisk_end = ramdisk_image + ramdisk_size; -+ unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT; - for_each_possible_cpu(cpu) { - per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu]; @@ -11204,6 +11026,15 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -#ifdef CONFIG_NUMA - per_cpu(x86_cpu_to_node_map, cpu) = - x86_cpu_to_node_map_init[cpu]; ++ if (!boot_params.hdr.type_of_loader || ++ !ramdisk_image || !ramdisk_size) ++ return; /* No initrd provided by bootloader */ ++#else ++ unsigned long ramdisk_image = __pa(xen_start_info->mod_start); ++ unsigned long ramdisk_size = xen_start_info->mod_len; ++ unsigned long ramdisk_end = ramdisk_image + ramdisk_size; ++ unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT; ++ + if (!xen_start_info->mod_start || !ramdisk_size) + return; /* No initrd provided by bootloader */ #endif @@ -11249,22 +11080,22 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + initrd_start = 0; #endif + free_early(ramdisk_image, ramdisk_end); - } ++} +#else +static void __init reserve_initrd(void) +{ -+} + } +#endif /* CONFIG_BLK_DEV_INITRD */ -+ + +-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP +-cpumask_t *cpumask_of_cpu_map __read_mostly; +-EXPORT_SYMBOL(cpumask_of_cpu_map); +static void __init parse_setup_data(void) +{ +#ifndef CONFIG_XEN + struct setup_data *data; + u64 pa_data; - --#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP --cpumask_t *cpumask_of_cpu_map __read_mostly; --EXPORT_SYMBOL(cpumask_of_cpu_map); ++ + if (boot_params.hdr.version < 0x0209) + return; + pa_data = boot_params.hdr.setup_data; @@ -11292,7 +11123,11 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + struct setup_data *data; + u64 pa_data; + int found = 0; -+ + +- /* alloc_bootmem zeroes memory */ +- cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids); +- for (i = 0; i < nr_cpu_ids; i++) +- cpu_set(i, cpumask_of_cpu_map[i]); + if (boot_params.hdr.version < 0x0209) + return; + pa_data = boot_params.hdr.setup_data; @@ -11306,11 +11141,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + } + if (!found) + return; - -- /* alloc_bootmem zeroes memory */ -- cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids); -- for (i = 0; i < nr_cpu_ids; i++) -- cpu_set(i, cpumask_of_cpu_map[i]); ++ + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); + memcpy(&e820_saved, &e820, sizeof(struct e820map)); + printk(KERN_INFO "extended physical RAM map:\n"); @@ -11345,7 +11176,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - * Great future not-so-futuristic plan: make i386 and x86_64 do it - * the same way + * --------- Crashkernel reservation ------------------------------ -+ */ + */ +-unsigned long 
__per_cpu_offset[NR_CPUS] __read_mostly; +-EXPORT_SYMBOL(__per_cpu_offset); + +#ifdef CONFIG_KEXEC + @@ -11384,8 +11217,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + total = max_low_pfn - min_low_pfn; +#ifdef CONFIG_HIGHMEM + total += highend_pfn - highstart_pfn; -+#endif -+ + #endif + + return total << PAGE_SHIFT; +} + @@ -11480,9 +11313,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +/* elfcorehdr= specifies the location of elf core header + * stored by the crashed kernel. This option will be passed + * by kexec loader to the capture kernel. - */ --unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; --EXPORT_SYMBOL(__per_cpu_offset); ++ */ +static int __init setup_elfcorehdr(char *arg) +{ + char *end; @@ -11492,28 +11323,28 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + return end > arg ? 0 : -EINVAL; +} +early_param("elfcorehdr", setup_elfcorehdr); - #endif - ++#endif ++ +static struct x86_quirks default_x86_quirks __initdata; + +struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; + -+/* + /* +- * Great future plan: +- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. +- * Always point %gs to its beginning + * Determine if we were loaded by an EFI loader. If so, then we have also been + * passed the efi memmap, systab, etc., so we should use these data structures + * for initialization. Note, the efi init code path is determined by the + * global efi_enabled. This allows the same kernel image to be used on existing + * systems (with a traditional BIOS) as well as on EFI systems. -+ */ - /* -- * Great future plan: -- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. -- * Always point %gs to its beginning + */ +-void __init setup_per_cpu_areas(void) ++/* + * setup_arch - architecture-specific boot-time initializations + * + * Note: On x86_64, fixmaps are ready for use even before this is called. - */ --void __init setup_per_cpu_areas(void) ++ */ + +void __init setup_arch(char **cmdline_p) { @@ -11560,6 +11391,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable, + VMASST_TYPE_4gb_segments)); +#endif ++ set_iopl.iopl = 1; ++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl)); +#endif /* CONFIG_XEN */ + +#ifdef CONFIG_X86_32 @@ -11824,13 +11657,13 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +#endif + + initmem_init(0, max_pfn); - ++ +#ifdef CONFIG_ACPI_SLEEP + /* + * Reserve low memory region for sleep support. 
+ */ + acpi_reserve_bootmem(); - #endif ++#endif +#ifdef CONFIG_X86_FIND_SMP_CONFIG + /* + * Find and reserve possible boot-time SMP configuration: @@ -11879,7 +11712,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + int ret; + + difference = xen_start_info->nr_pages - max_pfn; -+ + + set_xen_guest_handle(reservation.extent_start, + ((unsigned long *)xen_start_info->mfn_list) + max_pfn); + reservation.nr_extents = difference; @@ -11941,7 +11774,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + +#ifdef CONFIG_X86_GENERICARCH + generic_apic_probe(); -+#endif + #endif + +#ifndef CONFIG_XEN + early_quirks(); @@ -11999,9 +11832,6 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + if (is_initial_xendomain()) + e820_setup_gap(); + -+ set_iopl.iopl = 1; -+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl)); -+ +#ifdef CONFIG_VT +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; @@ -12023,7 +11853,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + return NOTIFY_DONE; +} +#endif /* !CONFIG_XEN */ ---- head-2010-05-25.orig/arch/x86/kernel/setup64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 18:07:35.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,370 +0,0 @@ -/* @@ -12396,7 +12226,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - if (is_uv_system()) - uv_cpu_init(); -} ---- head-2010-05-25.orig/arch/x86/kernel/setup_32-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_32-xen.c 2011-03-04 15:07:31.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,1153 +0,0 @@ -/* @@ -12570,9 +12400,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -} }; - -/* cpu data as detected by the assembly code in head.S */ --struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; +-struct cpuinfo_x86 new_cpu_data __cpuinitdata = { .wp_works_ok = 1 }; -/* common cpu data for all cpus */ --struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; +-struct cpuinfo_x86 boot_cpu_data __read_mostly = { .wp_works_ok = 1 }; -EXPORT_SYMBOL(boot_cpu_data); - -unsigned int def_to_bigsmp; @@ -13552,9 +13382,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -} - -subsys_initcall(request_standard_resources); ---- head-2010-05-25.orig/arch/x86/kernel/setup_64-xen.c 2010-03-24 15:12:36.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 18:07:35.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,1434 +0,0 @@ +@@ -1,1442 +0,0 @@ -/* - * Copyright (C) 1995 Linus Torvalds - */ @@ -14401,7 +14231,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - */ -static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) -{ --#ifdef CONFIG_SMP +-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - unsigned bits; -#ifdef CONFIG_NUMA - int cpu = smp_processor_id(); @@ -14448,7 +14278,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - -static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) -{ --#ifdef CONFIG_SMP +-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - unsigned bits, ecx; - - /* Multi core CPU? 
*/ @@ -14604,7 +14434,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - -void __cpuinit detect_ht(struct cpuinfo_x86 *c) -{ --#ifdef CONFIG_SMP +-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - u32 eax, ebx, ecx, edx; - int index_msb, core_bits; - @@ -14652,6 +14482,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -#endif -} - +-#ifndef CONFIG_XEN -/* - * find out the number of processor cores on the die - */ @@ -14669,6 +14500,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - else - return 1; -} +-#endif - -static void __cpuinit srat_detect_node(void) -{ @@ -14738,7 +14570,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - if (c->x86 == 6) - set_cpu_cap(c, X86_FEATURE_REP_GOOD); - set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); +-#ifndef CONFIG_XEN - c->x86_max_cores = intel_num_cpu_cores(c); +-#endif - - srat_detect_node(); -} @@ -14798,8 +14632,10 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - c->x86_model_id[0] = '\0'; /* Unset */ - c->x86_clflush_size = 64; - c->x86_cache_alignment = c->x86_clflush_size; +-#ifndef CONFIG_XEN - c->x86_max_cores = 1; - c->x86_coreid_bits = 0; +-#endif - c->extended_cpuid_level = 0; - memset(&c->x86_capability, 0, sizeof c->x86_capability); - @@ -14833,10 +14669,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - c->x86 = 4; - } - +-#ifndef CONFIG_XEN - c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; -#ifdef CONFIG_SMP - c->phys_proc_id = c->initial_apicid; -#endif +-#endif - /* AMD-defined flags: level 0x80000001 */ - xlvl = cpuid_eax(0x80000000); - c->extended_cpuid_level = xlvl; @@ -14989,8 +14827,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - return 1; -} -__setup("clearcpuid=", setup_disablecpuid); ---- head-2010-05-25.orig/arch/x86/kernel/smp-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/smp-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/smp-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -121,132 +121,14 @@ void xen_smp_send_reschedule(int cpu) send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); } @@ -15145,7 +14983,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches disable_all_local_evtchn(); local_irq_restore(flags); } -@@ -298,21 +175,8 @@ irqreturn_t smp_reschedule_interrupt(int +@@ -298,30 +175,24 @@ irqreturn_t smp_reschedule_interrupt(int irqreturn_t smp_call_function_interrupt(int irq, void *dev_id) { @@ -15162,15 +15000,13 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - /* - * At this point the info structure may be out of scope unless wait==1 - */ - irq_enter(); - (*func)(info); + generic_smp_call_function_interrupt(); #ifdef CONFIG_X86_32 __get_cpu_var(irq_stat).irq_call_count++; #else -@@ -320,10 +184,19 @@ irqreturn_t smp_call_function_interrupt( + add_pda(irq_call_count, 1); #endif - irq_exit(); - if (wait) { - mb(); @@ -15181,20 +15017,18 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + +irqreturn_t smp_call_function_single_interrupt(int irq, void *dev_id) +{ -+ irq_enter(); + generic_smp_call_function_single_interrupt(); +#ifdef CONFIG_X86_32 + __get_cpu_var(irq_stat).irq_call_count++; +#else + 
add_pda(irq_call_count, 1); +#endif -+ irq_exit(); return IRQ_HANDLED; } ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -459,7 +459,7 @@ irqreturn_t timer_interrupt(int irq, voi +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-02-01 14:38:38.000000000 +0100 +@@ -460,7 +460,7 @@ irqreturn_t timer_interrupt(int irq, voi /* Keep nmi watchdog up to date */ #ifdef __i386__ @@ -15203,7 +15037,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #else add_pda(irq0_irqs, 1); #endif -@@ -747,9 +747,7 @@ void __init time_init(void) +@@ -750,9 +750,7 @@ void __init time_init(void) update_wallclock(); @@ -15213,7 +15047,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* Cannot request_irq() until kmem is initialised. */ late_time_init = setup_cpu0_timer_irq; -@@ -806,7 +804,8 @@ static void stop_hz_timer(void) +@@ -809,7 +807,8 @@ static void stop_hz_timer(void) /* Leave ourselves in tick mode if rcu or softirq or timer pending. */ if (rcu_needs_cpu(cpu) || local_softirq_pending() || @@ -15223,8 +15057,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches cpu_clear(cpu, nohz_cpu_mask); j = jiffies + 1; } ---- head-2010-05-25.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_32-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -1,5 +1,6 @@ /* * Copyright (C) 1991, 1992 Linus Torvalds @@ -15801,8 +15635,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches unsigned long base = (kesp - uesp) & -THREAD_SIZE; unsigned long new_kesp = kesp - base; unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; ---- head-2010-05-25.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps_64-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -10,73 +10,56 @@ * 'Traps.c' handles hardware traps and faults after we have saved some * state in 'entry.S'. 
@@ -16763,8 +16597,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static int __init code_bytes_setup(char *s) { code_bytes = simple_strtoul(s, NULL, 0); ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -42,7 +42,8 @@ #include #include @@ -16805,8 +16639,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches hotcpu_notifier(cpu_vsyscall_notifier, 0); return 0; } ---- head-2010-05-25.orig/arch/x86/mach-xen/setup.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mach-xen/setup.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mach-xen/setup.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mach-xen/setup.c 2011-02-03 14:23:14.000000000 +0100 @@ -17,6 +17,8 @@ #include #include @@ -16864,7 +16698,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; EXPORT_SYMBOL(machine_to_phys_mapping); unsigned int machine_to_phys_order; -@@ -117,33 +78,66 @@ void __init pre_setup_arch_hook(void) +@@ -117,30 +78,60 @@ void __init pre_setup_arch_hook(void) (unsigned long *)xen_start_info->mfn_list; } @@ -16893,20 +16727,18 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches .type = CALLBACKTYPE_failsafe, - .address = { __KERNEL_CS, (unsigned long)failsafe_callback }, + .address = CALLBACK_ADDR(failsafe_callback) -+ }; + }; +#ifdef CONFIG_X86_64 + static struct callback_register __initdata syscall = { + .type = CALLBACKTYPE_syscall, + .address = CALLBACK_ADDR(system_call) - }; ++ }; +#endif -+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) static struct callback_register __initdata nmi_cb = { .type = CALLBACKTYPE_nmi, - .address = { __KERNEL_CS, (unsigned long)nmi }, + .address = CALLBACK_ADDR(nmi) }; -+#endif ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event); if (ret == 0) @@ -16930,15 +16762,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #endif BUG_ON(ret); -+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) - ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb); - #if CONFIG_XEN_COMPAT <= 0x030002 - if (ret == -ENOSYS) { -@@ -154,15 +148,43 @@ void __init machine_specific_arch_setup( - HYPERVISOR_nmi_op(XENNMI_register_callback, &cb); +@@ -155,14 +146,41 @@ void __init machine_specific_arch_setup( } #endif -+#endif +#ifdef CONFIG_X86_32 /* Do an early initialization of the fixmap area */ @@ -16978,8 +16804,67 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } +#endif } ---- head-2010-05-25.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/Makefile 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/Makefile 2011-02-01 14:38:38.000000000 +0100 +@@ -27,6 +27,7 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology_6 + obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o + + obj-$(CONFIG_XEN) += hypervisor.o ++disabled-obj-$(CONFIG_XEN) := gup.o + + obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o + 
+--- head-2011-03-11.orig/arch/x86/mm/dump_pagetables-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/dump_pagetables-xen.c 2011-02-01 14:38:38.000000000 +0100 +@@ -45,7 +45,7 @@ static struct addr_marker address_marker + { 0, "User Space" }, + #ifdef CONFIG_X86_64 + { HYPERVISOR_VIRT_START, "Hypervisor Space" }, +- { HYPERVISOR_VIRT_END, "Low Kernel Mapping" }, ++ { PAGE_OFFSET, "Low Kernel Mapping" }, + { VMALLOC_START, "vmalloc() Area" }, + { VMEMMAP_START, "Vmemmap" }, + { __START_KERNEL_map, "High Kernel Mapping" }, +@@ -160,8 +160,8 @@ static void note_page(struct seq_file *m + * we have now. "break" is either changing perms, levels or + * address space marker. + */ +- prot = pgprot_val(new_prot) & ~(PTE_MASK); +- cur = pgprot_val(st->current_prot) & ~(PTE_MASK); ++ prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); ++ cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); + + if (!st->level) { + /* First entry */ +@@ -234,7 +234,7 @@ static void walk_pmd_level(struct seq_fi + st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); + if (!hypervisor_space(st->current_address) + && !pmd_none(*start)) { +- pgprotval_t prot = __pmd_val(*start) & ~PTE_MASK; ++ pgprotval_t prot = __pmd_val(*start) & PTE_FLAGS_MASK; + + if (pmd_large(*start) || !pmd_present(*start)) + note_page(m, st, __pgprot(prot), 3); +@@ -267,7 +267,7 @@ static void walk_pud_level(struct seq_fi + st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); + if (!hypervisor_space(st->current_address) + && !pud_none(*start)) { +- pgprotval_t prot = __pud_val(*start) & ~PTE_MASK; ++ pgprotval_t prot = __pud_val(*start) & PTE_FLAGS_MASK; + + if (pud_large(*start) || !pud_present(*start)) + note_page(m, st, __pgprot(prot), 2); +@@ -303,7 +303,7 @@ static void walk_pgd_level(struct seq_fi + for (i = 0; i < PTRS_PER_PGD; i++) { + st.current_address = normalize_addr(i * PGD_LEVEL_MULT); + if (!pgd_none(*start)) { +- pgprotval_t prot = __pgd_val(*start) & ~PTE_MASK; ++ pgprotval_t prot = __pgd_val(*start) & PTE_FLAGS_MASK; + + if (pgd_large(*start) || !pgd_present(*start)) + note_page(m, &st, __pgprot(prot), 1); +--- head-2011-03-11.orig/arch/x86/mm/fault-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/fault-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -10,6 +10,7 @@ #include #include @@ -17056,7 +16941,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches print_vma_addr(" in ", regs->ip); printk("\n"); } -@@ -946,81 +947,45 @@ LIST_HEAD(pgd_list); +@@ -949,89 +950,52 @@ LIST_HEAD(pgd_list); void vmalloc_sync_all(void) { #ifdef CONFIG_X86_32 @@ -17092,8 +16977,14 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - return; - } - list_for_each_entry(page, &pgd_list, lru) { -- if (!vmalloc_sync_one(page_address(page), -- address)) +- pmd_t *pmd; +- +- pgd_page_table(lock, page); +- pmd = vmalloc_sync_one(page_address(page), +- address); +- pgd_page_table(unlock, page); +- +- if (!pmd) - break; - } - spin_unlock_irqrestore(&pgd_lock, flags); @@ -17105,8 +16996,13 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + + spin_lock_irqsave(&pgd_lock, flags); + list_for_each_entry(page, &pgd_list, lru) { -+ if (!vmalloc_sync_one(page_address(page), -+ address)) ++ pmd_t *pmd; ++ ++ pgd_page_table(lock, page); ++ pmd = vmalloc_sync_one(page_address(page), address); ++ pgd_page_table(unlock, page); ++ ++ if (!pmd) + break; } - if (address == start && test_bit(sync_index(address), 
insync)) @@ -17137,10 +17033,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); +- pgd_page_table(lock, page); - if (pgd_none(*pgd)) - set_pgd(pgd, *pgd_ref); - else - BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); +- pgd_page_table(unlock, page); - } - spin_unlock_irqrestore(&pgd_lock, flags); - set_bit(pgd_index(address), insync); @@ -17154,10 +17052,12 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ pgd_page_table(lock, page); + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); ++ pgd_page_table(unlock, page); } - if (address == start) - start = address + PGDIR_SIZE; @@ -17165,9 +17065,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } #endif } ---- head-2010-05-25.orig/arch/x86/mm/hypervisor.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/hypervisor.c 2010-03-24 15:12:46.000000000 +0100 -@@ -713,6 +713,72 @@ void xen_destroy_contiguous_region(unsig +--- head-2011-03-11.orig/arch/x86/mm/hypervisor.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/hypervisor.c 2011-02-01 14:38:38.000000000 +0100 +@@ -711,6 +711,72 @@ void xen_destroy_contiguous_region(unsig } EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); @@ -17240,7 +17140,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static void undo_limit_pages(struct page *pages, unsigned int order) { BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); -@@ -879,42 +945,9 @@ int write_ldt_entry(struct desc_struct * +@@ -877,42 +943,9 @@ int write_ldt_entry(struct desc_struct * return HYPERVISOR_update_descriptor(mach_lp, *(const u64*)desc); } @@ -17287,8 +17187,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + maddr_t mach_gp = virt_to_machine(gdt + entry); + return HYPERVISOR_update_descriptor(mach_gp, *(const u64*)desc); } ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/init_32-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_32-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -54,6 +54,7 @@ unsigned int __VMALLOC_RESERVE = 128 << 20; @@ -18118,8 +18018,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +{ + return reserve_bootmem(phys, len, flags); +} ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:51:25.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-04-29 09:51:38.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/init_64-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_64-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -21,6 +21,7 @@ #include #include @@ -18958,7 +18858,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - default: - return; - } -- + - incr = sizeof(unsigned long); - start_phys_aligned = ALIGN(start_phys, incr); - count = (size - (start_phys_aligned - start_phys))/incr; @@ -18987,48 +18887,47 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by 
xen-port-patches - val, start_bad, last_bad + incr); - reserve_early(start_bad, last_bad - start_bad, "BAD RAM"); - } - --} + unsigned long next, last_map_addr = end; --static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE; +-} + start = (unsigned long)__va(start); + end = (unsigned long)__va(end); +-static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE; ++ for (; start < end; start = next) { ++ pgd_t *pgd = pgd_offset_k(start); ++ unsigned long pud_phys; ++ pud_t *pud; + -static int __init parse_memtest(char *arg) -{ - if (arg) - memtest_pattern = simple_strtoul(arg, NULL, 0); - return 0; -} -+ for (; start < end; start = next) { -+ pgd_t *pgd = pgd_offset_k(start); -+ unsigned long pud_phys; -+ pud_t *pud; - --early_param("memtest", parse_memtest); + next = (start + PGDIR_SIZE) & PGDIR_MASK; + if (next > end) + next = end; --static void __init early_memtest(unsigned long start, unsigned long end) --{ -- u64 t_start, t_size; -- unsigned pattern; +-early_param("memtest", parse_memtest); + if (__pgd_val(*pgd)) { + last_map_addr = phys_pud_update(pgd, __pa(start), + __pa(end), page_size_mask); + continue; + } -- if (!memtest_pattern) -- return; +-static void __init early_memtest(unsigned long start, unsigned long end) +-{ +- u64 t_start, t_size; +- unsigned pattern; + pud = alloc_low_page(&pud_phys); + last_map_addr = phys_pud_init(pud, __pa(start), __pa(next), + page_size_mask); + unmap_low_page(pud); -+ -+ if(!after_bootmem) { + +- if (!memtest_pattern) +- return; ++ if (!after_bootmem) { + if (max_pfn_mapped) + make_page_readonly(__va(pud_phys), + XENFEAT_writable_page_tables); @@ -19484,8 +19383,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } return 0; } ---- head-2010-05-25.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/ioremap-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:39:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/ioremap-xen.c 2011-02-07 15:40:39.000000000 +0100 @@ -13,6 +13,7 @@ #include #include @@ -19494,7 +19393,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include #include -@@ -274,7 +275,8 @@ int ioremap_check_change_attr(unsigned l +@@ -255,7 +256,8 @@ int ioremap_check_change_attr(unsigned l for (sz = rc = 0; sz < size && !rc; ++mfn, sz += PAGE_SIZE) { unsigned long pfn = mfn_to_local_pfn(mfn); @@ -19504,10 +19403,10 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches continue; rc = ioremap_change_attr((unsigned long)__va(pfn << PAGE_SHIFT), PAGE_SIZE, prot_val); -@@ -297,11 +299,14 @@ static void __iomem *__ioremap_caller(re +@@ -278,11 +280,14 @@ static void __iomem *__ioremap_caller(re { - unsigned long mfn, offset, vaddr; - resource_size_t last_addr; + unsigned long offset, vaddr; + phys_addr_t mfn, last_addr; + const resource_size_t unaligned_phys_addr = phys_addr; + const unsigned long unaligned_size = size; struct vm_struct *area; @@ -19519,7 +19418,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; -@@ -318,7 +323,7 @@ static void __iomem *__ioremap_caller(re +@@ -299,7 +304,7 @@ static void __iomem *__ioremap_caller(re /* * Don't remap the low PCI/ISA area, it's always mapped.. 
*/ @@ -19528,7 +19427,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return (__force void __iomem *)isa_bus_to_virt((unsigned long)phys_addr); /* -@@ -342,7 +347,7 @@ static void __iomem *__ioremap_caller(re +@@ -323,7 +328,7 @@ static void __iomem *__ioremap_caller(re phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; @@ -19537,7 +19436,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches prot_val, &new_prot_val); if (retval) { pr_debug("Warning: reserve_memtype returned %d\n", retval); -@@ -410,7 +415,10 @@ static void __iomem *__ioremap_caller(re +@@ -391,7 +396,10 @@ static void __iomem *__ioremap_caller(re return NULL; } @@ -19549,7 +19448,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } /** -@@ -438,7 +446,7 @@ void __iomem *ioremap_nocache(resource_s +@@ -419,7 +427,7 @@ void __iomem *ioremap_nocache(resource_s { /* * Ideally, this should be: @@ -19558,7 +19457,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches * * Till we fix all X drivers to use ioremap_wc(), we will use * UC MINUS. -@@ -462,7 +470,7 @@ EXPORT_SYMBOL(ioremap_nocache); +@@ -443,7 +451,7 @@ EXPORT_SYMBOL(ioremap_nocache); */ void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) { @@ -19567,7 +19466,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, __builtin_return_address(0)); else -@@ -502,6 +510,14 @@ static void __iomem *ioremap_default(res +@@ -483,6 +491,14 @@ static void __iomem *ioremap_default(res } #endif @@ -19582,7 +19481,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /** * iounmap - Free a IO remapping * @addr: virtual address from ioremap_* -@@ -526,6 +542,8 @@ void iounmap(volatile void __iomem *addr +@@ -507,6 +523,8 @@ void iounmap(volatile void __iomem *addr addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); @@ -19591,7 +19490,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by -@@ -533,7 +551,7 @@ void iounmap(volatile void __iomem *addr +@@ -514,7 +532,7 @@ void iounmap(volatile void __iomem *addr cpa takes care of the direct mappings. 
*/ read_lock(&vmlist_lock); for (p = vmlist; p; p = p->next) { @@ -19600,7 +19499,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches break; } read_unlock(&vmlist_lock); -@@ -547,7 +565,7 @@ void iounmap(volatile void __iomem *addr +@@ -528,7 +546,7 @@ void iounmap(volatile void __iomem *addr free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p)); /* Finally remove it */ @@ -19609,7 +19508,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches BUG_ON(p != o || o == NULL); kfree(p); } -@@ -567,7 +585,7 @@ void *xlate_dev_mem_ptr(unsigned long ph +@@ -548,7 +566,7 @@ void *xlate_dev_mem_ptr(unsigned long ph if (page_is_ram(start >> PAGE_SHIFT)) return __va(phys); @@ -19618,7 +19517,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (addr) addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); -@@ -595,8 +613,7 @@ static int __init early_ioremap_debug_se +@@ -576,8 +594,7 @@ static int __init early_ioremap_debug_se early_param("early_ioremap_debug", early_ioremap_debug_setup); static __initdata int after_paging_init; @@ -19628,7 +19527,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #ifdef CONFIG_X86_32 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) -@@ -695,10 +712,11 @@ static void __init __early_set_fixmap(en +@@ -676,10 +693,11 @@ static void __init __early_set_fixmap(en return; } pte = early_ioremap_pte(addr); @@ -19641,7 +19540,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches __flush_tlb_one(addr); } -@@ -726,13 +744,11 @@ static int __init check_early_ioremap_le +@@ -707,13 +725,11 @@ static int __init check_early_ioremap_le { if (!early_ioremap_nested) return 0; @@ -19658,8 +19557,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return 1; } ---- head-2010-05-25.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pageattr-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pageattr-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -34,6 +34,47 @@ struct cpa_data { unsigned force_split : 1; }; @@ -19874,8 +19773,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return _set_memory_wb(addr, numpages); } ---- head-2010-05-25.orig/arch/x86/mm/pat-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pat-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pat-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pat-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -12,6 +12,8 @@ #include #include @@ -20585,8 +20484,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +late_initcall(pat_memtype_list_init); + +#endif /* CONFIG_DEBUG_FS */ ---- head-2010-05-25.orig/arch/x86/mm/pgtable-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pgtable-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pgtable-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -4,6 +4,7 @@ #include #include @@ -20828,7 +20727,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #ifdef CONFIG_X86_64 /* We allocate two contiguous 
pages for kernel and user. */ -@@ -616,19 +611,52 @@ static void pgd_mop_up_pmds(struct mm_st +@@ -616,22 +611,55 @@ static void pgd_mop_up_pmds(struct mm_st pgd_t *pgd_alloc(struct mm_struct *mm) { @@ -20844,8 +20743,11 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - /* so that alloc_pd can use it */ mm->pgd = pgd; -- if (pgd) +- if (pgd) { +- /* Store a back link for vmalloc_sync_all(). */ +- set_page_private(virt_to_page(pgd), (unsigned long)mm); - pgd_ctor(pgd); +- } - if (pgd && !pgd_prepopulate_pmd(mm, pgd)) { - free_pages((unsigned long)pgd, PGD_ORDER); @@ -20875,6 +20777,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + pgd_ctor(pgd); + pgd_prepopulate_pmd(mm, pgd, pmds); + ++ /* Store a back link for vmalloc_sync_all(). */ ++ set_page_private(virt_to_page(pgd), (unsigned long)mm); ++ + spin_unlock_irqrestore(&pgd_lock, flags); return pgd; @@ -20888,7 +20793,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } void pgd_free(struct mm_struct *mm, pgd_t *pgd) -@@ -644,6 +672,7 @@ void pgd_free(struct mm_struct *mm, pgd_ +@@ -647,6 +675,7 @@ void pgd_free(struct mm_struct *mm, pgd_ pgd_dtor(pgd); pgd_mop_up_pmds(mm, pgd); @@ -20896,7 +20801,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches free_pages((unsigned long)pgd, PGD_ORDER); } -@@ -686,7 +715,7 @@ int ptep_test_and_clear_young(struct vm_ +@@ -689,7 +718,7 @@ int ptep_test_and_clear_young(struct vm_ if (pte_young(*ptep)) ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, @@ -20905,7 +20810,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (ret) pte_update(vma->vm_mm, addr, ptep); -@@ -708,3 +737,42 @@ int ptep_clear_flush_young(struct vm_are +@@ -711,3 +740,42 @@ int ptep_clear_flush_young(struct vm_are return young; } @@ -20948,8 +20853,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + set_pte_vaddr(address, pte); + fixmaps_set++; +} ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pgtable_32-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -25,51 +25,49 @@ #include #include @@ -21130,14 +21035,35 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches void make_lowmem_page_readonly(void *va, unsigned int feature) { pte_t *pte; ---- head-2010-05-25.orig/arch/x86/pci/amd_bus.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/pci/amd_bus.c 2010-03-24 15:12:46.000000000 +0100 -@@ -390,6 +390,14 @@ static int __init pci_io_ecs_init(void) +--- head-2011-03-11.orig/arch/x86/pci/amd_bus.c 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/arch/x86/pci/amd_bus.c 2011-02-01 14:38:38.000000000 +0100 +@@ -350,6 +350,7 @@ static int __init early_fill_mp_bus_info + + #define ENABLE_CF8_EXT_CFG (1ULL << 46) + ++#ifndef CONFIG_XEN + static void enable_pci_io_ecs(void *unused) + { + u64 reg; +@@ -378,6 +379,7 @@ static int __cpuinit amd_cpu_notify(stru + static struct notifier_block __cpuinitdata amd_cpu_notifier = { + .notifier_call = amd_cpu_notify, + }; ++#endif /* CONFIG_XEN */ + + static void __init pci_enable_pci_io_ecs(void) + { +@@ -419,10 +421,19 @@ static int __init pci_io_ecs_init(void) + if (early_pci_allowed()) + 
pci_enable_pci_io_ecs(); + ++#ifndef CONFIG_XEN + register_cpu_notifier(&amd_cpu_notifier); for_each_online_cpu(cpu) amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, (void *)(long)cpu); -+#ifdef CONFIG_XEN -+ { ++#else ++ if (cpu = 1, cpu) { + u64 reg; + rdmsrl(MSR_AMD64_NB_CFG, reg); + if (!(reg & ENABLE_CF8_EXT_CFG)) @@ -21147,7 +21073,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches pci_probe |= PCI_HAS_IO_ECS; return 0; -@@ -397,6 +405,10 @@ static int __init pci_io_ecs_init(void) +@@ -430,6 +441,10 @@ static int __init pci_io_ecs_init(void) static int __init amd_postcore_init(void) { @@ -21158,8 +21084,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return 0; ---- head-2010-05-25.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/pci/irq-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/pci/irq-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/pci/irq-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -11,8 +11,8 @@ #include #include @@ -22034,8 +21960,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } return 0; } ---- head-2010-05-25.orig/arch/x86/vdso/Makefile 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/vdso/Makefile 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/Makefile 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/Makefile 2011-02-01 14:38:38.000000000 +0100 @@ -65,9 +65,7 @@ obj-$(VDSO32-y) += vdso32-syms.lds vdso32.so-$(VDSO32-y) += int80 vdso32.so-$(CONFIG_COMPAT) += syscall @@ -22047,8 +21973,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches vdso32-images = $(vdso32.so-y:%=vdso32-%.so) ---- head-2010-05-25.orig/arch/x86/vdso/vdso32.S 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/vdso/vdso32.S 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/vdso32.S 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/vdso32.S 2011-02-01 14:38:38.000000000 +0100 @@ -9,7 +9,7 @@ vdso32_int80_end: .globl vdso32_syscall_start, vdso32_syscall_end @@ -22075,8 +22001,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -#endif - __FINIT ---- head-2010-05-25.orig/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/vdso/vdso32-setup-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:38:38.000000000 +0100 @@ -195,50 +195,28 @@ static __init void relocate_vdso(Elf32_E } } @@ -22210,18 +22136,17 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } memcpy(syscall_page, vsyscall, vsyscall_len); ---- head-2010-05-25.orig/arch/x86/xen/Kconfig 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/arch/x86/xen/Kconfig 2010-03-24 15:12:46.000000000 +0100 -@@ -17,7 +17,7 @@ config XEN_MAX_DOMAIN_MEMORY - int "Maximum allowed size of a domain in gigabytes" - default 8 if X86_32 - default 32 if X86_64 +--- head-2011-03-11.orig/arch/x86/xen/Kconfig 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/xen/Kconfig 2011-02-01 14:38:38.000000000 +0100 +@@ -31,14 +31,14 @@ config XEN_PVHVM + config 
XEN_MAX_DOMAIN_MEMORY + int + default 128 - depends on XEN + depends on PARAVIRT_XEN help - The pseudo-physical to machine address array is sized - according to the maximum possible memory size of a Xen -@@ -26,7 +26,7 @@ config XEN_MAX_DOMAIN_MEMORY + This only affects the sizing of some bss arrays, the unused + portions of which are freed. config XEN_SAVE_RESTORE bool @@ -22230,26 +22155,26 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches default y config XEN_DEBUG_FS ---- head-2010-05-25.orig/drivers/acpi/processor_driver.c 2010-04-15 10:04:18.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_driver.c 2010-05-25 09:24:20.000000000 +0200 -@@ -629,10 +629,12 @@ static int __cpuinit acpi_processor_add( - if (result) - goto err_free_cpumask; +--- head-2011-03-11.orig/drivers/acpi/processor_driver.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_driver.c 2011-02-01 14:38:38.000000000 +0100 +@@ -512,10 +512,12 @@ static int __cpuinit acpi_processor_add( + per_cpu(processors, pr->id) = pr; + #endif - sysdev = get_cpu_sysdev(pr->id); - if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { - result = -EFAULT; -- goto err_remove_fs; +- goto err_free_cpumask; + if (pr->id != -1) { + sysdev = get_cpu_sysdev(pr->id); + if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { + result = -EFAULT; -+ goto err_remove_fs; ++ goto err_free_cpumask; + } } #if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) -@@ -717,7 +719,8 @@ static int acpi_processor_remove(struct +@@ -599,7 +601,8 @@ static int acpi_processor_remove(struct acpi_processor_power_exit(pr, device); @@ -22257,10 +22182,10 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + if (pr->id != -1) + sysfs_remove_link(&device->dev.kobj, "sysdev"); - acpi_processor_remove_fs(device); - ---- head-2010-05-25.orig/drivers/acpi/processor_perflib.c 2010-05-06 14:22:32.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_perflib.c 2010-05-06 14:23:47.000000000 +0200 + if (pr->cdev) { + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); +--- head-2011-03-11.orig/drivers/acpi/processor_perflib.c 2011-01-31 17:02:29.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_perflib.c 2011-02-01 14:38:38.000000000 +0100 @@ -187,6 +187,12 @@ int acpi_processor_ppc_has_changed(struc { int ret; @@ -22274,8 +22199,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (ignore_ppc) { /* * Only when it is notification event, the _OST object ---- head-2010-05-25.orig/drivers/char/tpm/tpm_vtpm.c 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/char/tpm/tpm_vtpm.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/char/tpm/tpm_vtpm.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_vtpm.c 2011-02-01 14:38:38.000000000 +0100 @@ -347,7 +347,7 @@ static int _vtpm_send_queued(struct tpm_ { int rc; @@ -22285,12 +22210,38 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches unsigned char buffer[1]; struct vtpm_state *vtpms; vtpms = (struct vtpm_state *)chip_get_private(chip); ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -92,12 +92,10 @@ arch_teardown_msi_irqs(struct pci_dev *d +--- head-2011-03-11.orig/drivers/dma/ioat/dma.h 2011-01-31 17:56:27.000000000 
+0100 ++++ head-2011-03-11/drivers/dma/ioat/dma.h 2011-02-01 14:38:38.000000000 +0100 +@@ -363,6 +363,7 @@ __ioat_dca_init(struct pci_dev *pdev, vo } + #define ioat_dca_init __ioat_dca_init + #define ioat2_dca_init __ioat_dca_init ++#define ioat3_dca_init __ioat_dca_init #endif + #endif /* IOATDMA_H */ +--- head-2011-03-11.orig/drivers/hwmon/coretemp-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/hwmon/coretemp-xen.c 2011-02-01 14:38:38.000000000 +0100 +@@ -360,10 +360,11 @@ static int coretemp_device_add(unsigned + if (err) + goto exit_entry_free; + +- /* check if family 6, models 0xe, 0xf, 0x16, 0x17 */ ++ /* check if family 6, models 0xe, 0xf, 0x16, 0x17, 0x1A */ + if (info.x86 != 0x6 || + !((pdev_entry->x86_model == 0xe) || (pdev_entry->x86_model == 0xf) || +- (pdev_entry->x86_model == 0x16) || (pdev_entry->x86_model == 0x17))) { ++ (pdev_entry->x86_model == 0x16) || (pdev_entry->x86_model == 0x17) || ++ (pdev_entry->x86_model == 0x1A))) { + + /* supported CPU not found, but report the unknown + family 6 CPU */ +--- head-2011-03-11.orig/drivers/pci/msi-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/pci/msi-xen.c 2011-02-01 14:38:38.000000000 +0100 +@@ -53,12 +53,10 @@ arch_msi_check_device(struct pci_dev *de + return 0; + } + -static void msi_set_enable(struct pci_dev *dev, int enable) +static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) { @@ -22301,7 +22252,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; -@@ -107,6 +105,11 @@ static void msi_set_enable(struct pci_de +@@ -68,6 +66,11 @@ static void msi_set_enable(struct pci_de } } @@ -22313,7 +22264,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static void msix_set_enable(struct pci_dev *dev, int enable) { int pos; -@@ -219,8 +222,7 @@ static int msi_get_dev_owner(struct pci_ +@@ -180,8 +183,7 @@ static int msi_get_dev_owner(struct pci_ BUG_ON(!is_initial_xendomain()); if (get_owner && (owner = get_owner(dev)) >= 0) { @@ -22323,16 +22274,16 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return owner; } -@@ -240,7 +242,7 @@ static int msi_unmap_pirq(struct pci_dev +@@ -201,7 +203,7 @@ static int msi_unmap_pirq(struct pci_dev ? 
pirq : evtchn_get_xen_pirq(pirq); if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap))) - printk(KERN_WARNING "unmap irq %x failed\n", pirq); -+ dev_warn(&dev->dev, "unmap irq %x failed\n", pirq); ++ dev_warn(&dev->dev, "unmap irq %d failed\n", pirq); if (rc < 0) return rc; -@@ -288,7 +290,7 @@ static int msi_map_vector(struct pci_dev +@@ -249,7 +251,7 @@ static int msi_map_vector(struct pci_dev map_irq.table_base = table_base; if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq))) @@ -22341,7 +22292,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (rc < 0) return rc; -@@ -399,10 +401,9 @@ static int msix_capability_init(struct p +@@ -360,10 +362,9 @@ static int msix_capability_init(struct p mapped = 0; list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) { if (pirq_entry->entry_nr == entries[i].entry) { @@ -22355,7 +22306,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches (entries + i)->vector = pirq_entry->pirq; mapped = 1; break; -@@ -528,9 +529,8 @@ int pci_enable_msi(struct pci_dev* dev) +@@ -489,9 +490,8 @@ int pci_enable_msi(struct pci_dev* dev) /* Check whether driver already requested for MSI-X irqs */ if (dev->msix_enabled) { @@ -22367,7 +22318,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return -EINVAL; } -@@ -612,7 +612,8 @@ int pci_enable_msix(struct pci_dev* dev, +@@ -573,7 +573,8 @@ int pci_enable_msix(struct pci_dev* dev, temp = dev->irq; ret = pci_frontend_enable_msix(dev, entries, nvec); if (ret) { @@ -22377,7 +22328,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return ret; } dev->msix_enabled = 1; -@@ -663,9 +664,8 @@ int pci_enable_msix(struct pci_dev* dev, +@@ -624,9 +625,8 @@ int pci_enable_msix(struct pci_dev* dev, temp = dev->irq; /* Check whether driver already requested for MSI vector */ if (dev->msi_enabled) { @@ -22389,16 +22340,16 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return -EINVAL; } ---- head-2010-05-25.orig/drivers/xen/Makefile 2010-04-19 14:50:44.000000000 +0200 -+++ head-2010-05-25/drivers/xen/Makefile 2010-04-19 14:51:09.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/Makefile 2011-02-28 15:13:33.000000000 +0100 ++++ head-2011-03-11/drivers/xen/Makefile 2011-02-01 14:38:38.000000000 +0100 @@ -1,4 +1,4 @@ -obj-$(CONFIG_PARAVIRT_XEN) += grant-table.o features.o events.o +obj-$(CONFIG_PARAVIRT_XEN) += grant-table.o features.o events.o manage.o xen-balloon-$(CONFIG_PARAVIRT_XEN) := balloon.o xen-balloon-$(CONFIG_XEN) := balloon/ ---- head-2010-05-25.orig/drivers/xen/balloon/balloon.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/balloon/balloon.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/balloon/balloon.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/balloon/balloon.c 2011-02-01 14:38:38.000000000 +0100 @@ -82,7 +82,7 @@ struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; @@ -22408,8 +22359,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else ---- head-2010-05-25.orig/drivers/xen/balloon/sysfs.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/balloon/sysfs.c 2010-03-24 
15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/balloon/sysfs.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/balloon/sysfs.c 2011-02-01 14:38:38.000000000 +0100 @@ -45,6 +45,7 @@ #define BALLOON_SHOW(name, format, args...) \ @@ -22437,8 +22388,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches { char memstring[64], *endchar; unsigned long long target_bytes; ---- head-2010-05-25.orig/drivers/xen/blktap/blktap.c 2010-04-29 09:51:23.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap/blktap.c 2010-04-29 09:51:40.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:11:08.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap/blktap.c 2011-02-17 10:11:18.000000000 +0100 @@ -54,6 +54,7 @@ #include #include @@ -22447,7 +22398,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include #define MAX_TAP_DEV 256 /*the maximum number of tapdisk ring devices */ -@@ -523,7 +524,7 @@ found: +@@ -502,7 +503,7 @@ found: if ((class = get_xen_class()) != NULL) device_create(class, NULL, MKDEV(blktap_major, minor), @@ -22456,7 +22407,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } out: -@@ -1761,7 +1762,8 @@ static int __init blkif_init(void) +@@ -1743,7 +1744,8 @@ static int __init blkif_init(void) * We only create the device when a request of a new device is * made. */ @@ -22466,8 +22417,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } else { /* this is bad, but not fatal */ WPRINTK("blktap: sysfs xen_class not created\n"); ---- head-2010-05-25.orig/drivers/xen/blktap2/device.c 2010-04-19 11:30:22.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap2/device.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blktap2/device.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/device.c 2011-02-01 14:38:38.000000000 +0100 @@ -3,6 +3,7 @@ #include #include @@ -22476,8 +22427,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include #include ---- head-2010-05-25.orig/drivers/xen/blktap2/sysfs.c 2010-05-25 09:24:03.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap2/sysfs.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/blktap2/sysfs.c 2011-03-11 10:58:58.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/sysfs.c 2011-02-01 14:38:38.000000000 +0100 @@ -307,8 +307,8 @@ blktap_sysfs_create(struct blktap *tap) ring = &tap->ring; @@ -22489,8 +22440,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (IS_ERR(dev)) return PTR_ERR(dev); ---- head-2010-05-25.orig/drivers/xen/char/mem.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/char/mem.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/char/mem.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/char/mem.c 2011-02-01 14:38:38.000000000 +0100 @@ -35,7 +35,7 @@ static inline int uncached_access(struct static inline int range_is_allowed(unsigned long pfn, unsigned long size) @@ -22512,8 +22463,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches }; static int xen_mmap_mem(struct file * file, struct vm_area_struct * vma) ---- head-2010-05-25.orig/drivers/xen/console/console.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/console/console.c 2010-03-24 15:12:46.000000000 
+0100 +--- head-2011-03-11.orig/drivers/xen/console/console.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/console/console.c 2011-02-01 14:38:38.000000000 +0100 @@ -431,9 +431,7 @@ static void __xencons_tx_flush(void) if (work_done && (xencons_tty != NULL)) { @@ -22536,9 +22487,21 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches tty->closing = 0; spin_lock_irqsave(&xencons_lock, flags); xencons_tty = NULL; ---- head-2010-05-25.orig/drivers/xen/core/evtchn.c 2010-04-23 15:15:37.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/evtchn.c 2010-04-23 15:17:15.000000000 +0200 -@@ -751,8 +751,9 @@ static struct irq_chip dynirq_chip = { +--- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-02-01 14:38:38.000000000 +0100 +@@ -126,7 +126,11 @@ static int irq_bindcount[NR_IRQS]; + + #ifdef CONFIG_SMP + ++#if CONFIG_NR_CPUS <= 256 + static u8 cpu_evtchn[NR_EVENT_CHANNELS]; ++#else ++static u16 cpu_evtchn[NR_EVENT_CHANNELS]; ++#endif + static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; + + static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, +@@ -767,8 +771,9 @@ static struct irq_chip dynirq_chip = { }; /* Bitmap indicating which PIRQs require Xen to be notified on unmask. */ @@ -22549,7 +22512,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq) { -@@ -799,25 +800,31 @@ static inline void pirq_query_unmask(int +@@ -815,25 +820,31 @@ static inline void pirq_query_unmask(int set_bit(irq - PIRQ_BASE, pirq_needs_eoi); } @@ -22586,10 +22549,10 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) { - if (!probing_irq(irq)) + if (bind_pirq.flags) - printk(KERN_INFO "Failed to obtain physical IRQ %d\n", - irq); + pr_info("Failed to obtain physical IRQ %d\n", irq); return; -@@ -893,6 +900,7 @@ static struct irq_chip pirq_chip = { + } +@@ -910,6 +921,7 @@ static struct irq_chip pirq_chip = { .ack = ack_pirq, .end = end_pirq, .eoi = end_pirq, @@ -22597,15 +22560,15 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #ifdef CONFIG_SMP .set_affinity = set_affinity_irq, #endif -@@ -1005,6 +1013,7 @@ void xen_poll_irq(int irq) - BUG(); +@@ -985,6 +997,7 @@ void disable_all_local_evtchn(void) + synch_set_bit(i, &s->evtchn_mask[0]); } +#ifdef CONFIG_PM_SLEEP static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; -@@ -1097,6 +1106,7 @@ void irq_resume(void) +@@ -1077,6 +1090,7 @@ void irq_resume(void) } } @@ -22613,7 +22576,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #if defined(CONFIG_X86_IO_APIC) #define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE)) -@@ -1179,7 +1189,7 @@ void __init xen_init_IRQ(void) +@@ -1159,7 +1173,7 @@ void __init xen_init_IRQ(void) * BITS_TO_LONGS(ALIGN(NR_PIRQS, PAGE_SIZE * 8))); eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT; if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0) @@ -22622,9 +22585,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* No event channels are 'live' right now. 
*/ for (i = 0; i < NR_EVENT_CHANNELS; i++) ---- head-2010-05-25.orig/drivers/xen/core/gnttab.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/gnttab.c 2010-03-24 15:12:46.000000000 +0100 -@@ -449,6 +449,7 @@ static int map_pte_fn(pte_t *pte, struct +--- head-2011-03-11.orig/drivers/xen/core/gnttab.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/gnttab.c 2011-02-01 14:38:38.000000000 +0100 +@@ -448,6 +448,7 @@ static int map_pte_fn(pte_t *pte, struct return 0; } @@ -22632,7 +22595,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { -@@ -456,6 +457,7 @@ static int unmap_pte_fn(pte_t *pte, stru +@@ -455,6 +456,7 @@ static int unmap_pte_fn(pte_t *pte, stru set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } @@ -22640,7 +22603,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches void *arch_gnttab_alloc_shared(unsigned long *frames) { -@@ -636,6 +638,75 @@ void __gnttab_dma_map_page(struct page * +@@ -635,6 +637,75 @@ void __gnttab_dma_map_page(struct page * } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } @@ -22716,7 +22679,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) -@@ -643,6 +714,7 @@ int gnttab_resume(void) +@@ -642,6 +713,7 @@ int gnttab_resume(void) return gnttab_map(0, nr_grant_frames - 1); } @@ -22724,7 +22687,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches int gnttab_suspend(void) { #ifdef CONFIG_X86 -@@ -652,6 +724,7 @@ int gnttab_suspend(void) +@@ -651,6 +723,7 @@ int gnttab_suspend(void) #endif return 0; } @@ -22732,7 +22695,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #else /* !CONFIG_XEN */ -@@ -762,6 +835,18 @@ int __devinit gnttab_init(void) +@@ -761,6 +834,18 @@ int __devinit gnttab_init(void) gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; @@ -22751,8 +22714,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return 0; ini_nomem: ---- head-2010-05-25.orig/drivers/xen/core/machine_kexec.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/machine_kexec.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/machine_kexec.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/machine_kexec.c 2011-02-01 14:38:38.000000000 +0100 @@ -57,8 +57,7 @@ void __init xen_machine_kexec_setup_reso /* allocate xen_phys_cpus */ @@ -22807,17 +22770,17 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches void __init xen_machine_kexec_register_resources(struct resource *res) { int k; ---- head-2010-05-25.orig/drivers/xen/core/machine_reboot.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/machine_reboot.c 2010-03-24 15:12:46.000000000 +0100 -@@ -57,6 +57,7 @@ EXPORT_SYMBOL(machine_restart); - EXPORT_SYMBOL(machine_halt); - EXPORT_SYMBOL(machine_power_off); +--- head-2011-03-11.orig/drivers/xen/core/machine_reboot.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/machine_reboot.c 2011-02-01 14:38:38.000000000 +0100 +@@ -52,6 +52,7 @@ void machine_power_off(void) + HYPERVISOR_shutdown(SHUTDOWN_poweroff); + } +#ifdef CONFIG_PM_SLEEP static void 
pre_suspend(void) { HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; -@@ -113,6 +114,7 @@ static void post_suspend(int suspend_can +@@ -108,6 +109,7 @@ static void post_suspend(int suspend_can HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = virt_to_mfn(pfn_to_mfn_frame_list_list); } @@ -22825,7 +22788,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #else /* !(defined(__i386__) || defined(__x86_64__)) */ -@@ -131,6 +133,7 @@ static void post_suspend(int suspend_can +@@ -126,6 +128,7 @@ static void post_suspend(int suspend_can #endif @@ -22833,7 +22796,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches struct suspend { int fast_suspend; void (*resume_notifier)(int); -@@ -224,7 +227,8 @@ int __xen_suspend(int fast_suspend, void +@@ -221,7 +224,8 @@ int __xen_suspend(int fast_suspend, void if (fast_suspend) { xenbus_suspend(); @@ -22843,14 +22806,14 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (err < 0) xenbus_suspend_cancel(); } else { -@@ -247,3 +251,4 @@ int __xen_suspend(int fast_suspend, void +@@ -244,3 +248,4 @@ int __xen_suspend(int fast_suspend, void return 0; } +#endif ---- head-2010-05-25.orig/drivers/xen/core/reboot.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/reboot.c 2010-03-24 15:12:46.000000000 +0100 -@@ -29,17 +29,12 @@ MODULE_LICENSE("Dual BSD/GPL"); +--- head-2011-03-11.orig/drivers/xen/core/reboot.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/reboot.c 2011-02-01 14:38:38.000000000 +0100 +@@ -28,17 +28,12 @@ MODULE_LICENSE("Dual BSD/GPL"); /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; @@ -22868,7 +22831,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) -@@ -69,6 +64,13 @@ static int shutdown_process(void *__unus +@@ -68,6 +63,13 @@ static int shutdown_process(void *__unus return 0; } @@ -22882,7 +22845,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); -@@ -118,6 +120,10 @@ static int xen_suspend(void *__unused) +@@ -117,6 +119,10 @@ static int xen_suspend(void *__unused) return 0; } @@ -22893,7 +22856,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; -@@ -194,8 +200,10 @@ static void shutdown_handler(struct xenb +@@ -193,8 +199,10 @@ static void shutdown_handler(struct xenb new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); @@ -22904,7 +22867,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else -@@ -247,6 +255,7 @@ static struct xenbus_watch sysrq_watch = +@@ -245,6 +253,7 @@ static struct xenbus_watch sysrq_watch = .callback = sysrq_handler }; @@ -22912,7 +22875,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); -@@ -274,6 +283,9 @@ static int setup_suspend_evtchn(void) +@@ -272,6 +281,9 @@ static int setup_suspend_evtchn(void) return 0; } @@ -22922,8 +22885,8 
@@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static int setup_shutdown_watcher(void) { ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-02-01 14:38:38.000000000 +0100 @@ -27,6 +27,7 @@ extern irqreturn_t smp_reschedule_interrupt(int, void *); @@ -22932,7 +22895,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches extern int local_setup_timer(unsigned int cpu); extern void local_teardown_timer(unsigned int cpu); -@@ -50,8 +51,10 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); +@@ -47,8 +48,10 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); static DEFINE_PER_CPU(int, resched_irq); static DEFINE_PER_CPU(int, callfunc_irq); @@ -22941,9 +22904,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static char callfunc_name[NR_CPUS][15]; +static char call1func_name[NR_CPUS][15]; - #ifdef CONFIG_X86_LOCAL_APIC - #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid)) -@@ -76,15 +79,13 @@ void __init prefill_possible_map(void) + void __init prefill_possible_map(void) + { +@@ -64,20 +67,19 @@ void __init prefill_possible_map(void) break; #endif rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); @@ -22959,10 +22922,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -{ -} - - static inline void - set_cpu_sibling_map(unsigned int cpu) - { -@@ -113,7 +114,8 @@ static int __cpuinit xen_smp_intr_init(u + static int __cpuinit xen_smp_intr_init(unsigned int cpu) { int rc; @@ -22972,7 +22932,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches sprintf(resched_name[cpu], "resched%u", cpu); rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, -@@ -137,6 +139,17 @@ static int __cpuinit xen_smp_intr_init(u +@@ -101,6 +103,17 @@ static int __cpuinit xen_smp_intr_init(u goto fail; per_cpu(callfunc_irq, cpu) = rc; @@ -22990,7 +22950,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches rc = xen_spinlock_init(cpu); if (rc < 0) goto fail; -@@ -151,6 +164,8 @@ static int __cpuinit xen_smp_intr_init(u +@@ -115,6 +128,8 @@ static int __cpuinit xen_smp_intr_init(u unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); if (per_cpu(callfunc_irq, cpu) >= 0) unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); @@ -22999,7 +22959,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches xen_spinlock_cleanup(cpu); return rc; } -@@ -163,6 +178,7 @@ static void __cpuexit xen_smp_intr_exit( +@@ -127,6 +142,7 @@ static void __cpuinit xen_smp_intr_exit( unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); @@ -23007,7 +22967,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches xen_spinlock_cleanup(cpu); } #endif -@@ -170,11 +186,7 @@ static void __cpuexit xen_smp_intr_exit( +@@ -134,11 +150,7 @@ static void __cpuinit xen_smp_intr_exit( void __cpuinit cpu_bringup(void) { cpu_init(); @@ -23019,7 +22979,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches touch_softlockup_watchdog(); preempt_disable(); local_irq_enable(); -@@ -254,9 +266,6 @@ void __init smp_prepare_cpus(unsigned in +@@ -218,9 +230,6 @@ void __init 
smp_prepare_cpus(unsigned in struct task_struct *idle; int apicid; struct vcpu_get_physid cpu_id; @@ -23029,16 +22989,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches void *gdt_addr; apicid = 0; -@@ -269,7 +278,7 @@ void __init smp_prepare_cpus(unsigned in - - current_thread_info()->cpu = 0; - -- for (cpu = 0; cpu < NR_CPUS; cpu++) { -+ for_each_possible_cpu (cpu) { - cpus_clear(per_cpu(cpu_sibling_map, cpu)); - cpus_clear(per_cpu(cpu_core_map, cpu)); - } -@@ -296,21 +305,10 @@ void __init smp_prepare_cpus(unsigned in +@@ -249,20 +258,10 @@ void __init smp_prepare_cpus(unsigned in if (IS_ERR(idle)) panic("failed fork for CPU %d", cpu); @@ -23046,8 +22997,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - gdt_descr = &cpu_gdt_descr[cpu]; - gdt_descr->address = get_zeroed_page(GFP_KERNEL); - if (unlikely(!gdt_descr->address)) { -- printk(KERN_CRIT "CPU%d failed to allocate GDT\n", -- cpu); +- pr_crit("CPU%d failed to allocate GDT\n", cpu); - continue; - } - gdt_descr->size = GDT_SIZE; @@ -23062,7 +23012,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches make_page_readonly(gdt_addr, XENFEAT_writable_descriptor_tables); apicid = cpu; -@@ -356,8 +354,8 @@ void __init smp_prepare_boot_cpu(void) +@@ -305,8 +304,8 @@ void __init smp_prepare_boot_cpu(void) { #ifdef __i386__ init_gdt(smp_processor_id()); @@ -23072,18 +23022,36 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches prefill_possible_map(); } ---- head-2010-05-25.orig/drivers/xen/core/spinlock.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/spinlock.c 2010-03-24 15:12:46.000000000 +0100 -@@ -12,6 +12,8 @@ - #include - #include - +--- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:17:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:51:35.000000000 +0100 +@@ -5,6 +5,10 @@ + * portions of this file. + */ + #define XEN_SPINLOCK_SOURCE ++#include ++ +#ifdef TICKET_SHIFT + - extern irqreturn_t smp_reschedule_interrupt(int, void *); + #include + #include + #include +@@ -55,6 +59,7 @@ void __cpuinit xen_spinlock_cleanup(unsi + WARN_ON(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)); + } - static DEFINE_PER_CPU(int, spinlock_irq) = -1; -@@ -78,13 +80,13 @@ static unsigned int spin_adjust(struct s ++#ifdef CONFIG_PM_SLEEP + void __cpuinit spinlock_resume(void) + { + unsigned int cpu; +@@ -64,6 +69,7 @@ void __cpuinit spinlock_resume(void) + xen_spinlock_init(cpu); + } + } ++#endif + + static unsigned int spin_adjust(struct spinning *spinning, + const raw_spinlock_t *lock, +@@ -86,7 +92,7 @@ static unsigned int spin_adjust(struct s unsigned int xen_spin_adjust(const raw_spinlock_t *lock, unsigned int token) { @@ -23092,14 +23060,14 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } bool xen_spin_wait(raw_spinlock_t *lock, unsigned int *ptok, - unsigned int flags) - { -- int irq = __get_cpu_var(spinlock_irq); -+ int irq = x86_read_percpu(spinlock_irq); - bool rc; - typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask; - raw_rwlock_t *rm_lock; -@@ -97,9 +99,9 @@ bool xen_spin_wait(raw_spinlock_t *lock, +@@ -99,21 +105,21 @@ bool xen_spin_wait(raw_spinlock_t *lock, + + /* If kicker interrupt not initialized yet, just spin. 
*/ + if (unlikely(!cpu_online(raw_smp_processor_id())) +- || unlikely(!__get_cpu_var(poll_evtchn))) ++ || unlikely(!x86_read_percpu(poll_evtchn))) + return false; + /* announce we're spinning */ spinning.ticket = *ptok >> TICKET_SHIFT; spinning.lock = lock; @@ -23111,7 +23079,23 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches upcall_mask = current_vcpu_info()->evtchn_upcall_mask; do { -@@ -145,11 +147,11 @@ bool xen_spin_wait(raw_spinlock_t *lock, + bool nested = false; + +- clear_evtchn(__get_cpu_var(poll_evtchn)); ++ clear_evtchn(x86_read_percpu(poll_evtchn)); + + /* + * Check again to make sure it didn't become free while +@@ -126,7 +132,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + * without rechecking the lock. + */ + if (spinning.prev) +- set_evtchn(__get_cpu_var(poll_evtchn)); ++ set_evtchn(x86_read_percpu(poll_evtchn)); + rc = true; + break; + } +@@ -153,11 +159,11 @@ bool xen_spin_wait(raw_spinlock_t *lock, bool kick, free; other->ticket = -1; @@ -23125,33 +23109,57 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (!free) token = spin_adjust( other->prev, lock, -@@ -182,7 +184,8 @@ bool xen_spin_wait(raw_spinlock_t *lock, +@@ -181,7 +187,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + + current_vcpu_info()->evtchn_upcall_mask = upcall_mask; + +- rc = !test_evtchn(__get_cpu_var(poll_evtchn)); ++ rc = !test_evtchn(x86_read_percpu(poll_evtchn)); + if (!rc) + inc_irq_stat(irq_lock_count); + } while (spinning.prev || rc); +@@ -192,11 +198,12 @@ bool xen_spin_wait(raw_spinlock_t *lock, */ /* announce we're done */ - __get_cpu_var(spinning) = other = spinning.prev; + other = spinning.prev; + x86_write_percpu(spinning, other); - rm_lock = &__get_cpu_var(spinning_rm_lock); raw_local_irq_disable(); - __raw_write_lock(rm_lock); -@@ -199,7 +202,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, - bool free; +- rm_idx = __get_cpu_var(rm_seq.idx); ++ rm_idx = x86_read_percpu(rm_seq.idx); + smp_wmb(); +- __get_cpu_var(rm_seq.idx) = rm_idx + 1; ++ x86_write_percpu(rm_seq.idx, rm_idx + 1); + mb(); + /* +@@ -211,7 +218,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + if (other->ticket + 1) + continue; lock = other->lock; - __raw_spin_lock_preamble; + __ticket_spin_lock_preamble; if (!free) token = spin_adjust(other->prev, lock, token); other->ticket = token >> TICKET_SHIFT; -@@ -244,3 +247,5 @@ void xen_spin_kick(raw_spinlock_t *lock, +@@ -220,7 +227,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + } + + rm_idx &= 1; +- while (__get_cpu_var(rm_seq.ctr[rm_idx].counter)) ++ while (x86_read_percpu(rm_seq.ctr[rm_idx].counter)) + cpu_relax(); + raw_local_irq_restore(upcall_mask); + *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); +@@ -283,3 +290,5 @@ void xen_spin_kick(raw_spinlock_t *lock, } } EXPORT_SYMBOL(xen_spin_kick); + +#endif /* TICKET_SHIFT */ ---- head-2010-05-25.orig/drivers/xen/fbfront/xenfb.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/fbfront/xenfb.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/fbfront/xenfb.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/fbfront/xenfb.c 2011-02-17 10:11:23.000000000 +0100 @@ -18,6 +18,7 @@ * frame buffer. 
*/ @@ -23197,33 +23205,53 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return 0; error_nomem: -@@ -882,4 +906,5 @@ static void __exit xenfb_cleanup(void) +@@ -884,4 +908,5 @@ static void __exit xenfb_cleanup(void) module_init(xenfb_init); module_exit(xenfb_cleanup); +MODULE_DESCRIPTION("Xen virtual framebuffer device frontend"); MODULE_LICENSE("GPL"); ---- head-2010-05-25.orig/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-25/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/fbfront/xenkbd.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/fbfront/xenkbd.c 2011-02-01 14:38:38.000000000 +0100 @@ -350,4 +350,5 @@ static void __exit xenkbd_cleanup(void) module_init(xenkbd_init); module_exit(xenkbd_cleanup); +MODULE_DESCRIPTION("Xen virtual keyboard/pointer device frontend"); MODULE_LICENSE("GPL"); ---- head-2010-05-25.orig/drivers/xen/gntdev/gntdev.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/gntdev/gntdev.c 2010-03-24 15:12:46.000000000 +0100 -@@ -418,7 +418,7 @@ static int __init gntdev_init(void) +--- head-2011-03-11.orig/drivers/xen/gntdev/gntdev.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/gntdev/gntdev.c 2011-02-01 14:38:38.000000000 +0100 +@@ -400,7 +400,7 @@ static int __init gntdev_init(void) } device = device_create(class, NULL, MKDEV(gntdev_major, 0), - GNTDEV_NAME); + NULL, GNTDEV_NAME); if (IS_ERR(device)) { - printk(KERN_ERR "Error creating gntdev device in xen_class\n"); - printk(KERN_ERR "gntdev created with major number = %d\n", ---- head-2010-05-25.orig/drivers/xen/netfront/accel.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/accel.c 2010-03-24 15:12:46.000000000 +0100 + pr_err("Error creating gntdev device in xen_class\n"); + pr_err("gntdev created, major number = %d\n", gntdev_major); +--- head-2011-03-11.orig/drivers/xen/netback/netback.c 2011-02-09 15:55:20.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netback/netback.c 2011-02-01 14:38:38.000000000 +0100 +@@ -36,7 +36,7 @@ + + #include "common.h" + #include +-#include ++#include + #include + #include + #include +@@ -115,7 +115,7 @@ static inline int netif_page_index(struc + */ + #define PKT_PROT_LEN (ETH_HLEN + VLAN_HLEN + \ + sizeof(struct iphdr) + MAX_IPOPTLEN + \ +- sizeof(struct tcphdr) + 40 /* MAX_TCP_OPTION_SPACE */) ++ sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) + + static struct pending_tx_info { + netif_tx_request_t req; +--- head-2011-03-11.orig/drivers/xen/netfront/accel.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/accel.c 2011-02-01 14:38:38.000000000 +0100 @@ -28,6 +28,7 @@ * IN THE SOFTWARE. 
*/ @@ -23232,9 +23260,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.c 2010-03-24 15:12:46.000000000 +0100 -@@ -640,7 +640,7 @@ static int network_open(struct net_devic +--- head-2011-03-11.orig/drivers/xen/netfront/netfront.c 2011-02-09 16:04:02.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/netfront.c 2011-02-01 14:38:38.000000000 +0100 +@@ -637,7 +637,7 @@ static int network_open(struct net_devic } spin_unlock_bh(&np->rx_lock); @@ -23243,8 +23271,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return 0; } ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel.h 2010-01-18 15:23:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netback/accel.h 2010-01-18 15:23:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netback/accel.h 2011-02-01 14:38:38.000000000 +0100 @@ -25,6 +25,7 @@ #ifndef NETBACK_ACCEL_H #define NETBACK_ACCEL_H @@ -23253,8 +23281,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel.h 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel.h 2011-02-01 14:38:38.000000000 +0100 @@ -35,6 +35,7 @@ #include @@ -23263,9 +23291,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include enum netfront_accel_post_status { ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:12:46.000000000 +0100 -@@ -150,7 +150,7 @@ int xenbus_watch_pathfmt(struct xenbus_d +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_client.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_client.c 2011-02-01 14:38:38.000000000 +0100 +@@ -149,7 +149,7 @@ int xenbus_watch_pathfmt(struct xenbus_d char *path; va_start(ap, pathfmt); @@ -23274,9 +23302,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches va_end(ap); if (!path) { ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_comms.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_comms.c 2010-03-24 15:12:46.000000000 +0100 -@@ -249,14 +249,11 @@ int xb_init_comms(void) +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_comms.c 2011-02-01 14:38:38.000000000 +0100 +@@ -248,14 +248,11 @@ int xb_init_comms(void) intf->rsp_cons = intf->rsp_prod; } @@ -23292,7 +23320,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { -@@ -265,6 +262,20 @@ int xb_init_comms(void) +@@ -264,6 +261,20 @@ int xb_init_comms(void) } xenbus_irq = err; @@ -23304,7 +23332,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, + 0, "xenbus", &xb_waitq); + if (err <= 
0) { -+ printk(KERN_ERR "XENBUS request irq failed %i\n", err); ++ pr_err("XENBUS request irq failed %i\n", err); + return err; + } + xenbus_irq = err; @@ -23313,8 +23341,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches return 0; } ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:38:38.000000000 +0100 @@ -36,6 +36,7 @@ __FUNCTION__, __LINE__, ##args) @@ -23323,9 +23351,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/fs/aio.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/fs/aio.c 2010-03-24 15:12:46.000000000 +0100 -@@ -1302,7 +1302,7 @@ static int make_aio_fd(struct kioctx *io +--- head-2011-03-11.orig/fs/aio.c 2011-03-11 10:58:46.000000000 +0100 ++++ head-2011-03-11/fs/aio.c 2011-03-11 10:59:16.000000000 +0100 +@@ -1307,7 +1307,7 @@ static int make_aio_fd(struct kioctx *io int fd; struct file *file; @@ -23334,8 +23362,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches if (fd < 0) return fd; ---- head-2010-05-25.orig/include/Kbuild 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/include/Kbuild 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/include/Kbuild 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/include/Kbuild 2011-02-01 14:38:38.000000000 +0100 @@ -8,5 +8,6 @@ header-y += mtd/ header-y += rdma/ header-y += video/ @@ -23343,21 +23371,21 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +header-y += xen/public/ header-y += xen/ header-y += scsi/ ---- head-2010-05-25.orig/include/asm-generic/pgtable.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/include/asm-generic/pgtable.h 2010-03-24 15:12:46.000000000 +0100 -@@ -99,10 +99,6 @@ static inline void ptep_set_wrprotect(st - } +--- head-2011-03-11.orig/include/asm-generic/pgtable.h 2011-03-11 10:54:24.000000000 +0100 ++++ head-2011-03-11/include/asm-generic/pgtable.h 2011-03-11 10:59:22.000000000 +0100 +@@ -156,10 +156,6 @@ static inline void pmdp_set_wrprotect(st + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif -#ifndef arch_change_pte_range -#define arch_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable) 0 -#endif - - #ifndef __HAVE_ARCH_PTE_SAME - #define pte_same(A,B) (pte_val(A) == pte_val(B)) - #endif ---- head-2010-05-25.orig/arch/x86/include/asm/kexec.h 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/asm/kexec.h 2010-03-24 15:12:46.000000000 +0100 + #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH + extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, +--- head-2011-03-11.orig/arch/x86/include/asm/kexec.h 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/kexec.h 2011-02-01 14:38:38.000000000 +0100 @@ -5,8 +5,21 @@ # define PA_CONTROL_PAGE 0 # define VA_CONTROL_PAGE 1 @@ -23380,8 +23408,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #else # define PA_CONTROL_PAGE 0 # define VA_CONTROL_PAGE 1 ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc.h 
2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/desc.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:38:38.000000000 +0100 @@ -31,11 +31,17 @@ extern struct desc_ptr idt_descr; extern gate_desc idt_table[]; #endif @@ -23450,8 +23478,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * This routine sets up an interrupt gate at directory privilege level 3. */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:38:38.000000000 +0100 @@ -7,7 +7,58 @@ # include "fixmap_64.h" #endif @@ -23511,8 +23539,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + return __virt_to_fix(vaddr); +} #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-02-01 14:38:38.000000000 +0100 @@ -58,10 +58,17 @@ enum fixed_addresses { #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ @@ -23630,8 +23658,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - #endif /* !__ASSEMBLY__ */ #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-02-01 14:38:38.000000000 +0100 @@ -12,6 +12,7 @@ #define _ASM_FIXMAP_64_H @@ -23731,8 +23759,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -} - #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:05:00.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:38:38.000000000 +0100 @@ -73,6 +73,9 @@ struct page *kmap_atomic_to_page(void *p #define flush_cache_kmaps() do { } while (0) @@ -23743,8 +23771,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches void clear_highpage(struct page *); static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypercall.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypercall.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypercall.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypercall.h 2011-02-01 14:38:38.000000000 +0100 @@ -332,9 +332,19 @@ static inline int __must_check HYPERVISOR_grant_table_op( 
unsigned int cmd, void *uop, unsigned int count) @@ -23766,8 +23794,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches } static inline int __must_check ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 14:38:38.000000000 +0100 @@ -35,7 +35,6 @@ #include @@ -23815,8 +23843,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/io.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:38:38.000000000 +0100 @@ -3,20 +3,140 @@ #define ARCH_HAS_IOREMAP_WC @@ -23964,7 +23992,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + #endif /* _ASM_X86_IO_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:12:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:27:18.000000000 +0100 @@ -0,0 +1,52 @@ +#ifndef _ASM_IRQ_VECTORS_H +#define _ASM_IRQ_VECTORS_H @@ -23977,8 +24005,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + +#define RESCHEDULE_VECTOR 0 +#define CALL_FUNCTION_VECTOR 1 -+#define CALL_FUNC_SINGLE_VECTOR 2 -+#define SPIN_UNLOCK_VECTOR 3 ++#define NMI_VECTOR 0x02 ++#define CALL_FUNC_SINGLE_VECTOR 3 +#define NR_IPIS 4 + +/* @@ -24018,8 +24046,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches +#define NR_IRQ_VECTORS NR_IRQS + +#endif /* _ASM_IRQ_VECTORS_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:38:38.000000000 +0100 @@ -118,7 +118,7 @@ static inline void halt(void) #ifndef CONFIG_X86_64 @@ -24084,8 +24112,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #else # define TRACE_IRQS_ON # define TRACE_IRQS_OFF ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:38:38.000000000 +0100 @@ -1,5 +1,42 @@ +#ifndef __ASM_X86_MMU_CONTEXT_H +#define __ASM_X86_MMU_CONTEXT_H @@ -24129,8 +24157,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + + +#endif /* __ASM_X86_MMU_CONTEXT_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 
2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-02-01 14:38:38.000000000 +0100 @@ -1,32 +1,6 @@ #ifndef __I386_SCHED_H #define __I386_SCHED_H @@ -24175,8 +24203,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -} while (0) - #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-02-01 14:38:38.000000000 +0100 @@ -1,23 +1,6 @@ #ifndef __X86_64_MMU_CONTEXT_H #define __X86_64_MMU_CONTEXT_H @@ -24224,8 +24252,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -} - #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pci.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:38:38.000000000 +0100 @@ -21,6 +21,8 @@ struct pci_sysdata { #endif }; @@ -24235,8 +24263,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* scan a bus after allocating a pci_sysdata for it */ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 14:38:38.000000000 +0100 @@ -7,6 +7,9 @@ #include /* for phys_to_virt and page_to_pseudophys */ @@ -24247,8 +24275,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn, ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-07 15:40:30.000000000 +0100 @@ -13,11 +13,12 @@ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_PAT 7 /* on 4KB pages */ @@ -24496,15 +24524,15 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches + return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | prot); } - static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) + static inline pte_t pfn_pte_ma(phys_addr_t page_nr, pgprot_t pgprot) { -- return __pte_ma((((phys_addr_t)page_nr << PAGE_SHIFT) | 
+- return __pte_ma(((page_nr << PAGE_SHIFT) | - pgprot_val(pgprot)) & __supported_pte_mask); + pgprotval_t prot = pgprot_val(pgprot); + + if (prot & _PAGE_PRESENT) + prot &= __supported_pte_mask; -+ return __pte_ma(((phys_addr_t)page_nr << PAGE_SHIFT) | prot); ++ return __pte_ma((page_nr << PAGE_SHIFT) | prot); } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) @@ -24661,9 +24689,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #include #include -@@ -576,10 +639,6 @@ int touch_pte_range(struct mm_struct *mm - unsigned long address, - unsigned long size); +@@ -573,10 +636,6 @@ int create_lookup_pte_addr(struct mm_str + unsigned long address, + uint64_t *ptep); -int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, unsigned long end, pgprot_t newprot, @@ -24672,8 +24700,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:38:38.000000000 +0100 @@ -14,11 +14,11 @@ #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", \ @@ -24709,8 +24737,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* Find an entry in the second-level page table.. */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:38:38.000000000 +0100 @@ -89,10 +89,10 @@ extern unsigned long pg0[]; /* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t. can temporarily clear it. 
*/ @@ -24760,8 +24788,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:38:38.000000000 +0100 @@ -23,6 +23,8 @@ extern void xen_init_pt(void); extern pud_t level3_kernel_pgt[512]; extern pud_t level3_ident_pgt[512]; @@ -24842,9 +24870,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:12:46.000000000 +0100 -@@ -134,7 +134,7 @@ extern __u32 cleared_cpu_caps[NCAPINTS +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:42:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:44:23.000000000 +0100 +@@ -144,7 +144,7 @@ extern __u32 cleared_cpu_caps[NCAPINTS #ifdef CONFIG_SMP DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); #define cpu_data(cpu) per_cpu(cpu_info, cpu) @@ -24853,7 +24881,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #else #define cpu_data(cpu) boot_cpu_data #define current_cpu_data boot_cpu_data -@@ -153,7 +153,7 @@ static inline int hlt_works(int cpu) +@@ -163,7 +163,7 @@ static inline int hlt_works(int cpu) extern void cpu_detect(struct cpuinfo_x86 *c); @@ -24862,7 +24890,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches extern void identify_boot_cpu(void); extern void identify_secondary_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); -@@ -267,15 +267,11 @@ struct tss_struct { +@@ -277,15 +277,11 @@ struct tss_struct { struct thread_struct *io_bitmap_owner; /* @@ -24879,7 +24907,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches DECLARE_PER_CPU(struct tss_struct, init_tss); -@@ -667,11 +663,36 @@ static inline void __sti_mwait(unsigned +@@ -677,11 +673,36 @@ static inline void __sti_mwait(unsigned extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); @@ -24918,19 +24946,19 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches extern void enable_sep_cpu(void); extern int sysenter_setup(void); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:12:46.000000000 +0100 -@@ -25,23 +25,16 @@ extern cpumask_t cpu_initialized; +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:38:38.000000000 +0100 +@@ -25,25 +25,18 @@ extern cpumask_t cpu_initialized; extern void (*mtrr_hook)(void); extern void zap_low_mappings(void); +extern int __cpuinit get_local_pda(int cpu); + - extern int smp_num_siblings; extern unsigned int num_processors; extern cpumask_t cpu_initialized; --#if 
defined(CONFIG_SMP) && !defined(CONFIG_XEN) + #ifndef CONFIG_XEN +-#ifdef CONFIG_SMP -extern u16 x86_cpu_to_apicid_init[]; -extern u16 x86_bios_cpu_apicid_init[]; -extern void *x86_cpu_to_apicid_early_ptr; @@ -24943,11 +24971,15 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(u16, cpu_llc_id); +-DECLARE_PER_CPU(u16, x86_cpu_to_apicid); +-DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); + - DECLARE_PER_CPU(u16, x86_cpu_to_apicid); - DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); ++DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); ++DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); + #endif -@@ -63,9 +56,9 @@ struct smp_ops { + #ifdef CONFIG_SMP +@@ -64,9 +57,9 @@ struct smp_ops { void (*smp_send_stop)(void); void (*smp_send_reschedule)(int cpu); @@ -24960,7 +24992,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches }; /* Globals due to paravirt */ -@@ -103,11 +96,14 @@ static inline void smp_send_reschedule(i +@@ -104,11 +97,14 @@ static inline void smp_send_reschedule(i smp_ops.smp_send_reschedule(cpu); } @@ -24968,18 +25000,18 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - void (*func) (void *info), void *info, - int wait) +static inline void arch_send_call_function_single_ipi(int cpu) - { -- return smp_ops.smp_call_function_mask(mask, func, info, wait); ++{ + smp_ops.send_call_func_single_ipi(cpu); +} + +static inline void arch_send_call_function_ipi(cpumask_t mask) -+{ + { +- return smp_ops.smp_call_function_mask(mask, func, info, wait); + smp_ops.send_call_func_ipi(mask); } void native_smp_prepare_boot_cpu(void); -@@ -119,23 +115,19 @@ int native_cpu_up(unsigned int cpunum); +@@ -120,23 +116,19 @@ int native_cpu_up(unsigned int cpunum); void xen_smp_send_stop(void); void xen_smp_send_reschedule(int cpu); @@ -25005,9 +25037,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -extern void prefill_possible_map(void); - void smp_store_cpu_info(int id); - #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) + #define cpu_physical_id(cpu) (cpu) -@@ -146,6 +138,14 @@ static inline int num_booting_cpus(void) +@@ -147,6 +139,14 @@ static inline int num_booting_cpus(void) } #endif /* CONFIG_SMP */ @@ -25022,7 +25054,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches extern unsigned disabled_cpus __cpuinitdata; #ifdef CONFIG_X86_32_SMP -@@ -213,12 +213,8 @@ static inline int hard_smp_processor_id( +@@ -214,12 +214,8 @@ static inline int hard_smp_processor_id( #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_HOTPLUG_CPU @@ -25035,8 +25067,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -extern void unlock_ipi_call_lock(void); #endif /* __ASSEMBLY__ */ #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:38:38.000000000 +0100 @@ -38,6 +38,8 @@ # define UNLOCK_LOCK_PREFIX #endif @@ -25314,8 +25346,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) { ---- 
head-2010-05-25.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:38:38.000000000 +0100 @@ -11,6 +11,10 @@ typedef union { unsigned int slock; struct { @@ -25335,9 +25367,22 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches }; } raw_spinlock_t; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:12:46.000000000 +0100 -@@ -137,7 +137,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system.h 2011-03-03 15:58:55.000000000 +0100 +@@ -68,10 +68,12 @@ do { \ + [next] "d" (next)); \ + } while (0) + ++#ifndef CONFIG_XEN + /* + * disable hlt during certain critical i/o operations + */ + #define HAVE_DISABLE_HLT ++#endif + #else + #define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" + #define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" +@@ -137,7 +139,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base)) #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1)) @@ -25346,7 +25391,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * Load a segment. Fall back on loading the zero -@@ -154,14 +154,14 @@ extern void load_gs_index(unsigned); +@@ -154,14 +156,14 @@ extern void load_gs_index(unsigned); "jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(1b,3b) \ @@ -25363,7 +25408,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches static inline unsigned long get_limit(unsigned long segment) { -@@ -269,6 +269,7 @@ static inline void xen_wbinvd(void) +@@ -269,6 +271,7 @@ static inline void xen_wbinvd(void) #ifdef CONFIG_X86_64 #define read_cr8() (xen_read_cr8()) #define write_cr8(x) (xen_write_cr8(x)) @@ -25371,7 +25416,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #endif /* Clear the 'TS' bit */ -@@ -287,13 +288,12 @@ static inline void clflush(volatile void +@@ -287,13 +290,12 @@ static inline void clflush(volatile void void disable_hlt(void); void enable_hlt(void); @@ -25386,8 +25431,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * Force strict CPU ordering. 
---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/xor_64.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/xor_64.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/xor_64.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/xor_64.h 2011-02-01 14:38:38.000000000 +0100 @@ -1,3 +1,6 @@ +#ifndef ASM_X86__XOR_64_H +#define ASM_X86__XOR_64_H @@ -25401,9 +25446,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse) + +#endif /* ASM_X86__XOR_64_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/irq_vectors.h 2010-03-24 15:10:37.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/irq_vectors.h 2008-09-25 13:55:32.000000000 +0200 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,126 +0,0 @@ +@@ -1,125 +0,0 @@ -/* - * This file should contain #defines for all of the interrupt vector - * numbers used by this architecture. @@ -25488,8 +25533,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches - -#define RESCHEDULE_VECTOR 0 -#define CALL_FUNCTION_VECTOR 1 --#define SPIN_UNLOCK_VECTOR 2 --#define NR_IPIS 3 +-#define NR_IPIS 2 - -/* - * The maximum number of vectors supported by i386 processors @@ -25530,7 +25574,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -#define NR_IRQ_VECTORS NR_IRQS - -#endif /* _ASM_IRQ_VECTORS_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/setup_arch_post.h 2007-06-12 13:14:13.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/setup_arch_post.h 2007-06-12 13:14:13.000000000 +0200 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -/** @@ -25596,7 +25640,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -#endif -#endif -} ---- head-2010-05-25.orig/arch/x86/include/mach-xen/setup_arch_pre.h 2007-06-12 13:14:13.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/setup_arch_pre.h 2007-06-12 13:14:13.000000000 +0200 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -/* Hook to call BIOS initialisation function */ @@ -25604,9 +25648,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches -#define ARCH_SETUP machine_specific_arch_setup(); - -static void __init machine_specific_arch_setup(void); ---- head-2010-05-25.orig/arch/x86/include/asm/traps.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/traps.h 2010-03-24 15:12:46.000000000 +0100 -@@ -37,6 +37,9 @@ asmlinkage void alignment_check(void); +--- head-2011-03-11.orig/arch/x86/include/asm/traps.h 2011-03-15 16:45:55.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/traps.h 2011-02-01 14:38:38.000000000 +0100 +@@ -38,6 +38,9 @@ asmlinkage void alignment_check(void); asmlinkage void machine_check(void); #endif /* CONFIG_X86_MCE */ asmlinkage void simd_coprocessor_error(void); @@ -25616,7 +25660,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches dotraplinkage void do_divide_error(struct pt_regs *, long); dotraplinkage void do_debug(struct pt_regs *, long); -@@ -65,6 +68,9 @@ dotraplinkage void do_machine_check(stru +@@ -66,6 +69,9 @@ dotraplinkage void do_machine_check(stru dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *, 
long); @@ -25626,9 +25670,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #endif static inline int get_si_code(unsigned long condition) ---- head-2010-05-25.orig/include/linux/page-flags.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/include/linux/page-flags.h 2010-03-24 15:12:46.000000000 +0100 -@@ -126,12 +126,12 @@ enum pageflags { +--- head-2011-03-11.orig/include/linux/page-flags.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/include/linux/page-flags.h 2011-02-01 14:38:38.000000000 +0100 +@@ -125,12 +125,12 @@ enum pageflags { PG_fscache = PG_private_2, /* page backed by cache */ /* XEN */ @@ -25643,7 +25687,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches PG_pinned = PG_owner_priv_1, PG_savepinned = PG_dirty, #endif -@@ -227,8 +227,12 @@ PAGEFLAG(Active, active) __CLEARPAGEFLAG +@@ -225,8 +225,12 @@ PAGEFLAG(Active, active) __CLEARPAGEFLAG TESTCLEARFLAG(Active, active) __PAGEFLAG(Slab, slab) PAGEFLAG(Checked, checked) /* Used by some filesystems */ @@ -25656,9 +25700,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) ---- head-2010-05-25.orig/include/xen/interface/memory.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/include/xen/interface/memory.h 2010-03-24 15:12:46.000000000 +0100 -@@ -85,6 +85,7 @@ struct xen_memory_reservation { +--- head-2011-03-11.orig/include/xen/interface/memory.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/include/xen/interface/memory.h 2011-02-01 14:38:38.000000000 +0100 +@@ -88,6 +88,7 @@ struct xen_memory_reservation { */ domid_t domid; }; @@ -25666,7 +25710,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); -@@ -170,11 +171,7 @@ struct xen_machphys_mfn_list { +@@ -173,11 +174,7 @@ struct xen_machphys_mfn_list { * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ @@ -25678,7 +25722,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches /* * Number of extents written to the above array. This will be smaller -@@ -182,6 +179,7 @@ struct xen_machphys_mfn_list { +@@ -185,6 +182,7 @@ struct xen_machphys_mfn_list { */ unsigned int nr_extents; }; @@ -25686,7 +25730,7 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); -@@ -223,6 +221,7 @@ struct xen_add_to_physmap { +@@ -226,6 +224,7 @@ struct xen_add_to_physmap { /* GPFN where the source mapping page should appear. 
*/ xen_pfn_t gpfn; }; @@ -25694,15 +25738,16 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/include/xen/public/Kbuild 2010-03-24 15:12:46.000000000 +0100 -@@ -0,0 +1,4 @@ +--- head-2011-03-11.orig/include/xen/public/Kbuild 2011-01-31 14:31:28.000000000 +0100 ++++ head-2011-03-11/include/xen/public/Kbuild 2011-02-01 14:38:38.000000000 +0100 +@@ -1 +1,5 @@ +header-y += evtchn.h +header-y += gntdev.h + header-y += iomulti.h +header-y += privcmd.h +header-y += xenbus.h ---- head-2010-05-25.orig/include/xen/public/privcmd.h 2010-01-18 15:23:12.000000000 +0100 -+++ head-2010-05-25/include/xen/public/privcmd.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/include/xen/public/privcmd.h 2010-01-18 15:23:12.000000000 +0100 ++++ head-2011-03-11/include/xen/public/privcmd.h 2011-02-01 14:38:38.000000000 +0100 @@ -35,10 +35,6 @@ #include @@ -25714,8 +25759,8 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches typedef struct privcmd_hypercall { __u64 op; ---- head-2010-05-25.orig/include/xen/public/xenbus.h 2009-05-29 10:25:53.000000000 +0200 -+++ head-2010-05-25/include/xen/public/xenbus.h 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-11.orig/include/xen/public/xenbus.h 2009-05-29 10:25:53.000000000 +0200 ++++ head-2011-03-11/include/xen/public/xenbus.h 2011-02-01 14:38:38.000000000 +0100 @@ -35,10 +35,6 @@ #include @@ -25727,20 +25772,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches typedef struct xenbus_alloc { domid_t dom; __u32 port; ---- head-2010-05-25.orig/kernel/hrtimer.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/kernel/hrtimer.c 2010-03-24 15:12:46.000000000 +0100 -@@ -1108,7 +1108,7 @@ ktime_t hrtimer_get_remaining(const stru - } - EXPORT_SYMBOL_GPL(hrtimer_get_remaining); - --#ifdef CONFIG_NO_HZ -+#if defined(CONFIG_NO_HZ) || defined(CONFIG_NO_IDLE_HZ) - /** - * hrtimer_get_next_event - get the time until next expiry event - * ---- head-2010-05-25.orig/kernel/kexec.c 2010-05-25 09:22:21.000000000 +0200 -+++ head-2010-05-25/kernel/kexec.c 2010-03-24 15:12:46.000000000 +0100 -@@ -55,7 +55,7 @@ note_buf_t __percpu *crash_notes; +--- head-2011-03-11.orig/kernel/kexec.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/kernel/kexec.c 2011-02-01 14:38:38.000000000 +0100 +@@ -49,7 +49,7 @@ note_buf_t __percpu *crash_notes; static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; u32 #if defined(CONFIG_XEN) && defined(CONFIG_X86) @@ -25749,20 +25783,9 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches #endif vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; size_t vmcoreinfo_size; ---- head-2010-05-25.orig/kernel/timer.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/kernel/timer.c 2010-04-15 10:05:03.000000000 +0200 -@@ -1044,7 +1044,7 @@ static inline void __run_timers(struct t - spin_unlock_irq(&base->lock); - } - --#ifdef CONFIG_NO_HZ -+#if defined(CONFIG_NO_HZ) || defined(CONFIG_NO_IDLE_HZ) - /* - * Find out when the next timer event is due to happen. This - * is used on S/390 to stop all activity when a CPU is idle. 
---- head-2010-05-25.orig/lib/swiotlb-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/lib/swiotlb-xen.c 2010-03-24 15:12:46.000000000 +0100 -@@ -750,7 +750,7 @@ swiotlb_sync_sg_for_device(struct device +--- head-2011-03-11.orig/lib/swiotlb-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-11/lib/swiotlb-xen.c 2011-02-01 14:38:38.000000000 +0100 +@@ -788,7 +788,7 @@ swiotlb_sync_sg_for_device(struct device } int @@ -25771,14 +25794,14 @@ Automatically created from "patches.kernel.org/patch-2.6.27" by xen-port-patches { return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); } ---- head-2010-05-25.orig/mm/mprotect.c 2010-04-15 09:52:51.000000000 +0200 -+++ head-2010-05-25/mm/mprotect.c 2010-04-15 10:05:09.000000000 +0200 -@@ -90,8 +90,6 @@ static inline void change_pmd_range(stru - next = pmd_addr_end(addr, end); +--- head-2011-03-11.orig/mm/mprotect.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-11/mm/mprotect.c 2011-02-01 14:38:38.000000000 +0100 +@@ -97,8 +97,6 @@ static inline void change_pmd_range(stru + } if (pmd_none_or_clear_bad(pmd)) continue; - if (arch_change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable)) - continue; - change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable); + change_pte_range(vma->vm_mm, pmd, addr, next, newprot, + dirty_accountable); } while (pmd++, addr = next, addr != end); - } diff --git a/patches.xen/xen3-patch-2.6.28 b/patches.xen/xen3-patch-2.6.28 index d80b6fe..0230c87 100644 --- a/patches.xen/xen3-patch-2.6.28 +++ b/patches.xen/xen3-patch-2.6.28 @@ -7,9 +7,9 @@ Patch-mainline: 2.6.28 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches.py ---- head-2010-04-29.orig/arch/ia64/Kconfig 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/ia64/Kconfig 2010-03-24 15:14:47.000000000 +0100 -@@ -231,7 +231,7 @@ config IA64_HP_SIM +--- head-2011-03-17.orig/arch/ia64/Kconfig 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/ia64/Kconfig 2011-02-01 14:39:24.000000000 +0100 +@@ -230,7 +230,7 @@ config IA64_HP_SIM config IA64_XEN_GUEST bool "Xen guest" select SWIOTLB @@ -18,8 +18,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches help Build a kernel that runs on Xen guest domain. At this moment only 16KB page size in supported. 
---- head-2010-04-29.orig/arch/ia64/Makefile 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/ia64/Makefile 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/ia64/Makefile 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/ia64/Makefile 2011-02-01 14:39:24.000000000 +0100 @@ -55,7 +55,7 @@ core-$(CONFIG_IA64_XEN_GUEST) += arch/ia core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ @@ -27,10 +27,10 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -core-$(CONFIG_XEN) += arch/ia64/xen/ +core-$(CONFIG_PARAVIRT_XEN) += arch/ia64/xen/ - drivers-$(CONFIG_KDB) += arch/$(ARCH)/kdb/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ ---- head-2010-04-29.orig/arch/ia64/include/asm/xen/hypervisor.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/ia64/include/asm/xen/hypervisor.h 2010-03-24 15:14:47.000000000 +0100 + drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ +--- head-2011-03-17.orig/arch/ia64/include/asm/xen/hypervisor.h 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/ia64/include/asm/xen/hypervisor.h 2011-02-01 14:39:24.000000000 +0100 @@ -40,7 +40,7 @@ #include #include @@ -40,8 +40,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; ---- head-2010-04-29.orig/arch/ia64/include/asm/xen/interface.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/ia64/include/asm/xen/interface.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/ia64/include/asm/xen/interface.h 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/ia64/include/asm/xen/interface.h 2011-02-01 14:39:24.000000000 +0100 @@ -56,29 +56,21 @@ #ifndef _ASM_IA64_XEN_INTERFACE_H #define _ASM_IA64_XEN_INTERFACE_H @@ -88,8 +88,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #ifndef __ASSEMBLY__ ---- head-2010-04-29.orig/arch/ia64/kernel/asm-offsets.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/ia64/kernel/asm-offsets.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/ia64/kernel/asm-offsets.c 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/ia64/kernel/asm-offsets.c 2011-02-01 14:39:24.000000000 +0100 @@ -290,7 +290,7 @@ void foo(void) DEFINE(IA64_ITC_LASTCYCLE_OFFSET, offsetof (struct itc_jitter_data_t, itc_lastcycle)); @@ -99,8 +99,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches BLANK(); DEFINE(XEN_NATIVE_ASM, XEN_NATIVE); ---- head-2010-04-29.orig/arch/ia64/xen/Kconfig 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/ia64/xen/Kconfig 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/ia64/xen/Kconfig 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/ia64/xen/Kconfig 2011-02-02 15:36:46.000000000 +0100 @@ -2,7 +2,7 @@ # This Kconfig describes xen/ia64 options # @@ -110,20 +110,20 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches bool "Xen hypervisor support" default y depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL -@@ -17,9 +17,9 @@ config XEN +@@ -16,10 +16,6 @@ config XEN + Enable Xen hypervisor support. Resulting kernel runs both as a guest OS on Xen and natively on hardware. 
- config XEN_XENCOMM +-config XEN_XENCOMM - depends on XEN -+ depends on PARAVIRT_XEN - bool - +- bool +- config NO_IDLE_HZ - depends on XEN + depends on PARAVIRT_XEN bool ---- head-2010-04-29.orig/arch/ia64/xen/xcom_hcall.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/ia64/xen/xcom_hcall.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/ia64/xen/xcom_hcall.c 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/ia64/xen/xcom_hcall.c 2011-02-01 14:39:24.000000000 +0100 @@ -343,7 +343,7 @@ xencommize_memory_reservation(struct xen int xencomm_hypercall_memory_op(unsigned int cmd, void *arg) @@ -133,9 +133,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches struct xen_memory_reservation *xmr = NULL; int rc; struct xencomm_handle *desc; ---- head-2010-04-29.orig/arch/x86/Kconfig 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/Kconfig 2010-03-24 15:14:47.000000000 +0100 -@@ -1093,7 +1093,7 @@ config MICROCODE +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-02-01 14:39:24.000000000 +0100 +@@ -1028,7 +1028,7 @@ config MICROCODE config MICROCODE_INTEL bool "Intel microcode patch loading support" @@ -144,7 +144,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches default MICROCODE select FW_LOADER ---help--- -@@ -1106,7 +1106,7 @@ config MICROCODE_INTEL +@@ -1041,7 +1041,7 @@ config MICROCODE_INTEL config MICROCODE_AMD bool "AMD microcode patch loading support" @@ -153,7 +153,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches select FW_LOADER ---help--- If you select this option, microcode patch loading support for AMD -@@ -1404,6 +1404,7 @@ config HIGHPTE +@@ -1342,6 +1342,7 @@ config HIGHPTE config X86_CHECK_BIOS_CORRUPTION bool "Check for low memory corruption" @@ -161,26 +161,26 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches ---help--- Periodically check for memory corruption in low memory, which is suspected to be caused by BIOS. Even when enabled in the -@@ -1434,6 +1435,7 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_C +@@ -1372,6 +1373,7 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_C - config X86_RESERVE_LOW_64K - bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen" + config X86_RESERVE_LOW + int "Amount of low memory, in kilobytes, to reserve for the BIOS" + depends on !XEN - default y + default 64 + range 4 640 ---help--- - Reserve the first 64K of physical RAM on BIOSes that are known -@@ -1550,8 +1552,8 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAU +@@ -1495,8 +1497,8 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAU + config X86_PAT - bool - default y -- prompt "x86 PAT support" if EMBEDDED + def_bool y +- prompt "x86 PAT support" if EXPERT - depends on MTRR -+ prompt "x86 PAT support" if EMBEDDED || XEN_UNPRIVILEGED_GUEST ++ prompt "x86 PAT support" if EXPERT || XEN_UNPRIVILEGED_GUEST + depends on MTRR || (XEN_UNPRIVILEGED_GUEST && XEN_PCIDEV_FRONTEND) ---help--- Use PAT attributes to setup page level cache control. -@@ -2129,7 +2131,7 @@ config DMAR_FLOPPY_WA +@@ -2091,7 +2093,7 @@ config DMAR_FLOPPY_WA config INTR_REMAP bool "Support for Interrupt Remapping (EXPERIMENTAL)" @@ -189,36 +189,38 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches ---help--- Supports Interrupt remapping for IO-APIC and MSI devices. 
To use x2apic mode in the CPU's which support x2APIC enhancements or ---- head-2010-04-29.orig/arch/x86/Kconfig.cpu 2010-03-24 14:36:44.000000000 +0100 -+++ head-2010-04-29/arch/x86/Kconfig.cpu 2010-03-24 15:14:47.000000000 +0100 -@@ -493,7 +493,7 @@ config CPU_SUP_TRANSMETA_32 - config CPU_SUP_UMC_32 +--- head-2011-03-17.orig/arch/x86/Kconfig.cpu 2011-03-03 17:48:58.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig.cpu 2011-03-03 16:02:15.000000000 +0100 +@@ -446,7 +446,7 @@ config CPU_SUP_INTEL + config CPU_SUP_CYRIX_32 default y - bool "Support UMC processors" if PROCESSOR_SELECT + bool "Support Cyrix processors" if PROCESSOR_SELECT - depends on !64BIT + depends on !64BIT && !XEN ---help--- - This enables detection, tunings and quirks for UMC processors - -@@ -506,13 +506,13 @@ config CPU_SUP_UMC_32 + This enables detection, tunings and quirks for Cyrix processors - config X86_DS - def_bool X86_PTRACE_BTS -- depends on X86_DEBUGCTLMSR -+ depends on X86_DEBUGCTLMSR && !XEN - select HAVE_HW_BRANCH_TRACER +@@ -486,7 +486,7 @@ config CPU_SUP_CENTAUR + config CPU_SUP_TRANSMETA_32 + default y + bool "Support Transmeta processors" if PROCESSOR_SELECT +- depends on !64BIT ++ depends on !64BIT && !XEN + ---help--- + This enables detection, tunings and quirks for Transmeta processors - config X86_PTRACE_BTS - bool "Branch Trace Store" +@@ -500,7 +500,7 @@ config CPU_SUP_TRANSMETA_32 + config CPU_SUP_UMC_32 default y -- depends on X86_DEBUGCTLMSR -+ depends on X86_DEBUGCTLMSR && !XEN - depends on BROKEN + bool "Support UMC processors" if PROCESSOR_SELECT +- depends on !64BIT ++ depends on !64BIT && !XEN ---help--- - This adds a ptrace interface to the hardware's branch trace store. ---- head-2010-04-29.orig/arch/x86/Makefile 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/Makefile 2010-03-24 15:14:47.000000000 +0100 -@@ -112,7 +112,7 @@ endif + This enables detection, tunings and quirks for UMC processors + +--- head-2011-03-17.orig/arch/x86/Makefile 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/Makefile 2011-02-01 14:39:24.000000000 +0100 +@@ -117,7 +117,7 @@ endif KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) # Xen subarch support @@ -227,7 +229,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches mcore-$(CONFIG_XEN) := arch/x86/mach-xen/ KBUILD_CFLAGS += $(mflags-y) -@@ -157,7 +157,7 @@ PHONY += bzImage vmlinuz $(BOOT_TARGETS) +@@ -159,7 +159,7 @@ PHONY += bzImage vmlinuz $(BOOT_TARGETS) ifdef CONFIG_XEN KBUILD_CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \ @@ -236,8 +238,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches ifdef CONFIG_X86_64 LDFLAGS_vmlinux := -e startup_64 ---- head-2010-04-29.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:39:24.000000000 +0100 @@ -39,11 +39,11 @@ .endm @@ -346,9 +348,21 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches .quad sys_getgroups16 /* 80 */ .quad sys_setgroups16 .quad sys32_old_select ---- head-2010-04-29.orig/arch/x86/include/asm/cpufeature.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/cpufeature.h 2010-03-24 15:14:47.000000000 +0100 -@@ -251,7 +251,11 @@ 
extern const char * const x86_power_flag +--- head-2011-03-17.orig/arch/x86/include/asm/agp.h 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/agp.h 2011-02-01 14:39:24.000000000 +0100 +@@ -15,6 +15,9 @@ + #define map_page_into_agp(page) set_pages_uc(page, 1) + #define unmap_page_from_agp(page) set_pages_wb(page, 1) + ++#define map_pages_into_agp set_pages_array_uc ++#define unmap_pages_from_agp set_pages_array_wb ++ + /* + * Could use CLFLUSH here if the cpu supports it. But then it would + * need to be called for each cacheline of the whole page so it may +--- head-2011-03-17.orig/arch/x86/include/asm/cpufeature.h 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/cpufeature.h 2011-02-01 14:39:24.000000000 +0100 +@@ -276,7 +276,11 @@ extern const char * const x86_power_flag #define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) @@ -360,9 +374,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) ---- head-2010-04-29.orig/arch/x86/include/asm/hw_irq.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/hw_irq.h 2010-03-24 15:14:47.000000000 +0100 -@@ -119,6 +119,7 @@ extern void smp_error_interrupt(struct p +--- head-2011-03-17.orig/arch/x86/include/asm/hw_irq.h 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/hw_irq.h 2011-02-01 14:39:24.000000000 +0100 +@@ -128,6 +128,7 @@ extern void smp_error_interrupt(struct p extern asmlinkage void smp_irq_move_cleanup_interrupt(void); #endif #ifdef CONFIG_SMP @@ -370,7 +384,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches extern void smp_reschedule_interrupt(struct pt_regs *); extern void smp_call_function_interrupt(struct pt_regs *); extern void smp_call_function_single_interrupt(struct pt_regs *); -@@ -127,6 +128,12 @@ extern void smp_invalidate_interrupt(str +@@ -136,6 +137,12 @@ extern void smp_invalidate_interrupt(str #else extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *); #endif @@ -383,12 +397,12 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #endif extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); ---- head-2010-04-29.orig/arch/x86/include/asm/segment.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/segment.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/segment.h 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/segment.h 2011-02-01 14:39:24.000000000 +0100 @@ -186,7 +186,9 @@ - #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) - #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3) - #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3) + #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) + #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) + #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) -#ifndef CONFIG_PARAVIRT +#if defined(CONFIG_X86_XEN) +#define get_kernel_rpl() (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) @@ -396,16 +410,40 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #define get_kernel_rpl() 0 #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:10:37.000000000 +0100 -+++ 
head-2010-04-29/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:14:47.000000000 +0100 -@@ -40,4 +40,4 @@ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/agp.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/agp.h 2011-02-01 14:39:24.000000000 +0100 +@@ -21,6 +21,23 @@ + /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \ + set_pages_wb(page, 1)) + ++#define map_pages_into_agp(pages, nr) ({ \ ++ __typeof__(nr) n__; \ ++ int rc__ = 0; \ ++ for (n__ = 0; n__ < (nr) && !rc__; ++n__) \ ++ rc__ = xen_create_contiguous_region( \ ++ (unsigned long)page_address((pages)[n__]), 0, 32); \ ++ rc__ ?: set_pages_array_uc(pages, nr); \ ++}) ++#define unmap_pages_from_agp(pages, nr) ({ \ ++ __typeof__(nr) n__; \ ++ for (n__ = 0; n__ < nr; ++n__) \ ++ xen_destroy_contiguous_region( \ ++ (unsigned long)page_address((pages)[n__]), 0); \ ++ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \ ++ set_pages_array_wb(pages, nr); \ ++}) ++ + /* + * Could use CLFLUSH here if the cpu supports it. But then it would + * need to be called for each cacheline of the whole page so it may +@@ -40,4 +57,4 @@ #define free_gatt_pages(table, order) \ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table)) -#endif +#endif /* _ASM_X86_AGP_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _ASM_DESC_H_ -#define _ASM_DESC_H_ @@ -481,8 +519,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_DESC_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/dma-mapping.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/dma-mapping.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/dma-mapping.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/dma-mapping.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,17 +1,12 @@ -#ifndef _ASM_DMA_MAPPING_H_ +#ifndef _ASM_X86_DMA_MAPPING_H_ @@ -507,8 +545,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* _ASM_DMA_MAPPING_H_ */ +#endif /* _ASM_X86_DMA_MAPPING_H_ */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _ASM_FIXMAP_H -#define _ASM_FIXMAP_H @@ -534,8 +572,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } -#endif +#endif /* _ASM_X86_FIXMAP_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap_32.h 
2011-02-01 14:39:24.000000000 +0100 @@ -10,8 +10,8 @@ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ @@ -588,8 +626,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #endif /* !__ASSEMBLY__ */ -#endif +#endif /* _ASM_X86_FIXMAP_32_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-02-01 14:39:24.000000000 +0100 @@ -8,8 +8,8 @@ * Copyright (C) 1998 Ingo Molnar */ @@ -654,8 +692,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_FIXMAP_64_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:05:00.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:05:09.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:39:24.000000000 +0100 @@ -15,8 +15,8 @@ * Copyright (C) 1999 Ingo Molnar */ @@ -689,8 +727,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* _ASM_HIGHMEM_H */ +#endif /* _ASM_X86_HIGHMEM_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:39:24.000000000 +0100 @@ -5,20 +5,6 @@ #include @@ -750,8 +788,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:27:18.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:31:50.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _ASM_IRQ_VECTORS_H -#define _ASM_IRQ_VECTORS_H @@ -768,8 +806,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* _ASM_IRQ_VECTORS_H */ +#endif /* _ASM_X86_IRQ_VECTORS_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:39:24.000000000 +0100 @@ -157,23 +157,6 @@ static inline int raw_irqs_disabled_flag raw_irqs_disabled_flags(flags); \ }) @@ -794,8 +832,23 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #else #ifdef CONFIG_X86_64 ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 
15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/maddr.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/maddr.h 2011-02-01 14:39:24.000000000 +0100 +@@ -59,10 +59,10 @@ static inline unsigned long mfn_to_pfn(u + + /* The array access can fail (e.g., device space beyond end of RAM). */ + asm ( +- "1: "_ASM_MOV_UL" %1,%0\n" ++ "1: "_ASM_MOV" %1,%0\n" + "2:\n" + ".section .fixup,\"ax\"\n" +- "3: "_ASM_MOV_UL" %2,%0\n" ++ "3: "_ASM_MOV" %2,%0\n" + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b,3b) +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef __ASM_X86_MMU_CONTEXT_H -#define __ASM_X86_MMU_CONTEXT_H @@ -810,8 +863,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* __ASM_X86_MMU_CONTEXT_H */ +#endif /* _ASM_X86_MMU_CONTEXT_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef __I386_SCHED_H -#define __I386_SCHED_H @@ -826,8 +879,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_MMU_CONTEXT_32_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef __X86_64_MMU_CONTEXT_H -#define __X86_64_MMU_CONTEXT_H @@ -842,8 +895,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_MMU_CONTEXT_64_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef __x86_PCI_H -#define __x86_PCI_H @@ -867,16 +920,16 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_PCI_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 14:39:24.000000000 +0100 @@ -149,4 +149,4 @@ extern void __pud_free_tlb(struct mmu_ga #endif /* PAGETABLE_LEVELS > 3 */ #endif /* PAGETABLE_LEVELS > 2 */ -#endif /* _ASM_X86_PGALLOC_H */ +#endif /* _ASM_X86_PGALLOC_H */ ---- 
head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-07 15:40:30.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-07 15:41:11.000000000 +0100 @@ -14,11 +14,11 @@ #define _PAGE_BIT_PAT 7 /* on 4KB pages */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ @@ -989,14 +1042,14 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #define set_pte(ptep, pte) xen_set_pte(ptep, pte) #define set_pte_at(mm, addr, ptep, pte) xen_set_pte_at(mm, addr, ptep, pte) -@@ -641,4 +680,4 @@ int touch_pte_range(struct mm_struct *mm +@@ -638,4 +677,4 @@ int create_lookup_pte_addr(struct mm_str #endif /* __ASSEMBLY__ */ -#endif /* _ASM_X86_PGTABLE_H */ +#endif /* _ASM_X86_PGTABLE_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _I386_PGTABLE_3LEVEL_H -#define _I386_PGTABLE_3LEVEL_H @@ -1049,8 +1102,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* _I386_PGTABLE_3LEVEL_H */ +#endif /* _ASM_X86_PGTABLE_3LEVEL_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _I386_PGTABLE_3LEVEL_DEFS_H -#define _I386_PGTABLE_3LEVEL_DEFS_H @@ -1065,8 +1118,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ +#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _I386_PGTABLE_H -#define _I386_PGTABLE_H @@ -1108,8 +1161,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* _I386_PGTABLE_H */ +#endif /* _ASM_X86_PGTABLE_32_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _X86_64_PGTABLE_H -#define _X86_64_PGTABLE_H @@ -1157,8 +1210,8 @@ Automatically created from 
"patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* _X86_64_PGTABLE_H */ +#endif /* _ASM_X86_PGTABLE_64_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:44:23.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:45:14.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef __ASM_X86_PROCESSOR_H -#define __ASM_X86_PROCESSOR_H @@ -1175,20 +1228,31 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #include #include -@@ -76,11 +77,11 @@ struct cpuinfo_x86 { +@@ -72,21 +73,21 @@ struct cpuinfo_x86 { + char rfu; + char fdiv_bug; + char f00f_bug; +-#endif + char coma_bug; + char pad0; ++#endif + #else + /* Number of 4K pages in DTLB/ITLB combined(in pages): */ int x86_tlbsize; __u8 x86_virt_bits; __u8 x86_phys_bits; +#endif + #ifndef CONFIG_XEN /* CPUID returned core id bits: */ __u8 x86_coreid_bits; + #endif /* Max extended CPUID function supported: */ __u32 extended_cpuid_level; -#endif /* Maximum supported CPUID level, -1=no CPUID: */ int cpuid_level; __u32 x86_capability[NCAPINTS]; -@@ -140,6 +141,8 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_ +@@ -150,6 +151,8 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_ #define current_cpu_data boot_cpu_data #endif @@ -1196,8 +1260,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + static inline int hlt_works(int cpu) { - #ifdef CONFIG_X86_32 -@@ -153,6 +156,8 @@ static inline int hlt_works(int cpu) + #if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) +@@ -163,6 +166,8 @@ static inline int hlt_works(int cpu) extern void cpu_detect(struct cpuinfo_x86 *c); @@ -1206,7 +1270,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches extern void early_cpu_init(void); extern void identify_boot_cpu(void); extern void identify_secondary_cpu(struct cpuinfo_x86 *); -@@ -161,11 +166,8 @@ extern void init_scattered_cpuid_feature +@@ -171,11 +176,8 @@ extern void init_scattered_cpuid_feature extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); extern unsigned short num_cache_leaves; @@ -1219,7 +1283,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static inline void xen_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) -@@ -327,7 +329,12 @@ struct i387_fxsave_struct { +@@ -337,7 +339,12 @@ struct i387_fxsave_struct { /* 16*16 bytes for each XMM-reg = 256 bytes: */ u32 xmm_space[64]; @@ -1233,7 +1297,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } __attribute__((aligned(16))); -@@ -351,10 +358,23 @@ struct i387_soft_struct { +@@ -361,10 +368,23 @@ struct i387_soft_struct { u32 entry_eip; }; @@ -1257,7 +1321,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches }; #if defined(CONFIG_X86_64) && !defined(CONFIG_X86_NO_TSS) -@@ -412,9 +432,14 @@ struct thread_struct { +@@ -422,9 +442,14 @@ struct thread_struct { unsigned io_bitmap_max; /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. 
*/ unsigned long debugctlmsr; @@ -1275,7 +1339,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches }; static inline unsigned long xen_get_debugreg(int regno) -@@ -502,41 +527,6 @@ static inline void clear_in_cr4(unsigned +@@ -512,41 +537,6 @@ static inline void clear_in_cr4(unsigned write_cr4(cr4); } @@ -1317,14 +1381,14 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches typedef struct { unsigned long seg; } mm_segment_t; -@@ -884,4 +874,4 @@ extern void start_thread(struct pt_regs +@@ -894,4 +884,4 @@ extern void start_thread(struct pt_regs extern int get_tsc_mode(unsigned long adr); extern int set_tsc_mode(unsigned int val); -#endif +#endif /* _ASM_X86_PROCESSOR_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _ASM_X86_SMP_H_ -#define _ASM_X86_SMP_H_ @@ -1333,17 +1397,20 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #ifndef __ASSEMBLY__ #include #include -@@ -34,6 +34,9 @@ extern cpumask_t cpu_initialized; +@@ -34,7 +34,12 @@ extern cpumask_t cpu_initialized; DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(u16, cpu_llc_id); ++#endif +#ifdef CONFIG_X86_32 +DECLARE_PER_CPU(int, cpu_number); +#endif - DECLARE_PER_CPU(u16, x86_cpu_to_apicid); - DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); -@@ -51,12 +54,16 @@ extern struct { ++#ifndef CONFIG_XEN + DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); + DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); + #endif +@@ -52,12 +57,16 @@ extern struct { struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); @@ -1361,7 +1428,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches void (*send_call_func_ipi)(cpumask_t mask); void (*send_call_func_single_ipi)(int cpu); }; -@@ -91,6 +98,21 @@ static inline int __cpu_up(unsigned int +@@ -92,6 +101,21 @@ static inline int __cpu_up(unsigned int return smp_ops.cpu_up(cpu); } @@ -1383,7 +1450,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static inline void smp_send_reschedule(int cpu) { smp_ops.smp_send_reschedule(cpu); -@@ -106,13 +128,20 @@ static inline void arch_send_call_functi +@@ -107,13 +131,20 @@ static inline void arch_send_call_functi smp_ops.send_call_func_ipi(mask); } @@ -1404,7 +1471,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches void xen_smp_send_stop(void); void xen_smp_send_reschedule(int cpu); void xen_send_call_func_ipi(cpumask_t mask); -@@ -123,10 +152,11 @@ void xen_send_call_func_single_ipi(int c +@@ -124,10 +155,11 @@ void xen_send_call_func_single_ipi(int c #define arch_send_call_function_single_ipi xen_send_call_func_single_ipi #define arch_send_call_function_ipi xen_send_call_func_ipi @@ -1417,8 +1484,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +extern void prefill_possible_map(void); void smp_store_cpu_info(int id); - #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -@@ -136,15 +166,11 @@ static inline int num_booting_cpus(void) + #define cpu_physical_id(cpu) (cpu) +@@ -137,15 
+169,11 @@ static inline int num_booting_cpus(void) { return cpus_weight(cpu_callout_map); } @@ -1435,7 +1502,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches extern unsigned disabled_cpus __cpuinitdata; -@@ -154,7 +180,6 @@ extern unsigned disabled_cpus __cpuinitd +@@ -155,7 +183,6 @@ extern unsigned disabled_cpus __cpuinitd * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. */ @@ -1443,7 +1510,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) #define safe_smp_processor_id() smp_processor_id() -@@ -177,30 +202,33 @@ DECLARE_PER_CPU(int, cpu_number); +@@ -178,30 +205,33 @@ DECLARE_PER_CPU(int, cpu_number); #ifdef CONFIG_X86_LOCAL_APIC @@ -1484,7 +1551,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } # endif /* APIC_DEFINITION */ -@@ -212,9 +240,11 @@ static inline int hard_smp_processor_id( +@@ -213,9 +243,11 @@ static inline int hard_smp_processor_id( #endif /* CONFIG_X86_LOCAL_APIC */ @@ -1499,8 +1566,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #endif /* __ASSEMBLY__ */ -#endif +#endif /* _ASM_X86_SMP_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _X86_SPINLOCK_H_ -#define _X86_SPINLOCK_H_ @@ -1515,8 +1582,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_SPINLOCK_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef __ASM_SPINLOCK_TYPES_H -#define __ASM_SPINLOCK_TYPES_H @@ -1531,8 +1598,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_SPINLOCK_TYPES_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 15:58:55.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:01:23.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef _ASM_X86_SYSTEM_H_ -#define _ASM_X86_SYSTEM_H_ @@ -1552,15 +1619,15 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + "memory"); \ } while (0) - /* -@@ -403,4 +406,4 @@ static inline void rdtsc_barrier(void) + #ifndef CONFIG_XEN +@@ -405,4 +408,4 @@ static inline void rdtsc_barrier(void) alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); } -#endif +#endif /* _ASM_X86_SYSTEM_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:10:37.000000000 +0100 -+++ 
head-2010-04-29/arch/x86/include/mach-xen/asm/system_64.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system_64.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef __ASM_SYSTEM_H -#define __ASM_SYSTEM_H @@ -1575,8 +1642,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_SYSTEM_64_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:39:24.000000000 +0100 @@ -63,6 +63,10 @@ static inline void flush_tlb_range(struc __flush_tlb(); } @@ -1601,8 +1668,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #endif #endif /* SMP */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/vga.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/vga.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/vga.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/vga.h 2011-02-01 14:39:24.000000000 +0100 @@ -4,8 +4,8 @@ * (c) 1998 Martin Mares */ @@ -1620,8 +1687,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif +#endif /* _ASM_X86_VGA_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/xor.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/xor.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/xor.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/xor.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ #ifdef CONFIG_X86_32 -# include "../../xor_32.h" @@ -1629,8 +1696,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #else # include "xor_64.h" #endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/xor_64.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/xor_64.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/xor_64.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/xor_64.h 2011-02-01 14:39:24.000000000 +0100 @@ -1,5 +1,5 @@ -#ifndef ASM_X86__XOR_64_H -#define ASM_X86__XOR_64_H @@ -1645,9 +1712,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* ASM_X86__XOR_64_H */ +#endif /* _ASM_X86_XOR_64_H */ ---- head-2010-04-29.orig/arch/x86/kernel/Makefile 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/Makefile 2010-03-24 15:14:47.000000000 +0100 -@@ -138,7 +138,7 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-03-17.orig/arch/x86/kernel/Makefile 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/Makefile 2011-02-01 14:39:24.000000000 +0100 +@@ -129,7 +129,7 @@ ifeq ($(CONFIG_X86_64),y) time_64-$(CONFIG_XEN) += time_32.o endif @@ -1658,8 +1725,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + hpet.o i8253.o i8259.o irqinit_$(BITS).o pci-swiotlb_64.o reboot.o \ + smpboot.o 
tlb_$(BITS).o tsc.o tsc_sync.o uv_%.o vsmp_64.o disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += probe_roms_32.o ---- head-2010-04-29.orig/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -10,6 +10,7 @@ #include #include @@ -1688,8 +1755,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #endif initial_code = (unsigned long)wakeup_long64; saved_magic = 0x123456789abcdef0; ---- head-2010-04-29.orig/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/apic/apic-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/apic-xen.c 2011-02-24 15:49:32.000000000 +0100 @@ -1,60 +1,13 @@ /* - * Local APIC handling, local APIC timers @@ -1754,7 +1821,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * Debug level, exported for io_apic.c -@@ -64,37 +17,44 @@ unsigned int apic_verbosity; +@@ -64,21 +17,29 @@ unsigned int apic_verbosity; /* Have we found an MP table */ int smp_found_config; @@ -1796,13 +1863,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches int setup_profiling_timer(unsigned int multiplier) { - return -EINVAL; - } - --/* -- * This initializes the IO-APIC and APIC hardware if this is -- * a UP kernel. -- */ +@@ -93,9 +54,12 @@ int setup_profiling_timer(unsigned int m int __init APIC_init_uniprocessor(void) { #ifdef CONFIG_X86_IO_APIC @@ -1818,9 +1879,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #endif return 0; ---- head-2010-04-29.orig/arch/x86/kernel/cpu/addon_cpuid_features.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/cpu/addon_cpuid_features.c 2010-03-24 15:14:47.000000000 +0100 -@@ -74,7 +74,7 @@ void __cpuinit init_scattered_cpuid_feat +--- head-2011-03-17.orig/arch/x86/kernel/cpu/topology.c 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/topology.c 2011-02-01 14:39:24.000000000 +0100 +@@ -28,7 +28,7 @@ */ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) { @@ -1829,8 +1890,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches unsigned int eax, ebx, ecx, edx, sub_index; unsigned int ht_mask_width, core_plus_mask_width; unsigned int core_select_mask, core_level_siblings; ---- head-2010-04-29.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:40:32.000000000 +0100 @@ -1,33 +1,73 @@ #include +#include @@ -1910,7 +1971,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, -@@ -63,17 +103,168 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page +@@ -63,17 +103,171 @@ DEFINE_PER_CPU(struct gdt_page, 
gdt_page #endif [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, } }; @@ -1921,9 +1982,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - +#ifdef CONFIG_X86_32 static int cachesize_override __cpuinitdata = -1; - static int disable_x86_serial_nr __cpuinitdata = 1; - --struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; ++ +static int __init cachesize_setup(char *str) +{ + get_option(&str, &cachesize_override); @@ -1945,7 +2004,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + return 1; +} +__setup("nosep", x86_sep_setup); ++#endif + ++#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) +/* Standard macro to see if a specific flag is changeable */ +static inline int flag_is_changeable_p(u32 flag) +{ @@ -1980,6 +2041,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + return flag_is_changeable_p(X86_EFLAGS_ID); +} + + static int disable_x86_serial_nr __cpuinitdata = 1; + +-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; +static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { @@ -2082,7 +2146,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* Not much we can do here... */ /* Check if at least it has cpuid */ if (c->cpuid_level == -1) { -@@ -83,28 +274,22 @@ static void __cpuinit default_init(struc +@@ -83,28 +277,22 @@ static void __cpuinit default_init(struc else if (c->x86 == 3) strcpy(c->x86_model_id, "386"); } @@ -2116,7 +2180,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches v = (unsigned int *) c->x86_model_id; cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); -@@ -123,30 +308,34 @@ int __cpuinit get_model_name(struct cpui +@@ -123,30 +311,34 @@ int __cpuinit get_model_name(struct cpui while (q <= &c->x86_model_id[48]) *q++ = '\0'; /* Zero-pad the rest */ } @@ -2160,7 +2224,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* do processor-specific cache resizing */ if (this_cpu->c_size_cache) l2size = this_cpu->c_size_cache(c, l2size); -@@ -157,116 +346,106 @@ void __cpuinit display_cacheinfo(struct +@@ -157,116 +349,106 @@ void __cpuinit display_cacheinfo(struct if (l2size == 0) return; /* Again, no L2 cache is possible */ @@ -2347,7 +2411,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches { /* Get vendor name */ cpuid(0x00000000, (unsigned int *)&c->cpuid_level, -@@ -275,50 +454,87 @@ void __init cpu_detect(struct cpuinfo_x8 +@@ -275,48 +457,85 @@ void __init cpu_detect(struct cpuinfo_x8 (unsigned int *)&c->x86_vendor_id[4]); c->x86 = 4; @@ -2426,14 +2490,14 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + + if (c->extended_cpuid_level >= 0x80000007) + c->x86_power = cpuid_edx(0x80000007); - - } - ++ ++} ++ +static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_X86_32 + int i; -+ + + /* + * First of all, decide if this is a 486 or higher + * It's a 486 if we can modify the AC flag @@ -2453,12 +2517,10 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + } + } +#endif -+} -+ + } + /* - * Do minimum CPU detection early. 
- * Fields really needed: vendor, cpuid_level, family, model, mask, -@@ -328,25 +544,65 @@ static void __cpuinit early_get_cap(stru +@@ -328,25 +547,65 @@ static void __cpuinit early_get_cap(stru * WARNING: this function is only called on the BP. Don't add code here * that is supposed to run on all CPUs. */ @@ -2496,7 +2558,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + this_cpu->c_early_init(c); + + validate_pat_support(c); -+ + +- early_get_cap(c); +#ifdef CONFIG_SMP + c->cpu_index = boot_cpu_id; +#endif @@ -2506,8 +2569,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +{ + struct cpu_dev **cdev; + int count = 0; - -- early_get_cap(c); ++ + printk("KERNEL supported cpus:\n"); + for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { + struct cpu_dev *cpudev = *cdev; @@ -2533,7 +2595,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } /* -@@ -364,88 +620,41 @@ static void __cpuinit detect_nopl(struct +@@ -364,88 +623,43 @@ static void __cpuinit detect_nopl(struct static void __cpuinit generic_identify(struct cpuinfo_x86 *c) { @@ -2564,8 +2626,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - if (c->x86 >= 0x6) - c->x86_model += ((tfms >> 16) & 0xF) << 4; - c->x86_mask = tfms & 15; -- c->initial_apicid = (ebx >> 24) & 0xFF; -#ifndef CONFIG_XEN +- c->initial_apicid = (ebx >> 24) & 0xFF; -#ifdef CONFIG_X86_HT - c->apicid = phys_pkg_id(c->initial_apicid, 0); - c->phys_proc_id = c->initial_apicid; @@ -2626,9 +2688,10 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - return 1; -} -__setup("serialnumber", x86_serial_nr_setup); ++#ifndef CONFIG_XEN + if (c->cpuid_level >= 0x00000001) { + c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; -+#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) ++#ifdef CONFIG_X86_32 +# ifdef CONFIG_X86_HT + c->apicid = phys_pkg_id(c->initial_apicid, 0); +# else @@ -2640,6 +2703,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + c->phys_proc_id = c->initial_apicid; +#endif + } ++#endif + get_model_name(c); /* Default name */ @@ -2649,7 +2713,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * This does the hard work of actually picking apart the CPU stuff... -@@ -457,32 +666,31 @@ static void __cpuinit identify_cpu(struc +@@ -457,34 +671,33 @@ static void __cpuinit identify_cpu(struc c->loops_per_jiffy = loops_per_jiffy; c->x86_cache_size = -1; c->x86_vendor = X86_VENDOR_UNKNOWN; @@ -2657,8 +2721,10 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches c->x86_model = c->x86_mask = 0; /* So far unknown... */ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ + #ifndef CONFIG_XEN c->x86_max_cores = 1; + c->x86_coreid_bits = 0; + #endif +#ifdef CONFIG_X86_64 + c->x86_clflush_size = 64; +#else @@ -2693,7 +2759,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * Vendor-specific initialization. 
In this section we * canonicalize the feature flags, meaning if there are -@@ -516,6 +724,10 @@ static void __cpuinit identify_cpu(struc +@@ -518,6 +731,10 @@ static void __cpuinit identify_cpu(struc c->x86, c->x86_model); } @@ -2704,7 +2770,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are -@@ -524,7 +736,7 @@ static void __cpuinit identify_cpu(struc +@@ -526,7 +743,7 @@ static void __cpuinit identify_cpu(struc */ if (c != &boot_cpu_data) { /* AND the already accumulated flags with these */ @@ -2713,7 +2779,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } -@@ -532,72 +744,91 @@ static void __cpuinit identify_cpu(struc +@@ -534,72 +751,91 @@ static void __cpuinit identify_cpu(struc for (i = 0; i < NCAPINTS; i++) c->x86_capability[i] &= ~cleared_cpu_caps[i]; @@ -2842,7 +2908,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static __init int setup_noclflush(char *arg) { -@@ -615,18 +846,26 @@ void __cpuinit print_cpu_info(struct cpu +@@ -617,18 +853,26 @@ void __cpuinit print_cpu_info(struct cpu else if (c->cpuid_level >= 0) vendor = c->x86_vendor_id; @@ -2876,7 +2942,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } static __init int setup_disablecpuid(char *arg) -@@ -642,19 +881,124 @@ __setup("clearcpuid=", setup_disablecpui +@@ -644,19 +888,124 @@ __setup("clearcpuid=", setup_disablecpui cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; @@ -3009,7 +3075,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* Make sure %fs is initialized properly in idle threads */ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) { -@@ -662,36 +1006,154 @@ struct pt_regs * __cpuinit idle_regs(str +@@ -664,36 +1013,154 @@ struct pt_regs * __cpuinit idle_regs(str regs->fs = __KERNEL_PERCPU; return regs; } @@ -3186,7 +3252,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches void __cpuinit cpu_init(void) { int cpu = smp_processor_id(); -@@ -745,19 +1207,21 @@ void __cpuinit cpu_init(void) +@@ -747,19 +1214,21 @@ void __cpuinit cpu_init(void) /* * Force FPU initialization: */ @@ -3218,9 +3284,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + + #endif ---- head-2010-04-29.orig/arch/x86/kernel/cpu/common_64-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common_64-xen.c 2011-02-01 14:38:38.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,773 +0,0 @@ +@@ -1,777 +0,0 @@ -#include -#include -#include @@ -3362,7 +3428,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - -void __cpuinit detect_ht(struct cpuinfo_x86 *c) -{ --#ifdef CONFIG_SMP +-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - u32 eax, ebx, ecx, edx; - int index_msb, core_bits; - @@ -3516,8 +3582,10 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - c->x86_model_id[0] = '\0'; /* Unset */ - c->x86_clflush_size = 64; - c->x86_cache_alignment = c->x86_clflush_size; +-#ifndef CONFIG_XEN - c->x86_max_cores = 1; - c->x86_coreid_bits = 0; +-#endif - c->extended_cpuid_level = 0; - memset(&c->x86_capability, 0, sizeof c->x86_capability); - @@ -3551,10 +3619,12 @@ Automatically 
created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - c->x86 = 4; - } - +-#ifndef CONFIG_XEN - c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; -#ifdef CONFIG_SMP - c->phys_proc_id = c->initial_apicid; -#endif +-#endif - /* AMD-defined flags: level 0x80000001 */ - xlvl = cpuid_eax(0x80000000); - c->extended_cpuid_level = xlvl; @@ -3994,9 +4064,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - if (is_uv_system()) - uv_cpu_init(); -} ---- head-2010-04-29.orig/arch/x86/kernel/dumpstack_64.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/dumpstack_64.c 2010-03-24 15:14:47.000000000 +0100 -@@ -22,6 +22,7 @@ +--- head-2011-03-17.orig/arch/x86/kernel/dumpstack_64.c 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/dumpstack_64.c 2011-02-01 14:39:24.000000000 +0100 +@@ -21,6 +21,7 @@ #define N_EXCEPTION_STACKS_END \ (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) @@ -4004,7 +4074,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static char x86_stack_ids[][8] = { [ DEBUG_STACK-1 ] = "#DB", [ NMI_STACK-1 ] = "NMI", -@@ -33,10 +34,12 @@ static char x86_stack_ids[][8] = { +@@ -32,10 +33,12 @@ static char x86_stack_ids[][8] = { N_EXCEPTION_STACKS_END ] = "#DB[?]" #endif }; @@ -4017,7 +4087,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches unsigned k; /* -@@ -96,6 +99,7 @@ static unsigned long *in_exception_stack +@@ -95,6 +98,7 @@ static unsigned long *in_exception_stack } #endif } @@ -4025,8 +4095,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches return NULL; } ---- head-2010-04-29.orig/arch/x86/kernel/e820-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/e820-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -167,6 +167,9 @@ static void __init _e820_print_map(const case E820_NVS: printk(KERN_CONT "(ACPI NVS)\n"); @@ -4107,8 +4177,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #undef e820 #ifndef CONFIG_XEN ---- head-2010-04-29.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -3,10 +3,18 @@ #include #include @@ -4939,8 +5009,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } + early_param("earlyprintk", setup_early_printk); ---- head-2010-04-29.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:39:24.000000000 +0100 @@ -700,7 +700,7 @@ ENTRY(interrupt) ENTRY(irq_entries_start) RING0_INT_FRAME @@ -5050,8 +5120,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #include ---- head-2010-04-29.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_64-xen.S 
2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:39:24.000000000 +0100 @@ -66,35 +66,9 @@ .code64 @@ -5141,19 +5211,19 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #ifdef CONFIG_X86_MCE /* runs on exception stack */ ---- head-2010-04-29.orig/arch/x86/kernel/fixup.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/fixup.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/fixup.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/fixup.c 2011-02-01 14:39:24.000000000 +0100 @@ -37,7 +37,7 @@ - #define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args ) + #define DP(_f, _args...) pr_alert(" " _f "\n" , ## _args ) -void do_fixup_4gb_segment(struct pt_regs *regs, long error_code) +dotraplinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code) { static unsigned long printed = 0; char info[100]; ---- head-2010-04-29.orig/arch/x86/kernel/head-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -36,6 +36,7 @@ void __init reserve_ebda_region(void) /* start of EBDA area */ @@ -5162,8 +5232,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* Fixup: bios puts an EBDA in the top 64K segment */ /* of conventional memory, but does not adjust lowmem. */ ---- head-2010-04-29.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head64-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -151,12 +151,11 @@ void __init x86_64_start_kernel(char * r load_idt((const struct desc_ptr *)&idt_descr); #endif @@ -5180,8 +5250,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -0,0 +1,3949 @@ ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:39:24.000000000 +0100 +@@ -0,0 +1,3937 @@ +/* + * Intel IO-APIC support for multi-Pentium hosts. 
+ * @@ -5215,8 +5285,6 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +#include +#include +#include -+#include -+#include +#include +#include +#include /* time_after() */ @@ -5224,8 +5292,6 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +#include +#endif +#include -+#include -+#include + +#include +#include @@ -5237,13 +5303,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +#include +#include +#include -+#include -+#include +#include -+#include -+#include -+#include -+#include + +#include +#include @@ -5808,7 +5868,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + apic_write(APIC_ICR, cfg); +} +#endif /* !CONFIG_SMP && CONFIG_X86_32*/ -+#endif /* CONFIG_XEN */ ++#endif /* !CONFIG_XEN */ + +#ifdef CONFIG_X86_32 +/* @@ -6910,7 +6970,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + +/* Where if anywhere is the i8259 connect in external int mode */ +static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; -+#endif /* CONFIG_XEN */ ++#endif /* !CONFIG_XEN */ + +void __init enable_IO_APIC(void) +{ @@ -6983,9 +7043,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +#endif +} + -+#ifdef CONFIG_XEN -+#define disable_IO_APIC() ((void)0) -+#else ++#ifndef CONFIG_XEN +/* + * Not an __init, needed by the reboot code + */ @@ -7604,7 +7662,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + .retrigger = ioapic_retrigger_irq, +}; +#endif -+#endif /* CONFIG_XEN */ ++#endif /* !CONFIG_XEN */ + +static inline void init_IO_APIC_traps(void) +{ @@ -8007,7 +8065,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +{ + if (sis_apic_bug == -1) + sis_apic_bug = 0; -+#ifdef CONFIG_XEN ++#ifdef CONFIG_X86_XEN + if (is_initial_xendomain()) { + struct xen_platform_op op = { .cmd = XENPF_platform_quirk }; + op.u.platform_quirk.quirk_id = sis_apic_bug ? @@ -8166,7 +8224,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + __clear_irq_vector(irq); + spin_unlock_irqrestore(&vector_lock, flags); +} -+#endif /* CONFIG_XEN */ ++#endif /* !CONFIG_XEN */ + +/* + * MSI message composition @@ -8831,9 +8889,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +#ifdef CONFIG_ACPI + +#ifdef CONFIG_X86_32 ++#ifndef CONFIG_XEN +int __init io_apic_get_unique_id(int ioapic, int apic_id) +{ -+#ifndef CONFIG_XEN + union IO_APIC_reg_00 reg_00; + static physid_mask_t apic_id_map = PHYSID_MASK_NONE; + physid_mask_t tmp; @@ -8902,10 +8960,10 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + + apic_printk(APIC_VERBOSE, KERN_INFO + "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); -+#endif /* !CONFIG_XEN */ + + return apic_id; +} ++#endif /* !CONFIG_XEN */ + +int __init io_apic_get_version(int ioapic) +{ @@ -9131,7 +9189,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + * IO APICS that are mapped in on a BAR in PCI space. 
*/ +late_initcall(ioapic_insert_resources); +#endif /* !CONFIG_XEN */ ---- head-2010-04-29.orig/arch/x86/kernel/io_apic_32-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/io_apic_32-xen.c 2011-02-01 14:38:38.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,2985 +0,0 @@ -/* @@ -11894,9 +11952,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - -#ifdef CONFIG_ACPI - +-#ifndef CONFIG_XEN -int __init io_apic_get_unique_id(int ioapic, int apic_id) -{ --#ifndef CONFIG_XEN - union IO_APIC_reg_00 reg_00; - static physid_mask_t apic_id_map = PHYSID_MASK_NONE; - physid_mask_t tmp; @@ -11965,10 +12023,10 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - - apic_printk(APIC_VERBOSE, KERN_INFO - "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); --#endif /* !CONFIG_XEN */ - - return apic_id; -} +-#endif /* !CONFIG_XEN */ - - -int __init io_apic_get_version(int ioapic) @@ -12119,7 +12177,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - } -} -#endif ---- head-2010-04-29.orig/arch/x86/kernel/io_apic_64-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/io_apic_64-xen.c 2011-02-01 14:38:38.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,2448 +0,0 @@ -/* @@ -14570,8 +14628,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - * IO APICS that are mapped in on a BAR in PCI space. */ -late_initcall(ioapic_insert_resources); -#endif /* !CONFIG_XEN */ ---- head-2010-04-29.orig/arch/x86/kernel/ioport-xen.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/ioport-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ioport-xen.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ioport-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -14,6 +14,7 @@ #include #include @@ -14580,32 +14638,19 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #include /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ ---- head-2010-04-29.orig/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -21,6 +21,8 @@ - #ifdef CONFIG_X86_32 - #ifndef CONFIG_XEN - #include -+#include -+ - /* - * the following functions deal with sending IPIs between CPUs. 
- * -@@ -197,10 +199,8 @@ void send_IPI_mask_sequence(cpumask_t ma - #endif +--- head-2011-03-17.orig/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:56:33.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:56:51.000000000 +0100 +@@ -57,7 +57,4 @@ void send_IPI_mask_sequence(cpumask_t ma + send_IPI_mask_bitmask(mask, vector); } -/* must come after the send_IPI functions above for inlining */ -#include - - #ifndef CONFIG_XEN -+/* must come after the send_IPI functions above for inlining */ - static int convert_apicid_to_cpu(int apic_id) - { - int i; + #endif --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/irq-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -0,0 +1,193 @@ ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-01 14:39:24.000000000 +0100 +@@ -0,0 +1,200 @@ +/* + * Common interrupt code for 32 and 64 bit + */ @@ -14679,6 +14724,11 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); + seq_printf(p, " TLB shootdowns\n"); ++#else ++ seq_printf(p, "LCK: "); ++ for_each_online_cpu(j) ++ seq_printf(p, "%10u ", irq_stats(j)->irq_lock_count); ++ seq_printf(p, " Spinlock wakeups\n"); +#endif +#endif +#ifdef CONFIG_X86_MCE @@ -14776,6 +14826,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + sum += irq_stats(cpu)->irq_call_count; +#ifndef CONFIG_XEN + sum += irq_stats(cpu)->irq_tlb_count; ++#else ++ sum += irq_stats(cpu)->irq_lock_count; +#endif +#endif +#ifdef CONFIG_X86_MCE @@ -14799,8 +14851,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches +#endif + return sum; +} ---- head-2010-04-29.orig/arch/x86/kernel/ldt-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/ldt-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ldt-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ldt-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -18,6 +18,7 @@ #include #include @@ -14809,7 +14861,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #ifdef CONFIG_SMP static void flush_ldt(void *current_mm) ---- head-2010-04-29.orig/arch/x86/kernel/microcode-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/microcode-xen.c 2011-02-01 14:38:38.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,214 +0,0 @@ -/* @@ -15027,7 +15079,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -module_init(microcode_init) -module_exit(microcode_exit) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:14:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -0,0 +1,225 @@ +/* + * Intel CPU Microcode Update Driver for Linux @@ -15254,9 +15306,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + +module_init(microcode_init); +module_exit(microcode_exit); ---- head-2010-04-29.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -406,7 +406,9 @@ static int __init smp_read_mpc(struct mp +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ 
head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:39:24.000000000 +0100 +@@ -410,7 +410,9 @@ static int __init smp_read_mpc(struct mp generic_bigsmp_probe(); #endif @@ -15266,7 +15318,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; -@@ -611,6 +613,9 @@ static void __init __get_smp_config(unsi +@@ -622,6 +624,9 @@ void __init get_smp_config(void) printk(KERN_INFO "Using ACPI for processor (LAPIC) " "configuration information\n"); @@ -15276,8 +15328,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) && !defined(CONFIG_XEN) ---- head-2010-04-29.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -41,11 +41,12 @@ EXPORT_SYMBOL(bad_dma_address); /* Dummy device used for NULL arguments (normally ISA). Better would be probably a smaller DMA mask, but this is bug-to-bug compatible @@ -15556,8 +15608,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static int __init pci_iommu_init(void) { calgary_iommu_init(); ---- head-2010-04-29.orig/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -5,6 +5,7 @@ #include @@ -15595,9 +15647,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches .map_single = gnttab_map_single, .unmap_single = gnttab_unmap_single, .map_sg = gnttab_map_sg, ---- head-2010-04-29.orig/arch/x86/kernel/process-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -180,7 +180,8 @@ static void mwait_idle(void) +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 15:59:49.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:00:33.000000000 +0100 +@@ -151,7 +151,8 @@ static void mwait_idle(void) static void poll_idle(void) { local_irq_enable(); @@ -15607,8 +15659,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } #ifndef CONFIG_XEN ---- head-2010-04-29.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_32-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:34:28.000000000 +0100 @@ -37,6 +37,7 @@ #include #include @@ -15617,7 +15669,16 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #include #include -@@ -60,6 +61,8 @@ +@@ -51,8 +52,6 @@ + #endif + + #include +-#include +-#include + + #include + +@@ -60,6 +59,8 @@ #include #include #include @@ -15626,7 +15687,7 @@ Automatically created from 
"patches.kernel.org/patch-2.6.28" by xen-port-patches asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); asmlinkage void cstar_ret_from_fork(void) __asm__("cstar_ret_from_fork"); -@@ -78,42 +81,12 @@ unsigned long thread_saved_pc(struct tas +@@ -78,42 +79,12 @@ unsigned long thread_saved_pc(struct tas return ((unsigned long *)tsk->thread.sp)[3]; } @@ -15671,7 +15732,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * The idle thread. There's no useful work to be -@@ -155,12 +128,13 @@ void cpu_idle(void) +@@ -155,12 +126,13 @@ void cpu_idle(void) } } @@ -15686,7 +15747,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches if (user_mode_vm(regs)) { sp = regs->sp; -@@ -173,11 +147,15 @@ void __show_registers(struct pt_regs *re +@@ -173,11 +145,15 @@ void __show_registers(struct pt_regs *re } printk("\n"); @@ -15704,7 +15765,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", (u16)regs->cs, regs->ip, regs->flags, -@@ -216,7 +194,7 @@ void __show_registers(struct pt_regs *re +@@ -216,7 +192,7 @@ void __show_registers(struct pt_regs *re void show_regs(struct pt_regs *regs) { @@ -15713,7 +15774,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches show_trace(NULL, regs, ®s->sp, regs->bp); } -@@ -269,6 +247,14 @@ void exit_thread(void) +@@ -269,6 +245,14 @@ void exit_thread(void) t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); } @@ -15728,7 +15789,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } void flush_thread(void) -@@ -434,6 +420,35 @@ int set_tsc_mode(unsigned int val) +@@ -434,6 +418,35 @@ int set_tsc_mode(unsigned int val) return 0; } @@ -15764,7 +15825,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static noinline void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) { -@@ -443,14 +458,7 @@ __switch_to_xtra(struct task_struct *pre +@@ -443,14 +456,7 @@ __switch_to_xtra(struct task_struct *pre prev = &prev_p->thread; next = &next_p->thread; @@ -15780,7 +15841,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches if (next->debugctlmsr != debugctl) update_debugctlmsr(next->debugctlmsr); -@@ -474,13 +482,13 @@ __switch_to_xtra(struct task_struct *pre +@@ -474,13 +480,13 @@ __switch_to_xtra(struct task_struct *pre hard_enable_TSC(); } @@ -15796,9 +15857,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } /* ---- head-2010-04-29.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_64-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -40,11 +40,11 @@ +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:34:01.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:34:22.000000000 +0100 +@@ -40,25 +40,23 @@ #include #include #include @@ -15812,15 +15873,22 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #include #include #include -@@ -58,6 +58,7 @@ + #include + #include + #include +-#include + #include + #include #include #include #include +- +-#include +#include - #include + asmlinkage extern void ret_from_fork(void); -@@ -71,6 +72,13 @@ void idle_notifier_register(struct notif +@@ -70,6 +68,13 @@ void idle_notifier_register(struct notif { 
atomic_notifier_chain_register(&idle_notifier, n); } @@ -15834,7 +15902,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches void enter_idle(void) { -@@ -94,25 +102,12 @@ void exit_idle(void) +@@ -93,25 +98,12 @@ void exit_idle(void) __exit_idle(); } @@ -15862,7 +15930,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * The idle thread. There's no useful work to be -@@ -157,63 +152,74 @@ void cpu_idle(void) +@@ -156,63 +148,74 @@ void cpu_idle(void) } /* Prints also some state that isn't saved in the pt_regs */ @@ -15963,7 +16031,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches show_trace(NULL, regs, (void *)(regs + 1), regs->bp); } -@@ -250,6 +256,14 @@ void exit_thread(void) +@@ -249,6 +252,14 @@ void exit_thread(void) #endif t->io_bitmap_max = 0; } @@ -15978,7 +16046,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } void xen_load_gs_index(unsigned gs) -@@ -330,10 +344,10 @@ void prepare_to_copy(struct task_struct +@@ -329,10 +340,10 @@ void prepare_to_copy(struct task_struct int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, unsigned long unused, @@ -15991,7 +16059,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches struct task_struct *me = current; childregs = ((struct pt_regs *) -@@ -377,10 +391,10 @@ int copy_thread(int nr, unsigned long cl +@@ -376,10 +387,10 @@ int copy_thread(int nr, unsigned long cl if (test_thread_flag(TIF_IA32)) err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); @@ -16006,7 +16074,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches goto out; } p->thread.iopl = current->thread.iopl; -@@ -487,13 +501,27 @@ static inline void __switch_to_xtra(stru +@@ -486,13 +497,27 @@ static inline void __switch_to_xtra(stru next = &next_p->thread; debugctl = prev->debugctlmsr; @@ -16040,7 +16108,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches if (next->debugctlmsr != debugctl) update_debugctlmsr(next->debugctlmsr); -@@ -517,13 +545,13 @@ static inline void __switch_to_xtra(stru +@@ -516,13 +541,13 @@ static inline void __switch_to_xtra(stru hard_enable_TSC(); } @@ -16056,7 +16124,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } /* -@@ -555,7 +583,7 @@ __switch_to(struct task_struct *prev_p, +@@ -554,7 +579,7 @@ __switch_to(struct task_struct *prev_p, multicall_entry_t _mcl[8], *mcl = _mcl; /* we're going to use this soon, after a few expensive things */ @@ -16065,7 +16133,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches prefetch(next->xstate); /* -@@ -636,12 +664,12 @@ __switch_to(struct task_struct *prev_p, +@@ -635,12 +660,12 @@ __switch_to(struct task_struct *prev_p, if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL))) BUG(); @@ -16080,7 +16148,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches if (unlikely(next->ds)) loadsegment(ds, next->ds); -@@ -655,7 +683,7 @@ __switch_to(struct task_struct *prev_p, +@@ -654,7 +679,7 @@ __switch_to(struct task_struct *prev_p, */ arch_leave_lazy_cpu_mode(); @@ -16089,7 +16157,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches * Switch FS and GS. * * Segment register != 0 always requires a reload. 
Also -@@ -674,10 +702,10 @@ __switch_to(struct task_struct *prev_p, +@@ -673,10 +698,10 @@ __switch_to(struct task_struct *prev_p, if (next->gs) WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs)); @@ -16102,7 +16170,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches write_pda(kernelstack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET); -@@ -718,7 +746,7 @@ long sys_execve(char __user *name, char +@@ -717,7 +742,7 @@ long sys_execve(char __user *name, char char __user * __user *envp, struct pt_regs *regs) { long error; @@ -16111,7 +16179,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches filename = getname(name); error = PTR_ERR(filename); -@@ -776,56 +804,56 @@ asmlinkage long sys_vfork(struct pt_regs +@@ -775,56 +800,56 @@ asmlinkage long sys_vfork(struct pt_regs unsigned long get_wchan(struct task_struct *p) { unsigned long stack; @@ -16189,7 +16257,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } put_cpu(); break; -@@ -880,8 +908,7 @@ long do_arch_prctl(struct task_struct *t +@@ -879,8 +904,7 @@ long do_arch_prctl(struct task_struct *t rdmsrl(MSR_KERNEL_GS_BASE, base); else base = task->thread.gs; @@ -16199,74 +16267,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches base = task->thread.gs; ret = put_user(base, (unsigned long __user *)addr); break; ---- head-2010-04-29.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/quirks-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -352,9 +352,27 @@ static void ati_force_hpet_resume(void) - printk(KERN_DEBUG "Force enabled HPET at resume\n"); - } - -+static u32 ati_ixp4x0_rev(struct pci_dev *dev) -+{ -+ u32 d; -+ u8 b; -+ -+ pci_read_config_byte(dev, 0xac, &b); -+ b &= ~(1<<5); -+ pci_write_config_byte(dev, 0xac, b); -+ pci_read_config_dword(dev, 0x70, &d); -+ d |= 1<<8; -+ pci_write_config_dword(dev, 0x70, d); -+ pci_read_config_dword(dev, 0x8, &d); -+ d &= 0xff; -+ dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d); -+ return d; -+} -+ - static void ati_force_enable_hpet(struct pci_dev *dev) - { -- u32 uninitialized_var(val); -+ u32 d, val; -+ u8 b; - - if (hpet_address || force_hpet_address) - return; -@@ -364,14 +382,33 @@ static void ati_force_enable_hpet(struct - return; - } - -+ d = ati_ixp4x0_rev(dev); -+ if (d < 0x82) -+ return; -+ -+ /* base address */ - pci_write_config_dword(dev, 0x14, 0xfed00000); - pci_read_config_dword(dev, 0x14, &val); -+ -+ /* enable interrupt */ -+ outb(0x72, 0xcd6); b = inb(0xcd7); -+ b |= 0x1; -+ outb(0x72, 0xcd6); outb(b, 0xcd7); -+ outb(0x72, 0xcd6); b = inb(0xcd7); -+ if (!(b & 0x1)) -+ return; -+ pci_read_config_dword(dev, 0x64, &d); -+ d |= (1<<10); -+ pci_write_config_dword(dev, 0x64, d); -+ pci_read_config_dword(dev, 0x64, &d); -+ if (!(d & (1<<10))) -+ return; -+ - force_hpet_address = val; - force_hpet_resume_type = ATI_FORCE_HPET_RESUME; - dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", - force_hpet_address); - cached_dev = dev; -- return; - } - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS, - ati_force_enable_hpet); ---- head-2010-04-29.orig/arch/x86/kernel/setup-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-04 15:09:03.000000000 +0100 ++++ 
head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:22:12.000000000 +0100 @@ -261,6 +261,9 @@ unsigned long saved_video_mode; #define RAMDISK_LOAD_FLAG 0x4000 @@ -16519,7 +16521,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches * Determine if we were loaded by an EFI loader. If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures * for initialization. Note, the efi init code path is determined by the -@@ -691,6 +884,9 @@ void __init setup_arch(char **cmdline_p) +@@ -693,6 +886,9 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif @@ -16529,7 +16531,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches early_cpu_init(); early_ioremap_init(); -@@ -785,6 +981,19 @@ void __init setup_arch(char **cmdline_p) +@@ -787,6 +983,19 @@ void __init setup_arch(char **cmdline_p) bss_resource.start = virt_to_phys(&__bss_start); bss_resource.end = virt_to_phys(&__bss_stop)-1; @@ -16549,7 +16551,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; -@@ -794,13 +1003,8 @@ void __init setup_arch(char **cmdline_p) +@@ -796,13 +1005,8 @@ void __init setup_arch(char **cmdline_p) check_efer(); #endif @@ -16565,7 +16567,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* after early param, so could get panic from serial */ reserve_early_setup_data(); -@@ -819,10 +1023,15 @@ void __init setup_arch(char **cmdline_p) +@@ -821,10 +1025,15 @@ void __init setup_arch(char **cmdline_p) finish_e820_parsing(); @@ -16582,7 +16584,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #ifndef CONFIG_XEN /* after parse_early_param, so could debug it */ -@@ -868,6 +1077,10 @@ void __init setup_arch(char **cmdline_p) +@@ -870,6 +1079,10 @@ void __init setup_arch(char **cmdline_p) num_physpages = max_pfn; max_mapnr = max_pfn; @@ -16593,7 +16595,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* How many end-of-memory variables you have, grandma! */ /* need this before calling reserve_initrd */ -@@ -879,6 +1092,10 @@ void __init setup_arch(char **cmdline_p) +@@ -881,6 +1094,10 @@ void __init setup_arch(char **cmdline_p) high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; #endif @@ -16604,7 +16606,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* max_pfn_mapped is updated here */ max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn< @@ -16668,7 +16670,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches # else # ifdef __i386__ unsigned long *sp = (unsigned long *)®s->sp; -@@ -574,6 +570,7 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -577,6 +573,7 @@ irqreturn_t timer_interrupt(int irq, voi run_local_timers(); if (rcu_pending(cpu)) rcu_check_callbacks(cpu, user_mode_vm(get_irq_regs())); @@ -16676,7 +16678,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches scheduler_tick(); run_posix_cpu_timers(current); profile_tick(CPU_PROFILING); -@@ -803,7 +800,8 @@ static void stop_hz_timer(void) +@@ -806,7 +803,8 @@ static void stop_hz_timer(void) smp_mb(); /* Leave ourselves in tick mode if rcu or softirq or timer pending. 
*/ @@ -16687,7 +16689,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches time_before_eq(j, jiffies))) { cpu_clear(cpu, nohz_cpu_mask); --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/kernel/traps-xen.c 2010-03-24 15:14:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -0,0 +1,1022 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds @@ -17711,7 +17713,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + trap_ctxt[t->vector].address = t->address; + } +} ---- head-2010-04-29.orig/arch/x86/kernel/traps_32-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/traps_32-xen.c 2011-02-01 14:38:38.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,1222 +0,0 @@ -/* @@ -18936,7 +18938,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - return 1; -} -__setup("code_bytes=", code_bytes_setup); ---- head-2010-04-29.orig/arch/x86/kernel/traps_64-xen.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/traps_64-xen.c 2011-02-01 14:38:38.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,1238 +0,0 @@ -/* @@ -20177,8 +20179,21 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches - return 1; -} -__setup("code_bytes=", code_bytes_setup); ---- head-2010-04-29.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/fault-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/dump_pagetables-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/dump_pagetables-xen.c 2011-02-01 14:39:24.000000000 +0100 +@@ -160,8 +160,8 @@ static void note_page(struct seq_file *m + * we have now. "break" is either changing perms, levels or + * address space marker. 
+ */ +- prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); +- cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); ++ prot = pgprot_val(new_prot) & PTE_FLAGS_MASK; ++ cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK; + + if (!st->level) { + /* First entry */ +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -35,6 +35,7 @@ #include #include @@ -20289,7 +20304,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } printk("VM: killing process %s\n", tsk->comm); -@@ -946,14 +929,15 @@ LIST_HEAD(pgd_list); +@@ -949,14 +932,15 @@ LIST_HEAD(pgd_list); void vmalloc_sync_all(void) { @@ -20309,7 +20324,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches unsigned long flags; struct page *page; -@@ -966,10 +950,8 @@ void vmalloc_sync_all(void) +@@ -974,10 +958,8 @@ void vmalloc_sync_all(void) spin_unlock_irqrestore(&pgd_lock, flags); } #else /* CONFIG_X86_64 */ @@ -20322,8 +20337,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches const pgd_t *pgd_ref = pgd_offset_k(address); unsigned long flags; struct page *page; ---- head-2010-04-29.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, return (void*) vaddr; @@ -20332,8 +20347,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches struct page *kmap_atomic_to_page(void *ptr) { ---- head-2010-04-29.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/init_32-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -34,6 +34,7 @@ #include @@ -20553,8 +20568,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches save_pg_dir(); zap_low_mappings(); ---- head-2010-04-29.orig/arch/x86/mm/init_64-xen.c 2010-04-29 09:51:38.000000000 +0200 -+++ head-2010-04-29/arch/x86/mm/init_64-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -34,6 +34,7 @@ #include @@ -21061,7 +21076,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches void free_init_pages(char *what, unsigned long begin, unsigned long end) --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-04-29/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:14:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -0,0 +1,61 @@ +/* + * Copyright © 2008 Ingo Molnar @@ -21124,8 +21139,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + pagefault_enable(); +} +EXPORT_SYMBOL_GPL(iounmap_atomic); ---- head-2010-04-29.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/ioremap-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 
2011-02-07 15:40:39.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:07.000000000 +0100 @@ -25,20 +25,51 @@ #ifdef CONFIG_X86_64 @@ -21223,7 +21238,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches v->val = __pte_val(pte_mkspecial(pfn_pte_ma(mfn, prot))); mfn++; -@@ -240,6 +293,25 @@ int page_is_ram(unsigned long pagenr) +@@ -221,6 +274,25 @@ int page_is_ram(unsigned long pagenr) return 0; } @@ -21249,7 +21264,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * Fix up the linear direct mapping of the kernel to avoid cache attribute * conflicts. -@@ -327,6 +399,12 @@ static void __iomem *__ioremap_caller(re +@@ -308,6 +380,12 @@ static void __iomem *__ioremap_caller(re return (__force void __iomem *)isa_bus_to_virt((unsigned long)phys_addr); /* @@ -21262,7 +21277,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches * Don't allow anybody to remap normal RAM that we're using.. */ for (mfn = PFN_DOWN(phys_addr); mfn < PFN_UP(last_addr); mfn++) { -@@ -381,16 +459,16 @@ static void __iomem *__ioremap_caller(re +@@ -362,16 +440,16 @@ static void __iomem *__ioremap_caller(re switch (prot_val) { case _PAGE_CACHE_UC: default: @@ -21283,7 +21298,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches break; } -@@ -490,7 +568,7 @@ static void __iomem *ioremap_default(res +@@ -471,7 +549,7 @@ static void __iomem *ioremap_default(res unsigned long size) { unsigned long flags; @@ -21292,7 +21307,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches int err; /* -@@ -502,11 +580,11 @@ static void __iomem *ioremap_default(res +@@ -483,11 +561,11 @@ static void __iomem *ioremap_default(res if (err < 0) return NULL; @@ -21307,7 +21322,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } #endif -@@ -602,7 +680,7 @@ void unxlate_dev_mem_ptr(unsigned long p +@@ -583,7 +661,7 @@ void unxlate_dev_mem_ptr(unsigned long p } #endif @@ -21316,7 +21331,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static int __init early_ioremap_debug_setup(char *str) { -@@ -721,12 +799,12 @@ static void __init __early_set_fixmap(en +@@ -702,12 +780,12 @@ static void __init __early_set_fixmap(en } static inline void __init early_set_fixmap(enum fixed_addresses idx, @@ -21332,7 +21347,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } static inline void __init early_clear_fixmap(enum fixed_addresses idx) -@@ -737,16 +815,22 @@ static inline void __init early_clear_fi +@@ -718,16 +796,22 @@ static inline void __init early_clear_fi __early_set_fixmap(idx, 0, __pgprot(0)); } @@ -21360,7 +21375,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches printk(KERN_WARNING "please boot with early_ioremap_debug and report the dmesg.\n"); -@@ -754,18 +838,33 @@ static int __init check_early_ioremap_le +@@ -735,18 +819,33 @@ static int __init check_early_ioremap_le } late_initcall(check_early_ioremap_leak); @@ -21398,7 +21413,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches dump_stack(); } -@@ -776,17 +875,13 @@ void __init *early_ioremap(unsigned long +@@ -757,17 +856,13 @@ void __init *early_ioremap(unsigned long return NULL; } @@ -21418,7 +21433,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * Mappings have to fit in the FIX_BTMAP 
area. -@@ -800,10 +895,10 @@ void __init *early_ioremap(unsigned long +@@ -781,10 +876,10 @@ void __init *early_ioremap(unsigned long /* * Ok, go for it.. */ @@ -21431,7 +21446,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches phys_addr += PAGE_SIZE; --idx; --nrpages; -@@ -811,24 +906,55 @@ void __init *early_ioremap(unsigned long +@@ -792,24 +887,55 @@ void __init *early_ioremap(unsigned long if (early_ioremap_debug) printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0)); @@ -21493,7 +21508,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches dump_stack(); } -@@ -840,12 +966,13 @@ void __init early_iounmap(void *addr, un +@@ -821,12 +947,13 @@ void __init early_iounmap(void *addr, un offset = virt_addr & ~PAGE_MASK; nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT; @@ -21508,8 +21523,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } void __this_fixmap_does_not_exist(void) ---- head-2010-04-29.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pageattr-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -25,15 +25,27 @@ * The current flushing context - we pass it instead of 5 arguments: */ @@ -22278,8 +22293,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #ifdef CONFIG_HIBERNATION bool kernel_page_present(struct page *page) ---- head-2010-04-29.orig/arch/x86/mm/pat-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pat-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pat-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -7,24 +7,24 @@ * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. 
*/ @@ -22579,8 +22594,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches -#endif /* CONFIG_DEBUG_FS */ +#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */ ---- head-2010-04-29.orig/arch/x86/mm/pgtable-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pgtable-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -129,7 +129,7 @@ void __pud_free_tlb(struct mmu_gather *t static void _pin_lock(struct mm_struct *mm, int lock) { if (lock) @@ -22611,8 +22626,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches { unsigned long flags; /* can be called from interrupt context */ ---- head-2010-04-29.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -122,7 +122,6 @@ void __init reserve_top_address(unsigned printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", (int)-reserve); @@ -22631,8 +22646,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches return 0; } early_param("vmalloc", parse_vmalloc); ---- head-2010-04-29.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/pci/irq-xen.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-02-01 14:39:24.000000000 +0100 @@ -499,7 +499,7 @@ static int pirq_amd756_get(struct pci_de if (pirq <= 4) irq = read_config_nybble(router, 0x56, pirq - 1); @@ -22765,9 +22780,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } } #endif ---- head-2010-04-29.orig/arch/x86/xen/Kconfig 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/xen/Kconfig 2010-03-24 15:14:47.000000000 +0100 -@@ -31,7 +31,7 @@ config XEN_SAVE_RESTORE +--- head-2011-03-17.orig/arch/x86/xen/Kconfig 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/xen/Kconfig 2011-02-01 14:39:24.000000000 +0100 +@@ -43,7 +43,7 @@ config XEN_SAVE_RESTORE config XEN_DEBUG_FS bool "Enable Xen debug and tuning parameters in debugfs" @@ -22776,8 +22791,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches default n help Enable statistics output and various tuning options in debugfs. 
---- head-2010-04-29.orig/drivers/acpi/acpica/hwsleep.c 2010-03-24 15:02:17.000000000 +0100 -+++ head-2010-04-29/drivers/acpi/acpica/hwsleep.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/drivers/acpi/acpica/hwsleep.c 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/acpica/hwsleep.c 2011-02-01 14:39:24.000000000 +0100 @@ -396,8 +396,7 @@ acpi_status asmlinkage acpi_enter_sleep_ err = acpi_notify_hypervisor_state(sleep_state, PM1Acontrol, PM1Bcontrol); @@ -22788,8 +22803,45 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches return_ACPI_STATUS(AE_ERROR); } #endif ---- head-2010-04-29.orig/drivers/acpi/processor_extcntl.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-04-29/drivers/acpi/processor_extcntl.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/drivers/acpi/processor_core.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_core.c 2011-02-01 14:39:24.000000000 +0100 +@@ -165,13 +165,20 @@ exit: + + int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) + { +- int i; ++ int i = 0; + int apic_id = -1; + ++ if (type < 0) { ++ if (!processor_cntl_external()) ++ return -1; ++ type = ~type; ++ i = 1; ++ } ++ + apic_id = map_mat_entry(handle, type, acpi_id); + if (apic_id == -1) + apic_id = map_madt_entry(type, acpi_id); +- if (apic_id == -1) ++ if (apic_id == -1 || i) + return apic_id; + + #ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL +--- head-2011-03-17.orig/drivers/acpi/processor_driver.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_driver.c 2011-02-01 14:39:24.000000000 +0100 +@@ -326,7 +326,8 @@ static int acpi_processor_get_info(struc + if (pr->id == -1) { + if (ACPI_FAILURE + (acpi_processor_hotadd_init(pr->handle, &pr->id)) && +- !processor_cntl_external()) { ++ get_cpu_id(pr->handle, ~device_declaration, ++ pr->acpi_id) < 0) { + return -ENODEV; + } + } +--- head-2011-03-17.orig/drivers/acpi/processor_extcntl.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_extcntl.c 2011-02-01 14:39:24.000000000 +0100 @@ -30,7 +30,6 @@ #include @@ -22798,9 +22850,29 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #define ACPI_PROCESSOR_CLASS "processor" #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_extcntl") ---- head-2010-04-29.orig/drivers/firmware/dmi_scan.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/drivers/firmware/dmi_scan.c 2010-04-15 10:05:59.000000000 +0200 -@@ -420,6 +420,11 @@ static bool dmi_matches(const struct dmi +--- head-2011-03-17.orig/drivers/char/agp/generic.c 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/generic.c 2011-02-01 14:39:24.000000000 +0100 +@@ -1217,7 +1217,7 @@ int agp_generic_alloc_pages(struct agp_b + } + + #ifdef CONFIG_X86 +- set_pages_array_uc(mem->pages, num_pages); ++ map_pages_into_agp(mem->pages, num_pages); + #endif + ret = 0; + out: +@@ -1250,7 +1250,7 @@ void agp_generic_destroy_pages(struct ag + return; + + #ifdef CONFIG_X86 +- set_pages_array_wb(mem->pages, mem->page_count); ++ unmap_pages_from_agp(mem->pages, mem->page_count); + #endif + + for (i = 0; i < mem->page_count; i++) { +--- head-2011-03-17.orig/drivers/firmware/dmi_scan.c 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/drivers/firmware/dmi_scan.c 2011-02-17 10:11:37.000000000 +0100 +@@ -482,6 +482,11 @@ static bool dmi_matches(const struct dmi { int i; @@ -22812,9 +22884,20 @@ Automatically 
created from "patches.kernel.org/patch-2.6.28" by xen-port-patches WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n"); for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) { ---- head-2010-04-29.orig/drivers/pci/msi-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/pci/msi-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -305,8 +305,16 @@ static int msi_map_vector(struct pci_dev +--- head-2011-03-17.orig/drivers/idle/Kconfig 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/drivers/idle/Kconfig 2011-02-01 14:39:24.000000000 +0100 +@@ -10,7 +10,7 @@ config INTEL_IDLE + processors intel_idle does not support. + + menu "Memory power savings" +-depends on X86_64 ++depends on X86_64 && !XEN + + config I7300_IDLE_IOAT_CHANNEL + bool +--- head-2011-03-17.orig/drivers/pci/msi-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/pci/msi-xen.c 2011-02-01 14:39:24.000000000 +0100 +@@ -266,8 +266,16 @@ static int msi_map_vector(struct pci_dev * dev->irq in dom0 will be 'Xen pirq' if this device belongs to * to another domain, and will be 'Linux irq' if it belongs to dom0. */ @@ -22833,7 +22916,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches } static void pci_intx_for_msi(struct pci_dev *dev, int enable) -@@ -761,3 +769,24 @@ void pci_msi_init_pci_dev(struct pci_dev +@@ -722,3 +730,24 @@ void pci_msi_init_pci_dev(struct pci_dev INIT_LIST_HEAD(&dev->msi_list); #endif } @@ -22858,9 +22941,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + return; + msi_acpi_init(); +} ---- head-2010-04-29.orig/drivers/pci/probe.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/drivers/pci/probe.c 2010-04-29 09:52:00.000000000 +0200 -@@ -1212,6 +1212,11 @@ static void pci_init_capabilities(struct +--- head-2011-03-17.orig/drivers/pci/probe.c 2011-03-17 14:35:46.000000000 +0100 ++++ head-2011-03-17/drivers/pci/probe.c 2011-02-01 14:39:24.000000000 +0100 +@@ -1214,6 +1214,11 @@ static void pci_init_capabilities(struct /* Vital Product Data */ pci_vpd_pci22_init(dev); @@ -22872,24 +22955,44 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* Alternative Routing-ID Forwarding */ pci_enable_ari(dev); ---- head-2010-04-29.orig/drivers/xen/Makefile 2010-04-19 14:51:09.000000000 +0200 -+++ head-2010-04-29/drivers/xen/Makefile 2010-04-19 14:52:08.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-02 15:36:33.000000000 +0100 +@@ -344,9 +344,6 @@ config XEN_SMPBOOT + def_bool y + depends on SMP && !PPC_XEN + +-config XEN_XENCOMM +- bool +- + config XEN_DEVMEM + def_bool y + +@@ -452,4 +449,7 @@ config SWIOTLB_XEN + depends on PCI + select SWIOTLB + ++config XEN_XENCOMM ++ bool ++ + endmenu +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-02-01 14:39:24.000000000 +0100 @@ -1,4 +1,5 @@ obj-$(CONFIG_PARAVIRT_XEN) += grant-table.o features.o events.o manage.o +xen-hotplug-$(CONFIG_PARAVIRT_XEN) := cpu_hotplug.o xen-balloon-$(CONFIG_PARAVIRT_XEN) := balloon.o xen-balloon-$(CONFIG_XEN) := balloon/ -@@ -9,6 +10,7 @@ obj-y += xenbus/ - obj-$(CONFIG_XEN) += char/ +@@ -11,6 +12,7 @@ obj-$(CONFIG_XEN) += char/ + xen-backend-$(CONFIG_XEN_BACKEND) := util.o - obj-$(CONFIG_XEN) += features.o util.o -+obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) - obj-$(CONFIG_XEN_XENCOMM) += xencomm.o - 
obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) + obj-$(CONFIG_XEN) += features.o $(xen-backend-y) $(xen-backend-m) ++obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) + obj-$(CONFIG_XEN_XENCOMM) += xencomm.o + obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ ---- head-2010-04-29.orig/drivers/xen/blkback/vbd.c 2010-03-22 12:00:53.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkback/vbd.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkback/vbd.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/vbd.c 2011-02-01 14:39:24.000000000 +0100 @@ -95,7 +95,8 @@ int vbd_create(blkif_t *blkif, blkif_vde void vbd_free(struct vbd *vbd) { @@ -22900,17 +23003,17 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches vbd->bdev = NULL; } ---- head-2010-04-29.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkfront/blkfront.c 2010-03-24 15:14:47.000000000 +0100 -@@ -343,6 +343,7 @@ static void connect(struct blkfront_info - printk(KERN_INFO "Setting capacity to %Lu\n", - sectors); +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-02-01 14:39:24.000000000 +0100 +@@ -342,6 +342,7 @@ static void connect(struct blkfront_info + return; + pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); + revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: -@@ -501,9 +502,15 @@ static void blkif_restart_queue_callback +@@ -500,9 +501,15 @@ static void blkif_restart_queue_callback schedule_work(&info->work); } @@ -22927,7 +23030,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches if (!info->xbdev) return -ENODEV; -@@ -512,9 +519,16 @@ int blkif_open(struct inode *inode, stru +@@ -511,9 +518,16 @@ int blkif_open(struct inode *inode, stru } @@ -22945,7 +23048,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches info->users--; if (info->users == 0) { /* Check whether we have been instructed to close. 
We will -@@ -533,9 +547,16 @@ int blkif_release(struct inode *inode, s +@@ -532,10 +546,17 @@ int blkif_release(struct inode *inode, s } @@ -22953,12 +23056,14 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { +- struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; + struct block_device *bd = inode->i_bdev; +#else +int blkif_ioctl(struct block_device *bd, fmode_t mode, + unsigned command, unsigned long argument) +{ +#endif ++ struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", @@ -22970,18 +23075,24 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches struct hd_geometry geo; int ret; -@@ -571,8 +591,7 @@ int blkif_ioctl(struct inode *inode, str - return 0; - - case CDROM_GET_CAPABILITY: { -- struct blkfront_info *info = -- inode->i_bdev->bd_disk->private_data; -+ struct blkfront_info *info = bd->bd_disk->private_data; - struct gendisk *gd = info->gd; - if (gd->flags & GENHD_FL_CD) - return 0; ---- head-2010-04-29.orig/drivers/xen/blkfront/block.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkfront/block.h 2010-03-24 15:14:47.000000000 +0100 +@@ -586,10 +606,14 @@ int blkif_ioctl(struct inode *inode, str + #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) + return scsi_cmd_ioctl(filep, info->gd, command, + (void __user *)argument); +-#else ++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) + return scsi_cmd_ioctl(filep, info->rq, + info->gd, command, + (void __user *)argument); ++#else ++ return scsi_cmd_ioctl(info->rq, info->gd, ++ mode, command, ++ (void __user *)argument); + #endif + } + } +--- head-2011-03-17.orig/drivers/xen/blkfront/block.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/block.h 2011-02-01 14:39:24.000000000 +0100 @@ -123,10 +123,17 @@ struct blkfront_info extern spinlock_t blkif_io_lock; @@ -23000,22 +23111,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); ---- head-2010-04-29.orig/drivers/xen/blkfront/vbd.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkfront/vbd.c 2010-03-24 15:14:47.000000000 +0100 -@@ -110,7 +110,11 @@ static struct block_device_operations xl - .owner = THIS_MODULE, - .open = blkif_open, - .release = blkif_release, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) - .ioctl = blkif_ioctl, -+#else -+ .locked_ioctl = blkif_ioctl, -+#endif - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) - .getgeo = blkif_getgeo - #endif ---- head-2010-04-29.orig/drivers/xen/blktap2/device.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blktap2/device.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-02-07 14:14:26.000000000 +0100 @@ -36,10 +36,10 @@ dev_to_blktap(struct blktap_device *dev) } @@ -23063,9 +23160,27 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches struct hd_geometry geo; int ret; ---- head-2010-04-29.orig/drivers/xen/core/evtchn.c 2010-04-23 15:17:15.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/evtchn.c 2010-04-23 15:18:24.000000000 +0200 -@@ 
-145,7 +145,7 @@ static void bind_evtchn_to_cpu(unsigned +@@ -762,7 +761,7 @@ blktap_device_close_bdev(struct blktap * + dev = &tap->device; + + if (dev->bdev) +- blkdev_put(dev->bdev); ++ blkdev_put(dev->bdev, FMODE_WRITE); + + dev->bdev = NULL; + clear_bit(BLKTAP_PASSTHROUGH, &tap->dev_inuse); +@@ -786,7 +785,7 @@ blktap_device_open_bdev(struct blktap *t + if (!bdev->bd_disk) { + BTERR("device %x:%x doesn't exist\n", + MAJOR(pdev), MINOR(pdev)); +- blkdev_put(dev->bdev); ++ blkdev_put(bdev, FMODE_WRITE); + return -ENOENT; + } + +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-01 14:39:24.000000000 +0100 +@@ -149,7 +149,7 @@ static void bind_evtchn_to_cpu(unsigned BUG_ON(!test_bit(chn, s->evtchn_mask)); if (irq != -1) @@ -23074,7 +23189,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]); set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]); -@@ -158,7 +158,7 @@ static void init_evtchn_cpu_bindings(voi +@@ -162,7 +162,7 @@ static void init_evtchn_cpu_bindings(voi /* By default all event channels notify CPU#0. */ for (i = 0; i < NR_IRQS; i++) @@ -23082,17 +23197,17 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches + irq_to_desc(i)->affinity = cpumask_of_cpu(0); memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); - memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); -@@ -728,7 +728,7 @@ static void ack_dynirq(unsigned int irq) + for_each_possible_cpu(i) +@@ -747,7 +747,7 @@ static void ack_dynirq(unsigned int irq) static void end_dynirq(unsigned int irq) { -- if (!(irq_desc[irq].status & IRQ_DISABLED)) -+ if (!(irq_to_desc(irq)->status & IRQ_DISABLED)) +- if (!(irq_desc[irq].status & IRQ_DISABLED)) { ++ if (!(irq_to_desc(irq)->status & IRQ_DISABLED)) { + move_masked_irq(irq); unmask_dynirq(irq); - } - -@@ -821,7 +821,7 @@ static void enable_pirq(unsigned int irq + } +@@ -841,7 +841,7 @@ static void enable_pirq(unsigned int irq bind_pirq.pirq = evtchn_get_xen_pirq(irq); /* NB. We are happy to share unless we are probing. */ bind_pirq.flags = test_and_clear_bit(irq - PIRQ_BASE, probing_pirq) @@ -23101,16 +23216,23 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches ? 0 : BIND_PIRQ__WILL_SHARE; if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) { if (bind_pirq.flags) -@@ -881,7 +881,7 @@ static void unmask_pirq(unsigned int irq +@@ -900,11 +900,13 @@ static void unmask_pirq(unsigned int irq static void end_pirq(unsigned int irq) { - if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) == -+ if ((irq_to_desc(irq)->status & (IRQ_DISABLED|IRQ_PENDING)) == ++ const struct irq_desc *desc = irq_to_desc(irq); ++ ++ if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) == (IRQ_DISABLED|IRQ_PENDING)) shutdown_pirq(irq); - else -@@ -1067,7 +1067,7 @@ static void restore_cpu_ipis(unsigned in + else { +- if (!(irq_desc[irq].status & IRQ_DISABLED)) ++ if (!(desc->status & IRQ_DISABLED)) + move_masked_irq(irq); + unmask_pirq(irq); + } +@@ -1051,7 +1053,7 @@ static void restore_cpu_ipis(unsigned in bind_evtchn_to_cpu(evtchn, cpu); /* Ready for use. 
*/ @@ -23119,17 +23241,17 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches unmask_evtchn(evtchn); } } -@@ -1203,7 +1203,7 @@ void __init xen_init_IRQ(void) +@@ -1187,7 +1189,7 @@ void __init xen_init_IRQ(void) for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) { irq_bindcount[i] = 0; - irq_desc[i].status |= IRQ_NOPROBE; + irq_to_desc(i)->status |= IRQ_NOPROBE; set_irq_chip_and_handler_name(i, &dynirq_chip, - handle_level_irq, "level"); + handle_fasteoi_irq, "fasteoi"); } ---- head-2010-04-29.orig/drivers/xen/core/smpboot.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/smpboot.c 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-02-01 14:39:24.000000000 +0100 @@ -25,10 +25,6 @@ #include #include @@ -23141,7 +23263,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches extern int local_setup_timer(unsigned int cpu); extern void local_teardown_timer(unsigned int cpu); -@@ -183,7 +179,7 @@ static void __cpuexit xen_smp_intr_exit( +@@ -147,7 +143,7 @@ static void __cpuinit xen_smp_intr_exit( } #endif @@ -23150,7 +23272,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches { cpu_init(); identify_secondary_cpu(¤t_cpu_data); -@@ -436,6 +432,20 @@ int __cpuinit __cpu_up(unsigned int cpu) +@@ -381,6 +377,20 @@ int __cpuinit __cpu_up(unsigned int cpu) return 0; } @@ -23171,31 +23293,20 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches void __init smp_cpus_done(unsigned int max_cpus) { } ---- head-2010-04-29.orig/drivers/xen/core/spinlock.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/spinlock.c 2010-03-24 15:14:47.000000000 +0100 -@@ -14,8 +14,6 @@ - - #ifdef TICKET_SHIFT - --extern irqreturn_t smp_reschedule_interrupt(int, void *); -- - static DEFINE_PER_CPU(int, spinlock_irq) = -1; - static char spinlock_name[NR_CPUS][15]; - ---- head-2010-04-29.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netfront/netfront.c 2010-03-24 15:14:47.000000000 +0100 -@@ -956,7 +956,7 @@ static int network_start_xmit(struct sk_ +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-01 14:39:24.000000000 +0100 +@@ -952,7 +952,7 @@ static int network_start_xmit(struct sk_ return 0; } - frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; + frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { - printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", - frags); ---- head-2010-04-29.orig/drivers/xen/scsifront/scsifront.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-04-29/drivers/xen/scsifront/scsifront.c 2010-03-24 15:14:47.000000000 +0100 -@@ -348,7 +348,7 @@ static int scsifront_queuecommand(struct + pr_alert("xennet: skb rides the rocket: %d frags\n", frags); + dump_stack(); +--- head-2011-03-17.orig/drivers/xen/scsifront/scsifront.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsifront/scsifront.c 2011-02-08 10:04:41.000000000 +0100 +@@ -352,7 +352,7 @@ static int scsifront_queuecommand(struct memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; @@ -23204,7 +23315,7 @@ Automatically created from 
"patches.kernel.org/patch-2.6.28" by xen-port-patches info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; -@@ -418,7 +418,7 @@ static int scsifront_dev_reset_handler(s +@@ -421,7 +421,7 @@ static int scsifront_dev_reset_handler(s memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; @@ -23213,8 +23324,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches ring_req->nr_segments = 0; scsifront_do_request(info); ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_probe.h 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.h 2011-02-01 14:39:24.000000000 +0100 @@ -40,6 +40,11 @@ #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif @@ -23227,8 +23338,8 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); ---- head-2010-04-29.orig/include/xen/cpu_hotplug.h 2007-08-16 18:07:01.000000000 +0200 -+++ head-2010-04-29/include/xen/cpu_hotplug.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/include/xen/cpu_hotplug.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/include/xen/cpu_hotplug.h 2011-02-01 14:39:24.000000000 +0100 @@ -15,8 +15,6 @@ void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); @@ -23238,9 +23349,9 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) ---- head-2010-04-29.orig/lib/swiotlb-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/lib/swiotlb-xen.c 2010-03-24 15:14:47.000000000 +0100 -@@ -49,7 +49,6 @@ int swiotlb; +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 14:39:24.000000000 +0100 +@@ -57,7 +57,6 @@ enum dma_sync_target { int swiotlb_force; @@ -23248,7 +23359,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches static unsigned long iotlb_nslabs; /* -@@ -57,16 +56,7 @@ static unsigned long iotlb_nslabs; +@@ -65,16 +64,7 @@ static unsigned long iotlb_nslabs; * swiotlb_sync_single_*, to see if the memory was in fact allocated by this * API. */ @@ -23266,7 +23377,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * When the IOMMU overflows we return a fallback buffer. This sets the size. 
-@@ -151,15 +141,15 @@ swiotlb_init_with_default_size(size_t de +@@ -159,15 +149,15 @@ swiotlb_init_with_default_size(size_t de /* * Get IO TLB memory from the low pages */ @@ -23285,7 +23396,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT), dma_bits); } while (rc && dma_bits++ < max_dma_bits); -@@ -170,10 +160,10 @@ swiotlb_init_with_default_size(size_t de +@@ -178,10 +168,10 @@ swiotlb_init_with_default_size(size_t de "some DMA memory (e.g., dom0_mem=-128M).\n"); iotlb_nslabs = i; i <<= IO_TLB_SHIFT; @@ -23298,7 +23409,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches if (bits > dma_bits) dma_bits = bits; -@@ -181,6 +171,7 @@ swiotlb_init_with_default_size(size_t de +@@ -189,6 +179,7 @@ swiotlb_init_with_default_size(size_t de break; } } @@ -23306,7 +23417,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * Allocate and initialize the free list array. This array is used -@@ -209,15 +200,12 @@ swiotlb_init_with_default_size(size_t de +@@ -217,15 +208,12 @@ swiotlb_init_with_default_size(size_t de if (rc) panic("No suitable physical memory available for SWIOTLB overflow buffer!\n"); @@ -23323,7 +23434,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches dma_bits); } -@@ -245,6 +233,18 @@ swiotlb_init(void) +@@ -253,6 +241,18 @@ swiotlb_init(void) printk(KERN_INFO "Software IO TLB disabled\n"); } @@ -23342,7 +23453,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * We use __copy_to_user_inatomic to transfer to the host buffer because the * buffer may be mapped read-only (e.g, in blkback driver) but lower-level -@@ -354,7 +354,7 @@ map_single(struct device *hwdev, struct +@@ -362,7 +362,7 @@ map_single(struct device *hwdev, struct io_tlb_list[i] = 0; for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) io_tlb_list[i] = ++count; @@ -23351,7 +23462,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches /* * Update the indices to avoid searching in the next -@@ -396,7 +396,7 @@ found: +@@ -404,7 +404,7 @@ found: static struct phys_addr dma_addr_to_phys_addr(char *dma_addr) { @@ -23360,7 +23471,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches struct phys_addr buffer = io_tlb_orig_addr[index]; buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1); buffer.page += buffer.offset >> PAGE_SHIFT; -@@ -412,7 +412,7 @@ unmap_single(struct device *hwdev, char +@@ -420,7 +420,7 @@ unmap_single(struct device *hwdev, char { unsigned long flags; int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; @@ -23369,7 +23480,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr); /* -@@ -504,7 +504,7 @@ _swiotlb_map_single(struct device *hwdev +@@ -527,7 +527,7 @@ _swiotlb_map_single(struct device *hwdev * buffering it. 
*/ if (!range_straddles_page_boundary(paddr, size) && @@ -23378,7 +23489,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches return dev_addr; /* -@@ -555,9 +555,11 @@ void +@@ -578,9 +578,11 @@ void swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir, struct dma_attrs *attrs) { @@ -23392,60 +23503,36 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches else gnttab_dma_unmap_page(dev_addr); } -@@ -583,36 +585,44 @@ void - swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) - { -+ char *dma_addr = bus_to_virt(dev_addr); -+ - BUG_ON(dir == DMA_NONE); -- if (in_swiotlb_aperture(dev_addr)) -- sync_single(hwdev, bus_to_virt(dev_addr), size, dir); -+ if (is_swiotlb_buffer(dev_addr)) -+ sync_single(hwdev, dma_addr, size, dir); - } - - void - swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) +@@ -606,9 +608,11 @@ static void + swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, int target) { + char *dma_addr = bus_to_virt(dev_addr); + BUG_ON(dir == DMA_NONE); - if (in_swiotlb_aperture(dev_addr)) -- sync_single(hwdev, bus_to_virt(dev_addr), size, dir); +- sync_single(hwdev, bus_to_virt(dev_addr), size, dir, target); + if (is_swiotlb_buffer(dev_addr)) -+ sync_single(hwdev, dma_addr, size, dir); ++ sync_single(hwdev, dma_addr, size, dir, target); } void - swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) +@@ -633,10 +637,11 @@ swiotlb_sync_single_range(struct device + unsigned long offset, size_t size, + int dir, int target) { + char *dma_addr = bus_to_virt(dev_addr); + BUG_ON(dir == DMA_NONE); - if (in_swiotlb_aperture(dev_addr)) -- sync_single(hwdev, bus_to_virt(dev_addr + offset), size, dir); +- sync_single(hwdev, bus_to_virt(dev_addr + offset), size, +- dir, target); + if (is_swiotlb_buffer(dev_addr)) -+ sync_single(hwdev, dma_addr + offset, size, dir); ++ sync_single(hwdev, dma_addr + offset, size, dir, target); } void - swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) - { -+ char *dma_addr = bus_to_virt(dev_addr); -+ - BUG_ON(dir == DMA_NONE); -- if (in_swiotlb_aperture(dev_addr)) -- sync_single(hwdev, bus_to_virt(dev_addr + offset), size, dir); -+ if (is_swiotlb_buffer(dev_addr)) -+ sync_single(hwdev, dma_addr + offset, size, dir); - } - - void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, -@@ -650,7 +660,7 @@ swiotlb_map_sg_attrs(struct device *hwde +@@ -690,7 +695,7 @@ swiotlb_map_sg_attrs(struct device *hwde if (range_straddles_page_boundary(page_to_pseudophys(sg_page(sg)) + sg->offset, sg->length) @@ -23454,7 +23541,7 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches gnttab_dma_unmap_page(dev_addr); buffer.page = sg_page(sg); buffer.offset = sg->offset; -@@ -694,7 +704,7 @@ swiotlb_unmap_sg_attrs(struct device *hw +@@ -734,7 +739,7 @@ swiotlb_unmap_sg_attrs(struct device *hw BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { @@ -23463,27 +23550,18 @@ Automatically created from "patches.kernel.org/patch-2.6.28" by xen-port-patches unmap_single(hwdev, bus_to_virt(sg->dma_address), sg->dma_length, dir); else -@@ -727,7 +737,7 @@ swiotlb_sync_sg_for_cpu(struct device *h - BUG_ON(dir == DMA_NONE); - - for_each_sg(sgl, sg, nelems, i) { -- if 
(in_swiotlb_aperture(sg->dma_address)) -+ if (sg->dma_address != sg_phys(sg)) - sync_single(hwdev, bus_to_virt(sg->dma_address), - sg->dma_length, dir); - } -@@ -743,7 +753,7 @@ swiotlb_sync_sg_for_device(struct device +@@ -767,7 +772,7 @@ swiotlb_sync_sg(struct device *hwdev, st BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (in_swiotlb_aperture(sg->dma_address)) + if (sg->dma_address != sg_phys(sg)) sync_single(hwdev, bus_to_virt(sg->dma_address), - sg->dma_length, dir); + sg->dma_length, dir, target); } ---- head-2010-04-29.orig/mm/vmalloc.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-04-29/mm/vmalloc.c 2010-03-24 15:14:47.000000000 +0100 -@@ -479,6 +479,8 @@ static void vmap_debug_free_range(unsign +--- head-2011-03-17.orig/mm/vmalloc.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/mm/vmalloc.c 2011-02-01 14:39:24.000000000 +0100 +@@ -478,6 +478,8 @@ static void vmap_debug_free_range(unsign #ifdef CONFIG_DEBUG_PAGEALLOC vunmap_page_range(start, end); flush_tlb_kernel_range(start, end); diff --git a/patches.xen/xen3-patch-2.6.29 b/patches.xen/xen3-patch-2.6.29 index eafca42..3389e0b 100644 --- a/patches.xen/xen3-patch-2.6.29 +++ b/patches.xen/xen3-patch-2.6.29 @@ -7,9 +7,9 @@ Patch-mainline: 2.6.29 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches.py ---- head-2010-04-29.orig/arch/x86/Kconfig 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/Kconfig 2010-03-24 15:17:58.000000000 +0100 -@@ -331,7 +331,6 @@ config X86_XEN +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-02-01 14:42:26.000000000 +0100 +@@ -311,7 +311,6 @@ config X86_XEN select X86_PAE select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST @@ -17,7 +17,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches help Choose this option if you plan to run this kernel on top of the Xen Hypervisor. -@@ -369,7 +368,6 @@ config X86_64_XEN +@@ -349,7 +348,6 @@ config X86_64_XEN bool "Enable Xen compatible kernel" depends on X86_64 select XEN @@ -25,7 +25,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches help This option will compile a kernel compatible with Xen hypervisor -@@ -819,7 +817,7 @@ config AMD_IOMMU_STATS +@@ -747,7 +745,7 @@ config AMD_IOMMU_STATS # need this always selected by IOMMU for the VIA workaround config SWIOTLB @@ -34,18 +34,18 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches ---help--- Support for software bounce buffers used on x86-64 systems which don't have a hardware IOMMU (e.g. the current generation -@@ -925,7 +923,7 @@ config X86_XEN_GENAPIC +@@ -862,7 +860,7 @@ config X86_XEN_GENAPIC + config X86_REROUTE_FOR_BROKEN_BOOT_IRQS bool "Reroute for broken boot IRQs" - default n - depends on X86_IO_APIC + depends on X86_IO_APIC && !XEN ---help--- This option enables a workaround that fixes a source of spurious interrupts. 
This is recommended when threaded ---- head-2010-04-29.orig/arch/x86/Makefile 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/Makefile 2010-03-24 15:17:58.000000000 +0100 -@@ -156,8 +156,8 @@ BOOT_TARGETS = bzlilo bzdisk fdimage fdi +--- head-2011-03-17.orig/arch/x86/Makefile 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/Makefile 2011-02-01 14:42:26.000000000 +0100 +@@ -158,8 +158,8 @@ BOOT_TARGETS = bzlilo bzdisk fdimage fdi PHONY += bzImage vmlinuz $(BOOT_TARGETS) ifdef CONFIG_XEN @@ -56,8 +56,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches ifdef CONFIG_X86_64 LDFLAGS_vmlinux := -e startup_64 ---- head-2010-04-29.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:42:26.000000000 +0100 @@ -363,9 +363,9 @@ ENTRY(ia32_syscall) orl $TS_COMPAT,TI_status(%r10) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) @@ -81,9 +81,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches END(ia32_syscall) ia32_badsys: ---- head-2010-04-29.orig/arch/x86/include/asm/hw_irq.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/asm/hw_irq.h 2010-03-24 15:17:58.000000000 +0100 -@@ -136,7 +136,9 @@ extern irqreturn_t smp_call_function_sin +--- head-2011-03-17.orig/arch/x86/include/asm/hw_irq.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/hw_irq.h 2011-02-01 14:42:26.000000000 +0100 +@@ -145,7 +145,9 @@ extern irqreturn_t smp_call_function_sin #endif #endif @@ -93,18 +93,18 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); ---- head-2010-04-29.orig/arch/x86/include/asm/hypervisor.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/include/asm/hypervisor.h 2010-03-24 15:17:58.000000000 +0100 -@@ -24,3 +24,7 @@ extern void init_hypervisor(struct cpuin - extern void init_hypervisor_platform(void); +--- head-2011-03-17.orig/arch/x86/include/asm/hypervisor.h 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/hypervisor.h 2011-02-01 14:42:26.000000000 +0100 +@@ -60,3 +60,7 @@ static inline bool hypervisor_x2apic_ava + } #endif + +#ifdef HAVE_XEN_PLATFORM_COMPAT_H +#include_next +#endif ---- head-2010-04-29.orig/arch/x86/include/asm/kexec.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/asm/kexec.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/kexec.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/kexec.h 2011-02-01 14:42:26.000000000 +0100 @@ -12,13 +12,10 @@ /* * The hypervisor interface implicitly requires that all entries (except @@ -122,8 +122,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches # endif /* CONFIG_XEN */ #else # define PA_CONTROL_PAGE 0 ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 
14:42:26.000000000 +0100 @@ -342,16 +342,14 @@ static inline void set_intr_gate(unsigne _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); } @@ -145,8 +145,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (first_system_vector > vector) first_system_vector = vector; } else ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-02-01 14:42:26.000000000 +0100 @@ -16,7 +16,6 @@ #include #include @@ -167,8 +167,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_PARAVIRT FIX_PARAVIRT_BOOTMAP, #else ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:05:09.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:05:16.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:42:26.000000000 +0100 @@ -79,6 +79,7 @@ static inline void clear_user_highpage(s clear_highpage(page); } @@ -177,8 +177,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #define __HAVE_ARCH_CLEAR_USER_HIGHPAGE void copy_highpage(struct page *to, struct page *from); ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 14:42:26.000000000 +0100 @@ -69,6 +69,8 @@ extern start_info_t *xen_start_info; #define is_initial_xendomain() 0 #endif @@ -197,8 +197,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t, unsigned long flags); ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:42:26.000000000 +0100 @@ -4,6 +4,7 @@ #define ARCH_HAS_IOREMAP_WC @@ -265,8 +265,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches extern void early_ioremap_reset(void); extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); extern void __iomem *early_memremap(unsigned long offset, unsigned long size); ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:31:50.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:32:00.000000000 +0100 @@ -24,6 +24,8 @@ #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) @@ -288,8 +288,8 @@ Automatically 
created from "patches.kernel.org/patch-2.6.29" by xen-port-patches # else # define NR_PIRQS (NR_VECTORS + 32 * MAX_IO_APICS) # endif ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-02-01 14:42:26.000000000 +0100 @@ -3,10 +3,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) @@ -331,8 +331,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:42:26.000000000 +0100 @@ -22,6 +22,8 @@ struct pci_sysdata { }; @@ -376,8 +376,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif #endif /* _ASM_X86_PCI_H */ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-07 15:41:11.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:42:26.000000000 +0100 @@ -22,12 +22,10 @@ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ @@ -414,7 +414,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. 
-@@ -309,41 +318,43 @@ static inline pte_t pte_mkspecial(pte_t +@@ -309,41 +318,42 @@ static inline pte_t pte_mkspecial(pte_t extern pteval_t __supported_pte_mask; @@ -437,7 +437,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + return protval; } --static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) +-static inline pte_t pfn_pte_ma(phys_addr_t page_nr, pgprot_t pgprot) +static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { - pgprotval_t prot = pgprot_val(pgprot); @@ -447,11 +447,10 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - if (prot & _PAGE_PRESENT) - prot &= __supported_pte_mask; -- return __pte_ma(((phys_addr_t)page_nr << PAGE_SHIFT) | prot); -+static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) +- return __pte_ma((page_nr << PAGE_SHIFT) | prot); ++static inline pte_t pfn_pte_ma(phys_addr_t page_nr, pgprot_t pgprot) +{ -+ return __pte_ma(((phys_addr_t)page_nr << PAGE_SHIFT) | -+ massage_pgprot(pgprot)); ++ return __pte_ma((page_nr << PAGE_SHIFT) | massage_pgprot(pgprot)); } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) @@ -477,7 +476,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return __pte(val); } -@@ -359,11 +370,33 @@ static inline pgprot_t pgprot_modify(pgp +@@ -359,11 +369,33 @@ static inline pgprot_t pgprot_modify(pgp #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) @@ -514,8 +513,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:42:26.000000000 +0100 @@ -151,6 +151,7 @@ static inline int pte_none(pte_t pte) #define PTE_FILE_MAX_BITS 32 @@ -524,8 +523,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:42:26.000000000 +0100 @@ -107,15 +107,6 @@ extern unsigned long pg0[]; #endif @@ -542,8 +541,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:42:26.000000000 +0100 @@ -149,8 +149,8 @@ static inline void xen_pgd_clear(pgd_t * #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) @@ -577,9 +576,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \ & ((1U << SWP_TYPE_BITS) - 1)) #define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT) ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:17:58.000000000 +0100 -@@ -111,6 +111,7 @@ struct cpuinfo_x86 { +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:45:14.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:45:38.000000000 +0100 +@@ -121,6 +121,7 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; #endif @@ -587,7 +586,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 -@@ -124,6 +125,10 @@ struct cpuinfo_x86 { +@@ -134,6 +135,10 @@ struct cpuinfo_x86 { #define X86_VENDOR_UNKNOWN 0xff @@ -598,7 +597,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * capabilities of CPUs */ -@@ -354,7 +359,7 @@ struct i387_soft_struct { +@@ -364,7 +369,7 @@ struct i387_soft_struct { u8 no_update; u8 rm; u8 alimit; @@ -607,7 +606,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches u32 entry_eip; }; -@@ -695,6 +700,19 @@ extern void switch_to_new_gdt(void); +@@ -705,6 +710,19 @@ extern void switch_to_new_gdt(void); extern void cpu_init(void); extern void init_gdt(int cpu); @@ -627,8 +626,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static inline void update_debugctlmsr(unsigned long debugctlmsr) { #ifndef CONFIG_X86_DEBUGCTLMSR ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:42:26.000000000 +0100 @@ -18,9 +18,26 @@ #include #include @@ -657,32 +656,32 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches extern void (*mtrr_hook)(void); extern void zap_low_mappings(void); -@@ -29,7 +46,6 @@ extern int __cpuinit get_local_pda(int c +@@ -28,7 +45,6 @@ extern void zap_low_mappings(void); + extern int __cpuinit get_local_pda(int cpu); - extern int smp_num_siblings; extern unsigned int num_processors; -extern cpumask_t cpu_initialized; + #ifndef CONFIG_XEN DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); - DECLARE_PER_CPU(cpumask_t, cpu_core_map); -@@ -38,6 +54,16 @@ DECLARE_PER_CPU(u16, cpu_llc_id); +@@ -39,6 +55,16 @@ DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(int, cpu_number); #endif -+static inline struct cpumask 
*cpu_sibling_mask(int cpu) ++static inline const struct cpumask *cpu_sibling_mask(int cpu) +{ -+ return &per_cpu(cpu_sibling_map, cpu); ++ return cpumask_of(cpu); +} + -+static inline struct cpumask *cpu_core_mask(int cpu) ++static inline const struct cpumask *cpu_core_mask(int cpu) +{ -+ return &per_cpu(cpu_core_map, cpu); ++ return cpumask_of(cpu); +} + - DECLARE_PER_CPU(u16, x86_cpu_to_apicid); - DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); - -@@ -64,7 +90,7 @@ struct smp_ops { + #ifndef CONFIG_XEN + DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); + DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); +@@ -67,7 +93,7 @@ struct smp_ops { void (*cpu_die)(unsigned int cpu); void (*play_dead)(void); @@ -691,7 +690,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches void (*send_call_func_single_ipi)(int cpu); }; -@@ -125,7 +151,7 @@ static inline void arch_send_call_functi +@@ -128,7 +154,7 @@ static inline void arch_send_call_functi static inline void arch_send_call_function_ipi(cpumask_t mask) { @@ -700,7 +699,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } void cpu_disable_common(void); -@@ -144,13 +170,13 @@ extern int __cpu_disable(void); +@@ -147,13 +173,13 @@ extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); void xen_smp_send_stop(void); void xen_smp_send_reschedule(int cpu); @@ -716,7 +715,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches void play_dead(void); -@@ -164,7 +190,7 @@ void smp_store_cpu_info(int id); +@@ -167,7 +193,7 @@ void smp_store_cpu_info(int id); /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { @@ -725,8 +724,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #else static inline void prefill_possible_map(void) ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:42:26.000000000 +0100 @@ -337,6 +337,7 @@ static inline int __raw_spin_is_contende { return __raw_spin(is_contended)(lock); @@ -735,8 +734,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) { ---- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:01:23.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:05:49.000000000 +0100 @@ -18,12 +18,12 @@ # define AT_VECTOR_SIZE_ARCH 1 #endif @@ -752,7 +751,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. -@@ -298,6 +298,8 @@ extern void free_init_pages(char *what, +@@ -300,6 +300,8 @@ extern void free_init_pages(char *what, void xen_idle(void); @@ -761,8 +760,19 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Force strict CPU ordering. 
* And yes, this is required on UP too when we're talking ---- head-2010-04-29.orig/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/thread_info.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/thread_info.h 2011-02-01 14:42:26.000000000 +0100 +@@ -154,7 +154,7 @@ struct thread_info { + + #else + #define _TIF_WORK_CTXSW (_TIF_NOTSC \ +- /*todo | _TIF_DEBUGCTLMSR | _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS*/) ++ /*todo | _TIF_DEBUGCTLMSR | _TIF_DS_AREA_MSR */) + #endif + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -163,6 +163,8 @@ static int __init acpi_sleep_setup(char #ifdef CONFIG_HIBERNATION if (strncmp(str, "s4_nohwsig", 10) == 0) @@ -772,8 +782,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif if (strncmp(str, "old_ordering", 12) == 0) acpi_old_suspend_ordering(); ---- head-2010-04-29.orig/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/apic/apic-xen.c 2011-02-24 15:49:32.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/apic-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -32,7 +32,7 @@ static int __init apic_set_verbosity(cha else if (strcmp("verbose", arg) == 0) apic_verbosity = APIC_VERBOSE; @@ -783,20 +793,20 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches " use apic=verbose or apic=debug\n", arg); return -EINVAL; } ---- head-2010-04-29.orig/arch/x86/kernel/cpu/Makefile 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/arch/x86/kernel/cpu/Makefile 2010-03-24 15:17:58.000000000 +0100 -@@ -34,6 +34,8 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ +--- head-2011-03-17.orig/arch/x86/kernel/cpu/Makefile 2011-02-03 14:29:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/Makefile 2011-02-01 14:42:26.000000000 +0100 +@@ -34,7 +34,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o -+disabled-obj-$(CONFIG_XEN) := hypervisor.o vmware.o -+ +-disabled-obj-$(CONFIG_XEN) := perfctr-watchdog.o ++disabled-obj-$(CONFIG_XEN) := hypervisor.o perfctr-watchdog.o vmware.o + quiet_cmd_mkcapflags = MKCAP $@ cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ - ---- head-2010-04-29.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:17:58.000000000 +0100 -@@ -38,17 +38,41 @@ +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:40:32.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:41:35.000000000 +0100 +@@ -38,17 +38,45 @@ #include #include #include @@ -821,8 +831,10 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches +#endif +cpumask_var_t cpu_initialized_mask; + ++#ifndef CONFIG_XEN +/* representing cpus for which sibling maps can be computed */ +cpumask_var_t cpu_sibling_setup_mask; ++#endif + +#else /* CONFIG_X86_32 */ + @@ -831,7 +843,9 @@ Automatically created from 
"patches.kernel.org/patch-2.6.29" by xen-port-patches +cpumask_t cpu_callout_map; +#endif +cpumask_t cpu_initialized; ++#ifndef CONFIG_XEN +cpumask_t cpu_sibling_setup_map; ++#endif + +#endif /* CONFIG_X86_32 */ + @@ -839,7 +853,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static struct cpu_dev *this_cpu __cpuinitdata; #ifdef CONFIG_X86_64 -@@ -377,7 +401,7 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -380,7 +408,7 @@ void __cpuinit detect_ht(struct cpuinfo_ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); } else if (smp_num_siblings > 1) { @@ -848,7 +862,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings); smp_num_siblings = 1; -@@ -728,6 +752,7 @@ static void __cpuinit identify_cpu(struc +@@ -735,6 +763,7 @@ static void __cpuinit identify_cpu(struc detect_ht(c); #endif @@ -856,7 +870,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are -@@ -879,8 +904,6 @@ static __init int setup_disablecpuid(cha +@@ -886,8 +915,6 @@ static __init int setup_disablecpuid(cha } __setup("clearcpuid=", setup_disablecpuid); @@ -865,7 +879,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_X86_64 struct x8664_pda **_cpu_pda __read_mostly; EXPORT_SYMBOL(_cpu_pda); -@@ -889,7 +912,7 @@ EXPORT_SYMBOL(_cpu_pda); +@@ -896,7 +923,7 @@ EXPORT_SYMBOL(_cpu_pda); struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; #endif @@ -874,7 +888,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static void __ref switch_pt(int cpu) { -@@ -949,8 +972,8 @@ void __cpuinit pda_init(int cpu) +@@ -956,8 +983,8 @@ void __cpuinit pda_init(int cpu) } #ifndef CONFIG_X86_NO_TSS @@ -885,7 +899,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif extern asmlinkage void ignore_sysret(void); -@@ -1038,7 +1061,7 @@ void __cpuinit cpu_init(void) +@@ -1045,7 +1072,7 @@ void __cpuinit cpu_init(void) me = current; @@ -894,7 +908,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches panic("CPU#%d already initialized!\n", cpu); printk(KERN_INFO "Initializing CPU#%d\n", cpu); -@@ -1163,7 +1186,7 @@ void __cpuinit cpu_init(void) +@@ -1170,7 +1197,7 @@ void __cpuinit cpu_init(void) #endif struct thread_struct *thread = &curr->thread; @@ -903,8 +917,26 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); for (;;) local_irq_enable(); } ---- head-2010-04-29.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/intel.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/intel.c 2011-02-01 14:42:26.000000000 +0100 +@@ -36,10 +36,15 @@ static void __cpuinit early_init_intel(s + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + + if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { ++#ifndef CONFIG_XEN + misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; + wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + c->cpuid_level = cpuid_eax(0); + get_cpu_cap(c); ++#else ++ pr_warning("CPUID levels are restricted -" ++ 
" update hypervisor\n"); ++#endif + } + } + +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -33,7 +33,7 @@ struct mtrr_ops generic_mtrr_ops = { struct mtrr_ops *mtrr_if = &generic_mtrr_ops; @@ -914,8 +946,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static u64 tom2; ---- head-2010-04-29.orig/arch/x86/kernel/e820-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/e820-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -719,6 +719,27 @@ void __init e820_mark_nosave_regions(uns } } @@ -967,8 +999,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif {} }; ---- head-2010-04-29.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -904,49 +904,6 @@ static struct console early_dbgp_console }; #endif @@ -1039,8 +1071,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_EARLY_PRINTK_DBGP } else if (!strncmp(buf, "dbgp", 4)) { if (early_dbgp_init(buf+4) < 0) ---- head-2010-04-29.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:42:26.000000000 +0100 @@ -690,28 +690,37 @@ END(syscall_badsys) 27:; @@ -1699,8 +1731,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + * End of kprobes section + */ + .popsection ---- head-2010-04-29.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:42:26.000000000 +0100 @@ -14,15 +14,15 @@ * * NOTE: This code handles signal-recognition, which happens every time @@ -3341,8 +3373,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + * End of kprobes section + */ + .popsection ---- head-2010-04-29.orig/arch/x86/kernel/head-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -36,7 +36,6 @@ void __init reserve_ebda_region(void) /* start of EBDA area */ @@ -3351,8 +3383,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Fixup: bios puts an EBDA in the top 64K segment */ /* of conventional memory, but does not adjust lowmem. 
*/ ---- head-2010-04-29.orig/arch/x86/kernel/head32-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head32-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head32-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head32-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -12,9 +12,12 @@ #include #include @@ -3366,8 +3398,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS"); #ifndef CONFIG_XEN ---- head-2010-04-29.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/head64-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -31,9 +31,10 @@ #include #include @@ -3389,9 +3421,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS"); reserve_early(round_up(__pa_symbol(&_end), PAGE_SIZE), ---- head-2010-04-29.orig/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:17:58.000000000 +0100 -@@ -122,102 +122,276 @@ static int __init parse_noapic(char *str +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:42:26.000000000 +0100 +@@ -112,102 +112,276 @@ static int __init parse_noapic(char *str } early_param("noapic", parse_noapic); @@ -3730,16 +3762,16 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches struct io_apic { unsigned int index; -@@ -230,7 +404,7 @@ static __attribute_const__ struct io_api +@@ -220,7 +394,7 @@ static __attribute_const__ struct io_api return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK); } -#endif -+#endif /* CONFIG_XEN */ ++#endif /* !CONFIG_XEN */ static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) { -@@ -285,11 +459,10 @@ static inline void io_apic_modify(unsign +@@ -275,11 +449,10 @@ static inline void io_apic_modify(unsign writel(value, &io_apic->data); } @@ -3752,7 +3784,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches spin_lock_irqsave(&ioapic_lock, flags); entry = cfg->irq_2_pin; -@@ -375,13 +548,32 @@ static void ioapic_mask_entry(int apic, +@@ -365,13 +538,32 @@ static void ioapic_mask_entry(int apic, } #ifdef CONFIG_SMP @@ -3788,7 +3820,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches entry = cfg->irq_2_pin; for (;;) { unsigned int reg; -@@ -411,36 +603,61 @@ static void __target_IO_APIC_irq(unsigne +@@ -401,36 +593,61 @@ static void __target_IO_APIC_irq(unsigne } } @@ -3870,7 +3902,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #endif /* CONFIG_SMP */ -@@ -449,16 +666,18 @@ static void set_ioapic_affinity_irq(unsi +@@ -439,16 +656,18 @@ static void set_ioapic_affinity_irq(unsi * shared ISA-space IRQs, so we have to support them. We are super * fast in the common case, and fast for shared ISA-space IRQs. 
*/ @@ -3894,7 +3926,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches cfg->irq_2_pin = entry; entry->apic = apic; entry->pin = pin; -@@ -473,7 +692,7 @@ static void add_pin_to_irq(unsigned int +@@ -463,7 +682,7 @@ static void add_pin_to_irq(unsigned int entry = entry->next; } @@ -3903,7 +3935,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches entry = entry->next; entry->apic = apic; entry->pin = pin; -@@ -482,11 +701,10 @@ static void add_pin_to_irq(unsigned int +@@ -472,11 +691,10 @@ static void add_pin_to_irq(unsigned int /* * Reroute an IRQ to a different pin. */ @@ -3916,7 +3948,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches struct irq_pin_list *entry = cfg->irq_2_pin; int replaced = 0; -@@ -503,18 +721,16 @@ static void __init replace_pin_at_irq(un +@@ -493,18 +711,16 @@ static void __init replace_pin_at_irq(un /* why? call replace before add? */ if (!replaced) @@ -3937,7 +3969,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { unsigned int reg; pin = entry->pin; -@@ -527,13 +743,13 @@ static inline void io_apic_modify_irq(un +@@ -517,13 +733,13 @@ static inline void io_apic_modify_irq(un } } @@ -3954,7 +3986,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { /* * Synchronize the IO-APIC and the CPU by doing -@@ -544,47 +760,64 @@ void io_apic_sync(struct irq_pin_list *e +@@ -534,47 +750,64 @@ void io_apic_sync(struct irq_pin_list *e readl(&io_apic->data); } @@ -4031,16 +4063,16 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) { struct IO_APIC_route_entry entry; -@@ -624,6 +857,8 @@ void send_IPI_self(int vector) +@@ -614,6 +847,8 @@ void send_IPI_self(int vector) apic_write(APIC_ICR, cfg); } #endif /* !CONFIG_SMP && CONFIG_X86_32*/ +#else +#define add_pin_to_irq_cpu(cfg, cpu, apic, pin) - #endif /* CONFIG_XEN */ + #endif /* !CONFIG_XEN */ #ifdef CONFIG_X86_32 -@@ -864,7 +1099,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector +@@ -854,7 +1089,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector */ static int EISA_ELCR(unsigned int irq) { @@ -4049,7 +4081,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches unsigned int port = 0x4d0 + (irq >> 3); return (inb(port) >> (irq & 7)) & 1; } -@@ -1089,52 +1324,118 @@ void unlock_vector_lock(void) +@@ -1079,52 +1314,118 @@ void unlock_vector_lock(void) { spin_unlock(&vector_lock); } @@ -4193,7 +4225,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) -@@ -1152,10 +1453,12 @@ void __setup_vector_irq(int cpu) +@@ -1142,10 +1443,12 @@ void __setup_vector_irq(int cpu) /* This function must be called with vector_lock held */ int irq, vector; struct irq_cfg *cfg; @@ -4208,7 +4240,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches continue; vector = cfg->vector; per_cpu(vector_irq, cpu)[vector] = irq; -@@ -1167,7 +1470,7 @@ void __setup_vector_irq(int cpu) +@@ -1157,7 +1460,7 @@ void __setup_vector_irq(int cpu) continue; cfg = irq_cfg(irq); @@ -4217,7 +4249,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches per_cpu(vector_irq, cpu)[vector] = -1; } } -@@ -1205,11 +1508,8 
@@ static inline int IO_APIC_irq_trigger(in +@@ -1195,11 +1498,8 @@ static inline int IO_APIC_irq_trigger(in } #endif @@ -4230,7 +4262,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || trigger == IOAPIC_LEVEL) -@@ -1240,8 +1540,8 @@ static void ioapic_register_intr(int irq +@@ -1230,8 +1530,8 @@ static void ioapic_register_intr(int irq handle_edge_irq, "edge"); } #else /* !CONFIG_XEN */ @@ -4241,7 +4273,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif static int setup_ioapic_entry(int apic, int irq, -@@ -1305,24 +1605,25 @@ static int setup_ioapic_entry(int apic, +@@ -1295,24 +1595,25 @@ static int setup_ioapic_entry(int apic, return 0; } @@ -4273,7 +4305,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif apic_printk(APIC_VERBOSE,KERN_DEBUG -@@ -1333,16 +1634,15 @@ static void setup_IO_APIC_irq(int apic, +@@ -1323,16 +1624,15 @@ static void setup_IO_APIC_irq(int apic, if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, @@ -4294,7 +4326,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches disable_8259A_irq(irq); ioapic_write_entry(apic, pin, entry); -@@ -1352,6 +1652,9 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1342,6 +1642,9 @@ static void __init setup_IO_APIC_irqs(vo { int apic, pin, idx, irq; int notcon = 0; @@ -4304,7 +4336,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); -@@ -1386,9 +1689,15 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1376,9 +1679,15 @@ static void __init setup_IO_APIC_irqs(vo if (multi_timer_check(apic, irq)) continue; #endif @@ -4322,7 +4354,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches irq_trigger(idx), irq_polarity(idx)); } } -@@ -1448,6 +1757,7 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1438,6 +1747,7 @@ __apicdebuginit(void) print_IO_APIC(void union IO_APIC_reg_03 reg_03; unsigned long flags; struct irq_cfg *cfg; @@ -4330,7 +4362,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches unsigned int irq; if (apic_verbosity == APIC_QUIET) -@@ -1537,8 +1847,11 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1527,8 +1837,11 @@ __apicdebuginit(void) print_IO_APIC(void } } printk(KERN_DEBUG "IRQ to pin mappings:\n"); @@ -4344,7 +4376,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (!entry) continue; printk(KERN_DEBUG "IRQ%d ", irq); -@@ -2030,14 +2343,16 @@ static unsigned int startup_ioapic_irq(u +@@ -2018,14 +2331,16 @@ static unsigned int startup_ioapic_irq(u { int was_pending = 0; unsigned long flags; @@ -4363,7 +4395,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches spin_unlock_irqrestore(&ioapic_lock, flags); return was_pending; -@@ -2051,7 +2366,7 @@ static int ioapic_retrigger_irq(unsigned +@@ -2039,7 +2354,7 @@ static int ioapic_retrigger_irq(unsigned unsigned long flags; spin_lock_irqsave(&vector_lock, flags); @@ -4372,7 +4404,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches spin_unlock_irqrestore(&vector_lock, flags); return 1; -@@ -2100,35 +2415,35 @@ static DECLARE_DELAYED_WORK(ir_migration +@@ -2088,35 +2403,35 @@ static DECLARE_DELAYED_WORK(ir_migration * as simple as edge triggered migration and we can do the irq migration * with a 
simple atomic update to IO-APIC RTE. */ @@ -4419,7 +4451,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches spin_unlock_irqrestore(&ioapic_lock, flags); } -@@ -2140,24 +2455,20 @@ static void migrate_ioapic_irq(int irq, +@@ -2128,24 +2443,20 @@ static void migrate_ioapic_irq(int irq, */ modify_irte(irq, &irte); @@ -4451,7 +4483,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Interrupt in progress. Migrating irq now will change the * vector information in the IO-APIC RTE and that will confuse -@@ -2169,14 +2480,15 @@ static int migrate_irq_remapped_level(in +@@ -2157,14 +2468,15 @@ static int migrate_irq_remapped_level(in } /* everthing is clear. we have right of way */ @@ -4470,7 +4502,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return ret; } -@@ -2197,7 +2509,7 @@ static void ir_irq_migration(struct work +@@ -2185,7 +2497,7 @@ static void ir_irq_migration(struct work continue; } @@ -4479,7 +4511,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches spin_unlock_irqrestore(&desc->lock, flags); } } -@@ -2206,28 +2518,33 @@ static void ir_irq_migration(struct work +@@ -2194,28 +2506,33 @@ static void ir_irq_migration(struct work /* * Migrates the IRQ destination in the process context. */ @@ -4521,7 +4553,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches irq_enter(); me = smp_processor_id(); -@@ -2237,6 +2554,9 @@ asmlinkage void smp_irq_move_cleanup_int +@@ -2225,6 +2542,9 @@ asmlinkage void smp_irq_move_cleanup_int struct irq_cfg *cfg; irq = __get_cpu_var(vector_irq)[vector]; @@ -4531,7 +4563,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches desc = irq_to_desc(irq); if (!desc) continue; -@@ -2246,7 +2566,7 @@ asmlinkage void smp_irq_move_cleanup_int +@@ -2234,7 +2554,7 @@ asmlinkage void smp_irq_move_cleanup_int if (!cfg->move_cleanup_count) goto unlock; @@ -4540,7 +4572,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches goto unlock; __get_cpu_var(vector_irq)[vector] = -1; -@@ -2258,28 +2578,45 @@ unlock: +@@ -2246,28 +2566,45 @@ unlock: irq_exit(); } @@ -4596,7 +4628,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_INTR_REMAP static void ack_x2apic_level(unsigned int irq) { -@@ -2290,11 +2627,14 @@ static void ack_x2apic_edge(unsigned int +@@ -2278,11 +2615,14 @@ static void ack_x2apic_edge(unsigned int { ack_x2APIC_irq(); } @@ -4612,7 +4644,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches move_native_irq(irq); ack_APIC_irq(); } -@@ -2303,18 +2643,21 @@ atomic_t irq_mis_count; +@@ -2291,18 +2631,21 @@ atomic_t irq_mis_count; static void ack_apic_level(unsigned int irq) { @@ -4637,7 +4669,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #endif -@@ -2338,7 +2681,8 @@ static void ack_apic_level(unsigned int +@@ -2326,7 +2669,8 @@ static void ack_apic_level(unsigned int * operation to prevent an edge-triggered interrupt escaping meanwhile. * The idea is from Manfred Spraul. 
--macro */ @@ -4647,7 +4679,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); #endif -@@ -2377,17 +2721,18 @@ static void ack_apic_level(unsigned int +@@ -2365,17 +2709,18 @@ static void ack_apic_level(unsigned int * accurate and is causing problems then it is a hardware bug * and you can go talk to the chipset vendor about it. */ @@ -4670,7 +4702,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches spin_unlock(&ioapic_lock); } #endif -@@ -2439,24 +2784,23 @@ static inline void init_IO_APIC_traps(vo +@@ -2427,24 +2772,23 @@ static inline void init_IO_APIC_traps(vo * Also, we've got to be careful not to trash gate * 0x80, because int 0x80 is hm, kind of importantish. ;) */ @@ -4700,7 +4732,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } } } -@@ -2482,7 +2826,7 @@ static void unmask_lapic_irq(unsigned in +@@ -2470,7 +2814,7 @@ static void unmask_lapic_irq(unsigned in apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); } @@ -4709,7 +4741,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { ack_APIC_irq(); } -@@ -2494,11 +2838,8 @@ static struct irq_chip lapic_chip __read +@@ -2482,11 +2826,8 @@ static struct irq_chip lapic_chip __read .ack = ack_lapic_irq, }; @@ -4722,7 +4754,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches desc->status &= ~IRQ_LEVEL; set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, "edge"); -@@ -2602,7 +2943,9 @@ int timer_through_8259 __initdata; +@@ -2590,7 +2931,9 @@ int timer_through_8259 __initdata; */ static inline void __init check_timer(void) { @@ -4733,7 +4765,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches int apic1, pin1, apic2, pin2; unsigned long flags; unsigned int ver; -@@ -2617,7 +2960,7 @@ static inline void __init check_timer(vo +@@ -2605,7 +2948,7 @@ static inline void __init check_timer(vo * get/set the timer IRQ vector: */ disable_8259A_irq(0); @@ -4742,7 +4774,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * As IRQ0 is to be enabled in the 8259A, the virtual -@@ -2668,10 +3011,10 @@ static inline void __init check_timer(vo +@@ -2656,10 +2999,10 @@ static inline void __init check_timer(vo * Ok, does IRQ0 through the IOAPIC work? */ if (no_pin1) { @@ -4755,7 +4787,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (timer_irq_works()) { if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); -@@ -2697,9 +3040,9 @@ static inline void __init check_timer(vo +@@ -2685,9 +3028,9 @@ static inline void __init check_timer(vo /* * legacy devices should be connected to IO APIC #0 */ @@ -4767,7 +4799,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches enable_8259A_irq(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "....... 
works.\n"); -@@ -2731,7 +3074,7 @@ static inline void __init check_timer(vo +@@ -2719,7 +3062,7 @@ static inline void __init check_timer(vo apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...\n"); @@ -4776,7 +4808,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ enable_8259A_irq(0); -@@ -2930,22 +3273,26 @@ unsigned int create_irq_nr(unsigned int +@@ -2918,22 +3261,26 @@ unsigned int create_irq_nr(unsigned int unsigned int irq; unsigned int new; unsigned long flags; @@ -4813,7 +4845,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches irq = new; break; } -@@ -2953,15 +3300,21 @@ unsigned int create_irq_nr(unsigned int +@@ -2941,15 +3288,21 @@ unsigned int create_irq_nr(unsigned int if (irq > 0) { dynamic_irq_init(irq); @@ -4836,7 +4868,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (irq == 0) irq = -1; -@@ -2972,14 +3325,22 @@ int create_irq(void) +@@ -2960,14 +3313,22 @@ int create_irq(void) void destroy_irq(unsigned int irq) { unsigned long flags; @@ -4859,8 +4891,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + __clear_irq_vector(irq, cfg); spin_unlock_irqrestore(&vector_lock, flags); } - #endif /* CONFIG_XEN */ -@@ -2993,16 +3354,13 @@ static int msi_compose_msg(struct pci_de + #endif /* !CONFIG_XEN */ +@@ -2981,16 +3342,13 @@ static int msi_compose_msg(struct pci_de struct irq_cfg *cfg; int err; unsigned dest; @@ -4880,7 +4912,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_INTR_REMAP if (irq_remapped(irq)) { -@@ -3056,64 +3414,48 @@ static int msi_compose_msg(struct pci_de +@@ -3044,64 +3402,48 @@ static int msi_compose_msg(struct pci_de } #ifdef CONFIG_SMP @@ -4958,7 +4990,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); -@@ -3127,16 +3469,10 @@ static void ir_set_msi_irq_affinity(unsi +@@ -3115,16 +3457,10 @@ static void ir_set_msi_irq_affinity(unsi * at the new destination. So, time to cleanup the previous * vector allocation. 
*/ @@ -4978,7 +5010,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif #endif /* CONFIG_SMP */ -@@ -3195,7 +3531,7 @@ static int msi_alloc_irte(struct pci_dev +@@ -3183,7 +3519,7 @@ static int msi_alloc_irte(struct pci_dev } #endif @@ -4987,7 +5019,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { int ret; struct msi_msg msg; -@@ -3204,7 +3540,7 @@ static int setup_msi_irq(struct pci_dev +@@ -3192,7 +3528,7 @@ static int setup_msi_irq(struct pci_dev if (ret < 0) return ret; @@ -4996,7 +5028,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches write_msi_msg(irq, &msg); #ifdef CONFIG_INTR_REMAP -@@ -3224,26 +3560,13 @@ static int setup_msi_irq(struct pci_dev +@@ -3212,26 +3548,13 @@ static int setup_msi_irq(struct pci_dev return 0; } @@ -5025,7 +5057,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches irq = create_irq_nr(irq_want); if (irq == 0) return -1; -@@ -3257,7 +3580,7 @@ int arch_setup_msi_irq(struct pci_dev *d +@@ -3245,7 +3568,7 @@ int arch_setup_msi_irq(struct pci_dev *d goto error; no_ir: #endif @@ -5034,7 +5066,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (ret < 0) { destroy_irq(irq); return ret; -@@ -3275,7 +3598,7 @@ int arch_setup_msi_irqs(struct pci_dev * +@@ -3263,7 +3586,7 @@ int arch_setup_msi_irqs(struct pci_dev * { unsigned int irq; int ret, sub_handle; @@ -5043,7 +5075,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches unsigned int irq_want; #ifdef CONFIG_INTR_REMAP -@@ -3283,10 +3606,11 @@ int arch_setup_msi_irqs(struct pci_dev * +@@ -3271,10 +3594,11 @@ int arch_setup_msi_irqs(struct pci_dev * int index = 0; #endif @@ -5058,7 +5090,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (irq == 0) return -1; #ifdef CONFIG_INTR_REMAP -@@ -3318,7 +3642,7 @@ int arch_setup_msi_irqs(struct pci_dev * +@@ -3306,7 +3630,7 @@ int arch_setup_msi_irqs(struct pci_dev * } no_ir: #endif @@ -5067,7 +5099,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (ret < 0) goto error; sub_handle++; -@@ -3337,24 +3661,18 @@ void arch_teardown_msi_irq(unsigned int +@@ -3325,24 +3649,18 @@ void arch_teardown_msi_irq(unsigned int #ifdef CONFIG_DMAR #ifdef CONFIG_SMP @@ -5097,7 +5129,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches dmar_msi_read(irq, &msg); -@@ -3364,9 +3682,8 @@ static void dmar_msi_set_affinity(unsign +@@ -3352,9 +3670,8 @@ static void dmar_msi_set_affinity(unsign msg.address_lo |= MSI_ADDR_DEST_ID(dest); dmar_msi_write(irq, &msg); @@ -5108,7 +5140,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif /* CONFIG_SMP */ struct irq_chip dmar_msi_type = { -@@ -3398,24 +3715,18 @@ int arch_setup_dmar_msi(unsigned int irq +@@ -3386,24 +3703,18 @@ int arch_setup_dmar_msi(unsigned int irq #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP @@ -5138,7 +5170,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches hpet_msi_read(irq, &msg); -@@ -3425,9 +3736,8 @@ static void hpet_msi_set_affinity(unsign +@@ -3413,9 +3724,8 @@ static void hpet_msi_set_affinity(unsign msg.address_lo |= MSI_ADDR_DEST_ID(dest); hpet_msi_write(irq, &msg); @@ -5149,7 +5181,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif /* CONFIG_SMP */ struct irq_chip hpet_msi_type = { -@@ 
-3480,28 +3790,21 @@ static void target_ht_irq(unsigned int i +@@ -3468,28 +3778,21 @@ static void target_ht_irq(unsigned int i write_ht_irq_msg(irq, &msg); } @@ -5184,7 +5216,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif static struct irq_chip ht_irq_chip = { -@@ -3519,17 +3822,14 @@ int arch_setup_ht_irq(unsigned int irq, +@@ -3507,17 +3810,14 @@ int arch_setup_ht_irq(unsigned int irq, { struct irq_cfg *cfg; int err; @@ -5205,7 +5237,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); -@@ -3565,7 +3865,7 @@ int arch_setup_ht_irq(unsigned int irq, +@@ -3553,7 +3853,7 @@ int arch_setup_ht_irq(unsigned int irq, int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset) { @@ -5214,7 +5246,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches struct irq_cfg *cfg; int mmr_pnode; unsigned long mmr_value; -@@ -3573,7 +3873,9 @@ int arch_enable_uv_irq(char *irq_name, u +@@ -3561,7 +3861,9 @@ int arch_enable_uv_irq(char *irq_name, u unsigned long flags; int err; @@ -5225,7 +5257,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (err != 0) return err; -@@ -3582,8 +3884,6 @@ int arch_enable_uv_irq(char *irq_name, u +@@ -3570,8 +3872,6 @@ int arch_enable_uv_irq(char *irq_name, u irq_name); spin_unlock_irqrestore(&vector_lock, flags); @@ -5234,7 +5266,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); -@@ -3594,7 +3894,7 @@ int arch_enable_uv_irq(char *irq_name, u +@@ -3582,7 +3882,7 @@ int arch_enable_uv_irq(char *irq_name, u entry->polarity = 0; entry->trigger = 0; entry->mask = 0; @@ -5243,7 +5275,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); -@@ -3635,10 +3935,29 @@ int __init io_apic_get_redir_entries (in +@@ -3623,10 +3923,29 @@ int __init io_apic_get_redir_entries (in return reg_01.bits.entries; } @@ -5275,7 +5307,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* -------------------------------------------------------------------------- ACPI-based IOAPIC Configuration -@@ -3738,6 +4057,10 @@ int __init io_apic_get_version(int ioapi +@@ -3726,6 +4045,10 @@ int __init io_apic_get_version(int ioapi int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) { @@ -5286,7 +5318,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_XEN if (irq < PIRQ_BASE || irq >= PIRQ_BASE + NR_PIRQS) { apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ %d\n", -@@ -3752,13 +4075,21 @@ int io_apic_set_pci_routing (int ioapic, +@@ -3740,13 +4063,21 @@ int io_apic_set_pci_routing (int ioapic, return -EINVAL; } @@ -5311,7 +5343,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return 0; } -@@ -3797,7 +4128,7 @@ void __init setup_ioapic_dest(void) +@@ -3785,7 +4116,7 @@ void __init setup_ioapic_dest(void) int pin, ioapic, irq, irq_entry; struct irq_desc *desc; struct irq_cfg *cfg; @@ -5320,7 +5352,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (skip_ioapic_setup == 
1) return; -@@ -3813,9 +4144,10 @@ void __init setup_ioapic_dest(void) +@@ -3801,9 +4132,10 @@ void __init setup_ioapic_dest(void) * when you have too many devices, because at that time only boot * cpu is online. */ @@ -5333,7 +5365,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches irq_trigger(irq_entry), irq_polarity(irq_entry)); continue; -@@ -3825,19 +4157,18 @@ void __init setup_ioapic_dest(void) +@@ -3813,19 +4145,18 @@ void __init setup_ioapic_dest(void) /* * Honour affinities which have been set in early boot */ @@ -5356,7 +5388,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } } -@@ -3886,7 +4217,6 @@ void __init ioapic_init_mappings(void) +@@ -3874,7 +4205,6 @@ void __init ioapic_init_mappings(void) struct resource *ioapic_res; int i; @@ -5364,8 +5396,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches ioapic_res = ioapic_setup_resources(); for (i = 0; i < nr_ioapics; i++) { if (smp_found_config) { ---- head-2010-04-29.orig/arch/x86/kernel/ioport-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/ioport-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ioport-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ioport-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -36,7 +36,7 @@ static void set_bitmap(unsigned long *bi */ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) @@ -5375,30 +5407,18 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches struct physdev_set_iobitmap set_iobitmap; if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) ---- head-2010-04-29.orig/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:17:58.000000000 +0100 -@@ -150,31 +150,28 @@ static inline void __send_IPI_dest_field - /* - * This is only used on smaller machines. 
- */ +--- head-2011-03-17.orig/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:56:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:56:59.000000000 +0100 +@@ -40,21 +40,29 @@ void send_IPI_self(int vector) + __send_IPI_shortcut(APIC_DEST_SELF, vector); + } + -void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) +void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) { - #ifndef CONFIG_XEN -- unsigned long mask = cpus_addr(cpumask)[0]; -+ unsigned long mask = cpumask_bits(cpumask)[0]; - #else - cpumask_t mask; unsigned int cpu; - #endif - unsigned long flags; - local_irq_save(flags); - #ifndef CONFIG_XEN -- WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); -+ WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); - __send_IPI_dest_field(mask, vector); - #else - cpus_andnot(mask, cpumask, cpu_online_map); - WARN_ON(!cpus_empty(mask)); - for_each_online_cpu(cpu) @@ -5407,61 +5427,28 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); + for_each_cpu_and(cpu, cpumask, cpu_online_mask) + __send_IPI_one(cpu, vector); - #endif - local_irq_restore(flags); } -void send_IPI_mask_sequence(cpumask_t mask, int vector) +void send_IPI_mask_sequence(const struct cpumask *mask, int vector) { - #ifndef CONFIG_XEN - unsigned long flags; -@@ -187,18 +184,37 @@ void send_IPI_mask_sequence(cpumask_t ma - */ - - local_irq_save(flags); -- for_each_possible_cpu(query_cpu) { -- if (cpu_isset(query_cpu, mask)) { -- __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), -- vector); -- } -- } -+ for_each_cpu(query_cpu, mask) -+ __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); - local_irq_restore(flags); - #else send_IPI_mask_bitmask(mask, vector); - #endif } +void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) +{ -+ unsigned long flags; + unsigned int query_cpu; + unsigned int this_cpu = smp_processor_id(); + -+ /* See Hack comment above */ -+ -+ local_irq_save(flags); -+#ifndef CONFIG_XEN -+ for_each_cpu(query_cpu, mask) -+ if (query_cpu != this_cpu) -+ __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), -+ vector); -+#else + WARN_ON(!cpumask_subset(mask, cpu_online_mask)); + for_each_cpu_and(query_cpu, mask, cpu_online_mask) + if (query_cpu != this_cpu) + __send_IPI_one(query_cpu, vector); -+#endif -+ local_irq_restore(flags); +} + - #ifndef CONFIG_XEN - /* must come after the send_IPI functions above for inlining */ - static int convert_apicid_to_cpu(int apic_id) ---- head-2010-04-29.orig/arch/x86/kernel/irq-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/irq-xen.c 2010-03-24 15:17:58.000000000 +0100 + #endif +--- head-2011-03-17.orig/arch/x86/kernel/irq-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -5,10 +5,11 @@ #include #include @@ -5475,7 +5462,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches atomic_t irq_err_count; -@@ -43,57 +44,57 @@ void ack_bad_irq(unsigned int irq) +@@ -43,62 +44,62 @@ void ack_bad_irq(unsigned int irq) /* * /proc/interrupts printing: */ @@ -5513,6 +5500,12 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); seq_printf(p, " TLB shootdowns\n"); + #else +- seq_printf(p, "LCK: "); ++ seq_printf(p, "%*s: ", prec, "LCK"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", 
irq_stats(j)->irq_lock_count); + seq_printf(p, " Spinlock wakeups\n"); #endif #endif #ifdef CONFIG_X86_MCE @@ -5544,7 +5537,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif return 0; } -@@ -101,25 +102,31 @@ static int show_other_interrupts(struct +@@ -106,25 +107,31 @@ static int show_other_interrupts(struct int show_interrupts(struct seq_file *p, void *v) { unsigned long flags, any_count = 0; @@ -5579,7 +5572,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches spin_lock_irqsave(&desc->lock, flags); #ifndef CONFIG_SMP any_count = kstat_irqs(i); -@@ -131,7 +138,7 @@ int show_interrupts(struct seq_file *p, +@@ -136,7 +143,7 @@ int show_interrupts(struct seq_file *p, if (!action && !any_count) goto out; @@ -5588,8 +5581,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else ---- head-2010-04-29.orig/arch/x86/kernel/ldt-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/ldt-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ldt-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ldt-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -12,8 +12,8 @@ #include #include @@ -5600,9 +5593,27 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #include #include #include ---- head-2010-04-29.orig/arch/x86/kernel/machine_kexec_32.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/machine_kexec_32.c 2010-03-24 15:17:58.000000000 +0100 -@@ -123,13 +123,7 @@ void machine_kexec_setup_load_arg(xen_ke +--- head-2011-03-17.orig/arch/x86/kernel/machine_kexec_32.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/machine_kexec_32.c 2011-02-01 14:42:26.000000000 +0100 +@@ -46,6 +46,17 @@ static int machine_kexec_alloc_page_tabl + { + image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); + #ifdef CONFIG_X86_PAE ++#ifdef CONFIG_XEN /* machine address must fit into xki->page_list[PA_PGD] */ ++ if (image->arch.pgd) { ++ struct page *pg = virt_to_page(image->arch.pgd); ++ ++ if (xen_limit_pages_to_max_mfn(pg, 0, BITS_PER_LONG) < 0) { ++ image->arch.pgd = NULL; ++ __free_page(pg); ++ return -ENOMEM; ++ } ++ } ++#endif + image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL); + image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL); + #endif +@@ -123,13 +134,7 @@ void machine_kexec_setup_load_arg(xen_ke memcpy(control_page, relocate_kernel, PAGE_SIZE); xki->page_list[PA_CONTROL_PAGE] = __ma(control_page); @@ -5617,8 +5628,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (image->type == KEXEC_TYPE_DEFAULT) xki->page_list[PA_SWAP_PAGE] = page_to_phys(image->swap_page); ---- head-2010-04-29.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -2,7 +2,7 @@ * Intel Multiprocessor Specification 1.1 and 1.4 * compliant MP-table parsing routines. 
@@ -5864,7 +5875,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { int i; -@@ -242,57 +241,55 @@ static void __init MP_intsrc_info(struct +@@ -242,59 +241,57 @@ static void __init MP_intsrc_info(struct #endif @@ -5926,8 +5937,10 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches printk(KERN_INFO "MPTABLE: Product ID: %s\n", str); + #ifndef CONFIG_XEN - printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); + printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic); + #endif return 1; } @@ -5937,12 +5950,13 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { char str[16]; char oem[10]; -@@ -317,14 +314,14 @@ static int __init smp_read_mpc(struct mp - #endif +@@ -320,15 +317,15 @@ static int __init smp_read_mpc(struct mp + #ifndef CONFIG_XEN /* save the local APIC address, it might be non-default */ if (!acpi_lapic) - mp_lapic_addr = mpc->mpc_lapic; + mp_lapic_addr = mpc->lapic; + #endif if (early) return 1; @@ -5956,7 +5970,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } /* -@@ -333,12 +330,11 @@ static int __init smp_read_mpc(struct mp +@@ -337,12 +334,11 @@ static int __init smp_read_mpc(struct mp if (x86_quirks->mpc_record) *x86_quirks->mpc_record = 0; @@ -5971,7 +5985,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* ACPI may have already provided this data */ if (!acpi_lapic) MP_processor_info(m); -@@ -348,8 +344,7 @@ static int __init smp_read_mpc(struct mp +@@ -352,8 +348,7 @@ static int __init smp_read_mpc(struct mp } case MP_BUS: { @@ -5981,7 +5995,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_X86_IO_APIC MP_bus_info(m); #endif -@@ -360,30 +355,28 @@ static int __init smp_read_mpc(struct mp +@@ -364,30 +359,28 @@ static int __init smp_read_mpc(struct mp case MP_IOAPIC: { #ifdef CONFIG_X86_IO_APIC @@ -6020,7 +6034,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches MP_lintsrc_info(m); mpt += sizeof(*m); count += sizeof(*m); -@@ -394,8 +387,8 @@ static int __init smp_read_mpc(struct mp +@@ -398,8 +391,8 @@ static int __init smp_read_mpc(struct mp printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); printk(KERN_ERR "type %x\n", *mpt); print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, @@ -6031,7 +6045,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches break; } if (x86_quirks->mpc_record) -@@ -426,16 +419,16 @@ static int __init ELCR_trigger(unsigned +@@ -430,16 +423,16 @@ static int __init ELCR_trigger(unsigned static void __init construct_default_ioirq_mptable(int mpc_default_type) { @@ -6054,7 +6068,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * If true, we have an ISA/PCI system with no IRQ entries -@@ -478,30 +471,30 @@ static void __init construct_default_ioi +@@ -482,30 +475,30 @@ static void __init construct_default_ioi * irqflag field (level sensitive, active high polarity). 
*/ if (ELCR_trigger(i)) @@ -6096,7 +6110,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches switch (mpc_default_type) { default: printk(KERN_ERR "???\nUnknown standard configuration %d\n", -@@ -509,29 +502,29 @@ static void __init construct_ioapic_tabl +@@ -513,29 +506,29 @@ static void __init construct_ioapic_tabl /* fall through */ case 1: case 5: @@ -6136,7 +6150,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches MP_ioapic_info(&ioapic); /* -@@ -545,8 +538,8 @@ static inline void __init construct_ioap +@@ -549,8 +542,8 @@ static inline void __init construct_ioap static inline void __init construct_default_ISA_mptable(int mpc_default_type) { @@ -6147,7 +6161,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches int linttypes[2] = { mp_ExtINT, mp_NMI }; int i; -@@ -558,30 +551,30 @@ static inline void __init construct_defa +@@ -564,30 +557,30 @@ static inline void __init construct_defa /* * 2 CPUs, numbered 0 & 1. */ @@ -6193,7 +6207,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches MP_lintsrc_info(&lintsrc); } } -@@ -595,26 +588,23 @@ static void __init __get_smp_config(unsi +@@ -606,26 +599,23 @@ void __init get_smp_config(void) { struct intel_mp_floating *mpf = mpf_found; @@ -6231,7 +6245,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); -@@ -669,15 +659,15 @@ static void __init __get_smp_config(unsi +@@ -682,15 +672,15 @@ void __init get_smp_config(void) * ISA defaults and hope it will work. */ if (!mp_irq_entries) { @@ -6251,7 +6265,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches MP_bus_info(&bus); construct_default_ioirq_mptable(0); -@@ -823,14 +813,14 @@ void __init find_smp_config(void) +@@ -839,14 +829,14 @@ void __init find_smp_config(void) #ifdef CONFIG_X86_IO_APIC static u8 __initdata irq_used[MAX_IRQ_SOURCES]; @@ -6269,7 +6283,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return 0; /* not legacy */ -@@ -842,9 +832,9 @@ static int __init get_MP_intsrc_index(s +@@ -858,9 +848,9 @@ static int __init get_MP_intsrc_index(s if (mp_irqs[i].mp_irqflag != 0x0f) continue; @@ -6281,7 +6295,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches continue; if (irq_used[i]) { /* already claimed */ -@@ -860,10 +850,10 @@ static int __init get_MP_intsrc_index(s +@@ -876,10 +866,10 @@ static int __init get_MP_intsrc_index(s #define SPARE_SLOT_NUM 20 @@ -6294,7 +6308,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches unsigned long mpc_new_phys, unsigned long mpc_new_length) { -@@ -875,36 +865,33 @@ static int __init replace_intsrc_all(st +@@ -891,36 +881,33 @@ static int __init replace_intsrc_all(st int count = sizeof(*mpc); unsigned char *mpt = ((unsigned char *)mpc) + count; @@ -6338,7 +6352,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches apic_printk(APIC_VERBOSE, "OLD "); print_MP_intsrc_info(m); -@@ -925,14 +912,14 @@ static int __init replace_intsrc_all(st +@@ -941,14 +928,14 @@ static int __init replace_intsrc_all(st nr_m_spare++; } #endif @@ -6357,7 +6371,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mpt += sizeof(*m); count += sizeof(*m); break; -@@ -942,7 +929,7 @@ static int __init replace_intsrc_all(st +@@ 
-958,7 +945,7 @@ static int __init replace_intsrc_all(st printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); printk(KERN_ERR "type %x\n", *mpt); print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, @@ -6366,7 +6380,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches goto out; } } -@@ -964,9 +951,8 @@ static int __init replace_intsrc_all(st +@@ -980,9 +967,8 @@ static int __init replace_intsrc_all(st assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); m_spare[nr_m_spare] = NULL; } else { @@ -6378,7 +6392,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (!mpc_new_phys) { printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count); } else { -@@ -978,17 +964,16 @@ static int __init replace_intsrc_all(st +@@ -994,17 +980,16 @@ static int __init replace_intsrc_all(st } } assign_to_mpc_intsrc(&mp_irqs[i], m); @@ -6400,7 +6414,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return 0; } -@@ -1034,8 +1019,7 @@ static int __init update_mp_table(void) +@@ -1050,8 +1035,7 @@ static int __init update_mp_table(void) char str[16]; char oem[10]; struct intel_mp_floating *mpf; @@ -6410,7 +6424,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (!enable_update_mptable) return 0; -@@ -1061,7 +1045,7 @@ static int __init update_mp_table(void) +@@ -1077,7 +1061,7 @@ static int __init update_mp_table(void) printk(KERN_INFO "mpf: %lx\n", (long)arbitrary_virt_to_machine(mpf)); printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); @@ -6419,7 +6433,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mpc_new_phys = 0; printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n", mpc_new_length); -@@ -1070,10 +1054,10 @@ static int __init update_mp_table(void) +@@ -1086,10 +1070,10 @@ static int __init update_mp_table(void) if (!mpc_new_phys) { unsigned char old, new; /* check if we can change the postion */ @@ -6434,7 +6448,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (old == new) { printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n"); return 0; -@@ -1085,7 +1069,7 @@ static int __init update_mp_table(void) +@@ -1101,7 +1085,7 @@ static int __init update_mp_table(void) mpc_new_bus = phys_to_machine(mpc_new_phys); mpf->mpf_physptr = mpc_new_bus; mpc_new = phys_to_virt(mpc_new_phys); @@ -6443,8 +6457,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mpc = mpc_new; /* check if we can modify that */ if (mpc_new_bus - mpf->mpf_physptr) { ---- head-2010-04-29.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -6,6 +6,7 @@ #include #include @@ -6531,8 +6545,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches forbid_dac = 1; } } ---- head-2010-04-29.orig/arch/x86/kernel/process-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:00:33.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 
16:05:57.000000000 +0100 @@ -1,13 +1,17 @@ #include #include @@ -6551,7 +6565,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); -@@ -99,6 +103,9 @@ static inline int hlt_use_halt(void) +@@ -70,6 +74,9 @@ EXPORT_SYMBOL(pm_idle); */ void xen_idle(void) { @@ -6561,7 +6575,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we -@@ -111,11 +118,27 @@ void xen_idle(void) +@@ -82,11 +89,27 @@ void xen_idle(void) else local_irq_enable(); current_thread_info()->status |= TS_POLLING; @@ -6589,7 +6603,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static void do_nothing(void *unused) { } -@@ -149,24 +172,37 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); +@@ -120,24 +143,37 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); */ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { @@ -6627,7 +6641,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } else local_irq_enable(); } -@@ -179,9 +215,13 @@ static void mwait_idle(void) +@@ -150,9 +186,13 @@ static void mwait_idle(void) */ static void poll_idle(void) { @@ -6641,7 +6655,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #ifndef CONFIG_XEN -@@ -267,7 +307,7 @@ static void c1e_idle(void) +@@ -238,7 +278,7 @@ static void c1e_idle(void) rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); if (lo & K8_INTP_C1E_ACTIVE_MASK) { c1e_detected = 1; @@ -6650,8 +6664,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mark_tsc_unstable("TSC halt in AMD C1E"); printk(KERN_INFO "System has AMD C1E enabled\n"); set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E); ---- head-2010-04-29.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_32-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:34:28.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:36:38.000000000 +0100 @@ -38,11 +38,13 @@ #include #include @@ -6668,7 +6682,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #include #include #include -@@ -59,10 +61,9 @@ +@@ -57,10 +59,9 @@ #include #include @@ -6680,7 +6694,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); asmlinkage void cstar_ret_from_fork(void) __asm__("cstar_ret_from_fork"); -@@ -108,9 +109,6 @@ void cpu_idle(void) +@@ -106,9 +107,6 @@ void cpu_idle(void) check_pgt_cache(); rmb(); @@ -6690,7 +6704,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (cpu_is_offline(cpu)) play_dead(); -@@ -208,7 +206,7 @@ extern void kernel_thread_helper(void); +@@ -206,7 +204,7 @@ extern void kernel_thread_helper(void); /* * Create a kernel thread */ @@ -6699,7 +6713,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { struct pt_regs regs; -@@ -247,14 +245,8 @@ void exit_thread(void) +@@ -245,14 +243,8 @@ void exit_thread(void) t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); } @@ -6716,7 +6730,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } void flush_thread(void) -@@ -267,7 +259,7 @@ void flush_thread(void) +@@ -265,7 +257,7 
@@ void flush_thread(void) tsk->thread.debugreg3 = 0; tsk->thread.debugreg6 = 0; tsk->thread.debugreg7 = 0; @@ -6725,7 +6739,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches clear_tsk_thread_flag(tsk, TIF_DEBUG); /* * Forget coprocessor state.. -@@ -294,9 +286,9 @@ void prepare_to_copy(struct task_struct +@@ -292,9 +284,9 @@ void prepare_to_copy(struct task_struct int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, unsigned long unused, @@ -6737,7 +6751,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches struct task_struct *tsk; int err; -@@ -340,13 +332,19 @@ int copy_thread(int nr, unsigned long cl +@@ -338,13 +330,19 @@ int copy_thread(int nr, unsigned long cl kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } @@ -6758,7 +6772,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches regs->fs = 0; set_fs(USER_DS); regs->ds = __USER_DS; -@@ -420,47 +418,18 @@ int set_tsc_mode(unsigned int val) +@@ -418,47 +416,18 @@ int set_tsc_mode(unsigned int val) return 0; } @@ -6810,7 +6824,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { -@@ -481,14 +450,6 @@ __switch_to_xtra(struct task_struct *pre +@@ -479,14 +448,6 @@ __switch_to_xtra(struct task_struct *pre else hard_enable_TSC(); } @@ -6825,7 +6839,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } /* -@@ -518,7 +479,8 @@ __switch_to_xtra(struct task_struct *pre +@@ -516,7 +477,8 @@ __switch_to_xtra(struct task_struct *pre * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ @@ -6835,7 +6849,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; -@@ -698,7 +660,7 @@ asmlinkage int sys_vfork(struct pt_regs +@@ -696,7 +658,7 @@ asmlinkage int sys_vfork(struct pt_regs asmlinkage int sys_execve(struct pt_regs regs) { int error; @@ -6844,8 +6858,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches filename = getname((char __user *) regs.bx); error = PTR_ERR(filename); ---- head-2010-04-29.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process_64-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:34:22.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:36:43.000000000 +0100 @@ -42,6 +42,8 @@ #include #include @@ -6855,15 +6869,15 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #include #include -@@ -59,6 +61,7 @@ +@@ -57,6 +59,7 @@ #include #include #include +#include - #include + asmlinkage extern void ret_from_fork(void); -@@ -158,14 +161,18 @@ void __show_regs(struct pt_regs *regs, i +@@ -154,14 +157,18 @@ void __show_regs(struct pt_regs *regs, i unsigned long d0, d1, d2, d3, d6, d7; unsigned int fsindex, gsindex; unsigned int ds, cs, es; @@ -6884,7 +6898,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); printk_address(regs->ip, 1); printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, -@@ -256,14 +263,8 @@ void exit_thread(void) +@@ -252,14 +259,8 @@ void exit_thread(void) #endif 
t->io_bitmap_max = 0; } @@ -6901,7 +6915,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } void xen_load_gs_index(unsigned gs) -@@ -399,6 +400,11 @@ int copy_thread(int nr, unsigned long cl +@@ -395,6 +396,11 @@ int copy_thread(int nr, unsigned long cl } p->thread.iopl = current->thread.iopl; @@ -6913,7 +6927,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches err = 0; out: if (err && p->thread.io_bitmap_ptr) { -@@ -495,35 +501,14 @@ static inline void __switch_to_xtra(stru +@@ -491,35 +497,14 @@ static inline void __switch_to_xtra(stru struct task_struct *next_p) { struct thread_struct *prev, *next; @@ -6953,7 +6967,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { -@@ -544,14 +529,6 @@ static inline void __switch_to_xtra(stru +@@ -540,14 +525,6 @@ static inline void __switch_to_xtra(stru else hard_enable_TSC(); } @@ -6968,7 +6982,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } /* -@@ -562,8 +539,9 @@ static inline void __switch_to_xtra(stru +@@ -558,8 +535,9 @@ static inline void __switch_to_xtra(stru * - could test fs/gs bitsliced * * Kprobes not supported here. Set the probe on schedule instead. @@ -6979,19 +6993,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; ---- head-2010-04-29.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/quirks-xen.c 2010-03-24 15:17:58.000000000 +0100 -@@ -169,6 +169,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I - ich_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, - ich_force_enable_hpet); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, -+ ich_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, - ich_force_enable_hpet); - ---- head-2010-04-29.orig/arch/x86/kernel/setup-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/setup-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:22:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:22:27.000000000 +0100 @@ -93,11 +93,13 @@ #include #include @@ -7203,7 +7206,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* List of systems that have known low memory corruption BIOS problems */ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { -@@ -1023,15 +894,25 @@ void __init setup_arch(char **cmdline_p) +@@ -1025,15 +896,25 @@ void __init setup_arch(char **cmdline_p) finish_e820_parsing(); @@ -7230,7 +7233,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifndef CONFIG_XEN /* after parse_early_param, so could debug it */ -@@ -1039,8 +920,6 @@ void __init setup_arch(char **cmdline_p) +@@ -1041,8 +922,6 @@ void __init setup_arch(char **cmdline_p) insert_resource(&iomem_resource, &data_resource); insert_resource(&iomem_resource, &bss_resource); @@ -7239,7 +7242,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef CONFIG_X86_32 if (ppro_with_ram_bug()) { -@@ -1295,7 +1174,7 @@ void __init setup_arch(char **cmdline_p) +@@ -1297,7 
+1176,7 @@ void __init setup_arch(char **cmdline_p) ioapic_init_mappings(); /* need to wait for io_apic is mapped */ @@ -7248,8 +7251,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches kvm_guest_init(); ---- head-2010-04-29.orig/arch/x86/kernel/smp-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/smp-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/smp-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/smp-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -1,7 +1,7 @@ /* * Intel SMP support routines. @@ -7294,7 +7297,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } /* -@@ -165,11 +152,7 @@ void xen_smp_send_stop(void) +@@ -165,22 +152,14 @@ void xen_smp_send_stop(void) */ irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) { @@ -7307,9 +7310,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return IRQ_HANDLED; } -@@ -177,11 +160,7 @@ irqreturn_t smp_call_function_interrupt( + irqreturn_t smp_call_function_interrupt(int irq, void *dev_id) { - irq_enter(); generic_smp_call_function_interrupt(); -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_call_count++; @@ -7317,12 +7319,12 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - add_pda(irq_call_count, 1); -#endif + inc_irq_stat(irq_call_count); - irq_exit(); return IRQ_HANDLED; -@@ -191,11 +170,7 @@ irqreturn_t smp_call_function_single_int + } +@@ -188,11 +167,7 @@ irqreturn_t smp_call_function_interrupt( + irqreturn_t smp_call_function_single_interrupt(int irq, void *dev_id) { - irq_enter(); generic_smp_call_function_single_interrupt(); -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_call_count++; @@ -7330,12 +7332,12 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - add_pda(irq_call_count, 1); -#endif + inc_irq_stat(irq_call_count); - irq_exit(); return IRQ_HANDLED; ---- head-2010-04-29.orig/arch/x86/kernel/time-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/time-xen.c 2010-05-11 17:14:09.000000000 +0200 -@@ -454,11 +454,7 @@ irqreturn_t timer_interrupt(int irq, voi + } +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-02-01 14:42:26.000000000 +0100 +@@ -455,11 +455,7 @@ irqreturn_t timer_interrupt(int irq, voi struct vcpu_runstate_info runstate; /* Keep nmi watchdog up to date */ @@ -7348,7 +7350,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Here we are in the timer irq handler. We just have irqs locally -@@ -518,7 +514,6 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -521,7 +517,6 @@ irqreturn_t timer_interrupt(int irq, voi /* * Account stolen ticks. @@ -7356,7 +7358,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches * ensures that the ticks are accounted as stolen. */ stolen = runstate.time[RUNSTATE_runnable] -@@ -531,12 +526,11 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -534,12 +529,11 @@ irqreturn_t timer_interrupt(int irq, voi do_div(stolen, NS_PER_TICK); per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK; per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK; @@ -7370,7 +7372,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches * ensures that the ticks are accounted as idle/wait. 
*/ blocked = runstate.time[RUNSTATE_blocked] -@@ -548,18 +542,23 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -551,18 +545,23 @@ irqreturn_t timer_interrupt(int irq, voi do_div(blocked, NS_PER_TICK); per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK; per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK; @@ -7398,7 +7400,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } /* Offlined for more than a few seconds? Avoid lockup warnings. */ -@@ -788,7 +786,7 @@ static void stop_hz_timer(void) +@@ -791,7 +790,7 @@ static void stop_hz_timer(void) unsigned long j; int rc; @@ -7407,7 +7409,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* See matching smp_mb in rcu_start_batch in rcupdate.c. These mbs */ /* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a */ -@@ -804,7 +802,7 @@ static void stop_hz_timer(void) +@@ -807,7 +806,7 @@ static void stop_hz_timer(void) local_softirq_pending() || (j = get_next_timer_interrupt(jiffies), time_before_eq(j, jiffies))) { @@ -7416,7 +7418,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches j = jiffies + 1; } -@@ -835,7 +833,7 @@ static void start_hz_timer(void) +@@ -838,7 +837,7 @@ static void start_hz_timer(void) } #endif BUG_ON(rc); @@ -7425,8 +7427,23 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } void xen_safe_halt(void) ---- head-2010-04-29.orig/arch/x86/kernel/traps-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/traps-xen.c 2010-03-24 15:17:58.000000000 +0100 +@@ -848,14 +847,12 @@ void xen_safe_halt(void) + HYPERVISOR_block(); + start_hz_timer(); + } +-EXPORT_SYMBOL(xen_safe_halt); + + void xen_halt(void) + { + if (irqs_disabled()) + VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL)); + } +-EXPORT_SYMBOL(xen_halt); + + /* No locking required. Interrupts are disabled on all CPUs. */ + void time_resume(void) +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -20,7 +20,6 @@ #include #include @@ -7654,8 +7671,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #else math_state_restore(); ---- head-2010-04-29.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -17,6 +17,9 @@ * want per guest time just set the kernel.vsyscall64 sysctl to 0. 
*/ @@ -7683,8 +7700,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches base = __vsyscall_gtod_data.clock.cycle_last; mask = __vsyscall_gtod_data.clock.mask; mult = __vsyscall_gtod_data.clock.mult; ---- head-2010-04-29.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/fault-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -53,7 +53,7 @@ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) @@ -7821,9 +7838,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches do_sigbus: up_read(&mm->mmap_sem); ---- head-2010-04-29.orig/arch/x86/mm/hypervisor.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/hypervisor.c 2010-03-24 15:17:58.000000000 +0100 -@@ -79,12 +79,12 @@ static void multicall_failed(const multi +--- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-01 14:42:26.000000000 +0100 +@@ -78,12 +78,12 @@ static void multicall_failed(const multi BUG(); } @@ -7839,18 +7856,18 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches lazy->nr_mc = 0; @@ -112,6 +112,11 @@ int xen_multicall_flush(bool ret_last) { - return 0; } -+ + +void xen_multicall_flush(bool force) { + if (force || use_lazy_mmu_mode()) + _xen_multicall_flush(false); +} - EXPORT_SYMBOL(xen_multicall_flush); - ++ int xen_multi_update_va_mapping(unsigned long va, pte_t pte, -@@ -130,7 +135,7 @@ int xen_multi_update_va_mapping(unsigned + unsigned long uvmf) + { +@@ -128,7 +133,7 @@ int xen_multi_update_va_mapping(unsigned #endif if (unlikely(lazy->nr_mc == NR_MC)) @@ -7859,7 +7876,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mc = lazy->mc + lazy->nr_mc++; mc->op = __HYPERVISOR_update_va_mapping; -@@ -169,7 +174,7 @@ int xen_multi_mmu_update(mmu_update_t *s +@@ -167,7 +172,7 @@ int xen_multi_mmu_update(mmu_update_t *s merge = lazy->nr_mc && !commit && mmu_may_merge(mc - 1, __HYPERVISOR_mmu_update, domid); if (unlikely(lazy->nr_mc == NR_MC) && !merge) { @@ -7868,7 +7885,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mc = lazy->mc; commit = count > NR_MMU || success_count; } -@@ -207,7 +212,7 @@ int xen_multi_mmu_update(mmu_update_t *s +@@ -205,7 +210,7 @@ int xen_multi_mmu_update(mmu_update_t *s break; } @@ -7877,7 +7894,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } int xen_multi_mmuext_op(struct mmuext_op *src, unsigned int count, -@@ -291,7 +296,7 @@ int xen_multi_mmuext_op(struct mmuext_op +@@ -289,7 +294,7 @@ int xen_multi_mmuext_op(struct mmuext_op merge = lazy->nr_mc && !commit && mmu_may_merge(mc - 1, __HYPERVISOR_mmuext_op, domid); if (unlikely(lazy->nr_mc == NR_MC) && !merge) { @@ -7886,7 +7903,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches mc = lazy->mc; commit = count > NR_MMUEXT || success_count; } -@@ -338,7 +343,7 @@ int xen_multi_mmuext_op(struct mmuext_op +@@ -336,7 +341,7 @@ int xen_multi_mmuext_op(struct mmuext_op break; } @@ -7895,8 +7912,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } void xen_l1_entry_update(pte_t *ptr, pte_t val) ---- head-2010-04-29.orig/arch/x86/mm/init_32-xen.c 2010-03-24 
15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/init_32-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -71,7 +71,7 @@ static unsigned long __initdata table_to static int __initdata after_init_bootmem; @@ -8110,8 +8127,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #endif ---- head-2010-04-29.orig/arch/x86/mm/init_64-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/init_64-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -841,7 +841,7 @@ static void __init init_gbpages(void) #endif } @@ -8157,8 +8174,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches pci_iommu_alloc(); /* clear_bss() already clear the empty_zero_page */ ---- head-2010-04-29.orig/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -17,9 +17,21 @@ */ @@ -8197,9 +8214,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); pgprot_val(prot) |= _PAGE_IOMAP; ---- head-2010-04-29.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/ioremap-xen.c 2010-03-24 15:17:58.000000000 +0100 -@@ -293,25 +293,6 @@ int page_is_ram(unsigned long pagenr) +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:07.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:20.000000000 +0100 +@@ -274,25 +274,6 @@ int page_is_ram(unsigned long pagenr) return 0; } @@ -8225,7 +8242,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Fix up the linear direct mapping of the kernel to avoid cache attribute * conflicts. -@@ -402,7 +383,8 @@ static void __iomem *__ioremap_caller(re +@@ -383,7 +364,8 @@ static void __iomem *__ioremap_caller(re * Check if the request spans more than any BAR in the iomem resource * tree. */ @@ -8235,7 +8252,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Don't allow anybody to remap normal RAM that we're using.. 
-@@ -746,38 +728,10 @@ void __init early_ioremap_init(void) +@@ -727,38 +709,10 @@ void __init early_ioremap_init(void) } } @@ -8274,8 +8291,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static void __init __early_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags) ---- head-2010-04-29.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pageattr-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -524,22 +524,28 @@ static int split_large_page(pte_t *kpte, set_pte(&pbase[i], pfn_pte_ma(mfn, ref_prot)); @@ -8422,8 +8439,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches out: return ret; } ---- head-2010-04-29.orig/arch/x86/mm/pat-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/mm/pat-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pat-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -11,6 +11,7 @@ #include #include @@ -8770,8 +8787,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) /* get Nth element of the linked list */ ---- head-2010-04-29.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/arch/x86/pci/irq-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -16,8 +16,7 @@ #include #include @@ -8972,8 +8989,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } return 0; } ---- head-2010-04-29.orig/arch/x86/pci/pcifront.c 2009-03-18 10:39:31.000000000 +0100 -+++ head-2010-04-29/arch/x86/pci/pcifront.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/pci/pcifront.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/pcifront.c 2011-02-01 14:42:26.000000000 +0100 @@ -8,8 +8,8 @@ #include #include @@ -8984,8 +9001,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static int pcifront_enable_irq(struct pci_dev *dev) { ---- head-2010-04-29.orig/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -349,7 +349,7 @@ int __init sysenter_setup(void) } @@ -8995,140 +9012,31 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { struct mm_struct *mm = current->mm; unsigned long addr; ---- head-2010-04-29.orig/drivers/acpi/Kconfig 2010-03-24 14:36:44.000000000 +0100 -+++ head-2010-04-29/drivers/acpi/Kconfig 2010-03-24 15:17:58.000000000 +0100 -@@ -9,7 +9,7 @@ menuconfig ACPI - depends on PCI - depends on PM - select PNP +--- head-2011-03-17.orig/drivers/acpi/Kconfig 2011-01-31 14:42:03.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/Kconfig 2011-02-01 14:42:26.000000000 +0100 +@@ -196,7 +196,7 @@ config ACPI_DOCK + config ACPI_PROCESSOR + tristate "Processor" + 
select THERMAL - select CPU_IDLE + select CPU_IDLE if !PROCESSOR_EXTERNAL_CONTROL default y help - Advanced Configuration and Power Interface (ACPI) support for ---- head-2010-04-29.orig/drivers/acpi/processor_extcntl.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/drivers/acpi/processor_extcntl.c 2010-03-24 15:17:58.000000000 +0100 -@@ -230,3 +230,117 @@ err_out: - kfree(perf); - return ret; - } -+ -+/* -+ * Objects and functions removed in native 2.6.29, and thus moved here. -+ */ -+#ifdef CONFIG_SMP -+static void smp_callback(void *v) -+{ -+ /* we already woke the CPU up, nothing more to do */ -+} -+ -+/* -+ * This function gets called when a part of the kernel has a new latency -+ * requirement. This means we need to get all processors out of their C-state, -+ * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that -+ * wakes them all right up. -+ */ -+static int acpi_processor_latency_notify(struct notifier_block *b, -+ unsigned long l, void *v) -+{ -+ smp_call_function(smp_callback, NULL, 1); -+ return NOTIFY_OK; -+} -+ -+struct notifier_block acpi_processor_latency_notifier = { -+ .notifier_call = acpi_processor_latency_notify, -+}; -+#endif -+ -+/* -+ * bm_history -- bit-mask with a bit per jiffy of bus-master activity -+ * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms -+ * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms -+ * 100 HZ: 0x0000000F: 4 jiffies = 40ms -+ * reduce history for more aggressive entry into C3 -+ */ -+static unsigned int bm_history __read_mostly = -+ (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); -+module_param(bm_history, uint, 0644); -+ -+int acpi_processor_set_power_policy(struct acpi_processor *pr) -+{ -+ unsigned int i; -+ unsigned int state_is_set = 0; -+ struct acpi_processor_cx *lower = NULL; -+ struct acpi_processor_cx *higher = NULL; -+ struct acpi_processor_cx *cx; -+ -+ -+ if (!pr) -+ return -EINVAL; -+ -+ /* -+ * This function sets the default Cx state policy (OS idle handler). -+ * Our scheme is to promote quickly to C2 but more conservatively -+ * to C3. We're favoring C2 for its characteristics of low latency -+ * (quick response), good power savings, and ability to allow bus -+ * mastering activity. Note that the Cx state policy is completely -+ * customizable and can be altered dynamically. 
-+ */ -+ -+ /* startup state */ -+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { -+ cx = &pr->power.states[i]; -+ if (!cx->valid) -+ continue; -+ -+ if (!state_is_set) -+ pr->power.state = cx; -+ state_is_set++; -+ break; -+ } -+ -+ if (!state_is_set) -+ return -ENODEV; -+ -+ /* demotion */ -+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { -+ cx = &pr->power.states[i]; -+ if (!cx->valid) -+ continue; -+ -+ if (lower) { -+ cx->demotion.state = lower; -+ cx->demotion.threshold.ticks = cx->latency_ticks; -+ cx->demotion.threshold.count = 1; -+ if (cx->type == ACPI_STATE_C3) -+ cx->demotion.threshold.bm = bm_history; -+ } -+ -+ lower = cx; -+ } -+ -+ /* promotion */ -+ for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { -+ cx = &pr->power.states[i]; -+ if (!cx->valid) -+ continue; -+ -+ if (higher) { -+ cx->promotion.state = higher; -+ cx->promotion.threshold.ticks = cx->latency_ticks; -+ if (cx->type >= ACPI_STATE_C2) -+ cx->promotion.threshold.count = 4; -+ else -+ cx->promotion.threshold.count = 10; -+ if (higher->type == ACPI_STATE_C3) -+ cx->promotion.threshold.bm = bm_history; -+ } -+ -+ higher = cx; -+ } -+ -+ return 0; -+} ---- head-2010-04-29.orig/drivers/acpi/processor_idle.c 2010-04-15 09:55:39.000000000 +0200 -+++ head-2010-04-29/drivers/acpi/processor_idle.c 2010-04-15 10:06:51.000000000 +0200 -@@ -123,6 +123,7 @@ static struct dmi_system_id __cpuinitdat + This driver installs ACPI as the idle handler for Linux and uses +--- head-2011-03-17.orig/drivers/acpi/processor_core.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_core.c 2011-02-01 14:42:26.000000000 +0100 +@@ -192,7 +192,7 @@ int acpi_get_cpuid(acpi_handle handle, i + * stub enforcing a 1:1 mapping, we keep it undefined to catch bad + * uses. Return as if there was a 1:1 mapping. + */ +- if (apic_id < NR_CPUS && cpu_possible(apic_id)) ++ if (apic_id < nr_cpu_ids && cpu_possible(apic_id)) + return apic_id; + #endif + return -1; +--- head-2011-03-17.orig/drivers/acpi/processor_idle.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_idle.c 2011-02-01 14:42:26.000000000 +0100 +@@ -125,6 +125,7 @@ static struct dmi_system_id __cpuinitdat }; @@ -9136,7 +9044,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Callers should disable interrupts before the call and enable * interrupts after return. 
-@@ -141,6 +142,7 @@ static void acpi_safe_halt(void) +@@ -143,6 +144,7 @@ static void acpi_safe_halt(void) } current_thread_info()->status |= TS_POLLING; } @@ -9144,7 +9052,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #ifdef ARCH_APICTIMER_STOPS_ON_C3 -@@ -211,7 +213,7 @@ static void lapic_timer_state_broadcast( +@@ -213,7 +215,7 @@ static void lapic_timer_state_broadcast( static void lapic_timer_check_state(int state, struct acpi_processor *pr, struct acpi_processor_cx *cstate) { } static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } @@ -9153,70 +9061,34 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches struct acpi_processor_cx *cx, int broadcast) { -@@ -259,7 +261,8 @@ int acpi_processor_resume(struct acpi_de +@@ -261,7 +263,7 @@ int acpi_processor_resume(struct acpi_de return 0; } --#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) -+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) \ -+ && !defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) +-#if defined(CONFIG_X86) ++#if defined(CONFIG_X86) && !defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) static void tsc_check_state(int state) { switch (boot_cpu_data.x86_vendor) { -@@ -600,7 +603,11 @@ static void acpi_processor_power_verify_ - */ - cx->valid = 1; +@@ -621,7 +623,9 @@ static int acpi_processor_power_verify(s + unsigned int i; + unsigned int working = 0; +#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL - cx->latency_ticks = cx->latency; -+#else -+ cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); + pr->power.timer_broadcast_on_state = INT_MAX; +#endif - /* - * On older chipsets, BM_RLD needs to be set - * in order for Bus Master activity to wake the -@@ -633,7 +640,11 @@ static int acpi_processor_power_verify(s - if (!cx->address) - break; - cx->valid = 1; -+#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL - cx->latency_ticks = cx->latency; /* Normalize latency */ -+#else -+ cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); -+#endif - break; - - case ACPI_STATE_C3: -@@ -676,6 +687,20 @@ static int acpi_processor_get_power_info - - pr->power.count = acpi_processor_power_verify(pr); -+#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+ /* -+ * Set Default Policy -+ * ------------------ -+ * Now that we know which states are supported, set the default -+ * policy. Note that this policy can be changed dynamically -+ * (e.g. encourage deeper sleeps to conserve battery life when -+ * not on AC). 
-+ */ -+ result = acpi_processor_set_power_policy(pr); -+ if (result) -+ return result; -+#endif -+ - /* - * if one state of type C2 or C3 is available, mark this - * CPU as being "idle manageable" -@@ -773,6 +798,7 @@ static const struct file_operations acpi - }; - #endif + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { + struct acpi_processor_cx *cx = &pr->power.states[i]; +@@ -693,6 +697,7 @@ static int acpi_processor_get_power_info + return 0; + } +#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL /** * acpi_idle_bm_check - checks if bus master activity was detected */ -@@ -1142,6 +1168,13 @@ static int acpi_processor_setup_cpuidle( +@@ -1064,6 +1069,13 @@ static int acpi_processor_setup_cpuidle( return 0; } @@ -9230,33 +9102,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches int acpi_processor_cst_has_changed(struct acpi_processor *pr) { int ret = 0; -@@ -1208,6 +1241,10 @@ int __cpuinit acpi_processor_power_init( - "ACPI: processor limited to max C-state %d\n", - max_cstate); - first_run++; -+#if defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) && defined(CONFIG_SMP) -+ pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, -+ &acpi_processor_latency_notifier); -+#endif - } - - if (!pr) -@@ -1267,5 +1304,12 @@ int acpi_processor_power_exit(struct acp - acpi_device_dir(device)); - #endif - -+#if defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) && defined(CONFIG_SMP) -+ /* Unregister the idle handler when processor #0 is removed. */ -+ if (pr->id == 0) -+ pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, -+ &acpi_processor_latency_notifier); -+#endif -+ - return 0; - } ---- head-2010-04-29.orig/drivers/gpu/drm/i915/i915_drv.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/drivers/gpu/drm/i915/i915_drv.c 2010-04-29 09:52:19.000000000 +0200 -@@ -533,7 +533,7 @@ static struct drm_driver driver = { +--- head-2011-03-17.orig/drivers/gpu/drm/i915/i915_drv.c 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/i915/i915_drv.c 2011-03-17 14:13:15.000000000 +0100 +@@ -722,7 +722,7 @@ static struct drm_driver driver = { .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, @@ -9265,23 +9113,23 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches .poll = drm_poll, .fasync = drm_fasync, .read = drm_read, ---- head-2010-04-29.orig/drivers/gpu/drm/i915/i915_drv.h 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/drivers/gpu/drm/i915/i915_drv.h 2010-04-29 09:52:20.000000000 +0200 -@@ -926,6 +926,11 @@ int i915_gem_idle(struct drm_device *dev - uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - uint32_t flush_domains); - int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); +--- head-2011-03-17.orig/drivers/gpu/drm/i915/i915_drv.h 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/i915/i915_drv.h 2011-03-17 14:13:13.000000000 +0100 +@@ -1162,6 +1162,11 @@ int __must_check i915_do_wait_request(st + uint32_t seqno, + bool interruptible, + struct intel_ring_buffer *ring); +#ifdef CONFIG_XEN +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma); +#else +#define i915_gem_mmap drm_gem_mmap +#endif int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); - int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, - int write); ---- head-2010-04-29.orig/drivers/gpu/drm/i915/i915_gem.c 2010-04-29 09:29:49.000000000 +0200 -+++ head-2010-04-29/drivers/gpu/drm/i915/i915_gem.c 2010-04-15 
10:06:44.000000000 +0200 -@@ -1146,6 +1146,17 @@ i915_gem_mmap_ioctl(struct drm_device *d + int __must_check + i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, +--- head-2011-03-17.orig/drivers/gpu/drm/i915/i915_gem.c 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/i915/i915_gem.c 2011-02-08 10:05:05.000000000 +0100 +@@ -1152,6 +1152,17 @@ i915_gem_mmap_ioctl(struct drm_device *d return 0; } @@ -9299,9 +9147,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /** * i915_gem_fault - fault a page into the GTT * vma: VMA in question ---- head-2010-04-29.orig/drivers/oprofile/buffer_sync.c 2010-04-15 09:51:29.000000000 +0200 -+++ head-2010-04-29/drivers/oprofile/buffer_sync.c 2010-04-15 10:06:28.000000000 +0200 -@@ -537,7 +537,6 @@ void sync_buffer(int cpu) +--- head-2011-03-17.orig/drivers/oprofile/buffer_sync.c 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-03-17/drivers/oprofile/buffer_sync.c 2011-02-01 14:42:26.000000000 +0100 +@@ -538,7 +538,6 @@ void sync_buffer(int cpu) int cpu_mode = CPU_MODE_KERNEL; sync_buffer_state state = sb_buffer_start; unsigned int i; @@ -9309,7 +9157,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches unsigned long available; unsigned long flags; struct op_entry entry; -@@ -562,15 +561,6 @@ void sync_buffer(int cpu) +@@ -563,15 +562,6 @@ void sync_buffer(int cpu) if (!sample) break; @@ -9325,7 +9173,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (is_code(sample->eip)) { flags = sample->event; if (flags & TRACE_BEGIN) { -@@ -596,8 +586,11 @@ void sync_buffer(int cpu) +@@ -597,8 +587,11 @@ void sync_buffer(int cpu) add_user_ctx_switch(new, cookie); } #ifdef CONFIG_XEN @@ -9339,9 +9187,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif if (op_cpu_buffer_get_size(&entry)) add_data(&entry, mm); ---- head-2010-04-29.orig/drivers/oprofile/cpu_buffer.c 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/drivers/oprofile/cpu_buffer.c 2010-03-24 15:17:58.000000000 +0100 -@@ -444,34 +444,15 @@ void oprofile_add_pc(unsigned long pc, i +--- head-2011-03-17.orig/drivers/oprofile/cpu_buffer.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/oprofile/cpu_buffer.c 2011-02-01 14:42:26.000000000 +0100 +@@ -417,34 +417,15 @@ void oprofile_add_pc(unsigned long pc, i #ifdef CONFIG_XEN /* @@ -9378,7 +9226,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #endif -@@ -502,17 +483,18 @@ fail: +@@ -475,17 +456,18 @@ fail: #ifdef CONFIG_XEN int oprofile_add_domain_switch(int32_t domain_id) { @@ -9405,9 +9253,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches current_domain = domain_id; ---- head-2010-04-29.orig/drivers/pci/msi-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/drivers/pci/msi-xen.c 2010-03-24 15:17:58.000000000 +0100 -@@ -763,30 +763,21 @@ void pci_no_msi(void) +--- head-2011-03-17.orig/drivers/pci/msi-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/pci/msi-xen.c 2011-02-01 14:42:26.000000000 +0100 +@@ -724,30 +724,21 @@ void pci_no_msi(void) pci_msi_enable = 0; } @@ -9450,9 +9298,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - return; - msi_acpi_init(); -} ---- head-2010-04-29.orig/drivers/xen/Kconfig 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-04-29/drivers/xen/Kconfig 2010-03-24 15:18:46.000000000 
+0100 -@@ -388,6 +388,7 @@ config XEN_DEV_EVTCHN +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-02 15:36:33.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-02 15:37:07.000000000 +0100 +@@ -393,6 +393,7 @@ config XEN_BACKEND config XENFS tristate "Xen filesystem" @@ -9460,18 +9308,18 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches default y help The xen filesystem provides a way for domains to share ---- head-2010-04-29.orig/drivers/xen/Makefile 2010-04-19 14:52:08.000000000 +0200 -+++ head-2010-04-29/drivers/xen/Makefile 2010-04-19 14:52:22.000000000 +0200 -@@ -13,6 +13,7 @@ obj-$(CONFIG_XEN) += features.o util.o - obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) - obj-$(CONFIG_XEN_XENCOMM) += xencomm.o - obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) -+obj-$(CONFIG_XENFS) += xenfs/ +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-02-24 14:09:54.000000000 +0100 +@@ -15,6 +15,7 @@ obj-$(CONFIG_XEN) += features.o $(xen- + obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) + obj-$(CONFIG_XEN_XENCOMM) += xencomm.o + obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) ++obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ - obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ ---- head-2010-04-29.orig/drivers/xen/balloon/sysfs.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/balloon/sysfs.c 2010-03-24 15:17:58.000000000 +0100 + obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ blktap2-new/ +--- head-2011-03-17.orig/drivers/xen/balloon/sysfs.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/balloon/sysfs.c 2011-02-01 14:42:26.000000000 +0100 @@ -67,7 +67,7 @@ static ssize_t store_target_kb(struct sy struct sysdev_attribute *attr, const char *buf, size_t count) @@ -9535,9 +9383,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches }; static struct attribute *balloon_info_attrs[] = { ---- head-2010-04-29.orig/drivers/xen/blkfront/vbd.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/drivers/xen/blkfront/vbd.c 2010-03-24 15:17:58.000000000 +0100 -@@ -308,6 +308,10 @@ xlvbd_init_blk_queue(struct gendisk *gd, +--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-02-01 14:42:26.000000000 +0100 +@@ -305,6 +305,10 @@ xlvbd_init_blk_queue(struct gendisk *gd, if (rq == NULL) return -1; @@ -9548,9 +9396,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, 512); ---- head-2010-04-29.orig/drivers/xen/core/cpu_hotplug.c 2009-04-07 13:58:48.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/cpu_hotplug.c 2010-03-24 15:17:58.000000000 +0100 -@@ -10,10 +10,10 @@ +--- head-2011-03-17.orig/drivers/xen/core/cpu_hotplug.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/cpu_hotplug.c 2011-02-01 14:42:26.000000000 +0100 +@@ -11,10 +11,10 @@ * Set of CPUs that remote admin software will allow us to bring online. * Notified to us via xenbus. 
*/ @@ -9563,20 +9411,21 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static int local_cpu_hotplug_request(void) { -@@ -40,10 +40,10 @@ static void vcpu_hotplug(unsigned int cp +@@ -41,11 +41,11 @@ static void vcpu_hotplug(unsigned int cp } if (strcmp(state, "online") == 0) { - cpu_set(cpu, xenbus_allowed_cpumask); + cpumask_set_cpu(cpu, xenbus_allowed_cpumask); - (void)cpu_up(cpu); + if (!cpu_up(cpu) && dev) + kobject_uevent(&dev->kobj, KOBJ_ONLINE); } else if (strcmp(state, "offline") == 0) { - cpu_clear(cpu, xenbus_allowed_cpumask); + cpumask_clear_cpu(cpu, xenbus_allowed_cpumask); - (void)cpu_down(cpu); + if (!cpu_down(cpu) && dev) + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); } else { - printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", -@@ -75,7 +75,7 @@ static int smpboot_cpu_notify(struct not +@@ -78,7 +78,7 @@ static int smpboot_cpu_notify(struct not * as it's always executed from within a stopmachine kthread. */ if ((action == CPU_DOWN_PREPARE) && local_cpu_hotplug_request()) @@ -9585,7 +9434,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return NOTIFY_OK; } -@@ -156,21 +156,26 @@ int cpu_up_check(unsigned int cpu) +@@ -157,21 +157,26 @@ int cpu_up_check(unsigned int cpu) int rc = 0; if (local_cpu_hotplug_request()) { @@ -9593,8 +9442,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - if (!cpu_isset(cpu, xenbus_allowed_cpumask)) { + cpumask_set_cpu(cpu, local_allowed_cpumask); + if (!cpumask_test_cpu(cpu, xenbus_allowed_cpumask)) { - printk("%s: attempt to bring up CPU %u disallowed by " - "remote admin.\n", __FUNCTION__, cpu); + pr_warning("%s: attempt to bring up CPU %u disallowed " + "by remote admin.\n", __FUNCTION__, cpu); rc = -EBUSY; } - } else if (!cpu_isset(cpu, local_allowed_cpumask) || @@ -9618,8 +9467,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + BUG(); + cpumask_setall(local_allowed_cpumask); } ---- head-2010-04-29.orig/drivers/xen/core/evtchn.c 2010-04-23 15:18:24.000000000 +0200 -+++ head-2010-04-29/drivers/xen/core/evtchn.c 2010-04-23 15:19:25.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-01 14:42:26.000000000 +0100 @@ -36,6 +36,7 @@ #include #include @@ -9715,8 +9564,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - #ifdef CONFIG_SMP - static u8 cpu_evtchn[NR_EVENT_CHANNELS]; -@@ -157,8 +180,12 @@ static void init_evtchn_cpu_bindings(voi + #if CONFIG_NR_CPUS <= 256 +@@ -161,8 +184,12 @@ static void init_evtchn_cpu_bindings(voi int i; /* By default all event channels notify CPU#0. */ @@ -9730,8 +9579,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + } memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); - memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); -@@ -232,7 +259,7 @@ static DEFINE_PER_CPU(unsigned int, curr + for_each_possible_cpu(i) +@@ -239,7 +266,7 @@ static DEFINE_PER_CPU(unsigned int, curr static DEFINE_PER_CPU(unsigned int, current_l2i); /* NB. Interrupts are disabled on entry. 
*/ @@ -9740,7 +9589,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches { unsigned long l1, l2; unsigned long masked_l1, masked_l2; -@@ -320,14 +347,25 @@ asmlinkage void evtchn_do_upcall(struct +@@ -341,14 +368,25 @@ asmlinkage void evtchn_do_upcall(struct irq_exit(); } @@ -9761,15 +9610,15 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + if (!cfg->bindcount) { + desc->status |= IRQ_NOPROBE; + set_irq_chip_and_handler_name(irq, &dynirq_chip, -+ handle_level_irq, -+ "level"); ++ handle_fasteoi_irq, ++ "fasteoi"); return irq; + } + } if (!warned) { warned = 1; -@@ -345,14 +383,15 @@ static int bind_caller_port_to_irq(unsig +@@ -366,14 +404,15 @@ static int bind_caller_port_to_irq(unsig spin_lock(&irq_mapping_update_lock); if ((irq = evtchn_to_irq[caller_port]) == -1) { @@ -9788,7 +9637,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches out: spin_unlock(&irq_mapping_update_lock); -@@ -367,7 +406,7 @@ static int bind_local_port_to_irq(unsign +@@ -388,7 +427,7 @@ static int bind_local_port_to_irq(unsign BUG_ON(evtchn_to_irq[local_port] != -1); @@ -9797,7 +9646,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches struct evtchn_close close = { .port = local_port }; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); -@@ -375,8 +414,8 @@ static int bind_local_port_to_irq(unsign +@@ -396,8 +435,8 @@ static int bind_local_port_to_irq(unsign } evtchn_to_irq[local_port] = irq; @@ -9808,7 +9657,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches out: spin_unlock(&irq_mapping_update_lock); -@@ -420,7 +459,7 @@ static int bind_virq_to_irq(unsigned int +@@ -441,7 +480,7 @@ static int bind_virq_to_irq(unsigned int spin_lock(&irq_mapping_update_lock); if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) { @@ -9817,7 +9666,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches goto out; bind_virq.virq = virq; -@@ -431,14 +470,14 @@ static int bind_virq_to_irq(unsigned int +@@ -452,14 +491,14 @@ static int bind_virq_to_irq(unsigned int evtchn = bind_virq.port; evtchn_to_irq[evtchn] = irq; @@ -9834,7 +9683,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches out: spin_unlock(&irq_mapping_update_lock); -@@ -453,7 +492,7 @@ static int bind_ipi_to_irq(unsigned int +@@ -474,7 +513,7 @@ static int bind_ipi_to_irq(unsigned int spin_lock(&irq_mapping_update_lock); if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) { @@ -9843,7 +9692,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches goto out; bind_ipi.vcpu = cpu; -@@ -463,14 +502,14 @@ static int bind_ipi_to_irq(unsigned int +@@ -484,14 +523,14 @@ static int bind_ipi_to_irq(unsigned int evtchn = bind_ipi.port; evtchn_to_irq[evtchn] = irq; @@ -9860,7 +9709,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches out: spin_unlock(&irq_mapping_update_lock); -@@ -485,7 +524,7 @@ static void unbind_from_irq(unsigned int +@@ -506,7 +545,7 @@ static void unbind_from_irq(unsigned int spin_lock(&irq_mapping_update_lock); @@ -9869,7 +9718,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches close.port = evtchn; if ((type_from_irq(irq) != IRQT_CALLER_PORT) && HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) -@@ -508,11 +547,15 @@ static void unbind_from_irq(unsigned int +@@ -529,11 +568,15 @@ static void unbind_from_irq(unsigned int 
bind_evtchn_to_cpu(evtchn, 0); evtchn_to_irq[evtchn] = -1; @@ -9886,7 +9735,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } spin_unlock(&irq_mapping_update_lock); -@@ -664,10 +707,9 @@ static void rebind_irq_to_cpu(unsigned i +@@ -685,10 +728,9 @@ static void rebind_irq_to_cpu(unsigned i rebind_evtchn_to_cpu(evtchn, tcpu); } @@ -9899,7 +9748,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } #endif -@@ -835,7 +877,7 @@ static void enable_pirq(unsigned int irq +@@ -854,7 +896,7 @@ static void enable_pirq(unsigned int irq evtchn_to_irq[evtchn] = irq; bind_evtchn_to_cpu(evtchn, 0); @@ -9908,7 +9757,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches out: pirq_unmask_and_notify(evtchn, irq); -@@ -857,7 +899,7 @@ static void disable_pirq(unsigned int ir +@@ -884,7 +926,7 @@ static void shutdown_pirq(unsigned int i bind_evtchn_to_cpu(evtchn, 0); evtchn_to_irq[evtchn] = -1; @@ -9916,8 +9765,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + irq_cfg(irq)->info = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0); } - static unsigned int startup_pirq(unsigned int irq) -@@ -1023,7 +1065,7 @@ static void restore_cpu_virqs(unsigned i + static void unmask_pirq(unsigned int irq) +@@ -1009,7 +1051,7 @@ static void restore_cpu_virqs(unsigned i if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; @@ -9926,7 +9775,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Get a new binding from Xen. */ bind_virq.virq = virq; -@@ -1035,7 +1077,7 @@ static void restore_cpu_virqs(unsigned i +@@ -1021,7 +1063,7 @@ static void restore_cpu_virqs(unsigned i /* Record the new mapping. */ evtchn_to_irq[evtchn] = irq; @@ -9935,7 +9784,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches bind_evtchn_to_cpu(evtchn, cpu); /* Ready for use. */ -@@ -1052,7 +1094,7 @@ static void restore_cpu_ipis(unsigned in +@@ -1038,7 +1080,7 @@ static void restore_cpu_ipis(unsigned in if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) continue; @@ -9944,7 +9793,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Get a new binding from Xen. */ bind_ipi.vcpu = cpu; -@@ -1063,7 +1105,7 @@ static void restore_cpu_ipis(unsigned in +@@ -1049,7 +1091,7 @@ static void restore_cpu_ipis(unsigned in /* Record the new mapping. */ evtchn_to_irq[evtchn] = irq; @@ -9953,7 +9802,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches bind_evtchn_to_cpu(evtchn, cpu); /* Ready for use. */ -@@ -1075,6 +1117,7 @@ static void restore_cpu_ipis(unsigned in +@@ -1061,6 +1103,7 @@ static void restore_cpu_ipis(unsigned in void irq_resume(void) { unsigned int cpu, irq, evtchn; @@ -9961,7 +9810,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches init_evtchn_cpu_bindings(); -@@ -1091,12 +1134,17 @@ void irq_resume(void) +@@ -1077,12 +1120,17 @@ void irq_resume(void) mask_evtchn(evtchn); /* Check that no PIRQs are still bound. 
*/ @@ -9983,7 +9832,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) evtchn_to_irq[evtchn] = -1; -@@ -1108,10 +1156,56 @@ void irq_resume(void) +@@ -1094,10 +1142,56 @@ void irq_resume(void) } #endif @@ -10007,7 +9856,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + desc->chip_data = kzalloc(sizeof(struct irq_cfg), GFP_ATOMIC); + } + if (!desc->chip_data) { -+ printk(KERN_ERR "cannot alloc irq_cfg\n"); ++ pr_emerg("cannot alloc irq_cfg\n"); + BUG(); + } + @@ -10041,16 +9890,16 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #else #define identity_mapped_irq(irq) (1) #endif -@@ -1121,7 +1215,7 @@ void evtchn_register_pirq(int irq) +@@ -1107,7 +1201,7 @@ void evtchn_register_pirq(int irq) BUG_ON(irq < PIRQ_BASE || irq - PIRQ_BASE >= NR_PIRQS); if (identity_mapped_irq(irq) || type_from_irq(irq) != IRQT_UNBOUND) return; - irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0); + irq_cfg(irq)->info = mk_irq_info(IRQT_PIRQ, irq, 0); - set_irq_chip_and_handler_name(irq, &pirq_chip, handle_level_irq, - "level"); + set_irq_chip_and_handler_name(irq, &pirq_chip, handle_fasteoi_irq, + "fasteoi"); } -@@ -1134,12 +1228,17 @@ int evtchn_map_pirq(int irq, int xen_pir +@@ -1120,12 +1214,17 @@ int evtchn_map_pirq(int irq, int xen_pir irq = PIRQ_BASE + NR_PIRQS - 1; spin_lock(&irq_alloc_lock); do { @@ -10070,7 +9919,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches break; } } while (--irq >= PIRQ_BASE); -@@ -1158,7 +1257,7 @@ int evtchn_map_pirq(int irq, int xen_pir +@@ -1144,7 +1243,7 @@ int evtchn_map_pirq(int irq, int xen_pir * then causes a warning in dynamic_irq_cleanup(). */ set_irq_chip_and_handler(irq, NULL, NULL); @@ -10079,7 +9928,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return 0; } else if (type_from_irq(irq) != IRQT_PIRQ || index_from_irq(irq) != xen_pirq) { -@@ -1195,23 +1294,17 @@ void __init xen_init_IRQ(void) +@@ -1181,23 +1280,17 @@ void __init xen_init_IRQ(void) for (i = 0; i < NR_EVENT_CHANNELS; i++) mask_evtchn(i); @@ -10094,7 +9943,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - irq_to_desc(i)->status |= IRQ_NOPROBE; set_irq_chip_and_handler_name(i, &dynirq_chip, - handle_level_irq, "level"); + handle_fasteoi_irq, "fasteoi"); } - /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. 
*/ @@ -10107,8 +9956,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (!identity_mapped_irq(i)) continue; ---- head-2010-04-29.orig/drivers/xen/core/machine_reboot.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/machine_reboot.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/core/machine_reboot.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/machine_reboot.c 2011-02-01 14:42:26.000000000 +0100 @@ -19,6 +19,9 @@ #include @@ -10119,7 +9968,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Power off function, if any -@@ -84,7 +87,7 @@ static void post_suspend(int suspend_can +@@ -79,7 +82,7 @@ static void post_suspend(int suspend_can pfn_to_mfn(xen_start_info->console.domU.mfn); } else { #ifdef CONFIG_SMP @@ -10128,7 +9977,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif for_each_possible_cpu(i) setup_runstate_area(i); -@@ -222,6 +225,12 @@ int __xen_suspend(int fast_suspend, void +@@ -219,6 +222,12 @@ int __xen_suspend(int fast_suspend, void if (num_possible_cpus() == 1) fast_suspend = 0; @@ -10141,7 +9990,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches suspend.fast_suspend = fast_suspend; suspend.resume_notifier = resume_notifier; -@@ -248,6 +257,8 @@ int __xen_suspend(int fast_suspend, void +@@ -245,6 +254,8 @@ int __xen_suspend(int fast_suspend, void if (!fast_suspend) smp_resume(); @@ -10150,11 +9999,11 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return 0; } ---- head-2010-04-29.orig/drivers/xen/core/smpboot.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/smpboot.c 2010-03-24 15:17:58.000000000 +0100 -@@ -36,11 +36,7 @@ extern void smp_trap_init(trap_info_t *) - /* Number of siblings per CPU package */ - int smp_num_siblings = 1; +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-02-01 14:42:26.000000000 +0100 +@@ -33,11 +33,7 @@ extern void failsafe_callback(void); + extern void system_call(void); + extern void smp_trap_init(trap_info_t *); -cpumask_t cpu_online_map; -EXPORT_SYMBOL(cpu_online_map); @@ -10165,7 +10014,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); -@@ -76,10 +72,14 @@ void __init prefill_possible_map(void) +@@ -64,10 +60,14 @@ void __init prefill_possible_map(void) #endif rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); if (rc >= 0) { @@ -10180,8 +10029,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + ++total_cpus; } - static inline void -@@ -203,7 +203,7 @@ static void __cpuinit cpu_initialize_con + static int __cpuinit xen_smp_intr_init(unsigned int cpu) +@@ -167,7 +167,7 @@ static void __cpuinit cpu_initialize_con struct task_struct *idle = idle_task(cpu); @@ -10190,7 +10039,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return; spin_lock(&ctxt_lock); -@@ -284,13 +284,15 @@ void __init smp_prepare_cpus(unsigned in +@@ -237,13 +237,15 @@ void __init smp_prepare_cpus(unsigned in if (xen_smp_intr_init(0)) BUG(); @@ -10202,14 +10051,14 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Restrict the possible_map according to max_cpus. 
*/ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { - for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--) -+ for (cpu = nr_cpu_ids-1; !cpumask_test_cpu(cpu, cpu_possible_mask); cpu--) ++ for (cpu = nr_cpu_ids-1; !cpu_possible(cpu); cpu--) continue; - cpu_clear(cpu, cpu_possible_map); + set_cpu_possible(cpu, false); } for_each_possible_cpu (cpu) { -@@ -328,10 +330,8 @@ void __init smp_prepare_cpus(unsigned in +@@ -278,10 +280,8 @@ void __init smp_prepare_cpus(unsigned in #ifdef CONFIG_HOTPLUG_CPU if (is_initial_xendomain()) @@ -10221,7 +10070,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } init_xenbus_allowed_cpumask(); -@@ -364,14 +364,17 @@ void __init smp_prepare_boot_cpu(void) +@@ -314,22 +314,24 @@ void __init smp_prepare_boot_cpu(void) */ static int __init initialize_cpu_present_map(void) { @@ -10235,15 +10084,13 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } core_initcall(initialize_cpu_present_map); - int __cpuexit __cpu_disable(void) + int __cpuinit __cpu_disable(void) { - cpumask_t map = cpu_online_map; unsigned int cpu = smp_processor_id(); if (cpu == 0) -@@ -379,9 +382,8 @@ int __cpuexit __cpu_disable(void) - - remove_siblinginfo(cpu); + return -EBUSY; - cpu_clear(cpu, map); - fixup_irqs(map); @@ -10253,16 +10100,16 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return 0; } -@@ -424,7 +426,7 @@ int __cpuinit __cpu_up(unsigned int cpu) +@@ -369,7 +371,7 @@ int __cpuinit __cpu_up(unsigned int cpu) + if (rc) return rc; - } - cpu_set(cpu, cpu_online_map); + set_cpu_online(cpu, true); rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL); BUG_ON(rc); -@@ -436,7 +438,7 @@ void __ref play_dead(void) +@@ -381,7 +383,7 @@ void __ref play_dead(void) { idle_task_exit(); local_irq_disable(); @@ -10271,21 +10118,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches preempt_enable_no_resched(); VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL)); #ifdef CONFIG_HOTPLUG_CPU ---- head-2010-04-29.orig/drivers/xen/core/spinlock.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/drivers/xen/core/spinlock.c 2010-03-24 15:17:58.000000000 +0100 -@@ -173,7 +173,8 @@ bool xen_spin_wait(raw_spinlock_t *lock, - current_vcpu_info()->evtchn_upcall_mask = upcall_mask; - - rc = !xen_test_irq_pending(irq); -- kstat_this_cpu.irqs[irq] += !rc; -+ if (!rc) -+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); - } while (spinning.prev || rc); - - /* ---- head-2010-04-29.orig/drivers/xen/netback/interface.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/interface.c 2010-03-24 15:17:58.000000000 +0100 -@@ -176,6 +176,14 @@ static struct ethtool_ops network_ethtoo +--- head-2011-03-17.orig/drivers/xen/netback/interface.c 2011-03-17 14:12:41.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/interface.c 2011-02-17 10:15:18.000000000 +0100 +@@ -222,6 +222,15 @@ static struct ethtool_ops network_ethtoo .get_strings = netbk_get_strings, }; @@ -10294,28 +10129,36 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + .ndo_stop = net_close, + .ndo_start_xmit = netif_be_start_xmit, + .ndo_change_mtu = netbk_change_mtu, -+ .ndo_get_stats = netif_be_get_stats, ++ .ndo_set_mac_address = eth_mac_addr, ++ .ndo_validate_addr = eth_validate_addr, +}; + netif_t *netif_alloc(struct device *parent, domid_t domid, unsigned int handle) { int err = 0; -@@ -210,11 +218,7 @@ netif_t 
*netif_alloc(struct device *pare +@@ -258,10 +267,7 @@ netif_t *netif_alloc(struct device *pare init_timer(&netif->tx_queue_timeout); - dev->hard_start_xmit = netif_be_start_xmit; -- dev->get_stats = netif_be_get_stats; - dev->open = net_open; - dev->stop = net_close; - dev->change_mtu = netbk_change_mtu; -+ dev->netdev_ops = &netif_be_netdev_ops; - dev->features = NETIF_F_IP_CSUM; ++ dev->netdev_ops = &netif_be_netdev_ops; - SET_ETHTOOL_OPS(dev, &network_ethtool_ops); ---- head-2010-04-29.orig/drivers/xen/netback/loopback.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/loopback.c 2010-03-24 15:17:58.000000000 +0100 -@@ -201,19 +201,21 @@ static void loopback_set_multicast_list( + netif_set_features(netif); + +--- head-2011-03-17.orig/drivers/xen/netback/loopback.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/loopback.c 2011-03-01 11:52:05.000000000 +0100 +@@ -155,7 +155,6 @@ static int loopback_start_xmit(struct sk + + skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */ + skb->protocol = eth_type_trans(skb, dev); +- dev->last_rx = jiffies; + + /* Flush netfilter context: rx'ed skbuffs not expected to have any. */ + nf_reset(skb); +@@ -194,6 +193,14 @@ static void loopback_set_multicast_list( { } @@ -10325,19 +10168,18 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + .ndo_start_xmit = loopback_start_xmit, + .ndo_set_multicast_list = loopback_set_multicast_list, + .ndo_change_mtu = NULL, /* allow arbitrary mtu */ -+ .ndo_get_stats = loopback_get_stats, +}; + - static void loopback_construct(struct net_device *dev, struct net_device *lo) + static void loopback_construct(struct net_device *dev, struct net_device *lo, + int loop_idx) { - struct net_private *np = netdev_priv(dev); - +@@ -202,12 +209,7 @@ static void loopback_construct(struct ne np->loopback_dev = lo; -- + np->loop_idx = loop_idx; + - dev->open = loopback_open; - dev->stop = loopback_close; - dev->hard_start_xmit = loopback_start_xmit; -- dev->get_stats = loopback_get_stats; - dev->set_multicast_list = loopback_set_multicast_list; - dev->change_mtu = NULL; /* allow arbitrary mtu */ - @@ -10345,9 +10187,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches dev->tx_queue_len = 0; dev->features = (NETIF_F_HIGHDMA | ---- head-2010-04-29.orig/drivers/xen/netback/netback.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netback/netback.c 2010-03-24 15:17:58.000000000 +0100 -@@ -354,7 +354,7 @@ static void xen_network_done_notify(void +--- head-2011-03-17.orig/drivers/xen/netback/netback.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/netback.c 2011-03-01 11:52:09.000000000 +0100 +@@ -363,7 +363,7 @@ static void xen_network_done_notify(void static struct net_device *eth0_dev = NULL; if (unlikely(eth0_dev == NULL)) eth0_dev = __dev_get_by_name(&init_net, "eth0"); @@ -10356,9 +10198,17 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } /* * Add following to poll() function in NAPI driver (Tigon3 is example): ---- head-2010-04-29.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/drivers/xen/netfront/netfront.c 2010-03-24 15:17:58.000000000 +0100 -@@ -635,7 +635,7 @@ static int network_open(struct net_devic +@@ -1495,7 +1495,6 @@ static void net_tx_action(unsigned long + dev->stats.rx_packets++; + + netif_rx(skb); +- dev->last_rx = jiffies; + } + + out: +--- 
head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-09 16:04:26.000000000 +0100 +@@ -632,7 +632,7 @@ static int network_open(struct net_devic if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); @@ -10367,7 +10217,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } } spin_unlock_bh(&np->rx_lock); -@@ -707,7 +707,7 @@ static void rx_refill_timeout(unsigned l +@@ -703,7 +703,7 @@ static void rx_refill_timeout(unsigned l netfront_accelerator_call_stop_napi_irq(np, dev); @@ -10376,7 +10226,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } static void network_alloc_rx_buffers(struct net_device *dev) -@@ -1064,8 +1064,7 @@ static irqreturn_t netif_int(int irq, vo +@@ -1057,8 +1057,7 @@ static irqreturn_t netif_int(int irq, vo if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); @@ -10386,7 +10236,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } } -@@ -1481,7 +1480,6 @@ err: +@@ -1474,7 +1473,6 @@ err: /* Pass it up. */ netif_receive_skb(skb); @@ -10394,7 +10244,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } /* If we get a callback with very few responses, reduce fill target. */ -@@ -1523,7 +1521,7 @@ err: +@@ -1516,7 +1514,7 @@ err: } if (!more_to_do && !accel_more_to_do) @@ -10403,7 +10253,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches local_irq_restore(flags); } -@@ -2024,6 +2022,18 @@ static void network_set_multicast_list(s +@@ -2069,6 +2067,18 @@ static void network_set_multicast_list(s { } @@ -10422,7 +10272,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; -@@ -2080,15 +2090,8 @@ static struct net_device * __devinit cre +@@ -2124,15 +2134,8 @@ static struct net_device * __devinit cre goto exit_free_tx; } @@ -10439,7 +10289,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); -@@ -2119,7 +2122,7 @@ inetdev_notify(struct notifier_block *th +@@ -2163,7 +2166,7 @@ inetdev_notify(struct notifier_block *th struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? 
*/ @@ -10448,8 +10298,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches send_fake_arp(dev); return NOTIFY_DONE; ---- head-2010-04-29.orig/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/sfc_netfront/accel_msg.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netfront/accel_msg.c 2011-02-01 14:42:26.000000000 +0100 @@ -47,7 +47,7 @@ static void vnic_start_interrupts(netfro netfront_accel_disable_net_interrupts(vnic); vnic->irq_enabled = 0; @@ -10468,9 +10318,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } else { spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags); ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:17:58.000000000 +0100 -@@ -170,7 +170,6 @@ EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_client.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_client.c 2011-02-01 14:42:26.000000000 +0100 +@@ -169,7 +169,6 @@ EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); /** * xenbus_switch_state * @dev: xenbus device @@ -10478,7 +10328,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches * @state: new state * * Advertise in the store a change of the given driver to the given new_state. -@@ -304,7 +303,7 @@ EXPORT_SYMBOL_GPL(xenbus_dev_error); +@@ -302,7 +301,7 @@ EXPORT_SYMBOL_GPL(xenbus_dev_error); * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by @@ -10486,9 +10336,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ - void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:17:58.000000000 +0100 + void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:42:26.000000000 +0100 @@ -42,6 +42,7 @@ #include #include @@ -10506,9 +10356,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches +#endif + struct xenstore_domain_interface *xen_store_interface; - static unsigned long xen_store_mfn; -@@ -197,6 +202,12 @@ static int xenbus_uevent_frontend(struct + static unsigned long xen_store_mfn; +@@ -198,6 +203,12 @@ static int xenbus_uevent_frontend(struct } #endif @@ -10521,7 +10371,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Bus type for frontend drivers. 
*/ static struct xen_bus_type xenbus_frontend = { .root = "device", -@@ -205,13 +216,16 @@ static struct xen_bus_type xenbus_fronte +@@ -206,13 +217,16 @@ static struct xen_bus_type xenbus_fronte .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { @@ -10544,7 +10394,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif }, #if defined(CONFIG_XEN) || defined(MODULE) -@@ -584,7 +598,17 @@ int xenbus_probe_node(struct xen_bus_typ +@@ -586,7 +600,17 @@ int xenbus_probe_node(struct xen_bus_typ xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; @@ -10562,52 +10412,52 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (err) goto fail; -@@ -770,7 +794,7 @@ static int suspend_dev(struct device *de +@@ -774,7 +798,7 @@ static int suspend_dev(struct device *de err = drv->suspend(xdev); if (err) - printk(KERN_WARNING -- "xenbus: suspend %s failed: %i\n", dev->bus_id, err); -+ "xenbus: suspend %s failed: %i\n", dev_name(dev), err); + pr_warning("xenbus: suspend %s failed: %i\n", +- dev->bus_id, err); ++ dev_name(dev), err); return 0; } -@@ -791,7 +815,7 @@ static int suspend_cancel_dev(struct dev +@@ -794,7 +818,7 @@ static int suspend_cancel_dev(struct dev + err = drv->suspend_cancel(xdev); if (err) - printk(KERN_WARNING - "xenbus: suspend_cancel %s failed: %i\n", -- dev->bus_id, err); -+ dev_name(dev), err); + pr_warning("xenbus: suspend_cancel %s failed: %i\n", +- dev->bus_id, err); ++ dev_name(dev), err); return 0; } -@@ -813,7 +837,7 @@ static int resume_dev(struct device *dev +@@ -815,7 +839,7 @@ static int resume_dev(struct device *dev + err = talk_to_otherend(xdev); if (err) { - printk(KERN_WARNING - "xenbus: resume (talk_to_otherend) %s failed: %i\n", -- dev->bus_id, err); -+ dev_name(dev), err); + pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n", +- dev->bus_id, err); ++ dev_name(dev), err); return err; } -@@ -824,7 +848,7 @@ static int resume_dev(struct device *dev +@@ -825,7 +849,7 @@ static int resume_dev(struct device *dev + err = drv->resume(xdev); if (err) { - printk(KERN_WARNING - "xenbus: resume %s failed: %i\n", -- dev->bus_id, err); -+ dev_name(dev), err); + pr_warning("xenbus: resume %s failed: %i\n", +- dev->bus_id, err); ++ dev_name(dev), err); return err; } } @@ -833,7 +857,7 @@ static int resume_dev(struct device *dev + err = watch_otherend(xdev); if (err) { - printk(KERN_WARNING - "xenbus_probe: resume (watch_otherend) %s failed: " -- "%d.\n", dev->bus_id, err); -+ "%d.\n", dev_name(dev), err); + pr_warning("xenbus_probe: resume (watch_otherend) %s failed:" +- " %d\n", dev->bus_id, err); ++ " %d\n", dev_name(dev), err); return err; } -@@ -1145,6 +1169,14 @@ static int __devinit xenbus_probe_init(v +@@ -1143,6 +1167,14 @@ static int __devinit xenbus_probe_init(v if (!is_initial_xendomain()) xenbus_probe(NULL); @@ -10622,21 +10472,19 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return 0; err: ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_probe.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe.h 2010-03-24 15:17:58.000000000 +0100 -@@ -45,6 +45,10 @@ +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.h 2011-02-07 14:42:39.000000000 +0100 +@@ -43,6 +43,8 @@ + #ifdef CONFIG_PARAVIRT_XEN + #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() ++#elif 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ++#define dev_name(dev) ((dev)->bus_id) #endif -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) -+#define dev_name(dev) ((dev)->bus_id) -+#endif -+ #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) - extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); - extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 14:42:26.000000000 +0100 @@ -36,6 +36,7 @@ __FUNCTION__, __LINE__, ##args) @@ -10645,7 +10493,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #include #include #include -@@ -108,6 +109,10 @@ static int backend_bus_id(char bus_id[BU +@@ -108,6 +109,10 @@ static int backend_bus_id(char bus_id[XE return 0; } @@ -10676,9 +10524,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches }, .dev = { .bus_id = "xen-backend", ---- head-2010-04-29.orig/drivers/xen/xenbus/xenbus_xs.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenbus/xenbus_xs.c 2010-03-24 15:17:58.000000000 +0100 -@@ -227,6 +227,9 @@ void *xenbus_dev_request_and_reply(struc +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_xs.c 2011-02-01 14:42:26.000000000 +0100 +@@ -226,6 +226,9 @@ void *xenbus_dev_request_and_reply(struc return ret; } @@ -10688,9 +10536,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. 
*/ static void *xs_talkv(struct xenbus_transaction t, ---- head-2010-04-29.orig/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-04-29/drivers/xen/xenoprof/xenoprofile.c 2010-03-24 15:17:58.000000000 +0100 -@@ -50,7 +50,7 @@ static int xenoprof_enabled = 0; +--- head-2011-03-17.orig/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenoprof/xenoprofile.c 2011-02-01 14:42:26.000000000 +0100 +@@ -49,7 +49,7 @@ static int xenoprof_enabled = 0; static int xenoprof_is_primary = 0; static int active_defined; @@ -10699,7 +10547,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* Number of buffers in shared area (one per VCPU) */ static int nbuf; -@@ -339,11 +339,11 @@ static int xenoprof_setup(void) +@@ -338,11 +338,11 @@ static int xenoprof_setup(void) active_defined = 1; } @@ -10714,24 +10562,35 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL); ---- head-2010-04-29.orig/include/acpi/processor.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-04-29/include/acpi/processor.h 2010-03-24 15:17:58.000000000 +0100 -@@ -451,6 +451,13 @@ extern int processor_extcntl_prepare(str - extern int acpi_processor_get_performance_info(struct acpi_processor *pr); - extern int acpi_processor_get_psd(struct acpi_processor *pr); - void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **); -+ -+/* -+ * Declarations for objects and functions removed in native 2.6.29, and -+ * thus moved to drivers/acpi/processor_extcntl.c. -+ */ -+extern struct notifier_block acpi_processor_latency_notifier; -+int acpi_processor_set_power_policy(struct acpi_processor *); - #else - static inline int processor_cntl_external(void) {return 0;} - static inline int processor_pm_external(void) {return 0;} ---- head-2010-04-29.orig/include/xen/cpu_hotplug.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/include/xen/cpu_hotplug.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/include/acpi/processor.h 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/include/acpi/processor.h 2011-02-01 14:42:26.000000000 +0100 +@@ -91,13 +91,24 @@ struct acpi_processor_cx { + }; + + struct acpi_processor_power { ++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL ++ union { /* 'dev' is actually only used for taking its address. 
*/ ++#endif + struct cpuidle_device dev; ++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL + struct acpi_processor_cx *state; + unsigned long bm_check_timestamp; + u32 default_state; ++#else ++ struct { ++#endif + int count; + struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; ++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL + int timer_broadcast_on_state; ++#else ++ }; }; ++#endif + }; + + /* Performance Management */ +--- head-2011-03-17.orig/include/xen/cpu_hotplug.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/include/xen/cpu_hotplug.h 2011-02-01 14:42:26.000000000 +0100 @@ -5,7 +5,7 @@ #include @@ -10741,8 +10600,8 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #endif #if defined(CONFIG_HOTPLUG_CPU) ---- head-2010-04-29.orig/include/xen/evtchn.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-04-29/include/xen/evtchn.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/include/xen/evtchn.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/include/xen/evtchn.h 2011-02-01 14:42:26.000000000 +0100 @@ -48,6 +48,18 @@ * LOW-LEVEL DEFINITIONS */ @@ -10762,20 +10621,20 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. ---- head-2010-04-29.orig/include/xen/xenbus.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-04-29/include/xen/xenbus.h 2010-03-24 15:17:58.000000000 +0100 -@@ -322,7 +322,9 @@ void xenbus_dev_error(struct xenbus_devi +--- head-2011-03-17.orig/include/xen/xenbus.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/include/xen/xenbus.h 2011-02-02 16:58:42.000000000 +0100 +@@ -325,7 +325,9 @@ void xenbus_dev_error(struct xenbus_devi void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); -+#if defined(CONFIG_XEN) || defined(MODULE) ++#if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) int xenbus_dev_init(void); +#endif const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); ---- head-2010-04-29.orig/lib/swiotlb-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-04-29/lib/swiotlb-xen.c 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 14:42:26.000000000 +0100 @@ -8,6 +8,7 @@ * Copyright (C) 2000, 2003 Hewlett-Packard Co * David Mosberger-Tang @@ -10793,7 +10652,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches #include #include #include -@@ -30,27 +33,11 @@ +@@ -30,24 +33,9 @@ #include #include @@ -10801,7 +10660,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - #define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1))) --/* + /* - * Maximum allowable number of contiguous slabs to map, - * must be a power of 2. What is the appropriate value ? - * The complexity of {map,unmap}_single is linearly dependent on this value. 
@@ -10814,6 +10673,14 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches - */ -#define IO_TLB_SHIFT 11 - +-/* + * Enumeration for sync targets + */ + enum dma_sync_target { +@@ -55,10 +43,9 @@ enum dma_sync_target { + SYNC_FOR_DEVICE = 1, + }; + +int swiotlb; int swiotlb_force; @@ -10822,7 +10689,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Used to do a quick range check in swiotlb_unmap_single and * swiotlb_sync_single_*, to see if the memory was in fact allocated by this -@@ -59,6 +46,12 @@ static unsigned long iotlb_nslabs; +@@ -67,6 +54,12 @@ static unsigned long iotlb_nslabs; static char *io_tlb_start, *io_tlb_end; /* @@ -10835,7 +10702,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches * When the IOMMU overflows we return a fallback buffer. This sets the size. */ static unsigned long io_tlb_overflow = 32*1024; -@@ -76,10 +69,7 @@ static unsigned int io_tlb_index; +@@ -84,10 +77,7 @@ static unsigned int io_tlb_index; * We need to save away the original address corresponding to a mapped entry * for the sync operations. */ @@ -10847,7 +10714,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Protect the above data structures in the map and unmap calls -@@ -101,9 +91,9 @@ setup_io_tlb_npages(char *str) +@@ -109,9 +99,9 @@ setup_io_tlb_npages(char *str) { /* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */ if (isdigit(*str)) { @@ -10859,7 +10726,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } if (*str == ',') ++str; -@@ -121,35 +111,17 @@ setup_io_tlb_npages(char *str) +@@ -129,35 +119,17 @@ setup_io_tlb_npages(char *str) __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? 
*/ @@ -10900,7 +10767,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT), dma_bits); } while (rc && dma_bits++ < max_dma_bits); -@@ -158,12 +130,12 @@ swiotlb_init_with_default_size(size_t de +@@ -166,12 +138,12 @@ swiotlb_init_with_default_size(size_t de panic("No suitable physical memory available for SWIOTLB buffer!\n" "Use dom0_mem Xen boot parameter to reserve\n" "some DMA memory (e.g., dom0_mem=-128M).\n"); @@ -10917,7 +10784,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (bits > dma_bits) dma_bits = bits; -@@ -171,18 +143,88 @@ swiotlb_init_with_default_size(size_t de +@@ -179,18 +151,88 @@ swiotlb_init_with_default_size(size_t de break; } } @@ -11010,7 +10877,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Get the overflow emergency buffer -@@ -200,13 +242,7 @@ swiotlb_init_with_default_size(size_t de +@@ -208,13 +250,7 @@ swiotlb_init_with_default_size(size_t de if (rc) panic("No suitable physical memory available for SWIOTLB overflow buffer!\n"); @@ -11025,7 +10892,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } void __init -@@ -233,6 +269,11 @@ swiotlb_init(void) +@@ -241,6 +277,11 @@ swiotlb_init(void) printk(KERN_INFO "Software IO TLB disabled\n"); } @@ -11037,7 +10904,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches static int is_swiotlb_buffer(dma_addr_t addr) { unsigned long pfn = mfn_to_local_pfn(PFN_DOWN(addr)); -@@ -246,46 +287,50 @@ static int is_swiotlb_buffer(dma_addr_t +@@ -254,46 +295,50 @@ static int is_swiotlb_buffer(dma_addr_t } /* @@ -11119,7 +10986,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } } -@@ -293,12 +338,11 @@ __sync_single(struct phys_addr buffer, c +@@ -301,12 +346,11 @@ __sync_single(struct phys_addr buffer, c * Allocates bounce buffer and returns its kernel virtual address. */ static void * @@ -11133,7 +11000,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches int i; unsigned long mask; unsigned long offset_slots; -@@ -306,6 +350,10 @@ map_single(struct device *hwdev, struct +@@ -314,6 +358,10 @@ map_single(struct device *hwdev, struct mask = dma_get_seg_boundary(hwdev); offset_slots = -IO_TLB_SEGSIZE; @@ -11144,7 +11011,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches max_slots = mask + 1 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); -@@ -328,7 +376,7 @@ map_single(struct device *hwdev, struct +@@ -336,7 +384,7 @@ map_single(struct device *hwdev, struct */ spin_lock_irqsave(&io_tlb_lock, flags); index = ALIGN(io_tlb_index, stride); @@ -11153,7 +11020,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches index = 0; wrap = index; -@@ -336,7 +384,7 @@ map_single(struct device *hwdev, struct +@@ -344,7 +392,7 @@ map_single(struct device *hwdev, struct while (iommu_is_span_boundary(index, nslots, offset_slots, max_slots)) { index += stride; @@ -11162,7 +11029,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches index = 0; if (index == wrap) goto not_found; -@@ -360,13 +408,13 @@ map_single(struct device *hwdev, struct +@@ -368,13 +416,13 @@ map_single(struct device *hwdev, struct * Update the indices to avoid searching in the next * round. 
*/ @@ -11178,7 +11045,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches index = 0; } while (index != wrap); -@@ -381,29 +429,14 @@ found: +@@ -389,29 +437,14 @@ found: * This is needed when we sync the memory. Then we sync the buffer if * needed. */ @@ -11212,7 +11079,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * dma_addr is the kernel virtual address of the bounce buffer to unmap. */ -@@ -413,13 +446,13 @@ unmap_single(struct device *hwdev, char +@@ -421,13 +454,13 @@ unmap_single(struct device *hwdev, char unsigned long flags; int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; @@ -11229,9 +11096,9 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Return the buffer to the free list by setting the corresponding -@@ -453,9 +486,13 @@ unmap_single(struct device *hwdev, char - static void - sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir) +@@ -462,17 +495,21 @@ static void + sync_single(struct device *hwdev, char *dma_addr, size_t size, + int dir, int target) { - struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr); + int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; @@ -11239,13 +11106,22 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches + + phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1)); + - BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE)); -- __sync_single(buffer, dma_addr, size, dir); -+ swiotlb_bounce(phys, dma_addr, size, dir); - } - - static void -@@ -469,7 +506,7 @@ swiotlb_full(struct device *dev, size_t + switch (target) { + case SYNC_FOR_CPU: + if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) +- __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE); ++ swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE); + else + BUG_ON(dir != DMA_TO_DEVICE); + break; + case SYNC_FOR_DEVICE: + if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) +- __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE); ++ swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE); + else + BUG_ON(dir != DMA_FROM_DEVICE); + break; +@@ -492,7 +529,7 @@ swiotlb_full(struct device *dev, size_t * the damage, or panic when the transfer is too big. */ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %zu bytes at " @@ -11254,7 +11130,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (size > io_tlb_overflow && do_panic) { if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) -@@ -494,7 +531,6 @@ _swiotlb_map_single(struct device *hwdev +@@ -517,7 +554,6 @@ _swiotlb_map_single(struct device *hwdev dma_addr_t dev_addr = gnttab_dma_map_page(page) + offset_in_page(paddr); void *map; @@ -11262,7 +11138,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches BUG_ON(dir == DMA_NONE); -@@ -503,23 +539,21 @@ _swiotlb_map_single(struct device *hwdev +@@ -526,23 +562,21 @@ _swiotlb_map_single(struct device *hwdev * we can safely return the device addr and not worry about bounce * buffering it. 
*/ @@ -11290,7 +11166,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches return dev_addr; } -@@ -536,6 +570,7 @@ swiotlb_map_single(struct device *hwdev, +@@ -559,6 +593,7 @@ swiotlb_map_single(struct device *hwdev, { return _swiotlb_map_single(hwdev, virt_to_phys(ptr), size, dir, NULL); } @@ -11298,7 +11174,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches dma_addr_t swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) -@@ -555,7 +590,7 @@ void +@@ -578,7 +613,7 @@ void swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir, struct dma_attrs *attrs) { @@ -11307,7 +11183,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches BUG_ON(dir == DMA_NONE); if (is_swiotlb_buffer(dev_addr)) -@@ -571,6 +606,8 @@ swiotlb_unmap_single(struct device *hwde +@@ -594,6 +629,8 @@ swiotlb_unmap_single(struct device *hwde { return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); } @@ -11316,55 +11192,51 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. -@@ -585,48 +622,50 @@ void - swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) +@@ -608,7 +645,7 @@ static void + swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, int target) { - char *dma_addr = bus_to_virt(dev_addr); + char *dma_addr = swiotlb_bus_to_virt(dev_addr); BUG_ON(dir == DMA_NONE); if (is_swiotlb_buffer(dev_addr)) - sync_single(hwdev, dma_addr, size, dir); +@@ -621,6 +658,7 @@ swiotlb_sync_single_for_cpu(struct devic + { + swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } +EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) +@@ -628,6 +666,7 @@ swiotlb_sync_single_for_device(struct de { -- char *dma_addr = bus_to_virt(dev_addr); -+ char *dma_addr = swiotlb_bus_to_virt(dev_addr); - - BUG_ON(dir == DMA_NONE); - if (is_swiotlb_buffer(dev_addr)) - sync_single(hwdev, dma_addr, size, dir); + swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } +EXPORT_SYMBOL(swiotlb_sync_single_for_device); - void - swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) + /* + * Same as above, but for a sub-range of the mapping. 
+@@ -637,7 +676,7 @@ swiotlb_sync_single_range(struct device + unsigned long offset, size_t size, + int dir, int target) { - char *dma_addr = bus_to_virt(dev_addr); + char *dma_addr = swiotlb_bus_to_virt(dev_addr); BUG_ON(dir == DMA_NONE); if (is_swiotlb_buffer(dev_addr)) - sync_single(hwdev, dma_addr + offset, size, dir); +@@ -651,6 +690,7 @@ swiotlb_sync_single_range_for_cpu(struct + swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, + SYNC_FOR_CPU); } +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) - { -- char *dma_addr = bus_to_virt(dev_addr); -+ char *dma_addr = swiotlb_bus_to_virt(dev_addr); - - BUG_ON(dir == DMA_NONE); - if (is_swiotlb_buffer(dev_addr)) - sync_single(hwdev, dma_addr + offset, size, dir); +@@ -659,9 +699,8 @@ swiotlb_sync_single_range_for_device(str + swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, + SYNC_FOR_DEVICE); } +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); @@ -11373,7 +11245,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above swiotlb_map_single -@@ -648,23 +687,23 @@ swiotlb_map_sg_attrs(struct device *hwde +@@ -683,23 +722,23 @@ swiotlb_map_sg_attrs(struct device *hwde int dir, struct dma_attrs *attrs) { struct scatterlist *sg; @@ -11406,7 +11278,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches if (!map) { /* Don't panic here, we expect map_sg users to do proper error handling. */ -@@ -674,7 +713,7 @@ swiotlb_map_sg_attrs(struct device *hwde +@@ -709,7 +748,7 @@ swiotlb_map_sg_attrs(struct device *hwde sgl[0].dma_length = 0; return 0; } @@ -11415,7 +11287,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches } else sg->dma_address = dev_addr; sg->dma_length = sg->length; -@@ -689,6 +728,7 @@ swiotlb_map_sg(struct device *hwdev, str +@@ -724,6 +763,7 @@ swiotlb_map_sg(struct device *hwdev, str { return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); } @@ -11423,7 +11295,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Unmap a set of streaming mode DMA translations. 
Again, cpu read rules -@@ -705,7 +745,7 @@ swiotlb_unmap_sg_attrs(struct device *hw +@@ -740,7 +780,7 @@ swiotlb_unmap_sg_attrs(struct device *hw for_each_sg(sgl, sg, nelems, i) { if (sg->dma_address != sg_phys(sg)) @@ -11432,7 +11304,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches sg->dma_length, dir); else gnttab_dma_unmap_page(sg->dma_address); -@@ -719,6 +759,7 @@ swiotlb_unmap_sg(struct device *hwdev, s +@@ -754,6 +794,7 @@ swiotlb_unmap_sg(struct device *hwdev, s { return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); } @@ -11440,27 +11312,26 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Make physical memory consistent for a set of streaming mode DMA translations -@@ -738,10 +779,11 @@ swiotlb_sync_sg_for_cpu(struct device *h +@@ -773,7 +814,7 @@ swiotlb_sync_sg(struct device *hwdev, st for_each_sg(sgl, sg, nelems, i) { if (sg->dma_address != sg_phys(sg)) - sync_single(hwdev, bus_to_virt(sg->dma_address), + sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), - sg->dma_length, dir); + sg->dma_length, dir, target); } } +@@ -784,6 +825,7 @@ swiotlb_sync_sg_for_cpu(struct device *h + { + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); + } +EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); void - swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sgl, -@@ -754,16 +796,18 @@ swiotlb_sync_sg_for_device(struct device - - for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != sg_phys(sg)) -- sync_single(hwdev, bus_to_virt(sg->dma_address), -+ sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), - sg->dma_length, dir); - } + swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, +@@ -791,12 +833,14 @@ swiotlb_sync_sg_for_device(struct device + { + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } +EXPORT_SYMBOL(swiotlb_sync_sg_for_device); @@ -11474,7 +11345,7 @@ Automatically created from "patches.kernel.org/patch-2.6.29" by xen-port-patches /* * Return whether the given PCI device DMA address mask can be supported -@@ -776,14 +820,4 @@ swiotlb_dma_supported (struct device *hw +@@ -809,14 +853,4 @@ swiotlb_dma_supported (struct device *hw { return (mask >= ((1UL << dma_bits) - 1)); } diff --git a/patches.xen/xen3-patch-2.6.30 b/patches.xen/xen3-patch-2.6.30 index afcc552..9eb76d2 100644 --- a/patches.xen/xen3-patch-2.6.30 +++ b/patches.xen/xen3-patch-2.6.30 @@ -7,8 +7,8 @@ Patch-mainline: 2.6.30 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches.py ---- head-2010-05-25.orig/arch/ia64/include/asm/xen/hypervisor.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/ia64/include/asm/xen/hypervisor.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/ia64/include/asm/xen/hypervisor.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/ia64/include/asm/xen/hypervisor.h 2011-02-01 14:44:12.000000000 +0100 @@ -34,13 +34,13 @@ #define _ASM_IA64_XEN_HYPERVISOR_H @@ -24,19 +24,19 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; ---- head-2010-05-25.orig/arch/ia64/kernel/vmlinux.lds.S 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/ia64/kernel/vmlinux.lds.S 2010-03-24 15:25:06.000000000 +0100 -@@ -182,7 +182,7 @@ SECTIONS - __start_gate_section = .; - *(.data.gate) - __stop_gate_section = .; +--- 
head-2011-03-17.orig/arch/ia64/kernel/vmlinux.lds.S 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/arch/ia64/kernel/vmlinux.lds.S 2011-02-01 14:44:12.000000000 +0100 +@@ -183,7 +183,7 @@ SECTIONS { + __start_gate_section = .; + *(.data..gate) + __stop_gate_section = .; -#ifdef CONFIG_XEN +#ifdef CONFIG_PARAVIRT_XEN - . = ALIGN(PAGE_SIZE); - __xen_start_gate_section = .; - *(.data.gate.xen) ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-24 15:25:06.000000000 +0100 + . = ALIGN(PAGE_SIZE); + __xen_start_gate_section = .; + *(.data..gate.xen) +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-02-01 14:44:12.000000000 +0100 @@ -49,8 +49,8 @@ config X86 select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_DMA_API_DEBUG @@ -45,10 +45,10 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - select HAVE_KERNEL_LZMA + select HAVE_KERNEL_BZIP2 if !XEN + select HAVE_KERNEL_LZMA if !XEN + select HAVE_KERNEL_XZ select HAVE_KERNEL_LZO select HAVE_HW_BREAKPOINT - select PERF_EVENTS -@@ -337,11 +337,11 @@ config X86_XEN +@@ -317,11 +317,11 @@ config X86_XEN config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" @@ -62,7 +62,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches config X86_EXTENDED_PLATFORM bool "Support for extended (non-PC) x86 platforms" default y -@@ -371,7 +371,7 @@ config X86_64_XEN +@@ -351,7 +351,7 @@ config X86_64_XEN help This option will compile a kernel compatible with Xen hypervisor @@ -71,7 +71,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches config X86_EXTENDED_PLATFORM bool "Support for extended (non-PC) x86 platforms" default y -@@ -842,7 +842,7 @@ config MAXSMP +@@ -769,7 +769,7 @@ config MAXSMP config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP @@ -80,7 +80,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches range 2 512 if SMP && !MAXSMP default "1" if !SMP default "4096" if MAXSMP -@@ -916,10 +916,6 @@ config X86_VISWS_APIC +@@ -854,10 +854,6 @@ config X86_VISWS_APIC def_bool y depends on X86_32 && X86_VISWS @@ -90,10 +90,10 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - config X86_REROUTE_FOR_BROKEN_BOOT_IRQS bool "Reroute for broken boot IRQs" - default n ---- head-2010-05-25.orig/arch/x86/Makefile 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/Makefile 2010-03-24 15:25:06.000000000 +0100 -@@ -111,10 +111,6 @@ endif + depends on X86_IO_APIC && !XEN +--- head-2011-03-17.orig/arch/x86/Makefile 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/Makefile 2011-02-01 14:44:12.000000000 +0100 +@@ -116,10 +116,6 @@ endif # prevent gcc from generating any FP code by mistake KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) @@ -104,7 +104,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) -@@ -187,10 +183,10 @@ endif +@@ -189,10 +185,10 @@ endif $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@ @@ -116,8 +116,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches PHONY += install install: ---- 
head-2010-05-25.orig/arch/x86/boot/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/boot/Makefile 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/boot/Makefile 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-17/arch/x86/boot/Makefile 2011-02-01 14:44:12.000000000 +0100 @@ -204,6 +204,12 @@ $(obj)/vmlinux-stripped: OBJCOPYFLAGS := $(obj)/vmlinux-stripped: vmlinux FORCE $(call if_changed,objcopy) @@ -132,8 +132,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ + sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/$(bzImage) \ System.map "$(INSTALL_PATH)" ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:44:12.000000000 +0100 @@ -502,7 +502,7 @@ ia32_sys_call_table: .quad sys32_olduname .quad sys_umask /* 60 */ @@ -150,8 +150,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + .quad compat_sys_preadv + .quad compat_sys_pwritev ia32_syscall_end: ---- head-2010-05-25.orig/arch/x86/include/asm/kexec.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/asm/kexec.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/kexec.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/kexec.h 2011-02-01 14:44:12.000000000 +0100 @@ -21,8 +21,14 @@ # define PA_CONTROL_PAGE 0 # define VA_CONTROL_PAGE 1 @@ -167,8 +167,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif # define KEXEC_CONTROL_CODE_MAX_SIZE 2048 ---- head-2010-05-25.orig/arch/x86/include/asm/page_64_types.h 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/page_64_types.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/page_64_types.h 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/page_64_types.h 2011-02-01 14:44:12.000000000 +0100 @@ -69,7 +69,15 @@ extern void init_extra_mapping_wb(unsign #endif /* !__ASSEMBLY__ */ @@ -185,8 +185,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif #endif /* _ASM_X86_PAGE_64_DEFS_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:44:12.000000000 +0100 @@ -39,7 +39,7 @@ extern gate_desc idt_table[]; struct gdt_page { struct desc_struct gdt[GDT_ENTRIES]; @@ -234,8 +234,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches BUG(); } #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:44:12.000000000 +0100 @@ -1,11 +1,154 @@ 
+/* + * fixmap.h: compile-time virtual memory allocation @@ -412,7 +412,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-02-01 14:39:24.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -/* @@ -540,7 +540,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - -#endif /* !__ASSEMBLY__ */ -#endif /* _ASM_X86_FIXMAP_32_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-02-01 14:42:26.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -/* @@ -633,8 +633,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) - -#endif /* _ASM_X86_FIXMAP_64_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:05:16.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/highmem.h 2010-03-24 17:05:22.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:44:12.000000000 +0100 @@ -62,6 +62,7 @@ void *kmap_atomic_prot(struct page *page void *kmap_atomic(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); @@ -643,9 +643,13 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct page *kmap_atomic_to_page(void *ptr); #define kmap_atomic_pte(page, type) \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:25:06.000000000 +0100 -@@ -46,7 +46,7 @@ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 14:44:12.000000000 +0100 +@@ -43,10 +43,11 @@ + #include + #include + #include ++#include #include #include #include @@ -654,7 +658,17 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches extern shared_info_t *HYPERVISOR_shared_info; -@@ -153,20 +153,16 @@ int __must_check xen_multi_mmuext_op(str +@@ -71,7 +72,9 @@ extern start_info_t *xen_start_info; + + #define init_hypervisor(c) ((void)((c)->x86_hyper_vendor = X86_HYPER_VENDOR_XEN)) + ++DECLARE_PER_CPU(struct vcpu_runstate_info, runstate); + struct vcpu_runstate_info *setup_runstate_area(unsigned int cpu); ++#define vcpu_running(cpu) (per_cpu(runstate.state, cpu) == RUNSTATE_running) + + /* arch/xen/kernel/evtchn.c */ + /* Force a proper event-channel callback from Xen. */ +@@ -153,20 +156,16 @@ int __must_check xen_multi_mmuext_op(str #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE static inline void arch_enter_lazy_mmu_mode(void) { @@ -678,8 +692,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #if 0 /* All uses are in places potentially called asynchronously, but * asynchronous code should rather not make use of lazy mode at all. 
---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:44:12.000000000 +0100 @@ -5,6 +5,10 @@ #include @@ -855,7 +869,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif /* _ASM_X86_IO_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/ipi.h 2010-03-24 15:25:06.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/ipi.h 2011-02-01 14:44:12.000000000 +0100 @@ -0,0 +1,13 @@ +#ifndef _ASM_X86_IPI_H +#define _ASM_X86_IPI_H @@ -870,8 +884,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +void xen_send_IPI_self(int vector); + +#endif /* _ASM_X86_IPI_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:44:12.000000000 +0100 @@ -94,7 +94,7 @@ static inline void halt(void) #ifdef CONFIG_X86_64 @@ -889,8 +903,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches call evtchn_do_upcall ; \ add $4,%esp ; \ jmp ret_from_intr ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:32:00.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:33:07.000000000 +0100 @@ -2,29 +2,46 @@ #define _ASM_X86_IRQ_VECTORS_H @@ -904,13 +918,13 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -#define RESCHEDULE_VECTOR 0 -#define CALL_FUNCTION_VECTOR 1 --#define CALL_FUNC_SINGLE_VECTOR 2 --#define SPIN_UNLOCK_VECTOR 3 +-#define NMI_VECTOR 0x02 +-#define CALL_FUNC_SINGLE_VECTOR 3 -#define NR_IPIS 4 +#define RESCHEDULE_VECTOR 0 +#define CALL_FUNCTION_VECTOR 1 -+#define CALL_FUNC_SINGLE_VECTOR 2 -+#define SPIN_UNLOCK_VECTOR 3 ++#define NMI_VECTOR 0x02 ++#define CALL_FUNC_SINGLE_VECTOR 3 +#define NR_IPIS 4 /* @@ -955,6 +969,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches * are bound using the provided bind/unbind functions. 
*/ +#define PIRQ_BASE 0 ++ ++#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS ) ++#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) -#define PIRQ_BASE 0 -#if defined(NR_CPUS) && defined(MAX_IO_APICS) @@ -962,48 +979,45 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -# define NR_PIRQS (NR_VECTORS + 32 * NR_CPUS) -# elif defined(CONFIG_SPARSE_IRQ) && 8 * NR_CPUS > 32 * MAX_IO_APICS -# define NR_PIRQS (NR_VECTORS + 8 * NR_CPUS) -+#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS ) -+#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) -+ -+#ifdef CONFIG_X86_IO_APIC -+# if !defined(NR_CPUS) || !defined(MAX_IO_APICS) -+/* nothing */ -+# elif defined(CONFIG_SPARSE_IRQ) ++#if defined(CONFIG_X86_IO_APIC) ++# ifdef CONFIG_SPARSE_IRQ ++# define NR_PIRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) + # else +-# define NR_PIRQS (NR_VECTORS + 32 * MAX_IO_APICS) +# define NR_PIRQS \ -+ (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ ++ (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \ + (NR_VECTORS + CPU_VECTOR_LIMIT) : \ + (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) -+# elif NR_CPUS < MAX_IO_APICS -+# define NR_PIRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) - # else --# define NR_PIRQS (NR_VECTORS + 32 * MAX_IO_APICS) -+# define NR_PIRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) # endif +#elif defined(CONFIG_XEN_PCIDEV_FRONTEND) +# define NR_PIRQS (NR_VECTORS + CPU_VECTOR_LIMIT) +#else /* !CONFIG_X86_IO_APIC: */ +# define NR_PIRQS NR_IRQS_LEGACY -+#endif -+ + #endif + +-#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS) +-#define NR_DYNIRQS 256 +#ifndef __ASSEMBLY__ -+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SPARSE_IRQ) ++#ifdef CONFIG_SPARSE_IRQ +extern int nr_pirqs; +#else +# define nr_pirqs NR_PIRQS +#endif - #endif - --#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS) --#define NR_DYNIRQS 256 ++#endif ++ +#define DYNIRQ_BASE (PIRQ_BASE + nr_pirqs) ++#ifdef CONFIG_SPARSE_IRQ ++#define NR_DYNIRQS CPU_VECTOR_LIMIT ++#else +#define NR_DYNIRQS 256 ++#endif -#define NR_IRQS (NR_PIRQS + NR_DYNIRQS) +#define NR_IRQS (NR_PIRQS + NR_DYNIRQS) #endif /* _ASM_X86_IRQ_VECTORS_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:44:12.000000000 +0100 @@ -26,11 +26,117 @@ static inline void xen_activate_mm(struc int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); @@ -1142,7 +1156,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +#endif #endif /* _ASM_X86_MMU_CONTEXT_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2010-03-24 15:17:58.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-02-01 14:42:26.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -#ifndef _ASM_X86_MMU_CONTEXT_32_H @@ -1228,7 +1242,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - asm("movl %0,%%gs": :"r" (0)); - -#endif /* _ASM_X86_MMU_CONTEXT_32_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-02-01 14:39:24.000000000 +0100 +++ /dev/null 1970-01-01 
00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -#ifndef _ASM_X86_MMU_CONTEXT_64_H @@ -1337,8 +1351,14 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -} while (0) - -#endif /* _ASM_X86_MMU_CONTEXT_64_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:25:06.000000000 +0100 +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mutex.h 2011-02-01 14:44:12.000000000 +0100 +@@ -0,0 +1,3 @@ ++#define arch_cpu_is_running(cpu) vcpu_running(cpu) ++ ++#include_next +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:44:12.000000000 +0100 @@ -41,7 +41,6 @@ static inline int pci_proc_domain(struct return pci_domain_nr(bus); } @@ -1408,8 +1428,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static inline const struct cpumask * cpumask_of_pcibus(const struct pci_bus *bus) { ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:44:12.000000000 +0100 @@ -1,178 +1,9 @@ #ifndef _ASM_X86_PGTABLE_H #define _ASM_X86_PGTABLE_H @@ -1752,7 +1772,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Mask out unsupported bits in a present pgprot. Non-present pgprots * can use those bits for other purposes, so leave them be. 
-@@ -391,68 +284,208 @@ static inline int is_new_memtype_allowed +@@ -390,68 +283,208 @@ static inline int is_new_memtype_allowed return 1; } @@ -2004,7 +2024,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] -@@ -479,28 +512,6 @@ extern void arch_report_meminfo(struct s +@@ -478,28 +511,6 @@ extern void arch_report_meminfo(struct s #ifndef __ASSEMBLY__ @@ -2033,7 +2053,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* local pte updates need not use xchg for locking */ static inline pte_t xen_local_ptep_get_and_clear(pte_t *ptep, pte_t res) { -@@ -633,15 +644,18 @@ static inline void clone_pgd_range(pgd_t +@@ -632,15 +643,18 @@ static inline void clone_pgd_range(pgd_t memcpy(dst, src, count * sizeof(pgd_t)); } @@ -2055,8 +2075,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifdef CONFIG_HIGHPTE #include struct page *kmap_atomic_to_page(void *); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:44:12.000000000 +0100 @@ -20,21 +20,6 @@ __FILE__, __LINE__, &(e), __pgd_val(e), \ (pgd_val(e) & PTE_PFN_MASK) >> PAGE_SHIFT) @@ -2113,7 +2133,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \ ((_pte).pte_high << (32-PAGE_SHIFT))) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2010-03-24 15:14:47.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level-defs.h 2011-02-01 14:39:24.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H @@ -2141,7 +2161,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - -#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable-3level_types.h 2010-03-24 15:25:06.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level_types.h 2011-02-01 14:44:12.000000000 +0100 @@ -0,0 +1,44 @@ +#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H +#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H @@ -2187,8 +2207,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + + +#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:44:12.000000000 +0100 @@ -1,6 +1,8 @@ #ifndef _ASM_X86_PGTABLE_32_H #define _ASM_X86_PGTABLE_32_H @@ -2321,8 +2341,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - direct_remap_pfn_range(vma, from, pfn, size, prot, DOMID_IO) - #endif /* _ASM_X86_PGTABLE_32_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 
15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:44:12.000000000 +0100 @@ -2,6 +2,8 @@ #define _ASM_X86_PGTABLE_64_H @@ -2519,7 +2539,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64_types.h 2010-03-24 15:25:06.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64_types.h 2011-02-01 14:44:12.000000000 +0100 @@ -0,0 +1,63 @@ +#ifndef _ASM_X86_PGTABLE_64_DEFS_H +#define _ASM_X86_PGTABLE_64_DEFS_H @@ -2585,7 +2605,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + +#endif /* _ASM_X86_PGTABLE_64_DEFS_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_types.h 2010-03-24 15:25:06.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_types.h 2011-02-01 14:44:12.000000000 +0100 @@ -0,0 +1,388 @@ +#ifndef _ASM_X86_PGTABLE_DEFS_H +#define _ASM_X86_PGTABLE_DEFS_H @@ -2791,7 +2811,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +#define __pgd_ma(x) ((pgd_t) { (x) } ) +static inline pgd_t xen_make_pgd(pgdval_t val) +{ -+ if (val & _PAGE_PRESENT) ++ if (likely(val & _PAGE_PRESENT)) + val = pte_phys_to_machine(val); + return (pgd_t) { val }; +} @@ -2801,10 +2821,10 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +{ + pgdval_t ret = __pgd_val(pgd); +#if PAGETABLE_LEVELS == 2 && CONFIG_XEN_COMPAT <= 0x030002 -+ if (ret) ++ if (likely(ret)) + ret = machine_to_phys(ret) | _PAGE_PRESENT; +#else -+ if (ret & _PAGE_PRESENT) ++ if (likely(ret & _PAGE_PRESENT)) + ret = pte_machine_to_phys(ret); +#endif + return ret; @@ -2821,7 +2841,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +#define __pud_ma(x) ((pud_t) { (x) } ) +static inline pud_t xen_make_pud(pudval_t val) +{ -+ if (val & _PAGE_PRESENT) ++ if (likely(val & _PAGE_PRESENT)) + val = pte_phys_to_machine(val); + return (pud_t) { val }; +} @@ -2830,7 +2850,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +static inline pudval_t xen_pud_val(pud_t pud) +{ + pudval_t ret = __pud_val(pud); -+ if (ret & _PAGE_PRESENT) ++ if (likely(ret & _PAGE_PRESENT)) + ret = pte_machine_to_phys(ret); + return ret; +} @@ -2850,7 +2870,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +#define __pmd_ma(x) ((pmd_t) { (x) } ) +static inline pmd_t xen_make_pmd(pmdval_t val) +{ -+ if (val & _PAGE_PRESENT) ++ if (likely(val & _PAGE_PRESENT)) + val = pte_phys_to_machine(val); + return (pmd_t) { val }; +} @@ -2860,10 +2880,10 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +{ + pmdval_t ret = __pmd_val(pmd); +#if CONFIG_XEN_COMPAT <= 0x030002 -+ if (ret) ++ if (likely(ret)) + ret = pte_machine_to_phys(ret) | _PAGE_PRESENT; +#else -+ if (ret & _PAGE_PRESENT) ++ if (likely(ret & _PAGE_PRESENT)) + ret = pte_machine_to_phys(ret); +#endif + return ret; @@ -2892,7 +2912,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +#define __pte_ma(x) ((pte_t) { .pte = 
(x) } ) +static inline pte_t xen_make_pte(pteval_t val) +{ -+ if ((val & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT) ++ if (likely((val & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT)) + val = pte_phys_to_machine(val); + return (pte_t) { .pte = val }; +} @@ -2901,7 +2921,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +static inline pteval_t xen_pte_val(pte_t pte) +{ + pteval_t ret = __pte_val(pte); -+ if ((pte.pte_low & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT) ++ if (likely((pte.pte_low & (_PAGE_PRESENT|_PAGE_IOMAP)) == _PAGE_PRESENT)) + ret = pte_machine_to_phys(ret); + return ret; +} @@ -2975,8 +2995,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_X86_PGTABLE_DEFS_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:45:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:45:53.000000000 +0100 @@ -16,6 +16,7 @@ struct mm_struct; #include #include @@ -2985,8 +3005,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #include #include #include -@@ -74,10 +75,10 @@ struct cpuinfo_x86 { - char pad0; +@@ -78,10 +79,10 @@ struct cpuinfo_x86 { + #endif #else /* Number of 4K pages in DTLB/ITLB combined(in pages): */ - int x86_tlbsize; @@ -2995,22 +3015,19 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches __u8 x86_virt_bits; __u8 x86_phys_bits; -#endif + #ifndef CONFIG_XEN /* CPUID returned core id bits: */ __u8 x86_coreid_bits; - /* Max extended CPUID function supported: */ -@@ -92,9 +93,9 @@ struct cpuinfo_x86 { - int x86_cache_alignment; /* In bytes */ - int x86_power; - unsigned long loops_per_jiffy; --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) +@@ -101,7 +102,7 @@ struct cpuinfo_x86 { + #ifndef CONFIG_XEN + #ifdef CONFIG_SMP /* cpus sharing the last level cache: */ - cpumask_t llc_shared_map; + cpumask_var_t llc_shared_map; #endif /* cpuid returned max cores value: */ u16 x86_max_cores; -@@ -138,7 +139,7 @@ extern struct cpuinfo_x86 new_cpu_data; +@@ -148,7 +149,7 @@ extern struct cpuinfo_x86 new_cpu_data; extern __u32 cleared_cpu_caps[NCAPINTS]; #ifdef CONFIG_SMP @@ -3019,7 +3036,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define cpu_data(cpu) per_cpu(cpu_info, cpu) #define current_cpu_data __get_cpu_var(cpu_info) #else -@@ -251,7 +252,6 @@ struct x86_hw_tss { +@@ -261,7 +262,6 @@ struct x86_hw_tss { #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) #define INVALID_IO_BITMAP_OFFSET 0x8000 @@ -3027,7 +3044,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifndef CONFIG_X86_NO_TSS struct tss_struct { -@@ -267,11 +267,6 @@ struct tss_struct { +@@ -277,11 +277,6 @@ struct tss_struct { * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; @@ -3039,7 +3056,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * .. 
and then another 0x100 bytes for the emergency kernel stack: -@@ -280,7 +275,7 @@ struct tss_struct { +@@ -290,7 +285,7 @@ struct tss_struct { } ____cacheline_aligned; @@ -3048,7 +3065,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Save the original ist values for checking stack pointers during debugging -@@ -363,6 +358,11 @@ struct i387_soft_struct { +@@ -373,6 +368,11 @@ struct i387_soft_struct { u32 entry_eip; }; @@ -3060,7 +3077,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2]; -@@ -372,6 +372,7 @@ struct xsave_hdr_struct { +@@ -382,6 +382,7 @@ struct xsave_hdr_struct { struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; @@ -3068,7 +3085,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* new processor state extensions will go here */ } __attribute__ ((packed, aligned (64))); -@@ -382,11 +383,37 @@ union thread_xstate { +@@ -392,11 +393,37 @@ union thread_xstate { struct xsave_struct xsave; }; @@ -3108,7 +3125,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches extern unsigned int xstate_size; extern void free_thread_xstate(struct task_struct *); extern struct kmem_cache *task_xstate_cachep; -@@ -659,6 +686,7 @@ static inline void __sti_mwait(unsigned +@@ -669,6 +696,7 @@ static inline void __sti_mwait(unsigned extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); extern void select_idle_routine(const struct cpuinfo_x86 *c); @@ -3116,7 +3133,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches extern unsigned long boot_option_idle_override; extern unsigned long idle_halt; -@@ -696,9 +724,9 @@ extern int sysenter_setup(void); +@@ -706,9 +734,9 @@ extern int sysenter_setup(void); extern struct desc_ptr early_gdt_descr; extern void cpu_set_gdt(int); @@ -3128,7 +3145,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static inline unsigned long get_debugctlmsr(void) { -@@ -783,6 +811,7 @@ static inline void spin_lock_prefetch(co +@@ -793,6 +821,7 @@ static inline void spin_lock_prefetch(co * User space process size: 3GB (default). */ #define TASK_SIZE PAGE_OFFSET @@ -3136,7 +3153,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX STACK_TOP -@@ -840,7 +869,7 @@ extern unsigned long thread_saved_pc(str +@@ -850,7 +879,7 @@ extern unsigned long thread_saved_pc(str /* * User space process size. 47bits minus one guard page. */ @@ -3145,7 +3162,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* This decides where the kernel will search for a free chunk of vm * space during mmap's. -@@ -849,12 +878,12 @@ extern unsigned long thread_saved_pc(str +@@ -859,12 +888,12 @@ extern unsigned long thread_saved_pc(str 0xc0000000 : 0xFFFFe000) #define TASK_SIZE (test_thread_flag(TIF_IA32) ? 
\ @@ -3161,9 +3178,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define INIT_THREAD { \ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:25:06.000000000 +0100 -@@ -15,53 +15,25 @@ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:44:12.000000000 +0100 +@@ -15,34 +15,8 @@ # include # endif #endif @@ -3197,32 +3214,19 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -extern int __cpuinit get_local_pda(int cpu); +#include - extern int smp_num_siblings; extern unsigned int num_processors; --DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); --DECLARE_PER_CPU(cpumask_t, cpu_core_map); -+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); -+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); +@@ -51,9 +25,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_m + DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(u16, cpu_llc_id); + #endif -#ifdef CONFIG_X86_32 DECLARE_PER_CPU(int, cpu_number); -#endif - static inline struct cpumask *cpu_sibling_mask(int cpu) - { -- return &per_cpu(cpu_sibling_map, cpu); -+ return per_cpu(cpu_sibling_map, cpu); - } - - static inline struct cpumask *cpu_core_mask(int cpu) + static inline const struct cpumask *cpu_sibling_mask(int cpu) { -- return &per_cpu(cpu_core_map, cpu); -+ return per_cpu(cpu_core_map, cpu); - } - - DECLARE_PER_CPU(u16, x86_cpu_to_apicid); -@@ -149,9 +121,10 @@ static inline void arch_send_call_functi +@@ -152,9 +124,10 @@ static inline void arch_send_call_functi smp_ops.send_call_func_single_ipi(cpu); } @@ -3235,7 +3239,17 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } void cpu_disable_common(void); -@@ -176,14 +149,12 @@ void xen_send_call_func_single_ipi(int c +@@ -167,6 +140,9 @@ void native_cpu_die(unsigned int cpu); + void native_play_dead(void); + void play_dead_common(void); + ++void smp_store_cpu_info(int id); ++#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) ++ + #else /* CONFIG_XEN */ + + extern int __cpu_disable(void); +@@ -179,26 +155,17 @@ void xen_send_call_func_single_ipi(int c #define smp_send_stop xen_smp_send_stop #define smp_send_reschedule xen_smp_send_reschedule #define arch_send_call_function_single_ipi xen_send_call_func_single_ipi @@ -3248,10 +3262,11 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -extern void prefill_possible_map(void); - - void smp_store_cpu_info(int id); - #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) - -@@ -192,10 +163,6 @@ static inline int num_booting_cpus(void) +-void smp_store_cpu_info(int id); +-#define cpu_physical_id(cpu) (cpu) +- + /* We don't mark CPUs online until __cpu_up(), so we need another measure */ + static inline int num_booting_cpus(void) { return cpumask_weight(cpu_callout_mask); } @@ -3262,7 +3277,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif /* CONFIG_SMP */ extern unsigned disabled_cpus __cpuinitdata; -@@ -206,11 +173,11 @@ extern unsigned disabled_cpus __cpuinitd +@@ -209,11 +176,11 @@ extern unsigned disabled_cpus __cpuinitd * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. 
*/ @@ -3276,18 +3291,18 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define stack_smp_processor_id() \ ({ \ -@@ -220,10 +187,6 @@ extern unsigned disabled_cpus __cpuinitd +@@ -223,10 +190,6 @@ extern unsigned disabled_cpus __cpuinitd }) #define safe_smp_processor_id() smp_processor_id() -#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ --#define cpu_physical_id(cpu) boot_cpu_physical_apicid +-#define cpu_physical_id(cpu) 0 -#define safe_smp_processor_id() 0 -#define stack_smp_processor_id() 0 #endif #ifdef CONFIG_X86_LOCAL_APIC -@@ -235,28 +198,9 @@ static inline int logical_smp_processor_ +@@ -238,28 +201,9 @@ static inline int logical_smp_processor_ return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } @@ -3316,7 +3331,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #else /* CONFIG_X86_LOCAL_APIC */ -@@ -266,11 +210,5 @@ static inline int hard_smp_processor_id( +@@ -269,11 +213,5 @@ static inline int hard_smp_processor_id( #endif /* CONFIG_X86_LOCAL_APIC */ @@ -3328,8 +3343,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_SMP_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:44:12.000000000 +0100 @@ -255,40 +255,18 @@ static __always_inline void __ticket_spi static inline int xen_spinlock_init(unsigned int cpu) { return 0; } static inline void xen_spinlock_cleanup(unsigned int cpu) {} @@ -3424,8 +3439,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define _raw_spin_relax(lock) cpu_relax() #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:44:12.000000000 +0100 @@ -26,6 +26,20 @@ typedef union { # define TICKET_SHIFT 16 u16 cur, seq; @@ -3447,8 +3462,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif }; } raw_spinlock_t; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:05:49.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:06:47.000000000 +0100 @@ -21,9 +21,24 @@ struct task_struct; /* one of the stranger aspects of C forward declarations */ struct task_struct *__switch_to(struct task_struct *prev, @@ -3500,7 +3515,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches : /* reloaded segment registers */ \ "memory"); \ } while (0) -@@ -87,27 +107,44 @@ do { \ +@@ -89,27 +109,44 @@ do { \ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ "r12", "r13", "r14", "r15" @@ -3551,7 +3566,7 @@ 
Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches : "memory", "cc" __EXTRA_CLOBBER) #endif -@@ -166,6 +203,25 @@ extern void xen_load_gs_index(unsigned); +@@ -168,6 +205,25 @@ extern void xen_load_gs_index(unsigned); #define savesegment(seg, value) \ asm("mov %%" #seg ",%0":"=r" (value) : : "memory") @@ -3577,8 +3592,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:44:12.000000000 +0100 @@ -86,21 +86,20 @@ static inline void flush_tlb_range(struc flush_tlb_mm(vma->vm_mm); } @@ -3612,17 +3627,17 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +extern void zap_low_mappings(void); + #endif /* _ASM_X86_TLBFLUSH_H */ ---- head-2010-05-25.orig/arch/x86/kernel/Makefile 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/Makefile 2010-03-24 15:25:06.000000000 +0100 -@@ -122,7 +122,6 @@ obj-$(CONFIG_X86_XEN) += fixup.o +--- head-2011-03-17.orig/arch/x86/kernel/Makefile 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/Makefile 2011-02-01 14:44:12.000000000 +0100 +@@ -115,7 +115,6 @@ obj-$(CONFIG_X86_XEN) += fixup.o ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) - obj-$(CONFIG_X86_XEN_GENAPIC) += genapic_64.o genapic_xen_64.o - obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o - obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o obj-$(CONFIG_AUDIT) += audit_64.o -@@ -134,11 +133,10 @@ ifeq ($(CONFIG_X86_64),y) + + obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o +@@ -125,11 +124,10 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o @@ -3637,9 +3652,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + i8259.o irqinit_$(BITS).o pci-swiotlb.o reboot.o smpboot.o tsc.o \ + tsc_sync.o uv_%.o vsmp_64.o disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += probe_roms_32.o ---- head-2010-05-25.orig/arch/x86/kernel/acpi/boot.c 2010-04-15 10:05:36.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/acpi/boot.c 2010-04-15 10:07:05.000000000 +0200 -@@ -115,11 +115,6 @@ char *__init __acpi_map_table(unsigned l +--- head-2011-03-17.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 10:59:30.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/boot.c 2011-03-11 10:59:49.000000000 +0100 +@@ -162,11 +162,6 @@ char *__init __acpi_map_table(unsigned l if (!phys || !size) return NULL; @@ -3651,7 +3666,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return early_ioremap(phys, size); } void __init __acpi_unmap_table(char *map, unsigned long size) -@@ -151,8 +146,10 @@ static int __init acpi_parse_madt(struct +@@ -198,8 +193,10 @@ static int __init acpi_parse_madt(struct madt->address); } @@ -3662,8 +3677,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:25:06.000000000 
+0100 +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -104,6 +104,7 @@ int acpi_save_state_mem(void) stack_start.sp = temp_stack + sizeof(temp_stack); early_gdt_descr.address = @@ -3672,9 +3687,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif initial_code = (unsigned long)wakeup_long64; saved_magic = 0x123456789abcdef0; ---- head-2010-05-25.orig/arch/x86/kernel/apic/Makefile 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/apic/Makefile 2010-03-24 15:25:06.000000000 +0100 -@@ -17,3 +17,10 @@ obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o +--- head-2011-03-17.orig/arch/x86/kernel/apic/Makefile 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/Makefile 2011-02-01 14:44:12.000000000 +0100 +@@ -19,3 +19,9 @@ obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o obj-$(CONFIG_X86_NUMAQ) += numaq_32.o obj-$(CONFIG_X86_ES7000) += es7000_32.o obj-$(CONFIG_X86_SUMMIT) += summit_32.o @@ -3684,32 +3699,19 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +probe_64-$(CONFIG_XEN) := probe_32.o + +disabled-obj-$(CONFIG_XEN) := apic_flat_$(BITS).o -+disabled-obj-$(filter-out $(CONFIG_SMP),$(CONFIG_XEN)) += ipi.o ---- head-2010-05-25.orig/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/apic-xen.c 2010-03-24 15:25:06.000000000 +0100 -@@ -4,11 +4,20 @@ - - #include - #include -+#include - - #include +--- head-2011-03-17.orig/arch/x86/kernel/apic/apic-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/apic-xen.c 2011-02-01 14:44:12.000000000 +0100 +@@ -9,6 +9,8 @@ #include #include +unsigned int num_processors; -+ -+/* -+ * Map cpu index to physical APIC ID -+ */ -+DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; -+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); + /* * Debug level, exported for io_apic.c */ ---- head-2010-05-25.orig/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -1,7 +1,7 @@ /* * Intel IO-APIC support for multi-Pentium hosts. @@ -3719,7 +3721,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches * * Many thanks to Stig Venaas for trying out countless experimental * patches and reporting/debugging problems patiently! 
-@@ -46,6 +46,7 @@ +@@ -42,6 +42,7 @@ #include #include #include @@ -3727,9 +3729,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #include #include #include -@@ -61,9 +62,7 @@ - #include - #include +@@ -51,9 +52,7 @@ + #include + #include -#include -#include @@ -3738,7 +3740,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifdef CONFIG_XEN #include -@@ -97,11 +96,11 @@ static DEFINE_SPINLOCK(vector_lock); +@@ -87,11 +86,11 @@ static DEFINE_SPINLOCK(vector_lock); int nr_ioapic_registers[MAX_IO_APICS]; /* I/O APIC entries */ @@ -3752,7 +3754,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* # of MP IRQ source entries */ int mp_irq_entries; -@@ -114,10 +113,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BU +@@ -104,10 +103,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BU int skip_ioapic_setup; @@ -3773,7 +3775,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return 0; } early_param("noapic", parse_noapic); -@@ -372,7 +380,7 @@ set_extra_move_desc(struct irq_desc *des +@@ -362,7 +370,7 @@ set_extra_move_desc(struct irq_desc *des if (!cfg->move_in_progress) { /* it means that domain is not changed */ @@ -3782,7 +3784,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches cfg->move_desc_pending = 1; } } -@@ -397,12 +405,20 @@ struct io_apic { +@@ -387,12 +395,20 @@ struct io_apic { unsigned int index; unsigned int unused[3]; unsigned int data; @@ -3802,9 +3804,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + struct io_apic __iomem *io_apic = io_apic_base(apic); + writel(vector, &io_apic->eoi); } - #endif /* CONFIG_XEN */ + #endif /* !CONFIG_XEN */ -@@ -416,7 +432,7 @@ static inline unsigned int io_apic_read( +@@ -406,7 +422,7 @@ static inline unsigned int io_apic_read( struct physdev_apic apic_op; int ret; @@ -3813,7 +3815,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches apic_op.reg = reg; ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); if (ret) -@@ -434,7 +450,7 @@ static inline void io_apic_write(unsigne +@@ -424,7 +440,7 @@ static inline void io_apic_write(unsigne #else struct physdev_apic apic_op; @@ -3822,7 +3824,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches apic_op.reg = reg; apic_op.value = value; WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op)); -@@ -522,7 +538,7 @@ __ioapic_write_entry(int apic, int pin, +@@ -512,7 +528,7 @@ __ioapic_write_entry(int apic, int pin, io_apic_write(apic, 0x10 + 2*pin, eu.w1); } @@ -3831,7 +3833,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); -@@ -558,11 +574,11 @@ static void send_cleanup_vector(struct i +@@ -548,11 +564,11 @@ static void send_cleanup_vector(struct i for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) cfg->move_cleanup_count++; for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) @@ -3845,7 +3847,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches free_cpumask_var(cleanup_mask); } cfg->move_in_progress = 0; -@@ -583,16 +599,12 @@ static void __target_IO_APIC_irq(unsigne +@@ -573,16 +589,12 @@ static void __target_IO_APIC_irq(unsigne apic = entry->apic; pin = entry->pin; @@ -3862,7 +3864,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches reg = 
io_apic_read(apic, 0x10 + pin*2); reg &= ~IO_APIC_REDIR_VECTOR_MASK; reg |= vector; -@@ -607,8 +619,9 @@ static int +@@ -597,8 +609,9 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); /* @@ -3874,7 +3876,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches */ static unsigned int set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) -@@ -624,9 +637,12 @@ set_desc_affinity(struct irq_desc *desc, +@@ -614,9 +627,12 @@ set_desc_affinity(struct irq_desc *desc, if (assign_irq_vector(irq, cfg, mask)) return BAD_APICID; @@ -3889,7 +3891,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } static void -@@ -840,23 +856,6 @@ static void clear_IO_APIC (void) +@@ -830,23 +846,6 @@ static void clear_IO_APIC (void) for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) clear_IO_APIC_pin(apic, pin); } @@ -3912,8 +3914,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -#endif /* !CONFIG_SMP && CONFIG_X86_32*/ #else #define add_pin_to_irq_cpu(cfg, cpu, apic, pin) - #endif /* CONFIG_XEN */ -@@ -868,8 +867,9 @@ void send_IPI_self(int vector) + #endif /* !CONFIG_XEN */ +@@ -858,8 +857,9 @@ void send_IPI_self(int vector) */ #define MAX_PIRQS 8 @@ -3925,7 +3927,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static int __init ioapic_pirq_setup(char *str) { -@@ -878,10 +878,6 @@ static int __init ioapic_pirq_setup(char +@@ -868,10 +868,6 @@ static int __init ioapic_pirq_setup(char get_options(str, ARRAY_SIZE(ints), ints); @@ -3936,7 +3938,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches apic_printk(APIC_VERBOSE, KERN_INFO "PIRQ redirection, working around broken MP-BIOS.\n"); max = MAX_PIRQS; -@@ -903,75 +899,106 @@ __setup("pirq=", ioapic_pirq_setup); +@@ -893,75 +889,106 @@ __setup("pirq=", ioapic_pirq_setup); #endif /* CONFIG_X86_32 */ #ifdef CONFIG_INTR_REMAP @@ -4082,7 +4084,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { /* * for now plain restore of previous settings. -@@ -980,7 +1007,17 @@ void reinit_intr_remapped_IO_APIC(int in +@@ -970,7 +997,17 @@ void reinit_intr_remapped_IO_APIC(int in * table entries. for now, do a plain restore, and wait for * the setup_IO_APIC_irqs() to do proper initialization. 
*/ @@ -4101,7 +4103,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } #endif -@@ -992,10 +1029,10 @@ static int find_irq_entry(int apic, int +@@ -982,10 +1019,10 @@ static int find_irq_entry(int apic, int int i; for (i = 0; i < mp_irq_entries; i++) @@ -4116,7 +4118,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return i; return -1; -@@ -1010,13 +1047,13 @@ static int __init find_isa_irq_pin(int i +@@ -1000,13 +1037,13 @@ static int __init find_isa_irq_pin(int i int i; for (i = 0; i < mp_irq_entries; i++) { @@ -4134,7 +4136,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } return -1; } -@@ -1026,17 +1063,17 @@ static int __init find_isa_irq_apic(int +@@ -1016,17 +1053,17 @@ static int __init find_isa_irq_apic(int int i; for (i = 0; i < mp_irq_entries; i++) { @@ -4156,7 +4158,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return apic; } } -@@ -1062,23 +1099,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, +@@ -1052,23 +1089,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, return -1; } for (i = 0; i < mp_irq_entries; i++) { @@ -4187,7 +4189,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return irq; /* * Use the first all-but-pin matching entry as a -@@ -1121,7 +1158,7 @@ static int EISA_ELCR(unsigned int irq) +@@ -1111,7 +1148,7 @@ static int EISA_ELCR(unsigned int irq) * EISA conforming in the MP table, that means its trigger type must * be read in from the ELCR */ @@ -4196,7 +4198,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define default_EISA_polarity(idx) default_ISA_polarity(idx) /* PCI interrupts are always polarity one level triggered, -@@ -1138,13 +1175,13 @@ static int EISA_ELCR(unsigned int irq) +@@ -1128,13 +1165,13 @@ static int EISA_ELCR(unsigned int irq) static int MPBIOS_polarity(int idx) { @@ -4212,7 +4214,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { case 0: /* conforms, ie. bus-type dependent polarity */ if (test_bit(bus, mp_bus_not_pci)) -@@ -1180,13 +1217,13 @@ static int MPBIOS_polarity(int idx) +@@ -1170,13 +1207,13 @@ static int MPBIOS_polarity(int idx) static int MPBIOS_trigger(int idx) { @@ -4228,7 +4230,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { case 0: /* conforms, ie. 
bus-type dependent */ if (test_bit(bus, mp_bus_not_pci)) -@@ -1264,16 +1301,16 @@ int (*ioapic_renumber_irq)(int ioapic, i +@@ -1254,16 +1291,16 @@ int (*ioapic_renumber_irq)(int ioapic, i static int pin_2_irq(int idx, int apic, int pin) { int irq, i; @@ -4248,7 +4250,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } else { /* * PCI IRQs are mapped in order -@@ -1366,7 +1403,7 @@ __assign_irq_vector(int irq, struct irq_ +@@ -1356,7 +1393,7 @@ __assign_irq_vector(int irq, struct irq_ int new_cpu; int vector, offset; @@ -4257,7 +4259,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches vector = current_vector; offset = current_offset; -@@ -1476,9 +1513,7 @@ void __setup_vector_irq(int cpu) +@@ -1466,9 +1503,7 @@ void __setup_vector_irq(int cpu) } static struct irq_chip ioapic_chip; @@ -4267,7 +4269,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define IOAPIC_AUTO -1 #define IOAPIC_EDGE 0 -@@ -1517,7 +1552,6 @@ static void ioapic_register_intr(int irq +@@ -1507,7 +1542,6 @@ static void ioapic_register_intr(int irq else desc->status &= ~IRQ_LEVEL; @@ -4275,7 +4277,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (irq_remapped(irq)) { desc->status |= IRQ_MOVE_PCNTXT; if (trigger) -@@ -1529,7 +1563,7 @@ static void ioapic_register_intr(int irq +@@ -1519,7 +1553,7 @@ static void ioapic_register_intr(int irq handle_edge_irq, "edge"); return; } @@ -4284,7 +4286,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || trigger == IOAPIC_LEVEL) set_irq_chip_and_handler_name(irq, &ioapic_chip, -@@ -1544,37 +1578,44 @@ static void ioapic_register_intr(int irq +@@ -1534,37 +1568,44 @@ static void ioapic_register_intr(int irq #define ioapic_register_intr(irq, desc, trigger) evtchn_register_pirq(irq) #endif @@ -4303,9 +4305,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches memset(entry,0,sizeof(*entry)); -#ifdef CONFIG_INTR_REMAP ++#ifndef CONFIG_XEN if (intr_remapping_enabled) { - struct intel_iommu *iommu = map_ioapic_to_ir(apic); -+#ifndef CONFIG_XEN + struct intel_iommu *iommu = map_ioapic_to_ir(apic_id); struct irte irte; struct IR_IO_APIC_route_entry *ir_entry = @@ -4340,21 +4342,20 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches irte.vector = vector; irte.dest_id = IRTE_DEST(destination); -@@ -1584,18 +1625,22 @@ static int setup_ioapic_entry(int apic, +@@ -1574,18 +1615,23 @@ static int setup_ioapic_entry(int apic, ir_entry->zero = 0; ir_entry->format = 1; ir_entry->index = (index & 0x7fff); -- } else + /* + * IO-APIC RTE will be configured with virtual vector. + * irq handler will do the explicit EOI to the io-apic. + */ + ir_entry->vector = pin; + } else #endif -- { + { - entry->delivery_mode = INT_DELIVERY_MODE; - entry->dest_mode = INT_DEST_MODE; -+ } else { + entry->delivery_mode = apic->irq_delivery_mode; + entry->dest_mode = apic->irq_dest_mode; entry->dest = destination; @@ -4368,7 +4369,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* Mask level triggered irqs. * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 
-@@ -1605,7 +1650,7 @@ static int setup_ioapic_entry(int apic, +@@ -1595,7 +1641,7 @@ static int setup_ioapic_entry(int apic, return 0; } @@ -4377,7 +4378,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches int trigger, int polarity) { struct irq_cfg *cfg; -@@ -1617,26 +1662,26 @@ static void setup_IO_APIC_irq(int apic, +@@ -1607,26 +1653,26 @@ static void setup_IO_APIC_irq(int apic, cfg = desc->chip_data; @@ -4411,7 +4412,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches __clear_irq_vector(irq, cfg); return; } -@@ -1645,12 +1690,12 @@ static void setup_IO_APIC_irq(int apic, +@@ -1635,12 +1681,12 @@ static void setup_IO_APIC_irq(int apic, if (irq < NR_IRQS_LEGACY) disable_8259A_irq(irq); @@ -4426,7 +4427,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches int notcon = 0; struct irq_desc *desc; struct irq_cfg *cfg; -@@ -1658,21 +1703,19 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1648,21 +1694,19 @@ static void __init setup_IO_APIC_irqs(vo apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); @@ -4453,7 +4454,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches continue; } if (notcon) { -@@ -1681,23 +1724,30 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1671,23 +1715,30 @@ static void __init setup_IO_APIC_irqs(vo notcon = 0; } @@ -4491,7 +4492,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches irq_trigger(idx), irq_polarity(idx)); } } -@@ -1711,15 +1761,13 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1701,15 +1752,13 @@ static void __init setup_IO_APIC_irqs(vo /* * Set up the timer pin, possibly with the 8259A-master behind. */ @@ -4508,7 +4509,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches memset(&entry, 0, sizeof(entry)); -@@ -1727,10 +1775,10 @@ static void __init setup_timer_IRQ0_pin( +@@ -1717,10 +1766,10 @@ static void __init setup_timer_IRQ0_pin( * We use logical delivery to get the timer IRQ * to the first CPU. */ @@ -4523,7 +4524,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches entry.polarity = 0; entry.trigger = 0; entry.vector = vector; -@@ -1744,7 +1792,7 @@ static void __init setup_timer_IRQ0_pin( +@@ -1734,7 +1783,7 @@ static void __init setup_timer_IRQ0_pin( /* * Add it to the IO-APIC irq-routing table: */ @@ -4532,7 +4533,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } -@@ -1766,7 +1814,7 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1756,7 +1805,7 @@ __apicdebuginit(void) print_IO_APIC(void printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); for (i = 0; i < nr_ioapics; i++) printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", @@ -4541,7 +4542,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * We are a bit conservative about what we expect. We have to -@@ -1786,7 +1834,7 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1776,7 +1825,7 @@ __apicdebuginit(void) print_IO_APIC(void spin_unlock_irqrestore(&ioapic_lock, flags); printk("\n"); @@ -4550,7 +4551,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); printk(KERN_DEBUG "....... 
: Delivery Type: %X\n", reg_00.bits.delivery_type); -@@ -2050,13 +2098,6 @@ void __init enable_IO_APIC(void) +@@ -2040,13 +2089,6 @@ void __init enable_IO_APIC(void) int apic; unsigned long flags; @@ -4564,7 +4565,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * The number of IO-APIC IRQ registers (== #pins): */ -@@ -2129,8 +2170,13 @@ void disable_IO_APIC(void) +@@ -2117,8 +2159,13 @@ void disable_IO_APIC(void) * If the i8259 is routed through an IOAPIC * Put that IOAPIC in virtual wire mode * so legacy interrupts can be delivered. @@ -4579,7 +4580,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct IO_APIC_route_entry entry; memset(&entry, 0, sizeof(entry)); -@@ -2150,7 +2196,10 @@ void disable_IO_APIC(void) +@@ -2138,7 +2185,10 @@ void disable_IO_APIC(void) ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); } @@ -4591,7 +4592,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } #ifdef CONFIG_X86_32 -@@ -2165,7 +2214,7 @@ static void __init setup_ioapic_ids_from +@@ -2153,7 +2203,7 @@ static void __init setup_ioapic_ids_from { union IO_APIC_reg_00 reg_00; physid_mask_t phys_id_present_map; @@ -4600,7 +4601,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches int i; unsigned char old_id; unsigned long flags; -@@ -2184,26 +2233,26 @@ static void __init setup_ioapic_ids_from +@@ -2172,26 +2222,26 @@ static void __init setup_ioapic_ids_from * This is broken; anything with a real cpu count has to * circumvent this idiocy regardless. */ @@ -4634,7 +4635,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } /* -@@ -2211,10 +2260,10 @@ static void __init setup_ioapic_ids_from +@@ -2199,10 +2249,10 @@ static void __init setup_ioapic_ids_from * system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. */ @@ -4648,7 +4649,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches for (i = 0; i < get_physical_broadcast(); i++) if (!physid_isset(i, phys_id_present_map)) break; -@@ -2223,13 +2272,13 @@ static void __init setup_ioapic_ids_from +@@ -2211,13 +2261,13 @@ static void __init setup_ioapic_ids_from printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", i); physid_set(i, phys_id_present_map); @@ -4665,7 +4666,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches physids_or(phys_id_present_map, phys_id_present_map, tmp); } -@@ -2238,11 +2287,11 @@ static void __init setup_ioapic_ids_from +@@ -2226,11 +2276,11 @@ static void __init setup_ioapic_ids_from * We need to adjust the IRQ routing table * if the ID changed. 
*/ @@ -4681,7 +4682,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Read the right value from the MPC table and -@@ -2250,20 +2299,20 @@ static void __init setup_ioapic_ids_from +@@ -2238,20 +2288,20 @@ static void __init setup_ioapic_ids_from */ apic_printk(APIC_VERBOSE, KERN_INFO "...changing IO-APIC physical APIC ID to %d ...", @@ -4707,7 +4708,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches printk("could not set ID!\n"); else apic_printk(APIC_VERBOSE, " ok.\n"); -@@ -2366,7 +2415,7 @@ static int ioapic_retrigger_irq(unsigned +@@ -2354,7 +2404,7 @@ static int ioapic_retrigger_irq(unsigned unsigned long flags; spin_lock_irqsave(&vector_lock, flags); @@ -4716,7 +4717,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches spin_unlock_irqrestore(&vector_lock, flags); return 1; -@@ -2374,7 +2423,7 @@ static int ioapic_retrigger_irq(unsigned +@@ -2362,7 +2412,7 @@ static int ioapic_retrigger_irq(unsigned #else static int ioapic_retrigger_irq(unsigned int irq) { @@ -4725,7 +4726,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return 1; } -@@ -2392,37 +2441,24 @@ static int ioapic_retrigger_irq(unsigned +@@ -2380,37 +2430,24 @@ static int ioapic_retrigger_irq(unsigned #ifdef CONFIG_SMP #ifdef CONFIG_INTR_REMAP @@ -4769,7 +4770,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned int irq; if (!cpumask_intersects(mask, cpu_online_mask)) -@@ -2438,14 +2474,7 @@ migrate_ioapic_irq_desc(struct irq_desc +@@ -2426,14 +2463,7 @@ migrate_ioapic_irq_desc(struct irq_desc set_extra_move_desc(desc, mask); @@ -4785,7 +4786,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); -@@ -2458,61 +2487,7 @@ migrate_ioapic_irq_desc(struct irq_desc +@@ -2446,61 +2476,7 @@ migrate_ioapic_irq_desc(struct irq_desc if (cfg->move_in_progress) send_cleanup_vector(cfg); @@ -4848,7 +4849,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } /* -@@ -2521,13 +2496,6 @@ static void ir_irq_migration(struct work +@@ -2509,13 +2485,6 @@ static void ir_irq_migration(struct work static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) { @@ -4862,7 +4863,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches migrate_ioapic_irq_desc(desc, mask); } static void set_ir_ioapic_affinity_irq(unsigned int irq, -@@ -2537,6 +2505,11 @@ static void set_ir_ioapic_affinity_irq(u +@@ -2525,6 +2494,11 @@ static void set_ir_ioapic_affinity_irq(u set_ir_ioapic_affinity_irq_desc(desc, mask); } @@ -4874,7 +4875,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif asmlinkage void smp_irq_move_cleanup_interrupt(void) -@@ -2550,6 +2523,7 @@ asmlinkage void smp_irq_move_cleanup_int +@@ -2538,6 +2512,7 @@ asmlinkage void smp_irq_move_cleanup_int me = smp_processor_id(); for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { unsigned int irq; @@ -4882,7 +4883,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct irq_desc *desc; struct irq_cfg *cfg; irq = __get_cpu_var(vector_irq)[vector]; -@@ -2569,6 +2543,18 @@ asmlinkage void smp_irq_move_cleanup_int +@@ -2557,6 +2532,18 @@ asmlinkage void smp_irq_move_cleanup_int if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) goto 
unlock; @@ -4901,7 +4902,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches __get_cpu_var(vector_irq)[vector] = -1; cfg->move_cleanup_count--; unlock: -@@ -2591,7 +2577,7 @@ static void irq_complete_move(struct irq +@@ -2579,7 +2566,7 @@ static void irq_complete_move(struct irq /* domain has not changed, but affinity did */ me = smp_processor_id(); @@ -4910,7 +4911,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches *descp = desc = move_irq_desc(desc, me); /* get the new one */ cfg = desc->chip_data; -@@ -2617,17 +2603,51 @@ static void irq_complete_move(struct irq +@@ -2605,17 +2592,51 @@ static void irq_complete_move(struct irq static inline void irq_complete_move(struct irq_desc **descp) {} #endif @@ -4964,7 +4965,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif static void ack_apic_edge(unsigned int irq) -@@ -2693,6 +2713,9 @@ static void ack_apic_level(unsigned int +@@ -2681,6 +2702,9 @@ static void ack_apic_level(unsigned int */ ack_APIC_irq(); @@ -4974,7 +4975,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* Now we can move and renable the irq */ if (unlikely(do_unmask_irq)) { /* Only migrate the irq if the ack has been received. -@@ -2738,6 +2761,26 @@ static void ack_apic_level(unsigned int +@@ -2726,6 +2750,26 @@ static void ack_apic_level(unsigned int #endif } @@ -5001,7 +5002,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static struct irq_chip ioapic_chip __read_mostly = { .name = "IO-APIC", .startup = startup_ioapic_irq, -@@ -2751,20 +2794,20 @@ static struct irq_chip ioapic_chip __rea +@@ -2739,20 +2783,20 @@ static struct irq_chip ioapic_chip __rea .retrigger = ioapic_retrigger_irq, }; @@ -5023,10 +5024,10 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches .retrigger = ioapic_retrigger_irq, }; -#endif - #endif /* CONFIG_XEN */ + #endif /* !CONFIG_XEN */ static inline void init_IO_APIC_traps(void) -@@ -2786,7 +2829,7 @@ static inline void init_IO_APIC_traps(vo +@@ -2774,7 +2818,7 @@ static inline void init_IO_APIC_traps(vo */ for_each_irq_desc(irq, desc) { #ifdef CONFIG_XEN @@ -5035,7 +5036,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches continue; #endif cfg = desc->chip_data; -@@ -2948,19 +2991,15 @@ static inline void __init check_timer(vo +@@ -2936,19 +2980,15 @@ static inline void __init check_timer(vo int cpu = boot_cpu_id; int apic1, pin1, apic2, pin2; unsigned long flags; @@ -5056,7 +5057,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * As IRQ0 is to be enabled in the 8259A, the virtual -@@ -2974,7 +3013,13 @@ static inline void __init check_timer(vo +@@ -2962,7 +3002,13 @@ static inline void __init check_timer(vo apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); init_8259A(1); #ifdef CONFIG_X86_32 @@ -5071,7 +5072,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif pin1 = find_isa_irq_pin(0, mp_INT); -@@ -2994,10 +3039,8 @@ static inline void __init check_timer(vo +@@ -2982,10 +3028,8 @@ static inline void __init check_timer(vo * 8259A. 
*/ if (pin1 == -1) { @@ -5082,7 +5083,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches pin1 = pin2; apic1 = apic2; no_pin1 = 1; -@@ -3013,8 +3056,17 @@ static inline void __init check_timer(vo +@@ -3001,8 +3045,17 @@ static inline void __init check_timer(vo if (no_pin1) { add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); @@ -5101,7 +5102,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (timer_irq_works()) { if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); -@@ -3024,10 +3076,9 @@ static inline void __init check_timer(vo +@@ -3012,10 +3065,9 @@ static inline void __init check_timer(vo clear_IO_APIC_pin(0, pin1); goto out; } @@ -5113,7 +5114,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches clear_IO_APIC_pin(apic1, pin1); if (!no_pin1) apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " -@@ -3042,7 +3093,6 @@ static inline void __init check_timer(vo +@@ -3030,7 +3082,6 @@ static inline void __init check_timer(vo */ replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); @@ -5121,7 +5122,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches enable_8259A_irq(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); -@@ -3057,6 +3107,7 @@ static inline void __init check_timer(vo +@@ -3045,6 +3096,7 @@ static inline void __init check_timer(vo /* * Cleanup, just in case ... */ @@ -5129,7 +5130,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches disable_8259A_irq(0); clear_IO_APIC_pin(apic2, pin2); apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); -@@ -3082,6 +3133,7 @@ static inline void __init check_timer(vo +@@ -3070,6 +3122,7 @@ static inline void __init check_timer(vo apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } @@ -5137,7 +5138,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches disable_8259A_irq(0); apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); -@@ -3099,6 +3151,7 @@ static inline void __init check_timer(vo +@@ -3087,6 +3140,7 @@ static inline void __init check_timer(vo apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } @@ -5145,7 +5146,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " "report. 
Then try booting with the 'noapic' option.\n"); -@@ -3131,7 +3184,7 @@ out: +@@ -3119,7 +3173,7 @@ out: void __init setup_IO_APIC(void) { @@ -5154,7 +5155,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches enable_IO_APIC(); #else /* -@@ -3213,8 +3266,8 @@ static int ioapic_resume(struct sys_devi +@@ -3201,8 +3255,8 @@ static int ioapic_resume(struct sys_devi spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(dev->id, 0); @@ -5165,7 +5166,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches io_apic_write(dev->id, 0, reg_00.raw); } spin_unlock_irqrestore(&ioapic_lock, flags); -@@ -3264,6 +3317,7 @@ static int __init ioapic_init_sysfs(void +@@ -3252,6 +3306,7 @@ static int __init ioapic_init_sysfs(void device_initcall(ioapic_init_sysfs); @@ -5173,7 +5174,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Dynamic irq allocate and deallocation */ -@@ -3278,11 +3332,11 @@ unsigned int create_irq_nr(unsigned int +@@ -3266,11 +3321,11 @@ unsigned int create_irq_nr(unsigned int struct irq_desc *desc_new = NULL; irq = 0; @@ -5189,7 +5190,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches desc_new = irq_to_desc_alloc_cpu(new, cpu); if (!desc_new) { printk(KERN_INFO "can not get irq_desc for %d\n", new); -@@ -3292,7 +3346,7 @@ unsigned int create_irq_nr(unsigned int +@@ -3280,7 +3335,7 @@ unsigned int create_irq_nr(unsigned int if (cfg_new->vector != 0) continue; @@ -5198,7 +5199,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches irq = new; break; } -@@ -3307,7 +3361,6 @@ unsigned int create_irq_nr(unsigned int +@@ -3295,7 +3350,6 @@ unsigned int create_irq_nr(unsigned int return irq; } @@ -5206,7 +5207,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches int create_irq(void) { unsigned int irq_want; -@@ -3336,9 +3389,7 @@ void destroy_irq(unsigned int irq) +@@ -3324,9 +3378,7 @@ void destroy_irq(unsigned int irq) if (desc) desc->chip_data = cfg; @@ -5216,7 +5217,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq, cfg); spin_unlock_irqrestore(&vector_lock, flags); -@@ -3355,14 +3406,16 @@ static int msi_compose_msg(struct pci_de +@@ -3343,14 +3395,16 @@ static int msi_compose_msg(struct pci_de int err; unsigned dest; @@ -5236,7 +5237,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (irq_remapped(irq)) { struct irte irte; int ir_index; -@@ -3374,9 +3427,9 @@ static int msi_compose_msg(struct pci_de +@@ -3362,9 +3416,9 @@ static int msi_compose_msg(struct pci_de memset (&irte, 0, sizeof(irte)); irte.present = 1; @@ -5248,7 +5249,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); -@@ -3388,16 +3441,19 @@ static int msi_compose_msg(struct pci_de +@@ -3376,16 +3430,19 @@ static int msi_compose_msg(struct pci_de MSI_ADDR_IR_SHV | MSI_ADDR_IR_INDEX1(ir_index) | MSI_ADDR_IR_INDEX2(ir_index); @@ -5274,7 +5275,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches MSI_ADDR_REDIRECTION_CPU: MSI_ADDR_REDIRECTION_LOWPRI) | MSI_ADDR_DEST_ID(dest); -@@ -3405,7 +3461,7 @@ static int msi_compose_msg(struct pci_de +@@ -3393,7 +3450,7 @@ static int msi_compose_msg(struct pci_de msg->data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | @@ 
-5283,7 +5284,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches MSI_DATA_DELIVERY_FIXED: MSI_DATA_DELIVERY_LOWPRI) | MSI_DATA_VECTOR(cfg->vector); -@@ -3491,15 +3547,16 @@ static struct irq_chip msi_chip = { +@@ -3479,15 +3536,16 @@ static struct irq_chip msi_chip = { .retrigger = ioapic_retrigger_irq, }; @@ -5302,7 +5303,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches .retrigger = ioapic_retrigger_irq, }; -@@ -3529,7 +3586,6 @@ static int msi_alloc_irte(struct pci_dev +@@ -3517,7 +3575,6 @@ static int msi_alloc_irte(struct pci_dev } return index; } @@ -5310,7 +5311,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) { -@@ -3543,7 +3599,6 @@ static int setup_msi_irq(struct pci_dev +@@ -3531,7 +3588,6 @@ static int setup_msi_irq(struct pci_dev set_irq_msi(irq, msidesc); write_msi_msg(irq, &msg); @@ -5318,7 +5319,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (irq_remapped(irq)) { struct irq_desc *desc = irq_to_desc(irq); /* -@@ -3552,7 +3607,6 @@ static int setup_msi_irq(struct pci_dev +@@ -3540,7 +3596,6 @@ static int setup_msi_irq(struct pci_dev desc->status |= IRQ_MOVE_PCNTXT; set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); } else @@ -5326,7 +5327,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); -@@ -3560,60 +3614,26 @@ static int setup_msi_irq(struct pci_dev +@@ -3548,60 +3603,26 @@ static int setup_msi_irq(struct pci_dev return 0; } @@ -5393,7 +5394,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (!intr_remapping_enabled) goto no_ir; -@@ -3641,7 +3661,6 @@ int arch_setup_msi_irqs(struct pci_dev * +@@ -3629,7 +3650,6 @@ int arch_setup_msi_irqs(struct pci_dev * set_irte_irq(irq, iommu, index, sub_handle); } no_ir: @@ -5401,7 +5402,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches ret = setup_msi_irq(dev, msidesc, irq); if (ret < 0) goto error; -@@ -3659,7 +3678,7 @@ void arch_teardown_msi_irq(unsigned int +@@ -3647,7 +3667,7 @@ void arch_teardown_msi_irq(unsigned int destroy_irq(irq); } @@ -5410,7 +5411,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifdef CONFIG_SMP static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { -@@ -3740,7 +3759,7 @@ static void hpet_msi_set_affinity(unsign +@@ -3728,7 +3748,7 @@ static void hpet_msi_set_affinity(unsign #endif /* CONFIG_SMP */ @@ -5419,7 +5420,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches .name = "HPET_MSI", .unmask = hpet_msi_unmask, .mask = hpet_msi_mask, -@@ -3755,12 +3774,14 @@ int arch_setup_hpet_msi(unsigned int irq +@@ -3743,12 +3763,14 @@ int arch_setup_hpet_msi(unsigned int irq { int ret; struct msi_msg msg; @@ -5434,7 +5435,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, "edge"); -@@ -3823,13 +3844,17 @@ int arch_setup_ht_irq(unsigned int irq, +@@ -3811,13 +3833,17 @@ int arch_setup_ht_irq(unsigned int irq, struct irq_cfg *cfg; int err; @@ -5454,7 +5455,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by 
xen-port-patches msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); -@@ -3837,11 +3862,11 @@ int arch_setup_ht_irq(unsigned int irq, +@@ -3825,11 +3851,11 @@ int arch_setup_ht_irq(unsigned int irq, HT_IRQ_LOW_BASE | HT_IRQ_LOW_DEST_ID(dest) | HT_IRQ_LOW_VECTOR(cfg->vector) | @@ -5468,7 +5469,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches HT_IRQ_LOW_MT_FIXED : HT_IRQ_LOW_MT_ARBITRATED) | HT_IRQ_LOW_IRQ_MASKED; -@@ -3857,7 +3882,7 @@ int arch_setup_ht_irq(unsigned int irq, +@@ -3845,7 +3871,7 @@ int arch_setup_ht_irq(unsigned int irq, } #endif /* CONFIG_HT_IRQ */ @@ -5477,7 +5478,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Re-target the irq to the specified CPU and enable the specified MMR located * on the specified blade to allow the sending of MSIs to the specified CPU. -@@ -3889,12 +3914,12 @@ int arch_enable_uv_irq(char *irq_name, u +@@ -3877,12 +3903,12 @@ int arch_enable_uv_irq(char *irq_name, u BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); entry->vector = cfg->vector; @@ -5493,7 +5494,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); -@@ -3957,7 +3982,29 @@ void __init probe_nr_irqs_gsi(void) +@@ -3945,7 +3971,29 @@ void __init probe_nr_irqs_gsi(void) printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); } @@ -5512,18 +5513,18 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + * for MSI and HT dyn irq + */ + nr += nr_irqs_gsi * 16; - #endif ++#endif + if (nr < nr_irqs) + nr_irqs = nr; + + return 0; +} -+#endif + #endif +#endif /* CONFIG_XEN */ /* -------------------------------------------------------------------------- ACPI-based IOAPIC Configuration -@@ -3985,7 +4032,7 @@ int __init io_apic_get_unique_id(int ioa +@@ -3973,7 +4021,7 @@ int __init io_apic_get_unique_id(int ioa */ if (physids_empty(apic_id_map)) @@ -5532,7 +5533,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic, 0); -@@ -4001,10 +4048,10 @@ int __init io_apic_get_unique_id(int ioa +@@ -3989,10 +4037,10 @@ int __init io_apic_get_unique_id(int ioa * Every APIC in a system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. 
*/ @@ -5545,7 +5546,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches break; } -@@ -4017,7 +4064,7 @@ int __init io_apic_get_unique_id(int ioa +@@ -4005,7 +4053,7 @@ int __init io_apic_get_unique_id(int ioa apic_id = i; } @@ -5554,7 +5555,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches physids_or(apic_id_map, apic_id_map, tmp); if (reg_00.bits.ID != apic_id) { -@@ -4062,7 +4109,7 @@ int io_apic_set_pci_routing (int ioapic, +@@ -4050,7 +4098,7 @@ int io_apic_set_pci_routing (int ioapic, int cpu = boot_cpu_id; #ifdef CONFIG_XEN @@ -5563,7 +5564,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ %d\n", ioapic, irq); return -EINVAL; -@@ -4103,8 +4150,8 @@ int acpi_get_override_irq(int bus_irq, i +@@ -4091,8 +4139,8 @@ int acpi_get_override_irq(int bus_irq, i return -1; for (i = 0; i < mp_irq_entries; i++) @@ -5574,7 +5575,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches break; if (i >= mp_irq_entries) return -1; -@@ -4120,7 +4167,7 @@ int acpi_get_override_irq(int bus_irq, i +@@ -4108,7 +4156,7 @@ int acpi_get_override_irq(int bus_irq, i /* * This function currently is only a helper for the i386 smp boot process where * we need to reprogram the ioredtbls to cater for the cpus which have come online @@ -5583,7 +5584,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches */ #ifdef CONFIG_SMP void __init setup_ioapic_dest(void) -@@ -4159,15 +4206,13 @@ void __init setup_ioapic_dest(void) +@@ -4147,15 +4195,13 @@ void __init setup_ioapic_dest(void) */ if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) @@ -5601,7 +5602,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches set_ioapic_affinity_irq_desc(desc, mask); } -@@ -4220,7 +4265,7 @@ void __init ioapic_init_mappings(void) +@@ -4208,7 +4254,7 @@ void __init ioapic_init_mappings(void) ioapic_res = ioapic_setup_resources(); for (i = 0; i < nr_ioapics; i++) { if (smp_found_config) { @@ -5610,7 +5611,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifdef CONFIG_X86_32 if (!ioapic_phys) { printk(KERN_ERR -@@ -4260,9 +4305,12 @@ static int __init ioapic_insert_resource +@@ -4248,9 +4294,12 @@ static int __init ioapic_insert_resource struct resource *r = ioapic_resources; if (!r) { @@ -5626,258 +5627,89 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } for (i = 0; i < nr_ioapics; i++) { ---- head-2010-05-25.orig/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/ipi-xen.c 2010-03-24 15:25:06.000000000 +0100 -@@ -17,38 +17,8 @@ - #include - #include - #include +--- head-2011-03-17.orig/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:56:59.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:57:27.000000000 +0100 +@@ -2,8 +2,8 @@ + #include + + #include +#include -#ifdef CONFIG_X86_32 --#ifndef CONFIG_XEN --#include --#include -- --/* -- * the following functions deal with sending IPIs between CPUs. -- * -- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. 
-- */ -- --static inline int __prepare_ICR(unsigned int shortcut, int vector) --{ -- unsigned int icr = shortcut | APIC_DEST_LOGICAL; -- -- switch (vector) { -- default: -- icr |= APIC_DM_FIXED | vector; -- break; -- case NMI_VECTOR: -- icr |= APIC_DM_NMI; -- break; -- } -- return icr; --} -- --static inline int __prepare_ICR2(unsigned int mask) --{ -- return SET_APIC_DEST_FIELD(mask); --} --#else #include DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]); -@@ -59,36 +29,10 @@ static inline void __send_IPI_one(unsign - BUG_ON(irq < 0); +@@ -15,32 +15,17 @@ static inline void __send_IPI_one(unsign notify_remote_via_irq(irq); } --#endif -void __send_IPI_shortcut(unsigned int shortcut, int vector) -+static void __send_IPI_shortcut(unsigned int shortcut, int vector) ++void xen_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { --#ifndef CONFIG_XEN -- /* -- * Subtle. In the case of the 'never do double writes' workaround -- * we have to lock out interrupts to be safe. As we don't care -- * of the value read we use an atomic rmw access to avoid costly -- * cli/sti. Otherwise we use an even cheaper single atomic write -- * to the APIC. -- */ -- unsigned int cfg; -- -- /* -- * Wait for idle. -- */ -- apic_wait_icr_idle(); -- -- /* -- * No need to touch the target chip field -- */ -- cfg = __prepare_ICR(shortcut, vector); -- -- /* -- * Send the IPI. The write to APIC_ICR fires this off. -- */ -- apic_write(APIC_ICR, cfg); --#else - int cpu; -+ unsigned int cpu; - - switch (shortcut) { - case APIC_DEST_SELF: -@@ -99,149 +43,53 @@ void __send_IPI_shortcut(unsigned int sh - if (cpu != smp_processor_id()) - __send_IPI_one(cpu, vector); - break; -+ case APIC_DEST_ALLINC: -+ for_each_online_cpu(cpu) -+ __send_IPI_one(cpu, vector); -+ break; - default: - printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut, - vector); - break; - } --#endif - } ++ unsigned int cpu, this_cpu = smp_processor_id(); --void send_IPI_self(int vector) -+void xen_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) - { -- __send_IPI_shortcut(APIC_DEST_SELF, vector); +- switch (shortcut) { +- case APIC_DEST_SELF: +- __send_IPI_one(smp_processor_id(), vector); +- break; +- case APIC_DEST_ALLBUT: +- for_each_online_cpu(cpu) +- if (cpu != smp_processor_id()) +- __send_IPI_one(cpu, vector); +- break; +- default: +- printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut, +- vector); +- break; +- } -} - --#ifndef CONFIG_XEN --/* -- * This is used to send an IPI with no shorthand notation (the destination is -- * specified in bits 56 to 63 of the ICR). -- */ --static inline void __send_IPI_dest_field(unsigned long mask, int vector) +-void send_IPI_self(int vector) -{ -- unsigned long cfg; -- -- /* -- * Wait for idle. -- */ -- if (unlikely(vector == NMI_VECTOR)) -- safe_apic_wait_icr_idle(); -- else -- apic_wait_icr_idle(); -- -- /* -- * prepare target chip field -- */ -- cfg = __prepare_ICR2(mask); -- apic_write(APIC_ICR2, cfg); -- -- /* -- * program the ICR -- */ -- cfg = __prepare_ICR(0, vector); -- -- /* -- * Send the IPI. The write to APIC_ICR fires this off. -- */ -- apic_write(APIC_ICR, cfg); --} --#endif -- --/* -- * This is only used on smaller machines. 
-- */ +- __send_IPI_shortcut(APIC_DEST_SELF, vector); ++ WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); ++ for_each_cpu_and(cpu, cpumask, cpu_online_mask) ++ if (cpu != this_cpu) ++ __send_IPI_one(cpu, vector); + } + -void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) --{ --#ifndef CONFIG_XEN -- unsigned long mask = cpumask_bits(cpumask)[0]; --#else ++void xen_send_IPI_mask(const struct cpumask *cpumask, int vector) + { unsigned int cpu; --#endif - unsigned long flags; - local_irq_save(flags); --#ifndef CONFIG_XEN -- WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); -- __send_IPI_dest_field(mask, vector); --#else - WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); - for_each_cpu_and(cpu, cpumask, cpu_online_mask) -- __send_IPI_one(cpu, vector); --#endif -+ if (cpu != smp_processor_id()) -+ __send_IPI_one(cpu, vector); - local_irq_restore(flags); +@@ -49,20 +34,17 @@ void send_IPI_mask_bitmask(const struct + __send_IPI_one(cpu, vector); } -void send_IPI_mask_sequence(const struct cpumask *mask, int vector) -+void xen_send_IPI_mask(const struct cpumask *cpumask, int vector) ++void xen_send_IPI_allbutself(int vector) { --#ifndef CONFIG_XEN -+ unsigned int cpu; - unsigned long flags; -- unsigned int query_cpu; -- -- /* -- * Hack. The clustered APIC addressing mode doesn't allow us to send -- * to an arbitrary mask, so I do a unicasts to each CPU instead. This -- * should be modified to do 1 message per cluster ID - mbligh -- */ - - local_irq_save(flags); -- for_each_cpu(query_cpu, mask) -- __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); -+ WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); -+ for_each_cpu_and(cpu, cpumask, cpu_online_mask) -+ __send_IPI_one(cpu, vector); - local_irq_restore(flags); --#else - send_IPI_mask_bitmask(mask, vector); --#endif ++ xen_send_IPI_mask_allbutself(cpu_online_mask, vector); } -void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) -+void xen_send_IPI_allbutself(int vector) ++void xen_send_IPI_all(int vector) { -- unsigned long flags; - unsigned int query_cpu; - unsigned int this_cpu = smp_processor_id(); - -- /* See Hack comment above */ -- -- local_irq_save(flags); --#ifndef CONFIG_XEN -- for_each_cpu(query_cpu, mask) -- if (query_cpu != this_cpu) -- __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), -- vector); --#else - WARN_ON(!cpumask_subset(mask, cpu_online_mask)); - for_each_cpu_and(query_cpu, mask, cpu_online_mask) - if (query_cpu != this_cpu) - __send_IPI_one(query_cpu, vector); --#endif -- local_irq_restore(flags); -+ __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); ++ xen_send_IPI_mask(cpu_online_mask, vector); } --#ifndef CONFIG_XEN --/* must come after the send_IPI functions above for inlining */ --static int convert_apicid_to_cpu(int apic_id) --{ -- int i; -- -- for_each_possible_cpu(i) { -- if (per_cpu(x86_cpu_to_apicid, i) == apic_id) -- return i; -- } -- return -1; -+void xen_send_IPI_all(int vector) -+{ -+ __send_IPI_shortcut(APIC_DEST_ALLINC, vector); - } - --int safe_smp_processor_id(void) -+void xen_send_IPI_self(int vector) - { -- int apicid, cpuid; -- -- if (!boot_cpu_has(X86_FEATURE_APIC)) -- return 0; -- -- apicid = hard_smp_processor_id(); -- if (apicid == BAD_APICID) -- return 0; -- -- cpuid = convert_apicid_to_cpu(apicid); -- -- return cpuid >= 0 ? 
cpuid : 0; -+ __send_IPI_shortcut(APIC_DEST_SELF, vector); - } --#endif -#endif ++void xen_send_IPI_self(int vector) ++{ ++ __send_IPI_one(smp_processor_id(), vector); ++} --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/apic/probe_32-xen.c 2010-03-24 15:25:06.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/probe_32-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -0,0 +1,69 @@ +/* + * Default generic APIC driver. This handles up to 8 CPUs. @@ -5948,9 +5780,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + +struct apic *apic = &apic_xen; +EXPORT_SYMBOL_GPL(apic); ---- head-2010-05-25.orig/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/asm-offsets_32.c 2010-03-24 15:25:06.000000000 +0100 -@@ -115,6 +115,11 @@ void foo(void) +--- head-2011-03-17.orig/arch/x86/kernel/asm-offsets_32.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/asm-offsets_32.c 2011-02-01 14:44:12.000000000 +0100 +@@ -112,6 +112,11 @@ void foo(void) OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); @@ -5962,9 +5794,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifdef CONFIG_PARAVIRT BLANK(); OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:25:06.000000000 +0100 -@@ -1,101 +1,94 @@ +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:41:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:07.000000000 +0100 +@@ -1,105 +1,94 @@ -#include -#include -#include @@ -6051,33 +5883,37 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifndef CONFIG_XEN -cpumask_var_t cpu_callin_mask; cpumask_var_t cpu_callout_mask; -+cpumask_var_t cpu_callin_mask; - #endif +-#endif -cpumask_var_t cpu_initialized_mask; ++cpumask_var_t cpu_callin_mask; +-#ifndef CONFIG_XEN /* representing cpus for which sibling maps can be computed */ cpumask_var_t cpu_sibling_setup_mask; + #endif -#else /* CONFIG_X86_32 */ - +-#ifndef CONFIG_XEN +-cpumask_t cpu_callin_map; +-cpumask_t cpu_callout_map; +-#endif +-cpumask_t cpu_initialized; +/* correctly size the local cpu masks */ +void __init setup_cpu_local_masks(void) +{ + alloc_bootmem_cpumask_var(&cpu_initialized_mask); #ifndef CONFIG_XEN --cpumask_t cpu_callin_map; --cpumask_t cpu_callout_map; +-cpumask_t cpu_sibling_setup_map; + alloc_bootmem_cpumask_var(&cpu_callin_mask); + alloc_bootmem_cpumask_var(&cpu_callout_mask); - #endif --cpumask_t cpu_initialized; --cpumask_t cpu_sibling_setup_map; -- --#endif /* CONFIG_X86_32 */ -- + alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); + #endif +} +-#endif /* CONFIG_X86_32 */ +- +- -static struct cpu_dev *this_cpu __cpuinitdata; +static const struct cpu_dev *this_cpu __cpuinitdata; @@ -6124,7 +5960,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifndef CONFIG_XEN /* * Segments used for calling PnP BIOS have byte granularity. -@@ -103,33 +96,41 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_p +@@ -107,33 +96,41 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_p * the transfer segment sizes are set at run time. 
*/ /* 32-bit code */ @@ -6176,8 +6012,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + #ifdef CONFIG_X86_32 static int cachesize_override __cpuinitdata = -1; - static int disable_x86_serial_nr __cpuinitdata = 1; -@@ -168,16 +169,17 @@ static inline int flag_is_changeable_p(u + +@@ -173,16 +170,17 @@ static inline int flag_is_changeable_p(u * the CPUID. Add "volatile" to not allow gcc to * optimize the subsequent calls to this function. */ @@ -6205,7 +6041,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches : "=&r" (f1), "=&r" (f2) : "ir" (flag)); -@@ -192,18 +194,22 @@ static int __cpuinit have_cpuid_p(void) +@@ -199,18 +197,22 @@ static int disable_x86_serial_nr __cpuin static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { @@ -6239,7 +6075,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } static int __init x86_serial_nr_setup(char *s) -@@ -228,16 +234,64 @@ static inline void squash_the_stupid_ser +@@ -235,16 +237,64 @@ static inline void squash_the_stupid_ser #endif /* @@ -6308,7 +6144,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (c->x86_model >= 16) return NULL; /* Range check */ -@@ -257,32 +311,52 @@ static char __cpuinit *table_lookup_mode +@@ -264,32 +314,52 @@ static char __cpuinit *table_lookup_mode __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; @@ -6372,7 +6208,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static void __cpuinit default_init(struct cpuinfo_x86 *c) { -@@ -301,7 +375,7 @@ static void __cpuinit default_init(struc +@@ -308,7 +378,7 @@ static void __cpuinit default_init(struc #endif } @@ -6381,7 +6217,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches .c_init = default_init, .c_vendor = "Unknown", .c_x86_vendor = X86_VENDOR_UNKNOWN, -@@ -315,22 +389,24 @@ static void __cpuinit get_model_name(str +@@ -322,22 +392,24 @@ static void __cpuinit get_model_name(str if (c->extended_cpuid_level < 0x80000004) return; @@ -6414,7 +6250,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } } -@@ -399,36 +475,30 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -406,36 +478,30 @@ void __cpuinit detect_ht(struct cpuinfo_ if (smp_num_siblings == 1) { printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); @@ -6469,7 +6305,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches out: if ((c->x86_max_cores * smp_num_siblings) > 1) { -@@ -443,8 +513,8 @@ out: +@@ -450,8 +516,8 @@ out: static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; @@ -6479,7 +6315,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches for (i = 0; i < X86_VENDOR_NUM; i++) { if (!cpu_devs[i]) -@@ -453,6 +523,7 @@ static void __cpuinit get_cpu_vendor(str +@@ -460,6 +526,7 @@ static void __cpuinit get_cpu_vendor(str if (!strcmp(v, cpu_devs[i]->c_ident[0]) || (cpu_devs[i]->c_ident[1] && !strcmp(v, cpu_devs[i]->c_ident[1]))) { @@ -6487,7 +6323,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches this_cpu = cpu_devs[i]; c->x86_vendor = this_cpu->c_x86_vendor; return; -@@ -461,7 +532,9 @@ static void __cpuinit get_cpu_vendor(str +@@ -468,7 +535,9 @@ static void __cpuinit get_cpu_vendor(str if (!printed) { printed++; @@ -6498,7 +6334,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by 
xen-port-patches printk(KERN_ERR "CPU: Your system may be unstable.\n"); } -@@ -481,14 +554,17 @@ void __cpuinit cpu_detect(struct cpuinfo +@@ -488,14 +557,17 @@ void __cpuinit cpu_detect(struct cpuinfo /* Intel-defined flags: level 0x00000001 */ if (c->cpuid_level >= 0x00000001) { u32 junk, tfms, cap0, misc; @@ -6516,7 +6352,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (cap0 & (1<<19)) { c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; c->x86_cache_alignment = c->x86_clflush_size; -@@ -504,6 +580,7 @@ static void __cpuinit get_cpu_cap(struct +@@ -511,6 +583,7 @@ static void __cpuinit get_cpu_cap(struct /* Intel-defined flags: level 0x00000001 */ if (c->cpuid_level >= 0x00000001) { u32 capability, excap; @@ -6524,7 +6360,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches cpuid(0x00000001, &tfms, &ebx, &excap, &capability); c->x86_capability[0] = capability; c->x86_capability[4] = excap; -@@ -512,6 +589,7 @@ static void __cpuinit get_cpu_cap(struct +@@ -519,6 +592,7 @@ static void __cpuinit get_cpu_cap(struct /* AMD-defined flags: level 0x80000001 */ xlvl = cpuid_eax(0x80000000); c->extended_cpuid_level = xlvl; @@ -6532,7 +6368,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if ((xlvl & 0xffff0000) == 0x80000000) { if (xlvl >= 0x80000001) { c->x86_capability[1] = cpuid_edx(0x80000001); -@@ -519,13 +597,15 @@ static void __cpuinit get_cpu_cap(struct +@@ -526,13 +600,15 @@ static void __cpuinit get_cpu_cap(struct } } @@ -6549,7 +6385,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif if (c->extended_cpuid_level >= 0x80000007) -@@ -572,8 +652,12 @@ static void __init early_identify_cpu(st +@@ -579,8 +655,12 @@ static void __init early_identify_cpu(st { #ifdef CONFIG_X86_64 c->x86_clflush_size = 64; @@ -6562,7 +6398,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif c->x86_cache_alignment = c->x86_clflush_size; -@@ -596,21 +680,20 @@ static void __init early_identify_cpu(st +@@ -603,21 +683,20 @@ static void __init early_identify_cpu(st if (this_cpu->c_early_init) this_cpu->c_early_init(c); @@ -6588,7 +6424,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned int j; if (count >= X86_VENDOR_NUM) -@@ -621,7 +704,7 @@ void __init early_cpu_init(void) +@@ -628,7 +707,7 @@ void __init early_cpu_init(void) for (j = 0; j < 2; j++) { if (!cpudev->c_ident[j]) continue; @@ -6597,17 +6433,17 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches cpudev->c_ident[j]); } } -@@ -663,7 +746,7 @@ static void __cpuinit generic_identify(s +@@ -671,7 +750,7 @@ static void __cpuinit generic_identify(s c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; - #if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) + #ifdef CONFIG_X86_32 # ifdef CONFIG_X86_HT - c->apicid = phys_pkg_id(c->initial_apicid, 0); + c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); # else c->apicid = c->initial_apicid; # endif -@@ -697,9 +780,13 @@ static void __cpuinit identify_cpu(struc - c->x86_coreid_bits = 0; +@@ -708,9 +787,13 @@ static void __cpuinit identify_cpu(struc + #endif #ifdef CONFIG_X86_64 c->x86_clflush_size = 64; + c->x86_phys_bits = 36; @@ -6620,7 +6456,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif c->x86_cache_alignment = c->x86_clflush_size; memset(&c->x86_capability, 0, sizeof c->x86_capability); -@@ -712,7 +799,7 
@@ static void __cpuinit identify_cpu(struc +@@ -723,7 +806,7 @@ static void __cpuinit identify_cpu(struc this_cpu->c_identify(c); #if defined(CONFIG_X86_64) && !defined(CONFIG_XEN) @@ -6629,7 +6465,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif /* -@@ -732,13 +819,16 @@ static void __cpuinit identify_cpu(struc +@@ -743,13 +826,16 @@ static void __cpuinit identify_cpu(struc squash_the_stupid_serial_number(c); /* @@ -6649,7 +6485,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches p = table_lookup_model(c); if (p) strcpy(c->x86_model_id, p); -@@ -794,6 +884,7 @@ static void vgetcpu_set_mode(void) +@@ -805,6 +891,7 @@ static void vgetcpu_set_mode(void) void __init identify_boot_cpu(void) { identify_cpu(&boot_cpu_data); @@ -6657,7 +6493,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifdef CONFIG_X86_32 sysenter_setup(); enable_sep_cpu(); -@@ -813,11 +904,11 @@ void __cpuinit identify_secondary_cpu(st +@@ -824,11 +911,11 @@ void __cpuinit identify_secondary_cpu(st } struct msr_range { @@ -6672,7 +6508,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { 0x00000000, 0x00000418}, { 0xc0000000, 0xc000040b}, { 0xc0010000, 0xc0010142}, -@@ -826,14 +917,15 @@ static struct msr_range msr_range_array[ +@@ -837,14 +924,15 @@ static struct msr_range msr_range_array[ static void __cpuinit print_cpu_msr(void) { @@ -6689,7 +6525,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches for (index = index_min; index < index_max; index++) { if (rdmsrl_amd_safe(index, &val)) continue; -@@ -843,6 +935,7 @@ static void __cpuinit print_cpu_msr(void +@@ -854,6 +942,7 @@ static void __cpuinit print_cpu_msr(void } static int show_msr __cpuinitdata; @@ -6697,7 +6533,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static __init int setup_show_msr(char *arg) { int num; -@@ -864,12 +957,14 @@ __setup("noclflush", setup_noclflush); +@@ -875,12 +964,14 @@ __setup("noclflush", setup_noclflush); void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) { @@ -6716,7 +6552,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (vendor && !strstr(c->x86_model_id, vendor)) printk(KERN_CONT "%s ", vendor); -@@ -896,87 +991,57 @@ void __cpuinit print_cpu_info(struct cpu +@@ -907,87 +998,57 @@ void __cpuinit print_cpu_info(struct cpu static __init int setup_disablecpuid(char *arg) { int bit; @@ -6829,18 +6665,18 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches void __cpuinit syscall_init(void) { -@@ -1020,16 +1085,38 @@ unsigned long kernel_eflags; +@@ -1031,16 +1092,38 @@ unsigned long kernel_eflags; DEFINE_PER_CPU(struct orig_ist, orig_ist); #endif -#else +#else /* CONFIG_X86_64 */ - --/* Make sure %fs is initialized properly in idle threads */ ++ +#ifdef CONFIG_CC_STACKPROTECTOR +DEFINE_PER_CPU(unsigned long, stack_canary); +#endif -+ + +-/* Make sure %fs is initialized properly in idle threads */ +/* Make sure %fs and %gs are initialized properly in idle threads */ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) { @@ -6871,7 +6707,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * cpu_init() initializes state that is per-CPU. 
Some data is already -@@ -1039,24 +1126,31 @@ struct pt_regs * __cpuinit idle_regs(str +@@ -1050,24 +1133,31 @@ struct pt_regs * __cpuinit idle_regs(str * A lot of state is already set up in PDA init for 64 bit */ #ifdef CONFIG_X86_64 @@ -6910,7 +6746,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif me = current; -@@ -1073,7 +1167,9 @@ void __cpuinit cpu_init(void) +@@ -1084,7 +1174,9 @@ void __cpuinit cpu_init(void) * and set up the GDT descriptor: */ @@ -6921,7 +6757,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifndef CONFIG_X86_NO_IDT load_idt((const struct desc_ptr *)&idt_descr); #endif -@@ -1086,8 +1182,8 @@ void __cpuinit cpu_init(void) +@@ -1097,8 +1189,8 @@ void __cpuinit cpu_init(void) barrier(); check_efer(); @@ -6932,7 +6768,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches enable_x2apic(); #endif -@@ -1096,24 +1192,17 @@ void __cpuinit cpu_init(void) +@@ -1107,24 +1199,17 @@ void __cpuinit cpu_init(void) * set up and load the per-CPU TSS */ if (!orig_ist->ist[0]) { @@ -6961,7 +6797,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * <= is required because the CPU will access up to * 8 bits beyond the end of the IO permission bitmap. -@@ -1124,8 +1213,7 @@ void __cpuinit cpu_init(void) +@@ -1135,8 +1220,7 @@ void __cpuinit cpu_init(void) atomic_inc(&init_mm.mm_count); me->active_mm = &init_mm; @@ -6971,7 +6807,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches enter_lazy_tlb(&init_mm, me); load_sp0(t, ¤t->thread); -@@ -1144,22 +1232,9 @@ void __cpuinit cpu_init(void) +@@ -1155,22 +1239,9 @@ void __cpuinit cpu_init(void) */ if (kgdb_connected && arch_kgdb_ops.correct_hw_break) arch_kgdb_ops.correct_hw_break(); @@ -6996,7 +6832,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches fpu_init(); -@@ -1171,8 +1246,10 @@ void __cpuinit cpu_init(void) +@@ -1182,8 +1253,10 @@ void __cpuinit cpu_init(void) kernel_eflags &= ~X86_EFLAGS_IF; #endif @@ -7007,7 +6843,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } #else -@@ -1188,7 +1265,8 @@ void __cpuinit cpu_init(void) +@@ -1199,7 +1272,8 @@ void __cpuinit cpu_init(void) if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); @@ -7017,7 +6853,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } printk(KERN_INFO "Initializing CPU#%d\n", cpu); -@@ -1196,36 +1274,30 @@ void __cpuinit cpu_init(void) +@@ -1207,36 +1281,30 @@ void __cpuinit cpu_init(void) if (cpu_has_vme || cpu_has_de) clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); @@ -7061,16 +6897,16 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Force FPU initialization: -@@ -1245,6 +1317,4 @@ void __cpuinit cpu_init(void) +@@ -1256,6 +1324,4 @@ void __cpuinit cpu_init(void) xsave_init(); } - - #endif ---- head-2010-05-25.orig/arch/x86/kernel/cpu/intel.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/intel.c 2010-05-25 09:24:45.000000000 +0200 -@@ -91,8 +91,10 @@ static void __cpuinit early_init_intel(s +--- head-2011-03-17.orig/arch/x86/kernel/cpu/intel.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/intel.c 2011-02-01 14:44:12.000000000 +0100 +@@ -96,8 +96,10 @@ static void __cpuinit early_init_intel(s if 
(c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); @@ -7081,8 +6917,22 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } /* ---- head-2010-05-25.orig/arch/x86/kernel/e820-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820-xen.c 2010-03-24 15:25:06.000000000 +0100 +@@ -232,9 +234,13 @@ static void __cpuinit intel_workarounds( + rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); + if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { + printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); ++#ifndef CONFIG_XEN + printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); + lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; + wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); ++#else ++ pr_warning("CPU: Hypervisor update needed\n"); ++#endif + } + } + +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -129,19 +129,50 @@ int __init e820_all_mapped(u64 start, u6 /* * Add a memory region to the kernel e820 map. @@ -7340,8 +7190,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Try to copy the BIOS-supplied E820-map. * ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -12,8 +12,8 @@ #include #include @@ -7425,8 +7275,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { u32 dword; dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, ---- head-2010-05-25.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:44:12.000000000 +0100 @@ -30,12 +30,13 @@ * 1C(%esp) - %ds * 20(%esp) - %es @@ -8134,8 +7984,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* copy the iret frame of 12 bytes */ .rept 3 pushl 16(%esp) ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:44:12.000000000 +0100 @@ -51,10 +51,10 @@ #include #include @@ -8337,8 +8187,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches RESTORE_ALL 8 jmp irq_return paranoid_userspace: ---- head-2010-05-25.orig/arch/x86/kernel/head-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head-xen.c 2010-04-28 17:07:13.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -2,6 +2,7 @@ #include @@ -8355,7 +8205,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned int lowmem, ebda_addr; /* To 
determine the position of the EBDA and the */ -@@ -53,5 +53,174 @@ void __init reserve_ebda_region(void) +@@ -53,5 +53,158 @@ void __init reserve_ebda_region(void) /* reserve all memory between lowmem and the 1MB mark */ reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved"); @@ -8391,11 +8241,6 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + unsigned int i; + struct xen_machphys_mapping mapping; + unsigned long machine_to_phys_nr_ents; -+#ifdef CONFIG_X86_32 -+ struct xen_platform_parameters pp; -+ extern pte_t swapper_pg_fixmap[PTRS_PER_PTE]; -+ unsigned long addr; -+#endif + + xen_setup_features(); + @@ -8420,17 +8265,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + "Xen provided"); + +#ifdef CONFIG_X86_32 -+ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable, -+ VMASST_TYPE_4gb_segments)); -+ -+ init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base; -+ -+ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) { -+ hypervisor_virt_start = pp.virt_start; -+ reserve_top_address(0UL - pp.virt_start); -+ } -+ -+ BUG_ON(pte_index(hypervisor_virt_start)); ++{ ++ extern pte_t swapper_pg_fixmap[PTRS_PER_PTE]; ++ unsigned long addr; + + /* Do an early initialization of the fixmap area */ + make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables); @@ -8439,6 +8276,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + addr), + addr), + __pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE)); ++} +#else + check_efer(); + xen_init_pt(); @@ -8487,12 +8325,10 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + .address = CALLBACK_ADDR(system_call) + }; +#endif -+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) + static const struct callback_register __initconst nmi_cb = { + .type = CALLBACKTYPE_nmi, + .address = CALLBACK_ADDR(nmi) + }; -+#endif + + ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event); + if (ret == 0) @@ -8516,7 +8352,6 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +#endif + BUG_ON(ret); + -+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) + ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb); +#if CONFIG_XEN_COMPAT <= 0x030002 + if (ret == -ENOSYS) { @@ -8526,12 +8361,11 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + + HYPERVISOR_nmi_op(XENNMI_register_callback, &cb); + } -+#endif #endif } +#endif /* CONFIG_XEN */ ---- head-2010-05-25.orig/arch/x86/kernel/head32-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head32-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head32-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head32-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -9,6 +9,7 @@ #include @@ -8540,8 +8374,26 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #include #include #include -@@ -18,7 +19,7 @@ void __init i386_start_kernel(void) +@@ -16,9 +17,25 @@ + + void __init i386_start_kernel(void) { ++#ifdef CONFIG_XEN ++ struct xen_platform_parameters pp; ++ ++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable, ++ VMASST_TYPE_4gb_segments)); ++ ++ init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base; ++ ++ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) { ++ hypervisor_virt_start = pp.virt_start; ++ reserve_top_address(0UL - 
pp.virt_start); ++ } ++ ++ BUG_ON(pte_index(hypervisor_virt_start)); ++#endif ++ reserve_trampoline_memory(); - reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS"); @@ -8549,7 +8401,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifndef CONFIG_XEN #ifdef CONFIG_BLK_DEV_INITRD -@@ -30,14 +31,8 @@ void __init i386_start_kernel(void) +@@ -30,14 +47,8 @@ void __init i386_start_kernel(void) reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif @@ -8565,7 +8417,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { int max_cmdline; -@@ -46,9 +41,9 @@ void __init i386_start_kernel(void) +@@ -46,9 +57,9 @@ void __init i386_start_kernel(void) memcpy(boot_command_line, xen_start_info->cmd_line, max_cmdline); boot_command_line[max_cmdline-1] = '\0'; } @@ -8577,8 +8429,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * At this point everything still needed from the boot loader ---- head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -7,9 +7,6 @@ * Modified for Xen. */ @@ -8694,8 +8546,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * At this point everything still needed from the boot loader ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_32-xen.S 2011-03-03 16:19:21.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_32-xen.S 2011-03-03 16:23:08.000000000 +0100 @@ -6,12 +6,14 @@ #include #include @@ -8722,9 +8574,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* get vendor info */ xorl %eax,%eax # call CPUID with 0 -> return vendor ID XEN_CPUID -@@ -63,7 +62,49 @@ ENTRY(startup_32) - - movb $1,X86_HARD_MATH +@@ -61,7 +60,49 @@ ENTRY(startup_32) + movb %cl,X86_MASK + movl %edx,X86_CAPABILITY - xorl %eax,%eax # Clear GS +#ifdef CONFIG_CC_STACKPROTECTOR @@ -8773,8 +8625,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches movl %eax,%gs cld # gcc2 wants the direction flag cleared at all times ---- head-2010-05-25.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_64-xen.S 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-01 14:44:12.000000000 +0100 @@ -21,6 +21,7 @@ #include #include @@ -8809,8 +8661,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ phys_##name = . 
- .text.head; \ ---- head-2010-05-25.orig/arch/x86/kernel/ioport-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ioport-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ioport-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ioport-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -91,9 +91,8 @@ static int do_iopl(unsigned int level, s } @@ -8822,8 +8674,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned int level = regs->bx; #else asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs) ---- head-2010-05-25.orig/arch/x86/kernel/irq-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/irq-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/irq-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -6,13 +6,20 @@ #include #include @@ -8878,7 +8730,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif #ifdef CONFIG_SMP seq_printf(p, "%*s: ", prec, "RES"); -@@ -86,12 +102,6 @@ static int show_other_interrupts(struct +@@ -91,12 +107,6 @@ static int show_other_interrupts(struct seq_printf(p, " Threshold APIC interrupts\n"); # endif #endif @@ -8891,7 +8743,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); -@@ -128,23 +138,15 @@ int show_interrupts(struct seq_file *p, +@@ -133,23 +143,15 @@ int show_interrupts(struct seq_file *p, return 0; spin_lock_irqsave(&desc->lock, flags); @@ -8915,7 +8767,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches seq_printf(p, " %8s", desc->chip->name); seq_printf(p, "-%-8s", desc->name); -@@ -169,6 +171,11 @@ u64 arch_irq_stat_cpu(unsigned int cpu) +@@ -174,6 +176,11 @@ u64 arch_irq_stat_cpu(unsigned int cpu) #ifdef CONFIG_X86_LOCAL_APIC sum += irq_stats(cpu)->apic_timer_irqs; @@ -8927,7 +8779,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif #ifdef CONFIG_SMP sum += irq_stats(cpu)->irq_resched_count; -@@ -183,9 +190,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu) +@@ -190,9 +197,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->irq_threshold_count; #endif #endif @@ -8937,7 +8789,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return sum; } -@@ -198,3 +202,64 @@ u64 arch_irq_stat(void) +@@ -205,3 +209,64 @@ u64 arch_irq_stat(void) #endif return sum; } @@ -9002,8 +8854,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + set_irq_regs(old_regs); +} +#endif ---- head-2010-05-25.orig/arch/x86/kernel/machine_kexec_64.c 2010-04-15 10:03:05.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/machine_kexec_64.c 2010-04-15 10:07:08.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/kernel/machine_kexec_64.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/machine_kexec_64.c 2011-02-01 14:44:12.000000000 +0100 @@ -92,13 +92,8 @@ void machine_kexec_setup_load_arg(xen_ke xki->page_list[PA_CONTROL_PAGE] = __ma(control_page); xki->page_list[PA_TABLE_PAGE] = __ma(table_page); @@ -9029,8 +8881,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by 
xen-port-patches result = 0; out: return result; ---- head-2010-05-25.orig/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -21,28 +21,28 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. @@ -9099,8 +8951,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static int request_microcode(const char *name) { ---- head-2010-05-25.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -3,7 +3,7 @@ * compliant MP-table parsing routines. * @@ -9248,7 +9100,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static void __init MP_lintsrc_info(struct mpc_lintsrc *m) { -@@ -289,6 +286,20 @@ static int __init smp_check_mpc(struct m +@@ -291,6 +288,20 @@ static int __init smp_check_mpc(struct m return 1; } @@ -9269,11 +9121,12 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) { char str[16]; -@@ -300,17 +311,8 @@ static int __init smp_read_mpc(struct mp +@@ -302,19 +313,10 @@ static int __init smp_read_mpc(struct mp if (!smp_check_mpc(mpc, oem, str)) return 0; --#ifdef CONFIG_X86_32 ++#ifndef CONFIG_XEN + #ifdef CONFIG_X86_32 - /* - * need to make sure summit and es7000's mps_oem_check is safe to be - * called early via genericarch 's mps_oem_check @@ -9284,12 +9137,13 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -#endif - } else - mps_oem_check(mpc, oem, str); -+#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) + generic_mps_oem_check(mpc, oem, str); #endif +-#ifndef CONFIG_XEN /* save the local APIC address, it might be non-default */ if (!acpi_lapic) -@@ -333,61 +335,30 @@ static int __init smp_read_mpc(struct mp + mp_lapic_addr = mpc->lapic; +@@ -337,61 +339,30 @@ static int __init smp_read_mpc(struct mp while (count < mpc->length) { switch (*mpt) { case MP_PROCESSOR: @@ -9369,7 +9223,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches count = mpc->length; break; } -@@ -395,13 +366,13 @@ static int __init smp_read_mpc(struct mp +@@ -399,13 +370,13 @@ static int __init smp_read_mpc(struct mp (*x86_quirks->mpc_record)++; } @@ -9388,7 +9242,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; -@@ -426,7 +397,7 @@ static void __init construct_default_ioi +@@ -430,7 +401,7 @@ static void __init construct_default_ioi intsrc.type = MP_INTSRC; intsrc.irqflag = 0; /* conforming */ intsrc.srcbus = 0; @@ -9397,7 +9251,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches intsrc.irqtype = mp_INT; -@@ -579,14 +550,76 @@ static inline void __init construct_defa +@@ -585,7 +556,69 @@ static inline void __init construct_defa } } @@ -9468,15 +9322,16 
@@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Scan the memory blocks for an SMP configuration block. - */ - static void __init __get_smp_config(unsigned int early) +@@ -597,7 +630,7 @@ void __init get_smp_config(void) + #define early 0 + #endif { - struct intel_mp_floating *mpf = mpf_found; + struct mpf_intel *mpf = mpf_found; if (!mpf) return; -@@ -607,9 +640,9 @@ static void __init __get_smp_config(unsi +@@ -618,9 +651,9 @@ void __init get_smp_config(void) } printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", @@ -9488,24 +9343,26 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); pic_mode = 1; } else { -@@ -620,7 +653,7 @@ static void __init __get_smp_config(unsi +@@ -631,7 +664,7 @@ void __init get_smp_config(void) /* * Now see if we need to read further. */ - if (mpf->mpf_feature1 != 0) { + if (mpf->feature1 != 0) { + #ifndef CONFIG_XEN if (early) { /* - * local APIC has default address -@@ -630,49 +663,12 @@ static void __init __get_smp_config(unsi - } +@@ -643,49 +676,12 @@ void __init get_smp_config(void) + #endif printk(KERN_INFO "Default MP configuration #%d\n", - mpf->mpf_feature1); - construct_default_ISA_mptable(mpf->mpf_feature1); - - } else if (mpf->mpf_physptr) { -- ++ mpf->feature1); ++ construct_default_ISA_mptable(mpf->feature1); + - /* - * Read the physical hardware table. Anything here will - * override the defaults. @@ -9518,15 +9375,13 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - "BIOS bug, MP table errors detected!...\n"); - printk(KERN_ERR "... disabling SMP support. " - "(tell your hw vendor)\n"); -- return; -- } -+ mpf->feature1); -+ construct_default_ISA_mptable(mpf->feature1); - -- if (early) + } else if (mpf->physptr) { + if (check_physptr(mpf, early)) return; +- } +- +- if (early) +- return; -#ifdef CONFIG_X86_IO_APIC - /* - * If there are no explicit MP IRQ entries, then we are @@ -9551,11 +9406,11 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } else BUG(); -@@ -693,58 +689,68 @@ void __init get_smp_config(void) +@@ -707,60 +703,68 @@ void __init get_smp_config(void) + { __get_smp_config(0); } - -+#ifndef CONFIG_XEN ++ +static void __init smp_reserve_bootmem(struct mpf_intel *mpf) +{ + unsigned long size = get_mpc_size(mpf->physptr); @@ -9580,8 +9435,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + reserve_bootmem_generic(mpf->physptr, size, BOOTMEM_DEFAULT); +#endif +} -+#endif -+ + #endif + static int __init smp_scan_config(unsigned long base, unsigned long length, unsigned reserve) { @@ -9648,7 +9503,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif return 1; } -@@ -826,15 +832,15 @@ static int __init get_MP_intsrc_index(s +@@ -842,15 +846,15 @@ static int __init get_MP_intsrc_index(s /* not legacy */ for (i = 0; i < mp_irq_entries; i++) { @@ -9668,7 +9523,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches continue; if (irq_used[i]) { /* already claimed */ -@@ -851,7 +857,58 @@ static int __init get_MP_intsrc_index(s +@@ -867,7 +871,58 @@ static int __init get_MP_intsrc_index(s #define SPARE_SLOT_NUM 20 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; @@ -9728,7 +9583,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static int __init replace_intsrc_all(struct mpc_table 
*mpc, unsigned long mpc_new_phys, -@@ -859,77 +916,33 @@ static int __init replace_intsrc_all(st +@@ -875,77 +930,33 @@ static int __init replace_intsrc_all(st { #ifdef CONFIG_X86_IO_APIC int i; @@ -9819,7 +9674,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches goto out; } } -@@ -939,10 +952,10 @@ static int __init replace_intsrc_all(st +@@ -955,10 +966,10 @@ static int __init replace_intsrc_all(st if (irq_used[i]) continue; @@ -9832,7 +9687,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches continue; if (nr_m_spare > 0) { -@@ -953,16 +966,8 @@ static int __init replace_intsrc_all(st +@@ -969,16 +980,8 @@ static int __init replace_intsrc_all(st } else { struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; count += sizeof(struct mpc_intsrc); @@ -9851,7 +9706,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches assign_to_mpc_intsrc(&mp_irqs[i], m); mpc->length = count; mpt += sizeof(struct mpc_intsrc); -@@ -1018,7 +1023,7 @@ static int __init update_mp_table(void) +@@ -1034,7 +1037,7 @@ static int __init update_mp_table(void) { char str[16]; char oem[10]; @@ -9860,7 +9715,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct mpc_table *mpc, *mpc_new; if (!enable_update_mptable) -@@ -1031,19 +1036,19 @@ static int __init update_mp_table(void) +@@ -1047,19 +1050,19 @@ static int __init update_mp_table(void) /* * Now see if we need to go further. */ @@ -9885,7 +9740,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (mpc_new_phys && mpc->length > mpc_new_length) { mpc_new_phys = 0; -@@ -1067,23 +1072,23 @@ static int __init update_mp_table(void) +@@ -1083,23 +1086,23 @@ static int __init update_mp_table(void) maddr_t mpc_new_bus; mpc_new_bus = phys_to_machine(mpc_new_phys); @@ -9916,8 +9771,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } /* ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -1,4 +1,5 @@ #include +#include @@ -10034,8 +9889,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches forbid_dac = 1; } } ---- head-2010-05-25.orig/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -24,7 +24,7 @@ do { \ static int @@ -10110,8 +9965,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches }; void __init no_iommu_init(void) ---- head-2010-05-25.orig/arch/x86/kernel/process-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:05:57.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:06:40.000000000 +0100 @@ -1,16 +1,19 @@ #include #include @@ -10324,7 +10179,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches * Idle related 
variables and functions */ unsigned long boot_option_idle_override = 0; -@@ -130,7 +309,7 @@ void stop_this_cpu(void *dummy) +@@ -101,7 +280,7 @@ void stop_this_cpu(void *dummy) /* * Remove this CPU: */ @@ -10333,7 +10188,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches disable_all_local_evtchn(); for (;;) { -@@ -283,12 +462,13 @@ static int __cpuinit check_c1e_idle(cons +@@ -254,12 +433,13 @@ static int __cpuinit check_c1e_idle(cons return 1; } @@ -10349,7 +10204,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } /* -@@ -317,8 +497,8 @@ static void c1e_idle(void) +@@ -288,8 +468,8 @@ static void c1e_idle(void) if (c1e_detected) { int cpu = smp_processor_id(); @@ -10360,7 +10215,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Force broadcast so ACPI can not interfere. Needs * to run with interrupts enabled as it uses -@@ -350,7 +530,7 @@ static void c1e_idle(void) +@@ -321,7 +501,7 @@ static void c1e_idle(void) void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) { #ifndef CONFIG_XEN @@ -10369,7 +10224,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (pm_idle == poll_idle && smp_num_siblings > 1) { printk(KERN_WARNING "WARNING: polling idle and HT enabled," " performance may degrade.\n"); -@@ -373,6 +553,17 @@ void __cpuinit select_idle_routine(const +@@ -344,6 +524,17 @@ void __cpuinit select_idle_routine(const #endif } @@ -10387,8 +10242,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static int __init idle_setup(char *str) { if (!str) ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:36:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:37:24.000000000 +0100 @@ -11,6 +11,7 @@ #include @@ -10397,7 +10252,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #include #include #include -@@ -71,9 +72,6 @@ asmlinkage void cstar_ret_from_fork(void +@@ -69,9 +70,6 @@ asmlinkage void cstar_ret_from_fork(void DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); @@ -10407,7 +10262,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Return saved PC of a blocked thread. 
*/ -@@ -99,6 +97,15 @@ void cpu_idle(void) +@@ -97,6 +95,15 @@ void cpu_idle(void) { int cpu = smp_processor_id(); @@ -10423,7 +10278,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ -@@ -113,7 +120,6 @@ void cpu_idle(void) +@@ -111,7 +118,6 @@ void cpu_idle(void) play_dead(); local_irq_disable(); @@ -10431,7 +10286,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* Don't trace irqs off for idle */ stop_critical_timings(); xen_idle(); -@@ -137,7 +143,7 @@ void __show_regs(struct pt_regs *regs, i +@@ -135,7 +141,7 @@ void __show_regs(struct pt_regs *regs, i if (user_mode_vm(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; @@ -10440,7 +10295,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } else { sp = (unsigned long) (®s->sp); savesegment(ss, ss); -@@ -218,6 +224,7 @@ int kernel_thread(int (*fn)(void *), voi +@@ -216,6 +222,7 @@ int kernel_thread(int (*fn)(void *), voi regs.ds = __USER_DS; regs.es = __USER_DS; regs.fs = __KERNEL_PERCPU; @@ -10448,7 +10303,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches regs.orig_ax = -1; regs.ip = (unsigned long) kernel_thread_helper; regs.cs = __KERNEL_CS | get_kernel_rpl(); -@@ -228,47 +235,6 @@ int kernel_thread(int (*fn)(void *), voi +@@ -226,47 +233,6 @@ int kernel_thread(int (*fn)(void *), voi } EXPORT_SYMBOL(kernel_thread); @@ -10496,7 +10351,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); -@@ -284,7 +250,7 @@ void prepare_to_copy(struct task_struct +@@ -282,7 +248,7 @@ void prepare_to_copy(struct task_struct unlazy_fpu(tsk); } @@ -10505,7 +10360,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned long unused, struct task_struct *p, struct pt_regs *regs) { -@@ -302,7 +268,7 @@ int copy_thread(int nr, unsigned long cl +@@ -300,7 +266,7 @@ int copy_thread(int nr, unsigned long cl p->thread.ip = (unsigned long) ret_from_fork; @@ -10514,7 +10369,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches tsk = current; if (test_tsk_thread_flag(tsk, TIF_CSTAR)) -@@ -344,7 +310,7 @@ int copy_thread(int nr, unsigned long cl +@@ -342,7 +308,7 @@ int copy_thread(int nr, unsigned long cl void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { @@ -10523,7 +10378,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches regs->fs = 0; set_fs(USER_DS); regs->ds = __USER_DS; -@@ -360,98 +326,6 @@ start_thread(struct pt_regs *regs, unsig +@@ -358,98 +324,6 @@ start_thread(struct pt_regs *regs, unsig } EXPORT_SYMBOL_GPL(start_thread); @@ -10622,7 +10477,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * switch_to(x,yn) should switch tasks from x to y. 
* -@@ -532,7 +406,7 @@ __switch_to(struct task_struct *prev_p, +@@ -530,7 +404,7 @@ __switch_to(struct task_struct *prev_p, if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \ next->tls_array[i].b != prev->tls_array[i].b)) { \ mcl->op = __HYPERVISOR_update_descriptor; \ @@ -10631,7 +10486,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \ mcl++; \ -@@ -612,64 +486,44 @@ __switch_to(struct task_struct *prev_p, +@@ -610,64 +484,44 @@ __switch_to(struct task_struct *prev_p, * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) @@ -10710,8 +10565,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (error == 0) { /* Make sure we don't return using sysenter.. */ set_thread_flag(TIF_IRET); ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:36:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:37:17.000000000 +0100 @@ -19,6 +19,7 @@ #include @@ -10726,9 +10581,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #include -#include #include - #include #include -@@ -67,6 +67,11 @@ + #include +@@ -63,6 +63,11 @@ asmlinkage extern void ret_from_fork(void); @@ -10740,7 +10595,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; static ATOMIC_NOTIFIER_HEAD(idle_notifier); -@@ -85,13 +90,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregist +@@ -81,13 +86,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregist void enter_idle(void) { @@ -10756,7 +10611,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return; atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); } -@@ -121,6 +126,16 @@ static inline void play_dead(void) +@@ -117,6 +122,16 @@ static inline void play_dead(void) void cpu_idle(void) { current_thread_info()->status |= TS_POLLING; @@ -10773,7 +10628,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* endless idle loop with no priority at all */ while (1) { tick_nohz_stop_sched_tick(1); -@@ -230,78 +245,11 @@ void show_regs(struct pt_regs *regs) +@@ -226,78 +241,11 @@ void show_regs(struct pt_regs *regs) show_trace(NULL, regs, (void *)(regs + 1), regs->bp); } @@ -10852,7 +10707,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { -@@ -343,7 +291,7 @@ void prepare_to_copy(struct task_struct +@@ -339,7 +287,7 @@ void prepare_to_copy(struct task_struct unlazy_fpu(tsk); } @@ -10861,7 +10716,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned long unused, struct task_struct *p, struct pt_regs *regs) { -@@ -434,103 +382,6 @@ start_thread(struct pt_regs *regs, unsig +@@ -430,103 +378,6 @@ start_thread(struct pt_regs *regs, unsig } EXPORT_SYMBOL_GPL(start_thread); @@ -10965,7 +10820,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * switch_to(x,y) should switch tasks from x to y. 
* -@@ -596,7 +447,7 @@ __switch_to(struct task_struct *prev_p, +@@ -592,7 +443,7 @@ __switch_to(struct task_struct *prev_p, if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \ next->tls_array[i].b != prev->tls_array[i].b)) { \ mcl->op = __HYPERVISOR_update_descriptor; \ @@ -10974,7 +10829,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\ mcl->args[1] = *(u64 *)&next->tls_array[i]; \ mcl++; \ -@@ -683,19 +534,11 @@ __switch_to(struct task_struct *prev_p, +@@ -679,19 +530,11 @@ __switch_to(struct task_struct *prev_p, /* * Switch the PDA context. */ @@ -10998,7 +10853,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Now maybe reload the debug registers -@@ -749,11 +592,6 @@ void set_personality_64bit(void) +@@ -745,11 +588,6 @@ void set_personality_64bit(void) current->personality &= ~READ_IMPLIES_EXEC; } @@ -11010,7 +10865,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) -@@ -763,22 +601,6 @@ sys_clone(unsigned long clone_flags, uns +@@ -759,22 +597,6 @@ sys_clone(unsigned long clone_flags, uns return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); } @@ -11033,39 +10888,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned long get_wchan(struct task_struct *p) { unsigned long stack; ---- head-2010-05-25.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/quirks-xen.c 2010-03-24 15:25:06.000000000 +0100 -@@ -75,8 +75,7 @@ static void ich_force_hpet_resume(void) - if (!force_hpet_address) - return; - -- if (rcba_base == NULL) -- BUG(); -+ BUG_ON(rcba_base == NULL); - - /* read the Function Disable register, dword mode only */ - val = readl(rcba_base + 0x3404); -@@ -173,7 +172,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I - ich_force_enable_hpet); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, - ich_force_enable_hpet); -- -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16, /* ICH10 */ -+ ich_force_enable_hpet); - - static struct pci_dev *cached_dev; - -@@ -262,8 +262,6 @@ static void old_ich_force_enable_hpet_us - { - if (hpet_force_user) - old_ich_force_enable_hpet(dev); -- else -- hpet_print_force_info(); - } - - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, ---- head-2010-05-25.orig/arch/x86/kernel/setup-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:22:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:22:49.000000000 +0100 @@ -74,14 +74,15 @@ #include #include @@ -11239,19 +11063,20 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; -@@ -738,19 +754,11 @@ void __init setup_arch(char **cmdline_p) - +@@ -739,12 +755,6 @@ void __init setup_arch(char **cmdline_p) /* Register a call for panic conditions. 
*/ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); -- + - WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable, - VMASST_TYPE_writable_pagetables)); -#ifdef CONFIG_X86_32 - WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable, - VMASST_TYPE_4gb_segments)); -#endif + set_iopl.iopl = 1; + WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl)); #endif /* CONFIG_XEN */ - +@@ -752,7 +762,6 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); visws_early_detect(); @@ -11259,7 +11084,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #else printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif -@@ -834,16 +842,7 @@ void __init setup_arch(char **cmdline_p) +@@ -836,16 +845,7 @@ void __init setup_arch(char **cmdline_p) init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; @@ -11277,7 +11102,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches code_resource.start = virt_to_phys(_text); code_resource.end = virt_to_phys(_etext)-1; -@@ -956,9 +955,8 @@ void __init setup_arch(char **cmdline_p) +@@ -958,9 +958,8 @@ void __init setup_arch(char **cmdline_p) num_physpages = max_pfn; max_mapnr = max_pfn; @@ -11289,7 +11114,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif /* How many end-of-memory variables you have, grandma! */ -@@ -975,6 +973,8 @@ void __init setup_arch(char **cmdline_p) +@@ -977,6 +976,8 @@ void __init setup_arch(char **cmdline_p) setup_bios_corruption_check(); #endif @@ -11298,7 +11123,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* max_pfn_mapped is updated here */ max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn< #include @@ -11633,8 +11467,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (notify_die(DIE_TRAP, "iret exception", regs, error_code, 32, SIGILL) == NOTIFY_STOP) return; ---- head-2010-05-25.orig/arch/x86/kernel/vmlinux.lds.S 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vmlinux.lds.S 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/vmlinux.lds.S 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vmlinux.lds.S 2011-02-01 14:44:12.000000000 +0100 @@ -16,8 +16,10 @@ #ifdef CONFIG_X86_32 @@ -11647,7 +11481,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif #include ---- head-2010-05-25.orig/arch/x86/mach-xen/Makefile 2007-06-12 13:12:48.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/mach-xen/Makefile 2007-06-12 13:12:48.000000000 +0200 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -# @@ -11655,9 +11489,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -# - -obj-y := setup.o ---- head-2010-05-25.orig/arch/x86/mach-xen/setup.c 2010-03-24 15:12:46.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mach-xen/setup.c 2011-02-03 14:23:14.000000000 +0100 +++ /dev/null 1970-01-01 00:00:00.000000000 +0000 -@@ -1,190 +0,0 @@ +@@ -1,186 +0,0 @@ -/* - * Machine specific setup for generic - */ @@ -11768,12 +11602,10 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - .address = CALLBACK_ADDR(system_call) - }; -#endif --#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) - static struct callback_register 
__initdata nmi_cb = { - .type = CALLBACKTYPE_nmi, - .address = CALLBACK_ADDR(nmi) - }; --#endif - - ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event); - if (ret == 0) @@ -11797,7 +11629,6 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -#endif - BUG_ON(ret); - --#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_32) - ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb); -#if CONFIG_XEN_COMPAT <= 0x030002 - if (ret == -ENOSYS) { @@ -11808,7 +11639,6 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - HYPERVISOR_nmi_op(XENNMI_register_callback, &cb); - } -#endif --#endif - -#ifdef CONFIG_X86_32 - /* Do an early initialization of the fixmap area */ @@ -11848,17 +11678,19 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - } -#endif -} ---- head-2010-05-25.orig/arch/x86/mm/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/Makefile 2010-03-24 15:25:06.000000000 +0100 -@@ -26,5 +26,6 @@ obj-$(CONFIG_K8_NUMA) += k8topology_64. +--- head-2011-03-17.orig/arch/x86/mm/Makefile 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/Makefile 2011-02-01 14:44:12.000000000 +0100 +@@ -27,7 +27,7 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology_6 obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o obj-$(CONFIG_XEN) += hypervisor.o -+disabled-obj-$(CONFIG_XEN) := tlb.o +-disabled-obj-$(CONFIG_XEN) := gup.o ++disabled-obj-$(CONFIG_XEN) := gup.o tlb.o - obj-$(CONFIG_MEMTEST) += memtest.o ---- head-2010-05-25.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault-xen.c 2010-03-24 15:25:06.000000000 +0100 + obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o + +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -1,73 +1,79 @@ /* * Copyright (C) 1995 Linus Torvalds @@ -12017,7 +11849,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +static inline int +check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, + unsigned char opcode, int *prefetch) - { ++{ + unsigned char instr_hi = opcode & 0xf0; + unsigned char instr_lo = opcode & 0x0f; + @@ -12063,7 +11895,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + +static int +is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) -+{ + { + unsigned char *max_instr; unsigned char *instr; - int scan_more = 1; @@ -12072,7 +11904,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * If it was a exec (instruction fetch) fault on NX page, then -@@ -106,99 +159,174 @@ static int is_prefetch(struct pt_regs *r +@@ -106,166 +159,45 @@ static int is_prefetch(struct pt_regs *r if (error_code & PF_INSTR) return 0; @@ -12117,71 +11949,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - * mode only uses well known segments or kernel. 
- */ - scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS); -+ if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) - break; -+ } -+ return prefetch; -+} -+ -+static void -+force_sig_info_fault(int si_signo, int si_code, unsigned long address, -+ struct task_struct *tsk) -+{ -+ siginfo_t info; -+ -+ info.si_signo = si_signo; -+ info.si_errno = 0; -+ info.si_code = si_code; -+ info.si_addr = (void __user *)address; -+ -+ force_sig_info(si_signo, &info, tsk); -+} -+ -+DEFINE_SPINLOCK(pgd_lock); -+LIST_HEAD(pgd_list); -+ -+#ifdef CONFIG_X86_32 -+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) -+{ -+ unsigned index = pgd_index(address); -+ pgd_t *pgd_k; -+ pud_t *pud, *pud_k; -+ pmd_t *pmd, *pmd_k; -+ -+ pgd += index; -+ pgd_k = init_mm.pgd + index; -+ -+ if (!pgd_present(*pgd_k)) -+ return NULL; -+ -+ /* -+ * set_pgd(pgd, *pgd_k); here would be useless on PAE -+ * and redundant with the set_pmd() on non-PAE. As would -+ * set_pud. -+ */ -+ pud = pud_offset(pgd, address); -+ pud_k = pud_offset(pgd_k, address); -+ if (!pud_present(*pud_k)) -+ return NULL; -+ -+ pmd = pmd_offset(pud, address); -+ pmd_k = pmd_offset(pud_k, address); -+ if (!pmd_present(*pmd_k)) -+ return NULL; -+ -+ if (!pmd_present(*pmd)) { -+ bool lazy = percpu_read(xen_lazy_mmu); -+ -+ percpu_write(xen_lazy_mmu, false); -+#if CONFIG_XEN_COMPAT > 0x030002 -+ set_pmd(pmd, *pmd_k); -+#else -+ /* -+ * When running on older Xen we must launder *pmd_k through -+ * pmd_val() to ensure that _PAGE_PRESENT is correctly set. -+ */ -+ set_pmd(pmd, __pmd(pmd_val(*pmd_k))); - #endif +- break; +-#endif - case 0x60: - /* 0x64 thru 0x67 are valid prefixes in all modes. */ - scan_more = (instr_lo & 0xC) == 0x4; @@ -12193,150 +11962,354 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - case 0x00: - /* Prefetch instruction is 0x0F0D or 0x0F18 */ - scan_more = 0; -+ percpu_write(xen_lazy_mmu, lazy); -+ } else { -+ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); -+ } -+ -+ return pmd_k; -+} -+ -+void vmalloc_sync_all(void) -+{ -+ unsigned long address; -+ -+ if (SHARED_KERNEL_PMD) -+ return; -+ -+ for (address = VMALLOC_START & PMD_MASK; -+ address >= TASK_SIZE && address < FIXADDR_TOP; -+ address += PMD_SIZE) { -+ -+ unsigned long flags; -+ struct page *page; - +- - if (probe_kernel_address(instr, opcode)) -+ spin_lock_irqsave(&pgd_lock, flags); -+ list_for_each_entry(page, &pgd_list, lru) { -+ if (!vmalloc_sync_one(page_address(page), address)) - break; +- break; - prefetch = (instr_lo == 0xF) && - (opcode == 0x0D || opcode == 0x18); -- break; ++ if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) + break; - default: - scan_more = 0; - break; - } -+ spin_unlock_irqrestore(&pgd_lock, flags); +- } } -- return prefetch; + return prefetch; } -static void force_sig_info_fault(int si_signo, int si_code, - unsigned long address, struct task_struct *tsk) -+/* -+ * 32-bit: -+ * -+ * Handle a fault on the vmalloc or module mapping area -+ */ -+static noinline int vmalloc_fault(unsigned long address) ++static void ++force_sig_info_fault(int si_signo, int si_code, unsigned long address, ++ struct task_struct *tsk) { -- siginfo_t info; -+ unsigned long pgd_paddr; -+ pmd_t *pmd_k; -+ pte_t *pte_k; + siginfo_t info; - info.si_signo = si_signo; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *)address; - force_sig_info(si_signo, &info, tsk); -+ /* Make sure we are in vmalloc area: */ -+ if (!(address >= VMALLOC_START && address < VMALLOC_END)) -+ return -1; 
-+ -+ /* -+ * Synchronize this task's top level page-table -+ * with the 'reference' page table. -+ * -+ * Do _not_ use "current" here. We might be inside -+ * an interrupt in the middle of a task switch.. -+ */ -+ pgd_paddr = read_cr3(); -+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); -+ if (!pmd_k) -+ return -1; -+ -+ pte_k = pte_offset_kernel(pmd_k, address); -+ if (!pte_present(*pte_k)) -+ return -1; -+ -+ return 0; - } +-} ++ info.si_signo = si_signo; ++ info.si_errno = 0; ++ info.si_code = si_code; ++ info.si_addr = (void __user *)address; -#ifdef CONFIG_X86_64 -static int bad_address(void *p) -+/* -+ * Did it hit the DOS screen memory VA from vm86 mode? -+ */ -+static inline void -+check_v8086_mode(struct pt_regs *regs, unsigned long address, -+ struct task_struct *tsk) - { +-{ - unsigned long dummy; - return probe_kernel_address((unsigned long *)p, dummy); -+ unsigned long bit; -+ -+ if (!v8086_mode(regs)) -+ return; -+ -+ bit = (address - 0xA0000) >> PAGE_SHIFT; -+ if (bit < 32) -+ tsk->thread.screen_bitmap |= 1 << bit; ++ force_sig_info(si_signo, &info, tsk); } -#endif - - static void dump_pagetable(unsigned long address) - { +- +-static void dump_pagetable(unsigned long address) +-{ -#ifdef CONFIG_X86_32 - __typeof__(pte_val(__pte(0))) page; - - page = read_cr3(); - page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; -+ - #ifdef CONFIG_X86_PAE - printk("*pdpt = %016Lx ", page); - if ((page & _PAGE_PRESENT) -@@ -206,7 +334,7 @@ static void dump_pagetable(unsigned long - page = mfn_to_pfn(page >> PAGE_SHIFT); - page <<= PAGE_SHIFT; - page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) +- __typeof__(pte_val(__pte(0))) page; +- +- page = read_cr3(); +- page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; +-#ifdef CONFIG_X86_PAE +- printk("*pdpt = %016Lx ", page); +- if ((page & _PAGE_PRESENT) +- && mfn_to_local_pfn(page >> PAGE_SHIFT) < max_low_pfn) { +- page = mfn_to_pfn(page >> PAGE_SHIFT); +- page <<= PAGE_SHIFT; +- page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) - & (PTRS_PER_PMD - 1)]; -+ & (PTRS_PER_PMD - 1)]; - printk(KERN_CONT "*pde = %016Lx ", page); - page &= ~_PAGE_NX; - } -@@ -218,20 +346,146 @@ static void dump_pagetable(unsigned long - * We must not directly access the pte in the highpte - * case if the page table is located in highmem. - * And let's rather not kmap-atomic the pte, just in case +- printk(KERN_CONT "*pde = %016Lx ", page); +- page &= ~_PAGE_NX; +- } +-#else +- printk("*pde = %08lx ", page); +-#endif +- +- /* +- * We must not directly access the pte in the highpte +- * case if the page table is located in highmem. +- * And let's rather not kmap-atomic the pte, just in case - * it's allocated already. 
-+ * it's allocated already: - */ - if ((page & _PAGE_PRESENT) - && mfn_to_local_pfn(page >> PAGE_SHIFT) < max_low_pfn - && !(page & _PAGE_PSE)) { -+ - page = mfn_to_pfn(page >> PAGE_SHIFT); - page <<= PAGE_SHIFT; - page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) +- */ +- if ((page & _PAGE_PRESENT) +- && mfn_to_local_pfn(page >> PAGE_SHIFT) < max_low_pfn +- && !(page & _PAGE_PSE)) { +- page = mfn_to_pfn(page >> PAGE_SHIFT); +- page <<= PAGE_SHIFT; +- page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) - & (PTRS_PER_PTE - 1)]; -+ & (PTRS_PER_PTE - 1)]; - printk(KERN_CONT "*pte = %0*Lx ", sizeof(page)*2, (u64)page); +- printk(KERN_CONT "*pte = %0*Lx ", sizeof(page)*2, (u64)page); +- } +- +- printk(KERN_CONT "\n"); +-#else /* CONFIG_X86_64 */ +- pgd_t *pgd; +- pud_t *pud; +- pmd_t *pmd; +- pte_t *pte; +- +- pgd = (pgd_t *)read_cr3(); +- +- pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); +- pgd += pgd_index(address); +- if (bad_address(pgd)) goto bad; +- printk("PGD %lx ", pgd_val(*pgd)); +- if (!pgd_present(*pgd)) goto ret; +- +- pud = pud_offset(pgd, address); +- if (bad_address(pud)) goto bad; +- printk(KERN_CONT "PUD %lx ", pud_val(*pud)); +- if (!pud_present(*pud) || pud_large(*pud)) +- goto ret; + +- pmd = pmd_offset(pud, address); +- if (bad_address(pmd)) goto bad; +- printk(KERN_CONT "PMD %lx ", pmd_val(*pmd)); +- if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; ++DEFINE_SPINLOCK(pgd_lock); ++LIST_HEAD(pgd_list); + +- pte = pte_offset_kernel(pmd, address); +- if (bad_address(pte)) goto bad; +- printk(KERN_CONT "PTE %lx", pte_val(*pte)); +-ret: +- printk(KERN_CONT "\n"); +- return; +-bad: +- printk("BAD\n"); +-#endif +-} ++#define pgd_page_table(what, pg) \ ++ spin_##what(&((struct mm_struct *)(pg)->private)->page_table_lock) + + #ifdef CONFIG_X86_32 + static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) +@@ -286,7 +218,6 @@ static inline pmd_t *vmalloc_sync_one(pg + * and redundant with the set_pmd() on non-PAE. As would + * set_pud. + */ +- + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) +@@ -296,10 +227,11 @@ static inline pmd_t *vmalloc_sync_one(pg + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; ++ + if (!pmd_present(*pmd)) { +- bool lazy = x86_read_percpu(xen_lazy_mmu); ++ bool lazy = percpu_read(xen_lazy_mmu); + +- x86_write_percpu(xen_lazy_mmu, false); ++ percpu_write(xen_lazy_mmu, false); + #if CONFIG_XEN_COMPAT > 0x030002 + set_pmd(pmd, *pmd_k); + #else +@@ -309,143 +241,657 @@ static inline pmd_t *vmalloc_sync_one(pg + */ + set_pmd(pmd, __pmd(pmd_val(*pmd_k))); + #endif +- x86_write_percpu(xen_lazy_mmu, lazy); +- } else ++ percpu_write(xen_lazy_mmu, lazy); ++ } else { + BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); ++ } ++ + return pmd_k; + } +-#endif +- +-#ifdef CONFIG_X86_64 +-static const char errata93_warning[] = +-KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" +-KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" +-KERN_ERR "******* Please consider a BIOS update.\n" +-KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; +-#endif + +-/* Workaround for K8 erratum #93 & buggy BIOS. +- BIOS SMM functions are required to use a specific workaround +- to avoid corruption of the 64bit RIP register on C stepping K8. +- A lot of BIOS that didn't get tested properly miss this. +- The OS sees this as a page fault with the upper 32bits of RIP cleared. 
+- Try to work around it here. +- Note we only handle faults in kernel here. +- Does nothing for X86_32 +- */ +-static int is_errata93(struct pt_regs *regs, unsigned long address) ++void vmalloc_sync_all(void) + { +-#ifdef CONFIG_X86_64 +- static int warned; +- if (address != regs->ip) +- return 0; +- if ((address >> 32) != 0) +- return 0; +- address |= 0xffffffffUL << 32; +- if ((address >= (u64)_stext && address <= (u64)_etext) || +- (address >= MODULES_VADDR && address <= MODULES_END)) { +- if (!warned) { +- printk(errata93_warning); +- warned = 1; ++ unsigned long address; ++ ++ if (SHARED_KERNEL_PMD) ++ return; ++ ++ for (address = VMALLOC_START & PMD_MASK; ++ address >= TASK_SIZE && address < FIXADDR_TOP; ++ address += PMD_SIZE) { ++ ++ unsigned long flags; ++ struct page *page; ++ ++ spin_lock_irqsave(&pgd_lock, flags); ++ list_for_each_entry(page, &pgd_list, lru) { ++ pmd_t *pmd; ++ ++ pgd_page_table(lock, page); ++ pmd = vmalloc_sync_one(page_address(page), address); ++ pgd_page_table(unlock, page); ++ ++ if (!pmd) ++ break; + } +- regs->ip = address; +- return 1; ++ spin_unlock_irqrestore(&pgd_lock, flags); } +-#endif +- return 0; + } - printk(KERN_CONT "\n"); --#else /* CONFIG_X86_64 */ + /* +- * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal +- * addresses >4GB. We catch this in the page fault handler because these +- * addresses are not reachable. Just detect this case and return. Any code +- * segment in LDT is compatibility mode. ++ * 32-bit: ++ * ++ * Handle a fault on the vmalloc or module mapping area + */ +-static int is_errata100(struct pt_regs *regs, unsigned long address) ++static noinline int vmalloc_fault(unsigned long address) + { +-#ifdef CONFIG_X86_64 +- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && +- (address >> 32)) +- return 1; +-#endif +- return 0; +-} ++ unsigned long pgd_paddr; ++ pmd_t *pmd_k; ++ pte_t *pte_k; ++ ++ /* Make sure we are in vmalloc area: */ ++ if (!(address >= VMALLOC_START && address < VMALLOC_END)) ++ return -1; + +-static int is_f00f_bug(struct pt_regs *regs, unsigned long address) +-{ +-#ifdef CONFIG_X86_F00F_BUG +- unsigned long nr; + /* +- * Pentium F0 0F C7 C8 bug workaround. ++ * Synchronize this task's top level page-table ++ * with the 'reference' page table. ++ * ++ * Do _not_ use "current" here. We might be inside ++ * an interrupt in the middle of a task switch.. + */ +- if (boot_cpu_data.f00f_bug) { +- nr = (address - idt_descr.address) >> 3; ++ pgd_paddr = read_cr3(); ++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); ++ if (!pmd_k) ++ return -1; ++ ++ pte_k = pte_offset_kernel(pmd_k, address); ++ if (!pte_present(*pte_k)) ++ return -1; + +- if (nr == 6) { +- do_invalid_op(regs, 0); +- return 1; +- } +- } +-#endif + return 0; + } + +-static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, +- unsigned long address) ++/* ++ * Did it hit the DOS screen memory VA from vm86 mode? 
++ */ ++static inline void ++check_v8086_mode(struct pt_regs *regs, unsigned long address, ++ struct task_struct *tsk) + { +-#ifdef CONFIG_X86_32 +- if (!oops_may_print()) ++ unsigned long bit; ++ ++ if (!v8086_mode(regs)) + return; +-#endif + +-#ifdef CONFIG_X86_PAE +- if (error_code & PF_INSTR) { +- unsigned int level; ++ bit = (address - 0xA0000) >> PAGE_SHIFT; ++ if (bit < 32) ++ tsk->thread.screen_bitmap |= 1 << bit; ++} ++ ++static void dump_pagetable(unsigned long address) ++{ ++ __typeof__(pte_val(__pte(0))) page; ++ ++ page = read_cr3(); ++ page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; ++ ++#ifdef CONFIG_X86_PAE ++ printk("*pdpt = %016Lx ", page); ++ if ((page & _PAGE_PRESENT) ++ && mfn_to_local_pfn(page >> PAGE_SHIFT) < max_low_pfn) { ++ page = mfn_to_pfn(page >> PAGE_SHIFT); ++ page <<= PAGE_SHIFT; ++ page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) ++ & (PTRS_PER_PMD - 1)]; ++ printk(KERN_CONT "*pde = %016Lx ", page); ++ page &= ~_PAGE_NX; ++ } ++#else ++ printk("*pde = %08lx ", page); ++#endif ++ ++ /* ++ * We must not directly access the pte in the highpte ++ * case if the page table is located in highmem. ++ * And let's rather not kmap-atomic the pte, just in case ++ * it's allocated already: ++ */ ++ if ((page & _PAGE_PRESENT) ++ && mfn_to_local_pfn(page >> PAGE_SHIFT) < max_low_pfn ++ && !(page & _PAGE_PSE)) { ++ ++ page = mfn_to_pfn(page >> PAGE_SHIFT); ++ page <<= PAGE_SHIFT; ++ page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) ++ & (PTRS_PER_PTE - 1)]; ++ printk(KERN_CONT "*pte = %0*Lx ", sizeof(page)*2, (u64)page); ++ } ++ ++ printk(KERN_CONT "\n"); +} + +#else /* CONFIG_X86_64: */ @@ -12359,10 +12332,12 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ pgd_page_table(lock, page); + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); ++ pgd_page_table(unlock, page); + } + spin_unlock_irqrestore(&pgd_lock, flags); + } @@ -12463,126 +12438,54 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + +static void dump_pagetable(unsigned long address) +{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; -@@ -240,113 +494,77 @@ static void dump_pagetable(unsigned long - pgd = (pgd_t *)read_cr3(); - - pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *pte; + - pgd += pgd_index(address); -- if (bad_address(pgd)) goto bad; ++ pgd = (pgd_t *)read_cr3(); ++ ++ pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); ++ ++ pgd += pgd_index(address); + if (bad_address(pgd)) + goto bad; + - printk("PGD %lx ", pgd_val(*pgd)); -- if (!pgd_present(*pgd)) goto ret; ++ printk("PGD %lx ", pgd_val(*pgd)); + + if (!pgd_present(*pgd)) + goto out; - - pud = pud_offset(pgd, address); -- if (bad_address(pud)) goto bad; ++ ++ pud = pud_offset(pgd, address); + if (bad_address(pud)) + goto bad; + - printk(KERN_CONT "PUD %lx ", pud_val(*pud)); - if (!pud_present(*pud) || pud_large(*pud)) -- goto ret; ++ printk(KERN_CONT "PUD %lx ", pud_val(*pud)); ++ if (!pud_present(*pud) || pud_large(*pud)) + goto out; - - pmd = pmd_offset(pud, address); -- if (bad_address(pmd)) goto bad; ++ ++ pmd = pmd_offset(pud, address); + if (bad_address(pmd)) + goto bad; + - printk(KERN_CONT "PMD %lx ", pmd_val(*pmd)); -- if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; ++ 
printk(KERN_CONT "PMD %lx ", pmd_val(*pmd)); + if (!pmd_present(*pmd) || pmd_large(*pmd)) + goto out; - - pte = pte_offset_kernel(pmd, address); -- if (bad_address(pte)) goto bad; ++ ++ pte = pte_offset_kernel(pmd, address); + if (bad_address(pte)) + goto bad; + - printk(KERN_CONT "PTE %lx", pte_val(*pte)); --ret: ++ printk(KERN_CONT "PTE %lx", pte_val(*pte)); +out: - printk(KERN_CONT "\n"); - return; - bad: - printk("BAD\n"); --#endif --} -- --#ifdef CONFIG_X86_32 --static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) --{ -- unsigned index = pgd_index(address); -- pgd_t *pgd_k; -- pud_t *pud, *pud_k; -- pmd_t *pmd, *pmd_k; -- -- pgd += index; -- pgd_k = init_mm.pgd + index; -- -- if (!pgd_present(*pgd_k)) -- return NULL; -- -- /* -- * set_pgd(pgd, *pgd_k); here would be useless on PAE -- * and redundant with the set_pmd() on non-PAE. As would -- * set_pud. -- */ -- -- pud = pud_offset(pgd, address); -- pud_k = pud_offset(pgd_k, address); -- if (!pud_present(*pud_k)) -- return NULL; -- -- pmd = pmd_offset(pud, address); -- pmd_k = pmd_offset(pud_k, address); -- if (!pmd_present(*pmd_k)) -- return NULL; -- if (!pmd_present(*pmd)) { -- bool lazy = x86_read_percpu(xen_lazy_mmu); -- -- x86_write_percpu(xen_lazy_mmu, false); --#if CONFIG_XEN_COMPAT > 0x030002 -- set_pmd(pmd, *pmd_k); --#else -- /* -- * When running on older Xen we must launder *pmd_k through -- * pmd_val() to ensure that _PAGE_PRESENT is correctly set. -- */ -- set_pmd(pmd, __pmd(pmd_val(*pmd_k))); --#endif -- x86_write_percpu(xen_lazy_mmu, lazy); -- } else -- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); -- return pmd_k; - } --#endif - --#ifdef CONFIG_X86_64 --static const char errata93_warning[] = --KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" --KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" --KERN_ERR "******* Please consider a BIOS update.\n" --KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; --#endif ++ printk(KERN_CONT "\n"); ++ return; ++bad: ++ printk("BAD\n"); ++} ++ +#endif /* CONFIG_X86_64 */ - --/* Workaround for K8 erratum #93 & buggy BIOS. -- BIOS SMM functions are required to use a specific workaround -- to avoid corruption of the 64bit RIP register on C stepping K8. -- A lot of BIOS that didn't get tested properly miss this. -- The OS sees this as a page fault with the upper 32bits of RIP cleared. -- Try to work around it here. -- Note we only handle faults in kernel here. -- Does nothing for X86_32 ++ +/* + * Workaround for K8 erratum #93 & buggy BIOS. + * @@ -12596,85 +12499,81 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + * + * Note we only handle faults in kernel here. + * Does nothing on 32-bit. 
- */ - static int is_errata93(struct pt_regs *regs, unsigned long address) - { - #ifdef CONFIG_X86_64 -- static int warned; ++ */ ++static int is_errata93(struct pt_regs *regs, unsigned long address) ++{ ++#ifdef CONFIG_X86_64 + static int once; + - if (address != regs->ip) - return 0; ++ if (address != regs->ip) ++ return 0; + - if ((address >> 32) != 0) - return 0; ++ if ((address >> 32) != 0) ++ return 0; + - address |= 0xffffffffUL << 32; - if ((address >= (u64)_stext && address <= (u64)_etext) || - (address >= MODULES_VADDR && address <= MODULES_END)) { -- if (!warned) { ++ address |= 0xffffffffUL << 32; ++ if ((address >= (u64)_stext && address <= (u64)_etext) || ++ (address >= MODULES_VADDR && address <= MODULES_END)) { + if (!once) { - printk(errata93_warning); -- warned = 1; ++ printk(errata93_warning); + once = 1; - } - regs->ip = address; - return 1; -@@ -356,16 +574,17 @@ static int is_errata93(struct pt_regs *r - } - - /* -- * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal -- * addresses >4GB. We catch this in the page fault handler because these -- * addresses are not reachable. Just detect this case and return. Any code ++ } ++ regs->ip = address; ++ return 1; ++ } ++#endif ++ return 0; ++} ++ ++/* + * Work around K8 erratum #100 K8 in compat mode occasionally jumps + * to illegal addresses >4GB. + * + * We catch this in the page fault handler because these addresses + * are not reachable. Just detect this case and return. Any code - * segment in LDT is compatibility mode. - */ - static int is_errata100(struct pt_regs *regs, unsigned long address) - { - #ifdef CONFIG_X86_64 -- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && -- (address >> 32)) ++ * segment in LDT is compatibility mode. ++ */ ++static int is_errata100(struct pt_regs *regs, unsigned long address) ++{ ++#ifdef CONFIG_X86_64 + if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) - return 1; - #endif - return 0; -@@ -375,8 +594,9 @@ static int is_f00f_bug(struct pt_regs *r - { - #ifdef CONFIG_X86_F00F_BUG - unsigned long nr; ++ return 1; ++#endif ++ return 0; ++} + - /* -- * Pentium F0 0F C7 C8 bug workaround. ++static int is_f00f_bug(struct pt_regs *regs, unsigned long address) ++{ ++#ifdef CONFIG_X86_F00F_BUG ++ unsigned long nr; ++ ++ /* + * Pentium F0 0F C7 C8 bug workaround: - */ - if (boot_cpu_data.f00f_bug) { - nr = (address - idt_descr.address) >> 3; -@@ -390,62 +610,277 @@ static int is_f00f_bug(struct pt_regs *r - return 0; - } - --static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, -- unsigned long address) ++ */ ++ if (boot_cpu_data.f00f_bug) { ++ nr = (address - idt_descr.address) >> 3; ++ ++ if (nr == 6) { ++ do_invalid_op(regs, 0); ++ return 1; ++ } ++ } ++#endif ++ return 0; ++} ++ +static const char nx_warning[] = KERN_CRIT +"kernel tried to execute NX-protected page - exploit attempt? 
(uid: %d)\n"; + +static void +show_fault_oops(struct pt_regs *regs, unsigned long error_code, + unsigned long address) - { --#ifdef CONFIG_X86_32 - if (!oops_may_print()) - return; --#endif - --#ifdef CONFIG_X86_PAE - if (error_code & PF_INSTR) { - unsigned int level; ++{ ++ if (!oops_may_print()) ++ return; ++ ++ if (error_code & PF_INSTR) { ++ unsigned int level; + pte_t *pte = lookup_address(address, &level); @@ -12952,7 +12851,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if ((error_code & PF_INSTR) && !pte_exec(*pte)) return 0; -@@ -453,21 +888,25 @@ static int spurious_fault_check(unsigned +@@ -453,21 +899,25 @@ static int spurious_fault_check(unsigned } /* @@ -12985,10 +12884,19 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* Reserved-bit violation or user access to kernel space? */ if (error_code & (PF_USER | PF_RSVD)) -@@ -495,117 +934,62 @@ static int spurious_fault(unsigned long - if (!pte_present(*pte)) - return 0; +@@ -486,126 +936,71 @@ static int spurious_fault(unsigned long + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) +- return 0; +- +- if (pmd_large(*pmd)) +- return spurious_fault_check(error_code, (pte_t *) pmd); +- +- pte = pte_offset_kernel(pmd, address); +- if (!pte_present(*pte)) +- return 0; +- - return spurious_fault_check(error_code, pte); -} - @@ -13007,10 +12915,18 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - unsigned long pgd_paddr; - pmd_t *pmd_k; - pte_t *pte_k; -- ++ return 0; + - /* Make sure we are in vmalloc area */ - if (!(address >= VMALLOC_START && address < VMALLOC_END)) - return -1; ++ if (pmd_large(*pmd)) ++ return spurious_fault_check(error_code, (pte_t *) pmd); ++ ++ pte = pte_offset_kernel(pmd, address); ++ if (!pte_present(*pte)) ++ return 0; ++ + ret = spurious_fault_check(error_code, pte); + if (!ret) + return 0; @@ -13138,7 +13054,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* Set the "privileged fault" bit to something sane. */ if (user_mode_vm(regs)) -@@ -615,13 +999,12 @@ void __kprobes do_page_fault(struct pt_r +@@ -615,13 +1010,12 @@ void __kprobes do_page_fault(struct pt_r tsk = current; mm = tsk->mm; @@ -13154,7 +13070,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (unlikely(kmmio_fault(regs, address))) return; -@@ -638,328 +1021,158 @@ void __kprobes do_page_fault(struct pt_r +@@ -638,338 +1032,158 @@ void __kprobes do_page_fault(struct pt_r * (error_code & 4) == 0, and that the fault was not a * protection error (error_code & 9) == 0. 
*/ @@ -13524,6 +13440,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -DEFINE_SPINLOCK(pgd_lock); -LIST_HEAD(pgd_list); - +-#define pgd_page_table(what, pg) \ +- spin_##what(&((struct mm_struct *)(pg)->private)->page_table_lock) +- -void vmalloc_sync_all(void) -{ - unsigned long address; @@ -13540,8 +13459,13 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - - spin_lock_irqsave(&pgd_lock, flags); - list_for_each_entry(page, &pgd_list, lru) { -- if (!vmalloc_sync_one(page_address(page), -- address)) +- pmd_t *pmd; +- +- pgd_page_table(lock, page); +- pmd = vmalloc_sync_one(page_address(page), address); +- pgd_page_table(unlock, page); +- +- if (!pmd) - break; - } - spin_unlock_irqrestore(&pgd_lock, flags); @@ -13559,17 +13483,19 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches - list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); +- pgd_page_table(lock, page); - if (pgd_none(*pgd)) - set_pgd(pgd, *pgd_ref); - else - BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); +- pgd_page_table(unlock, page); - } - spin_unlock_irqrestore(&pgd_lock, flags); - } -#endif } ---- head-2010-05-25.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -1,5 +1,6 @@ #include #include @@ -13698,8 +13624,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + } + totalram_pages += totalhigh_pages; +} ---- head-2010-05-25.orig/arch/x86/mm/hypervisor.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/hypervisor.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-01 14:44:12.000000000 +0100 @@ -36,6 +36,7 @@ #include #include @@ -13718,7 +13644,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define NR_MC BITS_PER_LONG #define NR_MMU BITS_PER_LONG #define NR_MMUEXT (BITS_PER_LONG / 4) -@@ -538,7 +542,7 @@ int xen_create_contiguous_region( +@@ -536,7 +540,7 @@ int xen_create_contiguous_region( unsigned int level; if (vstart < __START_KERNEL_map @@ -13727,7 +13653,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return -EINVAL; ptep = lookup_address((unsigned long)__va(__pa(vstart)), &level); -@@ -953,6 +957,6 @@ int write_ldt_entry(struct desc_struct * +@@ -951,6 +955,6 @@ int write_ldt_entry(struct desc_struct * int write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) { @@ -13736,7 +13662,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return HYPERVISOR_update_descriptor(mach_gp, *(const u64*)desc); } --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/mm/init-xen.c 2010-03-24 15:25:06.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -0,0 +1,459 @@ +#include +#include @@ -14197,8 +14123,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + free_init_pages("initrd memory", start, end); +} +#endif ---- 
head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -52,9 +52,7 @@ #include #include @@ -14894,8 +14820,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, int flags) { ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -51,6 +51,8 @@ #include #include @@ -15579,8 +15505,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, int flags) { ---- head-2010-05-25.orig/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -20,10 +20,11 @@ #include #include @@ -15642,8 +15568,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); ---- head-2010-05-25.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/ioremap-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:20.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:25.000000000 +0100 @@ -23,13 +23,17 @@ #include #include @@ -15720,7 +15646,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return false; return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT); } -@@ -462,16 +459,17 @@ static void __iomem *__ioremap_caller(re +@@ -443,16 +440,17 @@ static void __iomem *__ioremap_caller(re return NULL; area->phys_addr = phys_addr; vaddr = (unsigned long) area->addr; @@ -15742,7 +15668,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return NULL; } -@@ -528,7 +526,7 @@ EXPORT_SYMBOL(ioremap_nocache); +@@ -509,7 +507,7 @@ EXPORT_SYMBOL(ioremap_nocache); * * Must be freed with iounmap. 
*/ @@ -15751,7 +15677,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { if (pat_enabled) return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, -@@ -558,7 +556,8 @@ static void __iomem *ioremap_default(res +@@ -539,7 +537,8 @@ static void __iomem *ioremap_default(res * - UC_MINUS for non-WB-able memory with no other conflicting mappings * - Inherit from confliting mappings otherwise */ @@ -15761,7 +15687,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (err < 0) return NULL; -@@ -697,13 +696,19 @@ static inline pte_t * __init early_iorem +@@ -678,13 +677,19 @@ static inline pte_t * __init early_iorem return &bm_pte[pte_index(addr)]; } @@ -15781,7 +15707,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); memset(bm_pte, 0, sizeof(bm_pte)); make_lowmem_page_readonly(bm_pte, XENFEAT_writable_page_tables); -@@ -734,7 +739,7 @@ void __init early_ioremap_reset(void) +@@ -715,7 +720,7 @@ void __init early_ioremap_reset(void) } static void __init __early_set_fixmap(enum fixed_addresses idx, @@ -15790,7 +15716,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { unsigned long addr = __fix_to_virt(idx); pte_t *pte; -@@ -753,7 +758,7 @@ static void __init __early_set_fixmap(en +@@ -734,7 +739,7 @@ static void __init __early_set_fixmap(en } static inline void __init early_set_fixmap(enum fixed_addresses idx, @@ -15799,7 +15725,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { if (after_paging_init) __set_fixmap(idx, phys, prot); -@@ -771,6 +776,7 @@ static inline void __init early_clear_fi +@@ -752,6 +757,7 @@ static inline void __init early_clear_fi static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; @@ -15807,7 +15733,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches static int __init check_early_ioremap_leak(void) { int count = 0; -@@ -792,9 +798,11 @@ static int __init check_early_ioremap_le +@@ -773,9 +779,11 @@ static int __init check_early_ioremap_le } late_initcall(check_early_ioremap_leak); @@ -15821,7 +15747,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches unsigned int nrpages; enum fixed_addresses idx0, idx; int i, slot; -@@ -810,15 +818,15 @@ static void __init __iomem *__early_iore +@@ -791,15 +799,15 @@ static void __init __iomem *__early_iore } if (slot < 0) { @@ -15841,7 +15767,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches dump_stack(); } -@@ -858,20 +866,28 @@ static void __init __iomem *__early_iore +@@ -839,20 +847,28 @@ static void __init __iomem *__early_iore --nrpages; } if (early_ioremap_debug) @@ -15874,7 +15800,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { return __early_ioremap(phys_to_machine(phys_addr), size, PAGE_KERNEL); } -@@ -884,6 +900,15 @@ void __init early_iounmap(void __iomem * +@@ -865,6 +881,15 @@ void __init early_iounmap(void __iomem * enum fixed_addresses idx; int i, slot; @@ -15890,7 +15816,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (prev_map[i] == addr) { -@@ -928,8 +953,3 @@ void __init early_iounmap(void __iomem * +@@ -909,8 +934,3 @@ void __init early_iounmap(void __iomem * } prev_map[slot] = NULL; } 
@@ -15899,8 +15825,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -{ - WARN_ON(1); -} ---- head-2010-05-25.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -16,6 +16,7 @@ #include #include @@ -16438,8 +16364,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches int set_pages_x(struct page *page, int numpages) { unsigned long addr = (unsigned long)page_address(page); ---- head-2010-05-25.orig/arch/x86/mm/pat-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pat-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pat-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -31,7 +31,7 @@ #ifdef CONFIG_X86_PAT int __read_mostly pat_enabled = 1; @@ -16835,8 +16761,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } #endif /* CONFIG_XEN */ ---- head-2010-05-25.orig/arch/x86/mm/pgtable-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -122,10 +122,6 @@ void __pud_free_tlb(struct mmu_gather *t #endif /* PAGETABLE_LEVELS > 3 */ #endif /* PAGETABLE_LEVELS > 2 */ @@ -16871,7 +16797,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (pgd_none(*pgd)) continue; pud = pud_offset(pgd, 0); -@@ -736,9 +732,26 @@ int ptep_clear_flush_young(struct vm_are +@@ -739,9 +735,26 @@ int ptep_clear_flush_young(struct vm_are return young; } @@ -16899,7 +16825,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { unsigned long address = __fix_to_virt(idx); pte_t pte; -@@ -757,6 +770,8 @@ void xen_set_fixmap(enum fixed_addresses +@@ -760,6 +773,8 @@ void xen_set_fixmap(enum fixed_addresses set_pte_vaddr_pud(level3_user_pgt, address, pte); break; case FIX_EARLYCON_MEM_BASE: @@ -16908,8 +16834,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches xen_l1_entry_update(level1_fixmap_pgt + pte_index(address), pfn_pte_ma(phys >> PAGE_SHIFT, flags)); fixmaps_set++; ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-02-01 14:44:12.000000000 +0100 @@ -25,6 +25,8 @@ #include #include @@ -16950,9 +16876,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. 
This can be used to increase (or decrease) the ---- head-2010-05-25.orig/drivers/acpi/Makefile 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/drivers/acpi/Makefile 2010-03-24 15:25:06.000000000 +0100 -@@ -64,8 +64,6 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_ +--- head-2011-03-17.orig/drivers/acpi/Makefile 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/Makefile 2011-02-01 14:44:12.000000000 +0100 +@@ -67,9 +67,7 @@ obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys. processor-y := processor_driver.o processor_throttling.o processor-y += processor_idle.o processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o @@ -16962,8 +16888,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches +processor-$(CONFIG_PROCESSOR_EXTERNAL_CONTROL) += processor_perflib.o processor_extcntl.o obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o ---- head-2010-05-25.orig/drivers/acpi/acpica/hwsleep.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/drivers/acpi/acpica/hwsleep.c 2010-03-24 15:25:06.000000000 +0100 + obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o +--- head-2011-03-17.orig/drivers/acpi/acpica/hwsleep.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/acpica/hwsleep.c 2011-02-01 14:44:12.000000000 +0100 @@ -394,7 +394,7 @@ acpi_status asmlinkage acpi_enter_sleep_ #else /* PV ACPI just need check hypercall return value */ @@ -16973,29 +16900,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (err) { printk(KERN_ERR "ACPI: Hypervisor failure [%d]\n", err); return_ACPI_STATUS(AE_ERROR); ---- head-2010-05-25.orig/drivers/acpi/processor_idle.c 2010-04-15 10:06:51.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_idle.c 2010-04-15 10:06:59.000000000 +0200 -@@ -606,7 +606,7 @@ static void acpi_processor_power_verify_ - #ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL - cx->latency_ticks = cx->latency; - #else -- cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); -+ cx->latency_ticks = us_to_pm_timer_ticks(cx->latency); - #endif - /* - * On older chipsets, BM_RLD needs to be set -@@ -643,7 +643,7 @@ static int acpi_processor_power_verify(s - #ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL - cx->latency_ticks = cx->latency; /* Normalize latency */ - #else -- cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); -+ cx->latency_ticks = us_to_pm_timer_ticks(cx->latency); - #endif - break; - ---- head-2010-05-25.orig/drivers/oprofile/oprofile_files.c 2010-03-24 15:02:17.000000000 +0100 -+++ head-2010-05-25/drivers/oprofile/oprofile_files.c 2010-03-24 15:25:06.000000000 +0100 -@@ -172,6 +172,7 @@ static const struct file_operations dump +--- head-2011-03-17.orig/drivers/oprofile/oprofile_files.c 2011-01-31 17:01:49.000000000 +0100 ++++ head-2011-03-17/drivers/oprofile/oprofile_files.c 2011-02-01 14:44:12.000000000 +0100 +@@ -181,6 +181,7 @@ static const struct file_operations dump }; #ifdef CONFIG_XEN @@ -17003,21 +16910,14 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #define TMPBUFSIZE 512 ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:25:06.000000000 +0100 -@@ -47,47 +47,50 @@ struct msi_pirq_entry { +--- head-2011-03-17.orig/drivers/pci/msi-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/pci/msi-xen.c 2011-02-01 14:44:12.000000000 +0100 +@@ -47,11 +47,12 @@ struct msi_pirq_entry { /* Arch hooks */ -int __attribute__ ((weak)) 
-arch_msi_check_device(struct pci_dev *dev, int nvec, int type) --{ -- return 0; --} -- --#ifndef CONFIG_XEN --int __attribute__ ((weak)) --arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) +#ifndef arch_msi_check_device +int arch_msi_check_device(struct pci_dev *dev, int nvec, int type) { @@ -17025,59 +16925,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } +#endif --int __attribute__ ((weak)) --arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -+#ifndef arch_setup_msi_irqs -+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - { - struct msi_desc *entry; - int ret; - -+ /* -+ * If an architecture wants to support multiple MSI, it needs to -+ * override arch_setup_msi_irqs() -+ */ -+ if (type == PCI_CAP_ID_MSI && nvec > 1) -+ return 1; -+ - list_for_each_entry(entry, &dev->msi_list, list) { - ret = arch_setup_msi_irq(dev, entry); -- if (ret) -+ if (ret < 0) - return ret; -+ if (ret > 0) -+ return -ENOSPC; - } - - return 0; - } -+#endif - --void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) --{ -- return; --} -- --void __attribute__ ((weak)) --arch_teardown_msi_irqs(struct pci_dev *dev) -+#ifndef arch_teardown_msi_irqs -+void arch_teardown_msi_irqs(struct pci_dev *dev) + static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) { - struct msi_desc *entry; - - list_for_each_entry(entry, &dev->msi_list, list) { -- if (entry->irq != 0) -- arch_teardown_msi_irq(entry->irq); -+ int i, nvec; -+ if (entry->irq == 0) -+ continue; -+ nvec = 1 << entry->msi_attrib.multiple; -+ for (i = 0; i < nvec; i++) -+ arch_teardown_msi_irq(entry->irq + i); - } - } - #endif -@@ -347,13 +350,15 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state) +@@ -308,13 +309,15 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state) /** * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function @@ -17099,7 +16949,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { int pos, pirq; u16 control; -@@ -363,6 +368,7 @@ static int msi_capability_init(struct pc +@@ -324,6 +327,7 @@ static int msi_capability_init(struct pc pos = pci_find_capability(dev, PCI_CAP_ID_MSI); pci_read_config_word(dev, msi_control_reg(pos), &control); @@ -17107,7 +16957,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches pirq = msi_map_vector(dev, 0, 0); if (pirq < 0) return -EBUSY; -@@ -496,22 +502,34 @@ static int pci_msi_check_device(struct p +@@ -457,22 +461,34 @@ static int pci_msi_check_device(struct p } /** @@ -17153,7 +17003,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (status) return status; -@@ -521,6 +539,7 @@ int pci_enable_msi(struct pci_dev* dev) +@@ -482,6 +498,7 @@ int pci_enable_msi(struct pci_dev* dev) int ret; temp = dev->irq; @@ -17161,7 +17011,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches ret = pci_frontend_enable_msi(dev); if (ret) return ret; -@@ -535,23 +554,23 @@ int pci_enable_msi(struct pci_dev* dev) +@@ -496,23 +513,23 @@ int pci_enable_msi(struct pci_dev* dev) temp = dev->irq; @@ -17189,7 +17039,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { int pirq; struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev); -@@ -579,6 +598,7 @@ void pci_msi_shutdown(struct pci_dev* de +@@ -540,6 +557,7 @@ void pci_msi_shutdown(struct pci_dev* de pci_intx_for_msi(dev, 1); 
dev->msi_enabled = 0; } @@ -17197,7 +17047,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches void pci_disable_msi(struct pci_dev* dev) { pci_msi_shutdown(dev); -@@ -586,6 +606,23 @@ void pci_disable_msi(struct pci_dev* dev +@@ -547,6 +565,23 @@ void pci_disable_msi(struct pci_dev* dev EXPORT_SYMBOL(pci_disable_msi); /** @@ -17221,7 +17071,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches * pci_enable_msix - configure device's MSI-X capability structure * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of MSI-X entries -@@ -604,9 +641,8 @@ extern int pci_frontend_enable_msix(stru +@@ -565,9 +600,8 @@ extern int pci_frontend_enable_msix(stru struct msix_entry *entries, int nvec); int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) { @@ -17232,7 +17082,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev); if (!entries) -@@ -653,9 +689,7 @@ int pci_enable_msix(struct pci_dev* dev, +@@ -614,9 +648,7 @@ int pci_enable_msix(struct pci_dev* dev, if (status) return status; @@ -17243,8 +17093,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (nvec > nr_entries) return -EINVAL; ---- head-2010-05-25.orig/drivers/xen/Kconfig 2010-03-24 15:18:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/Kconfig 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-02 15:37:07.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-01 14:44:12.000000000 +0100 @@ -14,7 +14,6 @@ menu "XEN" config XEN_PRIVILEGED_GUEST @@ -17264,8 +17114,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches config NO_IDLE_HZ def_bool y ---- head-2010-05-25.orig/drivers/xen/char/mem.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/char/mem.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/char/mem.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/char/mem.c 2011-02-01 14:44:12.000000000 +0100 @@ -158,21 +158,7 @@ static ssize_t write_mem(struct file * f } @@ -17288,8 +17138,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys #endif ---- head-2010-05-25.orig/drivers/xen/core/Makefile 2010-04-19 14:50:32.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/Makefile 2010-04-19 14:52:49.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/core/Makefile 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/Makefile 2011-02-01 14:44:12.000000000 +0100 @@ -10,5 +10,5 @@ obj-$(CONFIG_SYS_HYPERVISOR) += hypervis obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o @@ -17297,12 +17147,12 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches -obj-$(CONFIG_X86_SMP) += spinlock.o +obj-$(CONFIG_SMP) += spinlock.o obj-$(CONFIG_KEXEC) += machine_kexec.o ---- head-2010-05-25.orig/drivers/xen/core/evtchn.c 2010-04-23 15:19:25.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/evtchn.c 2010-04-23 15:19:37.000000000 +0200 -@@ -150,13 +150,15 @@ DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) - #ifdef CONFIG_SMP - - static u8 cpu_evtchn[NR_EVENT_CHANNELS]; +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-01 14:42:26.000000000 +0100 ++++ 
head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-09 13:57:45.000000000 +0100 +@@ -154,13 +154,15 @@ static u8 cpu_evtchn[NR_EVENT_CHANNELS]; + #else + static u16 cpu_evtchn[NR_EVENT_CHANNELS]; + #endif -static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; +static DEFINE_PER_CPU(unsigned long[BITS_TO_LONGS(NR_EVENT_CHANNELS)], + cpu_evtchn_mask); @@ -17319,7 +17169,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches ~sh->evtchn_mask[idx]); } -@@ -168,10 +170,10 @@ static void bind_evtchn_to_cpu(unsigned +@@ -172,10 +174,10 @@ static void bind_evtchn_to_cpu(unsigned BUG_ON(!test_bit(chn, s->evtchn_mask)); if (irq != -1) @@ -17333,7 +17183,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches cpu_evtchn[chn] = cpu; } -@@ -184,11 +186,11 @@ static void init_evtchn_cpu_bindings(voi +@@ -188,14 +190,13 @@ static void init_evtchn_cpu_bindings(voi struct irq_desc *desc = irq_to_desc(i); if (desc) @@ -17342,12 +17192,16 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); -- memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); -+ memset(per_cpu(cpu_evtchn_mask, 0), ~0, sizeof(per_cpu(cpu_evtchn_mask, 0))); + for_each_possible_cpu(i) +- memset(cpu_evtchn_mask[i], +- (i == 0) ? ~0 : 0, +- sizeof(cpu_evtchn_mask[i])); ++ memset(per_cpu(cpu_evtchn_mask, i), -!i, ++ sizeof(per_cpu(cpu_evtchn_mask, i))); } static inline unsigned int cpu_from_evtchn(unsigned int evtchn) -@@ -198,9 +200,10 @@ static inline unsigned int cpu_from_evtc +@@ -205,9 +206,10 @@ static inline unsigned int cpu_from_evtc #else @@ -17360,7 +17214,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } -@@ -219,25 +222,15 @@ static inline unsigned int cpu_from_evtc +@@ -226,25 +228,15 @@ static inline unsigned int cpu_from_evtc #endif @@ -17386,7 +17240,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* Xen will never allocate port zero for any purpose. */ #define VALID_EVTCHN(chn) ((chn) != 0) -@@ -261,13 +254,12 @@ static DEFINE_PER_CPU(unsigned int, curr +@@ -268,13 +260,12 @@ static DEFINE_PER_CPU(unsigned int, curr /* NB. Interrupts are disabled on entry. */ asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs) { @@ -17402,7 +17256,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches exit_idle(); irq_enter(); -@@ -277,7 +269,8 @@ asmlinkage void __irq_entry evtchn_do_up +@@ -284,7 +275,8 @@ asmlinkage void __irq_entry evtchn_do_up vcpu_info->evtchn_upcall_pending = 0; /* Nested invocations bail immediately. */ @@ -17412,8 +17266,25 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches break; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ -@@ -286,8 +279,8 @@ asmlinkage void __irq_entry evtchn_do_up - #endif +@@ -297,18 +289,22 @@ asmlinkage void __irq_entry evtchn_do_up + * hardirq handlers see an up-to-date system time even if we + * have just woken from a long idle period. 
+ */ +- if ((irq = __get_cpu_var(virq_to_irq)[VIRQ_TIMER]) != -1) { ++ if ((irq = percpu_read(virq_to_irq[VIRQ_TIMER])) != -1) { + port = evtchn_from_irq(irq); + l1i = port / BITS_PER_LONG; + l2i = port % BITS_PER_LONG; +- if (active_evtchns(cpu, s, l1i) & (1ul<evtchn_pending_sel, 0); - start_l1i = l1i = per_cpu(current_l1i, cpu); @@ -17423,7 +17294,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches for (i = 0; l1 != 0; i++) { masked_l1 = l1 & ((~0UL) << l1i); -@@ -298,7 +291,7 @@ asmlinkage void __irq_entry evtchn_do_up +@@ -319,7 +315,7 @@ asmlinkage void __irq_entry evtchn_do_up } l1i = __ffs(masked_l1); @@ -17432,18 +17303,23 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches l2i = 0; /* usually scan entire word from start */ if (l1i == start_l1i) { /* We scan the starting word in two parts. */ -@@ -318,17 +311,18 @@ asmlinkage void __irq_entry evtchn_do_up +@@ -339,17 +335,23 @@ asmlinkage void __irq_entry evtchn_do_up /* process port */ port = (l1i * BITS_PER_LONG) + l2i; - if ((irq = evtchn_to_irq[port]) != -1) - do_IRQ(irq, regs); - else -+ if (unlikely((irq = evtchn_to_irq[port]) == -1)) ++ mask_evtchn(port); ++ if ((irq = evtchn_to_irq[port]) != -1) { ++ clear_evtchn(port); ++ if (!handle_irq(irq, regs) ++ && printk_ratelimit()) ++ pr_emerg("No handler for " ++ "irq %d (port %u)\n", ++ irq, port); ++ } else evtchn_device_upcall(port); -+ else if (!handle_irq(irq, regs) && printk_ratelimit()) -+ printk(KERN_EMERG "%s(%d): No handler for irq %d\n", -+ __func__, smp_processor_id(), irq); l2i = (l2i + 1) % BITS_PER_LONG; @@ -17457,7 +17333,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } while (l2i != 0); -@@ -340,11 +334,12 @@ asmlinkage void __irq_entry evtchn_do_up +@@ -361,27 +363,26 @@ asmlinkage void __irq_entry evtchn_do_up } /* If there were nested callbacks then we have more to do. 
*/ @@ -17471,8 +17347,72 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + set_irq_regs(old_regs); } - static struct irq_chip dynirq_chip; -@@ -551,7 +546,7 @@ static void unbind_from_irq(unsigned int +-static struct irq_chip dynirq_chip; +- +-static int find_unbound_irq(unsigned int cpu) ++static int find_unbound_irq(unsigned int cpu, struct irq_chip *chip) + { + static int warned; + int irq; + +- for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++) { ++ for (irq = DYNIRQ_BASE; irq < nr_irqs; irq++) { + struct irq_desc *desc = irq_to_desc_alloc_cpu(irq, cpu); + struct irq_cfg *cfg = desc->chip_data; + + if (!cfg->bindcount) { + desc->status |= IRQ_NOPROBE; +- set_irq_chip_and_handler_name(irq, &dynirq_chip, ++ set_irq_chip_and_handler_name(irq, chip, + handle_fasteoi_irq, + "fasteoi"); + return irq; +@@ -397,6 +398,8 @@ static int find_unbound_irq(unsigned int + return -ENOSPC; + } + ++static struct irq_chip dynirq_chip; ++ + static int bind_caller_port_to_irq(unsigned int caller_port) + { + int irq; +@@ -404,7 +407,7 @@ static int bind_caller_port_to_irq(unsig + spin_lock(&irq_mapping_update_lock); + + if ((irq = evtchn_to_irq[caller_port]) == -1) { +- if ((irq = find_unbound_irq(smp_processor_id())) < 0) ++ if ((irq = find_unbound_irq(smp_processor_id(), &dynirq_chip)) < 0) + goto out; + + evtchn_to_irq[caller_port] = irq; +@@ -427,7 +430,7 @@ static int bind_local_port_to_irq(unsign + + BUG_ON(evtchn_to_irq[local_port] != -1); + +- if ((irq = find_unbound_irq(smp_processor_id())) < 0) { ++ if ((irq = find_unbound_irq(smp_processor_id(), &dynirq_chip)) < 0) { + struct evtchn_close close = { .port = local_port }; + if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) + BUG(); +@@ -480,7 +483,7 @@ static int bind_virq_to_irq(unsigned int + spin_lock(&irq_mapping_update_lock); + + if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) { +- if ((irq = find_unbound_irq(cpu)) < 0) ++ if ((irq = find_unbound_irq(cpu, &dynirq_chip)) < 0) + goto out; + + bind_virq.virq = virq; +@@ -513,7 +516,7 @@ static int bind_ipi_to_irq(unsigned int + spin_lock(&irq_mapping_update_lock); + + if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) { +- if ((irq = find_unbound_irq(cpu)) < 0) ++ if ((irq = find_unbound_irq(cpu, &dynirq_chip)) < 0) + goto out; + + bind_ipi.vcpu = cpu; +@@ -572,7 +575,7 @@ static void unbind_from_irq(unsigned int /* Zap stats across IRQ changes of use. 
*/ for_each_possible_cpu(cpu) @@ -17481,7 +17421,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches irq_to_desc(irq)->kstat_irqs[cpu] = 0; #else kstat_cpu(cpu).irqs[irq] = 0; -@@ -669,7 +664,8 @@ int bind_ipi_to_irqhandler( +@@ -690,7 +693,8 @@ int bind_ipi_to_irqhandler( if (irq < 0) return irq; @@ -17491,16 +17431,109 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (retval != 0) { unbind_from_irq(irq); return retval; -@@ -1134,7 +1130,7 @@ void irq_resume(void) +@@ -777,16 +781,6 @@ static unsigned int startup_dynirq(unsig + + #define shutdown_dynirq mask_dynirq + +-static void ack_dynirq(unsigned int irq) +-{ +- int evtchn = evtchn_from_irq(irq); +- +- if (VALID_EVTCHN(evtchn)) { +- mask_evtchn(evtchn); +- clear_evtchn(evtchn); +- } +-} +- + static void end_dynirq(unsigned int irq) + { + if (!(irq_to_desc(irq)->status & IRQ_DISABLED)) { +@@ -803,7 +797,6 @@ static struct irq_chip dynirq_chip = { + .disable = mask_dynirq, + .mask = mask_dynirq, + .unmask = unmask_dynirq, +- .ack = ack_dynirq, + .end = end_dynirq, + .eoi = end_dynirq, + #ifdef CONFIG_SMP +@@ -874,15 +867,18 @@ static void enable_pirq(unsigned int irq + { + struct evtchn_bind_pirq bind_pirq; + int evtchn = evtchn_from_irq(irq); ++ unsigned int pirq = irq - PIRQ_BASE; + + if (VALID_EVTCHN(evtchn)) { +- clear_bit(irq - PIRQ_BASE, probing_pirq); ++ if (pirq < nr_pirqs) ++ clear_bit(pirq, probing_pirq); + goto out; + } + + bind_pirq.pirq = evtchn_get_xen_pirq(irq); + /* NB. We are happy to share unless we are probing. */ +- bind_pirq.flags = test_and_clear_bit(irq - PIRQ_BASE, probing_pirq) ++ bind_pirq.flags = (pirq < nr_pirqs ++ && test_and_clear_bit(pirq, probing_pirq)) + || (irq_to_desc(irq)->status & IRQ_AUTODETECT) + ? 0 : BIND_PIRQ__WILL_SHARE; + if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) { +@@ -938,7 +934,6 @@ static void unmask_pirq(unsigned int irq + } + + #define mask_pirq mask_dynirq +-#define ack_pirq ack_dynirq + + static void end_pirq(unsigned int irq) + { +@@ -962,7 +957,6 @@ static struct irq_chip pirq_chip = { + .disable = disable_pirq, + .mask = mask_pirq, + .unmask = unmask_pirq, +- .ack = ack_pirq, + .end = end_pirq, + .eoi = end_pirq, + .set_type = set_type_pirq, +@@ -1103,7 +1097,6 @@ static void restore_cpu_ipis(unsigned in + void irq_resume(void) + { + unsigned int cpu, irq, evtchn; +- struct irq_cfg *cfg; + + init_evtchn_cpu_bindings(); + +@@ -1119,17 +1112,22 @@ void irq_resume(void) + for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) mask_evtchn(evtchn); - /* Check that no PIRQs are still bound. */ +- /* Check that no PIRQs are still bound. */ - for (irq = PIRQ_BASE; irq < (PIRQ_BASE + NR_PIRQS); irq++) { -+ for (irq = PIRQ_BASE; irq < (PIRQ_BASE + nr_pirqs); irq++) { - cfg = irq_cfg(irq); - BUG_ON(cfg && cfg->info != IRQ_UNBOUND); +- cfg = irq_cfg(irq); +- BUG_ON(cfg && cfg->info != IRQ_UNBOUND); +- } +- + /* No IRQ <-> event-channel mappings. */ + for (irq = 0; irq < nr_irqs; irq++) { +- cfg = irq_cfg(irq); +- if (cfg) +- cfg->info &= ~((1U << _EVTCHN_BITS) - 1); ++ struct irq_cfg *cfg = irq_cfg(irq); ++ ++ if (!cfg) ++ continue; ++ ++ /* Check that no PIRQs are still bound. 
*/ ++#ifdef CONFIG_SPARSE_IRQ ++ if (irq < PIRQ_BASE || irq >= PIRQ_BASE + nr_pirqs) ++ BUG_ON(type_from_irq_cfg(cfg) == IRQT_PIRQ); ++ else ++#endif ++ BUG_ON(cfg->info != IRQ_UNBOUND); ++ ++ cfg->info &= ~((1U << _EVTCHN_BITS) - 1); } -@@ -1171,7 +1167,7 @@ int arch_init_chip_data(struct irq_desc + for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) + evtchn_to_irq[evtchn] = -1; +@@ -1157,7 +1155,7 @@ int arch_init_chip_data(struct irq_desc { if (!desc->chip_data) { /* By default all event channels notify CPU#0. */ @@ -17509,43 +17542,54 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches desc->chip_data = kzalloc(sizeof(struct irq_cfg), GFP_ATOMIC); } -@@ -1185,11 +1181,44 @@ int arch_init_chip_data(struct irq_desc +@@ -1170,12 +1168,55 @@ int arch_init_chip_data(struct irq_desc + } #endif - #if defined(CONFIG_X86_IO_APIC) +#ifdef CONFIG_SPARSE_IRQ +int nr_pirqs = NR_PIRQS; +EXPORT_SYMBOL_GPL(nr_pirqs); + +int __init arch_probe_nr_irqs(void) +{ -+ int nr_irqs_gsi, nr = acpi_probe_gsi(); ++ int nr = 256, nr_irqs_gsi; + -+ if (nr <= NR_IRQS_LEGACY) { -+ /* for acpi=off or acpi not compiled in */ -+ int idx; -+ -+ for (nr = idx = 0; idx < nr_ioapics; idx++) -+ nr += io_apic_get_redir_entries(idx) + 1; -+ } -+ nr_irqs_gsi = max(nr, NR_IRQS_LEGACY); ++ if (is_initial_xendomain()) { ++ nr_irqs_gsi = acpi_probe_gsi(); ++#ifdef CONFIG_X86_IO_APIC ++ if (nr_irqs_gsi <= NR_IRQS_LEGACY) { ++ /* for acpi=off or acpi not compiled in */ ++ int idx; + -+ nr = nr_irqs_gsi + 8 * nr_cpu_ids; ++ for (nr_irq_gsi = idx = 0; idx < nr_ioapics; idx++) ++ nr_irqs_gsi += io_apic_get_redir_entries(idx) + 1; ++ } ++#endif ++ if (nr_irqs_gsi < NR_IRQS_LEGACY) ++ nr_irqs_gsi = NR_IRQS_LEGACY; +#ifdef CONFIG_PCI_MSI -+ nr += nr_irqs_gsi * 16; ++ nr += max(nr_irqs_gsi * 16, nr_cpu_ids * 8); ++#endif ++ } else { ++ nr_irqs_gsi = NR_VECTORS; ++#ifdef CONFIG_PCI_MSI ++ nr += max(NR_IRQS_LEGACY * 16, nr_cpu_ids * 8); +#endif -+ if (nr_pirqs > nr) { -+ nr_pirqs = nr; -+ nr_irqs = nr + NR_DYNIRQS; + } + -+ printk(KERN_DEBUG "nr_irqs_gsi=%d nr_pirqs=%d\n", -+ nr_irqs_gsi, nr_pirqs); ++ if (nr_pirqs > nr_irqs_gsi) ++ nr_pirqs = nr_irqs_gsi; ++ if (nr > min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS)) ++ nr = min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS); ++ nr_irqs = min_t(int, nr_pirqs + nr, PAGE_SIZE * 8); ++ ++ printk(KERN_DEBUG "nr_pirqs: %d\n", nr_pirqs); + + return 0; +} +#endif + + #if defined(CONFIG_X86_IO_APIC) int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) { struct physdev_irq irq_op; @@ -17555,7 +17599,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches return -EINVAL; if (cfg->vector) -@@ -1212,7 +1241,7 @@ int assign_irq_vector(int irq, struct ir +@@ -1198,7 +1239,7 @@ int assign_irq_vector(int irq, struct ir void evtchn_register_pirq(int irq) { @@ -17564,8 +17608,30 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches if (identity_mapped_irq(irq) || type_from_irq(irq) != IRQT_UNBOUND) return; irq_cfg(irq)->info = mk_irq_info(IRQT_PIRQ, irq, 0); -@@ -1225,7 +1254,7 @@ int evtchn_map_pirq(int irq, int xen_pir +@@ -1209,9 +1250,29 @@ void evtchn_register_pirq(int irq) + int evtchn_map_pirq(int irq, int xen_pirq) + { if (irq < 0) { ++#ifdef CONFIG_SPARSE_IRQ ++ spin_lock(&irq_mapping_update_lock); ++ irq = find_unbound_irq(smp_processor_id(), &pirq_chip); ++ if (irq >= 0) { ++ struct irq_desc *desc; ++ struct irq_cfg *cfg; ++ ++ desc = irq_to_desc_alloc_node(irq, numa_node_id()); ++ cfg = 
desc->chip_data; ++ BUG_ON(type_from_irq(irq) != IRQT_UNBOUND); ++ cfg->bindcount++; ++ cfg->info = mk_irq_info(IRQT_PIRQ, xen_pirq, 0); ++ } ++ spin_unlock(&irq_mapping_update_lock); ++ if (irq < 0) ++ return irq; ++ } else if (irq >= PIRQ_BASE && irq < PIRQ_BASE + nr_pirqs) { ++ WARN_ONCE(1, "Non-MSI IRQ#%d (Xen %d)\n", irq, xen_pirq); ++ return -EINVAL; ++#else static DEFINE_SPINLOCK(irq_alloc_lock); - irq = PIRQ_BASE + NR_PIRQS - 1; @@ -17573,17 +17639,41 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches spin_lock(&irq_alloc_lock); do { struct irq_desc *desc; -@@ -1285,7 +1314,7 @@ void __init xen_init_IRQ(void) +@@ -1233,6 +1294,7 @@ int evtchn_map_pirq(int irq, int xen_pir + return -ENOSPC; + set_irq_chip_and_handler_name(irq, &pirq_chip, + handle_fasteoi_irq, "fasteoi"); ++#endif + } else if (!xen_pirq) { + if (unlikely(type_from_irq(irq) != IRQT_PIRQ)) + return -EINVAL; +@@ -1244,6 +1306,9 @@ int evtchn_map_pirq(int irq, int xen_pir + */ + set_irq_chip_and_handler(irq, NULL, NULL); + irq_cfg(irq)->info = IRQ_UNBOUND; ++#ifdef CONFIG_SPARSE_IRQ ++ irq_cfg(irq)->bindcount--; ++#endif + return 0; + } else if (type_from_irq(irq) != IRQT_PIRQ + || index_from_irq(irq) != xen_pirq) { +@@ -1270,8 +1335,13 @@ void __init xen_init_IRQ(void) + init_evtchn_cpu_bindings(); ++#ifdef CONFIG_SPARSE_IRQ ++ i = nr_irqs; ++#else ++ i = nr_pirqs; ++#endif pirq_needs_eoi = alloc_bootmem_pages(sizeof(unsigned long) - * BITS_TO_LONGS(ALIGN(NR_PIRQS, PAGE_SIZE * 8))); -+ * BITS_TO_LONGS(ALIGN(nr_pirqs, PAGE_SIZE * 8))); ++ * BITS_TO_LONGS(ALIGN(i, PAGE_SIZE * 8))); eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT; if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0) pirq_eoi_does_unmask = true; -@@ -1301,7 +1330,7 @@ void __init xen_init_IRQ(void) - handle_level_irq, "level"); +@@ -1287,7 +1357,7 @@ void __init xen_init_IRQ(void) + handle_fasteoi_irq, "fasteoi"); } - for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_PIRQS); i++) { @@ -17591,8 +17681,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #else for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_IRQS_LEGACY); i++) { #endif ---- head-2010-05-25.orig/drivers/xen/core/smpboot.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/smpboot.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-02-01 14:44:12.000000000 +0100 @@ -18,7 +18,6 @@ #include #include @@ -17601,40 +17691,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #include #include #include -@@ -54,8 +53,8 @@ static char call1func_name[NR_CPUS][15]; - #define set_cpu_to_apicid(cpu, apicid) - #endif - --DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); --DEFINE_PER_CPU(cpumask_t, cpu_core_map); -+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); -+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); - - void __init prefill_possible_map(void) - { -@@ -88,8 +87,8 @@ set_cpu_sibling_map(unsigned int cpu) - cpu_data(cpu).phys_proc_id = cpu; - cpu_data(cpu).cpu_core_id = 0; - -- per_cpu(cpu_sibling_map, cpu) = cpumask_of_cpu(cpu); -- per_cpu(cpu_core_map, cpu) = cpumask_of_cpu(cpu); -+ cpumask_copy(cpu_sibling_mask(cpu), cpumask_of(cpu)); -+ cpumask_copy(cpu_core_mask(cpu), cpumask_of(cpu)); - - cpu_data(cpu).booted_cores = 1; - } -@@ -100,8 +99,8 @@ remove_siblinginfo(unsigned int cpu) - cpu_data(cpu).phys_proc_id = BAD_APICID; - 
cpu_data(cpu).cpu_core_id = BAD_APICID; - -- cpus_clear(per_cpu(cpu_sibling_map, cpu)); -- cpus_clear(per_cpu(cpu_core_map, cpu)); -+ cpumask_clear(cpu_sibling_mask(cpu)); -+ cpumask_clear(cpu_core_mask(cpu)); - - cpu_data(cpu).booted_cores = 0; - } -@@ -224,7 +223,7 @@ static void __cpuinit cpu_initialize_con +@@ -188,7 +187,7 @@ static void __cpuinit cpu_initialize_con smp_trap_init(ctxt.trap_ctxt); ctxt.ldt_ents = 0; @@ -17643,7 +17700,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches ctxt.gdt_ents = GDT_SIZE / 8; ctxt.user_regs.cs = __KERNEL_CS; -@@ -242,12 +241,13 @@ static void __cpuinit cpu_initialize_con +@@ -206,12 +205,13 @@ static void __cpuinit cpu_initialize_con ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); ctxt.user_regs.fs = __KERNEL_PERCPU; @@ -17658,20 +17715,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches #endif if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt)) -@@ -275,8 +275,10 @@ void __init smp_prepare_cpus(unsigned in - current_thread_info()->cpu = 0; - - for_each_possible_cpu (cpu) { -- cpus_clear(per_cpu(cpu_sibling_map, cpu)); -- cpus_clear(per_cpu(cpu_core_map, cpu)); -+ alloc_cpumask_var(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL); -+ alloc_cpumask_var(&per_cpu(cpu_core_map, cpu), GFP_KERNEL); -+ cpumask_clear(cpu_sibling_mask(cpu)); -+ cpumask_clear(cpu_core_mask(cpu)); - } - - set_cpu_sibling_map(0); -@@ -303,9 +305,6 @@ void __init smp_prepare_cpus(unsigned in +@@ -256,9 +256,6 @@ void __init smp_prepare_cpus(unsigned in if (IS_ERR(idle)) panic("failed fork for CPU %d", cpu); @@ -17681,8 +17725,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches gdt_addr = get_cpu_gdt_table(cpu); make_page_readonly(gdt_addr, XENFEAT_writable_descriptor_tables); -@@ -319,12 +318,12 @@ void __init smp_prepare_cpus(unsigned in - set_cpu_to_apicid(cpu, apicid); +@@ -269,12 +266,12 @@ void __init smp_prepare_cpus(unsigned in + cpu_data(cpu).cpu_index = cpu; #ifdef __x86_64__ - cpu_pda(cpu)->pcurrent = idle; @@ -17698,7 +17742,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches irq_ctx_init(cpu); -@@ -348,10 +347,7 @@ void __init smp_prepare_cpus(unsigned in +@@ -298,10 +295,7 @@ void __init smp_prepare_cpus(unsigned in void __init smp_prepare_boot_cpu(void) { @@ -17710,9 +17754,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches prefill_possible_map(); } ---- head-2010-05-25.orig/drivers/xen/core/spinlock.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/spinlock.c 2010-03-24 15:25:06.000000000 +0100 -@@ -78,13 +78,13 @@ static unsigned int spin_adjust(struct s +--- head-2011-03-17.orig/drivers/xen/core/spinlock.c 2011-03-15 16:51:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/spinlock.c 2011-03-15 16:43:45.000000000 +0100 +@@ -92,7 +92,7 @@ static unsigned int spin_adjust(struct s unsigned int xen_spin_adjust(const raw_spinlock_t *lock, unsigned int token) { @@ -17721,14 +17765,14 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } bool xen_spin_wait(raw_spinlock_t *lock, unsigned int *ptok, - unsigned int flags) - { -- int irq = x86_read_percpu(spinlock_irq); -+ int irq = percpu_read(spinlock_irq); - bool rc; - typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask; - raw_rwlock_t *rm_lock; -@@ -97,9 +97,9 @@ bool xen_spin_wait(raw_spinlock_t *lock, +@@ -105,21 +105,21 @@ bool xen_spin_wait(raw_spinlock_t *lock, + + 
/* If kicker interrupt not initialized yet, just spin. */ + if (unlikely(!cpu_online(raw_smp_processor_id())) +- || unlikely(!x86_read_percpu(poll_evtchn))) ++ || unlikely(!percpu_read(poll_evtchn))) + return false; + /* announce we're spinning */ spinning.ticket = *ptok >> TICKET_SHIFT; spinning.lock = lock; @@ -17740,18 +17784,58 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches upcall_mask = current_vcpu_info()->evtchn_upcall_mask; do { -@@ -184,7 +184,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + bool nested = false; + +- clear_evtchn(x86_read_percpu(poll_evtchn)); ++ clear_evtchn(percpu_read(poll_evtchn)); + + /* + * Check again to make sure it didn't become free while +@@ -132,7 +132,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + * without rechecking the lock. + */ + if (spinning.prev) +- set_evtchn(x86_read_percpu(poll_evtchn)); ++ set_evtchn(percpu_read(poll_evtchn)); + rc = true; + break; + } +@@ -187,7 +187,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + + current_vcpu_info()->evtchn_upcall_mask = upcall_mask; + +- rc = !test_evtchn(x86_read_percpu(poll_evtchn)); ++ rc = !test_evtchn(percpu_read(poll_evtchn)); + if (!rc) + inc_irq_stat(irq_lock_count); + } while (spinning.prev || rc); +@@ -199,11 +199,11 @@ bool xen_spin_wait(raw_spinlock_t *lock, /* announce we're done */ other = spinning.prev; - x86_write_percpu(spinning, other); + percpu_write(spinning, other); - rm_lock = &__get_cpu_var(spinning_rm_lock); raw_local_irq_disable(); - __raw_write_lock(rm_lock); ---- head-2010-05-25.orig/drivers/xen/netback/interface.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/interface.c 2010-03-24 15:25:06.000000000 +0100 -@@ -121,7 +121,7 @@ static void netbk_get_drvinfo(struct net +- rm_idx = x86_read_percpu(rm_seq.idx); ++ rm_idx = percpu_read(rm_seq.idx); + smp_wmb(); +- x86_write_percpu(rm_seq.idx, rm_idx + 1); ++ percpu_write(rm_seq.idx, rm_idx + 1); + mb(); + + /* +@@ -227,7 +227,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, + } + + rm_idx &= 1; +- while (x86_read_percpu(rm_seq.ctr[rm_idx].counter)) ++ while (percpu_read(rm_seq.ctr[rm_idx].counter)) + cpu_relax(); + raw_local_irq_restore(upcall_mask); + *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); +--- head-2011-03-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:15:18.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/interface.c 2011-02-17 10:16:00.000000000 +0100 +@@ -162,7 +162,7 @@ static void netbk_get_drvinfo(struct net struct ethtool_drvinfo *info) { strcpy(info->driver, "netbk"); @@ -17760,9 +17844,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } static const struct netif_stat { ---- head-2010-05-25.orig/drivers/xen/netback/netback.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/netback.c 2010-03-24 15:25:06.000000000 +0100 -@@ -333,7 +333,7 @@ int netif_be_start_xmit(struct sk_buff * +--- head-2011-03-17.orig/drivers/xen/netback/netback.c 2011-03-01 11:52:09.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/netback.c 2011-02-01 14:44:12.000000000 +0100 +@@ -342,7 +342,7 @@ int netif_be_start_xmit(struct sk_buff * */ netif->tx_queue_timeout.data = (unsigned long)netif; netif->tx_queue_timeout.function = tx_queue_callback; @@ -17771,7 +17855,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } } -@@ -354,7 +354,7 @@ static void xen_network_done_notify(void +@@ -363,7 +363,7 @@ static void xen_network_done_notify(void static 
struct net_device *eth0_dev = NULL; if (unlikely(eth0_dev == NULL)) eth0_dev = __dev_get_by_name(&init_net, "eth0"); @@ -17780,7 +17864,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } /* * Add following to poll() function in NAPI driver (Tigon3 is example): -@@ -1308,8 +1308,7 @@ static void net_tx_action(unsigned long +@@ -1305,8 +1305,7 @@ static void net_tx_action(unsigned long (unsigned long)netif; netif->credit_timeout.function = tx_credit_callback; @@ -17790,9 +17874,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches netif_put(netif); continue; } ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.c 2010-03-24 15:25:06.000000000 +0100 -@@ -102,7 +102,7 @@ static const int MODPARM_rx_flip = 0; +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-09 16:04:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-09 16:04:51.000000000 +0100 +@@ -103,7 +103,7 @@ static const int MODPARM_rx_flip = 0; static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ @@ -17801,7 +17885,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) -@@ -635,7 +635,7 @@ static int network_open(struct net_devic +@@ -632,7 +632,7 @@ static int network_open(struct net_devic if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); @@ -17810,7 +17894,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } } spin_unlock_bh(&np->rx_lock); -@@ -707,7 +707,7 @@ static void rx_refill_timeout(unsigned l +@@ -703,7 +703,7 @@ static void rx_refill_timeout(unsigned l netfront_accelerator_call_stop_napi_irq(np, dev); @@ -17819,7 +17903,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } static void network_alloc_rx_buffers(struct net_device *dev) -@@ -1064,7 +1064,7 @@ static irqreturn_t netif_int(int irq, vo +@@ -1057,7 +1057,7 @@ static irqreturn_t netif_int(int irq, vo if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); @@ -17828,7 +17912,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } } -@@ -1521,7 +1521,7 @@ err: +@@ -1514,7 +1514,7 @@ err: } if (!more_to_do && !accel_more_to_do) @@ -17837,8 +17921,45 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches local_irq_restore(flags); } ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:25:06.000000000 +0100 +@@ -1803,7 +1803,7 @@ static void netfront_get_drvinfo(struct + struct ethtool_drvinfo *info) + { + strcpy(info->driver, "netfront"); +- strcpy(info->bus_info, dev->dev.parent->bus_id); ++ strcpy(info->bus_info, dev_name(dev->dev.parent)); + } + + static int network_connect(struct net_device *dev) +--- head-2011-03-17.orig/drivers/xen/pciback/conf_space_header.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pciback/conf_space_header.c 2011-02-10 12:59:56.000000000 +0100 +@@ -24,7 +24,7 @@ static int command_read(struct pci_dev * + int ret; + + ret = pciback_read_config_word(dev, offset, value, data); +- if (!atomic_read(&dev->enable_cnt)) ++ if 
(!pci_is_enabled(dev)) + return ret; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) { +@@ -41,14 +41,14 @@ static int command_write(struct pci_dev + { + int err; + +- if (!atomic_read(&dev->enable_cnt) && is_enable_cmd(value)) { ++ if (!pci_is_enabled(dev) && is_enable_cmd(value)) { + if (unlikely(verbose_request)) + printk(KERN_DEBUG "pciback: %s: enable\n", + pci_name(dev)); + err = pci_enable_device(dev); + if (err) + return err; +- } else if (atomic_read(&dev->enable_cnt) && !is_enable_cmd(value)) { ++ } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) { + if (unlikely(verbose_request)) + printk(KERN_DEBUG "pciback: %s: disable\n", + pci_name(dev)); +--- head-2011-03-17.orig/drivers/xen/sfc_netfront/accel_msg.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netfront/accel_msg.c 2011-02-01 14:44:12.000000000 +0100 @@ -47,7 +47,7 @@ static void vnic_start_interrupts(netfro netfront_accel_disable_net_interrupts(vnic); vnic->irq_enabled = 0; @@ -17857,8 +17978,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches } else { spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags); ---- head-2010-05-25.orig/drivers/xen/usbback/usbstub.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbback/usbstub.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/usbback/usbstub.c 2011-03-11 10:55:46.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/usbstub.c 2011-02-01 14:44:12.000000000 +0100 @@ -188,7 +188,7 @@ static int usbstub_probe(struct usb_inte const struct usb_device_id *id) { @@ -17868,8 +17989,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches struct vusb_port_id *portid = NULL; struct usbstub *stub = NULL; usbif_t *usbif = NULL; ---- head-2010-05-25.orig/drivers/xen/usbfront/usbfront-dbg.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/usbfront/usbfront-dbg.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/usbfront/usbfront-dbg.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/usbfront-dbg.c 2011-02-01 14:44:12.000000000 +0100 @@ -64,7 +64,7 @@ static ssize_t show_statistics(struct de "%s\n" "xenhcd, hcd state %d\n", @@ -17879,8 +18000,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches hcd->product_desc, hcd->state); size -= temp; ---- head-2010-05-25.orig/drivers/xen/usbfront/xenbus.c 2010-04-15 09:53:49.000000000 +0200 -+++ head-2010-05-25/drivers/xen/usbfront/xenbus.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/usbfront/xenbus.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/xenbus.c 2011-02-01 14:44:12.000000000 +0100 @@ -252,10 +252,10 @@ static struct usb_hcd *create_hcd(struct } switch (usb_ver) { @@ -17894,9 +18015,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches break; default: xenbus_dev_fatal(dev, err, "invalid usb-ver"); ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:25:06.000000000 +0100 -@@ -230,7 +230,7 @@ static struct xen_bus_type xenbus_fronte +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:44:12.000000000 +0100 +@@ -231,7 +231,7 @@ static struct xen_bus_type xenbus_fronte }, #if 
defined(CONFIG_XEN) || defined(MODULE) .dev = { @@ -17905,8 +18026,8 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches }, #endif }; ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:25:06.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 14:44:12.000000000 +0100 @@ -129,7 +129,7 @@ static struct xen_bus_type xenbus_backen .dev_attrs = xenbus_backend_attrs, }, @@ -17916,39 +18037,34 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches }, }; ---- head-2010-05-25.orig/include/linux/interrupt.h 2010-03-24 14:53:41.000000000 +0100 -+++ head-2010-05-25/include/linux/interrupt.h 2010-03-24 15:25:06.000000000 +0100 -@@ -52,6 +52,7 @@ - * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. - * Used by threaded interrupts which need to keep the - * irq line disabled until the threaded handler has been run. -+ * IRQF_NO_SUSPEND - Prevent this interrupt from being disabled during suspend. - */ - #define IRQF_DISABLED 0x00000020 - #define IRQF_SAMPLE_RANDOM 0x00000040 -@@ -62,6 +63,7 @@ - #define IRQF_NOBALANCING 0x00000800 - #define IRQF_IRQPOLL 0x00001000 - #define IRQF_ONESHOT 0x00002000 -+#define IRQF_NO_SUSPEND 0x00008000 +--- head-2011-03-17.orig/kernel/sched.c 2011-03-17 14:35:45.000000000 +0100 ++++ head-2011-03-17/kernel/sched.c 2011-02-01 14:44:12.000000000 +0100 +@@ -4020,6 +4020,12 @@ need_resched_nonpreemptible: + EXPORT_SYMBOL(schedule); + #ifdef CONFIG_MUTEX_SPIN_ON_OWNER ++#include ++ ++#ifndef arch_cpu_is_running ++#define arch_cpu_is_running(cpu) true ++#endif ++ /* - * Bits used by threaded handlers: ---- head-2010-05-25.orig/kernel/irq/manage.c 2010-05-25 09:12:09.000000000 +0200 -+++ head-2010-05-25/kernel/irq/manage.c 2010-03-24 15:25:06.000000000 +0100 -@@ -200,7 +200,8 @@ static inline int setup_affinity(unsigne - void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) - { - if (suspend) { -- if (!desc->action || (desc->action->flags & IRQF_TIMER)) -+ if (!desc->action || -+ (desc->action->flags & (IRQF_TIMER | IRQF_NO_SUSPEND))) - return; - desc->status |= IRQ_SUSPENDED; - } ---- head-2010-05-25.orig/lib/swiotlb-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/lib/swiotlb-xen.c 2010-03-24 15:25:06.000000000 +0100 -@@ -175,7 +175,7 @@ static void *swiotlb_bus_to_virt(dma_add + * Look out! "owner" is an entirely speculative pointer + * access and not reliable. +@@ -4078,7 +4084,8 @@ int mutex_spin_on_owner(struct mutex *lo + /* + * Is that owner really running on that cpu? 
+ */ +- if (task_thread_info(rq->curr) != owner || need_resched()) ++ if (task_thread_info(rq->curr) != owner || need_resched() ++ || !arch_cpu_is_running(cpu)) + return 0; + + arch_mutex_cpu_relax(); +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 14:44:12.000000000 +0100 +@@ -183,7 +183,7 @@ static void *swiotlb_bus_to_virt(dma_add return phys_to_virt(swiotlb_bus_to_phys(address)); } @@ -17957,7 +18073,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { return 0; } -@@ -523,13 +523,13 @@ swiotlb_full(struct device *dev, size_t +@@ -546,13 +546,13 @@ swiotlb_full(struct device *dev, size_t * Once the device is given the dma address, the device owns this memory until * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ @@ -17978,7 +18094,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches void *map; BUG_ON(dir == DMA_NONE); -@@ -539,44 +539,24 @@ _swiotlb_map_single(struct device *hwdev +@@ -562,44 +562,24 @@ _swiotlb_map_single(struct device *hwdev * we can safely return the device addr and not worry about bounce * buffering it. */ @@ -18029,7 +18145,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Unmap a single streaming mode DMA translation. The dma_addr and size must -@@ -586,9 +566,9 @@ swiotlb_map_single_phys(struct device *h +@@ -609,9 +589,9 @@ swiotlb_map_single_phys(struct device *h * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. */ @@ -18042,7 +18158,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { char *dma_addr = swiotlb_bus_to_virt(dev_addr); -@@ -598,15 +578,7 @@ swiotlb_unmap_single_attrs(struct device +@@ -621,15 +601,7 @@ swiotlb_unmap_single_attrs(struct device else gnttab_dma_unmap_page(dev_addr); } @@ -18059,25 +18175,25 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches /* * Make physical memory consistent for a single streaming mode DMA translation -@@ -620,7 +592,7 @@ EXPORT_SYMBOL(swiotlb_unmap_single); - */ +@@ -654,7 +626,7 @@ swiotlb_sync_single(struct device *hwdev + void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { - char *dma_addr = swiotlb_bus_to_virt(dev_addr); - -@@ -632,7 +604,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cp + swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); + } +@@ -662,7 +634,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cp void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { - char *dma_addr = swiotlb_bus_to_virt(dev_addr); - -@@ -644,7 +616,8 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_de + swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); + } +@@ -685,7 +657,8 @@ swiotlb_sync_single_range(struct device void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, @@ -18085,9 +18201,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + unsigned long offset, size_t size, + enum dma_data_direction dir) { - char *dma_addr = swiotlb_bus_to_virt(dev_addr); - -@@ -656,7 +629,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_ra + swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, + SYNC_FOR_CPU); +@@ -694,7 +667,8 @@ 
EXPORT_SYMBOL_GPL(swiotlb_sync_single_ra void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, @@ -18095,9 +18211,9 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches + unsigned long offset, size_t size, + enum dma_data_direction dir) { - char *dma_addr = swiotlb_bus_to_virt(dev_addr); - -@@ -684,7 +658,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_ra + swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, + SYNC_FOR_DEVICE); +@@ -719,7 +693,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_ra */ int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, @@ -18106,7 +18222,7 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { struct scatterlist *sg; int i; -@@ -736,7 +710,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); +@@ -771,7 +745,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); */ void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, @@ -18115,27 +18231,27 @@ Automatically created from "patches.kernel.org/patch-2.6.30" by xen-port-patches { struct scatterlist *sg; int i; -@@ -770,7 +744,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg); - */ +@@ -821,7 +795,7 @@ swiotlb_sync_sg(struct device *hwdev, st + void - swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sgl, + swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { - struct scatterlist *sg; - int i; -@@ -787,7 +761,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); + } +@@ -829,7 +803,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); void - swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sgl, + swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { - struct scatterlist *sg; - int i; ---- head-2010-05-25.orig/mm/page_alloc.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-25/mm/page_alloc.c 2010-03-24 15:25:06.000000000 +0100 -@@ -4685,11 +4685,9 @@ static void __setup_per_zone_wmarks(void + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); + } +--- head-2011-03-17.orig/mm/page_alloc.c 2011-02-08 10:03:14.000000000 +0100 ++++ head-2011-03-17/mm/page_alloc.c 2011-02-08 10:05:20.000000000 +0100 +@@ -5005,11 +5005,9 @@ static void __setup_per_zone_wmarks(void } #ifdef CONFIG_XEN diff --git a/patches.xen/xen3-patch-2.6.31 b/patches.xen/xen3-patch-2.6.31 index be58080..533b111 100644 --- a/patches.xen/xen3-patch-2.6.31 +++ b/patches.xen/xen3-patch-2.6.31 @@ -7,18 +7,18 @@ Patch-mainline: 2.6.31 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches.py ---- head-2010-05-12.orig/arch/x86/Kconfig 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/Kconfig 2010-03-24 15:25:21.000000000 +0100 -@@ -24,7 +24,7 @@ config X86 +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-02-01 14:50:44.000000000 +0100 +@@ -21,7 +21,7 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE -- select HAVE_PERF_EVENTS if (!M386 && !M486) -+ select HAVE_PERF_EVENTS if (!M386 && !M486 && !XEN) +- select HAVE_PERF_EVENTS ++ select HAVE_PERF_EVENTS if !XEN + select HAVE_IRQ_WORK select HAVE_IOREMAP_PROT select HAVE_KPROBES - select ARCH_WANT_OPTIONAL_GPIOLIB -@@ -942,7 +942,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS +@@ -879,7 +879,7 @@ 
config X86_REROUTE_FOR_BROKEN_BOOT_IRQS config X86_MCE bool "Machine Check / overheating reporting" @@ -27,25 +27,25 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches ---help--- Machine Check support allows the processor to notify the kernel if it detects a problem (e.g. overheating, data corruption). -@@ -975,7 +975,7 @@ config X86_MCE_AMD +@@ -912,7 +912,7 @@ config X86_MCE_AMD config X86_ANCIENT_MCE - def_bool n + bool "Support for old Pentium 5 / WinChip machine checks" - depends on X86_32 && X86_MCE + depends on X86_32 && X86_MCE && !XEN - prompt "Support for old Pentium 5 / WinChip machine checks" ---help--- Include support for machine check handling on old Pentium 5 or WinChip -@@ -1665,6 +1665,7 @@ config KEXEC_JUMP + systems. These typically need to be enabled explicitely on the command +@@ -1609,6 +1609,7 @@ config KEXEC_JUMP config PHYSICAL_START - hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP || XEN) + hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP || XEN) + default 0x100000 if XEN default "0x1000000" ---help--- This gives the physical address where the kernel is loaded. ---- head-2010-05-12.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:50:44.000000000 +0100 @@ -770,9 +770,11 @@ ia32_sys_call_table: .quad compat_sys_signalfd4 .quad sys_eventfd2 @@ -59,9 +59,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + .quad compat_sys_rt_tgsigqueueinfo /* 335 */ + .quad sys_perf_counter_open ia32_syscall_end: ---- head-2010-05-12.orig/arch/x86/include/asm/hw_irq.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/asm/hw_irq.h 2010-03-24 15:25:21.000000000 +0100 -@@ -133,6 +133,7 @@ extern asmlinkage void smp_invalidate_in +--- head-2011-03-17.orig/arch/x86/include/asm/hw_irq.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/hw_irq.h 2011-02-01 14:50:44.000000000 +0100 +@@ -142,6 +142,7 @@ extern asmlinkage void smp_invalidate_in extern irqreturn_t smp_reschedule_interrupt(int, void *); extern irqreturn_t smp_call_function_interrupt(int, void *); extern irqreturn_t smp_call_function_single_interrupt(int, void *); @@ -69,20 +69,20 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif #endif ---- head-2010-05-12.orig/arch/x86/include/asm/required-features.h 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/arch/x86/include/asm/required-features.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/required-features.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/required-features.h 2011-02-01 14:50:44.000000000 +0100 @@ -48,7 +48,7 @@ #endif #ifdef CONFIG_X86_64 --#ifdef CONFIG_PARAVIRT_MMU -+#if defined(CONFIG_PARAVIRT_MMU) || defined(CONFIG_XEN) +-#ifdef CONFIG_PARAVIRT ++#if defined(CONFIG_PARAVIRT) || defined(CONFIG_XEN) /* Paravirtualized systems may not have PSE or PGE available */ #define NEED_PSE 0 #define NEED_PGE 0 ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:25:21.000000000 +0100 -@@ -31,6 +31,7 @@ +--- 
head-2011-03-17.orig/arch/x86/include/mach-xen/asm/agp.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/agp.h 2011-02-01 14:50:44.000000000 +0100 +@@ -48,6 +48,7 @@ /* Convert a physical address to an address suitable for the GART. */ #define phys_to_gart(x) phys_to_machine(x) #define gart_to_phys(x) machine_to_phys(x) @@ -90,8 +90,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) ({ \ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:50:44.000000000 +0100 @@ -1,7 +1,6 @@ #ifndef _ASM_X86_DESC_H #define _ASM_X86_DESC_H @@ -130,8 +130,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_X86_DESC_H */ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:50:44.000000000 +0100 @@ -118,12 +118,9 @@ enum fixed_addresses { #ifdef CONFIG_PARAVIRT FIX_PARAVIRT_BOOTMAP, @@ -157,8 +157,74 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_X86_32 FIX_WP_TEST, #endif ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypercall.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypercall.h 2011-02-01 14:50:44.000000000 +0100 +@@ -265,7 +265,7 @@ HYPERVISOR_memory_op( + unsigned int cmd, void *arg) + { + if (arch_use_lazy_mmu_mode()) +- xen_multicall_flush(false); ++ xen_multicall_flush(); + return _hypercall2(int, memory_op, cmd, arg); + } + +@@ -336,7 +336,7 @@ HYPERVISOR_grant_table_op( + int rc; + + if (arch_use_lazy_mmu_mode()) +- xen_multicall_flush(false); ++ xen_multicall_flush(); + #ifdef GNTTABOP_map_grant_ref + if (cmd == GNTTABOP_map_grant_ref) + #endif +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-03-11 11:13:19.000000000 +0100 +@@ -144,7 +144,7 @@ void scrub_pages(void *, unsigned int); + + DECLARE_PER_CPU(bool, xen_lazy_mmu); + +-void xen_multicall_flush(bool); ++void xen_multicall_flush(void); + + int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t, + unsigned long flags); +@@ -162,7 +162,7 @@ static inline void arch_enter_lazy_mmu_m + static inline void arch_leave_lazy_mmu_mode(void) + { + percpu_write(xen_lazy_mmu, false); +- xen_multicall_flush(false); ++ xen_multicall_flush(); + } + + #define arch_use_lazy_mmu_mode() unlikely(percpu_read(xen_lazy_mmu)) +@@ -176,13 +176,13 @@ static inline void arch_leave_lazy_mmu_m + static inline void arch_flush_lazy_mmu_mode(void) + { + if 
(arch_use_lazy_mmu_mode()) +- xen_multicall_flush(false); ++ xen_multicall_flush(); + } + #endif + + #else /* !CONFIG_XEN || MODULE */ + +-static inline void xen_multicall_flush(bool ignore) {} ++static inline void xen_multicall_flush(void) {} + #define arch_use_lazy_mmu_mode() false + #define xen_multi_update_va_mapping(...) ({ BUG(); -ENOSYS; }) + #define xen_multi_mmu_update(...) ({ BUG(); -ENOSYS; }) +@@ -356,4 +356,9 @@ MULTI_grant_table_op(multicall_entry_t * + + #define uvm_multi(cpumask) ((unsigned long)cpus_addr(cpumask) | UVMF_MULTI) + ++#ifdef LINUX ++/* drivers/staging/ use Windows-style types, including VOID */ ++#undef VOID ++#endif ++ + #endif /* __HYPERVISOR_H__ */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:33:07.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:33:45.000000000 +0100 @@ -1,8 +1,11 @@ #ifndef _ASM_X86_IRQ_VECTORS_H #define _ASM_X86_IRQ_VECTORS_H @@ -173,16 +239,16 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif @@ -11,7 +14,8 @@ #define CALL_FUNCTION_VECTOR 1 - #define CALL_FUNC_SINGLE_VECTOR 2 - #define SPIN_UNLOCK_VECTOR 3 + #define NMI_VECTOR 0x02 + #define CALL_FUNC_SINGLE_VECTOR 3 -#define NR_IPIS 4 +#define REBOOT_VECTOR 4 +#define NR_IPIS 5 /* * The maximum number of vectors supported by i386 processors ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:50:44.000000000 +0100 @@ -97,7 +97,8 @@ extern void pci_iommu_alloc(void); #define PCI_DMA_BUS_IS_PHYS 0 @@ -201,8 +267,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_NUMA /* Returns the node based on pci bus */ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 14:50:44.000000000 +0100 @@ -51,7 +51,13 @@ static inline void pte_free(struct mm_st __pte_free(pte); } @@ -249,8 +315,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif /* PAGETABLE_LEVELS > 3 */ #endif /* PAGETABLE_LEVELS > 2 */ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:50:44.000000000 +0100 @@ -2,6 +2,7 @@ #define _ASM_X86_PGTABLE_H @@ -268,7 +334,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
-@@ -265,10 +268,17 @@ static inline pgprot_t pgprot_modify(pgp +@@ -264,10 +267,17 @@ static inline pgprot_t pgprot_modify(pgp #define canon_pgprot(p) __pgprot(massage_pgprot(p)) @@ -288,7 +354,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * Certain new memtypes are not allowed with certain * requested memtype: * - request is uncached, return cannot be write-back -@@ -313,6 +323,11 @@ static inline int pte_present(pte_t a) +@@ -312,6 +322,11 @@ static inline int pte_present(pte_t a) return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } @@ -300,7 +366,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static inline int pmd_present(pmd_t pmd) { #if CONFIG_XEN_COMPAT <= 0x030002 -@@ -512,6 +527,8 @@ static inline int pgd_none(pgd_t pgd) +@@ -511,6 +526,8 @@ static inline int pgd_none(pgd_t pgd) #ifndef __ASSEMBLY__ @@ -309,8 +375,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* local pte updates need not use xchg for locking */ static inline pte_t xen_local_ptep_get_and_clear(pte_t *ptep, pte_t res) { ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:50:44.000000000 +0100 @@ -48,13 +48,17 @@ extern void set_pmd_pfn(unsigned long, u #endif @@ -331,8 +397,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) #else #define pte_offset_map(dir, address) \ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:50:44.000000000 +0100 @@ -33,10 +33,6 @@ extern pgd_t init_level4_pgt[]; extern void paging_init(void); @@ -365,8 +431,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #define __HAVE_ARCH_PTE_SAME #endif /* !__ASSEMBLY__ */ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pgtable_64_types.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pgtable_64_types.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64_types.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64_types.h 2011-02-01 14:50:44.000000000 +0100 @@ -51,11 +51,12 @@ typedef union { pteval_t pte; unsigned i #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) @@ -383,8 +449,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #define MODULES_VADDR _AC(0xffffffffa0000000, UL) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pgtable_types.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pgtable_types.h 2010-03-24 15:25:21.000000000 +0100 +--- 
head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_types.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_types.h 2011-02-01 14:50:44.000000000 +0100 @@ -18,7 +18,7 @@ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ @@ -422,9 +488,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:25:21.000000000 +0100 -@@ -136,7 +136,8 @@ struct cpuinfo_x86 { +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:45:53.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:46:07.000000000 +0100 +@@ -146,7 +146,8 @@ struct cpuinfo_x86 { extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; @@ -434,7 +500,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_SMP DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); -@@ -417,9 +418,6 @@ DECLARE_PER_CPU(unsigned long, stack_can +@@ -427,9 +428,6 @@ DECLARE_PER_CPU(unsigned long, stack_can extern unsigned int xstate_size; extern void free_thread_xstate(struct task_struct *); extern struct kmem_cache *task_xstate_cachep; @@ -444,7 +510,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct thread_struct { /* Cached TLS descriptors: */ -@@ -434,8 +432,12 @@ struct thread_struct { +@@ -444,8 +442,12 @@ struct thread_struct { unsigned short fsindex; unsigned short gsindex; #endif @@ -457,7 +523,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches unsigned long gs; /* Hardware debugging registers: */ unsigned long debugreg0; -@@ -464,14 +466,8 @@ struct thread_struct { +@@ -474,14 +476,8 @@ struct thread_struct { unsigned io_bitmap_max; /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ unsigned long debugctlmsr; @@ -473,7 +539,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches }; static inline unsigned long xen_get_debugreg(int regno) -@@ -741,6 +737,21 @@ static inline unsigned long get_debugctl +@@ -751,6 +747,21 @@ static inline unsigned long get_debugctl return debugctlmsr; } @@ -495,7 +561,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static inline void update_debugctlmsr(unsigned long debugctlmsr) { #ifndef CONFIG_X86_DEBUGCTLMSR -@@ -750,6 +761,18 @@ static inline void update_debugctlmsr(un +@@ -760,6 +771,18 @@ static inline void update_debugctlmsr(un wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); } @@ -514,7 +580,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * from system description table in BIOS. 
Mostly for MCA use, but * others may find it useful: -@@ -760,6 +783,7 @@ extern unsigned int BIOS_revision; +@@ -770,6 +793,7 @@ extern unsigned int BIOS_revision; /* Boot loader type from the setup header: */ extern int bootloader_type; @@ -522,7 +588,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches extern char ignore_fpu_irq; -@@ -820,7 +844,6 @@ static inline void spin_lock_prefetch(co +@@ -830,7 +854,6 @@ static inline void spin_lock_prefetch(co .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ @@ -530,9 +596,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } /* ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:25:21.000000000 +0100 -@@ -195,7 +195,7 @@ extern unsigned disabled_cpus __cpuinitd +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:50:44.000000000 +0100 +@@ -198,7 +198,7 @@ extern unsigned disabled_cpus __cpuinitd static inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ @@ -541,8 +607,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:50:44.000000000 +0100 @@ -432,4 +432,8 @@ static inline void __raw_write_unlock(ra #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() @@ -552,8 +618,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches +#define ARCH_HAS_SMP_MB_AFTER_LOCK + #endif /* _ASM_X86_SPINLOCK_H */ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:50:44.000000000 +0100 @@ -111,6 +111,6 @@ static inline void flush_tlb_kernel_rang flush_tlb_all(); } @@ -562,8 +628,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches +extern void zap_low_mappings(bool early); #endif /* _ASM_X86_TLBFLUSH_H */ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/xor.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/xor.h 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/xor.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/xor.h 2011-02-01 14:50:44.000000000 +0100 @@ -1,4 +1,7 @@ -#ifdef CONFIG_X86_32 +#ifdef CONFIG_KMEMCHECK @@ -573,9 +639,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches # include "../../asm/xor_32.h" #else # include "xor_64.h" ---- head-2010-05-12.orig/arch/x86/kernel/Makefile 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/Makefile 2010-03-24 
15:25:21.000000000 +0100 -@@ -137,6 +137,6 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-03-17.orig/arch/x86/kernel/Makefile 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/Makefile 2011-02-01 14:50:44.000000000 +0100 +@@ -128,6 +128,6 @@ ifeq ($(CONFIG_X86_64),y) endif disabled-obj-$(CONFIG_XEN) := %_uv.o crash.o early-quirks.o hpet.o i8253.o \ @@ -584,8 +650,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + i8259.o irqinit.o pci-swiotlb.o reboot.o smpboot.o tsc.o tsc_sync.o \ + uv_%.o vsmp_64.o disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += probe_roms_32.o ---- head-2010-05-12.orig/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -107,7 +107,7 @@ int acpi_save_state_mem(void) initial_gs = per_cpu_offset(smp_processor_id()); #endif @@ -595,17 +661,17 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif /* CONFIG_64BIT */ #endif ---- head-2010-05-12.orig/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:25:21.000000000 +0100 -@@ -59,6 +59,7 @@ +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:50:44.000000000 +0100 +@@ -51,6 +51,7 @@ + #include + #include #include - #include - #include +#include - #include - #include -@@ -145,12 +146,9 @@ struct irq_pin_list { + #include + +@@ -135,12 +136,9 @@ struct irq_pin_list { struct irq_pin_list *next; }; @@ -619,7 +685,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); -@@ -164,9 +162,6 @@ struct irq_cfg { +@@ -154,9 +152,6 @@ struct irq_cfg { unsigned move_cleanup_count; u8 vector; u8 move_in_progress : 1; @@ -629,7 +695,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches }; /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. 
*/ -@@ -198,16 +193,18 @@ int __init arch_early_irq_init(void) +@@ -188,16 +183,18 @@ int __init arch_early_irq_init(void) struct irq_cfg *cfg; struct irq_desc *desc; int count; @@ -650,7 +716,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (i < NR_IRQS_LEGACY) cpumask_setall(cfg[i].domain); } -@@ -228,12 +225,9 @@ static struct irq_cfg *irq_cfg(unsigned +@@ -218,12 +215,9 @@ static struct irq_cfg *irq_cfg(unsigned return cfg; } @@ -664,7 +730,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); if (cfg) { -@@ -254,13 +248,13 @@ static struct irq_cfg *get_one_free_irq_ +@@ -244,13 +238,13 @@ static struct irq_cfg *get_one_free_irq_ return cfg; } @@ -680,7 +746,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (!desc->chip_data) { printk(KERN_ERR "can not alloc irq_cfg\n"); BUG_ON(1); -@@ -270,10 +264,9 @@ int arch_init_chip_data(struct irq_desc +@@ -260,10 +254,9 @@ int arch_init_chip_data(struct irq_desc return 0; } @@ -693,7 +759,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { struct irq_pin_list *old_entry, *head, *tail, *entry; -@@ -282,7 +275,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_ +@@ -272,7 +265,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_ if (!old_entry) return; @@ -702,7 +768,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (!entry) return; -@@ -292,7 +285,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_ +@@ -282,7 +275,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_ tail = entry; old_entry = old_entry->next; while (old_entry) { @@ -711,7 +777,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (!entry) { entry = head; while (entry) { -@@ -332,12 +325,12 @@ static void free_irq_2_pin(struct irq_cf +@@ -322,12 +315,12 @@ static void free_irq_2_pin(struct irq_cf } void arch_init_copy_chip_data(struct irq_desc *old_desc, @@ -726,7 +792,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (!cfg) return; -@@ -348,7 +341,7 @@ void arch_init_copy_chip_data(struct irq +@@ -338,7 +331,7 @@ void arch_init_copy_chip_data(struct irq memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); @@ -735,7 +801,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } static void free_irq_cfg(struct irq_cfg *old_cfg) -@@ -372,19 +365,7 @@ void arch_free_chip_data(struct irq_desc +@@ -362,19 +355,7 @@ void arch_free_chip_data(struct irq_desc old_desc->chip_data = NULL; } } @@ -756,7 +822,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #else static struct irq_cfg *irq_cfg(unsigned int irq) -@@ -394,13 +375,6 @@ static struct irq_cfg *irq_cfg(unsigned +@@ -384,13 +365,6 @@ static struct irq_cfg *irq_cfg(unsigned #endif @@ -770,7 +836,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct io_apic { unsigned int index; unsigned int unused[3]; -@@ -532,7 +506,8 @@ static struct IO_APIC_route_entry ioapic +@@ -522,7 +496,8 @@ static struct IO_APIC_route_entry ioapic static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { @@ -780,7 +846,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches eu.entry = e; io_apic_write(apic, 0x11 + 2*pin, eu.w2); io_apic_write(apic, 0x10 + 2*pin, eu.w1); -@@ -563,132 +538,18 @@ static void ioapic_mask_entry(int 
apic, +@@ -553,132 +528,18 @@ static void ioapic_mask_entry(int apic, spin_unlock_irqrestore(&ioapic_lock, flags); } @@ -915,7 +981,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (!entry) { printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", apic, pin); -@@ -708,7 +569,7 @@ static void add_pin_to_irq_cpu(struct ir +@@ -698,7 +559,7 @@ static void add_pin_to_irq_cpu(struct ir entry = entry->next; } @@ -924,7 +990,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches entry = entry->next; entry->apic = apic; entry->pin = pin; -@@ -717,7 +578,7 @@ static void add_pin_to_irq_cpu(struct ir +@@ -707,7 +568,7 @@ static void add_pin_to_irq_cpu(struct ir /* * Reroute an IRQ to a different pin. */ @@ -933,7 +999,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int oldapic, int oldpin, int newapic, int newpin) { -@@ -737,7 +598,7 @@ static void __init replace_pin_at_irq_cp +@@ -727,7 +588,7 @@ static void __init replace_pin_at_irq_cp /* why? call replace before add? */ if (!replaced) @@ -942,16 +1008,16 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } static inline void io_apic_modify_irq(struct irq_cfg *cfg, -@@ -857,7 +718,7 @@ static void clear_IO_APIC (void) +@@ -847,7 +708,7 @@ static void clear_IO_APIC (void) clear_IO_APIC_pin(apic, pin); } #else -#define add_pin_to_irq_cpu(cfg, cpu, apic, pin) +#define add_pin_to_irq_node(cfg, node, apic, pin) - #endif /* CONFIG_XEN */ + #endif /* !CONFIG_XEN */ #ifdef CONFIG_X86_32 -@@ -898,7 +759,7 @@ static int __init ioapic_pirq_setup(char +@@ -888,7 +749,7 @@ static int __init ioapic_pirq_setup(char __setup("pirq=", ioapic_pirq_setup); #endif /* CONFIG_X86_32 */ @@ -960,7 +1026,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct IO_APIC_route_entry **alloc_ioapic_entries(void) { int apic; -@@ -996,20 +857,6 @@ int restore_IO_APIC_setup(struct IO_APIC +@@ -986,20 +847,6 @@ int restore_IO_APIC_setup(struct IO_APIC return 0; } @@ -981,7 +1047,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) { int apic; -@@ -1019,7 +866,7 @@ void free_ioapic_entries(struct IO_APIC_ +@@ -1009,7 +856,7 @@ void free_ioapic_entries(struct IO_APIC_ kfree(ioapic_entries); } @@ -990,7 +1056,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * Find the IRQ entry number of a certain pin. 
-@@ -1082,54 +929,6 @@ static int __init find_isa_irq_apic(int +@@ -1072,54 +919,6 @@ static int __init find_isa_irq_apic(int } #endif @@ -1045,7 +1111,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #if defined(CONFIG_EISA) || defined(CONFIG_MCA) /* * EISA Edge/Level control register, ELCR -@@ -1348,6 +1147,64 @@ static int pin_2_irq(int idx, int apic, +@@ -1338,6 +1137,64 @@ static int pin_2_irq(int idx, int apic, return irq; } @@ -1110,7 +1176,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifndef CONFIG_XEN void lock_vector_lock(void) { -@@ -1619,6 +1476,9 @@ int setup_ioapic_entry(int apic_id, int +@@ -1609,6 +1466,9 @@ int setup_ioapic_entry(int apic_id, int irte.vector = vector; irte.dest_id = IRTE_DEST(destination); @@ -1120,7 +1186,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches modify_irte(irq, &irte); ir_entry->index2 = (index >> 15) & 0x1; -@@ -1693,63 +1553,75 @@ static void setup_IO_APIC_irq(int apic_i +@@ -1684,63 +1544,75 @@ static void setup_IO_APIC_irq(int apic_i ioapic_write_entry(apic_id, pin, entry); } @@ -1236,7 +1302,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } if (notcon) -@@ -1917,36 +1789,30 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1908,36 +1780,30 @@ __apicdebuginit(void) print_IO_APIC(void return; } @@ -1283,7 +1349,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches smp_processor_id(), hard_smp_processor_id()); v = apic_read(APIC_ID); printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id()); -@@ -1987,11 +1853,11 @@ __apicdebuginit(void) print_local_APIC(v +@@ -1978,11 +1844,11 @@ __apicdebuginit(void) print_local_APIC(v printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); printk(KERN_DEBUG "... APIC ISR field:\n"); @@ -1298,7 +1364,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (APIC_INTEGRATED(ver)) { /* !82489DX */ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ -@@ -2028,6 +1894,18 @@ __apicdebuginit(void) print_local_APIC(v +@@ -2019,6 +1885,18 @@ __apicdebuginit(void) print_local_APIC(v printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); v = apic_read(APIC_TDCR); printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); @@ -1317,7 +1383,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches printk("\n"); } -@@ -2076,6 +1954,11 @@ __apicdebuginit(void) print_PIC(void) +@@ -2067,6 +1945,11 @@ __apicdebuginit(void) print_PIC(void) __apicdebuginit(int) print_all_ICs(void) { print_PIC(); @@ -1329,7 +1395,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches print_all_local_APICs(); print_IO_APIC(); -@@ -2199,7 +2082,9 @@ void disable_IO_APIC(void) +@@ -2188,7 +2071,9 @@ void disable_IO_APIC(void) /* * Use virtual wire A mode when interrupt remapping is enabled. */ @@ -1340,7 +1406,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #ifdef CONFIG_X86_32 -@@ -2438,7 +2323,119 @@ static int ioapic_retrigger_irq(unsigned +@@ -2427,7 +2312,119 @@ static int ioapic_retrigger_irq(unsigned * races. 
*/ @@ -1461,7 +1527,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_INTR_REMAP -@@ -2453,26 +2450,25 @@ static int ioapic_retrigger_irq(unsigned +@@ -2442,26 +2439,25 @@ static int ioapic_retrigger_irq(unsigned * Real vector that is used for interrupting cpu will be coming from * the interrupt-remapping table entry. */ @@ -1493,7 +1559,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); -@@ -2488,27 +2484,30 @@ migrate_ioapic_irq_desc(struct irq_desc +@@ -2477,27 +2473,30 @@ migrate_ioapic_irq_desc(struct irq_desc send_cleanup_vector(cfg); cpumask_copy(desc->affinity, mask); @@ -1529,7 +1595,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif -@@ -2570,86 +2569,19 @@ static void irq_complete_move(struct irq +@@ -2559,86 +2558,19 @@ static void irq_complete_move(struct irq struct irq_cfg *cfg = desc->chip_data; unsigned vector, me; @@ -1618,7 +1684,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static void ack_apic_edge(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); -@@ -2713,9 +2645,6 @@ static void ack_apic_level(unsigned int +@@ -2702,9 +2634,6 @@ static void ack_apic_level(unsigned int */ ack_APIC_irq(); @@ -1628,7 +1694,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* Now we can move and renable the irq */ if (unlikely(do_unmask_irq)) { /* Only migrate the irq if the ack has been received. -@@ -2762,22 +2691,50 @@ static void ack_apic_level(unsigned int +@@ -2751,22 +2680,50 @@ static void ack_apic_level(unsigned int } #ifdef CONFIG_INTR_REMAP @@ -1689,7 +1755,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif /* CONFIG_INTR_REMAP */ -@@ -2988,7 +2945,7 @@ static inline void __init check_timer(vo +@@ -2977,7 +2934,7 @@ static inline void __init check_timer(vo { struct irq_desc *desc = irq_to_desc(0); struct irq_cfg *cfg = desc->chip_data; @@ -1698,7 +1764,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int apic1, pin1, apic2, pin2; unsigned long flags; int no_pin1 = 0; -@@ -3054,7 +3011,7 @@ static inline void __init check_timer(vo +@@ -3043,7 +3000,7 @@ static inline void __init check_timer(vo * Ok, does IRQ0 through the IOAPIC work? 
*/ if (no_pin1) { @@ -1707,7 +1773,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); } else { /* for edge trigger, setup_IO_APIC_irq already -@@ -3091,7 +3048,7 @@ static inline void __init check_timer(vo +@@ -3080,7 +3037,7 @@ static inline void __init check_timer(vo /* * legacy devices should be connected to IO APIC #0 */ @@ -1716,7 +1782,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); enable_8259A_irq(0); if (timer_irq_works()) { -@@ -3321,14 +3278,13 @@ static int nr_irqs_gsi = NR_IRQS_LEGACY; +@@ -3310,14 +3267,13 @@ static int nr_irqs_gsi = NR_IRQS_LEGACY; /* * Dynamic irq allocate and deallocation */ @@ -1732,7 +1798,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct irq_desc *desc_new = NULL; irq = 0; -@@ -3337,7 +3293,7 @@ unsigned int create_irq_nr(unsigned int +@@ -3326,7 +3282,7 @@ unsigned int create_irq_nr(unsigned int spin_lock_irqsave(&vector_lock, flags); for (new = irq_want; new < nr_irqs; new++) { @@ -1741,7 +1807,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (!desc_new) { printk(KERN_INFO "can not get irq_desc for %d\n", new); continue; -@@ -3346,6 +3302,9 @@ unsigned int create_irq_nr(unsigned int +@@ -3335,6 +3291,9 @@ unsigned int create_irq_nr(unsigned int if (cfg_new->vector != 0) continue; @@ -1751,7 +1817,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) irq = new; break; -@@ -3363,11 +3322,12 @@ unsigned int create_irq_nr(unsigned int +@@ -3352,11 +3311,12 @@ unsigned int create_irq_nr(unsigned int int create_irq(void) { @@ -1765,7 +1831,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (irq == 0) irq = -1; -@@ -3433,6 +3393,9 @@ static int msi_compose_msg(struct pci_de +@@ -3422,6 +3382,9 @@ static int msi_compose_msg(struct pci_de irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); @@ -1775,7 +1841,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches modify_irte(irq, &irte); msg->address_hi = MSI_ADDR_BASE_HI; -@@ -3470,7 +3433,7 @@ static int msi_compose_msg(struct pci_de +@@ -3459,7 +3422,7 @@ static int msi_compose_msg(struct pci_de } #ifdef CONFIG_SMP @@ -1784,7 +1850,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; -@@ -3479,7 +3442,7 @@ static void set_msi_irq_affinity(unsigne +@@ -3468,7 +3431,7 @@ static void set_msi_irq_affinity(unsigne dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) @@ -1793,7 +1859,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches cfg = desc->chip_data; -@@ -3491,13 +3454,15 @@ static void set_msi_irq_affinity(unsigne +@@ -3480,13 +3443,15 @@ static void set_msi_irq_affinity(unsigne msg.address_lo |= MSI_ADDR_DEST_ID(dest); write_msi_msg_desc(desc, &msg); @@ -1810,7 +1876,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); -@@ -3506,11 +3471,11 @@ ir_set_msi_irq_affinity(unsigned int irq +@@ -3495,11 +3460,11 @@ ir_set_msi_irq_affinity(unsigned int irq struct irte irte; if (get_irte(irq, &irte)) @@ -1824,7 
+1890,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); -@@ -3527,6 +3492,8 @@ ir_set_msi_irq_affinity(unsigned int irq +@@ -3516,6 +3481,8 @@ ir_set_msi_irq_affinity(unsigned int irq */ if (cfg->move_in_progress) send_cleanup_vector(cfg); @@ -1833,7 +1899,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif -@@ -3622,15 +3589,17 @@ int arch_setup_msi_irqs(struct pci_dev * +@@ -3611,15 +3578,17 @@ int arch_setup_msi_irqs(struct pci_dev * unsigned int irq_want; struct intel_iommu *iommu = NULL; int index = 0; @@ -1852,7 +1918,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (irq == 0) return -1; irq_want = irq + 1; -@@ -3680,7 +3649,7 @@ void arch_teardown_msi_irq(unsigned int +@@ -3669,7 +3638,7 @@ void arch_teardown_msi_irq(unsigned int #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) #ifdef CONFIG_SMP @@ -1861,7 +1927,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; -@@ -3689,7 +3658,7 @@ static void dmar_msi_set_affinity(unsign +@@ -3678,7 +3647,7 @@ static void dmar_msi_set_affinity(unsign dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) @@ -1870,7 +1936,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches cfg = desc->chip_data; -@@ -3701,11 +3670,13 @@ static void dmar_msi_set_affinity(unsign +@@ -3690,11 +3659,13 @@ static void dmar_msi_set_affinity(unsign msg.address_lo |= MSI_ADDR_DEST_ID(dest); dmar_msi_write(irq, &msg); @@ -1885,7 +1951,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches .name = "DMAR_MSI", .unmask = dmar_msi_unmask, .mask = dmar_msi_mask, -@@ -3734,7 +3705,7 @@ int arch_setup_dmar_msi(unsigned int irq +@@ -3723,7 +3694,7 @@ int arch_setup_dmar_msi(unsigned int irq #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP @@ -1894,7 +1960,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; -@@ -3743,7 +3714,7 @@ static void hpet_msi_set_affinity(unsign +@@ -3732,7 +3703,7 @@ static void hpet_msi_set_affinity(unsign dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) @@ -1903,7 +1969,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches cfg = desc->chip_data; -@@ -3755,6 +3726,8 @@ static void hpet_msi_set_affinity(unsign +@@ -3744,6 +3715,8 @@ static void hpet_msi_set_affinity(unsign msg.address_lo |= MSI_ADDR_DEST_ID(dest); hpet_msi_write(irq, &msg); @@ -1912,7 +1978,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif /* CONFIG_SMP */ -@@ -3811,7 +3784,7 @@ static void target_ht_irq(unsigned int i +@@ -3800,7 +3773,7 @@ static void target_ht_irq(unsigned int i write_ht_irq_msg(irq, &msg); } @@ -1921,7 +1987,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; -@@ -3819,11 +3792,13 @@ static void set_ht_irq_affinity(unsigned +@@ -3808,11 +3781,13 @@ static void set_ht_irq_affinity(unsigned dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) @@ -1936,7 +2002,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif -@@ -3898,6 +3873,8 @@ int arch_enable_uv_irq(char *irq_name, u +@@ -3887,6 
+3862,8 @@ int arch_enable_uv_irq(char *irq_name, u unsigned long flags; int err; @@ -1945,7 +2011,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, eligible_cpu); -@@ -3911,19 +3888,20 @@ int arch_enable_uv_irq(char *irq_name, u +@@ -3900,19 +3877,20 @@ int arch_enable_uv_irq(char *irq_name, u mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; @@ -1975,7 +2041,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return irq; } -@@ -3937,10 +3915,10 @@ void arch_disable_uv_irq(int mmr_blade, +@@ -3926,10 +3904,10 @@ void arch_disable_uv_irq(int mmr_blade, struct uv_IO_APIC_route_entry *entry; int mmr_pnode; @@ -1988,7 +2054,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches entry->mask = 1; mmr_pnode = uv_blade_to_pnode(mmr_blade); -@@ -4006,6 +3984,78 @@ int __init arch_probe_nr_irqs(void) +@@ -3995,14 +3973,85 @@ int __init arch_probe_nr_irqs(void) #endif #endif /* CONFIG_XEN */ @@ -2067,15 +2133,25 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* -------------------------------------------------------------------------- ACPI-based IOAPIC Configuration -------------------------------------------------------------------------- */ -@@ -4088,6 +4138,7 @@ int __init io_apic_get_unique_id(int ioa + + #ifdef CONFIG_ACPI + +-#ifdef CONFIG_X86_32 +-#ifndef CONFIG_XEN ++#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) + int __init io_apic_get_unique_id(int ioapic, int apic_id) + { + union IO_APIC_reg_00 reg_00; +@@ -4076,7 +4125,7 @@ int __init io_apic_get_unique_id(int ioa return apic_id; } +-#endif /* !CONFIG_XEN */ +#endif int __init io_apic_get_version(int ioapic) { -@@ -4100,47 +4151,6 @@ int __init io_apic_get_version(int ioapi +@@ -4089,47 +4138,6 @@ int __init io_apic_get_version(int ioapi return reg_01.bits.version; } @@ -2123,7 +2199,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) { -@@ -4172,51 +4182,44 @@ int acpi_get_override_irq(int bus_irq, i +@@ -4161,51 +4169,44 @@ int acpi_get_override_irq(int bus_irq, i #ifdef CONFIG_SMP void __init setup_ioapic_dest(void) { @@ -2202,7 +2278,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif -@@ -4299,29 +4302,21 @@ fake_ioapic_page: +@@ -4288,29 +4289,21 @@ fake_ioapic_page: } } @@ -2235,8 +2311,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches - * IO APICS that are mapped in on a BAR in PCI space. 
*/ -late_initcall(ioapic_insert_resources); #endif /* !CONFIG_XEN */ ---- head-2010-05-12.orig/arch/x86/kernel/apic/probe_32-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/apic/probe_32-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/apic/probe_32-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/probe_32-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -20,23 +20,12 @@ #include #include @@ -2261,9 +2337,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static int xen_phys_pkg_id(int cpuid_apic, int index_msb) { ---- head-2010-05-12.orig/arch/x86/kernel/cpu/amd.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/cpu/amd.c 2010-03-24 15:25:21.000000000 +0100 -@@ -403,7 +403,7 @@ static void __cpuinit early_init_amd(str +--- head-2011-03-17.orig/arch/x86/kernel/cpu/amd.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/amd.c 2011-02-01 14:50:44.000000000 +0100 +@@ -415,7 +415,7 @@ static void __cpuinit early_init_amd(str (c->x86_model == 8 && c->x86_mask >= 8)) set_cpu_cap(c, X86_FEATURE_K6_MTRR); #endif @@ -2272,8 +2348,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* check CPU config space for extended APIC ID */ if (cpu_has_apic && c->x86 >= 0xf) { unsigned int val; ---- head-2010-05-12.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:07.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:17.000000000 +0100 @@ -13,6 +13,7 @@ #include @@ -2283,7 +2359,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #include #include @@ -66,7 +67,30 @@ void __init setup_cpu_local_masks(void) - alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); + #endif } -static const struct cpu_dev *this_cpu __cpuinitdata; @@ -2323,7 +2399,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, GDT_STACK_CANARY_INIT -@@ -309,7 +333,8 @@ static const char *__cpuinit table_looku +@@ -312,7 +336,8 @@ static const char *__cpuinit table_looku return NULL; /* Not found */ } @@ -2333,7 +2409,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches void load_percpu_segment(int cpu) { -@@ -358,29 +383,6 @@ void switch_to_new_gdt(int cpu) +@@ -361,29 +386,6 @@ void switch_to_new_gdt(int cpu) static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; @@ -2363,7 +2439,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static void __cpuinit get_model_name(struct cpuinfo_x86 *c) { unsigned int *v; -@@ -513,7 +515,6 @@ out: +@@ -516,7 +518,6 @@ out: static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; @@ -2371,7 +2447,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int i; for (i = 0; i < X86_VENDOR_NUM; i++) { -@@ -530,13 +531,9 @@ static void __cpuinit get_cpu_vendor(str +@@ -533,13 +534,9 @@ static void __cpuinit get_cpu_vendor(str } } @@ -2388,7 +2464,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches c->x86_vendor = X86_VENDOR_UNKNOWN; this_cpu = &default_cpu; -@@ -798,6 
+795,12 @@ static void __cpuinit identify_cpu(struc +@@ -805,6 +802,12 @@ static void __cpuinit identify_cpu(struc if (this_cpu->c_identify) this_cpu->c_identify(c); @@ -2401,7 +2477,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #if defined(CONFIG_X86_64) && !defined(CONFIG_XEN) c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); #endif -@@ -843,6 +846,16 @@ static void __cpuinit identify_cpu(struc +@@ -850,6 +853,16 @@ static void __cpuinit identify_cpu(struc #endif init_hypervisor(c); @@ -2418,7 +2494,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are -@@ -855,10 +868,6 @@ static void __cpuinit identify_cpu(struc +@@ -862,10 +875,6 @@ static void __cpuinit identify_cpu(struc boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } @@ -2429,7 +2505,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_X86_MCE /* Init Machine Check Exception if available. */ mcheck_init(c); -@@ -891,6 +900,7 @@ void __init identify_boot_cpu(void) +@@ -898,6 +907,7 @@ void __init identify_boot_cpu(void) #else vgetcpu_set_mode(); #endif @@ -2437,17 +2513,30 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) ---- head-2010-05-12.orig/arch/x86/kernel/cpu/mcheck/Makefile 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/cpu/mcheck/Makefile 2010-03-24 15:25:21.000000000 +0100 -@@ -9,5 +9,3 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += thres - obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o - +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mcheck/Makefile 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mcheck/Makefile 2011-02-01 14:50:44.000000000 +0100 +@@ -11,5 +11,3 @@ obj-$(CONFIG_X86_MCE_INJECT) += mce-inje obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o + + obj-$(CONFIG_ACPI_APEI) += mce-apei.o - -disabled-obj-$(CONFIG_XEN) := therm_throt.o ---- head-2010-05-12.orig/arch/x86/kernel/cpu/mcheck/mce.c 2010-04-15 09:44:40.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/cpu/mcheck/mce.c 2010-04-15 10:07:27.000000000 +0200 -@@ -476,7 +476,9 @@ static inline void mce_get_rip(struct mc +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mcheck/mce.c 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mcheck/mce.c 2011-02-01 14:50:44.000000000 +0100 +@@ -137,10 +137,12 @@ void mce_setup(struct mce *m) + m->time = get_seconds(); + m->cpuvendor = boot_cpu_data.x86_vendor; + m->cpuid = cpuid_eax(1); ++#ifndef CONFIG_XEN + #ifdef CONFIG_SMP + m->socketid = cpu_data(m->extcpu).phys_proc_id; + #endif + m->apicid = cpu_data(m->extcpu).initial_apicid; ++#endif + rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); + } + +@@ -483,7 +485,9 @@ static inline void mce_get_rip(struct mc */ asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs) { @@ -2457,7 +2546,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches exit_idle(); irq_enter(); mce_notify_irq(); -@@ -499,7 +501,7 @@ static void mce_report_event(struct pt_r +@@ -506,7 +510,7 @@ static void mce_report_event(struct pt_r return; } @@ -2466,7 +2555,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * Without APIC do not notify. The event will be picked * up eventually. 
-@@ -2110,7 +2112,7 @@ static __init int mcheck_init_device(voi +@@ -2167,7 +2171,7 @@ static __init int mcheck_init_device(voi #ifdef CONFIG_X86_XEN_MCE if (is_initial_xendomain()) { /* Register vIRQ handler for MCE LOG processing */ @@ -2475,8 +2564,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches printk(KERN_DEBUG "MCE: bind virq for DOM0 logging\n"); bind_virq_for_mce(); ---- head-2010-05-12.orig/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2011-02-01 14:50:44.000000000 +0100 @@ -7,12 +7,17 @@ #include #include @@ -2511,7 +2600,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + break; + } + WARN_ON_ONCE(!found); -+ m.socketid = g_physinfo[i].mc_chipid; ++ m.socketid = mc_global->mc_socketid; + m.cpu = m.extcpu = g_physinfo[i].mc_cpunr; + m.cpuvendor = (__u8)g_physinfo[i].mc_vendor; + @@ -2546,7 +2635,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, NULL); + ret = HYPERVISOR_mca(&mc_op); + if (ret) { -+ printk(KERN_ERR "MCE: Failed to get physical CPU count\n"); ++ pr_err("MCE: Failed to get physical CPU count\n"); + kfree(g_mi); + return ret; + } @@ -2561,7 +2650,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo); + ret = HYPERVISOR_mca(&mc_op); + if (ret) { -+ printk(KERN_ERR "MCE: Failed to get physical CPUs' info\n"); ++ pr_err("MCE: Failed to get physical CPUs' info\n"); + kfree(g_mi); + kfree(g_physinfo); + return ret; @@ -2572,9 +2661,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches - g_mi = kmalloc(sizeof(struct mc_info), GFP_KERNEL); - if (ret < 0) -- printk(KERN_ERR "MCE_DOM0_LOG: bind_virq for DOM0 failed\n"); +- pr_err("MCE_DOM0_LOG: bind_virq for DOM0 failed\n"); + if (ret < 0) { -+ printk(KERN_ERR "MCE: Failed to bind vIRQ for Dom0\n"); ++ pr_err("MCE: Failed to bind vIRQ for Dom0\n"); + kfree(g_mi); + kfree(g_physinfo); + return ret; @@ -2586,8 +2675,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + return 0; } ---- head-2010-05-12.orig/arch/x86/kernel/e820-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/e820-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -659,7 +659,7 @@ __init int e820_search_gap(unsigned long */ __init void e820_setup_gap(void) @@ -2680,8 +2769,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #undef e820 ---- head-2010-05-12.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:50:44.000000000 +0100 @@ -48,7 +48,6 @@ #include #include @@ -2827,9 +2916,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches CFI_ENDPROC 
nmi_stack_fixup: ---- head-2010-05-12.orig/arch/x86/kernel/entry_64.S 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/entry_64.S 2010-03-24 15:25:21.000000000 +0100 -@@ -1410,7 +1410,7 @@ END(kdb_call) +--- head-2011-03-17.orig/arch/x86/kernel/entry_64.S 2011-02-16 16:02:30.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64.S 2011-02-16 16:02:54.000000000 +0100 +@@ -1363,7 +1363,7 @@ apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ paranoidzeroentry_ist debug do_debug DEBUG_STACK paranoidzeroentry_ist int3 do_int3 DEBUG_STACK paranoiderrorentry stack_segment do_stack_segment @@ -2838,8 +2927,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches zeroentry xen_debug do_debug zeroentry xen_int3 do_int3 errorentry xen_stack_segment do_stack_segment ---- head-2010-05-12.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:50:44.000000000 +0100 @@ -139,6 +139,7 @@ ENTRY(ftrace_graph_caller) leaq 8(%rbp), %rdi @@ -2927,9 +3016,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif #ifndef CONFIG_XEN ---- head-2010-05-12.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/head_32-xen.S 2010-03-24 15:25:21.000000000 +0100 -@@ -120,12 +120,6 @@ ENTRY(hypercall_page) +--- head-2011-03-17.orig/arch/x86/kernel/head_32-xen.S 2011-03-03 16:23:08.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_32-xen.S 2011-03-03 16:23:25.000000000 +0100 +@@ -118,12 +118,6 @@ ENTRY(hypercall_page) CFI_ENDPROC /* @@ -2942,8 +3031,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * BSS section */ .section ".bss.page_aligned","wa" ---- head-2010-05-12.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/head_64-xen.S 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-01 14:50:44.000000000 +0100 @@ -15,7 +15,6 @@ #include #include @@ -2952,8 +3041,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #include #include #include ---- head-2010-05-12.orig/arch/x86/kernel/init_task.c 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/init_task.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/init_task.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/init_task.c 2011-02-01 14:50:44.000000000 +0100 @@ -31,6 +31,7 @@ union thread_union init_thread_union __i struct task_struct init_task = INIT_TASK(init_task); EXPORT_SYMBOL(init_task); @@ -2968,8 +3057,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; - +#endif ---- head-2010-05-12.orig/arch/x86/kernel/irq-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/irq-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/irq-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -12,6 +12,8 @@ #include 
#include @@ -3017,8 +3106,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif #ifndef CONFIG_XEN if (generic_interrupt_extension) { -@@ -90,17 +100,27 @@ static int show_other_interrupts(struct - seq_printf(p, " TLB shootdowns\n"); +@@ -95,17 +105,27 @@ static int show_other_interrupts(struct + seq_printf(p, " Spinlock wakeups\n"); #endif #endif -#ifdef CONFIG_X86_MCE @@ -3048,7 +3137,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) -@@ -172,6 +192,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu) +@@ -177,6 +197,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu) #ifdef CONFIG_X86_LOCAL_APIC sum += irq_stats(cpu)->apic_timer_irqs; sum += irq_stats(cpu)->irq_spurious_count; @@ -3057,8 +3146,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif #ifndef CONFIG_XEN if (generic_interrupt_extension) -@@ -184,11 +206,15 @@ u64 arch_irq_stat_cpu(unsigned int cpu) - sum += irq_stats(cpu)->irq_tlb_count; +@@ -191,11 +213,15 @@ u64 arch_irq_stat_cpu(unsigned int cpu) + sum += irq_stats(cpu)->irq_lock_count; #endif #endif -#ifdef CONFIG_X86_MCE @@ -3075,7 +3164,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif return sum; } -@@ -224,14 +250,11 @@ unsigned int __irq_entry do_IRQ(struct p +@@ -231,14 +257,11 @@ unsigned int __irq_entry do_IRQ(struct p irq = __get_cpu_var(vector_irq)[vector]; if (!handle_irq(irq, regs)) { @@ -3093,8 +3182,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } irq_exit(); ---- head-2010-05-12.orig/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -22,27 +22,21 @@ * 2 of the License, or (at your option) any later version. 
*/ @@ -3251,8 +3340,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches - -module_init(microcode_init); module_exit(microcode_exit); ---- head-2010-05-12.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -17,6 +17,7 @@ #include #include @@ -3261,7 +3350,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #include #include -@@ -890,24 +891,17 @@ static +@@ -904,24 +905,17 @@ static inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} #endif /* CONFIG_X86_IO_APIC */ @@ -3294,7 +3383,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } static int __init replace_intsrc_all(struct mpc_table *mpc, -@@ -966,7 +960,7 @@ static int __init replace_intsrc_all(st +@@ -980,7 +974,7 @@ static int __init replace_intsrc_all(st } else { struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; count += sizeof(struct mpc_intsrc); @@ -3303,7 +3392,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches goto out; assign_to_mpc_intsrc(&mp_irqs[i], m); mpc->length = count; -@@ -983,11 +977,14 @@ out: +@@ -997,11 +991,14 @@ out: return 0; } @@ -3319,7 +3408,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return 0; } early_param("update_mptable", update_mptable_setup); -@@ -1000,6 +997,9 @@ static int __initdata alloc_mptable; +@@ -1014,6 +1011,9 @@ static int __initdata alloc_mptable; static int __init parse_alloc_mptable_opt(char *p) { enable_update_mptable = 1; @@ -3329,8 +3418,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches alloc_mptable = 1; if (!p) return 0; ---- head-2010-05-12.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -32,6 +32,8 @@ int no_iommu __read_mostly; /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly = 0; @@ -3360,8 +3449,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } /* Must execute after PCI subsystem */ fs_initcall(pci_iommu_init); ---- head-2010-05-12.orig/arch/x86/kernel/process-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/process-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:06:40.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:07:25.000000000 +0100 @@ -8,12 +8,15 @@ #include #include @@ -3405,7 +3494,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } void flush_thread(void) -@@ -500,16 +503,12 @@ static void c1e_idle(void) +@@ -471,16 +474,12 @@ static void c1e_idle(void) if (!cpumask_test_cpu(cpu, c1e_mask)) { cpumask_set_cpu(cpu, c1e_mask); /* @@ -3423,7 +3512,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); -@@ -604,3 +603,16 @@ static int __init 
idle_setup(char *str) +@@ -575,3 +574,16 @@ static int __init idle_setup(char *str) } early_param("idle", idle_setup); @@ -3440,8 +3529,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +} + ---- head-2010-05-12.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/process_32-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:37:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:37:43.000000000 +0100 @@ -9,8 +9,6 @@ * This file handles the architecture-dependent parts of process handling.. */ @@ -3459,7 +3548,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #include #include #include -@@ -299,7 +296,8 @@ int copy_thread(unsigned long clone_flag +@@ -297,7 +294,8 @@ int copy_thread(unsigned long clone_flag p->thread.io_bitmap_max = 0; } @@ -3469,7 +3558,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR); p->thread.debugctlmsr = 0; -@@ -470,7 +468,7 @@ __switch_to(struct task_struct *prev_p, +@@ -468,7 +466,7 @@ __switch_to(struct task_struct *prev_p, * done before math_state_restore, so the TS bit is up * to date. */ @@ -3478,7 +3567,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* If the task has used fpu the last 5 timeslices, just do a full * restore of the math state immediately to avoid the trap; the -@@ -560,15 +558,3 @@ unsigned long get_wchan(struct task_stru +@@ -558,15 +556,3 @@ unsigned long get_wchan(struct task_stru return 0; } @@ -3494,8 +3583,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches - unsigned long range_end = mm->brk + 0x02000000; - return randomize_range(mm->brk, range_end, 0) ? : mm->brk; -} ---- head-2010-05-12.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/process_64-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:37:17.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:37:47.000000000 +0100 @@ -17,8 +17,6 @@ * This file handles the architecture-dependent parts of process handling.. */ @@ -3513,7 +3602,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #include #include #include -@@ -348,7 +345,8 @@ int copy_thread(unsigned long clone_flag +@@ -344,7 +341,8 @@ int copy_thread(unsigned long clone_flag } p->thread.iopl = current->thread.iopl; @@ -3523,7 +3612,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR); p->thread.debugctlmsr = 0; -@@ -510,7 +508,7 @@ __switch_to(struct task_struct *prev_p, +@@ -506,7 +504,7 @@ __switch_to(struct task_struct *prev_p, * done before math_state_restore, so the TS bit is up * to date. */ @@ -3532,7 +3621,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * Switch FS and GS. 
-@@ -727,15 +725,3 @@ long sys_arch_prctl(int code, unsigned l +@@ -723,15 +721,3 @@ long sys_arch_prctl(int code, unsigned l return do_arch_prctl(current, code, addr); } @@ -3548,53 +3637,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches - unsigned long range_end = mm->brk + 0x02000000; - return randomize_range(mm->brk, range_end, 0) ? : mm->brk; -} ---- head-2010-05-12.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/quirks-xen.c 2010-03-24 15:25:21.000000000 +0100 -@@ -492,5 +492,42 @@ void force_hpet_resume(void) - break; - } - } -+#endif -+ -+#if defined(CONFIG_PCI) && defined(CONFIG_NUMA) -+/* Set correct numa_node information for AMD NB functions */ -+static void __init quirk_amd_nb_node(struct pci_dev *dev) -+{ -+ struct pci_dev *nb_ht; -+ unsigned int devfn; -+ u32 val; -+ -+ devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); -+ nb_ht = pci_get_slot(dev->bus, devfn); -+ if (!nb_ht) -+ return; -+ -+ pci_read_config_dword(nb_ht, 0x60, &val); -+ set_dev_node(&dev->dev, val & 7); -+ pci_dev_put(dev); -+} - -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC, -+ quirk_amd_nb_node); -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, -+ quirk_amd_nb_node); - #endif ---- head-2010-05-12.orig/arch/x86/kernel/setup-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/setup-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:22:49.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:23:32.000000000 +0100 @@ -142,6 +142,14 @@ EXPORT_SYMBOL(xen_start_info); #define ARCH_SETUP #endif @@ -3730,7 +3774,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif {} }; -@@ -785,6 +818,12 @@ void __init setup_arch(char **cmdline_p) +@@ -788,6 +821,12 @@ void __init setup_arch(char **cmdline_p) #endif saved_video_mode = boot_params.hdr.vid_mode; bootloader_type = boot_params.hdr.type_of_loader; @@ -3743,7 +3787,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_BLK_DEV_RAM rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; -@@ -967,14 +1006,22 @@ void __init setup_arch(char **cmdline_p) +@@ -970,14 +1009,22 @@ void __init setup_arch(char **cmdline_p) max_low_pfn = max_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; @@ -3791,17 +3835,15 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * x86_quirk_intr_init - post gate setup interrupt initialisation * * Description: ---- head-2010-05-12.orig/arch/x86/kernel/smp-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/smp-xen.c 2010-03-24 15:25:21.000000000 +0100 -@@ 
-135,11 +135,38 @@ void xen_send_call_func_ipi(const struct +--- head-2011-03-17.orig/arch/x86/kernel/smp-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/smp-xen.c 2011-02-01 14:50:44.000000000 +0100 +@@ -135,11 +135,36 @@ void xen_send_call_func_ipi(const struct * this function calls the 'stop' function on all other CPUs in the system. */ +irqreturn_t smp_reboot_interrupt(int irq, void *dev_id) +{ -+ irq_enter(); + stop_this_cpu(NULL); -+ irq_exit(); + + return IRQ_HANDLED; +} @@ -3833,8 +3875,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches local_irq_save(flags); disable_all_local_evtchn(); local_irq_restore(flags); ---- head-2010-05-12.orig/arch/x86/kernel/traps-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/traps-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -45,6 +45,7 @@ #include #endif @@ -3919,8 +3961,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ tsk->fpu_counter++; } ---- head-2010-05-12.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -132,15 +132,7 @@ static __always_inline void do_vgettimeo return; } @@ -3937,8 +3979,28 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches base = __vsyscall_gtod_data.clock.cycle_last; mask = __vsyscall_gtod_data.clock.mask; mult = __vsyscall_gtod_data.clock.mult; ---- head-2010-05-12.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/fault-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/dump_pagetables-xen.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/dump_pagetables-xen.c 2011-02-01 14:50:44.000000000 +0100 +@@ -173,13 +173,14 @@ static void note_page(struct seq_file *m + st->current_address >= st->marker[1].start_address) { + const char *unit = units; + unsigned long delta; ++ int width = sizeof(unsigned long) * 2; + + /* + * Now print the actual finished series + */ +- seq_printf(m, "0x%p-0x%p ", +- (void *)st->start_address, +- (void *)st->current_address); ++ seq_printf(m, "0x%0*lx-0x%0*lx ", ++ width, st->start_address, ++ width, st->current_address); + + delta = (st->current_address - st->start_address) >> 10; + while (!(delta & 1023) && unit[1]) { +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -3,40 +3,18 @@ * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. 
* Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar @@ -3992,7 +4054,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * Page fault error code bits: -@@ -225,10 +203,7 @@ static inline pmd_t *vmalloc_sync_one(pg +@@ -228,10 +206,7 @@ static inline pmd_t *vmalloc_sync_one(pg if (!pmd_present(*pmd_k)) return NULL; @@ -4004,7 +4066,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #if CONFIG_XEN_COMPAT > 0x030002 set_pmd(pmd, *pmd_k); #else -@@ -238,10 +213,8 @@ static inline pmd_t *vmalloc_sync_one(pg +@@ -241,10 +216,8 @@ static inline pmd_t *vmalloc_sync_one(pg */ set_pmd(pmd, __pmd(pmd_val(*pmd_k))); #endif @@ -4016,7 +4078,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return pmd_k; } -@@ -463,10 +436,11 @@ static noinline int vmalloc_fault(unsign +@@ -474,10 +447,11 @@ static noinline int vmalloc_fault(unsign } static const char errata93_warning[] = @@ -4032,7 +4094,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * No vm86 mode in 64-bit mode: -@@ -551,8 +525,6 @@ bad: +@@ -562,8 +536,6 @@ bad: static int is_errata93(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_64 @@ -4041,7 +4103,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (address != regs->ip) return 0; -@@ -562,10 +534,7 @@ static int is_errata93(struct pt_regs *r +@@ -573,10 +545,7 @@ static int is_errata93(struct pt_regs *r address |= 0xffffffffUL << 32; if ((address >= (u64)_stext && address <= (u64)_etext) || (address >= MODULES_VADDR && address <= MODULES_END)) { @@ -4053,7 +4115,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches regs->ip = address; return 1; } -@@ -738,7 +707,7 @@ show_signal_msg(struct pt_regs *regs, un +@@ -749,7 +718,7 @@ show_signal_msg(struct pt_regs *regs, un if (!printk_ratelimit()) return; @@ -4062,7 +4124,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, tsk->comm, task_pid_nr(tsk), address, (void *)regs->ip, (void *)regs->sp, error_code); -@@ -1000,11 +969,17 @@ do_page_fault(struct pt_regs *regs, unsi +@@ -1011,11 +980,17 @@ do_page_fault(struct pt_regs *regs, unsi tsk = current; mm = tsk->mm; @@ -4082,7 +4144,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (unlikely(kmmio_fault(regs, address))) return; -@@ -1033,9 +1008,13 @@ do_page_fault(struct pt_regs *regs, unsi +@@ -1044,9 +1019,13 @@ do_page_fault(struct pt_regs *regs, unsi return; } @@ -4099,7 +4161,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* Can handle a stale RO->RW TLB: */ if (spurious_fault(error_code, address)) -@@ -1074,6 +1053,8 @@ do_page_fault(struct pt_regs *regs, unsi +@@ -1085,6 +1064,8 @@ do_page_fault(struct pt_regs *regs, unsi if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); @@ -4108,7 +4170,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: -@@ -1160,17 +1141,22 @@ good_area: +@@ -1171,17 +1152,22 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault: */ @@ -4134,8 +4196,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches check_v8086_mode(regs, address, tsk); ---- head-2010-05-12.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -44,7 +44,6 @@ void *kmap_atomic_prot(struct page *page vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); @@ -4160,8 +4222,21 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_HIGHPTE EXPORT_SYMBOL(kmap_atomic_to_page); #endif ---- head-2010-05-12.orig/arch/x86/mm/init-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/init-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-01 14:50:44.000000000 +0100 +@@ -116,8 +116,8 @@ static int _xen_multicall_flush(bool ret + return 0; + } + +-void xen_multicall_flush(bool force) { +- if (force || use_lazy_mmu_mode()) ++void xen_multicall_flush(void) { ++ if (use_lazy_mmu_mode()) + _xen_multicall_flush(false); + } + +--- head-2011-03-17.orig/arch/x86/mm/init-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -1,3 +1,4 @@ +#include #include @@ -4302,8 +4377,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (use_gbpages) page_size_mask |= 1 << PG_LEVEL_1G; ---- head-2010-05-12.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/init_32-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -52,12 +52,9 @@ #include #include @@ -4433,8 +4508,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches 
SetPagePinned(virt_to_page(init_mm.pgd)); } ---- head-2010-05-12.orig/arch/x86/mm/init_64-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/init_64-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -56,21 +56,11 @@ #include @@ -4565,8 +4640,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #endif #ifndef CONFIG_XEN ---- head-2010-05-12.orig/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -84,7 +84,6 @@ iounmap_atomic(void *kvaddr, enum km_typ if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) kpte_clear_flush(kmap_pte-idx, vaddr); @@ -4575,8 +4650,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches pagefault_enable(); } EXPORT_SYMBOL_GPL(iounmap_atomic); ---- head-2010-05-12.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/pageattr-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -11,6 +11,7 @@ #include #include @@ -4801,8 +4876,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches end = start + PAGE_SIZE; free_memtype(start, end); } ---- head-2010-05-12.orig/arch/x86/mm/pat-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/pat-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pat-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -639,7 +639,8 @@ static int reserve_pfn_range(u64 paddr, return ret; @@ -4813,8 +4888,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches free_memtype(paddr, paddr + size); printk(KERN_ERR "%s:%d map pfn expected mapping type %s" " for %Lx-%Lx, got %s\n", ---- head-2010-05-12.orig/arch/x86/mm/pgtable-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/pgtable-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -8,9 +8,11 @@ #include #include @@ -4884,8 +4959,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (pgd == NULL) goto out; ---- head-2010-05-12.orig/arch/x86/pci/i386.c 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/arch/x86/pci/i386.c 2010-05-12 09:02:26.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/pci/i386.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/i386.c 2011-02-01 14:50:44.000000000 +0100 @@ -239,12 +239,14 @@ void __init pcibios_resource_survey(void pcibios_allocate_resources(1); @@ -4901,8 +4976,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } /** ---- head-2010-05-12.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/arch/x86/pci/irq-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- 
head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -895,6 +895,9 @@ static int pcibios_lookup_irq(struct pci return 0; } @@ -5047,8 +5122,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } else if (pci_probe & PCI_BIOS_IRQ_SCAN) msg = ""; else ---- head-2010-05-12.orig/arch/x86/pci/pcifront.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/arch/x86/pci/pcifront.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/pci/pcifront.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/pcifront.c 2011-02-01 14:50:44.000000000 +0100 @@ -6,6 +6,7 @@ */ #include @@ -5057,16 +5132,17 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #include #include #include -@@ -15,6 +16,7 @@ static int pcifront_enable_irq(struct pc +@@ -15,6 +16,8 @@ static int pcifront_enable_irq(struct pc { u8 irq; pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); -+ irq_to_desc_alloc_node(irq, numa_node_id()); ++ if (!irq_to_desc_alloc_node(irq, numa_node_id())) ++ return -ENOMEM; evtchn_register_pirq(irq); dev->irq = irq; ---- head-2010-05-12.orig/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:50:44.000000000 +0100 @@ -377,6 +377,8 @@ int arch_setup_additional_pages(struct l } } @@ -5091,9 +5167,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches up_write(&mm->mmap_sem); return ret; ---- head-2010-05-12.orig/drivers/acpi/processor_driver.c 2010-04-15 10:05:23.000000000 +0200 -+++ head-2010-05-12/drivers/acpi/processor_driver.c 2010-04-15 10:07:40.000000000 +0200 -@@ -457,7 +457,14 @@ static int acpi_processor_get_info(struc +--- head-2011-03-17.orig/drivers/acpi/processor_driver.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_driver.c 2011-02-01 14:50:44.000000000 +0100 +@@ -340,7 +340,14 @@ static int acpi_processor_get_info(struc * generated as the following format: * CPU+CPU ID. 
*/ @@ -5109,9 +5185,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, pr->acpi_id)); ---- head-2010-05-12.orig/drivers/char/agp/intel-agp.c 2010-04-15 09:52:07.000000000 +0200 -+++ head-2010-05-12/drivers/char/agp/intel-agp.c 2010-04-15 10:07:46.000000000 +0200 -@@ -607,7 +607,11 @@ static struct agp_memory *alloc_agpphysm +--- head-2011-03-17.orig/drivers/char/agp/intel-gtt.c 2011-03-11 10:53:08.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/intel-gtt.c 2011-03-11 11:00:05.000000000 +0100 +@@ -282,7 +282,11 @@ static struct agp_memory *alloc_agpphysm new->page_count = pg_count; new->num_scratch_pages = pg_count; new->type = AGP_PHYS_MEMORY; @@ -5123,19 +5199,19 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return new; } ---- head-2010-05-12.orig/drivers/edac/Kconfig 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/drivers/edac/Kconfig 2010-03-24 15:25:21.000000000 +0100 -@@ -72,6 +72,7 @@ config EDAC_MM_EDAC +--- head-2011-03-17.orig/drivers/edac/Kconfig 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/edac/Kconfig 2011-02-01 14:50:44.000000000 +0100 +@@ -77,6 +77,7 @@ config EDAC_MCE config EDAC_AMD64 - tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h" - depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE + tristate "AMD64 (Opteron, Athlon64) K8, F10h" + depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE + depends on !XEN help - Support for error detection and correction on the AMD 64 - Families of Memory Controllers (K8, F10h and F11h) ---- head-2010-05-12.orig/drivers/gpu/drm/ttm/ttm_bo.c 2010-05-12 08:46:08.000000000 +0200 -+++ head-2010-05-12/drivers/gpu/drm/ttm/ttm_bo.c 2010-05-07 11:30:47.000000000 +0200 -@@ -1404,6 +1404,14 @@ int ttm_bo_global_init(struct ttm_global + Support for error detection and correction of DRAM ECC errors on + the AMD64 families of memory controllers (K8 and F10h) +--- head-2011-03-17.orig/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/ttm/ttm_bo.c 2011-02-01 14:50:44.000000000 +0100 +@@ -1440,6 +1440,14 @@ int ttm_bo_global_init(struct drm_global ret = -ENOMEM; goto out_no_drp; } @@ -5150,62 +5226,25 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches INIT_LIST_HEAD(&glob->swap_lru); INIT_LIST_HEAD(&glob->device_list); ---- head-2010-05-12.orig/drivers/gpu/drm/ttm/ttm_bo_util.c 2010-04-15 09:29:05.000000000 +0200 -+++ head-2010-05-12/drivers/gpu/drm/ttm/ttm_bo_util.c 2010-05-05 14:59:20.000000000 +0200 -@@ -519,6 +519,10 @@ int ttm_bo_pfn_prot(struct ttm_buffer_ob - PAGE_SHIFT)); - *prot = (mem->placement & TTM_PL_FLAG_CACHED) ? 
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL); -+#if defined(CONFIG_XEN) && defined(_PAGE_IOMAP) -+ if (bus_size != 0) -+ pgprot_val(*prot) |= _PAGE_IOMAP; -+#endif - - return 0; - } ---- head-2010-05-12.orig/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-02-24 19:52:17.000000000 +0100 -+++ head-2010-05-12/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-05-05 14:55:27.000000000 +0200 -@@ -158,6 +158,9 @@ static int ttm_bo_vm_fault(struct vm_are - if (is_iomem) { +--- head-2011-03-17.orig/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-02 11:54:22.000000000 +0100 +@@ -169,7 +169,13 @@ static int ttm_bo_vm_fault(struct vm_are + if (bo->mem.bus.is_iomem) { vma->vm_page_prot = ttm_io_prot(bo->mem.placement, vma->vm_page_prot); +#if defined(CONFIG_XEN) && defined(_PAGE_IOMAP) + pgprot_val(vma->vm_page_prot) |= _PAGE_IOMAP; +#endif } else { ++#if defined(CONFIG_XEN) && defined(_PAGE_IOMAP) ++ pgprot_val(vma->vm_page_prot) &= ~_PAGE_IOMAP; ++#endif ttm = bo->ttm; vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ? ---- head-2010-05-12.orig/drivers/gpu/drm/ttm/ttm_tt.c 2010-04-15 09:29:05.000000000 +0200 -+++ head-2010-05-12/drivers/gpu/drm/ttm/ttm_tt.c 2010-05-06 14:33:11.000000000 +0200 -@@ -68,7 +68,25 @@ static struct page *ttm_tt_alloc_page(un - else - gfp_flags |= __GFP_HIGHMEM; - -+#ifndef CONFIG_XEN - return alloc_page(gfp_flags); -+#else -+ { -+ struct page *page = alloc_page(gfp_flags); -+ -+ if (page && (page_flags & TTM_PAGE_FLAG_DMA32)) { -+ int ret = xen_limit_pages_to_max_mfn(page, 0, 32); -+ -+ if (ret) -+ printk(KERN_WARNING TTM_PFX -+ "Error restricting pfn %lx: %d\n", -+ page_to_pfn(page), ret); -+ else if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) -+ clear_page(page_address(page)); -+ } -+ return page; -+ } -+#endif - } - - static void ttm_tt_free_user_pages(struct ttm_tt *ttm) ---- head-2010-05-12.orig/drivers/pci/msi-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/pci/msi-xen.c 2010-03-24 15:25:21.000000000 +0100 -@@ -95,22 +95,17 @@ void arch_teardown_msi_irqs(struct pci_d + vm_get_page_prot(vma->vm_flags) : +--- head-2011-03-17.orig/drivers/pci/msi-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/pci/msi-xen.c 2011-02-01 14:50:44.000000000 +0100 +@@ -54,22 +54,17 @@ int arch_msi_check_device(struct pci_dev } #endif @@ -5235,7 +5274,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } static void msix_set_enable(struct pci_dev *dev, int enable) -@@ -335,8 +330,11 @@ void pci_restore_msi_state(struct pci_de +@@ -294,8 +289,11 @@ void pci_restore_msi_state(struct pci_de return; pci_intx_for_msi(dev, 0); @@ -5249,7 +5288,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (dev->msix_enabled) msix_set_enable(dev, 0); -@@ -363,9 +361,9 @@ static int msi_capability_init(struct pc +@@ -322,9 +320,9 @@ static int msi_capability_init(struct pc int pos, pirq; u16 control; @@ -5261,7 +5300,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches pci_read_config_word(dev, msi_control_reg(pos), &control); WARN_ON(nvec > 1); /* XXX */ -@@ -375,7 +373,7 @@ static int msi_capability_init(struct pc +@@ -334,7 +332,7 @@ static int msi_capability_init(struct pc /* Set MSI enabled bits */ pci_intx_for_msi(dev, 0); @@ -5270,7 +5309,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches dev->msi_enabled = 1; dev->irq = pirq; -@@ -397,6 +395,7 @@ 
static int msix_capability_init(struct p +@@ -356,6 +354,7 @@ static int msix_capability_init(struct p { u64 table_base; int pirq, i, j, mapped, pos; @@ -5278,7 +5317,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev); struct msi_pirq_entry *pirq_entry; -@@ -406,11 +405,24 @@ static int msix_capability_init(struct p +@@ -365,11 +364,24 @@ static int msix_capability_init(struct p msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); @@ -5304,7 +5343,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches for (i = 0; i < nvec; i++) { mapped = 0; list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) { -@@ -447,10 +459,13 @@ static int msix_capability_init(struct p +@@ -406,10 +418,13 @@ static int msix_capability_init(struct p return avail; } @@ -5319,7 +5358,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return 0; } -@@ -572,7 +587,7 @@ EXPORT_SYMBOL(pci_enable_msi_block); +@@ -531,7 +546,7 @@ EXPORT_SYMBOL(pci_enable_msi_block); extern void pci_frontend_disable_msi(struct pci_dev* dev); void pci_msi_shutdown(struct pci_dev *dev) { @@ -5328,7 +5367,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev); if (!pci_msi_enable || !dev || !dev->msi_enabled) -@@ -594,7 +609,8 @@ void pci_msi_shutdown(struct pci_dev *de +@@ -553,7 +568,8 @@ void pci_msi_shutdown(struct pci_dev *de msi_unmap_pirq(dev, pirq); /* Disable MSI mode */ @@ -5338,7 +5377,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; } -@@ -634,8 +650,8 @@ int pci_msix_table_size(struct pci_dev * +@@ -593,8 +609,8 @@ int pci_msix_table_size(struct pci_dev * * indicates the successful configuration of MSI-X capability structure * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 
* Or a return of > 0 indicates that driver request is exceeding the number @@ -5349,7 +5388,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches **/ extern int pci_frontend_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); -@@ -691,7 +707,7 @@ int pci_enable_msix(struct pci_dev* dev, +@@ -650,7 +666,7 @@ int pci_enable_msix(struct pci_dev* dev, nr_entries = pci_msix_table_size(dev); if (nvec > nr_entries) @@ -5358,9 +5397,21 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* Check for any invalid entries */ for (i = 0; i < nvec; i++) { ---- head-2010-05-12.orig/drivers/xen/Kconfig 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/Kconfig 2010-03-31 13:35:09.000000000 +0200 -@@ -374,7 +374,8 @@ config XEN_SCRUB_PAGES +--- head-2011-03-17.orig/drivers/staging/vt6655/ttype.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/staging/vt6655/ttype.h 2010-06-22 15:50:05.000000000 +0200 +@@ -30,6 +30,9 @@ + #ifndef __TTYPE_H__ + #define __TTYPE_H__ + ++#ifdef CONFIG_XEN ++#include ++#endif + + /******* Common definitions and typedefs ***********************************/ + +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-02 15:37:23.000000000 +0100 +@@ -371,7 +371,8 @@ config XEN_SCRUB_PAGES config XEN_DEV_EVTCHN tristate "Xen /dev/xen/evtchn device" @@ -5370,7 +5421,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches help The evtchn driver allows a userspace process to triger event channels and to receive notification of an event channel -@@ -406,7 +407,7 @@ config XEN_COMPAT_XENFS +@@ -411,7 +412,7 @@ config XEN_COMPAT_XENFS config XEN_SYS_HYPERVISOR bool "Create xen entries under /sys/hypervisor" @@ -5379,8 +5430,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches select SYS_HYPERVISOR default y help ---- head-2010-05-12.orig/drivers/xen/Makefile 2010-04-19 14:52:22.000000000 +0200 -+++ head-2010-05-12/drivers/xen/Makefile 2010-04-19 14:53:25.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-24 14:09:54.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-02-24 14:10:06.000000000 +0100 @@ -5,7 +5,6 @@ xen-balloon-$(CONFIG_PARAVIRT_XEN) := ba xen-balloon-$(CONFIG_XEN) := balloon/ obj-$(CONFIG_XEN) += core/ @@ -5389,19 +5440,19 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches obj-y += xenbus/ obj-$(CONFIG_XEN) += char/ -@@ -13,7 +12,9 @@ obj-$(CONFIG_XEN) += features.o util.o - obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) - obj-$(CONFIG_XEN_XENCOMM) += xencomm.o - obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) -+obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o - obj-$(CONFIG_XENFS) += xenfs/ +@@ -15,7 +14,9 @@ obj-$(CONFIG_XEN) += features.o $(xen- + obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) + obj-$(CONFIG_XEN_XENCOMM) += xencomm.o + obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) ++obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o + obj-$(CONFIG_XENFS) += xenfs/ +obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ - obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ ---- head-2010-05-12.orig/drivers/xen/balloon/balloon.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/drivers/xen/balloon/balloon.c 2010-03-24 15:25:21.000000000 +0100 -@@ -323,7 +323,7 @@ static int 
increase_reservation(unsigned + obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ blktap2-new/ +--- head-2011-03-17.orig/drivers/xen/balloon/balloon.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/balloon/balloon.c 2011-02-01 14:50:44.000000000 +0100 +@@ -321,7 +321,7 @@ static int increase_reservation(unsigned balloon_unlock(flags); #ifndef MODULE @@ -5410,9 +5461,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) ---- head-2010-05-12.orig/drivers/xen/blkback/blkback.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blkback/blkback.c 2010-03-24 15:25:21.000000000 +0100 -@@ -526,7 +526,7 @@ static int dispatch_rw_block_io(blkif_t +--- head-2011-03-17.orig/drivers/xen/blkback/blkback.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/blkback.c 2011-02-01 14:50:44.000000000 +0100 +@@ -495,7 +495,7 @@ static void dispatch_rw_block_io(blkif_t for (i = 0; i < nseg; i++) { if (((int)preq.sector_number|(int)seg[i].nsec) & @@ -5421,8 +5472,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DPRINTK("Misaligned I/O request from domain %d", blkif->domid); goto fail_put_bio; ---- head-2010-05-12.orig/drivers/xen/blkback/vbd.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blkback/vbd.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkback/vbd.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/vbd.c 2011-02-01 14:50:44.000000000 +0100 @@ -47,7 +47,7 @@ unsigned int vbd_info(struct vbd *vbd) unsigned long vbd_secsize(struct vbd *vbd) @@ -5432,8 +5483,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major, ---- head-2010-05-12.orig/drivers/xen/blkback/xenbus.c 2010-03-24 15:10:17.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blkback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkback/xenbus.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -108,7 +108,7 @@ static void update_blkif_status(blkif_t if (!get_device(_dev)) \ return ret; \ @@ -5479,8 +5530,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int err; DPRINTK("%s", xenbus_strstate(frontend_state)); ---- head-2010-05-12.orig/drivers/xen/blkfront/blkfront.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blkfront/blkfront.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-02-01 14:50:44.000000000 +0100 @@ -119,12 +119,12 @@ static int blkfront_probe(struct xenbus_ /* Front end dir is a number, which is used as the id. 
*/ @@ -5514,7 +5565,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); -@@ -434,7 +434,7 @@ static void blkfront_closing(struct blkf +@@ -433,7 +433,7 @@ static void blkfront_closing(struct blkf static int blkfront_remove(struct xenbus_device *dev) { @@ -5523,7 +5574,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DPRINTK("blkfront_remove: %s removed\n", dev->nodename); -@@ -664,7 +664,7 @@ static int blkif_queue_request(struct re +@@ -682,7 +682,7 @@ static int blkif_queue_request(struct re info->shadow[id].request = (unsigned long)req; ring_req->id = id; @@ -5532,7 +5583,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? -@@ -720,25 +720,25 @@ void do_blkif_request(struct request_que +@@ -738,25 +738,25 @@ void do_blkif_request(struct request_que queued = 0; @@ -5570,7 +5621,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: -@@ -803,8 +803,7 @@ static irqreturn_t blkif_int(int irq, vo +@@ -822,8 +822,7 @@ static irqreturn_t blkif_int(int irq, vo DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); @@ -5580,7 +5631,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches break; default: BUG(); -@@ -930,7 +929,7 @@ static void blkif_recover(struct blkfron +@@ -953,7 +952,7 @@ static int blkif_recover(struct blkfront int blkfront_is_ready(struct xenbus_device *dev) { @@ -5589,9 +5640,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return info->is_ready && info->xbdev; } ---- head-2010-05-12.orig/drivers/xen/blkfront/vbd.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blkfront/vbd.c 2010-03-24 15:25:21.000000000 +0100 -@@ -313,7 +313,7 @@ xlvbd_init_blk_queue(struct gendisk *gd, +--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-02-01 14:50:44.000000000 +0100 +@@ -310,7 +310,7 @@ xlvbd_init_blk_queue(struct gendisk *gd, #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ @@ -5600,7 +5651,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. 
*/ -@@ -500,7 +500,7 @@ static ssize_t show_media(struct device +@@ -499,7 +499,7 @@ static ssize_t show_media(struct device struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); @@ -5609,9 +5660,46 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); ---- head-2010-05-12.orig/drivers/xen/blktap/blktap.c 2010-04-29 09:51:40.000000000 +0200 -+++ head-2010-05-12/drivers/xen/blktap/blktap.c 2010-04-29 09:52:39.000000000 +0200 -@@ -567,7 +567,8 @@ void signal_tapdisk(int idx) +--- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:11:18.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-17 10:16:17.000000000 +0100 +@@ -279,6 +279,15 @@ static inline unsigned int OFFSET_TO_SEG + } while(0) + + ++static char *blktap_nodename(struct device *dev) ++{ ++ return kasprintf(GFP_KERNEL, "xen/blktap%u", MINOR(dev->devt)); ++} ++ ++static struct device_type blktap_type = { ++ .nodename = blktap_nodename ++}; ++ + /****************************************************************** + * BLKTAP VM OPS + */ +@@ -438,7 +447,6 @@ static const struct file_operations blkt + + static tap_blkif_t *get_next_free_dev(void) + { +- struct class *class; + tap_blkif_t *info; + int minor; + +@@ -501,9 +509,9 @@ found: + wmb(); + tapfds[minor] = info; + +- if ((class = get_xen_class()) != NULL) +- device_create(class, NULL, MKDEV(blktap_major, minor), +- NULL, "blktap%d", minor); ++ xen_class_device_create(&blktap_type, NULL, ++ MKDEV(blktap_major, minor), ++ NULL, "blktap%d", minor); + } + + out: +@@ -546,7 +554,8 @@ void signal_tapdisk(int idx) return; if (info->pid > 0) { @@ -5621,8 +5709,37 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (ptask) info->status = CLEANSHUTDOWN; } ---- head-2010-05-12.orig/drivers/xen/blktap/xenbus.c 2010-03-24 15:09:22.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blktap/xenbus.c 2010-04-29 09:52:52.000000000 +0200 +@@ -1700,7 +1709,6 @@ static void make_response(blkif_t *blkif + static int __init blkif_init(void) + { + int i, ret; +- struct class *class; + + if (!is_running_on_xen()) + return -ENODEV; +@@ -1736,7 +1744,7 @@ static int __init blkif_init(void) + DPRINTK("Created misc_dev %d:0 [/dev/xen/blktap0]\n", ret); + + /* Make sure the xen class exists */ +- if ((class = get_xen_class()) != NULL) { ++ if (get_xen_class()) { + /* + * This will allow udev to create the blktap ctrl device. + * We only want to create blktap0 first. We don't want +@@ -1744,8 +1752,9 @@ static int __init blkif_init(void) + * We only create the device when a request of a new device is + * made. 
+ */ +- device_create(class, NULL, MKDEV(blktap_major, 0), NULL, +- "blktap0"); ++ xen_class_device_create(&blktap_type, NULL, ++ MKDEV(blktap_major, 0), NULL, ++ "blktap0"); + } else { + /* this is bad, but not fatal */ + WPRINTK("blktap: sysfs xen_class not created\n"); +--- head-2011-03-17.orig/drivers/xen/blktap/xenbus.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -128,7 +128,7 @@ static int blktap_name(blkif_t *blkif, c if (!get_device(_dev)) \ return ret; \ @@ -5676,7 +5793,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches be->xenbus_id = get_id(dev->nodename); be->blkif = tap_alloc_blkif(dev->otherend_id); -@@ -339,7 +339,7 @@ static void tap_backend_changed(struct x +@@ -351,7 +351,7 @@ static void blkif_disconnect(blkif_t *bl static void tap_frontend_changed(struct xenbus_device *dev, enum xenbus_state frontend_state) { @@ -5685,9 +5802,30 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int err; DPRINTK("fe_changed(%s,%d)\n", dev->nodename, frontend_state); ---- head-2010-05-12.orig/drivers/xen/blktap2/device.c 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blktap2/device.c 2010-04-19 14:53:31.000000000 +0200 -@@ -208,13 +208,6 @@ flush_tlb_kernel_page(unsigned long kvad +--- head-2011-03-17.orig/drivers/xen/blktap2/blktap.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/blktap.h 2011-02-01 14:50:44.000000000 +0100 +@@ -25,6 +25,8 @@ extern int blktap_debug_level; + #define BTWARN(_f, _a...) BTPRINTK(0, KERN_WARNING, 0, _f, ##_a) + #define BTERR(_f, _a...) BTPRINTK(0, KERN_ERR, 0, _f, ##_a) + ++#define BLKTAP2_DEV_DIR "xen/blktap-2/" ++ + #define MAX_BLKTAP_DEVICE 256 + + #define BLKTAP_CONTROL 1 +--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-02-01 14:50:44.000000000 +0100 +@@ -154,6 +154,7 @@ static const struct file_operations blkt + static struct miscdevice blktap_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "blktap-control", ++ .devnode = BLKTAP2_DEV_DIR "control", + .fops = &blktap_control_file_operations, + }; + +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-07 14:14:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-02-01 14:50:44.000000000 +0100 +@@ -206,13 +206,6 @@ flush_tlb_kernel_page(unsigned long kvad #endif } @@ -5701,7 +5839,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* * tap->tap_sem held on entry */ -@@ -381,7 +374,7 @@ blktap_device_fail_pending_requests(stru +@@ -378,7 +371,7 @@ blktap_device_fail_pending_requests(stru blktap_unmap(tap, request); req = (struct request *)(unsigned long)request->id; @@ -5710,7 +5848,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches blktap_request_free(tap, request); } -@@ -420,7 +413,7 @@ blktap_device_finish_request(struct blkt +@@ -417,7 +410,7 @@ blktap_device_finish_request(struct blkt if (unlikely(res->status != BLKIF_RSP_OKAY)) BTERR("Bad return from device data " "request: %x\n", res->status); @@ -5719,7 +5857,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches res->status == BLKIF_RSP_OKAY ? 
0 : -EIO); break; default: -@@ -652,7 +645,7 @@ blktap_device_process_request(struct blk +@@ -647,7 +640,7 @@ blktap_device_process_request(struct blk ring = &tap->ring; usr_idx = request->usr_idx; blkif_req.id = usr_idx; @@ -5728,30 +5866,22 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches blkif_req.handle = 0; blkif_req.operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; -@@ -849,47 +842,46 @@ blktap_device_run_queue(struct blktap *t +@@ -844,20 +837,22 @@ blktap_device_run_queue(struct blktap *t BTDBG("running queue for %d\n", tap->minor); - while ((req = elv_next_request(rq)) != NULL) { + while ((req = blk_peek_request(rq)) != NULL) { -+ if (RING_FULL(&ring->ring)) { -+ wait: -+ /* Avoid pointless unplugs. */ -+ blk_stop_queue(rq); -+ blktap_defer(tap); -+ break; -+ } -+ -+ blk_start_request(req); -+ if (!blk_fs_request(req)) { - end_request(req, 0); ++ blk_start_request(req); + __blk_end_request_all(req, -EIO); continue; } if (blk_barrier_rq(req)) { - end_request(req, 0); ++ blk_start_request(req); + __blk_end_request_all(req, -EOPNOTSUPP); continue; } @@ -5759,22 +5889,11 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef ENABLE_PASSTHROUGH if (test_bit(BLKTAP_PASSTHROUGH, &tap->dev_inuse)) { - blkdev_dequeue_request(req); ++ blk_start_request(req); blktap_device_forward_request(tap, req); continue; } - #endif - -- if (RING_FULL(&ring->ring)) { -- wait: -- /* Avoid pointless unplugs. */ -- blk_stop_queue(rq); -- blktap_defer(tap); -- break; -- } -- - request = blktap_request_allocate(tap); - if (!request) { - tap->stats.st_oo_req++; +@@ -877,13 +872,13 @@ blktap_device_run_queue(struct blktap *t goto wait; } @@ -5788,11 +5907,11 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches rq_data_dir(req) ? "write" : "read", request); - blkdev_dequeue_request(req); -- - spin_unlock_irq(&dev->lock); - down_read(&tap->tap_sem); ++ blk_start_request(req); -@@ -897,7 +889,7 @@ blktap_device_run_queue(struct blktap *t + spin_unlock_irq(&dev->lock); + down_write(&tap->tap_sem); +@@ -892,7 +887,7 @@ blktap_device_run_queue(struct blktap *t if (!err) queued++; else { @@ -5801,12 +5920,12 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches blktap_request_free(tap, request); } -@@ -937,11 +929,13 @@ blktap_device_do_request(struct request_ +@@ -932,11 +927,12 @@ blktap_device_do_request(struct request_ return; fail: - while ((req = elv_next_request(rq))) { -+ while ((req = blk_peek_request(rq))) { ++ while ((req = blk_fetch_request(rq))) { BTERR("device closed: failing secs %llu - %llu\n", - (unsigned long long)req->sector, - (unsigned long long)req->sector + req->nr_sectors); @@ -5814,12 +5933,11 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + (unsigned long long)blk_rq_pos(req), + (unsigned long long)blk_rq_pos(req) + + blk_rq_cur_sectors(req)); -+ blk_start_request(req); + __blk_end_request_all(req, -EIO); } } -@@ -996,7 +990,7 @@ blktap_device_configure(struct blktap *t +@@ -991,7 +987,7 @@ blktap_device_configure(struct blktap *t set_capacity(dev->gd, tap->params.capacity); /* Hard sector size and max sectors impersonate the equiv. hardware. */ @@ -5828,8 +5946,53 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. 
*/ ---- head-2010-05-12.orig/drivers/xen/console/console.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/drivers/xen/console/console.c 2010-03-24 15:25:21.000000000 +0100 +@@ -1089,6 +1085,12 @@ blktap_device_destroy(struct blktap *tap + return 0; + } + ++static char *blktap_nodename(struct gendisk *gd) ++{ ++ return kasprintf(GFP_KERNEL, BLKTAP2_DEV_DIR "tapdev%u", ++ gd->first_minor); ++} ++ + int + blktap_device_create(struct blktap *tap) + { +@@ -1125,6 +1127,7 @@ blktap_device_create(struct blktap *tap) + + gd->major = blktap_device_major; + gd->first_minor = minor; ++ gd->nodename = blktap_nodename; + gd->fops = &blktap_device_file_operations; + gd->private_data = dev; + +--- head-2011-03-17.orig/drivers/xen/blktap2/sysfs.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/sysfs.c 2011-02-01 14:50:44.000000000 +0100 +@@ -436,6 +436,12 @@ blktap_sysfs_free(void) + class_destroy(class); + } + ++static char *blktap_nodename(struct device *dev) ++{ ++ return kasprintf(GFP_KERNEL, BLKTAP2_DEV_DIR "blktap%u", ++ MINOR(dev->devt)); ++} ++ + int __init + blktap_sysfs_init(void) + { +@@ -449,6 +455,8 @@ blktap_sysfs_init(void) + if (IS_ERR(cls)) + return PTR_ERR(cls); + ++ cls->nodename = blktap_nodename; ++ + err = class_create_file(cls, &class_attr_verbosity); + if (!err) { + err = class_create_file(cls, &class_attr_devices); +--- head-2011-03-17.orig/drivers/xen/console/console.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/console/console.c 2011-02-01 14:50:44.000000000 +0100 @@ -46,7 +46,6 @@ #include #include @@ -5858,8 +6021,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches tty->closing = 0; spin_lock_irqsave(&xencons_lock, flags); xencons_tty = NULL; ---- head-2010-05-12.orig/drivers/xen/core/evtchn.c 2010-04-23 15:19:37.000000000 +0200 -+++ head-2010-05-12/drivers/xen/core/evtchn.c 2010-04-23 15:19:43.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-09 13:57:45.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-01 14:50:44.000000000 +0100 @@ -35,7 +35,6 @@ #include #include @@ -5881,28 +6044,126 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* IRQ <-> VIRQ mapping. */ DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... 
NR_VIRQS-1] = -1}; -@@ -311,9 +316,8 @@ asmlinkage void __irq_entry evtchn_do_up +@@ -328,6 +333,8 @@ asmlinkage void __irq_entry evtchn_do_up + } - /* process port */ - port = (l1i * BITS_PER_LONG) + l2i; -- if (unlikely((irq = evtchn_to_irq[port]) == -1)) + do { ++ bool handled = false; ++ + masked_l2 = l2 & ((~0UL) << l2i); + if (masked_l2 == 0) + break; +@@ -338,13 +345,12 @@ asmlinkage void __irq_entry evtchn_do_up + mask_evtchn(port); + if ((irq = evtchn_to_irq[port]) != -1) { + clear_evtchn(port); +- if (!handle_irq(irq, regs) +- && printk_ratelimit()) +- pr_emerg("No handler for " +- "irq %d (port %u)\n", +- irq, port); +- } else - evtchn_device_upcall(port); -- else if (!handle_irq(irq, regs) && printk_ratelimit()) -+ if ((unlikely((irq = evtchn_to_irq[port]) == -1) -+ || !handle_irq(irq, regs)) && printk_ratelimit()) - printk(KERN_EMERG "%s(%d): No handler for irq %d\n", - __func__, smp_processor_id(), irq); ++ handled = handle_irq(irq, regs); ++ } ++ if (!handled && printk_ratelimit()) ++ pr_emerg("No handler for irq %d" ++ " (port %u)\n", ++ irq, port); -@@ -350,7 +354,7 @@ static int find_unbound_irq(unsigned int + l2i = (l2i + 1) % BITS_PER_LONG; + +@@ -371,16 +377,26 @@ asmlinkage void __irq_entry evtchn_do_up + set_irq_regs(old_regs); + } + +-static int find_unbound_irq(unsigned int cpu, struct irq_chip *chip) ++static int find_unbound_irq(unsigned int node, struct irq_chip *chip) + { + static int warned; int irq; - for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++) { + for (irq = DYNIRQ_BASE; irq < nr_irqs; irq++) { - struct irq_desc *desc = irq_to_desc_alloc_cpu(irq, cpu); -+ struct irq_desc *desc = irq_to_desc_alloc_node(irq, cpu_to_node(cpu)); - struct irq_cfg *cfg = desc->chip_data; +- struct irq_cfg *cfg = desc->chip_data; ++ struct irq_desc *desc; ++ struct irq_cfg *cfg; + +- if (!cfg->bindcount) { ++ desc = irq_to_desc(irq); ++ if (!desc) ++ desc = irq_to_desc_alloc_node(irq, node); ++ else if (desc->chip != &no_irq_chip && ++ desc->chip != &dynirq_chip) ++ continue; ++ if (!desc) ++ return -ENOMEM; ++ ++ cfg = desc->chip_data; ++ if (cfg && !cfg->bindcount) { + desc->status |= IRQ_NOPROBE; + set_irq_chip_and_handler_name(irq, chip, + handle_fasteoi_irq, +@@ -407,7 +423,7 @@ static int bind_caller_port_to_irq(unsig + spin_lock(&irq_mapping_update_lock); + + if ((irq = evtchn_to_irq[caller_port]) == -1) { +- if ((irq = find_unbound_irq(smp_processor_id(), &dynirq_chip)) < 0) ++ if ((irq = find_unbound_irq(numa_node_id(), &dynirq_chip)) < 0) + goto out; + + evtchn_to_irq[caller_port] = irq; +@@ -430,9 +446,8 @@ static int bind_local_port_to_irq(unsign + + BUG_ON(evtchn_to_irq[local_port] != -1); + +- if ((irq = find_unbound_irq(smp_processor_id(), &dynirq_chip)) < 0) { +- struct evtchn_close close = { .port = local_port }; +- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) ++ if ((irq = find_unbound_irq(numa_node_id(), &dynirq_chip)) < 0) { ++ if (close_evtchn(local_port)) + BUG(); + goto out; + } +@@ -483,7 +498,8 @@ static int bind_virq_to_irq(unsigned int + spin_lock(&irq_mapping_update_lock); + + if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) { +- if ((irq = find_unbound_irq(cpu, &dynirq_chip)) < 0) ++ if ((irq = find_unbound_irq(cpu_to_node(cpu), ++ &dynirq_chip)) < 0) + goto out; + + bind_virq.virq = virq; +@@ -516,7 +532,8 @@ static int bind_ipi_to_irq(unsigned int + spin_lock(&irq_mapping_update_lock); + + if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) { +- if ((irq = find_unbound_irq(cpu, &dynirq_chip)) < 0) ++ if ((irq = 
find_unbound_irq(cpu_to_node(cpu), ++ &dynirq_chip)) < 0) + goto out; + + bind_ipi.vcpu = cpu; +@@ -542,16 +559,14 @@ static int bind_ipi_to_irq(unsigned int + + static void unbind_from_irq(unsigned int irq) + { +- struct evtchn_close close; + unsigned int cpu; + int evtchn = evtchn_from_irq(irq); - if (!cfg->bindcount) { -@@ -703,9 +707,11 @@ static void rebind_irq_to_cpu(unsigned i + spin_lock(&irq_mapping_update_lock); + + if (!--irq_cfg(irq)->bindcount && VALID_EVTCHN(evtchn)) { +- close.port = evtchn; + if ((type_from_irq(irq) != IRQT_CALLER_PORT) && +- HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) ++ close_evtchn(evtchn)) + BUG(); + + switch (type_from_irq(irq)) { +@@ -732,9 +747,11 @@ static void rebind_irq_to_cpu(unsigned i rebind_evtchn_to_cpu(evtchn, tcpu); } @@ -5915,7 +6176,34 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } #endif -@@ -1262,7 +1268,7 @@ int evtchn_map_pirq(int irq, int xen_pir +@@ -908,7 +925,6 @@ static unsigned int startup_pirq(unsigne + + static void shutdown_pirq(unsigned int irq) + { +- struct evtchn_close close; + int evtchn = evtchn_from_irq(irq); + + if (!VALID_EVTCHN(evtchn)) +@@ -916,8 +932,7 @@ static void shutdown_pirq(unsigned int i + + mask_evtchn(evtchn); + +- close.port = evtchn; +- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) ++ if (close_evtchn(evtchn)) + BUG(); + + bind_evtchn_to_cpu(evtchn, 0); +@@ -1252,7 +1267,7 @@ int evtchn_map_pirq(int irq, int xen_pir + if (irq < 0) { + #ifdef CONFIG_SPARSE_IRQ + spin_lock(&irq_mapping_update_lock); +- irq = find_unbound_irq(smp_processor_id(), &pirq_chip); ++ irq = find_unbound_irq(numa_node_id(), &pirq_chip); + if (irq >= 0) { + struct irq_desc *desc; + struct irq_cfg *cfg; +@@ -1280,7 +1295,7 @@ int evtchn_map_pirq(int irq, int xen_pir if (identity_mapped_irq(irq)) continue; @@ -5924,21 +6212,21 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches cfg = desc->chip_data; if (!index_from_irq(irq)) { BUG_ON(type_from_irq(irq) != IRQT_UNBOUND); -@@ -1313,8 +1319,9 @@ void __init xen_init_IRQ(void) - - init_evtchn_cpu_bindings(); - +@@ -1340,8 +1355,9 @@ void __init xen_init_IRQ(void) + #else + i = nr_pirqs; + #endif - pirq_needs_eoi = alloc_bootmem_pages(sizeof(unsigned long) -- * BITS_TO_LONGS(ALIGN(nr_pirqs, PAGE_SIZE * 8))); -+ i = get_order(sizeof(unsigned long) * BITS_TO_LONGS(nr_pirqs)); +- * BITS_TO_LONGS(ALIGN(i, PAGE_SIZE * 8))); ++ i = get_order(sizeof(unsigned long) * BITS_TO_LONGS(i)); + pirq_needs_eoi = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, i); + BUILD_BUG_ON(NR_PIRQS > PAGE_SIZE * 8); eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT; if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0) pirq_eoi_does_unmask = true; ---- head-2010-05-12.orig/drivers/xen/core/smpboot.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/smpboot.c 2010-03-24 15:25:21.000000000 +0100 -@@ -43,9 +43,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-02-01 14:50:44.000000000 +0100 +@@ -40,9 +40,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); static DEFINE_PER_CPU(int, resched_irq); static DEFINE_PER_CPU(int, callfunc_irq); static DEFINE_PER_CPU(int, call1func_irq); @@ -5948,9 +6236,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static char call1func_name[NR_CPUS][15]; +static char reboot_name[NR_CPUS][15]; 
- #ifdef CONFIG_X86_LOCAL_APIC - #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid)) -@@ -110,7 +112,7 @@ static int __cpuinit xen_smp_intr_init(u + void __init prefill_possible_map(void) + { +@@ -74,7 +76,7 @@ static int __cpuinit xen_smp_intr_init(u int rc; per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = @@ -5959,7 +6247,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches sprintf(resched_name[cpu], "resched%u", cpu); rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, -@@ -145,6 +147,17 @@ static int __cpuinit xen_smp_intr_init(u +@@ -109,6 +111,17 @@ static int __cpuinit xen_smp_intr_init(u goto fail; per_cpu(call1func_irq, cpu) = rc; @@ -5977,7 +6265,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches rc = xen_spinlock_init(cpu); if (rc < 0) goto fail; -@@ -161,6 +174,8 @@ static int __cpuinit xen_smp_intr_init(u +@@ -125,6 +138,8 @@ static int __cpuinit xen_smp_intr_init(u unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); if (per_cpu(call1func_irq, cpu) >= 0) unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); @@ -5986,7 +6274,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches xen_spinlock_cleanup(cpu); return rc; } -@@ -174,6 +189,7 @@ static void __cpuexit xen_smp_intr_exit( +@@ -138,6 +153,7 @@ static void __cpuinit xen_smp_intr_exit( unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); @@ -5994,8 +6282,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches xen_spinlock_cleanup(cpu); } #endif ---- head-2010-05-12.orig/drivers/xen/evtchn.c 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/drivers/xen/evtchn.c 2010-04-15 10:08:13.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/evtchn.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/evtchn.c 2011-02-01 14:50:44.000000000 +0100 @@ -48,10 +48,17 @@ #include #include @@ -6014,16 +6302,17 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct per_user_data { struct mutex bind_mutex; /* serialize bind/unbind operations */ -@@ -73,7 +80,7 @@ struct per_user_data { - static struct per_user_data *port_user[NR_EVENT_CHANNELS]; - static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ +@@ -278,6 +285,9 @@ static void evtchn_unbind_from_user(stru + int irq = irq_from_evtchn(port); --irqreturn_t evtchn_interrupt(int irq, void *data) -+static irqreturn_t evtchn_interrupt(int irq, void *data) - { - unsigned int port = (unsigned long)data; - struct per_user_data *u; -@@ -411,7 +418,8 @@ static int evtchn_open(struct inode *ino + unbind_from_irqhandler(irq, (void *)(unsigned long)port); ++#ifdef CONFIG_XEN ++ WARN_ON(close_evtchn(port)); ++#endif + + set_port_user(port, NULL); + } +@@ -450,7 +460,8 @@ static int evtchn_open(struct inode *ino if (u == NULL) return -ENOMEM; @@ -6033,8 +6322,29 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (u->name == NULL) { kfree(u); return -ENOMEM; ---- head-2010-05-12.orig/drivers/xen/fbfront/xenfb.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/drivers/xen/fbfront/xenfb.c 2010-03-24 15:25:21.000000000 +0100 +@@ -519,6 +530,7 @@ static const struct file_operations evtc + static struct miscdevice evtchn_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "xen/evtchn", ++ 
.devnode = "xen/evtchn", + .fops = &evtchn_fops, + }; + static int __init evtchn_init(void) +@@ -534,10 +546,10 @@ static int __init evtchn_init(void) + + spin_lock_init(&port_user_lock); + +- /* Create '/dev/misc/evtchn'. */ ++ /* Create '/dev/xen/evtchn'. */ + err = misc_register(&evtchn_miscdev); + if (err != 0) { +- printk(KERN_ALERT "Could not register /dev/misc/evtchn\n"); ++ pr_alert("Could not register /dev/xen/evtchn\n"); + return err; + } + +--- head-2011-03-17.orig/drivers/xen/fbfront/xenfb.c 2011-02-17 10:11:23.000000000 +0100 ++++ head-2011-03-17/drivers/xen/fbfront/xenfb.c 2011-02-17 10:16:12.000000000 +0100 @@ -597,7 +597,7 @@ static int __devinit xenfb_probe(struct fb_size = XENFB_DEFAULT_FB_LEN; } @@ -6062,7 +6372,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches del_timer(&info->refresh); if (info->kthread) -@@ -817,7 +817,7 @@ static void xenfb_disconnect_backend(str +@@ -819,7 +819,7 @@ static void xenfb_disconnect_backend(str static void xenfb_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { @@ -6071,8 +6381,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int val; switch (backend_state) { ---- head-2010-05-12.orig/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/fbfront/xenkbd.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/fbfront/xenkbd.c 2011-02-01 14:50:44.000000000 +0100 @@ -113,7 +113,7 @@ int __devinit xenkbd_probe(struct xenbus xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; @@ -6109,8 +6419,48 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int ret, val; switch (backend_state) { ---- head-2010-05-12.orig/drivers/xen/netback/accel.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-12/drivers/xen/netback/accel.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/gntdev/gntdev.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/gntdev/gntdev.c 2011-02-01 14:50:44.000000000 +0100 +@@ -371,10 +371,18 @@ nomem_out: + + /* Interface functions. */ + ++static char *gntdev_nodename(struct device *dev) ++{ ++ return kstrdup("xen/" GNTDEV_NAME, GFP_KERNEL); ++} ++ ++static struct device_type gntdev_type = { ++ .nodename = gntdev_nodename ++}; ++ + /* Initialises the driver. Called when the module is loaded. */ + static int __init gntdev_init(void) + { +- struct class *class; + struct device *device; + + if (!is_running_on_xen()) { +@@ -393,14 +401,9 @@ static int __init gntdev_init(void) + * device, and output the major number so that the device can be + * created manually using mknod. 
+ */ +- if ((class = get_xen_class()) == NULL) { +- pr_err("Error setting up xen_class\n"); +- pr_err("gntdev created, major number = %d\n", gntdev_major); +- return 0; +- } +- +- device = device_create(class, NULL, MKDEV(gntdev_major, 0), +- NULL, GNTDEV_NAME); ++ device = xen_class_device_create(&gntdev_type, NULL, ++ MKDEV(gntdev_major, 0), ++ NULL, GNTDEV_NAME); + if (IS_ERR(device)) { + pr_err("Error creating gntdev device in xen_class\n"); + pr_err("gntdev created, major number = %d\n", gntdev_major); +--- head-2011-03-17.orig/drivers/xen/netback/accel.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/accel.c 2011-02-01 14:50:44.000000000 +0100 @@ -103,7 +103,7 @@ static int netback_accelerator_probe_bac struct xenbus_device *xendev = to_xenbus_device(dev); @@ -6129,9 +6479,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (be->accelerator == accelerator) { be->accelerator->hooks->remove(xendev); ---- head-2010-05-12.orig/drivers/xen/netback/loopback.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/drivers/xen/netback/loopback.c 2010-03-24 15:25:21.000000000 +0100 -@@ -137,8 +137,8 @@ static int loopback_start_xmit(struct sk +--- head-2011-03-17.orig/drivers/xen/netback/loopback.c 2011-03-01 11:52:05.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/loopback.c 2011-02-01 14:50:44.000000000 +0100 +@@ -139,8 +139,8 @@ static int loopback_start_xmit(struct sk return 0; } @@ -6142,9 +6492,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches skb_orphan(skb); ---- head-2010-05-12.orig/drivers/xen/netback/xenbus.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-12/drivers/xen/netback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 -@@ -35,7 +35,7 @@ static void backend_create_netif(struct +--- head-2011-03-17.orig/drivers/xen/netback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/xenbus.c 2011-02-01 14:50:44.000000000 +0100 +@@ -38,7 +38,7 @@ static void netback_disconnect(struct de static int netback_remove(struct xenbus_device *dev) { @@ -6153,16 +6503,25 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches netback_remove_accelerators(be, dev); -@@ -45,7 +45,7 @@ static int netback_remove(struct xenbus_ +@@ -49,7 +49,7 @@ static int netback_remove(struct xenbus_ + + static void netback_disconnect(struct device *xbdev_dev, int clear) + { +- struct backend_info *be = xbdev_dev->driver_data; ++ struct backend_info *be = dev_get_drvdata(xbdev_dev); + + if (be->netif) + kobject_uevent(&xbdev_dev->kobj, KOBJ_OFFLINE); +@@ -60,7 +60,7 @@ static void netback_disconnect(struct de be->netif = NULL; } - kfree(be); -- dev->dev.driver_data = NULL; -+ dev_set_drvdata(&dev->dev, NULL); - return 0; + if (clear) +- xbdev_dev->driver_data = NULL; ++ dev_set_drvdata(xbdev_dev, NULL); + up_write(&teardown_sem); } -@@ -70,7 +70,7 @@ static int netback_probe(struct xenbus_d +@@ -84,7 +84,7 @@ static int netback_probe(struct xenbus_d } be->dev = dev; @@ -6171,16 +6530,16 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches sg = 1; if (netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) -@@ -151,7 +151,7 @@ fail: - */ - static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env) - { -- struct backend_info *be = xdev->dev.driver_data; -+ struct backend_info *be = dev_get_drvdata(&xdev->dev); - netif_t *netif = be->netif; - char *val; - -@@ -207,7 +207,7 @@ static void 
backend_create_netif(struct +@@ -181,7 +181,7 @@ static int netback_uevent(struct xenbus_ + kfree(val); + + down_read(&teardown_sem); +- be = xdev->dev.driver_data; ++ be = dev_get_drvdata(&xdev->dev); + if (be && be->netif) + add_uevent_var(env, "vif=%s", be->netif->dev->name); + up_read(&teardown_sem); +@@ -224,7 +224,7 @@ static void backend_create_netif(struct static void frontend_changed(struct xenbus_device *dev, enum xenbus_state frontend_state) { @@ -6189,9 +6548,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DPRINTK("%s", xenbus_strstate(frontend_state)); ---- head-2010-05-12.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/netfront/netfront.c 2010-03-24 15:25:21.000000000 +0100 -@@ -257,7 +257,7 @@ static int __devinit netfront_probe(stru +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-09 16:04:51.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-01 14:50:44.000000000 +0100 +@@ -256,7 +256,7 @@ static int __devinit netfront_probe(stru } info = netdev_priv(netdev); @@ -6200,7 +6559,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches err = register_netdev(info->netdev); if (err) { -@@ -278,13 +278,13 @@ static int __devinit netfront_probe(stru +@@ -277,13 +277,13 @@ static int __devinit netfront_probe(stru fail: free_netdev(netdev); @@ -6216,7 +6575,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DPRINTK("%s\n", dev->nodename); -@@ -306,14 +306,14 @@ static int __devexit netfront_remove(str +@@ -305,14 +305,14 @@ static int __devexit netfront_remove(str static int netfront_suspend(struct xenbus_device *dev) { @@ -6233,7 +6592,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return netfront_accelerator_suspend_cancel(info, dev); } -@@ -326,7 +326,7 @@ static int netfront_suspend_cancel(struc +@@ -325,7 +325,7 @@ static int netfront_suspend_cancel(struc */ static int netfront_resume(struct xenbus_device *dev) { @@ -6242,7 +6601,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DPRINTK("%s\n", dev->nodename); -@@ -531,7 +531,7 @@ static int setup_device(struct xenbus_de +@@ -530,7 +530,7 @@ static int setup_device(struct xenbus_de static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { @@ -6251,8 +6610,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); ---- head-2010-05-12.orig/drivers/xen/pciback/xenbus.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-12/drivers/xen/pciback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/pciback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pciback/xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -24,7 +24,7 @@ static struct pciback_device *alloc_pdev dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); @@ -6289,8 +6648,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (pdev != NULL) free_pdev(pdev); ---- head-2010-05-12.orig/drivers/xen/pcifront/pci_op.c 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-12/drivers/xen/pcifront/pci_op.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/pcifront/pci_op.c 2011-01-31 17:56:27.000000000 +0100 ++++ 
head-2011-03-17/drivers/xen/pcifront/pci_op.c 2011-02-01 14:50:44.000000000 +0100 @@ -416,7 +416,7 @@ void pci_frontend_disable_msi(struct pci #endif /* CONFIG_PCI_MSI */ @@ -6309,8 +6668,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } int __devinit pcifront_scan_root(struct pcifront_device *pdev, ---- head-2010-05-12.orig/drivers/xen/pcifront/xenbus.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-12/drivers/xen/pcifront/xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/pcifront/xenbus.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pcifront/xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -34,7 +34,7 @@ static struct pcifront_device *alloc_pde /*Flag for registering PV AER handler*/ set_bit(_XEN_PCIB_AERHANDLER, (void*)&pdev->sh_info->flags); @@ -6320,16 +6679,16 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches pdev->xdev = xdev; INIT_LIST_HEAD(&pdev->root_buses); -@@ -70,7 +70,7 @@ static void free_pdev(struct pcifront_de - gnttab_end_foreign_access(pdev->gnt_ref, - (unsigned long)pdev->sh_info); +@@ -75,7 +75,7 @@ static void free_pdev(struct pcifront_de + else + free_page((unsigned long)pdev->sh_info); - pdev->xdev->dev.driver_data = NULL; + dev_set_drvdata(&pdev->xdev->dev, NULL); kfree(pdev); } -@@ -381,7 +381,7 @@ static int pcifront_detach_devices(struc +@@ -394,7 +394,7 @@ static int pcifront_detach_devices(struc static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev, enum xenbus_state be_state) { @@ -6338,7 +6697,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches switch (be_state) { case XenbusStateUnknown: -@@ -431,8 +431,8 @@ static int pcifront_xenbus_probe(struct +@@ -446,8 +446,8 @@ static int pcifront_xenbus_probe(struct static int pcifront_xenbus_remove(struct xenbus_device *xdev) { @@ -6349,8 +6708,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return 0; } ---- head-2010-05-12.orig/drivers/xen/scsiback/scsiback.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-12/drivers/xen/scsiback/scsiback.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/scsiback/scsiback.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/scsiback.c 2011-02-01 14:50:44.000000000 +0100 @@ -224,7 +224,7 @@ static void scsiback_cmd_done(struct req int errors; @@ -6360,7 +6719,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches errors = req->errors; if (errors != 0) { -@@ -339,21 +339,6 @@ fail_flush: +@@ -331,21 +331,6 @@ fail_flush: return -ENOMEM; } @@ -6382,7 +6741,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* quoted scsi_lib.c/scsi_bi_endio */ static void scsiback_bi_endio(struct bio *bio, int error) { -@@ -363,29 +348,28 @@ static void scsiback_bi_endio(struct bio +@@ -355,29 +340,28 @@ static void scsiback_bi_endio(struct bio /* quoted scsi_lib.c/scsi_req_map_sg . 
*/ @@ -6420,7 +6779,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches nr_pages -= nr_vecs; bio = bio_alloc(GFP_KERNEL, nr_vecs); if (!bio) { -@@ -393,6 +377,11 @@ static int request_map_sg(struct request +@@ -385,6 +369,11 @@ static int request_map_sg(struct request goto free_bios; } bio->bi_end_io = scsiback_bi_endio; @@ -6432,7 +6791,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } if (bio_add_pc_page(q, bio, page, bytes, off) != -@@ -403,11 +392,9 @@ static int request_map_sg(struct request +@@ -395,11 +384,9 @@ static int request_map_sg(struct request } if (bio->bi_vcnt >= nr_vecs) { @@ -6447,7 +6806,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches bio = NULL; } -@@ -417,21 +404,15 @@ static int request_map_sg(struct request +@@ -409,21 +396,15 @@ static int request_map_sg(struct request } } @@ -6474,7 +6833,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } -@@ -439,7 +420,6 @@ void scsiback_cmd_exec(pending_req_t *pe +@@ -431,7 +412,6 @@ void scsiback_cmd_exec(pending_req_t *pe { int cmd_len = (int)pending_req->cmd_len; int data_dir = (int)pending_req->sc_data_direction; @@ -6482,7 +6841,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches unsigned int timeout; struct request *rq; int write; -@@ -453,7 +433,30 @@ void scsiback_cmd_exec(pending_req_t *pe +@@ -445,7 +425,30 @@ void scsiback_cmd_exec(pending_req_t *pe timeout = VSCSIIF_TIMEOUT; write = (data_dir == DMA_TO_DEVICE); @@ -6491,14 +6850,14 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + struct bio *bio = request_map_sg(pending_req); + + if (IS_ERR(bio)) { -+ printk(KERN_ERR "scsiback: SG Request Map Error\n"); ++ pr_err("scsiback: SG Request Map Error\n"); + return; + } + + rq = blk_make_request(pending_req->sdev->request_queue, bio, + GFP_KERNEL); + if (IS_ERR(rq)) { -+ printk(KERN_ERR "scsiback: Make Request Error\n"); ++ pr_err("scsiback: Make Request Error\n"); + return; + } + @@ -6507,21 +6866,21 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches + rq = blk_get_request(pending_req->sdev->request_queue, write, + GFP_KERNEL); + if (unlikely(!rq)) { -+ printk(KERN_ERR "scsiback: Get Request Error\n"); ++ pr_err("scsiback: Get Request Error\n"); + return; + } + } rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_len = cmd_len; -@@ -468,14 +471,6 @@ void scsiback_cmd_exec(pending_req_t *pe +@@ -460,14 +463,6 @@ void scsiback_cmd_exec(pending_req_t *pe rq->timeout = timeout; rq->end_io_data = pending_req; - if (nr_segments) { - - if (request_map_sg(rq, pending_req, nr_segments)) { -- printk(KERN_ERR "scsiback: SG Request Map Error\n"); +- pr_err("scsiback: SG Request Map Error\n"); - return; - } - } @@ -6529,9 +6888,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches scsiback_get(pending_req->info); blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done); ---- head-2010-05-12.orig/drivers/xen/scsiback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-12/drivers/xen/scsiback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 -@@ -224,7 +224,7 @@ static void scsiback_do_lun_hotplug(stru +--- head-2011-03-17.orig/drivers/xen/scsiback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/xenbus.c 2011-02-01 14:50:44.000000000 +0100 +@@ -226,7 +226,7 @@ static void scsiback_do_lun_hotplug(stru static void 
scsiback_frontend_changed(struct xenbus_device *dev, enum xenbus_state frontend_state) { @@ -6540,7 +6899,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int err; switch (frontend_state) { -@@ -281,7 +281,7 @@ static void scsiback_frontend_changed(st +@@ -283,7 +283,7 @@ static void scsiback_frontend_changed(st static int scsiback_remove(struct xenbus_device *dev) { @@ -6549,7 +6908,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches if (be->info) { scsiback_disconnect(be->info); -@@ -291,7 +291,7 @@ static int scsiback_remove(struct xenbus +@@ -293,7 +293,7 @@ static int scsiback_remove(struct xenbus } kfree(be); @@ -6558,7 +6917,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return 0; } -@@ -314,7 +314,7 @@ static int scsiback_probe(struct xenbus_ +@@ -316,7 +316,7 @@ static int scsiback_probe(struct xenbus_ return -ENOMEM; } be->dev = dev; @@ -6567,9 +6926,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches be->info = vscsibk_info_alloc(dev->otherend_id); if (IS_ERR(be->info)) { ---- head-2010-05-12.orig/drivers/xen/scsifront/xenbus.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-12/drivers/xen/scsifront/xenbus.c 2010-03-24 15:25:21.000000000 +0100 -@@ -185,7 +185,7 @@ static int scsifront_probe(struct xenbus +--- head-2011-03-17.orig/drivers/xen/scsifront/xenbus.c 2011-02-08 10:04:06.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsifront/xenbus.c 2011-02-08 10:05:30.000000000 +0100 +@@ -189,7 +189,7 @@ static int scsifront_probe(struct xenbus info->host = host; @@ -6578,7 +6937,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { -@@ -238,7 +238,7 @@ free_sring: +@@ -243,7 +243,7 @@ free_sring: static int scsifront_remove(struct xenbus_device *dev) { @@ -6587,7 +6946,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); -@@ -350,7 +350,7 @@ static void scsifront_do_lun_hotplug(str +@@ -355,7 +355,7 @@ static void scsifront_do_lun_hotplug(str static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { @@ -6596,8 +6955,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches DPRINTK("%p %u %u\n", dev, dev->state, backend_state); ---- head-2010-05-12.orig/drivers/xen/sfc_netback/accel_xenbus.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-12/drivers/xen/sfc_netback/accel_xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_xenbus.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/accel_xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -36,7 +36,7 @@ #define NODENAME_PATH_FMT "backend/vif/%d/%d" @@ -6625,8 +6984,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches bend = (struct netback_accel *) binfo->netback_accel_priv; DPRINTK("%s: dev %p bend %p\n", __FUNCTION__, dev, bend); ---- head-2010-05-12.orig/drivers/xen/sfc_netfront/accel_xenbus.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-12/drivers/xen/sfc_netfront/accel_xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/sfc_netfront/accel_xenbus.c 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netfront/accel_xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ 
-727,8 +727,7 @@ int netfront_accel_probe(struct net_devi int netfront_accel_remove(struct xenbus_device *dev) @@ -6637,8 +6996,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches netfront_accel_vnic *vnic = (netfront_accel_vnic *)np->accel_priv; DPRINTK("%s %s\n", __FUNCTION__, dev->nodename); ---- head-2010-05-12.orig/drivers/xen/sys-hypervisor.c 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/drivers/xen/sys-hypervisor.c 2010-04-15 10:08:27.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/sys-hypervisor.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sys-hypervisor.c 2011-02-01 14:50:44.000000000 +0100 @@ -20,6 +20,8 @@ #include #include @@ -6659,8 +7018,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches return -EBUSY; vm = xenbus_read(XBT_NIL, "vm", "", NULL); ---- head-2010-05-12.orig/drivers/xen/tpmback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-12/drivers/xen/tpmback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/tpmback/xenbus.c 2011-01-31 17:32:22.000000000 +0100 ++++ head-2011-03-17/drivers/xen/tpmback/xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -54,7 +54,7 @@ long int tpmback_get_instance(struct bac static int tpmback_remove(struct xenbus_device *dev) @@ -6697,9 +7056,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int err; switch (frontend_state) { ---- head-2010-05-12.orig/drivers/xen/usbback/usbback.h 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-12/drivers/xen/usbback/usbback.h 2010-03-24 15:25:21.000000000 +0100 -@@ -64,6 +64,12 @@ +--- head-2011-03-17.orig/drivers/xen/usbback/usbback.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/usbback.h 2011-02-01 14:50:44.000000000 +0100 +@@ -63,6 +63,12 @@ struct usbstub; @@ -6712,7 +7071,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #define USB_DEV_ADDR_SIZE 128 typedef struct usbif_st { -@@ -111,7 +117,7 @@ typedef struct usbif_st { +@@ -110,7 +116,7 @@ typedef struct usbif_st { struct vusb_port_id { struct list_head id_list; @@ -6721,8 +7080,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches domid_t domid; unsigned int handle; int portnum; ---- head-2010-05-12.orig/drivers/xen/usbback/usbstub.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/usbback/usbstub.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/usbback/usbstub.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/usbstub.c 2011-02-01 14:50:44.000000000 +0100 @@ -56,7 +56,7 @@ struct vusb_port_id *find_portid_by_busi spin_lock_irqsave(&port_list_lock, flags); @@ -6750,8 +7109,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches goto out; /* invalid call */ } ---- head-2010-05-12.orig/drivers/xen/usbback/xenbus.c 2010-03-24 15:09:08.000000000 +0100 -+++ head-2010-05-12/drivers/xen/usbback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/usbback/xenbus.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -112,7 +112,7 @@ again: */ portid = find_portid(usbif->domid, usbif->handle, i); @@ -6788,7 +7147,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches err = xenbus_scanf(XBT_NIL, dev->nodename, "num-ports", "%d", 
&num_ports); -@@ -259,7 +259,7 @@ static int connect_rings(usbif_t *usbif) +@@ -260,7 +260,7 @@ static int connect_rings(usbif_t *usbif) static void frontend_changed(struct xenbus_device *dev, enum xenbus_state frontend_state) { @@ -6797,8 +7156,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int err; switch (frontend_state) { ---- head-2010-05-12.orig/drivers/xen/usbfront/xenbus.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/usbfront/xenbus.c 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/usbfront/xenbus.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/xenbus.c 2011-02-01 14:50:44.000000000 +0100 @@ -187,7 +187,7 @@ out: static int connect(struct xenbus_device *dev) @@ -6842,9 +7201,94 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct usb_hcd *hcd = info_to_hcd(info); destroy_rings(info); ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:25:21.000000000 +0100 -@@ -91,6 +91,11 @@ static int xenbus_probe_frontend(const c +--- head-2011-03-17.orig/drivers/xen/util.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/util.c 2011-02-01 14:50:44.000000000 +0100 +@@ -1,20 +1,74 @@ + #include + #include ++#include ++#include + #include + +-struct class *get_xen_class(void) ++static struct class *_get_xen_class(void) + { + static struct class *xen_class; ++ static DEFINE_MUTEX(xc_mutex); + +- if (xen_class) +- return xen_class; +- +- xen_class = class_create(THIS_MODULE, "xen"); +- if (IS_ERR(xen_class)) { ++ mutex_lock(&xc_mutex); ++ if (IS_ERR_OR_NULL(xen_class)) ++ xen_class = class_create(THIS_MODULE, "xen"); ++ mutex_unlock(&xc_mutex); ++ if (IS_ERR(xen_class)) + pr_err("failed to create xen sysfs class\n"); +- xen_class = NULL; +- } + + return xen_class; + } ++ ++struct class *get_xen_class(void) ++{ ++ struct class *class = _get_xen_class(); ++ ++ return !IS_ERR(class) ? class : NULL; ++} + EXPORT_SYMBOL_GPL(get_xen_class); ++ ++static void xcdev_release(struct device *dev) ++{ ++ kfree(dev); ++} ++ ++struct device *xen_class_device_create(struct device_type *type, ++ struct device *parent, ++ dev_t devt, void *drvdata, ++ const char *fmt, ...) ++{ ++ struct device *dev; ++ int err; ++ ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ if (dev) { ++ va_list vargs; ++ ++ va_start(vargs, fmt); ++ err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); ++ va_end(vargs); ++ } else ++ err = -ENOMEM; ++ ++ if (!err) { ++ dev->devt = devt; ++ dev->class = _get_xen_class(); ++ if (IS_ERR(dev->class)) ++ err = PTR_ERR(dev->class); ++ } ++ ++ if (!err) { ++ dev->type = type; ++ dev->parent = parent; ++ dev_set_drvdata(dev, drvdata); ++ dev->release = xcdev_release; ++ err = device_register(dev); ++ if (!err) ++ return dev; ++ put_device(dev); ++ } else ++ kfree(dev); ++ ++ return ERR_PTR(err); ++} ++EXPORT_SYMBOL_GPL(xen_class_device_create); +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:50:44.000000000 +0100 +@@ -92,6 +92,11 @@ static int xenbus_probe_frontend(const c static void xenbus_dev_shutdown(struct device *_dev); @@ -6856,7 +7300,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* If something in array of ids matches this device, return it. 
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) -@@ -227,6 +232,10 @@ static struct xen_bus_type xenbus_fronte +@@ -228,6 +233,10 @@ static struct xen_bus_type xenbus_fronte #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif @@ -6867,7 +7311,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches }, #if defined(CONFIG_XEN) || defined(MODULE) .dev = { -@@ -763,6 +772,9 @@ void xenbus_dev_changed(const char *node +@@ -767,6 +776,9 @@ void xenbus_dev_changed(const char *node kfree(root); } @@ -6877,7 +7321,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) -@@ -778,7 +790,11 @@ static struct xenbus_watch fe_watch = { +@@ -782,7 +794,11 @@ static struct xenbus_watch fe_watch = { .callback = frontend_changed, }; @@ -6889,7 +7333,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { int err = 0; struct xenbus_driver *drv; -@@ -791,13 +807,18 @@ static int suspend_dev(struct device *de +@@ -795,13 +811,18 @@ static int suspend_dev(struct device *de drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) @@ -6899,8 +7343,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches err = drv->suspend(xdev); +#endif if (err) - printk(KERN_WARNING - "xenbus: suspend %s failed: %i\n", dev_name(dev), err); + pr_warning("xenbus: suspend %s failed: %i\n", + dev_name(dev), err); return 0; } @@ -6908,8 +7352,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; -@@ -818,8 +839,13 @@ static int suspend_cancel_dev(struct dev - dev_name(dev), err); +@@ -821,8 +842,13 @@ static int suspend_cancel_dev(struct dev + dev_name(dev), err); return 0; } +#endif @@ -6938,31 +7382,41 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches /* A flag to determine if xenstored is 'ready' (i.e. 
has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:25:21.000000000 +0100 -@@ -73,7 +73,7 @@ static int read_frontend_details(struct - } +@@ -995,13 +1023,6 @@ static int xsd_port_read(char *page, cha + #endif - /* backend/// => -- */ --static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) -+static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) + #if defined(CONFIG_XEN_XENBUS_DEV) || defined(MODULE) +-static int xb_free_port(evtchn_port_t port) +-{ +- struct evtchn_close close; +- close.port = port; +- return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); +-} +- + int xenbus_conn(domid_t remote_dom, unsigned long *grant_ref, evtchn_port_t *local_port) { - int domid, err; - const char *devid, *type, *frontend; -@@ -103,8 +103,8 @@ static int backend_bus_id(char bus_id[BU - if (err) - return err; + struct evtchn_alloc_unbound alloc_unbound; +@@ -1015,7 +1036,7 @@ int xenbus_conn(domid_t remote_dom, unsi + remove_xen_proc_entry("xsd_port"); + #endif -- if (snprintf(bus_id, BUS_ID_SIZE, -- "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE) -+ if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", -+ typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) - return -ENOSPC; +- rc = xb_free_port(xen_store_evtchn); ++ rc = close_evtchn(xen_store_evtchn); + if (rc != 0) + goto fail0; + +@@ -1041,7 +1062,7 @@ int xenbus_conn(domid_t remote_dom, unsi return 0; - } ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_xs.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_xs.c 2010-03-24 15:25:21.000000000 +0100 -@@ -723,6 +723,10 @@ void xs_resume(void) + + fail1: +- rc2 = xb_free_port(xen_store_evtchn); ++ rc2 = close_evtchn(xen_store_evtchn); + if (rc2 != 0) + pr_warning("XENBUS: Error freeing xenstore event channel:" + " %d\n", rc2); +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_xs.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_xs.c 2011-02-01 14:50:44.000000000 +0100 +@@ -718,6 +718,10 @@ void xs_resume(void) struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; @@ -6973,8 +7427,8 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); ---- head-2010-05-12.orig/include/Kbuild 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/include/Kbuild 2010-03-24 15:25:21.000000000 +0100 +--- head-2011-03-17.orig/include/Kbuild 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/include/Kbuild 2011-02-01 14:50:44.000000000 +0100 @@ -8,6 +8,5 @@ header-y += mtd/ header-y += rdma/ header-y += video/ @@ -6982,13 +7436,31 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches -header-y += xen/public/ header-y += xen/ header-y += scsi/ ---- head-2010-05-12.orig/include/xen/Kbuild 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/include/xen/Kbuild 2010-03-24 15:25:21.000000000 +0100 -@@ -1 +1 @@ +--- head-2011-03-17.orig/include/xen/Kbuild 2011-01-31 14:31:28.000000000 +0100 ++++ head-2011-03-17/include/xen/Kbuild 2011-02-01 14:50:44.000000000 +0100 +@@ -1,3 +1,2 @@ -header-y += evtchn.h -+header-y += public/ ---- head-2010-05-12.orig/include/xen/evtchn.h 2010-03-24 15:17:58.000000000 +0100 -+++ 
head-2010-05-12/include/xen/evtchn.h 2010-03-31 14:37:09.000000000 +0200 + header-y += privcmd.h + header-y += public/ +--- head-2011-03-17.orig/include/xen/driver_util.h 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/include/xen/driver_util.h 2011-02-01 14:50:44.000000000 +0100 +@@ -1,8 +1,14 @@ + #ifndef __XEN_DRIVER_UTIL_H__ + #define __XEN_DRIVER_UTIL_H__ + ++#include + #include + + extern struct class *get_xen_class(void); ++extern struct device *xen_class_device_create(struct device_type *, ++ struct device *parent, ++ dev_t devt, void *drvdata, ++ const char *fmt, ...) ++ __printf(5, 6); + + #endif /* __XEN_DRIVER_UTIL_H__ */ +--- head-2011-03-17.orig/include/xen/evtchn.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/include/xen/evtchn.h 2011-02-01 14:50:44.000000000 +0100 @@ -113,9 +113,6 @@ void irq_resume(void); /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); @@ -7007,13 +7479,26 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); ---- head-2010-05-12.orig/include/xen/xenbus.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/include/xen/xenbus.h 2010-03-24 15:25:21.000000000 +0100 +@@ -163,6 +161,12 @@ static inline void notify_remote_via_evt + VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); + } + ++static inline int close_evtchn(int port) ++{ ++ struct evtchn_close close = { .port = port }; ++ return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); ++} ++ + /* + * Use these to access the event channel underlying the IRQ handle returned + * by bind_*_to_irqhandler(). +--- head-2011-03-17.orig/include/xen/xenbus.h 2011-02-02 16:58:42.000000000 +0100 ++++ head-2011-03-17/include/xen/xenbus.h 2011-02-02 16:59:07.000000000 +0100 @@ -104,8 +104,12 @@ struct xenbus_driver { void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); -+#if !defined(CONFIG_XEN) && !defined(MODULE) ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) + int (*suspend)(struct xenbus_device *dev, pm_message_t state); +#else int (*suspend)(struct xenbus_device *dev); @@ -7022,9 +7507,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); struct device_driver driver; ---- head-2010-05-12.orig/lib/swiotlb-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/lib/swiotlb-xen.c 2010-03-24 15:25:21.000000000 +0100 -@@ -39,8 +39,8 @@ int swiotlb; +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 14:50:44.000000000 +0100 +@@ -47,8 +47,8 @@ int swiotlb; int swiotlb_force; /* @@ -7035,7 +7520,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * API. 
*/ static char *io_tlb_start, *io_tlb_end; -@@ -159,7 +159,7 @@ dma_addr_t swiotlb_phys_to_bus(struct de +@@ -167,7 +167,7 @@ dma_addr_t swiotlb_phys_to_bus(struct de return phys_to_machine(paddr); } @@ -7044,7 +7529,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { return machine_to_phys(baddr); } -@@ -170,9 +170,15 @@ static dma_addr_t swiotlb_virt_to_bus(st +@@ -178,9 +178,15 @@ static dma_addr_t swiotlb_virt_to_bus(st return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); } @@ -7062,7 +7547,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) -@@ -307,7 +313,7 @@ static void swiotlb_bounce(phys_addr_t p +@@ -315,7 +321,7 @@ static void swiotlb_bounce(phys_addr_t p unsigned long flags; while (size) { @@ -7071,7 +7556,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches local_irq_save(flags); buffer = kmap_atomic(pfn_to_page(pfn), -@@ -441,7 +447,7 @@ found: +@@ -449,7 +455,7 @@ found: * dma_addr is the kernel virtual address of the bounce buffer to unmap. */ static void @@ -7080,7 +7565,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches { unsigned long flags; int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; -@@ -521,7 +527,7 @@ swiotlb_full(struct device *dev, size_t +@@ -544,7 +550,7 @@ swiotlb_full(struct device *dev, size_t * PCI address to use is returned. * * Once the device is given the dma address, the device owns this memory until @@ -7089,7 +7574,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches */ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, -@@ -535,7 +541,7 @@ dma_addr_t swiotlb_map_page(struct devic +@@ -558,7 +564,7 @@ dma_addr_t swiotlb_map_page(struct devic BUG_ON(dir == DMA_NONE); /* @@ -7098,7 +7583,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * we can safely return the device addr and not worry about bounce * buffering it. */ -@@ -560,23 +566,32 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); +@@ -583,23 +589,32 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); /* * Unmap a single streaming mode DMA translation. The dma_addr and size must @@ -7139,7 +7624,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } EXPORT_SYMBOL_GPL(swiotlb_unmap_page); -@@ -584,7 +599,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); +@@ -607,7 +622,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. * @@ -7148,9 +7633,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * using the cpu, yet do not wish to teardown the PCI dma mapping, you must * call this function before doing so. 
At the next point you give the PCI dma * address back to the card, you must first perform a -@@ -594,9 +609,10 @@ void - swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) +@@ -617,9 +632,10 @@ static void + swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, int target) { - char *dma_addr = swiotlb_bus_to_virt(dev_addr); + char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); @@ -7158,45 +7643,22 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches BUG_ON(dir == DMA_NONE); + if (is_swiotlb_buffer(dev_addr)) - sync_single(hwdev, dma_addr, size, dir); + sync_single(hwdev, dma_addr, size, dir, target); } -@@ -606,9 +622,10 @@ void - swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) - { -- char *dma_addr = swiotlb_bus_to_virt(dev_addr); -+ char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); - - BUG_ON(dir == DMA_NONE); -+ - if (is_swiotlb_buffer(dev_addr)) - sync_single(hwdev, dma_addr, size, dir); - } -@@ -619,11 +636,7 @@ swiotlb_sync_single_range_for_cpu(struct - unsigned long offset, size_t size, - enum dma_data_direction dir) +@@ -648,11 +664,7 @@ swiotlb_sync_single_range(struct device + unsigned long offset, size_t size, + int dir, int target) { - char *dma_addr = swiotlb_bus_to_virt(dev_addr); - - BUG_ON(dir == DMA_NONE); - if (is_swiotlb_buffer(dev_addr)) -- sync_single(hwdev, dma_addr + offset, size, dir); -+ swiotlb_sync_single_for_cpu(hwdev, dev_addr + offset, size, dir); +- sync_single(hwdev, dma_addr + offset, size, dir, target); ++ swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target); } - EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); -@@ -632,17 +645,13 @@ swiotlb_sync_single_range_for_device(str - unsigned long offset, size_t size, - enum dma_data_direction dir) - { -- char *dma_addr = swiotlb_bus_to_virt(dev_addr); -- -- BUG_ON(dir == DMA_NONE); -- if (is_swiotlb_buffer(dev_addr)) -- sync_single(hwdev, dma_addr + offset, size, dir); -+ swiotlb_sync_single_for_device(hwdev, dev_addr + offset, size, dir); - } - EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); + void +@@ -677,7 +689,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_ra /* * Map a set of buffers described by scatterlist in streaming mode for DMA. @@ -7205,7 +7667,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * interface. Here the scatter gather list elements are each tagged with the * appropriate dma address and length. They are obtained via * sg_dma_{address,length}(SG). -@@ -653,7 +662,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_ra +@@ -688,7 +700,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_ra * The routine returns the number of addr/length pairs actually * used, at most nents. * @@ -7214,7 +7676,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches * same here. */ int -@@ -706,7 +715,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); +@@ -741,7 +753,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); /* * Unmap a set of streaming mode DMA translations. 
Again, cpu read rules @@ -7223,7 +7685,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches */ void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, -@@ -717,13 +726,9 @@ swiotlb_unmap_sg_attrs(struct device *hw +@@ -752,13 +764,9 @@ swiotlb_unmap_sg_attrs(struct device *hw BUG_ON(dir == DMA_NONE); @@ -7240,7 +7702,7 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches } EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); -@@ -749,13 +754,9 @@ swiotlb_sync_sg_for_cpu(struct device *h +@@ -784,13 +792,9 @@ swiotlb_sync_sg(struct device *hwdev, st struct scatterlist *sg; int i; @@ -7249,36 +7711,18 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches - for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != sg_phys(sg)) - sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), -- sg->dma_length, dir); -- } + for_each_sg(sgl, sg, nelems, i) -+ swiotlb_sync_single_for_cpu(hwdev, sg->dma_address, -+ sg->dma_length, dir); - } - EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); - -@@ -766,13 +767,9 @@ swiotlb_sync_sg_for_device(struct device - struct scatterlist *sg; - int i; - -- BUG_ON(dir == DMA_NONE); -- -- for_each_sg(sgl, sg, nelems, i) { -- if (sg->dma_address != sg_phys(sg)) -- sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), -- sg->dma_length, dir); ++ swiotlb_sync_single(hwdev, sg->dma_address, + sg->dma_length, dir, target); - } -+ for_each_sg(sgl, sg, nelems, i) -+ swiotlb_sync_single_for_device(hwdev, sg->dma_address, -+ sg->dma_length, dir); } - EXPORT_SYMBOL(swiotlb_sync_sg_for_device); ---- head-2010-05-12.orig/mm/init-mm.c 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/mm/init-mm.c 2010-03-24 15:25:21.000000000 +0100 -@@ -8,6 +8,10 @@ - #include - #include + void +--- head-2011-03-17.orig/mm/init-mm.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/mm/init-mm.c 2011-02-01 14:50:44.000000000 +0100 +@@ -13,6 +13,10 @@ + #define INIT_MM_CONTEXT(name) + #endif +#ifdef CONFIG_X86_XEN +#define swapper_pg_dir ((pgd_t *)NULL) @@ -7287,9 +7731,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches struct mm_struct init_mm = { .mm_rb = RB_ROOT, .pgd = swapper_pg_dir, ---- head-2010-05-12.orig/mm/memory.c 2010-04-15 09:55:57.000000000 +0200 -+++ head-2010-05-12/mm/memory.c 2010-04-15 10:08:42.000000000 +0200 -@@ -1432,7 +1432,7 @@ int __get_user_pages(struct task_struct +--- head-2011-03-17.orig/mm/memory.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/mm/memory.c 2011-02-01 14:50:44.000000000 +0100 +@@ -1522,7 +1522,7 @@ int __get_user_pages(struct task_struct vmas[i] = vma; i++; start += PAGE_SIZE; @@ -7298,9 +7742,9 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches continue; } } ---- head-2010-05-12.orig/mm/page_alloc.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/mm/page_alloc.c 2010-03-24 15:25:21.000000000 +0100 -@@ -611,6 +611,7 @@ static void __free_pages_ok(struct page +--- head-2011-03-17.orig/mm/page_alloc.c 2011-02-08 10:05:20.000000000 +0100 ++++ head-2011-03-17/mm/page_alloc.c 2011-02-01 14:50:44.000000000 +0100 +@@ -649,6 +649,7 @@ static bool free_pages_prepare(struct pa #ifdef CONFIG_XEN if (PageForeign(page)) { @@ -7308,11 +7752,3 @@ Automatically created from "patches.kernel.org/patch-2.6.31" by xen-port-patches PageForeignDestructor(page, order); return; } -@@ -1119,6 +1120,7 @@ void free_hot_cold_page(struct page *pag - - #ifdef CONFIG_XEN - if 
(PageForeign(page)) { -+ WARN_ON(wasMlocked); - PageForeignDestructor(page, 0); - return; - } diff --git a/patches.xen/xen3-patch-2.6.32 b/patches.xen/xen3-patch-2.6.32 index bc6c42a..7664f0a 100644 --- a/patches.xen/xen3-patch-2.6.32 +++ b/patches.xen/xen3-patch-2.6.32 @@ -7,8 +7,8 @@ Patch-mainline: 2.6.32 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches.py ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:54:13.000000000 +0100 @@ -20,18 +20,15 @@ #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) #define __AUDIT_ARCH_LE 0x40000000 @@ -23,10 +23,10 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .macro IA32_ARG_FIXUP noebp=0 movl %edi,%r8d .if \noebp -+ jmp ia32_common ++ jmp .Lia32_common .else movl %ebp,%r9d -+ia32_common: ++.Lia32_common: .endif xchg %ecx,%esi movl %ebx,%edi @@ -48,10 +48,12 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .endm /* -@@ -144,17 +141,7 @@ ENTRY(ia32_sysenter_target) +@@ -142,19 +139,7 @@ ENTRY(ia32_sysenter_target) + orl $TS_COMPAT,TI_status(%r10) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) jnz sysenter_tracesys - cmpl $(IA32_NR_syscalls-1),%eax - ja ia32_badsys +- cmpl $(IA32_NR_syscalls-1),%eax +- ja ia32_badsys -sysenter_do_call: - IA32_ARG_FIXUP -sysenter_dispatch: @@ -63,11 +65,11 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches - testl $_TIF_ALLWORK_MASK,TI_flags(%r10) - jnz sysexit_audit - jmp int_ret_from_sys_call -+ jmp ia32_do_call ++ jmp .Lia32_check_call #ifdef CONFIG_AUDITSYSCALL .macro auditsys_entry_common -@@ -175,31 +162,10 @@ sysenter_dispatch: +@@ -175,48 +160,11 @@ sysenter_dispatch: movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */ .endm @@ -96,20 +98,28 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches - -sysexit_audit: - auditsys_exit sysexit_from_sys_call -+ jmp ia32_dispatch +-#endif +- +-sysenter_tracesys: +-#ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) +- jz sysenter_auditsys ++ jmp .Lia32_dispatch #endif - - sysenter_tracesys: -@@ -216,7 +182,7 @@ sysenter_tracesys: - RESTORE_REST - cmpl $(IA32_NR_syscalls-1),%eax - ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ +- SAVE_REST +- CLEAR_RREGS +- movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ +- movq %rsp,%rdi /* &pt_regs -> arg1 */ +- call syscall_trace_enter +- LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ +- RESTORE_REST +- cmpl $(IA32_NR_syscalls-1),%eax +- ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ - jmp sysenter_do_call -+ jmp ia32_do_call CFI_ENDPROC ENDPROC(ia32_sysenter_target) -@@ -272,24 +238,13 @@ ENTRY(ia32_cstar_target) +@@ -272,24 +220,13 @@ ENTRY(ia32_cstar_target) ja ia32_badsys cstar_do_call: IA32_ARG_FIXUP 1 @@ -131,11 +141,11 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches - -sysretl_audit: - auditsys_exit sysretl_from_sys_call, RCX /* user %ebp in RCX slot */ -+ jmp ia32_dispatch ++ jmp .Lia32_dispatch #endif cstar_tracesys: -@@ -299,7 +254,7 @@ cstar_tracesys: +@@ -299,7 +236,7 @@ 
cstar_tracesys: #endif xchgl %r9d,%ebp SAVE_REST @@ -144,19 +154,31 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter -@@ -367,9 +322,11 @@ ENTRY(ia32_syscall) +@@ -363,15 +300,23 @@ ENTRY(ia32_syscall) + orl $TS_COMPAT,TI_status(%r10) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) + jnz ia32_tracesys ++.Lia32_check_call: + cmpl $(IA32_NR_syscalls-1),%eax ja ia32_badsys ia32_do_call: IA32_ARG_FIXUP -+ia32_dispatch: ++.Lia32_dispatch: call *ia32_sys_call_table(,%rax,8) # xxx: rip relative ia32_sysret: movq %rax,RAX-ARGOFFSET(%rsp) + CLEAR_RREGS -ARGOFFSET jmp int_ret_from_sys_call ++sysenter_tracesys: ++#ifdef CONFIG_AUDITSYSCALL ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) ++ jz sysenter_auditsys ++#endif ia32_tracesys: -@@ -387,8 +344,8 @@ END(ia32_syscall) + SAVE_REST + CLEAR_RREGS +@@ -387,8 +332,8 @@ END(ia32_syscall) ia32_badsys: movq $0,ORIG_RAX-ARGOFFSET(%rsp) @@ -167,7 +189,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches quiet_ni_syscall: movq $-ENOSYS,%rax -@@ -482,7 +439,7 @@ ia32_sys_call_table: +@@ -482,7 +427,7 @@ ia32_sys_call_table: .quad sys_mkdir .quad sys_rmdir /* 40 */ .quad sys_dup @@ -176,15 +198,28 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .quad compat_sys_times .quad quiet_ni_syscall /* old prof syscall holder */ .quad sys_brk /* 45 */ -@@ -776,5 +733,5 @@ ia32_sys_call_table: +@@ -776,5 +721,5 @@ ia32_sys_call_table: .quad compat_sys_preadv .quad compat_sys_pwritev .quad compat_sys_rt_tgsigqueueinfo /* 335 */ - .quad sys_perf_counter_open + .quad sys_perf_event_open ia32_syscall_end: ---- head-2010-05-25.orig/arch/x86/include/asm/time.h 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/asm/time.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/nmi.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/nmi.h 2011-02-16 09:49:15.000000000 +0100 +@@ -18,7 +18,10 @@ struct ctl_table; + extern int proc_nmi_enabled(struct ctl_table *, int , + void __user *, size_t *, loff_t *); + extern int unknown_nmi_panic; ++#endif + ++#if defined(CONFIG_X86_LOCAL_APIC) || \ ++ (defined(CONFIG_XEN_SMPBOOT) && CONFIG_XEN_COMPAT >= 0x030200) + void arch_trigger_all_cpu_backtrace(void); + #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace + #endif +--- head-2011-03-17.orig/arch/x86/include/asm/time.h 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/time.h 2011-02-01 14:54:13.000000000 +0100 @@ -8,8 +8,9 @@ extern void hpet_time_init(void); extern void time_init(void); @@ -196,8 +231,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches extern int xen_update_persistent_clock(void); #endif ---- head-2010-05-25.orig/arch/x86/include/asm/uv/uv_hub.h 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/uv/uv_hub.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/asm/uv/uv_hub.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/uv/uv_hub.h 2011-02-01 14:54:13.000000000 +0100 @@ -11,7 +11,7 @@ #ifndef _ASM_X86_UV_UV_HUB_H #define _ASM_X86_UV_UV_HUB_H @@ -207,9 +242,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #include #include #include ---- 
head-2010-05-25.orig/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/agp.h 2010-03-24 15:32:27.000000000 +0100 -@@ -28,10 +28,7 @@ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/agp.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/agp.h 2011-02-01 14:54:13.000000000 +0100 +@@ -45,10 +45,7 @@ */ #define flush_agp_cache() wbinvd() @@ -221,8 +256,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) ({ \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/desc.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/desc.h 2011-02-01 14:54:13.000000000 +0100 @@ -312,7 +312,14 @@ static inline void load_LDT(mm_context_t static inline unsigned long get_desc_base(const struct desc_struct *desc) @@ -252,8 +287,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifndef CONFIG_X86_NO_IDT static inline void _set_gate(int gate, unsigned type, void *addr, unsigned dpl, unsigned ist, unsigned seg) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/dma-mapping.h 2010-03-24 15:14:47.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/dma-mapping.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/dma-mapping.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/dma-mapping.h 2011-02-01 14:54:13.000000000 +0100 @@ -1,11 +1,24 @@ #ifndef _ASM_X86_DMA_MAPPING_H_ @@ -282,8 +317,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches extern int range_straddles_page_boundary(paddr_t p, size_t size); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:54:13.000000000 +0100 @@ -139,6 +139,9 @@ enum fixed_addresses { #ifdef CONFIG_X86_32 FIX_WP_TEST, @@ -294,26 +329,27 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches __end_of_fixed_addresses }; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/hypervisor.h 2010-03-24 15:32:27.000000000 +0100 -@@ -70,6 +70,7 @@ extern start_info_t *xen_start_info; +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-03-11 11:13:19.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-03-11 11:14:34.000000000 +0100 +@@ -71,6 +71,7 @@ extern start_info_t *xen_start_info; #endif #define init_hypervisor(c) ((void)((c)->x86_hyper_vendor = X86_HYPER_VENDOR_XEN)) +#define init_hypervisor_platform() init_hypervisor(&boot_cpu_data) + DECLARE_PER_CPU(struct vcpu_runstate_info, runstate); struct vcpu_runstate_info *setup_runstate_area(unsigned int cpu); - -@@ -351,6 +352,6 @@ MULTI_grant_table_op(multicall_entry_t * +@@ -354,7 +355,7 @@ 
MULTI_grant_table_op(multicall_entry_t * #endif -#define uvm_multi(cpumask) ((unsigned long)cpus_addr(cpumask) | UVMF_MULTI) +#define uvm_multi(cpumask) ((unsigned long)cpumask_bits(cpumask) | UVMF_MULTI) - #endif /* __HYPERVISOR_H__ */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irqflags.h 2010-03-24 15:32:27.000000000 +0100 + #ifdef LINUX + /* drivers/staging/ use Windows-style types, including VOID */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:54:13.000000000 +0100 @@ -1,7 +1,7 @@ #ifndef _X86_IRQFLAGS_H_ #define _X86_IRQFLAGS_H_ @@ -323,8 +359,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifndef __ASSEMBLY__ /* ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/mmu_context.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:54:13.000000000 +0100 @@ -88,12 +88,12 @@ static inline void switch_mm(struct mm_s !PagePinned(virt_to_page(next->pgd))); @@ -349,8 +385,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:54:13.000000000 +0100 @@ -151,7 +151,11 @@ static inline int __pcibus_to_node(const static inline const struct cpumask * cpumask_of_pcibus(const struct pci_bus *bus) @@ -364,8 +400,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:54:13.000000000 +0100 @@ -53,16 +53,6 @@ extern struct list_head pgd_list; #define pte_update(mm, addr, ptep) do { } while (0) #define pte_update_defer(mm, addr, ptep) do { } while (0) @@ -395,7 +431,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static inline int pmd_large(pmd_t pte) { return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == -@@ -363,7 +358,7 @@ static inline unsigned long pmd_page_vad +@@ -362,7 +357,7 @@ static inline unsigned long pmd_page_vad * this macro returns the index of the entry in the pmd page which would * control the given virtual address */ @@ -404,7 +440,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); } -@@ -383,7 +378,7 @@ static inline unsigned pmd_index(unsigne +@@ -382,7 +377,7 @@ static inline 
unsigned pmd_index(unsigne * this function returns the index of the entry in the pte page which would * control the given virtual address */ @@ -413,7 +449,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); } -@@ -439,11 +434,6 @@ static inline pmd_t *pmd_offset(pud_t *p +@@ -438,11 +433,6 @@ static inline pmd_t *pmd_offset(pud_t *p return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); } @@ -425,7 +461,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static inline int pud_large(pud_t pud) { return (__pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == -@@ -479,7 +469,7 @@ static inline unsigned long pgd_page_vad +@@ -478,7 +468,7 @@ static inline unsigned long pgd_page_vad #define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) /* to find an entry in a page-table-directory. */ @@ -434,7 +470,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); } -@@ -600,7 +590,7 @@ extern int ptep_clear_flush_young(struct +@@ -599,7 +589,7 @@ extern int ptep_clear_flush_young(struct if (!pte_none(__res) && \ ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte(0), \ @@ -443,8 +479,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches UVMF_INVLPG))) { \ __xen_pte_clear(__ptep); \ flush_tlb_page(vma, addr); \ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_types.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_types.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_types.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_types.h 2011-02-01 14:54:13.000000000 +0100 @@ -334,6 +334,7 @@ static inline pteval_t pte_flags(pte_t p typedef struct page *pgtable_t; @@ -468,8 +504,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches struct seq_file; extern void arch_report_meminfo(struct seq_file *m); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:46:07.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:46:37.000000000 +0100 @@ -27,6 +27,7 @@ struct mm_struct; #include #include @@ -478,7 +514,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #include #include -@@ -411,7 +412,17 @@ extern unsigned long kernel_eflags; +@@ -421,7 +422,17 @@ extern unsigned long kernel_eflags; extern asmlinkage void ignore_sysret(void); #else /* X86_64 */ #ifdef CONFIG_CC_STACKPROTECTOR @@ -497,7 +533,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #endif #endif /* X86_64 */ -@@ -647,13 +658,23 @@ static inline void cpu_relax(void) +@@ -657,13 +668,23 @@ static inline void cpu_relax(void) rep_nop(); } @@ -524,7 +560,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } static inline void __monitor(const void *eax, unsigned long ecx, -@@ -944,4 +965,35 @@ extern void start_thread(struct pt_regs +@@ -954,4 +975,35 @@ extern void start_thread(struct pt_regs extern int get_tsc_mode(unsigned long adr); 
extern int set_tsc_mode(unsigned int val); @@ -561,7 +597,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches + #endif /* _ASM_X86_PROCESSOR_H */ --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/setup.h 2010-03-24 15:32:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/setup.h 2011-02-01 14:54:13.000000000 +0100 @@ -0,0 +1,8 @@ +#ifndef __ASSEMBLY__ + @@ -571,8 +607,57 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches +#endif + +#include_next +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:08:16.000000000 +0100 +@@ -24,8 +24,8 @@ extern unsigned int num_processors; + DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); + DECLARE_PER_CPU(cpumask_t, cpu_core_map); + DECLARE_PER_CPU(u16, cpu_llc_id); +-#endif + DECLARE_PER_CPU(int, cpu_number); ++#endif + + static inline const struct cpumask *cpu_sibling_mask(int cpu) + { +@@ -124,7 +124,6 @@ static inline void arch_send_call_functi + smp_ops.send_call_func_single_ipi(cpu); + } + +-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask + static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) + { + smp_ops.send_call_func_ipi(mask); +@@ -170,27 +169,7 @@ static inline int num_booting_cpus(void) + + extern unsigned disabled_cpus __cpuinitdata; + +-#ifdef CONFIG_X86_32_SMP +-/* +- * This function is needed by all SMP systems. It must _always_ be valid +- * from the initial startup. We map APIC_BASE very early in page_setup(), +- * so this is correct in the x86 case. +- */ +-#define raw_smp_processor_id() (percpu_read(cpu_number)) +-#define safe_smp_processor_id() smp_processor_id() +- +-#elif defined(CONFIG_X86_64_SMP) +-#define raw_smp_processor_id() (percpu_read(cpu_number)) +- +-#define stack_smp_processor_id() \ +-({ \ +- struct thread_info *ti; \ +- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ +- ti->cpu; \ +-}) +-#define safe_smp_processor_id() smp_processor_id() +- +-#endif ++#include + + #ifdef CONFIG_X86_LOCAL_APIC + --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp-processor-id.h 2010-03-24 15:32:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp-processor-id.h 2011-02-01 14:54:13.000000000 +0100 @@ -0,0 +1,36 @@ +#ifndef _ASM_X86_SMP_PROCESSOR_ID_H +#define _ASM_X86_SMP_PROCESSOR_ID_H @@ -610,47 +695,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches +#endif /* SMP && !__ASSEMBLY__ */ + +#endif /* _ASM_X86_SMP_PROCESSOR_ID_H */ ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:32:27.000000000 +0100 -@@ -121,7 +121,6 @@ static inline void arch_send_call_functi - smp_ops.send_call_func_single_ipi(cpu); - } - --#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask - static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) - { - smp_ops.send_call_func_ipi(mask); -@@ -167,27 +166,7 @@ static inline int num_booting_cpus(void) - - extern unsigned disabled_cpus __cpuinitdata; - --#ifdef CONFIG_X86_32_SMP --/* -- * This function is needed by all SMP systems. It must _always_ be valid -- * from the initial startup. 
We map APIC_BASE very early in page_setup(), -- * so this is correct in the x86 case. -- */ --#define raw_smp_processor_id() (percpu_read(cpu_number)) --#define safe_smp_processor_id() smp_processor_id() -- --#elif defined(CONFIG_X86_64_SMP) --#define raw_smp_processor_id() (percpu_read(cpu_number)) -- --#define stack_smp_processor_id() \ --({ \ -- struct thread_info *ti; \ -- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ -- ti->cpu; \ --}) --#define safe_smp_processor_id() smp_processor_id() -- --#endif -+#include - - #ifdef CONFIG_X86_LOCAL_APIC - ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:06:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:07:45.000000000 +0100 @@ -30,7 +30,7 @@ void __switch_to_xtra(struct task_struct "movl %P[task_canary](%[next]), %%ebx\n\t" \ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" @@ -660,7 +706,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #define __switch_canary_iparam \ , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) #else /* CC_STACKPROTECTOR */ -@@ -149,33 +149,6 @@ do { \ +@@ -151,33 +151,6 @@ do { \ #endif #ifdef __KERNEL__ @@ -694,8 +740,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches extern void xen_load_gs_index(unsigned); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/tlbflush.h 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:54:13.000000000 +0100 @@ -74,9 +74,9 @@ static inline void reset_lazy_tlbstate(v #define local_flush_tlb() __flush_tlb() @@ -709,9 +755,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #define flush_tlb() flush_tlb_current_task() ---- head-2010-05-25.orig/arch/x86/kernel/Makefile 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/Makefile 2010-03-24 15:32:27.000000000 +0100 -@@ -132,8 +132,6 @@ ifeq ($(CONFIG_X86_64),y) +--- head-2011-03-17.orig/arch/x86/kernel/Makefile 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/Makefile 2011-02-01 14:54:13.000000000 +0100 +@@ -123,8 +123,6 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o @@ -720,9 +766,34 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches endif disabled-obj-$(CONFIG_XEN) := %_uv.o crash.o early-quirks.o hpet.o i8253.o \ ---- head-2010-05-25.orig/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:32:27.000000000 +0100 -@@ -79,6 +79,8 @@ unsigned long io_apic_irqs; +--- head-2011-03-17.orig/arch/x86/kernel/apic/hw_nmi.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/hw_nmi.c 2011-02-16 09:55:55.000000000 +0100 +@@ -25,6 +25,10 @@ u64 hw_nmi_get_sample_period(void) + #endif + + #ifdef arch_trigger_all_cpu_backtrace ++#ifdef CONFIG_XEN ++#include ++#endif ++ + /* For reliability, we're prepared to waste bits here. 
*/ + static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; + +@@ -45,7 +49,11 @@ void arch_trigger_all_cpu_backtrace(void + cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); + + printk(KERN_INFO "sending NMI to all CPUs:\n"); ++#ifndef CONFIG_XEN + apic->send_IPI_all(NMI_VECTOR); ++#else /* this works even without CONFIG_X86_LOCAL_APIC */ ++ xen_send_IPI_all(NMI_VECTOR); ++#endif + + /* Wait for up to 10 seconds for all CPUs to do the backtrace */ + for (i = 0; i < 10 * 1000; i++) { +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:54:13.000000000 +0100 +@@ -69,6 +69,8 @@ unsigned long io_apic_irqs; #endif /* CONFIG_XEN */ #define __apicdebuginit(type) static type __init @@ -731,7 +802,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * Is the SiS APIC rmw bug present ? -@@ -100,12 +102,24 @@ int nr_ioapic_registers[MAX_IO_APICS]; +@@ -90,12 +92,24 @@ int nr_ioapic_registers[MAX_IO_APICS]; struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; int nr_ioapics; @@ -756,7 +827,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #if defined (CONFIG_MCA) || defined (CONFIG_EISA) int mp_bus_id_to_type[MAX_MP_BUSSES]; #endif -@@ -132,15 +146,6 @@ static int __init parse_noapic(char *str +@@ -122,15 +136,6 @@ static int __init parse_noapic(char *str early_param("noapic", parse_noapic); #ifndef CONFIG_XEN @@ -772,7 +843,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches struct irq_pin_list { int apic, pin; struct irq_pin_list *next; -@@ -155,6 +160,11 @@ static struct irq_pin_list *get_one_free +@@ -145,6 +150,11 @@ static struct irq_pin_list *get_one_free return pin; } @@ -784,7 +855,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches struct irq_cfg { struct irq_pin_list *irq_2_pin; cpumask_var_t domain; -@@ -188,6 +198,12 @@ static struct irq_cfg irq_cfgx[NR_IRQS] +@@ -178,6 +188,12 @@ static struct irq_cfg irq_cfgx[NR_IRQS] [15] = { .vector = IRQ15_VECTOR, }, }; @@ -797,7 +868,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches int __init arch_early_irq_init(void) { struct irq_cfg *cfg; -@@ -205,7 +221,7 @@ int __init arch_early_irq_init(void) +@@ -195,7 +211,7 @@ int __init arch_early_irq_init(void) desc->chip_data = &cfg[i]; zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); @@ -806,7 +877,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches cpumask_setall(cfg[i].domain); } -@@ -231,17 +247,14 @@ static struct irq_cfg *get_one_free_irq_ +@@ -221,17 +237,14 @@ static struct irq_cfg *get_one_free_irq_ cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); if (cfg) { @@ -826,7 +897,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } } -@@ -455,13 +468,10 @@ static bool io_apic_level_ack_pending(st +@@ -445,13 +458,10 @@ static bool io_apic_level_ack_pending(st unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); @@ -841,7 +912,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches pin = entry->pin; reg = io_apic_read(entry->apic, 0x10 + pin*2); /* Is the remote IRR bit set? 
*/ -@@ -469,9 +479,6 @@ static bool io_apic_level_ack_pending(st +@@ -459,9 +469,6 @@ static bool io_apic_level_ack_pending(st spin_unlock_irqrestore(&ioapic_lock, flags); return true; } @@ -851,7 +922,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } spin_unlock_irqrestore(&ioapic_lock, flags); -@@ -543,72 +550,68 @@ static void ioapic_mask_entry(int apic, +@@ -533,72 +540,68 @@ static void ioapic_mask_entry(int apic, * shared ISA-space IRQs, so we have to support them. We are super * fast in the common case, and fast for shared ISA-space IRQs. */ @@ -860,8 +931,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches +add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) { - struct irq_pin_list *entry; -+ struct irq_pin_list **last, *entry; - +- - entry = cfg->irq_2_pin; - if (!entry) { - entry = get_one_free_irq_2_pin(node); @@ -875,7 +945,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches - entry->pin = pin; - return; - } -- ++ struct irq_pin_list **last, *entry; + - while (entry->next) { - /* not again, please */ + /* don't allow duplicates */ @@ -958,7 +1029,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches unsigned int reg; pin = entry->pin; reg = io_apic_read(entry->apic, 0x10 + pin * 2); -@@ -625,7 +628,6 @@ static void __unmask_IO_APIC_irq(struct +@@ -615,7 +618,6 @@ static void __unmask_IO_APIC_irq(struct io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); } @@ -966,7 +1037,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static void io_apic_sync(struct irq_pin_list *entry) { /* -@@ -641,11 +643,6 @@ static void __mask_IO_APIC_irq(struct ir +@@ -631,11 +633,6 @@ static void __mask_IO_APIC_irq(struct ir { io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); } @@ -978,7 +1049,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) { -@@ -658,7 +655,6 @@ static void __unmask_and_level_IO_APIC_i +@@ -648,7 +645,6 @@ static void __unmask_and_level_IO_APIC_i io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, IO_APIC_REDIR_LEVEL_TRIGGER, NULL); } @@ -986,15 +1057,15 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static void mask_IO_APIC_irq_desc(struct irq_desc *desc) { -@@ -719,6 +715,7 @@ static void clear_IO_APIC (void) +@@ -709,6 +705,7 @@ static void clear_IO_APIC (void) } #else #define add_pin_to_irq_node(cfg, node, apic, pin) +#define add_pin_to_irq_node_nopanic(cfg, node, apic, pin) 0 - #endif /* CONFIG_XEN */ + #endif /* !CONFIG_XEN */ #ifdef CONFIG_X86_32 -@@ -935,7 +932,7 @@ static int __init find_isa_irq_apic(int +@@ -925,7 +922,7 @@ static int __init find_isa_irq_apic(int */ static int EISA_ELCR(unsigned int irq) { @@ -1003,7 +1074,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches unsigned int port = 0x4d0 + (irq >> 3); return (inb(port) >> (irq & 7)) & 1; } -@@ -1547,7 +1544,7 @@ static void setup_IO_APIC_irq(int apic_i +@@ -1538,7 +1535,7 @@ static void setup_IO_APIC_irq(int apic_i } ioapic_register_intr(irq, desc, trigger); @@ -1012,7 +1083,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches disable_8259A_irq(irq); ioapic_write_entry(apic_id, pin, entry); -@@ -1775,12 +1772,8 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1766,12 +1763,8 @@ __apicdebuginit(void) 
print_IO_APIC(void if (!entry) continue; printk(KERN_DEBUG "IRQ%d ", irq); @@ -1026,7 +1097,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches printk("\n"); } -@@ -1924,7 +1917,7 @@ __apicdebuginit(void) print_PIC(void) +@@ -1915,7 +1908,7 @@ __apicdebuginit(void) print_PIC(void) unsigned int v; unsigned long flags; @@ -1035,7 +1106,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches return; printk(KERN_DEBUG "\nprinting PIC contents\n"); -@@ -1956,7 +1949,7 @@ __apicdebuginit(int) print_all_ICs(void) +@@ -1947,7 +1940,7 @@ __apicdebuginit(int) print_all_ICs(void) print_PIC(); /* don't print out if apic is not there */ @@ -1044,7 +1115,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches return 0; print_all_local_APICs(); -@@ -1990,6 +1983,10 @@ void __init enable_IO_APIC(void) +@@ -1981,6 +1974,10 @@ void __init enable_IO_APIC(void) spin_unlock_irqrestore(&ioapic_lock, flags); nr_ioapic_registers[apic] = reg_01.bits.entries+1; } @@ -1055,7 +1126,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifndef CONFIG_XEN for(apic = 0; apic < nr_ioapics; apic++) { int pin; -@@ -2049,6 +2046,9 @@ void disable_IO_APIC(void) +@@ -2038,6 +2035,9 @@ void disable_IO_APIC(void) */ clear_IO_APIC(); @@ -1065,7 +1136,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * If the i8259 is routed through an IOAPIC * Put that IOAPIC in virtual wire mode -@@ -2082,7 +2082,7 @@ void disable_IO_APIC(void) +@@ -2071,7 +2071,7 @@ void disable_IO_APIC(void) /* * Use virtual wire A mode when interrupt remapping is enabled. */ @@ -1074,7 +1145,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches disconnect_bsp_APIC(!intr_remapping_enabled && ioapic_i8259.pin != -1); } -@@ -2095,7 +2095,7 @@ void disable_IO_APIC(void) +@@ -2084,7 +2084,7 @@ void disable_IO_APIC(void) * by Matt Domsch Tue Dec 21 12:25:05 CST 1999 */ @@ -1083,7 +1154,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { union IO_APIC_reg_00 reg_00; physid_mask_t phys_id_present_map; -@@ -2104,9 +2104,8 @@ static void __init setup_ioapic_ids_from +@@ -2093,9 +2093,8 @@ static void __init setup_ioapic_ids_from unsigned char old_id; unsigned long flags; @@ -1094,7 +1165,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * Don't check I/O APIC IDs for xAPIC systems. They have * no meaning without the serial APIC bus. 
-@@ -2280,7 +2279,7 @@ static unsigned int startup_ioapic_irq(u +@@ -2269,7 +2268,7 @@ static unsigned int startup_ioapic_irq(u struct irq_cfg *cfg; spin_lock_irqsave(&ioapic_lock, flags); @@ -1103,7 +1174,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches disable_8259A_irq(irq); if (i8259A_irq_pending(irq)) was_pending = 1; -@@ -2292,7 +2291,6 @@ static unsigned int startup_ioapic_irq(u +@@ -2281,7 +2280,6 @@ static unsigned int startup_ioapic_irq(u return was_pending; } @@ -1111,7 +1182,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static int ioapic_retrigger_irq(unsigned int irq) { -@@ -2305,14 +2303,6 @@ static int ioapic_retrigger_irq(unsigned +@@ -2294,14 +2292,6 @@ static int ioapic_retrigger_irq(unsigned return 1; } @@ -1126,7 +1197,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * Level and edge triggered IO-APIC interrupts need different handling, -@@ -2350,13 +2340,9 @@ static void __target_IO_APIC_irq(unsigne +@@ -2339,13 +2329,9 @@ static void __target_IO_APIC_irq(unsigne struct irq_pin_list *entry; u8 vector = cfg->vector; @@ -1141,7 +1212,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches apic = entry->apic; pin = entry->pin; /* -@@ -2369,9 +2355,6 @@ static void __target_IO_APIC_irq(unsigne +@@ -2358,9 +2344,6 @@ static void __target_IO_APIC_irq(unsigne reg &= ~IO_APIC_REDIR_VECTOR_MASK; reg |= vector; io_apic_modify(apic, 0x10 + pin*2, reg); @@ -1151,7 +1222,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } } -@@ -2596,11 +2579,8 @@ atomic_t irq_mis_count; +@@ -2585,11 +2568,8 @@ atomic_t irq_mis_count; static void ack_apic_level(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); @@ -1163,7 +1234,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches struct irq_cfg *cfg; int do_unmask_irq = 0; -@@ -2613,31 +2593,28 @@ static void ack_apic_level(unsigned int +@@ -2602,31 +2582,28 @@ static void ack_apic_level(unsigned int } #endif @@ -1213,7 +1284,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * We must acknowledge the irq before we move it or the acknowledge will -@@ -2679,7 +2656,7 @@ static void ack_apic_level(unsigned int +@@ -2668,7 +2645,7 @@ static void ack_apic_level(unsigned int unmask_IO_APIC_irq_desc(desc); } @@ -1222,7 +1293,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); spin_lock(&ioapic_lock); -@@ -2687,26 +2664,15 @@ static void ack_apic_level(unsigned int +@@ -2676,26 +2653,15 @@ static void ack_apic_level(unsigned int __unmask_and_level_IO_APIC_irq(cfg); spin_unlock(&ioapic_lock); } @@ -1251,7 +1322,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } static void -@@ -2796,7 +2762,7 @@ static inline void init_IO_APIC_traps(vo +@@ -2785,7 +2751,7 @@ static inline void init_IO_APIC_traps(vo * so default to an old-fashioned 8259 * interrupt if we can.. */ @@ -1260,7 +1331,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches make_8259A_irq(irq); else /* Strange. Oh, well.. */ -@@ -3136,7 +3102,7 @@ out: +@@ -3125,7 +3091,7 @@ out: * the I/O APIC in all cases now. No actual device should request * it anyway. 
--macro */ @@ -1269,7 +1340,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches void __init setup_IO_APIC(void) { -@@ -3148,23 +3114,21 @@ void __init setup_IO_APIC(void) +@@ -3137,23 +3103,21 @@ void __init setup_IO_APIC(void) * calling enable_IO_APIC() is moved to setup_local_APIC for BP */ #endif @@ -1298,7 +1369,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } /* -@@ -3274,7 +3238,6 @@ static int __init ioapic_init_sysfs(void +@@ -3263,7 +3227,6 @@ static int __init ioapic_init_sysfs(void device_initcall(ioapic_init_sysfs); @@ -1306,7 +1377,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * Dynamic irq allocate and deallocation */ -@@ -3346,8 +3309,7 @@ void destroy_irq(unsigned int irq) +@@ -3335,8 +3298,7 @@ void destroy_irq(unsigned int irq) cfg = desc->chip_data; dynamic_irq_cleanup(irq); /* connect back irq_cfg */ @@ -1316,7 +1387,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches free_irte(irq); spin_lock_irqsave(&vector_lock, flags); -@@ -4025,9 +3987,13 @@ static int __io_apic_set_pci_routing(str +@@ -4014,9 +3976,13 @@ static int __io_apic_set_pci_routing(str /* * IRQs < 16 are already in the irq_2_pin[] map */ @@ -1332,7 +1403,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); -@@ -4056,11 +4022,28 @@ int io_apic_set_pci_routing(struct devic +@@ -4045,11 +4011,30 @@ int io_apic_set_pci_routing(struct devic return __io_apic_set_pci_routing(dev, irq, irq_attr); } @@ -1342,10 +1413,12 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches +u8 __init io_apic_unique_id(u8 id) +{ +#ifdef CONFIG_X86_32 ++#ifndef CONFIG_XEN + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return io_apic_get_unique_id(nr_ioapics, id); + else ++#endif + return id; +#else + int i; @@ -1363,9 +1436,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches +#endif +} - #ifdef CONFIG_X86_32 + #if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) int __init io_apic_get_unique_id(int ioapic, int apic_id) -@@ -4171,8 +4154,6 @@ int acpi_get_override_irq(int bus_irq, i +@@ -4158,8 +4143,6 @@ int acpi_get_override_irq(int bus_irq, i return 0; } @@ -1374,7 +1447,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifndef CONFIG_XEN /* * This function currently is only a helper for the i386 smp boot process where -@@ -4227,7 +4208,7 @@ void __init setup_ioapic_dest(void) +@@ -4214,7 +4197,7 @@ void __init setup_ioapic_dest(void) static struct resource *ioapic_resources; @@ -1383,7 +1456,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { unsigned long n; struct resource *res; -@@ -4243,15 +4224,13 @@ static struct resource * __init ioapic_s +@@ -4230,15 +4213,13 @@ static struct resource * __init ioapic_s mem = alloc_bootmem(n); res = (void *)mem; @@ -1405,7 +1478,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } ioapic_resources = res; -@@ -4265,7 +4244,7 @@ void __init ioapic_init_mappings(void) +@@ -4252,7 +4233,7 @@ void __init ioapic_init_mappings(void) struct resource *ioapic_res; int i; @@ -1414,7 +1487,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches for (i = 0; i < nr_ioapics; i++) { if 
(smp_found_config) { ioapic_phys = mp_ioapics[i].apicaddr; -@@ -4294,11 +4273,9 @@ fake_ioapic_page: +@@ -4281,11 +4262,9 @@ fake_ioapic_page: __fix_to_virt(idx), ioapic_phys); idx++; @@ -1429,7 +1502,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } } -@@ -4320,3 +4297,78 @@ void __init ioapic_insert_resources(void +@@ -4307,3 +4286,78 @@ void __init ioapic_insert_resources(void } } #endif /* !CONFIG_XEN */ @@ -1508,20 +1581,39 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches + + nr_ioapics++; +} ---- head-2010-05-25.orig/arch/x86/kernel/cpu/Makefile 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/Makefile 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:57:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:57:40.000000000 +0100 +@@ -11,6 +11,16 @@ DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS] + static inline void __send_IPI_one(unsigned int cpu, int vector) + { + int irq = per_cpu(ipi_to_irq, cpu)[vector]; ++ ++ if (vector == NMI_VECTOR) { ++ static int __read_mostly printed; ++ int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL); ++ ++ if (rc && !printed) ++ pr_warning("Unable (%d) to send NMI to CPU#%u\n", ++ printed = rc, cpu); ++ return; ++ } + BUG_ON(irq < 0); + notify_remote_via_irq(irq); + } +--- head-2011-03-17.orig/arch/x86/kernel/cpu/Makefile 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/Makefile 2011-02-01 14:54:13.000000000 +0100 @@ -34,7 +34,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o --disabled-obj-$(CONFIG_XEN) := hypervisor.o vmware.o -+disabled-obj-$(CONFIG_XEN) := hypervisor.o sched.o vmware.o +-disabled-obj-$(CONFIG_XEN) := hypervisor.o perfctr-watchdog.o vmware.o ++disabled-obj-$(CONFIG_XEN) := hypervisor.o perfctr-watchdog.o sched.o vmware.o quiet_cmd_mkcapflags = MKCAP $@ cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ ---- head-2010-05-25.orig/arch/x86/kernel/cpu/amd.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/amd.c 2010-03-24 15:32:27.000000000 +0100 -@@ -313,7 +313,7 @@ static void __cpuinit amd_detect_cmp(str +--- head-2011-03-17.orig/arch/x86/kernel/cpu/amd.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/amd.c 2011-02-01 14:54:13.000000000 +0100 +@@ -325,7 +325,7 @@ static void __cpuinit amd_detect_cmp(str int amd_get_nb_id(int cpu) { int id = 0; @@ -1530,8 +1622,24 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches id = per_cpu(cpu_llc_id, cpu); #endif return id; -@@ -469,8 +469,10 @@ static void __cpuinit init_amd(struct cp - if (c->x86 == 0x10 || c->x86 == 0x11) +@@ -488,18 +488,26 @@ static void __cpuinit init_amd(struct cp + u64 val; + + clear_cpu_cap(c, X86_FEATURE_LAHF_LM); ++#ifndef CONFIG_XEN + if (!rdmsrl_amd_safe(0xc001100d, &val)) { + val &= ~(1ULL << 32); + wrmsrl_amd_safe(0xc001100d, val); + } ++#else ++ pr_warning("Long-mode LAHF feature wrongly enabled -" ++ "hypervisor update needed\n"); ++ (void)&val; ++#endif + } + + } + if (c->x86 >= 0x10) set_cpu_cap(c, X86_FEATURE_REP_GOOD); +#ifndef CONFIG_XEN @@ -1541,8 +1649,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #else /* ---- head-2010-05-25.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/common-xen.c 2010-03-24 
15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:17.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:24.000000000 +0100 @@ -13,13 +13,13 @@ #include @@ -1643,7 +1751,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches GDT_STACK_CANARY_INIT #endif } }; -@@ -900,7 +899,7 @@ void __init identify_boot_cpu(void) +@@ -907,7 +906,7 @@ void __init identify_boot_cpu(void) #else vgetcpu_set_mode(); #endif @@ -1652,7 +1760,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) -@@ -1013,7 +1012,7 @@ __setup("clearcpuid=", setup_disablecpui +@@ -1020,7 +1019,7 @@ __setup("clearcpuid=", setup_disablecpui #ifdef CONFIG_X86_64 #ifndef CONFIG_X86_NO_IDT @@ -1661,7 +1769,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #endif DEFINE_PER_CPU_FIRST(union irq_stack_union, -@@ -1027,13 +1026,21 @@ void xen_switch_pt(void) +@@ -1034,13 +1033,21 @@ void xen_switch_pt(void) #endif } @@ -1685,7 +1793,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches DEFINE_PER_CPU(unsigned int, irq_count) = -1; #ifndef CONFIG_X86_NO_TSS -@@ -1049,8 +1056,7 @@ static const unsigned int exception_stac +@@ -1056,8 +1063,7 @@ static const unsigned int exception_stac }; static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks @@ -1695,7 +1803,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #endif void __cpuinit syscall_init(void) -@@ -1097,8 +1103,11 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist +@@ -1104,8 +1110,11 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist #else /* CONFIG_X86_64 */ @@ -1708,9 +1816,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #endif /* Make sure %fs and %gs are initialized properly in idle threads */ ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mcheck/mce-inject.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/mcheck/mce-inject.c 2010-04-15 10:10:43.000000000 +0200 -@@ -144,7 +144,7 @@ static void raise_mce(struct mce *m) +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-02-01 14:54:13.000000000 +0100 +@@ -145,7 +145,7 @@ static void raise_mce(struct mce *m) if (context == MCJ_CTX_RANDOM) return; @@ -1719,8 +1827,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (m->inject_flags & MCJ_NMI_BROADCAST) { unsigned long start; int cpu; ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -1,10 +1,9 @@ -#include -#include @@ -1810,8 +1918,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches EXPORT_SYMBOL(mtrr_del); /* ---- head-2010-05-25.orig/arch/x86/kernel/e820-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ 
head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -134,7 +134,7 @@ static void __init __e820_add_region(str { int x = e820x->nr_map; @@ -1910,8 +2018,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifdef CONFIG_XEN if (is_initial_xendomain()) { printk(KERN_INFO "Xen-provided machine memory map:\n"); ---- head-2010-05-25.orig/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/early_printk-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -178,7 +178,6 @@ static __init void early_serial_init(cha * mappings. Someone should fix this for domain 0. For now, use fake serial. */ @@ -2741,8 +2849,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:54:13.000000000 +0100 @@ -53,6 +53,7 @@ #include #include @@ -2818,8 +2926,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches apicinterrupt LOCAL_PENDING_VECTOR \ perf_pending_interrupt smp_perf_pending_interrupt #endif ---- head-2010-05-25.orig/arch/x86/kernel/head-xen.c 2010-04-28 17:07:13.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/head-xen.c 2010-04-15 10:10:51.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -59,7 +59,6 @@ void __init reserve_ebda_region(void) #include #include @@ -2828,7 +2936,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #include #include -@@ -164,7 +163,7 @@ void __init xen_start_kernel(void) +@@ -152,7 +151,7 @@ void __init xen_start_kernel(void) } @@ -2837,8 +2945,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { int ret; static const struct callback_register __initconst event = { ---- head-2010-05-25.orig/arch/x86/kernel/head32-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head32-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head32-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head32-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -9,11 +9,26 @@ #include @@ -2868,7 +2976,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches void __init i386_start_kernel(void) { -@@ -31,7 +46,16 @@ void __init i386_start_kernel(void) +@@ -47,7 +62,16 @@ void __init i386_start_kernel(void) reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif @@ -2886,7 +2994,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #else { int max_cmdline; -@@ -42,6 +66,7 @@ void __init i386_start_kernel(void) +@@ -58,6 +82,7 @@ void __init i386_start_kernel(void) boot_command_line[max_cmdline-1] = '\0'; } @@ -2894,8 +3002,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches xen_start_kernel(); #endif ---- 
head-2010-05-25.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head64-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -20,15 +20,14 @@ #include #include @@ -2913,8 +3021,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifndef CONFIG_XEN static void __init zap_identity_mappings(void) ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_32-xen.S 2011-03-03 16:23:25.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_32-xen.S 2011-03-03 16:24:06.000000000 +0100 @@ -30,7 +30,7 @@ #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id @@ -2924,7 +3032,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #define VIRT_ENTRY_OFFSET 0x0 .org VIRT_ENTRY_OFFSET ENTRY(startup_32) -@@ -69,7 +69,6 @@ ENTRY(startup_32) +@@ -67,7 +67,6 @@ ENTRY(startup_32) */ movl $per_cpu__gdt_page,%eax movl $per_cpu__stack_canary,%ecx @@ -2932,7 +3040,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) -@@ -122,7 +121,7 @@ ENTRY(hypercall_page) +@@ -120,7 +119,7 @@ ENTRY(hypercall_page) /* * BSS section */ @@ -2941,8 +3049,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .align PAGE_SIZE_asm ENTRY(swapper_pg_fixmap) .fill 1024,4,0 ---- head-2010-05-25.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_64-xen.S 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-01 14:54:13.000000000 +0100 @@ -23,7 +23,7 @@ #include #include @@ -2970,8 +3078,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .align PAGE_SIZE ENTRY(empty_zero_page) .skip PAGE_SIZE ---- head-2010-05-25.orig/arch/x86/kernel/irq-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/irq-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/irq-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -67,10 +67,10 @@ static int show_other_interrupts(struct for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); @@ -2985,7 +3093,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches seq_printf(p, "%*s: ", prec, "PND"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); -@@ -112,7 +112,7 @@ static int show_other_interrupts(struct +@@ -117,7 +117,7 @@ static int show_other_interrupts(struct seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); seq_printf(p, " Threshold APIC interrupts\n"); #endif @@ -2994,7 +3102,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches seq_printf(p, "%*s: ", prec, "MCE"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); 
-@@ -212,7 +212,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) +@@ -219,7 +219,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) #ifdef CONFIG_X86_MCE_THRESHOLD sum += irq_stats(cpu)->irq_threshold_count; #endif @@ -3003,8 +3111,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches sum += per_cpu(mce_exception_count, cpu); sum += per_cpu(mce_poll_count, cpu); #endif ---- head-2010-05-25.orig/arch/x86/kernel/ldt-xen.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ldt-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ldt-xen.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ldt-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -70,8 +70,8 @@ static int alloc_ldt(mm_context_t *pc, i XENFEAT_writable_descriptor_tables); load_LDT(pc); @@ -3016,8 +3124,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches smp_call_function(flush_ldt, current->mm, 1); preempt_enable(); #endif ---- head-2010-05-25.orig/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -97,8 +97,8 @@ static ssize_t microcode_write(struct fi { ssize_t ret = -EINVAL; @@ -3038,8 +3146,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .fops = µcode_fops, }; ---- head-2010-05-25.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -51,6 +51,13 @@ static int __init mpf_checksum(unsigned return sum & 0xFF; } @@ -3102,7 +3210,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches clear_bit(m->busid, mp_bus_not_pci); #if defined(CONFIG_EISA) || defined(CONFIG_MCA) -@@ -301,6 +307,8 @@ static void __init smp_dump_mptable(stru +@@ -303,6 +309,8 @@ static void __init smp_dump_mptable(stru 1, mpc, mpc->length, 1); } @@ -3111,7 +3219,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) { char str[16]; -@@ -322,16 +330,13 @@ static int __init smp_read_mpc(struct mp +@@ -326,16 +334,13 @@ static int __init smp_read_mpc(struct mp if (early) return 1; @@ -3131,7 +3239,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches while (count < mpc->length) { switch (*mpt) { -@@ -363,8 +368,7 @@ static int __init smp_read_mpc(struct mp +@@ -367,8 +372,7 @@ static int __init smp_read_mpc(struct mp count = mpc->length; break; } @@ -3141,7 +3249,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } #ifdef CONFIG_X86_BIGSMP -@@ -492,11 +496,11 @@ static void __init construct_ioapic_tabl +@@ -496,11 +500,11 @@ static void __init construct_ioapic_tabl MP_bus_info(&bus); } @@ -3158,16 +3266,32 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches MP_ioapic_info(&ioapic); /* -@@ -618,7 +622,7 @@ static int __init check_physptr(struct m +@@ -624,18 +628,18 @@ static int __init 
check_physptr(struct m /* * Scan the memory blocks for an SMP configuration block. */ +-#ifndef CONFIG_XEN -static void __init __get_smp_config(unsigned int early) +-#else +-void __init get_smp_config(void) +-#define early 0 +-#endif +void __init default_get_smp_config(unsigned int early) { struct mpf_intel *mpf = mpf_found; -@@ -635,11 +639,6 @@ static void __init __get_smp_config(unsi + if (!mpf) + return; + ++#ifdef CONFIG_XEN ++ BUG_ON(early); ++#define early 0 ++#endif ++ + if (acpi_lapic && early) + return; + +@@ -646,11 +650,6 @@ void __init get_smp_config(void) if (acpi_lapic && acpi_ioapic) return; @@ -3179,10 +3303,10 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->specification); #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) && !defined(CONFIG_XEN) -@@ -680,16 +679,6 @@ static void __init __get_smp_config(unsi - */ +@@ -695,16 +694,6 @@ void __init get_smp_config(void) } + #ifndef CONFIG_XEN -void __init early_get_smp_config(void) -{ - __get_smp_config(1); @@ -3193,10 +3317,10 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches - __get_smp_config(0); -} - - #ifndef CONFIG_XEN static void __init smp_reserve_bootmem(struct mpf_intel *mpf) { -@@ -761,16 +750,12 @@ static int __init smp_scan_config(unsign + unsigned long size = get_mpc_size(mpf->physptr); +@@ -775,16 +764,12 @@ static int __init smp_scan_config(unsign return 0; } @@ -3214,7 +3338,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * FIXME: Linux assumes you have 640K of base ram.. * this continues the error... -@@ -807,16 +792,6 @@ static void __init __find_smp_config(uns +@@ -821,16 +806,6 @@ static void __init __find_smp_config(uns #endif } @@ -3231,8 +3355,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifdef CONFIG_X86_IO_APIC static u8 __initdata irq_used[MAX_IRQ_SOURCES]; ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -3,6 +3,7 @@ #include #include @@ -3311,8 +3435,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifdef CONFIG_PCI /* Many VIA bridges seem to corrupt data for DAC. 
Disable it here */ ---- head-2010-05-25.orig/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -36,7 +36,7 @@ gnttab_map_sg(struct device *hwdev, stru sg->dma_address = gnttab_dma_map_page(sg_page(sg)) + sg->offset; @@ -3375,8 +3499,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches }; void __init no_iommu_init(void) ---- head-2010-05-25.orig/arch/x86/kernel/process-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:07:25.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:07:49.000000000 +0100 @@ -9,7 +9,7 @@ #include #include @@ -3396,7 +3520,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; -@@ -285,9 +282,7 @@ static inline int hlt_use_halt(void) +@@ -256,9 +253,7 @@ EXPORT_SYMBOL(pm_idle); */ void xen_idle(void) { @@ -3407,7 +3531,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we -@@ -300,7 +295,6 @@ void xen_idle(void) +@@ -271,7 +266,6 @@ void xen_idle(void) else local_irq_enable(); current_thread_info()->status |= TS_POLLING; @@ -3415,7 +3539,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } #ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(default_idle); -@@ -354,9 +348,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); +@@ -325,9 +319,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); */ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { @@ -3426,7 +3550,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (!need_resched()) { if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); -@@ -366,15 +358,13 @@ void mwait_idle_with_hints(unsigned long +@@ -337,15 +329,13 @@ void mwait_idle_with_hints(unsigned long if (!need_resched()) __mwait(ax, cx); } @@ -3443,7 +3567,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); -@@ -384,7 +374,6 @@ static void mwait_idle(void) +@@ -355,7 +345,6 @@ static void mwait_idle(void) __sti_mwait(0, 0); else local_irq_enable(); @@ -3451,7 +3575,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } else local_irq_enable(); } -@@ -397,13 +386,11 @@ static void mwait_idle(void) +@@ -368,13 +357,11 @@ static void mwait_idle(void) */ static void poll_idle(void) { @@ -3467,7 +3591,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } #ifndef CONFIG_XEN -@@ -556,10 +543,8 @@ void __init init_c1e_mask(void) +@@ -527,10 +514,8 @@ void __init init_c1e_mask(void) { #ifndef CONFIG_XEN /* If we're using c1e_idle, we need to allocate c1e_mask. 
*/ @@ -3480,9 +3604,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #endif } ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-24 15:32:27.000000000 +0100 -@@ -66,9 +66,6 @@ +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:37:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:38:03.000000000 +0100 +@@ -64,9 +64,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); asmlinkage void cstar_ret_from_fork(void) __asm__("cstar_ret_from_fork"); @@ -3492,7 +3616,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * Return saved PC of a blocked thread. */ -@@ -360,6 +357,7 @@ __switch_to(struct task_struct *prev_p, +@@ -358,6 +355,7 @@ __switch_to(struct task_struct *prev_p, #ifndef CONFIG_X86_NO_TSS struct tss_struct *tss = &per_cpu(init_tss, cpu); #endif @@ -3500,7 +3624,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #if CONFIG_XEN_COMPAT > 0x030002 struct physdev_set_iopl iopl_op; struct physdev_set_iobitmap iobmp_op; -@@ -373,15 +371,24 @@ __switch_to(struct task_struct *prev_p, +@@ -371,15 +369,24 @@ __switch_to(struct task_struct *prev_p, /* XEN NOTE: FS/GS saved in switch_mm(), not here. */ /* @@ -3528,7 +3652,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } #if 0 /* lazy fpu sanity check */ else BUG_ON(!(read_cr0() & 8)); -@@ -427,6 +434,14 @@ __switch_to(struct task_struct *prev_p, +@@ -425,6 +432,14 @@ __switch_to(struct task_struct *prev_p, mcl++; } @@ -3543,7 +3667,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) { set_xen_guest_handle(iobmp_op.bitmap, (char *)next->io_bitmap_ptr); -@@ -451,7 +466,7 @@ __switch_to(struct task_struct *prev_p, +@@ -449,7 +464,7 @@ __switch_to(struct task_struct *prev_p, BUG(); /* we're going to use this soon, after a few expensive things */ @@ -3552,7 +3676,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches prefetch(next->xstate); /* -@@ -470,15 +485,8 @@ __switch_to(struct task_struct *prev_p, +@@ -468,15 +483,8 @@ __switch_to(struct task_struct *prev_p, */ arch_end_context_switch(next_p); @@ -3570,9 +3694,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * Restore %gs if needed (which is common) ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-03-24 15:32:27.000000000 +0100 -@@ -64,9 +64,6 @@ +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:37:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:37:59.000000000 +0100 +@@ -60,9 +60,6 @@ asmlinkage extern void ret_from_fork(void); @@ -3582,7 +3706,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static DEFINE_PER_CPU(unsigned char, is_idle); unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; -@@ -399,6 +396,7 @@ __switch_to(struct task_struct *prev_p, +@@ -395,6 +392,7 @@ __switch_to(struct task_struct *prev_p, #ifndef CONFIG_X86_NO_TSS struct tss_struct *tss = &per_cpu(init_tss, cpu); #endif @@ -3590,7 +3714,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #if 
CONFIG_XEN_COMPAT > 0x030002 struct physdev_set_iopl iopl_op; struct physdev_set_iobitmap iobmp_op; -@@ -409,8 +407,15 @@ __switch_to(struct task_struct *prev_p, +@@ -405,8 +403,15 @@ __switch_to(struct task_struct *prev_p, #endif multicall_entry_t _mcl[8], *mcl = _mcl; @@ -3607,7 +3731,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches prefetch(next->xstate); /* -@@ -422,12 +427,21 @@ __switch_to(struct task_struct *prev_p, +@@ -418,12 +423,21 @@ __switch_to(struct task_struct *prev_p, */ if (task_thread_info(prev_p)->status & TS_USEDFPU) { __save_init_fpu(prev_p); /* _not_ save_init_fpu() */ @@ -3632,7 +3756,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * Reload sp0. * This is load_sp0(tss, next) with a multicall. -@@ -545,15 +559,12 @@ __switch_to(struct task_struct *prev_p, +@@ -541,15 +555,12 @@ __switch_to(struct task_struct *prev_p, task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) __switch_to_xtra(prev_p, next_p); @@ -3653,19 +3777,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches return prev_p; } ---- head-2010-05-25.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/quirks-xen.c 2010-03-24 15:32:27.000000000 +0100 -@@ -509,7 +509,7 @@ static void __init quirk_amd_nb_node(str - - pci_read_config_dword(nb_ht, 0x60, &val); - set_dev_node(&dev->dev, val & 7); -- pci_dev_put(dev); -+ pci_dev_put(nb_ht); - } - - DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB, ---- head-2010-05-25.orig/arch/x86/kernel/rtc.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/rtc.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/rtc.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/rtc.c 2011-02-01 14:54:13.000000000 +0100 @@ -189,8 +189,10 @@ void read_persistent_clock(struct timesp unsigned long retval, flags; @@ -3679,8 +3792,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #endif spin_lock_irqsave(&rtc_lock, flags); retval = x86_platform.get_wallclock(); ---- head-2010-05-25.orig/arch/x86/kernel/setup-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:23:32.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-04 15:09:48.000000000 +0100 @@ -27,6 +27,7 @@ #include #include @@ -3732,7 +3845,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches -}; - /* cpu data as detected by the assembly code in head.S */ - struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; + struct cpuinfo_x86 new_cpu_data __cpuinitdata = { .wp_works_ok = 1, .hard_math = 1 }; /* common cpu data for all cpus */ @@ -670,7 +661,7 @@ static struct resource standard_io_resou .flags = IORESOURCE_BUSY | IORESOURCE_IO } @@ -3768,7 +3881,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * AMI BIOS with low memory corruption was found on Intel DG45ID board. 
* It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will -@@ -865,7 +859,7 @@ void __init setup_arch(char **cmdline_p) +@@ -868,7 +862,7 @@ void __init setup_arch(char **cmdline_p) copy_edid(); #endif /* CONFIG_XEN */ @@ -3777,7 +3890,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches setup_memory_map(); parse_setup_data(); -@@ -906,6 +900,16 @@ void __init setup_arch(char **cmdline_p) +@@ -909,6 +903,16 @@ void __init setup_arch(char **cmdline_p) strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; @@ -3794,7 +3907,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches parse_early_param(); #ifdef CONFIG_X86_64 -@@ -945,12 +949,9 @@ void __init setup_arch(char **cmdline_p) +@@ -948,12 +952,9 @@ void __init setup_arch(char **cmdline_p) * VMware detection requires dmi to be available, so this * needs to be done after dmi_scan_machine, for the BP. */ @@ -3809,7 +3922,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifndef CONFIG_XEN /* after parse_early_param, so could debug it */ -@@ -1103,10 +1104,11 @@ void __init setup_arch(char **cmdline_p) +@@ -1106,10 +1107,11 @@ void __init setup_arch(char **cmdline_p) kvmclock_init(); #endif @@ -3824,7 +3937,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifdef CONFIG_X86_64 map_vsyscall(); -@@ -1197,13 +1199,13 @@ void __init setup_arch(char **cmdline_p) +@@ -1200,13 +1202,13 @@ void __init setup_arch(char **cmdline_p) */ acpi_boot_init(); @@ -3840,7 +3953,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches prefill_possible_map(); -@@ -1227,11 +1229,7 @@ void __init setup_arch(char **cmdline_p) +@@ -1230,11 +1232,7 @@ void __init setup_arch(char **cmdline_p) e820_reserve_resources(); #endif @@ -3945,18 +4058,27 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #endif /* CONFIG_X86_32 */ #ifdef CONFIG_XEN ---- head-2010-05-25.orig/arch/x86/kernel/sfi.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/sfi.c 2010-03-24 15:32:27.000000000 +0100 -@@ -31,7 +31,7 @@ - #include +--- head-2011-03-17.orig/arch/x86/platform/sfi/sfi.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/platform/sfi/sfi.c 2011-02-02 08:45:00.000000000 +0100 +@@ -32,6 +32,7 @@ #include --#ifdef CONFIG_X86_LOCAL_APIC -+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) + #ifdef CONFIG_X86_LOCAL_APIC ++#ifndef CONFIG_XEN static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; - void __init mp_sfi_register_lapic_address(unsigned long address) -@@ -99,9 +99,12 @@ static int __init sfi_parse_ioapic(struc + /* All CPUs enumerated by SFI must be present and enabled */ +@@ -47,6 +48,9 @@ static void __cpuinit mp_sfi_register_la + + generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR))); + } ++#else ++#define mp_sfi_register_lapic(id) ++#endif + + static int __init sfi_parse_cpus(struct sfi_table_header *table) + { +@@ -86,9 +90,12 @@ static int __init sfi_parse_ioapic(struc pentry++; } @@ -3969,17 +4091,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches return 0; } #endif /* CONFIG_X86_IO_APIC */ -@@ -111,7 +114,7 @@ static int __init sfi_parse_ioapic(struc - */ - int __init sfi_platform_init(void) - { --#ifdef CONFIG_X86_LOCAL_APIC -+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) - 
mp_sfi_register_lapic_address(sfi_lapic_addr); - sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus); - #endif ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-05-12 09:02:08.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-05-12 09:02:39.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -1,31 +1,12 @@ /* - * Copyright (C) 1991, 1992, 1995 Linus Torvalds @@ -4035,7 +4148,55 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; #endif -@@ -415,38 +396,33 @@ unsigned long profile_pc(struct pt_regs +@@ -100,20 +81,6 @@ static DECLARE_WORK(clock_was_set_work, + */ + #define clobber_induction_variable(v) asm ( "" : "+r" (v) ) + +-static inline void __normalize_time(time_t *sec, s64 *nsec) +-{ +- while (*nsec >= NSEC_PER_SEC) { +- clobber_induction_variable(*nsec); +- (*nsec) -= NSEC_PER_SEC; +- (*sec)++; +- } +- while (*nsec < 0) { +- clobber_induction_variable(*nsec); +- (*nsec) += NSEC_PER_SEC; +- (*sec)--; +- } +-} +- + /* Does this guest OS track Xen time, or set its wall clock independently? */ + static int independent_wallclock = 0; + static int __init __independent_wallclock(char *str) +@@ -307,8 +274,7 @@ static void sync_xen_wallclock(unsigned + static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0); + static void sync_xen_wallclock(unsigned long dummy) + { +- time_t sec; +- s64 nsec; ++ struct timespec now; + struct xen_platform_op op; + + BUG_ON(!is_initial_xendomain()); +@@ -317,13 +283,11 @@ static void sync_xen_wallclock(unsigned + + write_seqlock_irq(&xtime_lock); + +- sec = xtime.tv_sec; +- nsec = xtime.tv_nsec; +- __normalize_time(&sec, &nsec); ++ set_normalized_timespec(&now, xtime.tv_sec, xtime.tv_nsec); + + op.cmd = XENPF_settime; +- op.u.settime.secs = sec; +- op.u.settime.nsecs = nsec; ++ op.u.settime.secs = now.tv_sec; ++ op.u.settime.nsecs = now.tv_nsec; + op.u.settime.system_time = processed_system_time; + WARN_ON(HYPERVISOR_platform_op(&op)); + +@@ -415,38 +379,33 @@ unsigned long profile_pc(struct pt_regs { unsigned long pc = instruction_pointer(regs); @@ -4087,7 +4248,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { s64 delta, delta_cpu, stolen, blocked; unsigned int i, cpu = smp_processor_id(); -@@ -568,8 +544,7 @@ irqreturn_t timer_interrupt(int irq, voi +@@ -571,8 +530,7 @@ irqreturn_t timer_interrupt(int irq, voi /* Local timer processing (see update_process_times()). 
*/ run_local_timers(); @@ -4097,7 +4258,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches printk_tick(); scheduler_tick(); run_posix_cpu_timers(current); -@@ -669,7 +644,7 @@ static void init_missing_ticks_accountin +@@ -672,7 +630,7 @@ static void init_missing_ticks_accountin runstate->time[RUNSTATE_offline]; } @@ -4106,7 +4267,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { const shared_info_t *s = HYPERVISOR_shared_info; u32 version, sec, nsec; -@@ -686,7 +661,8 @@ unsigned long xen_read_persistent_clock( +@@ -689,7 +647,8 @@ unsigned long xen_read_persistent_clock( delta = local_clock() + (u64)sec * NSEC_PER_SEC + nsec; do_div(delta, NSEC_PER_SEC); @@ -4116,8 +4277,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } int xen_update_persistent_clock(void) ---- head-2010-05-25.orig/arch/x86/kernel/traps-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-16 13:56:25.000000000 +0100 @@ -14,7 +14,6 @@ #include #include @@ -4239,8 +4400,16 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } void __cpuinit smp_trap_init(trap_info_t *trap_ctxt) ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:32:27.000000000 +0100 +@@ -978,4 +968,7 @@ void __cpuinit smp_trap_init(trap_info_t + trap_ctxt[t->vector].cs = t->cs; + trap_ctxt[t->vector].address = t->address; + } ++ TI_SET_IF(trap_ctxt + NMI_VECTOR, 1); ++ trap_ctxt[NMI_VECTOR].cs = __KERNEL_CS; ++ trap_ctxt[NMI_VECTOR].address = (unsigned long)nmi; + } +--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wa vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; @@ -4271,7 +4440,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches }; --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/kernel/x86_init-xen.c 2010-03-24 15:32:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/x86_init-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2009 Thomas Gleixner @@ -4343,8 +4512,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches + .get_wallclock = mach_get_cmos_time, + .set_wallclock = mach_set_rtc_mmss, +}; ---- head-2010-05-25.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/fault-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -10,7 +10,7 @@ #include /* max_low_pfn */ #include /* __kprobes, ... 
*/ @@ -4362,7 +4531,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches force_sig_info(si_signo, &info, tsk); } -@@ -293,27 +294,25 @@ check_v8086_mode(struct pt_regs *regs, u +@@ -302,27 +303,25 @@ check_v8086_mode(struct pt_regs *regs, u tsk->thread.screen_bitmap |= 1 << bit; } @@ -4404,7 +4573,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * We must not directly access the pte in the highpte -@@ -321,17 +320,12 @@ static void dump_pagetable(unsigned long +@@ -330,17 +329,12 @@ static void dump_pagetable(unsigned long * And let's rather not kmap-atomic the pte, just in case * it's allocated already: */ @@ -4427,7 +4596,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches printk(KERN_CONT "\n"); } -@@ -460,16 +454,12 @@ static int bad_address(void *p) +@@ -471,16 +465,12 @@ static int bad_address(void *p) static void dump_pagetable(unsigned long address) { @@ -4446,7 +4615,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (bad_address(pgd)) goto bad; -@@ -809,10 +799,12 @@ out_of_memory(struct pt_regs *regs, unsi +@@ -820,10 +810,12 @@ out_of_memory(struct pt_regs *regs, unsi } static void @@ -4460,7 +4629,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches up_read(&mm->mmap_sem); -@@ -828,7 +820,15 @@ do_sigbus(struct pt_regs *regs, unsigned +@@ -839,7 +831,15 @@ do_sigbus(struct pt_regs *regs, unsigned tsk->thread.error_code = error_code; tsk->thread.trap_no = 14; @@ -4477,7 +4646,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } static noinline void -@@ -838,8 +838,8 @@ mm_fault_error(struct pt_regs *regs, uns +@@ -849,8 +849,8 @@ mm_fault_error(struct pt_regs *regs, uns if (fault & VM_FAULT_OOM) { out_of_memory(regs, error_code, address); } else { @@ -4488,7 +4657,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches else BUG(); } -@@ -1053,7 +1053,7 @@ do_page_fault(struct pt_regs *regs, unsi +@@ -1064,7 +1064,7 @@ do_page_fault(struct pt_regs *regs, unsi if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); @@ -4497,7 +4666,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* * If we're in an interrupt, have no user context or are running -@@ -1150,11 +1150,11 @@ good_area: +@@ -1161,11 +1161,11 @@ good_area: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; @@ -4511,8 +4680,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches regs, address); } ---- head-2010-05-25.orig/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/highmem_32-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -24,7 +24,7 @@ void kunmap(struct page *page) * no global lock is needed and because the kmap code must perform a global TLB * invalidation when the kmap pool wraps. 
@@ -4532,8 +4701,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches EXPORT_SYMBOL(clear_highpage); EXPORT_SYMBOL(copy_highpage); ---- head-2010-05-25.orig/arch/x86/mm/init-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -36,69 +36,6 @@ extern unsigned long extend_init_mapping extern void xen_finish_init_mapping(void); #endif @@ -4604,8 +4773,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static void __init find_early_table_space(unsigned long end, int use_pse, int use_gbpages) { ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -87,7 +87,7 @@ static pmd_t * __init one_md_table_init( #ifdef CONFIG_X86_PAE if (!(__pgd_val(*pgd) & _PAGE_PRESENT)) { @@ -4648,8 +4817,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -894,8 +894,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to #endif /* CONFIG_MEMORY_HOTPLUG */ @@ -4680,8 +4849,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches max_pfn << (PAGE_SHIFT-10), codesize >> 10, absent_pages << (PAGE_SHIFT-10), ---- head-2010-05-25.orig/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/iomap_32-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -22,7 +22,7 @@ #include #include @@ -4723,8 +4892,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) { ---- head-2010-05-25.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/ioremap-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:25.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:38.000000000 +0100 @@ -23,81 +23,7 @@ #include #include @@ -4808,7 +4977,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static int direct_remap_area_pte_fn(pte_t *pte, struct page *pmd_page, -@@ -407,30 +333,19 @@ static void __iomem *__ioremap_caller(re +@@ -388,30 +314,19 @@ static void __iomem *__ioremap_caller(re retval = reserve_memtype(phys_addr, (u64)phys_addr + size, prot_val, &new_prot_val); if (retval) { @@ -4844,7 +5013,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } prot_val = new_prot_val; } -@@ 
-456,27 +371,26 @@ static void __iomem *__ioremap_caller(re +@@ -437,27 +352,26 @@ static void __iomem *__ioremap_caller(re */ area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) @@ -4882,8 +5051,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } /** ---- head-2010-05-25.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -12,6 +12,7 @@ #include #include @@ -4963,8 +5132,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } else cpa_flush_all(cache); ---- head-2010-05-25.orig/arch/x86/mm/pat-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pat-xen.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pat-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -15,6 +15,7 @@ #include #include @@ -5550,9 +5719,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .start = memtype_seq_start, .next = memtype_seq_next, .stop = memtype_seq_stop, ---- head-2010-05-25.orig/arch/x86/mm/pgtable-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable-xen.c 2010-03-24 15:32:27.000000000 +0100 -@@ -692,8 +692,7 @@ int ptep_set_access_flags(struct vm_area +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-02-01 14:54:13.000000000 +0100 +@@ -695,8 +695,7 @@ int ptep_set_access_flags(struct vm_area if (likely(vma->vm_mm == current->mm)) { if (HYPERVISOR_update_va_mapping(address, entry, @@ -5562,8 +5731,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches BUG(); } else { xen_l1_entry_update(ptep, entry); ---- head-2010-05-25.orig/arch/x86/mm/physaddr.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/arch/x86/mm/physaddr.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/physaddr.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/physaddr.c 2011-02-01 14:54:13.000000000 +0100 @@ -8,6 +8,10 @@ #ifdef CONFIG_X86_64 @@ -5575,9 +5744,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches unsigned long __phys_addr(unsigned long x) { if (x >= __START_KERNEL_map) { ---- head-2010-05-25.orig/drivers/acpi/processor_driver.c 2010-04-15 10:07:40.000000000 +0200 -+++ head-2010-05-25/drivers/acpi/processor_driver.c 2010-05-25 09:25:03.000000000 +0200 -@@ -663,7 +663,7 @@ static int __cpuinit acpi_processor_add( +--- head-2011-03-17.orig/drivers/acpi/processor_driver.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_driver.c 2011-02-01 14:54:13.000000000 +0100 +@@ -548,7 +548,7 @@ static int __cpuinit acpi_processor_add( result = processor_extcntl_prepare(pr); if (result) @@ -5586,8 +5755,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches pr->cdev = thermal_cooling_device_register("Processor", device, &processor_cooling_ops); ---- head-2010-05-25.orig/drivers/char/agp/agp.h 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/agp.h 2010-03-24 15:32:27.000000000 +0100 +--- 
head-2011-03-17.orig/drivers/char/agp/agp.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/agp.h 2011-02-01 14:54:13.000000000 +0100 @@ -31,6 +31,10 @@ #include /* for flush_agp_cache() */ @@ -5599,18 +5768,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #define PFX "agpgart: " //#define AGP_DEBUG 1 ---- head-2010-05-25.orig/drivers/char/agp/amd-k7-agp.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/amd-k7-agp.c 2010-03-24 15:32:27.000000000 +0100 -@@ -44,7 +44,7 @@ static int amd_create_page_map(struct am - #ifndef CONFIG_X86 - SetPageReserved(virt_to_page(page_map->real)); - global_cache_flush(); -- page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), -+ page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), - PAGE_SIZE); - if (page_map->remapped == NULL) { - ClearPageReserved(virt_to_page(page_map->real)); -@@ -160,7 +160,7 @@ static int amd_create_gatt_table(struct +--- head-2011-03-17.orig/drivers/char/agp/amd-k7-agp.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/amd-k7-agp.c 2011-02-17 10:18:42.000000000 +0100 +@@ -142,7 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; @@ -5619,7 +5779,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* Get the address for the gart region. * This is a bus address even on the alpha, b/c its -@@ -173,7 +173,7 @@ static int amd_create_gatt_table(struct +@@ -155,7 +155,7 @@ static int amd_create_gatt_table(struct /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { @@ -5628,8 +5788,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches page_dir.remapped+GET_PAGE_DIR_OFF(addr)); readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ } ---- head-2010-05-25.orig/drivers/char/agp/amd64-agp.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/amd64-agp.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/char/agp/amd64-agp.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/amd64-agp.c 2011-02-01 14:54:13.000000000 +0100 @@ -178,7 +178,7 @@ static const struct aper_size_info_32 am static int amd_8151_configure(void) @@ -5638,8 +5798,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches + unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real); int i; - /* Configure AGP regs in each x86-64 host bridge. 
*/ -@@ -558,7 +558,7 @@ static void __devexit agp_amd64_remove(s + if (!amd_nb_has_feature(AMD_NB_GART)) +@@ -583,7 +583,7 @@ static void __devexit agp_amd64_remove(s { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); @@ -5648,9 +5808,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches amd64_aperture_sizes[bridge->aperture_size_idx].size); agp_remove_bridge(bridge); agp_put_bridge(bridge); ---- head-2010-05-25.orig/drivers/char/agp/ati-agp.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/ati-agp.c 2010-03-24 15:32:27.000000000 +0100 -@@ -360,7 +360,7 @@ static int ati_create_gatt_table(struct +--- head-2011-03-17.orig/drivers/char/agp/ati-agp.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/ati-agp.c 2011-02-01 14:54:13.000000000 +0100 +@@ -361,7 +361,7 @@ static int ati_create_gatt_table(struct agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; @@ -5659,7 +5819,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* Write out the size register */ current_size = A_SIZE_LVL2(agp_bridge->current_size); -@@ -390,7 +390,7 @@ static int ati_create_gatt_table(struct +@@ -391,7 +391,7 @@ static int ati_create_gatt_table(struct /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { @@ -5668,9 +5828,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches page_dir.remapped+GET_PAGE_DIR_OFF(addr)); readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ } ---- head-2010-05-25.orig/drivers/char/agp/efficeon-agp.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/efficeon-agp.c 2010-03-24 15:32:27.000000000 +0100 -@@ -226,7 +226,7 @@ static int efficeon_create_gatt_table(st +--- head-2011-03-17.orig/drivers/char/agp/efficeon-agp.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/efficeon-agp.c 2011-02-01 14:54:13.000000000 +0100 +@@ -227,7 +227,7 @@ static int efficeon_create_gatt_table(st efficeon_private.l1_table[index] = page; @@ -5679,10 +5839,10 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches pci_write_config_dword(agp_bridge->dev, EFFICEON_ATTPAGE, value); ---- head-2010-05-25.orig/drivers/char/agp/generic.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/generic.c 2010-04-15 10:11:24.000000000 +0200 -@@ -989,7 +989,7 @@ int agp_generic_create_gatt_table(struct - set_memory_uc((unsigned long)table, 1 << page_order); +--- head-2011-03-17.orig/drivers/char/agp/generic.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/generic.c 2011-02-01 14:54:13.000000000 +0100 +@@ -954,7 +954,7 @@ int agp_generic_create_gatt_table(struct + bridge->gatt_table = (void *)table; #else - bridge->gatt_table = ioremap_nocache(virt_to_phys(table), @@ -5690,7 +5850,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches (PAGE_SIZE * (1 << page_order))); bridge->driver->cache_flush(); #endif -@@ -1002,7 +1002,7 @@ int agp_generic_create_gatt_table(struct +@@ -967,7 +967,7 @@ int agp_generic_create_gatt_table(struct return -ENOMEM; } @@ -5699,8 +5859,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches /* AK: bogus, should encode addresses > 4GB */ for (i = 0; i < num_entries; i++) { ---- head-2010-05-25.orig/drivers/char/agp/sworks-agp.c 2010-05-25 
09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/char/agp/sworks-agp.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/char/agp/sworks-agp.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/char/agp/sworks-agp.c 2011-02-01 14:54:13.000000000 +0100 @@ -155,7 +155,7 @@ static int serverworks_create_gatt_table /* Create a fake scratch directory */ for (i = 0; i < 1024; i++) { @@ -5728,9 +5888,47 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches return 0; } ---- head-2010-05-25.orig/drivers/gpu/drm/radeon/radeon_device.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/gpu/drm/radeon/radeon_device.c 2010-05-07 11:25:36.000000000 +0200 -@@ -345,6 +345,18 @@ int radeon_dummy_page_init(struct radeon +--- head-2011-03-17.orig/drivers/dma/ioat/dma.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/dma/ioat/dma.h 2011-02-01 14:54:13.000000000 +0100 +@@ -362,8 +362,6 @@ __ioat_dca_init(struct pci_dev *pdev, vo + return NULL; + } + #define ioat_dca_init __ioat_dca_init +-#define ioat2_dca_init __ioat_dca_init +-#define ioat3_dca_init __ioat_dca_init + #endif + + #endif /* IOATDMA_H */ +--- head-2011-03-17.orig/drivers/dma/ioat/dma_v2.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/dma/ioat/dma_v2.h 2011-02-01 14:54:13.000000000 +0100 +@@ -176,4 +176,10 @@ int ioat2_quiesce(struct ioat_chan_commo + int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); + extern struct kobj_type ioat2_ktype; + extern struct kmem_cache *ioat2_cache; ++ ++#ifdef CONFIG_XEN ++#define ioat2_dca_init __ioat_dca_init ++#define ioat3_dca_init __ioat_dca_init ++#endif ++ + #endif /* IOATDMA_V2_H */ +--- head-2011-03-17.orig/drivers/dma/ioat/hw.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/dma/ioat/hw.h 2011-02-01 14:54:13.000000000 +0100 +@@ -39,7 +39,11 @@ + #define IOAT_VER_3_0 0x30 /* Version 3.0 */ + #define IOAT_VER_3_2 0x32 /* Version 3.2 */ + ++#ifndef CONFIG_XEN + int system_has_dca_enabled(struct pci_dev *pdev); ++#else ++static inline int system_has_dca_enabled(struct pci_dev *pdev) { return 0; } ++#endif + + struct ioat_dma_descriptor { + uint32_t size; +--- head-2011-03-17.orig/drivers/gpu/drm/radeon/radeon_device.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/radeon/radeon_device.c 2011-02-01 14:54:13.000000000 +0100 +@@ -430,6 +430,18 @@ int radeon_dummy_page_init(struct radeon rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); if (rdev->dummy_page.page == NULL) return -ENOMEM; @@ -5748,20 +5946,130 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches +#endif rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (!rdev->dummy_page.addr) { ---- head-2010-05-25.orig/drivers/net/Kconfig 2010-04-15 09:54:18.000000000 +0200 -+++ head-2010-05-25/drivers/net/Kconfig 2010-04-15 10:11:31.000000000 +0200 -@@ -3313,7 +3313,7 @@ config VIRTIO_NET + if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) { +--- head-2011-03-17.orig/drivers/hwmon/coretemp-xen.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/coretemp-xen.c 2011-02-01 14:54:13.000000000 +0100 +@@ -162,17 +162,26 @@ static int adjust_tjmax(struct coretemp_ + /* The 100C is default for both mobile and non mobile CPUs */ + + int tjmax = 100000; +- int ismobile = 1; ++ int tjmax_ee = 85000; ++ int usemsr_ee = 1; + int err; + u32 eax, 
edx; + + /* Early chips have no MSR for TjMax */ + + if ((c->x86_model == 0xf) && (c->x86_mask < 4)) { +- ismobile = 0; ++ usemsr_ee = 0; + } + +- if ((c->x86_model > 0xe) && (ismobile)) { ++ /* Atoms seems to have TjMax at 90C */ ++ ++ if (c->x86_model == 0x1c) { ++ usemsr_ee = 0; ++ tjmax = 90000; ++ } ++ ++ if ((c->x86_model > 0xe) && (usemsr_ee)) { ++ u8 platform_id; + + /* Now we can detect the mobile CPU using Intel provided table + http://softwarecommunity.intel.com/Wiki/Mobility/720.htm +@@ -184,13 +193,29 @@ static int adjust_tjmax(struct coretemp_ + dev_warn(dev, + "Unable to access MSR 0x17, assuming desktop" + " CPU\n"); +- ismobile = 0; +- } else if (!(eax & 0x10000000)) { +- ismobile = 0; ++ usemsr_ee = 0; ++ } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) { ++ /* Trust bit 28 up to Penryn, I could not find any ++ documentation on that; if you happen to know ++ someone at Intel please ask */ ++ usemsr_ee = 0; ++ } else { ++ /* Platform ID bits 52:50 (EDX starts at bit 32) */ ++ platform_id = (edx >> 18) & 0x7; ++ ++ /* Mobile Penryn CPU seems to be platform ID 7 or 5 ++ (guesswork) */ ++ if ((c->x86_model == 0x17) && ++ ((platform_id == 5) || (platform_id == 7))) { ++ /* If MSR EE bit is set, set it to 90 degrees C, ++ otherwise 105 degrees C */ ++ tjmax_ee = 90000; ++ tjmax = 105000; ++ } + } + } + +- if (ismobile) { ++ if (usemsr_ee) { + + err = rdmsr_safe_on_pcpu(id, 0xee, &eax, &edx); + if (err < 0) { +@@ -198,9 +223,11 @@ static int adjust_tjmax(struct coretemp_ + "Unable to access MSR 0xEE, for Tjmax, left" + " at default"); + } else if (eax & 0x40000000) { +- tjmax = 85000; ++ tjmax = tjmax_ee; + } +- } else { ++ /* if we dont use msr EE it means we are desktop CPU (with exeception ++ of Atom) */ ++ } else if (tjmax == 100000) { + dev_warn(dev, "Using relative temperature scale!\n"); + } + +@@ -246,9 +273,9 @@ static int coretemp_probe(struct platfor + data->tjmax = adjust_tjmax(data, pdev->id, &pdev->dev); + + /* read the still undocumented IA32_TEMPERATURE_TARGET it exists +- on older CPUs but not in this register */ ++ on older CPUs but not in this register, Atoms don't have it either */ + +- if (data->x86_model > 0xe) { ++ if ((data->x86_model > 0xe) && (data->x86_model != 0x1c)) { + err = rdmsr_safe_on_pcpu(pdev->id, 0x1a2, &eax, &edx); + if (err < 0) { + dev_warn(&pdev->dev, "Unable to read" +@@ -360,11 +387,15 @@ static int coretemp_device_add(unsigned + if (err) + goto exit_entry_free; + +- /* check if family 6, models 0xe, 0xf, 0x16, 0x17, 0x1A */ ++ /* check if family 6, models 0xe (Pentium M DC), ++ 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm), ++ 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom), ++ 0x1e (Lynnfield) */ + if (info.x86 != 0x6 || + !((pdev_entry->x86_model == 0xe) || (pdev_entry->x86_model == 0xf) || + (pdev_entry->x86_model == 0x16) || (pdev_entry->x86_model == 0x17) || +- (pdev_entry->x86_model == 0x1A))) { ++ (pdev_entry->x86_model == 0x1a) || (pdev_entry->x86_model == 0x1c) || ++ (pdev_entry->x86_model == 0x1e))) { + + /* supported CPU not found, but report the unknown + family 6 CPU */ +--- head-2011-03-17.orig/drivers/net/Kconfig 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-17/drivers/net/Kconfig 2011-02-01 14:54:13.000000000 +0100 +@@ -3416,7 +3416,7 @@ config VIRTIO_NET config VMXNET3 - tristate "VMware VMXNET3 ethernet driver" -- depends on PCI && INET -+ depends on PCI && INET && !XEN - help - This driver supports VMware's vmxnet3 virtual ethernet NIC. 
- To compile this driver as a module, choose M here: the ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-03-24 15:32:27.000000000 +0100 + tristate "VMware VMXNET3 ethernet driver" +- depends on PCI && INET ++ depends on PCI && INET && !XEN + help + This driver supports VMware's vmxnet3 virtual ethernet NIC. + To compile this driver as a module, choose M here: the +--- head-2011-03-17.orig/drivers/pci/msi-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/pci/msi-xen.c 2011-02-01 14:54:13.000000000 +0100 @@ -16,12 +16,11 @@ #include #include @@ -5777,7 +6085,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #include "pci.h" #include "msi.h" -@@ -479,7 +478,7 @@ static int msix_capability_init(struct p +@@ -438,7 +437,7 @@ static int msix_capability_init(struct p * to determine if MSI/-X are supported for the device. If MSI/-X is * supported return 0, else return an error code. **/ @@ -5786,7 +6094,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { struct pci_bus *bus; int ret; -@@ -496,8 +495,9 @@ static int pci_msi_check_device(struct p +@@ -455,8 +454,9 @@ static int pci_msi_check_device(struct p if (nvec < 1) return -ERANGE; @@ -5798,7 +6106,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches * the secondary pci_bus. * We expect only arch-specific PCI host bus controller driver * or quirks for specific PCI bridges to be setting NO_MSI. -@@ -615,7 +615,7 @@ void pci_msi_shutdown(struct pci_dev *de +@@ -574,7 +574,7 @@ void pci_msi_shutdown(struct pci_dev *de dev->msi_enabled = 0; } @@ -5807,7 +6115,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { pci_msi_shutdown(dev); } -@@ -655,14 +655,14 @@ int pci_msix_table_size(struct pci_dev * +@@ -614,14 +614,14 @@ int pci_msix_table_size(struct pci_dev * **/ extern int pci_frontend_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); @@ -5824,7 +6132,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifdef CONFIG_XEN_PCIDEV_FRONTEND if (!is_initial_xendomain()) { -@@ -737,7 +737,7 @@ int pci_enable_msix(struct pci_dev* dev, +@@ -696,7 +696,7 @@ int pci_enable_msix(struct pci_dev* dev, EXPORT_SYMBOL(pci_enable_msix); extern void pci_frontend_disable_msix(struct pci_dev* dev); @@ -5833,7 +6141,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { if (!pci_msi_enable || !dev || !dev->msix_enabled) return; -@@ -770,7 +770,8 @@ void pci_msix_shutdown(struct pci_dev* d +@@ -729,7 +729,8 @@ void pci_msix_shutdown(struct pci_dev* d pci_intx_for_msi(dev, 1); dev->msix_enabled = 0; } @@ -5843,7 +6151,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { pci_msix_shutdown(dev); } -@@ -785,14 +786,14 @@ EXPORT_SYMBOL(pci_disable_msix); +@@ -744,14 +745,14 @@ EXPORT_SYMBOL(pci_disable_msix); * allocated for this device function, are reclaimed to unused state, * which may be used later on. 
**/ @@ -5860,9 +6168,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches msi_dev_entry = get_msi_dev_pirq_list(dev); ---- head-2010-05-25.orig/drivers/pci/probe.c 2010-04-29 09:52:00.000000000 +0200 -+++ head-2010-05-25/drivers/pci/probe.c 2010-04-29 09:53:13.000000000 +0200 -@@ -1338,13 +1338,20 @@ int pci_scan_slot(struct pci_bus *bus, i +--- head-2011-03-17.orig/drivers/pci/probe.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/pci/probe.c 2011-02-01 14:54:13.000000000 +0100 +@@ -1340,13 +1340,20 @@ int pci_scan_slot(struct pci_bus *bus, i return 0; /* Already scanned the entire slot */ dev = pci_scan_single_device(bus, devfn); @@ -5885,9 +6193,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches else if (dev->multifunction) next_fn = next_trad_fn; ---- head-2010-05-25.orig/drivers/sfi/sfi_core.c 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/sfi/sfi_core.c 2010-03-24 15:32:27.000000000 +0100 -@@ -387,6 +387,11 @@ void __init sfi_init(void) +--- head-2011-03-17.orig/drivers/sfi/sfi_core.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/sfi/sfi_core.c 2011-02-01 14:54:13.000000000 +0100 +@@ -486,6 +486,11 @@ void __init sfi_init(void) if (!acpi_disabled) disable_sfi(); @@ -5899,8 +6207,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (sfi_disabled) return; ---- head-2010-05-25.orig/drivers/staging/hv/Kconfig 2010-05-25 09:12:08.000000000 +0200 -+++ head-2010-05-25/drivers/staging/hv/Kconfig 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/staging/hv/Kconfig 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/staging/hv/Kconfig 2011-02-01 14:54:13.000000000 +0100 @@ -1,6 +1,6 @@ config HYPERV tristate "Microsoft Hyper-V client drivers" @@ -5909,8 +6217,21 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches default n help Select this option to run Linux as a Hyper-V client operating ---- head-2010-05-25.orig/drivers/xen/Kconfig 2010-03-31 13:35:09.000000000 +0200 -+++ head-2010-05-25/drivers/xen/Kconfig 2010-03-31 14:01:28.000000000 +0200 +--- head-2011-03-17.orig/drivers/staging/vt6656/ttype.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/staging/vt6656/ttype.h 2010-08-25 14:42:20.000000000 +0200 +@@ -29,6 +29,10 @@ + #ifndef __TTYPE_H__ + #define __TTYPE_H__ + ++#ifdef CONFIG_XEN ++#include ++#endif ++ + /******* Common definitions and typedefs ***********************************/ + + typedef int BOOL; +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-02 15:37:23.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-01 14:54:13.000000000 +0100 @@ -22,6 +22,7 @@ config XEN_UNPRIVILEGED_GUEST select PM select PM_SLEEP @@ -5919,22 +6240,22 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches select SUSPEND config XEN_PRIVCMD ---- head-2010-05-25.orig/drivers/xen/Makefile 2010-04-19 14:53:25.000000000 +0200 -+++ head-2010-05-25/drivers/xen/Makefile 2010-04-19 14:53:46.000000000 +0200 -@@ -8,6 +8,11 @@ obj-$(CONFIG_XEN) += console/ - obj-y += xenbus/ - obj-$(CONFIG_XEN) += char/ +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-24 14:10:06.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-02-01 14:54:13.000000000 +0100 +@@ -10,6 +10,11 @@ obj-$(CONFIG_XEN) += char/ + + xen-backend-$(CONFIG_XEN_BACKEND) := util.o +nostackp := $(call cc-option, -fno-stack-protector) +ifeq 
($(CONFIG_PARAVIRT_XEN),y) +CFLAGS_features.o := $(nostackp) +endif + - obj-$(CONFIG_XEN) += features.o util.o - obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) - obj-$(CONFIG_XEN_XENCOMM) += xencomm.o ---- head-2010-05-25.orig/drivers/xen/balloon/balloon.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/balloon/balloon.c 2010-04-15 10:11:45.000000000 +0200 + obj-$(CONFIG_XEN) += features.o $(xen-backend-y) $(xen-backend-m) + obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) + obj-$(CONFIG_XEN_XENCOMM) += xencomm.o +--- head-2011-03-17.orig/drivers/xen/balloon/balloon.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/balloon/balloon.c 2011-02-01 14:54:13.000000000 +0100 @@ -77,6 +77,11 @@ static DEFINE_MUTEX(balloon_mutex); */ DEFINE_SPINLOCK(balloon_lock); @@ -5947,7 +6268,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ -@@ -200,14 +205,27 @@ static struct page *balloon_next_page(st +@@ -198,14 +203,27 @@ static struct page *balloon_next_page(st static inline void balloon_free_page(struct page *page) { #ifndef MODULE @@ -5978,7 +6299,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); -@@ -320,7 +338,7 @@ static int increase_reservation(unsigned +@@ -318,7 +336,7 @@ static int increase_reservation(unsigned totalram_pages = bs.current_pages - totalram_bias; out: @@ -5987,7 +6308,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifndef MODULE setup_per_zone_wmarks(); -@@ -559,6 +577,7 @@ static int __init balloon_init(void) +@@ -557,6 +575,7 @@ static int __init balloon_init(void) IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN @@ -5995,7 +6316,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else -@@ -720,8 +739,8 @@ struct page **alloc_empty_pages_and_page +@@ -718,8 +737,8 @@ struct page **alloc_empty_pages_and_page } if (ret != 0) { @@ -6005,8 +6326,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches goto err; } ---- head-2010-05-25.orig/drivers/xen/blkfront/vbd.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/vbd.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-02-01 14:54:13.000000000 +0100 @@ -105,7 +105,7 @@ static struct xlbd_major_info *major_inf #define XLBD_MAJOR_VBD_ALT(idx) ((idx) ^ XLBD_MAJOR_VBD_START ^ (XLBD_MAJOR_VBD_START + 1)) @@ -6016,9 +6337,47 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { .owner = THIS_MODULE, .open = blkif_open, ---- head-2010-05-25.orig/drivers/xen/blktap2/device.c 2010-04-19 14:53:31.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blktap2/device.c 2010-03-24 15:32:27.000000000 +0100 -@@ -141,7 +141,7 @@ blktap_device_ioctl(struct block_device +--- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:16:17.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-17 10:18:48.000000000 +0100 +@@ -279,13 +279,13 @@ static inline unsigned int OFFSET_TO_SEG + } while(0) + + +-static char *blktap_nodename(struct device *dev) ++static char 
*blktap_devnode(struct device *dev, mode_t *mode) + { + return kasprintf(GFP_KERNEL, "xen/blktap%u", MINOR(dev->devt)); + } + + static struct device_type blktap_type = { +- .nodename = blktap_nodename ++ .devnode = blktap_devnode + }; + + /****************************************************************** +@@ -1729,7 +1729,7 @@ static int __init blkif_init(void) + tap_blkif_xenbus_init(); + + /* Dynamically allocate a major for this device */ +- ret = register_chrdev(0, "blktap", &blktap_fops); ++ ret = __register_chrdev(0, 0, MAX_TAP_DEV, "blktap", &blktap_fops); + + if (ret < 0) { + WPRINTK("Couldn't register /dev/xen/blktap\n"); +--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-02-01 14:54:13.000000000 +0100 +@@ -154,7 +154,7 @@ static const struct file_operations blkt + static struct miscdevice blktap_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "blktap-control", +- .devnode = BLKTAP2_DEV_DIR "control", ++ .nodename = BLKTAP2_DEV_DIR "control", + .fops = &blktap_control_file_operations, + }; + +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-02-01 14:54:13.000000000 +0100 +@@ -139,7 +139,7 @@ blktap_device_ioctl(struct block_device return 0; } @@ -6027,8 +6386,98 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches .owner = THIS_MODULE, .open = blktap_device_open, .release = blktap_device_release, ---- head-2010-05-25.orig/drivers/xen/core/evtchn.c 2010-04-23 15:19:43.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/evtchn.c 2010-03-31 14:37:57.000000000 +0200 +@@ -1085,7 +1085,7 @@ blktap_device_destroy(struct blktap *tap + return 0; + } + +-static char *blktap_nodename(struct gendisk *gd) ++static char *blktap_devnode(struct gendisk *gd, mode_t *mode) + { + return kasprintf(GFP_KERNEL, BLKTAP2_DEV_DIR "tapdev%u", + gd->first_minor); +@@ -1127,7 +1127,7 @@ blktap_device_create(struct blktap *tap) + + gd->major = blktap_device_major; + gd->first_minor = minor; +- gd->nodename = blktap_nodename; ++ gd->devnode = blktap_devnode; + gd->fops = &blktap_device_file_operations; + gd->private_data = dev; + +--- head-2011-03-17.orig/drivers/xen/blktap2/sysfs.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/sysfs.c 2011-02-01 14:54:13.000000000 +0100 +@@ -436,7 +436,7 @@ blktap_sysfs_free(void) + class_destroy(class); + } + +-static char *blktap_nodename(struct device *dev) ++static char *blktap_devnode(struct device *dev, mode_t *mode) + { + return kasprintf(GFP_KERNEL, BLKTAP2_DEV_DIR "blktap%u", + MINOR(dev->devt)); +@@ -455,7 +455,7 @@ blktap_sysfs_init(void) + if (IS_ERR(cls)) + return PTR_ERR(cls); + +- cls->nodename = blktap_nodename; ++ cls->devnode = blktap_devnode; + + err = class_create_file(cls, &class_attr_verbosity); + if (!err) { +--- head-2011-03-17.orig/drivers/xen/blktap2-new/ring.c 2011-02-24 15:14:47.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/ring.c 2011-02-24 14:19:13.000000000 +0100 +@@ -8,7 +8,6 @@ + #include "blktap.h" + + int blktap_ring_major; +-static struct cdev blktap_ring_cdev; + + /* + * BLKTAP - immediately before the mmap area, +@@ -511,26 +510,16 @@ blktap_ring_debug(struct blktap *tap, ch + int __init + blktap_ring_init(void) + { +- dev_t dev = 0; + int err; + +- cdev_init(&blktap_ring_cdev, &blktap_ring_file_operations); +- blktap_ring_cdev.owner = THIS_MODULE; +- +- err = 
alloc_chrdev_region(&dev, 0, MAX_BLKTAP_DEVICE, "blktap2"); ++ err = __register_chrdev(0, 0, MAX_BLKTAP_DEVICE, "blktap2", ++ &blktap_ring_file_operations); + if (err < 0) { + BTERR("error registering ring devices: %d\n", err); + return err; + } + +- err = cdev_add(&blktap_ring_cdev, dev, MAX_BLKTAP_DEVICE); +- if (err) { +- BTERR("error adding ring device: %d\n", err); +- unregister_chrdev_region(dev, MAX_BLKTAP_DEVICE); +- return err; +- } +- +- blktap_ring_major = MAJOR(dev); ++ blktap_ring_major = err; + BTINFO("blktap ring major: %d\n", blktap_ring_major); + + return 0; +@@ -542,9 +531,8 @@ blktap_ring_exit(void) + if (!blktap_ring_major) + return; + +- cdev_del(&blktap_ring_cdev); +- unregister_chrdev_region(MKDEV(blktap_ring_major, 0), +- MAX_BLKTAP_DEVICE); ++ __unregister_chrdev(blktap_ring_major, 0, MAX_BLKTAP_DEVICE, ++ "blktap2"); + + blktap_ring_major = 0; + } +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-01 14:54:13.000000000 +0100 @@ -144,13 +144,13 @@ unsigned int irq_from_evtchn(unsigned in EXPORT_SYMBOL_GPL(irq_from_evtchn); @@ -6045,20 +6494,67 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #ifdef CONFIG_SMP ---- head-2010-05-25.orig/drivers/xen/core/reboot.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/reboot.c 2010-03-24 15:32:27.000000000 +0100 -@@ -83,7 +83,7 @@ static int xen_suspend(void *__unused) +--- head-2011-03-17.orig/drivers/xen/core/reboot.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/reboot.c 2011-02-01 14:54:13.000000000 +0100 +@@ -82,7 +82,7 @@ static int xen_suspend(void *__unused) int err, old_state; daemonize("suspend"); - err = set_cpus_allowed(current, cpumask_of_cpu(0)); + err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { - printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); + pr_err("Xen suspend can't run on CPU0 (%d)\n", err); goto fail; ---- head-2010-05-25.orig/drivers/xen/netback/interface.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/interface.c 2010-03-24 15:32:27.000000000 +0100 -@@ -159,7 +159,7 @@ static void netbk_get_strings(struct net +--- head-2011-03-17.orig/drivers/xen/evtchn.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/evtchn.c 2011-02-01 14:54:13.000000000 +0100 +@@ -530,7 +530,7 @@ static const struct file_operations evtc + static struct miscdevice evtchn_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "xen/evtchn", +- .devnode = "xen/evtchn", ++ .nodename = "xen/evtchn", + .fops = &evtchn_fops, + }; + static int __init evtchn_init(void) +--- head-2011-03-17.orig/drivers/xen/gntdev/gntdev.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/gntdev/gntdev.c 2011-02-01 14:54:13.000000000 +0100 +@@ -371,13 +371,13 @@ nomem_out: + + /* Interface functions. */ + +-static char *gntdev_nodename(struct device *dev) ++static char *gntdev_devnode(struct device *dev, mode_t *mode) + { + return kstrdup("xen/" GNTDEV_NAME, GFP_KERNEL); + } + + static struct device_type gntdev_type = { +- .nodename = gntdev_nodename ++ .devnode = gntdev_devnode + }; + + /* Initialises the driver. Called when the module is loaded. 
*/ +@@ -390,7 +390,7 @@ static int __init gntdev_init(void) + return -ENODEV; + } + +- gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops); ++ gntdev_major = __register_chrdev(0, 0, 1, GNTDEV_NAME, &gntdev_fops); + if (gntdev_major < 0) + { + pr_err("Could not register gntdev device\n"); +@@ -420,7 +420,7 @@ static void __exit gntdev_exit(void) + struct class *class; + if ((class = get_xen_class()) != NULL) + device_destroy(class, MKDEV(gntdev_major, 0)); +- unregister_chrdev(gntdev_major, GNTDEV_NAME); ++ __unregister_chrdev(gntdev_major, 0, 1, GNTDEV_NAME); + } + + /* Called when the device is opened. */ +--- head-2011-03-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:16:00.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/interface.c 2011-02-17 10:18:52.000000000 +0100 +@@ -205,7 +205,7 @@ static void netbk_get_strings(struct net } } @@ -6067,18 +6563,18 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches { .get_drvinfo = netbk_get_drvinfo, ---- head-2010-05-25.orig/drivers/xen/netback/loopback.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/loopback.c 2010-03-24 15:32:27.000000000 +0100 -@@ -134,7 +134,7 @@ static int loopback_start_xmit(struct sk +--- head-2011-03-17.orig/drivers/xen/netback/loopback.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/loopback.c 2011-03-01 11:52:41.000000000 +0100 +@@ -136,7 +136,7 @@ static int loopback_start_xmit(struct sk if (!skb_remove_foreign_references(skb)) { - np->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); - return 0; + return NETDEV_TX_OK; } dst_release(skb_dst(skb)); -@@ -173,7 +173,7 @@ static int loopback_start_xmit(struct sk +@@ -162,7 +162,7 @@ static int loopback_start_xmit(struct sk netif_rx(skb); @@ -6086,19 +6582,19 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches + return NETDEV_TX_OK; } - static struct net_device_stats *loopback_get_stats(struct net_device *dev) -@@ -182,7 +182,7 @@ static struct net_device_stats *loopback - return &np->stats; + static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +@@ -172,7 +172,7 @@ static void get_drvinfo(struct net_devic + loopback_priv(dev)->loop_idx); } -static struct ethtool_ops network_ethtool_ops = +static const struct ethtool_ops network_ethtool_ops = { - .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_csum, ---- head-2010-05-25.orig/drivers/xen/netback/netback.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netback/netback.c 2010-03-24 15:32:27.000000000 +0100 -@@ -340,12 +340,12 @@ int netif_be_start_xmit(struct sk_buff * + .get_drvinfo = get_drvinfo, + +--- head-2011-03-17.orig/drivers/xen/netback/netback.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netback/netback.c 2011-03-01 11:52:43.000000000 +0100 +@@ -349,12 +349,12 @@ int netif_be_start_xmit(struct sk_buff * skb_queue_tail(&rx_queue, skb); tasklet_schedule(&net_rx_tasklet); @@ -6106,16 +6602,16 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches + return NETDEV_TX_OK; drop: - netif->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); - return 0; + return NETDEV_TX_OK; } #if 0 ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.c 2010-03-24 15:32:27.000000000 +0100 -@@ -953,7 +953,7 @@ static int network_start_xmit(struct 
sk_ +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-09 16:05:04.000000000 +0100 +@@ -949,7 +949,7 @@ static int network_start_xmit(struct sk_ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ @@ -6124,7 +6620,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); -@@ -1042,12 +1042,12 @@ static int network_start_xmit(struct sk_ +@@ -1035,12 +1035,12 @@ static int network_start_xmit(struct sk_ spin_unlock_irq(&np->tx_lock); @@ -6132,24 +6628,24 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches + return NETDEV_TX_OK; drop: - np->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); - return 0; + return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) -@@ -1872,7 +1872,7 @@ static void netif_uninit(struct net_devi +@@ -1912,7 +1912,7 @@ static void netif_uninit(struct net_devi gnttab_free_grant_references(np->gref_rx_head); } -static struct ethtool_ops network_ethtool_ops = +static const struct ethtool_ops network_ethtool_ops = { + .get_drvinfo = netfront_get_drvinfo, .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_csum, ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel_fwd.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel_fwd.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_fwd.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/accel_fwd.c 2011-02-01 14:54:13.000000000 +0100 @@ -181,11 +181,10 @@ int netback_accel_fwd_add(const __u8 *ma unsigned long flags; cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac); @@ -6203,8 +6699,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches spin_lock_irqsave(&fwd_set->fwd_lock, flags); /* ---- head-2010-05-25.orig/drivers/xen/sfc_netback/accel_msg.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netback/accel_msg.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_msg.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/accel_msg.c 2011-02-01 14:54:13.000000000 +0100 @@ -57,11 +57,10 @@ static void netback_accel_msg_tx_localma { unsigned long lock_state; @@ -6218,8 +6714,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU, &lock_state); ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel_msg.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/sfc_netfront/accel_msg.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netfront/accel_msg.c 2011-02-01 14:54:13.000000000 +0100 @@ -327,10 +327,8 @@ static int vnic_process_localmac_msg(net cuckoo_hash_mac_key key; @@ -6233,8 +6729,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches key = cuckoo_mac_to_key(msg->u.localmac.mac); spin_lock_irqsave(&vnic->table_lock, flags); /* Try to remove it, not a big deal if not there */ ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:10:29.000000000 +0100 -+++ 
head-2010-05-25/drivers/xen/sfc_netfront/accel_vi.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/sfc_netfront/accel_vi.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netfront/accel_vi.c 2011-02-01 14:54:13.000000000 +0100 @@ -643,10 +643,7 @@ netfront_accel_vi_tx_post(netfront_accel (cuckoo_hash_key *)(&key), &value); @@ -6260,8 +6756,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches if (ip->protocol == IPPROTO_TCP) { struct tcphdr *tcp = (struct tcphdr *) ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_dev.c 2009-05-29 10:25:53.000000000 +0200 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_dev.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_dev.c 2011-01-03 12:43:21.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_dev.c 2011-02-01 14:54:13.000000000 +0100 @@ -36,6 +36,7 @@ #include #include @@ -6270,8 +6766,8 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:32:27.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:54:13.000000000 +0100 @@ -42,6 +42,7 @@ #include #include @@ -6280,9 +6776,21 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/lib/swiotlb-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/lib/swiotlb-xen.c 2010-03-24 15:32:27.000000000 +0100 -@@ -111,79 +111,11 @@ setup_io_tlb_npages(char *str) +--- head-2011-03-17.orig/include/linux/nmi.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/include/linux/nmi.h 2011-02-16 09:06:03.000000000 +0100 +@@ -18,6 +18,9 @@ + #include + extern void touch_nmi_watchdog(void); + #else ++#ifdef CONFIG_XEN ++#include ++#endif + static inline void touch_nmi_watchdog(void) + { + touch_softlockup_watchdog(); +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 14:54:13.000000000 +0100 +@@ -119,79 +119,11 @@ setup_io_tlb_npages(char *str) __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ @@ -6364,7 +6872,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } static void swiotlb_print_info(unsigned long bytes) -@@ -216,10 +148,35 @@ swiotlb_init_with_default_size(size_t de +@@ -224,10 +156,35 @@ swiotlb_init_with_default_size(size_t de /* * Get IO TLB memory from the low pages */ @@ -6402,7 +6910,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches io_tlb_end = io_tlb_start + bytes; /* -@@ -283,13 +240,10 @@ static inline int range_needs_mapping(ph +@@ -291,13 +248,10 @@ static inline int range_needs_mapping(ph static int is_swiotlb_buffer(dma_addr_t addr) { unsigned long pfn = mfn_to_local_pfn(PFN_DOWN(addr)); @@ -6419,7 +6927,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } /* -@@ -514,12 +468,15 @@ swiotlb_full(struct device *dev, size_t +@@ -537,12 +491,15 @@ swiotlb_full(struct device *dev, size_t printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %zu bytes at " "device %s\n", size, dev ? 
dev_name(dev) : "?"); @@ -6441,7 +6949,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches } /* -@@ -545,7 +502,7 @@ dma_addr_t swiotlb_map_page(struct devic +@@ -568,7 +525,7 @@ dma_addr_t swiotlb_map_page(struct devic * we can safely return the device addr and not worry about bounce * buffering it. */ @@ -6450,7 +6958,7 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches !range_needs_mapping(phys, size)) return dev_addr; -@@ -575,12 +532,12 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); +@@ -598,12 +555,12 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir) { @@ -6465,24 +6973,9 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches return; } -@@ -609,12 +566,12 @@ void - swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) - { -- char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); -+ phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); - - BUG_ON(dir == DMA_NONE); - - if (is_swiotlb_buffer(dev_addr)) -- sync_single(hwdev, dma_addr, size, dir); -+ sync_single(hwdev, phys_to_virt(paddr), size, dir); - } - EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); - -@@ -622,12 +579,12 @@ void - swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) +@@ -632,12 +589,12 @@ static void + swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, int target) { - char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); + phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); @@ -6490,12 +6983,12 @@ Automatically created from "patches.kernel.org/patch-2.6.32" by xen-port-patches BUG_ON(dir == DMA_NONE); if (is_swiotlb_buffer(dev_addr)) -- sync_single(hwdev, dma_addr, size, dir); -+ sync_single(hwdev, phys_to_virt(paddr), size, dir); +- sync_single(hwdev, dma_addr, size, dir, target); ++ sync_single(hwdev, phys_to_virt(paddr), size, dir, target); } - EXPORT_SYMBOL(swiotlb_sync_single_for_device); -@@ -680,8 +637,8 @@ swiotlb_map_sg_attrs(struct device *hwde + void +@@ -718,8 +675,8 @@ swiotlb_map_sg_attrs(struct device *hwde phys_addr_t paddr = page_to_pseudophys(sg_page(sg)) + sg->offset; diff --git a/patches.xen/xen3-patch-2.6.33 b/patches.xen/xen3-patch-2.6.33 index bfc8b63..cd1f14c 100644 --- a/patches.xen/xen3-patch-2.6.33 +++ b/patches.xen/xen3-patch-2.6.33 @@ -7,8 +7,8 @@ Patch-mainline: 2.6.33 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches.py ---- head-2010-05-12.orig/arch/ia64/include/asm/xen/hypervisor.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/ia64/include/asm/xen/hypervisor.h 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/ia64/include/asm/xen/hypervisor.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/ia64/include/asm/xen/hypervisor.h 2011-02-01 14:55:46.000000000 +0100 @@ -34,11 +34,11 @@ #define _ASM_IA64_XEN_HYPERVISOR_H @@ -22,20 +22,29 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #include extern struct shared_info *HYPERVISOR_shared_info; ---- head-2010-05-12.orig/arch/x86/Kconfig 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-12/arch/x86/Kconfig 2010-03-24 16:00:05.000000000 +0100 -@@ -51,7 +51,7 @@ config X86 - select HAVE_KERNEL_GZIP +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-01 14:50:44.000000000 +0100 ++++ 
head-2011-03-17/arch/x86/Kconfig 2011-02-01 14:55:46.000000000 +0100 +@@ -21,7 +21,7 @@ config X86 + select HAVE_UNSTABLE_SCHED_CLOCK + select HAVE_IDE + select HAVE_OPROFILE +- select HAVE_PERF_EVENTS if !XEN ++ select HAVE_PERF_EVENTS + select HAVE_IRQ_WORK + select HAVE_IOREMAP_PROT + select HAVE_KPROBES +@@ -52,7 +52,7 @@ config X86 select HAVE_KERNEL_BZIP2 if !XEN select HAVE_KERNEL_LZMA if !XEN + select HAVE_KERNEL_XZ - select HAVE_KERNEL_LZO + select HAVE_KERNEL_LZO if !XEN select HAVE_HW_BREAKPOINT + select HAVE_MIXED_BREAKPOINTS_REGS select PERF_EVENTS - select ANON_INODES ---- head-2010-05-12.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/ia32/ia32entry-xen.S 2010-03-24 16:00:05.000000000 +0100 -@@ -546,7 +546,7 @@ ia32_sys_call_table: +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:55:46.000000000 +0100 +@@ -534,7 +534,7 @@ ia32_sys_call_table: .quad compat_sys_writev .quad sys_getsid .quad sys_fdatasync @@ -44,7 +53,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches .quad sys_mlock /* 150 */ .quad sys_munlock .quad sys_mlockall -@@ -589,7 +589,7 @@ ia32_sys_call_table: +@@ -577,7 +577,7 @@ ia32_sys_call_table: .quad quiet_ni_syscall /* streams2 */ .quad stub32_vfork /* 190 */ .quad compat_sys_getrlimit @@ -53,34 +62,54 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches .quad sys32_truncate64 .quad sys32_ftruncate64 .quad sys32_stat64 /* 195 */ -@@ -734,4 +734,5 @@ ia32_sys_call_table: +@@ -722,4 +722,5 @@ ia32_sys_call_table: .quad compat_sys_pwritev .quad compat_sys_rt_tgsigqueueinfo /* 335 */ .quad sys_perf_event_open + .quad compat_sys_recvmmsg ia32_syscall_end: ---- head-2010-05-12.orig/arch/x86/include/asm/hw_irq.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/asm/hw_irq.h 2010-03-24 16:00:05.000000000 +0100 -@@ -78,6 +78,7 @@ static inline void set_io_apic_irq_attr( - irq_attr->polarity = polarity; - } +--- head-2011-03-17.orig/arch/x86/include/asm/hw_irq.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/hw_irq.h 2011-02-01 14:55:46.000000000 +0100 +@@ -85,6 +85,7 @@ struct irq_2_iommu { + u8 irte_mask; + }; +#ifndef CONFIG_XEN /* * This is performance-critical, we want to do it O(1) * -@@ -92,6 +93,9 @@ struct irq_cfg { +@@ -100,6 +101,9 @@ struct irq_cfg { + struct irq_2_iommu irq_2_iommu; + #endif }; - - extern struct irq_cfg *irq_cfg(unsigned int); +#else +struct irq_cfg; +#endif + extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); extern void send_cleanup_vector(struct irq_cfg *); - ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/pgtable.h 2010-03-24 16:00:05.000000000 +0100 +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/perf_event.h 2011-02-01 14:55:46.000000000 +0100 +@@ -0,0 +1,17 @@ ++#ifndef _ASM_X86_PERF_EVENT_H ++#define _ASM_X86_PERF_EVENT_H ++ ++#ifdef CONFIG_PERF_EVENTS ++ ++/* ++ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. ++ * This flag is otherwise unused and ABI specified to be 0, so nobody should ++ * care what we do with it. 
++ */ ++#define PERF_EFLAGS_EXACT (1UL << 3) ++ ++#endif ++ ++static inline void init_hw_perf_events(void) {} ++ ++#endif /* _ASM_X86_PERF_EVENT_H */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:55:46.000000000 +0100 @@ -16,6 +16,8 @@ #ifndef __ASSEMBLY__ @@ -90,7 +119,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. -@@ -268,9 +270,9 @@ static inline int is_new_memtype_allowed +@@ -267,9 +269,9 @@ static inline int is_new_memtype_allowed unsigned long new_flags) { /* @@ -102,8 +131,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return 1; /* ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/processor.h 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:46:37.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:46:54.000000000 +0100 @@ -31,6 +31,7 @@ struct mm_struct; #include #include @@ -112,7 +141,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * Default implementation of macro that returns current * instruction pointer ("program counter"). -@@ -181,7 +182,7 @@ static inline void xen_cpuid(unsigned in +@@ -191,7 +192,7 @@ static inline void xen_cpuid(unsigned in unsigned int *ecx, unsigned int *edx) { /* ecx is often an input as well as an output. */ @@ -121,7 +150,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), -@@ -430,6 +431,8 @@ extern unsigned int xstate_size; +@@ -440,6 +441,8 @@ extern unsigned int xstate_size; extern void free_thread_xstate(struct task_struct *); extern struct kmem_cache *task_xstate_cachep; @@ -130,7 +159,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches struct thread_struct { /* Cached TLS descriptors: */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; -@@ -450,13 +453,12 @@ struct thread_struct { +@@ -460,13 +463,12 @@ struct thread_struct { unsigned long fs; #endif unsigned long gs; @@ -150,8 +179,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* Fault info: */ unsigned long cr2; unsigned long trap_no; ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/spinlock.h 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:55:46.000000000 +0100 @@ -44,10 +44,10 @@ int xen_spinlock_init(unsigned int cpu); @@ -419,8 +448,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* The {read|write|spin}_lock() on x86 are full memory barriers. 
*/ static inline void smp_mb__after_lock(void) { } ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:55:46.000000000 +0100 @@ -42,14 +42,14 @@ typedef union { #endif #endif @@ -440,8 +469,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_X86_SPINLOCK_TYPES_H */ ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/swiotlb.h 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/swiotlb.h 2011-02-01 14:55:46.000000000 +0100 @@ -1,4 +1,6 @@ #include_next @@ -449,8 +478,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches + dma_addr_t swiotlb_map_single_phys(struct device *, phys_addr_t, size_t size, int dir); ---- head-2010-05-12.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/include/mach-xen/asm/system.h 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:07:45.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:09:31.000000000 +0100 @@ -12,9 +12,9 @@ #include @@ -471,7 +500,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #ifdef CONFIG_X86_32 -@@ -127,8 +128,6 @@ do { \ +@@ -129,8 +130,6 @@ do { \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ "call __switch_to\n\t" \ @@ -480,7 +509,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches "movq "__percpu_arg([current_task])",%%rsi\n\t" \ __switch_canary \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ -@@ -156,19 +155,22 @@ extern void xen_load_gs_index(unsigned); +@@ -158,19 +157,22 @@ extern void xen_load_gs_index(unsigned); * Load a segment. Fall back on loading the zero * segment if something goes wrong.. 
*/ @@ -516,8 +545,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * Save a segment register away ---- head-2010-05-12.orig/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/acpi/sleep-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -81,12 +81,9 @@ int acpi_save_state_mem(void) #ifndef CONFIG_64BIT store_gdt((struct desc_ptr *)&header->pmode_gdt); @@ -583,27 +612,17 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); ---- head-2010-05-12.orig/arch/x86/kernel/apic/Makefile 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/apic/Makefile 2010-03-24 16:00:05.000000000 +0100 -@@ -22,5 +22,5 @@ obj-$(CONFIG_XEN) += nmi.o +--- head-2011-03-17.orig/arch/x86/kernel/apic/Makefile 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/Makefile 2011-02-01 14:55:46.000000000 +0100 +@@ -24,4 +24,4 @@ obj-$(CONFIG_XEN) += nmi.o probe_64-$(CONFIG_XEN) := probe_32.o -disabled-obj-$(CONFIG_XEN) := apic_flat_$(BITS).o +disabled-obj-$(CONFIG_XEN) := apic_flat_$(BITS).o apic_noop.o - disabled-obj-$(filter-out $(CONFIG_SMP),$(CONFIG_XEN)) += ipi.o ---- head-2010-05-12.orig/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 16:00:05.000000000 +0100 -@@ -60,8 +60,6 @@ - #include - #include - #include --#include --#include - - #include - -@@ -160,20 +158,6 @@ static struct irq_pin_list *get_one_free +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:55:46.000000000 +0100 +@@ -150,20 +150,6 @@ static struct irq_pin_list *get_one_free return pin; } @@ -624,7 +643,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ #ifdef CONFIG_SPARSE_IRQ static struct irq_cfg irq_cfgx[] = { -@@ -229,7 +213,7 @@ int __init arch_early_irq_init(void) +@@ -219,7 +205,7 @@ int __init arch_early_irq_init(void) } #ifdef CONFIG_SPARSE_IRQ @@ -633,7 +652,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { struct irq_cfg *cfg = NULL; struct irq_desc *desc; -@@ -381,7 +365,7 @@ void arch_free_chip_data(struct irq_desc +@@ -371,7 +357,7 @@ void arch_free_chip_data(struct irq_desc /* end for move_irq_desc */ #else @@ -642,7 +661,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { return irq < nr_irqs ? 
irq_cfgx + irq : NULL; } -@@ -604,23 +588,41 @@ static void __init replace_pin_at_irq_no +@@ -594,23 +580,41 @@ static void __init replace_pin_at_irq_no add_pin_to_irq_node(cfg, node, newapic, newpin); } @@ -695,7 +714,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) -@@ -644,18 +646,6 @@ static void __mask_IO_APIC_irq(struct ir +@@ -634,18 +638,6 @@ static void __mask_IO_APIC_irq(struct ir io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); } @@ -714,7 +733,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches static void mask_IO_APIC_irq_desc(struct irq_desc *desc) { struct irq_cfg *cfg = desc->chip_data; -@@ -1235,7 +1225,7 @@ __assign_irq_vector(int irq, struct irq_ +@@ -1225,7 +1217,7 @@ __assign_irq_vector(int irq, struct irq_ int cpu, err; cpumask_var_t tmp_mask; @@ -723,7 +742,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return -EBUSY; if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) -@@ -1299,8 +1289,7 @@ next: +@@ -1289,8 +1281,7 @@ next: return err; } @@ -733,7 +752,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { int err; unsigned long flags; -@@ -1677,9 +1666,6 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1668,9 +1659,6 @@ __apicdebuginit(void) print_IO_APIC(void struct irq_desc *desc; unsigned int irq; @@ -743,7 +762,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); for (i = 0; i < nr_ioapics; i++) printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", -@@ -1786,9 +1772,6 @@ __apicdebuginit(void) print_APIC_field(i +@@ -1777,9 +1765,6 @@ __apicdebuginit(void) print_APIC_field(i { int i; @@ -753,7 +772,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches printk(KERN_DEBUG); for (i = 0; i < 8; i++) -@@ -1802,9 +1785,6 @@ __apicdebuginit(void) print_local_APIC(v +@@ -1793,9 +1778,6 @@ __apicdebuginit(void) print_local_APIC(v unsigned int i, v, ver, maxlvt; u64 icr; @@ -763,7 +782,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", smp_processor_id(), hard_smp_processor_id()); v = apic_read(APIC_ID); -@@ -1902,13 +1882,19 @@ __apicdebuginit(void) print_local_APIC(v +@@ -1893,13 +1875,19 @@ __apicdebuginit(void) print_local_APIC(v printk("\n"); } @@ -785,7 +804,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches preempt_enable(); } -@@ -1917,7 +1903,7 @@ __apicdebuginit(void) print_PIC(void) +@@ -1908,7 +1896,7 @@ __apicdebuginit(void) print_PIC(void) unsigned int v; unsigned long flags; @@ -794,7 +813,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return; printk(KERN_DEBUG "\nprinting PIC contents\n"); -@@ -1944,21 +1930,41 @@ __apicdebuginit(void) print_PIC(void) +@@ -1935,21 +1923,41 @@ __apicdebuginit(void) print_PIC(void) printk(KERN_DEBUG "... 
PIC ELCR: %04x\n", v); } @@ -839,7 +858,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* Where if anywhere is the i8259 connect in external int mode */ -@@ -2117,7 +2123,7 @@ void __init setup_ioapic_ids_from_mpc(vo +@@ -2106,7 +2114,7 @@ void __init setup_ioapic_ids_from_mpc(vo * This is broken; anything with a real cpu count has to * circumvent this idiocy regardless. */ @@ -848,7 +867,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * Set the IOAPIC ID to the value stored in the MPC table. -@@ -2144,7 +2150,7 @@ void __init setup_ioapic_ids_from_mpc(vo +@@ -2133,7 +2141,7 @@ void __init setup_ioapic_ids_from_mpc(vo * system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. */ @@ -857,7 +876,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches mp_ioapics[apic_id].apicid)) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", apic_id, mp_ioapics[apic_id].apicid); -@@ -2159,7 +2165,7 @@ void __init setup_ioapic_ids_from_mpc(vo +@@ -2148,7 +2156,7 @@ void __init setup_ioapic_ids_from_mpc(vo mp_ioapics[apic_id].apicid = i; } else { physid_mask_t tmp; @@ -866,7 +885,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches apic_printk(APIC_VERBOSE, "Setting %d in the " "phys_id_present_map\n", mp_ioapics[apic_id].apicid); -@@ -2314,20 +2320,16 @@ static int ioapic_retrigger_irq(unsigned +@@ -2303,20 +2311,16 @@ static int ioapic_retrigger_irq(unsigned */ #ifdef CONFIG_SMP @@ -888,7 +907,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); free_cpumask_var(cleanup_mask); } -@@ -2358,31 +2360,30 @@ static void __target_IO_APIC_irq(unsigne +@@ -2347,31 +2351,30 @@ static void __target_IO_APIC_irq(unsigne } } @@ -928,7 +947,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } static int -@@ -2398,12 +2399,11 @@ set_ioapic_affinity_irq_desc(struct irq_ +@@ -2387,12 +2390,11 @@ set_ioapic_affinity_irq_desc(struct irq_ cfg = desc->chip_data; spin_lock_irqsave(&ioapic_lock, flags); @@ -943,7 +962,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } spin_unlock_irqrestore(&ioapic_lock, flags); -@@ -2518,8 +2518,13 @@ asmlinkage void smp_irq_move_cleanup_int +@@ -2507,8 +2509,13 @@ asmlinkage void smp_irq_move_cleanup_int continue; cfg = irq_cfg(irq); @@ -959,7 +978,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches goto unlock; if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) -@@ -2538,29 +2543,40 @@ asmlinkage void smp_irq_move_cleanup_int +@@ -2527,29 +2534,40 @@ asmlinkage void smp_irq_move_cleanup_int goto unlock; } __get_cpu_var(vector_irq)[vector] = -1; @@ -1005,7 +1024,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #else static inline void irq_complete_move(struct irq_desc **descp) {} #endif -@@ -2576,6 +2592,59 @@ static void ack_apic_edge(unsigned int i +@@ -2565,6 +2583,59 @@ static void ack_apic_edge(unsigned int i atomic_t irq_mis_count; @@ -1065,7 +1084,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches static void ack_apic_level(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); -@@ -2611,6 +2680,19 @@ static void ack_apic_level(unsigned int +@@ -2600,6 +2671,19 @@ static void 
ack_apic_level(unsigned int * level-triggered interrupt. We mask the source for the time of the * operation to prevent an edge-triggered interrupt escaping meanwhile. * The idea is from Manfred Spraul. --macro @@ -1085,7 +1104,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches */ cfg = desc->chip_data; i = cfg->vector; -@@ -2622,6 +2704,19 @@ static void ack_apic_level(unsigned int +@@ -2611,6 +2695,19 @@ static void ack_apic_level(unsigned int */ ack_APIC_irq(); @@ -1105,7 +1124,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* Now we can move and renable the irq */ if (unlikely(do_unmask_irq)) { /* Only migrate the irq if the ack has been received. -@@ -2655,41 +2750,9 @@ static void ack_apic_level(unsigned int +@@ -2644,41 +2741,9 @@ static void ack_apic_level(unsigned int move_masked_irq(irq); unmask_IO_APIC_irq_desc(desc); } @@ -1147,7 +1166,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches static void ir_ack_apic_edge(unsigned int irq) { ack_APIC_irq(); -@@ -3267,6 +3330,7 @@ unsigned int create_irq_nr(unsigned int +@@ -3256,6 +3321,7 @@ unsigned int create_irq_nr(unsigned int continue; desc_new = move_irq_desc(desc_new, node); @@ -1155,7 +1174,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) irq = new; -@@ -3322,7 +3386,8 @@ void destroy_irq(unsigned int irq) +@@ -3311,7 +3377,8 @@ void destroy_irq(unsigned int irq) * MSI message composition */ #if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN) @@ -1165,7 +1184,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { struct irq_cfg *cfg; int err; -@@ -3356,7 +3421,10 @@ static int msi_compose_msg(struct pci_de +@@ -3345,7 +3412,10 @@ static int msi_compose_msg(struct pci_de irte.dest_id = IRTE_DEST(dest); /* Set source-id of interrupt request */ @@ -1177,7 +1196,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches modify_irte(irq, &irte); -@@ -3402,8 +3470,7 @@ static int set_msi_irq_affinity(unsigned +@@ -3391,8 +3461,7 @@ static int set_msi_irq_affinity(unsigned struct msi_msg msg; unsigned int dest; @@ -1187,7 +1206,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return -1; cfg = desc->chip_data; -@@ -3435,8 +3502,7 @@ ir_set_msi_irq_affinity(unsigned int irq +@@ -3424,8 +3493,7 @@ ir_set_msi_irq_affinity(unsigned int irq if (get_irte(irq, &irte)) return -1; @@ -1197,7 +1216,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return -1; irte.vector = cfg->vector; -@@ -3521,7 +3587,7 @@ static int setup_msi_irq(struct pci_dev +@@ -3510,7 +3578,7 @@ static int setup_msi_irq(struct pci_dev int ret; struct msi_msg msg; @@ -1206,7 +1225,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (ret < 0) return ret; -@@ -3618,8 +3684,7 @@ static int dmar_msi_set_affinity(unsigne +@@ -3607,8 +3675,7 @@ static int dmar_msi_set_affinity(unsigne struct msi_msg msg; unsigned int dest; @@ -1216,7 +1235,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return -1; cfg = desc->chip_data; -@@ -3654,7 +3719,7 @@ int arch_setup_dmar_msi(unsigned int irq +@@ -3643,7 +3710,7 @@ int arch_setup_dmar_msi(unsigned int irq int ret; struct msi_msg msg; @@ -1225,7 +1244,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by 
xen-port-patches if (ret < 0) return ret; dmar_msi_write(irq, &msg); -@@ -3674,8 +3739,7 @@ static int hpet_msi_set_affinity(unsigne +@@ -3663,8 +3730,7 @@ static int hpet_msi_set_affinity(unsigne struct msi_msg msg; unsigned int dest; @@ -1235,7 +1254,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return -1; cfg = desc->chip_data; -@@ -3694,6 +3758,19 @@ static int hpet_msi_set_affinity(unsigne +@@ -3683,6 +3749,19 @@ static int hpet_msi_set_affinity(unsigne #endif /* CONFIG_SMP */ @@ -1255,7 +1274,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches static struct irq_chip hpet_msi_type = { .name = "HPET_MSI", .unmask = hpet_msi_unmask, -@@ -3705,20 +3782,36 @@ static struct irq_chip hpet_msi_type = { +@@ -3694,20 +3773,36 @@ static struct irq_chip hpet_msi_type = { .retrigger = ioapic_retrigger_irq, }; @@ -1296,7 +1315,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return 0; } -@@ -3752,8 +3845,7 @@ static int set_ht_irq_affinity(unsigned +@@ -3741,8 +3836,7 @@ static int set_ht_irq_affinity(unsigned struct irq_cfg *cfg; unsigned int dest; @@ -1306,7 +1325,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return -1; cfg = desc->chip_data; -@@ -3819,75 +3911,6 @@ int arch_setup_ht_irq(unsigned int irq, +@@ -3808,75 +3902,6 @@ int arch_setup_ht_irq(unsigned int irq, } #endif /* CONFIG_HT_IRQ */ @@ -1382,7 +1401,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches int __init io_apic_get_redir_entries (int ioapic) { union IO_APIC_reg_01 reg_01; -@@ -4065,7 +4088,7 @@ int __init io_apic_get_unique_id(int ioa +@@ -4055,7 +4080,7 @@ int __init io_apic_get_unique_id(int ioa */ if (physids_empty(apic_id_map)) @@ -1391,7 +1410,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic, 0); -@@ -4081,10 +4104,10 @@ int __init io_apic_get_unique_id(int ioa +@@ -4071,10 +4096,10 @@ int __init io_apic_get_unique_id(int ioa * Every APIC in a system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. 
*/ @@ -1404,7 +1423,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches break; } -@@ -4097,7 +4120,7 @@ int __init io_apic_get_unique_id(int ioa +@@ -4087,7 +4112,7 @@ int __init io_apic_get_unique_id(int ioa apic_id = i; } @@ -1413,7 +1432,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches physids_or(apic_id_map, apic_id_map, tmp); if (reg_00.bits.ID != apic_id) { -@@ -4229,7 +4252,7 @@ static struct resource * __init ioapic_s +@@ -4218,7 +4243,7 @@ static struct resource * __init ioapic_s for (i = 0; i < nr_ioapics; i++) { res[i].name = mem; res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; @@ -1422,7 +1441,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches mem += IOAPIC_RESOURCE_NAME_SIZE; } -@@ -4263,18 +4286,17 @@ void __init ioapic_init_mappings(void) +@@ -4252,18 +4277,17 @@ void __init ioapic_init_mappings(void) #ifdef CONFIG_X86_32 fake_ioapic_page: #endif @@ -1446,19 +1465,20 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches ioapic_res++; } } ---- head-2010-05-12.orig/arch/x86/kernel/cpu/Makefile 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/cpu/Makefile 2010-03-24 16:00:05.000000000 +0100 -@@ -34,7 +34,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ +--- head-2011-03-17.orig/arch/x86/kernel/cpu/Makefile 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/Makefile 2011-02-01 14:55:46.000000000 +0100 +@@ -34,7 +34,8 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o --disabled-obj-$(CONFIG_XEN) := hypervisor.o sched.o vmware.o -+disabled-obj-$(CONFIG_XEN) := hypervisor.o perf_event.o sched.o vmware.o +-disabled-obj-$(CONFIG_XEN) := hypervisor.o perfctr-watchdog.o sched.o vmware.o ++disabled-obj-$(CONFIG_XEN) := hypervisor.o perfctr-watchdog.o perf_event.o \ ++ sched.o vmware.o quiet_cmd_mkcapflags = MKCAP $@ cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ ---- head-2010-05-12.orig/arch/x86/kernel/cpu/common-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/cpu/common-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:34.000000000 +0100 @@ -69,7 +69,7 @@ void __init setup_cpu_local_masks(void) static void __cpuinit default_init(struct cpuinfo_x86 *c) { @@ -1468,7 +1488,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #else /* Not much we can do here... 
*/ /* Check if at least it has cpuid */ -@@ -411,7 +411,7 @@ static void __cpuinit get_model_name(str +@@ -414,7 +414,7 @@ static void __cpuinit get_model_name(str } } @@ -1477,7 +1497,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { unsigned int n, dummy, ebx, ecx, edx, l2size; -@@ -419,8 +419,6 @@ void __cpuinit display_cacheinfo(struct +@@ -422,8 +422,6 @@ void __cpuinit display_cacheinfo(struct if (n >= 0x80000005) { cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); @@ -1486,7 +1506,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches c->x86_cache_size = (ecx>>24) + (edx>>24); #ifdef CONFIG_X86_64 /* On K8 L1 TLB is inclusive, so don't count it */ -@@ -450,9 +448,6 @@ void __cpuinit display_cacheinfo(struct +@@ -453,9 +451,6 @@ void __cpuinit display_cacheinfo(struct #endif c->x86_cache_size = l2size; @@ -1496,7 +1516,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } void __cpuinit detect_ht(struct cpuinfo_x86 *c) -@@ -460,6 +455,7 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -463,6 +458,7 @@ void __cpuinit detect_ht(struct cpuinfo_ #ifdef CONFIG_X86_HT u32 eax, ebx, ecx, edx; int index_msb, core_bits; @@ -1504,7 +1524,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (!cpu_has(c, X86_FEATURE_HT)) return; -@@ -475,7 +471,7 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -478,7 +474,7 @@ void __cpuinit detect_ht(struct cpuinfo_ smp_num_siblings = (ebx & 0xff0000) >> 16; if (smp_num_siblings == 1) { @@ -1513,7 +1533,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches goto out; } -@@ -502,11 +498,12 @@ void __cpuinit detect_ht(struct cpuinfo_ +@@ -505,11 +501,12 @@ void __cpuinit detect_ht(struct cpuinfo_ ((1 << core_bits) - 1); out: @@ -1527,11 +1547,11 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } #endif } -@@ -687,24 +684,31 @@ void __init early_cpu_init(void) +@@ -690,24 +687,31 @@ void __init early_cpu_init(void) const struct cpu_dev *const *cdev; int count = 0; -+#ifdef PROCESSOR_SELECT ++#ifdef CONFIG_PROCESSOR_SELECT printk(KERN_INFO "KERNEL supported cpus:\n"); +#endif + @@ -1549,7 +1569,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches - continue; - printk(KERN_INFO " %s %s\n", cpudev->c_vendor, - cpudev->c_ident[j]); -+#ifdef PROCESSOR_SELECT ++#ifdef CONFIG_PROCESSOR_SELECT + { + unsigned int j; + @@ -1566,7 +1586,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches early_identify_cpu(&boot_cpu_data); } -@@ -867,10 +871,8 @@ static void __cpuinit identify_cpu(struc +@@ -874,10 +878,8 @@ static void __cpuinit identify_cpu(struc boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } @@ -1578,13 +1598,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches select_idle_routine(c); -@@ -899,9 +901,15 @@ void __init identify_boot_cpu(void) - #else - vgetcpu_set_mode(); - #endif -+#ifndef CONFIG_XEN +@@ -909,6 +911,10 @@ void __init identify_boot_cpu(void) init_hw_perf_events(); -+#endif } +#ifdef CONFIG_XEN @@ -1594,7 +1609,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) { BUG_ON(c == &boot_cpu_data); -@@ -1149,7 +1157,7 @@ static void clear_all_debug_regs(void) +@@ -1156,7 +1162,7 @@ static void clear_all_debug_regs(void) void __cpuinit cpu_init(void) { #ifndef 
CONFIG_X86_NO_TSS @@ -1603,7 +1618,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches struct tss_struct *t; unsigned long v; int i; -@@ -1163,7 +1171,7 @@ void __cpuinit cpu_init(void) +@@ -1170,7 +1176,7 @@ void __cpuinit cpu_init(void) xen_switch_pt(); #ifndef CONFIG_X86_NO_TSS t = &per_cpu(init_tss, cpu); @@ -1612,7 +1627,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif #ifdef CONFIG_NUMA -@@ -1177,7 +1185,7 @@ void __cpuinit cpu_init(void) +@@ -1184,7 +1190,7 @@ void __cpuinit cpu_init(void) if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) panic("CPU#%d already initialized!\n", cpu); @@ -1621,7 +1636,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); -@@ -1200,7 +1208,7 @@ void __cpuinit cpu_init(void) +@@ -1207,7 +1213,7 @@ void __cpuinit cpu_init(void) wrmsrl(MSR_KERNEL_GS_BASE, 0); barrier(); @@ -1630,7 +1645,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #ifdef CONFIG_X86_LOCAL_APIC if (cpu != 0) enable_x2apic(); -@@ -1210,12 +1218,12 @@ void __cpuinit cpu_init(void) +@@ -1217,12 +1223,12 @@ void __cpuinit cpu_init(void) /* * set up and load the per-CPU TSS */ @@ -1645,8 +1660,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches (unsigned long)estacks; } } ---- head-2010-05-12.orig/arch/x86/kernel/e820-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/e820-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -771,7 +771,7 @@ core_initcall(e820_mark_nvs_memory); /* * Early reserved memory areas. 
@@ -1673,8 +1688,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif {} }; ---- head-2010-05-12.orig/arch/x86/kernel/entry_32-xen.S 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/entry_32-xen.S 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:55:46.000000000 +0100 @@ -338,6 +338,10 @@ ENTRY(ret_from_fork) END(ret_from_fork) @@ -1857,8 +1872,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif #include ---- head-2010-05-12.orig/arch/x86/kernel/entry_64-xen.S 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/entry_64-xen.S 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:55:46.000000000 +0100 @@ -160,11 +160,11 @@ GLOBAL(return_to_handler) call ftrace_return_to_handler @@ -1973,8 +1988,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif END(error_entry) ---- head-2010-05-12.orig/arch/x86/kernel/head-xen.c 2010-04-15 10:10:51.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/head-xen.c 2010-04-15 10:13:18.000000000 +0200 +--- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -1,5 +1,6 @@ #include #include @@ -1982,16 +1997,16 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #include #ifndef CONFIG_XEN -@@ -133,7 +134,7 @@ void __init xen_start_kernel(void) - addr), +@@ -121,7 +122,7 @@ void __init xen_start_kernel(void) __pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE)); + } #else - check_efer(); + x86_configure_nx(); xen_init_pt(); #endif -@@ -161,6 +162,8 @@ void __init xen_start_kernel(void) +@@ -149,6 +150,8 @@ void __init xen_start_kernel(void) virt_to_machine(empty_zero_page), PAGE_KERNEL_RO); @@ -2000,19 +2015,19 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } void __init xen_arch_setup(void) ---- head-2010-05-12.orig/arch/x86/kernel/head32-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/head32-xen.c 2010-03-24 16:00:05.000000000 +0100 -@@ -32,8 +32,6 @@ static void __init i386_default_early_se +--- head-2011-03-17.orig/arch/x86/kernel/head32-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head32-xen.c 2011-02-01 14:55:46.000000000 +0100 +@@ -48,8 +48,6 @@ void __init i386_start_kernel(void) + BUG_ON(pte_index(hypervisor_virt_start)); + #endif - void __init i386_start_kernel(void) - { - reserve_trampoline_memory(); - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifndef CONFIG_XEN ---- head-2010-05-12.orig/arch/x86/kernel/head64-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/head64-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -119,8 +119,6 @@ void __init x86_64_start_reservations(ch { copy_bootdata(__va(real_mode_data)); @@ -2022,8 +2037,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches 
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); /* ---- head-2010-05-12.orig/arch/x86/kernel/head_64-xen.S 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/head_64-xen.S 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-01 14:55:46.000000000 +0100 @@ -51,9 +51,9 @@ startup_64: #define NEXT_PAGE(name) \ @@ -2045,8 +2060,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches CFI_STARTPROC .rept 0x1000 / 0x20 .skip 1 /* push %rcx */ ---- head-2010-05-12.orig/arch/x86/kernel/ioport-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/ioport-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/ioport-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/ioport-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -75,8 +75,9 @@ asmlinkage long sys_ioperm(unsigned long * beyond the 0x3ff range: to get the full 65536 ports bitmapped * you'd need 8kB of bitmaps/process, which is a bit excessive. @@ -2088,8 +2103,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches + + return 0; } ---- head-2010-05-12.orig/arch/x86/kernel/irq-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/irq-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/irq-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -19,7 +19,7 @@ atomic_t irq_err_count; #ifndef CONFIG_XEN @@ -2112,7 +2127,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches seq_printf(p, " Platform interrupts\n"); } #endif -@@ -157,7 +157,7 @@ int show_interrupts(struct seq_file *p, +@@ -162,7 +162,7 @@ int show_interrupts(struct seq_file *p, if (!desc) return 0; @@ -2121,7 +2136,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; -@@ -178,7 +178,7 @@ int show_interrupts(struct seq_file *p, +@@ -183,7 +183,7 @@ int show_interrupts(struct seq_file *p, seq_putc(p, '\n'); out: @@ -2130,7 +2145,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return 0; } -@@ -196,8 +196,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu) +@@ -201,8 +201,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->apic_pending_irqs; #endif #ifndef CONFIG_XEN @@ -2141,7 +2156,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif #ifdef CONFIG_SMP sum += irq_stats(cpu)->irq_resched_count; -@@ -264,9 +264,9 @@ unsigned int __irq_entry do_IRQ(struct p +@@ -271,9 +271,9 @@ unsigned int __irq_entry do_IRQ(struct p } /* @@ -2153,7 +2168,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { struct pt_regs *old_regs = set_irq_regs(regs); -@@ -276,13 +276,95 @@ void smp_generic_interrupt(struct pt_reg +@@ -283,13 +283,95 @@ void smp_generic_interrupt(struct pt_reg irq_enter(); @@ -2196,7 +2211,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches + + affinity = desc->affinity; + if (!irq_has_action(irq) || -+ cpumask_equal(affinity, cpu_online_mask)) { ++ cpumask_subset(affinity, cpu_online_mask)) { + raw_spin_unlock(&desc->lock); + continue; + } @@ 
-2214,7 +2229,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches + + if (desc->chip->set_affinity) + desc->chip->set_affinity(irq, affinity); -+ else if (!(warned++)) ++ else if (desc->chip != &no_irq_chip && !(warned++)) + set_affinity = 0; + + if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) @@ -2252,8 +2267,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches + } +} +#endif ---- head-2010-05-12.orig/arch/x86/kernel/microcode_core-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/microcode_core-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -21,10 +21,12 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. @@ -2313,9 +2328,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return 0; } ---- head-2010-05-12.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/mpparse-xen.c 2010-03-24 16:00:05.000000000 +0100 -@@ -371,13 +371,6 @@ static int __init smp_read_mpc(struct mp +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:55:46.000000000 +0100 +@@ -375,13 +375,6 @@ static int __init smp_read_mpc(struct mp x86_init.mpparse.mpc_record(1); } @@ -2329,7 +2344,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; -@@ -680,37 +673,21 @@ void __init default_get_smp_config(unsig +@@ -694,37 +687,21 @@ void __init default_get_smp_config(unsig } #ifndef CONFIG_XEN @@ -2374,7 +2389,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", bp, length); -@@ -732,12 +709,10 @@ static int __init smp_scan_config(unsign +@@ -746,12 +723,10 @@ static int __init smp_scan_config(unsign printk(KERN_INFO "found SMP MP-table at [%p] %llx\n", mpf, (u64)virt_to_phys(mpf)); @@ -2390,7 +2405,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #else printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", mpf, ((void *)bp - _bus_to_virt(base)) + base); -@@ -750,7 +725,7 @@ static int __init smp_scan_config(unsign +@@ -764,7 +739,7 @@ static int __init smp_scan_config(unsign return 0; } @@ -2399,7 +2414,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { #ifndef CONFIG_XEN unsigned int address; -@@ -764,9 +739,9 @@ void __init default_find_smp_config(unsi +@@ -778,9 +753,9 @@ void __init default_find_smp_config(unsi * 2) Scan the top 1K of base RAM * 3) Scan the 64K of bios */ @@ -2412,7 +2427,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return; /* * If it is an SMP machine we should know now, unless the -@@ -788,7 +763,7 @@ void __init default_find_smp_config(unsi +@@ -802,7 +777,7 @@ void __init default_find_smp_config(unsi #ifndef CONFIG_XEN address = get_bios_ebda(); if (address) @@ -2421,7 +2436,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif } -@@ -987,9 +962,6 @@ void __init 
early_reserve_e820_mpc_new(v +@@ -1001,9 +976,6 @@ void __init early_reserve_e820_mpc_new(v { if (enable_update_mptable && alloc_mptable) { u64 startt = 0; @@ -2431,8 +2446,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4); } } ---- head-2010-05-12.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/pci-dma-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -11,10 +11,11 @@ #include #include @@ -2526,8 +2541,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* Must execute after PCI subsystem */ rootfs_initcall(pci_iommu_init); ---- head-2010-05-12.orig/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/pci-nommu-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-nommu-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -112,12 +112,3 @@ struct dma_map_ops nommu_dma_ops = { .sync_sg_for_device = nommu_sync_sg_for_device, .dma_supported = nommu_dma_supported, @@ -2541,8 +2556,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches - force_iommu = 0; /* no HW IOMMU */ - dma_ops = &nommu_dma_ops; -} ---- head-2010-05-12.orig/arch/x86/kernel/process-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/process-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:07:49.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:09:35.000000000 +0100 @@ -9,7 +9,11 @@ #include #include @@ -2718,8 +2733,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * Idle related variables and functions ---- head-2010-05-12.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/process_32-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:38:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:47:07.000000000 +0100 @@ -23,7 +23,6 @@ #include #include @@ -2736,7 +2751,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #include #include #include -@@ -62,6 +60,7 @@ +@@ -60,6 +58,7 @@ #include #include #include @@ -2744,7 +2759,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); asmlinkage void cstar_ret_from_fork(void) __asm__("cstar_ret_from_fork"); -@@ -132,39 +131,29 @@ void __show_regs(struct pt_regs *regs, i +@@ -130,39 +129,29 @@ void __show_regs(struct pt_regs *regs, i unsigned long d0, d1, d2, d3, d6, d7; unsigned long sp; unsigned short ss, gs; @@ -2790,7 +2805,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); if (!all) -@@ -174,61 +163,28 @@ void __show_regs(struct pt_regs *regs, i +@@ -172,61 +161,28 @@ void __show_regs(struct pt_regs *regs, i cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4_safe(); @@ -2856,7 +2871,7 @@ Automatically 
created from "patches.kernel.org/patch-2.6.33" by xen-port-patches void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); -@@ -264,7 +220,12 @@ int copy_thread(unsigned long clone_flag +@@ -262,7 +218,12 @@ int copy_thread(unsigned long clone_flag task_user_gs(p) = get_user_gs(regs); @@ -2869,7 +2884,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (test_tsk_thread_flag(tsk, TIF_CSTAR)) p->thread.ip = (unsigned long) cstar_ret_from_fork; if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { -@@ -499,46 +460,6 @@ __switch_to(struct task_struct *prev_p, +@@ -497,46 +458,6 @@ __switch_to(struct task_struct *prev_p, return prev_p; } @@ -2916,8 +2931,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #define top_esp (THREAD_SIZE - sizeof(unsigned long)) #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) ---- head-2010-05-12.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/process_64-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:37:59.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:12.000000000 +0100 @@ -29,7 +29,6 @@ #include #include @@ -2934,15 +2949,13 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #include #include -@@ -59,6 +57,7 @@ +@@ -57,13 +55,12 @@ #include #include #include +#include - #include - -@@ -66,8 +65,6 @@ asmlinkage extern void ret_from_fork(voi + asmlinkage extern void ret_from_fork(void); static DEFINE_PER_CPU(unsigned char, is_idle); @@ -2951,7 +2964,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches static ATOMIC_NOTIFIER_HEAD(idle_notifier); void idle_notifier_register(struct notifier_block *n) -@@ -170,31 +167,21 @@ void __show_regs(struct pt_regs *regs, i +@@ -166,31 +163,21 @@ void __show_regs(struct pt_regs *regs, i unsigned long d0, d1, d2, d3, d6, d7; unsigned int fsindex, gsindex; unsigned int ds, cs, es; @@ -2991,7 +3004,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches regs->r13, regs->r14, regs->r15); asm("movl %%ds,%0" : "=r" (ds)); -@@ -215,27 +202,26 @@ void __show_regs(struct pt_regs *regs, i +@@ -211,27 +198,26 @@ void __show_regs(struct pt_regs *regs, i cr3 = read_cr3(); cr4 = read_cr4(); @@ -3025,7 +3038,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches show_trace(NULL, regs, (void *)(regs + 1), regs->bp); } -@@ -243,6 +229,7 @@ void xen_load_gs_index(unsigned gs) +@@ -239,6 +225,7 @@ void xen_load_gs_index(unsigned gs) { WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs)); } @@ -3033,7 +3046,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches void release_thread(struct task_struct *dead_task) { -@@ -298,8 +285,9 @@ int copy_thread(unsigned long clone_flag +@@ -294,8 +281,9 @@ int copy_thread(unsigned long clone_flag *childregs = *regs; childregs->ax = 0; @@ -3045,7 +3058,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches childregs->sp = (unsigned long)childregs; p->thread.sp = (unsigned long) childregs; -@@ -309,12 +297,16 @@ int copy_thread(unsigned long clone_flag +@@ -305,12 +293,16 @@ int copy_thread(unsigned long clone_flag p->thread.fs = me->thread.fs; p->thread.gs = me->thread.gs; @@ -3062,7 +3075,7 @@ Automatically created from 
"patches.kernel.org/patch-2.6.33" by xen-port-patches if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { -@@ -354,28 +346,45 @@ out: +@@ -350,28 +342,45 @@ out: kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } @@ -3116,7 +3129,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * switch_to(x,y) should switch tasks from x to y. -@@ -565,26 +574,8 @@ __switch_to(struct task_struct *prev_p, +@@ -561,26 +570,8 @@ __switch_to(struct task_struct *prev_p, */ if (preload_fpu) __math_state_restore(); @@ -3144,7 +3157,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } void set_personality_64bit(void) -@@ -601,13 +592,16 @@ void set_personality_64bit(void) +@@ -597,13 +588,16 @@ void set_personality_64bit(void) current->personality &= ~READ_IMPLIES_EXEC; } @@ -3168,53 +3181,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } unsigned long get_wchan(struct task_struct *p) ---- head-2010-05-12.orig/arch/x86/kernel/quirks-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/quirks-xen.c 2010-03-24 16:00:05.000000000 +0100 -@@ -492,6 +492,19 @@ void force_hpet_resume(void) - break; - } - } -+ -+/* -+ * HPET MSI on some boards (ATI SB700/SB800) has side effect on -+ * floppy DMA. Disable HPET MSI on such platforms. -+ */ -+static void force_disable_hpet_msi(struct pci_dev *unused) -+{ -+ hpet_msi_disable = 1; -+} -+ -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, -+ force_disable_hpet_msi); -+ - #endif - - #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) -@@ -500,6 +513,7 @@ static void __init quirk_amd_nb_node(str - { - struct pci_dev *nb_ht; - unsigned int devfn; -+ u32 node; - u32 val; - - devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); -@@ -508,7 +522,13 @@ static void __init quirk_amd_nb_node(str - return; - - pci_read_config_dword(nb_ht, 0x60, &val); -- set_dev_node(&dev->dev, val & 7); -+ node = val & 7; -+ /* -+ * Some hardware may return an invalid node ID, -+ * so check it first: -+ */ -+ if (node_online(node)) -+ set_dev_node(&dev->dev, node); - pci_dev_put(nb_ht); - } - ---- head-2010-05-12.orig/arch/x86/kernel/setup-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/setup-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-04 15:09:48.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:24:24.000000000 +0100 @@ -73,6 +73,7 @@ #include @@ -3394,7 +3362,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #ifdef CONFIG_XEN unsigned int i; unsigned long p2m_pages; -@@ -900,21 +884,18 @@ void __init setup_arch(char **cmdline_p) +@@ -903,21 +887,18 @@ void __init setup_arch(char **cmdline_p) strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; @@ -3423,7 +3391,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* Must be before kernel pagetables are setup */ vmi_activate(); -@@ -1021,6 +1002,20 @@ void __init setup_arch(char **cmdline_p) +@@ -1024,6 +1005,20 @@ void __init setup_arch(char **cmdline_p) reserve_brk(); @@ -3444,7 +3412,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches init_gbpages(); /* max_pfn_mapped is updated here */ -@@ -1048,6 +1043,8 @@ void __init setup_arch(char 
**cmdline_p) +@@ -1051,6 +1046,8 @@ void __init setup_arch(char **cmdline_p) reserve_initrd(); #ifndef CONFIG_XEN @@ -3453,7 +3421,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches vsmp_init(); #endif -@@ -1071,23 +1068,15 @@ void __init setup_arch(char **cmdline_p) +@@ -1074,23 +1071,15 @@ void __init setup_arch(char **cmdline_p) /* * Parse SRAT to discover nodes. */ @@ -3482,7 +3450,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #if defined(CONFIG_X86_64) && !defined(CONFIG_XEN) /* -@@ -1115,6 +1104,9 @@ void __init setup_arch(char **cmdline_p) +@@ -1118,6 +1107,9 @@ void __init setup_arch(char **cmdline_p) #endif #ifdef CONFIG_XEN @@ -3501,9 +3469,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } #ifdef CONFIG_X86_32 ---- head-2010-05-12.orig/arch/x86/kernel/time-xen.c 2010-05-12 09:02:39.000000000 +0200 -+++ head-2010-05-12/arch/x86/kernel/time-xen.c 2010-05-12 09:02:50.000000000 +0200 -@@ -953,28 +953,23 @@ core_initcall(cpufreq_time_setup); +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-02-01 14:55:46.000000000 +0100 +@@ -937,28 +937,23 @@ core_initcall(cpufreq_time_setup); */ static ctl_table xen_subtable[] = { { @@ -3532,8 +3500,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches .procname = "xen", .mode = 0555, .child = xen_subtable ---- head-2010-05-12.orig/arch/x86/kernel/traps-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/traps-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-16 13:56:25.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -522,77 +522,56 @@ asmlinkage __kprobes struct pt_regs *syn dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) { @@ -3639,8 +3607,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return; } ---- head-2010-05-12.orig/arch/x86/kernel/vmlinux.lds.S 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/vmlinux.lds.S 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/vmlinux.lds.S 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vmlinux.lds.S 2011-02-01 14:55:46.000000000 +0100 @@ -43,7 +43,7 @@ ENTRY(phys_startup_64) jiffies_64 = jiffies; #endif @@ -3650,8 +3618,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA * we retain large page mappings for boundaries spanning kernel text, rodata ---- head-2010-05-12.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -73,7 +73,8 @@ void update_vsyscall_tz(void) write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } @@ -3680,18 +3648,18 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches .child = kernel_table2 }, {} }; ---- head-2010-05-12.orig/arch/x86/kernel/x8664_ksyms_64.c 2010-05-12 08:55:23.000000000 +0200 -+++ 
head-2010-05-12/arch/x86/kernel/x8664_ksyms_64.c 2010-03-24 16:00:05.000000000 +0100 -@@ -55,6 +55,6 @@ EXPORT_SYMBOL(__memcpy); +--- head-2011-03-17.orig/arch/x86/kernel/x8664_ksyms_64.c 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/x8664_ksyms_64.c 2011-02-01 14:55:46.000000000 +0100 +@@ -54,6 +54,6 @@ EXPORT_SYMBOL(memcpy); + EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(empty_zero_page); - EXPORT_SYMBOL(init_level4_pgt); --#ifndef CONFIG_PARAVIRT_CPU -+#if !defined(CONFIG_PARAVIRT_CPU) && !defined(CONFIG_XEN) +-#ifndef CONFIG_PARAVIRT ++#if !defined(CONFIG_PARAVIRT) && !defined(CONFIG_XEN) EXPORT_SYMBOL(native_load_gs_index); #endif ---- head-2010-05-12.orig/arch/x86/kernel/x86_init-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/kernel/x86_init-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/kernel/x86_init-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/x86_init-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -13,10 +13,13 @@ #include #include @@ -3722,8 +3690,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches .set_wallclock = mach_set_rtc_mmss, + .is_untracked_pat_range = is_ISA_range, }; ---- head-2010-05-12.orig/arch/x86/mm/fault-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/fault-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -38,7 +38,8 @@ enum x86_pf_error_code { * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: @@ -3743,7 +3711,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { int ret = 0; -@@ -248,7 +249,7 @@ void vmalloc_sync_all(void) +@@ -257,7 +258,7 @@ void vmalloc_sync_all(void) * * Handle a fault on the vmalloc or module mapping area */ @@ -3752,7 +3720,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { unsigned long pgd_paddr; pmd_t *pmd_k; -@@ -365,7 +366,7 @@ void vmalloc_sync_all(void) +@@ -376,7 +377,7 @@ void vmalloc_sync_all(void) * * This assumes no large pages in there. */ @@ -3761,7 +3729,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { pgd_t *pgd, *pgd_ref; pud_t *pud, *pud_ref; -@@ -666,7 +667,7 @@ no_context(struct pt_regs *regs, unsigne +@@ -677,7 +678,7 @@ no_context(struct pt_regs *regs, unsigne show_fault_oops(regs, error_code, address); stackend = end_of_stack(tsk); @@ -3770,7 +3738,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); tsk->thread.cr2 = address; -@@ -868,7 +869,7 @@ static int spurious_fault_check(unsigned +@@ -879,7 +880,7 @@ static int spurious_fault_check(unsigned * There are no security implications to leaving a stale TLB when * increasing the permissions on a page. 
*/ @@ -3779,8 +3747,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches spurious_fault(unsigned long error_code, unsigned long address) { pgd_t *pgd; ---- head-2010-05-12.orig/arch/x86/mm/init-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/init-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -163,10 +163,6 @@ unsigned long __init_refok init_memory_m use_gbpages = direct_gbpages; #endif @@ -3792,8 +3760,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* Enable PSE if available */ if (cpu_has_pse) set_in_cr4(X86_CR4_PSE); ---- head-2010-05-12.orig/arch/x86/mm/init_32-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/init_32-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -448,7 +448,7 @@ static void __init permanent_kmaps_init( pkmap_page_table = pte; } @@ -3842,8 +3810,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches void set_kernel_text_rw(void) { ---- head-2010-05-12.orig/arch/x86/mm/init_64-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/init_64-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -53,6 +53,7 @@ #include #include @@ -3969,9 +3937,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } #endif ---- head-2010-05-12.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/ioremap-xen.c 2010-03-24 16:00:05.000000000 +0100 -@@ -457,32 +457,6 @@ void __iomem *ioremap_cache(resource_siz +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:38.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:45.000000000 +0100 +@@ -438,32 +438,6 @@ void __iomem *ioremap_cache(resource_siz } EXPORT_SYMBOL(ioremap_cache); @@ -4004,7 +3972,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, unsigned long prot_val) { -@@ -558,7 +532,7 @@ void *xlate_dev_mem_ptr(unsigned long ph +@@ -539,7 +513,7 @@ void *xlate_dev_mem_ptr(unsigned long ph if (page_is_ram(start >> PAGE_SHIFT)) return __va(phys); @@ -4013,8 +3981,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (addr) addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); ---- head-2010-05-12.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/pageattr-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -281,6 +281,22 @@ static inline pgprot_t static_protection __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) pgprot_val(forbidden) |= _PAGE_RW; @@ -4057,8 +4025,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return change_page_attr_set(&addr, numpages, 
__pgprot(_PAGE_NX), 0); } EXPORT_SYMBOL(set_memory_nx); ---- head-2010-05-12.orig/arch/x86/mm/pat-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/arch/x86/mm/pat-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/mm/pat-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -20,6 +20,7 @@ #include #include @@ -4139,8 +4107,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return 0; } ---- head-2010-05-12.orig/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-12/arch/x86/vdso/vdso32-setup-xen.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:55:46.000000000 +0100 @@ -436,7 +436,6 @@ static ctl_table abi_table2[] = { static ctl_table abi_root_table2[] = { @@ -4149,26 +4117,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches .procname = "abi", .mode = 0555, .child = abi_table2 ---- head-2010-05-12.orig/drivers/gpu/drm/nouveau/nouveau_sgdma.c 2010-04-15 09:29:04.000000000 +0200 -+++ head-2010-05-12/drivers/gpu/drm/nouveau/nouveau_sgdma.c 2010-05-05 15:19:54.000000000 +0200 -@@ -267,6 +267,15 @@ nouveau_sgdma_init(struct drm_device *de - - dev_priv->gart_info.sg_dummy_page = - alloc_page(GFP_KERNEL|__GFP_DMA32); -+#ifdef CONFIG_XEN -+ if (!dev_priv->gart_info.sg_dummy_page) -+ ret = ENOMEM; -+ else -+ ret = xen_limit_pages_to_max_mfn( -+ dev_priv->gart_info.sg_dummy_page, 0, 32); -+ if (ret) -+ NV_WARN(dev, "Error restricting SG dummy page: %d\n", ret); -+#endif - set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); - dev_priv->gart_info.sg_dummy_bus = - pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, ---- head-2010-05-12.orig/drivers/gpu/drm/vmwgfx/Kconfig 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/drivers/gpu/drm/vmwgfx/Kconfig 2010-04-15 10:13:09.000000000 +0200 +--- head-2011-03-17.orig/drivers/gpu/drm/vmwgfx/Kconfig 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/vmwgfx/Kconfig 2011-02-01 14:55:46.000000000 +0100 @@ -1,6 +1,6 @@ config DRM_VMWGFX tristate "DRM driver for VMware Virtual GPU" @@ -4177,9 +4127,419 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches select FB_DEFERRED_IO select FB_CFB_FILLRECT select FB_CFB_COPYAREA ---- head-2010-05-12.orig/drivers/oprofile/cpu_buffer.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/drivers/oprofile/cpu_buffer.c 2010-03-24 16:00:05.000000000 +0100 -@@ -449,7 +449,7 @@ void oprofile_add_pc(unsigned long pc, i +--- head-2011-03-17.orig/drivers/hwmon/Kconfig 2011-01-31 17:32:29.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/Kconfig 2011-03-11 11:00:24.000000000 +0100 +@@ -943,7 +943,7 @@ config SENSORS_TMP421 + + config SENSORS_VIA_CPUTEMP + tristate "VIA CPU temperature sensor" +- depends on X86 ++ depends on X86 && !XEN + help + If you say yes here you get support for the temperature + sensor inside your CPU. 
Supported are all known variants of +--- head-2011-03-17.orig/drivers/hwmon/coretemp-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/coretemp-xen.c 2011-02-01 14:55:46.000000000 +0100 +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include + #include + #include "../xen/core/domctl.h" +@@ -166,6 +167,7 @@ static int adjust_tjmax(struct coretemp_ + int usemsr_ee = 1; + int err; + u32 eax, edx; ++ struct pci_dev *host_bridge; + + /* Early chips have no MSR for TjMax */ + +@@ -173,11 +175,21 @@ static int adjust_tjmax(struct coretemp_ + usemsr_ee = 0; + } + +- /* Atoms seems to have TjMax at 90C */ ++ /* Atom CPUs */ + + if (c->x86_model == 0x1c) { + usemsr_ee = 0; +- tjmax = 90000; ++ ++ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); ++ ++ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL ++ && (host_bridge->device == 0xa000 /* NM10 based nettop */ ++ || host_bridge->device == 0xa010)) /* NM10 based netbook */ ++ tjmax = 100000; ++ else ++ tjmax = 90000; ++ ++ pci_dev_put(host_bridge); + } + + if ((c->x86_model > 0xe) && (usemsr_ee)) { +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/drivers/hwmon/via-cputemp-xen.c 2011-02-01 14:55:46.000000000 +0100 +@@ -0,0 +1,354 @@ ++/* ++ * via-cputemp.c - Driver for VIA CPU core temperature monitoring ++ * Copyright (C) 2009 VIA Technologies, Inc. ++ * ++ * based on existing coretemp.c, which is ++ * ++ * Copyright (C) 2007 Rudolf Marek ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA ++ * 02110-1301 USA. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../xen/core/domctl.h" ++ ++#define DRVNAME "via_cputemp" ++ ++enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW; ++ ++/* ++ * Functions declaration ++ */ ++ ++struct pdev_entry { ++ struct list_head list; ++ struct platform_device *pdev; ++ struct device *hwmon_dev; ++ const char *name; ++ u8 x86_model; ++ u32 msr; ++}; ++#define via_cputemp_data pdev_entry ++ ++/* ++ * Sysfs stuff ++ */ ++ ++static ssize_t show_name(struct device *dev, struct device_attribute ++ *devattr, char *buf) ++{ ++ int ret; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct via_cputemp_data *data = dev_get_drvdata(dev); ++ ++ if (attr->index == SHOW_NAME) ++ ret = sprintf(buf, "%s\n", data->name); ++ else /* show label */ ++ ret = sprintf(buf, "Core %d\n", data->pdev->id); ++ return ret; ++} ++ ++static ssize_t show_temp(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct via_cputemp_data *data = dev_get_drvdata(dev); ++ u32 eax, edx; ++ int err; ++ ++ err = rdmsr_safe_on_pcpu(data->pdev->id, data->msr, &eax, &edx); ++ if (err < 0) ++ return -EAGAIN; ++ ++ return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); ++} ++ ++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, ++ SHOW_TEMP); ++static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); ++static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); ++ ++static struct attribute *via_cputemp_attributes[] = { ++ &sensor_dev_attr_name.dev_attr.attr, ++ &sensor_dev_attr_temp1_label.dev_attr.attr, ++ &sensor_dev_attr_temp1_input.dev_attr.attr, ++ NULL ++}; ++ ++static const struct attribute_group via_cputemp_group = { ++ .attrs = via_cputemp_attributes, ++}; ++ ++static int via_cputemp_probe(struct platform_device *pdev) ++{ ++ struct via_cputemp_data *data = platform_get_drvdata(pdev); ++ int err; ++ u32 eax, edx; ++ ++ data->name = "via_cputemp"; ++ ++ switch (data->x86_model) { ++ case 0xA: ++ /* C7 A */ ++ case 0xD: ++ /* C7 D */ ++ data->msr = 0x1169; ++ break; ++ case 0xF: ++ /* Nano */ ++ data->msr = 0x1423; ++ break; ++ default: ++ return -ENODEV; ++ } ++ ++ /* test if we can access the TEMPERATURE MSR */ ++ err = rdmsr_safe_on_pcpu(pdev->id, data->msr, &eax, &edx); ++ if (err >= 0) { ++ dev_err(&pdev->dev, ++ "Unable to access TEMPERATURE MSR, giving up\n"); ++ return err; ++ } ++ ++ err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group); ++ if (err) ++ return err; ++ ++ data->hwmon_dev = hwmon_device_register(&pdev->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ err = PTR_ERR(data->hwmon_dev); ++ dev_err(&pdev->dev, "Class registration failed (%d)\n", ++ err); ++ goto exit_remove; ++ } ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group); ++ return err; ++} ++ ++static int via_cputemp_remove(struct platform_device *pdev) ++{ ++ struct via_cputemp_data *data = platform_get_drvdata(pdev); ++ ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group); ++ return 0; ++} ++ ++static struct platform_driver via_cputemp_driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRVNAME, ++ }, ++ .probe = via_cputemp_probe, ++ .remove = via_cputemp_remove, ++}; ++ ++static LIST_HEAD(pdev_list); ++static DEFINE_MUTEX(pdev_list_mutex); ++ ++struct cpu_info { ++ struct pdev_entry *pdev_entry; ++ u8 x86; ++}; ++ 
++static void get_cpuid_info(void *arg) ++{ ++ struct cpu_info *info = arg; ++ struct pdev_entry *pdev_entry = info->pdev_entry; ++ u32 val = cpuid_eax(1); ++ ++ info->x86 = ((val >> 8) & 0xf) + ((val >> 20) & 0xff); ++ pdev_entry->x86_model = ((val >> 4) & 0xf) | ((val >> 12) & 0xf0); ++} ++ ++static int via_cputemp_device_add(unsigned int cpu) ++{ ++ int err; ++ struct cpu_info info; ++ struct platform_device *pdev; ++ struct pdev_entry *pdev_entry; ++ ++ pdev_entry = kzalloc(sizeof(*pdev_entry), GFP_KERNEL); ++ if (!pdev_entry) ++ return -ENOMEM; ++ ++ info.pdev_entry = pdev_entry; ++ err = xen_set_physical_cpu_affinity(cpu); ++ if (!err) { ++ get_cpuid_info(&info); ++ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); ++ } else if (err > 0) { ++ static bool warned; ++ ++ if (!warned) { ++ warned = true; ++ printk(KERN_WARNING DRVNAME ++ "Cannot set physical CPU affinity" ++ " (assuming use of dom0_vcpus_pin)\n"); ++ } ++ err = smp_call_function_single(cpu, get_cpuid_info, &info, 1); ++ } ++ if (err) ++ goto exit_entry_free; ++ ++ if (info.x86 != 6) ++ goto exit_entry_free; ++ ++ if (pdev_entry->x86_model < 0x0a) ++ goto exit_entry_free; ++ ++ if (pdev_entry->x86_model > 0x0f) { ++ printk(KERN_WARNING DRVNAME ": Unknown CPU " ++ "model 0x%x\n", pdev_entry->x86_model); ++ goto exit_entry_free; ++ } ++ ++ pdev = platform_device_alloc(DRVNAME, cpu); ++ if (!pdev) { ++ err = -ENOMEM; ++ printk(KERN_ERR DRVNAME ": Device allocation failed\n"); ++ goto exit_entry_free; ++ } ++ ++ platform_set_drvdata(pdev, pdev_entry); ++ pdev_entry->pdev = pdev; ++ ++ err = platform_device_add(pdev); ++ if (err) { ++ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", ++ err); ++ goto exit_device_put; ++ } ++ ++ mutex_lock(&pdev_list_mutex); ++ list_add_tail(&pdev_entry->list, &pdev_list); ++ mutex_unlock(&pdev_list_mutex); ++ ++ return 0; ++ ++exit_device_put: ++ platform_device_put(pdev); ++exit_entry_free: ++ kfree(pdev_entry); ++ return err; ++} ++ ++static void via_cputemp_device_remove(unsigned int cpu) ++{ ++ struct pdev_entry *p; ++ ++ mutex_lock(&pdev_list_mutex); ++ list_for_each_entry(p, &pdev_list, list) { ++ if (p->pdev->id == cpu) { ++ platform_device_unregister(p->pdev); ++ list_del(&p->list); ++ kfree(p); ++ break; ++ } ++ } ++ mutex_unlock(&pdev_list_mutex); ++} ++ ++static int via_cputemp_cpu_callback(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ unsigned int cpu = (unsigned long) hcpu; ++ ++ switch (action) { ++ case CPU_ONLINE: ++ via_cputemp_device_add(cpu); ++ break; ++ case CPU_DEAD: ++ via_cputemp_device_remove(cpu); ++ break; ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block via_cputemp_cpu_notifier = { ++ .notifier_call = via_cputemp_cpu_callback, ++}; ++ ++static int __init via_cputemp_init(void) ++{ ++ int err; ++ ++ if (!is_initial_xendomain()) ++ return -ENODEV; ++ ++ if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) { ++ printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n"); ++ err = -ENODEV; ++ goto exit; ++ } ++ ++ err = platform_driver_register(&via_cputemp_driver); ++ if (err) ++ goto exit; ++ ++ err = register_pcpu_notifier(&via_cputemp_cpu_notifier); ++ if (err) ++ goto exit_driver_unreg; ++ ++ if (list_empty(&pdev_list)) { ++ err = -ENODEV; ++ goto exit_notifier_unreg; ++ } ++ ++ return 0; ++ ++exit_notifier_unreg: ++ unregister_pcpu_notifier(&via_cputemp_cpu_notifier); ++exit_driver_unreg: ++ platform_driver_unregister(&via_cputemp_driver); ++exit: ++ return err; ++} ++ ++static void __exit via_cputemp_exit(void) ++{ ++ struct 
pdev_entry *p, *n; ++ ++ unregister_pcpu_notifier(&via_cputemp_cpu_notifier); ++ mutex_lock(&pdev_list_mutex); ++ list_for_each_entry_safe(p, n, &pdev_list, list) { ++ platform_device_unregister(p->pdev); ++ list_del(&p->list); ++ kfree(p); ++ } ++ mutex_unlock(&pdev_list_mutex); ++ platform_driver_unregister(&via_cputemp_driver); ++} ++ ++MODULE_AUTHOR("Harald Welte "); ++MODULE_DESCRIPTION("VIA CPU temperature monitor"); ++MODULE_LICENSE("GPL"); ++ ++module_init(via_cputemp_init) ++module_exit(via_cputemp_exit) +--- head-2011-03-17.orig/drivers/oprofile/cpu_buffer.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/oprofile/cpu_buffer.c 2011-02-01 14:55:46.000000000 +0100 +@@ -422,7 +422,7 @@ void oprofile_add_pc(unsigned long pc, i */ void oprofile_add_mode(int cpu_mode) { @@ -4188,9 +4548,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (op_add_code(cpu_buf, 1, cpu_mode, current)) cpu_buf->sample_lost_overflow++; ---- head-2010-05-12.orig/drivers/pci/Kconfig 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-12/drivers/pci/Kconfig 2010-03-24 16:00:05.000000000 +0100 -@@ -82,7 +82,7 @@ config PCI_IOV +--- head-2011-03-17.orig/drivers/pci/Kconfig 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/pci/Kconfig 2011-02-01 14:55:46.000000000 +0100 +@@ -104,7 +104,7 @@ config PCI_IOV config PCI_IOAPIC bool @@ -4199,9 +4559,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches depends on ACPI depends on HOTPLUG default y ---- head-2010-05-12.orig/drivers/scsi/Kconfig 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/drivers/scsi/Kconfig 2010-03-24 16:00:05.000000000 +0100 -@@ -650,7 +650,7 @@ config SCSI_FLASHPOINT +--- head-2011-03-17.orig/drivers/scsi/Kconfig 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/drivers/scsi/Kconfig 2011-02-01 14:55:46.000000000 +0100 +@@ -659,7 +659,7 @@ config SCSI_FLASHPOINT config VMWARE_PVSCSI tristate "VMware PVSCSI driver support" @@ -4210,53 +4570,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches help This driver supports VMware's para virtualized SCSI HBA. To compile this driver as a module, choose M here: the ---- head-2010-05-12.orig/drivers/xen/blktap2/sysfs.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-12/drivers/xen/blktap2/sysfs.c 2010-03-24 16:00:05.000000000 +0100 -@@ -39,11 +39,11 @@ blktap_sysfs_exit(struct blktap *tap) - static ssize_t blktap_sysfs_pause_device(struct device *, - struct device_attribute *, - const char *, size_t); --DEVICE_ATTR(pause, S_IWUSR, NULL, blktap_sysfs_pause_device); -+static DEVICE_ATTR(pause, S_IWUSR, NULL, blktap_sysfs_pause_device); - static ssize_t blktap_sysfs_resume_device(struct device *, - struct device_attribute *, - const char *, size_t); --DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); -+static DEVICE_ATTR(resume, S_IWUSR, NULL, blktap_sysfs_resume_device); - - static ssize_t - blktap_sysfs_set_name(struct device *dev, struct device_attribute *attr, -@@ -103,8 +103,8 @@ blktap_sysfs_get_name(struct device *dev - - return size; - } --DEVICE_ATTR(name, S_IRUSR | S_IWUSR, -- blktap_sysfs_get_name, blktap_sysfs_set_name); -+static DEVICE_ATTR(name, S_IRUSR | S_IWUSR, -+ blktap_sysfs_get_name, blktap_sysfs_set_name); - - static ssize_t - blktap_sysfs_remove_device(struct device *dev, struct device_attribute *attr, -@@ -123,7 +123,7 @@ blktap_sysfs_remove_device(struct device - - return (err ? 
: size); - } --DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); -+static DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); - - static ssize_t - blktap_sysfs_pause_device(struct device *dev, struct device_attribute *attr, -@@ -293,7 +293,7 @@ out: - - return ret; - } --DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); -+static DEVICE_ATTR(debug, S_IRUSR, blktap_sysfs_debug_device, NULL); - - int - blktap_sysfs_create(struct blktap *tap) ---- head-2010-05-12.orig/drivers/xen/char/mem.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/char/mem.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/char/mem.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/char/mem.c 2011-02-01 14:55:46.000000000 +0100 @@ -5,7 +5,7 @@ * * Added devfs support. @@ -4342,10 +4657,27 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches if (!range_is_allowed(p >> PAGE_SHIFT, sz)) return -EPERM; ---- head-2010-05-12.orig/drivers/xen/core/spinlock.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-12/drivers/xen/core/spinlock.c 2010-03-24 16:00:05.000000000 +0100 -@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(int, spinlock_irq) - static char spinlock_name[NR_CPUS][15]; +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-01 14:55:46.000000000 +0100 +@@ -1050,6 +1050,14 @@ void disable_all_local_evtchn(void) + synch_set_bit(i, &s->evtchn_mask[0]); + } + ++/* Test an irq's pending state. */ ++int xen_test_irq_pending(int irq) ++{ ++ int evtchn = evtchn_from_irq(irq); ++ ++ return VALID_EVTCHN(evtchn) && test_evtchn(evtchn); ++} ++ + #ifdef CONFIG_PM_SLEEP + static void restore_cpu_virqs(unsigned int cpu) + { +--- head-2011-03-17.orig/drivers/xen/core/spinlock.c 2011-03-15 16:43:45.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/spinlock.c 2011-03-15 16:44:19.000000000 +0100 +@@ -16,7 +16,7 @@ + #include struct spinning { - raw_spinlock_t *lock; @@ -4353,17 +4685,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches unsigned int ticket; struct spinning *prev; }; -@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(struct spinning *, - * removal itself doesn't need protection - what needs to be prevented is - * removed objects going out of scope (as they're allocated on the stack. 
- */ --static DEFINE_PER_CPU(raw_rwlock_t, spinning_rm_lock) = __RAW_RW_LOCK_UNLOCKED; -+static DEFINE_PER_CPU(arch_rwlock_t, spinning_rm_lock) = __ARCH_RW_LOCK_UNLOCKED; - - int __cpuinit xen_spinlock_init(unsigned int cpu) - { -@@ -58,7 +58,7 @@ void __cpuinit xen_spinlock_cleanup(unsi - } +@@ -72,7 +72,7 @@ void __cpuinit spinlock_resume(void) + #endif static unsigned int spin_adjust(struct spinning *spinning, - const raw_spinlock_t *lock, @@ -4371,7 +4694,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches unsigned int token) { for (; spinning; spinning = spinning->prev) -@@ -76,18 +76,18 @@ static unsigned int spin_adjust(struct s +@@ -90,12 +90,12 @@ static unsigned int spin_adjust(struct s return token; } @@ -4383,17 +4706,10 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches -bool xen_spin_wait(raw_spinlock_t *lock, unsigned int *ptok, +bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, - unsigned int flags) + unsigned int flags) { - int irq = percpu_read(spinlock_irq); bool rc; - typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask; -- raw_rwlock_t *rm_lock; -+ arch_rwlock_t *rm_lock; - struct spinning spinning, *other; - - /* If kicker interrupt not initialized yet, just spin. */ -@@ -137,7 +137,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, +@@ -151,7 +151,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, * reduce latency after the current lock was * released), but don't acquire the lock. */ @@ -4402,53 +4718,17 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches raw_local_irq_disable(); while (lock->cur == other->ticket) { -@@ -187,8 +187,8 @@ bool xen_spin_wait(raw_spinlock_t *lock, - percpu_write(spinning, other); - rm_lock = &__get_cpu_var(spinning_rm_lock); - raw_local_irq_disable(); -- __raw_write_lock(rm_lock); -- __raw_write_unlock(rm_lock); -+ arch_write_lock(rm_lock); -+ arch_write_unlock(rm_lock); - *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); - - /* -@@ -211,13 +211,13 @@ bool xen_spin_wait(raw_spinlock_t *lock, +@@ -235,7 +235,7 @@ bool xen_spin_wait(raw_spinlock_t *lock, return rc; } -void xen_spin_kick(raw_spinlock_t *lock, unsigned int token) +void xen_spin_kick(arch_spinlock_t *lock, unsigned int token) { - unsigned int cpu; - - token &= (1U << TICKET_SHIFT) - 1; - for_each_online_cpu(cpu) { -- raw_rwlock_t *rm_lock; -+ arch_rwlock_t *rm_lock; - unsigned long flags; - struct spinning *spinning; - -@@ -226,7 +226,7 @@ void xen_spin_kick(raw_spinlock_t *lock, - - rm_lock = &per_cpu(spinning_rm_lock, cpu); - raw_local_irq_save(flags); -- __raw_read_lock(rm_lock); -+ arch_read_lock(rm_lock); - - spinning = per_cpu(spinning, cpu); - smp_rmb(); -@@ -236,7 +236,7 @@ void xen_spin_kick(raw_spinlock_t *lock, - spinning = spinning->prev; - } - -- __raw_read_unlock(rm_lock); -+ arch_read_unlock(rm_lock); - raw_local_irq_restore(flags); + unsigned int cpu = raw_smp_processor_id(), ancor = cpu; - if (unlikely(spinning)) { ---- head-2010-05-12.orig/drivers/xen/evtchn.c 2010-04-15 10:08:13.000000000 +0200 -+++ head-2010-05-12/drivers/xen/evtchn.c 2010-04-15 10:13:26.000000000 +0200 +--- head-2011-03-17.orig/drivers/xen/evtchn.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/drivers/xen/evtchn.c 2011-02-01 14:55:46.000000000 +0100 @@ -48,15 +48,14 @@ #include #include @@ -4466,35 +4746,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #define bind_evtchn_to_irqhandler bind_caller_port_to_irqhandler #endif ---- 
head-2010-05-12.orig/drivers/xen/netback/interface.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/drivers/xen/netback/interface.c 2010-03-24 16:00:05.000000000 +0100 -@@ -131,9 +131,13 @@ static const struct netif_stat { - { "copied_skbs", offsetof(netif_t, nr_copied_skbs) }, - }; - --static int netbk_get_stats_count(struct net_device *dev) -+static int netbk_get_sset_count(struct net_device *dev, int sset) - { -- return ARRAY_SIZE(netbk_stats); -+ switch (sset) { -+ case ETH_SS_STATS: -+ return ARRAY_SIZE(netbk_stats); -+ } -+ return -EINVAL; - } - - static void netbk_get_ethtool_stats(struct net_device *dev, -@@ -171,7 +175,7 @@ static const struct ethtool_ops network_ - .set_tso = netbk_set_tso, - .get_link = ethtool_op_get_link, - -- .get_stats_count = netbk_get_stats_count, -+ .get_sset_count = netbk_get_sset_count, - .get_ethtool_stats = netbk_get_ethtool_stats, - .get_strings = netbk_get_strings, - }; ---- head-2010-05-12.orig/drivers/xen/privcmd/compat_privcmd.c 2010-03-24 15:06:12.000000000 +0100 -+++ head-2010-05-12/drivers/xen/privcmd/compat_privcmd.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/privcmd/compat_privcmd.c 2011-01-31 17:29:16.000000000 +0100 ++++ head-2011-03-17/drivers/xen/privcmd/compat_privcmd.c 2011-02-01 14:55:46.000000000 +0100 @@ -26,17 +26,16 @@ #include #include @@ -4554,8 +4807,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches p = compat_alloc_user_space(sizeof(*p)); if (copy_from_user(&n32, p32, sizeof(n32)) || put_user(n32.num, &p->num) || ---- head-2010-05-12.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:55:46.000000000 +0100 @@ -62,6 +62,8 @@ #endif #else @@ -4565,18 +4818,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #include #include #include -@@ -562,7 +564,7 @@ static ssize_t xendev_show_modalias(stru - { - return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); - } --DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); -+static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); - - int xenbus_probe_node(struct xen_bus_type *bus, - const char *type, ---- head-2010-05-12.orig/fs/compat_ioctl.c 2010-05-12 08:57:55.000000000 +0200 -+++ head-2010-05-12/fs/compat_ioctl.c 2010-05-12 09:02:56.000000000 +0200 -@@ -1527,9 +1527,6 @@ IGNORE_IOCTL(FBIOGCURSOR32) +--- head-2011-03-17.orig/fs/compat_ioctl.c 2011-01-31 14:53:38.000000000 +0100 ++++ head-2011-03-17/fs/compat_ioctl.c 2011-02-01 14:55:46.000000000 +0100 +@@ -1417,9 +1417,6 @@ IGNORE_IOCTL(FBIOGCURSOR32) #endif #ifdef CONFIG_XEN @@ -4586,7 +4830,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL) COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ) COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN) -@@ -1605,6 +1602,12 @@ static long do_ioctl_trans(int fd, unsig +@@ -1484,6 +1481,12 @@ static long do_ioctl_trans(int fd, unsig return do_video_stillpicture(fd, cmd, argp); case VIDEO_SET_SPU_PALETTE: return do_video_set_spu_palette(fd, cmd, argp); @@ -4599,9 +4843,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches } /* ---- 
head-2010-05-12.orig/include/acpi/processor.h 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-12/include/acpi/processor.h 2010-03-24 16:00:05.000000000 +0100 -@@ -324,7 +324,7 @@ static inline void acpi_processor_ppc_ex +--- head-2011-03-17.orig/include/acpi/processor.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/include/acpi/processor.h 2011-02-01 14:55:46.000000000 +0100 +@@ -323,7 +323,7 @@ static inline void acpi_processor_ppc_ex return; } #ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL @@ -4610,7 +4854,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #else static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) -@@ -339,11 +339,11 @@ static inline int acpi_processor_ppc_has +@@ -338,11 +338,11 @@ static inline int acpi_processor_ppc_has } return 0; } @@ -4623,8 +4867,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif /* CONFIG_CPU_FREQ */ ---- head-2010-05-12.orig/include/xen/compat_ioctl.h 2010-01-18 15:23:12.000000000 +0100 -+++ head-2010-05-12/include/xen/compat_ioctl.h 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/include/xen/compat_ioctl.h 2010-01-18 15:23:12.000000000 +0100 ++++ head-2011-03-17/include/xen/compat_ioctl.h 2011-02-01 14:55:46.000000000 +0100 @@ -29,7 +29,7 @@ #define xen_pfn32_t __u32 #endif @@ -4634,8 +4878,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches struct privcmd_mmap_32 { int num; domid_t dom; ---- head-2010-05-12.orig/include/xen/evtchn.h 2010-03-31 14:37:09.000000000 +0200 -+++ head-2010-05-12/include/xen/evtchn.h 2010-03-31 14:02:34.000000000 +0200 +--- head-2011-03-17.orig/include/xen/evtchn.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/include/xen/evtchn.h 2011-02-01 14:55:46.000000000 +0100 @@ -48,6 +48,7 @@ * LOW-LEVEL DEFINITIONS */ @@ -4654,8 +4898,18 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches /* * Dynamically bind an event source to an IRQ-like callback handler. ---- head-2010-05-12.orig/include/xen/xen.h 2010-05-12 08:55:23.000000000 +0200 -+++ head-2010-05-12/include/xen/xen.h 2010-03-31 14:03:59.000000000 +0200 +@@ -167,6 +167,9 @@ static inline int close_evtchn(int port) + return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); + } + ++/* Test an irq's pending state. */ ++int xen_test_irq_pending(int irq); ++ + /* + * Use these to access the event channel underlying the IRQ handle returned + * by bind_*_to_irqhandler(). 
+--- head-2011-03-17.orig/include/xen/xen.h 2011-03-17 14:35:44.000000000 +0100 ++++ head-2011-03-17/include/xen/xen.h 2011-02-01 14:55:46.000000000 +0100 @@ -7,8 +7,10 @@ enum xen_domain_type { XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ }; @@ -4677,9 +4931,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #else /* !CONFIG_XEN_DOM0 */ #define xen_initial_domain() (0) #endif /* CONFIG_XEN_DOM0 */ ---- head-2010-05-12.orig/kernel/sysctl_binary.c 2010-04-15 09:55:52.000000000 +0200 -+++ head-2010-05-12/kernel/sysctl_binary.c 2010-04-15 10:13:33.000000000 +0200 -@@ -875,9 +875,10 @@ static const struct bin_table bin_bus_ta +--- head-2011-03-17.orig/kernel/sysctl_binary.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-17/kernel/sysctl_binary.c 2011-02-01 14:55:46.000000000 +0100 +@@ -874,9 +874,10 @@ static const struct bin_table bin_bus_ta #ifdef CONFIG_XEN @@ -4693,7 +4947,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches {} }; #endif -@@ -922,7 +923,7 @@ static const struct bin_table bin_root_t +@@ -921,7 +922,7 @@ static const struct bin_table bin_root_t { CTL_DIR, CTL_ABI, "abi" }, /* CTL_CPU not used */ #ifdef CONFIG_XEN @@ -4702,8 +4956,8 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches #endif /* CTL_ARLAN "arlan" no longer used */ { CTL_DIR, CTL_S390DBF, "s390dbf", bin_s390dbf_table }, ---- head-2010-05-12.orig/kernel/sysctl_check.c 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-12/kernel/sysctl_check.c 2010-03-24 16:00:05.000000000 +0100 +--- head-2011-03-17.orig/kernel/sysctl_check.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/kernel/sysctl_check.c 2011-02-01 14:55:46.000000000 +0100 @@ -4,7 +4,6 @@ #include #include @@ -4712,9 +4966,9 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches static int sysctl_depth(struct ctl_table *table) ---- head-2010-05-12.orig/lib/swiotlb-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-12/lib/swiotlb-xen.c 2010-03-24 16:00:05.000000000 +0100 -@@ -106,6 +106,7 @@ setup_io_tlb_npages(char *str) +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 14:55:46.000000000 +0100 +@@ -114,6 +114,7 @@ setup_io_tlb_npages(char *str) swiotlb_force = 1; else if (!strcmp(str, "off")) swiotlb_force = -1; @@ -4722,7 +4976,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches return 1; } __setup("swiotlb=", setup_io_tlb_npages); -@@ -118,8 +119,10 @@ static dma_addr_t swiotlb_virt_to_bus(st +@@ -126,8 +127,10 @@ static dma_addr_t swiotlb_virt_to_bus(st return phys_to_dma(hwdev, virt_to_phys(address)); } @@ -4734,7 +4988,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches printk(KERN_INFO "Software IO TLB enabled: \n" " Aperture: %lu megabytes\n" " Address size: %u bits\n" -@@ -133,7 +136,7 @@ static void swiotlb_print_info(unsigned +@@ -141,7 +144,7 @@ static void swiotlb_print_info(unsigned * structures for the software IO TLB used to implement the PCI DMA API. 
*/ void __init @@ -4743,7 +4997,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { unsigned long i, bytes; int rc; -@@ -204,12 +207,12 @@ swiotlb_init_with_default_size(size_t de +@@ -212,12 +215,12 @@ swiotlb_init_with_default_size(size_t de } while (rc && dma_bits++ < max_dma_bits); if (rc) panic("No suitable physical memory available for SWIOTLB overflow buffer!\n"); @@ -4759,7 +5013,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches { long ram_end; size_t defsz = 64 * (1 << 20); /* 64MB default size */ -@@ -227,7 +230,7 @@ swiotlb_init(void) +@@ -235,7 +238,7 @@ swiotlb_init(void) } if (swiotlb) @@ -4768,7 +5022,7 @@ Automatically created from "patches.kernel.org/patch-2.6.33" by xen-port-patches else printk(KERN_INFO "Software IO TLB disabled\n"); } -@@ -416,7 +419,7 @@ do_unmap_single(struct device *hwdev, ch +@@ -424,7 +427,7 @@ do_unmap_single(struct device *hwdev, ch /* * Return the buffer to the free list by setting the corresponding diff --git a/patches.xen/xen3-patch-2.6.34 b/patches.xen/xen3-patch-2.6.34 index 41fc9d5..2a2c860 100644 --- a/patches.xen/xen3-patch-2.6.34 +++ b/patches.xen/xen3-patch-2.6.34 @@ -7,20 +7,20 @@ Patch-mainline: 2.6.34 Acked-by: Jeff Mahoney Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches.py ---- head-2010-05-25.orig/arch/x86/Kconfig 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/Kconfig 2010-03-25 16:41:03.000000000 +0100 -@@ -106,7 +106,7 @@ config SBUS +--- head-2011-03-11.orig/arch/x86/Kconfig 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/Kconfig 2011-02-01 15:03:03.000000000 +0100 +@@ -116,7 +116,7 @@ config SBUS bool config NEED_DMA_MAP_STATE - def_bool (X86_64 || DMAR || DMA_API_DEBUG) + def_bool (X86_64 || DMAR || DMA_API_DEBUG || SWIOTLB) - config GENERIC_ISA_DMA + config NEED_SG_DMA_LENGTH def_bool y ---- head-2010-05-25.orig/arch/x86/ia32/ia32entry-xen.S 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/ia32/ia32entry-xen.S 2010-05-12 09:08:52.000000000 +0200 -@@ -456,7 +456,7 @@ ia32_sys_call_table: +--- head-2011-03-11.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/ia32/ia32entry-xen.S 2011-02-01 15:03:03.000000000 +0100 +@@ -444,7 +444,7 @@ ia32_sys_call_table: .quad quiet_ni_syscall /* old mpx syscall holder */ .quad sys_setpgid .quad quiet_ni_syscall /* old ulimit syscall holder */ @@ -29,7 +29,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches .quad sys_umask /* 60 */ .quad sys_chroot .quad compat_sys_ustat -@@ -479,7 +479,7 @@ ia32_sys_call_table: +@@ -467,7 +467,7 @@ ia32_sys_call_table: .quad compat_sys_settimeofday .quad sys_getgroups16 /* 80 */ .quad sys_setgroups16 @@ -38,7 +38,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches .quad sys_symlink .quad sys_lstat .quad sys_readlink /* 85 */ -@@ -506,7 +506,7 @@ ia32_sys_call_table: +@@ -494,7 +494,7 @@ ia32_sys_call_table: .quad compat_sys_newstat .quad compat_sys_newlstat .quad compat_sys_newfstat @@ -47,7 +47,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches .quad stub32_iopl /* 110 */ .quad sys_vhangup .quad quiet_ni_syscall /* old "idle" system call */ -@@ -519,7 +519,7 @@ ia32_sys_call_table: +@@ -507,7 +507,7 @@ ia32_sys_call_table: .quad stub32_sigreturn .quad stub32_clone /* 120 */ .quad sys_setdomainname @@ -56,15 +56,17 @@ Automatically created from 
"patches.kernel.org/patch-2.6.34" by xen-port-patches .quad sys_modify_ldt .quad compat_sys_adjtimex .quad sys32_mprotect /* 125 */ ---- head-2010-05-25.orig/arch/x86/include/asm/i8259.h 2010-05-25 09:31:21.000000000 +0200 -+++ head-2010-05-25/arch/x86/include/asm/i8259.h 2010-03-25 11:31:58.000000000 +0100 -@@ -54,11 +54,13 @@ extern struct irq_chip i8259A_chip; +--- head-2011-03-11.orig/arch/x86/include/asm/i8259.h 2011-03-15 16:29:56.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/asm/i8259.h 2011-02-01 15:03:03.000000000 +0100 +@@ -54,6 +54,7 @@ extern struct irq_chip i8259A_chip; struct legacy_pic { int nr_legacy_irqs; +#ifndef CONFIG_XEN struct irq_chip *chip; - void (*mask_all)(void); + void (*mask)(unsigned int irq); + void (*unmask)(unsigned int irq); +@@ -61,6 +62,7 @@ struct legacy_pic { void (*restore_mask)(void); void (*init)(int auto_eoi); int (*irq_pending)(unsigned int irq); @@ -72,8 +74,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches void (*make_irq)(unsigned int irq); }; ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/fixmap.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/fixmap.h 2010-04-15 10:29:09.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 15:03:03.000000000 +0100 @@ -82,6 +82,9 @@ enum fixed_addresses { #endif FIX_DBGP_BASE, @@ -113,8 +115,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #ifdef CONFIG_X86_32 FIX_WP_TEST, #endif ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/io.h 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/io.h 2010-05-12 09:09:25.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/io.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/io.h 2011-02-01 15:03:03.000000000 +0100 @@ -1,8 +1,42 @@ #ifndef _ASM_X86_IO_H #define _ASM_X86_IO_H @@ -299,8 +301,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #define IO_SPACE_LIMIT 0xffff ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/irq_vectors.h 2010-03-29 18:11:31.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:33:45.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:49:16.000000000 +0100 @@ -3,11 +3,9 @@ #define MCE_VECTOR 0x12 @@ -332,25 +334,10 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches +# define CPU_VECTOR_LIMIT PIRQ_MAX(32 * NR_CPUS) +#endif - #ifdef CONFIG_X86_IO_APIC - # if !defined(NR_CPUS) || !defined(MAX_IO_APICS) -@@ -69,10 +75,11 @@ static inline int invalid_vm86_irq(int i - (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ - (NR_VECTORS + CPU_VECTOR_LIMIT) : \ - (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) --# elif NR_CPUS < MAX_IO_APICS --# define NR_PIRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) - # else --# define NR_PIRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) -+# define NR_PIRQS \ -+ (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? 
\ -+ (NR_VECTORS + CPU_VECTOR_LIMIT) : \ -+ (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) - # endif - #elif defined(CONFIG_XEN_PCIDEV_FRONTEND) - # define NR_PIRQS (NR_VECTORS + CPU_VECTOR_LIMIT) ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pci.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pci.h 2010-03-25 17:13:42.000000000 +0100 + #if defined(CONFIG_X86_IO_APIC) + # ifdef CONFIG_SPARSE_IRQ +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 15:03:03.000000000 +0100 @@ -48,8 +48,15 @@ static inline int pci_proc_domain(struct #ifdef CONFIG_PCI @@ -412,8 +399,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches /* implement the pci_ DMA API in terms of the generic device dma_ one */ #include ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgalloc.h 2010-03-25 16:41:03.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 15:03:03.000000000 +0100 @@ -27,6 +27,11 @@ pmd_t *early_get_pmd(unsigned long va); #endif @@ -426,8 +413,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches * Allocate and free page tables. */ extern pgd_t *pgd_alloc(struct mm_struct *); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_32.h 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 15:03:03.000000000 +0100 @@ -18,7 +18,6 @@ #include @@ -445,8 +432,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches void make_lowmem_page_readonly(void *va, unsigned int feature); void make_lowmem_page_writable(void *va, unsigned int feature); ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/pgtable_64.h 2010-03-25 16:41:03.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 15:03:03.000000000 +0100 @@ -136,7 +136,7 @@ static inline int pgd_large(pgd_t pgd) { #define pte_unmap(pte) /* NOP */ #define pte_unmap_nested(pte) /* NOP */ @@ -456,18 +443,18 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches /* Encode and de-code a swap entry */ #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/smp.h 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/smp.h 2010-04-26 11:32:06.000000000 +0200 -@@ -135,6 +135,8 @@ int native_cpu_disable(void); +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:08:16.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:10:16.000000000 +0100 +@@ -138,6 +138,8 @@ int native_cpu_disable(void); void native_cpu_die(unsigned int cpu); void native_play_dead(void); void play_dead_common(void); +void wbinvd_on_cpu(int cpu); +int 
wbinvd_on_all_cpus(void); - #else /* CONFIG_XEN */ - -@@ -162,8 +164,19 @@ static inline int num_booting_cpus(void) + void smp_store_cpu_info(int id); + #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +@@ -165,8 +167,19 @@ static inline int num_booting_cpus(void) { return cpumask_weight(cpu_callout_mask); } @@ -487,8 +474,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches extern unsigned disabled_cpus __cpuinitdata; #include ---- head-2010-05-25.orig/arch/x86/include/mach-xen/asm/system.h 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/include/mach-xen/asm/system.h 2010-03-25 16:41:14.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:09:31.000000000 +0100 ++++ head-2011-03-11/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:10:13.000000000 +0100 @@ -31,7 +31,7 @@ extern void show_regs_common(void); "movl %P[task_canary](%[next]), %%ebx\n\t" \ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" @@ -498,7 +485,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #define __switch_canary_iparam \ , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) #else /* CC_STACKPROTECTOR */ -@@ -113,7 +113,7 @@ do { \ +@@ -115,7 +115,7 @@ do { \ "movq %P[task_canary](%%rsi),%%r8\n\t" \ "movq %%r8,"__percpu_arg([gs_canary])"\n\t" #define __switch_canary_oparam \ @@ -507,7 +494,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #define __switch_canary_iparam \ , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) #else /* CC_STACKPROTECTOR */ -@@ -132,7 +132,7 @@ do { \ +@@ -134,7 +134,7 @@ do { \ __switch_canary \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ "movq %%rax,%%rdi\n\t" \ @@ -516,7 +503,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches "jnz ret_from_fork\n\t" \ RESTORE_CONTEXT \ : "=a" (last) \ -@@ -142,7 +142,7 @@ do { \ +@@ -144,7 +144,7 @@ do { \ [ti_flags] "i" (offsetof(struct thread_info, flags)), \ [_tif_fork] "i" (_TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, stack)), \ @@ -525,9 +512,37 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches __switch_canary_iparam \ : "memory", "cc" __EXTRA_CLOBBER) #endif ---- head-2010-05-25.orig/arch/x86/kernel/apic/io_apic-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/apic/io_apic-xen.c 2010-05-12 09:09:25.000000000 +0200 -@@ -36,6 +36,7 @@ +--- head-2011-03-11.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-02-01 15:03:03.000000000 +0100 +@@ -190,12 +190,12 @@ static struct processor_extcntl_ops xen_ + .hotplug = xen_hotplug_notifier, + }; + +-void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops) ++static int __init init_extcntl(void) + { + unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8; + + if (!pmbits) +- return; ++ return 0; + if (pmbits & XEN_PROCESSOR_PM_CX) + xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier; + if (pmbits & XEN_PROCESSOR_PM_PX) +@@ -203,6 +203,8 @@ void arch_acpi_processor_init_extcntl(co + if (pmbits & XEN_PROCESSOR_PM_TX) + xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier; + +- *ops = &xen_extcntl_ops; ++ processor_extcntl_ops = &xen_extcntl_ops; ++ ++ return 0; + } +-EXPORT_SYMBOL(arch_acpi_processor_init_extcntl); ++arch_initcall(init_extcntl); +--- 
head-2011-03-11.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:03:03.000000000 +0100 +@@ -34,6 +34,7 @@ #include #include #include /* time_after() */ @@ -535,7 +550,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #ifdef CONFIG_ACPI #include #endif -@@ -69,9 +70,12 @@ +@@ -61,9 +62,12 @@ #include /* Fake i8259 */ @@ -551,7 +566,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches unsigned long io_apic_irqs; #endif /* CONFIG_XEN */ -@@ -86,9 +90,9 @@ unsigned long io_apic_irqs; +@@ -78,9 +82,9 @@ unsigned long io_apic_irqs; */ int sis_apic_bug = -1; @@ -563,7 +578,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #endif /* -@@ -110,12 +114,8 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCE +@@ -102,12 +106,8 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCE int mp_irq_entries; #ifndef CONFIG_XEN @@ -576,7 +591,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #endif #if defined (CONFIG_MCA) || defined (CONFIG_EISA) -@@ -160,33 +160,10 @@ static struct irq_pin_list *get_one_free +@@ -152,33 +152,10 @@ static struct irq_pin_list *get_one_free /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ #ifdef CONFIG_SPARSE_IRQ @@ -612,7 +627,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches int __init arch_early_irq_init(void) { -@@ -196,6 +173,11 @@ int __init arch_early_irq_init(void) +@@ -188,6 +165,11 @@ int __init arch_early_irq_init(void) int node; int i; @@ -624,7 +639,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches cfg = irq_cfgx; count = ARRAY_SIZE(irq_cfgx); node= cpu_to_node(boot_cpu_id); -@@ -205,8 +187,14 @@ int __init arch_early_irq_init(void) +@@ -197,8 +179,14 @@ int __init arch_early_irq_init(void) desc->chip_data = &cfg[i]; zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); @@ -641,7 +656,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } return 0; -@@ -451,7 +439,7 @@ static bool io_apic_level_ack_pending(st +@@ -443,7 +431,7 @@ static bool io_apic_level_ack_pending(st struct irq_pin_list *entry; unsigned long flags; @@ -650,7 +665,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches for_each_irq_pin(entry, cfg->irq_2_pin) { unsigned int reg; int pin; -@@ -460,11 +448,11 @@ static bool io_apic_level_ack_pending(st +@@ -452,11 +440,11 @@ static bool io_apic_level_ack_pending(st reg = io_apic_read(entry->apic, 0x10 + pin*2); /* Is the remote IRR bit set? 
*/ if (reg & IO_APIC_REDIR_REMOTE_IRR) { @@ -664,7 +679,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return false; } -@@ -480,10 +468,10 @@ static struct IO_APIC_route_entry ioapic +@@ -472,10 +460,10 @@ static struct IO_APIC_route_entry ioapic { union entry_union eu; unsigned long flags; @@ -677,7 +692,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return eu.entry; } #endif -@@ -507,9 +495,9 @@ __ioapic_write_entry(int apic, int pin, +@@ -499,9 +487,9 @@ __ioapic_write_entry(int apic, int pin, void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { unsigned long flags; @@ -689,7 +704,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } #ifndef CONFIG_XEN -@@ -523,10 +511,10 @@ static void ioapic_mask_entry(int apic, +@@ -515,10 +503,10 @@ static void ioapic_mask_entry(int apic, unsigned long flags; union entry_union eu = { .entry.mask = 1 }; @@ -702,7 +717,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } /* -@@ -653,9 +641,9 @@ static void mask_IO_APIC_irq_desc(struct +@@ -645,9 +633,9 @@ static void mask_IO_APIC_irq_desc(struct BUG_ON(!cfg); @@ -714,7 +729,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) -@@ -663,9 +651,9 @@ static void unmask_IO_APIC_irq_desc(stru +@@ -655,9 +643,9 @@ static void unmask_IO_APIC_irq_desc(stru struct irq_cfg *cfg = desc->chip_data; unsigned long flags; @@ -726,7 +741,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static void mask_IO_APIC_irq(unsigned int irq) -@@ -922,7 +910,7 @@ static int __init find_isa_irq_apic(int +@@ -914,7 +902,7 @@ static int __init find_isa_irq_apic(int */ static int EISA_ELCR(unsigned int irq) { @@ -735,7 +750,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches unsigned int port = 0x4d0 + (irq >> 3); return (inb(port) >> (irq & 7)) & 1; } -@@ -1198,12 +1186,12 @@ void lock_vector_lock(void) +@@ -1190,12 +1178,12 @@ void lock_vector_lock(void) /* Used to the online set of cpus does not change * during assign_irq_vector. */ @@ -750,7 +765,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static int -@@ -1220,7 +1208,8 @@ __assign_irq_vector(int irq, struct irq_ +@@ -1212,7 +1200,8 @@ __assign_irq_vector(int irq, struct irq_ * Also, we've got to be careful not to trash gate * 0x80, because int 0x80 is hm, kind of importantish. ;) */ @@ -760,7 +775,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches unsigned int old_vector; int cpu, err; cpumask_var_t tmp_mask; -@@ -1256,7 +1245,7 @@ next: +@@ -1248,7 +1237,7 @@ next: if (vector >= first_system_vector) { /* If out of vectors on large boxen, must share them. 
*/ offset = (offset + 1) % 8; @@ -769,7 +784,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } if (unlikely(current_vector == vector)) continue; -@@ -1294,9 +1283,9 @@ int assign_irq_vector(int irq, struct ir +@@ -1286,9 +1275,9 @@ int assign_irq_vector(int irq, struct ir int err; unsigned long flags; @@ -781,7 +796,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return err; } -@@ -1330,14 +1319,27 @@ static void __clear_irq_vector(int irq, +@@ -1322,14 +1311,27 @@ static void __clear_irq_vector(int irq, void __setup_vector_irq(int cpu) { /* Initialize vector_irq on a new cpu */ @@ -810,7 +825,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches if (!cpumask_test_cpu(cpu, cfg->domain)) continue; vector = cfg->vector; -@@ -1353,6 +1355,7 @@ void __setup_vector_irq(int cpu) +@@ -1345,6 +1347,7 @@ void __setup_vector_irq(int cpu) if (!cpumask_test_cpu(cpu, cfg->domain)) per_cpu(vector_irq, cpu)[vector] = -1; } @@ -818,7 +833,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static struct irq_chip ioapic_chip; -@@ -1508,6 +1511,16 @@ static void setup_IO_APIC_irq(int apic_i +@@ -1501,6 +1504,16 @@ static void setup_IO_APIC_irq(int apic_i cfg = desc->chip_data; @@ -835,7 +850,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches if (assign_irq_vector(irq, cfg, apic->target_cpus())) return; -@@ -1533,8 +1546,10 @@ static void setup_IO_APIC_irq(int apic_i +@@ -1526,8 +1539,10 @@ static void setup_IO_APIC_irq(int apic_i } ioapic_register_intr(irq, desc, trigger); @@ -848,7 +863,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches ioapic_write_entry(apic_id, pin, entry); } -@@ -1545,7 +1560,7 @@ static struct { +@@ -1538,7 +1553,7 @@ static struct { static void __init setup_IO_APIC_irqs(void) { @@ -857,7 +872,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches int notcon = 0; struct irq_desc *desc; struct irq_cfg *cfg; -@@ -1553,14 +1568,7 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1546,14 +1561,7 @@ static void __init setup_IO_APIC_irqs(vo apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); @@ -873,7 +888,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { idx = find_irq_entry(apic_id, pin, mp_INT); if (idx == -1) { -@@ -1582,6 +1590,9 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1575,6 +1583,9 @@ static void __init setup_IO_APIC_irqs(vo irq = pin_2_irq(idx, apic_id, pin); @@ -883,7 +898,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #ifdef CONFIG_XEN if (irq < PIRQ_BASE || irq >= PIRQ_BASE + nr_pirqs) continue; -@@ -1615,6 +1626,60 @@ static void __init setup_IO_APIC_irqs(vo +@@ -1608,6 +1619,60 @@ static void __init setup_IO_APIC_irqs(vo " (apicid-pin) not connected\n"); } @@ -944,7 +959,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #ifndef CONFIG_XEN /* * Set up the timer pin, possibly with the 8259A-master behind. 
-@@ -1679,14 +1744,14 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1672,14 +1737,14 @@ __apicdebuginit(void) print_IO_APIC(void for (apic = 0; apic < nr_ioapics; apic++) { @@ -961,7 +976,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches printk("\n"); printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); -@@ -1725,7 +1790,7 @@ __apicdebuginit(void) print_IO_APIC(void +@@ -1718,7 +1783,7 @@ __apicdebuginit(void) print_IO_APIC(void printk(KERN_DEBUG ".... IRQ redirection table:\n"); printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" @@ -970,7 +985,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches for (i = 0; i <= reg_01.bits.entries; i++) { struct IO_APIC_route_entry entry; -@@ -1903,12 +1968,12 @@ __apicdebuginit(void) print_PIC(void) +@@ -1896,12 +1961,12 @@ __apicdebuginit(void) print_PIC(void) unsigned int v; unsigned long flags; @@ -985,7 +1000,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches v = inb(0xa1) << 8 | inb(0x21); printk(KERN_DEBUG "... PIC IMR: %04x\n", v); -@@ -1922,7 +1987,7 @@ __apicdebuginit(void) print_PIC(void) +@@ -1915,7 +1980,7 @@ __apicdebuginit(void) print_PIC(void) outb(0x0a,0xa0); outb(0x0a,0x20); @@ -994,7 +1009,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches printk(KERN_DEBUG "... PIC ISR: %04x\n", v); -@@ -1984,13 +2049,13 @@ void __init enable_IO_APIC(void) +@@ -1977,13 +2042,13 @@ void __init enable_IO_APIC(void) * The number of IO-APIC IRQ registers (== #pins): */ for (apic = 0; apic < nr_ioapics; apic++) { @@ -1011,7 +1026,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return; #ifndef CONFIG_XEN -@@ -2052,7 +2117,7 @@ void disable_IO_APIC(void) +@@ -2043,7 +2108,7 @@ void disable_IO_APIC(void) */ clear_IO_APIC(); @@ -1020,7 +1035,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return; /* -@@ -2131,9 +2196,9 @@ void __init setup_ioapic_ids_from_mpc(vo +@@ -2122,9 +2187,9 @@ void __init setup_ioapic_ids_from_mpc(vo for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { /* Read the register 0 value */ @@ -1032,7 +1047,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches old_id = mp_ioapics[apic_id].apicid; -@@ -2192,16 +2257,16 @@ void __init setup_ioapic_ids_from_mpc(vo +@@ -2183,16 +2248,16 @@ void __init setup_ioapic_ids_from_mpc(vo mp_ioapics[apic_id].apicid); reg_00.bits.ID = mp_ioapics[apic_id].apicid; @@ -1053,7 +1068,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) printk("could not set ID!\n"); else -@@ -2284,15 +2349,15 @@ static unsigned int startup_ioapic_irq(u +@@ -2275,15 +2340,15 @@ static unsigned int startup_ioapic_irq(u unsigned long flags; struct irq_cfg *cfg; @@ -1074,7 +1089,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return was_pending; } -@@ -2303,9 +2368,9 @@ static int ioapic_retrigger_irq(unsigned +@@ -2294,9 +2359,9 @@ static int ioapic_retrigger_irq(unsigned struct irq_cfg *cfg = irq_cfg(irq); unsigned long flags; @@ -1086,7 +1101,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return 1; } -@@ -2398,14 +2463,14 @@ set_ioapic_affinity_irq_desc(struct irq_ +@@ -2389,14 +2454,14 @@ set_ioapic_affinity_irq_desc(struct irq_ irq = desc->irq; cfg = desc->chip_data; @@ -1103,7 +1118,7 @@ Automatically created from 
"patches.kernel.org/patch-2.6.34" by xen-port-patches return ret; } -@@ -2575,6 +2640,9 @@ void irq_force_complete_move(int irq) +@@ -2566,6 +2631,9 @@ void irq_force_complete_move(int irq) struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg = desc->chip_data; @@ -1113,7 +1128,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches __irq_complete_move(&desc, cfg->vector); } #else -@@ -2640,9 +2708,9 @@ static void eoi_ioapic_irq(struct irq_de +@@ -2631,9 +2699,9 @@ static void eoi_ioapic_irq(struct irq_de irq = desc->irq; cfg = desc->chip_data; @@ -1125,7 +1140,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static void ack_apic_level(unsigned int irq) -@@ -2825,8 +2893,8 @@ static inline void init_IO_APIC_traps(vo +@@ -2816,8 +2884,8 @@ static inline void init_IO_APIC_traps(vo * so default to an old-fashioned 8259 * interrupt if we can.. */ @@ -1136,7 +1151,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches else /* Strange. Oh, well.. */ desc->chip = &no_irq_chip; -@@ -2984,7 +3052,7 @@ static inline void __init check_timer(vo +@@ -2975,7 +3043,7 @@ static inline void __init check_timer(vo /* * get/set the timer IRQ vector: */ @@ -1145,7 +1160,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches assign_irq_vector(0, cfg, apic->target_cpus()); /* -@@ -2997,7 +3065,7 @@ static inline void __init check_timer(vo +@@ -2988,7 +3056,7 @@ static inline void __init check_timer(vo * automatically. */ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); @@ -1154,7 +1169,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #ifdef CONFIG_X86_32 { unsigned int ver; -@@ -3056,7 +3124,7 @@ static inline void __init check_timer(vo +@@ -3047,7 +3115,7 @@ static inline void __init check_timer(vo if (timer_irq_works()) { if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); @@ -1163,7 +1178,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } if (disable_timer_pin_1 > 0) clear_IO_APIC_pin(0, pin1); -@@ -3079,14 +3147,14 @@ static inline void __init check_timer(vo +@@ -3070,14 +3138,14 @@ static inline void __init check_timer(vo */ replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); @@ -1181,7 +1196,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } goto out; } -@@ -3094,7 +3162,7 @@ static inline void __init check_timer(vo +@@ -3085,7 +3153,7 @@ static inline void __init check_timer(vo * Cleanup, just in case ... */ local_irq_disable(); @@ -1190,7 +1205,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches clear_IO_APIC_pin(apic2, pin2); apic_printk(APIC_QUIET, KERN_INFO "....... 
failed.\n"); } -@@ -3113,22 +3181,22 @@ static inline void __init check_timer(vo +@@ -3104,22 +3172,22 @@ static inline void __init check_timer(vo lapic_register_intr(0, desc); apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ @@ -1217,7 +1232,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches apic_write(APIC_LVT0, APIC_DM_EXTINT); unlock_ExtINT_logic(); -@@ -3177,7 +3245,7 @@ void __init setup_IO_APIC(void) +@@ -3168,7 +3236,7 @@ void __init setup_IO_APIC(void) * calling enable_IO_APIC() is moved to setup_local_APIC for BP */ #endif @@ -1226,7 +1241,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); /* -@@ -3190,7 +3258,7 @@ void __init setup_IO_APIC(void) +@@ -3181,7 +3249,7 @@ void __init setup_IO_APIC(void) #endif setup_IO_APIC_irqs(); init_IO_APIC_traps(); @@ -1235,7 +1250,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches check_timer(); } -@@ -3248,13 +3316,13 @@ static int ioapic_resume(struct sys_devi +@@ -3239,13 +3307,13 @@ static int ioapic_resume(struct sys_devi data = container_of(dev, struct sysfs_ioapic_data, dev); entry = data->entry; @@ -1251,7 +1266,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches for (i = 0; i < nr_ioapic_registers[dev->id]; i++) ioapic_write_entry(dev->id, i, entry[i]); -@@ -3317,7 +3385,7 @@ unsigned int create_irq_nr(unsigned int +@@ -3308,7 +3376,7 @@ unsigned int create_irq_nr(unsigned int if (irq_want < nr_irqs_gsi) irq_want = nr_irqs_gsi; @@ -1260,7 +1275,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches for (new = irq_want; new < nr_irqs; new++) { desc_new = irq_to_desc_alloc_node(new, node); if (!desc_new) { -@@ -3336,14 +3404,11 @@ unsigned int create_irq_nr(unsigned int +@@ -3327,14 +3395,11 @@ unsigned int create_irq_nr(unsigned int irq = new; break; } @@ -1279,7 +1294,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return irq; } -@@ -3365,20 +3430,13 @@ int create_irq(void) +@@ -3356,20 +3421,13 @@ int create_irq(void) void destroy_irq(unsigned int irq) { unsigned long flags; @@ -1302,9 +1317,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches + __clear_irq_vector(irq, get_irq_chip_data(irq)); + raw_spin_unlock_irqrestore(&vector_lock, flags); } - #endif /* CONFIG_XEN */ + #endif /* !CONFIG_XEN */ -@@ -3916,9 +3974,9 @@ int __init io_apic_get_redir_entries (in +@@ -3907,9 +3965,9 @@ int __init io_apic_get_redir_entries (in union IO_APIC_reg_01 reg_01; unsigned long flags; @@ -1316,7 +1331,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return reg_01.bits.entries; } -@@ -4010,7 +4068,7 @@ static int __io_apic_set_pci_routing(str +@@ -4001,7 +4059,7 @@ static int __io_apic_set_pci_routing(str /* * IRQs < 16 are already in the irq_2_pin[] map */ @@ -1325,7 +1340,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches cfg = desc->chip_data; if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { printk(KERN_INFO "can not add pin %d for irq %d\n", -@@ -4090,9 +4148,9 @@ int __init io_apic_get_unique_id(int ioa +@@ -4082,9 +4140,9 @@ int __init io_apic_get_unique_id(int ioa if (physids_empty(apic_id_map)) apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); @@ -1337,7 +1352,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by 
xen-port-patches if (apic_id >= get_physical_broadcast()) { printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " -@@ -4126,10 +4184,10 @@ int __init io_apic_get_unique_id(int ioa +@@ -4118,10 +4176,10 @@ int __init io_apic_get_unique_id(int ioa if (reg_00.bits.ID != apic_id) { reg_00.bits.ID = apic_id; @@ -1350,7 +1365,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches /* Sanity check */ if (reg_00.bits.ID != apic_id) { -@@ -4151,9 +4209,9 @@ int __init io_apic_get_version(int ioapi +@@ -4142,9 +4200,9 @@ int __init io_apic_get_version(int ioapi union IO_APIC_reg_01 reg_01; unsigned long flags; @@ -1362,7 +1377,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return reg_01.bits.version; } -@@ -4186,27 +4244,23 @@ int acpi_get_override_irq(int bus_irq, i +@@ -4177,27 +4235,23 @@ int acpi_get_override_irq(int bus_irq, i #ifdef CONFIG_SMP void __init setup_ioapic_dest(void) { @@ -1395,7 +1410,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches desc = irq_to_desc(irq); /* -@@ -4394,3 +4448,26 @@ void __init mp_register_ioapic(int id, u +@@ -4385,3 +4439,26 @@ void __init mp_register_ioapic(int id, u nr_ioapics++; } @@ -1422,28 +1437,31 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches + setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); +} +#endif ---- head-2010-05-25.orig/arch/x86/kernel/cpu/intel_cacheinfo.c 2010-05-25 09:20:14.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/cpu/intel_cacheinfo.c 2010-05-25 09:25:34.000000000 +0200 -@@ -301,7 +301,7 @@ struct _cache_attr { +--- head-2011-03-11.orig/arch/x86/kernel/cpu/intel.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/intel.c 2011-02-01 15:03:03.000000000 +0100 +@@ -60,6 +60,9 @@ static void __cpuinit early_init_intel(s + * need the microcode to have already been loaded... so if it is + * not, recommend a BIOS update and disable large pages. 
+ */ ++#ifdef CONFIG_XEN ++ if (cpu_has(c, X86_FEATURE_PSE)) ++#endif + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) { + u32 ucode, junk; + +--- head-2011-03-11.orig/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 10:04:17.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 10:06:37.000000000 +0100 +@@ -309,7 +309,7 @@ struct _cache_attr { ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); }; --#ifdef CONFIG_CPU_SUP_AMD -+#if defined(CONFIG_CPU_SUP_AMD) && !defined(CONFIG_XEN) - static unsigned int __cpuinit amd_calc_l3_indices(void) - { - /* -@@ -873,7 +873,7 @@ static struct attribute *default_attrs[] - - static struct attribute *default_l3_attrs[] = { - DEFAULT_SYSFS_CACHE_ATTRS, --#ifdef CONFIG_CPU_SUP_AMD -+#if defined(CONFIG_CPU_SUP_AMD) && !defined(CONFIG_XEN) - &cache_disable_0.attr, - &cache_disable_1.attr, - #endif ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2010-04-15 13:39:58.000000000 +0200 +-#ifdef CONFIG_AMD_NB ++#if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN) + + /* + * L3 cache descriptors +--- head-2011-03-11.orig/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2011-02-01 15:03:03.000000000 +0100 @@ -1,6 +1,7 @@ #include #include @@ -1452,8 +1470,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2010-03-25 11:33:03.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -25,12 +25,12 @@ void generic_get_mtrr(unsigned int reg, *type = op.u.read_memtype.type; } @@ -1469,8 +1487,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches unsigned int num_var_ranges; unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; ---- head-2010-05-25.orig/arch/x86/kernel/e820-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/e820-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/e820-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/e820-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -12,17 +12,10 @@ #include #include @@ -1933,8 +1951,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches reserve_region_with_split(&iomem_resource, start, end, "RAM buffer"); } ---- head-2010-05-25.orig/arch/x86/kernel/head32-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head32-xen.c 2010-04-15 10:29:09.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/head32-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head32-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -7,6 +7,7 @@ #include @@ -1943,10 +1961,10 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include -@@ -32,15 +33,26 @@ static void __init i386_default_early_se +@@ -48,15 +49,26 @@ void __init i386_start_kernel(void) + BUG_ON(pte_index(hypervisor_virt_start)); + #endif - void __init i386_start_kernel(void) - { +#ifdef CONFIG_X86_TRAMPOLINE + /* + * But first 
pinch a few for the stack/trampoline stuff @@ -1971,9 +1989,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif ---- head-2010-05-25.orig/arch/x86/kernel/head_32-xen.S 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/head_32-xen.S 2010-03-25 11:52:54.000000000 +0100 -@@ -67,8 +67,8 @@ ENTRY(startup_32) +--- head-2011-03-11.orig/arch/x86/kernel/head_32-xen.S 2011-03-03 16:24:06.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/head_32-xen.S 2011-03-03 16:24:36.000000000 +0100 +@@ -65,8 +65,8 @@ ENTRY(startup_32) * The linker can't handle this by relocation. Manually set * base address in stack canary segment descriptor. */ @@ -1984,7 +2002,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) -@@ -79,7 +79,7 @@ ENTRY(startup_32) +@@ -77,7 +77,7 @@ ENTRY(startup_32) # need to be preserved. movl XEN_START_mfn_list(%esi), %ebx @@ -1993,7 +2011,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches shrl $PAGE_SHIFT, %eax movl (%ebx,%eax,4), %ecx pushl %ecx # frame number for set_gdt below -@@ -89,7 +89,7 @@ ENTRY(startup_32) +@@ -87,7 +87,7 @@ ENTRY(startup_32) shldl $PAGE_SHIFT, %ecx, %edx shll $PAGE_SHIFT, %ecx orl $_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_DIRTY, %ecx @@ -2002,8 +2020,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches movl $__HYPERVISOR_update_va_mapping, %eax int $0x82 ---- head-2010-05-25.orig/arch/x86/kernel/ldt-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/ldt-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/ldt-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/ldt-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -7,6 +7,7 @@ */ @@ -2012,9 +2030,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/arch/x86/kernel/mpparse-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/mpparse-xen.c 2010-04-15 10:48:32.000000000 +0200 -@@ -677,7 +677,7 @@ static void __init smp_reserve_memory(st +--- head-2011-03-11.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:03:03.000000000 +0100 +@@ -691,7 +691,7 @@ static void __init smp_reserve_memory(st { unsigned long size = get_mpc_size(mpf->physptr); @@ -2023,7 +2041,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } #endif -@@ -710,7 +710,7 @@ static int __init smp_scan_config(unsign +@@ -724,7 +724,7 @@ static int __init smp_scan_config(unsign mpf, (u64)virt_to_phys(mpf)); mem = virt_to_phys(mpf); @@ -2032,8 +2050,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches if (mpf->physptr) smp_reserve_memory(mpf); #else ---- head-2010-05-25.orig/arch/x86/kernel/pci-dma-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/pci-dma-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/pci-dma-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -2,6 +2,7 @@ #include #include @@ -2087,8 +2105,8 @@ Automatically created from 
"patches.kernel.org/patch-2.6.34" by xen-port-patches if (pci_swiotlb_detect()) goto out; ---- head-2010-05-25.orig/arch/x86/kernel/process-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process-xen.c 2010-05-25 09:30:59.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:09:35.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process-xen.c 2011-03-03 16:10:10.000000000 +0100 @@ -94,6 +94,13 @@ void exit_thread(void) } } @@ -2103,7 +2121,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches void show_regs_common(void) { const char *board, *product; -@@ -503,21 +510,39 @@ static int __cpuinit mwait_usable(const +@@ -474,21 +481,39 @@ static int __cpuinit mwait_usable(const } /* @@ -2151,7 +2169,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static cpumask_var_t c1e_mask; -@@ -586,7 +611,7 @@ void __cpuinit select_idle_routine(const +@@ -557,7 +582,7 @@ void __cpuinit select_idle_routine(const #ifndef CONFIG_XEN #ifdef CONFIG_SMP if (pm_idle == poll_idle && smp_num_siblings > 1) { @@ -2160,9 +2178,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches " performance may degrade.\n"); } #endif ---- head-2010-05-25.orig/arch/x86/kernel/process_32-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_32-xen.c 2010-03-25 10:38:31.000000000 +0100 -@@ -179,12 +179,6 @@ void __show_regs(struct pt_regs *regs, i +--- head-2011-03-11.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:47:07.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_32-xen.c 2011-02-02 08:47:25.000000000 +0100 +@@ -177,12 +177,6 @@ void __show_regs(struct pt_regs *regs, i d6, d7); } @@ -2175,9 +2193,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); ---- head-2010-05-25.orig/arch/x86/kernel/process_64-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/process_64-xen.c 2010-05-12 09:09:00.000000000 +0200 -@@ -219,12 +219,6 @@ void __show_regs(struct pt_regs *regs, i +--- head-2011-03-11.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:12.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:22.000000000 +0100 +@@ -215,12 +215,6 @@ void __show_regs(struct pt_regs *regs, i printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); } @@ -2190,7 +2208,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches void xen_load_gs_index(unsigned gs) { WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs)); -@@ -295,12 +289,12 @@ int copy_thread(unsigned long clone_flag +@@ -291,12 +285,12 @@ int copy_thread(unsigned long clone_flag set_tsk_thread_flag(p, TIF_FORK); @@ -2205,8 +2223,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches savesegment(es, p->thread.es); savesegment(ds, p->thread.ds); ---- head-2010-05-25.orig/arch/x86/kernel/setup-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/setup-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:24:24.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/setup-xen.c 2011-03-03 16:24:33.000000000 +0100 @@ -55,7 +55,6 @@ #include #include @@ -2320,7 +2338,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by 
xen-port-patches /* * Determine if we were loaded by an EFI loader. If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures -@@ -940,7 +975,7 @@ void __init setup_arch(char **cmdline_p) +@@ -943,7 +978,7 @@ void __init setup_arch(char **cmdline_p) insert_resource(&iomem_resource, &data_resource); insert_resource(&iomem_resource, &bss_resource); @@ -2329,7 +2347,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #ifdef CONFIG_X86_32 if (ppro_with_ram_bug()) { e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM, -@@ -1007,6 +1042,8 @@ void __init setup_arch(char **cmdline_p) +@@ -1010,6 +1045,8 @@ void __init setup_arch(char **cmdline_p) */ find_smp_config(); @@ -2338,7 +2356,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches reserve_trampoline_memory(); #ifdef CONFIG_ACPI_SLEEP -@@ -1077,17 +1114,11 @@ void __init setup_arch(char **cmdline_p) +@@ -1080,17 +1117,11 @@ void __init setup_arch(char **cmdline_p) #endif initmem_init(0, max_pfn, acpi, k8); @@ -2359,8 +2377,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #ifdef CONFIG_KVM_CLOCK kvmclock_init(); ---- head-2010-05-25.orig/arch/x86/kernel/smp-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/smp-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/kernel/smp-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/smp-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -21,6 +21,7 @@ #include #include @@ -2369,9 +2387,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include ---- head-2010-05-25.orig/arch/x86/kernel/time-xen.c 2010-05-12 09:02:50.000000000 +0200 -+++ head-2010-05-25/arch/x86/kernel/time-xen.c 2010-05-12 09:03:15.000000000 +0200 -@@ -597,7 +597,7 @@ static cycle_t xen_clocksource_read(stru +--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-02-01 15:03:03.000000000 +0100 +@@ -583,7 +583,7 @@ static cycle_t xen_clocksource_read(stru #endif } @@ -2380,7 +2398,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches { extern void time_resume(void); -@@ -619,18 +619,18 @@ static struct clocksource clocksource_xe +@@ -605,18 +605,18 @@ static struct clocksource clocksource_xe struct vcpu_runstate_info *setup_runstate_area(unsigned int cpu) { struct vcpu_register_runstate_memory_area area; @@ -2403,8 +2421,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static void init_missing_ticks_accounting(unsigned int cpu) ---- head-2010-05-25.orig/arch/x86/kernel/traps-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/traps-xen.c 2010-03-25 16:41:03.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/traps-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/traps-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -527,6 +527,9 @@ dotraplinkage void __kprobes do_debug(st get_debugreg(dr6, 6); @@ -2415,8 +2433,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches /* Catch kmemcheck conditions first of all! 
*/ if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) return; ---- head-2010-05-25.orig/arch/x86/kernel/vsyscall_64-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/vsyscall_64-xen.c 2010-03-25 10:38:31.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -308,7 +308,8 @@ static int __init vsyscall_init(void) register_sysctl_table(kernel_root_table2); #endif @@ -2427,8 +2445,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return 0; } ---- head-2010-05-25.orig/arch/x86/kernel/x86_init-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/kernel/x86_init-xen.c 2010-03-25 17:21:48.000000000 +0100 +--- head-2011-03-11.orig/arch/x86/kernel/x86_init-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/kernel/x86_init-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -5,8 +5,12 @@ */ #include @@ -2455,8 +2473,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches }; struct x86_platform_ops x86_platform = { ---- head-2010-05-25.orig/arch/x86/lib/Makefile 2010-03-24 15:01:37.000000000 +0100 -+++ head-2010-05-25/arch/x86/lib/Makefile 2010-04-28 16:13:29.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/lib/Makefile 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-11/arch/x86/lib/Makefile 2011-02-01 15:03:03.000000000 +0100 @@ -15,6 +15,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c clean-files := inat-tables.c @@ -2466,7 +2484,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches lib-y := delay.o lib-y += thunk_$(BITS).o --- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-05-25/arch/x86/lib/cache-smp-xen.c 2010-05-07 11:12:27.000000000 +0200 ++++ head-2011-03-11/arch/x86/lib/cache-smp-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -0,0 +1,27 @@ +#include +#include @@ -2495,8 +2513,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches + return on_each_cpu(__wbinvd, NULL, 1); +} +EXPORT_SYMBOL(wbinvd_on_all_cpus); ---- head-2010-05-25.orig/arch/x86/mm/init-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/init-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -1,3 +1,4 @@ +#include #include @@ -2593,8 +2611,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches + free_init_pages("initrd memory", start, PAGE_ALIGN(end)); } #endif ---- head-2010-05-25.orig/arch/x86/mm/init_32-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_32-xen.c 2010-04-15 10:51:33.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/init_32-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_32-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -25,11 +25,11 @@ #include #include @@ -2688,8 +2706,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches after_bootmem = 1; } ---- head-2010-05-25.orig/arch/x86/mm/init_64-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/init_64-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/init_64-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/init_64-xen.c 2011-02-01 
15:03:03.000000000 +0100 @@ -32,6 +32,7 @@ #include #include @@ -2753,9 +2771,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches if (!p) return -ENOMEM; ---- head-2010-05-25.orig/arch/x86/mm/ioremap-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/ioremap-xen.c 2010-05-12 09:13:00.000000000 +0200 -@@ -142,6 +142,11 @@ int direct_kernel_remap_pfn_range(unsign +--- head-2011-03-11.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:45.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:54.000000000 +0100 +@@ -138,6 +138,11 @@ int direct_kernel_remap_pfn_range(unsign } EXPORT_SYMBOL(direct_kernel_remap_pfn_range); @@ -2767,9 +2785,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches static int lookup_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { -@@ -177,45 +182,6 @@ int touch_pte_range(struct mm_struct *mm +@@ -158,45 +163,6 @@ int create_lookup_pte_addr(struct mm_str - EXPORT_SYMBOL(touch_pte_range); + EXPORT_SYMBOL(create_lookup_pte_addr); -int page_is_ram(unsigned long pagenr) -{ @@ -2813,7 +2831,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches /* * Fix up the linear direct mapping of the kernel to avoid cache attribute * conflicts. -@@ -606,6 +572,10 @@ void __init early_ioremap_init(void) +@@ -587,6 +553,10 @@ void __init early_ioremap_init(void) * The boot-ioremap range spans multiple pmds, for which * we are not prepared: */ @@ -2824,7 +2842,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { WARN_ON(1); printk(KERN_WARNING "pmd %p != %p\n", -@@ -665,6 +635,22 @@ static inline void __init early_clear_fi +@@ -646,6 +616,22 @@ static inline void __init early_clear_fi static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; @@ -2847,8 +2865,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches static int __init check_early_ioremap_leak(void) { int count = 0; ---- head-2010-05-25.orig/arch/x86/mm/pageattr-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pageattr-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pageattr-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -6,13 +6,13 @@ #include #include @@ -2896,8 +2914,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #endif prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); ---- head-2010-05-25.orig/arch/x86/mm/pat-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pat-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/pat-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pat-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -12,7 +12,7 @@ #include #include @@ -2907,8 +2925,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/arch/x86/mm/pgtable-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable-xen.c 2010-04-15 10:53:40.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/pgtable-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pgtable-xen.c 2011-02-01 15:03:03.000000000 +0100 
@@ -1,4 +1,5 @@ #include +#include @@ -2967,8 +2985,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches void __pte_free(pgtable_t pte) { if (!PageHighMem(pte)) { ---- head-2010-05-25.orig/arch/x86/mm/pgtable_32-xen.c 2010-03-24 15:25:06.000000000 +0100 -+++ head-2010-05-25/arch/x86/mm/pgtable_32-xen.c 2010-05-12 09:09:25.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/mm/pgtable_32-xen.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-11/arch/x86/mm/pgtable_32-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -6,7 +6,6 @@ #include #include @@ -2985,8 +3003,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches return 0; } early_param("reservetop", parse_reservetop); ---- head-2010-05-25.orig/arch/x86/pci/irq-xen.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/arch/x86/pci/irq-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/arch/x86/pci/irq-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/arch/x86/pci/irq-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -8,7 +8,6 @@ #include #include @@ -3058,8 +3076,94 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } static void pirq_penalize_isa_irq(int irq, int active) ---- head-2010-05-25.orig/drivers/char/tpm/tpm_vtpm.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-25/drivers/char/tpm/tpm_vtpm.c 2010-04-15 13:41:04.000000000 +0200 +--- head-2011-03-11.orig/drivers/acpi/processor_core.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_core.c 2011-02-01 15:03:03.000000000 +0100 +@@ -19,6 +19,15 @@ + #define _COMPONENT ACPI_PROCESSOR_COMPONENT + ACPI_MODULE_NAME("processor_core"); + ++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL ++/* ++ * External processor control logic may register with its own set of ++ * ops to get ACPI related notification. One example is like VMM. ++ */ ++const struct processor_extcntl_ops *processor_extcntl_ops; ++EXPORT_SYMBOL(processor_extcntl_ops); ++#endif ++ + static int set_no_mwait(const struct dmi_system_id *id) + { + printk(KERN_NOTICE PREFIX "%s detected - " +@@ -232,6 +241,8 @@ static bool processor_physically_present + } + + type = (acpi_type == ACPI_TYPE_DEVICE) ? 
1 : 0; ++ if (processor_cntl_external()) ++ type = ~type; + cpuid = acpi_get_cpuid(handle, type, acpi_id); + + if ((cpuid == -1) && (num_possible_cpus() > 1)) +--- head-2011-03-11.orig/drivers/acpi/processor_driver.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_driver.c 2011-02-01 15:03:03.000000000 +0100 +@@ -326,8 +326,8 @@ static int acpi_processor_get_info(struc + if (pr->id == -1) { + if (ACPI_FAILURE + (acpi_processor_hotadd_init(pr->handle, &pr->id)) && +- get_cpu_id(pr->handle, ~device_declaration, +- pr->acpi_id) < 0) { ++ acpi_get_cpuid(pr->handle, ~device_declaration, ++ pr->acpi_id) < 0) { + return -ENODEV; + } + } +@@ -475,8 +475,6 @@ static int __cpuinit acpi_processor_add( + strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); + device->driver_data = pr; + +- processor_extcntl_init(); +- + result = acpi_processor_get_info(device); + if (result || + ((pr->id == -1) && !processor_cntl_external())) { +--- head-2011-03-11.orig/drivers/acpi/processor_extcntl.c 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-11/drivers/acpi/processor_extcntl.c 2011-02-01 15:03:03.000000000 +0100 +@@ -36,12 +36,6 @@ ACPI_MODULE_NAME("processor_extcntl") + + static int processor_extcntl_parse_csd(struct acpi_processor *pr); + static int processor_extcntl_get_performance(struct acpi_processor *pr); +-/* +- * External processor control logic may register with its own set of +- * ops to get ACPI related notification. One example is like VMM. +- */ +-const struct processor_extcntl_ops *processor_extcntl_ops; +-EXPORT_SYMBOL(processor_extcntl_ops); + + static int processor_notify_smm(void) + { +@@ -102,21 +96,6 @@ int processor_notify_external(struct acp + } + + /* +- * External control logic can decide to grab full or part of physical +- * processor control bits. Take a VMM for example, physical processors +- * are owned by VMM and thus existence information like hotplug is +- * always required to be notified to VMM. Similar is processor idle +- * state which is also necessarily controlled by VMM. But for other +- * control bits like performance/throttle states, VMM may choose to +- * control or not upon its own policy. +- */ +-void processor_extcntl_init(void) +-{ +- if (!processor_extcntl_ops) +- arch_acpi_processor_init_extcntl(&processor_extcntl_ops); +-} +- +-/* + * This is called from ACPI processor init, and targeted to hold + * some tricky housekeeping jobs to satisfy external control model. 
+ * For example, we may put dependency parse stub here for idle +--- head-2011-03-11.orig/drivers/char/tpm/tpm_vtpm.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_vtpm.c 2011-02-01 15:03:03.000000000 +0100 @@ -16,6 +16,7 @@ #include @@ -3068,8 +3172,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/char/tpm/tpm_xen.c 2010-03-24 15:09:15.000000000 +0100 -+++ head-2010-05-25/drivers/char/tpm/tpm_xen.c 2010-04-15 13:41:56.000000000 +0200 +--- head-2011-03-11.orig/drivers/char/tpm/tpm_xen.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/char/tpm/tpm_xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -37,6 +37,7 @@ #include #include @@ -3078,9 +3182,29 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/misc/Kconfig 2010-05-25 09:31:21.000000000 +0200 -+++ head-2010-05-25/drivers/misc/Kconfig 2010-04-29 10:01:27.000000000 +0200 -@@ -313,7 +313,7 @@ config TI_DAC7512 +--- head-2011-03-11.orig/drivers/hwmon/coretemp-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/drivers/hwmon/coretemp-xen.c 2011-02-01 15:03:03.000000000 +0100 +@@ -233,7 +233,7 @@ static int adjust_tjmax(struct coretemp_ + if (err < 0) { + dev_warn(dev, + "Unable to access MSR 0xEE, for Tjmax, left" +- " at default"); ++ " at default\n"); + } else if (eax & 0x40000000) { + tjmax = tjmax_ee; + } +@@ -413,7 +413,7 @@ static int coretemp_device_add(unsigned + family 6 CPU */ + if ((info.x86 == 0x6) && (pdev_entry->x86_model > 0xf)) + printk(KERN_WARNING DRVNAME ": Unknown CPU " +- "model 0x%x", pdev_entry->x86_model); ++ "model 0x%x\n", pdev_entry->x86_model); + goto exit_entry_free; + } + +--- head-2011-03-11.orig/drivers/misc/Kconfig 2011-03-15 16:29:56.000000000 +0100 ++++ head-2011-03-11/drivers/misc/Kconfig 2011-02-01 15:03:03.000000000 +0100 +@@ -406,7 +406,7 @@ config TI_DAC7512 config VMWARE_BALLOON tristate "VMware Balloon Driver" @@ -3089,8 +3213,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches help This is VMware physical memory management driver which acts like a "balloon" that can be inflated to reclaim physical pages ---- head-2010-05-25.orig/drivers/pci/msi-xen.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/drivers/pci/msi-xen.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/drivers/pci/msi-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/pci/msi-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -18,6 +18,7 @@ #include #include @@ -3099,8 +3223,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include ---- head-2010-05-25.orig/drivers/xen/Kconfig 2010-03-31 14:01:28.000000000 +0200 -+++ head-2010-05-25/drivers/xen/Kconfig 2010-03-31 14:08:31.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/Kconfig 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/Kconfig 2011-02-02 15:37:42.000000000 +0100 @@ -23,6 +23,7 @@ config XEN_UNPRIVILEGED_GUEST select PM_SLEEP select PM_SLEEP_SMP if SMP @@ -3120,7 +3244,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches config XEN_SMPBOOT def_bool y depends on SMP && !PPC_XEN -@@ -375,7 +380,6 @@ config XEN_SCRUB_PAGES +@@ -372,7 +377,6 @@ config XEN_SCRUB_PAGES config XEN_DEV_EVTCHN tristate "Xen /dev/xen/evtchn device" @@ -3128,8 +3252,8 @@ Automatically created from 
"patches.kernel.org/patch-2.6.34" by xen-port-patches default PARAVIRT_XEN || XEN_PRIVILEGED_GUEST || m help The evtchn driver allows a userspace process to triger event ---- head-2010-05-25.orig/drivers/xen/balloon/balloon.c 2010-04-15 10:11:45.000000000 +0200 -+++ head-2010-05-25/drivers/xen/balloon/balloon.c 2010-04-15 11:00:29.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/balloon/balloon.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/balloon/balloon.c 2011-02-01 15:03:03.000000000 +0100 @@ -43,7 +43,7 @@ #include #include @@ -3139,17 +3263,17 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/blkback/blkback-pagemap.c 2009-06-09 15:01:37.000000000 +0200 -+++ head-2010-05-25/drivers/xen/blkback/blkback-pagemap.c 2010-04-15 13:39:30.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/blkback/blkback-pagemap.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkback/blkback-pagemap.c 2011-02-01 15:03:03.000000000 +0100 @@ -1,4 +1,5 @@ #include +#include #include "blkback-pagemap.h" static int blkback_pagemap_size; ---- head-2010-05-25.orig/drivers/xen/blkfront/vbd.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blkfront/vbd.c 2010-03-25 16:41:12.000000000 +0100 -@@ -314,15 +314,14 @@ xlvbd_init_blk_queue(struct gendisk *gd, +--- head-2011-03-11.orig/drivers/xen/blkfront/vbd.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blkfront/vbd.c 2011-02-01 15:03:03.000000000 +0100 +@@ -311,15 +311,14 @@ xlvbd_init_blk_queue(struct gendisk *gd, /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); @@ -3167,8 +3291,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); ---- head-2010-05-25.orig/drivers/xen/blktap2/blktap.h 2010-03-24 15:12:36.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/blktap.h 2010-04-15 11:24:08.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/blktap2/blktap.h 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/blktap.h 2011-02-01 15:03:03.000000000 +0100 @@ -1,6 +1,7 @@ #ifndef _BLKTAP_H_ #define _BLKTAP_H_ @@ -3177,9 +3301,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/blktap2/device.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/device.c 2010-04-19 14:54:02.000000000 +0200 -@@ -991,15 +991,14 @@ blktap_device_configure(struct blktap *t +--- head-2011-03-11.orig/drivers/xen/blktap2/device.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/device.c 2011-02-01 15:03:03.000000000 +0100 +@@ -988,15 +988,14 @@ blktap_device_configure(struct blktap *t /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, tap->params.sector_size); @@ -3197,8 +3321,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches /* Make sure buffer addresses are sector-aligned. 
*/ blk_queue_dma_alignment(rq, 511); ---- head-2010-05-25.orig/drivers/xen/blktap2/sysfs.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/drivers/xen/blktap2/sysfs.c 2010-05-25 09:25:30.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/blktap2/sysfs.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2/sysfs.c 2011-02-24 14:59:15.000000000 +0100 @@ -379,13 +379,15 @@ blktap_sysfs_destroy(struct blktap *tap) } @@ -3217,8 +3341,68 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches { int level; -@@ -400,7 +402,8 @@ CLASS_ATTR(verbosity, S_IRUSR | S_IWUSR, - blktap_sysfs_show_verbosity, blktap_sysfs_set_verbosity); +@@ -400,7 +402,8 @@ static CLASS_ATTR(verbosity, S_IRUSR | S + blktap_sysfs_show_verbosity, blktap_sysfs_set_verbosity); + + static ssize_t +-blktap_sysfs_show_devices(struct class *class, char *buf) ++blktap_sysfs_show_devices(struct class *class, struct class_attribute *attr, ++ char *buf) + { + int i, ret; + struct blktap *tap; +--- head-2011-03-11.orig/drivers/xen/blktap2-new/blktap.h 2011-02-24 14:08:40.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2-new/blktap.h 2011-02-24 15:00:29.000000000 +0100 +@@ -2,6 +2,7 @@ + #define _BLKTAP_H_ + + #include ++#include + #include + #include + #include +--- head-2011-03-11.orig/drivers/xen/blktap2-new/device.c 2011-02-24 14:51:43.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2-new/device.c 2011-02-24 15:01:27.000000000 +0100 +@@ -290,15 +290,14 @@ blktap_device_configure(struct blktap *t + + /* Hard sector size and max sectors impersonate the equiv. hardware. */ + blk_queue_logical_block_size(rq, params->sector_size); +- blk_queue_max_sectors(rq, 512); ++ blk_queue_max_hw_sectors(rq, 512); + + /* Each segment in a request is up to an aligned page in size. */ + blk_queue_segment_boundary(rq, PAGE_SIZE - 1); + blk_queue_max_segment_size(rq, PAGE_SIZE); + + /* Ensure a merged request will fit in a single I/O ring slot. */ +- blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); +- blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); ++ blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); + + /* Make sure buffer addresses are sector-aligned. 
*/ + blk_queue_dma_alignment(rq, 511); +--- head-2011-03-11.orig/drivers/xen/blktap2-new/sysfs.c 2011-02-24 14:40:36.000000000 +0100 ++++ head-2011-03-11/drivers/xen/blktap2-new/sysfs.c 2011-02-24 15:02:50.000000000 +0100 +@@ -213,13 +213,15 @@ blktap_sysfs_destroy(struct blktap *tap) + } + + static ssize_t +-blktap_sysfs_show_verbosity(struct class *class, char *buf) ++blktap_sysfs_show_verbosity(struct class *class, struct class_attribute *attr, ++ char *buf) + { + return sprintf(buf, "%d\n", blktap_debug_level); + } + + static ssize_t +-blktap_sysfs_set_verbosity(struct class *class, const char *buf, size_t size) ++blktap_sysfs_set_verbosity(struct class *class, struct class_attribute *attr, ++ const char *buf, size_t size) + { + int level; + +@@ -234,7 +236,8 @@ static CLASS_ATTR(verbosity, S_IRUGO|S_I + blktap_sysfs_show_verbosity, blktap_sysfs_set_verbosity); static ssize_t -blktap_sysfs_show_devices(struct class *class, char *buf) @@ -3227,8 +3411,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches { int i, ret; struct blktap *tap; ---- head-2010-05-25.orig/drivers/xen/char/mem.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/drivers/xen/char/mem.c 2010-04-15 10:48:32.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/char/mem.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/drivers/xen/char/mem.c 2011-02-01 15:03:03.000000000 +0100 @@ -3,7 +3,7 @@ * * Copyright (C) 1991, 1992 Linus Torvalds @@ -3309,8 +3493,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } mutex_unlock(&file->f_path.dentry->d_inode->i_mutex); return ret; ---- head-2010-05-25.orig/drivers/xen/core/evtchn.c 2010-03-31 14:37:57.000000000 +0200 -+++ head-2010-05-25/drivers/xen/core/evtchn.c 2010-04-15 11:03:28.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-02-01 15:03:03.000000000 +0100 @@ -31,6 +31,7 @@ */ @@ -3319,8 +3503,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/core/gnttab.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/gnttab.c 2010-04-15 11:04:07.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/core/gnttab.c 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/gnttab.c 2011-02-01 15:03:03.000000000 +0100 @@ -32,6 +32,7 @@ */ @@ -3329,8 +3513,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/core/hypervisor_sysfs.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/hypervisor_sysfs.c 2010-03-25 14:27:48.000000000 +0100 +--- head-2011-03-11.orig/drivers/xen/core/hypervisor_sysfs.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/hypervisor_sysfs.c 2011-02-01 15:03:03.000000000 +0100 @@ -36,7 +36,7 @@ static ssize_t hyp_sysfs_store(struct ko return 0; } @@ -3340,9 +3524,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches .show = hyp_sysfs_show, .store = hyp_sysfs_store, }; ---- head-2010-05-25.orig/drivers/xen/core/reboot.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/reboot.c 2010-04-15 11:07:05.000000000 +0200 -@@ -3,6 +3,7 @@ +--- head-2011-03-11.orig/drivers/xen/core/reboot.c 2011-02-01 14:54:13.000000000 +0100 ++++ 
head-2011-03-11/drivers/xen/core/reboot.c 2011-02-01 15:03:03.000000000 +0100 +@@ -2,6 +2,7 @@ #include #include #include @@ -3350,18 +3534,18 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/core/spinlock.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/spinlock.c 2010-04-15 10:14:50.000000000 +0200 -@@ -22,7 +22,7 @@ struct spinning { +--- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:17:10.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:33:52.000000000 +0100 +@@ -20,7 +20,7 @@ struct spinning { unsigned int ticket; struct spinning *prev; }; -static DEFINE_PER_CPU(struct spinning *, spinning); +static DEFINE_PER_CPU(struct spinning *, _spinning); + static DEFINE_PER_CPU(evtchn_port_t, poll_evtchn); /* * Protect removal of objects: Addition can be done lockless, and even - * removal itself doesn't need protection - what needs to be prevented is -@@ -78,7 +78,7 @@ static unsigned int spin_adjust(struct s +@@ -92,7 +92,7 @@ static unsigned int spin_adjust(struct s unsigned int xen_spin_adjust(const arch_spinlock_t *lock, unsigned int token) { @@ -3370,7 +3554,7 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches } bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, -@@ -97,9 +97,9 @@ bool xen_spin_wait(arch_spinlock_t *lock +@@ -111,9 +111,9 @@ bool xen_spin_wait(arch_spinlock_t *lock /* announce we're spinning */ spinning.ticket = *ptok >> TICKET_SHIFT; spinning.lock = lock; @@ -3382,26 +3566,26 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches upcall_mask = current_vcpu_info()->evtchn_upcall_mask; do { -@@ -184,7 +184,7 @@ bool xen_spin_wait(arch_spinlock_t *lock +@@ -199,7 +199,7 @@ bool xen_spin_wait(arch_spinlock_t *lock /* announce we're done */ other = spinning.prev; - percpu_write(spinning, other); + percpu_write(_spinning, other); - rm_lock = &__get_cpu_var(spinning_rm_lock); raw_local_irq_disable(); - arch_write_lock(rm_lock); -@@ -228,7 +228,7 @@ void xen_spin_kick(arch_spinlock_t *lock - raw_local_irq_save(flags); - arch_read_lock(rm_lock); - -- spinning = per_cpu(spinning, cpu); -+ spinning = per_cpu(_spinning, cpu); - smp_rmb(); - while (spinning) { - if (spinning->lock == lock && spinning->ticket == token) ---- head-2010-05-25.orig/drivers/xen/core/xen_sysfs.c 2010-03-24 15:10:37.000000000 +0100 -+++ head-2010-05-25/drivers/xen/core/xen_sysfs.c 2010-04-15 11:04:56.000000000 +0200 + rm_idx = percpu_read(rm_seq.idx); + smp_wmb(); +@@ -267,7 +267,7 @@ void xen_spin_kick(arch_spinlock_t *lock + #else + smp_mb(); + #endif +- spinning = per_cpu(spinning, cpu); ++ spinning = per_cpu(_spinning, cpu); + smp_rmb(); + if (rm_idx == per_cpu(rm_seq.idx, cpu)) + break; +--- head-2011-03-11.orig/drivers/xen/core/xen_sysfs.c 2011-01-31 18:01:51.000000000 +0100 ++++ head-2011-03-11/drivers/xen/core/xen_sysfs.c 2011-02-01 15:03:03.000000000 +0100 @@ -11,6 +11,7 @@ #include #include @@ -3410,8 +3594,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/fbfront/xenfb.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/fbfront/xenfb.c 2010-04-15 11:11:34.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/fbfront/xenfb.c 2011-02-17 10:16:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/fbfront/xenfb.c 2011-02-01 15:03:03.000000000 
+0100 @@ -25,6 +25,7 @@ #include #include @@ -3420,8 +3604,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/fbfront/xenkbd.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/fbfront/xenkbd.c 2010-04-15 11:11:42.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/fbfront/xenkbd.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/drivers/xen/fbfront/xenkbd.c 2011-02-01 15:03:03.000000000 +0100 @@ -20,6 +20,7 @@ #include #include @@ -3430,8 +3614,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/gntdev/gntdev.c 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/gntdev/gntdev.c 2010-04-15 11:13:05.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/gntdev/gntdev.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/gntdev/gntdev.c 2011-02-01 15:03:03.000000000 +0100 @@ -23,6 +23,7 @@ #include #include @@ -3440,8 +3624,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/netfront/netfront.h 2010-03-24 15:10:29.000000000 +0100 -+++ head-2010-05-25/drivers/xen/netfront/netfront.h 2010-04-15 11:15:06.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/netfront/netfront.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/netfront/netfront.h 2011-02-09 15:03:03.000000000 +0100 @@ -34,6 +34,7 @@ #define NETFRONT_H @@ -3450,8 +3634,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/pciback/conf_space_capability_msi.c 2008-09-15 13:40:15.000000000 +0200 -+++ head-2010-05-25/drivers/xen/pciback/conf_space_capability_msi.c 2010-04-15 11:21:45.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/pciback/conf_space_capability_msi.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/conf_space_capability_msi.c 2011-02-01 15:03:03.000000000 +0100 @@ -1,12 +1,10 @@ /* * PCI Backend -- Configuration overlay for MSI capability @@ -3466,8 +3650,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches int pciback_enable_msi(struct pciback_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) ---- head-2010-05-25.orig/drivers/xen/pciback/pciback.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/pciback.h 2010-04-15 11:20:39.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/pciback/pciback.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/pciback.h 2011-02-01 15:03:03.000000000 +0100 @@ -6,6 +6,7 @@ #ifndef __XEN_PCIBACK_H__ #define __XEN_PCIBACK_H__ @@ -3476,8 +3660,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/pciback/slot.c 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/slot.c 2010-04-15 11:21:14.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/pciback/slot.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/slot.c 2011-02-01 15:03:03.000000000 +0100 @@ -6,10 +6,6 @@ * Author: Tristan Gingold , from vpci.c */ @@ -3489,8 +3673,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include "pciback.h" /* There are at 
most 32 slots in a pci bus. */ ---- head-2010-05-25.orig/drivers/xen/pciback/vpci.c 2009-03-18 10:39:32.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pciback/vpci.c 2010-04-15 11:21:09.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/pciback/vpci.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pciback/vpci.c 2011-02-01 15:03:03.000000000 +0100 @@ -5,10 +5,6 @@ * Author: Ryan Wilson */ @@ -3502,8 +3686,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include "pciback.h" #define PCI_SLOT_MAX 32 ---- head-2010-05-25.orig/drivers/xen/pcifront/pcifront.h 2010-03-24 15:08:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/pcifront/pcifront.h 2010-04-15 11:14:10.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/pcifront/pcifront.h 2011-01-31 17:32:16.000000000 +0100 ++++ head-2011-03-11/drivers/xen/pcifront/pcifront.h 2011-02-01 15:03:03.000000000 +0100 @@ -6,6 +6,7 @@ #ifndef __XEN_PCIFRONT_H__ #define __XEN_PCIFRONT_H__ @@ -3512,9 +3696,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/scsiback/xenbus.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsiback/xenbus.c 2010-03-25 14:20:20.000000000 +0100 -@@ -353,7 +353,7 @@ fail: +--- head-2011-03-11.orig/drivers/xen/scsiback/xenbus.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/drivers/xen/scsiback/xenbus.c 2011-02-01 15:03:03.000000000 +0100 +@@ -355,7 +355,7 @@ fail: } @@ -3523,17 +3707,17 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches { "vscsi" }, { "" } }; ---- head-2010-05-25.orig/drivers/xen/scsifront/xenbus.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/scsifront/xenbus.c 2010-04-15 11:07:44.000000000 +0200 -@@ -30,6 +30,7 @@ - +--- head-2011-03-11.orig/drivers/xen/scsifront/xenbus.c 2011-02-08 10:05:30.000000000 +0100 ++++ head-2011-03-11/drivers/xen/scsifront/xenbus.c 2011-02-08 10:06:25.000000000 +0100 +@@ -34,6 +34,7 @@ + */ #include +#include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) -@@ -393,7 +394,7 @@ static void scsifront_backend_changed(st +@@ -398,7 +399,7 @@ static void scsifront_backend_changed(st } @@ -3542,8 +3726,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches { "vscsi" }, { "" } }; ---- head-2010-05-25.orig/drivers/xen/sfc_netfront/accel.h 2010-03-24 15:12:46.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netfront/accel.h 2010-04-15 11:23:26.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/sfc_netfront/accel.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netfront/accel.h 2011-02-01 15:03:03.000000000 +0100 @@ -35,6 +35,7 @@ #include @@ -3552,8 +3736,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include ---- head-2010-05-25.orig/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2008-02-20 09:32:49.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2010-04-15 11:11:11.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2008-02-20 09:32:49.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2011-02-01 15:03:03.000000000 +0100 @@ -24,6 +24,7 @@ #include /* needed for linux/random.h */ @@ -3562,8 +3746,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include "accel_cuckoo_hash.h" #include 
"accel_util.h" ---- head-2010-05-25.orig/drivers/xen/sfc_netutil/accel_util.c 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-05-25/drivers/xen/sfc_netutil/accel_util.c 2010-04-15 11:10:59.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/sfc_netutil/accel_util.c 2011-01-31 17:49:31.000000000 +0100 ++++ head-2011-03-11/drivers/xen/sfc_netutil/accel_util.c 2011-02-01 15:03:03.000000000 +0100 @@ -22,6 +22,7 @@ **************************************************************************** */ @@ -3572,8 +3756,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_client.c 2010-03-24 15:17:58.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_client.c 2010-04-15 11:16:49.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_client.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_client.c 2011-02-01 15:03:03.000000000 +0100 @@ -30,8 +30,8 @@ * IN THE SOFTWARE. */ @@ -3583,9 +3767,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches +#if defined(CONFIG_XEN) || defined(MODULE) #include #include - #include ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_dev.c 2010-03-24 15:32:27.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_dev.c 2010-04-15 11:19:13.000000000 +0200 + #else +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_dev.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_dev.c 2011-02-01 15:03:03.000000000 +0100 @@ -33,6 +33,7 @@ */ @@ -3594,8 +3778,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe.c 2010-04-15 11:18:19.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 15:03:03.000000000 +0100 @@ -47,6 +47,7 @@ #include #include @@ -3604,8 +3788,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include ---- head-2010-05-25.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/drivers/xen/xenbus/xenbus_probe_backend.c 2010-04-15 11:18:42.000000000 +0200 +--- head-2011-03-11.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-11/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 15:03:03.000000000 +0100 @@ -42,6 +42,7 @@ #include #include @@ -3614,8 +3798,8 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include ---- head-2010-05-25.orig/fs/proc/kcore.c 2010-05-25 09:31:21.000000000 +0200 -+++ head-2010-05-25/fs/proc/kcore.c 2010-04-15 10:15:01.000000000 +0200 +--- head-2011-03-11.orig/fs/proc/kcore.c 2011-03-15 16:29:56.000000000 +0100 ++++ head-2011-03-11/fs/proc/kcore.c 2011-02-01 15:03:03.000000000 +0100 @@ -130,7 +130,7 @@ static void __kcore_update_ram(struct li } @@ -3637,8 +3821,30 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches ent->type = KCORE_RAM; list_add(&ent->list, &head); __kcore_update_ram(&head); ---- head-2010-05-25.orig/include/xen/xenbus.h 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/include/xen/xenbus.h 2010-04-15 11:30:32.000000000 +0200 
+--- head-2011-03-11.orig/include/acpi/processor.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/include/acpi/processor.h 2011-02-01 15:03:03.000000000 +0100 +@@ -450,11 +450,9 @@ static inline int processor_pmthr_extern + + extern int processor_notify_external(struct acpi_processor *pr, + int event, int type); +-extern void processor_extcntl_init(void); + extern int processor_extcntl_prepare(struct acpi_processor *pr); + extern int acpi_processor_get_performance_info(struct acpi_processor *pr); + extern int acpi_processor_get_psd(struct acpi_processor *pr); +-void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **); + #else + static inline int processor_cntl_external(void) {return 0;} + static inline int processor_pm_external(void) {return 0;} +@@ -465,7 +463,6 @@ static inline int processor_notify_exter + { + return 0; + } +-static inline void processor_extcntl_init(void) {} + static inline int processor_extcntl_prepare(struct acpi_processor *pr) + { + return 0; +--- head-2011-03-11.orig/include/xen/xenbus.h 2011-02-02 16:59:07.000000000 +0100 ++++ head-2011-03-11/include/xen/xenbus.h 2011-02-01 15:03:03.000000000 +0100 @@ -39,6 +39,7 @@ #include #include @@ -3647,56 +3853,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/kernel/early_res.c 2010-05-25 09:31:21.000000000 +0200 -+++ head-2010-05-25/kernel/early_res.c 2010-04-15 10:17:11.000000000 +0200 -@@ -321,11 +321,19 @@ void __init free_early(u64 start, u64 en - - i = find_overlapped_early(start, end); - r = &early_res[i]; -+#ifdef CONFIG_XEN /* Shouldn't it always be this way? */ -+ if (i >= max_early_res || r->end < end || r->start > start) -+ panic("free_early on not reserved area: %llx-%llx!", -+ start, end - 1); -+ -+ drop_range_partial(i, start, end); -+#else - if (i >= max_early_res || r->end != end || r->start != start) - panic("free_early on not reserved area: %llx-%llx!", - start, end - 1); - - drop_range(i); -+#endif - } - - void __init free_early_partial(u64 start, u64 end) -@@ -393,9 +401,7 @@ static void __init subtract_early_res(st - int __init get_free_all_memory_range(struct range **rangep, int nodeid) - { - int i, count; -- u64 start = 0, end; -- u64 size; -- u64 mem; -+ u64 end, size, mem = -1ULL; - struct range *range; - int nr_range; - -@@ -409,9 +415,11 @@ int __init get_free_all_memory_range(str - end = get_max_mapped(); - #ifdef MAX_DMA32_PFN - if (end > (MAX_DMA32_PFN << PAGE_SHIFT)) -- start = MAX_DMA32_PFN << PAGE_SHIFT; -+ mem = find_fw_memmap_area(MAX_DMA32_PFN << PAGE_SHIFT, end, -+ size, sizeof(struct range)); - #endif -- mem = find_fw_memmap_area(start, end, size, sizeof(struct range)); -+ if (mem == -1ULL) -+ mem = find_fw_memmap_area(0, end, size, sizeof(struct range)); - if (mem == -1ULL) - panic("can not find more space for range free"); - ---- head-2010-05-25.orig/kernel/resource.c 2010-05-25 09:31:21.000000000 +0200 -+++ head-2010-05-25/kernel/resource.c 2010-04-15 10:17:16.000000000 +0200 -@@ -343,6 +343,7 @@ int walk_system_ram_range(unsigned long +--- head-2011-03-11.orig/kernel/resource.c 2011-03-15 16:29:56.000000000 +0100 ++++ head-2011-03-11/kernel/resource.c 2011-02-01 15:03:03.000000000 +0100 +@@ -344,6 +344,7 @@ int walk_system_ram_range(unsigned long #endif @@ -3704,16 +3863,16 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) { return 1; -@@ -355,6 +356,7 @@ int 
__weak page_is_ram(unsigned long pfn +@@ -356,6 +357,7 @@ int __weak page_is_ram(unsigned long pfn { return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; } +#endif - /* - * Find empty slot in the resource tree given range and alignment. ---- head-2010-05-25.orig/lib/swiotlb-xen.c 2010-03-24 16:00:05.000000000 +0100 -+++ head-2010-05-25/lib/swiotlb-xen.c 2010-04-15 10:54:48.000000000 +0200 + void __weak arch_remove_reservations(struct resource *avail) + { +--- head-2011-03-11.orig/lib/swiotlb-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-11/lib/swiotlb-xen.c 2011-02-01 15:03:03.000000000 +0100 @@ -25,6 +25,8 @@ #include #include @@ -3723,9 +3882,9 @@ Automatically created from "patches.kernel.org/patch-2.6.34" by xen-port-patches #include #include #include ---- head-2010-05-25.orig/mm/page_alloc.c 2010-03-24 15:25:21.000000000 +0100 -+++ head-2010-05-25/mm/page_alloc.c 2010-03-25 16:31:14.000000000 +0100 -@@ -4696,7 +4696,8 @@ static void __setup_per_zone_wmarks(void +--- head-2011-03-11.orig/mm/page_alloc.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-11/mm/page_alloc.c 2011-02-08 10:06:32.000000000 +0100 +@@ -5015,7 +5015,8 @@ static void __setup_per_zone_wmarks(void high = percpu_pagelist_fraction ? zone->present_pages / percpu_pagelist_fraction : 5 * zone_batchsize(zone); diff --git a/patches.xen/xen3-patch-2.6.35 b/patches.xen/xen3-patch-2.6.35 new file mode 100644 index 0000000..617d628 --- /dev/null +++ b/patches.xen/xen3-patch-2.6.35 @@ -0,0 +1,2667 @@ +From: Linux Kernel Mailing List +Subject: Linux: 2.6.35 +Patch-mainline: 2.6.35 + + This patch contains the differences between 2.6.34 and 2.6.35. + +Acked-by: Jeff Mahoney + +Automatically created from "patches.kernel.org/patch-2.6.35" by xen-port-patches.py + +--- head-2011-03-17.orig/arch/x86/include/asm/thread_info.h 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/thread_info.h 2011-02-01 15:03:10.000000000 +0100 +@@ -153,8 +153,7 @@ struct thread_info { + (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) + + #else +-#define _TIF_WORK_CTXSW (_TIF_NOTSC \ +- /*todo | _TIF_DEBUGCTLMSR | _TIF_DS_AREA_MSR */) ++#define _TIF_WORK_CTXSW (_TIF_NOTSC /*todo | _TIF_BLOCKSTEP */) + #endif + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-03-11 11:14:34.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 15:03:10.000000000 +0100 +@@ -70,7 +70,7 @@ extern start_info_t *xen_start_info; + #define is_initial_xendomain() 0 + #endif + +-#define init_hypervisor(c) ((void)((c)->x86_hyper_vendor = X86_HYPER_VENDOR_XEN)) ++#define init_hypervisor(c) ((void)(c)) + #define init_hypervisor_platform() init_hypervisor(&boot_cpu_data) + + DECLARE_PER_CPU(struct vcpu_runstate_info, runstate); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/perf_event.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/perf_event.h 2011-02-01 15:03:10.000000000 +0100 +@@ -10,6 +10,15 @@ + */ + #define PERF_EFLAGS_EXACT (1UL << 3) + ++#define perf_instruction_pointer(regs) instruction_pointer(regs) ++ ++#define perf_misc_flags(regs) ({ \ ++ struct pt_regs *_r_ = (regs); \ ++ unsigned long _f_ = user_mode(_r_) ? PERF_RECORD_MISC_USER \ ++ : PERF_RECORD_MISC_KERNEL; \ ++ _r_->flags & PERF_EFLAGS_EXACT ? 
_f_ | PERF_RECORD_MISC_EXACT_IP : _f_; \ ++}) ++ + #endif + + static inline void init_hw_perf_events(void) {} +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:46:54.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:47:17.000000000 +0100 +@@ -21,7 +21,6 @@ struct mm_struct; + #include + #include + #include +-#include + + #include + #include +@@ -29,6 +28,8 @@ struct mm_struct; + #include + #include + #include ++#include ++ + #include + + #define HBP_NUM 4 +@@ -124,7 +125,6 @@ struct cpuinfo_x86 { + /* Index into per_cpu list: */ + u16 cpu_index; + #endif +- unsigned int x86_hyper_vendor; + } __attribute__((__aligned__(SMP_CACHE_BYTES))); + + #define X86_VENDOR_INTEL 0 +@@ -138,10 +138,6 @@ struct cpuinfo_x86 { + + #define X86_VENDOR_UNKNOWN 0xff + +-#define X86_HYPER_VENDOR_NONE 0 +-#define X86_HYPER_VENDOR_VMWARE 1 +-#define X86_HYPER_VENDOR_XEN 'X' +- + /* + * capabilities of CPUs + */ +@@ -396,6 +392,10 @@ union thread_xstate { + struct xsave_struct xsave; + }; + ++struct fpu { ++ union thread_xstate *state; ++}; ++ + #ifdef CONFIG_X86_64 + #ifndef CONFIG_X86_NO_TSS + DECLARE_PER_CPU(struct orig_ist, orig_ist); +@@ -474,7 +474,7 @@ struct thread_struct { + unsigned long trap_no; + unsigned long error_code; + /* floating point and extended processor state */ +- union thread_xstate *xstate; ++ struct fpu fpu; + #ifdef CONFIG_X86_32 + /* Virtual 86 mode info */ + struct vm86_struct __user *vm86_info; +@@ -487,10 +487,6 @@ struct thread_struct { + unsigned long iopl; + /* Max allowed port in the bitmap, in bytes: */ + unsigned io_bitmap_max; +-/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ +- unsigned long debugctlmsr; +- /* Debug Store context; see asm/ds.h */ +- struct ds_context *ds_ctx; + }; + + static inline unsigned long xen_get_debugreg(int regno) +@@ -749,6 +745,8 @@ static inline void wbinvd_halt(void) + extern void enable_sep_cpu(void); + extern int sysenter_setup(void); + ++extern void early_trap_init(void); ++ + /* Defined in head.S */ + extern struct desc_ptr early_gdt_descr; + +@@ -759,7 +757,7 @@ extern void cpu_init(void); + + static inline unsigned long get_debugctlmsr(void) + { +- unsigned long debugctlmsr = 0; ++ unsigned long debugctlmsr = 0; + + #ifndef CONFIG_X86_DEBUGCTLMSR + if (boot_cpu_data.x86 < 6) +@@ -767,21 +765,6 @@ static inline unsigned long get_debugctl + #endif + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); + +- return debugctlmsr; +-} +- +-static inline unsigned long get_debugctlmsr_on_cpu(int cpu) +-{ +- u64 debugctlmsr = 0; +- u32 val1, val2; +- +-#ifndef CONFIG_X86_DEBUGCTLMSR +- if (boot_cpu_data.x86 < 6) +- return 0; +-#endif +- rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2); +- debugctlmsr = val1 | ((u64)val2 << 32); +- + return debugctlmsr; + } + +@@ -794,18 +777,6 @@ static inline void update_debugctlmsr(un + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); + } + +-static inline void update_debugctlmsr_on_cpu(int cpu, +- unsigned long debugctlmsr) +-{ +-#ifndef CONFIG_X86_DEBUGCTLMSR +- if (boot_cpu_data.x86 < 6) +- return; +-#endif +- wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, +- (u32)((u64)debugctlmsr), +- (u32)((u64)debugctlmsr >> 32)); +-} +- + /* + * from system description table in BIOS. 
Mostly for MCA use, but + * others may find it useful: +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:10:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:10:31.000000000 +0100 +@@ -435,7 +435,7 @@ void stop_this_cpu(void *dummy); + * + * (Could use an alternative three way for this if there was one.) + */ +-static inline void rdtsc_barrier(void) ++static __always_inline void rdtsc_barrier(void) + { + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); +--- head-2011-03-17.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -63,7 +63,7 @@ static int xen_cx_notifier(struct acpi_p + data->reg.space_id = cx->reg.space_id; + data->reg.bit_width = cx->reg.bit_width; + data->reg.bit_offset = cx->reg.bit_offset; +- data->reg.access_size = cx->reg.reserved; ++ data->reg.access_size = cx->reg.access_size; + data->reg.address = cx->reg.address; + + /* Get dependency relationships */ +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -164,13 +164,16 @@ static int __init acpi_sleep_setup(char + #ifdef CONFIG_HIBERNATION + if (strncmp(str, "s4_nohwsig", 10) == 0) + acpi_no_s4_hw_signature(); +- if (strncmp(str, "s4_nonvs", 8) == 0) +- acpi_s4_no_nvs(); ++ if (strncmp(str, "s4_nonvs", 8) == 0) { ++ pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, " ++ "please use acpi_sleep=nonvs instead"); ++ acpi_nvs_nosave(); ++ } + #endif ++ if (strncmp(str, "nonvs", 5) == 0) ++ acpi_nvs_nosave(); + if (strncmp(str, "old_ordering", 12) == 0) + acpi_old_suspend_ordering(); +- if (strncmp(str, "sci_force_enable", 16) == 0) +- acpi_set_sci_en_on_resume(); + str = strchr(str, ','); + if (str != NULL) + str += strspn(str, ", \t"); +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -99,6 +99,9 @@ int nr_ioapics; + /* IO APIC gsi routing info */ + struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; + ++/* The one past the highest gsi number used */ ++u32 gsi_top; ++ + /* MP IRQ source entries */ + struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; + +@@ -1063,10 +1066,9 @@ static inline int irq_trigger(int idx) + return MPBIOS_trigger(idx); + } + +-int (*ioapic_renumber_irq)(int ioapic, int irq); + static int pin_2_irq(int idx, int apic, int pin) + { +- int irq, i; ++ int irq; + int bus = mp_irqs[idx].srcbus; + + /* +@@ -1078,18 +1080,12 @@ static int pin_2_irq(int idx, int apic, + if (test_bit(bus, mp_bus_not_pci)) { + irq = mp_irqs[idx].srcbusirq; + } else { +- /* +- * PCI IRQs are mapped in order +- */ +- i = irq = 0; +- while (i < apic) +- irq += nr_ioapic_registers[i++]; +- irq += pin; +- /* +- * For MPS mode, so far only needed by ES7000 platform +- */ +- if (ioapic_renumber_irq) +- irq = ioapic_renumber_irq(apic, irq); ++ u32 gsi = mp_gsi_routing[apic].gsi_base + pin; ++ ++ if (gsi >= NR_IRQS_LEGACY) ++ irq = gsi; ++ else ++ irq = gsi_top + gsi; + } + + #ifdef CONFIG_X86_32 +@@ -2027,31 +2023,15 @@ fs_initcall(print_ICs); + + /* Where if anywhere is the i8259 connect in external int mode */ + static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; +-#endif /* 
!CONFIG_XEN */ + + void __init enable_IO_APIC(void) + { +- union IO_APIC_reg_01 reg_01; +-#ifndef CONFIG_XEN + int i8259_apic, i8259_pin; +-#endif + int apic; +- unsigned long flags; +- +- /* +- * The number of IO-APIC IRQ registers (== #pins): +- */ +- for (apic = 0; apic < nr_ioapics; apic++) { +- raw_spin_lock_irqsave(&ioapic_lock, flags); +- reg_01.raw = io_apic_read(apic, 1); +- raw_spin_unlock_irqrestore(&ioapic_lock, flags); +- nr_ioapic_registers[apic] = reg_01.bits.entries+1; +- } + + if (!legacy_pic->nr_legacy_irqs) + return; + +-#ifndef CONFIG_XEN + for(apic = 0; apic < nr_ioapics; apic++) { + int pin; + /* See if any of the pins is in ExtINT mode */ +@@ -2094,10 +2074,8 @@ void __init enable_IO_APIC(void) + * Do not trust the IO-APIC being empty at bootup + */ + clear_IO_APIC(); +-#endif + } + +-#ifndef CONFIG_XEN + /* + * Not an __init, needed by the reboot code + */ +@@ -3229,13 +3207,9 @@ out: + void __init setup_IO_APIC(void) + { + +-#ifdef CONFIG_XEN +- enable_IO_APIC(); +-#else + /* + * calling enable_IO_APIC() is moved to setup_local_APIC for BP + */ +-#endif + io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; + + apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); +@@ -3969,28 +3943,21 @@ int __init io_apic_get_redir_entries (in + reg_01.raw = io_apic_read(ioapic, 1); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + +- return reg_01.bits.entries; ++ /* The register returns the maximum index redir index ++ * supported, which is one less than the total number of redir ++ * entries. ++ */ ++ return reg_01.bits.entries + 1; + } + + #ifndef CONFIG_XEN + void __init probe_nr_irqs_gsi(void) + { +- int nr = 0; ++ int nr; + +- nr = acpi_probe_gsi(); +- if (nr > nr_irqs_gsi) { ++ nr = gsi_top + NR_IRQS_LEGACY; ++ if (nr > nr_irqs_gsi) + nr_irqs_gsi = nr; +- } else { +- /* for acpi=off or acpi is not compiled in */ +- int idx; +- +- nr = 0; +- for (idx = 0; idx < nr_ioapics; idx++) +- nr += io_apic_get_redir_entries(idx) + 1; +- +- if (nr > nr_irqs_gsi) +- nr_irqs_gsi = nr; +- } + + printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); + } +@@ -4207,22 +4174,27 @@ int __init io_apic_get_version(int ioapi + return reg_01.bits.version; + } + +-int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) ++int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) + { +- int i; ++ int ioapic, pin, idx; + + if (skip_ioapic_setup) + return -1; + +- for (i = 0; i < mp_irq_entries; i++) +- if (mp_irqs[i].irqtype == mp_INT && +- mp_irqs[i].srcbusirq == bus_irq) +- break; +- if (i >= mp_irq_entries) ++ ioapic = mp_find_ioapic(gsi); ++ if (ioapic < 0) ++ return -1; ++ ++ pin = mp_find_ioapic_pin(ioapic, gsi); ++ if (pin < 0) ++ return -1; ++ ++ idx = find_irq_entry(ioapic, pin, mp_INT); ++ if (idx < 0) + return -1; + +- *trigger = irq_trigger(i); +- *polarity = irq_polarity(i); ++ *trigger = irq_trigger(idx); ++ *polarity = irq_polarity(idx); + return 0; + } + +@@ -4365,7 +4337,7 @@ void __init ioapic_insert_resources(void + } + #endif /* !CONFIG_XEN */ + +-int mp_find_ioapic(int gsi) ++int mp_find_ioapic(u32 gsi) + { + int i = 0; + +@@ -4380,7 +4352,7 @@ int mp_find_ioapic(int gsi) + return -1; + } + +-int mp_find_ioapic_pin(int ioapic, int gsi) ++int mp_find_ioapic_pin(int ioapic, u32 gsi) + { + if (WARN_ON(ioapic == -1)) + return -1; +@@ -4408,6 +4380,7 @@ static int bad_ioapic(unsigned long addr + void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) + { + int idx = 0; ++ int entries; + + if (bad_ioapic(address)) + return; +@@ -4428,9 +4401,17 @@ 
void __init mp_register_ioapic(int id, u + * Build basic GSI lookup table to facilitate gsi->io_apic lookups + * and to prevent reprogramming of IOAPIC pins (PCI GSIs). + */ ++ entries = io_apic_get_redir_entries(idx); + mp_gsi_routing[idx].gsi_base = gsi_base; +- mp_gsi_routing[idx].gsi_end = gsi_base + +- io_apic_get_redir_entries(idx); ++ mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1; ++ ++ /* ++ * The number of IO-APIC IRQ registers (== #pins): ++ */ ++ nr_ioapic_registers[idx] = entries; ++ ++ if (mp_gsi_routing[idx].gsi_end >= gsi_top) ++ gsi_top = mp_gsi_routing[idx].gsi_end + 1; + + printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " + "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, +--- head-2011-03-17.orig/arch/x86/kernel/cpu/Makefile 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/Makefile 2011-02-01 15:03:10.000000000 +0100 +@@ -34,8 +34,8 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ + + obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o + +-disabled-obj-$(CONFIG_XEN) := hypervisor.o perfctr-watchdog.o perf_event.o \ +- sched.o vmware.o ++disabled-obj-$(CONFIG_XEN) := hypervisor.o mshyperv.o perfctr-watchdog.o \ ++ perf_event.o sched.o vmware.o + + quiet_cmd_mkcapflags = MKCAP $@ + cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:34.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:47.000000000 +0100 +@@ -1150,6 +1150,20 @@ static void clear_all_debug_regs(void) + } + } + ++#ifdef CONFIG_KGDB ++/* ++ * Restore debug regs if using kgdbwait and you have a kernel debugger ++ * connection established. ++ */ ++static void dbg_restore_debug_regs(void) ++{ ++ if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) ++ arch_kgdb_ops.correct_hw_break(); ++} ++#else /* ! CONFIG_KGDB */ ++#define dbg_restore_debug_regs() ++#endif /* ! CONFIG_KGDB */ ++ + /* + * cpu_init() initializes state that is per-CPU. Some data is already + * initialized (naturally) in the bootstrap process, such as the GDT +@@ -1180,9 +1194,9 @@ void __cpuinit cpu_init(void) + #endif + + #ifdef CONFIG_NUMA +- if (cpu != 0 && percpu_read(node_number) == 0 && +- cpu_to_node(cpu) != NUMA_NO_NODE) +- percpu_write(node_number, cpu_to_node(cpu)); ++ if (cpu != 0 && percpu_read(numa_node) == 0 && ++ early_cpu_to_node(cpu) != NUMA_NO_NODE) ++ set_numa_node(early_cpu_to_node(cpu)); + #endif + + me = current; +@@ -1255,18 +1269,8 @@ void __cpuinit cpu_init(void) + #endif + load_LDT(&init_mm.context); + +-#ifdef CONFIG_KGDB +- /* +- * If the kgdb is connected no debug regs should be altered. This +- * is only applicable when KGDB and a KGDB I/O module are built +- * into the kernel and you are using early debugging with +- * kgdbwait. KGDB will control the kernel HW breakpoint registers. 
+- */ +- if (kgdb_connected && arch_kgdb_ops.correct_hw_break) +- arch_kgdb_ops.correct_hw_break(); +- else +-#endif +- clear_all_debug_regs(); ++ clear_all_debug_regs(); ++ dbg_restore_debug_regs(); + + fpu_init(); + +@@ -1330,14 +1334,12 @@ void __cpuinit cpu_init(void) + #endif + + clear_all_debug_regs(); ++ dbg_restore_debug_regs(); + + /* + * Force FPU initialization: + */ +- if (cpu_has_xsave) +- current_thread_info()->status = TS_XSAVE; +- else +- current_thread_info()->status = 0; ++ current_thread_info()->status = 0; + clear_used_math(); + mxcsr_feature_mask_init(); + +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -776,7 +776,7 @@ static int __init e820_mark_nvs_memory(v + struct e820entry *ei = &e820.map[i]; + + if (ei->type == E820_NVS) +- hibernate_nvs_register(ei->addr, ei->size); ++ suspend_nvs_register(ei->addr, ei->size); + } + + return 0; +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -41,6 +41,14 @@ static void early_vga_write(struct conso + writew(0x720, VGABASE + 2*(max_xpos*j + i)); + current_ypos = max_ypos-1; + } ++#ifdef CONFIG_KGDB_KDB ++ if (c == '\b') { ++ if (current_xpos > 0) ++ current_xpos--; ++ } else if (c == '\r') { ++ current_xpos = 0; ++ } else ++#endif + if (c == '\n') { + current_xpos = 0; + current_ypos++; +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:03:10.000000000 +0100 +@@ -53,6 +53,7 @@ + #include + #include + #include ++#include + #include + + /* Avoid __ASSEMBLER__'ifying just for this. */ +@@ -1123,7 +1124,25 @@ ENTRY(simd_coprocessor_error) + RING0_INT_FRAME + pushl $0 + CFI_ADJUST_CFA_OFFSET 4 ++#ifdef CONFIG_X86_INVD_BUG ++ /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ ++661: pushl $do_general_protection ++662: ++.section .altinstructions,"a" ++ .balign 4 ++ .long 661b ++ .long 663f ++ .byte X86_FEATURE_XMM ++ .byte 662b-661b ++ .byte 664f-663f ++.previous ++.section .altinstr_replacement,"ax" ++663: pushl $do_simd_coprocessor_error ++664: ++.previous ++#else + pushl $do_simd_coprocessor_error ++#endif + CFI_ADJUST_CFA_OFFSET 4 + jmp error_code + CFI_ENDPROC +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:03:10.000000000 +0100 +@@ -582,8 +582,8 @@ auditsys: + * masked off. + */ + sysret_audit: +- movq %rax,%rsi /* second arg, syscall return value */ +- cmpq $0,%rax /* is it < 0? */ ++ movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */ ++ cmpq $0,%rsi /* is it < 0? */ + setl %al /* 1 if so, 0 if not */ + movzbl %al,%edi /* zero-extend that into %edi */ + inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ +--- head-2011-03-17.orig/arch/x86/kernel/microcode_core-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode_core-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -88,9 +88,9 @@ static int do_microcode_update(const voi + return err; + } + +-static int microcode_open(struct inode *unused1, struct file *unused2) ++static int microcode_open(struct inode *inode, struct file *file) + { +- return capable(CAP_SYS_RAWIO) ? 
0 : -EPERM; ++ return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM; + } + + static ssize_t microcode_write(struct file *file, const char __user *buf, +@@ -145,6 +145,7 @@ static void microcode_dev_exit(void) + } + + MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); ++MODULE_ALIAS("devname:cpu/microcode"); + #else + #define microcode_dev_init() 0 + #define microcode_dev_exit() do { } while (0) +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -127,21 +127,6 @@ static void __init MP_bus_info(struct mp + printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); + } + +-static int bad_ioapic(unsigned long address) +-{ +- if (nr_ioapics >= MAX_IO_APICS) { +- printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " +- "(found %d)\n", MAX_IO_APICS, nr_ioapics); +- panic("Recompile kernel with bigger MAX_IO_APICS!\n"); +- } +- if (!address) { +- printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" +- " found in table, skipping!\n"); +- return 1; +- } +- return 0; +-} +- + static void __init MP_ioapic_info(struct mpc_ioapic *m) + { + if (!(m->flags & MPC_APIC_USABLE)) +@@ -150,15 +135,7 @@ static void __init MP_ioapic_info(struct + printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", + m->apicid, m->apicver, m->apicaddr); + +- if (bad_ioapic(m->apicaddr)) +- return; +- +- mp_ioapics[nr_ioapics].apicaddr = m->apicaddr; +- mp_ioapics[nr_ioapics].apicid = m->apicid; +- mp_ioapics[nr_ioapics].type = m->type; +- mp_ioapics[nr_ioapics].apicver = m->apicver; +- mp_ioapics[nr_ioapics].flags = m->flags; +- nr_ioapics++; ++ mp_register_ioapic(m->apicid, m->apicaddr, gsi_top); + } + + static void print_MP_intsrc_info(struct mpc_intsrc *m) +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -135,8 +135,6 @@ static struct dma_map_ops swiotlb_dma_op + .unmap_page = swiotlb_unmap_page, + .sync_single_for_cpu = swiotlb_sync_single_for_cpu, + .sync_single_for_device = swiotlb_sync_single_for_device, +- .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, +- .sync_single_range_for_device = swiotlb_sync_single_range_for_device, + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = swiotlb_sync_sg_for_device, + .map_sg = swiotlb_map_sg_attrs, +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:10:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:10:40.000000000 +0100 +@@ -20,7 +20,6 @@ + #include + #include + #include +-#include + #include + #include + +@@ -33,26 +32,22 @@ struct kmem_cache *task_xstate_cachep; + + int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) + { ++ int ret; ++ + *dst = *src; +- if (src->thread.xstate) { +- dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, +- GFP_KERNEL); +- if (!dst->thread.xstate) +- return -ENOMEM; +- WARN_ON((unsigned long)dst->thread.xstate & 15); +- memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); ++ if (fpu_allocated(&src->thread.fpu)) { ++ memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu)); ++ ret = fpu_alloc(&dst->thread.fpu); ++ if (ret) ++ return ret; ++ fpu_copy(&dst->thread.fpu, &src->thread.fpu); + } + return 0; + } + + void free_thread_xstate(struct task_struct *tsk) + { +- if (tsk->thread.xstate) { +- kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); +- 
tsk->thread.xstate = NULL; +- } +- +- WARN(tsk->thread.ds_ctx, "leaking DS context\n"); ++ fpu_free(&tsk->thread.fpu); + } + + void free_thread_info(struct thread_info *ti) +@@ -199,11 +194,16 @@ void __switch_to_xtra(struct task_struct + prev = &prev_p->thread; + next = &next_p->thread; + +- if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || +- test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) +- ds_switch_to(prev_p, next_p); +- else if (next->debugctlmsr != prev->debugctlmsr) +- update_debugctlmsr(next->debugctlmsr); ++ if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^ ++ test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) { ++ unsigned long debugctl = get_debugctlmsr(); ++ ++ debugctl &= ~DEBUGCTLMSR_BTF; ++ if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) ++ debugctl |= DEBUGCTLMSR_BTF; ++ ++ update_debugctlmsr(debugctl); ++ } + + if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ + test_tsk_thread_flag(next_p, TIF_NOTSC)) { +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:47:25.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:47:43.000000000 +0100 +@@ -57,7 +57,6 @@ + #include + #include + #include +-#include + #include + + asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); +@@ -245,13 +244,6 @@ int copy_thread(unsigned long clone_flag + kfree(p->thread.io_bitmap_ptr); + p->thread.io_bitmap_max = 0; + } +- +- clear_tsk_thread_flag(p, TIF_DS_AREA_MSR); +- p->thread.ds_ctx = NULL; +- +- clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR); +- p->thread.debugctlmsr = 0; +- + return err; + } + +@@ -420,7 +412,7 @@ __switch_to(struct task_struct *prev_p, + + /* we're going to use this soon, after a few expensive things */ + if (preload_fpu) +- prefetch(next->xstate); ++ prefetch(next->fpu.state); + + /* + * Now maybe handle debug registers +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:22.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:47.000000000 +0100 +@@ -54,7 +54,6 @@ + #include + #include + #include +-#include + #include + + asmlinkage extern void ret_from_fork(void); +@@ -324,12 +323,6 @@ int copy_thread(unsigned long clone_flag + } + p->thread.iopl = current->thread.iopl; + +- clear_tsk_thread_flag(p, TIF_DS_AREA_MSR); +- p->thread.ds_ctx = NULL; +- +- clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR); +- p->thread.debugctlmsr = 0; +- + err = 0; + out: + if (err && p->thread.io_bitmap_ptr) { +@@ -415,7 +408,7 @@ __switch_to(struct task_struct *prev_p, + + /* we're going to use this soon, after a few expensive things */ + if (preload_fpu) +- prefetch(next->xstate); ++ prefetch(next->fpu.state); + + /* + * This is basically '__unlazy_fpu', except that we queue a +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:24:33.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:24:49.000000000 +0100 +@@ -746,6 +746,17 @@ static struct dmi_system_id __initdata b + DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), + }, + }, ++ /* ++ * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so ++ * match on the product name. 
++ */ ++ { ++ .callback = dmi_low_memory_corruption, ++ .ident = "Phoenix BIOS", ++ .matches = { ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), ++ }, ++ }, + #endif + {} + }; +@@ -815,6 +826,7 @@ void __init setup_arch(char **cmdline_p) + /* VMI may relocate the fixmap; do this before touching ioremap area */ + vmi_init(); + ++ early_trap_init(); + early_cpu_init(); + early_ioremap_init(); + +--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -30,7 +30,7 @@ + #include + + #include +-DEFINE_SPINLOCK(i8253_lock); ++DEFINE_RAW_SPINLOCK(i8253_lock); + EXPORT_SYMBOL(i8253_lock); + + #ifdef CONFIG_X86_64 +@@ -69,12 +69,6 @@ DEFINE_PER_CPU(struct vcpu_runstate_info + /* Must be signed, as it's compared with s64 quantities which can be -ve. */ + #define NS_PER_TICK (1000000000LL/HZ) + +-static void __clock_was_set(struct work_struct *unused) +-{ +- clock_was_set(); +-} +-static DECLARE_WORK(clock_was_set_work, __clock_was_set); +- + /* + * GCC 4.3 can turn loops over an induction variable into division. We do + * not support arbitrary 64-bit division, and so must break the induction. +@@ -187,33 +181,13 @@ static u64 get_nsec_offset(struct shadow + return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); + } + +-static void __update_wallclock(time_t sec, long nsec) +-{ +- long wtm_nsec, xtime_nsec; +- time_t wtm_sec, xtime_sec; +- u64 tmp, wc_nsec; +- +- /* Adjust wall-clock time base. */ +- wc_nsec = processed_system_time; +- wc_nsec += sec * (u64)NSEC_PER_SEC; +- wc_nsec += nsec; +- +- /* Split wallclock base into seconds and nanoseconds. */ +- tmp = wc_nsec; +- xtime_nsec = do_div(tmp, 1000000000); +- xtime_sec = (time_t)tmp; +- +- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec); +- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec); +- +- set_normalized_timespec(&xtime, xtime_sec, xtime_nsec); +- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); +-} +- + static void update_wallclock(void) + { ++ static DEFINE_MUTEX(uwc_mutex); + shared_info_t *s = HYPERVISOR_shared_info; + ++ mutex_lock(&uwc_mutex); ++ + do { + shadow_tv_version = s->wc_version; + rmb(); +@@ -222,9 +196,24 @@ static void update_wallclock(void) + rmb(); + } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version)); + +- if (!independent_wallclock) +- __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec); ++ if (!independent_wallclock) { ++ u64 tmp = processed_system_time; ++ long nsec = do_div(tmp, NSEC_PER_SEC); ++ struct timespec tv; ++ ++ set_normalized_timespec(&tv, shadow_tv.tv_sec + tmp, ++ shadow_tv.tv_nsec + nsec); ++ do_settimeofday(&tv); ++ } ++ ++ mutex_unlock(&uwc_mutex); ++} ++ ++static void _update_wallclock(struct work_struct *unused) ++{ ++ update_wallclock(); + } ++static DECLARE_WORK(update_wallclock_work, _update_wallclock); + + /* + * Reads a consistent set of time-base values from Xen, into a shadow data +@@ -275,15 +264,19 @@ static DEFINE_TIMER(sync_xen_wallclock_t + static void sync_xen_wallclock(unsigned long dummy) + { + struct timespec now; ++ unsigned long seq; + struct xen_platform_op op; + + BUG_ON(!is_initial_xendomain()); + if (!ntp_synced() || independent_wallclock) + return; + +- write_seqlock_irq(&xtime_lock); ++ do { ++ seq = read_seqbegin(&xtime_lock); ++ now = __current_kernel_time(); ++ } while (read_seqretry(&xtime_lock, seq)); + +- set_normalized_timespec(&now, xtime.tv_sec, xtime.tv_nsec); ++ 
set_normalized_timespec(&now, now.tv_sec, now.tv_nsec); + + op.cmd = XENPF_settime; + op.u.settime.secs = now.tv_sec; +@@ -293,8 +286,6 @@ static void sync_xen_wallclock(unsigned + + update_wallclock(); + +- write_sequnlock_irq(&xtime_lock); +- + /* Once per minute. */ + mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ); + } +@@ -409,7 +400,6 @@ static irqreturn_t timer_interrupt(int i + { + s64 delta, delta_cpu, stolen, blocked; + unsigned int i, cpu = smp_processor_id(); +- int schedule_clock_was_set_work = 0; + struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); + struct vcpu_runstate_info runstate; + +@@ -464,15 +454,11 @@ static irqreturn_t timer_interrupt(int i + do_timer(delta); + } + +- if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) { +- update_wallclock(); +- schedule_clock_was_set_work = 1; +- } +- + write_sequnlock(&xtime_lock); + +- if (schedule_clock_was_set_work && keventd_up()) +- schedule_work(&clock_was_set_work); ++ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version ++ && keventd_up()) ++ schedule_work(&update_wallclock_work); + + /* + * Account stolen ticks. +@@ -679,6 +665,12 @@ static struct vcpu_set_periodic_timer xe + .period_ns = NS_PER_TICK + }; + ++static void __init _late_time_init(void) ++{ ++ update_wallclock(); ++ setup_cpu0_timer_irq(); ++} ++ + void __init time_init(void) + { + init_cpu_khz(); +@@ -704,12 +696,13 @@ void __init time_init(void) + + clocksource_register(&clocksource_xen); + +- update_wallclock(); +- + use_tsc_delay(); + +- /* Cannot request_irq() until kmem is initialised. */ +- late_time_init = setup_cpu0_timer_irq; ++ /* ++ * Cannot request_irq() until kmem is initialised, and cannot ++ * do_settimeofday() (i.e. clock_was_set()) until interrupts are on. ++ */ ++ late_time_init = _late_time_init; + } + + /* Convert jiffies to system time. */ +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -112,15 +113,6 @@ static inline void preempt_conditional_c + dec_preempt_count(); + } + +-#ifdef CONFIG_X86_32 +-static inline void +-die_if_kernel(const char *str, struct pt_regs *regs, long err) +-{ +- if (!user_mode_vm(regs)) +- die(str, regs, err); +-} +-#endif +- + static void __kprobes + do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, + long error_code, siginfo_t *info) +@@ -453,6 +445,11 @@ void restart_nmi(void) + /* May run on IST stack. */ + dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) + { ++#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP ++ if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) ++ == NOTIFY_STOP) ++ return; ++#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ + #ifdef CONFIG_KPROBES + if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) + == NOTIFY_STOP) +@@ -522,6 +519,7 @@ asmlinkage __kprobes struct pt_regs *syn + dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + { + struct task_struct *tsk = current; ++ int user_icebp = 0; + unsigned long dr6; + int si_code; + +@@ -530,17 +528,25 @@ dotraplinkage void __kprobes do_debug(st + /* Filter out all the reserved bits which are preset to 1 */ + dr6 &= ~DR6_RESERVED; + ++ /* ++ * If dr6 has no reason to give us about the origin of this trap, ++ * then it's very likely the result of an icebp/int01 trap. ++ * User wants a sigtrap for that. 
++ */ ++ if (!dr6 && user_mode(regs)) ++ user_icebp = 1; ++ + /* Catch kmemcheck conditions first of all! */ + if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) + return; + + /* DR6 may or may not be cleared by the CPU */ + set_debugreg(0, 6); ++ + /* + * The processor cleared BTF, so don't mark that we need it set. + */ +- clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR); +- tsk->thread.debugctlmsr = 0; ++ clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); + + /* Store the virtualized DR6 value */ + tsk->thread.debugreg6 = dr6; +@@ -571,62 +577,74 @@ dotraplinkage void __kprobes do_debug(st + regs->flags &= ~X86_EFLAGS_TF; + } + si_code = get_si_code(tsk->thread.debugreg6); +- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS)) ++ if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) + send_sigtrap(tsk, regs, error_code, si_code); + preempt_conditional_cli(regs); + + return; + } + +-#ifdef CONFIG_X86_64 +-static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) +-{ +- if (fixup_exception(regs)) +- return 1; +- +- notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE); +- /* Illegal floating point operation in the kernel */ +- current->thread.trap_no = trapnr; +- die(str, regs, 0); +- return 0; +-} +-#endif +- + /* + * Note that we play around with the 'TS' bit in an attempt to get + * the correct behaviour even in the presence of the asynchronous + * IRQ13 behaviour + */ +-void math_error(void __user *ip) ++void math_error(struct pt_regs *regs, int error_code, int trapnr) + { +- struct task_struct *task; ++ struct task_struct *task = current; + siginfo_t info; +- unsigned short cwd, swd, err; ++ unsigned short err; ++ char *str = (trapnr == 16) ? "fpu exception" : "simd exception"; ++ ++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) ++ return; ++ conditional_sti(regs); ++ ++ if (!user_mode_vm(regs)) ++ { ++ if (!fixup_exception(regs)) { ++ task->thread.error_code = error_code; ++ task->thread.trap_no = trapnr; ++ die(str, regs, error_code); ++ } ++ return; ++ } + + /* + * Save the info for the exception handler and clear the error. + */ +- task = current; + save_init_fpu(task); +- task->thread.trap_no = 16; +- task->thread.error_code = 0; ++ task->thread.trap_no = trapnr; ++ task->thread.error_code = error_code; + info.si_signo = SIGFPE; + info.si_errno = 0; +- info.si_addr = ip; +- /* +- * (~cwd & swd) will mask out exceptions that are not set to unmasked +- * status. 0x3f is the exception bits in these regs, 0x200 is the +- * C1 reg you need in case of a stack fault, 0x040 is the stack +- * fault bit. We should only be taking one exception at a time, +- * so if this combination doesn't produce any single exception, +- * then we have a bad program that isn't synchronizing its FPU usage +- * and it will suffer the consequences since we won't be able to +- * fully reproduce the context of the exception +- */ +- cwd = get_fpu_cwd(task); +- swd = get_fpu_swd(task); ++ info.si_addr = (void __user *)regs->ip; ++ if (trapnr == 16) { ++ unsigned short cwd, swd; ++ /* ++ * (~cwd & swd) will mask out exceptions that are not set to unmasked ++ * status. 0x3f is the exception bits in these regs, 0x200 is the ++ * C1 reg you need in case of a stack fault, 0x040 is the stack ++ * fault bit. 
We should only be taking one exception at a time, ++ * so if this combination doesn't produce any single exception, ++ * then we have a bad program that isn't synchronizing its FPU usage ++ * and it will suffer the consequences since we won't be able to ++ * fully reproduce the context of the exception ++ */ ++ cwd = get_fpu_cwd(task); ++ swd = get_fpu_swd(task); + +- err = swd & ~cwd; ++ err = swd & ~cwd; ++ } else { ++ /* ++ * The SIMD FPU exceptions are handled a little differently, as there ++ * is only a single status/control register. Thus, to determine which ++ * unmasked exception was caught we must mask the exception mask bits ++ * at 0x1f80, and then use these to mask the exception bits at 0x3f. ++ */ ++ unsigned short mxcsr = get_fpu_mxcsr(task); ++ err = ~(mxcsr >> 7) & mxcsr; ++ } + + if (err & 0x001) { /* Invalid op */ + /* +@@ -655,97 +673,17 @@ void math_error(void __user *ip) + + dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) + { +- conditional_sti(regs); +- + #ifdef CONFIG_X86_32 + ignore_fpu_irq = 1; +-#else +- if (!user_mode(regs) && +- kernel_math_error(regs, "kernel x87 math error", 16)) +- return; + #endif + +- math_error((void __user *)regs->ip); +-} +- +-static void simd_math_error(void __user *ip) +-{ +- struct task_struct *task; +- siginfo_t info; +- unsigned short mxcsr; +- +- /* +- * Save the info for the exception handler and clear the error. +- */ +- task = current; +- save_init_fpu(task); +- task->thread.trap_no = 19; +- task->thread.error_code = 0; +- info.si_signo = SIGFPE; +- info.si_errno = 0; +- info.si_code = __SI_FAULT; +- info.si_addr = ip; +- /* +- * The SIMD FPU exceptions are handled a little differently, as there +- * is only a single status/control register. Thus, to determine which +- * unmasked exception was caught we must mask the exception mask bits +- * at 0x1f80, and then use these to mask the exception bits at 0x3f. +- */ +- mxcsr = get_fpu_mxcsr(task); +- switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { +- case 0x000: +- default: +- break; +- case 0x001: /* Invalid Op */ +- info.si_code = FPE_FLTINV; +- break; +- case 0x002: /* Denormalize */ +- case 0x010: /* Underflow */ +- info.si_code = FPE_FLTUND; +- break; +- case 0x004: /* Zero Divide */ +- info.si_code = FPE_FLTDIV; +- break; +- case 0x008: /* Overflow */ +- info.si_code = FPE_FLTOVF; +- break; +- case 0x020: /* Precision */ +- info.si_code = FPE_FLTRES; +- break; +- } +- force_sig_info(SIGFPE, &info, task); ++ math_error(regs, error_code, 16); + } + + dotraplinkage void + do_simd_coprocessor_error(struct pt_regs *regs, long error_code) + { +- conditional_sti(regs); +- +-#ifdef CONFIG_X86_32 +- if (cpu_has_xmm) { +- /* Handle SIMD FPU exceptions on PIII+ processors. */ +- ignore_fpu_irq = 1; +- simd_math_error((void __user *)regs->ip); +- return; +- } +- /* +- * Handle strange cache flush from user space exception +- * in all other cases. This is undocumented behaviour. +- */ +- if (regs->flags & X86_VM_MASK) { +- handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code); +- return; +- } +- current->thread.trap_no = 19; +- current->thread.error_code = error_code; +- die_if_kernel("cache flush denied", regs, error_code); +- force_sig(SIGSEGV, current); +-#else +- if (!user_mode(regs) && +- kernel_math_error(regs, "kernel simd math error", 19)) +- return; +- simd_math_error((void __user *)regs->ip); +-#endif ++ math_error(regs, error_code, 19); + } + + #ifndef CONFIG_XEN +@@ -877,12 +815,18 @@ dotraplinkage void do_iret_error(struct + * NB. 
All these are "trap gates" (i.e. events_mask isn't set) except + * for those that specify |4 in the second field. + */ +-static const trap_info_t __cpuinitconst trap_table[] = { + #ifdef CONFIG_X86_32 + #define X 0 + #else + #define X 4 + #endif ++static const trap_info_t __initconst early_trap_table[] = { ++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug }, ++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 }, ++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault }, ++ { 0, 0, 0, 0 } ++}; ++static const trap_info_t __cpuinitconst trap_table[] = { + { 0, 0|X, __KERNEL_CS, (unsigned long)divide_error }, + { 1, 0|4, __KERNEL_CS, (unsigned long)debug }, + { 3, 3|4, __KERNEL_CS, (unsigned long)int3 }, +@@ -911,6 +855,16 @@ static const trap_info_t __cpuinitconst + { 0, 0, 0, 0 } + }; + ++/* Set of traps needed for early debugging. */ ++void __init early_trap_init(void) ++{ ++ int ret; ++ ++ ret = HYPERVISOR_set_trap_table(early_trap_table); ++ if (ret) ++ printk("early set_trap_table failed (%d)\n", ret); ++} ++ + void __init trap_init(void) + { + int ret; +--- head-2011-03-17.orig/arch/x86/kernel/x86_init-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/x86_init-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -80,9 +81,14 @@ struct x86_init_ops x86_init __initdata + }, + }; + ++static int default_i8042_detect(void) { return 1; }; ++ + struct x86_platform_ops x86_platform = { + .calibrate_tsc = NULL, + .get_wallclock = mach_get_cmos_time, + .set_wallclock = mach_set_rtc_mmss, + .is_untracked_pat_range = is_ISA_range, ++ .i8042_detect = default_i8042_detect + }; ++ ++EXPORT_SYMBOL_GPL(x86_platform); +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -1063,7 +1063,8 @@ out_err: + } + EXPORT_SYMBOL(set_memory_uc); + +-int set_memory_array_uc(unsigned long *addr, int addrinarray) ++int _set_memory_array(unsigned long *addr, int addrinarray, ++ unsigned long new_type) + { + int i, j; + int ret; +@@ -1073,13 +1074,19 @@ int set_memory_array_uc(unsigned long *a + */ + for (i = 0; i < addrinarray; i++) { + ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, +- _PAGE_CACHE_UC_MINUS, NULL); ++ new_type, NULL); + if (ret) + goto out_free; + } + + ret = change_page_attr_set(addr, addrinarray, + __pgprot(_PAGE_CACHE_UC_MINUS), 1); ++ ++ if (!ret && new_type == _PAGE_CACHE_WC) ++ ret = change_page_attr_set_clr(addr, addrinarray, ++ __pgprot(_PAGE_CACHE_WC), ++ __pgprot(_PAGE_CACHE_MASK), ++ 0, CPA_ARRAY, NULL); + if (ret) + goto out_free; + +@@ -1091,8 +1098,19 @@ out_free: + + return ret; + } ++ ++int set_memory_array_uc(unsigned long *addr, int addrinarray) ++{ ++ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS); ++} + EXPORT_SYMBOL(set_memory_array_uc); + ++int set_memory_array_wc(unsigned long *addr, int addrinarray) ++{ ++ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC); ++} ++EXPORT_SYMBOL(set_memory_array_wc); ++ + int _set_memory_wc(unsigned long addr, int numpages) + { + int ret; +@@ -1219,26 +1237,34 @@ int set_pages_uc(struct page *page, int + } + EXPORT_SYMBOL(set_pages_uc); + +-int set_pages_array_uc(struct page **pages, int addrinarray) ++static int _set_pages_array(struct page **pages, int addrinarray, ++ unsigned long new_type) + { + unsigned long start; + unsigned long end; + int i; + int free_idx; ++ int ret; + + for (i = 
0; i < addrinarray; i++) { + if (PageHighMem(pages[i])) + continue; + start = page_to_pfn(pages[i]) << PAGE_SHIFT; + end = start + PAGE_SIZE; +- if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL)) ++ if (reserve_memtype(start, end, new_type, NULL)) + goto err_out; + } + +- if (cpa_set_pages_array(pages, addrinarray, +- __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) { +- return 0; /* Success */ +- } ++ ret = cpa_set_pages_array(pages, addrinarray, ++ __pgprot(_PAGE_CACHE_UC_MINUS)); ++ if (!ret && new_type == _PAGE_CACHE_WC) ++ ret = change_page_attr_set_clr(NULL, addrinarray, ++ __pgprot(_PAGE_CACHE_WC), ++ __pgprot(_PAGE_CACHE_MASK), ++ 0, CPA_PAGES_ARRAY, pages); ++ if (ret) ++ goto err_out; ++ return 0; /* Success */ + err_out: + free_idx = i; + for (i = 0; i < free_idx; i++) { +@@ -1250,8 +1276,19 @@ err_out: + } + return -EINVAL; + } ++ ++int set_pages_array_uc(struct page **pages, int addrinarray) ++{ ++ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS); ++} + EXPORT_SYMBOL(set_pages_array_uc); + ++int set_pages_array_wc(struct page **pages, int addrinarray) ++{ ++ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC); ++} ++EXPORT_SYMBOL(set_pages_array_wc); ++ + int set_pages_wb(struct page *page, int numpages) + { + unsigned long addr = (unsigned long)page_address(page); +--- head-2011-03-17.orig/arch/x86/mm/pat-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -30,6 +30,8 @@ + #include + #include + ++#include "pat_internal.h" ++ + #ifdef CONFIG_X86_PAT + int __read_mostly pat_enabled = 1; + +@@ -53,19 +55,15 @@ static inline void pat_disable(const cha + #endif + + +-static int debug_enable; ++int pat_debug_enable; + + static int __init pat_debug_setup(char *str) + { +- debug_enable = 1; ++ pat_debug_enable = 1; + return 0; + } + __setup("debugpat", pat_debug_setup); + +-#define dprintk(fmt, arg...) \ +- do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0) +- +- + static u64 __read_mostly boot_pat_state; + + enum { +@@ -142,86 +140,7 @@ void pat_init(void) + + #undef PAT + +-static char *cattr_name(unsigned long flags) +-{ +- switch (flags & _PAGE_CACHE_MASK) { +- case _PAGE_CACHE_UC: return "uncached"; +- case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; +- case _PAGE_CACHE_WB: return "write-back"; +- case _PAGE_CACHE_WC: return "write-combining"; +- case _PAGE_CACHE_WP: return "write-protected"; +- case _PAGE_CACHE_WT: return "write-through"; +- default: return "broken"; +- } +-} +- +-/* +- * The global memtype list keeps track of memory type for specific +- * physical memory areas. Conflicting memory types in different +- * mappings can cause CPU cache corruption. To avoid this we keep track. +- * +- * The list is sorted based on starting address and can contain multiple +- * entries for each address (this allows reference counting for overlapping +- * areas). All the aliases have the same cache attributes of course. +- * Zero attributes are represented as holes. +- * +- * The data structure is a list that is also organized as an rbtree +- * sorted on the start address of memtype range. +- * +- * memtype_lock protects both the linear list and rbtree. 
+- */ +- +-struct memtype { +- u64 start; +- u64 end; +- unsigned long type; +- struct list_head nd; +- struct rb_node rb; +-}; +- +-static struct rb_root memtype_rbroot = RB_ROOT; +-static LIST_HEAD(memtype_list); +-static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ +- +-static struct memtype *memtype_rb_search(struct rb_root *root, u64 start) +-{ +- struct rb_node *node = root->rb_node; +- struct memtype *last_lower = NULL; +- +- while (node) { +- struct memtype *data = container_of(node, struct memtype, rb); +- +- if (data->start < start) { +- last_lower = data; +- node = node->rb_right; +- } else if (data->start > start) { +- node = node->rb_left; +- } else +- return data; +- } +- +- /* Will return NULL if there is no entry with its start <= start */ +- return last_lower; +-} +- +-static void memtype_rb_insert(struct rb_root *root, struct memtype *data) +-{ +- struct rb_node **new = &(root->rb_node); +- struct rb_node *parent = NULL; +- +- while (*new) { +- struct memtype *this = container_of(*new, struct memtype, rb); +- +- parent = *new; +- if (data->start <= this->start) +- new = &((*new)->rb_left); +- else if (data->start > this->start) +- new = &((*new)->rb_right); +- } +- +- rb_link_node(&data->rb, parent, new); +- rb_insert_color(&data->rb, root); +-} ++static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ + + static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end); + static inline u8 _mtrr_type_lookup(u64 start, u64 end) +@@ -259,33 +178,6 @@ static unsigned long pat_x_mtrr_type(u64 + return req_type; + } + +-static int +-chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type) +-{ +- if (new->type != entry->type) { +- if (type) { +- new->type = entry->type; +- *type = entry->type; +- } else +- goto conflict; +- } +- +- /* check overlaps with more than one entry in the list */ +- list_for_each_entry_continue(entry, &memtype_list, nd) { +- if (new->end <= entry->start) +- break; +- else if (new->type != entry->type) +- goto conflict; +- } +- return 0; +- +- conflict: +- printk(KERN_INFO "%s:%d conflicting memory types " +- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start, +- new->end, cattr_name(new->type), cattr_name(entry->type)); +- return -EBUSY; +-} +- + static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) + { + int ram_page = 0, not_rampage = 0; +@@ -318,8 +210,6 @@ static int pat_pagerange_is_ram(resource + * Here we do two pass: + * - Find the memtype of all the pages in the range, look for any conflicts + * - In case of no conflicts, set the new memtype for pages in the range +- * +- * Caller must hold memtype_lock for atomicity. 
+ */ + static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, + unsigned long *new_type) +@@ -390,9 +280,8 @@ static int free_ram_pages_type(u64 start + int reserve_memtype(u64 start, u64 end, unsigned long req_type, + unsigned long *new_type) + { +- struct memtype *new, *entry; ++ struct memtype *new; + unsigned long actual_type; +- struct list_head *where; + int is_range_ram; + int err = 0; + +@@ -430,16 +319,14 @@ int reserve_memtype(u64 start, u64 end, + is_range_ram = pat_pagerange_is_ram(start, end); + if (is_range_ram == 1) { + +- spin_lock(&memtype_lock); + err = reserve_ram_pages_type(start, end, req_type, new_type); +- spin_unlock(&memtype_lock); + + return err; + } else if (is_range_ram < 0) { + return -EINVAL; + } + +- new = kmalloc(sizeof(struct memtype), GFP_KERNEL); ++ new = kzalloc(sizeof(struct memtype), GFP_KERNEL); + if (!new) + return -ENOMEM; + +@@ -449,42 +336,7 @@ int reserve_memtype(u64 start, u64 end, + + spin_lock(&memtype_lock); + +- /* Search for existing mapping that overlaps the current range */ +- where = NULL; +- list_for_each_entry(entry, &memtype_list, nd) { +- if (end <= entry->start) { +- where = entry->nd.prev; +- break; +- } else if (start <= entry->start) { /* end > entry->start */ +- err = chk_conflict(new, entry, new_type); +- if (!err) { +- dprintk("Overlap at 0x%Lx-0x%Lx\n", +- entry->start, entry->end); +- where = entry->nd.prev; +- } +- break; +- } else if (start < entry->end) { /* start > entry->start */ +- err = chk_conflict(new, entry, new_type); +- if (!err) { +- dprintk("Overlap at 0x%Lx-0x%Lx\n", +- entry->start, entry->end); +- +- /* +- * Move to right position in the linked +- * list to add this new entry +- */ +- list_for_each_entry_continue(entry, +- &memtype_list, nd) { +- if (start <= entry->start) { +- where = entry->nd.prev; +- break; +- } +- } +- } +- break; +- } +- } +- ++ err = rbt_memtype_check_insert(new, new_type); + if (err) { + printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, " + "track %s, req %s\n", +@@ -495,13 +347,6 @@ int reserve_memtype(u64 start, u64 end, + return err; + } + +- if (where) +- list_add(&new->nd, where); +- else +- list_add_tail(&new->nd, &memtype_list); +- +- memtype_rb_insert(&memtype_rbroot, new); +- + spin_unlock(&memtype_lock); + + dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", +@@ -513,9 +358,9 @@ int reserve_memtype(u64 start, u64 end, + + int free_memtype(u64 start, u64 end) + { +- struct memtype *entry, *saved_entry; + int err = -EINVAL; + int is_range_ram; ++ struct memtype *entry; + + if (!pat_enabled) + return 0; +@@ -527,9 +372,7 @@ int free_memtype(u64 start, u64 end) + is_range_ram = pat_pagerange_is_ram(start, end); + if (is_range_ram == 1) { + +- spin_lock(&memtype_lock); + err = free_ram_pages_type(start, end); +- spin_unlock(&memtype_lock); + + return err; + } else if (is_range_ram < 0) { +@@ -537,56 +380,20 @@ int free_memtype(u64 start, u64 end) + } + + spin_lock(&memtype_lock); +- +- entry = memtype_rb_search(&memtype_rbroot, start); +- if (unlikely(entry == NULL)) +- goto unlock_ret; +- +- /* +- * Saved entry points to an entry with start same or less than what +- * we searched for. 
Now go through the list in both directions to look +- * for the entry that matches with both start and end, with list stored +- * in sorted start address +- */ +- saved_entry = entry; +- list_for_each_entry_from(entry, &memtype_list, nd) { +- if (entry->start == start && entry->end == end) { +- rb_erase(&entry->rb, &memtype_rbroot); +- list_del(&entry->nd); +- kfree(entry); +- err = 0; +- break; +- } else if (entry->start > start) { +- break; +- } +- } +- +- if (!err) +- goto unlock_ret; +- +- entry = saved_entry; +- list_for_each_entry_reverse(entry, &memtype_list, nd) { +- if (entry->start == start && entry->end == end) { +- rb_erase(&entry->rb, &memtype_rbroot); +- list_del(&entry->nd); +- kfree(entry); +- err = 0; +- break; +- } else if (entry->start < start) { +- break; +- } +- } +-unlock_ret: ++ entry = rbt_memtype_erase(start, end); + spin_unlock(&memtype_lock); + +- if (err) { ++ if (!entry) { + printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", + current->comm, current->pid, start, end); ++ return -EINVAL; + } + ++ kfree(entry); ++ + dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); + +- return err; ++ return 0; + } + + +@@ -610,10 +417,8 @@ static unsigned long lookup_memtype(u64 + + if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { + struct page *page; +- spin_lock(&memtype_lock); + page = pfn_to_page(paddr >> PAGE_SHIFT); + rettype = get_page_memtype(page); +- spin_unlock(&memtype_lock); + /* + * -1 from get_page_memtype() implies RAM page is in its + * default state and not reserved, and hence of type WB +@@ -626,7 +431,7 @@ static unsigned long lookup_memtype(u64 + + spin_lock(&memtype_lock); + +- entry = memtype_rb_search(&memtype_rbroot, paddr); ++ entry = rbt_memtype_lookup(paddr); + if (entry != NULL) + rettype = entry->type; + else +@@ -950,29 +755,25 @@ EXPORT_SYMBOL_GPL(pgprot_writecombine); + + #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) + +-/* get Nth element of the linked list */ + static struct memtype *memtype_get_idx(loff_t pos) + { +- struct memtype *list_node, *print_entry; +- int i = 1; ++ struct memtype *print_entry; ++ int ret; + +- print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL); ++ print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL); + if (!print_entry) + return NULL; + + spin_lock(&memtype_lock); +- list_for_each_entry(list_node, &memtype_list, nd) { +- if (pos == i) { +- *print_entry = *list_node; +- spin_unlock(&memtype_lock); +- return print_entry; +- } +- ++i; +- } ++ ret = rbt_memtype_copy_nth_element(print_entry, pos); + spin_unlock(&memtype_lock); +- kfree(print_entry); + +- return NULL; ++ if (!ret) { ++ return print_entry; ++ } else { ++ kfree(print_entry); ++ return NULL; ++ } + } + + static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) +--- head-2011-03-17.orig/arch/x86/mm/pat_internal.h 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pat_internal.h 2011-02-01 15:03:10.000000000 +0100 +@@ -21,6 +21,10 @@ static inline char *cattr_name(unsigned + case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; + case _PAGE_CACHE_WB: return "write-back"; + case _PAGE_CACHE_WC: return "write-combining"; ++#ifdef CONFIG_XEN ++ case _PAGE_CACHE_WP: return "write-protected"; ++ case _PAGE_CACHE_WT: return "write-through"; ++#endif + default: return "broken"; + } + } +--- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -9,7 +9,6 @@ + #include + #include + 
#include +-#include + + #include + #include +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -595,8 +595,6 @@ static __init int intel_router_probe(str + case PCI_DEVICE_ID_INTEL_ICH10_1: + case PCI_DEVICE_ID_INTEL_ICH10_2: + case PCI_DEVICE_ID_INTEL_ICH10_3: +- case PCI_DEVICE_ID_INTEL_CPT_LPC1: +- case PCI_DEVICE_ID_INTEL_CPT_LPC2: + r->name = "PIIX/ICH"; + r->get = pirq_piix_get; + r->set = pirq_piix_set; +@@ -611,6 +609,13 @@ static __init int intel_router_probe(str + return 1; + } + ++ if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) && ++ (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) { ++ r->name = "PIIX/ICH"; ++ r->get = pirq_piix_get; ++ r->set = pirq_piix_set; ++ return 1; ++ } + return 0; + } + +--- head-2011-03-17.orig/drivers/acpi/processor_driver.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_driver.c 2011-02-01 15:03:10.000000000 +0100 +@@ -331,6 +331,11 @@ static int acpi_processor_get_info(struc + return -ENODEV; + } + } ++#if defined(CONFIG_SMP) && defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) ++ if (pr->id >= setup_max_cpus && pr->id > 0) ++ pr->id = -1; ++#endif ++ + /* + * On some boxes several processors use the same processor bus id. + * But they are located in different scope. For example: +@@ -483,8 +488,11 @@ static int __cpuinit acpi_processor_add( + } + + #ifdef CONFIG_SMP +- if (pr->id >= setup_max_cpus && pr->id != 0) +- return 0; ++ if (pr->id >= setup_max_cpus && pr->id != 0) { ++ if (!processor_cntl_external()) ++ return 0; ++ WARN_ON(pr->id != -1); ++ } + #endif + + BUG_ON(!processor_cntl_external() && +@@ -541,7 +549,8 @@ static int __cpuinit acpi_processor_add( + acpi_processor_get_limit_info(pr); + } + +- if (cpuidle_get_driver() == &acpi_idle_driver) ++ if (cpuidle_get_driver() == &acpi_idle_driver ++ || processor_pm_external()) + acpi_processor_power_init(pr, device); + + result = processor_extcntl_prepare(pr); +@@ -864,6 +873,7 @@ static int __init acpi_processor_init(vo + + memset(&errata, 0, sizeof(errata)); + ++#ifdef CONFIG_CPU_IDLE + if (!cpuidle_register_driver(&acpi_idle_driver)) { + printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", + acpi_idle_driver.name); +@@ -871,6 +881,7 @@ static int __init acpi_processor_init(vo + printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", + cpuidle_get_driver()->name); + } ++#endif + + result = acpi_bus_register_driver(&acpi_processor_driver); + if (result < 0) +--- head-2011-03-17.orig/drivers/acpi/processor_idle.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/acpi/processor_idle.c 2011-02-01 15:03:10.000000000 +0100 +@@ -1068,12 +1068,6 @@ static int acpi_processor_setup_cpuidle( + + return 0; + } +- +-#else /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */ +-static inline int acpi_processor_setup_cpuidle(struct acpi_processor *pr) +-{ +- return 0; +-} + #endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */ + + int acpi_processor_cst_has_changed(struct acpi_processor *pr) +@@ -1104,10 +1098,12 @@ int acpi_processor_cst_has_changed(struc + cpuidle_pause_and_lock(); + cpuidle_disable_device(&pr->power.dev); + acpi_processor_get_power_info(pr); ++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL + if (pr->flags.power) { + acpi_processor_setup_cpuidle(pr); + ret = cpuidle_enable_device(&pr->power.dev); + } ++#endif + cpuidle_resume_and_unlock(); + + return ret; +@@ -1147,6 +1143,7 @@ int __cpuinit acpi_processor_power_init( + acpi_processor_get_power_info(pr); + 
pr->flags.power_setup_done = 1; + ++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL + /* + * Install the idle handler if processor power management is supported. + * Note that we use previously set idle handler will be used on +@@ -1157,6 +1154,7 @@ int __cpuinit acpi_processor_power_init( + if (cpuidle_register_device(&pr->power.dev)) + return -EIO; + } ++#endif + + if (processor_pm_external()) + processor_notify_external(pr, +--- head-2011-03-17.orig/drivers/edac/i7core_edac.c 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/drivers/edac/i7core_edac.c 2011-02-01 15:03:10.000000000 +0100 +@@ -1842,8 +1842,11 @@ static int i7core_mce_check_error(void * + if (mce->bank != 8) + return 0; + +-#ifdef CONFIG_SMP + /* Only handle if it is the right mc controller */ ++#if defined(CONFIG_XEN) /* Could easily be used for native too. */ ++ if (mce->socketid != pvt->i7core_dev->socket) ++ return 0; ++#elif defined(CONFIG_SMP) + if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket) + return 0; + #endif +--- head-2011-03-17.orig/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-02-01 15:03:10.000000000 +0100 +@@ -514,6 +514,21 @@ static int ttm_alloc_new_pages(struct li + goto out; + } + ++#ifdef CONFIG_XEN ++ if (gfp_flags & __GFP_DMA32) { ++ r = xen_limit_pages_to_max_mfn(p, 0, 32); ++ ++ if (r) { ++ __free_page(p); ++ printk(KERN_ERR TTM_PFX ++ "Cannot restrict page (%d).", r); ++ break; ++ } ++ if (gfp_flags & __GFP_ZERO) ++ clear_page(page_address(p)); ++ } ++#endif ++ + #ifdef CONFIG_HIGHMEM + /* gfp flags of highmem page should never be dma32 so we + * we should be fine in such case +@@ -689,6 +704,22 @@ int ttm_get_pages(struct list_head *page + return -ENOMEM; + } + ++#ifdef CONFIG_XEN ++ if (flags & TTM_PAGE_FLAG_DMA32) { ++ int rc = xen_limit_pages_to_max_mfn(p, 0, 32); ++ ++ if (rc) { ++ __free_page(p); ++ printk(KERN_ERR TTM_PFX ++ "Unable to restrict page (%d).", ++ rc); ++ return rc; ++ } ++ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) ++ clear_page(page_address(p)); ++ } ++#endif ++ + list_add(&p->lru, pages); + } + return 0; +--- head-2011-03-17.orig/drivers/hwmon/coretemp-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/coretemp-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -54,6 +54,7 @@ struct pdev_entry { + struct device *hwmon_dev; + struct mutex update_lock; + const char *name; ++ u32 cpu_core_id, phys_proc_id; + u8 x86_model, x86_mask; + u32 ucode_rev; + char valid; /* zero until following fields are valid */ +@@ -78,7 +79,7 @@ static ssize_t show_name(struct device * + if (attr->index == SHOW_NAME) + ret = sprintf(buf, "%s\n", data->name); + else /* show label */ +- ret = sprintf(buf, "Core %d\n", data->pdev->id); ++ ret = sprintf(buf, "Core %d\n", data->cpu_core_id); + return ret; + } + +@@ -246,6 +247,52 @@ static int adjust_tjmax(struct coretemp_ + return tjmax; + } + ++static int get_tjmax(struct coretemp_data *c, u32 id, struct device *dev) ++{ ++ /* The 100C is default for both mobile and non mobile CPUs */ ++ int err; ++ u32 eax, edx; ++ u32 val; ++ ++ /* A new feature of current Intel(R) processors, the ++ IA32_TEMPERATURE_TARGET contains the TjMax value */ ++ err = rdmsr_safe_on_pcpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); ++ if (err < 0) { ++ dev_warn(dev, "Unable to read TjMax from CPU.\n"); ++ } else { ++ val = (eax >> 16) & 0xff; ++ /* ++ * If the TjMax is not plausible, an assumption ++ * will be used ++ */ ++ if ((val > 80) && (val < 120)) 
{ ++ dev_info(dev, "TjMax is %d C.\n", val); ++ return val * 1000; ++ } ++ } ++ ++ /* ++ * An assumption is made for early CPUs and unreadable MSR. ++ * NOTE: the given value may not be correct. ++ */ ++ ++ switch (c->x86_model) { ++ case 0xe: ++ case 0xf: ++ case 0x16: ++ case 0x1a: ++ dev_warn(dev, "TjMax is assumed as 100 C!\n"); ++ return 100000; ++ case 0x17: ++ case 0x1c: /* Atom CPUs */ ++ return adjust_tjmax(c, id, dev); ++ default: ++ dev_warn(dev, "CPU (model=0x%x) is not supported yet," ++ " using default TjMax of 100C.\n", c->x86_model); ++ return 100000; ++ } ++} ++ + static int coretemp_probe(struct platform_device *pdev) + { + struct coretemp_data *data = platform_get_drvdata(pdev); +@@ -282,13 +329,17 @@ static int coretemp_probe(struct platfor + } + } + +- data->tjmax = adjust_tjmax(data, pdev->id, &pdev->dev); ++ data->tjmax = get_tjmax(data, pdev->id, &pdev->dev); + +- /* read the still undocumented IA32_TEMPERATURE_TARGET it exists +- on older CPUs but not in this register, Atoms don't have it either */ ++ /* ++ * read the still undocumented IA32_TEMPERATURE_TARGET. It exists ++ * on older CPUs but not in this register, ++ * Atoms don't have it either. ++ */ + + if ((data->x86_model > 0xe) && (data->x86_model != 0x1c)) { +- err = rdmsr_safe_on_pcpu(pdev->id, 0x1a2, &eax, &edx); ++ err = rdmsr_safe_on_pcpu(pdev->id, MSR_IA32_TEMPERATURE_TARGET, ++ &eax, &edx); + if (err < 0) { + dev_warn(&pdev->dev, "Unable to read" + " IA32_TEMPERATURE_TARGET MSR\n"); +@@ -346,7 +397,6 @@ static DEFINE_MUTEX(pdev_list_mutex); + + struct cpu_info { + struct pdev_entry *pdev_entry; +- u8 x86; + u32 cpuid_6_eax; + }; + +@@ -356,11 +406,11 @@ static void get_cpuid_info(void *arg) + struct pdev_entry *pdev_entry = info->pdev_entry; + u32 val = cpuid_eax(1); + +- info->x86 = ((val >> 8) & 0xf) + ((val >> 20) & 0xff); + pdev_entry->x86_model = ((val >> 4) & 0xf) | ((val >> 12) & 0xf0); + pdev_entry->x86_mask = val & 0xf; + +- if (info->x86 != 6 || !pdev_entry->x86_model ++ if (((val >> 8) & 0xf) != 6 || ((val >> 20) & 0xff) ++ || !pdev_entry->x86_model + || wrmsr_safe(MSR_IA32_UCODE_REV, 0, 0) < 0 + || (sync_core(), rdmsr_safe(MSR_IA32_UCODE_REV, + &val, &pdev_entry->ucode_rev)) < 0) +@@ -376,11 +426,10 @@ static int coretemp_device_add(unsigned + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + +- pdev_entry = kzalloc(sizeof(*pdev_entry), GFP_KERNEL); ++ info.pdev_entry = kzalloc(sizeof(*pdev_entry), GFP_KERNEL); + if (!info.pdev_entry) + return -ENOMEM; + +- info.pdev_entry = pdev_entry; + err = xen_set_physical_cpu_affinity(cpu); + if (!err) { + get_cpuid_info(&info); +@@ -399,31 +448,41 @@ static int coretemp_device_add(unsigned + if (err) + goto exit_entry_free; + +- /* check if family 6, models 0xe (Pentium M DC), +- 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm), +- 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom), +- 0x1e (Lynnfield) */ +- if (info.x86 != 0x6 || +- !((pdev_entry->x86_model == 0xe) || (pdev_entry->x86_model == 0xf) || +- (pdev_entry->x86_model == 0x16) || (pdev_entry->x86_model == 0x17) || +- (pdev_entry->x86_model == 0x1a) || (pdev_entry->x86_model == 0x1c) || +- (pdev_entry->x86_model == 0x1e))) { +- +- /* supported CPU not found, but report the unknown +- family 6 CPU */ +- if ((info.x86 == 0x6) && (pdev_entry->x86_model > 0xf)) +- printk(KERN_WARNING DRVNAME ": Unknown CPU " +- "model 0x%x\n", pdev_entry->x86_model); ++ /* ++ * CPUID.06H.EAX[0] indicates whether the CPU has thermal ++ * sensors. 
We check this bit only, all the early CPUs ++ * without thermal sensors will be filtered out. ++ */ ++ if (!(info.cpuid_6_eax & 0x1)) { ++ printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" ++ " has no thermal sensor.\n", info.pdev_entry->x86_model); ++ goto exit_entry_free; ++ } ++ ++ err = xen_get_topology_info(cpu, &info.pdev_entry->cpu_core_id, ++ &info.pdev_entry->phys_proc_id, NULL); ++ if (err) + goto exit_entry_free; ++ ++ mutex_lock(&pdev_list_mutex); ++ ++ /* Skip second HT entry of each core */ ++ list_for_each_entry(pdev_entry, &pdev_list, list) { ++ if (info.pdev_entry->phys_proc_id == pdev_entry->phys_proc_id && ++ info.pdev_entry->cpu_core_id == pdev_entry->cpu_core_id) { ++ err = 0; /* Not an error */ ++ goto exit; ++ } + } + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + printk(KERN_ERR DRVNAME ": Device allocation failed\n"); +- goto exit_entry_free; ++ goto exit; + } + ++ pdev_entry = info.pdev_entry; + platform_set_drvdata(pdev, pdev_entry); + pdev_entry->pdev = pdev; + +@@ -434,7 +493,6 @@ static int coretemp_device_add(unsigned + goto exit_device_put; + } + +- mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + +@@ -442,6 +500,8 @@ static int coretemp_device_add(unsigned + + exit_device_put: + platform_device_put(pdev); ++exit: ++ mutex_unlock(&pdev_list_mutex); + exit_entry_free: + kfree(info.pdev_entry); + return err; +@@ -450,14 +510,36 @@ exit_entry_free: + static void coretemp_device_remove(unsigned int cpu) + { + struct pdev_entry *p; ++ unsigned int i; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { +- if (p->pdev->id == cpu) { +- platform_device_unregister(p->pdev); +- list_del(&p->list); +- kfree(p); ++ if (p->pdev->id != cpu) ++ continue; ++ ++ platform_device_unregister(p->pdev); ++ list_del(&p->list); ++ mutex_unlock(&pdev_list_mutex); ++ for (i = 0; ; ++i) { ++ u32 cpu_core_id, phys_proc_id; ++ int err; ++ ++ if (i == cpu) ++ continue; ++ err = xen_get_topology_info(i, &cpu_core_id, ++ &phys_proc_id, NULL); ++ if (err == -ENOENT) ++ continue; ++ if (err) ++ break; ++ if (phys_proc_id != p->phys_proc_id || ++ cpu_core_id != p->cpu_core_id) ++ continue; ++ if (!coretemp_device_add(i)) ++ break; + } ++ kfree(p); ++ return; + } + mutex_unlock(&pdev_list_mutex); + } +--- head-2011-03-17.orig/drivers/xen/balloon/balloon.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/balloon/balloon.c 2011-02-01 15:03:10.000000000 +0100 +@@ -343,7 +343,7 @@ static int increase_reservation(unsigned + if (rc > 0) + kswapd_run(0); + if (need_zonelists_rebuild) +- build_all_zonelists(); ++ build_all_zonelists(NULL); + else + vm_total_pages = nr_free_pagecache_pages(); + #endif +--- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:18:48.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-17 10:19:12.000000000 +0100 +@@ -1768,3 +1768,4 @@ static int __init blkif_init(void) + module_init(blkif_init); + + MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_ALIAS("devname:xen/blktap0"); +--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-02-01 15:03:10.000000000 +0100 +@@ -283,3 +283,4 @@ fail: + module_init(blktap_init); + module_exit(blktap_exit); + MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_ALIAS("devname:" BLKTAP2_DEV_DIR "control"); +--- head-2011-03-17.orig/drivers/xen/blktap2-new/control.c 2011-02-24 15:14:41.000000000 
+0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/control.c 2011-02-24 15:03:58.000000000 +0100 +@@ -314,3 +314,4 @@ fail: + module_init(blktap_init); + module_exit(blktap_exit); + MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_ALIAS("devname:" BLKTAP2_DEV_DIR "control"); +--- head-2011-03-17.orig/drivers/xen/console/console.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/console/console.c 2011-02-01 15:03:10.000000000 +0100 +@@ -365,27 +365,23 @@ void xencons_rx(char *buf, unsigned len) + + for (i = 0; i < len; i++) { + #ifdef CONFIG_MAGIC_SYSRQ +- if (sysrq_on()) { +- static unsigned long sysrq_requested; ++ static unsigned long sysrq_requested; + +- if (buf[i] == '\x0f') { /* ^O */ +- if (!sysrq_requested) { +- sysrq_requested = jiffies; +- continue; /* don't print sysrq key */ +- } +- sysrq_requested = 0; +- } else if (sysrq_requested) { +- unsigned long sysrq_timeout = +- sysrq_requested + HZ*2; +- sysrq_requested = 0; +- if (time_before(jiffies, sysrq_timeout)) { +- spin_unlock_irqrestore( +- &xencons_lock, flags); +- handle_sysrq(buf[i], xencons_tty); +- spin_lock_irqsave( +- &xencons_lock, flags); +- continue; +- } ++ if (buf[i] == '\x0f') { /* ^O */ ++ if (!sysrq_requested) { ++ sysrq_requested = jiffies; ++ continue; /* don't print sysrq key */ ++ } ++ sysrq_requested = 0; ++ } else if (sysrq_requested) { ++ unsigned long sysrq_timeout = sysrq_requested + HZ*2; ++ ++ sysrq_requested = 0; ++ if (time_before(jiffies, sysrq_timeout)) { ++ spin_unlock_irqrestore(&xencons_lock, flags); ++ handle_sysrq(buf[i], xencons_tty); ++ spin_lock_irqsave(&xencons_lock, flags); ++ continue; + } + } + #endif +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-09 12:45:24.000000000 +0100 +@@ -1201,18 +1201,10 @@ int __init arch_probe_nr_irqs(void) + int nr = 256, nr_irqs_gsi; + + if (is_initial_xendomain()) { +- nr_irqs_gsi = acpi_probe_gsi(); ++ nr_irqs_gsi = NR_IRQS_LEGACY; + #ifdef CONFIG_X86_IO_APIC +- if (nr_irqs_gsi <= NR_IRQS_LEGACY) { +- /* for acpi=off or acpi not compiled in */ +- int idx; +- +- for (nr_irq_gsi = idx = 0; idx < nr_ioapics; idx++) +- nr_irqs_gsi += io_apic_get_redir_entries(idx) + 1; +- } ++ nr_irqs_gsi += gsi_top; + #endif +- if (nr_irqs_gsi < NR_IRQS_LEGACY) +- nr_irqs_gsi = NR_IRQS_LEGACY; + #ifdef CONFIG_PCI_MSI + nr += max(nr_irqs_gsi * 16, nr_cpu_ids * 8); + #endif +--- head-2011-03-17.orig/drivers/xen/core/machine_reboot.c 2011-02-01 14:42:26.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/machine_reboot.c 2011-02-01 15:03:10.000000000 +0100 +@@ -222,12 +222,6 @@ int __xen_suspend(int fast_suspend, void + if (num_possible_cpus() == 1) + fast_suspend = 0; + +- if (fast_suspend) { +- err = stop_machine_create(); +- if (err) +- return err; +- } +- + suspend.fast_suspend = fast_suspend; + suspend.resume_notifier = resume_notifier; + +@@ -254,8 +248,6 @@ int __xen_suspend(int fast_suspend, void + + if (!fast_suspend) + smp_resume(); +- else +- stop_machine_destroy(); + + return 0; + } +--- head-2011-03-17.orig/drivers/xen/evtchn.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/drivers/xen/evtchn.c 2011-02-01 15:03:10.000000000 +0100 +@@ -569,3 +569,4 @@ module_init(evtchn_init); + module_exit(evtchn_cleanup); + + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("devname:xen/evtchn"); +--- head-2011-03-17.orig/drivers/xen/gntdev/gntdev.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/gntdev/gntdev.c 2011-02-01 15:03:10.000000000 +0100 +@@ 
-44,6 +44,9 @@ MODULE_LICENSE("GPL"); + MODULE_AUTHOR(DRIVER_AUTHOR); + MODULE_DESCRIPTION(DRIVER_DESC); + ++#define GNTDEV_NAME "gntdev" ++MODULE_ALIAS("devname:xen/" GNTDEV_NAME); ++ + #define MAX_GRANTS_LIMIT 1024 + #define DEFAULT_MAX_GRANTS 128 + +@@ -162,8 +165,6 @@ static struct vm_operations_struct gntde + /* The driver major number, for use when unregistering the driver. */ + static int gntdev_major; + +-#define GNTDEV_NAME "gntdev" +- + /* Memory mapping functions + * ------------------------ + * +--- head-2011-03-17.orig/drivers/xen/usbback/usbback.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbback/usbback.c 2011-02-01 15:03:10.000000000 +0100 +@@ -73,7 +73,6 @@ typedef struct { + void *buffer; + dma_addr_t transfer_dma; + struct usb_ctrlrequest *setup; +- dma_addr_t setup_dma; + + /* request segments */ + uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ +@@ -239,7 +238,7 @@ static int usbbk_alloc_urb(usbif_urb_req + } + + if (req->buffer_length) { +- pending_req->buffer = usb_buffer_alloc(pending_req->stub->udev, ++ pending_req->buffer = usb_alloc_coherent(pending_req->stub->udev, + req->buffer_length, GFP_KERNEL, + &pending_req->transfer_dma); + if (!pending_req->buffer) { +@@ -250,9 +249,8 @@ static int usbbk_alloc_urb(usbif_urb_req + } + + if (usb_pipecontrol(req->pipe)) { +- pending_req->setup = usb_buffer_alloc(pending_req->stub->udev, +- sizeof(struct usb_ctrlrequest), GFP_KERNEL, +- &pending_req->setup_dma); ++ pending_req->setup = kmalloc(sizeof(struct usb_ctrlrequest), ++ GFP_KERNEL); + if (!pending_req->setup) { + pr_err("usbback: can't alloc usb_ctrlrequest\n"); + ret = -ENOMEM; +@@ -264,8 +262,10 @@ static int usbbk_alloc_urb(usbif_urb_req + + fail_free_buffer: + if (req->buffer_length) +- usb_buffer_free(pending_req->stub->udev, req->buffer_length, +- pending_req->buffer, pending_req->transfer_dma); ++ usb_free_coherent(pending_req->stub->udev, ++ req->buffer_length, ++ pending_req->buffer, ++ pending_req->transfer_dma); + fail_free_urb: + usb_free_urb(pending_req->urb); + fail: +@@ -284,11 +284,10 @@ static void usbbk_free_urb(struct urb *u + static void _usbbk_free_urb(struct urb *urb) + { + if (usb_pipecontrol(urb->pipe)) +- usb_buffer_free(urb->dev, sizeof(struct usb_ctrlrequest), +- urb->setup_packet, urb->setup_dma); ++ kfree(urb->setup_packet); + if (urb->transfer_buffer_length) +- usb_buffer_free(urb->dev, urb->transfer_buffer_length, +- urb->transfer_buffer, urb->transfer_dma); ++ usb_free_coherent(urb->dev, urb->transfer_buffer_length, ++ urb->transfer_buffer, urb->transfer_dma); + barrier(); + usb_free_urb(urb); + } +@@ -534,9 +533,7 @@ static void usbbk_init_urb(usbif_urb_req + pending_req->buffer, req->buffer_length, + usbbk_urb_complete, pending_req); + memcpy(pending_req->setup, req->u.ctrl, 8); +- urb->setup_dma = pending_req->setup_dma; + urb->transfer_flags = req->transfer_flags; +- urb->transfer_flags |= URB_NO_SETUP_DMA_MAP; + + break; + case PIPE_BULK: +--- head-2011-03-17.orig/drivers/xen/usbfront/usbfront.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/usbfront.h 2011-02-01 15:03:10.000000000 +0100 +@@ -51,6 +51,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -58,14 +59,6 @@ + #include + #include + +-/* +- * usbfront needs USB HCD headers, +- * drivers/usb/core/hcd.h and drivers/usb/core/hub.h, +- * but, they are not in public include path. 
+- */ +-#include "../../usb/core/hcd.h" +-#include "../../usb/core/hub.h" +- + static inline struct usbfront_info *hcd_to_info(struct usb_hcd *hcd) + { + return (struct usbfront_info *) (hcd->hcd_priv); +--- head-2011-03-17.orig/include/acpi/processor.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/include/acpi/processor.h 2011-02-01 15:03:10.000000000 +0100 +@@ -80,8 +80,9 @@ struct acpi_processor_cx { + u32 power; + u32 usage; + u64 time; ++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL + u8 bm_sts_skip; +-#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL ++#else + /* Require raw information for external control logic */ + struct acpi_power_register reg; + u32 csd_count; +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 15:03:10.000000000 +0100 +@@ -619,37 +619,6 @@ swiotlb_sync_single_for_device(struct de + EXPORT_SYMBOL(swiotlb_sync_single_for_device); + + /* +- * Same as above, but for a sub-range of the mapping. +- */ +-static void +-swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, +- unsigned long offset, size_t size, +- int dir, int target) +-{ +- swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target); +-} +- +-void +-swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, +- unsigned long offset, size_t size, +- enum dma_data_direction dir) +-{ +- swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, +- SYNC_FOR_CPU); +-} +-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); +- +-void +-swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, +- unsigned long offset, size_t size, +- enum dma_data_direction dir) +-{ +- swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, +- SYNC_FOR_DEVICE); +-} +-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); +- +-/* + * Map a set of buffers described by scatterlist in streaming mode for DMA. + * This is the scatter-gather version of the above swiotlb_map_page + * interface. Here the scatter gather list elements are each tagged with the +--- head-2011-03-17.orig/mm/page_alloc.c 2011-02-08 10:06:32.000000000 +0100 ++++ head-2011-03-17/mm/page_alloc.c 2011-02-08 10:06:44.000000000 +0100 +@@ -649,9 +649,8 @@ static bool free_pages_prepare(struct pa + + #ifdef CONFIG_XEN + if (PageForeign(page)) { +- WARN_ON(wasMlocked); + PageForeignDestructor(page, order); +- return; ++ return false; + } + #endif + +@@ -681,6 +680,9 @@ static void __free_pages_ok(struct page + unsigned long flags; + int wasMlocked = __TestClearPageMlocked(page); + ++#ifdef CONFIG_XEN ++ WARN_ON(PageForeign(page) && wasMlocked); ++#endif + if (!free_pages_prepare(page, order)) + return; + +@@ -1171,6 +1173,9 @@ void free_hot_cold_page(struct page *pag + int migratetype; + int wasMlocked = __TestClearPageMlocked(page); + ++#ifdef CONFIG_XEN ++ WARN_ON(PageForeign(page) && wasMlocked); ++#endif + if (!free_pages_prepare(page, 0)) + return; + diff --git a/patches.xen/xen3-patch-2.6.36 b/patches.xen/xen3-patch-2.6.36 new file mode 100644 index 0000000..d47834e --- /dev/null +++ b/patches.xen/xen3-patch-2.6.36 @@ -0,0 +1,2833 @@ +From: Linux Kernel Mailing List +Subject: Linux: 2.6.36 +Patch-mainline: 2.6.36 + + This patch contains the differences between 2.6.35 and 2.6.36. 
+ +Acked-by: Jeff Mahoney +Automatically created from "patches.kernel.org/patch-2.6.36" by xen-port-patches.py + +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-02-17 13:43:12.000000000 +0100 +@@ -56,7 +56,7 @@ config X86 + select HAVE_HW_BREAKPOINT + select HAVE_MIXED_BREAKPOINTS_REGS + select PERF_EVENTS +- select HAVE_PERF_EVENTS_NMI ++ select HAVE_PERF_EVENTS_NMI if !XEN + select ANON_INODES + select HAVE_ARCH_KMEMCHECK + select HAVE_USER_RETURN_NOTIFIER +@@ -248,7 +248,7 @@ config KTIME_SCALAR + + config ARCH_CPU_PROBE_RELEASE + def_bool y +- depends on HOTPLUG_CPU ++ depends on HOTPLUG_CPU && !XEN + + source "init/Kconfig" + source "kernel/Kconfig.freezer" +@@ -1064,7 +1064,7 @@ config X86_CPUID + + choice + prompt "High Memory Support" +- default HIGHMEM64G if X86_NUMAQ ++ default HIGHMEM64G if X86_NUMAQ || XEN + default HIGHMEM4G + depends on X86_32 + +@@ -1107,7 +1107,7 @@ config NOHIGHMEM + + config HIGHMEM4G + bool "4GB" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !XEN + ---help--- + Select this if you have a 32-bit processor and between 1 and 4 + gigabytes of physical RAM. +--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-02-01 15:04:27.000000000 +0100 +@@ -47,7 +47,12 @@ + /* + * Reload arg registers from stack in case ptrace changed them. + * We don't reload %eax because syscall_trace_enter() returned +- * the value it wants us to use in the table lookup. ++ * the %rax value we should see. Instead, we just truncate that ++ * value to 32 bits again as we did on entry from user mode. ++ * If it's a new value set by user_regset during entry tracing, ++ * this matches the normal truncation of the user-mode value. ++ * If it's -1 to make us punt the syscall, then (u32)-1 is still ++ * an appropriately invalid value. 
+ */ + .macro LOAD_ARGS32 offset, _r9=0 + .if \_r9 +@@ -57,6 +62,7 @@ + movl \offset+48(%rsp),%edx + movl \offset+56(%rsp),%esi + movl \offset+64(%rsp),%edi ++ movl %eax,%eax /* zero extension */ + .endm + + .macro CFI_STARTPROC32 simple +@@ -151,7 +157,7 @@ ENTRY(ia32_sysenter_target) + movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ + call audit_syscall_entry + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys + movl %ebx,%edi /* reload 1st syscall arg */ + movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ +@@ -216,7 +222,7 @@ ENTRY(ia32_cstar_target) + orl $TS_COMPAT,TI_status(%r10) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) + jnz cstar_tracesys +- cmpl $IA32_NR_syscalls-1,%eax ++ cmpq $IA32_NR_syscalls-1,%rax + ja ia32_badsys + cstar_do_call: + IA32_ARG_FIXUP 1 +@@ -243,7 +249,7 @@ cstar_tracesys: + LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ + RESTORE_REST + xchgl %ebp,%r9d +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ + jmp cstar_do_call + END(ia32_cstar_target) +@@ -301,7 +307,7 @@ ENTRY(ia32_syscall) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) + jnz ia32_tracesys + .Lia32_check_call: +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys + ia32_do_call: + IA32_ARG_FIXUP +@@ -325,7 +331,7 @@ ia32_tracesys: + call syscall_trace_enter + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ + jmp ia32_do_call + END(ia32_syscall) +@@ -723,4 +729,7 @@ ia32_sys_call_table: + .quad compat_sys_rt_tgsigqueueinfo /* 335 */ + .quad sys_perf_event_open + .quad compat_sys_recvmmsg ++ .quad sys_fanotify_init ++ .quad sys32_fanotify_mark ++ .quad sys_prlimit64 /* 340 */ + ia32_syscall_end: +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 15:04:27.000000000 +0100 +@@ -60,7 +60,7 @@ void *kmap(struct page *page); + void kunmap(struct page *page); + void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); + void *kmap_atomic(struct page *page, enum km_type type); +-void kunmap_atomic(void *kvaddr, enum km_type type); ++void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); + void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); + void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); + struct page *kmap_atomic_to_page(void *ptr); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 15:04:27.000000000 +0100 +@@ -30,6 +30,9 @@ extern struct pci_bus *pci_scan_bus_on_n + int node); + extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); + ++#ifdef CONFIG_PCI ++ ++#ifdef CONFIG_PCI_DOMAINS + static inline int pci_domain_nr(struct pci_bus *bus) + { + struct pci_sysdata *sd = bus->sysdata; +@@ -40,13 +43,12 @@ static inline int pci_proc_domain(struct + { + return pci_domain_nr(bus); + } +- ++#endif + + /* Can be used to override the logic in pci_scan_bus for skipping + already-configured bus numbers - to be used for buggy BIOSes + or architectures with incomplete 
PCI setup by the loader */ + +-#ifdef CONFIG_PCI + extern unsigned int pcibios_assign_all_busses(void); + extern int pci_legacy_init(void); + # ifdef CONFIG_ACPI +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/perf_event.h 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/perf_event.h 2011-02-01 15:04:27.000000000 +0100 +@@ -19,6 +19,19 @@ + _r_->flags & PERF_EFLAGS_EXACT ? _f_ | PERF_RECORD_MISC_EXACT_IP : _f_; \ + }) + ++#include ++ ++/* ++ * We abuse bit 3 from flags to pass exact information, see perf_misc_flags ++ * and the comment with PERF_EFLAGS_EXACT. ++ */ ++#define perf_arch_fetch_caller_regs(regs, __ip) { \ ++ (regs)->ip = (__ip); \ ++ (regs)->bp = caller_frame_pointer(); \ ++ (regs)->cs = __KERNEL_CS; \ ++ regs->flags = 0; \ ++} ++ + #endif + + static inline void init_hw_perf_events(void) {} +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 14:44:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 15:04:27.000000000 +0100 +@@ -91,7 +91,7 @@ static inline void pud_clear(pud_t *pudp + static inline pte_t xen_ptep_get_and_clear(pte_t *ptep, pte_t res) + { + uint64_t val = __pte_val(res); +- if (__cmpxchg64(ptep, val, 0) != val) { ++ if (__cmpxchg64(&ptep->pte, val, 0) != val) { + /* xchg acts as a barrier before the setting of the high bits */ + res.pte_low = xchg(&ptep->pte_low, 0); + res.pte_high = ptep->pte_high; +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 15:04:27.000000000 +0100 +@@ -25,6 +25,7 @@ + struct vm_area_struct; + + extern pgd_t *swapper_pg_dir; ++extern pgd_t trampoline_pg_dir[1024]; + + static inline void pgtable_cache_init(void) { } + static inline void check_pgt_cache(void) { } +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 15:04:27.000000000 +0100 +@@ -133,8 +133,8 @@ static inline int pgd_large(pgd_t pgd) { + /* x86-64 always has all page tables mapped. */ + #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) + #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) +-#define pte_unmap(pte) /* NOP */ +-#define pte_unmap_nested(pte) /* NOP */ ++#define pte_unmap(pte) ((void)(pte))/* NOP */ ++#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */ + + #define update_mmu_cache(vma, address, ptep) do { } while (0) + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:47:17.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:47:27.000000000 +0100 +@@ -716,6 +716,7 @@ extern void init_c1e_mask(void); + extern unsigned long boot_option_idle_override; + extern unsigned long idle_halt; + extern unsigned long idle_nomwait; ++extern bool c1e_detected; + + #ifndef CONFIG_XEN + /* +@@ -979,4 +980,24 @@ unsigned long calc_aperfmperf_ratio(stru + return ratio; + } + ++/* ++ * AMD errata checking ++ */ ++#ifdef CONFIG_CPU_SUP_AMD ++extern const int amd_erratum_383[]; ++extern const int amd_erratum_400[]; ++extern bool cpu_has_amd_erratum(const int *); ++ ++#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } ++#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } ++#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ ++ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) ++#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) ++#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) ++#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) ++ ++#else ++#define cpu_has_amd_erratum(x) (false) ++#endif /* CONFIG_CPU_SUP_AMD */ ++ + #endif /* _ASM_X86_PROCESSOR_H */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:10:31.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:11:05.000000000 +0100 +@@ -441,4 +441,11 @@ static __always_inline void rdtsc_barrie + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); + } + ++/* ++ * We handle most unaligned accesses in hardware. On the other hand ++ * unaligned DMA can be quite expensive on some Nehalem processors. ++ * ++ * Based on this we disable the IP header alignment in network drivers. ++ */ ++#define NET_IP_ALIGN 0 + #endif /* _ASM_X86_SYSTEM_H */ +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -2,7 +2,7 @@ + * sleep.c - x86-specific ACPI sleep support. + * + * Copyright (C) 2001-2003 Patrick Mochel +- * Copyright (C) 2001-2003 Pavel Machek ++ * Copyright (C) 2001-2003 Pavel Machek + */ + + #include +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -319,14 +319,19 @@ void arch_init_copy_chip_data(struct irq + + old_cfg = old_desc->chip_data; + +- memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); ++ cfg->vector = old_cfg->vector; ++ cfg->move_in_progress = old_cfg->move_in_progress; ++ cpumask_copy(cfg->domain, old_cfg->domain); ++ cpumask_copy(cfg->old_domain, old_cfg->old_domain); + + init_copy_irq_2_pin(old_cfg, cfg, node); + } + +-static void free_irq_cfg(struct irq_cfg *old_cfg) ++static void free_irq_cfg(struct irq_cfg *cfg) + { +- kfree(old_cfg); ++ free_cpumask_var(cfg->domain); ++ free_cpumask_var(cfg->old_domain); ++ kfree(cfg); + } + + void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) +@@ -1808,6 +1813,8 @@ __apicdebuginit(void) print_IO_APIC(void + struct irq_pin_list *entry; + + cfg = desc->chip_data; ++ if (!cfg) ++ continue; + entry = cfg->irq_2_pin; + if (!entry) + continue; +@@ -3498,7 +3505,7 @@ static int set_msi_irq_affinity(unsigned + + cfg = desc->chip_data; + +- read_msi_msg_desc(desc, &msg); ++ get_cached_msi_msg_desc(desc, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:42:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:43:00.000000000 +0100 +@@ -150,10 +150,18 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); + static int __init x86_xsave_setup(char *s) + { + setup_clear_cpu_cap(X86_FEATURE_XSAVE); ++ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + return 1; + } + __setup("noxsave", x86_xsave_setup); + ++static int __init x86_xsaveopt_setup(char *s) ++{ ++ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); ++ return 1; ++} ++__setup("noxsaveopt", x86_xsaveopt_setup); ++ + #ifdef CONFIG_X86_32 + static int cachesize_override __cpuinitdata = -1; + +@@ -568,7 +576,7 @@ void __cpuinit cpu_detect(struct cpuinfo + 
} + } + +-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) ++void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) + { + u32 tfms, xlvl; + u32 ebx; +@@ -582,6 +590,16 @@ static void __cpuinit get_cpu_cap(struct + c->x86_capability[4] = excap; + } + ++ /* Additional Intel-defined flags: level 0x00000007 */ ++ if (c->cpuid_level >= 0x00000007) { ++ u32 eax, ebx, ecx, edx; ++ ++ cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); ++ ++ if (eax > 0) ++ c->x86_capability[9] = ebx; ++ } ++ + /* AMD-defined flags: level 0x80000001 */ + xlvl = cpuid_eax(0x80000000); + c->extended_cpuid_level = xlvl; +@@ -607,6 +625,7 @@ static void __cpuinit get_cpu_cap(struct + if (c->extended_cpuid_level >= 0x80000007) + c->x86_power = cpuid_edx(0x80000007); + ++ init_scattered_cpuid_features(c); + } + + static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) +@@ -764,7 +783,6 @@ static void __cpuinit generic_identify(s + + get_model_name(c); /* Default name */ + +- init_scattered_cpuid_features(c); + detect_nopl(c); + } + +@@ -1273,6 +1291,7 @@ void __cpuinit cpu_init(void) + dbg_restore_debug_regs(); + + fpu_init(); ++ xsave_init(); + + #ifndef CONFIG_XEN + raw_local_save_flags(kernel_eflags); +@@ -1343,12 +1362,7 @@ void __cpuinit cpu_init(void) + clear_used_math(); + mxcsr_feature_mask_init(); + +- /* +- * Boot processor to setup the FP and extended state context info. +- */ +- if (smp_processor_id() == boot_cpu_id) +- init_thread_xstate(); +- ++ fpu_init(); + xsave_init(); + } + #endif +--- head-2011-03-17.orig/arch/x86/kernel/cpu/intel.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/intel.c 2011-02-01 15:04:27.000000000 +0100 +@@ -288,6 +288,7 @@ static void __cpuinit intel_workarounds( + } + #endif + ++#ifndef CONFIG_XEN + static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) + { + #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +@@ -306,7 +307,6 @@ static void __cpuinit srat_detect_node(s + #endif + } + +-#ifndef CONFIG_XEN + /* + * find out the number of processor cores on the die + */ +@@ -324,7 +324,6 @@ static int __cpuinit intel_num_cpu_cores + else + return 1; + } +-#endif + + static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) + { +@@ -363,6 +362,7 @@ static void __cpuinit detect_vmx_virtcap + set_cpu_cap(c, X86_FEATURE_VPID); + } + } ++#endif + + static void __cpuinit init_intel(struct cpuinfo_x86 *c) + { +@@ -459,13 +459,13 @@ static void __cpuinit init_intel(struct + detect_ht(c); + #endif + } +-#endif + + /* Work around errata */ + srat_detect_node(c); + + if (cpu_has(c, X86_FEATURE_VMX)) + detect_vmx_virtcap(c); ++#endif + } + + #ifdef CONFIG_X86_32 +--- head-2011-03-17.orig/arch/x86/kernel/cpu/scattered.c 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/scattered.c 2011-02-01 15:04:27.000000000 +0100 +@@ -40,6 +40,7 @@ void __cpuinit init_scattered_cpuid_feat + { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, + { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, + { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, ++#ifndef CONFIG_XEN + { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 }, + { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, + { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, +@@ -50,6 +51,7 @@ void __cpuinit init_scattered_cpuid_feat + { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 }, + { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 }, + { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 }, ++#endif + { 0, 0, 0, 0, 0 } + }; + +--- 
head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:04:27.000000000 +0100 +@@ -655,14 +655,14 @@ ldt_ss: + * compensating for the offset by changing to the ESPFIX segment with + * a base address that matches for the difference. + */ ++#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) + mov %esp, %edx /* load kernel esp */ + mov PT_OLDESP(%esp), %eax /* load userspace esp */ + mov %dx, %ax /* eax: new kernel esp */ + sub %eax, %edx /* offset (low word is 0) */ +- PER_CPU(gdt_page, %ebx) + shr $16, %edx +- mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */ +- mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */ ++ mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ ++ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ + pushl $__ESPFIX_SS + CFI_ADJUST_CFA_OFFSET 4 + push %eax /* new kernel esp */ +@@ -861,9 +861,8 @@ ptregs_clone: + * normal stack and adjusts ESP with the matching offset. + */ + /* fixup the stack */ +- PER_CPU(gdt_page, %ebx) +- mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */ +- mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */ ++ mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ ++ mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ + shl $16, %eax + addl %esp, %eax /* the adjusted stack pointer */ + pushl $__KERNEL_DS +@@ -1132,7 +1131,7 @@ ENTRY(simd_coprocessor_error) + .balign 4 + .long 661b + .long 663f +- .byte X86_FEATURE_XMM ++ .word X86_FEATURE_XMM + .byte 662b-661b + .byte 664f-663f + .previous +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:04:27.000000000 +0100 +@@ -1112,13 +1112,13 @@ END(kernel_thread_helper) + * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. 
+ * + * C extern interface: +- * extern long execve(char *name, char **argv, char **envp) ++ * extern long execve(const char *name, char **argv, char **envp) + * + * asm input arguments: + * rdi: name, rsi: argv, rdx: envp + * + * We want to fallback into: +- * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs) ++ * extern long sys_execve(const char *name, char **argv,char **envp, struct pt_regs *regs) + * + * do_sys_execve asm fallback arguments: + * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -288,6 +288,20 @@ static void __init smp_dump_mptable(stru + + void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { } + ++static void __init smp_register_lapic_address(unsigned long address) ++{ ++#ifndef CONFIG_XEN ++ mp_lapic_addr = address; ++ ++ set_fixmap_nocache(FIX_APIC_BASE, address); ++ if (boot_cpu_physical_apicid == -1U) { ++ boot_cpu_physical_apicid = read_apic_id(); ++ apic_version[boot_cpu_physical_apicid] = ++ GET_APIC_VERSION(apic_read(APIC_LVR)); ++ } ++#endif ++} ++ + static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) + { + char str[16]; +@@ -311,6 +325,10 @@ static int __init smp_read_mpc(struct mp + if (early) + return 1; + ++ /* Initialize the lapic mapping */ ++ if (!acpi_lapic) ++ smp_register_lapic_address(mpc->lapic); ++ + if (mpc->oemptr) + x86_init.mpparse.smp_read_mpc_oem(mpc); + +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -142,12 +142,23 @@ static struct dma_map_ops swiotlb_dma_op + .dma_supported = swiotlb_dma_supported + }; + ++#define pci_xen_swiotlb_detect() 1 ++ ++static void __init pci_xen_swiotlb_init(void) ++{ ++ swiotlb_init(1); ++ if (swiotlb) { ++ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); ++ dma_ops = &swiotlb_dma_ops; ++ } ++} ++ + void __init pci_iommu_alloc(void) + { + /* free the range so iommu could get some range less than 4G */ + dma32_free_bootmem(); + +- if (pci_swiotlb_detect()) ++ if (pci_xen_swiotlb_detect() || pci_swiotlb_detect()) + goto out; + + gart_iommu_hole_init(); +@@ -159,11 +170,7 @@ void __init pci_iommu_alloc(void) + /* needs to be called after gart_iommu_hole_init */ + amd_iommu_detect(); + out: +- swiotlb_init(1); +- if (swiotlb) { +- printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); +- dma_ops = &swiotlb_dma_ops; +- } ++ pci_xen_swiotlb_init(); + } + + void *dma_generic_alloc_coherent(struct device *dev, size_t size, +@@ -376,7 +383,7 @@ static int __init pci_iommu_init(void) + x86_init.iommu.iommu_init(); + + #ifndef CONFIG_XEN +- if (swiotlb) { ++ if (swiotlb || xen_swiotlb) { + printk(KERN_INFO "PCI-DMA: " + "Using software bounce buffering for IO (SWIOTLB)\n"); + swiotlb_print_info(); +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:10:40.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:11:01.000000000 +0100 +@@ -29,6 +29,7 @@ unsigned long idle_nomwait; + EXPORT_SYMBOL(idle_nomwait); + + struct kmem_cache *task_xstate_cachep; ++EXPORT_SYMBOL_GPL(task_xstate_cachep); + + int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) + { +@@ -287,8 +288,9 @@ EXPORT_SYMBOL(kernel_thread); + /* + * 
sys_execve() executes a new program. + */ +-long sys_execve(char __user *name, char __user * __user *argv, +- char __user * __user *envp, struct pt_regs *regs) ++long sys_execve(const char __user *name, ++ const char __user *const __user *argv, ++ const char __user *const __user *envp, struct pt_regs *regs) + { + long error; + char *filename; +@@ -328,7 +330,7 @@ EXPORT_SYMBOL(pm_idle); + */ + void xen_idle(void) + { +- trace_power_start(POWER_CSTATE, 1); ++ trace_power_start(POWER_CSTATE, 1, smp_processor_id()); + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we +@@ -394,7 +396,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); + */ + void mwait_idle_with_hints(unsigned long ax, unsigned long cx) + { +- trace_power_start(POWER_CSTATE, (ax>>4)+1); ++ trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); + if (!need_resched()) { + if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) + clflush((void *)¤t_thread_info()->flags); +@@ -410,7 +412,7 @@ void mwait_idle_with_hints(unsigned long + static void mwait_idle(void) + { + if (!need_resched()) { +- trace_power_start(POWER_CSTATE, 1); ++ trace_power_start(POWER_CSTATE, 1, smp_processor_id()); + if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) + clflush((void *)¤t_thread_info()->flags); + +@@ -432,7 +434,7 @@ static void mwait_idle(void) + */ + static void poll_idle(void) + { +- trace_power_start(POWER_CSTATE, 0); ++ trace_power_start(POWER_CSTATE, 0, smp_processor_id()); + local_irq_enable(); + while (!need_resched()) + cpu_relax(); +@@ -480,44 +482,10 @@ static int __cpuinit mwait_usable(const + return (edx & MWAIT_EDX_C1); + } + +-/* +- * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e. +- * For more information see +- * - Erratum #400 for NPT family 0xf and family 0x10 CPUs +- * - Erratum #365 for family 0x11 (not affected because C1e not in use) +- */ +-static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) +-{ +- u64 val; +- if (c->x86_vendor != X86_VENDOR_AMD) +- goto no_c1e_idle; +- +- /* Family 0x0f models < rev F do not have C1E */ +- if (c->x86 == 0x0F && c->x86_model >= 0x40) +- return 1; +- +- if (c->x86 == 0x10) { +- /* +- * check OSVW bit for CPUs that are not affected +- * by erratum #400 +- */ +- if (cpu_has(c, X86_FEATURE_OSVW)) { +- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); +- if (val >= 2) { +- rdmsrl(MSR_AMD64_OSVW_STATUS, val); +- if (!(val & BIT(1))) +- goto no_c1e_idle; +- } +- } +- return 1; +- } +- +-no_c1e_idle: +- return 0; +-} ++bool c1e_detected; ++EXPORT_SYMBOL(c1e_detected); + + static cpumask_var_t c1e_mask; +-static int c1e_detected; + + void c1e_remove_cpu(int cpu) + { +@@ -539,12 +507,12 @@ static void c1e_idle(void) + u32 lo, hi; + + rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); ++ + if (lo & K8_INTP_C1E_ACTIVE_MASK) { +- c1e_detected = 1; ++ c1e_detected = true; + if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halt in AMD C1E"); + printk(KERN_INFO "System has AMD C1E enabled\n"); +- set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E); + } + } + +@@ -595,7 +563,8 @@ void __cpuinit select_idle_routine(const + */ + printk(KERN_INFO "using mwait in idle threads.\n"); + pm_idle = mwait_idle; +- } else if (check_c1e_idle(c)) { ++ } else if (cpu_has_amd_erratum(amd_erratum_400)) { ++ /* E400: APIC timer interrupt does not wake up CPU from C1e */ + printk(KERN_INFO "using C1E aware idle routine\n"); + pm_idle = c1e_idle; + } else +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 
08:47:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:47:59.000000000 +0100 +@@ -59,6 +59,8 @@ + #include + #include + ++#include ++ + asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); + asmlinkage void cstar_ret_from_fork(void) __asm__("cstar_ret_from_fork"); + +@@ -114,6 +116,8 @@ void cpu_idle(void) + stop_critical_timings(); + xen_idle(); + start_critical_timings(); ++ ++ trace_power_end(smp_processor_id()); + } + tick_nohz_restart_sched_tick(); + preempt_enable_no_resched(); +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:56.000000000 +0100 +@@ -56,6 +56,8 @@ + #include + #include + ++#include ++ + asmlinkage extern void ret_from_fork(void); + + static DEFINE_PER_CPU(unsigned char, is_idle); +@@ -142,6 +144,9 @@ void cpu_idle(void) + stop_critical_timings(); + xen_idle(); + start_critical_timings(); ++ ++ trace_power_end(smp_processor_id()); ++ + /* In many cases the interrupt that ended idle + has already called exit_idle. But some idle + loops can be woken up without interrupt. */ +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:24:49.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:25:01.000000000 +0100 +@@ -102,6 +102,7 @@ + + #include + #include ++#include + + #include + #include +@@ -826,10 +827,15 @@ void __init setup_arch(char **cmdline_p) + /* VMI may relocate the fixmap; do this before touching ioremap area */ + vmi_init(); + ++ /* OFW also may relocate the fixmap */ ++ olpc_ofw_detect(); ++ + early_trap_init(); + early_cpu_init(); + early_ioremap_init(); + ++ setup_olpc_ofw_pgd(); ++ + #ifndef CONFIG_XEN + ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); + screen_info = boot_params.screen_info; +@@ -1143,6 +1149,8 @@ void __init setup_arch(char **cmdline_p) + paging_init(); + x86_init.paging.pagetable_setup_done(swapper_pg_dir); + ++ setup_trampoline_page_table(); ++ + tboot_probe(); + + #ifdef CONFIG_X86_64 +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -385,7 +385,13 @@ static notrace __kprobes void default_do + if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) + == NOTIFY_STOP) + return; ++ + #ifdef CONFIG_X86_LOCAL_APIC ++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) ++ == NOTIFY_STOP) ++ return; ++ ++#ifndef CONFIG_LOCKUP_DETECTOR + /* + * Ok, so this is none of the documented NMI sources, + * so it must be the NMI watchdog. 
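Taken together, the traps-xen.c hunk above and the one that follows rework the unknown-NMI tail of default_do_nmi(): a DIE_NMI notifier call now runs first, and the old nmi_watchdog_tick()/do_nmi_callback() fallback is only compiled in when the new lockup detector is absent. A rough sketch of the resulting control flow, reconstructed from the hunk context alone rather than from the patched tree:

/*
 * Illustrative sketch of default_do_nmi()'s unknown-NMI tail with both
 * traps-xen.c hunks applied; pieced together from hunk context only.
 */
#ifdef CONFIG_X86_LOCAL_APIC
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;			/* a registered NMI handler claimed it */

#ifndef CONFIG_LOCKUP_DETECTOR
	/* Legacy path: give the old NMI watchdog and callbacks a chance. */
	if (nmi_watchdog_tick(regs, reason))
		return;
	if (!do_nmi_callback(regs, cpu))
#endif /* !CONFIG_LOCKUP_DETECTOR */
		unknown_nmi_error(reason, regs);
#else
	unknown_nmi_error(reason, regs);
#endif

With CONFIG_LOCKUP_DETECTOR enabled the watchdog branch drops out entirely, so anything the notifier chain does not claim is reported as an unknown NMI.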
+@@ -393,6 +399,7 @@ static notrace __kprobes void default_do + if (nmi_watchdog_tick(regs, reason)) + return; + if (!do_nmi_callback(regs, cpu)) ++#endif /* !CONFIG_LOCKUP_DETECTOR */ + unknown_nmi_error(reason, regs); + #else + unknown_nmi_error(reason, regs); +--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -73,8 +73,8 @@ void update_vsyscall_tz(void) + write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + } + +-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, +- u32 mult) ++void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, ++ struct clocksource *clock, u32 mult) + { + unsigned long flags; + +@@ -87,7 +87,7 @@ void update_vsyscall(struct timespec *wa + vsyscall_gtod_data.clock.shift = clock->shift; + vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; + vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; +- vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; ++ vsyscall_gtod_data.wall_to_monotonic = *wtm; + vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); + write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + } +@@ -169,13 +169,18 @@ int __vsyscall(0) vgettimeofday(struct t + * unlikely */ + time_t __vsyscall(1) vtime(time_t *t) + { +- struct timeval tv; ++ unsigned seq; + time_t result; + if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) + return time_syscall(t); + +- vgettimeofday(&tv, NULL); +- result = tv.tv_sec; ++ do { ++ seq = read_seqbegin(&__vsyscall_gtod_data.lock); ++ ++ result = __vsyscall_gtod_data.wall_time_sec; ++ ++ } while (read_seqretry(&__vsyscall_gtod_data.lock, seq)); ++ + if (t) + *t = result; + return result; +--- head-2011-03-17.orig/arch/x86/mm/dump_pagetables-xen.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/dump_pagetables-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -40,6 +40,29 @@ struct addr_marker { + const char *name; + }; + ++/* indices for address_markers; keep sync'd w/ address_markers below */ ++enum address_markers_idx { ++ USER_SPACE_NR = 0, ++#ifdef CONFIG_X86_64 ++ XEN_SPACE_NR, ++ LOW_KERNEL_NR, ++ VMALLOC_START_NR, ++ VMEMMAP_START_NR, ++ HIGH_KERNEL_NR, ++ MODULES_VADDR_NR, ++ MODULES_END_NR, ++#else ++ KERNEL_SPACE_NR, ++ VMALLOC_START_NR, ++ VMALLOC_END_NR, ++# ifdef CONFIG_HIGHMEM ++ PKMAP_BASE_NR, ++# endif ++ FIXADDR_START_NR, ++ XEN_SPACE_NR, ++#endif ++}; ++ + /* Address space markers hints */ + static struct addr_marker address_markers[] = { + { 0, "User Space" }, +@@ -346,16 +369,13 @@ static int __init pt_dump_init(void) + + #ifdef CONFIG_X86_32 + /* Not a compile-time constant on x86-32 */ +- address_markers[2].start_address = VMALLOC_START; +- address_markers[3].start_address = VMALLOC_END; ++ address_markers[VMALLOC_START_NR].start_address = VMALLOC_START; ++ address_markers[VMALLOC_END_NR].start_address = VMALLOC_END; + # ifdef CONFIG_HIGHMEM +- address_markers[4].start_address = PKMAP_BASE; +- address_markers[5].start_address = FIXADDR_START; +- address_markers[6].start_address = hypervisor_virt_start; +-# else +- address_markers[4].start_address = FIXADDR_START; +- address_markers[5].start_address = hypervisor_virt_start; ++ address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; + # endif ++ address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; ++ address_markers[XEN_SPACE_NR].start_address = hypervisor_virt_start; + #endif + + pe = 
debugfs_create_file("kernel_page_tables", 0600, NULL, NULL, +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -821,8 +821,10 @@ do_sigbus(struct pt_regs *regs, unsigned + up_read(&mm->mmap_sem); + + /* Kernel mode? Handle exceptions or die: */ +- if (!(error_code & PF_USER)) ++ if (!(error_code & PF_USER)) { + no_context(regs, error_code, address); ++ return; ++ } + + /* User-space => ok to do another page fault: */ + if (is_prefetch(regs, error_code, address)) +--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page, enu + return kmap_atomic_prot(page, type, kmap_prot); + } + +-void kunmap_atomic(void *kvaddr, enum km_type type) ++void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) + { + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; + enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); +@@ -147,7 +147,7 @@ void copy_highpage(struct page *to, stru + EXPORT_SYMBOL(kmap); + EXPORT_SYMBOL(kunmap); + EXPORT_SYMBOL(kmap_atomic); +-EXPORT_SYMBOL(kunmap_atomic); ++EXPORT_SYMBOL(kunmap_atomic_notypecheck); + EXPORT_SYMBOL(kmap_atomic_prot); + EXPORT_SYMBOL(kmap_atomic_to_page); + EXPORT_SYMBOL(clear_highpage); +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -2,7 +2,7 @@ + * linux/arch/x86_64/mm/init.c + * + * Copyright (C) 1995 Linus Torvalds +- * Copyright (C) 2000 Pavel Machek ++ * Copyright (C) 2000 Pavel Machek + * Copyright (C) 2002,2003 Andi Kleen + * + * Jun Nakajima +--- head-2011-03-17.orig/arch/x86/mm/iomap_32-xen.c 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/iomap_32-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -75,7 +75,7 @@ void *kmap_atomic_prot_pfn(unsigned long + /* + * Map 'mfn' using fixed map 'type' and protections 'prot' + */ +-void * ++void __iomem * + iomap_atomic_prot_pfn(unsigned long mfn, enum km_type type, pgprot_t prot) + { + /* +@@ -88,12 +88,12 @@ iomap_atomic_prot_pfn(unsigned long mfn, + prot = PAGE_KERNEL_UC_MINUS; + + pgprot_val(prot) |= _PAGE_IOMAP; +- return kmap_atomic_prot_pfn(mfn, type, prot); ++ return (void __force __iomem *) kmap_atomic_prot_pfn(mfn, type, prot); + } + EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); + + void +-iounmap_atomic(void *kvaddr, enum km_type type) ++iounmap_atomic(void __iomem *kvaddr, enum km_type type) + { + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; + enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:41:54.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:42:02.000000000 +0100 +@@ -221,7 +221,7 @@ static void __iomem *__ioremap_caller(re + unsigned long size, unsigned long prot_val, void *caller) + { + unsigned long offset, vaddr; +- phys_addr_t mfn, last_addr; ++ phys_addr_t mfn, last_mfn, last_addr; + const resource_size_t unaligned_phys_addr = phys_addr; + const unsigned long unaligned_size = size; + struct vm_struct *area; +@@ -259,7 +259,8 @@ static void __iomem *__ioremap_caller(re + /* + * Don't allow anybody to remap normal RAM that we're using.. 
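The bound change just below is easy to read past. The old loop ran while mfn < PFN_UP(last_addr); since last_addr is the address of the mapping's last byte, that exclusive bound skips the final page whenever last_addr is exactly page aligned. With 4 KiB pages, a two-byte mapping starting at 0xfff has last_addr 0x1000, PFN_UP(0x1000) is 1, and only MFN 0 would be checked even though the mapping ends in page 1. Iterating up to last_mfn = PFN_DOWN(last_addr) with an inclusive bound covers every MFN the mapping actually touches. That, at least, is how the arithmetic reads from the hunk alone, so treat the worked example as illustrative.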
+ */ +- for (mfn = PFN_DOWN(phys_addr); mfn < PFN_UP(last_addr); mfn++) { ++ last_mfn = PFN_DOWN(last_addr); ++ for (mfn = PFN_DOWN(phys_addr); mfn <= last_mfn; mfn++) { + unsigned long pfn = mfn_to_local_pfn(mfn); + + if (pfn_valid(pfn)) { +@@ -274,7 +275,7 @@ static void __iomem *__ioremap_caller(re + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; +- phys_addr &= PAGE_MASK; ++ phys_addr &= PHYSICAL_PAGE_MASK; + size = PAGE_ALIGN(last_addr+1) - phys_addr; + + retval = reserve_memtype(phys_addr, (u64)phys_addr + size, +@@ -798,7 +799,7 @@ void __init early_iounmap(void __iomem * + return; + } + offset = virt_addr & ~PAGE_MASK; +- nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT; ++ nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT; + + idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; + while (nrpages > 0) { +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -995,7 +995,7 @@ static int pcibios_lookup_irq(struct pci + dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq); + + /* Update IRQ for all devices with the same pirq value */ +- while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { ++ for_each_pci_dev(dev2) { + pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); + if (!pin) + continue; +@@ -1034,7 +1034,7 @@ void __init pcibios_fixup_irqs(void) + u8 pin; + + DBG(KERN_DEBUG "PCI: IRQ fixup\n"); +- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { ++ for_each_pci_dev(dev) { + /* + * If the BIOS has set an out of range IRQ number, just + * ignore it. Also keep track of which IRQ's are +@@ -1058,7 +1058,7 @@ void __init pcibios_fixup_irqs(void) + return; + + dev = NULL; +- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { ++ for_each_pci_dev(dev) { + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + if (!pin) + continue; +--- head-2011-03-17.orig/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/vdso/vdso32-setup-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -413,11 +413,7 @@ int arch_setup_additional_pages(struct l + + #ifdef CONFIG_X86_64 + +-/* +- * This must be done early in case we have an initrd containing 32-bit +- * binaries (e.g., hotplug). This could be pushed upstream. 
+- */ +-core_initcall(sysenter_setup); ++subsys_initcall(sysenter_setup); + + #ifdef CONFIG_SYSCTL + /* Register vsyscall32 into the ABI table */ +--- head-2011-03-17.orig/arch/x86/xen/Kconfig 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/arch/x86/xen/Kconfig 2011-02-01 15:04:27.000000000 +0100 +@@ -25,7 +25,7 @@ config XEN_PRIVILEGED_GUEST + + config XEN_PVHVM + def_bool y +- depends on XEN ++ depends on PARAVIRT_XEN + depends on X86_LOCAL_APIC + + config XEN_MAX_DOMAIN_MEMORY +--- head-2011-03-17.orig/arch/x86/xen/enlighten.c 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/xen/enlighten.c 2011-02-01 15:04:27.000000000 +0100 +@@ -115,8 +115,8 @@ static int have_vcpu_info_placement = 1; + static void clamp_max_cpus(void) + { + #ifdef CONFIG_SMP +- if (setup_max_cpus > MAX_VIRT_CPUS) +- setup_max_cpus = MAX_VIRT_CPUS; ++ if (setup_max_cpus > XEN_LEGACY_MAX_VCPUS) ++ setup_max_cpus = XEN_LEGACY_MAX_VCPUS; + #endif + } + +@@ -128,11 +128,11 @@ static void xen_vcpu_setup(int cpu) + + BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + +- if (cpu < MAX_VIRT_CPUS) ++ if (cpu < XEN_LEGACY_MAX_VCPUS) + per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; + + if (!have_vcpu_info_placement) { +- if (cpu >= MAX_VIRT_CPUS) ++ if (cpu >= XEN_LEGACY_MAX_VCPUS) + clamp_max_cpus(); + return; + } +--- head-2011-03-17.orig/drivers/hwmon/Kconfig 2011-03-11 11:00:24.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/Kconfig 2011-02-01 15:04:27.000000000 +0100 +@@ -400,7 +400,7 @@ config SENSORS_CORETEMP + + config SENSORS_PKGTEMP + tristate "Intel processor package temperature sensor" +- depends on X86 && EXPERIMENTAL ++ depends on X86 && !XEN && EXPERIMENTAL + help + If you say yes here you get support for the package level temperature + sensor inside your CPU. Check documentation/driver for details. +--- head-2011-03-17.orig/drivers/hwmon/coretemp-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/coretemp-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -583,15 +583,16 @@ static int __init coretemp_init(void) + if (err) + goto exit_driver_unreg; + ++#ifndef CONFIG_ACPI_HOTPLUG_CPU + if (list_empty(&pdev_list)) { ++ unregister_pcpu_notifier(&coretemp_cpu_notifier); + err = -ENODEV; +- goto exit_notifier_unreg; ++ goto exit_driver_unreg; + } ++#endif + + return 0; + +-exit_notifier_unreg: +- unregister_pcpu_notifier(&coretemp_cpu_notifier); + exit_driver_unreg: + platform_driver_unregister(&coretemp_driver); + exit: +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/drivers/hwmon/pkgtemp-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -0,0 +1,452 @@ ++/* ++ * pkgtemp.c - Linux kernel module for processor package hardware monitoring ++ * ++ * Copyright (C) 2010 Fenghua Yu ++ * ++ * Inspired from many hwmon drivers especially coretemp. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA ++ * 02110-1301 USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../xen/core/domctl.h" ++ ++#define DRVNAME "pkgtemp" ++#define pkgtemp_data pdev_entry ++ ++enum { SHOW_TEMP, SHOW_TJMAX, SHOW_TTARGET, SHOW_LABEL, SHOW_NAME }; ++ ++/* ++ * Functions declaration ++ */ ++ ++static struct pkgtemp_data *pkgtemp_update_device(struct device *dev); ++ ++struct pdev_entry { ++ struct list_head list; ++ struct platform_device *pdev; ++ struct device *hwmon_dev; ++ struct mutex update_lock; ++ const char *name; ++ u32 phys_proc_id; ++ char valid; /* zero until following fields are valid */ ++ unsigned long last_updated; /* in jiffies */ ++ int temp; ++ int tjmax; ++ int ttarget; ++ u8 alarm; ++}; ++ ++/* ++ * Sysfs stuff ++ */ ++ ++static ssize_t show_name(struct device *dev, struct device_attribute ++ *devattr, char *buf) ++{ ++ int ret; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct pkgtemp_data *data = dev_get_drvdata(dev); ++ ++ if (attr->index == SHOW_NAME) ++ ret = sprintf(buf, "%s\n", data->name); ++ else /* show label */ ++ ret = sprintf(buf, "physical id %d\n", ++ data->phys_proc_id); ++ return ret; ++} ++ ++static ssize_t show_alarm(struct device *dev, struct device_attribute ++ *devattr, char *buf) ++{ ++ struct pkgtemp_data *data = pkgtemp_update_device(dev); ++ /* read the Out-of-spec log, never clear */ ++ return sprintf(buf, "%d\n", data->alarm); ++} ++ ++static ssize_t show_temp(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct pkgtemp_data *data = pkgtemp_update_device(dev); ++ int err = 0; ++ ++ if (attr->index == SHOW_TEMP) ++ err = data->valid ? 
sprintf(buf, "%d\n", data->temp) : -EAGAIN; ++ else if (attr->index == SHOW_TJMAX) ++ err = sprintf(buf, "%d\n", data->tjmax); ++ else ++ err = sprintf(buf, "%d\n", data->ttarget); ++ return err; ++} ++ ++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, SHOW_TEMP); ++static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, SHOW_TJMAX); ++static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, SHOW_TTARGET); ++static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL); ++static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); ++static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); ++ ++static struct attribute *pkgtemp_attributes[] = { ++ &sensor_dev_attr_name.dev_attr.attr, ++ &sensor_dev_attr_temp1_label.dev_attr.attr, ++ &dev_attr_temp1_crit_alarm.attr, ++ &sensor_dev_attr_temp1_input.dev_attr.attr, ++ &sensor_dev_attr_temp1_crit.dev_attr.attr, ++ NULL ++}; ++ ++static const struct attribute_group pkgtemp_group = { ++ .attrs = pkgtemp_attributes, ++}; ++ ++static struct pkgtemp_data *pkgtemp_update_device(struct device *dev) ++{ ++ struct pkgtemp_data *data = dev_get_drvdata(dev); ++ int err; ++ ++ mutex_lock(&data->update_lock); ++ ++ if (!data->valid || time_after(jiffies, data->last_updated + HZ)) { ++ u32 eax, edx; ++ ++ data->valid = 0; ++ err = rdmsr_safe_on_pcpu(data->pdev->id, ++ MSR_IA32_PACKAGE_THERM_STATUS, ++ &eax, &edx); ++ if (err >= 0) { ++ data->alarm = (eax >> 5) & 1; ++ data->temp = data->tjmax - (((eax >> 16) ++ & 0x7f) * 1000); ++ data->valid = 1; ++ } else ++ dev_dbg(dev, "Temperature data invalid (0x%x)\n", eax); ++ ++ data->last_updated = jiffies; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ return data; ++} ++ ++static int get_tjmax(int cpu, struct device *dev) ++{ ++ int default_tjmax = 100000; ++ int err; ++ u32 eax, edx; ++ u32 val; ++ ++ /* IA32_TEMPERATURE_TARGET contains the TjMax value */ ++ err = rdmsr_safe_on_pcpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); ++ if (err >= 0) { ++ val = (eax >> 16) & 0xff; ++ if ((val > 80) && (val < 120)) { ++ dev_info(dev, "TjMax is %d C.\n", val); ++ return val * 1000; ++ } ++ } ++ dev_warn(dev, "Unable to read TjMax from CPU.\n"); ++ return default_tjmax; ++} ++ ++static int pkgtemp_probe(struct platform_device *pdev) ++{ ++ struct pkgtemp_data *data = platform_get_drvdata(pdev); ++ int err; ++ u32 eax, edx; ++ ++ data->name = "pkgtemp"; ++ mutex_init(&data->update_lock); ++ ++ /* test if we can access the THERM_STATUS MSR */ ++ err = rdmsr_safe_on_pcpu(pdev->id, MSR_IA32_PACKAGE_THERM_STATUS, ++ &eax, &edx); ++ if (err < 0) { ++ dev_err(&pdev->dev, ++ "Unable to access THERM_STATUS MSR, giving up\n"); ++ return err; ++ } ++ ++ data->tjmax = get_tjmax(pdev->id, &pdev->dev); ++ ++ err = rdmsr_safe_on_pcpu(pdev->id, MSR_IA32_TEMPERATURE_TARGET, ++ &eax, &edx); ++ if (err < 0) { ++ dev_warn(&pdev->dev, "Unable to read" ++ " IA32_TEMPERATURE_TARGET MSR\n"); ++ } else { ++ data->ttarget = data->tjmax - (((eax >> 8) & 0xff) * 1000); ++ err = device_create_file(&pdev->dev, ++ &sensor_dev_attr_temp1_max.dev_attr); ++ if (err) ++ return err; ++ } ++ ++ err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); ++ if (err) ++ goto exit_dev; ++ ++ data->hwmon_dev = hwmon_device_register(&pdev->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ err = PTR_ERR(data->hwmon_dev); ++ dev_err(&pdev->dev, "Class registration failed (%d)\n", ++ err); ++ goto exit_class; ++ } ++ ++ return 0; ++ ++exit_class: ++ sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 
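Stepping out of the probe error path for a moment: the arithmetic at the heart of this new driver is the decode that pkgtemp_update_device() above performs on IA32_PACKAGE_THERM_STATUS. Restated as a small helper purely for illustration (the helper name is invented here; the driver keeps the computation inline):

/*
 * Illustrative restatement of the decode in pkgtemp_update_device();
 * the helper name is made up for the example, not part of the patch.
 */
static inline int pkg_therm_status_to_mC(u32 eax, int tjmax_mC)
{
	/* bits 22:16: digital readout, in degrees C below TjMax */
	int below_tjmax = (eax >> 16) & 0x7f;

	return tjmax_mC - below_tjmax * 1000;	/* millidegrees C */
}

Bit 5 of the same MSR is the sticky out-of-spec log that feeds the temp1_crit_alarm attribute, and the TjMax baseline comes from IA32_TEMPERATURE_TARGET, with 100 C assumed when that MSR cannot be read (see get_tjmax() above).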
++exit_dev: ++ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); ++ return err; ++} ++ ++static int pkgtemp_remove(struct platform_device *pdev) ++{ ++ struct pkgtemp_data *data = platform_get_drvdata(pdev); ++ ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); ++ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); ++ return 0; ++} ++ ++static struct platform_driver pkgtemp_driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRVNAME, ++ }, ++ .probe = pkgtemp_probe, ++ .remove = pkgtemp_remove, ++}; ++ ++static LIST_HEAD(pdev_list); ++static DEFINE_MUTEX(pdev_list_mutex); ++ ++struct cpu_info { ++ u32 cpuid_6_eax; ++}; ++ ++static void get_cpuid_info(void *arg) ++{ ++ struct cpu_info *info = arg; ++ ++ info->cpuid_6_eax = cpuid_eax(0) >= 6 ? cpuid_eax(6) : 0; ++} ++ ++static int pkgtemp_device_add(unsigned int cpu) ++{ ++ int err; ++ struct cpu_info info; ++ struct platform_device *pdev; ++ struct pdev_entry *pdev_entry, *entry; ++ ++ err = xen_set_physical_cpu_affinity(cpu); ++ if (!err) { ++ get_cpuid_info(&info); ++ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); ++ } else if (err > 0) { ++ static bool warned; ++ ++ if (!warned) { ++ warned = true; ++ printk(KERN_WARNING DRVNAME ++ "Cannot set physical CPU affinity" ++ " (assuming use of dom0_vcpus_pin)\n"); ++ } ++ err = smp_call_function_single(cpu, get_cpuid_info, &info, 1); ++ } ++ if (err) ++ return err; ++ ++ if (!(info.cpuid_6_eax & 0x40)) ++ return 0; ++ ++ pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); ++ if (!pdev_entry) ++ return -ENOMEM; ++ ++ err = xen_get_topology_info(cpu, NULL, ++ &pdev_entry->phys_proc_id, NULL); ++ if (err) ++ goto exit_entry_free; ++ ++ mutex_lock(&pdev_list_mutex); ++ ++ /* Only keep the first entry in each package */ ++ list_for_each_entry(entry, &pdev_list, list) { ++ if (entry->phys_proc_id == pdev_entry->phys_proc_id) { ++ err = 0; /* Not an error */ ++ goto exit; ++ } ++ } ++ ++ pdev = platform_device_alloc(DRVNAME, cpu); ++ if (!pdev) { ++ err = -ENOMEM; ++ printk(KERN_ERR DRVNAME ": Device allocation failed\n"); ++ goto exit; ++ } ++ ++ platform_set_drvdata(pdev, pdev_entry); ++ pdev_entry->pdev = pdev; ++ ++ err = platform_device_add(pdev); ++ if (err) { ++ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", ++ err); ++ goto exit_device_put; ++ } ++ ++ list_add_tail(&pdev_entry->list, &pdev_list); ++ mutex_unlock(&pdev_list_mutex); ++ ++ return 0; ++ ++exit_device_put: ++ platform_device_put(pdev); ++exit: ++ mutex_unlock(&pdev_list_mutex); ++exit_entry_free: ++ kfree(pdev_entry); ++ return err; ++} ++ ++static void pkgtemp_device_remove(unsigned int cpu) ++{ ++ struct pdev_entry *p; ++ unsigned int i; ++ ++ mutex_lock(&pdev_list_mutex); ++ list_for_each_entry(p, &pdev_list, list) { ++ if (p->pdev->id != cpu) ++ continue; ++ ++ platform_device_unregister(p->pdev); ++ list_del(&p->list); ++ mutex_unlock(&pdev_list_mutex); ++ for (i = 0; ; ++i) { ++ u32 phys_proc_id; ++ int err; ++ ++ if (i == cpu) ++ continue; ++ err = xen_get_topology_info(i, NULL, &phys_proc_id, ++ NULL); ++ if (err == -ENOENT) ++ continue; ++ if (err) ++ break; ++ if (phys_proc_id != p->phys_proc_id) ++ continue; ++ if (!pkgtemp_device_add(i)) ++ break; ++ } ++ kfree(p); ++ return; ++ } ++ mutex_unlock(&pdev_list_mutex); ++} ++ ++static int pkgtemp_cpu_callback(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ unsigned int cpu = (unsigned long) hcpu; ++ ++ switch (action) { ++ case 
CPU_ONLINE: ++ pkgtemp_device_add(cpu); ++ break; ++ case CPU_DEAD: ++ pkgtemp_device_remove(cpu); ++ break; ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block pkgtemp_cpu_notifier = { ++ .notifier_call = pkgtemp_cpu_callback, ++}; ++ ++static int __init pkgtemp_init(void) ++{ ++ int err = -ENODEV; ++ ++ if (!is_initial_xendomain()) ++ goto exit; ++ ++ /* quick check if we run Intel */ ++ if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL) ++ goto exit; ++ ++ err = platform_driver_register(&pkgtemp_driver); ++ if (err) ++ goto exit; ++ ++ err = register_pcpu_notifier(&pkgtemp_cpu_notifier); ++ if (err) ++ goto exit_driver_unreg; ++ ++#ifndef CONFIG_ACPI_HOTPLUG_CPU ++ if (list_empty(&pdev_list)) { ++ unregister_pcpu_notifier(&pkgtemp_cpu_notifier); ++ err = -ENODEV; ++ goto exit_driver_unreg; ++ } ++#endif ++ ++ return 0; ++ ++exit_driver_unreg: ++ platform_driver_unregister(&pkgtemp_driver); ++exit: ++ return err; ++} ++ ++static void __exit pkgtemp_exit(void) ++{ ++ struct pdev_entry *p, *n; ++ ++ unregister_pcpu_notifier(&pkgtemp_cpu_notifier); ++ mutex_lock(&pdev_list_mutex); ++ list_for_each_entry_safe(p, n, &pdev_list, list) { ++ platform_device_unregister(p->pdev); ++ list_del(&p->list); ++ kfree(p); ++ } ++ mutex_unlock(&pdev_list_mutex); ++ platform_driver_unregister(&pkgtemp_driver); ++} ++ ++MODULE_AUTHOR("Fenghua Yu "); ++MODULE_DESCRIPTION("Intel processor package temperature monitor"); ++MODULE_LICENSE("GPL"); ++ ++module_init(pkgtemp_init) ++module_exit(pkgtemp_exit) +--- head-2011-03-17.orig/drivers/hwmon/via-cputemp-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/via-cputemp-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -37,7 +37,7 @@ + + #define DRVNAME "via_cputemp" + +-enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW; ++enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME }; + + /* + * Functions declaration +@@ -316,15 +316,16 @@ static int __init via_cputemp_init(void) + if (err) + goto exit_driver_unreg; + ++#ifndef CONFIG_ACPI_HOTPLUG_CPU + if (list_empty(&pdev_list)) { ++ unregister_pcpu_notifier(&via_cputemp_cpu_notifier); + err = -ENODEV; +- goto exit_notifier_unreg; ++ goto exit_driver_unreg; + } ++#endif + + return 0; + +-exit_notifier_unreg: +- unregister_pcpu_notifier(&via_cputemp_cpu_notifier); + exit_driver_unreg: + platform_driver_unregister(&via_cputemp_driver); + exit: +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-02 15:37:42.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-02 15:37:53.000000000 +0100 +@@ -448,7 +448,7 @@ config XEN_PLATFORM_PCI + + config SWIOTLB_XEN + def_bool y +- depends on PCI ++ depends on PARAVIRT_XEN && PCI + select SWIOTLB + + config XEN_XENCOMM +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-02-24 15:05:06.000000000 +0100 +@@ -22,6 +22,8 @@ obj-$(CONFIG_XEN_BALLOON) += $(xen-ball + obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o + obj-$(CONFIG_XENFS) += xenfs/ + obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o ++obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o ++obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o + obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ + obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ + obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ blktap2-new/ +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-02-01 15:04:27.000000000 +0100 +@@ -328,7 +328,7 @@ static void connect(struct blkfront_info + unsigned long 
long sectors; + unsigned long sector_size; + unsigned int binfo; +- int err; ++ int err, barrier; + + switch (info->connected) { + case BLKIF_STATE_CONNECTED: +@@ -364,10 +364,25 @@ static void connect(struct blkfront_info + } + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, +- "feature-barrier", "%lu", &info->feature_barrier, ++ "feature-barrier", "%lu", &barrier, + NULL); ++ /* ++ * If there's no "feature-barrier" defined, then it means ++ * we're dealing with a very old backend which writes ++ * synchronously; draining will do what needs to get done. ++ * ++ * If there are barriers, then we can do full queued writes ++ * with tagged barriers. ++ * ++ * If barriers are not supported, then there's no much we can ++ * do, so just set ordering to NONE. ++ */ + if (err) +- info->feature_barrier = 0; ++ info->feature_barrier = QUEUE_ORDERED_DRAIN; ++ else if (barrier) ++ info->feature_barrier = QUEUE_ORDERED_TAG; ++ else ++ info->feature_barrier = QUEUE_ORDERED_NONE; + + err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); + if (err) { +@@ -687,7 +702,7 @@ static int blkif_queue_request(struct re + + ring_req->operation = rq_data_dir(req) ? + BLKIF_OP_WRITE : BLKIF_OP_READ; +- if (blk_barrier_rq(req)) ++ if (req->cmd_flags & REQ_HARDBARRIER) + ring_req->operation = BLKIF_OP_WRITE_BARRIER; + + ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); +@@ -746,7 +761,7 @@ void do_blkif_request(struct request_que + + blk_start_request(req); + +- if (!blk_fs_request(req)) { ++ if (req->cmd_type != REQ_TYPE_FS) { + __blk_end_request_all(req, -EIO); + continue; + } +@@ -812,7 +827,7 @@ static irqreturn_t blkif_int(int irq, vo + " write barrier op failed\n", + info->gd->disk_name); + ret = -EOPNOTSUPP; +- info->feature_barrier = 0; ++ info->feature_barrier = QUEUE_ORDERED_NONE; + xlvbd_barrier(info); + } + /* fall through */ +--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-02-01 15:04:27.000000000 +0100 +@@ -422,8 +422,7 @@ xlvbd_add(blkif_sector_t capacity, int v + info->rq = gd->queue; + info->gd = gd; + +- if (info->feature_barrier) +- xlvbd_barrier(info); ++ xlvbd_barrier(info); + + if (vdisk_info & VDISK_READONLY) + set_disk_ro(gd, 1); +@@ -474,21 +473,28 @@ int + xlvbd_barrier(struct blkfront_info *info) + { + int err; ++ const char *barrier; ++ ++ switch (info->feature_barrier) { ++ case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; ++ case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; ++ case QUEUE_ORDERED_NONE: barrier = "disabled"; break; ++ default: return -EINVAL; ++ } + +- err = blk_queue_ordered(info->rq, +- info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); ++ err = blk_queue_ordered(info->rq, info->feature_barrier); + if (err) + return err; + pr_info("blkfront: %s: barriers %s\n", +- info->gd->disk_name, +- info->feature_barrier ? 
"enabled" : "disabled"); ++ info->gd->disk_name, barrier); + return 0; + } + #else + int + xlvbd_barrier(struct blkfront_info *info) + { +- pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); ++ if (info->feature_barrier) ++ pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); + return -ENOSYS; + } + #endif +--- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:19:12.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-17 10:19:19.000000000 +0100 +@@ -431,14 +431,14 @@ static tap_blkif_t *get_next_free_dev(vo + static int blktap_open(struct inode *inode, struct file *filp); + static int blktap_release(struct inode *inode, struct file *filp); + static int blktap_mmap(struct file *filp, struct vm_area_struct *vma); +-static int blktap_ioctl(struct inode *inode, struct file *filp, +- unsigned int cmd, unsigned long arg); ++static long blktap_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); + static unsigned int blktap_poll(struct file *file, poll_table *wait); + + static const struct file_operations blktap_fops = { + .owner = THIS_MODULE, + .poll = blktap_poll, +- .ioctl = blktap_ioctl, ++ .unlocked_ioctl = blktap_ioctl, + .open = blktap_open, + .release = blktap_release, + .mmap = blktap_mmap, +@@ -757,8 +757,8 @@ static int blktap_mmap(struct file *filp + } + + +-static int blktap_ioctl(struct inode *inode, struct file *filp, +- unsigned int cmd, unsigned long arg) ++static long blktap_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg) + { + tap_blkif_t *info = filp->private_data; + +--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-02-24 15:17:25.000000000 +0100 +@@ -103,9 +103,8 @@ found: + return tap; + } + +-static int +-blktap_control_ioctl(struct inode *inode, struct file *filp, +- unsigned int cmd, unsigned long arg) ++static long ++blktap_control_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + unsigned long dev; + struct blktap *tap; +@@ -148,7 +147,7 @@ blktap_control_ioctl(struct inode *inode + + static const struct file_operations blktap_control_file_operations = { + .owner = THIS_MODULE, +- .ioctl = blktap_control_ioctl, ++ .unlocked_ioctl = blktap_control_ioctl, + }; + + static struct miscdevice blktap_misc = { +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-02-01 15:04:27.000000000 +0100 +@@ -838,13 +838,13 @@ blktap_device_run_queue(struct blktap *t + BTDBG("running queue for %d\n", tap->minor); + + while ((req = blk_peek_request(rq)) != NULL) { +- if (!blk_fs_request(req)) { ++ if (req->cmd_type != REQ_TYPE_FS) { + blk_start_request(req); + __blk_end_request_all(req, -EIO); + continue; + } + +- if (blk_barrier_rq(req)) { ++ if (req->cmd_flags & REQ_HARDBARRIER) { + blk_start_request(req); + __blk_end_request_all(req, -EOPNOTSUPP); + continue; +--- head-2011-03-17.orig/drivers/xen/blktap2/ring.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/ring.c 2011-02-01 15:04:27.000000000 +0100 +@@ -363,9 +363,8 @@ blktap_ring_set_message(struct blktap *t + up_read(&tap->tap_sem); + } + +-static int +-blktap_ring_ioctl(struct inode *inode, struct file *filp, +- unsigned int cmd, unsigned long arg) ++static long ++blktap_ring_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + struct blktap_params params; + struct blktap *tap = 
filp->private_data; +@@ -482,7 +481,7 @@ static const struct file_operations blkt + .owner = THIS_MODULE, + .open = blktap_ring_open, + .release = blktap_ring_release, +- .ioctl = blktap_ring_ioctl, ++ .unlocked_ioctl = blktap_ring_ioctl, + .mmap = blktap_ring_mmap, + .poll = blktap_ring_poll, + }; +--- head-2011-03-17.orig/drivers/xen/blktap2-new/control.c 2011-02-24 15:03:58.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/control.c 2011-02-24 15:17:28.000000000 +0100 +@@ -120,9 +120,8 @@ blktap_control_destroy_tap(struct blktap + return 0; + } + +-static int +-blktap_control_ioctl(struct inode *inode, struct file *filp, +- unsigned int cmd, unsigned long arg) ++static long ++blktap_control_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + struct blktap *tap; + +@@ -166,7 +165,7 @@ blktap_control_ioctl(struct inode *inode + + static const struct file_operations blktap_control_file_operations = { + .owner = THIS_MODULE, +- .ioctl = blktap_control_ioctl, ++ .unlocked_ioctl = blktap_control_ioctl, + }; + + static struct miscdevice blktap_control = { +--- head-2011-03-17.orig/drivers/xen/blktap2-new/device.c 2011-02-24 15:01:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/device.c 2011-02-24 16:23:08.000000000 +0100 +@@ -240,7 +240,7 @@ blktap_device_run_queue(struct blktap *t + if (!rq) + break; + +- if (!blk_fs_request(rq)) { ++ if (rq->cmd_type != REQ_TYPE_FS) { + __blktap_end_queued_rq(rq, -EOPNOTSUPP); + continue; + } +@@ -303,7 +303,7 @@ blktap_device_configure(struct blktap *t + blk_queue_dma_alignment(rq, 511); + + /* We are reordering, but cacheless. */ +- blk_queue_ordered(rq, QUEUE_ORDERED_DRAIN, NULL); ++ blk_queue_ordered(rq, QUEUE_ORDERED_DRAIN); + + spin_unlock_irq(&dev->lock); + } +--- head-2011-03-17.orig/drivers/xen/blktap2-new/ring.c 2011-02-24 14:19:13.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/ring.c 2011-02-24 15:10:15.000000000 +0100 +@@ -370,9 +370,8 @@ fail: + return err; + } + +-static int +-blktap_ring_ioctl(struct inode *inode, struct file *filp, +- unsigned int cmd, unsigned long arg) ++static long ++blktap_ring_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + struct blktap *tap = filp->private_data; + struct blktap_ring *ring = &tap->ring; +@@ -438,7 +437,7 @@ static const struct file_operations blkt + .owner = THIS_MODULE, + .open = blktap_ring_open, + .release = blktap_ring_release, +- .ioctl = blktap_ring_ioctl, ++ .unlocked_ioctl = blktap_ring_ioctl, + .mmap = blktap_ring_mmap, + .poll = blktap_ring_poll, + }; +--- head-2011-03-17.orig/drivers/xen/console/console.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/drivers/xen/console/console.c 2011-02-01 15:04:27.000000000 +0100 +@@ -379,7 +379,7 @@ void xencons_rx(char *buf, unsigned len) + sysrq_requested = 0; + if (time_before(jiffies, sysrq_timeout)) { + spin_unlock_irqrestore(&xencons_lock, flags); +- handle_sysrq(buf[i], xencons_tty); ++ handle_sysrq(buf[i]); + spin_lock_irqsave(&xencons_lock, flags); + continue; + } +--- head-2011-03-17.orig/drivers/xen/core/reboot.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/reboot.c 2011-02-01 15:04:27.000000000 +0100 +@@ -240,7 +240,7 @@ static void sysrq_handler(struct xenbus_ + + #ifdef CONFIG_MAGIC_SYSRQ + if (sysrq_key != '\0') +- handle_sysrq(sysrq_key, NULL); ++ handle_sysrq(sysrq_key); + #endif + } + +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-09 16:05:04.000000000 +0100 ++++ 
head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-09 16:05:34.000000000 +0100 +@@ -50,7 +50,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -219,7 +218,6 @@ static void netif_disconnect_backend(str + static int network_connect(struct net_device *); + static void network_tx_buf_gc(struct net_device *); + static void network_alloc_rx_buffers(struct net_device *); +-static void send_fake_arp(struct net_device *); + + static irqreturn_t netif_int(int irq, void *dev_id); + +@@ -236,6 +234,25 @@ static inline int xennet_can_sg(struct n + return dev->features & NETIF_F_SG; + } + ++/* ++ * Work around net.ipv4.conf.*.arp_notify no being enabled by default. ++ */ ++static void __devinit netfront_enable_arp_notify(struct netfront_info *info) ++{ ++#ifdef CONFIG_INET ++ struct in_device *in_dev; ++ ++ rtnl_lock(); ++ in_dev = __in_dev_get_rtnl(info->netdev); ++ if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY)) ++ IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1); ++ rtnl_unlock(); ++ if (!in_dev) ++ printk(KERN_WARNING "Cannot enable ARP notification on %s\n", ++ info->xbdev->nodename); ++#endif ++} ++ + /** + * Entry point to this code when a new device is created. Allocate the basic + * structures and the ring buffers for communication with the backend, and +@@ -265,6 +282,8 @@ static int __devinit netfront_probe(stru + goto fail; + } + ++ netfront_enable_arp_notify(info); ++ + err = xennet_sysfs_addif(info->netdev); + if (err) { + unregister_netdev(info->netdev); +@@ -551,7 +570,7 @@ static void backend_changed(struct xenbu + if (network_connect(netdev) != 0) + break; + xenbus_switch_state(dev, XenbusStateConnected); +- send_fake_arp(netdev); ++ netif_notify_peers(netdev); + break; + + case XenbusStateClosing: +@@ -560,36 +579,6 @@ static void backend_changed(struct xenbu + } + } + +-/** Send a packet on a net device to encourage switches to learn the +- * MAC. We send a fake ARP request. +- * +- * @param dev device +- * @return 0 on success, error code otherwise +- */ +-static void send_fake_arp(struct net_device *dev) +-{ +-#ifdef CONFIG_INET +- struct sk_buff *skb; +- u32 src_ip, dst_ip; +- +- dst_ip = INADDR_BROADCAST; +- src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); +- +- /* No IP? Then nothing to do. */ +- if (src_ip == 0) +- return; +- +- skb = arp_create(ARPOP_REPLY, ETH_P_ARP, +- dst_ip, dev, src_ip, +- /*dst_hw*/ NULL, /*src_hw*/ NULL, +- /*target_hw*/ dev->dev_addr); +- if (skb == NULL) +- return; +- +- dev_queue_xmit(skb); +-#endif +-} +- + static inline int netfront_tx_slot_available(struct netfront_info *np) + { + return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < +@@ -2154,32 +2143,6 @@ static struct net_device * __devinit cre + return ERR_PTR(err); + } + +-#ifdef CONFIG_INET +-/* +- * We use this notifier to send out a fake ARP reply to reset switches and +- * router ARP caches when an IP interface is brought up on a VIF. +- */ +-static int +-inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) +-{ +- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; +- struct net_device *dev = ifa->ifa_dev->dev; +- +- /* UP event and is it one of our devices? 
*/ +- if (event == NETDEV_UP && dev->netdev_ops->ndo_open == network_open) +- send_fake_arp(dev); +- +- return NOTIFY_DONE; +-} +- +-static struct notifier_block notifier_inetdev = { +- .notifier_call = inetdev_notify, +- .next = NULL, +- .priority = 0 +-}; +-#endif +- +- + static void netif_disconnect_backend(struct netfront_info *info) + { + /* Stop old i/f to prevent errors whilst we rebuild the state. */ +@@ -2233,8 +2196,6 @@ static struct xenbus_driver netfront_dri + + static int __init netif_init(void) + { +- int err; +- + if (!is_running_on_xen()) + return -ENODEV; + +@@ -2252,26 +2213,13 @@ static int __init netif_init(void) + + IPRINTK("Initialising virtual ethernet driver.\n"); + +-#ifdef CONFIG_INET +- (void)register_inetaddr_notifier(¬ifier_inetdev); +-#endif +- +- err = xenbus_register_frontend(&netfront_driver); +- if (err) { +-#ifdef CONFIG_INET +- unregister_inetaddr_notifier(¬ifier_inetdev); +-#endif +- } +- return err; ++ return xenbus_register_frontend(&netfront_driver); + } + module_init(netif_init); + + + static void __exit netif_exit(void) + { +-#ifdef CONFIG_INET +- unregister_inetaddr_notifier(¬ifier_inetdev); +-#endif + xenbus_unregister_driver(&netfront_driver); + + netif_exit_accel(); +--- head-2011-03-17.orig/drivers/xen/scsiback/scsiback.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsiback/scsiback.c 2011-02-01 15:04:27.000000000 +0100 +@@ -386,7 +386,7 @@ static struct bio *request_map_sg(pendin + if (bio->bi_vcnt >= nr_vecs) { + bio->bi_flags &= ~(1 << BIO_SEG_VALID); + if (pending_req->sc_data_direction == WRITE) +- bio->bi_rw |= (1 << BIO_RW); ++ bio->bi_rw |= REQ_WRITE; + bio = NULL; + } + +--- head-2011-03-17.orig/drivers/xen/usbfront/usbfront-hcd.c 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/usbfront/usbfront-hcd.c 2011-02-01 15:04:27.000000000 +0100 +@@ -86,7 +86,7 @@ static int xenhcd_setup(struct usb_hcd * + static int xenhcd_run(struct usb_hcd *hcd) + { + hcd->uses_new_polling = 1; +- hcd->poll_rh = 0; ++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + hcd->state = HC_STATE_RUNNING; + create_debug_file(hcd_to_info(hcd)); + return 0; +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_client.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_client.c 2011-02-01 15:04:27.000000000 +0100 +@@ -165,17 +165,12 @@ int xenbus_watch_pathfmt(struct xenbus_d + EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); + #endif + ++static void xenbus_switch_fatal(struct xenbus_device *, int, int, ++ const char *, ...); + +-/** +- * xenbus_switch_state +- * @dev: xenbus device +- * @state: new state +- * +- * Advertise in the store a change of the given driver to the given new_state. +- * Return 0 on success, or -errno on error. On error, the device will switch +- * to XenbusStateClosing, and the error will be saved in the store. +- */ +-int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) ++static int ++__xenbus_switch_state(struct xenbus_device *dev, ++ enum xenbus_state state, int depth) + { + /* We check whether the state is currently set to the given value, and + if not, then the state is set. We don't want to unconditionally +@@ -190,29 +185,58 @@ int xenbus_switch_state(struct xenbus_de + would not get reset if the transaction was aborted. 
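That aborted-transaction concern is exactly why this rewrite wraps the read-check-write of the state node in a single xenbus transaction: the old xenbus_scanf() followed by xenbus_printf() left a window in which a concurrent update could be clobbered. The retry idiom used below, condensed for illustration with the error reporting trimmed:

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return 0;	/* the real code reports via xenbus_switch_fatal() */

	/* read "state"; if it is present and differs, write the new value */

	err = xenbus_transaction_end(xbt, abort);
	if (err == -EAGAIN && !abort)
		goto again;	/* raced with another writer, run the transaction again */

The new xenbus_switch_fatal() helper exists so that reporting a failure from inside this path does not recurse back into the state switch without bound; its depth argument limits the fallback switch to XenbusStateClosing to a single level.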
+ */ + ++ struct xenbus_transaction xbt; + int current_state; +- int err; ++ int err, abort; + + if (state == dev->state) + return 0; + +- err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", +- ¤t_state); +- if (err != 1) ++again: ++ abort = 1; ++ ++ err = xenbus_transaction_start(&xbt); ++ if (err) { ++ xenbus_switch_fatal(dev, depth, err, "starting transaction"); + return 0; ++ } ++ ++ err = xenbus_scanf(xbt, dev->nodename, "state", "%d", ¤t_state); ++ if (err != 1) ++ goto abort; + +- err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); ++ err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); + if (err) { +- if (state != XenbusStateClosing) /* Avoid looping */ +- xenbus_dev_fatal(dev, err, "writing new state"); +- return err; ++ xenbus_switch_fatal(dev, depth, err, "writing new state"); ++ goto abort; + } + +- dev->state = state; ++ abort = 0; ++abort: ++ err = xenbus_transaction_end(xbt, abort); ++ if (err) { ++ if (err == -EAGAIN && !abort) ++ goto again; ++ xenbus_switch_fatal(dev, depth, err, "ending transaction"); ++ } else ++ dev->state = state; + + return 0; + } + ++/** ++ * xenbus_switch_state ++ * @dev: xenbus device ++ * @state: new state ++ * ++ * Advertise in the store a change of the given driver to the given new_state. ++ * Return 0 on success, or -errno on error. On error, the device will switch ++ * to XenbusStateClosing, and the error will be saved in the store. ++ */ ++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) ++{ ++ return __xenbus_switch_state(dev, state, 0); ++} + EXPORT_SYMBOL_GPL(xenbus_switch_state); + + int xenbus_frontend_closed(struct xenbus_device *dev) +@@ -234,41 +258,22 @@ static char *error_path(struct xenbus_de + + + static void _dev_error(struct xenbus_device *dev, int err, +- const char *fmt, va_list ap) ++ const char *fmt, va_list *ap) + { +- int ret; +- unsigned int len; +- char *printf_buffer = NULL, *path_buffer = NULL; +- +-#define PRINTF_BUFFER_SIZE 4096 +- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); +- if (printf_buffer == NULL) +- goto fail; ++ char *printf_buffer, *path_buffer; ++ struct va_format vaf = { .fmt = fmt, .va = ap }; + +- len = sprintf(printf_buffer, "%i ", -err); +- ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); +- +- BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); +- +- dev_err(&dev->dev, "%s\n", printf_buffer); ++ printf_buffer = kasprintf(GFP_KERNEL, "%i %pV", -err, &vaf); ++ if (printf_buffer) ++ dev_err(&dev->dev, "%s\n", printf_buffer); + + path_buffer = error_path(dev); +- +- if (path_buffer == NULL) { ++ if (!printf_buffer || !path_buffer ++ || xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer)) + dev_err(&dev->dev, + "xenbus: failed to write error node for %s (%s)\n", + dev->nodename, printf_buffer); +- goto fail; +- } + +- if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { +- dev_err(&dev->dev, +- "xenbus: failed to write error node for %s (%s)\n", +- dev->nodename, printf_buffer); +- goto fail; +- } +- +-fail: + kfree(printf_buffer); + kfree(path_buffer); + } +@@ -288,7 +293,7 @@ void xenbus_dev_error(struct xenbus_devi + va_list ap; + + va_start(ap, fmt); +- _dev_error(dev, err, fmt, ap); ++ _dev_error(dev, err, fmt, &ap); + va_end(ap); + } + EXPORT_SYMBOL_GPL(xenbus_dev_error); +@@ -309,13 +314,29 @@ void xenbus_dev_fatal(struct xenbus_devi + va_list ap; + + va_start(ap, fmt); +- _dev_error(dev, err, fmt, ap); ++ _dev_error(dev, err, fmt, &ap); + va_end(ap); + + xenbus_switch_state(dev, 
XenbusStateClosing); + } + EXPORT_SYMBOL_GPL(xenbus_dev_fatal); + ++/** ++ * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps ++ * avoiding recursion within xenbus_switch_state. ++ */ ++static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err, ++ const char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ _dev_error(dev, err, fmt, &ap); ++ va_end(ap); ++ ++ if (!depth) ++ __xenbus_switch_state(dev, XenbusStateClosing, 1); ++} + + /** + * xenbus_grant_ring +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 15:04:27.000000000 +0100 +@@ -58,9 +58,6 @@ + #include + #include + #include +-#ifdef MODULE +-#include +-#endif + #else + #include + +@@ -68,6 +65,12 @@ + #include + #include + #include ++ ++#include ++#endif ++ ++#ifndef CONFIG_XEN ++#include + #endif + + #include "xenbus_comms.h" +@@ -962,7 +965,23 @@ void xenbus_probe(struct work_struct *un + /* Notify others that xenstore is up */ + blocking_notifier_call_chain(&xenstore_chain, 0, NULL); + } ++#if !defined(CONFIG_XEN) && !defined(MODULE) ++EXPORT_SYMBOL_GPL(xenbus_probe); + ++static int __init xenbus_probe_initcall(void) ++{ ++ if (!xen_domain()) ++ return -ENODEV; ++ ++ if (xen_initial_domain() || xen_hvm_domain()) ++ return 0; ++ ++ xenbus_probe(NULL); ++ return 0; ++} ++ ++device_initcall(xenbus_probe_initcall); ++#endif + + #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) + static struct file_operations xsd_kva_fops; +@@ -1077,9 +1096,9 @@ fail0: + #endif + + #ifndef MODULE +-static int __init xenbus_probe_init(void) ++static int __init xenbus_init(void) + #else +-static int __devinit xenbus_probe_init(void) ++int __devinit xenbus_init(void) + #endif + { + int err = 0; +@@ -1147,17 +1166,36 @@ static int __devinit xenbus_probe_init(v + #endif + xen_store_interface = mfn_to_virt(xen_store_mfn); + } else { +- atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); ++#if !defined(CONFIG_XEN) && !defined(MODULE) ++ if (xen_hvm_domain()) { ++#endif ++#ifndef CONFIG_XEN ++ uint64_t v = 0; ++ ++ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); ++ if (err) ++ goto err; ++ xen_store_evtchn = (int)v; ++ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); ++ if (err) ++ goto err; ++ xen_store_mfn = (unsigned long)v; ++ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, ++ PAGE_SIZE); ++#endif ++#if !defined(CONFIG_XEN) && !defined(MODULE) ++ } else { ++#endif + #ifndef MODULE +- xen_store_evtchn = xen_start_info->store_evtchn; +- xen_store_mfn = xen_start_info->store_mfn; +- xen_store_interface = mfn_to_virt(xen_store_mfn); +-#else +- xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); +- xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); +- xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, +- PAGE_SIZE); ++ xen_store_evtchn = xen_start_info->store_evtchn; ++ xen_store_mfn = xen_start_info->store_mfn; ++ xen_store_interface = mfn_to_virt(xen_store_mfn); + #endif ++#if !defined(CONFIG_XEN) && !defined(MODULE) ++ } ++#endif ++ atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); ++ + /* Initialize the shared memory rings to talk to xenstored */ + err = xb_init_comms(); + if (err) +@@ -1189,8 +1227,10 @@ static int __devinit xenbus_probe_init(v + #endif + xenbus_backend_device_register(); + ++#if defined(CONFIG_XEN) || defined(MODULE) + if (!is_initial_xendomain()) + xenbus_probe(NULL); ++#endif + + #if 
defined(CONFIG_XEN_COMPAT_XENFS) && !defined(MODULE) + /* +@@ -1217,17 +1257,12 @@ static int __devinit xenbus_probe_init(v + } + + #ifndef MODULE +-postcore_initcall(xenbus_probe_init); ++postcore_initcall(xenbus_init); + #ifdef CONFIG_XEN + MODULE_LICENSE("Dual BSD/GPL"); + #else + MODULE_LICENSE("GPL"); + #endif +-#else +-int __devinit xenbus_init(void) +-{ +- return xenbus_probe_init(); +-} + #endif + + static int is_device_connecting(struct device *dev, void *data) +@@ -1345,6 +1380,11 @@ static void wait_for_devices(struct xenb + #ifndef MODULE + static int __init boot_wait_for_devices(void) + { ++#if !defined(CONFIG_XEN) && !defined(MODULE) ++ if (xen_hvm_domain() && !xen_platform_pci_unplug) ++ return -ENODEV; ++#endif ++ + if (!xenbus_frontend.error) { + ready_to_wait_for_devices = 1; + wait_for_devices(NULL); +--- head-2011-03-17.orig/include/xen/hvm.h 2011-01-31 17:56:27.000000000 +0100 ++++ head-2011-03-17/include/xen/hvm.h 2011-02-01 15:04:27.000000000 +0100 +@@ -3,8 +3,11 @@ + #define XEN_HVM_H__ + + #include ++#ifndef HAVE_XEN_PLATFORM_COMPAT_H ++#include ++#endif + +-static inline unsigned long hvm_get_parameter(int idx) ++static inline int hvm_get_parameter(int idx, uint64_t *value) + { + struct xen_hvm_param xhv; + int r; +@@ -14,9 +17,15 @@ static inline unsigned long hvm_get_para + r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); + if (r < 0) { + pr_err("Cannot get hvm parameter %d: %d!\n", idx, r); +- return 0; ++ return r; + } +- return xhv.value; ++ *value = xhv.value; ++ return r; + } + ++#define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2 ++#define HVM_CALLBACK_VIA_TYPE_SHIFT 56 ++#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\ ++ HVM_CALLBACK_VIA_TYPE_SHIFT | (x)) ++ + #endif /* XEN_HVM_H__ */ +--- head-2011-03-17.orig/include/xen/interface/hvm/hvm_op.h 2011-03-17 13:50:24.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/hvm/hvm_op.h 2011-03-17 14:14:21.000000000 +0100 +@@ -33,6 +33,7 @@ struct xen_hvm_param { + uint32_t index; /* IN */ + uint64_t value; /* IN/OUT */ + }; ++DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_param); + typedef struct xen_hvm_param xen_hvm_param_t; + DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); + +@@ -140,6 +141,7 @@ struct xen_hvm_pagetable_dying { + /* guest physical address of the toplevel pagetable dying */ + uint64_t gpa; + }; ++DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_pagetable_dying); + typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t; + DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t); + +--- head-2011-03-17.orig/include/xen/interface/memory.h 2011-02-01 14:38:38.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/memory.h 2011-02-01 15:04:27.000000000 +0100 +@@ -132,6 +132,7 @@ struct xen_memory_exchange { + */ + xen_ulong_t nr_exchanged; + }; ++DEFINE_GUEST_HANDLE_STRUCT(xen_memory_exchange); + typedef struct xen_memory_exchange xen_memory_exchange_t; + DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); + +@@ -294,4 +295,14 @@ typedef struct xen_pod_target xen_pod_ta + */ + #define XENMEM_get_sharing_freed_pages 18 + ++#ifndef CONFIG_XEN ++#include ++ ++/* ++ * Prevent the balloon driver from changing the memory reservation ++ * during a driver critical region. 
++ */ ++extern spinlock_t xen_reservation_lock; ++#endif ++ + #endif /* __XEN_PUBLIC_MEMORY_H__ */ +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 15:04:27.000000000 +0100 +@@ -37,20 +37,12 @@ + + #define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1))) + +-/* +- * Enumeration for sync targets +- */ +-enum dma_sync_target { +- SYNC_FOR_CPU = 0, +- SYNC_FOR_DEVICE = 1, +-}; +- + int swiotlb; + int swiotlb_force; + + /* +- * Used to do a quick range check in unmap_single and +- * sync_single_*, to see if the memory was in fact allocated by this ++ * Used to do a quick range check in swiotlb_tbl_unmap_single and ++ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this + * API. + */ + static char *io_tlb_start, *io_tlb_end; +@@ -141,44 +133,30 @@ void swiotlb_print_info(void) + io_tlb_start, io_tlb_end); + } + +-/* +- * Statically reserve bounce buffer space and initialize bounce buffer data +- * structures for the software IO TLB used to implement the PCI DMA API. +- */ +-void __init +-swiotlb_init_with_default_size(size_t default_size, int verbose) ++void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) + { + unsigned long i, bytes; + int rc; + +- if (!io_tlb_nslabs) { +- io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); +- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); +- } ++ bytes = nslabs << IO_TLB_SHIFT; + +- bytes = io_tlb_nslabs << IO_TLB_SHIFT; +- +- /* +- * Get IO TLB memory from the low pages +- */ +- io_tlb_start = alloc_bootmem_pages(bytes); +- if (!io_tlb_start) +- panic("Cannot allocate SWIOTLB buffer!\n"); ++ io_tlb_nslabs = nslabs; ++ io_tlb_start = tlb; + dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT; +- for (i = 0; i < io_tlb_nslabs; i += IO_TLB_SEGSIZE) { ++ for (nslabs = 0; nslabs < io_tlb_nslabs; nslabs += IO_TLB_SEGSIZE) { + do { + rc = xen_create_contiguous_region( +- (unsigned long)io_tlb_start + (i << IO_TLB_SHIFT), ++ (unsigned long)io_tlb_start + (nslabs << IO_TLB_SHIFT), + get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT), + dma_bits); + } while (rc && dma_bits++ < max_dma_bits); + if (rc) { +- if (i == 0) ++ if (nslabs == 0) + panic("No suitable physical memory available for SWIOTLB buffer!\n" + "Use dom0_mem Xen boot parameter to reserve\n" + "some DMA memory (e.g., dom0_mem=-128M).\n"); +- io_tlb_nslabs = i; +- i <<= IO_TLB_SHIFT; ++ io_tlb_nslabs = nslabs; ++ i = nslabs << IO_TLB_SHIFT; + free_bootmem(__pa(io_tlb_start + i), bytes - i); + bytes = i; + for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) { +@@ -221,6 +199,32 @@ swiotlb_init_with_default_size(size_t de + swiotlb_print_info(); + } + ++/* ++ * Statically reserve bounce buffer space and initialize bounce buffer data ++ * structures for the software IO TLB used to implement the DMA API. 
++ */ ++void __init ++swiotlb_init_with_default_size(size_t default_size, int verbose) ++{ ++ unsigned long bytes; ++ ++ if (!io_tlb_nslabs) { ++ io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); ++ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); ++ } ++ ++ bytes = io_tlb_nslabs << IO_TLB_SHIFT; ++ ++ /* ++ * Get IO TLB memory from the low pages ++ */ ++ io_tlb_start = alloc_bootmem_pages(bytes); ++ if (!io_tlb_start) ++ panic("Cannot allocate SWIOTLB buffer"); ++ ++ swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose); ++} ++ + void __init + swiotlb_init(int verbose) + { +@@ -267,8 +271,8 @@ static int is_swiotlb_buffer(dma_addr_t + * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an + * unnecessary copy from the aperture to the host buffer, and a page fault. + */ +-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, +- enum dma_data_direction dir) ++void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, ++ enum dma_data_direction dir) + { + unsigned long pfn = PFN_DOWN(phys); + +@@ -306,12 +310,11 @@ static void swiotlb_bounce(phys_addr_t p + /* inaccessible */; + } + } ++EXPORT_SYMBOL_GPL(swiotlb_bounce); + +-/* +- * Allocates bounce buffer and returns its kernel virtual address. +- */ +-static void * +-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir) ++void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, ++ phys_addr_t phys, size_t size, ++ enum dma_data_direction dir) + { + unsigned long flags; + char *dma_addr; +@@ -409,12 +412,27 @@ found: + + return dma_addr; + } ++EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); ++ ++/* ++ * Allocates bounce buffer and returns its kernel virtual address. ++ */ ++ ++static void * ++map_single(struct device *hwdev, phys_addr_t phys, size_t size, ++ enum dma_data_direction dir) ++{ ++ dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start); ++ ++ return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir); ++} + + /* + * dma_addr is the kernel virtual address of the bounce buffer to unmap. + */ +-static void +-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) ++void ++swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, ++ enum dma_data_direction dir) + { + unsigned long flags; + int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; +@@ -455,10 +473,12 @@ do_unmap_single(struct device *hwdev, ch + } + spin_unlock_irqrestore(&io_tlb_lock, flags); + } ++EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single); + +-static void +-sync_single(struct device *hwdev, char *dma_addr, size_t size, +- int dir, int target) ++void ++swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size, ++ enum dma_data_direction dir, ++ enum dma_sync_target target) + { + int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; + phys_addr_t phys = io_tlb_orig_addr[index]; +@@ -482,9 +502,11 @@ sync_single(struct device *hwdev, char * + BUG(); + } + } ++EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); + + static void +-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) ++swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, ++ int do_panic) + { + /* + * Ran out of IOMMU space for this operation. This is very bad. +@@ -558,14 +580,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); + * whatever the device wrote there. 
+ */ + static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, +- size_t size, int dir) ++ size_t size, enum dma_data_direction dir) + { + phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); + + BUG_ON(dir == DMA_NONE); + + if (is_swiotlb_buffer(dev_addr)) { +- do_unmap_single(hwdev, phys_to_virt(paddr), size, dir); ++ swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir); + return; + } + +@@ -592,14 +614,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); + */ + static void + swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, +- size_t size, int dir, int target) ++ size_t size, enum dma_data_direction dir, ++ enum dma_sync_target target) + { + phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); + + BUG_ON(dir == DMA_NONE); + + if (is_swiotlb_buffer(dev_addr)) +- sync_single(hwdev, phys_to_virt(paddr), size, dir, target); ++ swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir, ++ target); + } + + void +@@ -676,7 +700,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs); + + int + swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, +- int dir) ++ enum dma_data_direction dir) + { + return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); + } +@@ -703,7 +727,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); + + void + swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, +- int dir) ++ enum dma_data_direction dir) + { + return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); + } +@@ -718,7 +742,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg); + */ + static void + swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, +- int nelems, int dir, int target) ++ int nelems, enum dma_data_direction dir, ++ enum dma_sync_target target) + { + struct scatterlist *sg; + int i; diff --git a/patches.xen/xen3-patch-2.6.37 b/patches.xen/xen3-patch-2.6.37 new file mode 100644 index 0000000..8e5d7f7 --- /dev/null +++ b/patches.xen/xen3-patch-2.6.37 @@ -0,0 +1,7443 @@ +From: Linux Kernel Mailing List +Subject: Linux: 2.6.37 +Patch-mainline: 2.6.37 + + This patch contains the differences between 2.6.36 and 2.6.37. + +Acked-by: Jeff Mahoney +Automatically created from "patches.kernel.org/patch-2.6.37" by xen-port-patches.py + +--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-17 13:43:12.000000000 +0100 ++++ head-2011-03-17/arch/x86/Kconfig 2011-02-01 15:09:47.000000000 +0100 +@@ -1782,7 +1782,6 @@ config USE_PERCPU_NUMA_NODE_ID + depends on NUMA + + menu "Power management and ACPI options" +- depends on !XEN_UNPRIVILEGED_GUEST + + config ARCH_HIBERNATION_HEADER + def_bool y +@@ -1790,6 +1789,8 @@ config ARCH_HIBERNATION_HEADER + + source "kernel/power/Kconfig" + ++if !XEN_UNPRIVILEGED_GUEST ++ + source "drivers/acpi/Kconfig" + + source "drivers/sfi/Kconfig" +@@ -1925,6 +1926,8 @@ source "drivers/cpuidle/Kconfig" + + source "drivers/idle/Kconfig" + ++endif # !XEN_UNPRIVILEGED_GUEST ++ + endmenu + + +@@ -2005,7 +2008,7 @@ config PCI_OLPC + + config PCI_XEN + def_bool y +- depends on PCI && XEN ++ depends on PCI && PARAVIRT_XEN + select SWIOTLB_XEN + + config PCI_DOMAINS +@@ -2030,21 +2033,6 @@ config PCI_CNB20LE_QUIRK + + You should say N unless you know you need this. + +-config XEN_PCIDEV_FRONTEND +- def_bool y +- prompt "Xen PCI Frontend" if X86_64 +- depends on PCI && XEN && (PCI_GOXEN_FE || PCI_GOANY || X86_64) +- select HOTPLUG +- help +- The PCI device frontend driver allows the kernel to import arbitrary +- PCI devices from a PCI backend to support PCI driver domains. 
+- +-config XEN_PCIDEV_FE_DEBUG +- bool "Xen PCI Frontend Debugging" +- depends on XEN_PCIDEV_FRONTEND +- help +- Enables some debug statements within the PCI Frontend. +- + config DMAR + bool "Support for DMA Remapping Devices (EXPERIMENTAL)" + depends on PCI_MSI && ACPI && !XEN && EXPERIMENTAL +--- head-2011-03-17.orig/arch/x86/include/asm/hw_irq.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/hw_irq.h 2011-02-01 15:09:47.000000000 +0100 +@@ -78,6 +78,7 @@ static inline void set_io_apic_irq_attr( + irq_attr->polarity = polarity; + } + ++#ifndef CONFIG_XEN + struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; +@@ -85,7 +86,6 @@ struct irq_2_iommu { + u8 irte_mask; + }; + +-#ifndef CONFIG_XEN + /* + * This is performance-critical, we want to do it O(1) + * +@@ -147,6 +147,7 @@ extern irqreturn_t smp_reschedule_interr + extern irqreturn_t smp_call_function_interrupt(int, void *); + extern irqreturn_t smp_call_function_single_interrupt(int, void *); + extern irqreturn_t smp_reboot_interrupt(int, void *); ++extern irqreturn_t smp_irq_work_interrupt(int, void *); + #endif + #endif + +--- head-2011-03-17.orig/arch/x86/include/asm/io.h 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/io.h 2011-02-01 15:09:47.000000000 +0100 +@@ -353,7 +353,7 @@ extern void early_iounmap(void __iomem * + extern void fixup_early_ioremap(void); + extern bool is_early_ioremap_ptep(pte_t *ptep); + +-#ifdef CONFIG_XEN ++#ifdef CONFIG_PARAVIRT_XEN + struct bio_vec; + + extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, +@@ -362,7 +362,7 @@ extern bool xen_biovec_phys_mergeable(co + #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ + (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) +-#endif /* CONFIG_XEN */ ++#endif /* CONFIG_PARAVIRT_XEN */ + + #define IO_SPACE_LIMIT 0xffff + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 15:09:47.000000000 +0100 +@@ -217,5 +217,20 @@ static inline unsigned long virt_to_fix( + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); + } ++ ++/* Return an pointer with offset calculated */ ++static __always_inline unsigned long ++__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) ++{ ++ __set_fixmap(idx, phys, flags); ++ return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); ++} ++ ++#define set_fixmap_offset(idx, phys) \ ++ __set_fixmap_offset(idx, phys, PAGE_KERNEL) ++ ++#define set_fixmap_offset_nocache(idx, phys) \ ++ __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE) ++ + #endif /* !__ASSEMBLY__ */ + #endif /* _ASM_X86_FIXMAP_H */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/highmem.h 2011-02-01 15:09:47.000000000 +0100 +@@ -58,15 +58,16 @@ extern void kunmap_high(struct page *pag + + void *kmap(struct page *page); + void kunmap(struct page *page); +-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); +-void *kmap_atomic(struct page *page, enum km_type type); +-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); +-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); +-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); ++ ++void *kmap_atomic_prot(struct page *page, pgprot_t prot); ++void 
*__kmap_atomic(struct page *page); ++void __kunmap_atomic(void *kvaddr); ++void *kmap_atomic_pfn(unsigned long pfn); ++void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); + struct page *kmap_atomic_to_page(void *ptr); + +-#define kmap_atomic_pte(page, type) \ +- kmap_atomic_prot(page, type, \ ++#define kmap_atomic_pte(page) \ ++ kmap_atomic_prot(page, \ + PagePinned(page) ? PAGE_KERNEL_RO : kmap_prot) + + #define flush_cache_kmaps() do { } while (0) +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/io.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/io.h 2011-02-01 15:09:47.000000000 +0100 +@@ -212,6 +212,7 @@ static inline void __iomem *ioremap(reso + + extern void iounmap(volatile void __iomem *addr); + ++extern void set_iounmap_nonlazy(void); + + #ifdef __KERNEL__ + +@@ -353,6 +354,7 @@ extern void __iomem *early_memremap(reso + unsigned long size); + extern void early_iounmap(void __iomem *addr, unsigned long size); + extern void fixup_early_ioremap(void); ++extern bool is_early_ioremap_ptep(pte_t *ptep); + + #define IO_SPACE_LIMIT 0xffff + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:49:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:50:13.000000000 +0100 +@@ -13,7 +13,12 @@ + #define NMI_VECTOR 0x02 + #define CALL_FUNC_SINGLE_VECTOR 3 + #define REBOOT_VECTOR 4 ++#ifdef CONFIG_IRQ_WORK ++#define IRQ_WORK_VECTOR 5 ++#define NR_IPIS 6 ++#else + #define NR_IPIS 5 ++#endif + + /* + * The maximum number of vectors supported by i386 processors +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 15:09:47.000000000 +0100 +@@ -47,19 +47,19 @@ void xen_safe_halt(void); + + void xen_halt(void); + +-#define __raw_local_save_flags() xen_save_fl() ++#define arch_local_save_flags() xen_save_fl() + +-#define raw_local_irq_restore(flags) xen_restore_fl(flags) ++#define arch_local_irq_restore(flags) xen_restore_fl(flags) + +-#define raw_local_irq_disable() xen_irq_disable() ++#define arch_local_irq_disable() xen_irq_disable() + +-#define raw_local_irq_enable() xen_irq_enable() ++#define arch_local_irq_enable() xen_irq_enable() + + /* + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +-static inline void raw_safe_halt(void) ++static inline void arch_safe_halt(void) + { + xen_safe_halt(); + } +@@ -76,11 +76,11 @@ static inline void halt(void) + /* + * For spinlocks, etc: + */ +-#define __raw_local_irq_save() \ ++#define arch_local_irq_save() \ + ({ \ +- unsigned long flags = __raw_local_save_flags(); \ ++ unsigned long flags = arch_local_save_flags(); \ + \ +- raw_local_irq_disable(); \ ++ arch_local_irq_disable(); \ + \ + flags; \ + }) +@@ -140,22 +140,16 @@ sysexit_ecrit: /**** END OF SYSEXIT CRIT + #endif /* __ASSEMBLY__ */ + + #ifndef __ASSEMBLY__ +-#define raw_local_save_flags(flags) \ +- do { (flags) = __raw_local_save_flags(); } while (0) +- +-#define raw_local_irq_save(flags) \ +- do { (flags) = __raw_local_irq_save(); } while (0) +- +-static inline int raw_irqs_disabled_flags(unsigned long flags) ++static inline int arch_irqs_disabled_flags(unsigned long flags) + { + return (flags != 0); + } + +-#define raw_irqs_disabled() \ ++#define arch_irqs_disabled() \ + ({ \ +- unsigned long flags = __raw_local_save_flags(); \ ++ unsigned long flags = arch_local_save_flags(); \ + \ +- 
raw_irqs_disabled_flags(flags); \ ++ arch_irqs_disabled_flags(flags); \ + }) + + #else +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 15:09:47.000000000 +0100 +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #ifdef __KERNEL__ + +@@ -100,9 +101,36 @@ static inline void early_quirks(void) { + + extern void pci_iommu_alloc(void); + +-/* MSI arch hooks */ +-#define arch_setup_msi_irqs arch_setup_msi_irqs +-#define arch_teardown_msi_irqs arch_teardown_msi_irqs ++#if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN) ++/* MSI arch specific hooks */ ++static inline int x86_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ++{ ++ return x86_msi.setup_msi_irqs(dev, nvec, type); ++} ++ ++static inline void x86_teardown_msi_irqs(struct pci_dev *dev) ++{ ++ x86_msi.teardown_msi_irqs(dev); ++} ++ ++static inline void x86_teardown_msi_irq(unsigned int irq) ++{ ++ x86_msi.teardown_msi_irq(irq); ++} ++#define arch_setup_msi_irqs x86_setup_msi_irqs ++#define arch_teardown_msi_irqs x86_teardown_msi_irqs ++#define arch_teardown_msi_irq x86_teardown_msi_irq ++/* implemented in arch/x86/kernel/apic/io_apic. */ ++int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); ++void native_teardown_msi_irq(unsigned int irq); ++/* default to the implementation in drivers/lib/msi.c */ ++#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS ++void default_teardown_msi_irqs(struct pci_dev *dev); ++#else ++#define native_setup_msi_irqs NULL ++#define native_teardown_msi_irq NULL ++#define default_teardown_msi_irqs NULL ++#endif + + #define PCI_DMA_BUS_IS_PHYS 0 + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 15:09:47.000000000 +0100 +@@ -28,6 +28,8 @@ extern unsigned long empty_zero_page[PAG + extern spinlock_t pgd_lock; + extern struct list_head pgd_list; + ++extern struct mm_struct *pgd_page_get_mm(struct page *page); ++ + #define set_pte(ptep, pte) xen_set_pte(ptep, pte) + #define set_pte_at(mm, addr, ptep, pte) xen_set_pte_at(mm, addr, ptep, pte) + +@@ -637,6 +639,8 @@ static inline void ptep_set_wrprotect(st + set_pte_at(mm, addr, ptep, pte_wrprotect(pte)); + } + ++#define flush_tlb_fix_spurious_fault(vma, address) ++ + /* + * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); + * +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-01 15:09:47.000000000 +0100 +@@ -25,7 +25,7 @@ + struct vm_area_struct; + + extern pgd_t *swapper_pg_dir; +-extern pgd_t trampoline_pg_dir[1024]; ++extern pgd_t initial_page_table[1024]; + + static inline void pgtable_cache_init(void) { } + static inline void check_pgt_cache(void) { } +@@ -48,24 +48,14 @@ extern void set_pmd_pfn(unsigned long, u + #endif + + #if defined(CONFIG_HIGHPTE) +-#define __KM_PTE \ +- (in_nmi() ? KM_NMI_PTE : \ +- in_irq() ? 
KM_IRQ_PTE : \ +- KM_PTE0) + #define pte_offset_map(dir, address) \ +- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \ ++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir))) + \ + pte_index((address))) +-#define pte_offset_map_nested(dir, address) \ +- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ +- pte_index((address))) +-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) +-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) ++#define pte_unmap(pte) kunmap_atomic((pte)) + #else + #define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address))) +-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address)) + #define pte_unmap(pte) do { } while (0) +-#define pte_unmap_nested(pte) do { } while (0) + #endif + + /* Clear a kernel PTE and flush it from the TLB */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 15:09:47.000000000 +0100 +@@ -109,6 +109,8 @@ static inline void xen_pgd_clear(pgd_t * + + #define __pte_mfn(_pte) (((_pte).pte & PTE_PFN_MASK) >> PAGE_SHIFT) + ++extern void sync_global_pgds(unsigned long start, unsigned long end); ++ + /* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. +@@ -132,9 +134,7 @@ static inline int pgd_large(pgd_t pgd) { + + /* x86-64 always has all page tables mapped. */ + #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) +-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) + #define pte_unmap(pte) ((void)(pte))/* NOP */ +-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */ + + #define update_mmu_cache(vma, address, ptep) do { } while (0) + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:47:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:47:48.000000000 +0100 +@@ -120,6 +120,8 @@ struct cpuinfo_x86 { + u16 phys_proc_id; + /* Core id: */ + u16 cpu_core_id; ++ /* Compute unit id */ ++ u8 compute_unit_id; + #endif + #ifdef CONFIG_SMP + /* Index into per_cpu list: */ +@@ -556,7 +558,7 @@ extern unsigned long mmu_cr4_features; + + static inline void set_in_cr4(unsigned long mask) + { +- unsigned cr4; ++ unsigned long cr4; + + mmu_cr4_features |= mask; + cr4 = read_cr4(); +@@ -566,7 +568,7 @@ static inline void set_in_cr4(unsigned l + + static inline void clear_in_cr4(unsigned long mask) + { +- unsigned cr4; ++ unsigned long cr4; + + mmu_cr4_features &= ~mask; + cr4 = read_cr4(); +@@ -718,31 +720,6 @@ extern unsigned long idle_halt; + extern unsigned long idle_nomwait; + extern bool c1e_detected; + +-#ifndef CONFIG_XEN +-/* +- * on systems with caches, caches must be flashed as the absolute +- * last instruction before going into a suspended halt. Otherwise, +- * dirty data can linger in the cache and become stale on resume, +- * leading to strange errors. +- * +- * perform a variety of operations to guarantee that the compiler +- * will not reorder instructions. wbinvd itself is serializing +- * so the processor will not reorder. +- * +- * Systems without cache can just go into halt. 
+- */ +-static inline void wbinvd_halt(void) +-{ +- mb(); +- /* check for clflush to determine if wbinvd is legal */ +- if (cpu_has_clflush) +- asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); +- else +- while (1) +- halt(); +-} +-#endif +- + extern void enable_sep_cpu(void); + extern int sysenter_setup(void); + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:10:16.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:12:15.000000000 +0100 +@@ -57,7 +57,7 @@ struct smp_ops { + void (*smp_prepare_cpus)(unsigned max_cpus); + void (*smp_cpus_done)(unsigned max_cpus); + +- void (*smp_send_stop)(void); ++ void (*stop_other_cpus)(int wait); + void (*smp_send_reschedule)(int cpu); + + int (*cpu_up)(unsigned cpu); +@@ -76,7 +76,12 @@ extern struct smp_ops smp_ops; + + static inline void smp_send_stop(void) + { +- smp_ops.smp_send_stop(); ++ smp_ops.stop_other_cpus(0); ++} ++ ++static inline void stop_other_cpus(void) ++{ ++ smp_ops.stop_other_cpus(1); + } + + static inline void smp_prepare_boot_cpu(void) +@@ -148,12 +153,16 @@ void smp_store_cpu_info(int id); + + extern int __cpu_disable(void); + extern void __cpu_die(unsigned int cpu); +-void xen_smp_send_stop(void); ++void xen_stop_other_cpus(int wait); + void xen_smp_send_reschedule(int cpu); + void xen_send_call_func_ipi(const struct cpumask *mask); + void xen_send_call_func_single_ipi(int cpu); + +-#define smp_send_stop xen_smp_send_stop ++static inline void smp_send_stop(void) ++{ ++ xen_stop_other_cpus(0); ++} ++ + #define smp_send_reschedule xen_smp_send_reschedule + #define arch_send_call_function_single_ipi xen_send_call_func_single_ipi + #define arch_send_call_function_ipi_mask xen_send_call_func_ipi +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 15:09:47.000000000 +0100 +@@ -200,16 +200,16 @@ static inline int __ticket_spin_is_conte + static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) + { + unsigned int token, count; +- unsigned int flags = __raw_local_irq_save(); ++ unsigned int flags = arch_local_irq_save(); + bool free; + + __ticket_spin_lock_preamble; + if (likely(free)) { +- raw_local_irq_restore(flags); ++ arch_local_irq_restore(flags); + return; + } + token = xen_spin_adjust(lock, token); +- raw_local_irq_restore(flags); ++ arch_local_irq_restore(flags); + do { + count = 1 << 10; + __ticket_spin_lock_body; +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/swiotlb.h 2011-02-01 15:09:47.000000000 +0100 +@@ -1,6 +1,4 @@ + #include_next + +-#define pci_swiotlb_detect() 1 +- + dma_addr_t swiotlb_map_single_phys(struct device *, phys_addr_t, size_t size, + int dir); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush.h 2011-02-01 15:09:47.000000000 +0100 +@@ -111,6 +111,4 @@ static inline void flush_tlb_kernel_rang + flush_tlb_all(); + } + +-extern void zap_low_mappings(bool early); +- + #endif /* _ASM_X86_TLBFLUSH_H */ +--- head-2011-03-17.orig/arch/x86/kernel/Makefile 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/Makefile 2011-02-01 15:09:47.000000000 +0100 +@@ -125,7 +125,6 @@ ifeq ($(CONFIG_X86_64),y) + obj-y += vsmp_64.o + endif + +-disabled-obj-$(CONFIG_XEN) := %_uv.o crash.o 
early-quirks.o hpet.o i8253.o \ +- i8259.o irqinit.o pci-swiotlb.o reboot.o smpboot.o tsc.o tsc_sync.o \ +- uv_%.o vsmp_64.o ++disabled-obj-$(CONFIG_XEN) := crash.o early-quirks.o hpet.o i8253.o i8259.o \ ++ irqinit.o pci-swiotlb.o reboot.o smpboot.o tsc.o tsc_sync.o vsmp_64.o + disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += probe_roms_32.o +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -7,11 +7,16 @@ + + #include + #include ++#include + #include + #include + #include + #include + ++#ifdef CONFIG_X86_32 ++#include ++#endif ++ + #include "realmode/wakeup.h" + #include "sleep.h" + +@@ -93,7 +98,7 @@ int acpi_save_state_mem(void) + + #ifndef CONFIG_64BIT + header->pmode_entry = (u32)&wakeup_pmode_return; +- header->pmode_cr3 = (u32)(swsusp_pg_dir - __PAGE_OFFSET); ++ header->pmode_cr3 = (u32)__pa(&initial_page_table); + saved_magic = 0x12345678; + #else /* CONFIG_64BIT */ + header->trampoline_segment = setup_trampoline() >> 4; +@@ -130,7 +135,7 @@ void acpi_restore_state_mem(void) + void __init acpi_reserve_wakeup_memory(void) + { + #ifndef CONFIG_ACPI_PV_SLEEP +- unsigned long mem; ++ phys_addr_t mem; + + if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { + printk(KERN_ERR +@@ -138,15 +143,15 @@ void __init acpi_reserve_wakeup_memory(v + return; + } + +- mem = find_e820_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); ++ mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); + +- if (mem == -1L) { ++ if (mem == MEMBLOCK_ERROR) { + printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); + return; + } + acpi_realmode = (unsigned long) phys_to_virt(mem); + acpi_wakeup_address = mem; +- reserve_early(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); ++ memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); + #endif + } + +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -144,13 +144,9 @@ struct irq_pin_list { + struct irq_pin_list *next; + }; + +-static struct irq_pin_list *get_one_free_irq_2_pin(int node) ++static struct irq_pin_list *alloc_irq_pin_list(int node) + { +- struct irq_pin_list *pin; +- +- pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); +- +- return pin; ++ return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); + } + + /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. 
*/ +@@ -163,10 +159,7 @@ static struct irq_cfg irq_cfgx[NR_IRQS]; + int __init arch_early_irq_init(void) + { + struct irq_cfg *cfg; +- struct irq_desc *desc; +- int count; +- int node; +- int i; ++ int count, node, i; + + if (!legacy_pic->nr_legacy_irqs) { + nr_irqs_gsi = 0; +@@ -175,13 +168,15 @@ int __init arch_early_irq_init(void) + + cfg = irq_cfgx; + count = ARRAY_SIZE(irq_cfgx); +- node= cpu_to_node(boot_cpu_id); ++ node = cpu_to_node(0); ++ ++ /* Make sure the legacy interrupts are marked in the bitmap */ ++ irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); + + for (i = 0; i < count; i++) { +- desc = irq_to_desc(i); +- desc->chip_data = &cfg[i]; +- zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); +- zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); ++ set_irq_chip_data(i, &cfg[i]); ++ zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); ++ zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); + /* + * For legacy IRQ's, start with assigning irq0 to irq15 to + * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. +@@ -196,170 +191,88 @@ int __init arch_early_irq_init(void) + } + + #ifdef CONFIG_SPARSE_IRQ +-struct irq_cfg *irq_cfg(unsigned int irq) ++static struct irq_cfg *irq_cfg(unsigned int irq) + { +- struct irq_cfg *cfg = NULL; +- struct irq_desc *desc; +- +- desc = irq_to_desc(irq); +- if (desc) +- cfg = desc->chip_data; +- +- return cfg; ++ return get_irq_chip_data(irq); + } + +-static struct irq_cfg *get_one_free_irq_cfg(int node) ++static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) + { + struct irq_cfg *cfg; + +- cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); +- if (cfg) { +- if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { +- kfree(cfg); +- cfg = NULL; +- } else if (!zalloc_cpumask_var_node(&cfg->old_domain, +- GFP_ATOMIC, node)) { +- free_cpumask_var(cfg->domain); +- kfree(cfg); +- cfg = NULL; +- } +- } +- ++ cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); ++ if (!cfg) ++ return NULL; ++ if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) ++ goto out_cfg; ++ if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) ++ goto out_domain; + return cfg; ++out_domain: ++ free_cpumask_var(cfg->domain); ++out_cfg: ++ kfree(cfg); ++ return NULL; + } + +-int arch_init_chip_data(struct irq_desc *desc, int node) +-{ +- struct irq_cfg *cfg; +- +- cfg = desc->chip_data; +- if (!cfg) { +- desc->chip_data = get_one_free_irq_cfg(node); +- if (!desc->chip_data) { +- printk(KERN_ERR "can not alloc irq_cfg\n"); +- BUG_ON(1); +- } +- } +- +- return 0; +-} +- +-/* for move_irq_desc */ +-static void +-init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) ++static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) + { +- struct irq_pin_list *old_entry, *head, *tail, *entry; +- +- cfg->irq_2_pin = NULL; +- old_entry = old_cfg->irq_2_pin; +- if (!old_entry) +- return; +- +- entry = get_one_free_irq_2_pin(node); +- if (!entry) ++ if (!cfg) + return; ++ set_irq_chip_data(at, NULL); ++ free_cpumask_var(cfg->domain); ++ free_cpumask_var(cfg->old_domain); ++ kfree(cfg); ++} + +- entry->apic = old_entry->apic; +- entry->pin = old_entry->pin; +- head = entry; +- tail = entry; +- old_entry = old_entry->next; +- while (old_entry) { +- entry = get_one_free_irq_2_pin(node); +- if (!entry) { +- entry = head; +- while (entry) { +- head = entry->next; +- kfree(entry); +- entry = head; +- } +- /* still use the old one */ +- return; +- } +- entry->apic = old_entry->apic; +- entry->pin = old_entry->pin; +- 
tail->next = entry; +- tail = entry; +- old_entry = old_entry->next; +- } ++#else + +- tail->next = NULL; +- cfg->irq_2_pin = head; ++struct irq_cfg *irq_cfg(unsigned int irq) ++{ ++ return irq < nr_irqs ? irq_cfgx + irq : NULL; + } + +-static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) ++static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) + { +- struct irq_pin_list *entry, *next; +- +- if (old_cfg->irq_2_pin == cfg->irq_2_pin) +- return; ++ return irq_cfgx + irq; ++} + +- entry = old_cfg->irq_2_pin; ++static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } + +- while (entry) { +- next = entry->next; +- kfree(entry); +- entry = next; +- } +- old_cfg->irq_2_pin = NULL; +-} ++#endif + +-void arch_init_copy_chip_data(struct irq_desc *old_desc, +- struct irq_desc *desc, int node) ++static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) + { ++ int res = irq_alloc_desc_at(at, node); + struct irq_cfg *cfg; +- struct irq_cfg *old_cfg; +- +- cfg = get_one_free_irq_cfg(node); +- +- if (!cfg) +- return; +- +- desc->chip_data = cfg; + +- old_cfg = old_desc->chip_data; +- +- cfg->vector = old_cfg->vector; +- cfg->move_in_progress = old_cfg->move_in_progress; +- cpumask_copy(cfg->domain, old_cfg->domain); +- cpumask_copy(cfg->old_domain, old_cfg->old_domain); +- +- init_copy_irq_2_pin(old_cfg, cfg, node); +-} ++ if (res < 0) { ++ if (res != -EEXIST) ++ return NULL; ++ cfg = get_irq_chip_data(at); ++ if (cfg) ++ return cfg; ++ } + +-static void free_irq_cfg(struct irq_cfg *cfg) +-{ +- free_cpumask_var(cfg->domain); +- free_cpumask_var(cfg->old_domain); +- kfree(cfg); ++ cfg = alloc_irq_cfg(at, node); ++ if (cfg) ++ set_irq_chip_data(at, cfg); ++ else ++ irq_free_desc(at); ++ return cfg; + } + +-void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) ++static int alloc_irq_from(unsigned int from, int node) + { +- struct irq_cfg *old_cfg, *cfg; +- +- old_cfg = old_desc->chip_data; +- cfg = desc->chip_data; +- +- if (old_cfg == cfg) +- return; +- +- if (old_cfg) { +- free_irq_2_pin(old_cfg, cfg); +- free_irq_cfg(old_cfg); +- old_desc->chip_data = NULL; +- } ++ return irq_alloc_desc_from(from, node); + } +-/* end for move_irq_desc */ + +-#else +-struct irq_cfg *irq_cfg(unsigned int irq) ++static void free_irq_at(unsigned int at, struct irq_cfg *cfg) + { +- return irq < nr_irqs ? irq_cfgx + irq : NULL; ++ free_irq_cfg(at, cfg); ++ irq_free_desc(at); + } + +-#endif +- + struct io_apic { + unsigned int index; + unsigned int unused[3]; +@@ -492,7 +405,7 @@ __ioapic_write_entry(int apic, int pin, + io_apic_write(apic, 0x10 + 2*pin, eu.w1); + } + +-void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) ++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) + { + unsigned long flags; + raw_spin_lock_irqsave(&ioapic_lock, flags); +@@ -523,7 +436,7 @@ static void ioapic_mask_entry(int apic, + * fast in the common case, and fast for shared ISA-space IRQs. 
+ */ + static int +-add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) ++__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) + { + struct irq_pin_list **last, *entry; + +@@ -535,7 +448,7 @@ add_pin_to_irq_node_nopanic(struct irq_c + last = &entry->next; + } + +- entry = get_one_free_irq_2_pin(node); ++ entry = alloc_irq_pin_list(node); + if (!entry) { + printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", + node, apic, pin); +@@ -550,7 +463,7 @@ add_pin_to_irq_node_nopanic(struct irq_c + + static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) + { +- if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) ++ if (__add_pin_to_irq_node(cfg, node, apic, pin)) + panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); + } + +@@ -613,11 +526,6 @@ static void __unmask_and_level_IO_APIC_i + IO_APIC_REDIR_LEVEL_TRIGGER, NULL); + } + +-static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) +-{ +- io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); +-} +- + static void io_apic_sync(struct irq_pin_list *entry) + { + /* +@@ -629,44 +537,37 @@ static void io_apic_sync(struct irq_pin_ + readl(&io_apic->data); + } + +-static void __mask_IO_APIC_irq(struct irq_cfg *cfg) ++static void mask_ioapic(struct irq_cfg *cfg) + { ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&ioapic_lock, flags); + io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); ++ raw_spin_unlock_irqrestore(&ioapic_lock, flags); + } + +-static void mask_IO_APIC_irq_desc(struct irq_desc *desc) ++static void mask_ioapic_irq(struct irq_data *data) + { +- struct irq_cfg *cfg = desc->chip_data; +- unsigned long flags; +- +- BUG_ON(!cfg); ++ mask_ioapic(data->chip_data); ++} + +- raw_spin_lock_irqsave(&ioapic_lock, flags); +- __mask_IO_APIC_irq(cfg); +- raw_spin_unlock_irqrestore(&ioapic_lock, flags); ++static void __unmask_ioapic(struct irq_cfg *cfg) ++{ ++ io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); + } + +-static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) ++static void unmask_ioapic(struct irq_cfg *cfg) + { +- struct irq_cfg *cfg = desc->chip_data; + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); +- __unmask_IO_APIC_irq(cfg); ++ __unmask_ioapic(cfg); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + } + +-static void mask_IO_APIC_irq(unsigned int irq) ++static void unmask_ioapic_irq(struct irq_data *data) + { +- struct irq_desc *desc = irq_to_desc(irq); +- +- mask_IO_APIC_irq_desc(desc); +-} +-static void unmask_IO_APIC_irq(unsigned int irq) +-{ +- struct irq_desc *desc = irq_to_desc(irq); +- +- unmask_IO_APIC_irq_desc(desc); ++ unmask_ioapic(data->chip_data); + } + + static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) +@@ -693,7 +594,7 @@ static void clear_IO_APIC (void) + } + #else + #define add_pin_to_irq_node(cfg, node, apic, pin) +-#define add_pin_to_irq_node_nopanic(cfg, node, apic, pin) 0 ++#define __add_pin_to_irq_node(cfg, node, apic, pin) 0 + #endif /* !CONFIG_XEN */ + + #ifdef CONFIG_X86_32 +@@ -741,14 +642,14 @@ struct IO_APIC_route_entry **alloc_ioapi + struct IO_APIC_route_entry **ioapic_entries; + + ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, +- GFP_ATOMIC); ++ GFP_KERNEL); + if (!ioapic_entries) + return 0; + + for (apic = 0; apic < nr_ioapics; apic++) { + ioapic_entries[apic] = + kzalloc(sizeof(struct IO_APIC_route_entry) * +- nr_ioapic_registers[apic], GFP_ATOMIC); ++ nr_ioapic_registers[apic], GFP_KERNEL); + if (!ioapic_entries[apic]) + goto 
nomem; + } +@@ -1314,7 +1215,6 @@ void __setup_vector_irq(int cpu) + /* Initialize vector_irq on a new cpu */ + int irq, vector; + struct irq_cfg *cfg; +- struct irq_desc *desc; + + /* + * vector_lock will make sure that we don't run into irq vector +@@ -1323,9 +1223,10 @@ void __setup_vector_irq(int cpu) + */ + raw_spin_lock(&vector_lock); + /* Mark the inuse vectors */ +- for_each_irq_desc(irq, desc) { +- cfg = desc->chip_data; +- ++ for_each_active_irq(irq) { ++ cfg = get_irq_chip_data(irq); ++ if (!cfg) ++ continue; + /* + * If it is a legacy IRQ handled by the legacy PIC, this cpu + * will be part of the irq_cfg's domain. +@@ -1382,17 +1283,17 @@ static inline int IO_APIC_irq_trigger(in + } + #endif + +-static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) ++static void ioapic_register_intr(unsigned int irq, unsigned long trigger) + { + + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || + trigger == IOAPIC_LEVEL) +- desc->status |= IRQ_LEVEL; ++ irq_set_status_flags(irq, IRQ_LEVEL); + else +- desc->status &= ~IRQ_LEVEL; ++ irq_clear_status_flags(irq, IRQ_LEVEL); + +- if (irq_remapped(irq)) { +- desc->status |= IRQ_MOVE_PCNTXT; ++ if (irq_remapped(get_irq_chip_data(irq))) { ++ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); + if (trigger) + set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, + handle_fasteoi_irq, +@@ -1414,13 +1315,13 @@ static void ioapic_register_intr(int irq + } + #else /* !CONFIG_XEN */ + #define __clear_irq_vector(irq, cfg) ((void)0) +-#define ioapic_register_intr(irq, desc, trigger) evtchn_register_pirq(irq) ++#define ioapic_register_intr(irq, trigger) evtchn_register_pirq(irq) + #endif + +-int setup_ioapic_entry(int apic_id, int irq, +- struct IO_APIC_route_entry *entry, +- unsigned int destination, int trigger, +- int polarity, int vector, int pin) ++static int setup_ioapic_entry(int apic_id, int irq, ++ struct IO_APIC_route_entry *entry, ++ unsigned int destination, int trigger, ++ int polarity, int vector, int pin) + { + /* + * add it to the IO-APIC irq-routing table: +@@ -1442,21 +1343,7 @@ int setup_ioapic_entry(int apic_id, int + if (index < 0) + panic("Failed to allocate IRTE for ioapic %d\n", apic_id); + +- memset(&irte, 0, sizeof(irte)); +- +- irte.present = 1; +- irte.dst_mode = apic->irq_dest_mode; +- /* +- * Trigger mode in the IRTE will always be edge, and the +- * actual level or edge trigger will be setup in the IO-APIC +- * RTE. This will help simplify level triggered irq migration. +- * For more details, see the comments above explainig IO-APIC +- * irq migration in the presence of interrupt-remapping. 
+- */ +- irte.trigger_mode = 0; +- irte.dlvry_mode = apic->irq_delivery_mode; +- irte.vector = vector; +- irte.dest_id = IRTE_DEST(destination); ++ prepare_irte(&irte, vector, destination); + + /* Set source-id of interrupt request */ + set_ioapic_sid(&irte, apic_id); +@@ -1493,18 +1380,14 @@ int setup_ioapic_entry(int apic_id, int + return 0; + } + +-static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, +- int trigger, int polarity) ++static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, ++ struct irq_cfg *cfg, int trigger, int polarity) + { +- struct irq_cfg *cfg; + struct IO_APIC_route_entry entry; + unsigned int dest; + + if (!IO_APIC_IRQ(irq)) + return; +- +- cfg = desc->chip_data; +- + #ifndef CONFIG_XEN + /* + * For legacy irqs, cfg->domain starts with cpu 0 for legacy +@@ -1539,10 +1422,10 @@ static void setup_IO_APIC_irq(int apic_i + return; + } + +- ioapic_register_intr(irq, desc, trigger); ++ ioapic_register_intr(irq, trigger); + #ifndef CONFIG_XEN + if (irq < legacy_pic->nr_legacy_irqs) +- legacy_pic->chip->mask(irq); ++ legacy_pic->mask(irq); + #endif + + ioapic_write_entry(apic_id, pin, entry); +@@ -1554,11 +1437,9 @@ static struct { + + static void __init setup_IO_APIC_irqs(void) + { +- int apic_id, pin, idx, irq; +- int notcon = 0; +- struct irq_desc *desc; ++ int apic_id, pin, idx, irq, notcon = 0; ++ int node = cpu_to_node(0); + struct irq_cfg *cfg; +- int node = cpu_to_node(boot_cpu_id); + + apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); + +@@ -1600,19 +1481,17 @@ static void __init setup_IO_APIC_irqs(vo + continue; + #endif + +- desc = irq_to_desc_alloc_node(irq, node); +- if (!desc) { +- printk(KERN_INFO "can not get irq_desc for %d\n", irq); ++ cfg = alloc_irq_and_cfg_at(irq, node); ++ if (!cfg) + continue; +- } +- cfg = desc->chip_data; ++ + add_pin_to_irq_node(cfg, node, apic_id, pin); + /* + * don't mark it in pin_programmed, so later acpi could + * set it correctly when irq < 16 + */ +- setup_IO_APIC_irq(apic_id, pin, irq, desc, +- irq_trigger(idx), irq_polarity(idx)); ++ setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), ++ irq_polarity(idx)); + } + + if (notcon) +@@ -1627,9 +1506,7 @@ static void __init setup_IO_APIC_irqs(vo + */ + void setup_IO_APIC_irq_extra(u32 gsi) + { +- int apic_id = 0, pin, idx, irq; +- int node = cpu_to_node(boot_cpu_id); +- struct irq_desc *desc; ++ int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); + struct irq_cfg *cfg; + + /* +@@ -1649,18 +1526,15 @@ void setup_IO_APIC_irq_extra(u32 gsi) + if (irq < PIRQ_BASE || irq >= PIRQ_BASE + nr_pirqs) + return; + #endif +-#ifdef CONFIG_SPARSE_IRQ +- desc = irq_to_desc(irq); +- if (desc) ++ ++ /* Only handle the non legacy irqs on secondary ioapics */ ++ if (apic_id == 0 || irq < NR_IRQS_LEGACY) + return; +-#endif +- desc = irq_to_desc_alloc_node(irq, node); +- if (!desc) { +- printk(KERN_INFO "can not get irq_desc for %d\n", irq); ++ ++ cfg = alloc_irq_and_cfg_at(irq, node); ++ if (!cfg) + return; +- } + +- cfg = desc->chip_data; + add_pin_to_irq_node(cfg, node, apic_id, pin); + + if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { +@@ -1670,7 +1544,7 @@ void setup_IO_APIC_irq_extra(u32 gsi) + } + set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); + +- setup_IO_APIC_irq(apic_id, pin, irq, desc, ++ setup_ioapic_irq(apic_id, pin, irq, cfg, + irq_trigger(idx), irq_polarity(idx)); + } + +@@ -1722,7 +1596,6 @@ __apicdebuginit(void) print_IO_APIC(void + union IO_APIC_reg_03 reg_03; + unsigned long flags; 
+ struct irq_cfg *cfg; +- struct irq_desc *desc; + unsigned int irq; + + printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); +@@ -1809,10 +1682,10 @@ __apicdebuginit(void) print_IO_APIC(void + } + } + printk(KERN_DEBUG "IRQ to pin mappings:\n"); +- for_each_irq_desc(irq, desc) { ++ for_each_active_irq(irq) { + struct irq_pin_list *entry; + +- cfg = desc->chip_data; ++ cfg = get_irq_chip_data(irq); + if (!cfg) + continue; + entry = cfg->irq_2_pin; +@@ -2319,29 +2192,26 @@ static int __init timer_irq_works(void) + * an edge even if it isn't on the 8259A... + */ + +-static unsigned int startup_ioapic_irq(unsigned int irq) ++static unsigned int startup_ioapic_irq(struct irq_data *data) + { +- int was_pending = 0; ++ int was_pending = 0, irq = data->irq; + unsigned long flags; +- struct irq_cfg *cfg; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + if (irq < legacy_pic->nr_legacy_irqs) { +- legacy_pic->chip->mask(irq); ++ legacy_pic->mask(irq); + if (legacy_pic->irq_pending(irq)) + was_pending = 1; + } +- cfg = irq_cfg(irq); +- __unmask_IO_APIC_irq(cfg); ++ __unmask_ioapic(data->chip_data); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + return was_pending; + } + +-static int ioapic_retrigger_irq(unsigned int irq) ++static int ioapic_retrigger_irq(struct irq_data *data) + { +- +- struct irq_cfg *cfg = irq_cfg(irq); ++ struct irq_cfg *cfg = data->chip_data; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); +@@ -2392,7 +2262,7 @@ static void __target_IO_APIC_irq(unsigne + * With interrupt-remapping, destination information comes + * from interrupt-remapping table entry. + */ +- if (!irq_remapped(irq)) ++ if (!irq_remapped(cfg)) + io_apic_write(apic, 0x11 + pin*2, dest); + reg = io_apic_read(apic, 0x10 + pin*2); + reg &= ~IO_APIC_REDIR_VECTOR_MASK; +@@ -2402,65 +2272,46 @@ static void __target_IO_APIC_irq(unsigne + } + + /* +- * Either sets desc->affinity to a valid value, and returns ++ * Either sets data->affinity to a valid value, and returns + * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and +- * leaves desc->affinity untouched. ++ * leaves data->affinity untouched. + */ +-unsigned int +-set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, +- unsigned int *dest_id) ++int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, ++ unsigned int *dest_id) + { +- struct irq_cfg *cfg; +- unsigned int irq; ++ struct irq_cfg *cfg = data->chip_data; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return -1; + +- irq = desc->irq; +- cfg = desc->chip_data; +- if (assign_irq_vector(irq, cfg, mask)) ++ if (assign_irq_vector(data->irq, data->chip_data, mask)) + return -1; + +- cpumask_copy(desc->affinity, mask); ++ cpumask_copy(data->affinity, mask); + +- *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); ++ *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); + return 0; + } + + static int +-set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) ++ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, ++ bool force) + { +- struct irq_cfg *cfg; ++ unsigned int dest, irq = data->irq; + unsigned long flags; +- unsigned int dest; +- unsigned int irq; +- int ret = -1; +- +- irq = desc->irq; +- cfg = desc->chip_data; ++ int ret; + + raw_spin_lock_irqsave(&ioapic_lock, flags); +- ret = set_desc_affinity(desc, mask, &dest); ++ ret = __ioapic_set_affinity(data, mask, &dest); + if (!ret) { + /* Only the high 8 bits are valid. 
*/ + dest = SET_APIC_LOGICAL_ID(dest); +- __target_IO_APIC_irq(irq, dest, cfg); ++ __target_IO_APIC_irq(irq, dest, data->chip_data); + } + raw_spin_unlock_irqrestore(&ioapic_lock, flags); +- + return ret; + } + +-static int +-set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) +-{ +- struct irq_desc *desc; +- +- desc = irq_to_desc(irq); +- +- return set_ioapic_affinity_irq_desc(desc, mask); +-} +- + #ifdef CONFIG_INTR_REMAP + + /* +@@ -2475,24 +2326,21 @@ set_ioapic_affinity_irq(unsigned int irq + * the interrupt-remapping table entry. + */ + static int +-migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) ++ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, ++ bool force) + { +- struct irq_cfg *cfg; ++ struct irq_cfg *cfg = data->chip_data; ++ unsigned int dest, irq = data->irq; + struct irte irte; +- unsigned int dest; +- unsigned int irq; +- int ret = -1; + + if (!cpumask_intersects(mask, cpu_online_mask)) +- return ret; ++ return -EINVAL; + +- irq = desc->irq; + if (get_irte(irq, &irte)) +- return ret; ++ return -EBUSY; + +- cfg = desc->chip_data; + if (assign_irq_vector(irq, cfg, mask)) +- return ret; ++ return -EBUSY; + + dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); + +@@ -2507,29 +2355,14 @@ migrate_ioapic_irq_desc(struct irq_desc + if (cfg->move_in_progress) + send_cleanup_vector(cfg); + +- cpumask_copy(desc->affinity, mask); +- ++ cpumask_copy(data->affinity, mask); + return 0; + } + +-/* +- * Migrates the IRQ destination in the process context. +- */ +-static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, +- const struct cpumask *mask) +-{ +- return migrate_ioapic_irq_desc(desc, mask); +-} +-static int set_ir_ioapic_affinity_irq(unsigned int irq, +- const struct cpumask *mask) +-{ +- struct irq_desc *desc = irq_to_desc(irq); +- +- return set_ir_ioapic_affinity_irq_desc(desc, mask); +-} + #else +-static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, +- const struct cpumask *mask) ++static inline int ++ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, ++ bool force) + { + return 0; + } +@@ -2591,10 +2424,8 @@ unlock: + irq_exit(); + } + +-static void __irq_complete_move(struct irq_desc **descp, unsigned vector) ++static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) + { +- struct irq_desc *desc = *descp; +- struct irq_cfg *cfg = desc->chip_data; + unsigned me; + + if (likely(!cfg->move_in_progress)) +@@ -2606,31 +2437,28 @@ static void __irq_complete_move(struct i + send_cleanup_vector(cfg); + } + +-static void irq_complete_move(struct irq_desc **descp) ++static void irq_complete_move(struct irq_cfg *cfg) + { +- __irq_complete_move(descp, ~get_irq_regs()->orig_ax); ++ __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); + } + + void irq_force_complete_move(int irq) + { +- struct irq_desc *desc = irq_to_desc(irq); +- struct irq_cfg *cfg = desc->chip_data; ++ struct irq_cfg *cfg = get_irq_chip_data(irq); + + if (!cfg) + return; + +- __irq_complete_move(&desc, cfg->vector); ++ __irq_complete_move(cfg, cfg->vector); + } + #else +-static inline void irq_complete_move(struct irq_desc **descp) {} ++static inline void irq_complete_move(struct irq_cfg *cfg) { } + #endif + +-static void ack_apic_edge(unsigned int irq) ++static void ack_apic_edge(struct irq_data *data) + { +- struct irq_desc *desc = irq_to_desc(irq); +- +- irq_complete_move(&desc); +- move_native_irq(irq); ++ irq_complete_move(data->chip_data); ++ move_native_irq(data->irq); + 
ack_APIC_irq(); + } + +@@ -2652,10 +2480,12 @@ atomic_t irq_mis_count; + * Otherwise, we simulate the EOI message manually by changing the trigger + * mode to edge and then back to level, with RTE being masked during this. + */ +-static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) ++static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) + { + struct irq_pin_list *entry; ++ unsigned long flags; + ++ raw_spin_lock_irqsave(&ioapic_lock, flags); + for_each_irq_pin(entry, cfg->irq_2_pin) { + if (mp_ioapics[entry->apic].apicver >= 0x20) { + /* +@@ -2664,7 +2494,7 @@ static void __eoi_ioapic_irq(unsigned in + * intr-remapping table entry. Hence for the io-apic + * EOI we use the pin number. + */ +- if (irq_remapped(irq)) ++ if (irq_remapped(cfg)) + io_apic_eoi(entry->apic, entry->pin); + else + io_apic_eoi(entry->apic, cfg->vector); +@@ -2673,36 +2503,21 @@ static void __eoi_ioapic_irq(unsigned in + __unmask_and_level_IO_APIC_irq(entry); + } + } +-} +- +-static void eoi_ioapic_irq(struct irq_desc *desc) +-{ +- struct irq_cfg *cfg; +- unsigned long flags; +- unsigned int irq; +- +- irq = desc->irq; +- cfg = desc->chip_data; +- +- raw_spin_lock_irqsave(&ioapic_lock, flags); +- __eoi_ioapic_irq(irq, cfg); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + } + +-static void ack_apic_level(unsigned int irq) ++static void ack_apic_level(struct irq_data *data) + { +- struct irq_desc *desc = irq_to_desc(irq); ++ struct irq_cfg *cfg = data->chip_data; ++ int i, do_unmask_irq = 0, irq = data->irq; + unsigned long v; +- int i; +- struct irq_cfg *cfg; +- int do_unmask_irq = 0; + +- irq_complete_move(&desc); ++ irq_complete_move(cfg); + #ifdef CONFIG_GENERIC_PENDING_IRQ + /* If we are moving the irq we need to mask it */ +- if (unlikely(desc->status & IRQ_MOVE_PENDING)) { ++ if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { + do_unmask_irq = 1; +- mask_IO_APIC_irq_desc(desc); ++ mask_ioapic(cfg); + } + #endif + +@@ -2738,7 +2553,6 @@ static void ack_apic_level(unsigned int + * we use the above logic (mask+edge followed by unmask+level) from + * Manfred Spraul to clear the remote IRR. + */ +- cfg = desc->chip_data; + i = cfg->vector; + v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); + +@@ -2758,7 +2572,7 @@ static void ack_apic_level(unsigned int + if (!(v & (1 << (i & 0x1f)))) { + atomic_inc(&irq_mis_count); + +- eoi_ioapic_irq(desc); ++ eoi_ioapic_irq(irq, cfg); + } + + /* Now we can move and renable the irq */ +@@ -2789,62 +2603,58 @@ static void ack_apic_level(unsigned int + * accurate and is causing problems then it is a hardware bug + * and you can go talk to the chipset vendor about it. 
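These io_apic hunks follow the genirq conversion of this kernel generation: chip callbacks now receive a struct irq_data pointer instead of a bare IRQ number, so the per-IRQ cookie no longer has to be looked up through irq_to_desc(), and the irq_chip methods gain an irq_ prefix. A minimal sketch of the pattern, with made-up example_* names:

        #include <linux/kernel.h>
        #include <linux/irq.h>

        /* new-style callback: irq number and chip_data both arrive in irq_data */
        static void example_mask(struct irq_data *data)
        {
                unsigned int irq = data->irq;
                void *cookie = data->chip_data;         /* was irq_to_desc(irq)->chip_data */

                pr_debug("masking irq %u (cookie %p)\n", irq, cookie);
        }

        static struct irq_chip example_chip = {
                .name           = "EXAMPLE",
                .irq_mask       = example_mask,         /* was .mask, taking unsigned int irq */
        };

The cookie is installed with set_irq_chip_data() and read back with get_irq_chip_data(), which is exactly how the converted create_irq_nr(), destroy_irq() and print_IO_APIC() code in this patch handles struct irq_cfg.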
+ */ +- cfg = desc->chip_data; + if (!io_apic_level_ack_pending(cfg)) + move_masked_irq(irq); +- unmask_IO_APIC_irq_desc(desc); ++ unmask_ioapic(cfg); + } + } + + #ifdef CONFIG_INTR_REMAP +-static void ir_ack_apic_edge(unsigned int irq) ++static void ir_ack_apic_edge(struct irq_data *data) + { + ack_APIC_irq(); + } + +-static void ir_ack_apic_level(unsigned int irq) ++static void ir_ack_apic_level(struct irq_data *data) + { +- struct irq_desc *desc = irq_to_desc(irq); +- + ack_APIC_irq(); +- eoi_ioapic_irq(desc); ++ eoi_ioapic_irq(data->irq, data->chip_data); + } + #endif /* CONFIG_INTR_REMAP */ + + static struct irq_chip ioapic_chip __read_mostly = { +- .name = "IO-APIC", +- .startup = startup_ioapic_irq, +- .mask = mask_IO_APIC_irq, +- .unmask = unmask_IO_APIC_irq, +- .ack = ack_apic_edge, +- .eoi = ack_apic_level, ++ .name = "IO-APIC", ++ .irq_startup = startup_ioapic_irq, ++ .irq_mask = mask_ioapic_irq, ++ .irq_unmask = unmask_ioapic_irq, ++ .irq_ack = ack_apic_edge, ++ .irq_eoi = ack_apic_level, + #ifdef CONFIG_SMP +- .set_affinity = set_ioapic_affinity_irq, ++ .irq_set_affinity = ioapic_set_affinity, + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + + static struct irq_chip ir_ioapic_chip __read_mostly = { +- .name = "IR-IO-APIC", +- .startup = startup_ioapic_irq, +- .mask = mask_IO_APIC_irq, +- .unmask = unmask_IO_APIC_irq, ++ .name = "IR-IO-APIC", ++ .irq_startup = startup_ioapic_irq, ++ .irq_mask = mask_ioapic_irq, ++ .irq_unmask = unmask_ioapic_irq, + #ifdef CONFIG_INTR_REMAP +- .ack = ir_ack_apic_edge, +- .eoi = ir_ack_apic_level, ++ .irq_ack = ir_ack_apic_edge, ++ .irq_eoi = ir_ack_apic_level, + #ifdef CONFIG_SMP +- .set_affinity = set_ir_ioapic_affinity_irq, ++ .irq_set_affinity = ir_ioapic_set_affinity, + #endif + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + #endif /* !CONFIG_XEN */ + + static inline void init_IO_APIC_traps(void) + { +- int irq; +- struct irq_desc *desc; + struct irq_cfg *cfg; ++ unsigned int irq; + + /* + * NOTE! The local APIC isn't very good at handling +@@ -2857,12 +2667,12 @@ static inline void init_IO_APIC_traps(vo + * Also, we've got to be careful not to trash gate + * 0x80, because int 0x80 is hm, kind of importantish. ;) + */ +- for_each_irq_desc(irq, desc) { ++ for_each_active_irq(irq) { + #ifdef CONFIG_XEN + if (irq < PIRQ_BASE || irq >= PIRQ_BASE + nr_pirqs) + continue; + #endif +- cfg = desc->chip_data; ++ cfg = get_irq_chip_data(irq); + if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { + /* + * Hmm.. We don't have an entry for this, +@@ -2873,7 +2683,7 @@ static inline void init_IO_APIC_traps(vo + legacy_pic->make_irq(irq); + else + /* Strange. Oh, well.. 
*/ +- desc->chip = &no_irq_chip; ++ set_irq_chip(irq, &no_irq_chip); + } + } + } +@@ -2883,7 +2693,7 @@ static inline void init_IO_APIC_traps(vo + * The local APIC irq-chip implementation: + */ + +-static void mask_lapic_irq(unsigned int irq) ++static void mask_lapic_irq(struct irq_data *data) + { + unsigned long v; + +@@ -2891,7 +2701,7 @@ static void mask_lapic_irq(unsigned int + apic_write(APIC_LVT0, v | APIC_LVT_MASKED); + } + +-static void unmask_lapic_irq(unsigned int irq) ++static void unmask_lapic_irq(struct irq_data *data) + { + unsigned long v; + +@@ -2899,21 +2709,21 @@ static void unmask_lapic_irq(unsigned in + apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); + } + +-static void ack_lapic_irq(unsigned int irq) ++static void ack_lapic_irq(struct irq_data *data) + { + ack_APIC_irq(); + } + + static struct irq_chip lapic_chip __read_mostly = { + .name = "local-APIC", +- .mask = mask_lapic_irq, +- .unmask = unmask_lapic_irq, +- .ack = ack_lapic_irq, ++ .irq_mask = mask_lapic_irq, ++ .irq_unmask = unmask_lapic_irq, ++ .irq_ack = ack_lapic_irq, + }; + +-static void lapic_register_intr(int irq, struct irq_desc *desc) ++static void lapic_register_intr(int irq) + { +- desc->status &= ~IRQ_LEVEL; ++ irq_clear_status_flags(irq, IRQ_LEVEL); + set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, + "edge"); + } +@@ -3016,9 +2826,8 @@ int timer_through_8259 __initdata; + */ + static inline void __init check_timer(void) + { +- struct irq_desc *desc = irq_to_desc(0); +- struct irq_cfg *cfg = desc->chip_data; +- int node = cpu_to_node(boot_cpu_id); ++ struct irq_cfg *cfg = get_irq_chip_data(0); ++ int node = cpu_to_node(0); + int apic1, pin1, apic2, pin2; + unsigned long flags; + int no_pin1 = 0; +@@ -3028,7 +2837,7 @@ static inline void __init check_timer(vo + /* + * get/set the timer IRQ vector: + */ +- legacy_pic->chip->mask(0); ++ legacy_pic->mask(0); + assign_irq_vector(0, cfg, apic->target_cpus()); + + /* +@@ -3087,7 +2896,7 @@ static inline void __init check_timer(vo + add_pin_to_irq_node(cfg, node, apic1, pin1); + setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); + } else { +- /* for edge trigger, setup_IO_APIC_irq already ++ /* for edge trigger, setup_ioapic_irq already + * leave it unmasked. + * so only need to unmask if it is level-trigger + * do we really have level trigger timer? +@@ -3095,12 +2904,12 @@ static inline void __init check_timer(vo + int idx; + idx = find_irq_entry(apic1, pin1, mp_INT); + if (idx != -1 && irq_trigger(idx)) +- unmask_IO_APIC_irq_desc(desc); ++ unmask_ioapic(cfg); + } + if (timer_irq_works()) { + if (nmi_watchdog == NMI_IO_APIC) { + setup_nmi(); +- legacy_pic->chip->unmask(0); ++ legacy_pic->unmask(0); + } + if (disable_timer_pin_1 > 0) + clear_IO_APIC_pin(0, pin1); +@@ -3123,14 +2932,14 @@ static inline void __init check_timer(vo + */ + replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); + setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); +- legacy_pic->chip->unmask(0); ++ legacy_pic->unmask(0); + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); + timer_through_8259 = 1; + if (nmi_watchdog == NMI_IO_APIC) { +- legacy_pic->chip->mask(0); ++ legacy_pic->mask(0); + setup_nmi(); +- legacy_pic->chip->unmask(0); ++ legacy_pic->unmask(0); + } + goto out; + } +@@ -3138,7 +2947,7 @@ static inline void __init check_timer(vo + * Cleanup, just in case ... + */ + local_irq_disable(); +- legacy_pic->chip->mask(0); ++ legacy_pic->mask(0); + clear_IO_APIC_pin(apic2, pin2); + apic_printk(APIC_QUIET, KERN_INFO "....... 
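Throughout check_timer() and the setup paths above, legacy_pic->chip->mask(0) style calls become direct legacy_pic->mask(0) and ->unmask(0) calls. A sketch of the operations structure this assumes, listing only the members the patch actually uses (the real definition in asm/i8259.h carries a few more hooks, such as init):

        /* assumed shape of the legacy 8259 PIC ops referenced in these hunks */
        struct legacy_pic {
                int nr_legacy_irqs;
                struct irq_chip *chip;
                void (*mask)(unsigned int irq);         /* direct helpers, replacing ... */
                void (*unmask)(unsigned int irq);       /* ... ->chip->mask()/unmask() */
                int (*irq_pending)(unsigned int irq);
                void (*make_irq)(unsigned int irq);
        };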
failed.\n"); + } +@@ -3155,16 +2964,16 @@ static inline void __init check_timer(vo + apic_printk(APIC_QUIET, KERN_INFO + "...trying to set up timer as Virtual Wire IRQ...\n"); + +- lapic_register_intr(0, desc); ++ lapic_register_intr(0); + apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ +- legacy_pic->chip->unmask(0); ++ legacy_pic->unmask(0); + + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); + goto out; + } + local_irq_disable(); +- legacy_pic->chip->mask(0); ++ legacy_pic->mask(0); + apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); + apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); + +@@ -3344,49 +3153,42 @@ device_initcall(ioapic_init_sysfs); + /* + * Dynamic irq allocate and deallocation + */ +-unsigned int create_irq_nr(unsigned int irq_want, int node) ++unsigned int create_irq_nr(unsigned int from, int node) + { +- /* Allocate an unused irq */ +- unsigned int irq; +- unsigned int new; ++ struct irq_cfg *cfg; + unsigned long flags; +- struct irq_cfg *cfg_new = NULL; +- struct irq_desc *desc_new = NULL; +- +- irq = 0; +- if (irq_want < nr_irqs_gsi) +- irq_want = nr_irqs_gsi; +- +- raw_spin_lock_irqsave(&vector_lock, flags); +- for (new = irq_want; new < nr_irqs; new++) { +- desc_new = irq_to_desc_alloc_node(new, node); +- if (!desc_new) { +- printk(KERN_INFO "can not get irq_desc for %d\n", new); +- continue; +- } +- cfg_new = desc_new->chip_data; +- +- if (cfg_new->vector != 0) +- continue; ++ unsigned int ret = 0; ++ int irq; + +- desc_new = move_irq_desc(desc_new, node); +- cfg_new = desc_new->chip_data; ++ if (from < nr_irqs_gsi) ++ from = nr_irqs_gsi; + +- if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) +- irq = new; +- break; ++ irq = alloc_irq_from(from, node); ++ if (irq < 0) ++ return 0; ++ cfg = alloc_irq_cfg(irq, node); ++ if (!cfg) { ++ free_irq_at(irq, NULL); ++ return 0; + } +- raw_spin_unlock_irqrestore(&vector_lock, flags); + +- if (irq > 0) +- dynamic_irq_init_keep_chip_data(irq); ++ raw_spin_lock_irqsave(&vector_lock, flags); ++ if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) ++ ret = irq; ++ raw_spin_unlock_irqrestore(&vector_lock, flags); + +- return irq; ++ if (ret) { ++ set_irq_chip_data(irq, cfg); ++ irq_clear_status_flags(irq, IRQ_NOREQUEST); ++ } else { ++ free_irq_at(irq, cfg); ++ } ++ return ret; + } + + int create_irq(void) + { +- int node = cpu_to_node(boot_cpu_id); ++ int node = cpu_to_node(0); + unsigned int irq_want; + int irq; + +@@ -3401,14 +3203,17 @@ int create_irq(void) + + void destroy_irq(unsigned int irq) + { ++ struct irq_cfg *cfg = get_irq_chip_data(irq); + unsigned long flags; + +- dynamic_irq_cleanup_keep_chip_data(irq); ++ irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); + +- free_irte(irq); ++ if (irq_remapped(cfg)) ++ free_irte(irq); + raw_spin_lock_irqsave(&vector_lock, flags); +- __clear_irq_vector(irq, get_irq_chip_data(irq)); ++ __clear_irq_vector(irq, cfg); + raw_spin_unlock_irqrestore(&vector_lock, flags); ++ free_irq_at(irq, cfg); + } + #endif /* !CONFIG_XEN */ + +@@ -3433,7 +3238,7 @@ static int msi_compose_msg(struct pci_de + + dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); + +- if (irq_remapped(irq)) { ++ if (irq_remapped(get_irq_chip_data(irq))) { + struct irte irte; + int ir_index; + u16 sub_handle; +@@ -3441,14 +3246,7 @@ static int msi_compose_msg(struct pci_de + ir_index = map_irq_to_irte_handle(irq, &sub_handle); + BUG_ON(ir_index == -1); + +- memset (&irte, 0, sizeof(irte)); +- +- 
irte.present = 1; +- irte.dst_mode = apic->irq_dest_mode; +- irte.trigger_mode = 0; /* edge */ +- irte.dlvry_mode = apic->irq_delivery_mode; +- irte.vector = cfg->vector; +- irte.dest_id = IRTE_DEST(dest); ++ prepare_irte(&irte, cfg->vector, dest); + + /* Set source-id of interrupt request */ + if (pdev) +@@ -3493,26 +3291,24 @@ static int msi_compose_msg(struct pci_de + } + + #ifdef CONFIG_SMP +-static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) ++static int ++msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) + { +- struct irq_desc *desc = irq_to_desc(irq); +- struct irq_cfg *cfg; ++ struct irq_cfg *cfg = data->chip_data; + struct msi_msg msg; + unsigned int dest; + +- if (set_desc_affinity(desc, mask, &dest)) ++ if (__ioapic_set_affinity(data, mask, &dest)) + return -1; + +- cfg = desc->chip_data; +- +- get_cached_msi_msg_desc(desc, &msg); ++ __get_cached_msi_msg(data->msi_desc, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + +- write_msi_msg_desc(desc, &msg); ++ __write_msi_msg(data->msi_desc, &msg); + + return 0; + } +@@ -3522,17 +3318,17 @@ static int set_msi_irq_affinity(unsigned + * done in the process context using interrupt-remapping hardware. + */ + static int +-ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) ++ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, ++ bool force) + { +- struct irq_desc *desc = irq_to_desc(irq); +- struct irq_cfg *cfg = desc->chip_data; +- unsigned int dest; ++ struct irq_cfg *cfg = data->chip_data; ++ unsigned int dest, irq = data->irq; + struct irte irte; + + if (get_irte(irq, &irte)) + return -1; + +- if (set_desc_affinity(desc, mask, &dest)) ++ if (__ioapic_set_affinity(data, mask, &dest)) + return -1; + + irte.vector = cfg->vector; +@@ -3562,27 +3358,27 @@ ir_set_msi_irq_affinity(unsigned int irq + * which implement the MSI or MSI-X Capability Structure. 
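A condensed sketch of the MSI retargeting pattern the surrounding hunks switch to: the cached MSI message is read and rewritten through the msi_desc reachable from irq_data instead of being re-derived from the IRQ number. The dest and vector parameters stand in for what the real code obtains from __ioapic_set_affinity() and the irq_cfg, and the function name is made up:

        #include <linux/irq.h>
        #include <linux/msi.h>
        #include <asm/msidef.h>

        static int example_msi_retarget(struct irq_data *data, unsigned int dest, u8 vector)
        {
                struct msi_msg msg;

                /* was: get_cached_msi_msg_desc(desc, &msg) */
                __get_cached_msi_msg(data->msi_desc, &msg);

                msg.data &= ~MSI_DATA_VECTOR_MASK;
                msg.data |= MSI_DATA_VECTOR(vector);
                msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
                msg.address_lo |= MSI_ADDR_DEST_ID(dest);

                /* was: write_msi_msg_desc(desc, &msg) */
                __write_msi_msg(data->msi_desc, &msg);
                return 0;
        }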
+ */ + static struct irq_chip msi_chip = { +- .name = "PCI-MSI", +- .unmask = unmask_msi_irq, +- .mask = mask_msi_irq, +- .ack = ack_apic_edge, ++ .name = "PCI-MSI", ++ .irq_unmask = unmask_msi_irq, ++ .irq_mask = mask_msi_irq, ++ .irq_ack = ack_apic_edge, + #ifdef CONFIG_SMP +- .set_affinity = set_msi_irq_affinity, ++ .irq_set_affinity = msi_set_affinity, + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + + static struct irq_chip msi_ir_chip = { +- .name = "IR-PCI-MSI", +- .unmask = unmask_msi_irq, +- .mask = mask_msi_irq, ++ .name = "IR-PCI-MSI", ++ .irq_unmask = unmask_msi_irq, ++ .irq_mask = mask_msi_irq, + #ifdef CONFIG_INTR_REMAP +- .ack = ir_ack_apic_edge, ++ .irq_ack = ir_ack_apic_edge, + #ifdef CONFIG_SMP +- .set_affinity = ir_set_msi_irq_affinity, ++ .irq_set_affinity = ir_msi_set_affinity, + #endif + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + + /* +@@ -3614,8 +3410,8 @@ static int msi_alloc_irte(struct pci_dev + + static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) + { +- int ret; + struct msi_msg msg; ++ int ret; + + ret = msi_compose_msg(dev, irq, &msg, -1); + if (ret < 0) +@@ -3624,12 +3420,8 @@ static int setup_msi_irq(struct pci_dev + set_irq_msi(irq, msidesc); + write_msi_msg(irq, &msg); + +- if (irq_remapped(irq)) { +- struct irq_desc *desc = irq_to_desc(irq); +- /* +- * irq migration in process context +- */ +- desc->status |= IRQ_MOVE_PCNTXT; ++ if (irq_remapped(get_irq_chip_data(irq))) { ++ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); + set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); + } else + set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); +@@ -3639,15 +3431,12 @@ static int setup_msi_irq(struct pci_dev + return 0; + } + +-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ++int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + { +- unsigned int irq; +- int ret, sub_handle; ++ int node, ret, sub_handle, index = 0; ++ unsigned int irq, irq_want; + struct msi_desc *msidesc; +- unsigned int irq_want; + struct intel_iommu *iommu = NULL; +- int index = 0; +- int node; + + /* x86 doesn't support multiple MSI yet */ + if (type == PCI_CAP_ID_MSI && nvec > 1) +@@ -3700,31 +3489,31 @@ error: + return ret; + } + +-void arch_teardown_msi_irq(unsigned int irq) ++void native_teardown_msi_irq(unsigned int irq) + { + destroy_irq(irq); + } + + #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) + #ifdef CONFIG_SMP +-static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) ++static int ++dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, ++ bool force) + { +- struct irq_desc *desc = irq_to_desc(irq); +- struct irq_cfg *cfg; ++ struct irq_cfg *cfg = data->chip_data; ++ unsigned int dest, irq = data->irq; + struct msi_msg msg; +- unsigned int dest; + +- if (set_desc_affinity(desc, mask, &dest)) ++ if (__ioapic_set_affinity(data, mask, &dest)) + return -1; + +- cfg = desc->chip_data; +- + dmar_msi_read(irq, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); ++ msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); + + dmar_msi_write(irq, &msg); + +@@ -3734,14 +3523,14 @@ static int dmar_msi_set_affinity(unsigne + #endif /* CONFIG_SMP */ + + static struct irq_chip dmar_msi_type = { +- .name = "DMAR_MSI", +- 
.unmask = dmar_msi_unmask, +- .mask = dmar_msi_mask, +- .ack = ack_apic_edge, ++ .name = "DMAR_MSI", ++ .irq_unmask = dmar_msi_unmask, ++ .irq_mask = dmar_msi_mask, ++ .irq_ack = ack_apic_edge, + #ifdef CONFIG_SMP +- .set_affinity = dmar_msi_set_affinity, ++ .irq_set_affinity = dmar_msi_set_affinity, + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + + int arch_setup_dmar_msi(unsigned int irq) +@@ -3762,26 +3551,24 @@ int arch_setup_dmar_msi(unsigned int irq + #ifdef CONFIG_HPET_TIMER + + #ifdef CONFIG_SMP +-static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) ++static int hpet_msi_set_affinity(struct irq_data *data, ++ const struct cpumask *mask, bool force) + { +- struct irq_desc *desc = irq_to_desc(irq); +- struct irq_cfg *cfg; ++ struct irq_cfg *cfg = data->chip_data; + struct msi_msg msg; + unsigned int dest; + +- if (set_desc_affinity(desc, mask, &dest)) ++ if (__ioapic_set_affinity(data, mask, &dest)) + return -1; + +- cfg = desc->chip_data; +- +- hpet_msi_read(irq, &msg); ++ hpet_msi_read(data->handler_data, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + +- hpet_msi_write(irq, &msg); ++ hpet_msi_write(data->handler_data, &msg); + + return 0; + } +@@ -3789,34 +3576,33 @@ static int hpet_msi_set_affinity(unsigne + #endif /* CONFIG_SMP */ + + static struct irq_chip ir_hpet_msi_type = { +- .name = "IR-HPET_MSI", +- .unmask = hpet_msi_unmask, +- .mask = hpet_msi_mask, ++ .name = "IR-HPET_MSI", ++ .irq_unmask = hpet_msi_unmask, ++ .irq_mask = hpet_msi_mask, + #ifdef CONFIG_INTR_REMAP +- .ack = ir_ack_apic_edge, ++ .irq_ack = ir_ack_apic_edge, + #ifdef CONFIG_SMP +- .set_affinity = ir_set_msi_irq_affinity, ++ .irq_set_affinity = ir_msi_set_affinity, + #endif + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + + static struct irq_chip hpet_msi_type = { + .name = "HPET_MSI", +- .unmask = hpet_msi_unmask, +- .mask = hpet_msi_mask, +- .ack = ack_apic_edge, ++ .irq_unmask = hpet_msi_unmask, ++ .irq_mask = hpet_msi_mask, ++ .irq_ack = ack_apic_edge, + #ifdef CONFIG_SMP +- .set_affinity = hpet_msi_set_affinity, ++ .irq_set_affinity = hpet_msi_set_affinity, + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + + int arch_setup_hpet_msi(unsigned int irq, unsigned int id) + { +- int ret; + struct msi_msg msg; +- struct irq_desc *desc = irq_to_desc(irq); ++ int ret; + + if (intr_remapping_enabled) { + struct intel_iommu *iommu = map_hpet_to_ir(id); +@@ -3834,9 +3620,9 @@ int arch_setup_hpet_msi(unsigned int irq + if (ret < 0) + return ret; + +- hpet_msi_write(irq, &msg); +- desc->status |= IRQ_MOVE_PCNTXT; +- if (irq_remapped(irq)) ++ hpet_msi_write(get_irq_data(irq), &msg); ++ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); ++ if (irq_remapped(get_irq_chip_data(irq))) + set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, + handle_edge_irq, "edge"); + else +@@ -3869,33 +3655,30 @@ static void target_ht_irq(unsigned int i + write_ht_irq_msg(irq, &msg); + } + +-static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) ++static int ++ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) + { +- struct irq_desc *desc = irq_to_desc(irq); +- struct irq_cfg *cfg; ++ struct irq_cfg *cfg = data->chip_data; + unsigned int dest; + +- if (set_desc_affinity(desc, mask, &dest)) ++ if 
(__ioapic_set_affinity(data, mask, &dest)) + return -1; + +- cfg = desc->chip_data; +- +- target_ht_irq(irq, dest, cfg->vector); +- ++ target_ht_irq(data->irq, dest, cfg->vector); + return 0; + } + + #endif + + static struct irq_chip ht_irq_chip = { +- .name = "PCI-HT", +- .mask = mask_ht_irq, +- .unmask = unmask_ht_irq, +- .ack = ack_apic_edge, ++ .name = "PCI-HT", ++ .irq_mask = mask_ht_irq, ++ .irq_unmask = unmask_ht_irq, ++ .irq_ack = ack_apic_edge, + #ifdef CONFIG_SMP +- .set_affinity = set_ht_irq_affinity, ++ .irq_set_affinity = ht_set_affinity, + #endif +- .retrigger = ioapic_retrigger_irq, ++ .irq_retrigger = ioapic_retrigger_irq, + }; + + int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) +@@ -3969,6 +3752,11 @@ void __init probe_nr_irqs_gsi(void) + printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); + } + ++int get_nr_irqs_gsi(void) ++{ ++ return nr_irqs_gsi; ++} ++ + #ifdef CONFIG_SPARSE_IRQ + int __init arch_probe_nr_irqs(void) + { +@@ -3987,7 +3775,7 @@ int __init arch_probe_nr_irqs(void) + if (nr < nr_irqs) + nr_irqs = nr; + +- return 0; ++ return NR_IRQS_LEGACY; + } + #endif + #endif /* CONFIG_XEN */ +@@ -3995,7 +3783,6 @@ int __init arch_probe_nr_irqs(void) + static int __io_apic_set_pci_routing(struct device *dev, int irq, + struct io_apic_irq_attr *irq_attr) + { +- struct irq_desc *desc; + struct irq_cfg *cfg; + int node; + int ioapic, pin; +@@ -4018,13 +3805,11 @@ static int __io_apic_set_pci_routing(str + if (dev) + node = dev_to_node(dev); + else +- node = cpu_to_node(boot_cpu_id); ++ node = cpu_to_node(0); + +- desc = irq_to_desc_alloc_node(irq, node); +- if (!desc) { +- printk(KERN_INFO "can not get irq_desc %d\n", irq); ++ cfg = alloc_irq_and_cfg_at(irq, node); ++ if (!cfg) + return 0; +- } + + pin = irq_attr->ioapic_pin; + trigger = irq_attr->trigger; +@@ -4034,15 +3819,14 @@ static int __io_apic_set_pci_routing(str + * IRQs < 16 are already in the irq_2_pin[] map + */ + if (irq >= legacy_pic->nr_legacy_irqs) { +- cfg = desc->chip_data; +- if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { ++ if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { + printk(KERN_INFO "can not add pin %d for irq %d\n", + pin, irq); + return 0; + } + } + +- setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); ++ setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); + + return 0; + } +@@ -4238,14 +4022,14 @@ void __init setup_ioapic_dest(void) + */ + if (desc->status & + (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) +- mask = desc->affinity; ++ mask = desc->irq_data.affinity; + else + mask = apic->target_cpus(); + + if (intr_remapping_enabled) +- set_ir_ioapic_affinity_irq_desc(desc, mask); ++ ir_ioapic_set_affinity(&desc->irq_data, mask, false); + else +- set_ioapic_affinity_irq_desc(desc, mask); ++ ioapic_set_affinity(&desc->irq_data, mask, false); + } + + } +@@ -4433,20 +4217,19 @@ void __init mp_register_ioapic(int id, u + void __init pre_init_apic_IRQ0(void) + { + struct irq_cfg *cfg; +- struct irq_desc *desc; + + printk(KERN_INFO "Early APIC setup for system timer0\n"); + #ifndef CONFIG_SMP + phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); + #endif +- desc = irq_to_desc_alloc_node(0, 0); ++ /* Make sure the irq descriptor is set up */ ++ cfg = alloc_irq_and_cfg_at(0, 0); + + setup_local_APIC(); + +- cfg = irq_cfg(0); + add_pin_to_irq_node(cfg, 0, 0, 0); + set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); + +- setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); ++ setup_ioapic_irq(0, 0, 0, cfg, 0, 0); + } + #endif +--- 
head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:43:00.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:43:08.000000000 +0100 +@@ -696,7 +696,7 @@ static void __init early_identify_cpu(st + this_cpu->c_early_init(c); + + #ifdef CONFIG_SMP +- c->cpu_index = boot_cpu_id; ++ c->cpu_index = 0; + #endif + filter_cpuid_features(c, false); + } +@@ -735,16 +735,21 @@ void __init early_cpu_init(void) + } + + /* +- * The NOPL instruction is supposed to exist on all CPUs with +- * family >= 6; unfortunately, that's not true in practice because +- * of early VIA chips and (more importantly) broken virtualizers that +- * are not easy to detect. In the latter case it doesn't even *fail* +- * reliably, so probing for it doesn't even work. Disable it completely ++ * The NOPL instruction is supposed to exist on all CPUs of family >= 6; ++ * unfortunately, that's not true in practice because of early VIA ++ * chips and (more importantly) broken virtualizers that are not easy ++ * to detect. In the latter case it doesn't even *fail* reliably, so ++ * probing for it doesn't even work. Disable it completely on 32-bit + * unless we can find a reliable way to detect all the broken cases. ++ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). + */ + static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) + { ++#ifdef CONFIG_X86_32 + clear_cpu_cap(c, X86_FEATURE_NOPL); ++#else ++ set_cpu_cap(c, X86_FEATURE_NOPL); ++#endif + } + + static void __cpuinit generic_identify(struct cpuinfo_x86 *c) +@@ -1355,13 +1360,6 @@ void __cpuinit cpu_init(void) + clear_all_debug_regs(); + dbg_restore_debug_regs(); + +- /* +- * Force FPU initialization: +- */ +- current_thread_info()->status = 0; +- clear_used_math(); +- mxcsr_feature_mask_init(); +- + fpu_init(); + xsave_init(); + } +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -786,73 +787,7 @@ core_initcall(e820_mark_nvs_memory); + #endif + + /* +- * Find a free area with specified alignment in a specific range. 
+- */ +-u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align) +-{ +- int i; +- +- for (i = 0; i < e820.nr_map; i++) { +- struct e820entry *ei = &e820.map[i]; +- u64 addr; +- u64 ei_start, ei_last; +- +- if (ei->type != E820_RAM) +- continue; +- +- ei_last = ei->addr + ei->size; +- ei_start = ei->addr; +- addr = find_early_area(ei_start, ei_last, start, end, +- size, align); +- +- if (addr != -1ULL) +- return addr; +- } +- return -1ULL; +-} +- +-u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align) +-{ +- return find_e820_area(start, end, size, align); +-} +- +-u64 __init get_max_mapped(void) +-{ +- u64 end = max_pfn_mapped; +- +- end <<= PAGE_SHIFT; +- +- return end; +-} +-/* +- * Find next free range after *start +- */ +-u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align) +-{ +- int i; +- +- for (i = 0; i < e820.nr_map; i++) { +- struct e820entry *ei = &e820.map[i]; +- u64 addr; +- u64 ei_start, ei_last; +- +- if (ei->type != E820_RAM) +- continue; +- +- ei_last = ei->addr + ei->size; +- ei_start = ei->addr; +- addr = find_early_area_size(ei_start, ei_last, start, +- sizep, align); +- +- if (addr != -1ULL) +- return addr; +- } +- +- return -1ULL; +-} +- +-/* +- * pre allocated 4k and reserved it in e820 ++ * pre allocated 4k and reserved it in memblock and e820_saved + */ + u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) + { +@@ -869,8 +804,8 @@ u64 __init early_reserve_e820(u64 startt + } + #endif + for (start = startt; ; start += size) { +- start = find_e820_area_size(start, &size, align); +- if (!(start + 1)) ++ start = memblock_x86_find_in_range_size(start, &size, align); ++ if (start == MEMBLOCK_ERROR) + return 0; + if (size >= sizet) + break; +@@ -924,10 +859,9 @@ u64 __init early_reserve_e820(u64 startt + return 0; + } + #endif +- e820_update_range(addr, sizet, E820_RAM, E820_RESERVED); ++ memblock_x86_reserve_range(addr, addr + sizet, "new next"); + e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); +- printk(KERN_INFO "update e820 for early_reserve_e820\n"); +- update_e820(); ++ printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); + update_e820_saved(); + + return addr; +@@ -989,83 +923,6 @@ unsigned long __init e820_end_of_low_ram + { + return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); + } +-/* +- * Finds an active region in the address range from start_pfn to last_pfn and +- * returns its range in ei_startpfn and ei_endpfn for the e820 entry. 
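The e820 hunks here are part of the early_res to memblock conversion; the old find-and-reserve idiom maps onto the x86 memblock wrappers the patch itself uses roughly as in this sketch (the function name and region label are made up):

        #include <linux/memblock.h>

        /* find the next free chunk at or above 'start' and reserve 'want' bytes of it */
        static u64 __init example_early_reserve(u64 start, u64 want, u64 align)
        {
                u64 size = 0;
                u64 addr = memblock_x86_find_in_range_size(start, &size, align);

                if (addr == MEMBLOCK_ERROR || size < want)
                        return 0;

                /* was: reserve_early(addr, addr + want, "EXAMPLE") */
                memblock_x86_reserve_range(addr, addr + want, "EXAMPLE");
                return addr;
        }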
+- */ +-int __init e820_find_active_region(const struct e820entry *ei, +- unsigned long start_pfn, +- unsigned long last_pfn, +- unsigned long *ei_startpfn, +- unsigned long *ei_endpfn) +-{ +- u64 align = PAGE_SIZE; +- +-#ifdef CONFIG_XEN +- if (last_pfn > xen_start_info->nr_pages) +- last_pfn = xen_start_info->nr_pages; +-#endif +- +- *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT; +- *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT; +- +- /* Skip map entries smaller than a page */ +- if (*ei_startpfn >= *ei_endpfn) +- return 0; +- +- /* Skip if map is outside the node */ +- if (ei->type != E820_RAM || *ei_endpfn <= start_pfn || +- *ei_startpfn >= last_pfn) +- return 0; +- +- /* Check for overlaps */ +- if (*ei_startpfn < start_pfn) +- *ei_startpfn = start_pfn; +- if (*ei_endpfn > last_pfn) +- *ei_endpfn = last_pfn; +- +- return 1; +-} +- +-/* Walk the e820 map and register active regions within a node */ +-void __init e820_register_active_regions(int nid, unsigned long start_pfn, +- unsigned long last_pfn) +-{ +- unsigned long ei_startpfn; +- unsigned long ei_endpfn; +- int i; +- +- for (i = 0; i < e820.nr_map; i++) +- if (e820_find_active_region(&e820.map[i], +- start_pfn, last_pfn, +- &ei_startpfn, &ei_endpfn)) +- add_active_range(nid, ei_startpfn, ei_endpfn); +-#ifdef CONFIG_XEN +- BUG_ON(nid); +- add_active_range(nid, last_pfn, last_pfn); +-#endif +-} +- +-/* +- * Find the hole size (in bytes) in the memory range. +- * @start: starting address of the memory range to scan +- * @end: ending address of the memory range to scan +- */ +-u64 __init e820_hole_size(u64 start, u64 end) +-{ +- unsigned long start_pfn = start >> PAGE_SHIFT; +- unsigned long last_pfn = end >> PAGE_SHIFT; +- unsigned long ei_startpfn, ei_endpfn, ram = 0; +- int i; +- +- for (i = 0; i < e820.nr_map; i++) { +- if (e820_find_active_region(&e820.map[i], +- start_pfn, last_pfn, +- &ei_startpfn, &ei_endpfn)) +- ram += ei_endpfn - ei_startpfn; +- } +- return end - start - ((u64)ram << PAGE_SHIFT); +-} + + static void early_panic(char *msg) + { +@@ -1344,3 +1201,48 @@ void __init setup_memory_map(void) + printk(KERN_INFO "Xen-provided physical RAM map:\n"); + _e820_print_map(&e820, who); + } ++ ++void __init memblock_x86_fill(void) ++{ ++ int i; ++ u64 end; ++ ++ /* ++ * EFI may have more than 128 entries ++ * We are safe to enable resizing, beause memblock_x86_fill() ++ * is rather later for x86 ++ */ ++ memblock_can_resize = 1; ++ ++ for (i = 0; i < e820.nr_map; i++) { ++ struct e820entry *ei = &e820.map[i]; ++ ++ end = ei->addr + ei->size; ++ if (end != (resource_size_t)end) ++ continue; ++ ++ if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) ++ continue; ++ ++ memblock_add(ei->addr, ei->size); ++ } ++ ++ memblock_analyze(); ++ memblock_dump_all(); ++} ++ ++void __init memblock_find_dma_reserve(void) ++{ ++#ifdef CONFIG_X86_64 ++ u64 free_size_pfn; ++ u64 mem_size_pfn; ++ /* ++ * need to find out used area below MAX_DMA_PFN ++ * need to use memblock to get free size in [0, MAX_DMA_PFN] ++ * at first, and assume boot_mem will not take below MAX_DMA_PFN ++ */ ++ mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; ++ free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; ++ set_dma_reserve(mem_size_pfn - free_size_pfn); ++#endif ++} +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-02-01 
15:09:47.000000000 +0100 +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -271,6 +272,18 @@ static int __init setup_early_printk(cha + if (!strncmp(buf, "xen", 3)) + early_console_register(&xenboot_console, keep); + #endif ++#ifdef CONFIG_X86_MRST_EARLY_PRINTK ++ if (!strncmp(buf, "mrst", 4)) { ++ mrst_early_console_init(); ++ early_console_register(&early_mrst_console, keep); ++ } ++ ++ if (!strncmp(buf, "hsu", 3)) { ++ hsu_early_console_init(); ++ early_console_register(&early_hsu_console, keep); ++ } ++ ++#endif + buf++; + } + return 0; +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:09:47.000000000 +0100 +@@ -119,8 +119,7 @@ NMI_MASK = 0x80000000 + + /* unfortunately push/pop can't be no-op */ + .macro PUSH_GS +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 + .endm + .macro POP_GS pop=0 + addl $(4 + \pop), %esp +@@ -144,14 +143,12 @@ NMI_MASK = 0x80000000 + #else /* CONFIG_X86_32_LAZY_GS */ + + .macro PUSH_GS +- pushl %gs +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %gs + /*CFI_REL_OFFSET gs, 0*/ + .endm + + .macro POP_GS pop=0 +-98: popl %gs +- CFI_ADJUST_CFA_OFFSET -4 ++98: popl_cfi %gs + /*CFI_RESTORE gs*/ + .if \pop <> 0 + add $\pop, %esp +@@ -199,35 +196,25 @@ NMI_MASK = 0x80000000 + .macro SAVE_ALL + cld + PUSH_GS +- pushl %fs +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %fs + /*CFI_REL_OFFSET fs, 0;*/ +- pushl %es +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %es + /*CFI_REL_OFFSET es, 0;*/ +- pushl %ds +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ds + /*CFI_REL_OFFSET ds, 0;*/ +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + CFI_REL_OFFSET eax, 0 +- pushl %ebp +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ebp + CFI_REL_OFFSET ebp, 0 +- pushl %edi +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %edi + CFI_REL_OFFSET edi, 0 +- pushl %esi +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %esi + CFI_REL_OFFSET esi, 0 +- pushl %edx +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %edx + CFI_REL_OFFSET edx, 0 +- pushl %ecx +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ecx + CFI_REL_OFFSET ecx, 0 +- pushl %ebx +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 + movl $(__USER_DS), %edx + movl %edx, %ds +@@ -238,39 +225,29 @@ NMI_MASK = 0x80000000 + .endm + + .macro RESTORE_INT_REGS +- popl %ebx +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %ebx + CFI_RESTORE ebx +- popl %ecx +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %ecx + CFI_RESTORE ecx +- popl %edx +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %edx + CFI_RESTORE edx +- popl %esi +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %esi + CFI_RESTORE esi +- popl %edi +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %edi + CFI_RESTORE edi +- popl %ebp +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %ebp + CFI_RESTORE ebp +- popl %eax +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %eax + CFI_RESTORE eax + .endm + + .macro RESTORE_REGS pop=0 + RESTORE_INT_REGS +-1: popl %ds +- CFI_ADJUST_CFA_OFFSET -4 ++1: popl_cfi %ds + /*CFI_RESTORE ds;*/ +-2: popl %es +- CFI_ADJUST_CFA_OFFSET -4 ++2: popl_cfi %es + /*CFI_RESTORE es;*/ +-3: popl %fs +- CFI_ADJUST_CFA_OFFSET -4 ++3: popl_cfi %fs + /*CFI_RESTORE fs;*/ + POP_GS \pop + .pushsection .fixup, "ax" +@@ -324,16 +301,12 @@ NMI_MASK = 0x80000000 + + ENTRY(ret_from_fork) + CFI_STARTPROC +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + call schedule_tail + GET_THREAD_INFO(%ebp) +- popl %eax +- CFI_ADJUST_CFA_OFFSET -4 +- pushl $0x0202 # Reset kernel eflags +- CFI_ADJUST_CFA_OFFSET 4 +- popfl +- 
CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %eax ++ pushl_cfi $0x0202 # Reset kernel eflags ++ popfl_cfi + jmp syscall_exit + CFI_ENDPROC + END(ret_from_fork) +@@ -413,29 +386,23 @@ sysenter_past_esp: + * enough kernel state to call TRACE_IRQS_OFF can be called - but + * we immediately enable interrupts at that point anyway. + */ +- pushl $(__USER_DS) +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $__USER_DS + /*CFI_REL_OFFSET ss, 0*/ +- pushl %ebp +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ebp + CFI_REL_OFFSET esp, 0 +- pushfl ++ pushfl_cfi + orl $X86_EFLAGS_IF, (%esp) +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $(__USER_CS) +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $__USER_CS + /*CFI_REL_OFFSET cs, 0*/ + /* + * Push current_thread_info()->sysenter_return to the stack. + * A tiny bit of offset fixup is necessary - 4*4 means the 4 words + * pushed above; +8 corresponds to copy_thread's esp0 setting. + */ +- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp) + CFI_REL_OFFSET eip, 0 + +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + SAVE_ALL + ENABLE_INTERRUPTS(CLBR_NONE) + +@@ -490,8 +457,7 @@ sysenter_audit: + movl %eax,%edx /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ + call audit_syscall_entry +- pushl %ebx +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ebx + movl PT_EAX(%esp),%eax /* reload syscall number */ + jmp sysenter_do_call + +@@ -535,8 +501,7 @@ ENTRY(ia32pv_sysenter_target) + addl $4,%esp + CFI_ADJUST_CFA_OFFSET -4 + /* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */ +- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) + /* + * Load the potential sixth argument from user stack. + * Careful about security. 
+@@ -559,8 +524,7 @@ ENDPROC(ia32pv_sysenter_target) + # system call handler stub + ENTRY(system_call) + RING0_INT_FRAME # can't unwind into user space anyway +- pushl %eax # save orig_eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax # save orig_eax + SAVE_ALL + GET_THREAD_INFO(%ebp) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) +@@ -609,7 +573,6 @@ restore_nocheck: + jnz restore_all_enable_events # != 0 => enable event delivery + #endif + RESTORE_REGS 4 # skip orig_eax/error_code +- CFI_ADJUST_CFA_OFFSET -4 + irq_return: + INTERRUPT_RETURN + .section .fixup,"ax" +@@ -663,10 +626,8 @@ ldt_ss: + shr $16, %edx + mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ + mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ +- pushl $__ESPFIX_SS +- CFI_ADJUST_CFA_OFFSET 4 +- push %eax /* new kernel esp */ +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $__ESPFIX_SS ++ pushl_cfi %eax /* new kernel esp */ + /* Disable interrupts, but do not irqtrace this section: we + * will soon execute iret and the tracer was already set to + * the irqstate after the iret */ +@@ -735,11 +696,9 @@ work_notifysig: # deal with pending s + + ALIGN + work_notifysig_v86: +- pushl %ecx # save ti_flags for do_notify_resume +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ecx # save ti_flags for do_notify_resume + call save_v86_state # %eax contains pt_regs pointer +- popl %ecx +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %ecx + movl %eax, %esp + #else + movl %esp, %eax +@@ -819,14 +778,18 @@ ptregs_##name: \ + #define PTREGSCALL3(name) \ + ALIGN; \ + ptregs_##name: \ ++ CFI_STARTPROC; \ + leal 4(%esp),%eax; \ +- pushl %eax; \ ++ pushl_cfi %eax; \ + movl PT_EDX(%eax),%ecx; \ + movl PT_ECX(%eax),%edx; \ + movl PT_EBX(%eax),%eax; \ + call sys_##name; \ + addl $4,%esp; \ +- ret ++ CFI_ADJUST_CFA_OFFSET -4; \ ++ ret; \ ++ CFI_ENDPROC; \ ++ENDPROC(ptregs_##name) + + PTREGSCALL1(iopl) + PTREGSCALL0(fork) +@@ -841,15 +804,19 @@ PTREGSCALL1(vm86old) + /* Clone is an oddball. The 4th arg is in %edi */ + ALIGN; + ptregs_clone: ++ CFI_STARTPROC + leal 4(%esp),%eax +- pushl %eax +- pushl PT_EDI(%eax) ++ pushl_cfi %eax ++ pushl_cfi PT_EDI(%eax) + movl PT_EDX(%eax),%ecx + movl PT_ECX(%eax),%edx + movl PT_EBX(%eax),%eax + call sys_clone + addl $8,%esp ++ CFI_ADJUST_CFA_OFFSET -8 + ret ++ CFI_ENDPROC ++ENDPROC(ptregs_clone) + + #ifndef CONFIG_XEN + .macro FIXUP_ESPFIX_STACK +@@ -865,10 +832,8 @@ ptregs_clone: + mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ + shl $16, %eax + addl %esp, %eax /* the adjusted stack pointer */ +- pushl $__KERNEL_DS +- CFI_ADJUST_CFA_OFFSET 4 +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $__KERNEL_DS ++ pushl_cfi %eax + lss (%esp), %esp /* switch to the normal stack segment */ + CFI_ADJUST_CFA_OFFSET -8 + .endm +@@ -905,8 +870,7 @@ vector=FIRST_EXTERNAL_VECTOR + .if vector <> FIRST_EXTERNAL_VECTOR + CFI_ADJUST_CFA_OFFSET -4 + .endif +-1: pushl $(~vector+0x80) /* Note: always in signed byte range */ +- CFI_ADJUST_CFA_OFFSET 4 ++1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */ + .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 + jmp 2f + .endif +@@ -946,8 +910,7 @@ ENDPROC(common_interrupt) + #define BUILD_INTERRUPT3(name, nr, fn) \ + ENTRY(name) \ + RING0_INT_FRAME; \ +- pushl $~(nr); \ +- CFI_ADJUST_CFA_OFFSET 4; \ ++ pushl_cfi $~(nr); \ + SAVE_ALL; \ + TRACE_IRQS_OFF \ + movl %esp,%eax; \ +@@ -984,8 +947,7 @@ ENDPROC(name) + # so we can simply throw away the new one. 
+ ENTRY(hypervisor_callback) + RING0_INT_FRAME +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + SAVE_ALL + movl PT_CS(%esp),%ecx + movl PT_EIP(%esp),%eax +@@ -1005,8 +967,7 @@ ENTRY(hypervisor_callback) + addl $PT_OLDESP,%esp # Remove eflags...ebx from stack frame. + #endif + .Ldo_upcall: +- push %esp +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %esp + call evtchn_do_upcall + add $4,%esp + CFI_ADJUST_CFA_OFFSET -4 +@@ -1111,21 +1072,18 @@ ENTRY(failsafe_callback) + + ENTRY(coprocessor_error) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_coprocessor_error +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 ++ pushl_cfi $do_coprocessor_error + jmp error_code + CFI_ENDPROC + END(coprocessor_error) + + ENTRY(simd_coprocessor_error) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 + #ifdef CONFIG_X86_INVD_BUG + /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ +-661: pushl $do_general_protection ++661: pushl_cfi $do_general_protection + 662: + .section .altinstructions,"a" + .balign 4 +@@ -1140,19 +1098,16 @@ ENTRY(simd_coprocessor_error) + 664: + .previous + #else +- pushl $do_simd_coprocessor_error ++ pushl_cfi $do_simd_coprocessor_error + #endif +- CFI_ADJUST_CFA_OFFSET 4 + jmp error_code + CFI_ENDPROC + END(simd_coprocessor_error) + + ENTRY(device_not_available) + RING0_INT_FRAME +- pushl $-1 # mark this as an int +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_device_not_available +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $-1 # mark this as an int ++ pushl_cfi $do_device_not_available + jmp error_code + CFI_ENDPROC + END(device_not_available) +@@ -1174,82 +1129,68 @@ END(native_irq_enable_sysexit) + + ENTRY(overflow) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_overflow +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 ++ pushl_cfi $do_overflow + jmp error_code + CFI_ENDPROC + END(overflow) + + ENTRY(bounds) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_bounds +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 ++ pushl_cfi $do_bounds + jmp error_code + CFI_ENDPROC + END(bounds) + + ENTRY(invalid_op) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_invalid_op +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 ++ pushl_cfi $do_invalid_op + jmp error_code + CFI_ENDPROC + END(invalid_op) + + ENTRY(coprocessor_segment_overrun) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_coprocessor_segment_overrun +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 ++ pushl_cfi $do_coprocessor_segment_overrun + jmp error_code + CFI_ENDPROC + END(coprocessor_segment_overrun) + + ENTRY(invalid_TSS) + RING0_EC_FRAME +- pushl $do_invalid_TSS +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $do_invalid_TSS + jmp error_code + CFI_ENDPROC + END(invalid_TSS) + + ENTRY(segment_not_present) + RING0_EC_FRAME +- pushl $do_segment_not_present +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $do_segment_not_present + jmp error_code + CFI_ENDPROC + END(segment_not_present) + + ENTRY(stack_segment) + RING0_EC_FRAME +- pushl $do_stack_segment +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $do_stack_segment + jmp error_code + CFI_ENDPROC + END(stack_segment) + + ENTRY(alignment_check) + RING0_EC_FRAME +- pushl $do_alignment_check +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $do_alignment_check + jmp error_code + CFI_ENDPROC + END(alignment_check) + + ENTRY(divide_error) + RING0_INT_FRAME +- pushl $0 # no error code +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_divide_error +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 # no error code ++ pushl_cfi 
$do_divide_error + jmp error_code + CFI_ENDPROC + END(divide_error) +@@ -1257,10 +1198,8 @@ END(divide_error) + #ifdef CONFIG_X86_MCE + ENTRY(machine_check) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 +- pushl machine_check_vector +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 ++ pushl_cfi machine_check_vector + jmp error_code + CFI_ENDPROC + END(machine_check) +@@ -1269,18 +1208,15 @@ END(machine_check) + #ifndef CONFIG_XEN + ENTRY(spurious_interrupt_bug) + RING0_INT_FRAME +- pushl $0 +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $do_spurious_interrupt_bug +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $0 ++ pushl_cfi $do_spurious_interrupt_bug + jmp error_code + CFI_ENDPROC + #endif /* !CONFIG_XEN */ + + ENTRY(fixup_4gb_segment) + RING0_EC_FRAME +- pushl $do_fixup_4gb_segment +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $do_fixup_4gb_segment + jmp error_code + CFI_ENDPROC + END(spurious_interrupt_bug) +@@ -1413,8 +1349,7 @@ ENTRY(ia32pv_cstar_target) + movl %ebp,%ecx + movl $__USER_CS,4(%esp) + movl 12(%esp),%ebp +- pushl %eax # save orig_eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax # save orig_eax + /* + * Load the potential sixth argument from user stack. + * Careful about security. +@@ -1545,40 +1480,29 @@ mask=0 + + ENTRY(page_fault) + RING0_EC_FRAME +- pushl $do_page_fault +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $do_page_fault + ALIGN + error_code: + /* the function address is in %gs's slot on the stack */ +- pushl %fs +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %fs + /*CFI_REL_OFFSET fs, 0*/ +- pushl %es +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %es + /*CFI_REL_OFFSET es, 0*/ +- pushl %ds +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ds + /*CFI_REL_OFFSET ds, 0*/ +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + CFI_REL_OFFSET eax, 0 +- pushl %ebp +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ebp + CFI_REL_OFFSET ebp, 0 +- pushl %edi +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %edi + CFI_REL_OFFSET edi, 0 +- pushl %esi +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %esi + CFI_REL_OFFSET esi, 0 +- pushl %edx +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %edx + CFI_REL_OFFSET edx, 0 +- pushl %ecx +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ecx + CFI_REL_OFFSET ecx, 0 +- pushl %ebx +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 + cld + movl $(__KERNEL_PERCPU), %ecx +@@ -1621,12 +1545,9 @@ END(page_fault) + movl TSS_sysenter_sp0 + \offset(%esp), %esp + CFI_DEF_CFA esp, 0 + CFI_UNDEFINED eip +- pushfl +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $__KERNEL_CS +- CFI_ADJUST_CFA_OFFSET 4 +- pushl $sysenter_past_esp +- CFI_ADJUST_CFA_OFFSET 4 ++ pushfl_cfi ++ pushl_cfi $__KERNEL_CS ++ pushl_cfi $sysenter_past_esp + CFI_REL_OFFSET eip, 0 + .endm + #endif /* CONFIG_XEN */ +@@ -1639,8 +1560,7 @@ ENTRY(debug) + FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn + debug_stack_correct: + #endif /* !CONFIG_XEN */ +- pushl $-1 # mark this as an int +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $-1 # mark this as an int + SAVE_ALL + TRACE_IRQS_OFF + xorl %edx,%edx # error code 0 +@@ -1660,33 +1580,28 @@ END(debug) + */ + ENTRY(nmi) + RING0_INT_FRAME +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + #ifndef CONFIG_XEN + movl %ss, %eax + cmpw $__ESPFIX_SS, %ax +- popl %eax +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %eax + je nmi_espfix_stack + cmpl $ia32_sysenter_target,(%esp) + je nmi_stack_fixup +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + movl %esp,%eax + /* Do not access memory above the end of our stack page, + * it might not exist. 
+ */ + andl $(THREAD_SIZE-1),%eax + cmpl $(THREAD_SIZE-20),%eax +- popl %eax +- CFI_ADJUST_CFA_OFFSET -4 ++ popl_cfi %eax + jae nmi_stack_correct + cmpl $ia32_sysenter_target,12(%esp) + je nmi_debug_stack_check + nmi_stack_correct: + /* We have a RING0_INT_FRAME here */ +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + SAVE_ALL + xorl %edx,%edx # zero error code + movl %esp,%eax # pt_regs pointer +@@ -1715,18 +1630,14 @@ nmi_espfix_stack: + * + * create the pointer to lss back + */ +- pushl %ss +- CFI_ADJUST_CFA_OFFSET 4 +- pushl %esp +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %ss ++ pushl_cfi %esp + addl $4, (%esp) + /* copy the iret frame of 12 bytes */ + .rept 3 +- pushl 16(%esp) +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi 16(%esp) + .endr +- pushl %eax +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi %eax + SAVE_ALL + FIXUP_ESPFIX_STACK # %eax == %esp + xorl %edx,%edx # zero error code +@@ -1748,8 +1659,7 @@ END(nmi) + + ENTRY(int3) + RING0_INT_FRAME +- pushl $-1 # mark this as an int +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $-1 # mark this as an int + SAVE_ALL + TRACE_IRQS_OFF + xorl %edx,%edx # zero error code +@@ -1761,8 +1671,7 @@ END(int3) + + ENTRY(general_protection) + RING0_EC_FRAME +- pushl $do_general_protection +- CFI_ADJUST_CFA_OFFSET 4 ++ pushl_cfi $do_general_protection + jmp error_code + CFI_ENDPROC + END(general_protection) +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:09:47.000000000 +0100 +@@ -204,23 +204,17 @@ NMI_MASK = 0x80000000 + .macro FAKE_STACK_FRAME child_rip + /* push in order ss, rsp, eflags, cs, rip */ + xorl %eax, %eax +- pushq $__KERNEL_DS /* ss */ +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi $__KERNEL_DS /* ss */ + /*CFI_REL_OFFSET ss,0*/ +- pushq %rax /* rsp */ +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi %rax /* rsp */ + CFI_REL_OFFSET rsp,0 +- pushq $X86_EFLAGS_IF /* eflags - interrupts on */ +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */ + /*CFI_REL_OFFSET rflags,0*/ +- pushq $__KERNEL_CS /* cs */ +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi $__KERNEL_CS /* cs */ + /*CFI_REL_OFFSET cs,0*/ +- pushq \child_rip /* rip */ +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi \child_rip /* rip */ + CFI_REL_OFFSET rip,0 +- pushq %rax /* orig rax */ +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi %rax /* orig rax */ + .endm + + .macro UNFAKE_STACK_FRAME +@@ -333,6 +327,7 @@ NMI_MASK = 0x80000000 + + #ifndef CONFIG_XEN + /* save partial stack frame */ ++ .pushsection .kprobes.text, "ax" + ENTRY(save_args) + XCPT_FRAME + cld +@@ -372,6 +367,7 @@ ENTRY(save_args) + ret + CFI_ENDPROC + END(save_args) ++ .popsection + #endif + + ENTRY(save_rest) +@@ -433,10 +429,8 @@ ENTRY(ret_from_fork) + + LOCK ; btr $TIF_FORK,TI_flags(%r8) + +- push kernel_eflags(%rip) +- CFI_ADJUST_CFA_OFFSET 8 +- popf # reset kernel eflags +- CFI_ADJUST_CFA_OFFSET -8 ++ pushq_cfi kernel_eflags(%rip) ++ popfq_cfi # reset kernel eflags + + call schedule_tail # rdi: 'prev' task parameter + +@@ -532,11 +526,9 @@ sysret_careful: + jnc sysret_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) +- pushq %rdi +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi %rdi + call schedule +- popq %rdi +- CFI_ADJUST_CFA_OFFSET -8 ++ popq_cfi %rdi + jmp sysret_check + + /* Handle a signal */ +@@ -649,11 +641,9 @@ int_careful: + jnc int_very_careful + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) +- pushq %rdi +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi %rdi + call schedule +- popq %rdi +- CFI_ADJUST_CFA_OFFSET -8 ++ 
popq_cfi %rdi + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + jmp int_with_check +@@ -667,12 +657,10 @@ int_check_syscall_exit_work: + /* Check for syscall exit trace */ + testl $_TIF_WORK_SYSCALL_EXIT,%edx + jz int_signal +- pushq %rdi +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi %rdi + leaq 8(%rsp),%rdi # &ptregs -> arg1 + call syscall_trace_leave +- popq %rdi +- CFI_ADJUST_CFA_OFFSET -8 ++ popq_cfi %rdi + andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi + jmp int_restore_rest + +@@ -729,9 +717,8 @@ END(ptregscall_common) + + ENTRY(stub_execve) + CFI_STARTPROC +- popq %r11 +- CFI_ADJUST_CFA_OFFSET -8 +- CFI_REGISTER rip, r11 ++ addq $8, %rsp ++ PARTIAL_FRAME 0 + SAVE_REST + FIXUP_TOP_OF_STACK %r11 + movq %rsp, %rcx +@@ -750,7 +737,7 @@ END(stub_execve) + ENTRY(stub_rt_sigreturn) + CFI_STARTPROC + addq $8, %rsp +- CFI_ADJUST_CFA_OFFSET -8 ++ PARTIAL_FRAME 0 + SAVE_REST + movq %rsp,%rdi + FIXUP_TOP_OF_STACK %r11 +@@ -792,11 +779,9 @@ retint_careful: + jnc retint_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) +- pushq %rdi +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi %rdi + call schedule +- popq %rdi +- CFI_ADJUST_CFA_OFFSET -8 ++ popq_cfi %rdi + GET_THREAD_INFO(%rcx) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +@@ -842,8 +827,7 @@ END(retint_check) + .macro apicinterrupt num sym do_sym + ENTRY(\sym) + INTR_FRAME +- pushq $~(\num) +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi $~(\num) + interrupt \do_sym + jmp error_entry + CFI_ENDPROC +@@ -867,22 +851,10 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ + x86_platform_ipi smp_x86_platform_ipi + + #ifdef CONFIG_SMP +-apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \ +- invalidate_interrupt0 smp_invalidate_interrupt +-apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \ +- invalidate_interrupt1 smp_invalidate_interrupt +-apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \ +- invalidate_interrupt2 smp_invalidate_interrupt +-apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \ +- invalidate_interrupt3 smp_invalidate_interrupt +-apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \ +- invalidate_interrupt4 smp_invalidate_interrupt +-apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \ +- invalidate_interrupt5 smp_invalidate_interrupt +-apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \ +- invalidate_interrupt6 smp_invalidate_interrupt +-apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \ +- invalidate_interrupt7 smp_invalidate_interrupt ++.irpc idx, "01234567" ++apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ ++ invalidate_interrupt\idx smp_invalidate_interrupt ++.endr + #endif + + apicinterrupt THRESHOLD_APIC_VECTOR \ +@@ -909,9 +881,9 @@ apicinterrupt ERROR_APIC_VECTOR \ + apicinterrupt SPURIOUS_APIC_VECTOR \ + spurious_interrupt smp_spurious_interrupt + +-#ifdef CONFIG_PERF_EVENTS +-apicinterrupt LOCAL_PENDING_VECTOR \ +- perf_pending_interrupt smp_perf_pending_interrupt ++#ifdef CONFIG_IRQ_WORK ++apicinterrupt IRQ_WORK_VECTOR \ ++ irq_work_interrupt smp_irq_work_interrupt + #endif + #endif /* !CONFIG_XEN */ + +@@ -926,8 +898,8 @@ ENTRY(\sym) + movq 8(%rsp),%r11 + CFI_RESTORE r11 + movq $-1,8(%rsp) /* ORIG_RAX: no syscall to restart */ +- subq $(15-1)*8,%rsp +- CFI_ADJUST_CFA_OFFSET (15-1)*8 ++ subq $ORIG_RAX-R15-1*8,%rsp ++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15-1*8 + call error_entry + DEFAULT_FRAME 0 + movq %rsp,%rdi /* pt_regs pointer */ +@@ -953,8 +925,8 @@ ENTRY(\sym) + CFI_RESTORE rcx + movq 8(%rsp),%r11 + CFI_RESTORE r11 +- subq $(15-2)*8,%rsp +- CFI_ADJUST_CFA_OFFSET (15-2)*8 ++ subq $ORIG_RAX-R15-2*8,%rsp ++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15-2*8 + call error_entry + DEFAULT_FRAME 0 + 
movq %rsp,%rdi /* pt_regs pointer */ +@@ -1074,8 +1046,7 @@ ENTRY(failsafe_callback) + CFI_RESTORE r11 + addq $0x30,%rsp + CFI_ADJUST_CFA_OFFSET -0x30 +- pushq $0 +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi $0 + SAVE_ALL + jmp error_exit + CFI_ENDPROC +@@ -1143,8 +1114,7 @@ END(kernel_execve) + /* Call softirq on interrupt stack. Interrupts are off. */ + ENTRY(call_softirq) + CFI_STARTPROC +- push %rbp +- CFI_ADJUST_CFA_OFFSET 8 ++ pushq_cfi %rbp + CFI_REL_OFFSET rbp,0 + mov %rsp,%rbp + CFI_DEF_CFA_REGISTER rbp +@@ -1153,6 +1123,7 @@ ENTRY(call_softirq) + push %rbp # backlink for old unwinder + call __do_softirq + leaveq ++ CFI_RESTORE rbp + CFI_DEF_CFA_REGISTER rsp + CFI_ADJUST_CFA_OFFSET -8 + decl PER_CPU_VAR(irq_count) +@@ -1191,7 +1162,7 @@ paranoidzeroentry machine_check *machine + + /* ebx: no swapgs flag */ + ENTRY(paranoid_exit) +- INTR_FRAME ++ DEFAULT_FRAME + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + testl %ebx,%ebx /* swapgs needed? */ +@@ -1271,7 +1242,6 @@ error_sti: + #endif + TRACE_IRQS_OFF + ret +- CFI_ENDPROC + + #ifndef CONFIG_XEN + /* +@@ -1298,6 +1268,7 @@ bstep_iret: + movq %rcx,RIP+8(%rsp) + jmp error_swapgs + #endif ++ CFI_ENDPROC + END(error_entry) + + +@@ -1338,11 +1309,9 @@ END(do_nmi_callback) + #ifndef CONFIG_IA32_EMULATION + ENTRY(ignore_sysret) + INTR_FRAME +- popq %rcx +- CFI_ADJUST_CFA_OFFSET -8 ++ popq_cfi %rcx + CFI_RESTORE rcx +- popq %r11 +- CFI_ADJUST_CFA_OFFSET -8 ++ popq_cfi %r11 + CFI_RESTORE r11 + mov $-ENOSYS,%eax + HYPERVISOR_IRET 0 +--- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -1,5 +1,6 @@ + #include + #include ++#include + #include + + #include +@@ -53,7 +54,7 @@ void __init reserve_ebda_region(void) + lowmem = 0x9f000; + + /* reserve all memory between lowmem and the 1MB mark */ +- reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved"); ++ memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); + } + #else /* CONFIG_XEN */ + #include +@@ -103,10 +104,12 @@ void __init xen_start_kernel(void) + WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable, + VMASST_TYPE_writable_pagetables)); + +- reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE), +- __pa(xen_start_info->pt_base) +- + (xen_start_info->nr_pt_frames << PAGE_SHIFT), +- "Xen provided"); ++ memblock_init(); ++ memblock_x86_reserve_range(ALIGN(__pa_symbol(&_end), PAGE_SIZE), ++ __pa(xen_start_info->pt_base) ++ + (xen_start_info->nr_pt_frames ++ << PAGE_SHIFT), ++ "Xen provided"); + + #ifdef CONFIG_X86_32 + { +--- head-2011-03-17.orig/arch/x86/kernel/head32-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head32-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -17,6 +18,7 @@ + #include + #include + #include ++#include + + static void __init i386_default_early_setup(void) + { +@@ -49,17 +51,18 @@ void __init i386_start_kernel(void) + BUG_ON(pte_index(hypervisor_virt_start)); + #endif + ++ memblock_init(); ++ + #ifdef CONFIG_X86_TRAMPOLINE + /* + * But first pinch a few for the stack/trampoline stuff + * FIXME: Don't need the extra page at 4K, but need to fix + * trampoline before removing it. 
(see the GDT stuff) + */ +- reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, +- "EX TRAMPOLINE"); ++ memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); + #endif + +- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); ++ memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + + #ifndef CONFIG_XEN + #ifdef CONFIG_BLK_DEV_INITRD +@@ -69,7 +72,7 @@ void __init i386_start_kernel(void) + u64 ramdisk_image = boot_params.hdr.ramdisk_image; + u64 ramdisk_size = boot_params.hdr.ramdisk_size; + u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); +- reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); ++ memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); + } + #endif + +--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -119,7 +120,9 @@ void __init x86_64_start_reservations(ch + { + copy_bootdata(__va(real_mode_data)); + +- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); ++ memblock_init(); ++ ++ memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + + /* + * At this point everything still needed from the boot loader +--- head-2011-03-17.orig/arch/x86/kernel/irq-xen.c 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -71,10 +71,10 @@ static int show_other_interrupts(struct + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); + seq_printf(p, " Performance monitoring interrupts\n"); +- seq_printf(p, "%*s: ", prec, "PND"); ++ seq_printf(p, "%*s: ", prec, "IWI"); + for_each_online_cpu(j) +- seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); +- seq_printf(p, " Performance pending work\n"); ++ seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); ++ seq_printf(p, " IRQ work interrupts\n"); + #endif + #ifndef CONFIG_XEN + if (x86_platform_ipi_callback) { +@@ -172,7 +172,7 @@ int show_interrupts(struct seq_file *p, + seq_printf(p, "%*d: ", prec, i); + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); +- seq_printf(p, " %8s", desc->chip->name); ++ seq_printf(p, " %8s", desc->irq_data.chip->name); + seq_printf(p, "-%-8s", desc->name); + + if (action) { +@@ -198,7 +198,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) + sum += irq_stats(cpu)->apic_timer_irqs; + sum += irq_stats(cpu)->irq_spurious_count; + sum += irq_stats(cpu)->apic_perf_irqs; +- sum += irq_stats(cpu)->apic_pending_irqs; ++ sum += irq_stats(cpu)->apic_irq_work_irqs; + #endif + #ifndef CONFIG_XEN + if (x86_platform_ipi_callback) +@@ -302,6 +302,7 @@ void fixup_irqs(void) + unsigned int irq; + static int warned; + struct irq_desc *desc; ++ struct irq_data *data; + static DECLARE_BITMAP(irqs_used, NR_IRQS); + + for_each_irq_desc(irq, desc) { +@@ -317,7 +318,8 @@ void fixup_irqs(void) + /* interrupt's are disabled at this point */ + raw_spin_lock(&desc->lock); + +- affinity = desc->affinity; ++ data = &desc->irq_data; ++ affinity = data->affinity; + if (!irq_has_action(irq) || + cpumask_subset(affinity, cpu_online_mask)) { + raw_spin_unlock(&desc->lock); +@@ -332,16 +334,16 @@ void fixup_irqs(void) + affinity = cpu_all_mask; + } + +- if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) +- desc->chip->mask(irq); 
++ if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) ++ data->chip->irq_mask(data); + +- if (desc->chip->set_affinity) +- desc->chip->set_affinity(irq, affinity); +- else if (desc->chip != &no_irq_chip && !(warned++)) ++ if (data->chip->irq_set_affinity) ++ data->chip->irq_set_affinity(data, affinity, true); ++ else if (data->chip != &no_irq_chip && !(warned++)) + set_affinity = 0; + +- if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) +- desc->chip->unmask(irq); ++ if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) ++ data->chip->irq_unmask(data); + + raw_spin_unlock(&desc->lock); + +@@ -367,9 +369,10 @@ void fixup_irqs(void) + continue; + + if (xen_test_irq_pending(irq)) { ++ data = irq_get_irq_data(irq); + raw_spin_lock(&desc->lock); +- if (desc->chip->retrigger) +- desc->chip->retrigger(irq); ++ if (data->chip->irq_retrigger) ++ data->chip->irq_retrigger(data); + raw_spin_unlock(&desc->lock); + } + } +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ head-2011-03-17/arch/x86/kernel/irq_work-xen.c 2011-02-03 11:19:35.000000000 +0100 +@@ -0,0 +1,23 @@ ++/* ++ * x86/Xen specific code for irq_work ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_SMP ++irqreturn_t smp_irq_work_interrupt(int irq, void *dev_id) ++{ ++ inc_irq_stat(apic_irq_work_irqs); ++ irq_work_run(); ++ ++ return IRQ_HANDLED; ++} ++ ++void arch_irq_work_raise(void) ++{ ++ xen_send_IPI_self(IRQ_WORK_VECTOR); ++} ++#endif +--- head-2011-03-17.orig/arch/x86/kernel/microcode_core-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/microcode_core-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -12,7 +12,7 @@ + * Software Developer's Manual + * Order Number 253668 or free download from: + * +- * http://developer.intel.com/design/pentium4/manuals/253668.htm ++ * http://developer.intel.com/Assets/PDF/manual/253668.pdf + * + * For more information, go to http://www.urbanmyth.org/microcode + * +@@ -117,6 +117,7 @@ static const struct file_operations micr + .owner = THIS_MODULE, + .write = microcode_write, + .open = microcode_open, ++ .llseek = no_llseek, + }; + + static struct miscdevice microcode_dev = { +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -686,7 +687,7 @@ static void __init smp_reserve_memory(st + { + unsigned long size = get_mpc_size(mpf->physptr); + +- reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc"); ++ memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc"); + } + #endif + +@@ -719,7 +720,7 @@ static int __init smp_scan_config(unsign + mpf, (u64)virt_to_phys(mpf)); + + mem = virt_to_phys(mpf); +- reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf"); ++ memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf"); + if (mpf->physptr) + smp_reserve_memory(mpf); + #else +--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -11,8 +11,8 @@ + #include + #include + #include +-#include + #include ++#include + + static int forbid_dac __read_mostly; + +@@ -44,6 +44,8 @@ int iommu_detected __read_mostly = 0; + */ + int iommu_pass_through __read_mostly; + ++extern struct iommu_table_entry __iommu_table[], 
__iommu_table_end[]; ++ + /* Dummy device used for NULL arguments (normally ISA). */ + struct device x86_dma_fallback_dev = { + .init_name = "fallback device", +@@ -142,7 +144,10 @@ static struct dma_map_ops swiotlb_dma_op + .dma_supported = swiotlb_dma_supported + }; + +-#define pci_xen_swiotlb_detect() 1 ++static int __init pci_xen_swiotlb_detect(void) ++{ ++ return 1; ++} + + static void __init pci_xen_swiotlb_init(void) + { +@@ -153,26 +158,28 @@ static void __init pci_xen_swiotlb_init( + } + } + ++IOMMU_INIT_FINISH(pci_xen_swiotlb_detect, NULL, pci_xen_swiotlb_init, NULL); ++ + void __init pci_iommu_alloc(void) + { ++ struct iommu_table_entry *p; ++ + /* free the range so iommu could get some range less than 4G */ + dma32_free_bootmem(); + +- if (pci_xen_swiotlb_detect() || pci_swiotlb_detect()) +- goto out; +- +- gart_iommu_hole_init(); +- +- detect_calgary(); ++ sort_iommu_table(__iommu_table, __iommu_table_end); ++ check_iommu_entries(__iommu_table, __iommu_table_end); + +- detect_intel_iommu(); +- +- /* needs to be called after gart_iommu_hole_init */ +- amd_iommu_detect(); +-out: +- pci_xen_swiotlb_init(); ++ for (p = __iommu_table; p < __iommu_table_end; p++) { ++ if (p && p->detect && p->detect() > 0) { ++ p->flags |= IOMMU_DETECTED; ++ if (p->early_init) ++ p->early_init(); ++ if (p->flags & IOMMU_FINISH_IF_DETECTED) ++ break; ++ } ++ } + } +- + void *dma_generic_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t flag) + { +@@ -375,6 +382,7 @@ EXPORT_SYMBOL(dma_supported); + + static int __init pci_iommu_init(void) + { ++ struct iommu_table_entry *p; + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + + #ifdef CONFIG_PCI +@@ -382,14 +390,10 @@ static int __init pci_iommu_init(void) + #endif + x86_init.iommu.iommu_init(); + +-#ifndef CONFIG_XEN +- if (swiotlb || xen_swiotlb) { +- printk(KERN_INFO "PCI-DMA: " +- "Using software bounce buffering for IO (SWIOTLB)\n"); +- swiotlb_print_info(); +- } else +- swiotlb_free(); +-#endif ++ for (p = __iommu_table; p < __iommu_table_end; p++) { ++ if (p && (p->flags & IOMMU_DETECTED) && p->late_init) ++ p->late_init(); ++ } + + return 0; + } +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:25:01.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:25:11.000000000 +0100 +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -83,7 +84,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -107,11 +107,12 @@ + #include + #include + #include +-#include ++#include + #ifdef CONFIG_X86_64 + #include + #endif + #include ++#include + + #ifdef CONFIG_XEN + #include +@@ -155,7 +156,6 @@ unsigned long max_pfn_mapped; + RESERVE_BRK(dmi_alloc, 65536); + #endif + +-unsigned int boot_cpu_id __read_mostly; + + static __initdata unsigned long _brk_start = (unsigned long)__brk_base; + unsigned long _brk_end = (unsigned long)__brk_base; +@@ -337,7 +337,7 @@ static inline void init_gbpages(void) + static void __init reserve_brk(void) + { + if (_brk_end > _brk_start) +- reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK"); ++ memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK"); + + /* Mark brk area as locked down and no longer taking any + new allocations */ +@@ -360,17 +360,16 @@ static void __init relocate_initrd(void) + char *p, *q; + + /* We need to move the initrd down into lowmem */ +- ramdisk_here = find_e820_area(0, end_of_lowmem, area_size, ++ ramdisk_here = memblock_find_in_range(0, 
end_of_lowmem, area_size, + PAGE_SIZE); + +- if (ramdisk_here == -1ULL) ++ if (ramdisk_here == MEMBLOCK_ERROR) + panic("Cannot find place for new RAMDISK of size %lld\n", + ramdisk_size); + + /* Note: this includes all the lowmem currently occupied by + the initrd, we rely on that fact to keep the data intact. */ +- reserve_early(ramdisk_here, ramdisk_here + area_size, +- "NEW RAMDISK"); ++ memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK"); + initrd_start = ramdisk_here + PAGE_OFFSET; + initrd_end = initrd_start + ramdisk_size; + printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", +@@ -443,7 +442,7 @@ static void __init reserve_initrd(void) + initrd_start = 0; + + if (ramdisk_size >= (end_of_lowmem>>1)) { +- free_early(ramdisk_image, ramdisk_end); ++ memblock_x86_free_range(ramdisk_image, ramdisk_end); + printk(KERN_ERR "initrd too large to handle, " + "disabling initrd\n"); + return; +@@ -469,7 +468,7 @@ static void __init reserve_initrd(void) + + relocate_initrd(); + +- free_early(ramdisk_image, ramdisk_end); ++ memblock_x86_free_range(ramdisk_image, ramdisk_end); + } + #else + static void __init reserve_initrd(void) +@@ -529,7 +528,7 @@ static void __init e820_reserve_setup_da + #endif + } + +-static void __init reserve_early_setup_data(void) ++static void __init memblock_x86_reserve_range_setup_data(void) + { + #ifndef CONFIG_XEN + struct setup_data *data; +@@ -542,7 +541,7 @@ static void __init reserve_early_setup_d + while (pa_data) { + data = early_memremap(pa_data, sizeof(*data)); + sprintf(buf, "setup data %x", data->type); +- reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf); ++ memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf); + pa_data = data->next; + early_iounmap(data, sizeof(*data)); + } +@@ -565,6 +564,18 @@ static inline unsigned long long get_tot + return total << PAGE_SHIFT; + } + ++/* ++ * Keep the crash kernel below this limit. On 32 bits earlier kernels ++ * would limit the kernel to the low 512 MiB due to mapping restrictions. ++ * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this ++ * limit once kexec-tools are fixed. 
++ */ ++#ifdef CONFIG_X86_32 ++# define CRASH_KERNEL_ADDR_MAX (512 << 20) ++#else ++# define CRASH_KERNEL_ADDR_MAX (896 << 20) ++#endif ++ + static void __init reserve_crashkernel(void) + { + unsigned long long total_mem; +@@ -582,23 +593,27 @@ static void __init reserve_crashkernel(v + if (crash_base <= 0) { + const unsigned long long alignment = 16<<20; /* 16M */ + +- crash_base = find_e820_area(alignment, ULONG_MAX, crash_size, +- alignment); +- if (crash_base == -1ULL) { ++ /* ++ * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX ++ */ ++ crash_base = memblock_find_in_range(alignment, ++ CRASH_KERNEL_ADDR_MAX, crash_size, alignment); ++ ++ if (crash_base == MEMBLOCK_ERROR) { + pr_info("crashkernel reservation failed - No suitable area found.\n"); + return; + } + } else { + unsigned long long start; + +- start = find_e820_area(crash_base, ULONG_MAX, crash_size, +- 1<<20); ++ start = memblock_find_in_range(crash_base, ++ crash_base + crash_size, crash_size, 1<<20); + if (start != crash_base) { + pr_info("crashkernel reservation failed - memory is in use.\n"); + return; + } + } +- reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL"); ++ memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL"); + + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", +@@ -684,93 +699,27 @@ static __init void reserve_ibft_region(v + + #ifndef CONFIG_XEN + if (size) +- reserve_early_overlap_ok(addr, addr + size, "ibft"); ++ memblock_x86_reserve_range(addr, addr + size, "* ibft"); + #endif + } + +-#ifdef CONFIG_X86_RESERVE_LOW_64K +-static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) +-{ +- printk(KERN_NOTICE +- "%s detected: BIOS may corrupt low RAM, working around it.\n", +- d->ident); +- +- e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED); +- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); +- +- return 0; +-} +-#endif +- +-/* List of systems that have known low memory corruption BIOS problems */ +-static struct dmi_system_id __initdata bad_bios_dmi_table[] = { +-#ifdef CONFIG_X86_RESERVE_LOW_64K +- { +- .callback = dmi_low_memory_corruption, +- .ident = "AMI BIOS", +- .matches = { +- DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), +- }, +- }, +- { +- .callback = dmi_low_memory_corruption, +- .ident = "Phoenix BIOS", +- .matches = { +- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"), +- }, +- }, +- { +- .callback = dmi_low_memory_corruption, +- .ident = "Phoenix/MSC BIOS", +- .matches = { +- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"), +- }, +- }, +- /* +- * AMI BIOS with low memory corruption was found on Intel DG45ID and +- * DG45FC boards. +- * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will +- * match only DMI_BOARD_NAME and see if there is more bad products +- * with this vendor. +- */ +- { +- .callback = dmi_low_memory_corruption, +- .ident = "AMI BIOS", +- .matches = { +- DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), +- }, +- }, +- { +- .callback = dmi_low_memory_corruption, +- .ident = "AMI BIOS", +- .matches = { +- DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), +- }, +- }, +- /* +- * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so +- * match on the product name. 
+- */ +- { +- .callback = dmi_low_memory_corruption, +- .ident = "Phoenix BIOS", +- .matches = { +- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), +- }, +- }, +-#endif +- {} +-}; +- + #ifndef CONFIG_XEN ++static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; ++ + static void __init trim_bios_range(void) + { + /* + * A special case is the first 4Kb of memory; + * This is a BIOS owned area, not kernel ram, but generally + * not listed as such in the E820 table. ++ * ++ * This typically reserves additional memory (64KiB by default) ++ * since some BIOSes are known to corrupt low memory. See the ++ * Kconfig help text for X86_RESERVE_LOW. + */ +- e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED); ++ e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE), ++ E820_RAM, E820_RESERVED); ++ + /* + * special case: Some BIOSen report the PC BIOS + * area (640->1Mb) as ram even though it is not. +@@ -779,8 +728,39 @@ static void __init trim_bios_range(void) + e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); + } ++ ++static int __init parse_reservelow(char *p) ++{ ++ unsigned long long size; ++ ++ if (!p) ++ return -EINVAL; ++ ++ size = memparse(p, &p); ++ ++ if (size < 4096) ++ size = 4096; ++ ++ if (size > 640*1024) ++ size = 640*1024; ++ ++ reserve_low = size; ++ ++ return 0; ++} ++ ++early_param("reservelow", parse_reservelow); + #endif + ++static u64 __init get_max_mapped(void) ++{ ++ u64 end = max_pfn_mapped; ++ ++ end <<= PAGE_SHIFT; ++ ++ return end; ++} ++ + /* + * Determine if we were loaded by an EFI loader. If so, then we have also been + * passed the efi memmap, systab, etc., so we should use these data structures +@@ -798,6 +778,7 @@ void __init setup_arch(char **cmdline_p) + { + int acpi = 0; + int k8 = 0; ++ unsigned long flags; + #ifdef CONFIG_XEN + unsigned int i; + unsigned long p2m_pages; +@@ -820,14 +801,27 @@ void __init setup_arch(char **cmdline_p) + #ifdef CONFIG_X86_32 + memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); + visws_early_detect(); ++ ++#ifndef CONFIG_XEN ++ /* ++ * copy kernel address range established so far and switch ++ * to the proper swapper page table ++ */ ++ clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ initial_page_table + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ ++ load_cr3(swapper_pg_dir); ++ __flush_tlb_all(); ++#endif + #else + printk(KERN_INFO "Command line: %s\n", boot_command_line); + #endif + +- /* VMI may relocate the fixmap; do this before touching ioremap area */ +- vmi_init(); +- +- /* OFW also may relocate the fixmap */ ++ /* ++ * If we have OLPC OFW, we might end up relocating the fixmap due to ++ * reserve_top(), so do this before touching the ioremap area. 
++ */ + olpc_ofw_detect(); + + early_trap_init(); +@@ -873,7 +867,7 @@ void __init setup_arch(char **cmdline_p) + #endif + 4)) { + efi_enabled = 1; +- efi_reserve_early(); ++ efi_memblock_x86_reserve_range(); + } + #endif + #else /* CONFIG_XEN */ +@@ -901,6 +895,7 @@ void __init setup_arch(char **cmdline_p) + + x86_init.oem.arch_setup(); + ++ iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; + setup_memory_map(); + parse_setup_data(); + /* update the e820_saved too */ +@@ -953,11 +948,8 @@ void __init setup_arch(char **cmdline_p) + + x86_report_nx(); + +- /* Must be before kernel pagetables are setup */ +- vmi_activate(); +- + /* after early param, so could get panic from serial */ +- reserve_early_setup_data(); ++ memblock_x86_reserve_range_setup_data(); + + if (acpi_mps_check()) { + #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) +@@ -976,12 +968,9 @@ void __init setup_arch(char **cmdline_p) + if (efi_enabled) + efi_init(); + +- if (is_initial_xendomain()) { ++ if (is_initial_xendomain()) + dmi_scan_machine(); + +- dmi_check_system(bad_bios_dmi_table); +- } +- + /* + * VMware detection requires dmi to be available, so this + * needs to be done after dmi_scan_machine, for the BP. +@@ -1016,8 +1005,6 @@ void __init setup_arch(char **cmdline_p) + */ + max_pfn = e820_end_of_ram_pfn(); + +- /* preallocate 4k for mptable mpc */ +- early_reserve_e820_mpc_new(); + /* update e820 for memory not covered by WB MTRRs */ + mtrr_bp_init(); + #ifndef CONFIG_XEN +@@ -1044,20 +1031,8 @@ void __init setup_arch(char **cmdline_p) + max_low_pfn = max_pfn; + + high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; +-#ifndef CONFIG_XEN +- max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; +-#endif + #endif + +-#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION +- setup_bios_corruption_check(); +-#endif +- +- printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", +- max_pfn_mapped< 1) { + xen_send_IPI_allbutself(REBOOT_VECTOR); + +- /* Don't wait longer than a second */ +- wait = USEC_PER_SEC; +- while (num_online_cpus() > 1 && wait--) ++ /* ++ * Don't wait longer than a second if the caller ++ * didn't ask us to wait. 
++ */ ++ timeout = USEC_PER_SEC; ++ while (num_online_cpus() > 1 && (wait || timeout--)) + udelay(1); + } + +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -568,6 +568,7 @@ dotraplinkage void __kprobes do_debug(st + if (regs->flags & X86_VM_MASK) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, + error_code, 1); ++ preempt_conditional_cli(regs); + return; + } + +@@ -770,21 +771,10 @@ asmlinkage void math_state_restore(void) + } + EXPORT_SYMBOL_GPL(math_state_restore); + +-#ifndef CONFIG_MATH_EMULATION +-void math_emulate(struct math_emu_info *info) +-{ +- printk(KERN_EMERG +- "math-emulation not enabled and no coprocessor found.\n"); +- printk(KERN_EMERG "killing %s.\n", current->comm); +- force_sig(SIGFPE, current); +- schedule(); +-} +-#endif /* CONFIG_MATH_EMULATION */ +- + dotraplinkage void __kprobes + do_device_not_available(struct pt_regs *regs, long error_code) + { +-#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN) ++#ifdef CONFIG_MATH_EMULATION + if (read_cr0() & X86_CR0_EM) { + struct math_emu_info info = { }; + +@@ -792,12 +782,12 @@ do_device_not_available(struct pt_regs * + + info.regs = regs; + math_emulate(&info); +- } else { +- math_state_restore(); /* interrupts still off */ +- conditional_sti(regs); ++ return; + } +-#else +- math_state_restore(); ++#endif ++ math_state_restore(); /* interrupts still off */ ++#ifdef CONFIG_X86_32 ++ conditional_sti(regs); + #endif + } + +@@ -880,20 +870,6 @@ void __init trap_init(void) + if (ret) + printk("HYPERVISOR_set_trap_table failed (%d)\n", ret); + +-#ifdef CONFIG_X86_32 +- if (cpu_has_fxsr) { +- printk(KERN_INFO "Enabling fast FPU save and restore... "); +- set_in_cr4(X86_CR4_OSFXSR); +- printk("done.\n"); +- } +- if (cpu_has_xmm) { +- printk(KERN_INFO +- "Enabling unmasked SIMD FPU exception support... "); +- set_in_cr4(X86_CR4_OSXMMEXCPT); +- printk("done.\n"); +- } +- +-#endif + /* + * Should be a barrier for any external CPU state: + */ +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -11,6 +11,7 @@ + #include /* __kprobes, ... */ + #include /* kmmio_handler, ... */ + #include /* perf_sw_event */ ++#include /* hstate_index_to_shift */ + + #include /* dotraplinkage, ... */ + #include /* pgd_*(), ... */ +@@ -160,15 +161,20 @@ is_prefetch(struct pt_regs *regs, unsign + + static void + force_sig_info_fault(int si_signo, int si_code, unsigned long address, +- struct task_struct *tsk) ++ struct task_struct *tsk, int fault) + { ++ unsigned lsb = 0; + siginfo_t info; + + info.si_signo = si_signo; + info.si_errno = 0; + info.si_code = si_code; + info.si_addr = (void __user *)address; +- info.si_addr_lsb = si_code == BUS_MCEERR_AR ? 
PAGE_SHIFT : 0; ++ if (fault & VM_FAULT_HWPOISON_LARGE) ++ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); ++ if (fault & VM_FAULT_HWPOISON) ++ lsb = PAGE_SHIFT; ++ info.si_addr_lsb = lsb; + + force_sig_info(si_signo, &info, tsk); + } +@@ -176,9 +182,6 @@ force_sig_info_fault(int si_signo, int s + DEFINE_SPINLOCK(pgd_lock); + LIST_HEAD(pgd_list); + +-#define pgd_page_table(what, pg) \ +- spin_##what(&((struct mm_struct *)(pg)->private)->page_table_lock) +- + #ifdef CONFIG_X86_32 + static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) + { +@@ -240,13 +243,16 @@ void vmalloc_sync_all(void) + + spin_lock_irqsave(&pgd_lock, flags); + list_for_each_entry(page, &pgd_list, lru) { +- pmd_t *pmd; ++ spinlock_t *pgt_lock; ++ pmd_t *ret; ++ ++ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + +- pgd_page_table(lock, page); +- pmd = vmalloc_sync_one(page_address(page), address); +- pgd_page_table(unlock, page); ++ spin_lock(pgt_lock); ++ ret = vmalloc_sync_one(page_address(page), address); ++ spin_unlock(pgt_lock); + +- if (!pmd) ++ if (!ret) + break; + } + spin_unlock_irqrestore(&pgd_lock, flags); +@@ -268,6 +274,8 @@ static noinline __kprobes int vmalloc_fa + if (!(address >= VMALLOC_START && address < VMALLOC_END)) + return -1; + ++ WARN_ON_ONCE(in_nmi()); ++ + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. +@@ -343,31 +351,7 @@ out: + + void vmalloc_sync_all(void) + { +- unsigned long address; +- +- for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; +- address += PGDIR_SIZE) { +- +- const pgd_t *pgd_ref = pgd_offset_k(address); +- unsigned long flags; +- struct page *page; +- +- if (pgd_none(*pgd_ref)) +- continue; +- +- spin_lock_irqsave(&pgd_lock, flags); +- list_for_each_entry(page, &pgd_list, lru) { +- pgd_t *pgd; +- pgd = (pgd_t *)page_address(page) + pgd_index(address); +- pgd_page_table(lock, page); +- if (pgd_none(*pgd)) +- set_pgd(pgd, *pgd_ref); +- else +- BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); +- pgd_page_table(unlock, page); +- } +- spin_unlock_irqrestore(&pgd_lock, flags); +- } ++ sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); + } + + /* +@@ -388,6 +372,8 @@ static noinline __kprobes int vmalloc_fa + if (!(address >= VMALLOC_START && address < VMALLOC_END)) + return -1; + ++ WARN_ON_ONCE(in_nmi()); ++ + /* + * Copy kernel mappings over when needed. This can also + * happen within a race in page table update. 
In the later +@@ -750,7 +736,7 @@ __bad_area_nosemaphore(struct pt_regs *r + tsk->thread.error_code = error_code | (address >= TASK_SIZE); + tsk->thread.trap_no = 14; + +- force_sig_info_fault(SIGSEGV, si_code, address, tsk); ++ force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); + + return; + } +@@ -835,14 +821,14 @@ do_sigbus(struct pt_regs *regs, unsigned + tsk->thread.trap_no = 14; + + #ifdef CONFIG_MEMORY_FAILURE +- if (fault & VM_FAULT_HWPOISON) { ++ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + printk(KERN_ERR + "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", + tsk->comm, tsk->pid, address); + code = BUS_MCEERR_AR; + } + #endif +- force_sig_info_fault(SIGBUS, code, address, tsk); ++ force_sig_info_fault(SIGBUS, code, address, tsk, fault); + } + + static noinline void +@@ -852,7 +838,8 @@ mm_fault_error(struct pt_regs *regs, uns + if (fault & VM_FAULT_OOM) { + out_of_memory(regs, error_code, address); + } else { +- if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON)) ++ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| ++ VM_FAULT_HWPOISON_LARGE)) + do_sigbus(regs, error_code, address, fault); + else + BUG(); +@@ -913,8 +900,14 @@ spurious_fault(unsigned long error_code, + if (pmd_large(*pmd)) + return spurious_fault_check(error_code, (pte_t *) pmd); + ++ /* ++ * Note: don't use pte_present() here, since it returns true ++ * if the _PAGE_PROTNONE bit is set. However, this aliases the ++ * _PAGE_GLOBAL bit, which for kernel pages give false positives ++ * when CONFIG_DEBUG_PAGEALLOC is used. ++ */ + pte = pte_offset_kernel(pmd, address); +- if (!pte_present(*pte)) ++ if (!(pte_flags(*pte) & _PAGE_PRESENT)) + return 0; + + ret = spurious_fault_check(error_code, pte); +@@ -934,9 +927,9 @@ spurious_fault(unsigned long error_code, + int show_unhandled_signals = 1; + + static inline int +-access_error(unsigned long error_code, int write, struct vm_area_struct *vma) ++access_error(unsigned long error_code, struct vm_area_struct *vma) + { +- if (write) { ++ if (error_code & PF_WRITE) { + /* write, present and write, not present: */ + if (unlikely(!(vma->vm_flags & VM_WRITE))) + return 1; +@@ -971,8 +964,10 @@ do_page_fault(struct pt_regs *regs, unsi + struct task_struct *tsk; + unsigned long address; + struct mm_struct *mm; +- int write; + int fault; ++ int write = error_code & PF_WRITE; ++ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | ++ (write ? FAULT_FLAG_WRITE : 0); + + /* Set the "privileged fault" bit to something sane. */ + if (user_mode_vm(regs)) +@@ -1100,6 +1095,7 @@ do_page_fault(struct pt_regs *regs, unsi + bad_area_nosemaphore(regs, error_code, address); + return; + } ++retry: + down_read(&mm->mmap_sem); + } else { + /* +@@ -1143,9 +1139,7 @@ do_page_fault(struct pt_regs *regs, unsi + * we can handle it.. + */ + good_area: +- write = error_code & PF_WRITE; +- +- if (unlikely(access_error(error_code, write, vma))) { ++ if (unlikely(access_error(error_code, vma))) { + bad_area_access_error(regs, error_code, address); + return; + } +@@ -1155,21 +1149,34 @@ good_area: + * make sure we exit gracefully rather than endlessly redo + * the fault: + */ +- fault = handle_mm_fault(mm, vma, address, write ? 
FAULT_FLAG_WRITE : 0); ++ fault = handle_mm_fault(mm, vma, address, flags); + + if (unlikely(fault & VM_FAULT_ERROR)) { + mm_fault_error(regs, error_code, address, fault); + return; + } + +- if (fault & VM_FAULT_MAJOR) { +- tsk->maj_flt++; +- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, +- regs, address); +- } else { +- tsk->min_flt++; +- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, +- regs, address); ++ /* ++ * Major/minor page fault accounting is only done on the ++ * initial attempt. If we go through a retry, it is extremely ++ * likely that the page will be found in page cache at that point. ++ */ ++ if (flags & FAULT_FLAG_ALLOW_RETRY) { ++ if (fault & VM_FAULT_MAJOR) { ++ tsk->maj_flt++; ++ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, ++ regs, address); ++ } else { ++ tsk->min_flt++; ++ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, ++ regs, address); ++ } ++ if (fault & VM_FAULT_RETRY) { ++ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk ++ * of starvation. */ ++ flags &= ~FAULT_FLAG_ALLOW_RETRY; ++ goto retry; ++ } + } + + check_v8086_mode(regs, address, tsk); +--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -9,6 +9,7 @@ void *kmap(struct page *page) + return page_address(page); + return kmap_high(page); + } ++EXPORT_SYMBOL(kmap); + + void kunmap(struct page *page) + { +@@ -18,6 +19,7 @@ void kunmap(struct page *page) + return; + kunmap_high(page); + } ++EXPORT_SYMBOL(kunmap); + + /* + * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because +@@ -27,10 +29,10 @@ void kunmap(struct page *page) + * However when holding an atomic kmap it is not legal to sleep, so atomic + * kmaps are appropriate for short, tight code paths only. + */ +-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) ++void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { +- enum fixed_addresses idx; + unsigned long vaddr; ++ int idx, type; + + /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + pagefault_disable(); +@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page + if (!PageHighMem(page)) + return page_address(page); + +- debug_kmap_atomic(type); +- ++ type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +@@ -47,44 +48,57 @@ void *kmap_atomic_prot(struct page *page + + return (void *)vaddr; + } ++EXPORT_SYMBOL(kmap_atomic_prot); + +-void *kmap_atomic(struct page *page, enum km_type type) ++void *__kmap_atomic(struct page *page) ++{ ++ return kmap_atomic_prot(page, kmap_prot); ++} ++EXPORT_SYMBOL(__kmap_atomic); ++ ++/* ++ * This is the same as kmap_atomic() but can map memory that doesn't ++ * have a struct page associated with it. ++ */ ++void *kmap_atomic_pfn(unsigned long pfn) + { +- return kmap_atomic_prot(page, type, kmap_prot); ++ return kmap_atomic_prot_pfn(pfn, kmap_prot); + } ++EXPORT_SYMBOL_GPL(kmap_atomic_pfn); + +-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) ++void __kunmap_atomic(void *kvaddr) + { + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; +- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); + +- /* +- * Force other mappings to Oops if they'll try to access this pte +- * without first remap it. 
Keeping stale mappings around is a bad idea +- * also, in case the page changes cacheability attributes or becomes +- * a protected page in a hypervisor. +- */ +- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) ++ if (vaddr >= __fix_to_virt(FIX_KMAP_END) && ++ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { ++ int idx, type; ++ ++ type = kmap_atomic_idx(); ++ idx = type + KM_TYPE_NR * smp_processor_id(); ++ ++#ifdef CONFIG_DEBUG_HIGHMEM ++ WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++#endif ++ /* ++ * Force other mappings to Oops if they'll try to access this ++ * pte without first remap it. Keeping stale mappings around ++ * is a bad idea also, in case the page changes cacheability ++ * attributes or becomes a protected page in a hypervisor. ++ */ + kpte_clear_flush(kmap_pte-idx, vaddr); +- else { ++ kmap_atomic_idx_pop(); ++ } + #ifdef CONFIG_DEBUG_HIGHMEM ++ else { + BUG_ON(vaddr < PAGE_OFFSET); + BUG_ON(vaddr >= (unsigned long)high_memory); +-#endif + } ++#endif + + pagefault_enable(); + } +- +-/* +- * This is the same as kmap_atomic() but can map memory that doesn't +- * have a struct page associated with it. +- */ +-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) +-{ +- return kmap_atomic_prot_pfn(pfn, type, kmap_prot); +-} +-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ ++EXPORT_SYMBOL(__kunmap_atomic); + + struct page *kmap_atomic_to_page(void *ptr) + { +@@ -98,6 +112,7 @@ struct page *kmap_atomic_to_page(void *p + pte = kmap_pte - (idx - FIX_KMAP_BEGIN); + return pte_page(*pte); + } ++EXPORT_SYMBOL(kmap_atomic_to_page); + + void clear_highpage(struct page *page) + { +@@ -117,6 +132,7 @@ void clear_highpage(struct page *page) + clear_page(kaddr); + kunmap_atomic(kaddr, KM_USER0); + } ++EXPORT_SYMBOL(clear_highpage); + + void copy_highpage(struct page *to, struct page *from) + { +@@ -143,14 +159,6 @@ void copy_highpage(struct page *to, stru + kunmap_atomic(vfrom, KM_USER0); + kunmap_atomic(vto, KM_USER1); + } +- +-EXPORT_SYMBOL(kmap); +-EXPORT_SYMBOL(kunmap); +-EXPORT_SYMBOL(kmap_atomic); +-EXPORT_SYMBOL(kunmap_atomic_notypecheck); +-EXPORT_SYMBOL(kmap_atomic_prot); +-EXPORT_SYMBOL(kmap_atomic_to_page); +-EXPORT_SYMBOL(clear_highpage); + EXPORT_SYMBOL(copy_highpage); + + void __init set_highmem_pages_init(void) +--- head-2011-03-17.orig/arch/x86/mm/init-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -2,6 +2,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -340,7 +341,7 @@ unsigned long __init_refok init_memory_m + __flush_tlb_all(); + + if (!after_bootmem && e820_table_top > e820_table_start) +- reserve_early(e820_table_start << PAGE_SHIFT, ++ memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, + e820_table_top << PAGE_SHIFT, "PGTABLE"); + + if (!after_bootmem) +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -70,7 +71,7 @@ static __init void *alloc_low_page(void) + panic("alloc_low_page: ran out of memory"); + + adr = __va(pfn * PAGE_SIZE); +- memset(adr, 0, PAGE_SIZE); ++ clear_page(adr); + return adr; + } + +@@ -458,49 +459,28 @@ static void __init add_one_highpage_init + totalhigh_pages++; + } + +-struct add_highpages_data { +- unsigned long start_pfn; +- unsigned long end_pfn; +-}; +- 
+-static int __init add_highpages_work_fn(unsigned long start_pfn, +- unsigned long end_pfn, void *datax) +-{ +- int node_pfn; +- struct page *page; +- unsigned long final_start_pfn, final_end_pfn; +- struct add_highpages_data *data; +- +- data = (struct add_highpages_data *)datax; +- +- final_start_pfn = max(start_pfn, data->start_pfn); +- final_end_pfn = min(end_pfn, data->end_pfn); +- if (final_start_pfn >= final_end_pfn) +- return 0; +- +- for (node_pfn = final_start_pfn; node_pfn < final_end_pfn; +- node_pfn++) { +- if (!pfn_valid(node_pfn)) +- continue; +- page = pfn_to_page(node_pfn); +- add_one_highpage_init(page); +- } +- +- return 0; +- +-} +- +-void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, +- unsigned long end_pfn) ++void __init add_highpages_with_active_regions(int nid, ++ unsigned long start_pfn, unsigned long end_pfn) + { +- struct add_highpages_data data; ++ struct range *range; ++ int nr_range; ++ int i; + +- data.start_pfn = start_pfn; +- data.end_pfn = end_pfn; ++ nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn); + +- work_with_active_regions(nid, add_highpages_work_fn, &data); ++ for (i = 0; i < nr_range; i++) { ++ struct page *page; ++ int node_pfn; ++ ++ for (node_pfn = range[i].start; node_pfn < range[i].end; ++ node_pfn++) { ++ if (!pfn_valid(node_pfn)) ++ continue; ++ page = pfn_to_page(node_pfn); ++ add_one_highpage_init(page); ++ } ++ } + } +- + #else + static inline void permanent_kmaps_init(pgd_t *pgd_base) + { +@@ -550,48 +530,6 @@ static void __init pagetable_init(void) + permanent_kmaps_init(pgd_base); + } + +-#if defined(CONFIG_ACPI_SLEEP) && !defined(CONFIG_XEN) +-/* +- * ACPI suspend needs this for resume, because things like the intel-agp +- * driver might have split up a kernel 4MB mapping. +- */ +-char swsusp_pg_dir[PAGE_SIZE] +- __attribute__ ((aligned(PAGE_SIZE))); +- +-static inline void save_pg_dir(void) +-{ +- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE); +-} +-#else /* !CONFIG_ACPI_SLEEP */ +-static inline void save_pg_dir(void) +-{ +-} +-#endif /* !CONFIG_ACPI_SLEEP */ +- +-void zap_low_mappings(bool early) +-{ +- int i; +- +- /* +- * Zap initial low-memory mappings. +- * +- * Note that "pgd_clear()" doesn't do it for +- * us, because pgd_clear() is a no-op on i386. 
+- */ +- for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) { +-#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN) +- set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page))); +-#else +- set_pgd(swapper_pg_dir+i, __pgd(0)); +-#endif +- } +- +- if (early) +- __flush_tlb(); +- else +- flush_tlb_all(); +-} +- + pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL); + EXPORT_SYMBOL_GPL(__supported_pte_mask); + +@@ -714,14 +652,14 @@ void __init initmem_init(unsigned long s + highstart_pfn = highend_pfn = max_pfn; + if (max_pfn > max_low_pfn) + highstart_pfn = max_low_pfn; +- e820_register_active_regions(0, 0, highend_pfn); ++ memblock_x86_register_active_regions(0, 0, highend_pfn); + sparse_memory_present_with_active_regions(0); + printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", + pages_to_mb(highend_pfn - highstart_pfn)); + num_physpages = highend_pfn; + high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; + #else +- e820_register_active_regions(0, 0, max_low_pfn); ++ memblock_x86_register_active_regions(0, 0, max_low_pfn); + sparse_memory_present_with_active_regions(0); + num_physpages = max_low_pfn; + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; +@@ -752,75 +690,18 @@ static void __init zone_sizes_init(void) + free_area_init_nodes(max_zone_pfns); + } + +-#ifndef CONFIG_NO_BOOTMEM +-static unsigned long __init setup_node_bootmem(int nodeid, +- unsigned long start_pfn, +- unsigned long end_pfn, +- unsigned long bootmap) +-{ +- unsigned long bootmap_size; +- +- /* don't touch min_low_pfn */ +- bootmap_size = init_bootmem_node(NODE_DATA(nodeid), +- bootmap >> PAGE_SHIFT, +- start_pfn, end_pfn); +- printk(KERN_INFO " node %d low ram: %08lx - %08lx\n", +- nodeid, start_pfn<nr_pages); +- +- /* +- * Initialize the boot-time allocator (with low memory only): +- */ +- bootmap_size = bootmem_bootmap_pages(end_xen_pfn)<nr_pages)< xen_start_info->nr_pages) +- reserve_early(xen_start_info->nr_pages << PAGE_SHIFT, +- max_low_pfn << PAGE_SHIFT, "BALLOON"); ++ memblock_x86_reserve_range(xen_start_info->nr_pages << PAGE_SHIFT, ++ max_low_pfn << PAGE_SHIFT, "BALLOON"); + #endif + + printk(KERN_INFO " mapped low ram: 0 - %08lx\n", + max_pfn_mapped< end_xen_pfn) +- continue; +- if (end_pfn > end_xen_pfn) +- end_pfn = end_xen_pfn; +-#else +- start_pfn = 0; +- end_pfn = end_xen_pfn; +-#endif +- bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn, +- bootmap); +- } +-#endif +- + after_bootmem = 1; + } + +@@ -870,8 +751,8 @@ unsigned long __init extend_init_mapping + } + + if (start_pfn > start) +- reserve_early(start << PAGE_SHIFT, +- start_pfn << PAGE_SHIFT, "INITMAP"); ++ memblock_x86_reserve_range(start << PAGE_SHIFT, ++ start_pfn << PAGE_SHIFT, "INITMAP"); + + return start_pfn; + } +@@ -1026,9 +907,6 @@ void __init mem_init(void) + if (boot_cpu_data.wp_works_ok < 0) + test_wp_bit(); + +- save_pg_dir(); +- zap_low_mappings(true); +- + SetPagePinned(virt_to_page(init_mm.pgd)); + } + +@@ -1139,8 +1017,3 @@ void mark_rodata_ro(void) + } + #endif + +-int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, +- int flags) +-{ +- return reserve_bootmem(phys, len, flags); +-} +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -54,7 +55,6 @@ + #include + #include + #include +-#include + + #include + +@@ -164,6 +164,43 @@ static int __init 
nonx32_setup(char *str + __setup("noexec32=", nonx32_setup); + + /* ++ * When memory was added/removed make sure all the processes MM have ++ * suitable PGD entries in the local PGD level page. ++ */ ++void sync_global_pgds(unsigned long start, unsigned long end) ++{ ++ unsigned long address; ++ ++ for (address = start; address <= end; address += PGDIR_SIZE) { ++ const pgd_t *pgd_ref = pgd_offset_k(address); ++ unsigned long flags; ++ struct page *page; ++ ++ if (pgd_none(*pgd_ref)) ++ continue; ++ ++ spin_lock_irqsave(&pgd_lock, flags); ++ list_for_each_entry(page, &pgd_list, lru) { ++ pgd_t *pgd; ++ spinlock_t *pgt_lock; ++ ++ pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; ++ spin_lock(pgt_lock); ++ ++ if (pgd_none(*pgd)) ++ set_pgd(pgd, *pgd_ref); ++ else ++ BUG_ON(pgd_page_vaddr(*pgd) ++ != pgd_page_vaddr(*pgd_ref)); ++ ++ spin_unlock(pgt_lock); ++ } ++ spin_unlock_irqrestore(&pgd_lock, flags); ++ } ++} ++ ++/* + * NOTE: This function is marked __ref because it calls __init function + * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. + */ +@@ -370,7 +407,7 @@ static __ref void *alloc_low_page(unsign + panic("alloc_low_page: ran out of memory"); + + adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); +- memset(adr, 0, PAGE_SIZE); ++ clear_page(adr); + *phys = pfn * PAGE_SIZE; + return adr; + } +@@ -772,11 +809,13 @@ kernel_physical_mapping_init(unsigned lo + unsigned long end, + unsigned long page_size_mask) + { +- ++ bool pgd_changed = false; + unsigned long next, last_map_addr = end; ++ unsigned long addr; + + start = (unsigned long)__va(start); + end = (unsigned long)__va(end); ++ addr = start; + + for (; start < end; start = next) { + pgd_t *pgd = pgd_offset_k(start); +@@ -808,9 +847,13 @@ kernel_physical_mapping_init(unsigned lo + spin_lock(&init_mm.page_table_lock); + pgd_populate(&init_mm, pgd, __va(pud_phys)); + spin_unlock(&init_mm.page_table_lock); ++ pgd_changed = true; + } + } + ++ if (pgd_changed) ++ sync_global_pgds(addr, end); ++ + return last_map_addr; + } + +@@ -818,31 +861,11 @@ kernel_physical_mapping_init(unsigned lo + void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, + int acpi, int k8) + { +-#ifndef CONFIG_NO_BOOTMEM +- unsigned long bootmap_size, bootmap; +- +- e820_register_active_regions(0, start_pfn, end_pfn); +-#ifdef CONFIG_XEN +- if (end_pfn > xen_start_info->nr_pages) +- end_pfn = xen_start_info->nr_pages; +-#endif +- bootmap_size = bootmem_bootmap_pages(end_pfn)<> PAGE_SHIFT, +- 0, end_pfn); +- free_bootmem_with_active_regions(0, end_pfn); +-#else +- e820_register_active_regions(0, start_pfn, end_pfn); ++ memblock_x86_register_active_regions(0, start_pfn, end_pfn); + #ifdef CONFIG_XEN + if (end_pfn > xen_start_info->nr_pages) +- reserve_early(xen_start_info->nr_pages << PAGE_SHIFT, +- end_pfn << PAGE_SHIFT, "BALLOON"); +-#endif ++ memblock_x86_reserve_range(xen_start_info->nr_pages << PAGE_SHIFT, ++ end_pfn << PAGE_SHIFT, "BALLOON"); + #endif + } + #endif +@@ -1062,54 +1085,6 @@ void mark_rodata_ro(void) + + #endif + +-int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, +- int flags) +-{ +-#ifdef CONFIG_NUMA +- int nid, next_nid; +- int ret; +-#endif +- unsigned long pfn = phys >> PAGE_SHIFT; +- +- if (pfn >= max_pfn) { +- /* +- * This can happen with kdump kernels when accessing +- * firmware tables: +- */ +- if (pfn < max_pfn_mapped) +- return -EFAULT; +- +- printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n", +- 
phys, len); +- return -EFAULT; +- } +- +- /* Should check here against the e820 map to avoid double free */ +-#ifdef CONFIG_NUMA +- nid = phys_to_nid(phys); +- next_nid = phys_to_nid(phys + len - 1); +- if (nid == next_nid) +- ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags); +- else +- ret = reserve_bootmem(phys, len, flags); +- +- if (ret != 0) +- return ret; +- +-#else +- reserve_bootmem(phys, len, flags); +-#endif +- +-#ifndef CONFIG_XEN +- if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { +- dma_reserve += len / PAGE_SIZE; +- set_dma_reserve(dma_reserve); +- } +-#endif +- +- return 0; +-} +- + int kern_addr_valid(unsigned long addr) + { + unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; +@@ -1281,6 +1256,7 @@ vmemmap_populate(struct page *start_page + } + + } ++ sync_global_pgds((unsigned long)start_page, end); + return 0; + } + +--- head-2011-03-17.orig/arch/x86/mm/iomap_32-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/iomap_32-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -49,21 +49,20 @@ int iomap_create_wc(resource_size_t base + } + EXPORT_SYMBOL_GPL(iomap_create_wc); + +-void +-iomap_free(resource_size_t base, unsigned long size) ++void iomap_free(resource_size_t base, unsigned long size) + { + io_free_memtype(base, base + size); + } + EXPORT_SYMBOL_GPL(iomap_free); + +-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) ++void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { +- enum fixed_addresses idx; + unsigned long vaddr; ++ int idx, type; + + pagefault_disable(); + +- debug_kmap_atomic(type); ++ type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + set_pte_at(&init_mm, vaddr, kmap_pte - idx, pfn_pte(pfn, prot)); +@@ -73,10 +72,10 @@ void *kmap_atomic_prot_pfn(unsigned long + } + + /* +- * Map 'mfn' using fixed map 'type' and protections 'prot' ++ * Map 'mfn' using protections 'prot' + */ + void __iomem * +-iomap_atomic_prot_pfn(unsigned long mfn, enum km_type type, pgprot_t prot) ++iomap_atomic_prot_pfn(unsigned long mfn, pgprot_t prot) + { + /* + * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS. +@@ -88,24 +87,34 @@ iomap_atomic_prot_pfn(unsigned long mfn, + prot = PAGE_KERNEL_UC_MINUS; + + pgprot_val(prot) |= _PAGE_IOMAP; +- return (void __force __iomem *) kmap_atomic_prot_pfn(mfn, type, prot); ++ return (void __force __iomem *) kmap_atomic_prot_pfn(mfn, prot); + } + EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); + + void +-iounmap_atomic(void __iomem *kvaddr, enum km_type type) ++iounmap_atomic(void __iomem *kvaddr) + { + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; +- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); + +- /* +- * Force other mappings to Oops if they'll try to access this pte +- * without first remap it. Keeping stale mappings around is a bad idea +- * also, in case the page changes cacheability attributes or becomes +- * a protected page in a hypervisor. +- */ +- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) ++ if (vaddr >= __fix_to_virt(FIX_KMAP_END) && ++ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { ++ int idx, type; ++ ++ type = kmap_atomic_idx(); ++ idx = type + KM_TYPE_NR * smp_processor_id(); ++ ++#ifdef CONFIG_DEBUG_HIGHMEM ++ WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++#endif ++ /* ++ * Force other mappings to Oops if they'll try to access this ++ * pte without first remap it. 
Keeping stale mappings around ++ * is a bad idea also, in case the page changes cacheability ++ * attributes or becomes a protected page in a hypervisor. ++ */ + kpte_clear_flush(kmap_pte-idx, vaddr); ++ kmap_atomic_idx_pop(); ++ } + + pagefault_enable(); + } +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:42:02.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:42:09.000000000 +0100 +@@ -532,6 +532,11 @@ static inline pte_t * __init early_iorem + return &bm_pte[pte_index(addr)]; + } + ++bool __init is_early_ioremap_ptep(pte_t *ptep) ++{ ++ return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)]; ++} ++ + static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata; + + void __init early_ioremap_init(void) +--- head-2011-03-17.orig/arch/x86/mm/memblock.c 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/memblock.c 2011-02-01 15:09:47.000000000 +0100 +@@ -293,6 +293,11 @@ static int __init memblock_x86_find_acti + { + u64 align = PAGE_SIZE; + ++#ifdef CONFIG_XEN ++ if (last_pfn > xen_start_info->nr_pages) ++ last_pfn = xen_start_info->nr_pages; ++#endif ++ + *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT; + *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT; + +@@ -325,6 +330,11 @@ void __init memblock_x86_register_active + if (memblock_x86_find_active_region(r, start_pfn, last_pfn, + &ei_startpfn, &ei_endpfn)) + add_active_range(nid, ei_startpfn, ei_endpfn); ++ ++#ifdef CONFIG_XEN ++ BUG_ON(nid); ++ add_active_range(nid, last_pfn, last_pfn); ++#endif + } + + /* +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -428,7 +428,19 @@ static inline void pgd_list_del(pgd_t *p + #define UNSHARED_PTRS_PER_PGD \ + (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) + +-static void pgd_ctor(pgd_t *pgd) ++ ++static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) ++{ ++ BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); ++ virt_to_page(pgd)->index = (pgoff_t)mm; ++} ++ ++struct mm_struct *pgd_page_get_mm(struct page *page) ++{ ++ return (struct mm_struct *)page->index; ++} ++ ++static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) + { + pgd_test_and_unpin(pgd); + +@@ -441,10 +453,6 @@ static void pgd_ctor(pgd_t *pgd) + clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); +- paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT, +- __pa(swapper_pg_dir) >> PAGE_SHIFT, +- KERNEL_PGD_BOUNDARY, +- KERNEL_PGD_PTRS); + } + + #ifdef CONFIG_X86_64 +@@ -454,8 +462,10 @@ static void pgd_ctor(pgd_t *pgd) + #endif + + /* list required to sync kernel mapping updates */ +- if (!SHARED_KERNEL_PMD) ++ if (!SHARED_KERNEL_PMD) { ++ pgd_set_mm(pgd, mm); + pgd_list_add(pgd); ++ } + } + + static void pgd_dtor(pgd_t *pgd) +@@ -662,12 +672,9 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + } + #endif + +- pgd_ctor(pgd); ++ pgd_ctor(mm, pgd); + pgd_prepopulate_pmd(mm, pgd, pmds); + +- /* Store a back link for vmalloc_sync_all(). 
*/ +- set_page_private(virt_to_page(pgd), (unsigned long)mm); +- + spin_unlock_irqrestore(&pgd_lock, flags); + + return pgd; +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-02-01 15:09:47.000000000 +0100 +@@ -590,27 +590,28 @@ static __init int intel_router_probe(str + case PCI_DEVICE_ID_INTEL_ICH9_3: + case PCI_DEVICE_ID_INTEL_ICH9_4: + case PCI_DEVICE_ID_INTEL_ICH9_5: +- case PCI_DEVICE_ID_INTEL_TOLAPAI_0: ++ case PCI_DEVICE_ID_INTEL_EP80579_0: + case PCI_DEVICE_ID_INTEL_ICH10_0: + case PCI_DEVICE_ID_INTEL_ICH10_1: + case PCI_DEVICE_ID_INTEL_ICH10_2: + case PCI_DEVICE_ID_INTEL_ICH10_3: ++ case PCI_DEVICE_ID_INTEL_PATSBURG_LPC: + r->name = "PIIX/ICH"; + r->get = pirq_piix_get; + r->set = pirq_piix_set; + return 1; + } + +- if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) && +- (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) { ++ if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) && ++ (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) { + r->name = "PIIX/ICH"; + r->get = pirq_piix_get; + r->set = pirq_piix_set; + return 1; + } + +- if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) && +- (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) { ++ if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) && ++ (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) { + r->name = "PIIX/ICH"; + r->get = pirq_piix_get; + r->set = pirq_piix_set; +--- head-2011-03-17.orig/arch/x86/pci/pcifront.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/pcifront.c 2011-02-01 15:09:47.000000000 +0100 +@@ -16,7 +16,7 @@ static int pcifront_enable_irq(struct pc + { + u8 irq; + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); +- if (!irq_to_desc_alloc_node(irq, numa_node_id())) ++ if (!alloc_irq_and_cfg_at(irq, numa_node_id())) + return -ENOMEM; + evtchn_register_pirq(irq); + dev->irq = irq; +--- head-2011-03-17.orig/arch/x86/xen/Kconfig 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/xen/Kconfig 2011-02-01 15:09:47.000000000 +0100 +@@ -15,13 +15,16 @@ config PARAVIRT_XEN + + config XEN_DOM0 + def_bool y +- depends on XEN && PCI_XEN && SWIOTLB_XEN ++ depends on PARAVIRT_XEN && PCI_XEN && SWIOTLB_XEN + depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI + + # Dummy symbol since people have come to rely on the PRIVILEGED_GUEST + # name in tools. +-config XEN_PRIVILEGED_GUEST +- def_bool XEN_DOM0 ++# This doesn't work together with our identical symbol in drivers/xen/Kconfig ++# (produces a recursive dependency), and renaming it is pointless given that ++# it's meant as a compatibility thing. ++#config XEN_PRIVILEGED_GUEST ++# def_bool XEN_DOM0 + + config XEN_PVHVM + def_bool y +--- head-2011-03-17.orig/drivers/pci/Kconfig 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/drivers/pci/Kconfig 2011-02-01 15:09:47.000000000 +0100 +@@ -61,9 +61,9 @@ config PCI_STUB + + When in doubt, say N. + +-config XEN_PCIDEV_FRONTEND ++config PARAVIRT_XEN_PCIDEV_FRONTEND + tristate "Xen PCI Frontend" +- depends on PCI && X86 && XEN ++ depends on PCI && X86 && PARAVIRT_XEN + select HOTPLUG + select PCI_XEN + select XEN_XENBUS_FRONTEND +@@ -72,9 +72,18 @@ config XEN_PCIDEV_FRONTEND + The PCI device frontend driver allows the kernel to import arbitrary + PCI devices from a PCI backend to support PCI driver domains. 
+ ++config XEN_PCIDEV_FRONTEND ++ def_bool y ++ prompt "Xen PCI Frontend" if X86_64 ++ depends on PCI && XEN && (PCI_GOXEN_FE || PCI_GOANY || X86_64) ++ select HOTPLUG ++ help ++ The PCI device frontend driver allows the kernel to import arbitrary ++ PCI devices from a PCI backend to support PCI driver domains. ++ + config XEN_PCIDEV_FE_DEBUG + bool "Xen PCI Frontend debugging" +- depends on XEN_PCIDEV_FRONTEND && PCI_DEBUG ++ depends on XEN_PCIDEV_FRONTEND || (PARAVIRT_XEN_PCIDEV_FRONTEND && PCI_DEBUG) + help + Say Y here if you want the Xen PCI frontend to produce a bunch of debug + messages to the system log. Select this if you are having a +--- head-2011-03-17.orig/drivers/pci/Makefile 2011-01-31 14:32:40.000000000 +0100 ++++ head-2011-03-17/drivers/pci/Makefile 2011-02-01 15:09:47.000000000 +0100 +@@ -71,6 +71,6 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o + + obj-$(CONFIG_PCI_STUB) += pci-stub.o + +-obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o ++obj-$(CONFIG_PARAVIRT_XEN_PCIDEV_FRONTEND) += xen-pcifront.o + + ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG +--- head-2011-03-17.orig/drivers/pci/xen-pcifront.c 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/drivers/pci/xen-pcifront.c 2011-02-01 15:09:47.000000000 +0100 +@@ -1118,7 +1118,6 @@ static const struct xenbus_device_id xen + + static struct xenbus_driver xenbus_pcifront_driver = { + .name = "pcifront", +- .owner = THIS_MODULE, + .ids = xenpci_ids, + .probe = pcifront_xenbus_probe, + .remove = pcifront_xenbus_remove, +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-02 15:37:53.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-01 15:09:47.000000000 +0100 +@@ -20,10 +20,6 @@ config XEN_PRIVILEGED_GUEST + config XEN_UNPRIVILEGED_GUEST + def_bool !XEN_PRIVILEGED_GUEST + select PM +- select PM_SLEEP +- select PM_SLEEP_SMP if SMP +- select PM_RUNTIME if PCI +- select PM_OPS if PCI + select SUSPEND + + config XEN_PRIVCMD +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-24 15:05:06.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-02-24 15:17:40.000000000 +0100 +@@ -1,6 +1,8 @@ + obj-$(CONFIG_PARAVIRT_XEN) += grant-table.o features.o events.o manage.o ++xen-biomerge-$(CONFIG_PARAVIRT_XEN) := biomerge.o + xen-hotplug-$(CONFIG_PARAVIRT_XEN) := cpu_hotplug.o + xen-balloon-$(CONFIG_PARAVIRT_XEN) := balloon.o ++xen-evtchn-name-$(CONFIG_PARAVIRT_XEN) := xen-evtchn + + xen-balloon-$(CONFIG_XEN) := balloon/ + obj-$(CONFIG_XEN) += core/ +@@ -9,6 +11,7 @@ obj-y += xenbus/ + obj-$(CONFIG_XEN) += char/ + + xen-backend-$(CONFIG_XEN_BACKEND) := util.o ++xen-evtchn-name-$(CONFIG_XEN) := evtchn + + nostackp := $(call cc-option, -fno-stack-protector) + ifeq ($(CONFIG_PARAVIRT_XEN),y) +@@ -16,14 +19,19 @@ CFLAGS_features.o := $(nostackp) + endif + + obj-$(CONFIG_XEN) += features.o $(xen-backend-y) $(xen-backend-m) ++obj-$(CONFIG_BLOCK) += $(xen-biomerge-y) + obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotplug-y) + obj-$(CONFIG_XEN_XENCOMM) += xencomm.o + obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) +-obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o ++obj-$(CONFIG_XEN_DEV_EVTCHN) += $(xen-evtchn-name-y).o + obj-$(CONFIG_XENFS) += xenfs/ + obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o + obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o + obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o ++obj-$(CONFIG_XEN_DOM0) += pci.o ++ ++xen-evtchn-y := evtchn.o ++ + obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ + obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ + obj-$(CONFIG_XEN_BLKDEV_TAP2) += blktap2/ blktap2-new/ +--- 
head-2011-03-17.orig/drivers/xen/balloon/balloon.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/drivers/xen/balloon/balloon.c 2011-02-01 15:09:47.000000000 +0100 +@@ -39,7 +39,6 @@ + #include + #include + #include +-#include + #include + #include + #include +--- head-2011-03-17.orig/drivers/xen/blkback/blkback.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/blkback.c 2011-02-01 15:09:47.000000000 +0100 +@@ -406,7 +406,7 @@ static void dispatch_rw_block_io(blkif_t + operation = WRITE; + break; + case BLKIF_OP_WRITE_BARRIER: +- operation = WRITE_BARRIER; ++ operation = WRITE_FLUSH_FUA; + break; + default: + operation = 0; /* make gcc happy */ +@@ -415,7 +415,7 @@ static void dispatch_rw_block_io(blkif_t + + /* Check that number of segments is sane. */ + nseg = req->nr_segments; +- if (unlikely(nseg == 0 && operation != WRITE_BARRIER) || ++ if (unlikely(nseg == 0 && req->operation != BLKIF_OP_WRITE_BARRIER) || + unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { + DPRINTK("Bad number of segments in request (%d)\n", nseg); + goto fail_response; +@@ -525,7 +525,7 @@ static void dispatch_rw_block_io(blkif_t + } + + if (!bio) { +- BUG_ON(operation != WRITE_BARRIER); ++ BUG_ON(!(operation & (REQ_FLUSH|REQ_FUA))); + bio = bio_alloc(GFP_KERNEL, 0); + if (unlikely(bio == NULL)) + goto fail_put_bio; +@@ -540,7 +540,7 @@ static void dispatch_rw_block_io(blkif_t + + if (operation == READ) + blkif->st_rd_sect += preq.nr_sects; +- else if (operation == WRITE || operation == WRITE_BARRIER) ++ else + blkif->st_wr_sect += preq.nr_sects; + + return; +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-02-01 15:09:47.000000000 +0100 +@@ -369,20 +369,23 @@ static void connect(struct blkfront_info + /* + * If there's no "feature-barrier" defined, then it means + * we're dealing with a very old backend which writes +- * synchronously; draining will do what needs to get done. ++ * synchronously; nothing to do. + * +- * If there are barriers, then we can do full queued writes +- * with tagged barriers. +- * +- * If barriers are not supported, then there's no much we can +- * do, so just set ordering to NONE. ++ * If there are barriers, then we use flush. + */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) ++ if (!err && barrier) ++ info->feature_flush = REQ_FLUSH | REQ_FUA; ++ else ++ info->feature_flush = 0; ++#else + if (err) +- info->feature_barrier = QUEUE_ORDERED_DRAIN; ++ info->feature_flush = QUEUE_ORDERED_DRAIN; + else if (barrier) +- info->feature_barrier = QUEUE_ORDERED_TAG; ++ info->feature_flush = QUEUE_ORDERED_TAG; + else +- info->feature_barrier = QUEUE_ORDERED_NONE; ++ info->feature_flush = QUEUE_ORDERED_NONE; ++#endif + + err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); + if (err) { +@@ -477,7 +480,7 @@ static inline void ADD_ID_TO_FREELIST( + struct blkfront_info *info, unsigned long id) + { + info->shadow[id].req.id = info->shadow_free; +- info->shadow[id].request = 0; ++ info->shadow[id].request = NULL; + info->shadow_free = id; + } + +@@ -658,14 +661,11 @@ int blkif_getgeo(struct block_device *bd + + + /* +- * blkif_queue_request ++ * Generate a Xen blkfront IO request from a blk layer request. Reads ++ * and writes are handled as expected. Since we lack a loose flush ++ * request, we map flushes into a full ordered barrier. + * +- * request block io +- * +- * id: for guest use only. 
+- * operation: BLKIF_OP_{READ,WRITE,PROBE} +- * buffer: buffer to read/write into. this should be a +- * virtual address in the guest os. ++ * @req: a request struct + */ + static int blkif_queue_request(struct request *req) + { +@@ -694,7 +694,7 @@ static int blkif_queue_request(struct re + /* Fill out a communications ring structure. */ + ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); + id = GET_ID_FROM_FREELIST(info); +- info->shadow[id].request = (unsigned long)req; ++ info->shadow[id].request = req; + + ring_req->id = id; + ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); +@@ -702,7 +702,11 @@ static int blkif_queue_request(struct re + + ring_req->operation = rq_data_dir(req) ? + BLKIF_OP_WRITE : BLKIF_OP_READ; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) ++ if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) ++#else + if (req->cmd_flags & REQ_HARDBARRIER) ++#endif + ring_req->operation = BLKIF_OP_WRITE_BARRIER; + + ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); +@@ -813,7 +817,7 @@ static irqreturn_t blkif_int(int irq, vo + + bret = RING_GET_RESPONSE(&info->ring, i); + id = bret->id; +- req = (struct request *)info->shadow[id].request; ++ req = info->shadow[id].request; + + blkif_completion(&info->shadow[id]); + +@@ -827,8 +831,23 @@ static irqreturn_t blkif_int(int irq, vo + " write barrier op failed\n", + info->gd->disk_name); + ret = -EOPNOTSUPP; +- info->feature_barrier = QUEUE_ORDERED_NONE; +- xlvbd_barrier(info); ++ } ++ if (unlikely(bret->status == BLKIF_RSP_ERROR && ++ info->shadow[id].req.nr_segments == 0)) { ++ pr_warning("blkfront: %s:" ++ " empty write barrier op failed\n", ++ info->gd->disk_name); ++ ret = -EOPNOTSUPP; ++ } ++ if (unlikely(ret)) { ++ if (ret == -EOPNOTSUPP) ++ ret = 0; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) ++ info->feature_flush = 0; ++#else ++ info->feature_flush = QUEUE_ORDERED_NONE; ++#endif ++ xlvbd_flush(info); + } + /* fall through */ + case BLKIF_OP_READ: +@@ -919,7 +938,7 @@ static int blkif_recover(struct blkfront + /* Stage 3: Find pending requests and requeue them. */ + for (i = 0; i < BLK_RING_SIZE; i++) { + /* Not in use? */ +- if (copy[i].request == 0) ++ if (!copy[i].request) + continue; + + /* Grab a request slot and copy shadow state into it. */ +@@ -937,8 +956,7 @@ static int blkif_recover(struct blkfront + req->seg[j].gref, + info->xbdev->otherend_id, + pfn_to_mfn(info->shadow[req->id].frame[j]), +- rq_data_dir((struct request *) +- info->shadow[req->id].request) ? ++ rq_data_dir(info->shadow[req->id].request) ? 
+ GTF_readonly : 0); + info->shadow[req->id].req = *req; + +--- head-2011-03-17.orig/drivers/xen/blkfront/block.h 2011-02-01 14:39:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/block.h 2011-02-01 15:09:47.000000000 +0100 +@@ -83,7 +83,7 @@ struct xlbd_major_info + + struct blk_shadow { + blkif_request_t req; +- unsigned long request; ++ struct request *request; + unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + }; + +@@ -111,7 +111,7 @@ struct blkfront_info + struct gnttab_free_callback callback; + struct blk_shadow shadow[BLK_RING_SIZE]; + unsigned long shadow_free; +- int feature_barrier; ++ int feature_flush; + int is_ready; + + /** +@@ -146,7 +146,7 @@ extern void do_blkif_request (struct req + int xlvbd_add(blkif_sector_t capacity, int device, + u16 vdisk_info, u16 sector_size, struct blkfront_info *info); + void xlvbd_del(struct blkfront_info *info); +-int xlvbd_barrier(struct blkfront_info *info); ++void xlvbd_flush(struct blkfront_info *info); + + #ifdef CONFIG_SYSFS + int xlvbd_sysfs_addif(struct blkfront_info *info); +--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-02-01 15:09:47.000000000 +0100 +@@ -422,7 +422,7 @@ xlvbd_add(blkif_sector_t capacity, int v + info->rq = gd->queue; + info->gd = gd; + +- xlvbd_barrier(info); ++ xlvbd_flush(info); + + if (vdisk_info & VDISK_READONLY) + set_disk_ro(gd, 1); +@@ -468,36 +468,35 @@ xlvbd_del(struct blkfront_info *info) + info->rq = NULL; + } + +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) +-int +-xlvbd_barrier(struct blkfront_info *info) ++void ++xlvbd_flush(struct blkfront_info *info) + { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) ++ blk_queue_flush(info->rq, info->feature_flush); ++ pr_info("blkfront: %s: barriers %s\n", ++ info->gd->disk_name, ++ info->feature_flush ? 
"enabled" : "disabled"); ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) + int err; + const char *barrier; + +- switch (info->feature_barrier) { ++ switch (info->feature_flush) { + case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; + case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; + case QUEUE_ORDERED_NONE: barrier = "disabled"; break; + default: return -EINVAL; + } + +- err = blk_queue_ordered(info->rq, info->feature_barrier); ++ err = blk_queue_ordered(info->rq, info->feature_flush); + if (err) + return err; + pr_info("blkfront: %s: barriers %s\n", + info->gd->disk_name, barrier); +- return 0; +-} + #else +-int +-xlvbd_barrier(struct blkfront_info *info) +-{ +- if (info->feature_barrier) ++ if (info->feature_flush) + pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); +- return -ENOSYS; +-} + #endif ++} + + #ifdef CONFIG_SYSFS + static ssize_t show_media(struct device *dev, +--- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:19:19.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-17 10:19:26.000000000 +0100 +@@ -441,6 +441,7 @@ static const struct file_operations blkt + .unlocked_ioctl = blktap_ioctl, + .open = blktap_open, + .release = blktap_release, ++ .llseek = no_llseek, + .mmap = blktap_mmap, + }; + +@@ -573,6 +574,8 @@ static int blktap_open(struct inode *ino + tap_blkif_t *info; + int i; + ++ nonseekable_open(inode, filp); ++ + /* ctrl device, treat differently */ + if (!idx) + return 0; +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-02-01 15:09:47.000000000 +0100 +@@ -844,7 +844,7 @@ blktap_device_run_queue(struct blktap *t + continue; + } + +- if (req->cmd_flags & REQ_HARDBARRIER) { ++ if (req->cmd_flags & (REQ_FLUSH|REQ_FUA)) { + blk_start_request(req); + __blk_end_request_all(req, -EOPNOTSUPP); + continue; +--- head-2011-03-17.orig/drivers/xen/blktap2-new/device.c 2011-02-24 16:23:08.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2-new/device.c 2011-02-24 16:31:17.000000000 +0100 +@@ -302,9 +302,6 @@ blktap_device_configure(struct blktap *t + /* Make sure buffer addresses are sector-aligned. */ + blk_queue_dma_alignment(rq, 511); + +- /* We are reordering, but cacheless. */ +- blk_queue_ordered(rq, QUEUE_ORDERED_DRAIN); +- + spin_unlock_irq(&dev->lock); + } + +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-09 12:45:24.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-03 11:12:32.000000000 +0100 +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -89,14 +90,17 @@ static struct irq_cfg _irq_cfg[] = { + static inline struct irq_cfg *__pure irq_cfg(unsigned int irq) + { + #ifdef CONFIG_SPARSE_IRQ +- struct irq_desc *desc = irq_to_desc(irq); +- +- return desc ? desc->chip_data : NULL; ++ return get_irq_chip_data(irq); + #else + return irq < NR_IRQS ? _irq_cfg + irq : NULL; + #endif + } + ++static inline struct irq_cfg *__pure irq_data_cfg(struct irq_data *data) ++{ ++ return irq_data_get_irq_chip_data(data); ++} ++ + /* Constructor for packed IRQ information. */ + static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn) + { +@@ -116,26 +120,47 @@ static inline u32 mk_irq_info(u32 type, + * Accessors for packed IRQ information. 
+ */ + ++static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg) ++{ ++ return cfg->info & ((1U << _EVTCHN_BITS) - 1); ++} ++ ++static inline unsigned int evtchn_from_irq_data(struct irq_data *data) ++{ ++ const struct irq_cfg *cfg = irq_data_cfg(data); ++ ++ return cfg ? evtchn_from_irq_cfg(cfg) : 0; ++} ++ + static inline unsigned int evtchn_from_irq(int irq) + { +- const struct irq_cfg *cfg = irq_cfg(irq); ++ struct irq_data *data = irq_get_irq_data(irq); + +- return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0; ++ return data ? evtchn_from_irq_data(data) : 0; ++} ++ ++static inline unsigned int index_from_irq_cfg(const struct irq_cfg *cfg) ++{ ++ return (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1); + } + + static inline unsigned int index_from_irq(int irq) + { + const struct irq_cfg *cfg = irq_cfg(irq); + +- return cfg ? (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1) +- : 0; ++ return cfg ? index_from_irq_cfg(cfg) : 0; ++} ++ ++static inline unsigned int type_from_irq_cfg(const struct irq_cfg *cfg) ++{ ++ return cfg->info >> (32 - _IRQT_BITS); + } + + static inline unsigned int type_from_irq(int irq) + { + const struct irq_cfg *cfg = irq_cfg(irq); + +- return cfg ? cfg->info >> (32 - _IRQT_BITS) : IRQT_UNBOUND; ++ return cfg ? type_from_irq_cfg(cfg) : IRQT_UNBOUND; + } + + unsigned int irq_from_evtchn(unsigned int port) +@@ -180,7 +205,7 @@ static void bind_evtchn_to_cpu(unsigned + BUG_ON(!test_bit(chn, s->evtchn_mask)); + + if (irq != -1) +- cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); ++ cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu)); + + clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn])); + set_bit(chn, per_cpu(cpu_evtchn_mask, cpu)); +@@ -193,10 +218,10 @@ static void init_evtchn_cpu_bindings(voi + + /* By default all event channels notify CPU#0. 
*/ + for (i = 0; i < nr_irqs; i++) { +- struct irq_desc *desc = irq_to_desc(i); ++ struct irq_data *data = irq_get_irq_data(i); + +- if (desc) +- cpumask_copy(desc->affinity, cpumask_of(0)); ++ if (data) ++ cpumask_copy(data->affinity, cpumask_of(0)); + } + + memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); +@@ -378,26 +403,24 @@ asmlinkage void __irq_entry evtchn_do_up + set_irq_regs(old_regs); + } + +-static int find_unbound_irq(unsigned int node, struct irq_chip *chip) ++static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg, ++ struct irq_chip *chip) + { + static int warned; + int irq; + + for (irq = DYNIRQ_BASE; irq < nr_irqs; irq++) { +- struct irq_desc *desc; +- struct irq_cfg *cfg; ++ struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); ++ struct irq_desc *desc = irq_to_desc(irq); + +- desc = irq_to_desc(irq); +- if (!desc) +- desc = irq_to_desc_alloc_node(irq, node); +- else if (desc->chip != &no_irq_chip && +- desc->chip != &dynirq_chip) +- continue; +- if (!desc) ++ if (unlikely(!cfg)) + return -ENOMEM; ++ if (desc->irq_data.chip != &no_irq_chip && ++ desc->irq_data.chip != chip) ++ continue; + +- cfg = desc->chip_data; +- if (cfg && !cfg->bindcount) { ++ if (!cfg->bindcount) { ++ *pcfg = cfg; + desc->status |= IRQ_NOPROBE; + set_irq_chip_and_handler_name(irq, chip, + handle_fasteoi_irq, +@@ -419,20 +442,22 @@ static struct irq_chip dynirq_chip; + + static int bind_caller_port_to_irq(unsigned int caller_port) + { ++ struct irq_cfg *cfg; + int irq; + + spin_lock(&irq_mapping_update_lock); + + if ((irq = evtchn_to_irq[caller_port]) == -1) { +- if ((irq = find_unbound_irq(numa_node_id(), &dynirq_chip)) < 0) ++ if ((irq = find_unbound_irq(numa_node_id(), &cfg, ++ &dynirq_chip)) < 0) + goto out; + + evtchn_to_irq[caller_port] = irq; +- irq_cfg(irq)->info = mk_irq_info(IRQT_CALLER_PORT, +- 0, caller_port); +- } ++ cfg->info = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port); ++ } else ++ cfg = irq_cfg(irq); + +- irq_cfg(irq)->bindcount++; ++ cfg->bindcount++; + + out: + spin_unlock(&irq_mapping_update_lock); +@@ -441,21 +466,22 @@ static int bind_caller_port_to_irq(unsig + + static int bind_local_port_to_irq(unsigned int local_port) + { ++ struct irq_cfg *cfg; + int irq; + + spin_lock(&irq_mapping_update_lock); + + BUG_ON(evtchn_to_irq[local_port] != -1); + +- if ((irq = find_unbound_irq(numa_node_id(), &dynirq_chip)) < 0) { ++ if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip)) < 0) { + if (close_evtchn(local_port)) + BUG(); + goto out; + } + + evtchn_to_irq[local_port] = irq; +- irq_cfg(irq)->info = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port); +- irq_cfg(irq)->bindcount++; ++ cfg->info = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port); ++ cfg->bindcount++; + + out: + spin_unlock(&irq_mapping_update_lock); +@@ -494,12 +520,13 @@ static int bind_interdomain_evtchn_to_ir + static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) + { + struct evtchn_bind_virq bind_virq; ++ struct irq_cfg *cfg; + int evtchn, irq; + + spin_lock(&irq_mapping_update_lock); + + if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) { +- if ((irq = find_unbound_irq(cpu_to_node(cpu), ++ if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, + &dynirq_chip)) < 0) + goto out; + +@@ -511,14 +538,15 @@ static int bind_virq_to_irq(unsigned int + evtchn = bind_virq.port; + + evtchn_to_irq[evtchn] = irq; +- irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq, evtchn); ++ cfg->info = mk_irq_info(IRQT_VIRQ, virq, evtchn); + + per_cpu(virq_to_irq, cpu)[virq] = irq; + + bind_evtchn_to_cpu(evtchn, cpu); +- } 
++ } else ++ cfg = irq_cfg(irq); + +- irq_cfg(irq)->bindcount++; ++ cfg->bindcount++; + + out: + spin_unlock(&irq_mapping_update_lock); +@@ -528,12 +556,13 @@ static int bind_virq_to_irq(unsigned int + static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) + { + struct evtchn_bind_ipi bind_ipi; ++ struct irq_cfg *cfg; + int evtchn, irq; + + spin_lock(&irq_mapping_update_lock); + + if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) { +- if ((irq = find_unbound_irq(cpu_to_node(cpu), ++ if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, + &dynirq_chip)) < 0) + goto out; + +@@ -544,14 +573,15 @@ static int bind_ipi_to_irq(unsigned int + evtchn = bind_ipi.port; + + evtchn_to_irq[evtchn] = irq; +- irq_cfg(irq)->info = mk_irq_info(IRQT_IPI, ipi, evtchn); ++ cfg->info = mk_irq_info(IRQT_IPI, ipi, evtchn); + + per_cpu(ipi_to_irq, cpu)[ipi] = irq; + + bind_evtchn_to_cpu(evtchn, cpu); +- } ++ } else ++ cfg = irq_cfg(irq); + +- irq_cfg(irq)->bindcount++; ++ cfg->bindcount++; + + out: + spin_unlock(&irq_mapping_update_lock); +@@ -561,23 +591,24 @@ static int bind_ipi_to_irq(unsigned int + static void unbind_from_irq(unsigned int irq) + { + unsigned int cpu; +- int evtchn = evtchn_from_irq(irq); ++ struct irq_cfg *cfg = irq_cfg(irq); ++ int evtchn = evtchn_from_irq_cfg(cfg); + + spin_lock(&irq_mapping_update_lock); + +- if (!--irq_cfg(irq)->bindcount && VALID_EVTCHN(evtchn)) { +- if ((type_from_irq(irq) != IRQT_CALLER_PORT) && ++ if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) { ++ if ((type_from_irq_cfg(cfg) != IRQT_CALLER_PORT) && + close_evtchn(evtchn)) + BUG(); + +- switch (type_from_irq(irq)) { ++ switch (type_from_irq_cfg(cfg)) { + case IRQT_VIRQ: + per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) +- [index_from_irq(irq)] = -1; ++ [index_from_irq_cfg(cfg)] = -1; + break; + case IRQT_IPI: + per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) +- [index_from_irq(irq)] = -1; ++ [index_from_irq_cfg(cfg)] = -1; + break; + default: + break; +@@ -587,7 +618,7 @@ static void unbind_from_irq(unsigned int + bind_evtchn_to_cpu(evtchn, 0); + + evtchn_to_irq[evtchn] = -1; +- irq_cfg(irq)->info = IRQ_UNBOUND; ++ cfg->info = IRQ_UNBOUND; + + /* Zap stats across IRQ changes of use. 
*/ + for_each_possible_cpu(cpu) +@@ -740,25 +771,26 @@ void rebind_evtchn_to_cpu(int port, unsi + unmask_evtchn(port); + } + +-static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu) ++static void rebind_irq_to_cpu(struct irq_data *data, unsigned int tcpu) + { +- int evtchn = evtchn_from_irq(irq); ++ int evtchn = evtchn_from_irq_data(data); + + if (VALID_EVTCHN(evtchn)) + rebind_evtchn_to_cpu(evtchn, tcpu); + } + +-static int set_affinity_irq(unsigned int irq, const struct cpumask *dest) ++static int set_affinity_irq(struct irq_data *data, ++ const struct cpumask *dest, bool force) + { +- rebind_irq_to_cpu(irq, cpumask_first(dest)); ++ rebind_irq_to_cpu(data, cpumask_first(dest)); + + return 0; + } + #endif + +-int resend_irq_on_evtchn(unsigned int irq) ++int resend_irq_on_evtchn(struct irq_data *data) + { +- int masked, evtchn = evtchn_from_irq(irq); ++ int masked, evtchn = evtchn_from_irq_data(data); + + if (!VALID_EVTCHN(evtchn)) + return 1; +@@ -775,52 +807,51 @@ int resend_irq_on_evtchn(unsigned int ir + * Interface to generic handling in irq.c + */ + +-static void unmask_dynirq(unsigned int irq) ++static void unmask_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(irq); ++ int evtchn = evtchn_from_irq_data(data); + + if (VALID_EVTCHN(evtchn)) + unmask_evtchn(evtchn); + } + +-static void mask_dynirq(unsigned int irq) ++static void mask_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(irq); ++ int evtchn = evtchn_from_irq_data(data); + + if (VALID_EVTCHN(evtchn)) + mask_evtchn(evtchn); + } + +-static unsigned int startup_dynirq(unsigned int irq) ++static unsigned int startup_dynirq(struct irq_data *data) + { +- unmask_dynirq(irq); ++ unmask_dynirq(data); + return 0; + } + + #define shutdown_dynirq mask_dynirq + +-static void end_dynirq(unsigned int irq) ++static void end_dynirq(struct irq_data *data) + { +- if (!(irq_to_desc(irq)->status & IRQ_DISABLED)) { +- move_masked_irq(irq); +- unmask_dynirq(irq); ++ if (!(irq_to_desc(data->irq)->status & IRQ_DISABLED)) { ++ move_masked_irq(data->irq); ++ unmask_dynirq(data); + } + } + + static struct irq_chip dynirq_chip = { +- .name = "Dynamic", +- .startup = startup_dynirq, +- .shutdown = shutdown_dynirq, +- .enable = unmask_dynirq, +- .disable = mask_dynirq, +- .mask = mask_dynirq, +- .unmask = unmask_dynirq, +- .end = end_dynirq, +- .eoi = end_dynirq, ++ .name = "Dynamic", ++ .irq_startup = startup_dynirq, ++ .irq_shutdown = shutdown_dynirq, ++ .irq_enable = unmask_dynirq, ++ .irq_disable = mask_dynirq, ++ .irq_mask = mask_dynirq, ++ .irq_unmask = unmask_dynirq, ++ .irq_eoi = end_dynirq, + #ifdef CONFIG_SMP +- .set_affinity = set_affinity_irq, ++ .irq_set_affinity = set_affinity_irq, + #endif +- .retrigger = resend_irq_on_evtchn, ++ .irq_retrigger = resend_irq_on_evtchn, + }; + + /* Bitmap indicating which PIRQs require Xen to be notified on unmask. 
*/ +@@ -873,18 +904,20 @@ static inline void pirq_query_unmask(int + set_bit(irq - PIRQ_BASE, pirq_needs_eoi); + } + +-static int set_type_pirq(unsigned int irq, unsigned int type) ++static int set_type_pirq(struct irq_data *data, unsigned int type) + { + if (type != IRQ_TYPE_PROBE) + return -EINVAL; +- set_bit(irq - PIRQ_BASE, probing_pirq); ++ set_bit(data->irq - PIRQ_BASE, probing_pirq); + return 0; + } + +-static void enable_pirq(unsigned int irq) ++static void enable_pirq(struct irq_data *data) + { + struct evtchn_bind_pirq bind_pirq; +- int evtchn = evtchn_from_irq(irq); ++ unsigned int irq = data->irq; ++ struct irq_cfg *cfg = irq_data_cfg(data); ++ int evtchn = evtchn_from_irq_cfg(cfg); + unsigned int pirq = irq - PIRQ_BASE; + + if (VALID_EVTCHN(evtchn)) { +@@ -910,7 +943,7 @@ static void enable_pirq(unsigned int irq + + evtchn_to_irq[evtchn] = irq; + bind_evtchn_to_cpu(evtchn, 0); +- irq_cfg(irq)->info = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn); ++ cfg->info = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn); + + out: + pirq_unmask_and_notify(evtchn, irq); +@@ -918,15 +951,16 @@ static void enable_pirq(unsigned int irq + + #define disable_pirq mask_pirq + +-static unsigned int startup_pirq(unsigned int irq) ++static unsigned int startup_pirq(struct irq_data *data) + { +- enable_pirq(irq); ++ enable_pirq(data); + return 0; + } + +-static void shutdown_pirq(unsigned int irq) ++static void shutdown_pirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(irq); ++ struct irq_cfg *cfg = irq_data_cfg(data); ++ int evtchn = evtchn_from_irq_cfg(cfg); + + if (!VALID_EVTCHN(evtchn)) + return; +@@ -938,48 +972,47 @@ static void shutdown_pirq(unsigned int i + + bind_evtchn_to_cpu(evtchn, 0); + evtchn_to_irq[evtchn] = -1; +- irq_cfg(irq)->info = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0); ++ cfg->info = mk_irq_info(IRQT_PIRQ, index_from_irq_cfg(cfg), 0); + } + +-static void unmask_pirq(unsigned int irq) ++static void unmask_pirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(irq); ++ int evtchn = evtchn_from_irq_data(data); + + if (VALID_EVTCHN(evtchn)) +- pirq_unmask_and_notify(evtchn, irq); ++ pirq_unmask_and_notify(evtchn, data->irq); + } + + #define mask_pirq mask_dynirq + +-static void end_pirq(unsigned int irq) ++static void end_pirq(struct irq_data *data) + { +- const struct irq_desc *desc = irq_to_desc(irq); ++ const struct irq_desc *desc = irq_to_desc(data->irq); + + if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) == + (IRQ_DISABLED|IRQ_PENDING)) +- shutdown_pirq(irq); ++ shutdown_pirq(data); + else { + if (!(desc->status & IRQ_DISABLED)) +- move_masked_irq(irq); +- unmask_pirq(irq); ++ move_masked_irq(data->irq); ++ unmask_pirq(data); + } + } + + static struct irq_chip pirq_chip = { +- .name = "Phys", +- .startup = startup_pirq, +- .shutdown = shutdown_pirq, +- .enable = enable_pirq, +- .disable = disable_pirq, +- .mask = mask_pirq, +- .unmask = unmask_pirq, +- .end = end_pirq, +- .eoi = end_pirq, +- .set_type = set_type_pirq, ++ .name = "Phys", ++ .irq_startup = startup_pirq, ++ .irq_shutdown = shutdown_pirq, ++ .irq_enable = enable_pirq, ++ .irq_disable = disable_pirq, ++ .irq_mask = mask_pirq, ++ .irq_unmask = unmask_pirq, ++ .irq_eoi = end_pirq, ++ .irq_set_type = set_type_pirq, + #ifdef CONFIG_SMP +- .set_affinity = set_affinity_irq, ++ .irq_set_affinity = set_affinity_irq, + #endif +- .retrigger = resend_irq_on_evtchn, ++ .irq_retrigger = resend_irq_on_evtchn, + }; + + int irq_ignore_unhandled(unsigned int irq) +@@ -1169,28 +1202,39 @@ int __init 
arch_early_irq_init(void) + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(_irq_cfg); i++) +- irq_to_desc(i)->chip_data = _irq_cfg + i; ++ set_irq_chip_data(i, _irq_cfg + i); + + return 0; + } + +-#ifdef CONFIG_SPARSE_IRQ +-int arch_init_chip_data(struct irq_desc *desc, int cpu) ++struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) + { +- if (!desc->chip_data) { +- /* By default all event channels notify CPU#0. */ +- cpumask_copy(desc->affinity, cpumask_of(0)); ++ int res = irq_alloc_desc_at(at, node); ++ struct irq_cfg *cfg = NULL; + +- desc->chip_data = kzalloc(sizeof(struct irq_cfg), GFP_ATOMIC); +- } +- if (!desc->chip_data) { +- pr_emerg("cannot alloc irq_cfg\n"); +- BUG(); ++ if (res < 0) { ++ if (res != -EEXIST) ++ return NULL; ++ cfg = get_irq_chip_data(at); ++ if (cfg) ++ return cfg; + } + +- return 0; +-} ++#ifdef CONFIG_SPARSE_IRQ ++ /* By default all event channels notify CPU#0. */ ++ cpumask_copy(irq_get_irq_data(at)->affinity, cpumask_of(0)); ++ ++ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); ++ if (cfg) ++ set_irq_chip_data(at, cfg); ++ else ++ irq_free_desc(at); ++ ++ return cfg; ++#else ++ return irq_cfg(at); + #endif ++} + + #ifdef CONFIG_SPARSE_IRQ + int nr_pirqs = NR_PIRQS; +@@ -1223,7 +1267,7 @@ int __init arch_probe_nr_irqs(void) + + printk(KERN_DEBUG "nr_pirqs: %d\n", nr_pirqs); + +- return 0; ++ return ARRAY_SIZE(_irq_cfg); + } + #endif + +@@ -1255,10 +1299,12 @@ int assign_irq_vector(int irq, struct ir + + void evtchn_register_pirq(int irq) + { ++ struct irq_cfg *cfg = irq_cfg(irq); ++ + BUG_ON(irq < PIRQ_BASE || irq - PIRQ_BASE >= nr_pirqs); +- if (identity_mapped_irq(irq) || type_from_irq(irq) != IRQT_UNBOUND) ++ if (identity_mapped_irq(irq) || type_from_irq_cfg(cfg) != IRQT_UNBOUND) + return; +- irq_cfg(irq)->info = mk_irq_info(IRQT_PIRQ, irq, 0); ++ cfg->info = mk_irq_info(IRQT_PIRQ, irq, 0); + set_irq_chip_and_handler_name(irq, &pirq_chip, handle_fasteoi_irq, + "fasteoi"); + } +@@ -1267,15 +1313,12 @@ int evtchn_map_pirq(int irq, int xen_pir + { + if (irq < 0) { + #ifdef CONFIG_SPARSE_IRQ ++ struct irq_cfg *cfg; ++ + spin_lock(&irq_mapping_update_lock); +- irq = find_unbound_irq(numa_node_id(), &pirq_chip); ++ irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip); + if (irq >= 0) { +- struct irq_desc *desc; +- struct irq_cfg *cfg; +- +- desc = irq_to_desc_alloc_node(irq, numa_node_id()); +- cfg = desc->chip_data; +- BUG_ON(type_from_irq(irq) != IRQT_UNBOUND); ++ BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND); + cfg->bindcount++; + cfg->info = mk_irq_info(IRQT_PIRQ, xen_pirq, 0); + } +@@ -1291,15 +1334,17 @@ int evtchn_map_pirq(int irq, int xen_pir + irq = PIRQ_BASE + nr_pirqs - 1; + spin_lock(&irq_alloc_lock); + do { +- struct irq_desc *desc; + struct irq_cfg *cfg; + + if (identity_mapped_irq(irq)) + continue; +- desc = irq_to_desc_alloc_node(irq, numa_node_id()); +- cfg = desc->chip_data; +- if (!index_from_irq(irq)) { +- BUG_ON(type_from_irq(irq) != IRQT_UNBOUND); ++ cfg = alloc_irq_and_cfg_at(irq, numa_node_id()); ++ if (unlikely(!cfg)) { ++ spin_unlock(&irq_alloc_lock); ++ return -ENOMEM; ++ } ++ if (!index_from_irq_cfg(cfg)) { ++ BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND); + cfg->info = mk_irq_info(IRQT_PIRQ, + xen_pirq, 0); + break; +@@ -1312,7 +1357,9 @@ int evtchn_map_pirq(int irq, int xen_pir + handle_fasteoi_irq, "fasteoi"); + #endif + } else if (!xen_pirq) { +- if (unlikely(type_from_irq(irq) != IRQT_PIRQ)) ++ struct irq_cfg *cfg = irq_cfg(irq); ++ ++ if (!cfg || unlikely(type_from_irq_cfg(cfg) != IRQT_PIRQ)) + return -EINVAL; + /* + 
* dynamic_irq_cleanup(irq) would seem to be the correct thing +@@ -1321,9 +1368,9 @@ int evtchn_map_pirq(int irq, int xen_pir + * then causes a warning in dynamic_irq_cleanup(). + */ + set_irq_chip_and_handler(irq, NULL, NULL); +- irq_cfg(irq)->info = IRQ_UNBOUND; ++ cfg->info = IRQ_UNBOUND; + #ifdef CONFIG_SPARSE_IRQ +- irq_cfg(irq)->bindcount--; ++ cfg->bindcount--; + #endif + return 0; + } else if (type_from_irq(irq) != IRQT_PIRQ +@@ -1338,10 +1385,12 @@ int evtchn_map_pirq(int irq, int xen_pir + + int evtchn_get_xen_pirq(int irq) + { ++ struct irq_cfg *cfg = irq_cfg(irq); ++ + if (identity_mapped_irq(irq)) + return irq; +- BUG_ON(type_from_irq(irq) != IRQT_PIRQ); +- return index_from_irq(irq); ++ BUG_ON(type_from_irq_cfg(cfg) != IRQT_PIRQ); ++ return index_from_irq_cfg(cfg); + } + + void __init xen_init_IRQ(void) +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-03-03 16:11:42.000000000 +0100 +@@ -11,7 +11,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -34,7 +33,7 @@ extern void smp_trap_init(trap_info_t *) + + cpumask_var_t vcpu_initialized_mask; + +-DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info); ++DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); + EXPORT_PER_CPU_SYMBOL(cpu_info); + + static DEFINE_PER_CPU(int, resched_irq); +@@ -46,6 +45,11 @@ static char callfunc_name[NR_CPUS][15]; + static char call1func_name[NR_CPUS][15]; + static char reboot_name[NR_CPUS][15]; + ++#ifdef CONFIG_IRQ_WORK ++static DEFINE_PER_CPU(int, irq_work_irq); ++static char irq_work_name[NR_CPUS][15]; ++#endif ++ + void __init prefill_possible_map(void) + { + int i, rc; +@@ -76,6 +80,9 @@ static int __cpuinit xen_smp_intr_init(u + int rc; + + per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = ++#ifdef CONFIG_IRQ_WORK ++ per_cpu(irq_work_irq, cpu) = ++#endif + per_cpu(call1func_irq, cpu) = per_cpu(reboot_irq, cpu) = -1; + + sprintf(resched_name[cpu], "resched%u", cpu); +@@ -122,6 +129,19 @@ static int __cpuinit xen_smp_intr_init(u + goto fail; + per_cpu(reboot_irq, cpu) = rc; + ++#ifdef CONFIG_IRQ_WORK ++ sprintf(irq_work_name[cpu], "irqwork%u", cpu); ++ rc = bind_ipi_to_irqhandler(IRQ_WORK_VECTOR, ++ cpu, ++ smp_irq_work_interrupt, ++ IRQF_DISABLED|IRQF_NOBALANCING, ++ irq_work_name[cpu], ++ NULL); ++ if (rc < 0) ++ goto fail; ++ per_cpu(irq_work_irq, cpu) = rc; ++#endif ++ + rc = xen_spinlock_init(cpu); + if (rc < 0) + goto fail; +@@ -140,6 +160,10 @@ static int __cpuinit xen_smp_intr_init(u + unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); + if (per_cpu(reboot_irq, cpu) >= 0) + unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL); ++#ifdef CONFIG_IRQ_WORK ++ if (per_cpu(irq_work_irq, cpu) >= 0) ++ unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL); ++#endif + xen_spinlock_cleanup(cpu); + return rc; + } +@@ -154,6 +178,9 @@ static void __cpuinit xen_smp_intr_exit( + unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); + unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); + unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL); ++#ifdef CONFIG_IRQ_WORK ++ unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL); ++#endif + xen_spinlock_cleanup(cpu); + } + #endif +--- head-2011-03-17.orig/drivers/xen/core/spinlock.c 2011-03-15 16:33:52.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/spinlock.c 2011-03-03 16:11:45.000000000 +0100 +@@ -21,7 +21,7 @@ struct spinning { + struct spinning *prev; + }; + static DEFINE_PER_CPU(struct 
spinning *, _spinning); +-static DEFINE_PER_CPU(evtchn_port_t, poll_evtchn); ++static DEFINE_PER_CPU_READ_MOSTLY(evtchn_port_t, poll_evtchn); + /* + * Protect removal of objects: Addition can be done lockless, and even + * removal itself doesn't need protection - what needs to be prevented is +@@ -153,7 +153,7 @@ bool xen_spin_wait(arch_spinlock_t *lock + */ + arch_spinlock_t *lock = other->lock; + +- raw_local_irq_disable(); ++ arch_local_irq_disable(); + while (lock->cur == other->ticket) { + unsigned int token; + bool kick, free; +@@ -175,7 +175,7 @@ bool xen_spin_wait(arch_spinlock_t *lock + } + + /* +- * No need to use raw_local_irq_restore() here, as the ++ * No need to use arch_local_irq_restore() here, as the + * intended event processing will happen with the poll + * call. + */ +@@ -200,7 +200,7 @@ bool xen_spin_wait(arch_spinlock_t *lock + /* announce we're done */ + other = spinning.prev; + percpu_write(_spinning, other); +- raw_local_irq_disable(); ++ arch_local_irq_disable(); + rm_idx = percpu_read(rm_seq.idx); + smp_wmb(); + percpu_write(rm_seq.idx, rm_idx + 1); +@@ -229,7 +229,7 @@ bool xen_spin_wait(arch_spinlock_t *lock + rm_idx &= 1; + while (percpu_read(rm_seq.ctr[rm_idx].counter)) + cpu_relax(); +- raw_local_irq_restore(upcall_mask); ++ arch_local_irq_restore(upcall_mask); + *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); + + return rc; +@@ -256,7 +256,7 @@ void xen_spin_kick(arch_spinlock_t *lock + return; + } + +- flags = __raw_local_irq_save(); ++ flags = arch_local_irq_save(); + for (;;) { + unsigned int rm_idx = per_cpu(rm_seq.idx, cpu); + +@@ -281,7 +281,7 @@ void xen_spin_kick(arch_spinlock_t *lock + } + + atomic_dec(rm_ctr); +- raw_local_irq_restore(flags); ++ arch_local_irq_restore(flags); + + if (unlikely(spinning)) { + notify_remote_via_evtchn(per_cpu(poll_evtchn, cpu)); +--- head-2011-03-17.orig/drivers/xen/evtchn.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/drivers/xen/evtchn.c 2011-02-01 15:09:47.000000000 +0100 +@@ -528,7 +528,11 @@ static const struct file_operations evtc + + static struct miscdevice evtchn_miscdev = { + .minor = MISC_DYNAMIC_MINOR, ++#ifdef CONFIG_PARAVIRT_XEN + .name = "xen/evtchn", ++#else ++ .name = "evtchn", ++#endif + .nodename = "xen/evtchn", + .fops = &evtchn_fops, + }; +--- head-2011-03-17.orig/drivers/xen/gntdev/gntdev.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/drivers/xen/gntdev/gntdev.c 2011-02-01 15:09:47.000000000 +0100 +@@ -145,6 +145,7 @@ static long gntdev_ioctl(struct file *fl + static const struct file_operations gntdev_fops = { + .owner = THIS_MODULE, + .open = gntdev_open, ++ .llseek = no_llseek, + .release = gntdev_release, + .mmap = gntdev_mmap, + .unlocked_ioctl = gntdev_ioctl +@@ -430,6 +431,8 @@ static int gntdev_open(struct inode *ino + { + gntdev_file_private_data_t *private_data; + ++ nonseekable_open(inode, flip); ++ + try_module_get(THIS_MODULE); + + /* Allocate space for the per-instance private data. */ +--- head-2011-03-17.orig/drivers/xen/privcmd/privcmd.c 2011-01-31 18:07:35.000000000 +0100 ++++ head-2011-03-17/drivers/xen/privcmd/privcmd.c 2011-02-01 15:09:47.000000000 +0100 +@@ -14,7 +14,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -416,7 +415,8 @@ static int privcmd_mmap(struct file * fi + if (xen_feature(XENFEAT_auto_translated_physmap)) + return -ENOSYS; + +- /* DONTCOPY is essential for Xen as copy_page_range is broken. 
*/ ++ /* DONTCOPY is essential for Xen because copy_page_range doesn't know ++ * how to recreate these mappings */ + vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTCOPY; + vma->vm_ops = &privcmd_vm_ops; + vma->vm_private_data = NULL; +@@ -426,6 +426,8 @@ static int privcmd_mmap(struct file * fi + #endif + + static const struct file_operations privcmd_file_ops = { ++ .open = nonseekable_open, ++ .llseek = no_llseek, + .unlocked_ioctl = privcmd_ioctl, + .mmap = privcmd_mmap, + }; +--- head-2011-03-17.orig/drivers/xen/scsifront/scsifront.c 2011-02-08 10:04:41.000000000 +0100 ++++ head-2011-03-17/drivers/xen/scsifront/scsifront.c 2011-02-08 10:08:14.000000000 +0100 +@@ -315,11 +315,12 @@ big_to_sg: + return ref_cnt; + } + +-static int scsifront_queuecommand(struct scsi_cmnd *sc, +- void (*done)(struct scsi_cmnd *)) ++static int scsifront_queuecommand(struct Scsi_Host *shost, ++ struct scsi_cmnd *sc) + { +- struct vscsifrnt_info *info = shost_priv(sc->device->host); ++ struct vscsifrnt_info *info = shost_priv(shost); + vscsiif_request_t *ring_req; ++ unsigned long flags; + int ref_cnt; + uint16_t rqid; + +@@ -328,11 +329,12 @@ static int scsifront_queuecommand(struct + sc->cmnd[0],sc->cmnd[1],sc->cmnd[2],sc->cmnd[3],sc->cmnd[4], + sc->cmnd[5],sc->cmnd[6],sc->cmnd[7],sc->cmnd[8],sc->cmnd[9]); + */ ++ spin_lock_irqsave(shost->host_lock, flags); + if (RING_FULL(&info->ring)) { +- goto out_host_busy; ++ spin_unlock_irqrestore(shost->host_lock, flags); ++ return SCSI_MLQUEUE_HOST_BUSY; + } + +- sc->scsi_done = done; + sc->result = 0; + + ring_req = scsifront_pre_request(info); +@@ -361,27 +363,21 @@ static int scsifront_queuecommand(struct + ref_cnt = map_data_for_request(info, sc, ring_req, rqid); + if (ref_cnt < 0) { + add_id_to_freelist(info, rqid); ++ spin_unlock_irqrestore(shost->host_lock, flags); + if (ref_cnt == (-ENOMEM)) +- goto out_host_busy; +- else { +- sc->result = (DID_ERROR << 16); +- goto out_fail_command; +- } ++ return SCSI_MLQUEUE_HOST_BUSY; ++ sc->result = (DID_ERROR << 16); ++ sc->scsi_done(sc); ++ return 0; + } + + ring_req->nr_segments = (uint8_t)ref_cnt; + info->shadow[rqid].nr_segments = ref_cnt; + + scsifront_do_request(info); ++ spin_unlock_irqrestore(shost->host_lock, flags); + + return 0; +- +-out_host_busy: +- return SCSI_MLQUEUE_HOST_BUSY; +- +-out_fail_command: +- done(sc); +- return 0; + } + + +--- head-2011-03-17.orig/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h 2008-02-20 09:32:49.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h 2011-02-01 15:09:47.000000000 +0100 +@@ -54,7 +54,6 @@ + #include + #include + #include +-#include + #include + #include + #include +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_dev.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_dev.c 2011-02-01 15:09:47.000000000 +0100 +@@ -454,6 +454,7 @@ static const struct file_operations xenb + .write = xenbus_dev_write, + .open = xenbus_dev_open, + .release = xenbus_dev_release, ++ .llseek = no_llseek, + .poll = xenbus_dev_poll, + #ifdef HAVE_UNLOCKED_IOCTL + .unlocked_ioctl = xenbus_dev_ioctl +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 15:09:47.000000000 +0100 +@@ -82,10 +82,13 @@ + + int xen_store_evtchn; + #if !defined(CONFIG_XEN) && !defined(MODULE) +-EXPORT_SYMBOL(xen_store_evtchn); ++EXPORT_SYMBOL_GPL(xen_store_evtchn); + #endif + + struct xenstore_domain_interface 
*xen_store_interface; ++#if !defined(CONFIG_XEN) && !defined(MODULE) ++EXPORT_SYMBOL_GPL(xen_store_interface); ++#endif + + static unsigned long xen_store_mfn; + +@@ -1102,9 +1105,7 @@ int __devinit xenbus_init(void) + #endif + { + int err = 0; +-#if defined(CONFIG_XEN) || defined(MODULE) + unsigned long page = 0; +-#endif + + DPRINTK(""); + +@@ -1122,7 +1123,6 @@ int __devinit xenbus_init(void) + * Domain0 doesn't have a store_evtchn or store_mfn yet. + */ + if (is_initial_xendomain()) { +-#if defined(CONFIG_XEN) || defined(MODULE) + struct evtchn_alloc_unbound alloc_unbound; + + /* Allocate Xenstore page */ +@@ -1161,9 +1161,6 @@ int __devinit xenbus_init(void) + if (xsd_port_intf) + xsd_port_intf->read_proc = xsd_port_read; + #endif +-#else +- /* dom0 not yet supported */ +-#endif + xen_store_interface = mfn_to_virt(xen_store_mfn); + } else { + #if !defined(CONFIG_XEN) && !defined(MODULE) +@@ -1249,10 +1246,8 @@ int __devinit xenbus_init(void) + * registered. + */ + +-#if defined(CONFIG_XEN) || defined(MODULE) + if (page != 0) + free_page(page); +-#endif + return err; + } + +--- head-2011-03-17.orig/include/xen/Kbuild 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/include/xen/Kbuild 2011-02-01 15:09:47.000000000 +0100 +@@ -1,2 +1 @@ +-header-y += privcmd.h + header-y += public/ +--- head-2011-03-17.orig/include/xen/evtchn.h 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/include/xen/evtchn.h 2011-02-01 15:09:47.000000000 +0100 +@@ -58,6 +58,7 @@ struct irq_cfg { + #endif + }; + }; ++struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node); + #endif + + /* +--- head-2011-03-17.orig/include/xen/interface/memory.h 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/include/xen/interface/memory.h 2011-02-01 15:09:47.000000000 +0100 +@@ -198,6 +198,7 @@ struct xen_machphys_mapping { + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ + }; ++DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping); + typedef struct xen_machphys_mapping xen_machphys_mapping_t; + DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); + +@@ -252,6 +253,7 @@ struct xen_memory_map { + */ + XEN_GUEST_HANDLE(void) buffer; + }; ++DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map); + typedef struct xen_memory_map xen_memory_map_t; + DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); + +--- head-2011-03-17.orig/include/xen/privcmd.h 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/include/xen/privcmd.h 2011-02-01 15:09:47.000000000 +0100 +@@ -1,77 +1,3 @@ +-/****************************************************************************** +- * privcmd.h +- * +- * Interface to /proc/xen/privcmd. 
+- *
+- * Copyright (c) 2003-2005, K A Fraser
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License version 2
+- * as published by the Free Software Foundation; or, when distributed
+- * separately from the Linux kernel or incorporated into other
+- * software packages, subject to the following license:
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a copy
+- * of this source file (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use, copy, modify,
+- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+- * and to permit persons to whom the Software is furnished to do so, subject to
+- * the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+- * IN THE SOFTWARE.
+- */
+-
+-#ifndef __LINUX_PUBLIC_PRIVCMD_H__
+-#define __LINUX_PUBLIC_PRIVCMD_H__
+-
+-#include
+-#include
+-
+-typedef unsigned long xen_pfn_t;
+-
+-struct privcmd_hypercall {
+- __u64 op;
+- __u64 arg[5];
+-};
+-
+-struct privcmd_mmap_entry {
+- __u64 va;
+- __u64 mfn;
+- __u64 npages;
+-};
+-
+-struct privcmd_mmap {
+- int num;
+- domid_t dom; /* target domain */
+- struct privcmd_mmap_entry __user *entry;
+-};
+-
+-struct privcmd_mmapbatch {
+- int num; /* number of pages to populate */
+- domid_t dom; /* target domain */
+- __u64 addr; /* virtual address */
+- xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
+-};
+-
+-/*
+- * @cmd: IOCTL_PRIVCMD_HYPERCALL
+- * @arg: &privcmd_hypercall_t
+- * Return: Value returned from execution of the specified hypercall.
+- */
+-#define IOCTL_PRIVCMD_HYPERCALL \
+- _IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall))
+-#define IOCTL_PRIVCMD_MMAP \
+- _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap))
+-#define IOCTL_PRIVCMD_MMAPBATCH \
+- _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
+-
+-#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
++#if defined(CONFIG_PARAVIRT_XEN) || !defined(__KERNEL__)
++#include "public/privcmd.h"
++#endif
+--- head-2011-03-17.orig/include/xen/public/privcmd.h 2011-02-01 14:38:38.000000000 +0100
++++ head-2011-03-17/include/xen/public/privcmd.h 2011-02-01 15:09:47.000000000 +0100
+@@ -34,6 +34,7 @@
+ #define __LINUX_PUBLIC_PRIVCMD_H__
+
+ #include
++#include
+
+ typedef struct privcmd_hypercall
+ {
+--- head-2011-03-17.orig/kernel/power/Kconfig 2011-03-17 14:35:43.000000000 +0100
++++ head-2011-03-17/kernel/power/Kconfig 2011-02-01 15:09:47.000000000 +0100
+@@ -65,7 +65,7 @@ config PM_TRACE
+ config PM_TRACE_RTC
+ bool "Suspend/resume event tracing"
+ depends on CAN_PM_TRACE
+- depends on X86
++ depends on X86 && !XEN_UNPRIVILEGED_GUEST
+ select PM_TRACE
+ default n
+ ---help---
+@@ -111,7 +111,7 @@ config SUSPEND
+
+ config PM_TEST_SUSPEND
+ bool "Test suspend/resume and wakealarm during bootup"
+- depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
++ depends on SUSPEND && PM_DEBUG && RTC_CLASS=y && !XEN_UNPRIVILEGED_GUEST
+ ---help---
+ This option will let you suspend your machine during bootup, and
+ make it wake up a few seconds later using an RTC wakeup alarm.
+--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 15:04:27.000000000 +0100
++++ head-2011-03-17/lib/swiotlb-xen.c 2011-02-01 15:09:47.000000000 +0100
+@@ -58,7 +58,7 @@ static unsigned long io_tlb_nslabs;
+ */
+ static unsigned long io_tlb_overflow = 32*1024;
+
+-void *io_tlb_overflow_buffer;
++static void *io_tlb_overflow_buffer;
+
+ /*
+ * This is a free list describing the number of free entries available from
+@@ -174,16 +174,16 @@ void __init swiotlb_init_with_tbl(char *
+ * Allocate and initialize the free list array. This array is used
+ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
+ */
+- io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
++ io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+ for (i = 0; i < io_tlb_nslabs; i++)
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_index = 0;
+- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
++ io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+
+ /*
+ * Get the overflow emergency buffer
+ */
+- io_tlb_overflow_buffer = alloc_bootmem(io_tlb_overflow);
++ io_tlb_overflow_buffer = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_overflow));
+ if (!io_tlb_overflow_buffer)
+ panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
+@@ -218,7 +218,7 @@ swiotlb_init_with_default_size(size_t de
+ /*
+ * Get IO TLB memory from the low pages
+ */
+- io_tlb_start = alloc_bootmem_pages(bytes);
++ io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+ if (!io_tlb_start)
+ panic("Cannot allocate SWIOTLB buffer");
+
+--- head-2011-03-17.orig/mm/vmalloc.c 2011-02-01 14:39:24.000000000 +0100
++++ head-2011-03-17/mm/vmalloc.c 2011-02-01 15:09:47.000000000 +0100
+@@ -478,8 +478,6 @@ static void vmap_debug_free_range(unsign
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ vunmap_page_range(start, end);
+ flush_tlb_kernel_range(start, end);
+-#elif defined(CONFIG_XEN) && defined(CONFIG_X86)
+- vunmap_page_range(start, end);
+ #endif
+ }
+
diff --git a/patches.xen/xen3-patch-2.6.38 b/patches.xen/xen3-patch-2.6.38
new file mode 100644
index 0000000..e315f03
--- /dev/null
+++ b/patches.xen/xen3-patch-2.6.38
@@ -0,0 +1,3670 @@
+From: Linux Kernel Mailing List
+Subject: Linux: 2.6.38
+Patch-mainline: 2.6.38
+
+ This patch contains the differences between 2.6.37 and 2.6.38.
+
+Acked-by: Jeff Mahoney
+Automatically created from "patches.kernel.org/patch-2.6.38" by xen-port-patches.py
+
+--- head-2011-03-17.orig/arch/x86/Kconfig 2011-02-01 15:09:47.000000000 +0100
++++ head-2011-03-17/arch/x86/Kconfig 2011-02-01 16:43:32.000000000 +0100
+@@ -51,7 +51,7 @@ config X86
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2 if !XEN
+ select HAVE_KERNEL_LZMA if !XEN
+- select HAVE_KERNEL_XZ
++ select HAVE_KERNEL_XZ if !XEN
+ select HAVE_KERNEL_LZO if !XEN
+ select HAVE_HW_BREAKPOINT
+ select HAVE_MIXED_BREAKPOINTS_REGS
+@@ -513,7 +513,7 @@ config X86_ES7000
+
+ config X86_32_IRIS
+ tristate "Eurobraille/Iris poweroff module"
+- depends on X86_32
++ depends on X86_32 && !XEN
+ ---help---
+ The Iris machines from EuroBraille do not have APM or ACPI support
+ to shut themselves down properly.
A special I/O sequence is +--- head-2011-03-17.orig/arch/x86/include/asm/apic.h 2011-01-31 14:53:50.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/apic.h 2011-02-17 10:23:17.000000000 +0100 +@@ -237,7 +237,11 @@ extern void setup_local_APIC(void); + extern void end_local_APIC_setup(void); + extern void bsp_end_local_APIC_setup(void); + extern void init_apic_mappings(void); ++#ifndef CONFIG_XEN + void register_lapic_address(unsigned long address); ++#else ++#define register_lapic_address(address) ++#endif + extern void setup_boot_APIC_clock(void); + extern void setup_secondary_APIC_clock(void); + extern int APIC_init_uniprocessor(void); +--- head-2011-03-17.orig/arch/x86/include/asm/xen/hypervisor.h 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/asm/xen/hypervisor.h 2011-02-03 12:23:23.000000000 +0100 +@@ -58,7 +58,7 @@ static inline uint32_t xen_cpuid_base(vo + return 0; + } + +-#ifdef CONFIG_XEN ++#ifdef CONFIG_PARAVIRT_XEN + extern bool xen_hvm_need_lapic(void); + + static inline bool xen_x2apic_para_available(void) +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 15:41:35.000000000 +0100 +@@ -123,7 +123,11 @@ enum fixed_addresses { + #endif + FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */ + FIX_TEXT_POKE0, /* first page is last, because allocation is backward */ ++#ifdef CONFIG_X86_MRST ++ FIX_LNW_VRTC, ++#endif + __end_of_permanent_fixed_addresses, ++ + /* + * 256 temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mach_traps.h 2007-06-12 13:14:02.000000000 +0200 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mach_traps.h 2011-02-07 12:21:00.000000000 +0100 +@@ -9,7 +9,11 @@ + #include + #include + +-static inline void clear_mem_error(unsigned char reason) {} ++#define NMI_REASON_SERR 0x80 ++#define NMI_REASON_IOCHK 0x40 ++#define NMI_REASON_MASK (NMI_REASON_SERR | NMI_REASON_IOCHK) ++ ++static inline void clear_serr_error(unsigned char reason) {} + static inline void clear_io_check_error(unsigned char reason) {} + + static inline unsigned char get_nmi_reason(void) +@@ -21,9 +25,9 @@ static inline unsigned char get_nmi_reas + * port 0x61. 
+ */ + if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason)) +- reason |= 0x40; ++ reason |= NMI_REASON_IOCHK; + if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason)) +- reason |= 0x80; ++ reason |= NMI_REASON_SERR; + + return reason; + } +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-08 10:25:49.000000000 +0100 +@@ -87,8 +87,6 @@ static inline void switch_mm(struct mm_s + BUG_ON(!xen_feature(XENFEAT_writable_page_tables) && + !PagePinned(virt_to_page(next->pgd))); + +- /* stop flush ipis for the previous mm */ +- cpumask_clear_cpu(cpu, mm_cpumask(prev)); + #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) /* XEN: no lazy tlb */ + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + percpu_write(cpu_tlbstate.active_mm, next); +@@ -119,6 +117,9 @@ static inline void switch_mm(struct mm_s + } + + BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF)); ++ ++ /* stop TLB flushes for the previous mm */ ++ cpumask_clear_cpu(cpu, mm_cpumask(prev)); + } + #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) /* XEN: no lazy tlb */ + else { +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pci.h 2011-02-01 15:41:35.000000000 +0100 +@@ -71,6 +71,7 @@ extern unsigned long pci_mem_start; + + #define PCIBIOS_MIN_CARDBUS_IO 0x4000 + ++extern int pcibios_enabled; + void pcibios_config_init(void); + struct pci_bus *pcibios_scan_root(int bus); + +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/perf_event.h 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/perf_event.h 2011-02-01 15:41:55.000000000 +0100 +@@ -34,6 +34,4 @@ + + #endif + +-static inline void init_hw_perf_events(void) {} +- + #endif /* _ASM_X86_PERF_EVENT_H */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 15:41:35.000000000 +0100 +@@ -106,7 +106,7 @@ static inline void pmd_free(struct mm_st + extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); + + static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, +- unsigned long adddress) ++ unsigned long address) + { + ___pmd_free_tlb(tlb, pmd); + } +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 15:55:04.000000000 +0100 +@@ -32,6 +32,7 @@ extern struct mm_struct *pgd_page_get_mm + + #define set_pte(ptep, pte) xen_set_pte(ptep, pte) + #define set_pte_at(mm, addr, ptep, pte) xen_set_pte_at(mm, addr, ptep, pte) ++#define set_pmd_at(mm, addr, pmdp, pmd) xen_set_pmd_at(mm, addr, pmdp, pmd) + + #define set_pte_atomic(ptep, pte) \ + xen_set_pte_atomic(ptep, pte) +@@ -56,6 +57,8 @@ extern struct mm_struct *pgd_page_get_mm + + #define pte_update(mm, addr, ptep) do { } while (0) + #define pte_update_defer(mm, addr, ptep) do { } while (0) ++#define pmd_update(mm, addr, ptep) do { } while (0) ++#define pmd_update_defer(mm, addr, ptep) do { } while (0) + + #define pgd_val(x) xen_pgd_val(x) + #define __pgd(x) xen_make_pgd(x) +@@ -89,6 +92,11 @@ static inline int pte_young(pte_t pte) + return pte_flags(pte) & _PAGE_ACCESSED; + } + ++static inline int pmd_young(pmd_t pmd) ++{ ++ return pmd_flags(pmd) & _PAGE_ACCESSED; ++} ++ + static inline int pte_write(pte_t 
pte) + { + return pte_flags(pte) & _PAGE_RW; +@@ -139,6 +147,23 @@ static inline int pmd_large(pmd_t pte) + (_PAGE_PSE | _PAGE_PRESENT); + } + ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++static inline int pmd_trans_splitting(pmd_t pmd) ++{ ++ return pmd_val(pmd) & _PAGE_SPLITTING; ++} ++ ++static inline int pmd_trans_huge(pmd_t pmd) ++{ ++ return pmd_val(pmd) & _PAGE_PSE; ++} ++ ++static inline int has_transparent_hugepage(void) ++{ ++ return cpu_has_pse; ++} ++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ ++ + static inline pte_t pte_set_flags(pte_t pte, pteval_t set) + { + pteval_t v = __pte_val(pte); +@@ -213,6 +238,57 @@ static inline pte_t pte_mkspecial(pte_t + return pte_set_flags(pte, _PAGE_SPECIAL); + } + ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) ++{ ++ pmdval_t v = native_pmd_val(pmd); ++ ++ return __pmd(v | set); ++} ++ ++static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) ++{ ++ pmdval_t v = native_pmd_val(pmd); ++ ++ return __pmd(v & ~clear); ++} ++ ++static inline pmd_t pmd_mkold(pmd_t pmd) ++{ ++ return pmd_clear_flags(pmd, _PAGE_ACCESSED); ++} ++ ++static inline pmd_t pmd_wrprotect(pmd_t pmd) ++{ ++ return pmd_clear_flags(pmd, _PAGE_RW); ++} ++ ++static inline pmd_t pmd_mkdirty(pmd_t pmd) ++{ ++ return pmd_set_flags(pmd, _PAGE_DIRTY); ++} ++ ++static inline pmd_t pmd_mkhuge(pmd_t pmd) ++{ ++ return pmd_set_flags(pmd, _PAGE_PSE); ++} ++ ++static inline pmd_t pmd_mkyoung(pmd_t pmd) ++{ ++ return pmd_set_flags(pmd, _PAGE_ACCESSED); ++} ++ ++static inline pmd_t pmd_mkwrite(pmd_t pmd) ++{ ++ return pmd_set_flags(pmd, _PAGE_RW); ++} ++ ++static inline pmd_t pmd_mknotpresent(pmd_t pmd) ++{ ++ return pmd_clear_flags(pmd, _PAGE_PRESENT); ++} ++#endif ++ + /* + * Mask out unsupported bits in a present pgprot. Non-present pgprots + * can use those bits for other purposes, so leave them be. 
+@@ -253,6 +329,18 @@ static inline pte_t pte_modify(pte_t pte + return __pte(val); + } + ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) ++{ ++ pmdval_t val = pmd_val(pmd); ++ ++ val &= _HPAGE_CHG_MASK; ++ val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; ++ ++ return __pmd(val); ++} ++#endif ++ + /* mprotect needs to preserve PAT bits when updating vm_page_prot */ + #define pgprot_modify pgprot_modify + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +@@ -353,7 +441,7 @@ static inline unsigned long pmd_page_vad + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +-#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) ++#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT) + + /* + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] +@@ -529,6 +617,14 @@ static inline pte_t xen_local_ptep_get_a + return res; + } + ++static inline pmd_t xen_local_pmdp_get_and_clear(pmd_t *pmdp) ++{ ++ pmd_t res = *pmdp; ++ ++ xen_pmd_clear(pmdp); ++ return res; ++} ++ + static inline void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep , pte_t pte) + { +@@ -537,6 +633,12 @@ static inline void xen_set_pte_at(struct + xen_set_pte(ptep, pte); + } + ++static inline void xen_set_pmd_at(struct mm_struct *mm, unsigned long addr, ++ pmd_t *pmdp , pmd_t pmd) ++{ ++ xen_set_pmd(pmdp, pmd); ++} ++ + static inline void xen_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) + { +@@ -641,6 +743,53 @@ static inline void ptep_set_wrprotect(st + + #define flush_tlb_fix_spurious_fault(vma, address) + ++#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) ++ ++#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS ++extern int pmdp_set_access_flags(struct vm_area_struct *vma, ++ unsigned long address, pmd_t *pmdp, ++ pmd_t entry, int dirty); ++ ++#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG ++extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, ++ unsigned long addr, pmd_t *pmdp); ++ ++#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH ++extern int pmdp_clear_flush_young(struct vm_area_struct *vma, ++ unsigned long address, pmd_t *pmdp); ++ ++ ++#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH ++extern void pmdp_splitting_flush(struct vm_area_struct *vma, ++ unsigned long addr, pmd_t *pmdp); ++ ++#define __HAVE_ARCH_PMD_WRITE ++static inline int pmd_write(pmd_t pmd) ++{ ++ return pmd_flags(pmd) & _PAGE_RW; ++} ++ ++#define __HAVE_ARCH_PMDP_GET_AND_CLEAR ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr, ++ pmd_t *pmdp) ++{ ++ pmd_t pmd = xen_pmdp_get_and_clear(pmdp); ++ pmd_update(mm, addr, pmdp); ++ return pmd; ++} ++#endif ++ ++#define __HAVE_ARCH_PMDP_SET_WRPROTECT ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++static inline void pmdp_set_wrprotect(struct mm_struct *mm, ++ unsigned long addr, pmd_t *pmdp) ++{ ++ clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); ++ pmd_update(mm, addr, pmdp); ++} ++#endif ++ + /* + * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); + * +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 15:47:16.000000000 +0100 +@@ -106,6 +106,31 @@ static inline pte_t xen_ptep_get_and_cle + #define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \ + ((_pte).pte_high 
<< (32-PAGE_SHIFT))) + ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++#ifdef CONFIG_SMP ++union split_pmd { ++ struct { ++ u32 pmd_low; ++ u32 pmd_high; ++ }; ++ pmd_t pmd; ++}; ++static inline pmd_t xen_pmdp_get_and_clear(pmd_t *pmdp) ++{ ++ union split_pmd res, *orig = (union split_pmd *)pmdp; ++ ++ /* xchg acts as a barrier before setting of the high bits */ ++ res.pmd_low = xchg(&orig->pmd_low, 0); ++ res.pmd_high = orig->pmd_high; ++ orig->pmd_high = 0; ++ ++ return res.pmd; ++} ++#else ++#define xen_pmdp_get_and_clear(xp) xen_local_pmdp_get_and_clear(xp) ++#endif ++#endif ++ + /* + * Bits 0, 6 and 7 are taken in the low part of the pte, + * put the 32 bits of offset into the high part. +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:39:36.000000000 +0100 +@@ -65,6 +65,16 @@ static inline void xen_set_pte_atomic(pt + xen_set_pte(ptep, pte); + } + ++static inline void xen_set_pmd(pmd_t *pmdp, pmd_t pmd) ++{ ++ xen_l2_entry_update(pmdp, pmd); ++} ++ ++static inline void xen_pmd_clear(pmd_t *pmd) ++{ ++ xen_set_pmd(pmd, xen_make_pmd(0)); ++} ++ + #ifdef CONFIG_SMP + static inline pte_t xen_ptep_get_and_clear(pte_t *xp, pte_t ret) + { +@@ -74,15 +84,16 @@ static inline pte_t xen_ptep_get_and_cle + #define xen_ptep_get_and_clear(xp, pte) xen_local_ptep_get_and_clear(xp, pte) + #endif + +-static inline void xen_set_pmd(pmd_t *pmdp, pmd_t pmd) +-{ +- xen_l2_entry_update(pmdp, pmd); +-} +- +-static inline void xen_pmd_clear(pmd_t *pmd) ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++#ifdef CONFIG_SMP ++static inline pmd_t xen_pmdp_get_and_clear(pmd_t *xp) + { +- xen_set_pmd(pmd, xen_make_pmd(0)); ++ return xen_make_pmd(xchg(&xp->pmd, 0)); + } ++#else ++#define xen_pmdp_get_and_clear(xp) xen_local_pmdp_get_and_clear(xp) ++#endif ++#endif + + static inline void xen_set_pud(pud_t *pudp, pud_t pud) + { +@@ -175,6 +186,7 @@ extern void cleanup_highmap(void); + #define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK) + + #define __HAVE_ARCH_PTE_SAME ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_X86_PGTABLE_64_H */ +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_types.h 2011-02-01 14:54:13.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_types.h 2011-02-01 15:41:35.000000000 +0100 +@@ -22,6 +22,7 @@ + #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ + #define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 + #define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 ++#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ + #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + + /* If _PAGE_BIT_PRESENT is clear, we use these: */ +@@ -45,6 +46,7 @@ + #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) + #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) + #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) ++#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING) + #define __HAVE_ARCH_PTE_SPECIAL + + #ifdef CONFIG_KMEMCHECK +@@ -78,6 +80,7 @@ extern unsigned int __kernel_page_user; + /* Set of bits not changed in pte_modify */ + #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_CACHE_MASK | _PAGE_IOMAP | \ + _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY) ++#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) + + /* + * PAT settings are part of the hypervisor interface, which sets the +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:47:48.000000000 
+0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor.h 2011-03-03 16:47:59.000000000 +0100 +@@ -152,10 +152,9 @@ extern __u32 cpu_caps_set[NCAPINTS]; + #ifdef CONFIG_SMP + DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); + #define cpu_data(cpu) per_cpu(cpu_info, cpu) +-#define current_cpu_data __get_cpu_var(cpu_info) + #else ++#define cpu_info boot_cpu_data + #define cpu_data(cpu) boot_cpu_data +-#define current_cpu_data boot_cpu_data + #endif + + extern const struct seq_operations cpuinfo_op; +@@ -716,10 +715,11 @@ extern void select_idle_routine(const st + extern void init_c1e_mask(void); + + extern unsigned long boot_option_idle_override; +-extern unsigned long idle_halt; +-extern unsigned long idle_nomwait; + extern bool c1e_detected; + ++enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, ++ IDLE_POLL, IDLE_FORCE_MWAIT}; ++ + extern void enable_sep_cpu(void); + extern int sysenter_setup(void); + +@@ -856,7 +856,7 @@ extern unsigned long thread_saved_pc(str + /* + * The below -8 is to reserve 8 bytes on top of the ring0 stack. + * This is necessary to guarantee that the entire "struct pt_regs" +- * is accessable even if the CPU haven't stored the SS/ESP registers ++ * is accessible even if the CPU haven't stored the SS/ESP registers + * on the stack (interrupt gate does not save these registers + * when switching to the same priv ring). + * Therefore beware: accessing the ss/esp fields of the +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:12:15.000000000 +0100 ++++ head-2011-03-17/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:12:54.000000000 +0100 +@@ -47,10 +47,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_ + #ifndef CONFIG_XEN + + /* Static state in head.S used to set up a CPU */ +-extern struct { +- void *sp; +- unsigned short ss; +-} stack_start; ++extern unsigned long stack_start; /* Initial stack pointer address */ + + struct smp_ops { + void (*smp_prepare_boot_cpu)(void); +--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_64.h 2011-02-01 14:39:24.000000000 +0100 ++++ /dev/null 1970-01-01 00:00:00.000000000 +0000 +@@ -1,20 +0,0 @@ +-#ifndef _ASM_X86_SYSTEM_64_H +-#define _ASM_X86_SYSTEM_64_H +- +-#include +-#include +- +- +-static inline unsigned long read_cr8(void) +-{ +- return 0; +-} +- +-static inline void write_cr8(unsigned long val) +-{ +- BUG_ON(val); +-} +- +-#include +- +-#endif /* _ASM_X86_SYSTEM_64_H */ +--- head-2011-03-17.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 10:59:49.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/boot.c 2011-03-11 11:06:22.000000000 +0100 +@@ -73,10 +73,11 @@ int acpi_sci_override_gsi __initdata; + #ifndef CONFIG_XEN + int acpi_skip_timer_override __initdata; + int acpi_use_timer_override __initdata; ++int acpi_fix_pin2_polarity __initdata; + #else + #define acpi_skip_timer_override 0 ++#define acpi_fix_pin2_polarity 0 + #endif +-int acpi_fix_pin2_polarity __initdata; + + #ifdef CONFIG_X86_LOCAL_APIC + static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; +--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/acpi/sleep-xen.c 2011-02-08 10:25:49.000000000 +0100 +@@ -12,10 +12,8 @@ + #include + #include + #include +- +-#ifdef CONFIG_X86_32 + #include +-#endif ++#include + + #include "realmode/wakeup.h" + #include "sleep.h" +@@ -103,7 +101,7 @@ int acpi_save_state_mem(void) + #else /* CONFIG_64BIT */ + header->trampoline_segment = setup_trampoline() >> 4; + 
#ifdef CONFIG_SMP +- stack_start.sp = temp_stack + sizeof(temp_stack); ++ stack_start = (unsigned long)temp_stack + sizeof(temp_stack); + early_gdt_descr.address = + (unsigned long)get_cpu_gdt_table(smp_processor_id()); + initial_gs = per_cpu_offset(smp_processor_id()); +@@ -155,8 +153,17 @@ void __init acpi_reserve_wakeup_memory(v + #endif + } + +- + #ifndef CONFIG_ACPI_PV_SLEEP ++int __init acpi_configure_wakeup_memory(void) ++{ ++ if (acpi_realmode) ++ set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); ++ ++ return 0; ++} ++arch_initcall(acpi_configure_wakeup_memory); ++ ++ + static int __init acpi_sleep_setup(char *str) + { + while ((str != NULL) && (*str != '\0')) { +--- head-2011-03-17.orig/arch/x86/kernel/apic/Makefile 2011-02-01 14:55:46.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/Makefile 2011-02-03 14:05:07.000000000 +0100 +@@ -20,8 +20,6 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o + obj-$(CONFIG_X86_ES7000) += es7000_32.o + obj-$(CONFIG_X86_SUMMIT) += summit_32.o + +-obj-$(CONFIG_XEN) += nmi.o +- + probe_64-$(CONFIG_XEN) := probe_32.o + + disabled-obj-$(CONFIG_XEN) := apic_flat_$(BITS).o apic_noop.o +--- head-2011-03-17.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/apic/io_apic-xen.c 2011-02-17 10:30:00.000000000 +0100 +@@ -50,7 +50,6 @@ + #include + #include + #include +-#include + #include + #include + +@@ -138,6 +137,26 @@ static int __init parse_noapic(char *str + } + early_param("noapic", parse_noapic); + ++/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ ++void mp_save_irq(struct mpc_intsrc *m) ++{ ++ int i; ++ ++ apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," ++ " IRQ %02x, APIC ID %x, APIC INT %02x\n", ++ m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, ++ m->srcbusirq, m->dstapic, m->dstirq); ++ ++ for (i = 0; i < mp_irq_entries; i++) { ++ if (!memcmp(&mp_irqs[i], m, sizeof(*m))) ++ return; ++ } ++ ++ memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m)); ++ if (++mp_irq_entries == MAX_IRQ_SOURCES) ++ panic("Max # of irq sources exceeded!!\n"); ++} ++ + #ifndef CONFIG_XEN + struct irq_pin_list { + int apic, pin; +@@ -149,6 +168,7 @@ static struct irq_pin_list *alloc_irq_pi + return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); + } + ++ + /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ + #ifdef CONFIG_SPARSE_IRQ + static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY]; +@@ -2014,8 +2034,7 @@ void disable_IO_APIC(void) + * + * by Matt Domsch Tue Dec 21 12:25:05 CST 1999 + */ +- +-void __init setup_ioapic_ids_from_mpc(void) ++void __init setup_ioapic_ids_from_mpc_nocheck(void) + { + union IO_APIC_reg_00 reg_00; + physid_mask_t phys_id_present_map; +@@ -2024,15 +2043,6 @@ void __init setup_ioapic_ids_from_mpc(vo + unsigned char old_id; + unsigned long flags; + +- if (acpi_ioapic) +- return; +- /* +- * Don't check I/O APIC IDs for xAPIC systems. They have +- * no meaning without the serial APIC bus. +- */ +- if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) +- || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) +- return; + /* + * This is broken; anything with a real cpu count has to + * circumvent this idiocy regardless. +@@ -2086,7 +2096,6 @@ void __init setup_ioapic_ids_from_mpc(vo + physids_or(phys_id_present_map, phys_id_present_map, tmp); + } + +- + /* + * We need to adjust the IRQ routing table + * if the ID changed. 
+@@ -2098,9 +2107,12 @@ void __init setup_ioapic_ids_from_mpc(vo + = mp_ioapics[apic_id].apicid; + + /* +- * Read the right value from the MPC table and +- * write it into the ID register. ++ * Update the ID register according to the right value ++ * from the MPC table if they are different. + */ ++ if (mp_ioapics[apic_id].apicid == reg_00.bits.ID) ++ continue; ++ + apic_printk(APIC_VERBOSE, KERN_INFO + "...changing IO-APIC physical APIC ID to %d ...", + mp_ioapics[apic_id].apicid); +@@ -2122,6 +2134,21 @@ void __init setup_ioapic_ids_from_mpc(vo + apic_printk(APIC_VERBOSE, " ok.\n"); + } + } ++ ++void __init setup_ioapic_ids_from_mpc(void) ++{ ++ ++ if (acpi_ioapic) ++ return; ++ /* ++ * Don't check I/O APIC IDs for xAPIC systems. They have ++ * no meaning without the serial APIC bus. ++ */ ++ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ++ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) ++ return; ++ setup_ioapic_ids_from_mpc_nocheck(); ++} + #endif + + int no_timer_check __initdata; +@@ -2382,7 +2409,7 @@ asmlinkage void smp_irq_move_cleanup_int + unsigned int irr; + struct irq_desc *desc; + struct irq_cfg *cfg; +- irq = __get_cpu_var(vector_irq)[vector]; ++ irq = __this_cpu_read(vector_irq[vector]); + + if (irq == -1) + continue; +@@ -2416,7 +2443,7 @@ asmlinkage void smp_irq_move_cleanup_int + apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); + goto unlock; + } +- __get_cpu_var(vector_irq)[vector] = -1; ++ __this_cpu_write(vector_irq[vector], -1); + unlock: + raw_spin_unlock(&desc->lock); + } +@@ -2728,24 +2755,6 @@ static void lapic_register_intr(int irq) + "edge"); + } + +-static void __init setup_nmi(void) +-{ +- /* +- * Dirty trick to enable the NMI watchdog ... +- * We put the 8259A master into AEOI mode and +- * unmask on all local APICs LVT0 as NMI. +- * +- * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') +- * is from Maciej W. Rozycki - so we do not have to EOI from +- * the NMI handler or the timer interrupt. +- */ +- apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); +- +- enable_NMI_through_LVT0(); +- +- apic_printk(APIC_VERBOSE, " done.\n"); +-} +- + /* + * This looks a bit hackish but it's about the only one way of sending + * a few INTA cycles to 8259As and any associated glue logic. ICR does +@@ -2851,15 +2860,6 @@ static inline void __init check_timer(vo + */ + apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); + legacy_pic->init(1); +-#ifdef CONFIG_X86_32 +- { +- unsigned int ver; +- +- ver = apic_read(APIC_LVR); +- ver = GET_APIC_VERSION(ver); +- timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); +- } +-#endif + + pin1 = find_isa_irq_pin(0, mp_INT); + apic1 = find_isa_irq_apic(0, mp_INT); +@@ -2907,10 +2907,6 @@ static inline void __init check_timer(vo + unmask_ioapic(cfg); + } + if (timer_irq_works()) { +- if (nmi_watchdog == NMI_IO_APIC) { +- setup_nmi(); +- legacy_pic->unmask(0); +- } + if (disable_timer_pin_1 > 0) + clear_IO_APIC_pin(0, pin1); + goto out; +@@ -2936,11 +2932,6 @@ static inline void __init check_timer(vo + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); + timer_through_8259 = 1; +- if (nmi_watchdog == NMI_IO_APIC) { +- legacy_pic->mask(0); +- setup_nmi(); +- legacy_pic->unmask(0); +- } + goto out; + } + /* +@@ -2952,15 +2943,6 @@ static inline void __init check_timer(vo + apic_printk(APIC_QUIET, KERN_INFO "....... 
failed.\n"); + } + +- if (nmi_watchdog == NMI_IO_APIC) { +- apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work " +- "through the IO-APIC - disabling NMI Watchdog!\n"); +- nmi_watchdog = NMI_NONE; +- } +-#ifdef CONFIG_X86_32 +- timer_ack = 0; +-#endif +- + apic_printk(APIC_QUIET, KERN_INFO + "...trying to set up timer as Virtual Wire IRQ...\n"); + +@@ -3741,7 +3723,7 @@ int __init io_apic_get_redir_entries (in + } + + #ifndef CONFIG_XEN +-void __init probe_nr_irqs_gsi(void) ++static void __init probe_nr_irqs_gsi(void) + { + int nr; + +@@ -4069,7 +4051,7 @@ static struct resource * __init ioapic_s + return res; + } + +-void __init ioapic_init_mappings(void) ++void __init ioapic_and_gsi_init(void) + { + unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; + struct resource *ioapic_res; +@@ -4107,6 +4089,8 @@ fake_ioapic_page: + ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; + ioapic_res++; + } ++ ++ probe_nr_irqs_gsi(); + } + + void __init ioapic_insert_resources(void) +@@ -4132,6 +4116,9 @@ int mp_find_ioapic(u32 gsi) + { + int i = 0; + ++ if (nr_ioapics == 0) ++ return -1; ++ + /* Find the IOAPIC that manages this GSI. */ + for (i = 0; i < nr_ioapics; i++) { + if ((gsi >= mp_gsi_routing[i].gsi_base) +@@ -4220,7 +4207,8 @@ void __init pre_init_apic_IRQ0(void) + + printk(KERN_INFO "Early APIC setup for system timer0\n"); + #ifndef CONFIG_SMP +- phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); ++ physid_set_mask_of_physid(boot_cpu_physical_apicid, ++ &phys_cpu_present_map); + #endif + /* Make sure the irq descriptor is set up */ + cfg = alloc_irq_and_cfg_at(0, 0); +--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:43:08.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:43:14.000000000 +0100 +@@ -931,7 +931,6 @@ void __init identify_boot_cpu(void) + #else + vgetcpu_set_mode(); + #endif +- init_hw_perf_events(); + } + + #ifdef CONFIG_XEN +--- head-2011-03-17.orig/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 10:06:37.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-02-08 10:08:56.000000000 +0100 +@@ -965,7 +965,7 @@ static struct attribute *default_attrs[] + NULL + }; + +-#ifdef CONFIG_AMD_NB ++#if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN) + static struct attribute ** __cpuinit amd_l3_attrs(void) + { + static struct attribute **attrs; +@@ -1105,7 +1105,7 @@ static int __cpuinit cache_add_dev(struc + this_leaf = CPUID4_INFO_IDX(cpu, i); + + ktype_cache.default_attrs = default_attrs; +-#ifdef CONFIG_AMD_NB ++#if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN) + if (this_leaf->l3) + ktype_cache.default_attrs = amd_l3_attrs(); + #endif +--- head-2011-03-17.orig/arch/x86/kernel/e820-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/e820-xen.c 2011-02-01 15:41:35.000000000 +0100 +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + +--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-02-01 15:41:35.000000000 +0100 +@@ -272,7 +272,7 @@ static int __init setup_early_printk(cha + if (!strncmp(buf, "xen", 3)) + early_console_register(&xenboot_console, keep); + #endif +-#ifdef CONFIG_X86_MRST_EARLY_PRINTK ++#ifdef CONFIG_EARLY_PRINTK_MRST + if (!strncmp(buf, "mrst", 4)) { + mrst_early_console_init(); + early_console_register(&early_mrst_console, keep); +@@ -282,7 +282,6 @@ static int __init setup_early_printk(cha 
+ hsu_early_console_init(); + early_console_register(&early_hsu_console, keep); + } +- + #endif + buf++; + } +--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:41:35.000000000 +0100 +@@ -1676,6 +1676,16 @@ ENTRY(general_protection) + CFI_ENDPROC + END(general_protection) + ++#ifdef CONFIG_KVM_GUEST ++ENTRY(async_page_fault) ++ RING0_EC_FRAME ++ pushl $do_async_page_fault ++ CFI_ADJUST_CFA_OFFSET 4 ++ jmp error_code ++ CFI_ENDPROC ++END(async_page_fault) ++#endif ++ + /* + * End of kprobes section + */ +--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:41:35.000000000 +0100 +@@ -331,17 +331,21 @@ NMI_MASK = 0x80000000 + ENTRY(save_args) + XCPT_FRAME + cld +- movq_cfi rdi, RDI+16-ARGOFFSET +- movq_cfi rsi, RSI+16-ARGOFFSET +- movq_cfi rdx, RDX+16-ARGOFFSET +- movq_cfi rcx, RCX+16-ARGOFFSET +- movq_cfi rax, RAX+16-ARGOFFSET +- movq_cfi r8, R8+16-ARGOFFSET +- movq_cfi r9, R9+16-ARGOFFSET +- movq_cfi r10, R10+16-ARGOFFSET +- movq_cfi r11, R11+16-ARGOFFSET ++ /* ++ * start from rbp in pt_regs and jump over ++ * return address. ++ */ ++ movq_cfi rdi, RDI+8-RBP ++ movq_cfi rsi, RSI+8-RBP ++ movq_cfi rdx, RDX+8-RBP ++ movq_cfi rcx, RCX+8-RBP ++ movq_cfi rax, RAX+8-RBP ++ movq_cfi r8, R8+8-RBP ++ movq_cfi r9, R9+8-RBP ++ movq_cfi r10, R10+8-RBP ++ movq_cfi r11, R11+8-RBP + +- leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ ++ leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ + movq_cfi rbp, 8 /* push %rbp */ + leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ + testl $3, CS(%rdi) +@@ -1142,6 +1146,9 @@ paranoidzeroentry_ist int3 do_int3 DEBUG + paranoiderrorentry stack_segment do_stack_segment + errorentry general_protection do_general_protection + errorentry page_fault do_page_fault ++#ifdef CONFIG_KVM_GUEST ++errorentry async_page_fault do_async_page_fault ++#endif + #ifdef CONFIG_X86_MCE + paranoidzeroentry machine_check *machine_check_vector(%rip) + #endif +--- head-2011-03-17.orig/arch/x86/kernel/head32-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/head32-xen.c 2011-02-01 15:41:35.000000000 +0100 +@@ -81,6 +81,9 @@ void __init i386_start_kernel(void) + case X86_SUBARCH_MRST: + x86_mrst_early_setup(); + break; ++ case X86_SUBARCH_CE4100: ++ x86_ce4100_early_setup(); ++ break; + default: + i386_default_early_setup(); + break; +--- head-2011-03-17.orig/arch/x86/kernel/irq-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/irq-xen.c 2011-02-18 15:17:23.000000000 +0100 +@@ -4,6 +4,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -254,7 +255,7 @@ unsigned int __irq_entry do_IRQ(struct p + exit_idle(); + irq_enter(); + +- irq = __get_cpu_var(vector_irq)[vector]; ++ irq = __this_cpu_read(vector_irq[vector]); + + if (!handle_irq(irq, regs)) { + ack_APIC_irq(); +@@ -294,6 +295,15 @@ void smp_x86_platform_ipi(struct pt_regs + } + #endif + ++#ifdef CONFIG_OF ++unsigned int irq_create_of_mapping(struct device_node *controller, ++ const u32 *intspec, unsigned int intsize) ++{ ++ return intspec[0]; ++} ++EXPORT_SYMBOL_GPL(irq_create_of_mapping); ++#endif ++ + #ifdef CONFIG_HOTPLUG_CPU + #include + /* A cpu has been removed from cpu_online_mask. Reset irq affinities. 
*/ +@@ -369,7 +379,8 @@ void fixup_irqs(void) + continue; + + if (xen_test_irq_pending(irq)) { +- data = irq_get_irq_data(irq); ++ desc = irq_to_desc(irq); ++ data = &desc->irq_data; + raw_spin_lock(&desc->lock); + if (data->chip->irq_retrigger) + data->chip->irq_retrigger(data); +--- head-2011-03-17.orig/arch/x86/kernel/mpparse-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/mpparse-xen.c 2011-02-01 16:09:24.000000000 +0100 +@@ -130,21 +130,8 @@ static void __init MP_bus_info(struct mp + + static void __init MP_ioapic_info(struct mpc_ioapic *m) + { +- if (!(m->flags & MPC_APIC_USABLE)) +- return; +- +- printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", +- m->apicid, m->apicver, m->apicaddr); +- +- mp_register_ioapic(m->apicid, m->apicaddr, gsi_top); +-} +- +-static void print_MP_intsrc_info(struct mpc_intsrc *m) +-{ +- apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," +- " IRQ %02x, APIC ID %x, APIC INT %02x\n", +- m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, +- m->srcbusirq, m->dstapic, m->dstirq); ++ if (m->flags & MPC_APIC_USABLE) ++ mp_register_ioapic(m->apicid, m->apicaddr, gsi_top); + } + + static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq) +@@ -156,73 +143,11 @@ static void __init print_mp_irq_info(str + mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq); + } + +-static void __init assign_to_mp_irq(struct mpc_intsrc *m, +- struct mpc_intsrc *mp_irq) +-{ +- mp_irq->dstapic = m->dstapic; +- mp_irq->type = m->type; +- mp_irq->irqtype = m->irqtype; +- mp_irq->irqflag = m->irqflag; +- mp_irq->srcbus = m->srcbus; +- mp_irq->srcbusirq = m->srcbusirq; +- mp_irq->dstirq = m->dstirq; +-} +- +-static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq, +- struct mpc_intsrc *m) +-{ +- m->dstapic = mp_irq->dstapic; +- m->type = mp_irq->type; +- m->irqtype = mp_irq->irqtype; +- m->irqflag = mp_irq->irqflag; +- m->srcbus = mp_irq->srcbus; +- m->srcbusirq = mp_irq->srcbusirq; +- m->dstirq = mp_irq->dstirq; +-} +- +-static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq, +- struct mpc_intsrc *m) +-{ +- if (mp_irq->dstapic != m->dstapic) +- return 1; +- if (mp_irq->type != m->type) +- return 2; +- if (mp_irq->irqtype != m->irqtype) +- return 3; +- if (mp_irq->irqflag != m->irqflag) +- return 4; +- if (mp_irq->srcbus != m->srcbus) +- return 5; +- if (mp_irq->srcbusirq != m->srcbusirq) +- return 6; +- if (mp_irq->dstirq != m->dstirq) +- return 7; +- +- return 0; +-} +- +-static void __init MP_intsrc_info(struct mpc_intsrc *m) +-{ +- int i; +- +- print_MP_intsrc_info(m); +- +- for (i = 0; i < mp_irq_entries; i++) { +- if (!mp_irq_mpc_intsrc_cmp(&mp_irqs[i], m)) +- return; +- } +- +- assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]); +- if (++mp_irq_entries == MAX_IRQ_SOURCES) +- panic("Max # of irq sources exceeded!!\n"); +-} + #else /* CONFIG_X86_IO_APIC */ + static inline void __init MP_bus_info(struct mpc_bus *m) {} + static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {} +-static inline void __init MP_intsrc_info(struct mpc_intsrc *m) {} + #endif /* CONFIG_X86_IO_APIC */ + +- + static void __init MP_lintsrc_info(struct mpc_lintsrc *m) + { + apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x," +@@ -234,7 +159,6 @@ static void __init MP_lintsrc_info(struc + /* + * Read/parse the MPC + */ +- + static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str) + { + +@@ -289,20 +213,6 @@ static void __init smp_dump_mptable(stru + + void __init 
default_smp_read_mpc_oem(struct mpc_table *mpc) { } + +-static void __init smp_register_lapic_address(unsigned long address) +-{ +-#ifndef CONFIG_XEN +- mp_lapic_addr = address; +- +- set_fixmap_nocache(FIX_APIC_BASE, address); +- if (boot_cpu_physical_apicid == -1U) { +- boot_cpu_physical_apicid = read_apic_id(); +- apic_version[boot_cpu_physical_apicid] = +- GET_APIC_VERSION(apic_read(APIC_LVR)); +- } +-#endif +-} +- + static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) + { + char str[16]; +@@ -318,18 +228,14 @@ static int __init smp_read_mpc(struct mp + #ifdef CONFIG_X86_32 + generic_mps_oem_check(mpc, oem, str); + #endif +- /* save the local APIC address, it might be non-default */ ++ /* Initialize the lapic mapping */ + if (!acpi_lapic) +- mp_lapic_addr = mpc->lapic; ++ register_lapic_address(mpc->lapic); + #endif + + if (early) + return 1; + +- /* Initialize the lapic mapping */ +- if (!acpi_lapic) +- smp_register_lapic_address(mpc->lapic); +- + if (mpc->oemptr) + x86_init.mpparse.smp_read_mpc_oem(mpc); + +@@ -355,7 +261,7 @@ static int __init smp_read_mpc(struct mp + skip_entry(&mpt, &count, sizeof(struct mpc_ioapic)); + break; + case MP_INTSRC: +- MP_intsrc_info((struct mpc_intsrc *)mpt); ++ mp_save_irq((struct mpc_intsrc *)mpt); + skip_entry(&mpt, &count, sizeof(struct mpc_intsrc)); + break; + case MP_LINTSRC: +@@ -447,13 +353,13 @@ static void __init construct_default_ioi + + intsrc.srcbusirq = i; + intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ +- MP_intsrc_info(&intsrc); ++ mp_save_irq(&intsrc); + } + + intsrc.irqtype = mp_ExtINT; + intsrc.srcbusirq = 0; + intsrc.dstirq = 0; /* 8259A to INTIN0 */ +- MP_intsrc_info(&intsrc); ++ mp_save_irq(&intsrc); + } + + +@@ -824,11 +730,11 @@ static void __init check_irq_src(struct + int i; + + apic_printk(APIC_VERBOSE, "OLD "); +- print_MP_intsrc_info(m); ++ print_mp_irq_info(m); + + i = get_MP_intsrc_index(m); + if (i > 0) { +- assign_to_mpc_intsrc(&mp_irqs[i], m); ++ memcpy(m, &mp_irqs[i], sizeof(*m)); + apic_printk(APIC_VERBOSE, "NEW "); + print_mp_irq_info(&mp_irqs[i]); + return; +@@ -915,14 +821,14 @@ static int __init replace_intsrc_all(st + if (nr_m_spare > 0) { + apic_printk(APIC_VERBOSE, "*NEW* found\n"); + nr_m_spare--; +- assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); ++ memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i])); + m_spare[nr_m_spare] = NULL; + } else { + struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; + count += sizeof(struct mpc_intsrc); + if (check_slot(mpc_new_phys, mpc_new_length, count) < 0) + goto out; +- assign_to_mpc_intsrc(&mp_irqs[i], m); ++ memcpy(m, &mp_irqs[i], sizeof(*m)); + mpc->length = count; + mpt += sizeof(struct mpc_intsrc); + } +--- head-2011-03-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:11:01.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process-xen.c 2011-03-03 16:13:18.000000000 +0100 +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -23,11 +24,6 @@ + #include + #include + +-unsigned long idle_halt; +-EXPORT_SYMBOL(idle_halt); +-unsigned long idle_nomwait; +-EXPORT_SYMBOL(idle_nomwait); +- + struct kmem_cache *task_xstate_cachep; + EXPORT_SYMBOL_GPL(task_xstate_cachep); + +@@ -93,27 +89,36 @@ void exit_thread(void) + void show_regs(struct pt_regs *regs) + { + show_registers(regs); +- show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), +- regs->bp); ++ show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs)); + } + + void show_regs_common(void) + { +- const char 
*board, *product; ++ const char *vendor, *product, *board; + +- board = dmi_get_system_info(DMI_BOARD_NAME); +- if (!board) +- board = ""; ++ vendor = dmi_get_system_info(DMI_SYS_VENDOR); ++ if (!vendor) ++ vendor = ""; + product = dmi_get_system_info(DMI_PRODUCT_NAME); + if (!product) + product = ""; + ++ /* Board Name is optional */ ++ board = dmi_get_system_info(DMI_BOARD_NAME); ++ + printk(KERN_CONT "\n"); +- printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", ++ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", + current->pid, current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), +- init_utsname()->version, board, product); ++ init_utsname()->version); ++ printk(KERN_CONT " "); ++ printk(KERN_CONT "%s %s", vendor, product); ++ if (board) { ++ printk(KERN_CONT "/"); ++ printk(KERN_CONT "%s", board); ++ } ++ printk(KERN_CONT "\n"); + } + + void flush_thread(void) +@@ -315,7 +320,7 @@ long sys_execve(const char __user *name, + /* + * Idle related variables and functions + */ +-unsigned long boot_option_idle_override = 0; ++unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; + EXPORT_SYMBOL(boot_option_idle_override); + + /* +@@ -331,6 +336,7 @@ EXPORT_SYMBOL(pm_idle); + void xen_idle(void) + { + trace_power_start(POWER_CSTATE, 1, smp_processor_id()); ++ trace_cpu_idle(1, smp_processor_id()); + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we +@@ -343,6 +349,8 @@ void xen_idle(void) + else + local_irq_enable(); + current_thread_info()->status |= TS_POLLING; ++ trace_power_end(smp_processor_id()); ++ trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + } + #ifdef CONFIG_APM_MODULE + EXPORT_SYMBOL(default_idle); +@@ -396,9 +404,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); + */ + void mwait_idle_with_hints(unsigned long ax, unsigned long cx) + { +- trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); + if (!need_resched()) { +- if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) ++ if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) + clflush((void *)¤t_thread_info()->flags); + + __monitor((void *)¤t_thread_info()->flags, 0, 0); +@@ -413,7 +420,8 @@ static void mwait_idle(void) + { + if (!need_resched()) { + trace_power_start(POWER_CSTATE, 1, smp_processor_id()); +- if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) ++ trace_cpu_idle(1, smp_processor_id()); ++ if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) + clflush((void *)¤t_thread_info()->flags); + + __monitor((void *)¤t_thread_info()->flags, 0, 0); +@@ -422,6 +430,8 @@ static void mwait_idle(void) + __sti_mwait(0, 0); + else + local_irq_enable(); ++ trace_power_end(smp_processor_id()); ++ trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + } else + local_irq_enable(); + } +@@ -435,10 +445,12 @@ static void mwait_idle(void) + static void poll_idle(void) + { + trace_power_start(POWER_CSTATE, 0, smp_processor_id()); ++ trace_cpu_idle(0, smp_processor_id()); + local_irq_enable(); + while (!need_resched()) + cpu_relax(); +- trace_power_end(0); ++ trace_power_end(smp_processor_id()); ++ trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + } + + #ifndef CONFIG_XEN +@@ -454,17 +466,16 @@ static void poll_idle(void) + * + * idle=mwait overrides this decision and forces the usage of mwait. 
+ */ +-static int __cpuinitdata force_mwait; + + #define MWAIT_INFO 0x05 + #define MWAIT_ECX_EXTENDED_INFO 0x01 + #define MWAIT_EDX_C1 0xf0 + +-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) ++int mwait_usable(const struct cpuinfo_x86 *c) + { + u32 eax, ebx, ecx, edx; + +- if (force_mwait) ++ if (boot_option_idle_override == IDLE_FORCE_MWAIT) + return 1; + + if (c->cpuid_level < MWAIT_INFO) +@@ -589,10 +600,11 @@ static int __init idle_setup(char *str) + if (!strcmp(str, "poll")) { + printk("using polling idle threads.\n"); + pm_idle = poll_idle; ++ boot_option_idle_override = IDLE_POLL; + #ifndef CONFIG_XEN +- } else if (!strcmp(str, "mwait")) +- force_mwait = 1; +- else if (!strcmp(str, "halt")) { ++ } else if (!strcmp(str, "mwait")) { ++ boot_option_idle_override = IDLE_FORCE_MWAIT; ++ } else if (!strcmp(str, "halt")) { + /* + * When the boot option of idle=halt is added, halt is + * forced to be used for CPU idle. In such case CPU C2/C3 +@@ -601,8 +613,7 @@ static int __init idle_setup(char *str) + * the boot_option_idle_override. + */ + pm_idle = default_idle; +- idle_halt = 1; +- return 0; ++ boot_option_idle_override = IDLE_HALT; + } else if (!strcmp(str, "nomwait")) { + /* + * If the boot option of "idle=nomwait" is added, +@@ -610,13 +621,11 @@ static int __init idle_setup(char *str) + * states. In such case it won't touch the variable + * of boot_option_idle_override. + */ +- idle_nomwait = 1; +- return 0; ++ boot_option_idle_override = IDLE_NOMWAIT; + #endif + } else + return -1; + +- boot_option_idle_override = 1; + return 0; + } + early_param("idle", idle_setup); +--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-02-02 08:47:59.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-02-02 08:48:21.000000000 +0100 +@@ -59,8 +59,6 @@ + #include + #include + +-#include +- + asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); + asmlinkage void cstar_ret_from_fork(void) __asm__("cstar_ret_from_fork"); + +@@ -116,8 +114,6 @@ void cpu_idle(void) + stop_critical_timings(); + xen_idle(); + start_critical_timings(); +- +- trace_power_end(smp_processor_id()); + } + tick_nohz_restart_sched_tick(); + preempt_enable_no_resched(); +--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:47:56.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:48:24.000000000 +0100 +@@ -56,8 +56,6 @@ + #include + #include + +-#include +- + asmlinkage extern void ret_from_fork(void); + + static DEFINE_PER_CPU(unsigned char, is_idle); +@@ -145,8 +143,6 @@ void cpu_idle(void) + xen_idle(); + start_critical_timings(); + +- trace_power_end(smp_processor_id()); +- + /* In many cases the interrupt that ended idle + has already called exit_idle. But some idle + loops can be woken up without interrupt. 
*/ +--- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-03-03 16:25:11.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-03-03 16:25:29.000000000 +0100 +@@ -777,7 +777,7 @@ static u64 __init get_max_mapped(void) + void __init setup_arch(char **cmdline_p) + { + int acpi = 0; +- int k8 = 0; ++ int amd = 0; + unsigned long flags; + #ifdef CONFIG_XEN + unsigned int i; +@@ -1125,12 +1125,12 @@ void __init setup_arch(char **cmdline_p) + acpi = acpi_numa_init(); + #endif + +-#ifdef CONFIG_K8_NUMA ++#ifdef CONFIG_AMD_NUMA + if (!acpi) +- k8 = !k8_numa_init(0, max_pfn); ++ amd = !amd_numa_init(0, max_pfn); + #endif + +- initmem_init(0, max_pfn, acpi, k8); ++ initmem_init(0, max_pfn, acpi, amd); + memblock_find_dma_reserve(); + dma32_reserve_bootmem(); + +@@ -1259,10 +1259,7 @@ void __init setup_arch(char **cmdline_p) + + #ifndef CONFIG_XEN + init_apic_mappings(); +- ioapic_init_mappings(); +- +- /* need to wait for io_apic is mapped */ +- probe_nr_irqs_gsi(); ++ ioapic_and_gsi_init(); + + kvm_guest_init(); + +--- head-2011-03-17.orig/arch/x86/kernel/traps-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/kernel/traps-xen.c 2011-02-01 16:25:32.000000000 +0100 +@@ -87,6 +87,13 @@ EXPORT_SYMBOL_GPL(used_vectors); + + static int ignore_nmis; + ++int unknown_nmi_panic; ++/* ++ * Prevent NMI reason port (0x61) being accessed simultaneously, can ++ * only be used in NMI handler. ++ */ ++static DEFINE_RAW_SPINLOCK(nmi_reason_lock); ++ + static inline void conditional_sti(struct pt_regs *regs) + { + if (regs->flags & X86_EFLAGS_IF) +@@ -304,16 +311,23 @@ gp_in_kernel: + die("general protection fault", regs, error_code); + } + +-static notrace __kprobes void +-mem_parity_error(unsigned char reason, struct pt_regs *regs) ++static int __init setup_unknown_nmi_panic(char *str) + { +- printk(KERN_EMERG +- "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", +- reason, smp_processor_id()); ++ unknown_nmi_panic = 1; ++ return 1; ++} ++__setup("unknown_nmi_panic", setup_unknown_nmi_panic); + +- printk(KERN_EMERG +- "You have some hardware problem, likely on the PCI bus.\n"); ++static notrace __kprobes void ++pci_serr_error(unsigned char reason, struct pt_regs *regs) ++{ ++ pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", ++ reason, smp_processor_id()); + ++ /* ++ * On some machines, PCI SERR line is used to report memory ++ * errors. EDAC makes use of it. ++ */ + #if defined(CONFIG_EDAC) + if (edac_handler_set()) { + edac_atomic_assert_error(); +@@ -324,16 +338,18 @@ mem_parity_error(unsigned char reason, s + if (panic_on_unrecovered_nmi) + panic("NMI: Not continuing"); + +- printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); ++ pr_emerg("Dazed and confused, but trying to continue\n"); + +- /* Clear and disable the memory parity error line. */ +- clear_mem_error(reason); ++ /* Clear and disable the PCI SERR error line. */ ++ clear_serr_error(reason); + } + + static notrace __kprobes void + io_check_error(unsigned char reason, struct pt_regs *regs) + { +- printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n"); ++ pr_emerg( ++ "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", ++ reason, smp_processor_id()); + show_registers(regs); + + if (panic_on_io_nmi) +@@ -359,69 +375,50 @@ unknown_nmi_error(unsigned char reason, + return; + } + #endif +- printk(KERN_EMERG +- "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", +- reason, smp_processor_id()); ++ pr_emerg("Uhhuh. 
NMI received for unknown reason %02x on CPU %d.\n", ++ reason, smp_processor_id()); + +- printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); +- if (panic_on_unrecovered_nmi) ++ pr_emerg("Do you have a strange power saving mode enabled?\n"); ++ if (unknown_nmi_panic || panic_on_unrecovered_nmi) + panic("NMI: Not continuing"); + +- printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); ++ pr_emerg("Dazed and confused, but trying to continue\n"); + } + + static notrace __kprobes void default_do_nmi(struct pt_regs *regs) + { + unsigned char reason = 0; +- int cpu; +- +- cpu = smp_processor_id(); +- +- /* Only the BSP gets external NMIs from the system. */ +- if (!cpu) +- reason = get_nmi_reason(); +- +- if (!(reason & 0xc0)) { +- if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) +- == NOTIFY_STOP) +- return; +- +-#ifdef CONFIG_X86_LOCAL_APIC +- if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) +- == NOTIFY_STOP) +- return; + +-#ifndef CONFIG_LOCKUP_DETECTOR ++ /* ++ * CPU-specific NMI must be processed before non-CPU-specific ++ * NMI, otherwise we may lose it, because the CPU-specific ++ * NMI can not be detected/processed on other CPUs. ++ */ ++ if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP) ++ return; ++ ++ /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ ++ raw_spin_lock(&nmi_reason_lock); ++ reason = get_nmi_reason(); ++ ++ if (reason & NMI_REASON_MASK) { ++ if (reason & NMI_REASON_SERR) ++ pci_serr_error(reason, regs); ++ else if (reason & NMI_REASON_IOCHK) ++ io_check_error(reason, regs); ++#ifdef CONFIG_X86_32 + /* +- * Ok, so this is none of the documented NMI sources, +- * so it must be the NMI watchdog. ++ * Reassert NMI in case it became active ++ * meanwhile as it's edge-triggered: + */ +- if (nmi_watchdog_tick(regs, reason)) +- return; +- if (!do_nmi_callback(regs, cpu)) +-#endif /* !CONFIG_LOCKUP_DETECTOR */ +- unknown_nmi_error(reason, regs); +-#else +- unknown_nmi_error(reason, regs); ++ reassert_nmi(); + #endif +- ++ raw_spin_unlock(&nmi_reason_lock); + return; + } +- if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) +- return; ++ raw_spin_unlock(&nmi_reason_lock); + +- /* AK: following checks seem to be broken on modern chipsets. FIXME */ +- if (reason & 0x80) +- mem_parity_error(reason, regs); +- if (reason & 0x40) +- io_check_error(reason, regs); +-#ifdef CONFIG_X86_32 +- /* +- * Reassert NMI in case it became active meanwhile +- * as it's edge-triggered: +- */ +- reassert_nmi(); +-#endif ++ unknown_nmi_error(reason, regs); + } + + dotraplinkage notrace __kprobes void +@@ -439,14 +436,12 @@ do_nmi(struct pt_regs *regs, long error_ + + void stop_nmi(void) + { +- acpi_nmi_disable(); + ignore_nmis++; + } + + void restart_nmi(void) + { + ignore_nmis--; +- acpi_nmi_enable(); + } + + /* May run on IST stack. 
*/ +--- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-03-17 14:22:21.000000000 +0100 +@@ -237,15 +237,14 @@ void vmalloc_sync_all(void) + for (address = VMALLOC_START & PMD_MASK; + address >= TASK_SIZE && address < FIXADDR_TOP; + address += PMD_SIZE) { +- +- unsigned long flags; + struct page *page; + +- spin_lock_irqsave(&pgd_lock, flags); ++ spin_lock(&pgd_lock); + list_for_each_entry(page, &pgd_list, lru) { + spinlock_t *pgt_lock; + pmd_t *ret; + ++ /* the pgt_lock only for Xen */ + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + + spin_lock(pgt_lock); +@@ -255,7 +254,7 @@ void vmalloc_sync_all(void) + if (!ret) + break; + } +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + } + } + +@@ -836,6 +835,13 @@ mm_fault_error(struct pt_regs *regs, uns + unsigned long address, unsigned int fault) + { + if (fault & VM_FAULT_OOM) { ++ /* Kernel mode? Handle exceptions or die: */ ++ if (!(error_code & PF_USER)) { ++ up_read(¤t->mm->mmap_sem); ++ no_context(regs, error_code, address); ++ return; ++ } ++ + out_of_memory(regs, error_code, address); + } else { + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| +--- head-2011-03-17.orig/arch/x86/mm/init-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-01 15:41:35.000000000 +0100 +@@ -404,8 +404,9 @@ void free_init_pages(char *what, unsigne + /* + * We just marked the kernel text read only above, now that + * we are going to free part of that, we need to make that +- * writeable first. ++ * writeable and non-executable first. + */ ++ set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); + set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); + + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-01 15:41:35.000000000 +0100 +@@ -47,6 +47,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -242,7 +243,7 @@ page_table_range_init(unsigned long star + + static inline int is_kernel_text(unsigned long addr) + { +- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end) ++ if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) + return 1; + return 0; + } +@@ -775,6 +776,7 @@ void __init paging_init(void) + /* + * NOTE: at this point the bootmem allocator is fully available. + */ ++ olpc_dt_build_devicetree(); + sparse_init(); + zone_sizes_init(); + } +@@ -980,6 +982,23 @@ void set_kernel_text_ro(void) + set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); + } + ++static void mark_nxdata_nx(void) ++{ ++ /* ++ * When this called, init has already been executed and released, ++ * so everything past _etext sould be NX. ++ */ ++ unsigned long start = PFN_ALIGN(_etext); ++ /* ++ * This comes from is_kernel_text upper limit. 
Also HPAGE where used: ++ */ ++ unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; ++ ++ if (__supported_pte_mask & _PAGE_NX) ++ printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10); ++ set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT); ++} ++ + void mark_rodata_ro(void) + { + unsigned long start = PFN_ALIGN(_text); +@@ -1014,6 +1033,7 @@ void mark_rodata_ro(void) + printk(KERN_INFO "Testing CPA: write protecting again\n"); + set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); + #endif ++ mark_nxdata_nx(); + } + #endif + +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-03-17 14:22:21.000000000 +0100 +@@ -173,18 +173,18 @@ void sync_global_pgds(unsigned long star + + for (address = start; address <= end; address += PGDIR_SIZE) { + const pgd_t *pgd_ref = pgd_offset_k(address); +- unsigned long flags; + struct page *page; + + if (pgd_none(*pgd_ref)) + continue; + +- spin_lock_irqsave(&pgd_lock, flags); ++ spin_lock(&pgd_lock); + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + spinlock_t *pgt_lock; + + pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ /* the pgt_lock only for Xen */ + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + spin_lock(pgt_lock); + +@@ -196,7 +196,7 @@ void sync_global_pgds(unsigned long star + + spin_unlock(pgt_lock); + } +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + } + } + +--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-02-07 15:42:09.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:42:18.000000000 +0100 +@@ -163,6 +163,16 @@ int create_lookup_pte_addr(struct mm_str + + EXPORT_SYMBOL(create_lookup_pte_addr); + ++#ifdef CONFIG_MODULES ++/* ++ * Force the implementation of ioremap_page_range() to be pulled in from ++ * lib/lib.a even if there is no other reference from the core kernel to it ++ * (native uses it in __ioremap_caller()), so that it gets exported. ++ */ ++static void *const __section(.discard.ioremap) __used ++_ioremap_page_range = ioremap_page_range; ++#endif ++ + /* + * Fix up the linear direct mapping of the kernel to avoid cache attribute + * conflicts. +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-02-01 15:03:10.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-03-17 14:22:21.000000000 +0100 +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -56,12 +57,10 @@ static unsigned long direct_pages_count[ + + void update_page_count(int level, unsigned long pages) + { +- unsigned long flags; +- + /* Protect against CPA */ +- spin_lock_irqsave(&pgd_lock, flags); ++ spin_lock(&pgd_lock); + direct_pages_count[level] += pages; +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + } + + static void split_page_count(int level) +@@ -256,12 +255,12 @@ static inline pgprot_t static_protection + { + pgprot_t forbidden = __pgprot(0); + +-#ifndef CONFIG_XEN + /* + * The BIOS area between 640k and 1Mb needs to be executable for + * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. 
+ */ +- if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) ++#ifdef CONFIG_PCI_BIOS ++ if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) + pgprot_val(forbidden) |= _PAGE_NX; + #endif + +@@ -405,16 +404,16 @@ static int + try_preserve_large_page(pte_t *kpte, unsigned long address, + struct cpa_data *cpa) + { +- unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; ++ unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; + pte_t new_pte, old_pte, *tmp; +- pgprot_t old_prot, new_prot; ++ pgprot_t old_prot, new_prot, req_prot; + int i, do_split = 1; + unsigned int level; + + if (cpa->force_split) + return 1; + +- spin_lock_irqsave(&pgd_lock, flags); ++ spin_lock(&pgd_lock); + /* + * Check for races, another CPU might have split this page + * up already: +@@ -452,10 +451,10 @@ try_preserve_large_page(pte_t *kpte, uns + * We are safe now. Check whether the new pgprot is the same: + */ + old_pte = *kpte; +- old_prot = new_prot = pte_pgprot(old_pte); ++ old_prot = new_prot = req_prot = pte_pgprot(old_pte); + +- pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); +- pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); ++ pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); ++ pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); + + /* + * old_pte points to the large page base address. So we need +@@ -464,22 +463,21 @@ try_preserve_large_page(pte_t *kpte, uns + pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT); + cpa->pfn = pfn; + +- new_prot = static_protections(new_prot, address, pfn); ++ new_prot = static_protections(req_prot, address, pfn); + + /* + * We need to check the full range, whether + * static_protection() requires a different pgprot for one of + * the pages in the range we try to preserve: + */ +- if (pfn < max_mapnr) { +- addr = address + PAGE_SIZE; +- for (i = 1; i < cpa->numpages && ++pfn < max_mapnr; +- i++, addr += PAGE_SIZE) { +- pgprot_t chk_prot = static_protections(new_prot, addr, pfn); ++ addr = address & pmask; ++ pfn = pte_pfn(old_pte); ++ for (i = 0; i < (psize >> PAGE_SHIFT) && pfn < max_mapnr; ++ i++, addr += PAGE_SIZE, pfn++) { ++ pgprot_t chk_prot = static_protections(req_prot, addr, pfn); + +- if (pgprot_val(chk_prot) != pgprot_val(new_prot)) +- goto out_unlock; +- } ++ if (pgprot_val(chk_prot) != pgprot_val(new_prot)) ++ goto out_unlock; + } + + /* +@@ -499,7 +497,7 @@ try_preserve_large_page(pte_t *kpte, uns + * that we limited the number of possible pages already to + * the number of pages in the large page. + */ +- if (address == (nextpage_addr - psize) && cpa->numpages == numpages) { ++ if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) { + /* + * The address is aligned and the number of pages + * covers the full page. 
+@@ -511,14 +509,14 @@ try_preserve_large_page(pte_t *kpte, uns + } + + out_unlock: +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + + return do_split; + } + + static int split_large_page(pte_t *kpte, unsigned long address) + { +- unsigned long flags, mfn, mfninc = 1; ++ unsigned long mfn, mfninc = 1; + unsigned int i, level; + pte_t *pbase, *tmp; + pgprot_t ref_prot; +@@ -532,7 +530,7 @@ static int split_large_page(pte_t *kpte, + if (!base) + return -ENOMEM; + +- spin_lock_irqsave(&pgd_lock, flags); ++ spin_lock(&pgd_lock); + /* + * Check for races, another CPU might have split this page + * up for us already: +@@ -608,7 +606,7 @@ out_unlock: + */ + if (base) + __free_page(base); +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + + return 0; + } +--- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-03-17 14:26:03.000000000 +0100 +@@ -358,23 +358,23 @@ void mm_unpin(struct mm_struct *mm) + void mm_pin_all(void) + { + struct page *page; +- unsigned long flags; + + if (xen_feature(XENFEAT_writable_page_tables)) + return; + + /* + * Allow uninterrupted access to the pgd_list. Also protects +- * __pgd_pin() by disabling preemption. ++ * __pgd_pin() by ensuring preemption is disabled. + * All other CPUs must be at a safe point (e.g., in stop_machine + * or offlined entirely). + */ +- spin_lock_irqsave(&pgd_lock, flags); ++ BUG_ON(!irqs_disabled()); ++ spin_lock(&pgd_lock); + list_for_each_entry(page, &pgd_list, lru) { + if (!PagePinned(page)) + __pgd_pin((pgd_t *)page_address(page)); + } +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + } + + void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +@@ -470,12 +470,10 @@ static void pgd_ctor(struct mm_struct *m + + static void pgd_dtor(pgd_t *pgd) + { +- unsigned long flags; /* can be called from interrupt context */ +- + if (!SHARED_KERNEL_PMD) { +- spin_lock_irqsave(&pgd_lock, flags); ++ spin_lock(&pgd_lock); + pgd_list_del(pgd); +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + } + + pgd_test_and_unpin(pgd); +@@ -641,7 +639,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + { + pgd_t *pgd; + pmd_t *pmds[PREALLOCATED_PMDS]; +- unsigned long flags; + + pgd = (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ORDER); + +@@ -661,13 +658,13 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + * respect to anything walking the pgd_list, so that they + * never see a partially populated pgd. + */ +- spin_lock_irqsave(&pgd_lock, flags); ++ spin_lock(&pgd_lock); + + #ifdef CONFIG_X86_PAE + /* Protect against save/restore: move below 4GB under pgd_lock. 
*/ + if (!xen_feature(XENFEAT_pae_pgdir_above_4gb) + && xen_create_contiguous_region((unsigned long)pgd, 0, 32)) { +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + goto out_free_pmds; + } + #endif +@@ -675,7 +672,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + pgd_ctor(mm, pgd); + pgd_prepopulate_pmd(mm, pgd, pmds); + +- spin_unlock_irqrestore(&pgd_lock, flags); ++ spin_unlock(&pgd_lock); + + return pgd; + +@@ -735,6 +732,25 @@ int ptep_set_access_flags(struct vm_area + return changed; + } + ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++int pmdp_set_access_flags(struct vm_area_struct *vma, ++ unsigned long address, pmd_t *pmdp, ++ pmd_t entry, int dirty) ++{ ++ int changed = !pmd_same(*pmdp, entry); ++ ++ VM_BUG_ON(address & ~HPAGE_PMD_MASK); ++ ++ if (changed && dirty) { ++ *pmdp = entry; ++ pmd_update_defer(vma->vm_mm, address, pmdp); ++ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); ++ } ++ ++ return changed; ++} ++#endif ++ + int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) + { +@@ -750,6 +766,23 @@ int ptep_test_and_clear_young(struct vm_ + return ret; + } + ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++int pmdp_test_and_clear_young(struct vm_area_struct *vma, ++ unsigned long addr, pmd_t *pmdp) ++{ ++ int ret = 0; ++ ++ if (pmd_young(*pmdp)) ++ ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, ++ (unsigned long *)pmdp); ++ ++ if (ret) ++ pmd_update(vma->vm_mm, addr, pmdp); ++ ++ return ret; ++} ++#endif ++ + int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) + { +@@ -765,6 +798,36 @@ int ptep_clear_flush_young(struct vm_are + return young; + } + ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++int pmdp_clear_flush_young(struct vm_area_struct *vma, ++ unsigned long address, pmd_t *pmdp) ++{ ++ int young; ++ ++ VM_BUG_ON(address & ~HPAGE_PMD_MASK); ++ ++ young = pmdp_test_and_clear_young(vma, address, pmdp); ++ if (young) ++ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); ++ ++ return young; ++} ++ ++void pmdp_splitting_flush(struct vm_area_struct *vma, ++ unsigned long address, pmd_t *pmdp) ++{ ++ int set; ++ VM_BUG_ON(address & ~HPAGE_PMD_MASK); ++ set = !test_and_set_bit(_PAGE_BIT_SPLITTING, ++ (unsigned long *)pmdp); ++ if (set) { ++ pmd_update(vma->vm_mm, address, pmdp); ++ /* need tlb flush only to serialize against gup-fast */ ++ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); ++ } ++} ++#endif ++ + /** + * reserve_top_address - reserves a hole in the top of kernel address space + * @reserve - size of hole to reserve +--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-02-01 15:41:35.000000000 +0100 +@@ -595,7 +595,8 @@ static __init int intel_router_probe(str + case PCI_DEVICE_ID_INTEL_ICH10_1: + case PCI_DEVICE_ID_INTEL_ICH10_2: + case PCI_DEVICE_ID_INTEL_ICH10_3: +- case PCI_DEVICE_ID_INTEL_PATSBURG_LPC: ++ case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0: ++ case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1: + r->name = "PIIX/ICH"; + r->get = pirq_piix_get; + r->set = pirq_piix_set; +--- head-2011-03-17.orig/drivers/hwmon/coretemp-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/coretemp-xen.c 2011-02-01 16:38:02.000000000 +0100 +@@ -20,6 +20,8 @@ + * 02110-1301 USA. + */ + ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ + #include + #include + #include +@@ -454,8 +456,8 @@ static int coretemp_device_add(unsigned + * without thermal sensors will be filtered out. 
+ */ + if (!(info.cpuid_6_eax & 0x1)) { +- printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" +- " has no thermal sensor.\n", info.pdev_entry->x86_model); ++ pr_info("CPU (model=0x%x) has no thermal sensor\n", ++ info.pdev_entry->x86_model); + goto exit_entry_free; + } + +@@ -478,7 +480,7 @@ static int coretemp_device_add(unsigned + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; +- printk(KERN_ERR DRVNAME ": Device allocation failed\n"); ++ pr_err("Device allocation failed\n"); + goto exit; + } + +@@ -488,8 +490,7 @@ static int coretemp_device_add(unsigned + + err = platform_device_add(pdev); + if (err) { +- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", +- err); ++ pr_err("Device addition failed (%d)\n", err); + goto exit_device_put; + } + +--- head-2011-03-17.orig/drivers/hwmon/pkgtemp-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/pkgtemp-xen.c 2011-02-01 16:38:31.000000000 +0100 +@@ -20,6 +20,8 @@ + * 02110-1301 USA. + */ + ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ + #include + #include + #include +@@ -310,7 +312,7 @@ static int pkgtemp_device_add(unsigned i + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; +- printk(KERN_ERR DRVNAME ": Device allocation failed\n"); ++ pr_err("Device allocation failed\n"); + goto exit; + } + +@@ -319,8 +321,7 @@ static int pkgtemp_device_add(unsigned i + + err = platform_device_add(pdev); + if (err) { +- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", +- err); ++ pr_err("Device addition failed (%d)\n", err); + goto exit_device_put; + } + +--- head-2011-03-17.orig/drivers/hwmon/via-cputemp-xen.c 2011-02-01 15:04:27.000000000 +0100 ++++ head-2011-03-17/drivers/hwmon/via-cputemp-xen.c 2011-02-01 16:40:53.000000000 +0100 +@@ -21,6 +21,8 @@ + * 02110-1301 USA. 
+ */ + ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ + #include + #include + #include +@@ -224,15 +226,14 @@ static int via_cputemp_device_add(unsign + goto exit_entry_free; + + if (pdev_entry->x86_model > 0x0f) { +- printk(KERN_WARNING DRVNAME ": Unknown CPU " +- "model 0x%x\n", pdev_entry->x86_model); ++ pr_warn("Unknown CPU model 0x%x\n", pdev_entry->x86_model); + goto exit_entry_free; + } + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; +- printk(KERN_ERR DRVNAME ": Device allocation failed\n"); ++ pr_err("Device allocation failed\n"); + goto exit_entry_free; + } + +@@ -241,8 +242,7 @@ static int via_cputemp_device_add(unsign + + err = platform_device_add(pdev); + if (err) { +- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", +- err); ++ pr_err("Device addition failed (%d)\n", err); + goto exit_device_put; + } + +@@ -268,8 +268,9 @@ static void via_cputemp_device_remove(un + if (p->pdev->id == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); ++ mutex_unlock(&pdev_list_mutex); + kfree(p); +- break; ++ return; + } + } + mutex_unlock(&pdev_list_mutex); +--- head-2011-03-17.orig/drivers/scsi/arcmsr/arcmsr.h 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/drivers/scsi/arcmsr/arcmsr.h 2011-02-17 10:23:02.000000000 +0100 +@@ -46,7 +46,7 @@ + struct device_attribute; + /*The limit of outstanding scsi command that firmware can handle*/ + #define ARCMSR_MAX_OUTSTANDING_CMD 256 +-#ifdef CONFIG_XEN ++#if defined(CONFIG_XEN) || defined(CONFIG_PARAVIRT_XEN) + #define ARCMSR_MAX_FREECCB_NUM 160 + #else + #define ARCMSR_MAX_FREECCB_NUM 320 +--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Kconfig 2011-02-02 17:03:22.000000000 +0100 +@@ -380,7 +380,7 @@ config XEN_DEV_EVTCHN + firing. + If in doubt, say yes. + +-config XEN_BACKEND ++config PARAVIRT_XEN_BACKEND + bool "Backend driver support" + depends on XEN_DOM0 + default y +@@ -427,7 +427,7 @@ config XEN_XENBUS_FRONTEND + + config XEN_GNTDEV + tristate "userspace grant access device driver" +- depends on XEN ++ depends on PARAVIRT_XEN + select MMU_NOTIFIER + help + Allows userspace processes to use grants. +--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-24 15:17:40.000000000 +0100 ++++ head-2011-03-17/drivers/xen/Makefile 2011-02-07 14:39:12.000000000 +0100 +@@ -24,13 +24,17 @@ obj-$(CONFIG_HOTPLUG_CPU) += $(xen-hotp + obj-$(CONFIG_XEN_XENCOMM) += xencomm.o + obj-$(CONFIG_XEN_BALLOON) += $(xen-balloon-y) + obj-$(CONFIG_XEN_DEV_EVTCHN) += $(xen-evtchn-name-y).o ++obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o + obj-$(CONFIG_XENFS) += xenfs/ + obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o +-obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o ++obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o + obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o + obj-$(CONFIG_XEN_DOM0) += pci.o + + xen-evtchn-y := evtchn.o ++xen-gntdev-y := gntdev.o ++ ++xen-platform-pci-y := platform-pci.o + + obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ + obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ +--- head-2011-03-17.orig/drivers/xen/blkback/vbd.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkback/vbd.c 2011-02-07 14:04:20.000000000 +0100 +@@ -63,8 +63,10 @@ int vbd_create(blkif_t *blkif, blkif_vde + + vbd->pdevice = MKDEV(major, minor); + +- bdev = open_by_devnum(vbd->pdevice, +- vbd->readonly ? FMODE_READ : FMODE_WRITE); ++ bdev = blkdev_get_by_dev(vbd->pdevice, ++ FMODE_READ | (vbd->readonly ? 
0 ++ : FMODE_WRITE | FMODE_EXCL), ++ blkif); + + if (IS_ERR(bdev)) { + DPRINTK("vbd_creat: device %08x could not be opened.\n", +@@ -96,7 +98,8 @@ void vbd_free(struct vbd *vbd) + { + if (vbd->bdev) + blkdev_put(vbd->bdev, +- vbd->readonly ? FMODE_READ : FMODE_WRITE); ++ FMODE_READ | (vbd->readonly ? 0 ++ : FMODE_WRITE | FMODE_EXCL)); + vbd->bdev = NULL; + } + +--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-02-03 12:37:02.000000000 +0100 +@@ -437,7 +437,7 @@ static void blkfront_closing(struct blkf + spin_unlock_irqrestore(&blkif_io_lock, flags); + + /* Flush gnttab callback work. Must be done with no locks held. */ +- flush_scheduled_work(); ++ flush_work_sync(&info->work); + + xlvbd_sysfs_delif(info); + +@@ -894,7 +894,7 @@ static void blkif_free(struct blkfront_i + spin_unlock_irq(&blkif_io_lock); + + /* Flush gnttab callback work. Must be done with no locks held. */ +- flush_scheduled_work(); ++ flush_work_sync(&info->work); + + /* Free resources associated with old device channel. */ + if (info->ring_ref != GRANT_INVALID_REF) { +--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-02-07 14:13:37.000000000 +0100 +@@ -754,7 +754,7 @@ blktap_device_close_bdev(struct blktap * + dev = &tap->device; + + if (dev->bdev) +- blkdev_put(dev->bdev, FMODE_WRITE); ++ blkdev_put(dev->bdev, FMODE_WRITE|FMODE_EXCL); + + dev->bdev = NULL; + clear_bit(BLKTAP_PASSTHROUGH, &tap->dev_inuse); +@@ -768,7 +768,7 @@ blktap_device_open_bdev(struct blktap *t + + dev = &tap->device; + +- bdev = open_by_devnum(pdev, FMODE_WRITE); ++ bdev = blkdev_get_by_dev(pdev, FMODE_WRITE|FMODE_EXCL, tap); + if (IS_ERR(bdev)) { + BTERR("opening device %x:%x failed: %ld\n", + MAJOR(pdev), MINOR(pdev), PTR_ERR(bdev)); +@@ -778,7 +778,7 @@ blktap_device_open_bdev(struct blktap *t + if (!bdev->bd_disk) { + BTERR("device %x:%x doesn't exist\n", + MAJOR(pdev), MINOR(pdev)); +- blkdev_put(bdev, FMODE_WRITE); ++ blkdev_put(bdev, FMODE_WRITE|FMODE_EXCL); + return -ENOENT; + } + +--- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-03 11:12:32.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-10 16:24:57.000000000 +0100 +@@ -621,12 +621,16 @@ static void unbind_from_irq(unsigned int + cfg->info = IRQ_UNBOUND; + + /* Zap stats across IRQ changes of use. 
*/ +- for_each_possible_cpu(cpu) ++ for_each_possible_cpu(cpu) { + #ifdef CONFIG_GENERIC_HARDIRQS +- irq_to_desc(irq)->kstat_irqs[cpu] = 0; ++ struct irq_desc *desc = irq_to_desc(irq); ++ ++ if (desc->kstat_irqs) ++ *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; + #else + kstat_cpu(cpu).irqs[irq] = 0; + #endif ++ } + } + + spin_unlock(&irq_mapping_update_lock); +--- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-03-03 16:11:42.000000000 +0100 ++++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-03-03 16:13:04.000000000 +0100 +@@ -188,7 +188,7 @@ static void __cpuinit xen_smp_intr_exit( + static void __cpuinit cpu_bringup(void) + { + cpu_init(); +- identify_secondary_cpu(¤t_cpu_data); ++ identify_secondary_cpu(__this_cpu_ptr(&cpu_info)); + touch_softlockup_watchdog(); + preempt_disable(); + local_irq_enable(); +--- head-2011-03-17.orig/drivers/xen/fbfront/xenfb.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/fbfront/xenfb.c 2011-02-08 10:37:50.000000000 +0100 +@@ -555,12 +555,12 @@ xenfb_make_preferred_console(void) + if (console_set_on_cmdline) + return; + +- acquire_console_sem(); +- for (c = console_drivers; c; c = c->next) { ++ console_lock(); ++ for_each_console(c) { + if (!strcmp(c->name, "tty") && c->index == 0) + break; + } +- release_console_sem(); ++ console_unlock(); + if (c) { + unregister_console(c); + c->flags |= CON_CONSDEV; +--- head-2011-03-17.orig/drivers/xen/netfront/netfront.c 2011-02-09 16:05:34.000000000 +0100 ++++ head-2011-03-17/drivers/xen/netfront/netfront.c 2011-02-03 12:49:56.000000000 +0100 +@@ -132,17 +132,18 @@ static inline int skb_gso_ok(struct sk_b + return (features & NETIF_F_TSO); + } + +-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) ++#define netif_skb_features(skb) ((skb)->dev->features) ++static inline int netif_needs_gso(struct sk_buff *skb, int features) + { + return skb_is_gso(skb) && +- (!skb_gso_ok(skb, dev->features) || ++ (!skb_gso_ok(skb, features) || + unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); + } + #else + #define HAVE_GSO 0 + #define HAVE_TSO 0 + #define HAVE_CSUM_OFFLOAD 0 +-#define netif_needs_gso(dev, skb) 0 ++#define netif_needs_gso(skb, feat) 0 + #define dev_disable_gso_features(dev) ((void)0) + #define ethtool_op_set_tso(dev, data) (-ENOSYS) + #endif +@@ -952,7 +953,7 @@ static int network_start_xmit(struct sk_ + + if (unlikely(!netfront_carrier_ok(np) || + (frags > 1 && !xennet_can_sg(dev)) || +- netif_needs_gso(dev, skb))) { ++ netif_needs_gso(skb, netif_skb_features(skb)))) { + spin_unlock_irq(&np->tx_lock); + goto drop; + } +--- head-2011-03-17.orig/drivers/xen/pcifront/xenbus.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/pcifront/xenbus.c 2011-02-03 12:39:42.000000000 +0100 +@@ -61,7 +61,7 @@ static void free_pdev(struct pcifront_de + pcifront_free_roots(pdev); + + /*For PCIE_AER error handling job*/ +- flush_scheduled_work(); ++ flush_work_sync(&pdev->op_work); + + if (pdev->irq > 0) + unbind_from_irqhandler(pdev->irq, pdev); +--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_xenbus.c 2011-02-01 14:50:44.000000000 +0100 ++++ head-2011-03-17/drivers/xen/sfc_netback/accel_xenbus.c 2011-02-03 12:38:43.000000000 +0100 +@@ -701,7 +701,7 @@ fail_config_watch: + * Flush the scheduled work queue before freeing bend to get + * rid of any pending netback_accel_msg_rx_handler() + */ +- flush_scheduled_work(); ++ flush_work_sync(&bend->handle_msg); + + mutex_lock(&bend->bend_mutex); + net_accel_update_state(dev, XenbusStateUnknown); +@@ -781,7 +781,7 
@@ int netback_accel_remove(struct xenbus_d + * Flush the scheduled work queue before freeing bend to get + * rid of any pending netback_accel_msg_rx_handler() + */ +- flush_scheduled_work(); ++ flush_work_sync(&bend->handle_msg); + + mutex_lock(&bend->bend_mutex); + +--- head-2011-03-17.orig/drivers/xen/xenbus/Makefile 2011-02-02 17:06:11.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/Makefile 2011-02-02 17:08:58.000000000 +0100 +@@ -7,3 +7,6 @@ xenbus_be-objs += xenbus_backend_client. + xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o + obj-y += $(xenbus-y) $(xenbus-m) + obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o ++ ++obj-$(CONFIG_PARAVIRT_XEN_BACKEND) += xenbus_probe_backend.o ++obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-02-07 12:54:16.000000000 +0100 +@@ -65,8 +65,6 @@ + #include + #include + #include +- +-#include + #endif + + #ifndef CONFIG_XEN +@@ -96,15 +94,12 @@ extern struct mutex xenwatch_mutex; + + static BLOCKING_NOTIFIER_HEAD(xenstore_chain); + ++#if defined(CONFIG_XEN) || defined(MODULE) + static void wait_for_devices(struct xenbus_driver *xendrv); + +-static int xenbus_probe_frontend(const char *type, const char *name); +- +-static void xenbus_dev_shutdown(struct device *_dev); +- +-#if !defined(CONFIG_XEN) && !defined(MODULE) +-static int xenbus_dev_suspend(struct device *dev, pm_message_t state); +-static int xenbus_dev_resume(struct device *dev); ++#define PARAVIRT_EXPORT_SYMBOL(sym) __typeof__(sym) sym; ++#else ++#define PARAVIRT_EXPORT_SYMBOL EXPORT_SYMBOL_GPL + #endif + + /* If something in array of ids matches this device, return it. */ +@@ -127,24 +122,7 @@ int xenbus_match(struct device *_dev, st + + return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; + } +- +-/* device// => - */ +-static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) +-{ +- nodename = strchr(nodename, '/'); +- if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { +- pr_warning("XENBUS: bad frontend %s\n", nodename); +- return -EINVAL; +- } +- +- strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); +- if (!strchr(bus_id, '/')) { +- pr_warning("XENBUS: bus_id %s no slash\n", bus_id); +- return -EINVAL; +- } +- *strchr(bus_id, '/') = '-'; +- return 0; +-} ++PARAVIRT_EXPORT_SYMBOL(xenbus_match); + + + static void free_otherend_details(struct xenbus_device *dev) +@@ -164,7 +142,7 @@ static void free_otherend_watch(struct x + } + + +-int read_otherend_details(struct xenbus_device *xendev, ++int xenbus_read_otherend_details(struct xenbus_device *xendev, + char *id_node, char *path_node) + { + int err = xenbus_gather(XBT_NIL, xendev->nodename, +@@ -189,74 +167,22 @@ int read_otherend_details(struct xenbus_ + + return 0; + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_read_otherend_details); + ++#if defined(CONFIG_XEN) || defined(MODULE) + + static int read_backend_details(struct xenbus_device *xendev) + { +- return read_otherend_details(xendev, "backend-id", "backend"); +-} +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) +-static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) +-{ +- struct xenbus_device *xdev; +- +- if (dev == NULL) +- return -ENODEV; +- xdev = to_xenbus_device(dev); +- if (xdev == NULL) +- return -ENODEV; +- +- /* stuff we want to pass to /sbin/hotplug */ +-#if defined(CONFIG_XEN) || defined(MODULE) +- add_uevent_var(env, "XENBUS_TYPE=%s", 
xdev->devicetype); +- add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); +-#endif +- add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); +- +- return 0; ++ return xenbus_read_otherend_details(xendev, "backend-id", "backend"); + } +-#endif +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) +-static struct device_attribute xenbus_dev_attrs[] = { +- __ATTR_NULL +-}; +-#endif +- +-/* Bus type for frontend drivers. */ +-static struct xen_bus_type xenbus_frontend = { +- .root = "device", +- .levels = 2, /* device/type/ */ +- .get_bus_id = frontend_bus_id, +- .probe = xenbus_probe_frontend, +- .error = -ENODEV, +- .bus = { +- .name = "xen", +- .match = xenbus_match, +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) +- .probe = xenbus_dev_probe, +- .remove = xenbus_dev_remove, +- .shutdown = xenbus_dev_shutdown, +- .uevent = xenbus_uevent_frontend, +-#endif +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) +- .dev_attrs = xenbus_dev_attrs, +-#endif +-#if !defined(CONFIG_XEN) && !defined(MODULE) +- .suspend = xenbus_dev_suspend, +- .resume = xenbus_dev_resume, +-#endif +- }, +-#if defined(CONFIG_XEN) || defined(MODULE) +- .dev = { +- .init_name = "xen", +- }, +-#endif +-}; + + static void otherend_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) ++#else /* !CONFIG_XEN && !MODULE */ ++void xenbus_otherend_changed(struct xenbus_watch *watch, ++ const char **vec, unsigned int len, ++ int ignore_on_shutdown) ++#endif /* CONFIG_XEN || MODULE */ + { + struct xenbus_device *dev = + container_of(watch, struct xenbus_device, otherend_watch); +@@ -284,11 +210,15 @@ static void otherend_changed(struct xenb + * work that can fail e.g., when the rootfs is gone. + */ + if (system_state > SYSTEM_RUNNING) { +- struct xen_bus_type *bus = bus; +- bus = container_of(dev->dev.bus, struct xen_bus_type, bus); + /* If we're frontend, drive the state machine to Closed. */ + /* This should cause the backend to release our resources. 
*/ +- if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) ++# if defined(CONFIG_XEN) || defined(MODULE) ++ const struct xen_bus_type *bus = ++ container_of(dev->dev.bus, struct xen_bus_type, bus); ++ int ignore_on_shutdown = (bus->levels == 2); ++# endif ++ ++ if (ignore_on_shutdown && (state == XenbusStateClosing)) + xenbus_frontend_closed(dev); + return; + } +@@ -297,6 +227,7 @@ static void otherend_changed(struct xenb + if (drv->otherend_changed) + drv->otherend_changed(dev, state); + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_otherend_changed); + + + static int talk_to_otherend(struct xenbus_device *dev) +@@ -317,7 +248,11 @@ static int watch_otherend(struct xenbus_ + return xenbus_watch_path2(dev, dev->otherend, "state", + &dev->otherend_watch, otherend_changed); + #else +- return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed, ++ struct xen_bus_type *bus = ++ container_of(dev->dev.bus, struct xen_bus_type, bus); ++ ++ return xenbus_watch_pathfmt(dev, &dev->otherend_watch, ++ bus->otherend_changed, + "%s/%s", dev->otherend, "state"); + #endif + } +@@ -367,8 +302,13 @@ int xenbus_dev_probe(struct device *_dev + fail: + xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); + xenbus_switch_state(dev, XenbusStateClosed); ++#if defined(CONFIG_XEN) || defined(MODULE) + return -ENODEV; ++#else ++ return err; ++#endif + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_dev_probe); + + int xenbus_dev_remove(struct device *_dev) + { +@@ -386,8 +326,9 @@ int xenbus_dev_remove(struct device *_de + xenbus_switch_state(dev, XenbusStateClosed); + return 0; + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_dev_remove); + +-static void xenbus_dev_shutdown(struct device *_dev) ++void xenbus_dev_shutdown(struct device *_dev) + { + struct xenbus_device *dev = to_xenbus_device(_dev); + unsigned long timeout = 5*HZ; +@@ -420,6 +361,7 @@ static void xenbus_dev_shutdown(struct d + out: + put_device(&dev->dev); + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_dev_shutdown); + + int xenbus_register_driver_common(struct xenbus_driver *drv, + struct xen_bus_type *bus, +@@ -450,25 +392,7 @@ int xenbus_register_driver_common(struct + mutex_unlock(&xenwatch_mutex); + return ret; + } +- +-int __xenbus_register_frontend(struct xenbus_driver *drv, +- struct module *owner, const char *mod_name) +-{ +- int ret; +- +- drv->read_otherend_details = read_backend_details; +- +- ret = xenbus_register_driver_common(drv, &xenbus_frontend, +- owner, mod_name); +- if (ret) +- return ret; +- +- /* If this driver is loaded as a module wait for devices to attach. 
*/ +- wait_for_devices(drv); +- +- return 0; +-} +-EXPORT_SYMBOL_GPL(__xenbus_register_frontend); ++PARAVIRT_EXPORT_SYMBOL(xenbus_register_driver_common); + + void xenbus_unregister_driver(struct xenbus_driver *drv) + { +@@ -661,9 +585,31 @@ fail: + kfree(xendev); + return err; + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_probe_node); ++ ++#if defined(CONFIG_XEN) || defined(MODULE) ++ ++/* device// => - */ ++static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) ++{ ++ nodename = strchr(nodename, '/'); ++ if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { ++ pr_warning("XENBUS: bad frontend %s\n", nodename); ++ return -EINVAL; ++ } ++ ++ strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); ++ if (!strchr(bus_id, '/')) { ++ pr_warning("XENBUS: bus_id %s no slash\n", bus_id); ++ return -EINVAL; ++ } ++ *strchr(bus_id, '/') = '-'; ++ return 0; ++} + + /* device// */ +-static int xenbus_probe_frontend(const char *type, const char *name) ++static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, ++ const char *name) + { + char *nodename; + int err; +@@ -671,18 +617,90 @@ static int xenbus_probe_frontend(const c + if (!strcmp(type, "console")) + return 0; + +- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", +- xenbus_frontend.root, type, name); ++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); + if (!nodename) + return -ENOMEM; + + DPRINTK("%s", nodename); + +- err = xenbus_probe_node(&xenbus_frontend, type, nodename); ++ err = xenbus_probe_node(bus, type, nodename); + kfree(nodename); + return err; + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) ++static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) ++{ ++ struct xenbus_device *xdev; ++ ++ if (dev == NULL) ++ return -ENODEV; ++ xdev = to_xenbus_device(dev); ++ if (xdev == NULL) ++ return -ENODEV; ++ ++ /* stuff we want to pass to /sbin/hotplug */ ++ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype) || ++ add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename) || ++ add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype)) ++ return -ENOMEM; ++ ++ return 0; ++} ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) ++static struct device_attribute xenbus_dev_attrs[] = { ++ __ATTR_NULL ++}; ++#endif ++ ++/* Bus type for frontend drivers. */ ++static struct xen_bus_type xenbus_frontend = { ++ .root = "device", ++ .levels = 2, /* device/type/ */ ++ .get_bus_id = frontend_bus_id, ++ .probe = xenbus_probe_frontend, ++ .error = -ENODEV, ++ .bus = { ++ .name = "xen", ++ .match = xenbus_match, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) ++ .probe = xenbus_dev_probe, ++ .remove = xenbus_dev_remove, ++ .shutdown = xenbus_dev_shutdown, ++ .uevent = xenbus_uevent_frontend, ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) ++ .dev_attrs = xenbus_dev_attrs, ++#endif ++ }, ++ .dev = { ++ .init_name = "xen", ++ }, ++}; ++ ++int __xenbus_register_frontend(struct xenbus_driver *drv, ++ struct module *owner, const char *mod_name) ++{ ++ int ret; ++ ++ drv->read_otherend_details = read_backend_details; ++ ++ ret = xenbus_register_driver_common(drv, &xenbus_frontend, ++ owner, mod_name); ++ if (ret) ++ return ret; ++ ++ /* If this driver is loaded as a module wait for devices to attach. 
*/ ++ wait_for_devices(drv); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(__xenbus_register_frontend); ++ ++#endif ++ + static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) + { + int err = 0; +@@ -695,7 +713,7 @@ static int xenbus_probe_device_type(stru + return PTR_ERR(dir); + + for (i = 0; i < dir_n; i++) { +- err = bus->probe(type, dir[i]); ++ err = bus->probe(bus, type, dir[i]); + if (err) + break; + } +@@ -726,6 +744,7 @@ int xenbus_probe_devices(struct xen_bus_ + kfree(dir); + return err; + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_probe_devices); + + static unsigned int char_count(const char *str, char c) + { +@@ -786,10 +805,9 @@ void xenbus_dev_changed(const char *node + + kfree(root); + } +-#if !defined(CONFIG_XEN) && !defined(MODULE) +-EXPORT_SYMBOL_GPL(xenbus_dev_changed); +-#endif ++PARAVIRT_EXPORT_SYMBOL(xenbus_dev_changed); + ++#if defined(CONFIG_XEN) || defined(MODULE) + static void frontend_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) + { +@@ -804,22 +822,21 @@ static struct xenbus_watch fe_watch = { + .callback = frontend_changed, + }; + +-#if !defined(CONFIG_XEN) && !defined(MODULE) +-static int xenbus_dev_suspend(struct device *dev, pm_message_t state) +-#else + static int suspend_dev(struct device *dev, void *data) ++#else ++int xenbus_dev_suspend(struct device *dev, pm_message_t state) + #endif + { + int err = 0; + struct xenbus_driver *drv; +- struct xenbus_device *xdev; ++ struct xenbus_device *xdev ++ = container_of(dev, struct xenbus_device, dev); + +- DPRINTK(""); ++ DPRINTK("%s", xdev->nodename); + + if (dev->driver == NULL) + return 0; + drv = to_xenbus_driver(dev->driver); +- xdev = container_of(dev, struct xenbus_device, dev); + if (drv->suspend) + #if !defined(CONFIG_XEN) && !defined(MODULE) + err = drv->suspend(xdev, state); +@@ -831,6 +848,7 @@ static int suspend_dev(struct device *de + dev_name(dev), err); + return 0; + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_dev_suspend); + + #if defined(CONFIG_XEN) || defined(MODULE) + static int suspend_cancel_dev(struct device *dev, void *data) +@@ -852,26 +870,22 @@ static int suspend_cancel_dev(struct dev + dev_name(dev), err); + return 0; + } +-#endif + +-#if !defined(CONFIG_XEN) && !defined(MODULE) +-static int xenbus_dev_resume(struct device *dev) +-#else + static int resume_dev(struct device *dev, void *data) ++#else ++int xenbus_dev_resume(struct device *dev) + #endif + { + int err; + struct xenbus_driver *drv; +- struct xenbus_device *xdev; ++ struct xenbus_device *xdev ++ = container_of(dev, struct xenbus_device, dev); + +- DPRINTK(""); ++ DPRINTK("%s", xdev->nodename); + + if (dev->driver == NULL) + return 0; +- + drv = to_xenbus_driver(dev->driver); +- xdev = container_of(dev, struct xenbus_device, dev); +- + err = talk_to_otherend(xdev); + if (err) { + pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n", +@@ -899,6 +913,7 @@ static int resume_dev(struct device *dev + + return 0; + } ++PARAVIRT_EXPORT_SYMBOL(xenbus_dev_resume); + + #if defined(CONFIG_XEN) || defined(MODULE) + void xenbus_suspend(void) +@@ -960,17 +975,19 @@ void xenbus_probe(struct work_struct *un + { + BUG_ON(!is_xenstored_ready()); + ++#if defined(CONFIG_XEN) || defined(MODULE) + /* Enumerate devices in xenstore and watch for changes. 
*/ + xenbus_probe_devices(&xenbus_frontend); + register_xenbus_watch(&fe_watch); + xenbus_backend_probe_and_watch(); ++#endif + + /* Notify others that xenstore is up */ + blocking_notifier_call_chain(&xenstore_chain, 0, NULL); + } +-#if !defined(CONFIG_XEN) && !defined(MODULE) +-EXPORT_SYMBOL_GPL(xenbus_probe); ++PARAVIRT_EXPORT_SYMBOL(xenbus_probe); + ++#if !defined(CONFIG_XEN) && !defined(MODULE) + static int __init xenbus_probe_initcall(void) + { + if (!xen_domain()) +@@ -1112,12 +1129,14 @@ int __devinit xenbus_init(void) + if (!is_running_on_xen()) + return -ENODEV; + ++#if defined(CONFIG_XEN) || defined(MODULE) + /* Register ourselves with the kernel bus subsystem */ + xenbus_frontend.error = bus_register(&xenbus_frontend.bus); + if (xenbus_frontend.error) + pr_warning("XENBUS: Error registering frontend bus: %i\n", + xenbus_frontend.error); + xenbus_backend_bus_register(); ++#endif + + /* + * Domain0 doesn't have a store_evtchn or store_mfn yet. +@@ -1221,10 +1240,8 @@ int __devinit xenbus_init(void) + " %d\n", xenbus_frontend.error); + } + } +-#endif + xenbus_backend_device_register(); + +-#if defined(CONFIG_XEN) || defined(MODULE) + if (!is_initial_xendomain()) + xenbus_probe(NULL); + #endif +@@ -1248,6 +1265,7 @@ int __devinit xenbus_init(void) + + if (page != 0) + free_page(page); ++ + return err; + } + +@@ -1260,6 +1278,8 @@ MODULE_LICENSE("GPL"); + #endif + #endif + ++#if defined(CONFIG_XEN) || defined(MODULE) ++ + static int is_device_connecting(struct device *dev, void *data) + { + struct xenbus_device *xendev = to_xenbus_device(dev); +@@ -1395,3 +1415,5 @@ int xenbus_for_each_frontend(void *arg, + return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); + } + EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); ++ ++#endif /* CONFIG_XEN || MODULE */ +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.h 2011-02-07 14:42:39.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.h 2011-02-07 14:43:11.000000000 +0100 +@@ -67,11 +67,15 @@ struct xen_bus_type + int error; + unsigned int levels; + int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); +- int (*probe)(const char *type, const char *dir); +- struct bus_type bus; +-#if defined(CONFIG_XEN) || defined(MODULE) ++ int (*probe)(struct xen_bus_type *bus, const char *type, ++ const char *dir); ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++ void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, ++ unsigned int len); ++#else + struct device dev; + #endif ++ struct bus_type bus; + }; + + extern int xenbus_match(struct device *_dev, struct device_driver *_drv); +@@ -88,4 +92,16 @@ extern int xenbus_probe_devices(struct x + + extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); + ++extern void xenbus_dev_shutdown(struct device *_dev); ++ ++extern int xenbus_dev_suspend(struct device *dev, pm_message_t state); ++extern int xenbus_dev_resume(struct device *dev); ++ ++extern void xenbus_otherend_changed(struct xenbus_watch *watch, ++ const char **vec, unsigned int len, ++ int ignore_on_shutdown); ++ ++extern int xenbus_read_otherend_details(struct xenbus_device *xendev, ++ char *id_node, char *path_node); ++ + #endif +--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-01 15:03:03.000000000 +0100 ++++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-02-03 08:30:05.000000000 +0100 +@@ -33,7 +33,7 @@ + + #define DPRINTK(fmt, args...) 
\ + pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ +- __FUNCTION__, __LINE__, ##args) ++ __func__, __LINE__, ##args) + + #include + #include +@@ -45,14 +45,17 @@ + #include + #include + +-#include + #include +-#include + #include ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++#include ++#endif + #include + #include ++#if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) + #include + #include ++#endif + #include + + #include "xenbus_comms.h" +@@ -62,17 +65,6 @@ + #include + #endif + +-static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env); +-static int xenbus_probe_backend(const char *type, const char *domid); +- +-extern int read_otherend_details(struct xenbus_device *xendev, +- char *id_node, char *path_node); +- +-static int read_frontend_details(struct xenbus_device *xendev) +-{ +- return read_otherend_details(xendev, "frontend-id", "frontend"); +-} +- + /* backend/// => -- */ + static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) + { +@@ -110,34 +102,12 @@ static int backend_bus_id(char bus_id[XE + return 0; + } + +-static struct device_attribute xenbus_backend_attrs[] = { +- __ATTR_NULL +-}; +- +-static struct xen_bus_type xenbus_backend = { +- .root = "backend", +- .levels = 3, /* backend/type// */ +- .get_bus_id = backend_bus_id, +- .probe = xenbus_probe_backend, +- .error = -ENODEV, +- .bus = { +- .name = "xen-backend", +- .match = xenbus_match, +- .probe = xenbus_dev_probe, +- .remove = xenbus_dev_remove, +-// .shutdown = xenbus_dev_shutdown, +- .uevent = xenbus_uevent_backend, +- .dev_attrs = xenbus_backend_attrs, +- }, +- .dev = { +- .init_name = "xen-backend", +- }, +-}; +- +-static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) ++static int xenbus_uevent_backend(struct device *dev, ++ struct kobj_uevent_env *env) + { + struct xenbus_device *xdev; + struct xenbus_driver *drv; ++ struct xen_bus_type *bus; + + DPRINTK(""); + +@@ -145,15 +115,19 @@ static int xenbus_uevent_backend(struct + return -ENODEV; + + xdev = to_xenbus_device(dev); ++ bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); + if (xdev == NULL) + return -ENODEV; + + /* stuff we want to pass to /sbin/hotplug */ +- add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); ++ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) ++ return -ENOMEM; + +- add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); ++ if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) ++ return -ENOMEM; + +- add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root); ++ if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) ++ return -ENOMEM; + + if (dev->driver) { + drv = to_xenbus_driver(dev->driver); +@@ -164,18 +138,9 @@ static int xenbus_uevent_backend(struct + return 0; + } + +-int __xenbus_register_backend(struct xenbus_driver *drv, +- struct module *owner, const char *mod_name) +-{ +- drv->read_otherend_details = read_frontend_details; +- +- return xenbus_register_driver_common(drv, &xenbus_backend, +- owner, mod_name); +-} +-EXPORT_SYMBOL_GPL(__xenbus_register_backend); +- + /* backend/// */ +-static int xenbus_probe_backend_unit(const char *dir, ++static int xenbus_probe_backend_unit(struct xen_bus_type *bus, ++ const char *dir, + const char *type, + const char *name) + { +@@ -188,13 +153,14 @@ static int xenbus_probe_backend_unit(con + + DPRINTK("%s\n", nodename); + +- err = xenbus_probe_node(&xenbus_backend, type, nodename); ++ err = xenbus_probe_node(bus, type, nodename); + 
kfree(nodename); + return err; + } + + /* backend// */ +-static int xenbus_probe_backend(const char *type, const char *domid) ++static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, ++ const char *domid) + { + char *nodename; + int err = 0; +@@ -203,7 +169,7 @@ static int xenbus_probe_backend(const ch + + DPRINTK(""); + +- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); ++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); + if (!nodename) + return -ENOMEM; + +@@ -214,7 +180,7 @@ static int xenbus_probe_backend(const ch + } + + for (i = 0; i < dir_n; i++) { +- err = xenbus_probe_backend_unit(nodename, type, dir[i]); ++ err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); + if (err) + break; + } +@@ -223,6 +189,44 @@ static int xenbus_probe_backend(const ch + return err; + } + ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++static void frontend_changed(struct xenbus_watch *watch, ++ const char **vec, unsigned int len) ++{ ++ xenbus_otherend_changed(watch, vec, len, 0); ++} ++#endif ++ ++static struct device_attribute xenbus_backend_dev_attrs[] = { ++ __ATTR_NULL ++}; ++ ++static struct xen_bus_type xenbus_backend = { ++ .root = "backend", ++ .levels = 3, /* backend/type// */ ++ .get_bus_id = backend_bus_id, ++ .probe = xenbus_probe_backend, ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++ .otherend_changed = frontend_changed, ++#else ++ .dev = { ++ .init_name = "xen-backend", ++ }, ++#endif ++ .error = -ENODEV, ++ .bus = { ++ .name = "xen-backend", ++ .match = xenbus_match, ++ .uevent = xenbus_uevent_backend, ++ .probe = xenbus_dev_probe, ++ .remove = xenbus_dev_remove, ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++ .shutdown = xenbus_dev_shutdown, ++#endif ++ .dev_attrs = xenbus_backend_dev_attrs, ++ }, ++}; ++ + static void backend_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) + { +@@ -236,6 +240,47 @@ static struct xenbus_watch be_watch = { + .callback = backend_changed, + }; + ++static int read_frontend_details(struct xenbus_device *xendev) ++{ ++ return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); ++} ++ ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++ ++int xenbus_dev_is_online(struct xenbus_device *dev) ++{ ++ int rc, val; ++ ++ rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); ++ if (rc != 1) ++ val = 0; /* no online node present */ ++ ++ return val; ++} ++EXPORT_SYMBOL_GPL(xenbus_dev_is_online); ++ ++int __xenbus_register_backend(struct xenbus_driver *drv, ++ struct module *owner, const char *mod_name) ++{ ++ drv->read_otherend_details = read_frontend_details; ++ ++ return xenbus_register_driver_common(drv, &xenbus_backend, ++ owner, mod_name); ++} ++EXPORT_SYMBOL_GPL(__xenbus_register_backend); ++ ++#else ++ ++int __xenbus_register_backend(struct xenbus_driver *drv, ++ struct module *owner, const char *mod_name) ++{ ++ drv->read_otherend_details = read_frontend_details; ++ ++ return xenbus_register_driver_common(drv, &xenbus_backend, ++ owner, mod_name); ++} ++EXPORT_SYMBOL_GPL(__xenbus_register_backend); ++ + void xenbus_backend_suspend(int (*fn)(struct device *, void *)) + { + DPRINTK(""); +@@ -250,12 +295,49 @@ void xenbus_backend_resume(int (*fn)(str + bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); + } + ++#endif ++ ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++static int backend_probe_and_watch(struct notifier_block 
*notifier, ++ unsigned long event, ++ void *data) ++#else + void xenbus_backend_probe_and_watch(void) ++#endif + { ++ /* Enumerate devices in xenstore and watch for changes. */ + xenbus_probe_devices(&xenbus_backend); + register_xenbus_watch(&be_watch); ++ ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++ return NOTIFY_DONE; ++#endif + } + ++#if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) ++ ++static int __init xenbus_probe_backend_init(void) ++{ ++ static struct notifier_block xenstore_notifier = { ++ .notifier_call = backend_probe_and_watch ++ }; ++ int err; ++ ++ DPRINTK(""); ++ ++ /* Register ourselves with the kernel bus subsystem */ ++ err = bus_register(&xenbus_backend.bus); ++ if (err) ++ return err; ++ ++ register_xenstore_notifier(&xenstore_notifier); ++ ++ return 0; ++} ++subsys_initcall(xenbus_probe_backend_init); ++ ++#else ++ + void xenbus_backend_bus_register(void) + { + xenbus_backend.error = bus_register(&xenbus_backend.bus); +@@ -282,3 +364,5 @@ int xenbus_for_each_backend(void *arg, i + return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); + } + EXPORT_SYMBOL_GPL(xenbus_for_each_backend); ++ ++#endif +--- head-2011-03-17.orig/include/xen/gntdev.h 2011-03-17 14:35:43.000000000 +0100 ++++ head-2011-03-17/include/xen/gntdev.h 2011-02-03 13:52:59.000000000 +0100 +@@ -1,119 +1,3 @@ +-/****************************************************************************** +- * gntdev.h +- * +- * Interface to /dev/xen/gntdev. +- * +- * Copyright (c) 2007, D G Murray +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation; or, when distributed +- * separately from the Linux kernel or incorporated into other +- * software packages, subject to the following license: +- * +- * Permission is hereby granted, free of charge, to any person obtaining a copy +- * of this source file (the "Software"), to deal in the Software without +- * restriction, including without limitation the rights to use, copy, modify, +- * merge, publish, distribute, sublicense, and/or sell copies of the Software, +- * and to permit persons to whom the Software is furnished to do so, subject to +- * the following conditions: +- * +- * The above copyright notice and this permission notice shall be included in +- * all copies or substantial portions of the Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +- * IN THE SOFTWARE. +- */ +- +-#ifndef __LINUX_PUBLIC_GNTDEV_H__ +-#define __LINUX_PUBLIC_GNTDEV_H__ +- +-struct ioctl_gntdev_grant_ref { +- /* The domain ID of the grant to be mapped. */ +- uint32_t domid; +- /* The grant reference of the grant to be mapped. */ +- uint32_t ref; +-}; +- +-/* +- * Inserts the grant references into the mapping table of an instance +- * of gntdev. N.B. This does not perform the mapping, which is deferred +- * until mmap() is called with @index as the offset. 
+- */ +-#define IOCTL_GNTDEV_MAP_GRANT_REF \ +-_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) +-struct ioctl_gntdev_map_grant_ref { +- /* IN parameters */ +- /* The number of grants to be mapped. */ +- uint32_t count; +- uint32_t pad; +- /* OUT parameters */ +- /* The offset to be used on a subsequent call to mmap(). */ +- uint64_t index; +- /* Variable IN parameter. */ +- /* Array of grant references, of size @count. */ +- struct ioctl_gntdev_grant_ref refs[1]; +-}; +- +-/* +- * Removes the grant references from the mapping table of an instance of +- * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) +- * before this ioctl is called, or an error will result. +- */ +-#define IOCTL_GNTDEV_UNMAP_GRANT_REF \ +-_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) +-struct ioctl_gntdev_unmap_grant_ref { +- /* IN parameters */ +- /* The offset was returned by the corresponding map operation. */ +- uint64_t index; +- /* The number of pages to be unmapped. */ +- uint32_t count; +- uint32_t pad; +-}; +- +-/* +- * Returns the offset in the driver's address space that corresponds +- * to @vaddr. This can be used to perform a munmap(), followed by an +- * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by +- * the caller. The number of pages that were allocated at the same time as +- * @vaddr is returned in @count. +- * +- * N.B. Where more than one page has been mapped into a contiguous range, the +- * supplied @vaddr must correspond to the start of the range; otherwise +- * an error will result. It is only possible to munmap() the entire +- * contiguously-allocated range at once, and not any subrange thereof. +- */ +-#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ +-_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) +-struct ioctl_gntdev_get_offset_for_vaddr { +- /* IN parameters */ +- /* The virtual address of the first mapped page in a range. */ +- uint64_t vaddr; +- /* OUT parameters */ +- /* The offset that was used in the initial mmap() operation. */ +- uint64_t offset; +- /* The number of pages mapped in the VM area that begins at @vaddr. */ +- uint32_t count; +- uint32_t pad; +-}; +- +-/* +- * Sets the maximum number of grants that may mapped at once by this gntdev +- * instance. +- * +- * N.B. This must be called before any other ioctl is performed on the device. +- */ +-#define IOCTL_GNTDEV_SET_MAX_GRANTS \ +-_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) +-struct ioctl_gntdev_set_max_grants { +- /* IN parameter */ +- /* The maximum number of grants that may be mapped at once. */ +- uint32_t count; +-}; +- +-#endif /* __LINUX_PUBLIC_GNTDEV_H__ */ ++#if defined(CONFIG_PARAVIRT_XEN) || !defined(__KERNEL__) ++#include "public/gntdev.h" ++#endif +--- head-2011-03-17.orig/include/xen/public/gntdev.h 2008-04-02 12:34:02.000000000 +0200 ++++ head-2011-03-17/include/xen/public/gntdev.h 2011-02-03 13:52:28.000000000 +0100 +@@ -66,7 +66,7 @@ struct ioctl_gntdev_map_grant_ref { + * before this ioctl is called, or an error will result. + */ + #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ +-_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) ++_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) + struct ioctl_gntdev_unmap_grant_ref { + /* IN parameters */ + /* The offset was returned by the corresponding map operation. 
*/ +--- head-2011-03-17.orig/lib/swiotlb-xen.c 2011-02-01 15:09:47.000000000 +0100 ++++ head-2011-03-17/lib/swiotlb-xen.c 2011-03-11 11:06:22.000000000 +0100 +@@ -48,7 +48,7 @@ int swiotlb_force; + static char *io_tlb_start, *io_tlb_end; + + /* +- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and ++ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and + * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. + */ + static unsigned long io_tlb_nslabs; +@@ -567,6 +567,15 @@ dma_addr_t swiotlb_map_page(struct devic + } + + dev_addr = swiotlb_virt_to_bus(dev, map); ++ ++ /* ++ * Ensure that the address returned is DMA'ble ++ */ ++ if (!dma_capable(dev, dev_addr, size)) { ++ swiotlb_tbl_unmap_single(dev, map, size, dir); ++ dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer); ++ } ++ + return dev_addr; + } + EXPORT_SYMBOL_GPL(swiotlb_map_page); +--- head-2011-03-17.orig/mm/Kconfig 2011-01-31 14:34:25.000000000 +0100 ++++ head-2011-03-17/mm/Kconfig 2011-02-01 16:44:46.000000000 +0100 +@@ -304,7 +304,7 @@ config NOMMU_INITIAL_TRIM_EXCESS + + config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" +- depends on X86 && MMU ++ depends on X86 && !XEN && MMU + select COMPACTION + help + Transparent Hugepages allows the kernel to use huge pages and diff --git a/patches.xen/xen3-seccomp-disable-tsc-option b/patches.xen/xen3-seccomp-disable-tsc-option index 7c38bb4..bb9a045 100644 --- a/patches.xen/xen3-seccomp-disable-tsc-option +++ b/patches.xen/xen3-seccomp-disable-tsc-option @@ -13,9 +13,9 @@ Signed-off-by: Andrea Arcangeli Acked-by: Jeff Mahoney Automatically created from "patches.fixes/seccomp-disable-tsc-option" by xen-port-patches.py ---- head-2010-03-24.orig/arch/x86/kernel/process-xen.c 2010-03-25 10:38:31.000000000 +0100 -+++ head-2010-03-24/arch/x86/kernel/process-xen.c 2010-03-25 14:37:33.000000000 +0100 -@@ -141,6 +141,7 @@ static void hard_disable_TSC(void) +--- head-2011-02-17.orig/arch/x86/kernel/process-xen.c 2011-03-03 16:13:18.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/process-xen.c 2011-03-03 16:13:29.000000000 +0100 +@@ -142,6 +142,7 @@ static void hard_disable_TSC(void) void disable_TSC(void) { @@ -23,7 +23,7 @@ Automatically created from "patches.fixes/seccomp-disable-tsc-option" by xen-por preempt_disable(); if (!test_and_set_thread_flag(TIF_NOTSC)) /* -@@ -149,6 +150,7 @@ void disable_TSC(void) +@@ -150,6 +151,7 @@ void disable_TSC(void) */ hard_disable_TSC(); preempt_enable(); diff --git a/patches.xen/xen3-stack-unwind b/patches.xen/xen3-stack-unwind index 5008f25..75bda32 100644 --- a/patches.xen/xen3-stack-unwind +++ b/patches.xen/xen3-stack-unwind @@ -7,12 +7,14 @@ This includes reverting f1883f86dea84fe47a71a39fc1afccc005915ed8. Update Jan 17 2009 jeffm: - Something in 2.6.29-rc1 tweaked the frame pointer code somehow, so I fixed that up. 
+Update Jul 02 2010 jbeulich: +- fix after upstream commit 9e565292270a2d55524be38835104c564ac8f795 Automatically created from "patches.suse/stack-unwind" by xen-port-patches.py ---- head-2010-03-15.orig/arch/x86/include/mach-xen/asm/system.h 2010-02-24 12:33:54.000000000 +0100 -+++ head-2010-03-15/arch/x86/include/mach-xen/asm/system.h 2010-01-25 13:43:44.000000000 +0100 -@@ -122,12 +122,22 @@ do { \ +--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:11:05.000000000 +0100 ++++ head-2011-02-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:13:47.000000000 +0100 +@@ -124,12 +124,22 @@ do { \ #define __switch_canary_iparam #endif /* CC_STACKPROTECTOR */ @@ -35,9 +37,9 @@ Automatically created from "patches.suse/stack-unwind" by xen-port-patches.py "movq "__percpu_arg([current_task])",%%rsi\n\t" \ __switch_canary \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ ---- head-2010-03-15.orig/arch/x86/kernel/entry_32-xen.S 2010-01-28 14:42:29.000000000 +0100 -+++ head-2010-03-15/arch/x86/kernel/entry_32-xen.S 2010-01-25 15:45:18.000000000 +0100 -@@ -1271,6 +1271,41 @@ END(spurious_interrupt_bug) +--- head-2011-02-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/entry_32-xen.S 2011-02-02 15:07:22.000000000 +0100 +@@ -1225,6 +1225,41 @@ END(spurious_interrupt_bug) */ .popsection @@ -79,9 +81,9 @@ Automatically created from "patches.suse/stack-unwind" by xen-port-patches.py ENTRY(kernel_thread_helper) pushl $0 # fake return address for unwinder CFI_STARTPROC ---- head-2010-03-15.orig/arch/x86/kernel/entry_64-xen.S 2010-03-17 14:25:44.000000000 +0100 -+++ head-2010-03-15/arch/x86/kernel/entry_64-xen.S 2010-03-17 14:27:13.000000000 +0100 -@@ -1160,6 +1160,40 @@ ENTRY(call_softirq) +--- head-2011-02-17.orig/arch/x86/kernel/entry_64-xen.S 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-02-17/arch/x86/kernel/entry_64-xen.S 2011-02-02 15:07:22.000000000 +0100 +@@ -1135,6 +1135,40 @@ ENTRY(call_softirq) CFI_ENDPROC END(call_softirq) diff --git a/patches.xen/xen3-x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing b/patches.xen/xen3-x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing deleted file mode 100644 index 57b7bf4..0000000 --- a/patches.xen/xen3-x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing +++ /dev/null @@ -1,48 +0,0 @@ -From: Roland McGrath -Date: Tue, 14 Sep 2010 19:22:58 +0000 (-0700) -Subject: x86-64, compat: Retruncate rax after ia32 syscall entry tracing -Git-commit: eefdca043e8391dcd719711716492063030b55ac -References: CVE-2010-3301 bnc#639708 -Patch-mainline: 2.6.36 -Introduced-by: 2.6.27 - -x86-64, compat: Retruncate rax after ia32 syscall entry tracing - -In commit d4d6715, we reopened an old hole for a 64-bit ptracer touching a -32-bit tracee in system call entry. A %rax value set via ptrace at the -entry tracing stop gets used whole as a 32-bit syscall number, while we -only check the low 32 bits for validity. - -Fix it by truncating %rax back to 32 bits after syscall_trace_enter, -in addition to testing the full 64 bits as has already been added. - -Reported-by: Ben Hawkes -Signed-off-by: Roland McGrath -Signed-off-by: H. 
Peter Anvin -Acked-by: Jeff Mahoney -Automatically created from "patches.fixes/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing" by xen-port-patches.py - ---- 11.3-2010-09-14.orig/arch/x86/ia32/ia32entry-xen.S 2010-05-12 09:08:52.000000000 +0200 -+++ 11.3-2010-09-14/arch/x86/ia32/ia32entry-xen.S 2010-09-17 11:18:22.000000000 +0200 -@@ -47,7 +47,12 @@ ia32_common: - /* - * Reload arg registers from stack in case ptrace changed them. - * We don't reload %eax because syscall_trace_enter() returned -- * the value it wants us to use in the table lookup. -+ * the %rax value we should see. Instead, we just truncate that -+ * value to 32 bits again as we did on entry from user mode. -+ * If it's a new value set by user_regset during entry tracing, -+ * this matches the normal truncation of the user-mode value. -+ * If it's -1 to make us punt the syscall, then (u32)-1 is still -+ * an appropriately invalid value. - */ - .macro LOAD_ARGS32 offset, _r9=0 - .if \_r9 -@@ -57,6 +62,7 @@ ia32_common: - movl \offset+48(%rsp),%edx - movl \offset+56(%rsp),%esi - movl \offset+64(%rsp),%edi -+ movl %eax,%eax /* zero extension */ - .endm - - .macro CFI_STARTPROC32 simple diff --git a/patches.xen/xen3-x86-64-compat-test-rax-for-the-syscall-number-not-eax b/patches.xen/xen3-x86-64-compat-test-rax-for-the-syscall-number-not-eax deleted file mode 100644 index fb33935..0000000 --- a/patches.xen/xen3-x86-64-compat-test-rax-for-the-syscall-number-not-eax +++ /dev/null @@ -1,96 +0,0 @@ -From: H. Peter Anvin -Date: Tue, 14 Sep 2010 19:42:41 +0000 (-0700) -Subject: x86-64, compat: Test %rax for the syscall number, not %eax -Git-commit: 36d001c70d8a0144ac1d038f6876c484849a74de -References: CVE-2010-3301 bnc#639708 -Patch-mainline: 2.6.36 -Introduced-by: 2.6.27 - -x86-64, compat: Test %rax for the syscall number, not %eax - -On 64 bits, we always, by necessity, jump through the system call -table via %rax. For 32-bit system calls, in theory the system call -number is stored in %eax, and the code was testing %eax for a valid -system call number. At one point we loaded the stored value back from -the stack to enforce zero-extension, but that was removed in checkin -d4d67150165df8bf1cc05e532f6efca96f907cab. An actual 32-bit process -will not be able to introduce a non-zero-extended number, but it can -happen via ptrace. - -Instead of re-introducing the zero-extension, test what we are -actually going to use, i.e. %rax. This only adds a handful of REX -prefixes to the code. - -Reported-by: Ben Hawkes -Signed-off-by: H. 
Peter Anvin -Cc: -Cc: Roland McGrath -Cc: Andrew Morton -Acked-by: Jeff Mahoney -Automatically created from "patches.fixes/x86-64-compat-test-rax-for-the-syscall-number-not-eax" by xen-port-patches.py - ---- 11.3-2010-09-14.orig/arch/x86/ia32/ia32entry-xen.S 2010-09-17 11:18:22.000000000 +0200 -+++ 11.3-2010-09-14/arch/x86/ia32/ia32entry-xen.S 2010-09-17 11:18:28.000000000 +0200 -@@ -145,7 +145,7 @@ ENTRY(ia32_sysenter_target) - orl $TS_COMPAT,TI_status(%r10) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) - jnz sysenter_tracesys -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys - jmp ia32_do_call - -@@ -159,7 +159,7 @@ ENTRY(ia32_sysenter_target) - movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ - call audit_syscall_entry - movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys - movl %ebx,%edi /* reload 1st syscall arg */ - movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ -@@ -186,7 +186,7 @@ sysenter_tracesys: - call syscall_trace_enter - LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ - RESTORE_REST -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ - jmp ia32_do_call - CFI_ENDPROC -@@ -240,7 +240,7 @@ ENTRY(ia32_cstar_target) - orl $TS_COMPAT,TI_status(%r10) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) - jnz cstar_tracesys -- cmpl $IA32_NR_syscalls-1,%eax -+ cmpq $IA32_NR_syscalls-1,%rax - ja ia32_badsys - cstar_do_call: - IA32_ARG_FIXUP 1 -@@ -267,7 +267,7 @@ cstar_tracesys: - LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ - RESTORE_REST - xchgl %ebp,%r9d -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ - jmp cstar_do_call - END(ia32_cstar_target) -@@ -324,7 +324,7 @@ ENTRY(ia32_syscall) - orl $TS_COMPAT,TI_status(%r10) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) - jnz ia32_tracesys -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys - ia32_do_call: - IA32_ARG_FIXUP -@@ -343,7 +343,7 @@ ia32_tracesys: - call syscall_trace_enter - LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ - RESTORE_REST -- cmpl $(IA32_NR_syscalls-1),%eax -+ cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ - jmp ia32_do_call - END(ia32_syscall) diff --git a/patches.xen/xen3-x86-mark_rodata_rw.patch b/patches.xen/xen3-x86-mark_rodata_rw.patch index ac82fb0..2b7d983 100644 --- a/patches.xen/xen3-x86-mark_rodata_rw.patch +++ b/patches.xen/xen3-x86-mark_rodata_rw.patch @@ -19,11 +19,11 @@ Acked-by: Andres Gruenbacher Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-patches.py ---- head-2010-04-15.orig/arch/x86/mm/init_32-xen.c 2010-04-15 10:51:33.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/init_32-xen.c 2010-03-25 14:37:41.000000000 +0100 -@@ -1137,6 +1137,20 @@ void mark_rodata_ro(void) - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); +--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 15:41:35.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-02 15:07:16.000000000 +0100 +@@ -1035,5 +1035,19 @@ void mark_rodata_ro(void) #endif + mark_nxdata_nx(); } +EXPORT_SYMBOL_GPL(mark_rodata_ro); + @@ -41,10 +41,9 @@ Automatically created from 
"patches.suse/x86-mark_rodata_rw.patch" by xen-port-p +EXPORT_SYMBOL_GPL(mark_rodata_rw); #endif - int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, ---- head-2010-04-15.orig/arch/x86/mm/init_64-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/init_64-xen.c 2010-04-15 11:41:27.000000000 +0200 -@@ -1019,6 +1019,7 @@ void set_kernel_text_ro(void) +--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-03-17 14:22:21.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-02 15:07:16.000000000 +0100 +@@ -1042,6 +1042,7 @@ void set_kernel_text_ro(void) set_memory_ro(start, (end - start) >> PAGE_SHIFT); } @@ -52,7 +51,7 @@ Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-p void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); -@@ -1051,15 +1052,33 @@ void mark_rodata_ro(void) +@@ -1074,15 +1075,33 @@ void mark_rodata_ro(void) set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif @@ -91,10 +90,10 @@ Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-p +EXPORT_SYMBOL_GPL(mark_rodata_rw); #endif - int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, ---- head-2010-04-15.orig/arch/x86/mm/pageattr-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/pageattr-xen.c 2010-03-25 14:37:41.000000000 +0100 -@@ -245,6 +245,8 @@ static void cpa_flush_array(unsigned lon + int kern_addr_valid(unsigned long addr) +--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-03-17 14:22:21.000000000 +0100 ++++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-03-17 14:33:38.000000000 +0100 +@@ -244,6 +244,8 @@ static void cpa_flush_array(unsigned lon } } @@ -103,7 +102,7 @@ Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-p /* * Certain areas of memory on x86 require very specific protection flags, * for example the BIOS area or kernel text. Callers don't always get this -@@ -278,8 +280,10 @@ static inline pgprot_t static_protection +@@ -277,8 +279,10 @@ static inline pgprot_t static_protection * catches all aliases. 
*/ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, @@ -116,7 +115,7 @@ Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-p #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) && !defined(CONFIG_XEN) /* -@@ -1200,6 +1204,21 @@ int set_memory_rw(unsigned long addr, in +@@ -1216,6 +1220,21 @@ int set_memory_rw(unsigned long addr, in } EXPORT_SYMBOL_GPL(set_memory_rw); @@ -138,7 +137,7 @@ Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-p int set_memory_np(unsigned long addr, int numpages) { return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); -@@ -1314,6 +1333,13 @@ int set_pages_rw(struct page *page, int +@@ -1349,6 +1368,13 @@ int set_pages_rw(struct page *page, int return set_memory_rw(addr, numpages); } diff --git a/patches.xen/xen3-x86-mcp51-no-dac b/patches.xen/xen3-x86-mcp51-no-dac index 6afb6dd..f4be9f2 100644 --- a/patches.xen/xen3-x86-mcp51-no-dac +++ b/patches.xen/xen3-x86-mcp51-no-dac @@ -13,9 +13,9 @@ Reported-by: pgnet Signed-off-by: Tejun Heo Automatically created from "patches.arch/x86-mcp51-no-dac" by xen-port-patches.py ---- head-2010-04-15.orig/arch/x86/kernel/pci-dma-xen.c 2010-04-15 10:48:32.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/pci-dma-xen.c 2010-04-15 11:41:14.000000000 +0200 -@@ -402,4 +402,18 @@ static __devinit void via_no_dac(struct +--- head-2010-11-22.orig/arch/x86/kernel/pci-dma-xen.c 2010-11-24 11:26:31.000000000 +0100 ++++ head-2010-11-22/arch/x86/kernel/pci-dma-xen.c 2010-11-24 11:43:02.000000000 +0100 +@@ -411,4 +411,18 @@ static __devinit void via_no_dac(struct } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); diff --git a/patches.xen/xen3-x86_64-unwind-annotations b/patches.xen/xen3-x86_64-unwind-annotations index bf85989..f1e9ad0 100644 --- a/patches.xen/xen3-x86_64-unwind-annotations +++ b/patches.xen/xen3-x86_64-unwind-annotations @@ -5,103 +5,92 @@ References: bnc#472783 Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port-patches.py ---- head-2010-03-15.orig/arch/x86/kernel/entry_64-xen.S 2010-03-17 14:27:13.000000000 +0100 -+++ head-2010-03-15/arch/x86/kernel/entry_64-xen.S 2010-03-17 16:10:12.000000000 +0100 -@@ -41,6 +41,7 @@ - */ +--- head-2011-01-30.orig/arch/x86/kernel/entry_64-xen.S 2011-02-02 15:07:22.000000000 +0100 ++++ head-2011-01-30/arch/x86/kernel/entry_64-xen.S 2011-02-02 15:09:22.000000000 +0100 +@@ -223,23 +223,16 @@ NMI_MASK = 0x80000000 + .endm - #include -+#include - #include - #include - #include -@@ -231,21 +232,21 @@ NMI_MASK = 0x80000000 /* - * initial frame state for interrupts (and exceptions without error code) +- * initial frame state for interrupts (and exceptions without error code) ++ * initial frame state for syscall */ - .macro EMPTY_FRAME start=1 offset=0 -- .if \start -+ .macro EMPTY_FRAME offset=0 ++ .macro BASIC_FRAME start=1 offset=0 + .if \start CFI_STARTPROC simple CFI_SIGNAL_FRAME - CFI_DEF_CFA rsp,8+\offset -- .else ++ CFI_DEF_CFA rsp, SS+8+\offset-RIP + .else - CFI_DEF_CFA_OFFSET 8+\offset -- .endif -+ CFI_DEF_CFA rsp,\offset - .endm - - /* - * initial frame state for syscall - */ - .macro BASIC_FRAME start=1 offset=0 -- EMPTY_FRAME \start, SS+8+\offset-RIP -+ .if \start -+ EMPTY_FRAME __stringify(SS+8+\offset-RIP) -+ .else + CFI_DEF_CFA_OFFSET SS+8+\offset-RIP -+ .endif + .endif +- .endm +- +-/* +- * initial frame state for syscall +- */ +- .macro BASIC_FRAME start=1 offset=0 +- EMPTY_FRAME \start, SS+8+\offset-RIP /*CFI_REL_OFFSET ss, 
SS+\offset-RIP*/ CFI_REL_OFFSET rsp, RSP+\offset-RIP /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ -@@ -271,15 +272,16 @@ NMI_MASK = 0x80000000 - * with vector already pushed) +@@ -266,14 +259,15 @@ NMI_MASK = 0x80000000 */ .macro XCPT_FRAME start=1 offset=0 -- INTR_FRAME \start, RIP+\offset-ORIG_RAX + INTR_FRAME \start, RIP+\offset-ORIG_RAX - /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ -+ INTR_FRAME \start, __stringify(RIP+\offset-ORIG_RAX) .endm /* * frame that enables calling into C. */ .macro PARTIAL_FRAME start=1 offset=0 -- XCPT_FRAME 2*\start, ORIG_RAX+\offset-ARGOFFSET + .if \start >= 0 -+ XCPT_FRAME 2*\start, __stringify(ORIG_RAX+\offset-ARGOFFSET) + XCPT_FRAME 2*\start, ORIG_RAX+\offset-ARGOFFSET + .endif CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET -@@ -295,7 +297,9 @@ NMI_MASK = 0x80000000 +@@ -289,7 +283,9 @@ NMI_MASK = 0x80000000 * frame that enables passing a complete pt_regs to a C function. */ .macro DEFAULT_FRAME start=1 offset=0 -- PARTIAL_FRAME \start, R11+\offset-R15 + .if \start >= -1 -+ PARTIAL_FRAME \start, __stringify(R11+\offset-R15) + PARTIAL_FRAME \start, R11+\offset-R15 + .endif CFI_REL_OFFSET rbx, RBX+\offset CFI_REL_OFFSET rbp, RBP+\offset CFI_REL_OFFSET r12, R12+\offset -@@ -334,21 +338,23 @@ NMI_MASK = 0x80000000 - #ifndef CONFIG_XEN +@@ -329,25 +325,27 @@ NMI_MASK = 0x80000000 /* save partial stack frame */ + .pushsection .kprobes.text, "ax" ENTRY(save_args) - XCPT_FRAME -+ XCPT_FRAME offset=__stringify(ORIG_RAX-ARGOFFSET+16) ++ XCPT_FRAME offset=ORIG_RAX-RBP+8 cld -- movq_cfi rdi, RDI+16-ARGOFFSET -- movq_cfi rsi, RSI+16-ARGOFFSET -- movq_cfi rdx, RDX+16-ARGOFFSET -- movq_cfi rcx, RCX+16-ARGOFFSET -- movq_cfi rax, RAX+16-ARGOFFSET -- movq_cfi r8, R8+16-ARGOFFSET -- movq_cfi r9, R9+16-ARGOFFSET -- movq_cfi r10, R10+16-ARGOFFSET -- movq_cfi r11, R11+16-ARGOFFSET -+ movq %rdi, RDI+16-ARGOFFSET(%rsp) -+ movq %rsi, RSI+16-ARGOFFSET(%rsp) -+ movq %rdx, RDX+16-ARGOFFSET(%rsp) -+ movq %rcx, RCX+16-ARGOFFSET(%rsp) -+ movq_cfi rax, __stringify(RAX+16-ARGOFFSET) -+ movq %r8, R8+16-ARGOFFSET(%rsp) -+ movq %r9, R9+16-ARGOFFSET(%rsp) -+ movq %r10, R10+16-ARGOFFSET(%rsp) -+ movq_cfi r11, __stringify(R11+16-ARGOFFSET) + /* + * start from rbp in pt_regs and jump over + * return address. 
+ */ + movq_cfi rdi, RDI+8-RBP +- movq_cfi rsi, RSI+8-RBP +- movq_cfi rdx, RDX+8-RBP +- movq_cfi rcx, RCX+8-RBP ++ movq %rsi, RSI+8-RBP(%rsp) ++ movq %rdx, RDX+8-RBP(%rsp) ++ movq %rcx, RCX+8-RBP(%rsp) + movq_cfi rax, RAX+8-RBP +- movq_cfi r8, R8+8-RBP +- movq_cfi r9, R9+8-RBP +- movq_cfi r10, R10+8-RBP +- movq_cfi r11, R11+8-RBP ++ movq %r8, R8+8-RBP(%rsp) ++ movq %r9, R9+8-RBP(%rsp) ++ movq %r10, R10+8-RBP(%rsp) ++ movq %r11, R11+8-RBP(%rsp) - leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ + leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ movq_cfi rbp, 8 /* push %rbp */ leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ + CFI_DEF_CFA_REGISTER rbp @@ -109,7 +98,7 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- testl $3, CS(%rdi) je 1f SWAPGS -@@ -360,11 +366,10 @@ ENTRY(save_args) +@@ -359,11 +357,10 @@ ENTRY(save_args) */ 1: incl PER_CPU_VAR(irq_count) jne 2f @@ -124,7 +113,7 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- /* * We entered an interrupt context - irqs are off: */ -@@ -375,14 +380,14 @@ END(save_args) +@@ -375,14 +372,14 @@ END(save_args) #endif ENTRY(save_rest) @@ -146,38 +135,34 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- movq %r11, 8(%rsp) /* return address */ FIXUP_TOP_OF_STACK %r11, 16 ret -@@ -393,23 +398,23 @@ END(save_rest) +@@ -393,23 +390,23 @@ END(save_rest) /* save complete stack frame */ .pushsection .kprobes.text, "ax" ENTRY(save_paranoid) - XCPT_FRAME 1 RDI+8 -+ XCPT_FRAME offset=__stringify(ORIG_RAX-R15+8) ++ XCPT_FRAME offset=ORIG_RAX-R15+8 cld - movq_cfi rdi, RDI+8 - movq_cfi rsi, RSI+8 -- movq_cfi rdx, RDX+8 -- movq_cfi rcx, RCX+8 -- movq_cfi rax, RAX+8 ++ movq %rdi, RDI+8(%rsp) ++ movq %rsi, RSI+8(%rsp) + movq_cfi rdx, RDX+8 + movq_cfi rcx, RCX+8 + movq_cfi rax, RAX+8 - movq_cfi r8, R8+8 - movq_cfi r9, R9+8 - movq_cfi r10, R10+8 - movq_cfi r11, R11+8 -- movq_cfi rbx, RBX+8 ++ movq %r8, R8+8(%rsp) ++ movq %r9, R9+8(%rsp) ++ movq %r10, R10+8(%rsp) ++ movq %r11, R11+8(%rsp) + movq_cfi rbx, RBX+8 - movq_cfi rbp, RBP+8 - movq_cfi r12, R12+8 - movq_cfi r13, R13+8 - movq_cfi r14, R14+8 - movq_cfi r15, R15+8 -+ movq %rdi, RDI+8(%rsp) -+ movq %rsi, RSI+8(%rsp) -+ movq_cfi rdx, __stringify(RDX+8) -+ movq_cfi rcx, __stringify(RCX+8) -+ movq_cfi rax, __stringify(RAX+8) -+ movq %r8, R8+8(%rsp) -+ movq %r9, R9+8(%rsp) -+ movq %r10, R10+8(%rsp) -+ movq %r11, R11+8(%rsp) -+ movq_cfi rbx, __stringify(RBX+8) + movq %rbp, RBP+8(%rsp) + movq %r12, R12+8(%rsp) + movq %r13, R13+8(%rsp) @@ -186,7 +171,7 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- movl $1,%ebx movl $MSR_GS_BASE,%ecx rdmsr -@@ -700,7 +705,7 @@ ENTRY(\label) +@@ -692,7 +689,7 @@ ENTRY(\label) subq $REST_SKIP, %rsp CFI_ADJUST_CFA_OFFSET REST_SKIP call save_rest @@ -195,89 +180,31 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- leaq 8(%rsp), \arg /* pt_regs pointer */ call \func jmp ptregscall_common -@@ -717,21 +722,20 @@ END(\label) - ENTRY(ptregscall_common) - DEFAULT_FRAME 1 8 /* offset 8: return address */ - RESTORE_TOP_OF_STACK %r11, 8 -- movq_cfi_restore R15+8, r15 -- movq_cfi_restore R14+8, r14 -- movq_cfi_restore R13+8, r13 -- movq_cfi_restore R12+8, r12 -- movq_cfi_restore RBP+8, rbp -- movq_cfi_restore RBX+8, rbx -+ movq_cfi_restore __stringify(R15+8), r15 -+ movq_cfi_restore __stringify(R14+8), r14 -+ movq_cfi_restore __stringify(R13+8), r13 -+ movq_cfi_restore __stringify(R12+8), r12 -+ movq_cfi_restore __stringify(RBP+8), 
rbp -+ movq_cfi_restore __stringify(RBX+8), rbx - ret $REST_SKIP /* pop extended registers */ - CFI_ENDPROC - END(ptregscall_common) - - ENTRY(stub_execve) - CFI_STARTPROC -- popq %r11 -- CFI_ADJUST_CFA_OFFSET -8 -- CFI_REGISTER rip, r11 -+ addq $8, %rsp -+ PARTIAL_FRAME 0 - SAVE_REST - FIXUP_TOP_OF_STACK %r11 - movq %rsp, %rcx -@@ -750,7 +754,7 @@ END(stub_execve) - ENTRY(stub_rt_sigreturn) - CFI_STARTPROC - addq $8, %rsp -- CFI_ADJUST_CFA_OFFSET -8 -+ PARTIAL_FRAME 0 - SAVE_REST - movq %rsp,%rdi - FIXUP_TOP_OF_STACK %r11 -@@ -926,10 +930,10 @@ ENTRY(\sym) - movq 8(%rsp),%r11 - CFI_RESTORE r11 - movq $-1,8(%rsp) /* ORIG_RAX: no syscall to restart */ -- subq $(15-1)*8,%rsp -- CFI_ADJUST_CFA_OFFSET (15-1)*8 -+ subq $ORIG_RAX-R15-8, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15-8 +@@ -905,7 +902,7 @@ ENTRY(\sym) + subq $ORIG_RAX-R15-1*8,%rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15-1*8 call error_entry - DEFAULT_FRAME 0 + DEFAULT_FRAME -1 movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ call \do_sym -@@ -953,10 +957,10 @@ ENTRY(\sym) - CFI_RESTORE rcx - movq 8(%rsp),%r11 - CFI_RESTORE r11 -- subq $(15-2)*8,%rsp -- CFI_ADJUST_CFA_OFFSET (15-2)*8 -+ subq $ORIG_RAX-R15-2*8, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15-2*8 +@@ -932,7 +929,7 @@ ENTRY(\sym) + subq $ORIG_RAX-R15-2*8,%rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15-2*8 call error_entry - DEFAULT_FRAME 0 + DEFAULT_FRAME -1 movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ -@@ -1225,7 +1229,7 @@ paranoidzeroentry machine_check *machine - - /* ebx: no swapgs flag */ - ENTRY(paranoid_exit) -- INTR_FRAME -+ DEFAULT_FRAME - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - testl %ebx,%ebx /* swapgs needed? */ -@@ -1276,25 +1280,24 @@ END(paranoid_exit) +@@ -1254,25 +1251,24 @@ END(paranoid_exit) * returns in "no swapgs flag" in %ebx. */ ENTRY(error_entry) - XCPT_FRAME 2 - CFI_ADJUST_CFA_OFFSET 15*8 -+ XCPT_FRAME start=2 offset=__stringify(ORIG_RAX-R15+8) ++ XCPT_FRAME start=2 offset=ORIG_RAX-R15+8 /* oldrax contains error code */ cld - movq_cfi rdi, RDI+8 @@ -289,12 +216,6 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- - movq_cfi r9, R9+8 - movq_cfi r10, R10+8 - movq_cfi r11, R11+8 -- movq_cfi rbx, RBX+8 -- movq_cfi rbp, RBP+8 -- movq_cfi r12, R12+8 -- movq_cfi r13, R13+8 -- movq_cfi r14, R14+8 -- movq_cfi r15, R15+8 + movq %rdi, RDI+8(%rsp) + movq %rsi, RSI+8(%rsp) + movq %rdx, RDX+8(%rsp) @@ -304,7 +225,12 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- + movq %r9, R9+8(%rsp) + movq %r10, R10+8(%rsp) + movq %r11, R11+8(%rsp) -+ movq_cfi rbx, __stringify(RBX+8) + movq_cfi rbx, RBX+8 +- movq_cfi rbp, RBP+8 +- movq_cfi r12, R12+8 +- movq_cfi r13, R13+8 +- movq_cfi r14, R14+8 +- movq_cfi r15, R15+8 + movq %rbp, RBP+8(%rsp) + movq %r12, R12+8(%rsp) + movq %r13, R13+8(%rsp) @@ -313,15 +239,7 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- #ifndef CONFIG_XEN xorl %ebx,%ebx testl $3,CS+8(%rsp) -@@ -1305,7 +1308,6 @@ error_sti: - #endif - TRACE_IRQS_OFF - ret -- CFI_ENDPROC - - #ifndef CONFIG_XEN - /* -@@ -1316,6 +1318,7 @@ error_sti: +@@ -1293,6 +1289,7 @@ error_sti: * compat mode. Check for these here too. 
*/ error_kernelspace: @@ -329,11 +247,3 @@ Automatically created from "patches.arch/x86_64-unwind-annotations" by xen-port- incl %ebx leaq irq_return(%rip),%rcx cmpq %rcx,RIP+8(%rsp) -@@ -1332,6 +1335,7 @@ bstep_iret: - movq %rcx,RIP+8(%rsp) - jmp error_swapgs - #endif -+ CFI_ENDPROC - END(error_entry) - - diff --git a/patches.xen/xen3-x86_cpufreq_make_trace_power_frequency_cpufreq_driver_independent.patch b/patches.xen/xen3-x86_cpufreq_make_trace_power_frequency_cpufreq_driver_independent.patch deleted file mode 100644 index 383b5cb..0000000 --- a/patches.xen/xen3-x86_cpufreq_make_trace_power_frequency_cpufreq_driver_independent.patch +++ /dev/null @@ -1,73 +0,0 @@ -From: Thomas Renninger -Subject: x86 cpufreq: Make trace_power_frequency cpufreq driver independent -Patch-Mainline: submitted - please revert after 2.6.35 (whether it's in or not) -References: none - -and fix the broken case if a core's frequency depends on others. - -trace_power_frequency was only implemented in a rather ungeneric way -in acpi-cpufreq driver's target() function only. --> Move the call to trace_power_frequency to - cpufreq.c:cpufreq_notify_transition() where CPUFREQ_POSTCHANGE - notifier is triggered. - This will support power frequency tracing by all cpufreq drivers - -trace_power_frequency did not trace frequency changes correctly when -the userspace governor was used or when CPU cores' frequency depend -on each other. --> Moving this into the CPUFREQ_POSTCHANGE notifier and pass the cpu - which gets switched automatically fixes this. - -Robert Schoene provided some important fixes on top of my initial -quick shot version which are integrated in this patch: -- Forgot some changes in power_end trace (TP_printk/variable names) -- Variable dummy in power_end must now be cpu_id -- Use static 64 bit variable instead of unsigned int for cpu_id - -Signed-off-by: Thomas Renninger -CC: davej@redhat.com -CC: arjan@infradead.org -CC: davej@redhat.com -CC: linux-kernel@vger.kernel.org -CC: robert.schoene@tu-dresden.de -Tested-by: robert.schoene@tu-dresden.de -Automatically created from "patches.trace/x86_cpufreq_make_trace_power_frequency_cpufreq_driver_independent.patch" by xen-port-patches.py - ---- head-2010-04-29.orig/arch/x86/kernel/process-xen.c 2010-03-25 14:37:33.000000000 +0100 -+++ head-2010-04-29/arch/x86/kernel/process-xen.c 2010-04-29 10:14:28.000000000 +0200 -@@ -359,7 +359,7 @@ static inline int hlt_use_halt(void) - */ - void xen_idle(void) - { -- trace_power_start(POWER_CSTATE, 1); -+ trace_power_start(POWER_CSTATE, 1, smp_processor_id()); - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we -@@ -425,7 +425,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); - */ - void mwait_idle_with_hints(unsigned long ax, unsigned long cx) - { -- trace_power_start(POWER_CSTATE, (ax>>4)+1); -+ trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); - if (!need_resched()) { - if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) - clflush((void *)¤t_thread_info()->flags); -@@ -441,7 +441,7 @@ void mwait_idle_with_hints(unsigned long - static void mwait_idle(void) - { - if (!need_resched()) { -- trace_power_start(POWER_CSTATE, 1); -+ trace_power_start(POWER_CSTATE, 1, smp_processor_id()); - if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) - clflush((void *)¤t_thread_info()->flags); - -@@ -463,7 +463,7 @@ static void mwait_idle(void) - */ - static void poll_idle(void) - { -- trace_power_start(POWER_CSTATE, 0); -+ trace_power_start(POWER_CSTATE, 0, 
smp_processor_id()); - local_irq_enable(); - while (!need_resched()) - cpu_relax(); diff --git a/rel b/rel index 48082f7..d00491f 100644 --- a/rel +++ b/rel @@ -1 +1 @@ -12 +1 diff --git a/series.conf b/series.conf index e1ebb73..614145e 100644 --- a/series.conf +++ b/series.conf @@ -35,10 +35,8 @@ ######################################################## patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning - - patches.rpmify/dmar-fix-section-mismatch - patches.rpmify/ppc-crashdump-typefix - patches.rpmify/powerpc-kvm-build-failure-workaround + patches.rpmify/qla4xx-missing-readq-definition + patches.rpmify/dw_spi-fix-PPC-build.patch ######################################################## # kABI consistency patches @@ -64,19 +62,15 @@ # kbuild/module infrastructure fixes ######################################################## patches.suse/supported-flag - patches.suse/supported-flag-sysfs patches.suse/supported-flag-enterprise - patches.fixes/kbuild-fix-generating-of-.symtypes-files patches.suse/genksyms-add-override-flag.diff patches.suse/kconfig-automate-kernel-desktop ######################################################## # Simple export additions/removals ######################################################## - patches.suse/reiser4-exports patches.suse/export-release_open_intent patches.suse/export-security_inode_permission -+still_needed-33? patches.suse/export-sync_page_range ######################################################## # Bug workarounds for binutils @@ -90,25 +84,6 @@ patches.suse/hung_task_timeout-configurable-default +needs_update-33 patches.suse/sched-revert-latency-defaults - # writable limits - patches.suse/rlim-0015-SECURITY-add-task_struct-to-setrlimit.patch - patches.suse/rlim-0016-core-add-task_struct-to-update_rlimit_cpu.patch - patches.suse/rlim-0017-sys_setrlimit-make-sure-rlim_max-never-grows.patch - patches.suse/rlim-0018-core-split-sys_setrlimit.patch - patches.suse/rlim-0019-core-allow-setrlimit-to-non-current-tasks.patch - patches.suse/rlim-0020-core-optimize-setrlimit-for-current-task.patch - patches.suse/rlim-0021-FS-proc-switch-limits-reading-to-fops.patch - patches.suse/rlim-0022-FS-proc-make-limits-writable.patch - patches.suse/rlim-0023-core-do-security-check-under-task_lock.patch - - patches.fixes/make-note_interrupt-fast.diff - patches.fixes/twl6030-fix-note_interrupt-call - patches.fixes/use-rcu-lock-in-setpgid.patch - patches.fixes/compat-make-compat_alloc_user_space-incorporate-the-access_ok - patches.fixes/setup_arg_pages-diagnose-excessive-argument-size - patches.fixes/execve-improve-interactivity-with-large-arguments - patches.fixes/execve-make-responsive-to-sigkill-with-large-arguments - ######################################################## # Architecture-specific patches. 
These used to be all # at the end of series.conf, but since we don't do @@ -123,7 +98,6 @@ patches.arch/mm-avoid-bad-page-on-lru patches.arch/ia64-page-migration patches.arch/ia64-page-migration.fix - patches.fixes/taskstats-alignment ######################################################## # i386 @@ -140,31 +114,22 @@ ######################################################## # x86_64/i386 biarch ######################################################## - patches.arch/x86-hpet-pre-read ++needs_update37 patches.arch/x86-hpet-pre-read +needs_update-33 patches.arch/x86_64-hpet-64bit-timer.patch patches.arch/x86-mcp51-no-dac - patches.arch/kvm-split-paravirt-ops-by-functionality - patches.arch/kvm-only-export-selected-pv-ops-feature-structs - patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature - patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic ++needs_update patches.arch/kvm-split-paravirt-ops-by-functionality ++needs_update patches.arch/kvm-only-export-selected-pv-ops-feature-structs ++needs_update patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature ++needs_update patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic patches.suse/x86-mark_rodata_rw.patch - patches.fixes/dmar-fix-oops-with-no-dmar-table - patches.fixes/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing - patches.fixes/x86-64-compat-test-rax-for-the-syscall-number-not-eax - ######################################################## # x86 MCE/MCA (Machine Check Error/Architecture) extensions ######################################################## - # Needed on Boxboro/Westmere-EX to correctly decode the physical - # address of correctable errors - patches.arch/x86_mce_intel_decode_physical_address.patch - patches.arch/x86_mce_intel_decode_physical_address_rename_fix.patch - patches.arch/x86_mce_intel_decode_physical_address_compile_fix.patch ######################################################## # x86_64/4096CPUS - from SGI @@ -174,8 +139,6 @@ ######################################################## # x86 UV patches from SGI ######################################################## - # bug 566745 - patches.arch/UV-Expose-irq_desc-node-in-proc.patch ######################################################## # x86_64/i386 depending on the UV patchset @@ -184,13 +147,11 @@ ######################################################## # powerpc/generic ######################################################## - patches.suse/of_platform_driver.module-owner.patch patches.suse/led_classdev.sysfs-name.patch patches.suse/radeon-monitor-jsxx-quirk.patch patches.suse/8250-sysrq-ctrl_o.patch - patches.suse/ppc-no-LDFLAGS_MODULE.patch - patches.arch/ppc-vio-modalias.patch ++needs_update? 
patches.suse/ppc-no-LDFLAGS_MODULE.patch patches.arch/ppc-pegasos-console-autodetection.patch patches.suse/ppc-powerbook-usb-fn-key-default.patch patches.drivers/ppc64-adb @@ -199,8 +160,6 @@ patches.arch/ppc-prom-nodisplay.patch patches.fixes/ptrace-getsiginfo patches.arch/ppc-ipic-suspend-without-83xx-fix - patches.arch/ppc-vmcoreinfo.diff -# patches.fixes/powerpc-fix-handling-of-strnlen-with-zero-len ######################################################## # PS3 @@ -213,6 +172,7 @@ patches.suse/s390-System.map.diff patches.arch/s390-message-catalog.diff + patches.arch/s390-message-catalog-fix.diff patches.arch/kmsg-fix-parameter-limitations patches.suse/s390-Kerntypes.diff @@ -221,14 +181,12 @@ # VM/FS patches ######################################################## patches.suse/unmap_vmas-lat - patches.suse/silent-stack-overflow-2.patch patches.fixes/oom-warning patches.suse/shmall-bigger patches.fixes/grab-swap-token-oops +needs_update-32 patches.suse/osync-error patches.fixes/remount-no-shrink-dcache - patches.suse/reiser4-set_page_dirty_notag patches.suse/file-capabilities-disable-by-default.diff +npiggin patches.suse/files-slab-rcu.patch @@ -236,8 +194,9 @@ patches.suse/mm-devzero-optimisation.patch - patches.fixes/aggressive-zone-reclaim.patch ++36 patches.fixes/aggressive-zone-reclaim.patch patches.suse/readahead-request-tunables.patch + patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops ######################################################## # IPC patches @@ -255,6 +214,10 @@ patches.suse/connector-read-mostly patches.suse/kbd-ignore-gfx.patch + patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files + patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file + patches.fixes/ubifs-restrict-world-writable-debugfs-files + ######################################################## # # ACPI patches @@ -264,14 +227,16 @@ patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch +trenn patches.suse/acpi-dsdt-initrd-v0.9a-2.6.25.patch - patches.suse/add-initramfs-file_read_write - patches.suse/init-move-populate_rootfs-back-to-start_kernel - patches.suse/acpi-generic-initramfs-table-override-support - patches.suse/acpi-don-t-preempt-until-the-system-is-up ++jeffm patches.suse/add-initramfs-file_read_write ++jeffm patches.suse/init-move-populate_rootfs-back-to-start_kernel ++jeffm patches.suse/acpi-generic-initramfs-table-override-support ++jeffm patches.suse/acpi-don-t-preempt-until-the-system-is-up patches.arch/acpi_thermal_passive_blacklist.patch patches.arch/acpi-export-hotplug_execute + patches.arch/acpi_fix_fadt_32_bit_zero_length.patch + +needs_update-32 patches.arch/acpi_ec_provide_non_interrupt_mode_boot_param.patch # Adjust this patch for every new product (at least Enterprise @@ -283,13 +248,8 @@ patches.arch/acpi_srat-pxm-rev-ia64.patch patches.arch/acpi_srat-pxm-rev-x86-64.patch - # HP WMI patches - patches.fixes/hp-wmi_detect_keys.patch - patches.fixes/hp_wmi_catch_unkown_event_key_codes.patch - patches.fixes/hp_wmi_use_prefix_string.patch - patches.fixes/hp_wmi_add_media_key.patch - - patches.fixes/acpi_processor_check_maxcpus.patch + # Queued for 2.6.36 -> just revert after some time + patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch ######################################################## # CPUFREQ @@ -307,21 +267,18 @@ # patches.suse/cpuidle-documentation patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch - patches.fixes/acpi-cpufreq_fix_cpu_any_notification.patch - 
patches.arch/x86-cpu-add-amd-core-boosting-feature-flag-to-proc-cpuinfo - patches.arch/powernow-k8-add-core-performance-boost-support - patches.arch/x86-cpufreq-add-aperf-mperf-support-for-amd-processors +## cpuidle perf events cleanups and related + patches.arch/perf_timechart_fix_zero_timestamps.patch ######################################################## # AGP, graphics related stuff ######################################################## - patches.arch/x86_agpgart-g33-stoeln-fix-2.patch ++needs_update37 patches.arch/x86_agpgart-g33-stoeln-fix-2.patch ######################################################## # Suse specific stuff ######################################################## # TIOCGDEV - suse special - patches.fixes/tiocgdev +still_needed? patches.suse/mm-increase-dirty-limits.patch +needs_to_die-33 patches.suse/panic-on-io-nmi-SLE11-user-space-api.patch @@ -329,17 +286,14 @@ # Networking, IPv6 ######################################################## patches.fixes/bridge-module-get-put.patch - patches.fixes/net-sched-fix-some-kernel-memory-leaks + patches.fixes/bonding-Incorrect-TX-queue-offset.patch ######################################################## # NFS ######################################################## patches.fixes/nfs-slot-table-alloc - patches.fixes/nfsd-05-sunrpc-cache-allow-thread-to-block-while-waiting-for.patch - patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch - patches.fixes/nfsd-07-nfsd-idmap-drop-special-request-deferal-in-favour-of.patch - patches.fixes/sunrpc-monotonic-expiry ++needs_update37 patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch ######################################################## # lockd + statd @@ -348,24 +302,28 @@ ######################################################## # cifs patches ######################################################## -# patches.fixes/cifs-fix-oops-due-to-null-nameidata ######################################################## # ext2/ext3 ######################################################## patches.suse/ext3-barrier-default # patches.suse/ext2-fsync-err - patches.fixes/ext3-mark-super-uptodate ######################################################## # ext4 ######################################################## + ######################################################## + # btrfs + ######################################################## + ######################################################## # Reiserfs Patches ######################################################## patches.suse/reiserfs-barrier-default patches.fixes/reiserfs-remove-2-tb-file-size-limit + patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash + patches.suse/reiser4-dependencies ######################################################## # dlm @@ -375,31 +333,20 @@ # ocfs2 ######################################################## - patches.suse/ocfs2-allocation-resrvations.patch - ######################################################## - # gfs2 read-only support for migration + # gfs2 ######################################################## - patches.suse/gfs2-ro-mounts-only.patch ######################################################## # xfs ######################################################## - patches.suse/xfs-dmapi-src - patches.suse/xfs-dmapi-enable - patches.suse/xfs-dmapi-xfs-enable - patches.suse/xfs-dmapi-re-add-flags-for-xfs_free_eofblocks - patches.suse/xfs-nfsd-dmapi-aware - patches.fixes/xfs-dmapi-fixes - patches.fixes/xfs-export-debug 
- patches.suse/xfs-dmapi-2-6-34-api-changes - patches.suse/xfs-dmapi-fix-incompatible-pointer-type-warning ######################################################## # novfs ######################################################## patches.suse/novfs-client-module patches.suse/novfs-fix-debug-message.patch + patches.suse/novfs-2.6.35-api-changes patches.fixes/novfs-err_ptr-fix.diff patches.fixes/novfs-fix-inode-uid patches.fixes/novfs-incorrect-filesize-fix @@ -408,17 +355,44 @@ patches.fixes/novfs-dentry-cache-limit.patch patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir patches.fixes/novfs-LFS-initialization + patches.fixes/novfs-overflow-fixes + patches.fixes/novfs-bdi-init.diff + patches.fixes/novfs-xattr-memleak + patches.fixes/novfs-xattr-errcode-cleanup + patches.fixes/novfs-xattr-errcode-cleanup2 + patches.fixes/novfs-lindent + patches.suse/novfs-fix-ioctl-usage + patches.suse/novfs-use-evict_inode + patches.fixes/novfs-unlink-oops + patches.suse/novfs-2.6.37-api-changes + patches.fixes/novfs-fragment-size-fix.patch + patches.suse/novfs-build-fix + + ######################################################## + # other filesystem stuff: richacls + ######################################################## + patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch + patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch + patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch + patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch + patches.suse/0005-richacl-Permission-mapping-functions.patch + patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch + patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch + patches.suse/0008-richacl-Permission-check-algorithm.patch + patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch + patches.suse/0010-richacl-Create-time-inheritance.patch + patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch + patches.suse/0012-richacl-Automatic-Inheritance.patch + patches.suse/0013-richacl-Restrict-access-check-algorithm.patch + patches.suse/0014-richacl-xattr-mapping-functions.patch + patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch + patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch + patches.suse/richacl-fix ######################################################## # other filesystem stuff ######################################################## - patches.suse/parser-match_string.diff - patches.suse/fs-may_iops.diff - patches.suse/fs-knows-MAY_APPEND.diff - patches.suse/nfs4acl-common.diff - patches.suse/nfs4acl-ext3.diff - patches.suse/nfs4acl-ai.diff - + patches.fixes/hfs-avoid-crash-in-hfs_bnode_create ######################################################## # Swap-over-NFS @@ -431,6 +405,7 @@ patches.suse/SoN-06-mm-kmem_estimate_pages.patch patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch patches.suse/SoN-08-mm-page_alloc-emerg.patch + patches.suse/SoN-08a-mm-page_alloc-emerg.patch patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch patches.suse/SoN-11-mm-reserve.patch @@ -439,6 +414,7 @@ patches.suse/SoN-14-net-sk_allocation.patch patches.suse/SoN-15-netvm-reserve.patch patches.suse/SoN-16-netvm-reserve-inet.patch + patches.suse/SoN-16a-netvm-reserve-inet.patch patches.suse/SoN-17-netvm-reserve-inet.patch-fix patches.suse/SoN-18-netvm-skbuff-reserve.patch patches.suse/SoN-19-netvm-sk_filter.patch @@ -448,24 +424,31 
@@ patches.suse/SoN-23-mm-swapfile.patch patches.suse/SoN-24-mm-page_file_methods.patch patches.suse/SoN-25-nfs-swapcache.patch + patches.suse/SoN-25a-nfs-swapcache.patch + patches.suse/SoN-25b-nfs-swapcache.patch patches.suse/SoN-26-nfs-swapper.patch patches.suse/SoN-27-nfs-swap_ops.patch + patches.suse/SoN-27a-nfs-swap_ops.patch patches.suse/SoN-28-nfs-alloc-recursions.patch patches.suse/SoN-29-fix-swap_sync_page-race patches.suse/SoN-30-fix-uninitialized-var.patch + patches.suse/SoN-31-fix-null-pointer-dereference + patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles - # don't want to rediff SoN until this gets more testing + # don't want to rediff SoN until this gets more testing patches.suse/slab-handle-memoryless-nodes-v2a.patch + patches.suse/SoN-fix + patches.suse/SoN-33-slab-leak-fix.patch + + patches.fixes/nfs-adaptive-readdir-plus ######################################################## # Netfilter ######################################################## - patches.suse/netfilter-ipt_LOG-mac - patches.suse/netfilter-ip_conntrack_slp.patch patches.fixes/fix-nf_conntrack_slp - patches.fixes/netfilter-remove-pointless-config_nf_ct_acct-warning + patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack ######################################################## # @@ -480,7 +463,6 @@ ######################################################## # libata - patches.drivers/libata-add-waits-for-govault patches.drivers/libata-unlock-hpa-by-default # Block layer fixes @@ -497,7 +479,7 @@ patches.fixes/scsi-check-host-lookup-failure patches.drivers/megaraid-mbox-fix-SG_IO - patches.drivers/qla4xxx-5.01.00-k9-5.01.00.00.11.01-k10.patch ++needs_update-35 patches.drivers/qla4xxx-5.01.00-k9-5.01.00.00.11.01-k10.patch # Remaining SCSI patches (garloff) patches.suse/scsi-error-test-unit-ready-timeout @@ -515,13 +497,10 @@ # DRM/Video ######################################################## - patches.drivers/drm-nouveau-allow-cursor-image-and-position-to-survi.patch - patches.drivers/drm-nouveau-Don-t-clear-AGPCMD-completely-on-INIT_RE.patch - - patches.drivers/0001-drm-i915-Use-spatio-temporal-dithering-on-PCH.patch - patches.drivers/0002-drm-i915-Honor-sync-polarity-from-VBT-panel-timing-d.patch - patches.drivers/0003-drm-i915-Add-the-support-of-eDP-on-DP-D-for-Ibex-CPT.patch - patches.drivers/0004-drm-i915-Configure-the-PIPECONF-dither-correctly-for.patch + ######################################################## + # video4linux + ######################################################## + patches.fixes/flexcop-fix-registering-braindead-stupid-names ######################################################## # Network @@ -530,7 +509,6 @@ patches.suse/nameif-track-rename.patch patches.fixes/tg3-fix-default-wol.patch patches.drivers/ehea-modinfo.patch - patches.fixes/tehuti-firmware-name # entropy FATE##307517 patches.drivers/bnx2-entropy-source.patch @@ -541,15 +519,19 @@ patches.drivers/tg3-entropy-source.patch patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch - -+needs_update patches.drivers/e1000-enhance-frame-fragment-detection.patch -+needs_update patches.drivers/e1000e-enhance-frame-fragment-detection.patch + ######################################################## # Wireless Networking ######################################################## patches.suse/wireless-no-aes-select patches.suse/b43-missing-firmware-info.patch ++needs_update patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices + + ######################################################## + # ISDN 
+ ######################################################## + patches.fixes/misdn-add-support-for-group-membership-check ######################################################## # iSCSI @@ -558,16 +540,15 @@ ######################################################## # PCI and PCI hotplug ######################################################## - patches.drivers/pci-disable-msi-on-K8M800 ######################################################## # sysfs / driver core ######################################################## -# patches.drivers/driver-core-add-devname-module-aliases-to-allow-module-on-demand-auto-loading.patch ######################################################## # USB ######################################################## + patches.fixes/cdc-phonet-handle-empty-phonet-header.patch ######################################################## # I2C @@ -582,22 +563,24 @@ patches.suse/bootsplash-scaler patches.suse/bootsplash-console-fix patches.drivers/elousb.patch + patches.suse/elousb-2.6.35-api-changes patches.fixes/input-add-acer-aspire-5710-to-nomux.patch patches.drivers/input-Add-LED-support-to-Synaptics-device + patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch ########################################################## # Sound ########################################################## + patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo + patches.drivers/alsa-hda-0019-Increase-default-buffer-size + + patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl ######################################################## # Other driver fixes ######################################################## - patches.fixes/ieee1394-sbp2_long_sysfs_ieee1394_id.patch - patches.fixes/parport-mutex - # suse-2.4 compatible crypto loop driver - patches.suse/twofish-2.6 # Allow setting maximum number of raw devices patches.suse/raw_device_max_minors_param.diff patches.suse/no-partition-scan @@ -614,19 +597,22 @@ # device-mapper ######################################################## patches.suse/dm-emulate-blkrrpart-ioctl - patches.suse/dm-raid45_2.6.27_20081027.patch + patches.suse/dm-raid45-26-Nov-2009.patch patches.suse/dmraid45-dm_dirty_log_create-api-fix patches.suse/dmraid45-dm_get_device-takes-fewer-arguments + patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md + patches.suse/dm-raid45-api-update-no-barriers patches.fixes/dm-mpath-reattach-dh patches.suse/dm-mpath-leastpending-path-update - patches.suse/dm-mpath-accept-failed-paths - patches.suse/dm-mpath-detach-existing-hardware-handler ++needs_update patches.suse/dm-mpath-accept-failed-paths ++needs_update patches.suse/dm-mpath-detach-existing-hardware-handler patches.suse/dm-mpath-null-pgs patches.fixes/dm-table-switch-to-readonly patches.suse/dm-mpath-evaluate-request-result-and-sense patches.fixes/dm-release-map_lock-before-set_disk_ro patches.suse/dm-mpath-no-activate-for-offlined-paths patches.suse/dm-mpath-no-partitions-feature + patches.suse/mpath-fix ######################################################## # md @@ -637,13 +623,19 @@ # Security stuff # ########################################################## - patches.fixes/keys-fix-rcu-no-lock-warning-in-keyctl_session_to_parent - patches.fixes/keys-fix-bug-in-keyctl_session_to_parent-if-parent-has-no-session-keyring ########################################################## # Audit ########################################################## + ########################################################## + # AppArmor + 
########################################################## + patches.apparmor/apparmor-compatibility-patch-for-v5-network-control + patches.apparmor/apparmor-compatibility-patch-for-v5-interface + patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace + + ######################################################## # Address space layout randomization ######################################################## @@ -652,19 +644,6 @@ # KDB v4.4 ######################################################## - patches.suse/kdb-common - patches.suse/kdb-x86 - patches.suse/kdb-ia64 - patches.suse/kdb-build-fixes - patches.suse/kdb-x86-build-fixes - patches.suse/kdb-usb-rework - patches.suse/kdb_fix_ia64_build.patch - patches.suse/kdb_dont_touch_i8042_early.patch - patches.suse/kdb-fix-assignment-from-incompatible-pointer-warnings - patches.suse/kdb-handle-nonexistance-keyboard-controller - patches.suse/kdb-fix-kdb_cmds-to-include-the-arch-common-macro - patches.suse/kdb-vm-api-changes-for-2-6-34 - ######################################################## # Other patches for debugging ######################################################## @@ -672,6 +651,7 @@ patches.suse/stack-unwind patches.suse/no-frame-pointer-select patches.arch/x86_64-unwind-annotations + patches.arch/i386-unwind-annotations ######################################################## # Kdump @@ -681,7 +661,6 @@ ######################################################## # cgroups ######################################################## - patches.suse/cgroup-disable-memory.patch ######################################################## # audit subsystem @@ -692,14 +671,24 @@ # Performance Monitoring, Tracing etc ######################################################## + patches.fixes/oprofile_bios_ctr.patch + + patches.trace/utrace-core + + patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch + ######################################################## # KVM patches ######################################################## + patches.fixes/kvm-ioapic.patch + patches.fixes/kvm-macos.patch + ######################################################## # Staging tree patches # new drivers that are going upstream ######################################################## + patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch ######################################################## # "fastboot" patches @@ -707,9 +696,7 @@ # moblin to try to speed up the boot process ######################################################## patches.suse/linux-2.6.29-dont-wait-for-mouse.patch - patches.suse/linux-2.6.29-enable-async-by-default.patch - patches.suse/linux-2.6.29-even-faster-kms.patch - patches.suse/linux-2.6.29-silence-acer-message.patch patches.suse/linux-2.6.29-kms-after-sata.patch patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch @@ -753,6 +740,7 @@ patches.xen/xen3-auto-xen-arch.diff patches.xen/xen3-auto-xen-drivers.diff patches.xen/xen3-auto-include-xen-interface.diff + patches.xen/xen3-auto-blktap2-pvops.diff # kconfig bits for xen patches.xen/xen3-auto-xen-kconfig.diff @@ -775,6 +763,7 @@ patches.xen/xen3-fixup-kconfig patches.xen/xen3-fixup-common patches.xen/xen3-fixup-arch-x86 + patches.xen/xen3-fixup-blktap2-pvops # ports of other patches patches.xen/xen3-patch-2.6.18 @@ -794,67 +783,64 @@ patches.xen/xen3-patch-2.6.32 patches.xen/xen3-patch-2.6.33 patches.xen/xen3-patch-2.6.34 + patches.xen/xen3-patch-2.6.35 + patches.xen/xen3-patch-2.6.36 + patches.xen/xen3-patch-2.6.37 + 
patches.xen/xen3-patch-2.6.38 patches.xen/xen3-seccomp-disable-tsc-option patches.xen/xen3-x86-mcp51-no-dac patches.xen/xen3-x86-mark_rodata_rw.patch - patches.xen/xen3-x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing - patches.xen/xen3-x86-64-compat-test-rax-for-the-syscall-number-not-eax - patches.xen/xen3-acpi_processor_check_maxcpus.patch - patches.xen/xen3-kdb-x86 patches.xen/xen3-stack-unwind patches.xen/xen3-x86_64-unwind-annotations -# patches.xen/xen3-x86_cpufreq_make_trace_power_frequency_cpufreq_driver_independent.patch # bugfixes and enhancements patches.xen/xen-balloon-max-target - patches.xen/xen-modular-blktap + patches.xen/xen-block-backends-cleanup patches.xen/xen-blkback-bimodal-suse patches.xen/xen-blkif-protocol-fallback-hack patches.xen/xen-blkback-cdrom + patches.xen/xen-blktap-modular patches.xen/xen-blktap-write-barriers + patches.xen/xen-blktap2-use-after-free patches.xen/xen-op-packet patches.xen/xen-blkfront-cdrom patches.xen/xen-sections patches.xen/xen-swiotlb-heuristics patches.xen/xen-kconfig-compat patches.xen/xen-cpufreq-report - patches.xen/xen-staging-build patches.xen/xen-sysdev-suspend patches.xen/xen-ipi-per-cpu-irq patches.xen/xen-virq-per-cpu-irq patches.xen/xen-clockevents - patches.xen/xen-no-reboot-vector patches.xen/xen-spinlock-poll-early + patches.xen/xen-pcpu-hotplug + patches.xen/xen-mem-hotplug patches.xen/xen-configurable-guest-devices patches.xen/xen-netback-nr-irqs patches.xen/xen-netback-notify-multi patches.xen/xen-netback-generalize patches.xen/xen-netback-multiple-tasklets patches.xen/xen-netback-kernel-threads - patches.xen/xen-netfront-ethtool + patches.xen/xen-tmem-v1 + patches.xen/xen-cxgb3 patches.xen/xen-dcdbas patches.xen/xen-floppy patches.xen/xen-kzalloc patches.xen/xen-unpriv-build + patches.xen/xen-setup-gsi + patches.xen/xen-watchdog patches.xen/xen-x86-panic-no-reboot patches.xen/xen-x86-dcr-fallback - patches.xen/xen-x86-consistent-nmi patches.xen/xen-x86-no-lapic patches.xen/xen-x86-pmd-handling patches.xen/xen-x86-bigmem - patches.xen/xen-x86-machphys-prediction patches.xen/xen-x86-exit-mmap patches.xen/xen-x86-per-cpu-vcpu-info + patches.xen/xen-x86-msr-on-pcpu patches.xen/xen-x86_64-pgd-pin patches.xen/xen-x86_64-pgd-alloc-order patches.xen/xen-x86_64-dump-user-pgt patches.xen/xen-x86_64-note-init-p2m - - patches.xen/pcifront-enforce-scan.patch - patches.xen/pcifront-claim.patch - patches.xen/pcifront-dont-race-udev.patch - patches.xen/pcifront-irq-not-evtchn.patch + patches.xen/xen-x86_64-unmapped-initrd patches.qubes/nuke_balloon_minimum_target.patch - patches.fixes/fixes-for-make-3.82.patch - patches.fixes/netbk-for-new-udev.patch diff --git a/version b/version index c6a7435..bb7b1b6 100644 --- a/version +++ b/version @@ -1 +1 @@ -2.6.34.1 +2.6.38.3
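The series.conf hunks above rearrange the patch queue that gets stacked on top of the vanilla 2.6.38.3 tree: plain entries name a patch file to apply in order, lines starting with "#" are section comments, and entries carrying a guard prefix such as "+needs_update-33" or "+trenn" are parked rather than applied as-is. A minimal sketch of walking such a series file follows, assuming only that simple comment/guard convention; the apply_series helper and the guard-means-skip reading are illustrative assumptions, not part of this repository's actual build tooling.

#!/usr/bin/env python3
# Minimal sketch: walk a series.conf-style patch list and apply the active
# entries with "patch -p1".  Assumes (not confirmed by this repo's scripts)
# that lines starting with "#" are comments and that a leading "+<tag>" guard
# marks an entry that is currently not applied.
import subprocess
import sys
from pathlib import Path

def active_patches(series_path):
    for raw in Path(series_path).read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#"):
            continue              # blank line or section comment
        if line.startswith("+"):
            continue              # guarded entry, e.g. "+needs_update-33 patches.suse/..."
        yield line.split()[0]     # first token is the patch path

def apply_series(series_path, src_tree):
    for patch in active_patches(series_path):
        print("applying", patch)
        with open(patch, "rb") as fh:
            subprocess.run(["patch", "-p1", "-d", src_tree],
                           stdin=fh, check=True)

if __name__ == "__main__":
    apply_series(sys.argv[1] if len(sys.argv) > 1 else "series.conf",
                 sys.argv[2] if len(sys.argv) > 2 else ".")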